Dataset row schema: text (string, 12 to 1.05M chars) | repo_name (string, 5 to 86 chars) | path (string, 4 to 191 chars) | language (1 class) | license (15 classes) | size (int32, 12 to 1.05M) | keyword (list, 1 to 23 items) | text_hash (string, 64 chars)
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 Wintermute0110 <wintermute0110@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
# Advanced Emulator Launcher platform and emulator information.
#
# This module contains basic platform information, DAT file information, etc.
# It has no external dependencies at all and can be used from anywhere.
# External tools are used to test the validity of the information in this file.
# -------------------------------------------------------------------------------------------------
# New platform engine
# -------------------------------------------------------------------------------------------------
DAT_MAME = 'MAME'
DAT_NOINTRO = 'No-Intro'
DAT_REDUMP = 'Redump'
DAT_LIBRETRO = 'Libretro'
DAT_NONE = None
DEFAULT_PLAT_TGDB = '0'
DEFAULT_PLAT_MOBYGAMES = '0'
DEFAULT_PLAT_SCREENSCRAPER = '0'
DEFAULT_PLAT_GAMEFAQS = '0'
PLATFORM_MAME_LONG = 'MAME'
PLATFORM_MAME_SHORT = 'arcade-mame'
PLATFORM_MAME_COMPACT = 'mame'
PLATFORM_UNKNOWN_LONG = 'Unknown'
PLATFORM_UNKNOWN_SHORT = 'unknown'
PLATFORM_UNKNOWN_COMPACT = 'unknown'
class Platform:
def __init__(self, name, shortname, compactname, aliasof = None,
TGDB_plat = None, MG_plat = None, SS_plat = None, GF_plat = None,
DAT = DAT_NONE, DAT_prefix = ''):
# Autocompleted later with data from the short name.
# Short names are "category-compact_name"
self.category = ''
self.long_name = name
self.short_name = shortname
self.compact_name = compactname
# Always use the compact name when defining aliases. Otherwise bad things will happen.
self.aliasof = aliasof
self.TGDB_plat = TGDB_plat
self.MG_plat = MG_plat
self.SS_plat = SS_plat
self.GF_plat = GF_plat
self.DAT = DAT
self.DAT_prefix = DAT_prefix
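# Example alias entry, taken verbatim from the table below: the NEC TurboGrafx 16
# is the American PC Engine, so it only supplies the first four arguments and
# points at its parent by compact name:
#   Platform('NEC TurboGrafx 16', 'nec-tg16', 'tg16', 'pce')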
# * From this list create dictionaries with indices to access platform information.
#
# * Sorted alphabetically by long name. Alphabetical order is verified with the script
# ./dev-core/list_AEL_platforms.py
#
# * To be compatible with Retroplayer and Kodi artwork database, anything that can be launched
# by Retroarch must be a platform, including Doom, CaveStory, etc.
#
# * A platform is something that has ROMs to launch. Standalone cores do not need a platform,
#   they are Kodi addons with their own artwork. CHECK THIS!
#
# * Platform names must have filesystem-safe characters.
#
# * When possible use No-Intro DAT-o-MATIC names. Fall back to Wikipedia names.
#
# * Unsupported scraper platforms must be set to None. The conversion function will then
#   translate None to the appropriate value for the scraper.
#
# * The Offline Scraper database filenames use the long_name. The platform icons
# PNG/JPG files also use the platform long_name.
#
# Get TGDB platform list from script ./dev-scrapers/scrap_TGDB_list_platforms.py
# Get MobyGames platform list from script ./dev-scrapers/scrap_MobyGames_list_platforms.py
# Get ScreenScraper platform list from script ./dev-scrapers/scrap_ScreenScraper_list_platforms.py
# Get GameFAQs platform list from https://www.gamefaqs.com/search_advanced?game=ar
#
# Default values: Platform('', '', '', None, None, None, None, None, DAT_NONE, ''),
#
AEL_platforms = [
# --- 3DO Interactive Multiplayer ---
Platform('3DO Interactive Multiplayer', 'console-3do', '3do', None, '25', '35', '29', '61',
DAT_REDUMP, 'Panasonic - 3DO Interactive Multiplayer - Datfile'),
# --- Amstrad ---
Platform('Amstrad CPC', 'computer-cpc', 'cpc', None, '4914', '60', '65', '46', DAT_NONE),
# --- Atari ---
Platform('Atari 2600', 'atari-a2600', 'a2600', None, '22', '28', '26', '6',
DAT_NOINTRO, 'Atari - 2600'),
Platform('Atari 5200', 'atari-a5200', 'a5200', None, '26', '33', '40', '20',
DAT_NOINTRO, 'Atari - 5200'),
Platform('Atari 7800', 'atari-a7800', 'a7800', None, '27', '34', '41', '51',
DAT_NOINTRO, 'Atari - 7800'),
# Atari 8-bit includes: Atari 400, Atari 800, Atari 1200XL, Atari 65XE, Atari 130XE, Atari XEGS
Platform('Atari 8-bit', 'computer-atari-8bit', 'atari-8bit', None, '30', '39', '43', None, DAT_NONE),
# Atari Jaguar No-Intro DATs:
# *) Atari - Jaguar (J64) (20190518-213240).dat
# *) Atari - Jaguar (J64) (Parent-Clone) (Parent-Clone) (20190518-213240).dat
# *) Atari - Jaguar (ROM) (20190518-213240).dat
Platform('Atari Jaguar', 'atari-jaguar', 'jaguar', None, '28', '17', '27', '72',
DAT_NOINTRO, 'Atari - Jaguar (J64) (Parent-Clone)'),
Platform('Atari Jaguar CD', 'atari-jaguarcd', 'jaguarcd', None, '29', '17', '171', '82',
DAT_REDUMP, 'Atari - Jaguar CD Interactive Multimedia System - Datfile'),
Platform('Atari Lynx', 'atari-lynx', 'lynx', None, '4924', '18', '28', '58',
DAT_NOINTRO, 'Atari - Lynx'),
Platform('Atari ST', 'computer-atari-st', 'atari-st', None, '4937', '24', '42', '38',
DAT_NOINTRO, 'Atari - ST'),
# --- Bandai ---
Platform('Bandai WonderSwan', 'bandai-wswan', 'wswan', None, '4925', '48', '45', '90',
DAT_NOINTRO, 'Bandai - WonderSwan'),
Platform('Bandai WonderSwan Color', 'bandai-wswancolor', 'wswancolor', None, '4926', '49', '46', '95',
DAT_NOINTRO, 'Bandai - WonderSwan Color'),
Platform('Benesse Pocket Challenge V2', 'console-bpc', 'bpc', None, None, None, None, None,
DAT_NOINTRO, 'Benesse - Pocket Challenge V2'),
# --- Casio ---
Platform('Casio Loopy', 'console-loopy', 'loopy', None, None, '124', '98', None,
DAT_NOINTRO, 'Casio - Loopy'),
Platform('Casio PV-1000', 'console-pv1000', 'pv1000', None, '4964', '125', '74', None,
DAT_NOINTRO, 'Casio - PV-1000'),
# --- Coleco ---
Platform('Coleco Colecovision', 'console-cvision', 'cvision', None, '31', '29', '48', '29',
DAT_NOINTRO, 'Coleco - ColecoVision'),
# --- Commodore ---
# The Commodore 16 and the Plus/4 are the same computer, the Plus/4 having more memory
# and better ROM software. Make the Plus/4 an alias of the Commodore 16.
# No-Intro has a DAT for the Plus/4 and not for the C16.
# MobyGames "Commodore 16, Plus/4". Not found in GameFAQs.
Platform('Commodore 16', 'computer-16', 'c16', None, None, '115', '99', None,
DAT_NOINTRO, 'Commodore - Plus-4'),
# Commodore 64 No-Intro DATs:
# * Commodore - 64 (Parent-Clone) (20151122-035618).dat
# * Commodore - 64 (PP) (Parent-Clone) (20131204-081826).dat
# * Commodore - 64 (Tapes) (Parent-Clone) (20180307-232531).dat
Platform('Commodore 64', 'computer-c64', 'c64', None, '40', '27', '66', '24',
DAT_NOINTRO, 'Commodore - 64'),
Platform('Commodore Amiga', 'computer-amiga', 'amiga', None, '4911', '19', '64', '39',
DAT_NOINTRO, 'Commodore - Amiga'),
# The CD32 is part of a family of Amiga computers and is of similar specification to the
# Amiga 1200 computer.
Platform('Commodore Amiga CD32', 'console-cd32', 'cd32', None, '4947', '56', '130', '70',
DAT_REDUMP, 'Commodore - Amiga CD32 - Datfile'),
# The CDTV is essentially a Commodore Amiga 500 home computer with a CD-ROM drive and
# remote control.
Platform('Commodore Amiga CDTV', 'console-cdtv', 'cdtv', None, None, '83', '129', None,
DAT_REDUMP, 'Commodore - Amiga CDTV - Datfile'),
# The PET is the first computer sold by Commodore.
Platform('Commodore PET', 'computer-pet', 'pet', None, None, None, None, None),
# MobyGames "Commodore 16, Plus/4". Not found in GameFAQs.
Platform('Commodore Plus-4', 'computer-plus4', 'plus4', 'c16'),
Platform('Commodore VIC-20', 'computer-vic20', 'vic20', None, '4945', '43', '73', '11',
DAT_NOINTRO, 'Commodore - VIC-20'),
# --- Emerson ---
Platform('Emerson Arcadia 2001', 'console-arcadia2001', 'arcadia2001', None, '4963', '162', '94', None,
DAT_NOINTRO, 'Emerson - Arcadia 2001'),
Platform('Entex Adventure Vision', 'console-avision', 'avision', None, '4974', '210', '78', None,
DAT_NOINTRO, 'Entex - Adventure Vision'),
Platform('Epoch Super Cassette Vision', 'console-scvision', 'scvision', None, '4966', None, '67', None,
DAT_NOINTRO, 'Epoch - Super Cassette Vision'),
# --- Fairchild ---
Platform('Fairchild Channel F', 'console-channelf', 'channelf', None, '4928', '76', '80', None,
DAT_NOINTRO, 'Fairchild - Channel F'),
# --- Fujitsu ---
Platform('Fujitsu FM Towns Marty', 'console-fmtmarty', 'fmtmarty', None, '4932', '102', '97', '55', DAT_NONE),
Platform('Funtech Super Acan', 'console-superacan', 'superacan', None, None, '110', '100', None,
DAT_NOINTRO, 'Funtech - Super Acan'),
Platform('GamePark GP32', 'console-gp32', 'gp32', None, None, '108', '101', None,
DAT_NOINTRO, 'GamePark - GP32'),
# --- GCE ---
Platform('GCE Vectrex', 'console-vectrex', 'vectrex', None, '4939', '37', '102', '34',
DAT_NOINTRO, 'GCE - Vectrex'),
Platform('Hartung Game Master', 'console-gamemaster', 'gamemaster', None, None, None, '103', None,
DAT_NOINTRO, 'Hartung - Game Master'),
# The iQue Player is based on the Nintendo 64, but uses system-on-a-chip technology to reduce size.
# It plays Nintendo 64 games specifically ported to the system.
# iQue No-Intro DATs:
# *) iQue - iQue (CDN) (20190927-125114).dat
# *) iQue - iQue (Decrypted) (20190927-125114)
# *) iQue - iQue (Decrypted) (Parent-Clone) (Parent-Clone) (20190927-125114)
Platform('iQue iQue Player', 'console-ique', 'ique', 'n64', None, None, None, None,
DAT_NOINTRO, 'iQue - iQue (CDN) (Parent-Clone)'),
Platform('Konami Picno', 'console-picno', 'picno', None, None, None, None, None,
DAT_NOINTRO, 'Konami - Picno'),
Platform('LeapFrog LeapPad', 'console-leappad', 'leappad', None, None, None, None, None,
DAT_NOINTRO, 'LeapFrog - LeapPad'),
Platform('LeapFrog Leapster Learning Game System', 'console-llgs', 'llgs', None, None, None, None, None,
DAT_NOINTRO, 'LeapFrog - Leapster Learning Game System'),
# Platform('LeapFrog My First LeapPad', 'console-mfleappad', 'mfleappad', None, None, None, None, None,
# DAT_NOINTRO, 'LeapFrog - My First LeapPad'),
# --- Libretro ---
# Use nxengine and not cavestory because in the future there could be nxengine-evo.
# nxengine is able to launch several versions of the game so it's a ROM launcher.
Platform('Libretro Cave Story (NX Engine)', 'games-nxengine', 'nxengine', None, None, None, None, None, DAT_LIBRETRO),
Platform('Libretro ChaiLove', 'games-chailove', 'chailove', None, None, None, None, None, DAT_LIBRETRO),
Platform('Libretro Doom', 'games-doom', 'doom', None, None, None, None, None, DAT_LIBRETRO),
Platform('Libretro Doom 3', 'games-doom3', 'doom3', None, None, None, None, None, DAT_LIBRETRO),
Platform('Libretro EasyRPG', 'games-easyrpg', 'easyrpg', None, None, None, None, None, DAT_LIBRETRO),
Platform('Libretro Game and Watch', 'games-gw', 'gw', None, None, None, None, None, DAT_LIBRETRO),
Platform('Libretro Lutro', 'games-lutro', 'lutro', None, None, None, '206', None, DAT_LIBRETRO),
Platform('Libretro OpenLara', 'games-openlara', 'openlara', None, None, None, None, None, DAT_LIBRETRO),
Platform('Libretro Quake', 'games-quake', 'quake', None, None, None, None, None, DAT_LIBRETRO),
Platform('Libretro Quake 2', 'games-quake2', 'quake2', None, None, None, None, None, DAT_LIBRETRO),
Platform('Libretro Quake 3', 'games-quake3', 'quake3', None, None, None, None, None, DAT_LIBRETRO),
Platform('Libretro TIC-80', 'games-tic80', 'tic80', None, None, None, '222', None, DAT_LIBRETRO),
# --- Magnavox ---
Platform('Magnavox Odyssey2', 'console-odyssey2', 'odyssey2', None, '4927', '78', '104', '9',
DAT_NOINTRO, 'Magnavox - Odyssey2'),
# --- MAME/Arcade ---
Platform(PLATFORM_MAME_LONG, PLATFORM_MAME_SHORT, PLATFORM_MAME_COMPACT, None, '23', '143', '75', '2', DAT_MAME),
# --- Mattel ---
Platform('Mattel Intellivision', 'console-ivision', 'ivision', None, '32', '30', '115', '16',
DAT_NOINTRO, 'Mattel - Intellivision'),
# --- Microsoft ---
Platform('Microsoft MS-DOS', 'microsoft-msdos', 'msdos', None, '1', '2', '135', '19', DAT_NONE),
Platform('Microsoft MSX', 'microsoft-msx', 'msx', None, '4929', '57', '113', '40',
DAT_NOINTRO, 'Microsoft - MSX'),
Platform('Microsoft MSX2', 'microsoft-msx2', 'msx2', None, '4929', '57', '116', '40',
DAT_NOINTRO, 'Microsoft - MSX2'),
# Modern versions of Windows.
Platform('Microsoft Windows', 'microsoft-windows', 'windows', None, None, None, None, None, DAT_NONE),
# MobyGames differentiates Windows = '3' and Windows 3.x = '5'
Platform('Microsoft Windows 3.x', 'microsoft-windows3x', 'windows3x', None, '1', '3', '136', '19', DAT_NONE),
Platform('Microsoft Xbox', 'microsoft-xbox', 'xbox', None, '14', '13', '32', '98', DAT_NONE),
Platform('Microsoft Xbox 360', 'microsoft-xbox360', 'xbox360', None, '15', '69', '33', '111', DAT_NONE),
Platform('Microsoft Xbox One', 'microsoft-xboxone', 'xboxone', None, '4920', '142', None, '121', DAT_NONE),
# --- NEC ---
Platform('NEC PC Engine', 'nec-pce', 'pce', None, '34', '40', '31', '53',
DAT_NOINTRO, 'NEC - PC Engine - TurboGrafx 16'),
Platform('NEC PC Engine CDROM2', 'nec-pcecd', 'pcecd', None, '4955', '45', '114', '56',
DAT_REDUMP, 'NEC - PC Engine CD & TurboGrafx CD - Datfile'),
Platform('NEC PC-FX', 'nec-pcfx', 'pcfx', None, '4930', '59', '72', '79',
DAT_REDUMP, 'NEC - PC-FX & PC-FXGA - Datfile'),
Platform('NEC PC-FXGA', 'nec-pcfxga', 'pcfxga', 'pcfx'),
Platform('NEC SuperGrafx', 'nec-sgx', 'sgx', None, '34', '127', '105', '53',
DAT_NOINTRO, 'NEC - PC Engine SuperGrafx'),
Platform('NEC TurboGrafx 16', 'nec-tg16', 'tg16', 'pce'),
Platform('NEC TurboGrafx CD', 'nec-tg16cd', 'tg16cd', 'pcecd'),
# --- Nintendo ---
# No-Intro Nintendo 3DS DAT files:
# *) Nintendo - Nintendo 3DS (Decrypted) (20191109-080816)
# *) Nintendo - Nintendo 3DS (Digital) (20190801-212709)
# *) Nintendo - Nintendo 3DS (Digital) (CDN) (CDN) (20191110-064909)
# *) Nintendo - Nintendo 3DS (Digital) (CDN) (CDN) (Parent-Clone) (Parent-Clone) (20191110-064909)
# *) Nintendo - Nintendo 3DS (Digital) (CDN) (CIA) (20191110-064909)
# *) Nintendo - Nintendo 3DS (Digital) (CDN) (Console) (20191110-064909)
# *) Nintendo - Nintendo 3DS (Digital) (Parent-Clone) (20190801-212709)
# *) Nintendo - Nintendo 3DS (Encrypted) (20191109-080816)
# *) Nintendo - Nintendo 3DS (Encrypted) (Parent-Clone) (Parent-Clone) (20191109-080816)
Platform('Nintendo 3DS', 'nintendo-n3ds', 'n3ds', None, '4912', '101', '17', '116',
DAT_NOINTRO, 'Nintendo - Nintendo 3DS (Decrypted) (Parent-Clone)'),
# No-Intro Nintendo 64 DAT files:
# *) Nintendo - Nintendo 64 (BigEndian) (20190918-121135)
# *) Nintendo - Nintendo 64 (BigEndian) (Parent-Clone) (Parent-Clone) (20190918-121135)
# *) Nintendo - Nintendo 64 (ByteSwapped) (20190918-121135)
Platform('Nintendo 64', 'nintendo-n64', 'n64', None, '3', '9', '14', '84',
DAT_NOINTRO, 'Nintendo - Nintendo 64 (BigEndian) (Parent-Clone)'),
# Nintendo 64DD not found on MobyGames.
Platform('Nintendo 64DD', 'nintendo-n64dd', 'n64dd', None, '3', '9', '122', '92',
DAT_NOINTRO, 'Nintendo - Nintendo 64DD'),
# No-Intro Nintendo DS DAT files:
# *) Nintendo - Nintendo DS (Decrypted) (20191117-150815)
# *) Nintendo - Nintendo DS (Decrypted) (Parent-Clone) (Parent-Clone) (20191117-150815)
# *) Nintendo - Nintendo DS (Download Play) (20190825-082425)
# *) Nintendo - Nintendo DS (Download Play) (Parent-Clone) (20190825-082425)
# *) Nintendo - Nintendo DS (Encrypted) (20191117-150815)
Platform('Nintendo DS', 'nintendo-nds', 'nds', None, '8', '44', '15', '108',
DAT_NOINTRO, 'Nintendo - Nintendo DS (Decrypted) (Parent-Clone)'),
# No-Intro Nintendo DSi DAT files:
# *) Nintendo - Nintendo DSi (Decrypted) (20190503-112150)
# *) Nintendo - Nintendo DSi (Decrypted) (Parent-Clone) (Parent-Clone) (20190503-112150)
# *) Nintendo - Nintendo DSi (Digital) (20190813-061824)
# *) Nintendo - Nintendo DSi (Digital) (Parent-Clone) (20190813-061824)
# *) Nintendo - Nintendo DSi (Encrypted) (20190503-112150)
Platform('Nintendo DSi', 'nintendo-ndsi', 'ndsi', None, '8', '87', '15', '108',
DAT_NOINTRO, 'Nintendo - Nintendo DSi (Decrypted) (Parent-Clone)'),
Platform('Nintendo e-Reader', 'nintendo-ereader', 'ereader', None, None, None, '119', None,
DAT_NOINTRO, 'Nintendo - e-Reader'),
Platform('Nintendo Famicon', 'nintendo-famicon', 'famicon', 'nes'),
# FDS not found on MobyGames, make same as NES.
# FDS No-Intro DAT files:
# *) Nintendo - Family Computer Disk System (FDS) (20191109-080316)
# *) Nintendo - Family Computer Disk System (FDS) (Parent-Clone) (Parent-Clone) (20191109-080316)
# *) Nintendo - Family Computer Disk System (FDSStickBIN) (20191109-080316)
# *) Nintendo - Family Computer Disk System (FDSStickRAW) (20191109-080316)
# *) Nintendo - Family Computer Disk System (QD) (20191109-080316)
Platform('Nintendo Famicon Disk System', 'nintendo-fds', 'fds', None, '4936', '22', '106', '47',
DAT_NOINTRO, 'Nintendo - Family Computer Disk System (FDS) (Parent-Clone)'),
Platform('Nintendo GameBoy', 'nintendo-gb', 'gb', None, '4', '10', '9', '59',
DAT_NOINTRO, 'Nintendo - Game Boy'),
Platform('Nintendo GameBoy Advance', 'nintendo-gba', 'gba', None, '5', '12', '12', '91',
DAT_NOINTRO, 'Nintendo - Game Boy Advance'),
Platform('Nintendo GameBoy Color', 'nintendo-gbcolor', 'gbcolor', None, '41', '11', '10', '57',
DAT_NOINTRO, 'Nintendo - Game Boy Color'),
Platform('Nintendo GameCube', 'nintendo-gamecube', 'gamecube', None, '2', '14', '13', '99',
DAT_REDUMP, 'Nintendo - GameCube - Datfile'),
Platform('Nintendo NES', 'nintendo-nes', 'nes', None, '7', '22', '3', '41',
DAT_NOINTRO, 'Nintendo - Nintendo Entertainment System'),
# No-Intro New Nintendo 3DS DAT files:
# *) Nintendo - New Nintendo 3DS (Decrypted) (20190402-125456)
# *) Nintendo - New Nintendo 3DS (Digital) (20181009-100544)
# *) Nintendo - New Nintendo 3DS (Digital) (Parent-Clone) (20181009-100544)
# *) Nintendo - New Nintendo 3DS (Encrypted) (20190402-125456)
# *) Nintendo - New Nintendo 3DS (Encrypted) (Parent-Clone) (Parent-Clone) (20190402-125456)
Platform('Nintendo New Nintendo 3DS', 'nintendo-new3ds', 'new3ds', None, None, None, None, None,
DAT_NOINTRO, 'Nintendo - New Nintendo 3DS (Decrypted) (Parent-Clone)'),
# Pokemon Mini not found in GameFAQs.
Platform('Nintendo Pokemon Mini', 'nintendo-pokemini', 'pokemini', None, '4957', '152', '211', None,
DAT_NOINTRO, 'Nintendo - Pokemon Mini'),
Platform('Nintendo Satellaview', 'nintendo-satellaview', 'satellaview', None, None, None, '107', None,
DAT_NOINTRO, 'Nintendo - Satellaview'),
Platform('Nintendo SNES', 'nintendo-snes', 'snes', None, '6', '15', '4', '63',
DAT_NOINTRO, 'Nintendo - Super Nintendo Entertainment System (Combined)'),
Platform('Nintendo Sufami Turbo', 'nintendo-sufami', 'sufami', None, None, None, '108', None,
DAT_NOINTRO, 'Nintendo - Sufami Turbo'),
Platform('Nintendo Switch', 'nintendo-switch', 'switch', None, '4971', '203', None, '124', DAT_NONE),
Platform('Nintendo Virtual Boy', 'nintendo-vb', 'vb', None, '4918', '38', '11', '83',
DAT_NOINTRO, 'Nintendo - Virtual Boy'),
# No-Intro has some DATs for Wii and Wii U with tags Digital, CDN and WAD.
Platform('Nintendo Wii', 'nintendo-wii', 'wii', None, '9', '82', '16', '114', DAT_NONE),
Platform('Nintendo Wii U', 'nintendo-wiiu', 'wiiu', None, '38', '132', '18', '118', DAT_NONE),
Platform('Ouya Ouya', 'console-ouya', 'ouya', None, '4921', '144', None, None,
DAT_NOINTRO, 'Ouya - Ouya'),
# --- Philips ---
# The Philips Videopac G7000 is the European name of the Magnavox Odyssey2.
Platform('Philips Videopac G7000', 'console-g7000', 'g7000', 'odyssey2'),
# Alias of g7000 in ScreenScraper. Not found in GameFAQs.
Platform('Philips Videopac Plus G7400', 'console-g7400', 'g7400', None, None, '128', '104', None,
DAT_NOINTRO, 'Philips - Videopac+'),
# --- RCA ---
Platform('RCA Studio II', 'console-studio2', 'studio2', None, '4967', '113', None, None,
DAT_NOINTRO, 'RCA - Studio II'),
# --- ScummVM ---
Platform('ScummVM', 'games-scummvm', 'scummvm', None, None, None, '123', None, DAT_NONE),
# --- Sega ---
Platform('Sega 32X', 'sega-32x', '32x', None, '33', '21', '19', '74',
DAT_NOINTRO, 'Sega - 32X'),
# The Advanced Pico Beena is an upgraded Sega PICO.
Platform('Sega Beena', 'sega-beena', 'beena', None, None, None, None, None,
DAT_NOINTRO, 'Sega - Beena'),
Platform('Sega Dreamcast', 'sega-dreamcast', 'dreamcast', None, '16', '8', '23', '67',
DAT_REDUMP, 'Sega - Dreamcast - Datfile'),
Platform('Sega Game Gear', 'sega-gamegear', 'gamegear', None, '20', '25', '21', '62',
DAT_NOINTRO, 'Sega - Game Gear'),
Platform('Sega Genesis', 'sega-genesis', 'genesis', 'megadrive'),
Platform('Sega Master System', 'sega-sms', 'sms', None, '35', '26', '2', '49',
DAT_NOINTRO, 'Sega - Master System - Mark III'),
Platform('Sega Mega Drive', 'sega-megadrive', 'megadrive', None, '36', '16', '1', '54',
DAT_NOINTRO, 'Sega - Mega Drive - Genesis'),
Platform('Sega MegaCD', 'sega-megacd', 'megacd', None, '21', '20', '20', '65',
DAT_REDUMP, 'Sega - Mega CD & Sega CD - Datfile'),
Platform('Sega PICO', 'sega-pico', 'pico', None, '4958', '103', None, None,
DAT_NOINTRO, 'Sega - PICO'),
Platform('Sega Saturn', 'sega-saturn', 'saturn', None, '17', '23', '22', '76',
DAT_REDUMP, 'Sega - Saturn - Datfile'),
# The SG-1000 was released in several forms, including the SC-3000 computer and
# the redesigned SG-1000 II.
Platform('Sega SC-3000', 'sega-sc3000', 'sc3000', 'sg1000'),
Platform('Sega SegaCD', 'sega-segacd', 'segacd', 'megacd'),
Platform('Sega SG-1000', 'sega-sg1000', 'sg1000', None, '4949', '114', '109', '43',
DAT_NOINTRO, 'Sega - SG-1000'),
# --- Sharp ---
Platform('Sharp X68000', 'computer-x68k', 'x68k', None, '4931', '106', '79', '52', DAT_NONE),
# --- Sinclair ---
Platform('Sinclair ZX Spectrum', 'computer-spectrum', 'spectrum', None, '4913', '41', '76', '35', DAT_NONE),
Platform('Sinclair ZX Spectrum Plus 3', 'computer-spectrump3', 'spectrump3', None, None, None, None, None,
DAT_NOINTRO, 'Sinclair - ZX Spectrum +3'),
# I think the ZX80 and the ZX81 are incompatible computers.
Platform('Sinclair ZX80', 'computer-zx80', 'zx80', None, None, '118', None, None, DAT_LIBRETRO),
Platform('Sinclair ZX81', 'computer-zx81', 'zx81', None, None, '119', '77', None, DAT_LIBRETRO),
# --- SNK ---
# MobyGames has a platform Neo Geo = '36'
# ScreenScraper has a platform Neo Geo AES = '142'
# GameFAQs has a platform NeoGeo = '64'
Platform('SNK Neo-Geo AES', 'snk-aes', 'aes', 'mame'),
Platform('SNK Neo-Geo CD', 'snk-neocd', 'neocd', None, '4956', '54', '70', '68',
DAT_REDUMP, 'SNK - Neo Geo CD - Datfile'),
# ScreenScraper has a platform for Neo Geo MVS = '68'
Platform('SNK Neo-Geo MVS', 'snk-mvs', 'mvs', 'mame'),
Platform('SNK Neo-Geo Pocket', 'snk-ngp', 'ngp', None, '4922', '52', '25', None,
DAT_NOINTRO, 'SNK - Neo Geo Pocket'),
Platform('SNK Neo-Geo Pocket Color', 'snk-ngpcolor', 'ngpcolor', None, '4923', '53', '82', '89',
DAT_NOINTRO, 'SNK - Neo Geo Pocket Color'),
# --- SONY ---
Platform('Sony PlayStation', 'sony-psx', 'psx', None, '10', '6', '57', '78',
DAT_REDUMP, 'Sony - PlayStation - Datfile'),
Platform('Sony PlayStation 2', 'sony-ps2', 'ps2', None, '11', '7', '58', '94',
DAT_REDUMP, 'Sony - PlayStation 2 - Datfile'),
Platform('Sony PlayStation 3', 'sony-ps3', 'ps3', None, '12', '81', '59', '113', DAT_NONE),
Platform('Sony PlayStation 4', 'sony-ps4', 'ps4', None, '4919', '141', None, '120', DAT_NONE),
# No-Intro has PSP DATs:
# *) Sony - PlayStation Portable (20191005-125849)
# *) Sony - PlayStation Portable (Parent-Clone) (20191005-125849)
# *) Sony - PlayStation Portable (PSN) (Decrypted) (20180929-050404)
# *) Sony - PlayStation Portable (PSN) (Encrypted) (20190111-145824)
# *) Sony - PlayStation Portable (PSN) (Encrypted) (Parent-Clone) (20190111-145824)
# *) Sony - PlayStation Portable (PSX2PSP) (20130318-035538)
# *) Sony - PlayStation Portable (PSX2PSP) (Parent-Clone) (20130318-035538)
# *) Sony - PlayStation Portable (UMD Music) (20180911-072923)
# *) Sony - PlayStation Portable (UMD Music) (Parent-Clone) (20180911-072923)
# *) Sony - PlayStation Portable (UMD Video) (20191023-221355)
# *) Sony - PlayStation Portable (UMD Video) (Parent-Clone) (20191023-221355)
#
# Should the Redump or the No-Intro DAT be used for PSP?
Platform('Sony PlayStation Portable', 'sony-psp', 'psp', None, '13', '46', '61', '109',
DAT_REDUMP, 'Sony - PlayStation Portable - Datfile'),
# No-Intro has PS Vita DATs.
Platform('Sony PlayStation Vita', 'sony-psvita', 'psvita', None, '39', '105', '62', '117', DAT_NONE),
Platform('Tiger Game.com', 'console-tigergame', 'tigergame', None, '4940', '50', '121', None,
DAT_NOINTRO, 'Tiger - Game.com'),
Platform('VTech CreatiVision', 'console-creativision', 'creativision', None, None, '212', None, None,
DAT_NOINTRO, 'VTech - CreatiVision'),
Platform('VTech V.Flash', 'console-vflash', 'vflash', None, None, '189', None, None,
DAT_REDUMP, 'VTech - V.Flash & V.Smile Pro - Datfile'),
Platform('VTech V.Smile', 'console-vsmile', 'vsmile', None, None, '42', '120', None,
DAT_NOINTRO, 'VTech - V.Smile'),
Platform('VTech V.Smile Pro', 'console-vsmilepro', 'vsmilepro', 'vflash'),
Platform('Watara Supervision', 'console-supervision', 'supervision', None, '4959', '109', '207', None,
DAT_NOINTRO, 'Watara - Supervision'),
Platform('Zeebo Zeebo', 'console-zeebo', 'zeebo', None, None, '88', None, None,
DAT_NOINTRO, 'Zeebo - Zeebo'),
# --- Unknown ---
Platform(PLATFORM_UNKNOWN_LONG, PLATFORM_UNKNOWN_SHORT, PLATFORM_UNKNOWN_COMPACT),
]
# --- Add category to platform objects ---
# The category is the first part of the short name.
for p_obj in AEL_platforms:
p_obj.category = p_obj.short_name.split('-')[0]
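# For example, short_name 'console-3do' yields category 'console',
# 'computer-cpc' yields 'computer' and 'arcade-mame' yields 'arcade'.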
# Dictionaries for fast access to the platform information.
# Also, platform long name list for select() dialogs.
platform_long_to_index_dic = {}
platform_short_to_index_dic = {}
platform_compact_to_index_dic = {}
AEL_platform_list = []
for index, p_obj in enumerate(AEL_platforms):
platform_long_to_index_dic[p_obj.long_name] = index
platform_short_to_index_dic[p_obj.short_name] = index
platform_compact_to_index_dic[p_obj.compact_name] = index
AEL_platform_list.append(p_obj.long_name)
# Returns the platform numerical index from the platform name. If the platform name is not
# found then returns the index of the 'Unknown' platform.
# platform may be a long_name, short_name or compact_name; all three name spaces are
# searched efficiently using the dictionaries above.
def get_AEL_platform_index(platform):
try:
return platform_long_to_index_dic[platform]
except KeyError:
pass
try:
return platform_short_to_index_dic[platform]
except KeyError:
pass
try:
return platform_compact_to_index_dic[platform]
except KeyError:
pass
return platform_long_to_index_dic[PLATFORM_UNKNOWN_LONG]
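# Usage sketch (names taken from the platform table above); all three name
# spaces resolve to the same index, and unknown names fall back to 'Unknown':
#   get_AEL_platform_index('Sega Mega Drive')   # by long name
#   get_AEL_platform_index('sega-megadrive')    # by short name
#   get_AEL_platform_index('megadrive')         # by compact name
#   get_AEL_platform_index('no such platform')  # index of PLATFORM_UNKNOWN_LONG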
# NOTE: must take into account platform aliases.
# '0' means any platform in TGDB and must be returned when there is no platform matching.
def AEL_platform_to_TheGamesDB(platform_long_name):
if platform_long_name in platform_long_to_index_dic:
pobj = AEL_platforms[platform_long_to_index_dic[platform_long_name]]
else:
# Platform not found.
return DEFAULT_PLAT_TGDB
scraper_platform = pobj.TGDB_plat
# Check if platform is an alias.
# If alias does not have specific platform return platform of parent.
if pobj.aliasof is not None and scraper_platform is None:
parent_idx = platform_compact_to_index_dic[pobj.aliasof]
parent_long_name = AEL_platforms[parent_idx].long_name
return AEL_platform_to_TheGamesDB(parent_long_name)
# If platform is None then return default platform
return DEFAULT_PLAT_TGDB if scraper_platform is None else scraper_platform
# * The MobyGames API cannot be used without a valid platform.
# * If '0' is used as the Unknown platform then MobyGames returns an HTTP error
#   "HTTP Error 422: UNPROCESSABLE ENTITY"
# * If '' is used as the Unknown platform then MobyGames returns an HTTP error
#   "HTTP Error 400: BAD REQUEST"
# * The solution is to use '0' as the Unknown platform. AEL will detect this and
#   will remove the '&platform={}' parameter from the search URL.
def AEL_platform_to_MobyGames(platform_long_name):
if platform_long_name in platform_long_to_index_dic:
pobj = AEL_platforms[platform_long_to_index_dic[platform_long_name]]
else:
return DEFAULT_PLAT_MOBYGAMES
scraper_platform = pobj.MG_plat
if pobj.aliasof is not None and scraper_platform is None:
parent_idx = platform_compact_to_index_dic[pobj.aliasof]
parent_long_name = AEL_platforms[parent_idx].long_name
return AEL_platform_to_MobyGames(parent_long_name)
return DEFAULT_PLAT_MOBYGAMES if scraper_platform is None else scraper_platform
def AEL_platform_to_ScreenScraper(platform_long_name):
if platform_long_name in platform_long_to_index_dic:
pobj = AEL_platforms[platform_long_to_index_dic[platform_long_name]]
else:
return DEFAULT_PLAT_SCREENSCRAPER
scraper_platform = pobj.SS_plat
if pobj.aliasof is not None and scraper_platform is None:
parent_idx = platform_compact_to_index_dic[pobj.aliasof]
parent_long_name = AEL_platforms[parent_idx].long_name
return AEL_platform_to_ScreenScraper(parent_long_name)
return DEFAULT_PLAT_SCREENSCRAPER if scraper_platform is None else scraper_platform
# Platform '0' means all platforms in GameFAQs.
def AEL_platform_to_GameFAQs(platform_long_name):
if platform_long_name in platform_long_to_index_dic:
pobj = AEL_platforms[platform_long_to_index_dic[platform_long_name]]
else:
return DEFAULT_PLAT_GAMEFAQS
scraper_platform = pobj.GF_plat
if pobj.aliasof is not None and scraper_platform is None:
parent_idx = platform_compact_to_index_dic[pobj.aliasof]
parent_long_name = AEL_platforms[parent_idx].long_name
return AEL_platform_to_GameFAQs(parent_long_name)
return DEFAULT_PLAT_GAMEFAQS if scraper_platform is None else scraper_platform
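# Alias resolution sketch for the four converters above, with values traced
# from the platform table (illustrative, not normative):
#   AEL_platform_to_TheGamesDB('Sega Genesis') -> 'genesis' is an alias of
#   'megadrive' and has no scraper IDs of its own, so the call recurses with
#   'Sega Mega Drive' and returns '36'.
#   AEL_platform_to_TheGamesDB('no such platform') -> DEFAULT_PLAT_TGDB ('0').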
# -------------------------------------------------------------------------------------------------
# Miscellaneous emulator and gamesys (platforms) supported.
# -------------------------------------------------------------------------------------------------
def emudata_get_program_arguments(app_name):
# Based on the application name, retrieve the default arguments.
applications = {
'mame' : '"$rombasenoext$"',
'mednafen' : '-fs 1 "$rom$"',
'mupen64plus' : '--nogui --noask --noosd --fullscreen "$rom$"',
'nestopia' : '"$rom$"',
'xbmc' : 'PlayMedia($rom$)',
'kodi' : 'PlayMedia($rom$)',
'retroarch' : '-L /path/to/core -f "$rom$"',
'yabause' : '-a -f -i "$rom$"',
}
for app_key in applications:
if app_name.lower().find(app_key) >= 0:
return applications[app_key]
return '"$rom$"'
def emudata_get_program_extensions(app_name):
# Based on the application name, retrieve the recognized extensions.
app_extensions = {
'mame' : 'zip|7z',
'mednafen' : 'zip|cue',
'mupen64plus': 'z64|zip|n64',
'nestopia' : 'nes|zip',
'retroarch' : 'zip|cue',
'yabause' : 'cue',
}
for app_key in app_extensions:
if app_name.lower().find(app_key) >= 0:
return app_extensions[app_key]
return ''
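# Example lookups (matching is a case-insensitive substring search on the
# application name, so full paths work too):
#   emudata_get_program_arguments('/usr/bin/mednafen') -> '-fs 1 "$rom$"'
#   emudata_get_program_extensions('RetroArch')        -> 'zip|cue'
#   emudata_get_program_arguments('unknown-emu')       -> '"$rom$"' (default)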
repo_name: Wintermute0110/plugin.program.advanced.emulator.launcher
path: resources/platforms.py
language: Python | license: gpl-2.0 | size: 33,069
keyword: ["Jaguar"]
text_hash: a7593507ad1e9e8e4a0243a885e5b811cfd6d91f07516c877cca43eae49e1b08
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project Compiler
#
import os, sys, re, shutil, time, run, sgmllib, codecs, tempfile, subprocess
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
sys.path.append(os.path.abspath(os.path.join(template_dir,'..')))
sys.path.append(os.path.abspath(os.path.join(template_dir,'..', 'common')))
from tiapp import *
import jspacker
from csspacker import CSSPacker
import traceback
try:
import json
except:
import simplejson as json
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store', '_svn'];
ignoreDirs = ['iphone', 'android', 'mobileweb', '.git', '.svn', 'CVS'];
HEADER = """/**
* Appcelerator Titanium Mobile
* This is generated code. Do not modify. Your changes *will* be lost.
* Generated code is Copyright (c) 2009-2012 by Appcelerator, Inc.
* All Rights Reserved.
*/
#import <Foundation/Foundation.h>
"""
INTERFACE_HEADER= """
@interface ApplicationRouting : NSObject {
}
+ (NSData*) resolveAppAsset:(NSString*)path;
"""
IMPL_HEADER= """#import "ApplicationRouting.h"
extern NSData* filterDataInRange(NSData* thedata, NSRange range);
@implementation ApplicationRouting
"""
FOOTER ="""
@end
"""
MODULE_IMPL_HEADER = """#import "ApplicationMods.h"
@implementation ApplicationMods
+ (NSArray*) compiledMods
{
NSMutableArray *modules = [NSMutableArray array];
"""
class HTMLParser(sgmllib.SGMLParser):
def parse(self, s):
self.feed(s)
self.close()
def __init__(self, verbose=0):
sgmllib.SGMLParser.__init__(self, verbose)
self.scripts = []
def start_script(self, attributes):
for name, value in attributes:
if name == "src":
self.scripts.append(value)
def get_scripts(self):
return self.scripts
def read_module_properties(dir):
file = os.path.join(dir,'manifest')
dict = {}
if os.path.exists(file):
contents = open(file).read()
for line in contents.splitlines(True):
if line[0:1]=='#': continue
idx = line.find(':')
if idx==-1: continue
k=line[0:idx]
v=line[idx+1:].strip()
dict[k]=v
return dict
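# Manifest files are plain 'key: value' lines with '#' comments. A hypothetical
# manifest containing
#   name: ti.example
#   version: 1.0
# parses to {'name': 'ti.example', 'version': '1.0'} (values are stripped of
# surrounding whitespace, keys keep everything before the first ':').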
# Convert a non-unicode string to unicode, decoding it as utf-8.
def to_unicode_or_not(obj, encoding='utf-8'):
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding)
return obj
# Need to pre-parse xcconfig files to mangle variable names, and then
# dump them into a map so that we can re-assemble them later
def parse_xcconfig(xcconfig, moduleId, variables):
module_xcconfig = open(xcconfig)
new_xcconfig = ''
local_variables = {}
prefix = moduleId.upper().replace('.','_')
for line in module_xcconfig:
# Strip comments
comment = line.find('//')
if comment != -1:
line = line[0:comment]
# Generate new varname / value pairings
# The regular expression parses a valid line into components
# <var>=<value>
# <var>[<key>=<keyvalue>]=<value>
# e.g.
# OTHER_LDFLAGS=-framework EventKit
# OTHER_LDFLAGS[sdk=iphoneos4*]=-liconv
splitline = re.split('(([^\[=]+)(\[[^\]]+\])?) *=? *(.+)', line)
if len(splitline) >= 5:
varname = splitline[1]
value = splitline[4]
name = prefix + '_' + varname.strip()
name = re.sub(r'[^\w]', '_', name)
local_variables[varname] = name
new_xcconfig += name + '=' + value + '\n'
module_xcconfig.close()
# Update any local variable references with new varname
# and add variables to the global variables map
for (varname, name) in local_variables.iteritems():
source = '$(%s)' % varname
target = '$(%s)' % name
new_xcconfig = new_xcconfig.replace(source,target)
# Add new varname to the list
if not varname in variables:
variables[varname] = [name]
else:
variables[varname].append(name)
new_xcconfig += '\n'
return new_xcconfig
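# Illustrative mangling for a hypothetical module id 'com.example.foo':
#   input : OTHER_LDFLAGS=-framework EventKit
#   output: COM_EXAMPLE_FOO_OTHER_LDFLAGS=-framework EventKit
# and any later '$(OTHER_LDFLAGS)' reference in the same file is rewritten to
# '$(COM_EXAMPLE_FOO_OTHER_LDFLAGS)'. The caller later re-assembles, per
# variable name, a line like
#   OTHER_LDFLAGS=$(inherited) $(COM_EXAMPLE_FOO_OTHER_LDFLAGS)
# so that settings from several modules merge instead of clobbering each other.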
def softlink_resources(source,target,use_ignoreDirs=True):
if not os.path.exists(target):
os.makedirs(target)
for file in os.listdir(source):
if (use_ignoreDirs and (file in ignoreDirs)) or (file in ignoreFiles):
continue
from_ = to_unicode_or_not(os.path.join(source, file))
to_ = to_unicode_or_not(os.path.join(target, file))
if os.path.isdir(from_):
print "[DEBUG] creating: %s" % (to_)
softlink_resources(from_,to_,use_ignoreDirs)
else:
print "[DEBUG] linking: %s to %s" % (from_,to_)
if os.path.exists(to_):
if os.path.islink(to_):
os.remove(to_)
os.symlink(from_, to_)
else:
os.symlink(from_, to_)
def clear_application_routing(classes_dir):
impf = open(os.path.join(classes_dir,'ApplicationRouting.m'),'w+')
impf.write(HEADER)
impf.write(IMPL_HEADER)
impf.write("+ (NSData*) resolveAppAsset:(NSString*)path;\n{\n")
impf.write(" return nil;\n")
impf.write('}\n')
impf.write(FOOTER)
impf.close()
def softlink_for_simulator(project_dir,app_dir):
resources_dir = os.path.join(project_dir,'Resources')
iphone_resources_dir = os.path.join(resources_dir,'iphone')
iphone_platform_dir = os.path.join(project_dir,'platform','iphone')
softlink_resources(resources_dir,app_dir)
if(os.path.exists(iphone_resources_dir)):
softlink_resources(iphone_resources_dir,app_dir,False)
dest_mod_dir = os.path.join(app_dir,'modules')
src_mod_dir = os.path.join(project_dir,'modules')
if(os.path.exists(src_mod_dir)):
softlink_resources(src_mod_dir,dest_mod_dir)
src_mod_iphone_dir = os.path.join(src_mod_dir,'iphone')
if(os.path.exists(src_mod_iphone_dir)):
softlink_resources(os.path.join(project_dir,'modules','iphone'),dest_mod_dir,False)
iphone_classes_dir = os.path.join(project_dir,'build','iphone','Classes')
clear_application_routing(iphone_classes_dir)
#
# TODO/FIXME
#
# - encryptor
#
class Compiler(object):
def __init__(self, project_dir, appid, name, deploytype):
self.deploytype = deploytype
self.project_dir = project_dir
self.project_name = name
self.appid = appid
if deploytype != 'export-build' and deploytype != 'commonjs':
self.iphone_dir = os.path.join(project_dir,'build','iphone')
else:
self.iphone_dir = project_dir
self.classes_dir = os.path.join(self.iphone_dir,'Classes')
self.assets_dir = os.path.join(self.iphone_dir,'assets')
self.modules = []
self.modules_metadata = []
self.exports = []
# for now, these are required
self.defines = ['USE_TI_ANALYTICS','USE_TI_NETWORK','USE_TI_PLATFORM','USE_TI_UI', 'USE_TI_API']
def compileProject(self,xcode=False,devicefamily='ios',iphone_version='iphoneos',silent=False,sdk=None):
tiapp_xml = os.path.join(self.project_dir,'tiapp.xml')
ti = TiAppXML(tiapp_xml)
if sdk is None:
sdk_version = os.path.basename(os.path.abspath(os.path.join(template_dir,'../')))
else:
sdk_version = sdk
if xcode:
app_name = os.environ['FULL_PRODUCT_NAME']
app_dir = os.path.join(os.environ['TARGET_BUILD_DIR'],os.environ['CONTENTS_FOLDER_PATH'])
else:
target = 'Debug'
if self.deploytype == 'production':
target = 'Release'
app_name = self.project_name+'.app'
app_folder_name = '%s-iphoneos' % target
app_dir = os.path.abspath(os.path.join(self.iphone_dir,'build',app_folder_name,app_name))
if not silent:
print "[INFO] Titanium SDK version: %s" % sdk_version
print "[INFO] iPhone Device family: %s" % devicefamily
print "[INFO] iPhone SDK version: %s" % iphone_version
if self.deploytype != 'export-build':
main_template_file = os.path.join(template_dir,'main.m')
main_template = codecs.open(main_template_file, encoding='utf-8').read()
main_template = main_template.replace('__PROJECT_NAME__',self.project_name)
main_template = main_template.replace('__PROJECT_ID__',self.appid)
main_template = main_template.replace('__DEPLOYTYPE__',self.deploytype)
main_template = main_template.replace('__APP_ID__',self.appid)
main_template = main_template.replace('__APP_ANALYTICS__',ti.properties['analytics'])
main_template = main_template.replace('__APP_PUBLISHER__',ti.properties['publisher'])
main_template = main_template.replace('__APP_URL__',ti.properties['url'])
main_template = main_template.replace('__APP_NAME__',ti.properties['name'])
main_template = main_template.replace('__APP_VERSION__',ti.properties['version'])
main_template = main_template.replace('__APP_DESCRIPTION__',ti.properties['description'])
main_template = main_template.replace('__APP_COPYRIGHT__',ti.properties['copyright'])
main_template = main_template.replace('__APP_GUID__',ti.properties['guid'])
main_template = main_template.replace('__APP_RESOURCE_DIR__','')
main_template_out = os.path.join(self.iphone_dir,'main.m')
# Read the existing main.m first: opening with 'w+' truncates the file, which
# would make the comparison below always succeed. Only rewrite when the
# contents changed, so Xcode does not trigger an unnecessary recompile.
main_file_contents = None
if os.path.exists(main_template_out):
    main_file_contents = codecs.open(main_template_out, encoding='utf-8').read()
if main_file_contents != main_template:
    main_file = codecs.open(main_template_out,'w+',encoding='utf-8')
    main_file.write(main_template)
    main_file.close()
resources_dir = os.path.join(self.project_dir,'Resources')
iphone_resources_dir = os.path.join(resources_dir,'iphone')
iphone_platform_dir = os.path.join(self.project_dir,'platform','iphone')
# copy in any resources in our module like icons
# NOTE: This means that any JS-only modules in the local project
# are hashed up and dumped into the export.
has_modules = False
missing_modules, modules, module_js = ([], [], [])
module_js_dir = os.path.join(self.project_dir,'modules')
if os.path.exists(module_js_dir):
for file in os.listdir(module_js_dir):
if file.endswith('.js'):
module_js.append({'from':os.path.join(module_js_dir,file),'to':os.path.join(app_dir,file),'path':'modules/'+file})
if self.deploytype != 'export-build':
# Have to load the module detection here, in order to
# prevent distributing even MORE stuff in export/transport
sys.path.append(os.path.join(template_dir,'../module'))
from module import ModuleDetector
detector = ModuleDetector(self.project_dir)
missing_modules, modules = detector.find_app_modules(ti, 'iphone', self.deploytype)
# we have to copy these even in simulator given the path difference
if os.path.exists(app_dir):
self.copy_resources([iphone_resources_dir],app_dir,False)
if os.path.exists(app_dir):
self.copy_resources([iphone_platform_dir],app_dir,False)
# generate the includes for all compiled modules
xcconfig_c = "// this is a generated file - DO NOT EDIT\n\n"
if len(modules) > 0:
mods = open(os.path.join(self.classes_dir,'ApplicationMods.m'),'w+')
variables = {}
mods.write(MODULE_IMPL_HEADER)
for module in modules:
if module.js:
# CommonJS module
module_js.append({'from': module.js, 'path': 'modules/' + os.path.basename(module.js)})
module_id = module.manifest.moduleid.lower()
module_name = module.manifest.name.lower()
module_version = module.manifest.version
module_guid = ''
module_licensekey = ''
if module.manifest.has_property('guid'):
module_guid = module.manifest.guid
if module.manifest.has_property('licensekey'):
module_licensekey = module.manifest.licensekey
self.modules_metadata.append({'guid':module_guid,'name':module_name,'id':module_id,'dir':module.path,'version':module_version,'licensekey':module_licensekey})
xcfile = module.get_resource('module.xcconfig')
if os.path.exists(xcfile):
xcconfig_contents = parse_xcconfig(xcfile, module_id, variables)
xcconfig_c += xcconfig_contents
xcfile = os.path.join(self.project_dir,'modules','iphone',"%s.xcconfig" % module_name)
if os.path.exists(xcfile):
xcconfig_contents = parse_xcconfig(xcfile, module_id, variables)
xcconfig_c += xcconfig_contents
mods.write(" [modules addObject:[NSDictionary dictionaryWithObjectsAndKeys:@\"%s\",@\"name\",@\"%s\",@\"moduleid\",@\"%s\",@\"version\",@\"%s\",@\"guid\",@\"%s\",@\"licensekey\",nil]];\n" % (module_name,module_id,module_version,module_guid,module_licensekey));
# Load export symbols from modules...
metadata_path = os.path.join(module.path, 'metadata.json')
if os.path.exists(metadata_path):
self.load_metadata(metadata_path)
mods.write(" return modules;\n")
mods.write("}\n")
mods.write(FOOTER)
mods.close()
for (name, values) in variables.iteritems():
xcconfig_c += name + '=$(inherited) '
for value in values:
xcconfig_c += '$(%s) ' % value
xcconfig_c += '\n'
has_modules = True
xcconfig = os.path.join(self.iphone_dir,"module.xcconfig")
make_xcc = True
if os.path.exists(xcconfig):
existing_xcc = open(xcconfig).read()
# only copy if different so we don't trigger re-compile in xcode
make_xcc = existing_xcc!=xcconfig_c
if make_xcc:
xcconfig = open(xcconfig,'w')
xcconfig.write(xcconfig_c)
xcconfig.close()
#endif deploytype != 'export-build'
else:
# ... And for exported projects, load export symbols from
# the 'metadata' dir.
metadata_dir = os.path.join(self.iphone_dir, 'metadata')
if os.path.isdir(metadata_dir):
for file in os.listdir(metadata_dir):
self.load_metadata(os.path.join(metadata_dir,file))
if self.deploytype=='simulator' or self.deploytype=='export':
shutil.copy(os.path.join(template_dir,'Classes','defines.h'),os.path.join(self.classes_dir,'defines.h'))
if self.deploytype!='development' or has_modules:
if os.path.exists(app_dir) and self.deploytype != 'development':
self.copy_resources([resources_dir],app_dir,self.deploytype != 'test',module_js)
if self.deploytype == 'production':
debugger_plist = os.path.join(app_dir,'debugger.plist')
if os.path.exists(debugger_plist):
os.remove(debugger_plist)
if self.deploytype!='development' and self.deploytype!='export':
defines_file = os.path.join(self.classes_dir, 'defines.h')
defines_content = "// Warning: this is a generated file. Do not modify!\n\n"
defines_content+= "#define TI_VERSION %s\n"%sdk_version
for sym in self.defines:
    defines_content+="#define %s\n" % sym
# Compare against the existing file before opening for write: 'w+' truncates
# on open, so reading afterwards would always return an empty string.
existing_defines = open(defines_file).read() if os.path.exists(defines_file) else None
if defines_content != existing_defines:
    defines_header = open(defines_file,'w')
    defines_header.write(defines_content)
    defines_header.close()
# deploy any module image files
for module in self.modules:
img_dir = os.path.join(template_dir,'modules',module.lower(),'images')
print "[DEBUG] module image = %s" % img_dir
if not os.path.exists(img_dir): continue
dest_img_dir = os.path.join(app_dir,'modules',module.lower(),'images')
if not os.path.exists(dest_img_dir):
os.makedirs(dest_img_dir)
self.copy_resources([img_dir],dest_img_dir,False)
if self.deploytype!='development' and os.path.exists(app_dir):
# optimize PNGs - since we don't include them in the Resources of the xcodeproj
# the ones we copy in won't get optimized so we need to run it manually
# we can skip this on the simulator but should do it on device
dev_path = "/Developer"
# we need to ask xcode where the root path is
path = run.run(["/usr/bin/xcode-select","-print-path"],True,False)
if path:
dev_path = path.strip()
run.run(["%s/Platforms/iPhoneOS.platform/Developer/usr/bin/iphoneos-optimize"%dev_path,app_dir],False)
# remove empty directories
os.chdir(app_dir)
os.system("find . -type d -empty -delete")
else:
print "[INFO] Skipping JS compile, running from simulator"
if self.deploytype=='development':
softlink_for_simulator(self.project_dir,app_dir)
def compile_module(self):
appid_js_file = os.path.join(self.assets_dir, self.appid+'.js')
if not os.path.exists(appid_js_file):
appid_js_file = os.path.join(self.project_dir, '..', 'assets', self.appid+'.js')
root_asset = self.compile_commonjs_file(self.appid+'.js', appid_js_file)
js_files = []
for root, dirs, files in os.walk(self.assets_dir, True, None, True):
for file in [f for f in files if os.path.splitext(f)[1] == '.js']:
full_path = os.path.join(root, file)
self.compile_js_file(os.path.relpath(full_path, self.assets_dir), full_path, js_files)
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
titanium_prep = os.path.abspath(os.path.join(template_dir,'titanium_prep'))
cmdinputfile = tempfile.TemporaryFile()
cmdinputfile.write('\n'.join(js_files))
cmdinputfile.seek(0)
module_assets = subprocess.Popen([titanium_prep, self.appid, self.assets_dir], stdin=cmdinputfile,stderr=subprocess.STDOUT,stdout=subprocess.PIPE).communicate()[0]
cmdinputfile.close()
# Clean up the generated assets
for file in js_files:
os.remove(os.path.join(self.assets_dir, file))
return (root_asset, module_assets)
def load_metadata(self, file):
module_metadata = open(file,'r')
metadata = json.load(module_metadata)
module_metadata.close()
for symbol in metadata['exports']:
self.add_symbol(symbol)
def add_symbol(self,api):
print "[DEBUG] detected symbol: %s" % api
curtoken = ''
tokens = api.split(".")
try:
self.modules.index(tokens[0])
except:
self.modules.append(tokens[0])
for token in tokens:
curtoken+=token+"."
symbol = 'USE_TI_%s' % (curtoken.replace('.create','').replace('.','').replace('-','_').upper())
try:
self.defines.index(symbol)
except:
self.defines.append(symbol)
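# For example, add_symbol('UI.createWindow') records module 'UI' and appends
# the defines USE_TI_UI and USE_TI_UIWINDOW: each dotted prefix becomes one
# USE_TI_* symbol, with any '.create' fragment stripped first.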
def extract_tokens(self,sym,line):
# sloppy joe parsing code
# could be prettier and faster, but it works and is rather reliable
c = 0
tokens = []
search = sym + "."
size = len(search)
while True:
i = line.find(search,c)
if i < 0:
break
found = False
buf = ''
x = 0
for n in line[i+size:]:
# look for a terminal - this could probably be easier
if n in ['(',')','{','}','=',',',' ',':','!','[',']','+','*','/','~','^','%','\n','\t','\r']:
found = True
break
buf+=n
x+=1
tokens.append(buf)
if found:
c = i + x + 1
continue
break
return sorted(set(tokens))
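# Example: extract_tokens('Ti', 'Ti.UI.createWindow({title: Ti.App.name});')
# accumulates characters after each 'Ti.' until a terminal character and
# returns the sorted, de-duplicated list ['App.name', 'UI.createWindow'].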
def compile_js(self,file_contents):
for line in file_contents.split(';'):
for symbol in ('Titanium','Ti'):
for sym in self.extract_tokens(symbol,line):
self.add_symbol(sym)
self.exports.append(sym)
def process_html_files(self,data,source_root):
compile = []
if data.has_key('.js'):
for entry in data['.html']:
html_file = entry['from']
file_contents = open(os.path.expanduser(html_file)).read()
parser = HTMLParser()
parser.parse(file_contents)
# extract all of our scripts that are dependencies; we
# don't compile these
scripts = parser.get_scripts()
if len(scripts) > 0:
js_files = data['.js']
for script in scripts:
# if a remote script, ignore
if script.startswith('http:') or script.startswith('https:'):
continue
if script.startswith('app://'):
script = script[6:]
# build a file relative to the html file
fullpath = os.path.abspath(os.path.join(os.path.dirname(html_file),script))
# remove this script from being compiled
for f in js_files:
if f['from']==fullpath:
# target it to be compiled
compile.append(f)
js_files.remove(f)
break
return compile
def compile_js_asset_file(self,path,file):
file_contents = open(os.path.expanduser(file)).read()
if self.deploytype == 'production' or self.deploytype == 'commonjs':
file_contents = jspacker.jsmin(file_contents)
file_contents = file_contents.replace('Titanium.','Ti.')
self.compile_js(file_contents)
path = os.path.join(self.assets_dir,path)
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
tfile = open(path,'w+')
tfile.write(file_contents)
tfile.close()
# TODO: We should remove this when we can "safely" say we no longer support
# versions prior to 2.1, and also change the module loader code in iOS to
# no longer check for moduleAsset.
def compile_commonjs_file(self,path,from_):
js_files = []
self.compile_js_file(path, from_, js_files)
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
titanium_prep = os.path.abspath(os.path.join(template_dir,'titanium_prep'))
cmdinputfile = tempfile.TemporaryFile()
cmdinputfile.write('\n'.join(js_files))
cmdinputfile.seek(0)
so = subprocess.Popen([titanium_prep, self.appid, self.assets_dir], stdin=cmdinputfile,stderr=subprocess.STDOUT,stdout=subprocess.PIPE).communicate()[0]
cmdinputfile.close()
return so
def compile_js_file(self, path, from_, js_files):
print "[DEBUG] compiling: %s" % from_
path = path.replace('.','_')
self.compile_js_asset_file(path,from_)
js_files.append(path);
def copy_resources(self,sources,target,write_routing=True,module_js=[]):
js_files = []
if write_routing:
intf = open(os.path.join(self.classes_dir,'ApplicationRouting.h'),'w+')
impf = open(os.path.join(self.classes_dir,'ApplicationRouting.m'),'w+')
intf.write(HEADER)
intf.write(INTERFACE_HEADER)
impf.write(HEADER)
impf.write(IMPL_HEADER)
impf.write("+ (NSData*) resolveAppAsset:(NSString*)path;\n{\n")
if not os.path.exists(os.path.expanduser(target)):
os.makedirs(os.path.expanduser(target))
if not os.path.exists(self.assets_dir):
os.makedirs(self.assets_dir)
def compile_js_file(path,from_):
year, month, day, hour, minute, second, weekday, yearday, daylight = time.localtime(time.time())
print "[DEBUG] (%02d:%02d:%02d) compiling: %s" % (hour, minute, second, from_)
path = path.replace('.','_')
self.compile_js_asset_file(path,from_)
js_files.append(path);
def compile_js_files():
year, month, day, hour, minute, second, weekday, yearday, daylight = time.localtime(time.time())
print "[DEBUG] (%02d:%02d:%02d) packaging javascript" % (hour, minute, second)
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
titanium_prep = os.path.abspath(os.path.join(template_dir,'titanium_prep'))
cmdinputfile = tempfile.TemporaryFile()
cmdinputfile.write('\n'.join(js_files))
cmdinputfile.seek(0)
so = subprocess.Popen([titanium_prep, self.appid, self.assets_dir], stdin=cmdinputfile,stderr=subprocess.STDOUT,stdout=subprocess.PIPE).communicate()[0]
cmdinputfile.close()
impf.write(so)
year, month, day, hour, minute, second, weekday, yearday, daylight = time.localtime(time.time())
print "[DEBUG] (%02d:%02d:%02d) packaging finished" % (hour, minute, second)
def add_compiled_resources(source,target):
print "[DEBUG] copy resources from %s to %s" % (source,target)
compiled_targets = {}
for root, dirs, files in os.walk(source, True, None, True):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles:
continue
prefix = root[len(source):]
from_ = to_unicode_or_not(os.path.join(root, file))
to_ = os.path.expanduser(from_.replace(source, target, 1))
to_directory = os.path.expanduser(os.path.split(to_)[0])
if not os.path.exists(to_directory):
os.makedirs(to_directory)
fp = os.path.splitext(file)
ext = fp[1]
if ext == '.jss': continue
if len(fp)>1 and ext in ['.html','.js','.css']:
path = prefix + os.sep + file
path = path[1:]
entry = {'path':path,'from':from_,'to':to_}
if compiled_targets.has_key(ext):
compiled_targets[ext].append(entry)
else:
compiled_targets[ext]=[entry]
if not (write_routing and len(fp)>1 and ext in ['.html','.js','.css']):
# only copy if different filesize or doesn't exist
if not os.path.exists(to_) or os.path.getsize(from_)!=os.path.getsize(to_):
print "[DEBUG] copying: %s to %s" % (from_,to_)
shutil.copyfile(from_, to_)
if compiled_targets.has_key('.html'):
compiled = self.process_html_files(compiled_targets,source)
if len(compiled) > 0:
for c in compiled:
from_ = c['from']
to_ = c['to']
path = c['path']
print "[DEBUG] copying: %s to %s" % (from_,to_)
file_contents = open(from_).read()
file_contents = jspacker.jsmin(file_contents)
file_contents = file_contents.replace('Titanium.','Ti.')
to = open(to_,'w')
to.write(file_contents)
to.close()
for ext in ('.css','.html'):
if compiled_targets.has_key(ext):
for css_file in compiled_targets[ext]:
from_ = css_file['from']
to_ = css_file['to']
print "[DEBUG] copying: %s to %s" % (from_,to_)
if from_.endswith('.css'):
file_contents = open(from_).read()
packer = CSSPacker(file_contents)
file_contents = packer.pack()
to = open(to_,'w')
to.write(file_contents)
to.close()
else:
shutil.copyfile(from_, to_)
if compiled_targets.has_key('.js'):
for js_file in compiled_targets['.js']:
path = js_file['path']
from_ = js_file['from']
compile_js_file(path, from_)
# copy in any module assets
for metadata in self.modules_metadata:
tp_dir = os.path.join(metadata['dir'],'assets')
if not os.path.exists(tp_dir): continue
tp_id = metadata['id']
t = '%s/modules/%s' %(target,tp_id)
add_compiled_resources(tp_dir,t)
for source in sources:
add_compiled_resources(source,target)
for js_file in module_js:
compile_js_file(js_file['path'], js_file['from'])
if write_routing:
compile_js_files();
impf.write("\tNSNumber *index = [map objectForKey:path];\n")
impf.write("\tif (index == nil) { return nil; }\n")
impf.write("\treturn filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);\n")
impf.write('}\n')
intf.write(FOOTER)
impf.write(FOOTER)
intf.close()
impf.close()
if __name__ == "__main__":
argv = sys.argv
if len(argv) < 3:
print "[USAGE] %s <dir> <deploytype> [devicetype] [ios_version] [sdk_version]" % argv[0]
exit(1)
project_dir = argv[1]
deploytype = argv[2]
if deploytype == 'export-build':
xcode = True
else:
xcode = False
if len(argv) >= 4:
devicefamily = argv[3]
else:
devicefamily = 'unknown'
if len(argv) >= 5:
ios = argv[4]
else:
ios = 'unknown'
if len(argv) >= 6:
sdk = argv[5]
else:
sdk = None
tiapp_xml = os.path.join(project_dir,'tiapp.xml')
ti = TiAppXML(tiapp_xml)
appid = ti.properties['id']
name = ti.properties['name']
c = Compiler(project_dir,appid,name,deploytype)
c.compileProject(xcode,devicefamily,ios,sdk=sdk)
repo_name: openbaoz/titanium_mobile
path: support/iphone/compiler.py
language: Python | license: apache-2.0 | size: 26,373
keyword: ["VisIt"]
text_hash: 977e88a4ba901b8f264409123fd34a05ad90cc7a145bf4f3a7dd077520fd7a9e
#
# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
'''
The pkgtest command operates in two modes:
1. In development mode it uses the FastR 'Rscript' command and the internal GNU R for test comparison
2. In production mode it uses the GraalVM 'Rscript' command and a GNU R loaded as a sibling suite. This is indicated
by the environment variable 'FASTR_GRAALVM' being set. (GRAALVM_FASTR is also accepted for backwards compatibility)
Evidently in case 2, there is the potential for a version mismatch between FastR and GNU R, and this is checked.
In either case all the output is placed in the fastr suite dir. Separate directories are used for FastR and GNU R package installs
and tests, namely 'lib.install.packages.{fastr,gnur}' and 'test.{fastr,gnur}' (sh syntax).
'''
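# Illustrative invocations (the option names appear elsewhere in this file; the
# GraalVM path and repo value are hypothetical):
#   mx pkgtest --repos SNAPSHOT <pkgs>                              # development mode
#   FASTR_GRAALVM=/opt/graalvm mx pkgtest --repos SNAPSHOT <pkgs>   # production mode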
from os.path import join, relpath
from datetime import datetime
import shutil, os, re
import subprocess
import hashlib
import mx
import mx_fastr
quiet = False
graalvm = None
def _fastr_suite_dir():
return mx_fastr._fastr_suite.dir
def _mx_gnur():
return mx.suite('gnur')
def _gnur_rscript():
'''
returns path to Rscript in sibling gnur directory
'''
return _mx_gnur().extensions._gnur_rscript_path()
def _gnur_include_path():
if _graalvm():
return _mx_gnur().extensions._gnur_include_path()
else:
gnur_include_p = join(mx_fastr._gnur_path(), "include")
return gnur_include_p
def _fastr_include_path():
return join(_fastr_suite_dir(), 'include')
def _graalvm_rscript():
assert graalvm is not None
return join(graalvm, 'bin', 'Rscript')
def _check_graalvm():
if os.environ.has_key('FASTR_GRAALVM'):
return os.environ['FASTR_GRAALVM']
elif os.environ.has_key('GRAALVM_FASTR'):
return os.environ['GRAALVM_FASTR']
else:
return None
def _graalvm():
global graalvm
if graalvm is None:
graalvm = _check_graalvm()
if graalvm:
# version check
gnur_version = _mx_gnur().extensions.r_version().split('-')[1]
graalvm_version = subprocess.check_output([_graalvm_rscript(), '--version'], stderr=subprocess.STDOUT).rstrip()
if not gnur_version in graalvm_version:
mx.abort('graalvm R version does not match gnur suite')
return graalvm
def _create_libinstall(rvm, test_installed):
'''
Create lib.install.packages.<rvm>/install.tmp.<rvm>/test.<rvm> for <rvm>: fastr or gnur
If test_installed is True, assume lib.install.packages.<rvm> already exists and is populated (development)
'''
libinstall = join(_fastr_suite_dir(), "lib.install.packages." + rvm)
if not test_installed:
# make sure it's empty
shutil.rmtree(libinstall, ignore_errors=True)
os.mkdir(libinstall)
install_tmp = join(_fastr_suite_dir(), "install.tmp." + rvm)
# install_tmp = join(_fastr_suite_dir(), "install.tmp")
shutil.rmtree(install_tmp, ignore_errors=True)
os.mkdir(install_tmp)
_create_testdot(rvm)
return libinstall, install_tmp
def _create_testdot(rvm):
testdir = join(_fastr_suite_dir(), "test." + rvm)
shutil.rmtree(testdir, ignore_errors=True)
os.mkdir(testdir)
return testdir
def _log_timestamp():
if not quiet:
print "timestamp: {0}".format(str(datetime.now()))
def _log_step(state, step, rvariant):
if not quiet:
print "{0} {1} with {2}".format(state, step, rvariant)
_log_timestamp()
def _packages_test_project():
return 'com.oracle.truffle.r.test.packages'
def _packages_test_project_dir():
return mx.project(_packages_test_project()).dir
def _ensure_R_on_PATH(env, bindir):
'''
Some packages (e.g. stringi) require that 'R' is actually on the PATH
'''
env['PATH'] = join(bindir) + os.pathsep + os.environ['PATH']
def installpkgs(args):
_installpkgs(args)
def _installpkgs_script():
packages_test = _packages_test_project_dir()
return join(packages_test, 'r', 'install.packages.R')
def _installpkgs(args, **kwargs):
'''
Runs the R script that does the package installation and testing.
If we are running in a binary graalvm environment, which is indicated
by the FASTR_GRAALVM environment variable, we can't use mx to invoke
FastR, but instead have to invoke the command directly.
'''
if kwargs.has_key('env'):
env = kwargs['env']
else:
env = os.environ.copy()
kwargs['env'] = env
script = _installpkgs_script()
if _graalvm() is None:
_ensure_R_on_PATH(env, join(_fastr_suite_dir(), 'bin'))
return mx_fastr.rscript([script] + args, **kwargs)
else:
_ensure_R_on_PATH(env, os.path.dirname(_graalvm_rscript()))
return mx.run([_graalvm_rscript(), script] + args, **kwargs)
_pta_main_class = 'com.oracle.truffle.r.test.packages.analyzer.PTAMain'
def _pta_project():
return 'com.oracle.truffle.r.test.packages.analyzer'
def pta(args, **kwargs):
'''
Run analysis for package installation/testing results.
'''
vmArgs = mx.get_runtime_jvm_args(_pta_project())
vmArgs += [_pta_main_class]
mx.run_java(vmArgs + args)
def pkgtest(args):
'''
Package installation/testing.
rc: 0 for success; 1: install fail, 2: test fail, 3: install&test fail
'''
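# For example, a run where one package fails to install and another package's
# test output mismatches returns rc == 3 (1 | 2).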
test_installed = '--no-install' in args
fastr_libinstall, fastr_install_tmp = _create_libinstall('fastr', test_installed)
gnur_libinstall, gnur_install_tmp = _create_libinstall('gnur', test_installed)
if "--quiet" in args:
global quiet
quiet = True
install_args = list(args)
class OutputCapture:
def __init__(self):
self.install_data = None
self.pkg = None
self.mode = None
self.start_install_pattern = re.compile(r"^BEGIN processing: (?P<package>[a-zA-Z0-9\.\-]+) .*")
self.test_pattern = re.compile(r"^(?P<status>BEGIN|END) testing: (?P<package>[a-zA-Z0-9\.\-]+) .*")
self.time_pattern = re.compile(r"^TEST_TIME: (?P<package>[a-zA-Z0-9\.\-]+) (?P<time>[0-9\.\-]+) .*")
self.status_pattern = re.compile(r"^(?P<package>[a-zA-Z0-9\.\-]+): (?P<status>OK|FAILED).*")
self.install_data = dict()
self.install_status = dict()
self.test_info = dict()
def __call__(self, data):
print data,
if data == "BEGIN package installation\n":
self.mode = "install"
return
elif data == "BEGIN install status\n":
self.mode = "install_status"
return
elif data == "BEGIN package tests\n":
self.mode = "test"
return
if self.mode == "install":
start_install = re.match(self.start_install_pattern, data)
if start_install:
pkg_name = start_install.group(1)
self.pkg = pkg_name
self.install_data[self.pkg] = ""
if self.pkg:
self.install_data[self.pkg] += data
elif self.mode == "install_status":
if data == "END install status\n":
self.mode = None
return
status = re.match(self.status_pattern, data)
pkg_name = status.group(1)
self.install_status[pkg_name] = status.group(2) == "OK"
elif self.mode == "test":
test_match = re.match(self.test_pattern, data)
if test_match:
begin_end = test_match.group(1)
pkg_name = test_match.group(2)
if begin_end == "END":
_get_test_outputs('fastr', pkg_name, self.test_info)
else:
time_match = re.match(self.time_pattern, data)
if time_match:
pkg_name = time_match.group(1)
test_time = time_match.group(2)
with open(join(_pkg_testdir('fastr', pkg_name), 'test_time'), 'w') as f:
f.write(test_time)
env = os.environ.copy()
env["TMPDIR"] = fastr_install_tmp
env['R_LIBS_USER'] = fastr_libinstall
env['FASTR_OPTION_PrintErrorStacktracesToFile'] = 'false'
env['FASTR_OPTION_PrintErrorStacktraces'] = 'true'
out = OutputCapture()
# install and test the packages, unless just listing versions
if not '--list-versions' in install_args:
install_args += ['--run-tests']
install_args += ['--testdir', 'test.fastr']
if not '--print-install-status' in install_args:
install_args += ['--print-install-status']
# If '--cache-pkgs' is set, then also set the native API version value
_set_pkg_cache_api_version(install_args, _fastr_include_path())
_log_step('BEGIN', 'install/test', 'FastR')
# Currently installpkgs does not set a return code (in install.packages.R)
rc = _installpkgs(install_args, nonZeroIsFatal=False, env=env, out=out, err=out)
if rc == 100:
# fatal error connecting to package repo
mx.abort(rc)
rc = 0
for status in out.install_status.itervalues():
if not status:
rc = 1
_log_step('END', 'install/test', 'FastR')
single_pkg = len(out.install_status) == 1
install_failure = single_pkg and rc == 1
if '--run-tests' in install_args and not install_failure:
# in order to compare the test output with GnuR we have to install/test the same
# set of packages with GnuR
ok_pkgs = [k for k, v in out.install_status.iteritems() if v]
gnur_args = _args_to_forward_to_gnur(args)
# If '--cache-pkgs' is set, then also set the native API version value
_set_pkg_cache_api_version(gnur_args, _gnur_include_path())
_gnur_install_test(gnur_args, ok_pkgs, gnur_libinstall, gnur_install_tmp)
_set_test_status(out.test_info)
print 'Test Status'
for pkg, test_status in out.test_info.iteritems():
if test_status.status != "OK":
rc = rc | 2
print '{0}: {1}'.format(pkg, test_status.status)
diffdir = _create_testdot('diffs')
for pkg, _ in out.test_info.iteritems():
diff_file = join(diffdir, pkg)
subprocess.call(['diff', '-r', _pkg_testdir('fastr', pkg), _pkg_testdir('gnur', pkg)], stdout=open(diff_file, 'w'))
shutil.rmtree(fastr_install_tmp, ignore_errors=True)
return rc
def _set_pkg_cache_api_version(arg_list, include_dir):
'''
Looks for argument '--cache-pkgs' and appends the native API version to the value list of this argument.
'''
if "--cache-pkgs" in arg_list:
pkg_cache_values_idx = arg_list.index("--cache-pkgs") + 1
if pkg_cache_values_idx < len(arg_list):
arg_list[pkg_cache_values_idx] = arg_list[pkg_cache_values_idx] + ",version={0}".format(computeApiChecksum(include_dir))
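# Illustrative (hypothetical values): ['--cache-pkgs', 'dir=/tmp/pkgcache'] becomes
# ['--cache-pkgs', 'dir=/tmp/pkgcache,version=<hex SHA256 of the include dir>'].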
class TestFileStatus:
'''
Records the status of a test file. status is either "OK" or "FAILED".
The latter means that the file had a .fail extension.
'''
def __init__(self, status, abspath):
self.status = status
self.abspath = abspath
class TestStatus:
'''Records the test status of a package. status ends up as either "OK" or "FAILED",
unless GnuR also failed in which case it stays as UNKNOWN.
The testfile_outputs dict is keyed by the relative path of the output file to
the 'test/pkgname' directory. The value is an instance of TestFileStatus.
'''
def __init__(self):
self.status = "UNKNOWN"
self.testfile_outputs = dict()
def _pkg_testdir(rvm, pkg_name):
return join(_fastr_suite_dir(), 'test.' + rvm, pkg_name)
def _get_test_outputs(rvm, pkg_name, test_info):
pkg_testdir = _pkg_testdir(rvm, pkg_name)
for root, _, files in os.walk(pkg_testdir):
if not test_info.has_key(pkg_name):
test_info[pkg_name] = TestStatus()
for f in files:
ext = os.path.splitext(f)[1]
# suppress .pdf's for now (we can't compare them)
# ignore = ['.R', '.Rin', '.prev', '.bug', '.pdf', '.save']
# if f == 'test_time' or ext in ignore:
# continue
included = ['.Rout', '.fail']
if f == 'test_time' or not ext in included:
continue
status = "OK"
if ext == '.fail':
# some fatal error during the test
status = "FAILED"
f = os.path.splitext(f)[0]
absfile = join(root, f)
relfile = relpath(absfile, pkg_testdir)
test_info[pkg_name].testfile_outputs[relfile] = TestFileStatus(status, absfile)
def _args_to_forward_to_gnur(args):
forwarded_args = ['--repos', '--run-mode', '--cache-pkgs']
result = []
i = 0
while i < len(args):
arg = args[i]
if arg in forwarded_args:
result.append(arg)
i = i + 1
result.append(args[i])
i = i + 1
return result
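# Illustrative: _args_to_forward_to_gnur(['--repos', 'SNAPSHOT', '--quiet']) returns
# ['--repos', 'SNAPSHOT']; '--quiet' is not in forwarded_args and is dropped.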
def _gnur_install_test(forwarded_args, pkgs, gnur_libinstall, gnur_install_tmp):
'''
Install/test with GNU R exactly those packages that installed correctly with FastR.
N.B. That means that, regardless of how the packages were specified to pkgtest,
we always pass a '--pkg-filelist' arg to GNU R.
'''
gnur_packages = join(_fastr_suite_dir(), 'gnur.packages')
with open(gnur_packages, 'w') as f:
for pkg in pkgs:
f.write(pkg)
f.write('\n')
env = os.environ.copy()
env["TMPDIR"] = gnur_install_tmp
env['R_LIBS_USER'] = gnur_libinstall
env["TZDIR"] = "/usr/share/zoneinfo/"
args = []
if _graalvm():
args += [_gnur_rscript()]
# forward any explicit args to pkgtest
args += [_installpkgs_script()]
args += forwarded_args
args += ['--pkg-filelist', gnur_packages]
args += ['--run-tests']
args += ['--ignore-blacklist']
args += ['--testdir', 'test.gnur']
_log_step('BEGIN', 'install/test', 'GnuR')
if _graalvm():
_ensure_R_on_PATH(env, os.path.dirname(_gnur_rscript()))
mx.run(args, nonZeroIsFatal=False, env=env)
else:
_ensure_R_on_PATH(env, mx_fastr._gnur_path())
mx_fastr.gnu_rscript(args, env=env)
_log_step('END', 'install/test', 'GnuR')
def _set_test_status(fastr_test_info):
def _failed_outputs(outputs):
'''
return True iff outputs has any .fail files
'''
for _, testfile_status in outputs.iteritems():
if testfile_status.status == "FAILED":
return True
return False
gnur_test_info = dict()
for pkg, _ in fastr_test_info.iteritems():
_get_test_outputs('gnur', pkg, gnur_test_info)
# gnur is definitive so drive off that
for pkg in gnur_test_info.keys():
print 'BEGIN checking ' + pkg
gnur_test_status = gnur_test_info[pkg]
fastr_test_status = fastr_test_info[pkg]
gnur_outputs = gnur_test_status.testfile_outputs
fastr_outputs = fastr_test_status.testfile_outputs
if _failed_outputs(gnur_outputs):
# What this likely means is that some native package is not
# installed on the system so GNUR can't run the tests.
# Ideally this never happens.
print "{0}: GnuR test had .fail outputs".format(pkg)
if _failed_outputs(fastr_outputs):
# In addition to the similar comment for GNU R, this can happen
# if, say, the JVM crashes (possible with native code packages)
print "{0}: FastR test had .fail outputs".format(pkg)
fastr_test_status.status = "FAILED"
# Now for each successful GNU R output we compare content (assuming FastR didn't fail)
for gnur_test_output_relpath, gnur_testfile_status in gnur_outputs.iteritems():
# Can't compare if either GNUR or FastR failed
if gnur_testfile_status.status == "FAILED":
fastr_test_status.status = "INDETERMINATE"
break
if not gnur_test_output_relpath in fastr_outputs:
# FastR crashed on this test
fastr_test_status.status = "FAILED"
print "{0}: FastR is missing output file: {1}".format(pkg, gnur_test_output_relpath)
break
fastr_testfile_status = fastr_outputs[gnur_test_output_relpath]
if fastr_testfile_status.status == "FAILED":
break
gnur_content = None
with open(gnur_testfile_status.abspath) as f:
gnur_content = f.readlines()
fastr_content = None
with open(fastr_testfile_status.abspath) as f:
fastr_content = f.readlines()
result = _fuzzy_compare(gnur_content, fastr_content, gnur_testfile_status.abspath, fastr_testfile_status.abspath)
if result == -1:
print "{0}: content malformed: {1}".format(pkg, gnur_test_output_relpath)
fastr_test_status.status = "INDETERMINATE"
break
if result != 0:
fastr_test_status.status = "FAILED"
fastr_testfile_status.status = "FAILED"
print "{0}: FastR output mismatch: {1}".format(pkg, gnur_test_output_relpath)
break
# we started out as UNKNOWN
if not (fastr_test_status.status == "INDETERMINATE" or fastr_test_status.status == "FAILED"):
fastr_test_status.status = "OK"
# write out a file with the test status for each output (that exists)
with open(join(_pkg_testdir('fastr', pkg), 'testfile_status'), 'w') as f:
for fastr_relpath, fastr_testfile_status in fastr_outputs.iteritems():
if fastr_testfile_status.status == "FAILED":
relpath = fastr_relpath + ".fail"
else:
relpath = fastr_relpath
if os.path.exists(join(_pkg_testdir('fastr', pkg), relpath)):
f.write(relpath)
f.write(' ')
f.write(fastr_testfile_status.status)
f.write('\n')
print 'END checking ' + pkg
def _find_start(content):
marker = "Type 'q()' to quit R."
for i in range(len(content)):
line = content[i]
if marker in line:
# skip blank lines
j = i + 1
while j < len(content):
line = content[j].strip()
if len(line) > 0:
return j
j = j + 1
return None
def _find_end(content):
marker = "Time elapsed:"
for i in range(len(content)):
line = content[i]
if marker in line:
return i - 1
# not all files have a Time elapsed:
return len(content) - 1
def _find_line(gnur_line, fastr_content, fastr_i):
'''
Search forward in fastr_content from fastr_i searching for a match with gnur_line.
Do not match empty lines!
'''
if gnur_line == '\n':
return -1
while fastr_i < len(fastr_content):
fastr_line = fastr_content[fastr_i]
if fastr_line == gnur_line:
return fastr_i
fastr_i = fastr_i + 1
return -1
def _replace_engine_references(output):
for idx, val in enumerate(output):
if "RUNIT TEST PROTOCOL -- " in val:
# RUnit prints the current date and time
output[idx] = "RUNIT TEST PROTOCOL -- <date/time>"
else:
# ignore differences which come from test directory paths
output[idx] = val.replace('fastr', '<engine>').replace('gnur', '<engine>')
def _fuzzy_compare(gnur_content, fastr_content, gnur_filename, fastr_filename):
_replace_engine_references(gnur_content)
_replace_engine_references(fastr_content)
gnur_start = _find_start(gnur_content)
gnur_end = _find_end(gnur_content)
fastr_start = _find_start(fastr_content)
fastr_len = len(fastr_content)
if not gnur_start or not gnur_end or not fastr_start:
return -1
gnur_i = gnur_start
fastr_i = fastr_start
result = 0
while gnur_i < gnur_end:
gnur_line = gnur_content[gnur_i]
if fastr_i >= fastr_len:
result = 1
break
fastr_line = fastr_content[fastr_i]
if gnur_line != fastr_line:
if fastr_line.startswith('Warning') and 'FastR does not support graphics package' in fastr_content[fastr_i + 1]:
# ignore warning about FastR not supporting the graphics package
fastr_i = fastr_i + 2
if fastr_content[fastr_i].startswith('NULL') and not gnur_line.startswith('NULL'):
# ignore additional visible NULL
fastr_i = fastr_i + 1
continue
if gnur_line.startswith('Warning') and gnur_i + 1 < gnur_end and 'closing unused connection' in gnur_content[gnur_i + 1]:
# ignore message about closed connection
gnur_i = gnur_i + 2
continue
if gnur_i > 0 and gnur_content[gnur_i - 1].startswith(' user system elapsed'):
# ignore differences in timing
gnur_i = gnur_i + 1
fastr_i = fastr_i + 1
continue
# We are fuzzy on Error/Warning as FastR often differs
# in the context/format of the error/warning message AND GnuR is sometimes
# inconsistent over which error message it uses. Unlike the unit test environment,
# we cannot tag tests in any way, so we simply check that FastR does report
# an error. We then scan forward to try to get the files back in sync, as the
# number of error/warning lines may differ.
if 'Error' in gnur_line or 'Warning' in gnur_line:
to_match = 'Error' if 'Error' in gnur_line else 'Warning'
if to_match not in fastr_line:
result = 1
break
else:
# skip until lines match (or not)
gnur_i = gnur_i + 1
fastr_i = fastr_i + 1
if gnur_i == gnur_end - 1:
# at end (there is always a blank line)
break
ni = -1
while gnur_i < gnur_end:
ni = _find_line(gnur_content[gnur_i], fastr_content, fastr_i)
if ni > 0:
break
gnur_i = gnur_i + 1
if ni > 0:
fastr_i = ni
continue
else:
result = 1
break
else:
# genuine difference (modulo whitespace)
if not _ignore_whitespace(gnur_line, fastr_line):
result = 1
break
gnur_i = gnur_i + 1
fastr_i = fastr_i + 1
if result == 1:
print gnur_filename + ':%d' % gnur_i + ' vs. ' + fastr_filename + ':%d' % fastr_i
print gnur_line.strip()
print "vs."
print fastr_line.strip()
return result
def _ignore_whitespace(gnur_line, fastr_line):
return gnur_line.translate(None, ' \t') == fastr_line.translate(None, ' \t')
def pkgtest_cmp(args):
with open(args[0]) as f:
gnur_content = f.readlines()
with open(args[1]) as f:
fastr_content = f.readlines()
return _fuzzy_compare(gnur_content, fastr_content, args[0], args[1])
def find_top100(args):
libinstall = join(_fastr_suite_dir(), "top100.tmp")
if not os.path.exists(libinstall):
os.mkdir(libinstall)
os.environ['R_LIBS_USER'] = libinstall
_installpkgs(['--find-top100', '--use-installed-pkgs'])
def remove_dup_pkgs(args):
pkgs = args[0].split(",")
x = dict()
for p in pkgs:
x[p] = 1
result = []
for p in x.iterkeys():
result.append(p)  # NB: 'result += p' would extend the list with single characters
return result
def computeApiChecksum(includeDir):
'''
Computes a checksum of the header files found in the provided directory (recursively).
The result is a SHA256 checksum (as string with hex digits) of all header files.
'''
m = hashlib.sha256()
rootDir = includeDir
for root, _, files in os.walk(rootDir):
mx.logvv("Visiting directory {0}".format(root))
for f in files:
fileName = join(root, f)
if fileName.endswith('.h'):
try:
mx.logvv("Including file {0}".format(fileName))
with open(fileName) as f:
m.update(f.read())
except IOError as e:
# Ignore errors on broken symlinks
if not os.path.islink(fileName) or os.path.exists(fileName):
raise e
hxdigest = m.hexdigest()
mx.logv("Computed API version checksum {0}".format(hxdigest))
return hxdigest
|
akunft/fastr
|
mx.fastr/mx_fastr_pkgs.py
|
Python
|
gpl-2.0
| 26,188
|
[
"VisIt"
] |
4c427e655baae0552590defc4916c40af4a36685aa2ae7713be720127c0e32be
|
#
# Copyright (C) 2004-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from __future__ import print_function
import doctest
import gzip
import io
import os
import unittest
from rdkit import Chem
from rdkit import DistanceGeometry as DG
from rdkit import Geometry
from rdkit import RDConfig
from rdkit.Chem import ChemicalFeatures, rdDistGeom
from rdkit.Chem.Pharm3D import EmbedLib
from rdkit.Chem.Pharm3D import Pharmacophore
from rdkit.six import PY3
from rdkit.six.moves import cPickle
def feq(n1, n2, tol=1e-5):
return abs(n1 - n2) <= tol
def load_tests(loader, tests, ignore):
""" Add the Doctests from the module """
tests.addTests(
doctest.DocTestSuite(EmbedLib, optionflags=doctest.ELLIPSIS + doctest.NORMALIZE_WHITESPACE))
return tests
class TestCase(unittest.TestCase):
def setUp(self):
self.dataDir = os.path.join(RDConfig.RDCodeDir, 'Chem/Pharm3D/test_data')
self.fdefBlock = """
DefineFeature HAcceptor1 [N,O;H0]
Family HBondAcceptor
Weights 1.0
EndFeature
DefineFeature HDonor1 [N,O;!H0]
Family HBondDonor
Weights 1.0
EndFeature
DefineFeature Aromatic1 c1ccccc1
Family Aromatic
Weights 1.,1.,1.,1.,1.,1.
EndFeature\n"""
self.featFactory = ChemicalFeatures.BuildFeatureFactoryFromString(self.fdefBlock)
self.feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1',
Geometry.Point3D(0.0, 0.0, 0.0)),
ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1',
Geometry.Point3D(2.65, 0.0, 0.0)),
ChemicalFeatures.FreeChemicalFeature('Aromatic', 'Aromatic1',
Geometry.Point3D(5.12, 0.908, 0.0)), ]
self.pcophore = Pharmacophore.Pharmacophore(self.feats)
self.pcophore.setLowerBound(0, 1, 2.0)
self.pcophore.setUpperBound(0, 1, 3.3)
self.pcophore.setLowerBound(0, 2, 5.0)
self.pcophore.setUpperBound(0, 2, 5.4)
self.pcophore.setLowerBound(1, 2, 2.6)
self.pcophore.setUpperBound(1, 2, 3.0)
def _matchMol(self, tpl, pcophore, featFactory, downSample):
_, molPkl, boundsMat = tpl
mol = Chem.Mol(molPkl)
matched, matches = EmbedLib.MatchPharmacophoreToMol(mol, featFactory, pcophore)
if matched:
r = EmbedLib.MatchPharmacophore(matches, boundsMat, pcophore, useDownsampling=downSample)
if r[0]:
return 0
else:
return 1
else:
return 0
def test1SearchFullMat(self):
inF = gzip.open(os.path.join(self.dataDir, 'cdk2-syn-clip100.pkl.gz'), 'rb')
# outF = gzip.open(os.path.join(self.dataDir,'cdk2-syn-clip100.pkl.new.gz'),'wb+')
nDone = 0
nHits = 0
while 1:
try:
tpl = cPickle.load(inF, encoding='latin1')
if PY3:
tpl = tpl[0], tpl[1].encode('latin1'), tpl[2]
# tpl=tpl[0],tpl[1],numpy.array(tpl[2])
# cPickle.dump(tpl,outF)
except Exception:
break
if self._matchMol(tpl, self.pcophore, self.featFactory, 0):
nHits += 1
nDone += 1
self.assertEqual(nDone, 100)
# print 'nHits:',nHits
self.assertEqual(nHits, 47)
def test2SearchDownsample(self):
inF = gzip.open(os.path.join(self.dataDir, 'cdk2-syn-clip100.pkl.gz'), 'rb')
nDone = 0
nHits = 0
while 1:
try:
tpl = cPickle.load(inF, encoding='latin1')
if PY3:
tpl = tpl[0], tpl[1].encode('latin1'), tpl[2]
except Exception:
break
if self._matchMol(tpl, self.pcophore, self.featFactory, 1):
nHits += 1
nDone += 1
self.assertEqual(nDone, 100)
# print 'nHits:',nHits
self.assertEqual(nHits, 47)
def test3Embed(self):
testResults = {
'mol_197': (218.80, 35.75, 110.33, 11.58, 109.66, 11.09, 90.35, 2.95, 0.00),
'mol_223': (259.19, 6.27, 134.13, 1.12, 134.06, 1.12, 85.74, 0.61, 0.00),
'mol_269': (204.51, 7.89, 103.89, 1.20, 102.66, 1.20, 88.07, 1.21, 6.00),
}
inF = gzip.open(os.path.join(self.dataDir, 'cdk2-syn-clip100.pkl.gz'), 'rb')
nDone = 0
nHits = 0
while 1:
try:
name, molPkl, _ = cPickle.load(inF, encoding='latin1')
if PY3:
molPkl = bytes(molPkl, encoding='latin1')
except Exception:
break
nDone += 1
mol = Chem.Mol(molPkl)
nboundsMat = rdDistGeom.GetMoleculeBoundsMatrix(mol)
DG.DoTriangleSmoothing(nboundsMat)
matched, matches = EmbedLib.MatchPharmacophoreToMol(mol, self.featFactory, self.pcophore)
if matched:
failed, _, match, stats = EmbedLib.MatchPharmacophore(matches, nboundsMat, self.pcophore,
useDownsampling=1)
if not failed:
nHits += 1
if name in testResults:
stats = EmbedLib.EmbedOne(mol, name, match, self.pcophore, count=10, silent=1,
randomSeed=23)
tgt = testResults[name]
self.assertEqual(len(tgt), len(stats))
# print(name)
# print(','.join(['%.2f' % x for x in stats]))
# we'll use different tolerances for the different values:
self.assertTrue(feq(tgt[0], stats[0], 5.0), (tgt[0], stats[0]))
for i in range(2, len(tgt)):
self.assertTrue(feq(tgt[i], stats[i], 5.0), (tgt[i], stats[i]))
self.assertEqual(nDone, 100)
# print 'nHits:',nHits
self.assertEqual(nHits, 50)
def test4Search(self):
featFactory = ChemicalFeatures.BuildFeatureFactory(
os.path.join(self.dataDir, 'BaseFeatures.fdef'))
activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor',
Geometry.Point3D(0.0, 0.0, 0.0)),
ChemicalFeatures.FreeChemicalFeature('Donor', Geometry.Point3D(0.0, 0.0, 0.0)),
ChemicalFeatures.FreeChemicalFeature('Aromatic',
Geometry.Point3D(0.0, 0.0, 0.0))]
pcophore = Pharmacophore.Pharmacophore(activeFeats)
pcophore.setLowerBound(0, 1, 2.251)
pcophore.setUpperBound(0, 1, 2.451)
pcophore.setUpperBound2D(0, 1, 3)
pcophore.setLowerBound(0, 2, 4.970)
pcophore.setUpperBound(0, 2, 5.170)
pcophore.setUpperBound2D(0, 2, 6)
pcophore.setLowerBound(1, 2, 2.681)
pcophore.setUpperBound(1, 2, 2.881)
pcophore.setUpperBound2D(1, 2, 6)
inF = gzip.open(os.path.join(self.dataDir, 'cdk2-syn-clip100.pkl.gz'), 'rb')
nDone = 0
nMatches = 0
nHits = 0
while 1:
try:
_, molPkl, boundsMat = cPickle.load(inF, encoding='latin1')
if PY3:
molPkl = bytes(molPkl, encoding='latin1')
except Exception:
break
nDone += 1
mol = Chem.Mol(molPkl)
boundsMat = rdDistGeom.GetMoleculeBoundsMatrix(mol)
DG.DoTriangleSmoothing(boundsMat)
canMatch, matches = EmbedLib.MatchPharmacophoreToMol(mol, featFactory, pcophore)
if canMatch:
nMatches += 1
r = EmbedLib.MatchPharmacophore(matches, boundsMat, pcophore, useDownsampling=True,
use2DLimits=True, mol=mol)
failed = r[0]
if not failed:
nHits += 1
self.assertEqual(nDone, 100)
self.assertEqual(nMatches, 93)
# print 'nhits:',nHits
self.assertEqual(nHits, 67)
def testIssue268(self):
featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(self.dataDir, 'Issue268.fdef'))
m1 = Chem.MolFromMolFile(os.path.join(self.dataDir, 'Issue268_Mol1.mol'))
m2 = Chem.MolFromMolFile(os.path.join(self.dataDir, 'Issue268_Mol2.mol'))
with open(os.path.join(self.dataDir, 'Issue268_Pcop.pkl'), 'r') as inTF:
buf = inTF.read().replace('\r\n', '\n').encode('utf-8')
inTF.close()
with io.BytesIO(buf) as inF:
pcop = cPickle.load(inF, encoding='latin1')
# pcop._boundsMat=numpy.array(pcop._boundsMat)
# pcop._boundsMat2D=numpy.array(pcop._boundsMat2D)
# cPickle.dump(pcop,file(os.path.join(self.dataDir,
# 'Issue268_Pcop.new.pkl'),'wb+'))
_, mList1 = EmbedLib.MatchFeatsToMol(m1, featFactory, pcop.getFeatures())
_, mList2 = EmbedLib.MatchFeatsToMol(m2, featFactory, pcop.getFeatures())
b1 = rdDistGeom.GetMoleculeBoundsMatrix(m1)
b2 = rdDistGeom.GetMoleculeBoundsMatrix(m2)
self.assertEqual(len(EmbedLib.MatchPharmacophore(mList1, b1, pcop)[2]), 4)
self.assertEqual(len(EmbedLib.MatchPharmacophore(mList2, b2, pcop)[2]), 4)
self.assertEqual(
len(EmbedLib.MatchPharmacophore(mList1, b1, pcop, mol=m1, use2DLimits=True)[2]), 4)
self.assertEqual(
len(EmbedLib.MatchPharmacophore(mList2, b2, pcop, mol=m2, use2DLimits=True)[2]), 4)
self.assertTrue(DG.DoTriangleSmoothing(b1))
self.assertTrue(DG.DoTriangleSmoothing(b2))
self.assertEqual(len(EmbedLib.MatchPharmacophore(mList1, b1, pcop)[2]), 4)
self.assertEqual(len(EmbedLib.MatchPharmacophore(mList2, b2, pcop)[2]), 4)
self.assertEqual(
len(EmbedLib.MatchPharmacophore(mList1, b1, pcop, mol=m1, use2DLimits=True)[2]), 4)
self.assertEqual(
len(EmbedLib.MatchPharmacophore(mList2, b2, pcop, mol=m2, use2DLimits=True)[2]), 4)
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
rvianello/rdkit
|
rdkit/Chem/Pharm3D/UnitTestEmbed.py
|
Python
|
bsd-3-clause
| 9,826
|
[
"RDKit"
] |
90dd8076cf3efc069c4c2770850debb6669612b6cee6557aa55cca39153147b3
|
# Python example script that uses the vtkMatlabEngineInterface to perform
# a calculation (sin(x)^2 + cos(x)^2 = 1) on VTK array data, and pass the
# result back to VTK.
# VTK must be built with VTK_USE_MATLAB_MEX turned on for this example to work!
from vtk import *
import math
if __name__ == "__main__":
# Create an instance of the Matlab Engine. Note, menginterface is not a VTK pipeline object.
menginterface = vtkMatlabEngineInterface()
# Create two arrays of doubles in VTK. y contains sin(x)^2
x = vtkDoubleArray()
y = vtkDoubleArray()
for d in range(0, 100):
x.InsertNextValue(d)
y.InsertNextValue(math.sin(d)**2)
# Copy the x and y to Matlab with the same variable names
menginterface.PutVtkDataArray("x", x)
menginterface.PutVtkDataArray("y", y)
# Calculate cos(x)^2 + sin(x)^2 = 1 in Matlab.
menginterface.EvalString("y = y + cos(x).^2")
# Copy y back to VTK as variable result
result = menginterface.GetVtkDataArray("y")
# Display contents of result, should be all ones.
print "\n\nContents of result array copied to VTK from Matlab\n\n"
for i in range(result.GetNumberOfTuples()):
t = result.GetTuple1(i)
print 'result[%d] = %6.4f' % (i, t)
|
b3c/VTK-5.8
|
Examples/Infovis/Python/Matlab_engine_interface.py
|
Python
|
bsd-3-clause
| 1,237
|
[
"VTK"
] |
64c8b215118949a4474108ef8b6548d8cc78b8ba5968e4e3fe1ebfcf6857f9dc
|
from Firefly import logging
from Firefly.helpers.conditions import Conditions
from Firefly.helpers.events import Command
from Firefly import scheduler
class Action(object):
def __init__(self, ff_id, command, source, conditions=None, force=False, **kwargs):
self._ff_id = ff_id
self._command = command
self._source = source
self._kwargs = kwargs
self._force = force
self._delay_s = kwargs.get('delay_s')
self._delay_m = kwargs.get('delay_m')
self._conditions = None
if type(conditions) is Conditions:
self._conditions = conditions
if type(conditions) is dict:
self._conditions = Conditions(**conditions)
def execute_action(self, firefly):
if self._delay_s:
scheduler.runInS(self._delay_s, self.execute, firefly=firefly)
elif self._delay_m:
scheduler.runInM(self._delay_m, self.execute, firefly=firefly)
else:
self.execute(firefly)
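# Illustrative usage (ids and command are hypothetical): send 'turn_off' to a
# device 30 seconds from now, bypassing any conditions check:
#   Action('zwave_switch_1', 'turn_off', 'routine_goodnight',
#          force=True, delay_s=30).execute_action(firefly)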
def execute(self, firefly):
if self._force or self.conditions is None:
command = Command(self.id, self.source, self.command, force=self._force, **self._kwargs)
firefly.send_command(command)
return True
if self.conditions.check_conditions(firefly):
command = Command(self.id, self.source, self.command, force=self._force, **self._kwargs)
firefly.send_command(command)
return True
return False
def export(self) -> dict:
export_data = {
'ff_id': self.id,
'command': self.command,
'force': self._force,
'source': self.source
}
if self.conditions:
export_data['conditions'] = self.conditions.export()
export_data.update(**self._kwargs)
return export_data
@property
def id(self):
return self._ff_id
@property
def command(self):
return self._command
@property
def source(self):
return self._source
@property
def conditions(self):
return self._conditions
|
Firefly-Automation/Firefly
|
Firefly/helpers/action.py
|
Python
|
apache-2.0
| 1,905
|
[
"Firefly"
] |
4f1511a242314de410d81e9f2ef73b012c5b019e4930c67a30f48ec358b32ba1
|
import numpy as np #from numpy import mgrid, empty, sin, pi
import vtk
def simple_grid():
# Generate some points.
x, y, z = np.mgrid[1:6:11j, 0:4:13j, 0:3:6j]
base = x[..., 0] + y[..., 0]
# Some interesting z values.
for i in range(z.shape[2]):
z[..., i] = base * 0.25 * i
return x, y, z
def uniform_grid(bounds, dims):
# Generate some points.
x, y, z = np.mgrid[bounds[0]:bounds[1]:(dims[0] * 1j),
bounds[2]:bounds[3]:(dims[1] * 1j),
bounds[4]:bounds[5]:(dims[2] * 1j)
]
base = x[..., 0] + y[..., 0]
# Some interesting z values.
# for i in range(z.shape[2]):
# z[..., i] = base * 0.25 * i
return x, y, z
def reshape_pts(x,y,z):
# The actual points.
pts = np.empty(z.shape + (3,), dtype=float)
pts[..., 0] = x
pts[..., 1] = y
pts[..., 2] = z
# We reorder the points, scalars and vectors so this is as per VTK's
# requirement of x first, y next and z last.
pts = pts.transpose(2, 1, 0, 3).copy()
pts.shape = pts.size // 3, 3
return pts
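# Illustrative: for the simple_grid() arrays of shape (11, 13, 6), reshape_pts
# returns an (858, 3) array whose rows vary the x index fastest, then y, then z,
# which is the point ordering a vtkStructuredGrid expects.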
def gen_data(x,y,z):
# Simple scalars.
scalars = x * x + y * y + z * z
# Some vectors
vectors = np.empty(z.shape + (3,), dtype=float)
vectors[..., 0] = (4 - y * 2)
vectors[..., 1] = (x * 3 - 12)
vectors[..., 2] = np.sin(z * np.pi)
scalars = scalars.T.copy()
vectors = vectors.transpose(2, 1, 0, 3).copy()
vectors.shape = vectors.size // 3, 3
return scalars, vectors
# def uniform_grid(x,y,z):
# # The actual points.
# pts = np.empty(z.shape + (3,), dtype=float)
# pts[..., 0] = x
# pts[..., 1] = y
# pts[..., 2] = z
# # Simple scalars.
# scalars = x * x + y * y + z * z
# # Some vectors
# vectors = np.empty(z.shape + (3,), dtype=float)
# vectors[..., 0] = (4 - y * 2)
# vectors[..., 1] = (x * 3 - 12)
# vectors[..., 2] = np.sin(z * np.pi)
# # We reorder the points, scalars and vectors so this is as per VTK's
# # requirement of x first, y next and z last.
# pts = pts.transpose(2, 1, 0, 3).copy()
# #print(pts.shape)
# pts.shape = pts.size // 3, 3
# scalars = scalars.T.copy()
# vectors = vectors.transpose(2, 1, 0, 3).copy()
# print(vectors.shape)
# vectors.shape = vectors.size // 3, 3
# print(vectors.shape)
# return pts, scalars, vectors
def test_simple_grid():
x,y,z = simple_grid()
pts = reshape_pts(x,y,z)
scalars, vectors = gen_data(x,y,z)
print(pts.shape, scalars.shape, vectors.shape)
vtk_pts = vtk.vtkPoints()
for pt in pts:
#print(pt)
vtk_pts.InsertNextPoint(pt)
# Uncomment the following if you want to add some noise to the data.
#pts += np.random.randn(dims[0]*dims[1]*dims[2], 3)*0.04
sgrid = vtk.vtkStructuredGrid()
sgrid.SetDimensions(x.shape)
sgrid.SetPoints(vtk_pts)
scalar_arr = vtk.vtkDoubleArray()
scalar_arr.SetNumberOfComponents(1)
scalar_arr.SetName("distance")
vec_arr = vtk.vtkDoubleArray()
vec_arr.SetNumberOfComponents(3)
vec_arr.SetName("vector")
#s = np.sqrt(pts[:,0]**2 + pts[:,1]**2 + pts[:,2]**2)
for idx, s_ in enumerate(scalars.ravel()):
scalar_arr.InsertNextTuple([s_])
vec_arr.InsertNextTuple(vectors[idx])
#print(s.shape)
sgrid.GetPointData().AddArray(scalar_arr)
sgrid.GetPointData().AddArray(vec_arr)
# sgrid.point_data.scalars.name = 'scalars'
# Uncomment the next two lines to save the dataset to a VTK XML file.
writer = vtk.vtkXMLStructuredGridWriter()
writer.SetFileName("test_2.vts")
writer.SetInputData(sgrid)
writer.Write()
print("success")
def test_uniform_grid():
bounds = [-10., 20., 20., 40., 0., 60.]
dims = (37, 23, 65)
x,y,z = uniform_grid(bounds, dims)
pts = reshape_pts(x,y,z)
scalars, vectors = gen_data(x,y,z)
#print(pts.shape, scalars.shape, vectors.shape)
vtk_pts = vtk.vtkPoints()
for pt in pts:
#print(pt)
vtk_pts.InsertNextPoint(pt)
# Uncomment the following if you want to add some noise to the data.
#pts += np.random.randn(dims[0]*dims[1]*dims[2], 3)*0.04
sgrid = vtk.vtkStructuredGrid()
sgrid.SetDimensions(x.shape)
sgrid.SetPoints(vtk_pts)
scalar_arr = vtk.vtkDoubleArray()
scalar_arr.SetNumberOfComponents(1)
scalar_arr.SetName("distance")
vec_arr = vtk.vtkDoubleArray()
vec_arr.SetNumberOfComponents(3)
vec_arr.SetName("vector")
for idx, s_ in enumerate(scalars.ravel()):
scalar_arr.InsertNextTuple([s_])
vec_arr.InsertNextTuple(vectors[idx])
#print(s.shape)
sgrid.GetPointData().AddArray(scalar_arr)
sgrid.GetPointData().AddArray(vec_arr)
centers = vtk.vtkCellCenters()
centers.SetInputData(sgrid)
centers.VertexCellsOn()
centers.Update()
# sgrid.point_data.scalars.name = 'scalars'
# Uncomment the next two lines to save the dataset to a VTK XML file.
writer = vtk.vtkXMLStructuredGridWriter()
writer.SetFileName("test_uniform.vts")
writer.SetInputData(sgrid)
writer.Write()
writer2 = vtk.vtkXMLPolyDataWriter()
writer2.SetFileName("test_uniform_centers.vtp")
writer2.SetInputConnection(centers.GetOutputPort())
writer2.Write()
print("success")
def main():
test_simple_grid()
test_uniform_grid()
if __name__ == '__main__':
main()
|
kayarre/Tools
|
vtk/simple_grid.py
|
Python
|
bsd-2-clause
| 5,131
|
[
"VTK"
] |
b68362caa2f36368f5c8ed8c6f63f9821c2f843b7d813bcd663a5c0f5ffeaa62
|
#!/bin/env python
""" create rst files for documentation of DIRAC """
import os
import shutil
import socket
import sys
import logging
import glob
logging.basicConfig(level=logging.INFO, format='%(name)s: %(levelname)8s: %(message)s')
LOG = logging.getLogger('MakeDoc')
def mkdir(folder):
"""create a folder, ignore if it exists"""
try:
folder = os.path.join(os.getcwd(), folder)
os.mkdir(folder)
except OSError as e:
LOG.debug("Exception when creating folder %s: %r", folder, e)
def writeLinesToFile(filename, lines):
"""Write a list of lines into a file.
The file is only rewritten when the content has actually changed.
"""
newContent = '\n'.join(lines)
oldContent = None
if os.path.exists(filename):
with open(filename, 'r') as oldFile:
oldContent = ''.join(oldFile.readlines())
if oldContent is None or oldContent != newContent:
with open(filename, 'w') as rst:
LOG.info('Writing new content for %s', filename)
rst.write(newContent)
else:
LOG.debug('Not updating file content for %s', filename)
BASEPATH = "docs/source/CodeDocumentation"
DIRACPATH = os.environ.get("DIRAC", "") + "/DIRAC"
ORIGDIR = os.getcwd()
BASEPATH = os.path.join(DIRACPATH, BASEPATH)
# files that call parseCommandLine or similar issues
BAD_FILES = ("lfc_dfc_copy",
"lfc_dfc_db_copy",
"JobWrapperTemplate",
"PlotCache", # PlotCache creates a thread on import, which keeps sphinx from exiting
"PlottingHandler",
"setup.py", # configuration for style check
# "DataStoreClient", # instantiates itself
# "ReportsClient", ## causes gDataCache to start
# "ComponentInstaller", # tries to connect to a DB
# "ProxyDB", # tries to connect to security log server
# "SystemAdministratorHandler", # tries to connect to monitoring
# "GlobusComputingElement", # tries to connect to a DB
# "HTCondorCEComputingElememt", # tries to connect to a DB
# "TaskManager", #Tries to connect to security logging
)
FORCE_ADD_PRIVATE = ["FCConditionParser"]
# inherited functions give warnings in docstrings
NO_INHERITED = []
# global used inside the CustomizedDocs modules
CUSTOMIZED_DOCSTRINGS = {}
def getCustomDocs():
"""Import the dynamically created docstrings from the files in CustomizedDocs.
Use 'exec' to avoid a lot of relative import, pylint errors, etc.
"""
customizedPath = os.path.join(BASEPATH, '../../Tools/CustomizedDocs/*.py')
LOG.info('Looking for custom strings in %s', customizedPath)
for filename in glob.glob(customizedPath):
LOG.info('Found customization: %s', filename)
exec(open(filename).read(), globals()) # pylint: disable=exec-used
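# A customization module is expected to add entries to CUSTOMIZED_DOCSTRINGS,
# e.g. (hypothetical key): CUSTOMIZED_DOCSTRINGS['DIRAC.Core.Utilities.Foo'] = obj,
# where obj carries a 'replace' flag and a 'doc_string' attribute, as consumed
# in mkModuleRest below.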
def mkRest(filename, modulename, fullmodulename, subpackages=None, modules=None):
"""make a rst file for filename"""
if modulename == "scripts":
return
else:
modulefinal = modulename
lines = []
lines.append("%s" % modulefinal)
lines.append("=" * len(modulefinal))
lines.append(".. module:: %s " % fullmodulename)
lines.append("")
if subpackages or modules:
lines.append(".. toctree::")
lines.append(" :maxdepth: 1")
lines.append("")
subpackages = [s for s in subpackages if not s.endswith(("scripts", ))]
if subpackages:
LOG.info("Module %s with subpackages: %s", fullmodulename, ", ".join(subpackages))
lines.append("SubPackages")
lines.append("...........")
lines.append("")
lines.append(".. toctree::")
lines.append(" :maxdepth: 1")
lines.append("")
for package in sorted(subpackages):
lines.append(" %s/%s_Module.rst" % (package, package.split("/")[-1]))
#lines.append(" %s " % (package, ) )
lines.append("")
# remove CLI etc. because we drop them earlier
modules = [m for m in modules if not m.endswith("CLI") and "-" not in m]
if modules:
lines.append("Modules")
lines.append(".......")
lines.append("")
lines.append(".. toctree::")
lines.append(" :maxdepth: 1")
lines.append("")
for module in sorted(modules):
lines.append(" %s.rst" % (module.split("/")[-1],))
#lines.append(" %s " % (package, ) )
lines.append("")
writeLinesToFile(filename, lines)
def mkDummyRest(classname, fullclassname):
""" create a dummy rst file for files that behave badly """
filename = classname + ".rst"
lines = []
lines.append("%s" % classname)
lines.append("=" * len(classname))
lines.append("")
lines.append(" This is an empty file, because we cannot parse this file correctly or it causes problems")
lines.append(" , please look at the source code directly")
writeLinesToFile(filename, lines)
def mkModuleRest(classname, fullclassname, buildtype="full"):
""" create rst file for class"""
filename = classname + ".rst"
lines = []
lines.append("%s" % classname)
lines.append("=" * len(classname))
# if "-" not in classname:
# lines.append(".. autosummary::" )
# lines.append(" :toctree: %sGen" % classname )
# lines.append("")
# lines.append(" %s " % fullclassname )
# lines.append("")
lines.append(".. automodule:: %s" % fullclassname)
if buildtype == "full":
lines.append(" :members:")
if classname not in NO_INHERITED:
lines.append(" :inherited-members:")
lines.append(" :undoc-members:")
lines.append(" :show-inheritance:")
if classname in FORCE_ADD_PRIVATE:
lines.append(" :special-members:")
lines.append(" :private-members:")
else:
lines.append(" :special-members: __init__")
if classname.startswith("_"):
lines.append(" :private-members:")
if fullclassname in CUSTOMIZED_DOCSTRINGS:
ds = CUSTOMIZED_DOCSTRINGS[fullclassname]
if ds.replace:
lines = ds.doc_string
else:
lines.append(ds.doc_string)
writeLinesToFile(filename, lines)
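# For an illustrative module DIRAC/Core/Utilities/File.py, the generated File.rst
# starts roughly like:
#   File
#   ====
#   .. automodule:: DIRAC.Core.Utilities.File
#      :members:
#      :inherited-members:
#      :undoc-members:
#      :show-inheritance:
#      :special-members: __init__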
def getsubpackages(abspath, direc):
"""return list of subpackages with full path"""
packages = []
for dire in direc:
if dire.lower() == "test" or dire.lower() == "tests" or "/test" in dire.lower():
LOG.debug("Skipping test directory: %s/%s", abspath, dire)
continue
if dire.lower() == 'docs' or '/docs' in dire.lower():
LOG.debug('Skipping docs directory: %s/%s', abspath, dire)
continue
if os.path.exists(os.path.join(DIRACPATH, abspath, dire, "__init__.py")):
packages.append(os.path.join(dire))
return packages
def getmodules(abspath, direc, files):
"""return list of subpackages with full path"""
packages = []
for filename in files:
if filename.lower().startswith('test') or filename.lower().endswith('test'):
LOG.debug("Skipping test file: %s/%s", abspath, filename)
continue
if 'test' in filename.lower():
LOG.warn("File contains 'test', but is kept: %s/%s", abspath, filename)
if filename != "__init__.py":
packages.append(filename.split(".py")[0])
return packages
def createDoc(buildtype="full"):
"""create the rst files for all the things we want them for"""
LOG.info("DIRACPATH: %s", DIRACPATH)
LOG.info("BASEPATH: %s", BASEPATH)
LOG.info("Host: %s", socket.gethostname())
# we need to replace existing rst files so we can decide how much code-doc to create
if os.path.exists(BASEPATH) and os.environ.get('READTHEDOCS', 'False') == 'True':
shutil.rmtree(BASEPATH)
mkdir(BASEPATH)
os.chdir(BASEPATH)
getCustomDocs()
LOG.info("Now creating rst files")
for root, direc, files in os.walk(DIRACPATH):
configTemplate = [os.path.join(root, _) for _ in files if _ == 'ConfigTemplate.cfg']
files = [_ for _ in files if _.endswith(".py")]
if "__init__.py" not in files:
continue
elif any(f.lower() in root.lower() for f in ("/test", "scripts",
'docs/Tools',
)):
LOG.debug('Skipping test, docs, or script folder: %s', root)
continue
modulename = root.split("/")[-1]
abspath = root.split(DIRACPATH)[1].strip("/")
fullmodulename = 'DIRAC.' + '.'.join(abspath.split('/'))
packages = getsubpackages(abspath, direc)
LOG.debug("Trying to create folder: %s", abspath)
if abspath:
mkdir(abspath)
os.chdir(abspath)
if modulename == "DIRAC":
createCodeDocIndex(
subpackages=packages,
modules=getmodules(
abspath,
direc,
files),
buildtype=buildtype)
elif buildtype == "limited":
os.chdir(BASEPATH)
return 0
else:
mkRest(
modulename + "_Module.rst",
modulename,
fullmodulename,
subpackages=packages,
modules=getmodules(
abspath,
direc,
files))
for filename in files:
# Skip things that call parseCommandLine or similar issues
fullclassname = ".".join(abspath.split("/") + [filename])
if any(f in filename for f in BAD_FILES):
mkDummyRest(filename.split(".py")[0], fullclassname.split(".py")[0])
continue
elif not filename.endswith(".py") or \
filename.endswith("CLI.py") or \
filename.lower().startswith("test") or \
filename == "__init__.py" or \
"-" in filename: # not valid python identifier, e.g. dirac-pilot
continue
if not fullclassname.startswith("DIRAC."):
fullclassname = "DIRAC." + fullclassname
# NB: some Framework services misbehave on import; those were already stubbed out via BAD_FILES above
mkModuleRest(filename.split(".py")[0], fullclassname.split(".py")[0], buildtype)
if configTemplate:
shutil.copy(configTemplate[0], os.path.join(BASEPATH, abspath))
os.chdir(BASEPATH)
shutil.copy(os.path.join(DIRACPATH, 'dirac.cfg'), BASEPATH)
return 0
def createCodeDocIndex(subpackages, modules, buildtype="full"):
"""create the main index file"""
filename = "index.rst"
lines = []
lines.append(".. _code_documentation:")
lines.append("")
lines.append("Code Documentation (|release|)")
lines.append("------------------------------")
# for limited builds we only create the most basic code documentation so
# we let users know there is more elsewhere
if buildtype == "limited":
lines.append("")
lines.append(".. warning::")
lines.append(
" This a limited build of the code documentation, for the full code documentation please look at the website")
lines.append("")
else:
if subpackages or modules:
lines.append(".. toctree::")
lines.append(" :maxdepth: 1")
lines.append("")
if subpackages:
systemPackages = sorted([pck for pck in subpackages if pck.endswith("System")])
otherPackages = sorted([pck for pck in subpackages if not pck.endswith("System")])
lines.append("=======")
lines.append("Systems")
lines.append("=======")
lines.append("")
lines.append(".. toctree::")
lines.append(" :maxdepth: 1")
lines.append("")
for package in systemPackages:
lines.append(" %s/%s_Module.rst" % (package, package.split("/")[-1]))
lines.append("")
lines.append("=====")
lines.append("Other")
lines.append("=====")
lines.append("")
lines.append(".. toctree::")
lines.append(" :maxdepth: 1")
lines.append("")
for package in otherPackages:
lines.append(" %s/%s_Module.rst" % (package, package.split("/")[-1]))
if modules:
for module in sorted(modules):
lines.append(" %s.rst" % (module.split("/")[-1],))
#lines.append(" %s " % (package, ) )
writeLinesToFile(filename, lines)
def checkBuildTypeAndRun():
""" check for input argument and then create the doc rst files """
buildtypes = ("full", "limited")
buildtype = "full" if len(sys.argv) <= 1 else sys.argv[1]
if buildtype not in buildtypes:
LOG.error("Unknown build type: %s use %s ", buildtype, " ".join(buildtypes))
return 1
LOG.info("buildtype: %s", buildtype)
exit(createDoc(buildtype))
if __name__ == "__main__":
# get the options
if '-ddd' in ''.join(sys.argv):
LOG.setLevel(logging.DEBUG)
SUPER_DEBUG = True
if '-dd' in ''.join(sys.argv):
LOG.setLevel(logging.DEBUG)
SUPER_DEBUG = False
exit(checkBuildTypeAndRun())
|
petricm/DIRAC
|
docs/Tools/MakeDoc.py
|
Python
|
gpl-3.0
| 12,409
|
[
"DIRAC"
] |
13432f16299c12c67cc31314fd5e4ec83112509ab6b8dce7917e805fe3f55fe7
|
import numpy as np
from ase.data import atomic_numbers
from gpaw.utilities import pack2
from gpaw.utilities.tools import md5_new
from gpaw.atom.radialgd import AERadialGridDescriptor
from gpaw.atom.atompaw import AtomPAW
from gpaw.atom.configurations import configurations
from gpaw.basis_data import Basis, BasisFunction
from gpaw.pseudopotential import PseudoPotential
setups = {} # Filled out during parsing below
sc_setups = {} # Semicore
# Tabulated values of Gamma(m + 1/2)
half_integer_gamma = [np.sqrt(np.pi)]
for m in range(20):
half_integer_gamma.append(half_integer_gamma[m] * (m + 0.5))
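# e.g. half_integer_gamma[0] = Gamma(1/2) = sqrt(pi); the loop applies the
# recurrence Gamma(m + 3/2) = (m + 1/2) * Gamma(m + 1/2).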
class HGHSetupData:
"""Setup-compatible class implementing HGH pseudopotential.
To the PAW code this will appear as a legit PAW setup, but is
in fact considerably simpler. In particular, all-electron and
pseudo partial waves are all zero, and compensation charges do not
depend on environment.
A HGH setup has the following form::
                  ----
                   \
      V = Vlocal +  )  | p  > h   < p |
                   /      i    ij    j
                  ----
                    ij
Vlocal contains a short-range term which is Gaussian-shaped and
implemented as vbar of a PAW setup, along with a long-range term
which goes like 1/r and is implemented in terms of a compensation
charge.
The non-local part contains KB projector functions which are
essentially similar to those in PAW, while h_ij are constants.
h_ij are provided by setting the K_p variable of the normal
setup.
Most other properties of a PAW setup do not exist for HGH setups, for
which reason they are generally set to zero:
* All-electron partial waves: always zero
* Pseudo partial waves: always zero
* Projectors: HGH projectors
* Zero potential (vbar): Gaussian times polynomial
* Compensation charges: One Gaussian-shaped spherically symmetric charge
* All-electron core density: Delta function corresponding to core electron
charge
* Pseudo core density: always zero
* Pseudo valence density: always zero
* PS/AE Kinetic energy density: always zero
* The mysterious constants K_p of a setup correspond to h_ij.
Note that since the pseudo partial waves are set to zero,
initialization of atomic orbitals requires loading a custom basis
set.
Absolute energies become numerically large since no atomic
reference is subtracted.
"""
def __init__(self, hghdata):
if isinstance(hghdata, str):
symbol = hghdata
if symbol.endswith('.sc'):
hghdata = sc_setups[symbol[:-3]]
else:
hghdata = setups[symbol]
self.hghdata = hghdata
chemsymbol = hghdata.symbol
if '.' in chemsymbol:
chemsymbol, sc = chemsymbol.split('.')
assert sc == 'sc'
self.symbol = chemsymbol
self.type = hghdata.symbol
self.name = 'LDA'
self.initialize_setup_data()
def initialize_setup_data(self):
hghdata = self.hghdata
beta = 0.1
N = 450
rgd = AERadialGridDescriptor(beta / N, 1.0 / N, N,
default_spline_points=100)
#rgd = EquidistantRadialGridDescriptor(0.001, 10000)
self.rgd = rgd
self.Z = hghdata.Z
self.Nc = hghdata.Z - hghdata.Nv
self.Nv = hghdata.Nv
self.rcgauss = np.sqrt(2.0) * hghdata.rloc
threshold = 1e-8
if len(hghdata.c_n) > 0:
vloc_g = create_local_shortrange_potential(rgd.r_g, hghdata.rloc,
hghdata.c_n)
gcutvbar, rcutvbar = self.find_cutoff(rgd.r_g, rgd.dr_g, vloc_g,
threshold)
self.vbar_g = np.sqrt(4.0 * np.pi) * vloc_g[:gcutvbar]
else:
rcutvbar = 0.5
gcutvbar = rgd.ceil(rcutvbar)
self.vbar_g = np.zeros(gcutvbar)
nj = sum([v.nn for v in hghdata.v_l])
if nj == 0:
nj = 1 # Code assumes nj > 0 elsewhere, we fill out with zeroes
if not hghdata.v_l:
# No projectors. But the remaining code assumes that everything
# has projectors! We'll just add the zero function then
hghdata.v_l = [VNonLocal(0, 0.01, [[0.]])]
n_j = []
l_j = []
# j ordering is significant, must be nl rather than ln
for n, l in self.hghdata.nl_iter():
n_j.append(n + 1) # Note: actual n must be positive!
l_j.append(l)
assert nj == len(n_j)
self.nj = nj
self.l_j = l_j
self.l_orb_j = l_j
self.n_j = n_j
self.rcut_j = []
self.pt_jg = []
for n, l in zip(n_j, l_j):
# Note: even pseudopotentials without projectors will get one
# projector, but the coefficients h_ij should be zero so it
# doesn't matter
pt_g = create_hgh_projector(rgd.r_g, l, n, hghdata.v_l[l].r0)
norm = np.sqrt(np.dot(rgd.dr_g, pt_g**2 * rgd.r_g**2))
assert np.abs(1 - norm) < 1e-5, str(1 - norm)
gcut, rcut = self.find_cutoff(rgd.r_g, rgd.dr_g, pt_g, threshold)
if rcut < 0.5:
rcut = 0.5
gcut = rgd.ceil(rcut)
pt_g = pt_g[:gcut].copy()
rcut = max(rcut, 0.5)
self.rcut_j.append(rcut)
self.pt_jg.append(pt_g)
# This is the correct magnitude of the otherwise normalized
# compensation charge
self.Delta0 = -self.Nv / np.sqrt(4.0 * np.pi)
f_ln = self.hghdata.get_occupation_numbers()
f_j = [0] * nj
for j, (n, l) in enumerate(self.hghdata.nl_iter()):
try:
f_j[j] = f_ln[l][n]
except IndexError:
pass
self.f_ln = f_ln
self.f_j = f_j
def find_cutoff(self, r_g, dr_g, f_g, sqrtailnorm=1e-5):
g = len(r_g)
acc_sqrnorm = 0.0
while acc_sqrnorm <= sqrtailnorm:
g -= 1
acc_sqrnorm += (r_g[g] * f_g[g])**2.0 * dr_g[g]
if r_g[g] < 0.5: # XXX
return g, r_g[g]
return g, r_g[g]
def expand_hamiltonian_matrix(self):
"""Construct K_p from individual h_nn for each l."""
ni = sum([2 * l + 1 for l in self.l_j])
H_ii = np.zeros((ni, ni))
# The H_ii used in gpaw is much larger and more general than the one
# required for HGH pseudopotentials. This means a lot of the elements
# must be assigned the same value. Not a performance issue though,
# since these are small matrices
M1start = 0
for n1, l1 in self.hghdata.nl_iter():
M1end = M1start + 2 * l1 + 1
M2start = 0
v = self.hghdata.v_l[l1]
for n2, l2 in self.hghdata.nl_iter():
M2end = M2start + 2 * l2 + 1
if l1 == l2:
h_nn = v.expand_hamiltonian_diagonal()
H_mm = np.identity(M2end - M2start) * h_nn[n1, n2]
H_ii[M1start:M1end, M2start:M2end] += H_mm
M2start = M2end
M1start = M1end
K_p = pack2(H_ii)
return K_p
def __str__(self):
return "HGHSetupData('%s')" % self.type
def __repr__(self):
return self.__str__()
def print_info(self, text, _setup):
self.hghdata.print_info(text)
def plot(self):
"""Plot localized functions of HGH setup."""
import pylab as pl
rgd = self.rgd
pl.subplot(211) # vbar, compensation charge
rloc = self.hghdata.rloc
gloc = self.rgd.ceil(rloc)
gcutvbar = len(self.vbar_g)
pl.plot(rgd.r_g[:gcutvbar], self.vbar_g, 'r', label='vloc',
linewidth=3)
rcc, gcc = self.get_compensation_charge_functions()
gcc = gcc[0]
pl.plot(rcc, gcc * self.Delta0, 'b--', label='Comp charge [arb. unit]',
linewidth=3)
pl.legend(loc='best')
pl.subplot(212) # projectors
for j, (n, l, pt_g) in enumerate(zip(self.n_j, self.l_j, self.pt_jg)):
label = 'n=%d, l=%d' % (n, l)
pl.ylabel('$p_n^l(r)$')
ng = len(pt_g)
r_g = rgd.r_g[:ng]
pl.plot(r_g, pt_g, label=label)
r0 = self.hghdata.v_l[self.l_j[j]].r0
g0 = self.rgd.ceil(r0)
pl.legend()
def get_projectors(self):
# XXX equal-range projectors still required for some reason
maxlen = max([len(pt_g) for pt_g in self.pt_jg])
pt_j = []
for l, pt1_g in zip(self.l_j, self.pt_jg):
pt2_g = self.rgd.zeros()[:maxlen]
pt2_g[:len(pt1_g)] = pt1_g
pt_j.append(self.rgd.spline(pt2_g, self.rgd.r_g[maxlen - 1], l))
return pt_j
def create_basis_functions(self):
from gpaw.pseudopotential import generate_basis_functions
return generate_basis_functions(self)
def get_compensation_charge_functions(self):
alpha = self.rcgauss**-2
rcutgauss = self.rcgauss * 5.0 # smaller values break charge conservation
r = np.linspace(0.0, rcutgauss, 100)
g = alpha**1.5 * np.exp(-alpha * r**2) * 4.0 / np.sqrt(np.pi)
g[-1] = 0.0
return r, [0], [g]
def get_local_potential(self):
n = len(self.vbar_g)
return self.rgd.spline(self.vbar_g, self.rgd.r_g[n - 1])
def build(self, xcfunc, lmax, basis, filter=None):
if basis is None:
basis = self.create_basis_functions()
setup = PseudoPotential(self, basis)
setup.fingerprint = md5_new(str(self.hghdata)).hexdigest()
return setup
def create_local_shortrange_potential(r_g, rloc, c_n):
rr_g = r_g / rloc # "Relative r"
rr2_g = rr_g**2
rr4_g = rr2_g**2
rr6_g = rr4_g * rr2_g
gaussianpart = np.exp(-.5 * rr2_g)
polypart = np.zeros(r_g.shape)
for c, rrn_g in zip(c_n, [1, rr2_g, rr4_g, rr6_g]):
polypart += c * rrn_g
vloc_g = gaussianpart * polypart
return vloc_g
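# i.e. vloc(r) = exp(-(r/rloc)**2 / 2) * sum_{n=1..4} c_n * (r/rloc)**(2*(n-1)),
# the Gaussian-times-polynomial short-range part described in HGHSetupData above.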
def create_hgh_projector(r_g, l, n, r0):
poly_g = r_g**(l + 2 * (n - 1))
gauss_g = np.exp(-.5 * r_g**2 / r0**2)
A = r0**(l + (4 * n - 1) / 2.0)
assert (4 * n - 1) % 2 == 1
B = half_integer_gamma[l + (4 * n - 1) // 2]**.5
pt_g = 2.**.5 / A / B * poly_g * gauss_g
return pt_g
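# In formula form (reading off the code above):
#   p_n^l(r) = sqrt(2) * r**(l + 2*(n-1)) * exp(-r**2 / (2*r0**2))
#              / (r0**(l + (4*n - 1)/2) * sqrt(Gamma(l + (4*n - 1)/2)))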
# Coefficients determining off-diagonal elements of h_nn for l = 0...2
# given the diagonal elements
hcoefs_l = [
[-.5 * (3. / 5.)**.5, .5 * (5. / 21.)**.5, -.5 * (100. / 63.)**.5],
[-.5 * (5. / 7.)**.5, 1./6. * (35. / 11.)**.5, -1./6. * 14./11.**.5],
[-.5 * (7. / 9.)**.5, .5 * (63. / 143)**.5, -.5 * 18. / 143.**.5]
]
class VNonLocal:
"""Wrapper class for one nonlocal term of an HGH potential."""
def __init__(self, l, r0, h_n):
self.l = l
self.r0 = r0
h_n = np.array(h_n)
nn = len(h_n)
self.nn = nn
self.h_n = h_n
def expand_hamiltonian_diagonal(self):
"""Construct full atomic Hamiltonian from diagonal elements."""
nn = self.nn
h_n = self.h_n
h_nn = np.zeros((nn, nn))
for n, h in enumerate(h_n):
h_nn[n, n] = h
if self.l > 2:
#print 'Warning: no diagonal elements for l=%d' % l
# Some elements have projectors corresponding to l=3, but
# the HGH article only specifies how to calculate the
# diagonal elements of the atomic hamiltonian for l = 0, 1, 2 !
return
coefs = hcoefs_l[self.l]
if nn > 2:
h_nn[0, 2] = h_nn[2, 0] = coefs[1] * h_n[2]
h_nn[1, 2] = h_nn[2, 1] = coefs[2] * h_n[2]
if nn > 1:
h_nn[0, 1] = h_nn[1, 0] = coefs[0] * h_n[1]
return h_nn
def copy(self):
return VNonLocal(self.l, self.r0, self.h_n.copy())
def serialize(self): # no spin-orbit part
return ' '.join([' ', '%-10s' % self.r0] +
['%10f' % h for h in self.h_n])
class HGHParameterSet:
"""Wrapper class for HGH-specific data corresponding to one element."""
def __init__(self, symbol, Z, Nv, rloc, c_n, v_l):
self.symbol = symbol # Identifier, e.g. 'Na', 'Na.sc', ...
self.Z = Z # Actual atomic number
self.Nv = Nv # Valence electron count
self.rloc = rloc # Characteristic radius of local part
self.c_n = np.array(c_n) # Polynomial coefficients for local part
self.v_l = list(v_l) # Non-local parts
Z, nlfe_j = configurations[self.symbol.split('.')[0]]
self.configuration = nlfe_j
def __str__(self):
strings = ['HGH setup for %s\n' % self.symbol,
' Valence Z=%d, rloc=%.05f\n' % (self.Nv, self.rloc)]
if len(self.c_n) > 0:
coef_string = ', '.join(['%.05f' % c for c in self.c_n])
else:
coef_string = 'zeros'
strings.append(' Local part coeffs: %s\n' % coef_string)
strings.append(' Projectors:\n')
if not self.v_l:
strings.append(' None\n')
for v in self.v_l:
strings.append(' l=%d, rc=%.05f\n' % (v.l, v.r0))
strings.append(' Diagonal coefficients of nonlocal parts:')
if not self.v_l:
strings.append('\n None\n')
for v in self.v_l:
strings.append('\n')
strings.append(' l=%d: ' % v.l +
', '.join(['%8.05f' % h for h in v.h_n]))
return ''.join(strings)
def copy(self):
other = HGHParameterSet(self.symbol, self.Z, self.Nv, self.rloc,
self.c_n, self.v_l)
return other
def print_info(self, txt):
txt(str(self))
txt()
def nl_iter(self):
for n in range(4):
for l, v in enumerate(self.v_l):
if n < v.nn:
yield n, l
def get_occupation_numbers(self):
nlfe_j = list(self.configuration)
nlfe_j.reverse()
f_ln = [[], [], []] # [[s], [p], [d]]
# f states are ignored, since their atomic Hamiltonians are
# (carelessly) not defined in the article.
lmax = len(self.v_l) - 1
Nv = 0
# Right. We need to find the occupation numbers of each state and
# put them into a nice list of lists f_ln.
#
# We loop over states starting with the least bound one
# (i.e. reversed nlfe_j), adding the occupation numbers of each state
# as appropriate. Once we have the right number of electrons, we
# end the loop.
#
# Some states in the standard configuration might
# be f-type; these should be skipped (unless the HGH setup actually
# has a valence f-state; however, as noted above, some of the
# parameters are undefined in that case and are ignored anyway).
# More generally, any state with l > lmax can be skipped.
for n, l, f, e in nlfe_j:
if l > lmax:
continue
Nv += f
f_n = f_ln[l]
assert f_n == [] or self.symbol.endswith('.sc')
f_n.append(f)
if Nv >= self.Nv:
break
assert Nv == self.Nv
return f_ln
def zeropad(self):
"""Return a new HGHParameterSet with all arrays zero padded so they
have the same (max) length for all such HGH setups. Makes
plotting multiple HGH setups easier because they have compatible
arrays."""
c_n = np.zeros(4)
for n, c in enumerate(self.c_n):
c_n[n] = c
v_l = []
for l, v in enumerate(self.v_l):
h_n = np.zeros(3)
h_n[:len(v.h_n)] = list(v.h_n)
v2 = VNonLocal(l, v.r0, h_n)
v_l.append(v2)
for l in range(len(self.v_l), 3):
v_l.append(VNonLocal(l, 0.5, np.zeros(3)))
copy = HGHParameterSet(self.symbol, self.Z, self.Nv, self.rloc, c_n,
v_l)
return copy
def serialize(self):
# The second field must be Nv so that parse_local_part() can round-trip it.
string1 = '%-5s %-12s %10s ' % (self.symbol, self.Nv, self.rloc)
string2 = ' '.join(['%.10s' % c for c in self.c_n])
nonlocal_strings = [v.serialize() for v in self.v_l]
return '\n'.join([string1 + string2] + nonlocal_strings)
def parse_local_part(string):
"""Create HGHParameterSet object with local part initialized."""
tokens = iter(string.split())
symbol = tokens.next()
actual_chemical_symbol = symbol.split('.')[0]
Z = atomic_numbers[actual_chemical_symbol]
Nv = int(tokens.next())
rloc = float(tokens.next())
c_n = [float(token) for token in tokens]
return symbol, Z, Nv, rloc, c_n
class HGHBogusNumbersError(ValueError):
"""Error which is raised when the HGH parameters contain f-type
or higher projectors. The HGH article only defines atomic Hamiltonian
matrices up to l=2, so these are meaningless."""
pass
def parse_hgh_setup(lines):
"""Initialize HGHParameterSet object from text representation."""
lines = iter(lines)
symbol, Z, Nv, rloc, c_n = parse_local_part(lines.next())
def pair_up_nonlocal_lines(lines):
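# Note: under Python 2, the StopIteration raised by lines.next() once the
# input is exhausted simply terminates this generator.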
yield lines.next(), ''
while True:
yield lines.next(), lines.next()
v_l = []
for l, (nonlocal, spinorbit) in enumerate(pair_up_nonlocal_lines(lines)):
# the spin-orbit 'k_n' data is discarded for now
nltokens = nonlocal.split()
r0 = float(nltokens[0])
h_n = [float(token) for token in nltokens[1:]]
#if h_n[-1] == 0.0: # Only spin-orbit contributes. Discard.
# h_n.pop()
# Actually the above causes trouble. Probably it messes up state
# ordering or something else that shouldn't have any effect.
vnl = VNonLocal(l, r0, h_n)
v_l.append(vnl)
if l > 2:
raise HGHBogusNumbersError
hgh = HGHParameterSet(symbol, Z, Nv, rloc, c_n, v_l)
return hgh
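# Illustration of the text format parsed above (values are approximate and
# for illustration only): one local-part line, then one line per l channel:
#   Li 1 0.787553 -1.892612 0.286060
#   0.666375 1.858811
# i.e. symbol Li, Nv=1, rloc=0.787553, c_n=(-1.892612, 0.286060), and a
# single l=0 projector with r0=0.666375 and h_n=(1.858811,).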
def str2hgh(string):
return parse_hgh_setup(string.splitlines())
def hgh2str(hgh):
return hgh.serialize()
def parse_setups(lines):
"""Read HGH data from file."""
setups = {}
entry_lines = [i for i in xrange(len(lines))
if lines[i][0].isalpha()]
lines_by_element = [lines[entry_lines[i]:entry_lines[i + 1]]
for i in xrange(len(entry_lines) - 1)]
lines_by_element.append(lines[entry_lines[-1]:])
for elines in lines_by_element:
try:
hgh = parse_hgh_setup(elines)
except HGHBogusNumbersError:
continue
assert hgh.symbol not in setups
setups[hgh.symbol] = hgh
return setups
def plot(symbol, extension=None):
import pylab as pl
try:
s = HGHSetupData(symbol)
except IndexError:
print 'No HGH setup for %s' % symbol
return
s.plot()
if extension is not None:
pl.savefig('hgh.%s.%s' % (symbol, extension))
def plot_many(*symbols):
import pylab as pl
if not symbols:
symbols = setups.keys() + [key + '.sc' for key in sc_setups.keys()]
for symbol in symbols:
pl.figure(1)
plot(symbol, extension='png')
pl.clf()
def parse_default_setups():
from hgh_parameters import parameters
lines = parameters.splitlines()
setups0 = parse_setups(lines)
for key, value in setups0.items():
if key.endswith('.sc'):
sym, sc = key.split('.')
sc_setups[sym] = value
else:
setups[key] = value
parse_default_setups()
|
robwarm/gpaw-symm
|
gpaw/hgh.py
|
Python
|
gpl-3.0
| 19,812
|
[
"ASE",
"GPAW",
"Gaussian"
] |
98716138b8a053aedd815fab3f8c852ce04f6e69cb76215aee38f52cf59d273c
|
#
# QAPI types generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
#
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
def generate_fwd_struct(name, members, builtin_type=False):
if builtin_type:
return mcgen('''
typedef struct %(name)sList
{
union {
%(type)s value;
uint64_t padding;
};
struct %(name)sList *next;
} %(name)sList;
''',
type=c_type(name),
name=name)
return mcgen('''
typedef struct %(name)s %(name)s;
typedef struct %(name)sList
{
union {
%(name)s *value;
uint64_t padding;
};
struct %(name)sList *next;
} %(name)sList;
''',
name=name)
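# Note on the union with uint64_t padding above: it keeps the head of every
# generated FooList layout-compatible whether 'value' is a pointer or a
# narrower builtin, so generic list-handling code can (presumably) treat all
# list types uniformly, also on 32-bit hosts.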
def generate_fwd_enum_struct(name, members):
return mcgen('''
typedef struct %(name)sList
{
union {
%(name)s value;
uint64_t padding;
};
struct %(name)sList *next;
} %(name)sList;
''',
name=name)
def generate_struct_fields(members):
ret = ''
for argname, argentry, optional, structured in parse_args(members):
if optional:
ret += mcgen('''
bool has_%(c_name)s;
''',
c_name=c_var(argname))
if structured:
push_indent()
ret += generate_struct("", argname, argentry)
pop_indent()
else:
ret += mcgen('''
%(c_type)s %(c_name)s;
''',
c_type=c_type(argentry), c_name=c_var(argname))
return ret
def generate_struct(structname, fieldname, members):
ret = mcgen('''
struct %(name)s
{
''',
name=structname)
ret += generate_struct_fields(members)
if len(fieldname):
fieldname = " " + fieldname
ret += mcgen('''
}%(field)s;
''',
field=fieldname)
return ret
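# Sketch of the output for a hypothetical generate_struct('Example', '',
# {'name': 'str'}):
#   struct Example
#   {
#       char *name;
#   };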
def generate_enum_lookup(name, values):
ret = mcgen('''
const char *%(name)s_lookup[] = {
''',
name=name)
for value in values:
ret += mcgen('''
"%(value)s",
''',
value=value)
ret += mcgen('''
NULL,
};
''')
return ret
def generate_enum_name(name):
if name.isupper():
return c_fun(name, False)
new_name = ''
for c in c_fun(name, False):
if c.isupper():
new_name += '_'
new_name += c
return new_name.lstrip('_').upper()
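# Examples (assuming c_fun() maps '-' to '_' and preserves case):
#   generate_enum_name('guest-panicked') -> 'GUEST_PANICKED'
#   generate_enum_name('DeviceAndMedia') -> 'DEVICE_AND_MEDIA'
#   generate_enum_name('UUID')           -> 'UUID'  (already all uppercase)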
def generate_enum(name, values):
lookup_decl = mcgen('''
extern const char *%(name)s_lookup[];
''',
name=name)
enum_decl = mcgen('''
typedef enum %(name)s
{
''',
name=name)
# append automatically generated _MAX value
enum_values = values + [ 'MAX' ]
i = 0
for value in enum_values:
enum_decl += mcgen('''
%(abbrev)s_%(value)s = %(i)d,
''',
abbrev=de_camel_case(name).upper(),
value=generate_enum_name(value),
i=i)
i += 1
enum_decl += mcgen('''
} %(name)s;
''',
name=name)
return lookup_decl + enum_decl
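# Sketch of the output for a hypothetical generate_enum('ExampleEnum',
# ['value-one']):
#   extern const char *ExampleEnum_lookup[];
#   typedef enum ExampleEnum
#   {
#       EXAMPLE_ENUM_VALUE_ONE = 0,
#       EXAMPLE_ENUM_MAX = 1,
#   } ExampleEnum;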
def generate_anon_union_qtypes(expr):
name = expr['union']
members = expr['data']
ret = mcgen('''
const int %(name)s_qtypes[QTYPE_MAX] = {
''',
name=name)
for key in members:
qapi_type = members[key]
if builtin_type_qtypes.has_key(qapi_type):
qtype = builtin_type_qtypes[qapi_type]
elif find_struct(qapi_type):
qtype = "QTYPE_QDICT"
elif find_union(qapi_type):
qtype = "QTYPE_QDICT"
else:
assert False, "Invalid anonymous union member"
ret += mcgen('''
[ %(qtype)s ] = %(abbrev)s_KIND_%(enum)s,
''',
qtype = qtype,
abbrev = de_camel_case(name).upper(),
enum = c_fun(de_camel_case(key),False).upper())
ret += mcgen('''
};
''')
return ret
def generate_union(expr):
name = expr['union']
typeinfo = expr['data']
base = expr.get('base')
discriminator = expr.get('discriminator')
ret = mcgen('''
struct %(name)s
{
%(name)sKind kind;
union {
void *data;
''',
name=name)
for key in typeinfo:
ret += mcgen('''
%(c_type)s %(c_name)s;
''',
c_type=c_type(typeinfo[key]),
c_name=c_fun(key))
ret += mcgen('''
};
''')
if base:
base_fields = find_struct(base)['data']
if discriminator:
base_fields = base_fields.copy()
del base_fields[discriminator]
ret += generate_struct_fields(base_fields)
else:
assert not discriminator
ret += mcgen('''
};
''')
if discriminator == {}:
ret += mcgen('''
extern const int %(name)s_qtypes[];
''',
name=name)
return ret
def generate_type_cleanup_decl(name):
ret = mcgen('''
void qapi_free_%(type)s(%(c_type)s obj);
''',
c_type=c_type(name), type=name)
return ret
def generate_type_cleanup(name):
ret = mcgen('''
void qapi_free_%(type)s(%(c_type)s obj)
{
QapiDeallocVisitor *md;
Visitor *v;
if (!obj) {
return;
}
md = qapi_dealloc_visitor_new();
v = qapi_dealloc_get_visitor(md);
visit_type_%(type)s(v, &obj, NULL, NULL);
qapi_dealloc_visitor_cleanup(md);
}
''',
c_type=c_type(name), type=name)
return ret
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "chbp:o:",
["source", "header", "builtins",
"prefix=", "output-dir="])
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
output_dir = ""
prefix = ""
c_file = 'qapi-types.c'
h_file = 'qapi-types.h'
do_c = False
do_h = False
do_builtins = False
for o, a in opts:
if o in ("-p", "--prefix"):
prefix = a
elif o in ("-o", "--output-dir"):
output_dir = a + "/"
elif o in ("-c", "--source"):
do_c = True
elif o in ("-h", "--header"):
do_h = True
elif o in ("-b", "--builtins"):
do_builtins = True
if not do_c and not do_h:
do_c = True
do_h = True
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
try:
os.makedirs(output_dir)
except os.error, e:
if e.errno != errno.EEXIST:
raise
def maybe_open(really, name, opt):
if really:
return open(name, opt)
else:
import StringIO
return StringIO.StringIO()
fdef = maybe_open(do_c, c_file, 'w')
fdecl = maybe_open(do_h, h_file, 'w')
fdef.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* deallocation functions for schema-defined QAPI types
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
* Michael Roth <mdroth@linux.vnet.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#include "qapi/dealloc-visitor.h"
#include "%(prefix)sqapi-types.h"
#include "%(prefix)sqapi-visit.h"
''', prefix=prefix))
fdecl.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI types
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#ifndef %(guard)s
#define %(guard)s
#include <stdbool.h>
#include <stdint.h>
''',
guard=guardname(h_file)))
exprs = parse_schema(sys.stdin)
exprs = filter(lambda expr: not expr.has_key('gen'), exprs)
fdecl.write(guardstart("QAPI_TYPES_BUILTIN_STRUCT_DECL"))
for typename in builtin_types:
fdecl.write(generate_fwd_struct(typename, None, builtin_type=True))
fdecl.write(guardend("QAPI_TYPES_BUILTIN_STRUCT_DECL"))
for expr in exprs:
ret = "\n"
if expr.has_key('type'):
ret += generate_fwd_struct(expr['type'], expr['data'])
elif expr.has_key('enum'):
ret += generate_enum(expr['enum'], expr['data']) + "\n"
ret += generate_fwd_enum_struct(expr['enum'], expr['data'])
fdef.write(generate_enum_lookup(expr['enum'], expr['data']))
elif expr.has_key('union'):
ret += generate_fwd_struct(expr['union'], expr['data']) + "\n"
ret += generate_enum('%sKind' % expr['union'], expr['data'].keys())
fdef.write(generate_enum_lookup('%sKind' % expr['union'], expr['data'].keys()))
if expr.get('discriminator') == {}:
fdef.write(generate_anon_union_qtypes(expr))
else:
continue
fdecl.write(ret)
# to avoid header dependency hell, we always generate declarations
# for built-in types in our header files and simply guard them
fdecl.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DECL"))
for typename in builtin_types:
fdecl.write(generate_type_cleanup_decl(typename + "List"))
fdecl.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DECL"))
# ...this doesn't work for cases where we link in multiple objects that
# have the functions defined, so we use -b option to provide control
# over these cases
if do_builtins:
fdef.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DEF"))
for typename in builtin_types:
fdef.write(generate_type_cleanup(typename + "List"))
fdef.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DEF"))
for expr in exprs:
ret = "\n"
if expr.has_key('type'):
ret += generate_struct(expr['type'], "", expr['data']) + "\n"
ret += generate_type_cleanup_decl(expr['type'] + "List")
fdef.write(generate_type_cleanup(expr['type'] + "List") + "\n")
ret += generate_type_cleanup_decl(expr['type'])
fdef.write(generate_type_cleanup(expr['type']) + "\n")
elif expr.has_key('union'):
ret += generate_union(expr)
ret += generate_type_cleanup_decl(expr['union'] + "List")
fdef.write(generate_type_cleanup(expr['union'] + "List") + "\n")
ret += generate_type_cleanup_decl(expr['union'])
fdef.write(generate_type_cleanup(expr['union']) + "\n")
elif expr.has_key('enum'):
ret += generate_type_cleanup_decl(expr['enum'] + "List")
fdef.write(generate_type_cleanup(expr['enum'] + "List") + "\n")
else:
continue
fdecl.write(ret)
fdecl.write('''
#endif
''')
fdecl.flush()
fdecl.close()
fdef.flush()
fdef.close()
|
vincent-d/qemu
|
scripts/qapi-types.py
|
Python
|
gpl-2.0
| 10,549
|
[
"VisIt"
] |
01d8b9ac6503d5b429cb050bb8975811dcc21a03d683189fcd7b7feafa4778f1
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------##
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------##
import pysollib.game
from pysollib.game import Game
from pysollib.gamedb import GI, GameInfo, registerGame
from pysollib.games.canfield import CanfieldRush_Talon
from pysollib.hint import CautiousDefaultHint
from pysollib.hint import FreeCellSolverWrapper
from pysollib.hint import KlondikeType_Hint
from pysollib.layout import Layout
from pysollib.mfxutil import Struct, kwdefault
from pysollib.mygettext import _
from pysollib.pysoltk import MfxCanvasText
from pysollib.stack import \
AC_RowStack, \
BO_RowStack, \
DealRowTalonStack, \
InitialDealTalonStack, \
KingAC_RowStack, \
KingSS_RowStack, \
OpenStack, \
OpenTalonStack, \
RK_FoundationStack, \
RK_RowStack, \
RedealTalonStack, \
ReserveStack, \
SC_RowStack, \
SS_FoundationStack, \
SS_RowStack, \
Stack, \
StackWrapper, \
SuperMoveAC_RowStack, \
UD_SS_RowStack, \
WasteStack, \
WasteTalonStack, \
isSameColorSequence
from pysollib.util import ACE, ANY_RANK, ANY_SUIT, KING, NO_RANK
# ************************************************************************
# * Klondike
# ************************************************************************
class Klondike(Game):
Layout_Method = staticmethod(Layout.klondikeLayout)
Talon_Class = WasteTalonStack
Foundation_Class = SS_FoundationStack
RowStack_Class = KingAC_RowStack
Hint_Class = KlondikeType_Hint
def createGame(self, max_rounds=-1, num_deal=1, **layout):
# create layout
lay, s = Layout(self), self.s
kwdefault(layout, rows=7, waste=1, texts=1, playcards=16)
self.Layout_Method.__get__(lay, lay.__class__)(**layout)
# self.__class__.Layout_Method(lay, **layout)
self.setSize(lay.size[0], lay.size[1])
# create stacks
s.talon = self.Talon_Class(lay.s.talon.x, lay.s.talon.y, self,
max_rounds=max_rounds, num_deal=num_deal)
if lay.s.waste:
s.waste = WasteStack(lay.s.waste.x, lay.s.waste.y, self)
for r in lay.s.foundations:
s.foundations.append(
self.Foundation_Class(r.x, r.y, self, suit=r.suit))
for r in lay.s.rows:
s.rows.append(self.RowStack_Class(r.x, r.y, self))
# default
lay.defaultAll()
return lay
def startGame(self, flip=0, reverse=1):
for i in range(1, len(self.s.rows)):
self.s.talon.dealRow(
rows=self.s.rows[i:], flip=flip, frames=0, reverse=reverse)
self.startDealSample()
self.s.talon.dealRow(reverse=reverse)
if self.s.waste:
self.s.talon.dealCards() # deal first card to WasteStack
shallHighlightMatch = Game._shallHighlightMatch_AC
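# Note on the deal above: with the default seven rows, startGame() deals
# rows[1:], rows[2:], ..., rows[6:] face down, so pile k ends up with k
# face-down cards, after which one face-up card goes to every pile -- the
# classic Klondike triangle.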
# ************************************************************************
# * Vegas Klondike
# ************************************************************************
class VegasKlondike(Klondike):
getGameScore = Game.getGameScoreCasino
getGameBalance = Game.getGameScoreCasino
def createGame(self, max_rounds=1):
lay = Klondike.createGame(self, max_rounds=max_rounds)
self.texts.score = MfxCanvasText(self.canvas,
8, self.height - 8, anchor="sw",
font=self.app.getFont("canvas_large"))
return lay
def updateText(self):
if self.preview > 1:
return
b1, b2 = self.app.stats.gameid_balance, 0
if self.shallUpdateBalance():
b2 = self.getGameBalance()
t = _("Balance $%d") % (b1 + b2)
self.texts.score.config(text=t)
def getDemoInfoTextAttr(self, tinfo):
return tinfo[1] # "se" corner
# ************************************************************************
# * Casino Klondike
# ************************************************************************
class CasinoKlondike(VegasKlondike):
def createGame(self):
lay = VegasKlondike.createGame(self, max_rounds=3)
lay.createRoundText(self.s.talon, 'ne', dx=lay.XS)
# ************************************************************************
# * Klondike by Threes
# ************************************************************************
class KlondikeByThrees(Klondike):
def createGame(self):
Klondike.createGame(self, num_deal=3)
# ************************************************************************
# * Trigon
# ************************************************************************
class Trigon(Klondike):
RowStack_Class = KingSS_RowStack
# ************************************************************************
# * Thumb and Pouch
# * Chinaman
# ************************************************************************
class ThumbAndPouch(Klondike):
RowStack_Class = BO_RowStack
def createGame(self):
Klondike.createGame(self, max_rounds=1)
def shallHighlightMatch(self, stack1, card1, stack2, card2):
return (card1.suit != card2.suit and
(card1.rank + 1 == card2.rank or
card2.rank + 1 == card1.rank))
class Chinaman(ThumbAndPouch):
RowStack_Class = StackWrapper(BO_RowStack, base_rank=KING)
def createGame(self):
lay = Klondike.createGame(self, num_deal=3,
max_rounds=2, round_text=True)
lay.createRoundText(self.s.talon, 'ne', dx=lay.XS)
# ************************************************************************
# * Whitehead
# ************************************************************************
class Whitehead_RowStack(SS_RowStack):
def _isAcceptableSequence(self, cards):
return isSameColorSequence(cards, self.cap.mod, self.cap.dir)
def getHelp(self):
return _('Tableau. Build down by color. Sequences of cards '
'in the same suit can be moved as a unit.')
class Whitehead(Klondike):
RowStack_Class = Whitehead_RowStack
Hint_Class = CautiousDefaultHint
def createGame(self):
Klondike.createGame(self, max_rounds=1)
def startGame(self):
Klondike.startGame(self, flip=1)
shallHighlightMatch = Game._shallHighlightMatch_SS
getQuickPlayScore = Game._getSpiderQuickPlayScore
# ************************************************************************
# * Small Harp (Klondike in a different layout)
# ************************************************************************
class SmallHarp(Klondike):
Layout_Method = staticmethod(Layout.gypsyLayout)
def startGame(self):
for i in range(len(self.s.rows)):
self.s.talon.dealRow(rows=self.s.rows[:i], flip=0, frames=0)
self._startAndDealRowAndCards()
# ************************************************************************
# * Eastcliff
# * Easthaven
# ************************************************************************
class Eastcliff(Klondike):
RowStack_Class = AC_RowStack
def createGame(self):
Klondike.createGame(self, max_rounds=1)
def startGame(self):
for i in range(2):
self.s.talon.dealRow(flip=0, frames=0)
self.startDealSample()
self.s.talon.dealRow()
if self.s.waste:
self.s.talon.dealCards() # deal first card to WasteStack
class Easthaven(Eastcliff):
Talon_Class = DealRowTalonStack
def createGame(self):
Klondike.createGame(self, max_rounds=1, waste=0)
class DoubleEasthaven(Easthaven):
def createGame(self):
Klondike.createGame(self, rows=8, max_rounds=1, waste=0, playcards=20)
class TripleEasthaven(Easthaven):
def createGame(self):
Klondike.createGame(self, rows=12, max_rounds=1, waste=0, playcards=26)
# ************************************************************************
# * Westcliff
# * Westhaven
# ************************************************************************
class Westcliff(Eastcliff):
Foundation_Class = StackWrapper(SS_FoundationStack, max_move=0)
def createGame(self):
Klondike.createGame(self, max_rounds=1, rows=10)
class Westhaven(Westcliff):
Talon_Class = DealRowTalonStack
def createGame(self):
Klondike.createGame(self, max_rounds=1, rows=10, waste=0)
# ************************************************************************
# * Pas Seul
# ************************************************************************
class PasSeul(pysollib.game.StartDealRowAndCards, Eastcliff):
def createGame(self):
Klondike.createGame(self, max_rounds=1, rows=6)
# ************************************************************************
# * Blind Alleys
# ************************************************************************
class BlindAlleys(Eastcliff):
def createGame(self):
lay = Klondike.createGame(self, max_rounds=2, rows=6, round_text=True)
lay.createRoundText(self.s.talon, 'ne', dx=lay.XS)
def _shuffleHook(self, cards):
# move Aces to top of the Talon (i.e. first cards to be dealt)
return self._shuffleHookMoveToTop(
cards, lambda c: (c.rank == 0, c.suit))
def startGame(self):
self.s.talon.dealRow(rows=self.s.foundations, frames=0)
Eastcliff.startGame(self)
# ************************************************************************
# * Somerset
# * Morehead
# * Usk
# ************************************************************************
class Somerset(Klondike):
Talon_Class = InitialDealTalonStack
RowStack_Class = SuperMoveAC_RowStack
Hint_Class = CautiousDefaultHint
Solver_Class = FreeCellSolverWrapper()
def createGame(self):
Klondike.createGame(self, max_rounds=1, rows=10, waste=0, texts=0)
def startGame(self):
for i in range(6):
self.s.talon.dealRow(rows=self.s.rows[i:], frames=0)
self.startDealSample()
self.s.talon.dealRow(rows=self.s.rows[6:])
self.s.talon.dealRow(rows=self.s.rows[7:])
class Morehead(Somerset):
RowStack_Class = StackWrapper(BO_RowStack, max_move=1)
Solver_Class = None
class Usk(Somerset):
Talon_Class = RedealTalonStack
RowStack_Class = StackWrapper(AC_RowStack, base_rank=KING)
Solver_Class = None
def createGame(self):
lay = Klondike.createGame(self, max_rounds=2, rows=10,
waste=False, texts=False, round_text=True)
lay.createRoundText(self.s.talon, 'ne')
def redealCards(self):
n = 0
while self.s.talon.cards:
self.s.talon.dealRowAvail(rows=self.s.rows[n:], frames=4)
n += 1
# ************************************************************************
# * Canister
# * American Canister
# * British Canister
# ************************************************************************
class AmericanCanister(Klondike):
Talon_Class = InitialDealTalonStack
RowStack_Class = AC_RowStack
Solver_Class = FreeCellSolverWrapper(sm='unlimited')
def createGame(self):
Klondike.createGame(self, max_rounds=1, rows=8, waste=0, texts=0)
def startGame(self):
self._startDealNumRows(5)
self.s.talon.dealRow()
self.s.talon.dealRow(rows=self.s.rows[2:6])
class Canister(AmericanCanister):
RowStack_Class = RK_RowStack
Solver_Class = FreeCellSolverWrapper(sbb='rank', sm='unlimited')
shallHighlightMatch = Game._shallHighlightMatch_RK
class BritishCanister(AmericanCanister):
RowStack_Class = StackWrapper(KingAC_RowStack, max_move=1)
Solver_Class = FreeCellSolverWrapper(esf='kings')
# ************************************************************************
# * Agnes Sorel
# ************************************************************************
class AgnesSorel(Klondike):
Talon_Class = DealRowTalonStack
Foundation_Class = StackWrapper(
SS_FoundationStack, mod=13, base_rank=NO_RANK, max_move=0)
RowStack_Class = StackWrapper(SC_RowStack, mod=13, base_rank=NO_RANK)
def createGame(self):
Klondike.createGame(self, max_rounds=1, waste=0)
def startGame(self):
Klondike.startGame(self, flip=1)
self.s.talon.dealSingleBaseCard()
def shallHighlightMatch(self, stack1, card1, stack2, card2):
return (card1.color == card2.color and
((card1.rank + 1) % 13 == card2.rank or
(card2.rank + 1) % 13 == card1.rank))
# ************************************************************************
# * 8 x 8
# * Achtmal Acht
# * Eight by Eight
# ************************************************************************
class EightTimesEight(Klondike):
Layout_Method = staticmethod(Layout.gypsyLayout)
RowStack_Class = AC_RowStack
def createGame(self):
Klondike.createGame(self, rows=8)
def startGame(self):
self._startDealNumRows(7)
self.s.talon.dealRow()
self.s.talon.dealCards() # deal first card to WasteStack
class AchtmalAcht(EightTimesEight):
def createGame(self):
lay = Klondike.createGame(self, rows=8, max_rounds=3, round_text=True)
lay.createRoundText(self.s.talon, 'sw', dx=-lay.XS)
class EightByEight_RowStack(RK_RowStack):
def acceptsCards(self, from_stack, cards):
if not RK_RowStack.acceptsCards(self, from_stack, cards):
return False
if not self.cards:
return len(cards) == 1
return True
class EightByEight(EightTimesEight):
Layout_Method = staticmethod(Layout.klondikeLayout) # gypsyLayout
Talon_Class = CanfieldRush_Talon
RowStack_Class = EightByEight_RowStack
def createGame(self):
lay = Klondike.createGame(self, rows=8, playcards=20,
max_rounds=3, round_text=True)
lay.createRoundText(self.s.talon, 'ne', dx=lay.XS)
shallHighlightMatch = Game._shallHighlightMatch_RK
# ************************************************************************
# * Batsford
# * Batsford Again
# ************************************************************************
class Batsford_ReserveStack(ReserveStack):
def acceptsCards(self, from_stack, cards):
if not ReserveStack.acceptsCards(self, from_stack, cards):
return False
# must be a King
return cards[0].rank == KING
def getHelp(self):
return _('Reserve. Only Kings are acceptable.')
class Batsford(Klondike):
def createGame(self, **layout):
kwdefault(layout, rows=10, max_rounds=1, playcards=22)
round_text = (layout['max_rounds'] > 1)
layout['round_text'] = round_text
lay = Klondike.createGame(self, **layout)
s = self.s
x, y = lay.XM, self.height - lay.YS
s.reserves.append(Batsford_ReserveStack(x, y, self, max_cards=3))
self.setRegion(
s.reserves, (-999, y - lay.YM - lay.CH//2,
x + lay.XS - lay.CW//2, 999999),
priority=1)
lay.createText(s.reserves[0], "se")
if round_text:
lay.createRoundText(self.s.talon, 'ne', dx=lay.XS)
lay.defaultStackGroups()
class BatsfordAgain(Batsford):
def createGame(self):
Batsford.createGame(self, max_rounds=2)
# ************************************************************************
# * Jumbo
# ************************************************************************
class Jumbo(Klondike):
def createGame(self):
lay = Klondike.createGame(self, rows=9, max_rounds=2, round_text=True)
lay.createRoundText(self.s.talon, 'ne', dx=lay.XS)
def startGame(self, flip=0):
for i in range(9):
self.s.talon.dealRow(rows=self.s.rows[:i], flip=flip, frames=0)
self._startAndDealRowAndCards()
class OpenJumbo(Jumbo):
def startGame(self):
Jumbo.startGame(self, flip=1)
# ************************************************************************
# * Stonewall
# * Flower Garden
# ************************************************************************
class Stonewall(Klondike):
Talon_Class = InitialDealTalonStack
RowStack_Class = AC_RowStack
DEAL = (0, 1, 0, 1, -1, 0, 1)
def createGame(self):
lay = Klondike.createGame(self, rows=6, waste=0, max_rounds=1, texts=0)
s = self.s
h = max(self.height, lay.YM+4*lay.YS)
self.setSize(self.width + lay.XM+4*lay.XS, h)
for i in range(4):
for j in range(4):
x, y = self.width + (j-4)*lay.XS, lay.YM + i*lay.YS
s.reserves.append(OpenStack(x, y, self, max_accept=0))
lay.defaultStackGroups()
def startGame(self):
frames = 0
for flip in self.DEAL:
if flip < 0:
frames = -1
self.startDealSample()
else:
self.s.talon.dealRow(flip=flip, frames=frames)
self.s.talon.dealRow(rows=self.s.reserves)
class FlowerGarden(Stonewall):
RowStack_Class = StackWrapper(RK_RowStack, max_move=1)
Hint_Class = CautiousDefaultHint
DEAL = (1, 1, 1, 1, -1, 1, 1)
shallHighlightMatch = Game._shallHighlightMatch_RK
# ************************************************************************
# * King Albert
# * Raglan
# * Brigade
# * Relaxed Raglan
# * Queen Victoria
# ************************************************************************
class KingAlbert(Klondike):
Talon_Class = InitialDealTalonStack
RowStack_Class = StackWrapper(AC_RowStack, max_move=1)
Hint_Class = CautiousDefaultHint
ROWS = 9
RESERVES = (2, 2, 2, 1)
def createGame(self):
lay = Klondike.createGame(
self, max_rounds=1, rows=self.ROWS, waste=0, texts=0)
s = self.s
rw, rh = max(self.RESERVES), len(self.RESERVES)
h = max(self.height, lay.YM+rh*lay.YS)
self.setSize(self.width + 2*lay.XM+rw*lay.XS, h)
for i in range(rh):
for j in range(self.RESERVES[i]):
x, y = self.width + (j-rw)*lay.XS, lay.YM + i*lay.YS
s.reserves.append(OpenStack(x, y, self, max_accept=0))
lay.defaultStackGroups()
def startGame(self):
Klondike.startGame(self, flip=1, reverse=0)
self.s.talon.dealRow(rows=self.s.reserves)
class Raglan(KingAlbert):
RESERVES = (2, 2, 2)
def _shuffleHook(self, cards):
# move Aces to bottom of the Talon (i.e. last cards to be dealt)
return self._shuffleHookMoveToBottom(
cards, lambda c: (c.rank == 0, c.suit))
def startGame(self):
for i in range(6):
self.s.talon.dealRow(rows=self.s.rows[i:], frames=0)
self.startDealSample()
self.s.talon.dealRow(rows=self.s.rows[6:])
self.s.talon.dealRow(rows=self.s.reserves)
self.s.talon.dealRow(rows=self.s.foundations)
class Brigade(Raglan):
RowStack_Class = StackWrapper(RK_RowStack, max_move=1)
ROWS = 7
RESERVES = (4, 4, 4, 1)
def startGame(self):
self._startDealNumRows(4)
self.s.talon.dealRow()
self.s.talon.dealRow(rows=self.s.reserves)
self.s.talon.dealRow(rows=self.s.foundations)
shallHighlightMatch = Game._shallHighlightMatch_RK
class RelaxedRaglan(Raglan):
RowStack_Class = AC_RowStack
class QueenVictoria(KingAlbert):
RowStack_Class = AC_RowStack
# ************************************************************************
# * Jane
# * Agnes Bernauer
# ************************************************************************
class Jane_Talon(OpenTalonStack):
rightclickHandler = OpenStack.rightclickHandler
doubleclickHandler = OpenStack.doubleclickHandler
def canFlipCard(self):
return False
def canDealCards(self):
return len(self.cards) >= 2
def dealCards(self, sound=False):
c = 0
if len(self.cards) > 2:
c = self.dealRow(self.game.s.reserves, sound=sound)
if len(self.cards) == 2:
self.game.flipMove(self)
self.game.moveMove(1, self, self.game.s.waste, frames=4, shadow=0)
self.game.flipMove(self)
c = c + 1
return c
class Jane(Klondike):
Talon_Class = Jane_Talon
Foundation_Class = StackWrapper(
SS_FoundationStack, mod=13, base_rank=NO_RANK, min_cards=1)
RowStack_Class = StackWrapper(AC_RowStack, mod=13, base_rank=NO_RANK)
def createGame(self, max_rounds=1, rows=7, reserves=7, playcards=16):
lay, s = Layout(self), self.s
maxrows = max(rows, 7)
w = lay.XM+maxrows*lay.XS+lay.XM+2*lay.XS
h = max(lay.YM+2*lay.YS+playcards*lay.YOFFSET+lay.TEXT_HEIGHT,
lay.YM+4*lay.YS)
self.setSize(w, h)
x, y = lay.XM, lay.YM
s.talon = self.Talon_Class(x, y, self, max_rounds=max_rounds)
lay.createText(s.talon, 's')
x += lay.XS
s.waste = WasteStack(x, y, self)
x += 2*lay.XS
for i in range(4):
s.foundations.append(self.Foundation_Class(x, y, self, suit=i))
x += lay.XS
x, y = lay.XM, lay.YM+lay.YS+lay.TEXT_HEIGHT
for i in range(rows):
s.rows.append(self.RowStack_Class(x, y, self))
x += lay.XS
x0, y = self.width - 2*lay.XS, lay.YM
for i in range(reserves):
x = x0 + ((i+1) & 1) * lay.XS
stack = OpenStack(x, y, self, max_accept=0)
stack.CARD_YOFFSET = lay.YM // 3
s.reserves.append(stack)
y = y + lay.YS // 2
# not needed, as no cards may be placed on the reserves
# self.setRegion(s.reserves, (x0-lay.XM//2, -999, 999999, 999999),
# priority=1)
lay.defaultStackGroups()
self.sg.dropstacks.append(s.talon)
def startGame(self, flip=0, reverse=1):
for i in range(1, len(self.s.rows)):
self.s.talon.dealRow(
rows=self.s.rows[i:], flip=flip, frames=0, reverse=reverse)
self.startDealSample()
self.s.talon.dealRow(reverse=reverse)
self.s.talon.dealRow(rows=self.s.reserves)
c = self.s.talon.dealSingleBaseCard()
# update base rank of row stacks
cap = Struct(base_rank=(c.rank - 1) % 13)
for s in self.s.rows:
s.cap.update(cap.__dict__)
self.saveinfo.stack_caps.append((s.id, cap))
shallHighlightMatch = Game._shallHighlightMatch_ACW
def _autoDeal(self, sound=True):
return 0
class AgnesBernauer_Talon(DealRowTalonStack):
def dealCards(self, sound=False):
return self.dealRowAvail(self.game.s.reserves, sound=sound)
class AgnesBernauer(Jane):
Talon_Class = AgnesBernauer_Talon
Foundation_Class = StackWrapper(
SS_FoundationStack, mod=13, base_rank=NO_RANK, max_move=0)
def startGame(self):
Jane.startGame(self, flip=1)
# ************************************************************************
# * Senate
# ************************************************************************
class Senate(Jane):
def createGame(self, rows=4):
playcards = 10
lay, s = Layout(self), self.s
self.setSize(lay.XM+(rows+7)*lay.XS,
lay.YM+2*(lay.YS+playcards*lay.YOFFSET))
x, y = lay.XM, lay.YM
for i in range(rows):
s.rows.append(SS_RowStack(x, y, self))
x += lay.XS
for y in lay.YM, lay.YM+lay.YS+playcards*lay.YOFFSET:
x = lay.XM+rows*lay.XS+lay.XS//2
for i in range(4):
stack = OpenStack(x, y, self, max_accept=0)
stack.CARD_XOFFSET, stack.CARD_YOFFSET = 0, lay.YOFFSET
s.reserves.append(stack)
x += lay.XS
x = lay.XM+(rows+5)*lay.XS
for i in range(2):
y = lay.YM+lay.YS
for j in range(4):
s.foundations.append(SS_FoundationStack(x, y, self, suit=j))
y += lay.YS
x += lay.XS
x, y = self.width-lay.XS, lay.YM
s.talon = AgnesBernauer_Talon(x, y, self)
lay.createText(s.talon, 'nw')
lay.defaultStackGroups()
def startGame(self):
self.s.talon.dealRow(rows=self.s.foundations, frames=0)
self.startDealSample()
self.s.talon.dealRow(rows=self.s.reserves)
self.s.talon.dealRow()
def _shuffleHook(self, cards):
# move Aces to top of the Talon (i.e. first cards to be dealt)
return self._shuffleHookMoveToTop(
cards,
lambda c: (c.rank == ACE, (c.deck, c.suit)))
shallHighlightMatch = Game._shallHighlightMatch_SS
class SenatePlus(Senate):
def createGame(self):
Senate.createGame(self, rows=5)
# ************************************************************************
# * Phoenix
# * Arizona
# ************************************************************************
class Phoenix(Klondike):
Hint_Class = CautiousDefaultHint
RowStack_Class = AC_RowStack
def createGame(self):
lay, s = Layout(self), self.s
self.setSize(lay.XM + 10*lay.XS, lay.YM + 4*(lay.YS+lay.YM))
for i in range(2):
x = lay.XM + i*lay.XS
for j in range(4):
y = lay.YM + j*(lay.YS+lay.YM)
s.reserves.append(OpenStack(x, y, self, max_accept=0))
for i in range(2):
x = lay.XM + (8+i)*lay.XS
for j in range(4):
y = lay.YM + j*(lay.YS+lay.YM)
s.reserves.append(OpenStack(x, y, self, max_accept=0))
for i in range(4):
s.foundations.append(
SS_FoundationStack(lay.XM+(3+i)*lay.XS, lay.YM, self, i))
for i in range(6):
s.rows.append(
self.RowStack_Class(lay.XM+(2+i)*lay.XS, lay.YM+lay.YS, self))
s.talon = InitialDealTalonStack(
lay.XM+int(4.5*lay.XS), lay.YM+3*(lay.YS+lay.YM), self)
lay.defaultStackGroups()
def startGame(self):
self._startDealNumRows(6)
self.s.talon.dealRow(rows=self.s.reserves)
class Arizona(Phoenix):
RowStack_Class = RK_RowStack
shallHighlightMatch = Game._shallHighlightMatch_RK
# ************************************************************************
# * Lanes
# ************************************************************************
class Lanes(Klondike):
Hint_Class = CautiousDefaultHint
Foundation_Class = StackWrapper(SS_FoundationStack, max_move=0)
RowStack_Class = StackWrapper(AC_RowStack, base_rank=ANY_RANK, max_move=1)
def createGame(self):
lay = Klondike.createGame(self, rows=6, max_rounds=2, round_text=True)
lay.createRoundText(self.s.talon, 'ne', dx=lay.XS)
def _shuffleHook(self, cards):
# move Aces to top of the Talon (i.e. first cards to be dealt)
return self._shuffleHookMoveToTop(cards,
lambda c: (c.rank == ACE, c.suit))
def startGame(self):
self.s.talon.dealRow(rows=self.s.foundations, frames=0)
self._startDealNumRows(2)
self.s.talon.dealRow()
self.s.talon.dealCards() # deal first card to WasteStack
# ************************************************************************
# * Thirty Six
# ************************************************************************
class ThirtySix(Klondike):
Foundation_Class = StackWrapper(SS_FoundationStack, max_move=0)
RowStack_Class = StackWrapper(RK_RowStack, base_rank=ANY_RANK)
def createGame(self):
Klondike.createGame(self, rows=6, max_rounds=1)
def _fillOne(self):
for r in self.s.rows:
if r.cards:
c = r.cards[-1]
for f in self.s.foundations:
if f.acceptsCards(r, [c]):
self.moveMove(1, r, f, frames=4, shadow=0)
return 1
return 0
def startGame(self):
self.startDealSample()
for i in range(6):
self.s.talon.dealRow()
while True:
if not self._fillOne():
break
self.s.talon.dealCards() # deal first card to WasteStack
shallHighlightMatch = Game._shallHighlightMatch_RK
# ************************************************************************
# * Q.C.
# ************************************************************************
class Q_C_(Klondike):
Hint_Class = CautiousDefaultHint
Foundation_Class = StackWrapper(SS_FoundationStack, max_move=0)
RowStack_Class = StackWrapper(SS_RowStack, base_rank=ANY_RANK, max_move=1)
def createGame(self):
lay = Klondike.createGame(self, rows=6, max_rounds=2)
lay.createRoundText(self.s.talon, 'sss')
def startGame(self):
self._startDealNumRows(3)
self.s.talon.dealRow()
while self.s.talon.cards:
self.s.talon.dealCards() # deal first card to WasteStack
if not self.fillWaste():
break
def fillWaste(self):
waste = self.s.waste
if waste.cards:
c = waste.cards[-1]
for f in self.s.foundations:
if f.acceptsCards(self.s.waste, [c]):
waste.moveMove(1, f)
return True
return False
def fillStack(self, stack=None):
waste = self.s.waste
while True:
if not self.fillWaste():
break
if stack in self.s.rows and not stack.cards:
if not waste.cards:
while self.s.talon.cards:
self.s.talon.dealCards()
if not self.fillWaste():
break
if waste.cards:
waste.moveMove(1, stack)
shallHighlightMatch = Game._shallHighlightMatch_SS
# ************************************************************************
# * Northwest Territory
# * Artic Garden
# ************************************************************************
class NorthwestTerritory(KingAlbert):
RowStack_Class = StackWrapper(AC_RowStack, base_rank=KING)
RESERVES = (4, 4, 4, 4)
ROWS = 8
def startGame(self):
Klondike.startGame(self, flip=0, reverse=0)
self.s.talon.dealRow(rows=self.s.reserves)
class ArticGarden(NorthwestTerritory):
def startGame(self):
Klondike.startGame(self, flip=1, reverse=0)
self.s.talon.dealRow(rows=self.s.reserves)
# ************************************************************************
# * Aunt Mary
# ************************************************************************
class AuntMary(Klondike):
def createGame(self):
Klondike.createGame(self, rows=6, max_rounds=1)
def startGame(self):
for i in range(5):
j = i+1
self.s.talon.dealRow(rows=self.s.rows[:j], frames=0, flip=1)
self.s.talon.dealRow(rows=self.s.rows[j:], frames=0, flip=0)
self.startDealSample()
self.s.talon.dealRow()
self.s.talon.dealCards()
# ************************************************************************
# * Double Dot
# ************************************************************************
class DoubleDot(Klondike):
Talon_Class = DealRowTalonStack
RowStack_Class = StackWrapper(RK_RowStack, dir=-2, mod=13)
Foundation_Class = StackWrapper(SS_FoundationStack, dir=2, mod=13)
def createGame(self):
Klondike.createGame(self, max_rounds=1, rows=8, waste=0)
def _shuffleHook(self, cards):
return self._shuffleHookMoveToTop(
cards,
lambda c: ((c.rank == ACE and c.suit in (0, 1)) or
(c.rank == 1 and c.suit in (2, 3)), c.suit))
def startGame(self):
self.s.talon.dealRow(rows=self.s.foundations, frames=0)
self._startAndDealRow()
# def shallHighlightMatch(self, stack1, card1, stack2, card2):
# return abs(card1.rank-card2.rank) == 2
shallHighlightMatch = Game._shallHighlightMatch_RKW
# ************************************************************************
# * Seven Devils
# ************************************************************************
class SevenDevils_RowStack(AC_RowStack):
def acceptsCards(self, from_stack, cards):
if not AC_RowStack.acceptsCards(self, from_stack, cards):
return False
return from_stack not in self.game.s.reserves
class SevenDevils(Klondike):
Hint_Class = CautiousDefaultHint
RowStack_Class = StackWrapper(SevenDevils_RowStack, max_move=1)
def createGame(self):
lay, s = Layout(self), self.s
self.setSize(lay.XM + 10*lay.XS, lay.YM+3*lay.YS+12*lay.YOFFSET)
x, y = lay.XM, lay.YM
for i in range(8):
s.foundations.append(SS_FoundationStack(x, y, self, suit=i//2))
x += lay.XS
x, y = lay.XM+lay.XS//2, lay.YM+lay.YS
for i in range(7):
s.rows.append(self.RowStack_Class(x, y, self))
x += lay.XS
x0, y = self.width - 2*lay.XS, lay.YM
for i in range(7):
x = x0 + ((i+1) & 1) * lay.XS
s.reserves.append(OpenStack(x, y, self, max_accept=0))
y += lay.YS // 2
x, y = lay.XM, self.height-lay.YS
s.talon = WasteTalonStack(x, y, self, max_rounds=1)
lay.createText(s.talon, 'n')
x += lay.XS
s.waste = WasteStack(x, y, self)
lay.createText(s.waste, 'n')
lay.defaultStackGroups()
def startGame(self, flip=0, reverse=1):
Klondike.startGame(self)
self.s.talon.dealRow(rows=self.s.reserves)
# ************************************************************************
# * Moving Left
# * Souter
# ************************************************************************
class MovingLeft(Klondike):
def createGame(self):
Klondike.createGame(self, max_rounds=1, rows=10, playcards=24)
def fillStack(self, stack):
if not stack.cards:
old_state = self.enterState(self.S_FILL)
if stack in self.s.rows:
i = list(self.s.rows).index(stack)
if i < len(self.s.rows)-1:
from_stack = self.s.rows[i+1]
pile = from_stack.getPile()
if pile:
from_stack.moveMove(len(pile), stack)
self.leaveState(old_state)
class Souter(MovingLeft):
def createGame(self):
lay = Klondike.createGame(self, max_rounds=2, rows=10,
playcards=24, round_text=True)
lay.createRoundText(self.s.talon, 'ne', dx=lay.XS)
# ************************************************************************
# * Big Forty
# * Ali Baba
# * Cassim
# ************************************************************************
class BigForty(Klondike):
RowStack_Class = SS_RowStack
def createGame(self):
Klondike.createGame(self, rows=10)
def startGame(self):
self._startDealNumRowsAndDealRowAndCards(3)
shallHighlightMatch = Game._shallHighlightMatch_SS
class AliBaba(BigForty):
def _shuffleHook(self, cards):
# move Aces to top of the Talon (i.e. first cards to be dealt)
return self._shuffleHookMoveToTop(cards,
lambda c: (c.rank == ACE, c.suit))
def startGame(self):
self.s.talon.dealRow(rows=self.s.foundations, frames=0)
BigForty.startGame(self)
class Cassim(AliBaba):
def createGame(self):
Klondike.createGame(self, rows=7)
# ************************************************************************
# * Saratoga
# ************************************************************************
class Saratoga(Klondike):
def createGame(self):
Klondike.createGame(self, num_deal=3)
def startGame(self):
Klondike.startGame(self, flip=1)
# ************************************************************************
# * Whitehorse
# ************************************************************************
class Whitehorse(Klondike):
def createGame(self):
Klondike.createGame(self, num_deal=3)
def startGame(self):
self.startDealSample()
self.s.talon.dealRow()
self.s.talon.dealCards()
def fillStack(self, stack):
if not stack.cards:
old_state = self.enterState(self.S_FILL)
if stack in self.s.rows:
if not self.s.waste.cards:
self.s.talon.dealCards()
if self.s.waste.cards:
self.s.waste.moveMove(1, stack)
self.leaveState(old_state)
# ************************************************************************
# * Boost
# ************************************************************************
class Boost(Klondike):
def createGame(self):
lay = Klondike.createGame(self, rows=4, max_rounds=3, round_text=True)
lay.createRoundText(self.s.talon, 'ne', dx=lay.XS)
# ************************************************************************
# * Gold Rush
# ************************************************************************
class GoldRush(Klondike):
Talon_Class = CanfieldRush_Talon
def createGame(self):
lay = Klondike.createGame(self, max_rounds=3, round_text=True)
lay.createRoundText(self.s.talon, 'ne', dx=lay.XS)
# ************************************************************************
# * Gold Mine
# ************************************************************************
class GoldMine_RowStack(AC_RowStack):
getBottomImage = Stack._getReserveBottomImage
class GoldMine(Klondike):
RowStack_Class = GoldMine_RowStack
def createGame(self):
Klondike.createGame(self, max_rounds=1, num_deal=3)
def startGame(self):
self.startDealSample()
self.s.talon.dealCards()
# ************************************************************************
# * Lucky Thirteen
# * Lucky Piles
# ************************************************************************
class LuckyThirteen(Game):
Hint_Class = CautiousDefaultHint
RowStack_Class = StackWrapper(RK_RowStack, base_rank=NO_RANK)
def createGame(self, xoffset=0, playcards=0):
lay, s = Layout(self), self.s
if xoffset:
xoffset = lay.XOFFSET
w0 = lay.XS+playcards*lay.XOFFSET
self.setSize(lay.XM + 5*w0, lay.YM+4*lay.YS)
x, y = lay.XM, lay.YM+lay.YS
for i in range(5):
stack = self.RowStack_Class(x, y, self, max_move=1)
s.rows.append(stack)
stack.CARD_XOFFSET = xoffset
stack.CARD_YOFFSET = 0
x += w0
x, y = lay.XM+w0, lay.YM+2*lay.YS
for i in range(3):
stack = self.RowStack_Class(x, y, self, max_move=1)
s.rows.append(stack)
stack.CARD_XOFFSET = xoffset
stack.CARD_YOFFSET = 0
x += w0
x, y = lay.XM, lay.YM+3*lay.YS
for i in range(5):
stack = self.RowStack_Class(x, y, self, max_move=1)
s.rows.append(stack)
stack.CARD_XOFFSET = xoffset
stack.CARD_YOFFSET = 0
x += w0
x, y = (self.width-4*lay.XS)//2, lay.YM
for i in range(4):
s.foundations.append(SS_FoundationStack(x, y, self, suit=i))
x += lay.XS
x, y = lay.XM, self.height-lay.YS
s.talon = InitialDealTalonStack(x, y, self, max_rounds=1)
lay.defaultStackGroups()
def startGame(self):
self._startDealNumRowsAndDealSingleRow(3)
shallHighlightMatch = Game._shallHighlightMatch_RK
class LuckyPiles(LuckyThirteen):
RowStack_Class = StackWrapper(UD_SS_RowStack, base_rank=KING)
def createGame(self):
LuckyThirteen.createGame(self, xoffset=1, playcards=7)
shallHighlightMatch = Game._shallHighlightMatch_SS
# ************************************************************************
# * Legion
# ************************************************************************
class Legion(Klondike):
def createGame(self):
Klondike.createGame(self, max_rounds=1, rows=8)
def startGame(self):
self.startDealSample()
self.s.talon.dealRow()
for i in (1, 2, 3):
self.s.talon.dealRow(rows=self.s.rows[i:-i], flip=0)
self.s.talon.dealRow(rows=self.s.rows[i:-i])
self.s.talon.dealCards()
# ************************************************************************
# * Big Bertha
# ************************************************************************
class BigBertha(Game):
def createGame(self):
lay, s = Layout(self), self.s
self.setSize(lay.XM+15*lay.XS, lay.YM+3*lay.YS+15*lay.YOFFSET)
x, y = lay.XM, lay.YM
s.talon = InitialDealTalonStack(x, y, self)
x, y = lay.XM+3.5*lay.XS, lay.YM
for i in range(8):
s.foundations.append(SS_FoundationStack(x, y, self,
suit=i % 4, max_cards=12))
x += lay.XS
x, y = lay.XM, lay.YM+lay.YS
for i in range(15):
s.rows.append(AC_RowStack(x, y, self))
x += lay.XS
x, y = lay.XM, self.height-lay.YS
for i in range(14):
s.reserves.append(OpenStack(x, y, self, max_accept=0))
x += lay.XS
s.foundations.append(RK_FoundationStack(x, y, self, suit=ANY_SUIT,
base_rank=KING, dir=0, max_cards=8))
lay.defaultStackGroups()
def startGame(self):
self._startDealNumRows(5)
self.s.talon.dealRow()
self.s.talon.dealRow(rows=self.s.reserves)
shallHighlightMatch = Game._shallHighlightMatch_AC
# ************************************************************************
# * Athena
# ************************************************************************
class Athena(Klondike):
def startGame(self):
self.s.talon.dealRow(frames=0, flip=0)
self.s.talon.dealRow(frames=0)
self.s.talon.dealRow(frames=0, flip=0)
self.startDealSample()
self.s.talon.dealRow()
self.s.talon.dealCards()
# ************************************************************************
# * Kingsley
# ************************************************************************
class Kingsley(Klondike):
Foundation_Class = StackWrapper(SS_FoundationStack, base_rank=KING, dir=-1)
RowStack_Class = StackWrapper(KingAC_RowStack, base_rank=ACE, dir=1)
def createGame(self):
Klondike.createGame(self, max_rounds=1)
# ************************************************************************
# * Scarp
# ************************************************************************
class Scarp(Klondike):
Talon_Class = DealRowTalonStack
RowStack_Class = AC_RowStack
def createGame(self):
Klondike.createGame(self, max_rounds=1, rows=13, waste=0, playcards=28)
def startGame(self):
Klondike.startGame(self, flip=1)
# ************************************************************************
# * Eight Sages
# ************************************************************************
class EightSages_Row(AC_RowStack):
def acceptsCards(self, from_stack, cards):
if not AC_RowStack.acceptsCards(self, from_stack, cards):
return False
return from_stack is self.game.s.waste
class EightSages(Klondike):
RowStack_Class = EightSages_Row
def createGame(self):
lay = Klondike.createGame(self, max_rounds=2, rows=8,
playcards=12, round_text=True)
lay.createRoundText(self.s.talon, 'ne', dx=lay.XS)
def startGame(self):
self.startDealSample()
self.s.talon.dealRow()
self.s.talon.dealCards()
# ************************************************************************
# * Guardian
# ************************************************************************
class Guardian_RowStack(AC_RowStack):
STEP = (3, 3, 3, 4, 4, 4, 4)
def basicIsBlocked(self):
r, step = self.game.s.rows, self.STEP
i, n, mylen = self.id, 1, len(step)
while i < mylen:
i = i + step[i]
n = n + 1
for j in range(i, i + n):
if r[j].cards:
return True
return False
def acceptsCards(self, from_stack, cards):
if len(self.cards) == 0 and self.id > 2:
return False
return AC_RowStack.acceptsCards(self, from_stack, cards)
class Guardian(Game):
def createGame(self):
lay, s = Layout(self), self.s
self.setSize((7 * lay.XS) + lay.XM,
(2.5 * lay.YS) + (13 * lay.YOFFSET) + lay.YM)
# create stacks
for i in range(3):
x = lay.XM + (4 - i) * lay.XS // 2
y = lay.YM + lay.TEXT_HEIGHT + lay.YS + i * lay.YS // 4
for j in range(i + 3):
s.rows.append(Guardian_RowStack(x, y, self))
x = x + lay.XS
x, y = lay.XM, lay.YM
s.talon = WasteTalonStack(x, y, self,
max_rounds=-1, num_deal=3)
lay.createText(s.talon, "s")
x += lay.XS
s.waste = WasteStack(x, y, self)
lay.createText(s.waste, "s")
x += lay.XS
for i in range(4):
x += lay.XS
s.foundations.append(SS_FoundationStack(x, y, self, i,
mod=13, max_move=0))
lay.defaultStackGroups()
def startGame(self):
self.startDealSample()
self.s.talon.dealRow(rows=self.s.rows[:7], flip=0)
self.s.talon.dealRow(rows=self.s.rows[7:])
self.s.talon.dealCards() # deal first card to WasteStack
# register the game
registerGame(GameInfo(2, Klondike, "Klondike",
GI.GT_KLONDIKE, 1, -1, GI.SL_BALANCED,
altnames=("Classic Solitaire", "American Patience")))
registerGame(GameInfo(61, CasinoKlondike, "Casino Klondike",
GI.GT_KLONDIKE | GI.GT_SCORE, 1, 2, GI.SL_BALANCED))
registerGame(GameInfo(129, VegasKlondike, "Vegas Klondike",
GI.GT_KLONDIKE | GI.GT_SCORE, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(18, KlondikeByThrees, "Klondike by Threes",
GI.GT_KLONDIKE, 1, -1, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(58, ThumbAndPouch, "Thumb and Pouch",
GI.GT_KLONDIKE, 1, 0, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(67, Whitehead, "Whitehead",
GI.GT_KLONDIKE, 1, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(39, SmallHarp, "Small Harp",
GI.GT_KLONDIKE, 1, -1, GI.SL_BALANCED,
altnames=("Die kleine Harfe",)))
registerGame(GameInfo(66, Eastcliff, "Eastcliff",
GI.GT_KLONDIKE, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(224, Easthaven, "Easthaven",
GI.GT_GYPSY, 1, 0, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(33, Westcliff, "Westcliff",
GI.GT_KLONDIKE, 1, 0, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(225, Westhaven, "Westhaven",
GI.GT_GYPSY, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(107, PasSeul, "Pas Seul",
GI.GT_KLONDIKE, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(81, BlindAlleys, "Blind Alleys",
GI.GT_KLONDIKE, 1, 1, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(215, Somerset, "Somerset",
GI.GT_BELEAGUERED_CASTLE | GI.GT_OPEN, 1, 0,
GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(231, Canister, "Canister",
GI.GT_BELEAGUERED_CASTLE | GI.GT_OPEN, 1, 0,
GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(229, AgnesSorel, "Agnes Sorel",
GI.GT_GYPSY, 1, 0, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(4, EightTimesEight, "8 x 8",
GI.GT_KLONDIKE, 2, -1, GI.SL_BALANCED))
registerGame(GameInfo(127, AchtmalAcht, "Eight Times Eight",
GI.GT_KLONDIKE, 2, 2, GI.SL_BALANCED,
altnames=("Achtmal Acht",)))
registerGame(GameInfo(133, Batsford, "Batsford",
GI.GT_KLONDIKE, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(221, Stonewall, "Stonewall",
GI.GT_RAGLAN, 1, 0, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(222, FlowerGarden, "Flower Garden",
GI.GT_RAGLAN | GI.GT_OPEN, 1, 0, GI.SL_MOSTLY_SKILL,
altnames=("The Bouquet", "The Garden",)))
registerGame(GameInfo(233, KingAlbert, "King Albert",
GI.GT_RAGLAN | GI.GT_OPEN, 1, 0, GI.SL_MOSTLY_SKILL,
altnames=("Idiot's Delight",)))
registerGame(GameInfo(232, Raglan, "Raglan",
GI.GT_RAGLAN | GI.GT_OPEN, 1, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(223, Brigade, "Brigade",
GI.GT_RAGLAN | GI.GT_OPEN, 1, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(230, Jane, "Jane",
GI.GT_RAGLAN, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(236, AgnesBernauer, "Agnes Bernauer",
GI.GT_RAGLAN, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(263, Phoenix, "Phoenix",
GI.GT_RAGLAN | GI.GT_OPEN, 1, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(283, Jumbo, "Jumbo",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(333, OpenJumbo, "Open Jumbo",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(326, Lanes, "Lanes",
GI.GT_KLONDIKE, 1, 1, GI.SL_BALANCED))
registerGame(GameInfo(327, ThirtySix, "Thirty Six",
GI.GT_KLONDIKE, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(350, Q_C_, "Q.C.",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(361, NorthwestTerritory, "Northwest Territory",
GI.GT_RAGLAN, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(362, Morehead, "Morehead",
GI.GT_BELEAGUERED_CASTLE | GI.GT_OPEN, 1, 0,
GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(388, Senate, "Senate",
GI.GT_RAGLAN, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(389, SenatePlus, "Senate +",
GI.GT_RAGLAN, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(390, Arizona, "Arizona",
GI.GT_RAGLAN | GI.GT_OPEN, 1, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(407, AuntMary, "Aunt Mary",
GI.GT_KLONDIKE, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(420, DoubleDot, "Double Dot",
GI.GT_1DECK_TYPE, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(434, SevenDevils, "Seven Devils",
GI.GT_RAGLAN, 2, 0, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(452, DoubleEasthaven, "Double Easthaven",
GI.GT_GYPSY, 2, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(453, TripleEasthaven, "Triple Easthaven",
GI.GT_GYPSY, 3, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(470, MovingLeft, "Moving Left",
GI.GT_KLONDIKE, 2, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(471, Souter, "Souter",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(473, BigForty, "Big Forty",
GI.GT_KLONDIKE, 1, -1, GI.SL_BALANCED))
registerGame(GameInfo(474, AliBaba, "Ali Baba",
GI.GT_KLONDIKE, 1, -1, GI.SL_BALANCED))
registerGame(GameInfo(475, Cassim, "Cassim",
GI.GT_KLONDIKE, 1, -1, GI.SL_BALANCED))
registerGame(GameInfo(479, Saratoga, "Saratoga",
GI.GT_KLONDIKE, 1, -1, GI.SL_BALANCED))
registerGame(GameInfo(491, Whitehorse, "Whitehorse",
GI.GT_KLONDIKE, 1, -1, GI.SL_BALANCED))
registerGame(GameInfo(518, Boost, "Boost",
GI.GT_KLONDIKE | GI.GT_ORIGINAL, 1, 2, GI.SL_BALANCED))
registerGame(GameInfo(522, ArticGarden, "Artic Garden",
GI.GT_RAGLAN | GI.GT_OPEN, 1, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(532, GoldRush, "Gold Rush",
GI.GT_KLONDIKE, 1, 2, GI.SL_BALANCED))
registerGame(GameInfo(539, Usk, "Usk",
GI.GT_KLONDIKE | GI.GT_OPEN, 1, 1, GI.SL_BALANCED))
registerGame(GameInfo(541, BatsfordAgain, "Batsford Again",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(572, GoldMine, "Gold Mine",
GI.GT_NUMERICA, 1, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(585, LuckyThirteen, "Lucky Thirteen",
GI.GT_1DECK_TYPE, 1, 0, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(586, LuckyPiles, "Lucky Piles",
GI.GT_FAN_TYPE | GI.GT_OPEN, 1, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(601, AmericanCanister, "American Canister",
GI.GT_BELEAGUERED_CASTLE | GI.GT_OPEN, 1, 0,
GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(602, BritishCanister, "British Canister",
GI.GT_BELEAGUERED_CASTLE | GI.GT_OPEN, 1, 0,
GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(607, Legion, "Legion",
GI.GT_KLONDIKE, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(627, QueenVictoria, "Queen Victoria",
GI.GT_RAGLAN | GI.GT_OPEN, 1, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(630, BigBertha, "Big Bertha",
GI.GT_RAGLAN | GI.GT_OPEN, 2, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(633, Athena, "Athena",
GI.GT_KLONDIKE, 1, -1, GI.SL_BALANCED))
registerGame(GameInfo(634, Chinaman, "Chinaman",
GI.GT_KLONDIKE, 1, 1, GI.SL_BALANCED))
registerGame(GameInfo(651, EightByEight, "Eight by Eight",
GI.GT_KLONDIKE, 2, 2, GI.SL_BALANCED))
registerGame(GameInfo(667, Kingsley, "Kingsley",
GI.GT_KLONDIKE, 1, 0, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(669, Scarp, "Scarp",
GI.GT_GYPSY | GI.GT_ORIGINAL, 3, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(726, EightSages, "Eight Sages",
GI.GT_KLONDIKE, 2, 1, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(821, Trigon, "Trigon",
GI.GT_KLONDIKE, 1, -1, GI.SL_BALANCED))
registerGame(GameInfo(849, RelaxedRaglan, "Relaxed Raglan",
GI.GT_RAGLAN | GI.GT_RELAXED | GI.GT_OPEN, 1, 0,
GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(852, Guardian, "Guardian",
GI.GT_KLONDIKE, 1, -1, GI.SL_BALANCED))
|
shlomif/PySolFC
|
pysollib/games/klondike.py
|
Python
|
gpl-3.0
| 55,884
|
[
"CASINO"
] |
193afc70cd2573d70c69a8ec41ec4ffa9408ce59e764d3671f52ab2ef9346957
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for translation_helper.py."""
import unittest
import os
import sys
# pylint: disable=relative-import
import translation_helper
here = os.path.realpath(__file__)
testdata_path = os.path.normpath(os.path.join(here, '..', '..', 'testdata'))
class TcHelperTest(unittest.TestCase):
def test_get_translatable_grds(self):
grds = translation_helper.get_translatable_grds(
testdata_path, ['test.grd', 'not_translated.grd', 'internal.grd'],
os.path.join(testdata_path,
'translation_expectations_without_unlisted_file.pyl'))
self.assertEqual(1, len(grds))
# There should be no references to not_translated.grd (mentioning the
# filename here so that it doesn't appear unused).
grd = grds[0]
self.assertEqual(os.path.join(testdata_path, 'test.grd'), grd.path)
self.assertEqual(testdata_path, grd.dir)
self.assertEqual('test.grd', grd.name)
self.assertEqual([os.path.join(testdata_path, 'part.grdp')], grd.grdp_paths)
self.assertEqual([], grd.structure_paths)
self.assertEqual([os.path.join(testdata_path, 'test_en-GB.xtb')],
grd.xtb_paths)
self.assertEqual({'en-GB': os.path.join(testdata_path, 'test_en-GB.xtb')},
grd.lang_to_xtb_path)
self.assertTrue(grd.appears_translatable)
self.assertEqual(['en-GB'], grd.expected_languages)
# The expectations list an untranslatable file (not_translated.grd), but the
# grd list doesn't contain it.
def test_missing_untranslatable(self):
TRANSLATION_EXPECTATIONS = os.path.join(
testdata_path, 'translation_expectations_without_unlisted_file.pyl')
with self.assertRaises(Exception) as context:
translation_helper.get_translatable_grds(
testdata_path, ['test.grd', 'internal.grd'], TRANSLATION_EXPECTATIONS)
self.assertEqual(
'%s needs to be updated. Please fix these issues:\n'
' - not_translated.grd is listed in the translation expectations, '
'but this grd file does not exist.' % TRANSLATION_EXPECTATIONS,
context.exception.message)
# The expectations list an internal file (internal.grd), but the grd list
# doesn't contain it.
def test_missing_internal(self):
TRANSLATION_EXPECTATIONS = os.path.join(
testdata_path, 'translation_expectations_without_unlisted_file.pyl')
with self.assertRaises(Exception) as context:
translation_helper.get_translatable_grds(
testdata_path, ['test.grd', 'not_translated.grd'],
TRANSLATION_EXPECTATIONS)
self.assertEqual(
'%s needs to be updated. Please fix these issues:\n'
' - internal.grd is listed in translation expectations as an internal '
'file to be ignored, but this grd file does not exist.' %
TRANSLATION_EXPECTATIONS, context.exception.message)
# The expectations list a translatable file (test.grd), but the grd list
# doesn't contain it.
def test_missing_translatable(self):
TRANSLATION_EXPECTATIONS = os.path.join(
testdata_path, 'translation_expectations_without_unlisted_file.pyl')
with self.assertRaises(Exception) as context:
translation_helper.get_translatable_grds(
testdata_path, ['not_translated.grd', 'internal.grd'],
TRANSLATION_EXPECTATIONS)
self.assertEqual(
'%s needs to be updated. Please fix these issues:\n'
' - test.grd is listed in the translation expectations, but this grd '
'file does not exist.' % TRANSLATION_EXPECTATIONS,
context.exception.message)
# The grd list contains a file (part.grdp) that's not listed in translation
# expectations.
def test_expectations_not_updated(self):
TRANSLATION_EXPECTATIONS = os.path.join(
testdata_path, 'translation_expectations_without_unlisted_file.pyl')
with self.assertRaises(Exception) as context:
translation_helper.get_translatable_grds(
testdata_path,
['test.grd', 'part.grdp', 'not_translated.grd', 'internal.grd'],
TRANSLATION_EXPECTATIONS)
self.assertEqual(
'%s needs to be updated. Please fix these issues:\n'
' - part.grdp appears to be translatable (because it contains <file> '
'or <message> elements), but is not listed in the translation '
'expectations.' % TRANSLATION_EXPECTATIONS, context.exception.message)
if __name__ == '__main__':
unittest.main()
|
endlessm/chromium-browser
|
tools/translation/helper/translation_helper_unittest.py
|
Python
|
bsd-3-clause
| 4,575
|
[
"xTB"
] |
6a38e3ac74815eb3cc790bce0f6205934c7aa221375bb6bbeb005c4a801d918c
|
from sklearn.linear_model import LinearRegression
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.metrics import mean_squared_error
# Fix the number of samples and our seed
NUM_SAMPLES = 200
np.random.seed(42)
# Our "true function"
def f(x):
return 1.5*x + 0.5
#Construct array of (x,f(x))-pairs where x is sampled randomly from unit interval
data = np.array([[x,f(x) ] for x in np.random.random(NUM_SAMPLES)])
# Create regular grid of x values and the values of f
gridx = np.linspace(0, 1, 200)
gridy = np.array([f(x) for x in gridx])
# Add Gaussian noise with sigma=0.6
normaly = data[:,1]+0.6*np.random.randn(NUM_SAMPLES)
#Plot the messy data
plt.scatter(data[:,0], normaly )
plt.title("Scatter plot of synthetic data with normal errors")
#Plot the true function
plt.plot(gridx, gridy, label = "True function", color = 'Red')
plt.legend(loc = 2)
# Save and clear
plt.savefig("scatter_normal.png")
plt.cla()
# Fit linear regressors to increasingly large intervals of data
lm = LinearRegression()
for i in range(1, NUM_SAMPLES+1):
# Fit the regressor
lm.fit(data[:i,0].reshape((i,1)), normaly[:i].reshape((i,1)))
# Get the predictions on all of the sample points
predictions = lm.predict(data[:,0].reshape(NUM_SAMPLES,1))
# Get MSE
mse = mean_squared_error(predictions, normaly)
# Plot the messy data
plt.scatter(data[:,0], normaly)
plt.title("Linear regression on {} points with normal error".format(i))
# Plot the true function
plt.plot(gridx, gridy, label = "True function", color = 'Red')
# Plot the regression line
plt.plot(gridx, [lm.coef_[0] * x + lm.intercept_[0] for x in gridx], label = "Linear regressor line MSE = {:0.4f}".format(mse), color = 'Green')
plt.legend(loc = 2)
# Save and clear
plt.savefig("linreg_normal_{:03d}.png".format(i))
plt.cla()
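# Worked expectation for the loop above: with noise sigma = 0.6 the
# irreducible mean squared error is sigma**2 = 0.36, so as i approaches
# NUM_SAMPLES the fitted line should converge toward y = 1.5x + 0.5 and the
# reported MSE should level off near 0.36.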
|
JustinNoel1/ML-Course
|
linear-regression/python/linreg.py
|
Python
|
apache-2.0
| 1,895
|
[
"Gaussian"
] |
38a9db269d9ff296775f830134d6629e1ba0089ecc34f16622c1eeec58e742e2
|
#! /usr/bin/env python3
# Script to crawl the file system for changed files
#
# fs-crawler dirkpetersen / Oct 2019
#
import sys, os, argparse, subprocess, re, time, datetime, tempfile, random, threading, filecmp
class KeyboardInterruptError(Exception): pass
def main():
#log = logger('fs-crawler', args.debug)
#log.info('starting to check folder %s for files older than %s days...' % (args.folder, args.days))
#log.debug('Parsed arguments: %s' % args)
start = time.time()
interval = 1
maxinterval = 10
lastcheck = 0
lastt = 0
currdir = os.getcwd()
curruser = os.getlogin() #pwd.getpwuid(os.getuid()).pw_name
tmpdir = tempfile.gettempdir()
days_back_as_secs = time.time() - (args.days * 24 * 3600)
days_back_datestr = str(datetime.date.today() + datetime.timedelta(args.days * -1)) # e.g. '2014-07-01'
filedict = {} # list of files to delete (grouped by key uid)
infodict = {} # contains list per uid: numfiles, sizefiles, numwarnfiles, sizewarnfiles
if args.folder == '/':
print('root folder not allowed !')
return False
numfiles=0
numfolders=0
for root, folders, files in mywalk(args.folder,noparallel=args.noparallel):
#print(root)
#for folder in folders:
#print ('...folder:%s' % folder)
# check if the user wanted to archive
numfolders+=1
numfiles+=len(files)
check = time.time()
if lastcheck+interval<check:
t=numfolders+numfiles
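# The progress line below reports two rates: the overall average
# (total objects / seconds since start) and the instantaneous rate
# (objects since the last report / seconds since that report).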
print ("folders: %s, files: %s, avg objects/s: %s, last objects/s: %s, current path: %s"
% (numfolders, numfiles, "{0:.0f}".format(t/(check-start)), "{0:.0f}".format((t-lastt)/(check-lastcheck)), root))
lastcheck=check
lastt=t
interval+=1
if maxinterval<=interval:
interval=maxinterval
if args.target:
troot = root.replace(args.folder,args.target)
if os.path.exists(troot):
dc = filecmp.dircmp(root, root.replace(args.folder,args.target), ignore=['.snapshot'])
if dc.left_list:
print ('*** Copy -> :', dc.left_list)
elif dc.right_list:
print ('*** Delete -> :', dc.right_list)
continue
for f in files:
p=os.path.join(root,f)
if args.nostat:
continue
stat=getstat(p)
if not stat:
continue
recent_time = stat.st_ctime
if stat.st_mtime > recent_time:
recent_time = stat.st_mtime
if stat.st_uid not in infodict:
infodict[stat.st_uid] = [0, 0, 0, 0]
if recent_time >= days_back_as_secs:
if stat.st_uid not in filedict:
filedict[stat.st_uid] = list()
filedict[stat.st_uid].append(p)
infodict[stat.st_uid][0]+=1
infodict[stat.st_uid][1]+=stat.st_size
infodict[stat.st_uid][2]+=1
infodict[stat.st_uid][3]+=stat.st_size
for k, v in filedict.items():
user=uid2user(k)
random.shuffle(v)
fn=len(v)
if fn>30:
fn=30
print("\n ##### FILES <= %s DAYS OLD ##########################################################" % args.days)
print("Total of %s files (%s GB total) owned by '%s'" % (infodict[k][0], "{0:.3f}".format(infodict[k][1]/float(1073741824)), user))
print('List of files that would have been backed up (max 30 randomly selected files):')
for i in range(fn):
print(v[i])
end = time.time()
print("\nTotal Time: %s sec (%s min)" % ("{0:.1f}".format(end-start),"{0:.1f}".format((end-start)/60)))
def startswithpath(pathlist, pathstr):
""" checks if at least one of the paths in a list of paths starts with a string """
for path in pathlist:
if (os.path.join(pathstr, '')).startswith(path):
return True
return False
def getstartpath(pathlist, pathstr):
""" return the path from pathlist that is the frist part of pathstr"""
for path in pathlist:
if (os.path.join(pathstr, '')).startswith(path):
return path
return ''
def getstat(path):
""" returns the stat information of a file"""
statinfo=None
try:
statinfo=os.lstat(path)
except (IOError, OSError) as e: # FileNotFoundError only since python 3.3
if args.debug:
sys.stderr.write(str(e))
except:
raise
return statinfo
def setfiletime(path,attr="atime"):
""" sets the atime and/or mtime of a file to the current time """
try:
statinfo=getstat(path)
if attr=="atime" or attr=="all":
os.utime(path,(time.time(),statinfo.st_mtime))
if attr=="mtime" or attr=="all":
os.utime(path,(statinfo.st_atime,time.time()))
return True
except Exception as err:
sys.stderr.write(str(err))
sys.stderr.write('\n')
return False
def uid2user(uidNumber):
""" attempts to convert uidNumber to username """
try:
import pwd
return pwd.getpwuid(int(uidNumber)).pw_name
except Exception as err:
sys.stderr.write(str(err))
sys.stderr.write('\n')
return str(uidNumber)
def list2file(mylist,path):
""" dumps a list into a text file, one line per item"""
try:
with open(path,'w') as f:
for item in mylist:
f.write("{}\r\n".format(item))
return True
except Exception as err:
sys.stderr.write(str(err))
sys.stderr.write('\n')
return False
def pathlist2file(mylist,path,root):
""" dumps a list into a text file, one line per item, but removes
a root folder from all paths. Used for --files-from feature in rsync"""
try:
with open(path,'w') as f:
for item in mylist:
f.write("{}\r\n".format(item[len(root):]))
return True
except Exception as err:
sys.stderr.write(str(err))
sys.stderr.write('\n')
return False
def mywalk(top, noparallel=False, skipdirs=['.snapshot',]):
""" returns subset of os.walk """
if noparallel:
for root, dirs, files in os.walk(top,topdown=True,onerror=walkerr):
for skipdir in skipdirs:
if skipdir in dirs:
dirs.remove(skipdir) # don't visit this directory
yield root, dirs, files
else:
for root, dirs, files in walk(top):
for skipdir in skipdirs:
if skipdir in dirs:
dirs.remove(skipdir) # don't visit this directory
yield root, dirs, files
def walkerr(oserr):
sys.stderr.write(str(oserr))
sys.stderr.write('\n')
return 0
def walk(top, threads=36):
"""Multi-threaded version of os.walk().
from here: https://gist.github.com/jart/0a71cde3ca7261f77080a3625a21672b
This routine provides a multiple-orders-of-magnitude performance improvement
when top is mapped to a network filesystem where i/o operations are slow, but
unlimited. For spinning disks it should still run faster regardless of thread
count because it uses a LIFO scheduler that guarantees locality. For SSDs it
will go tolerably slower.
The more exotic coroutine features of os.walk() cannot be supported, such as
the ability to selectively inhibit recursion by mutating subdirs.
Args:
top: Path of parent directory to search recursively.
threads: Size of fixed thread pool.
Yields:
A (path, subdirs, files) tuple for each directory within top, including
itself. These tuples come in no particular order; however, the contents of
each tuple itself is sorted.
"""
if not os.path.isdir(top):
return
lock = threading.Lock()
on_input = threading.Condition(lock)
on_output = threading.Condition(lock)
state = {'tasks': 1}
paths = [top]
output = []
def worker():
while True:
with lock:
while True:
if not state['tasks']:
output.append(None)
on_output.notify()
return
if not paths:
on_input.wait()
continue
path = paths.pop()
break
try:
dirs = []
files = []
for item in os.listdir(path): #for item in sorted(os.listdir(path))
subpath = os.path.join(path, item)
if os.path.isdir(subpath):
dirs.append(item)
with lock:
state['tasks'] += 1
paths.append(subpath)
on_input.notify()
else:
files.append(item)
with lock:
output.append((path, dirs, files))
on_output.notify()
except OSError as e:
print(e, file=sys.stderr)
finally:
with lock:
state['tasks'] -= 1
if not state['tasks']:
on_input.notify_all()
workers = [threading.Thread(target=worker,
name="fastio.walk %d %s" % (i, top))
for i in range(threads)]
for w in workers:
w.start()
while threads or output: # TODO(jart): Why is 'or output' necessary?
with lock:
while not output:
on_output.wait()
item = output.pop()
if item:
yield item
else:
threads -= 1
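# Minimal usage sketch (hypothetical path, for illustration): walk() can
# replace os.walk() whenever the caller only consumes (root, dirs, files)
# tuples and does not prune recursion by mutating dirs:
#
#     for root, dirs, files in walk('/some/network/share', threads=16):
#         print(root, len(files))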
def send_mail(to, subject, text, attachments=[], cc=[], bcc=[], smtphost="", fromaddr=""):
if sys.version_info[0] == 2:
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
else:
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
from email import encoders as Encoders
from string import Template
import socket
import smtplib
if not isinstance(to,list):
print("the 'to' parameter needs to be a list")
return False
if len(to)==0:
print("no 'to' email addresses")
return False
myhost=socket.getfqdn()
if smtphost == '':
smtphost = get_mx_from_email_or_fqdn(myhost)
if not smtphost:
sys.stderr.write('could not determine smtp mail host !\n')
if fromaddr == '':
fromaddr = os.path.basename(__file__) + '-no-reply@' + \
'.'.join(myhost.split(".")[-2:]) #extract domain from host
tc=0
for t in to:
if '@' not in t:
# if no email domain given use domain from local host
to[tc]=t + '@' + '.'.join(myhost.split(".")[-2:])
tc+=1
message = MIMEMultipart()
message['From'] = fromaddr
message['To'] = COMMASPACE.join(to)
message['Date'] = formatdate(localtime=True)
message['Subject'] = subject
message['Cc'] = COMMASPACE.join(cc)
message['Bcc'] = COMMASPACE.join(bcc)
body = Template('This is a notification message from $application, running on \n' + \
'host $host. Please review the following message:\n\n' + \
'$notify_text\n\nIf output is being captured, you may find additional\n' + \
'information in your logs.\n'
)
host_name = socket.gethostname()
full_body = body.substitute(host=host_name.upper(), notify_text=text, application=os.path.basename(__file__))
message.attach(MIMEText(full_body))
for f in attachments:
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(f, 'rb').read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
message.attach(part)
addresses = []
for x in to:
addresses.append(x)
for x in cc:
addresses.append(x)
for x in bcc:
addresses.append(x)
smtp = smtplib.SMTP(smtphost)
smtp.sendmail(fromaddr, addresses, message.as_string())
smtp.close()
return True
def get_mx_from_email_or_fqdn(addr):
"""retrieve the first mail exchanger dns name from an email address."""
# Match the mail exchanger line in nslookup output.
MX = re.compile(r'^.*\s+mail exchanger = (?P<priority>\d+) (?P<host>\S+)\s*$')
# Find mail exchanger of this email address or the current host
if '@' in addr:
domain = addr.rsplit('@', 2)[1]
else:
domain = '.'.join(addr.rsplit('.')[-2:])
p = os.popen('/usr/bin/nslookup -q=mx %s' % domain, 'r')
mxes = list()
for line in p:
m = MX.match(line)
if m is not None:
mxes.append(m.group('host')[:-1]) #[:-1] just strips the ending dot
if len(mxes) == 0:
return ''
else:
return mxes[0]
def logger(name=None, stderr=False):
import logging, logging.handlers
# levels: CRITICAL:50,ERROR:40,WARNING:30,INFO:20,DEBUG:10,NOTSET:0
if not name:
name=__file__.split('/')[-1:][0]
l=logging.getLogger(name)
l.setLevel(logging.INFO)
f=logging.Formatter('%(name)s: %(levelname)s:%(module)s.%(lineno)d: %(message)s')
# logging to syslog
s=logging.handlers.SysLogHandler('/dev/log')
s.formatter = f
l.addHandler(s)
if stderr:
l.setLevel(logging.DEBUG)
# logging to stderr
c=logging.StreamHandler()
c.formatter = f
l.addHandler(c)
return l
def parse_arguments():
"""
Gather command-line arguments.
"""
parser = argparse.ArgumentParser(prog='fs-crawler',
description=' walk the file system tree ' + \
' ' + \
' ')
parser.add_argument( '--debug', '-g', dest='debug', action='store_true',
help='show the actual shell commands that are executed (git, chmod, cd)',
default=False )
parser.add_argument( '--no-parallel', '-p', dest='noparallel', action='store_true',
help='do not use the multi-threaded parallel directory walk (use plain os.walk)',
default=False )
parser.add_argument( '--no-stat', '-n', dest='nostat', action='store_true',
help='just count files, no stat calls',
default=False )
parser.add_argument( '--suppress-emails', '-s', dest='suppress_emails', action='store_true',
help='do not send any emails to end users',
default=False )
parser.add_argument( '--email-notify', '-e', dest='email',
action='store',
help='notify this email address of any error ',
default='' )
parser.add_argument( '--days', '-d', dest='days',
action='store',
type=int,
help='remove files older than x days (default: 1461 days or 4 years) ',
default=1461 )
parser.add_argument( '--folder', '-f', dest='folder',
action='store',
help='search this folder and below for files to remove')
parser.add_argument( '--target', '-t', dest='target',
action='store',
help='target directory tree to sync to')
args = parser.parse_args()
if not args.folder:
parser.error('required option --folder not given !')
if args.debug:
print('DEBUG: Arguments/Options: %s' % args)
return args
if __name__ == '__main__':
# Parse command-line arguments
args = parse_arguments()
sys.exit(main())
|
FredHutch/sc-benchmark
|
fs-crawler.py
|
Python
|
apache-2.0
| 15,580
|
[
"VisIt"
] |
8c1fe4dfdf20e5e7e93c17d1b9efef33f3d59ce45bee8cf33e97cfd510b56a7f
|
"""Bulk Al(fcc) test"""
from ase import Atoms
from ase.visualize import view
from gpaw import GPAW, PW
name = 'Al-fcc'
a = 4.05 # fcc lattice parameter
b = a / 2
bulk = Atoms('Al',
cell=[[0, b, b],
[b, 0, b],
[b, b, 0]],
pbc=True)
view(bulk)
k = 4
calc = GPAW(mode=PW(300), # cutoff
kpts=(k, k, k), # k-points
txt=name + '.txt') # output file
bulk.set_calculator(calc)
energy = bulk.get_potential_energy()
calc.write(name + '.gpw')
print('Energy:', energy, 'eV')
|
robwarm/gpaw-symm
|
doc/exercises/aluminium/Al_fcc.py
|
Python
|
gpl-3.0
| 568
|
[
"ASE",
"GPAW"
] |
16854621683a6e7a45c75ad7171809892f6335f2426c9636721b3b05cbe8faab
|
import math
import torch
from ..constraints import Positive
from ..lazy import MatmulLazyTensor, RootLazyTensor
from .kernel import Kernel
class SpectralDeltaKernel(Kernel):
"""
A kernel that supports spectral learning for GPs, where the underlying spectral density is modeled as a mixture
of delta distributions (i.e., point masses). This has been explored e.g. in Lazaro-Gredilla et al., 2010.
Conceptually, this kernel is similar to random Fourier features as implemented in RFFKernel, but instead of sampling
a Gaussian to determine the spectrum sites, the sites are treated as learnable parameters.
When using CG for inference, this kernel supports linear space and time (in N) for training and inference.
:param int num_dims: Dimensionality of input data that this kernel will operate on. Note that if active_dims is
used, this should be the length of the active dim set.
:param int num_deltas: Number of point masses to learn.
"""
has_lengthscale = True
def __init__(self, num_dims, num_deltas=128, Z_constraint=None, batch_shape=torch.Size([]), **kwargs):
Kernel.__init__(self, has_lengthscale=True, batch_shape=batch_shape, **kwargs)
self.raw_Z = torch.nn.Parameter(torch.rand(*batch_shape, num_deltas, num_dims))
if Z_constraint:
self.register_constraint("raw_Z", Z_constraint)
else:
self.register_constraint("raw_Z", Positive())
self.num_dims = num_dims
def initialize_from_data(self, train_x, train_y):
"""
Initialize the point masses for this kernel from the empirical spectrum of the data. To do this, we estimate
the empirical spectrum's CDF and then simply sample from it. This is analogous to how the SM kernel's mixture
is initialized, but we skip the last step of fitting a GMM to the samples and just use the samples directly.
"""
import numpy as np
from scipy.fftpack import fft
from scipy.integrate import cumtrapz
N = train_x.size(-2)
emp_spect = np.abs(fft(train_y.cpu().detach().numpy())) ** 2 / N
M = math.floor(N / 2)
freq1 = np.arange(M + 1)
freq2 = np.arange(-M + 1, 0)
freq = np.hstack((freq1, freq2)) / N
freq = freq[: M + 1]
emp_spect = emp_spect[: M + 1]
total_area = np.trapz(emp_spect, freq)
spec_cdf = np.hstack((np.zeros(1), cumtrapz(emp_spect, freq)))
spec_cdf = spec_cdf / total_area
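# The lines below perform inverse-transform sampling: draw a uniform in
# [0, 1), locate its bin in the empirical spectral CDF, and invert the
# piecewise-linear CDF to map the draw back to a frequency sample.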
a = np.random.rand(self.raw_Z.size(-2), 1)
p, q = np.histogram(a, spec_cdf)
bins = np.digitize(a, q)
slopes = (spec_cdf[bins] - spec_cdf[bins - 1]) / (freq[bins] - freq[bins - 1])
intercepts = spec_cdf[bins - 1] - slopes * freq[bins - 1]
inv_spec = (a - intercepts) / slopes
self.Z = inv_spec
def initialize_from_data_simple(self, train_x, train_y, **kwargs):
if not torch.is_tensor(train_x) or not torch.is_tensor(train_y):
raise RuntimeError("train_x and train_y should be tensors")
if train_x.ndimension() == 1:
train_x = train_x.unsqueeze(-1)
if train_x.ndimension() == 2:
train_x = train_x.unsqueeze(0)
train_x_sort = train_x.sort(1)[0]
min_dist_sort = (train_x_sort[:, 1:, :] - train_x_sort[:, :-1, :]).squeeze(0)
ard_num_dims = 1 if self.ard_num_dims is None else self.ard_num_dims
min_dist = torch.zeros(1, ard_num_dims, dtype=self.Z.dtype, device=self.Z.device)
for ind in range(ard_num_dims):
min_dist[:, ind] = min_dist_sort[(torch.nonzero(min_dist_sort[:, ind]))[0], ind]
z_init = torch.rand_like(self.Z).mul_(0.5).div_(min_dist)
self.Z = z_init
@property
def Z(self):
return self.raw_Z_constraint.transform(self.raw_Z)
@Z.setter
def Z(self, value):
self._set_Z(value)
def _set_Z(self, value):
if not torch.is_tensor(value):
value = torch.as_tensor(value).to(self.raw_Z)
self.initialize(raw_Z=self.raw_Z_constraint.inverse_transform(value))
def forward(self, x1, x2, diag=False, **params):
x1_ = x1.div(self.lengthscale)
x2_ = x2.div(self.lengthscale)
Z = self.Z
# Z1_ and Z2_ are s x d
x1z1 = x1_.matmul(Z.transpose(-2, -1)) # n x s
x2z2 = x2_.matmul(Z.transpose(-2, -1)) # n x s
x1z1 = x1z1 * 2 * math.pi
x2z2 = x2z2 * 2 * math.pi
x1z1 = torch.cat([x1z1.cos(), x1z1.sin()], dim=-1) / math.sqrt(x1z1.size(-1))
x2z2 = torch.cat([x2z2.cos(), x2z2.sin()], dim=-1) / math.sqrt(x2z2.size(-1))
if x1.size() == x2.size() and torch.equal(x1, x2):
prod = RootLazyTensor(x1z1)
else:
prod = MatmulLazyTensor(x1z1, x2z2.transpose(-2, -1))
if diag:
return prod.diag()
else:
return prod
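# Minimal usage sketch, assuming the ExactGP-era GPyTorch API this module
# imports from (ScaleKernel and LazyTensor.evaluate() are assumptions, not
# part of this file):
#
#     import torch
#     from gpytorch.kernels import ScaleKernel
#     kern = ScaleKernel(SpectralDeltaKernel(num_dims=1, num_deltas=64))
#     x = torch.linspace(0, 1, 50).unsqueeze(-1)
#     K = kern(x).evaluate()  # dense 50 x 50 covariance matrix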
|
jrg365/gpytorch
|
gpytorch/kernels/spectral_delta_kernel.py
|
Python
|
mit
| 4,931
|
[
"Gaussian"
] |
1e47d7a4153179acab104678f8070ed1882b011d1b7845360b2db75e8e547a0f
|
import os
import unittest
from __main__ import vtk, qt, ctk, slicer
#
# WelcomeModuleTests
#
class WelcomeModuleTests:
def __init__(self, parent):
parent.title = "WelcomeModuleTests" # TODO make this more human readable by adding spaces
parent.categories = ["Testing.TestCases"]
parent.dependencies = []
parent.contributors = ["Johan Andruejol (Kitware)"] # replace with "Firstname Lastname (Org)"
parent.helpText = """
"""
parent.acknowledgementText = """TODO""" # replace with organization, grant and thanks.
self.parent = parent
# Add this test to the SelfTest module's list for discovery when the module
# is created. Since this module may be discovered before SelfTests itself,
# create the list if it doesn't already exist.
try:
slicer.selfTests
except AttributeError:
slicer.selfTests = {}
slicer.selfTests['WelcomeModuleTests'] = self.runTest
def runTest(self):
tester = WelcomeModuleTestsTest()
tester.runTests()
#
# qWelcomeModuleTestsTest
#
class WelcomeModuleTestsTest(unittest.TestCase):
def delayDisplay(self,message,msec=1000):
"""This utility method displays a small dialog and waits.
This does two things: 1) it lets the event loop catch up
to the state of the test so that rendering and widget updates
have all taken place before the test continues and 2) it
shows the user/developer/tester the state of the test
so that we'll know when it breaks.
"""
print(message)
self.info = qt.QDialog()
self.infoLayout = qt.QVBoxLayout()
self.info.setLayout(self.infoLayout)
self.label = qt.QLabel(message,self.info)
self.infoLayout.addWidget(self.label)
qt.QTimer.singleShot(msec, self.info.close)
self.info.exec_()
def getTestMethodNames(self):
methods = []
for method in dir(self):
if (callable(getattr(self, method)) and method.find('test_') != -1):
methods.append(method)
return methods
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.util.selectModule('Welcome')
slicer.mrmlScene.Clear(0)
def tearDown(self):
slicer.util.selectModule('WelcomeModuleTests')
def findWidget(self, widget, objectName):
if widget.objectName == objectName:
return widget
else:
children = []
for w in widget.children():
resulting_widget = self.findWidget(w, objectName)
if resulting_widget:
return resulting_widget
return None
def runTests(self):
"""Run as few or as many tests as needed here.
"""
for methodName in self.getTestMethodNames():
self.runTest(methodName)
def runTest(self, method):
self.setUp()
getattr(self, method)()
self.tearDown()
def test_TestReturnToWelcome(self):
self.delayDisplay('test_TestReturnToWelcome')
# Find the return to welcome screen button
mainWindow = slicer.util.mainWindow()
returnToWelcomeScreenPushButton = self.findWidget(mainWindow, 'ReturnToWelcomeScreenButton')
slicer.util.selectModule(slicer.modules.volumes)
returnToWelcomeScreenPushButton.click()
self.assertEqual(slicer.util.selectedModule(), 'Welcome')
slicer.util.selectModule(slicer.modules.welcome)
returnToWelcomeScreenPushButton.click()
self.assertEqual(slicer.util.selectedModule(), 'Welcome')
self.delayDisplay('Test passed!')
def test_TestHiddenWidget(self):
self.delayDisplay('test_TestHiddenWidget')
mainWindow = slicer.util.mainWindow()
# Test hidden widgets
self.assertFalse(mainWindow.moduleSelector().isVisible())
hiddenWidgetNames = ['CaptureToolBar',
'DialogToolBar',
'MainToolBar',
'ModuleToolBar',
'MouseModeToolBar',
'ViewersToolBar',
'ViewToolBar'
]
for hiddenWidgetName in hiddenWidgetNames:
w = self.findWidget(mainWindow, hiddenWidgetName)
self.assertFalse(w.isVisible())
self.delayDisplay('Test passed!')
def test_TestLayout(self):
self.delayDisplay('test_TestLayout')
self.assertEqual(slicer.app.layoutManager().layout, slicer.vtkMRMLLayoutNode.SlicerLayoutUserView)
self.delayDisplay('Test passed!')
#
# qWelcomeModuleTestWidget
#
class WelcomeModuleTestsWidget():
def __init__(self, parent = None):
if not parent:
self.parent = slicer.qMRMLWidget()
self.parent.setLayout(qt.QVBoxLayout())
self.parent.setMRMLScene(slicer.mrmlScene)
else:
self.parent = parent
self.layout = self.parent.layout()
if not parent:
self.setup()
self.parent.show()
self.moduleName = 'WelcomeModuleTests'
self.tester = WelcomeModuleTestsTest()
def setup(self):
# Instantiate and connect widgets ...
# reload button
# (use this during development, but remove it when delivering
# your module to users)
self.reloadButton = qt.QPushButton("Reload")
self.reloadButton.toolTip = "Reload this module."
self.reloadButton.name = "Tests Reload"
self.layout.addWidget(self.reloadButton)
self.reloadButton.connect('clicked()', self.onReload)
# reload and test button
# (use this during development, but remove it when delivering
# your module to users)
self.reloadAndTestButton = qt.QPushButton("Reload and Test")
self.reloadAndTestButton.toolTip = "Reload this module and then run the self tests."
self.layout.addWidget(self.reloadAndTestButton)
self.reloadAndTestButton.connect('clicked()', self.onReloadAndTest)
self.testButton = qt.QPushButton('Run Tests')
self.layout.addWidget(self.testButton)
self.testButton.connect('clicked(bool)', self.tester.runTests)
# Add vertical spacer
self.layout.addStretch(1)
def onReload(self):
"""Generic reload method for any scripted module.
ModuleWizard will substitute the correct default.
"""
globals()[self.moduleName] = slicer.util.reloadScriptedModule(self.moduleName)
def onReloadAndTest(self):
self.onReload()
self.tester.runTests()
|
agirault/VesselView
|
Applications/App/Testing/Python/WelcomeModuleTests.py
|
Python
|
apache-2.0
| 6,219
|
[
"VTK"
] |
d88ce26681edf0c7eff3c54664f5cefd291390bdfe331e3cbb8db46710941bb4
|
"""Test string and unicode support in VTK-Python
The following string features have to be tested for string and unicode
- Pass a string arg by value
- Pass a string arg by reference
- Return a string arg by value
- Return a string arg by reference
The following features are not supported
- Pointers to strings, arrays of strings
- Passing a string arg by reference and returning a value in it
Created on May 12, 2010 by David Gobbi
"""
import sys
import exceptions
import vtk
from vtk.test import Testing
unicode_support = False
try:
unicode('hello')
unicode_support = True
except:
print "unicode not supported on this python installation"
class TestString(Testing.vtkTest):
def testPassByValue(self):
"""Pass string by value... hard to find examples of this,
because "const char *" methods shadow "vtkStdString" methods.
"""
self.assertEqual('y', 'y')
def testReturnByValue(self):
"""Return a string by value."""
a = vtk.vtkArray.CreateArray(1, vtk.VTK_INT)
a.Resize(1,1)
a.SetDimensionLabel(0, 'x')
s = a.GetDimensionLabel(0)
self.assertEqual(s, 'x')
def testPassByReference(self):
"""Pass a string by reference."""
a = vtk.vtkArray.CreateArray(0, vtk.VTK_STRING)
a.SetName("myarray")
s = a.GetName()
self.assertEqual(s, "myarray")
def testReturnByReference(self):
"""Return a string by reference."""
a = vtk.vtkStringArray()
s = "hello"
a.InsertNextValue(s)
t = a.GetValue(0)
self.assertEqual(t, s)
def testPassAndReturnUnicodeByReference(self):
"""Pass a unicode string by const reference"""
if not unicode_support:
return
a = vtk.vtkUnicodeStringArray()
a.InsertNextValue(u'Fran\xe7ois')
u = a.GetValue(0)
self.assertEqual(u, u'Fran\xe7ois')
def testPassStringAsUnicode(self):
"""Pass a string when unicode is expected. Should fail."""
if not unicode_support:
return
a = vtk.vtkUnicodeStringArray()
self.assertRaises(exceptions.TypeError,
a.InsertNextValue, ('Francois',))
def testPassUnicodeAsString(self):
"""Pass a unicode where a string is expected. Should succeed."""
if not unicode_support:
return
a = vtk.vtkStringArray()
a.InsertNextValue(u'Francois')
s = a.GetValue(0)
self.assertEqual(s, 'Francois')
if __name__ == "__main__":
Testing.main([(TestString, 'test')])
|
timkrentz/SunTracker
|
IMU/VTK-6.2.0/Common/Core/Testing/Python/TestStrings.py
|
Python
|
mit
| 2,690
|
[
"VTK"
] |
235cb224c741f6d293925a8731fc1feb6f6767e68a445958b1820f6461f8976d
|
import numpy as np
import scipy.stats
from scipy.optimize import approx_fprime
from pyhmc import hmc
def lnprob_gaussian(x, icov):
logp = -np.dot(x, np.dot(icov, x)) / 2.0
grad = -np.dot(x, icov)
return logp, grad
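# For logp(x) = -x^T icov x / 2 the analytic gradient is -(icov @ x);
# because icov is symmetric, -np.dot(x, icov) computes the same vector.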
def test_1():
# test sampling from a highly-correlated gaussian
dim = 2
x0 = np.zeros(dim)
cov = np.array([[1, 1.98], [1.98, 4]])
icov = np.linalg.inv(cov)
samples, logp, diag = hmc(lnprob_gaussian, x0, args=(icov,),
n_samples=10**4, n_burn=10**3,
n_steps=10, epsilon=0.20, return_diagnostics=True,
return_logp=True, random_state=2)
C = np.cov(samples, rowvar=0, bias=1)
np.testing.assert_array_almost_equal(cov, C, 1)
for i in range(100):
np.testing.assert_almost_equal(
lnprob_gaussian(samples[i], icov)[0],
logp[i])
def test_2():
# test that random state is used correctly
dim = 2
x0 = np.zeros(dim)
cov = np.array([[1, 1.98], [1.98, 4]])
icov = np.linalg.inv(cov)
samples1 = hmc(lnprob_gaussian, x0, args=(icov,),
n_samples=10, n_burn=0,
n_steps=10, epsilon=0.25, return_diagnostics=False,
random_state=0)
samples2 = hmc(lnprob_gaussian, x0, args=(icov,),
n_samples=10, n_burn=0,
n_steps=10, epsilon=0.25, return_diagnostics=False,
random_state=0)
np.testing.assert_array_almost_equal(samples1, samples2)
def test_3():
rv = scipy.stats.loggamma(c=1)
eps = np.sqrt(np.finfo(float).resolution)
def logprob(x):
return rv.logpdf(x), approx_fprime(x, rv.logpdf, eps)
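# approx_fprime estimates the gradient by forward finite differences
# with step eps, standing in for an analytic gradient in the
# (logp, grad) pair that hmc expects.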
samples = hmc(logprob, [0], epsilon=1, n_steps=10, window=3, persistence=True)
# import matplotlib.pyplot as pp
(osm, osr), (slope, intercept, r) = scipy.stats.probplot(
samples[:,0], dist=rv, fit=True)
assert r > 0.99
# pp.show()
|
rmcgibbo/pyhmc
|
pyhmc/tests/test_hmc.py
|
Python
|
bsd-3-clause
| 1,947
|
[
"Gaussian"
] |
684cd3bf2be06da31ea77f049916808208d9613aea98a67bdf10055f045174b0
|
#!/usr/bin/env python
#Original script from /home/james/work/encode/feature_partitions/split_by_partitions.py
#Usage: python(2.4) split_by_partitions.py partition_index in_file out_file chrCol startCol endCol strandCol
from __future__ import division
import sys
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
from bx.bitset import *
from bx.bitset_builders import *
assert sys.version_info[:2] >= ( 2, 4 )
def stop_err( msg ):
sys.stderr.write( msg )
sys.exit()
def main():
GALAXY_DATA_INDEX_DIR = sys.argv[1]
partition_index = '%s/encode_feature_partitions/partition_list.txt' % GALAXY_DATA_INDEX_DIR
partition_offset = "%s/encode_feature_partitions/" % GALAXY_DATA_INDEX_DIR
warnings = []
# Load up the partitions
partitions = list()
try:
for line in open( partition_index ):
name, score, filename = line.split()
partitions.append( ( name, score, binned_bitsets_from_file( open( partition_offset+filename ) ) ) )
except:
stop_err( "Error loading partitioning dataset." )
try:
in_file = open( sys.argv[2] )
except:
stop_err( "Bad input data." )
try:
out_file = open( sys.argv[3], "w" )
except:
stop_err( "Bad output file." )
try:
chrCol = int( sys.argv[4] ) - 1
except:
stop_err( "Bad chr column: %s" % ( str( sys.argv[4] ) ) )
try:
startCol = int( sys.argv[5] ) - 1
except:
stop_err( "Bad start column: %s" % ( str( sys.argv[5] ) ) )
try:
endCol = int( sys.argv[6] ) - 1
except:
stop_err( "Bad end column: %s" % ( str( sys.argv[6] ) ) )
try:
strandCol = int( sys.argv[7] )-1
except:
strandCol = -1
line_count = 0
skipped_lines = 0
first_invalid_line = None
invalid_line = ''
try:
for line in in_file:
line_count += 1
line = line.rstrip( '\r\n' )
if line and not line.startswith( '#' ):
fields = line.split( '\t' )
try:
chr, start, end = fields[chrCol], int( fields[startCol] ), int( fields[endCol] )
except:
skipped_lines += 1
if first_invalid_line is None:
first_invalid_line = line_count
invalid_line = line
continue
label = "input_line_" + str( line_count ) #if input file type was known to be bed, then could guess at label column
if strandCol < 0:
strand = "+"
else:
try:
strand = fields[strandCol]
except:
strand = "+"
# Find which partition it overlaps
overlap = 0
for name, score, bb in partitions:
# Is there at least 1bp overlap?
if chr in bb:
overlap = bb[chr].count_range( start, end-start )
if overlap > 0:
break
else:
# No overlap with any partition? Since the partitions tile the
# encode regions completely, this indicates an interval that does
# not even overlap an encode region, so record a warning.
warning = "warning: Interval (%s, %d, %d) does not overlap any partition" % ( chr, start, end ) + ", line[" + str( line_count ) + "]. "
warnings.append( warning )
name = "no_overlap"
score = 0
# Annotate with the name of the partition
frac_overlap = overlap / ( end-start )
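# Example: an interval of length 200 bp with 150 bp inside the winning
# partition reports frac_overlap = 150 / 200 = 0.7500.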
# BED6 plus?
print >>out_file, "%s\t%d\t%d\t%s\t%s\t%s\t%s\t%0.4f" % ( chr, start, end, label, score, strand, name, frac_overlap )
except:
out_file.close()
in_file.close()
stop_err( "Unknown error while processing line # %d: %s" % ( line_count, line ) )
out_file.close()
in_file.close()
if warnings:
warn_msg = "This tool is useful on ENCODE regions only, %d warnings, 1st is: " % len( warnings )
warn_msg += warnings[0]
print warn_msg
if skipped_lines:
print "Skipped %d invalid lines starting at line # %d: %s" % ( skipped_lines, first_invalid_line, invalid_line )
if __name__ == "__main__": main()
|
volpino/Yeps-EURAC
|
tools/encode/split_by_partitions.py
|
Python
|
mit
| 4,581
|
[
"Galaxy"
] |
f1bb7326eedd03311127051b2ce9c412be6efbfc6f86fc4d8aef0ab72041f736
|
#!/usr/bin/env python
#
# Public Domain
"""
CairoSVG - A Simple SVG Converter Based on Cairo
================================================
CairoSVG is an SVG converter based on Cairo. It can convert SVG files to PDF,
EPS, PS, and PNG files.
For further information, please visit the `CairoSVG Website
<http://cairosvg.org/>`_.
"""
import sys
from setuptools import setup
if sys.version_info.major < 3:
raise RuntimeError(
'CairoSVG does not support Python 2.x anymore. '
'Please use Python 3 or install an older version of CairoSVG.')
setup()
|
Kozea/CairoSVG
|
setup.py
|
Python
|
lgpl-3.0
| 574
|
[
"VisIt"
] |
24479ad9096dea79667ba79349c1ccd54e85da7127917b0be14bb45138031d6e
|
import os
import sys
import time as timer
import math
import traceback
import Queue
import multiprocessing
import numpy as np
import netCDF4
from paegan.location4d import Location4D
from paegan.transport.utils.asatransport import AsaTransport
from paegan.transport.shoreline import Shoreline
from paegan.transport.bathymetry import Bathymetry
from paegan.transport.exceptions import DataControllerError
from paegan.cdm.dataset import CommonDataset
from paegan.cdm.timevar import date2num
from paegan.logger import logger
class Consumer(multiprocessing.Process):
def __init__(self, task_queue, result_queue, n_run, nproc_lock, active, get_data, **kwargs):
"""
This is the process class that does all the handling of queued tasks
"""
multiprocessing.Process.__init__(self, **kwargs)
self.task_queue = task_queue
self.result_queue = result_queue
self.n_run = n_run
self.nproc_lock = nproc_lock
self.active = active
self.get_data = get_data
def run(self):
while True:
try:
next_task = self.task_queue.get(True, 10)
except Queue.Empty:
logger.info("No tasks left to complete, closing %s" % self.name)
break
else:
answer = (None, None)
try:
answer = (1, next_task(self.name, self.active))
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.exception("Disabling Error")
if isinstance(next_task, DataController):
answer = (-2, "DataController")
# Tell the particles that the DataController is releasing file
self.get_data.value = False
# The data controller has died, so don't process any more tasks
self.active.value = False
elif isinstance(next_task, ForceParticle):
answer = (-1, next_task.part)
else:
logger.warn("Strange task raised an exception: %s" % str(next_task.__class__))
answer = (None, None)
finally:
self.result_queue.put(answer)
self.nproc_lock.acquire()
self.n_run.value = self.n_run.value - 1
self.nproc_lock.release()
self.task_queue.task_done()
class DataController(object):
def __init__(self, hydrodataset, common_variables, n_run, get_data, write_lock, has_write_lock, read_lock, read_count,
time_chunk, horiz_chunk, times, start_time, point_get, start, **kwargs):
"""
The data controller controls the updating of the
local netcdf data cache
"""
assert "cache_path" in kwargs
self.cache_path = kwargs["cache_path"]
self.caching = kwargs.get("caching", True)
self.hydrodataset = hydrodataset
if self.cache_path == self.hydrodataset and self.caching is True:
raise DataControllerError("Caching is set to True but the cache path and data path are the same. Refusing to overwrite the data path.")
self.n_run = n_run
self.get_data = get_data
self.write_lock = write_lock
self.has_write_lock = has_write_lock
self.read_lock = read_lock
self.read_count = read_count
self.inds = None # np.arange(init_size+1)
self.time_size = time_chunk
self.horiz_size = horiz_chunk
self.point_get = point_get
self.start_time = start_time
self.times = times
self.start = start
# Set common variable names
self.uname = common_variables.get("u", None)
self.vname = common_variables.get("v", None)
self.wname = common_variables.get("w", None)
self.temp_name = common_variables.get("temp", None)
self.salt_name = common_variables.get("salt", None)
self.xname = common_variables.get("x", None)
self.yname = common_variables.get("y", None)
self.zname = common_variables.get("z", None)
self.tname = common_variables.get("time", None)
def get_remote_data(self, localvars, remotevars, inds, shape):
"""
Method that does the updating of local netcdf cache
with remote data
"""
# If user specifies 'all' then entire xy domain is
# grabbed, default is 4, specified in the model_controller
if self.horiz_size == 'all':
y, y_1 = 0, shape[-2]
x, x_1 = 0, shape[-1]
else:
r = self.horiz_size
x, x_1 = self.point_get.value[2]-r, self.point_get.value[2]+r+1
y, y_1 = self.point_get.value[1]-r, self.point_get.value[1]+r+1
x, x_1 = x[0], x_1[0]
y, y_1 = y[0], y_1[0]
if y < 0:
y = 0
if x < 0:
x = 0
if y_1 > shape[-2]:
y_1 = shape[-2]
if x_1 > shape[-1]:
x_1 = shape[-1]
# Update domain variable for where we will add data
domain = self.local.variables['domain']
if len(shape) == 4:
domain[inds[0]:inds[-1]+1, 0:shape[1], y:y_1, x:x_1] = np.ones((inds[-1]+1-inds[0], shape[1], y_1-y, x_1-x))
elif len(shape) == 3:
domain[inds[0]:inds[-1]+1, y:y_1, x:x_1] = np.ones((inds[-1]+1-inds[0], y_1-y, x_1-x))
# Update the local variables with remote data
logger.debug("Filling cache with: Time - %s:%s, Lat - %s:%s, Lon - %s:%s" % (str(inds[0]), str(inds[-1]+1), str(y), str(y_1), str(x), str(x_1)))
for local, remote in zip(localvars, remotevars):
if len(shape) == 4:
local[inds[0]:inds[-1]+1, 0:shape[1], y:y_1, x:x_1] = remote[inds[0]:inds[-1]+1, 0:shape[1], y:y_1, x:x_1]
else:
local[inds[0]:inds[-1]+1, y:y_1, x:x_1] = remote[inds[0]:inds[-1]+1, y:y_1, x:x_1]
def __call__(self, proc, active):
c = 0
self.dataset = CommonDataset.open(self.hydrodataset)
self.remote = self.dataset.nc
# Calculate the datetimes of the model timesteps like
# the particle objects do, so we can figure out unique
# time indices
modelTimestep, newtimes = AsaTransport.get_time_objects_from_model_timesteps(self.times, start=self.start_time)
timevar = self.dataset.gettimevar(self.uname)
# Don't need to grab the last datetime, as it is not needed for forcing, only
# for setting the time of the final particle forcing
time_indices = timevar.nearest_index(newtimes[0:-1], select='before')
# Have to make sure that we get the plus 1 for the
# linear interpolation of u,v,w,temp,salt
self.inds = np.unique(time_indices)
self.inds = np.append(self.inds, self.inds.max()+1)
# While there is at least 1 particle still running,
# stay alive, if not break
while self.n_run.value > 1:
if self.caching is False:
logger.debug("Caching is False, not doing much. Just hanging out until all of the particles finish.")
timer.sleep(10)
continue
# If particle asks for data, do the following
if self.get_data.value is True:
logger.debug("Particle asked for data!")
# Wait for particles to get out
while True:
self.read_lock.acquire()
logger.debug("Read count: %d" % self.read_count.value)
if self.read_count.value > 0:
logger.debug("Waiting for write lock on cache file (particles must stop reading)...")
self.read_lock.release()
timer.sleep(2)
else:
break
# Get write lock on the file. Already have read lock.
self.write_lock.acquire()
self.has_write_lock.value = os.getpid()
if c == 0:
logger.debug("Creating cache file")
try:
# Open local cache for writing, overwrites
# existing file with same name
self.local = netCDF4.Dataset(self.cache_path, 'w')
indices = self.dataset.get_indices(self.uname, timeinds=[np.asarray([0])], point=self.start)
self.point_get.value = [self.inds[0], indices[-2], indices[-1]]
# Create dimensions for u and v variables
self.local.createDimension('time', None)
self.local.createDimension('level', None)
self.local.createDimension('x', None)
self.local.createDimension('y', None)
# Create 3d or 4d u and v variables
if self.remote.variables[self.uname].ndim == 4:
self.ndim = 4
dimensions = ('time', 'level', 'y', 'x')
coordinates = "time z lon lat"
elif self.remote.variables[self.uname].ndim == 3:
self.ndim = 3
dimensions = ('time', 'y', 'x')
coordinates = "time lon lat"
shape = self.remote.variables[self.uname].shape
# If there is no FillValue defined in the dataset, use np.nan.
# Sometimes it will work out correctly and other times we will
# have a huge cache file.
try:
fill = self.remote.variables[self.uname].missing_value
except Exception:
fill = np.nan
# Create domain variable that specifies
# where there is data geographically/by time
# and where there is not data,
# Used for testing if particle needs to
# ask cache to update
domain = self.local.createVariable('domain', 'i', dimensions, zlib=False, fill_value=0)
domain.coordinates = coordinates
# Create local u and v variables
u = self.local.createVariable('u', 'f', dimensions, zlib=False, fill_value=fill)
v = self.local.createVariable('v', 'f', dimensions, zlib=False, fill_value=fill)
v.coordinates = coordinates
u.coordinates = coordinates
localvars = [u, v, ]
remotevars = [self.remote.variables[self.uname], self.remote.variables[self.vname]]
# Create local w variable
if self.wname is not None:
w = self.local.createVariable('w', 'f', dimensions, zlib=False, fill_value=fill)
w.coordinates = coordinates
localvars.append(w)
remotevars.append(self.remote.variables[self.wname])
if self.temp_name is not None and self.salt_name is not None:
# Create local temp and salt vars
temp = self.local.createVariable('temp', 'f', dimensions, zlib=False, fill_value=fill)
salt = self.local.createVariable('salt', 'f', dimensions, zlib=False, fill_value=fill)
temp.coordinates = coordinates
salt.coordinates = coordinates
localvars.append(temp)
localvars.append(salt)
remotevars.append(self.remote.variables[self.temp_name])
remotevars.append(self.remote.variables[self.salt_name])
# Create local lat/lon coordinate variables
if self.remote.variables[self.xname].ndim == 2:
lon = self.local.createVariable('lon', 'f', ("y", "x"), zlib=False)
lon[:] = self.remote.variables[self.xname][:, :]
lat = self.local.createVariable('lat', 'f', ("y", "x"), zlib=False)
lat[:] = self.remote.variables[self.yname][:, :]
if self.remote.variables[self.xname].ndim == 1:
lon = self.local.createVariable('lon', 'f', ("x"), zlib=False)
lon[:] = self.remote.variables[self.xname][:]
lat = self.local.createVariable('lat', 'f', ("y"), zlib=False)
lat[:] = self.remote.variables[self.yname][:]
# Create local z variable
if self.zname is not None:
if self.remote.variables[self.zname].ndim == 4:
z = self.local.createVariable('z', 'f', ("time", "level", "y", "x"), zlib=False)
remotez = self.remote.variables[self.zname]
localvars.append(z)
remotevars.append(remotez)
elif self.remote.variables[self.zname].ndim == 3:
z = self.local.createVariable('z', 'f', ("level", "y", "x"), zlib=False)
z[:] = self.remote.variables[self.zname][:, :, :]
elif self.remote.variables[self.zname].ndim == 1:
z = self.local.createVariable('z', 'f', ("level",), zlib=False)
z[:] = self.remote.variables[self.zname][:]
# Create local time variable
time = self.local.createVariable('time', 'f8', ("time",), zlib=False)
if self.tname is not None:
time[:] = self.remote.variables[self.tname][self.inds]
if self.point_get.value[0]+self.time_size > np.max(self.inds):
current_inds = np.arange(self.point_get.value[0], np.max(self.inds)+1)
else:
current_inds = np.arange(self.point_get.value[0], self.point_get.value[0] + self.time_size)
# Get data from remote dataset and add
# to local cache.
# Try 20 times on the first attempt
current_attempt = 1
max_attempts = 20
while True:
try:
assert current_attempt <= max_attempts
self.get_remote_data(localvars, remotevars, current_inds, shape)
except AssertionError:
raise
except:
logger.warn("DataController failed to get remote data. Trying again in 20 seconds. %s attemps left." % unicode(max_attempts-current_attempt))
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.warn("Data Access Error: " + repr(traceback.format_exception(exc_type, exc_value, exc_traceback)))
timer.sleep(20)
current_attempt += 1
else:
break
c += 1
except (StandardError, AssertionError):
logger.error("DataController failed to get data (first request)")
raise
finally:
self.local.sync()
self.local.close()
self.has_write_lock.value = -1
self.write_lock.release()
self.get_data.value = False
self.read_lock.release()
logger.debug("Done updating cache file, closing file, and releasing locks")
else:
logger.debug("Updating cache file")
try:
# Open local cache dataset for appending
self.local = netCDF4.Dataset(self.cache_path, 'a')
# Create local and remote variable objects
# for the variables of interest
u = self.local.variables['u']
v = self.local.variables['v']
time = self.local.variables['time']
remoteu = self.remote.variables[self.uname]
remotev = self.remote.variables[self.vname]
# Create lists of variable objects for
# the data updater
localvars = [u, v, ]
remotevars = [remoteu, remotev, ]
if self.salt_name is not None and self.temp_name is not None:
salt = self.local.variables['salt']
temp = self.local.variables['temp']
remotesalt = self.remote.variables[self.salt_name]
remotetemp = self.remote.variables[self.temp_name]
localvars.append(salt)
localvars.append(temp)
remotevars.append(remotesalt)
remotevars.append(remotetemp)
if self.wname is not None:
w = self.local.variables['w']
remotew = self.remote.variables[self.wname]
localvars.append(w)
remotevars.append(remotew)
if self.zname is not None:
remotez = self.remote.variables[self.zname]
if remotez.ndim == 4:
z = self.local.variables['z']
localvars.append(z)
remotevars.append(remotez)
if self.tname is not None:
# remotetime = self.remote.variables[self.tname]
time[self.inds] = self.remote.variables[self.tname][self.inds]
if self.point_get.value[0]+self.time_size > np.max(self.inds):
current_inds = np.arange(self.point_get.value[0], np.max(self.inds)+1)
else:
current_inds = np.arange(self.point_get.value[0], self.point_get.value[0] + self.time_size)
# Get data from remote dataset and add
# to local cache
while True:
try:
self.get_remote_data(localvars, remotevars, current_inds, shape)
except:
logger.warn("DataController failed to get remote data. Trying again in 30 seconds")
timer.sleep(30)
else:
break
c += 1
except StandardError:
logger.error("DataController failed to get data (not first request)")
raise
finally:
self.local.sync()
self.local.close()
self.has_write_lock.value = -1
self.write_lock.release()
self.get_data.value = False
self.read_lock.release()
logger.debug("Done updating cache file, closing file, and releasing locks")
else:
logger.debug("Particles are still running, waiting for them to request data...")
timer.sleep(2)
self.dataset.closenc()
return "DataController"
class ForceParticle(object):
def __str__(self):
return self.part.__str__()
def __init__(self, hydrodataset, part, common_variables, timevar, times, start_time, models,
release_location_centroid, usebathy, useshore, usesurface,
get_data, n_run, read_lock, has_read_lock, read_count,
point_get, data_request_lock, has_data_request_lock, reverse_distance=None, bathy=None,
shoreline_path=None, shoreline_feature=None, time_method=None, caching=None):
"""
This is the task/class/object/job that forces an
individual particle and communicates with the
other particles and data controller for local
cache updates
"""
assert "hydrodataset" is not None
self.hydrodataset = hydrodataset
self.bathy = bathy
self.common_variables = common_variables
self.release_location_centroid = release_location_centroid
self.part = part
self.times = times
self.start_time = start_time
self.models = models
self.usebathy = usebathy
self.useshore = useshore
self.usesurface = usesurface
self.get_data = get_data
self.n_run = n_run
self.read_lock = read_lock
self.has_read_lock = has_read_lock
self.read_count = read_count
self.point_get = point_get
self.data_request_lock = data_request_lock
self.has_data_request_lock = has_data_request_lock
self.shoreline_path = shoreline_path
self.shoreline_feature = shoreline_feature
self.timevar = timevar
if caching is None:
caching = True
self.caching = caching
# Set common variable names
self.uname = common_variables.get("u", None)
self.vname = common_variables.get("v", None)
self.wname = common_variables.get("w", None)
self.temp_name = common_variables.get("temp", None)
self.salt_name = common_variables.get("salt", None)
self.xname = common_variables.get("x", None)
self.yname = common_variables.get("y", None)
self.zname = common_variables.get("z", None)
self.tname = common_variables.get("time", None)
self.reverse_distance = reverse_distance
if time_method is None:
time_method = 'interp'
self.time_method = time_method
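# Editor's sketch (hypothetical variable names): common_variables maps
# canonical keys to the dataset's own variable names, for example:
#     common_variables = {"u": "u", "v": "v", "w": "w",
#                         "temp": "temp", "salt": "salt",
#                         "x": "lon_rho", "y": "lat_rho",
#                         "z": "s_rho", "time": "ocean_time"}
# Any key left out resolves to None above and disables that variable.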
def need_data(self, i):
"""
Method to test if cache contains the data that
the particle needs
"""
# If we are not caching, we always grab data from the raw source
if self.caching is False:
return False
logger.debug("Checking cache for data availability at %s." % self.part.location.logstring())
try:
# Tell the DataController that we are going to be reading from the file
with self.read_lock:
self.read_count.value += 1
self.has_read_lock.append(os.getpid())
self.dataset.opennc()
# Test if the cache has the data we need
# If the point we request contains fill values,
# we need data
cached_lookup = self.dataset.get_values('domain', timeinds=[np.asarray([i])], point=self.part.location)
logger.debug("Type of result: %s" % type(cached_lookup))
logger.debug("Double mean of result: %s" % np.mean(np.mean(cached_lookup)))
logger.debug("Type of Double mean of result: %s" % type(np.mean(np.mean(cached_lookup))))
if type(np.mean(np.mean(cached_lookup))) == np.ma.core.MaskedConstant:
need = True
logger.debug("I NEED data. Got back: %s" % cached_lookup)
else:
need = False
logger.debug("I DO NOT NEED data")
except StandardError:
# If the time index doesn't even exist in the cache, we need data
need = True
logger.debug("I NEED data (no time index exists in cache)")
finally:
self.dataset.closenc()
with self.read_lock:
self.read_count.value -= 1
self.has_read_lock.remove(os.getpid())
return need  # True if we need data, False if we don't
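# Editor's illustration of the masked-value test used above: the double
# mean of a slab that is entirely fill values collapses to numpy's masked
# constant, e.g.
#     >>> import numpy as np
#     >>> type(np.ma.masked_all((2, 2)).mean()) == np.ma.core.MaskedConstant
#     True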
def linterp(self, setx, sety, x):
"""
Linear interp of model data values between time steps
"""
if math.isnan(sety[0]) or math.isnan(setx[0]):
return np.nan
#if math.isnan(sety[0]):
# sety[0] = 0.
#if math.isnan(sety[1]):
# sety[1] = 0.
return sety[0] + (x - setx[0]) * ( (sety[1]-sety[0]) / (setx[1]-setx[0]) )
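# Editor's example (hypothetical numbers): interpolating u between model
# steps at t=0 s and t=3600 s with values 0.2 and 0.6 m/s:
#     >>> self.linterp([0.0, 3600.0], [0.2, 0.6], 1800.0)
#     0.4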
def data_interp(self, i, currenttime):
"""
Method to streamline requests for data from the cache.
Uses linear interpolation between timesteps to
get u, v, w, temp and salt.
"""
if self.active.value is True:
while self.get_data.value is True:
logger.debug("Waiting for DataController to release cache file so I can read from it...")
timer.sleep(2)
pass
if self.need_data(i+1):
# Acquire lock for asking for data
self.data_request_lock.acquire()
self.has_data_request_lock.value = os.getpid()
try:
# Do I still need data?
if self.need_data(i+1):
# Tell the DataController that we are going to be reading from the file
with self.read_lock:
self.read_count.value += 1
self.has_read_lock.append(os.getpid())
# Open netcdf file on disk from commondataset
self.dataset.opennc()
# Get the indices for the current particle location
indices = self.dataset.get_indices('u', timeinds=[np.asarray([i-1])], point=self.part.location )
self.dataset.closenc()
with self.read_lock:
self.read_count.value -= 1
self.has_read_lock.remove(os.getpid())
# Override the time
# get the current time index data
self.point_get.value = [indices[0] + 1, indices[-2], indices[-1]]
# Request that the data controller update the cache
self.get_data.value = True
# Wait until the data controller is done
if self.active.value is True:
while self.get_data.value is True:
logger.debug("Waiting for DataController to update cache with the CURRENT time index")
timer.sleep(2)
pass
# Do we still need to get the next timestep?
if self.need_data(i+1):
# get the next time index data
self.point_get.value = [indices[0] + 2, indices[-2], indices[-1]]
# Request that the data controller update the cache
self.get_data.value = True
# Wait until the data controller is done
if self.active.value is True:
while self.get_data.value is True:
logger.debug("Waiting for DataController to update cache with the NEXT time index")
timer.sleep(2)
pass
except StandardError:
logger.warn("Particle failed to request data correctly")
raise
finally:
# Release lock for asking for data
self.has_data_request_lock.value = -1
self.data_request_lock.release()
if self.caching is True:
# Tell the DataController that we are going to be reading from the file
with self.read_lock:
self.read_count.value += 1
self.has_read_lock.append(os.getpid())
try:
# Open the Cache netCDF file on disk
self.dataset.opennc()
# Grab data at time index closest to particle location
u = [np.mean(np.mean(self.dataset.get_values('u', timeinds=[np.asarray([i])], point=self.part.location ))),
np.mean(np.mean(self.dataset.get_values('u', timeinds=[np.asarray([i+1])], point=self.part.location )))]
v = [np.mean(np.mean(self.dataset.get_values('v', timeinds=[np.asarray([i])], point=self.part.location ))),
np.mean(np.mean(self.dataset.get_values('v', timeinds=[np.asarray([i+1])], point=self.part.location )))]
# If there is vertical velocity in the dataset, get it
if 'w' in self.dataset.nc.variables:
w = [np.mean(np.mean(self.dataset.get_values('w', timeinds=[np.asarray([i])], point=self.part.location ))),
np.mean(np.mean(self.dataset.get_values('w', timeinds=[np.asarray([i+1])], point=self.part.location )))]
else:
w = [0.0, 0.0]
# If there is salt and temp in the dataset, get it
if self.temp_name is not None and self.salt_name is not None:
temp = [np.mean(np.mean(self.dataset.get_values('temp', timeinds=[np.asarray([i])], point=self.part.location ))),
np.mean(np.mean(self.dataset.get_values('temp', timeinds=[np.asarray([i+1])], point=self.part.location )))]
salt = [np.mean(np.mean(self.dataset.get_values('salt', timeinds=[np.asarray([i])], point=self.part.location ))),
np.mean(np.mean(self.dataset.get_values('salt', timeinds=[np.asarray([i+1])], point=self.part.location )))]
# Check for nans that occur in the ocean (happens because
# of model and coastline resolution mismatches)
if np.isnan(u).any() or np.isnan(v).any() or np.isnan(w).any():
# Take the mean of the closest 4 points
# If this includes nan, which it will, the result is nan
uarray1 = self.dataset.get_values('u', timeinds=[np.asarray([i])], point=self.part.location, num=2)
varray1 = self.dataset.get_values('v', timeinds=[np.asarray([i])], point=self.part.location, num=2)
uarray2 = self.dataset.get_values('u', timeinds=[np.asarray([i+1])], point=self.part.location, num=2)
varray2 = self.dataset.get_values('v', timeinds=[np.asarray([i+1])], point=self.part.location, num=2)
if 'w' in self.dataset.nc.variables:
warray1 = self.dataset.get_values('w', timeinds=[np.asarray([i])], point=self.part.location, num=2)
warray2 = self.dataset.get_values('w', timeinds=[np.asarray([i+1])], point=self.part.location, num=2)
w = [warray1.mean(), warray2.mean()]
else:
w = [0.0, 0.0]
if self.temp_name is not None and self.salt_name is not None:
temparray1 = self.dataset.get_values('temp', timeinds=[np.asarray([i])], point=self.part.location, num=2)
saltarray1 = self.dataset.get_values('salt', timeinds=[np.asarray([i])], point=self.part.location, num=2)
temparray2 = self.dataset.get_values('temp', timeinds=[np.asarray([i+1])], point=self.part.location, num=2)
saltarray2 = self.dataset.get_values('salt', timeinds=[np.asarray([i+1])], point=self.part.location, num=2)
temp = [temparray1.mean(), temparray2.mean()]
salt = [saltarray1.mean(), saltarray2.mean()]
u = [uarray1.mean(), uarray2.mean()]
v = [varray1.mean(), varray2.mean()]
# Linear interp of data between timesteps
currenttime = date2num(currenttime)
timevar = self.timevar.datenum
u = self.linterp(timevar[i:i+2], u, currenttime)
v = self.linterp(timevar[i:i+2], v, currenttime)
w = self.linterp(timevar[i:i+2], w, currenttime)
if self.temp_name is not None and self.salt_name is not None:
temp = self.linterp(timevar[i:i+2], temp, currenttime)
salt = self.linterp(timevar[i:i+2], salt, currenttime)
if self.temp_name is None:
temp = np.nan
if self.salt_name is None:
salt = np.nan
except StandardError:
logger.error("Error in data_interp method on ForceParticle")
raise
finally:
# If caching is False, we don't have to close the dataset. We can stay in read-only mode.
if self.caching is True:
self.dataset.closenc()
with self.read_lock:
self.read_count.value -= 1
self.has_read_lock.remove(os.getpid())
return u, v, w, temp, salt
def data_nearest(self, i, currenttime):
"""
Method to streamline requests for data from the cache.
Uses the nearest time index to get u, v, w, temp and salt.
"""
if self.active.value is True:
while self.get_data.value is True:
logger.debug("Waiting for DataController to release cache file so I can read from it...")
timer.sleep(2)
pass
if self.need_data(i):
# Acquire lock for asking for data
self.data_request_lock.acquire()
self.has_data_request_lock.value = os.getpid()
try:
if self.need_data(i):
with self.read_lock:
self.read_count.value += 1
self.has_read_lock.append(os.getpid())
# Open netcdf file on disk from commondataset
self.dataset.opennc()
# Get the indices for the current particle location
indices = self.dataset.get_indices('u', timeinds=[np.asarray([i-1])], point=self.part.location )
self.dataset.closenc()
with self.read_lock:
self.read_count.value -= 1
self.has_read_lock.remove(os.getpid())
# Override the time
self.point_get.value = [indices[0]+1, indices[-2], indices[-1]]
# Request that the data controller update the cache
# DATA CONTROLLER STARTS
self.get_data.value = True
# Wait until the data controller is done
if self.active.value is True:
while self.get_data.value is True:
logger.debug("Waiting for DataController to update cache...")
timer.sleep(2)
pass
except StandardError:
raise
finally:
self.has_data_request_lock.value = -1
self.data_request_lock.release()
# Tell the DataController that we are going to be reading from the file
if self.caching is True:
with self.read_lock:
self.read_count.value += 1
self.has_read_lock.append(os.getpid())
try:
# Open netcdf file on disk from commondataset
self.dataset.opennc()
# Grab data at time index closest to particle location
u = np.mean(np.mean(self.dataset.get_values('u', timeinds=[np.asarray([i])], point=self.part.location )))
v = np.mean(np.mean(self.dataset.get_values('v', timeinds=[np.asarray([i])], point=self.part.location )))
# If there is vertical velocity in the dataset, get it
if 'w' in self.dataset.nc.variables:
w = np.mean(np.mean(self.dataset.get_values('w', timeinds=[np.asarray([i])], point=self.part.location )))
else:
w = 0.0
# If there is salt and temp in the dataset, get it
if self.temp_name is not None and self.salt_name is not None:
temp = np.mean(np.mean(self.dataset.get_values('temp', timeinds=[np.asarray([i])], point=self.part.location )))
salt = np.mean(np.mean(self.dataset.get_values('salt', timeinds=[np.asarray([i])], point=self.part.location )))
# Check for nans that occur in the ocean (happens because
# of model and coastline resolution mismatches)
if np.isnan(u).any() or np.isnan(v).any() or np.isnan(w).any():
# Take the mean of the closest 4 points
# If this includes nan, which it will, the result is nan
uarray1 = self.dataset.get_values('u', timeinds=[np.asarray([i])], point=self.part.location, num=2)
varray1 = self.dataset.get_values('v', timeinds=[np.asarray([i])], point=self.part.location, num=2)
if 'w' in self.dataset.nc.variables:
warray1 = self.dataset.get_values('w', timeinds=[np.asarray([i])], point=self.part.location, num=2)
w = warray1.mean()
else:
w = 0.0
if self.temp_name is not None and self.salt_name is not None:
temparray1 = self.dataset.get_values('temp', timeinds=[np.asarray([i])], point=self.part.location, num=2)
saltarray1 = self.dataset.get_values('salt', timeinds=[np.asarray([i])], point=self.part.location, num=2)
temp = temparray1.mean()
salt = saltarray1.mean()
u = uarray1.mean()
v = varray1.mean()
if self.temp_name is None:
temp = np.nan
if self.salt_name is None:
salt = np.nan
#logger.info(self.dataset.get_xyind_from_point('u', self.part.location, num=1))
except StandardError:
logger.error("Error in data_nearest on ForceParticle")
raise
finally:
# If caching is False, we don't have to close the dataset. We are in read only anyway.
if self.caching is True:
self.dataset.closenc()
with self.read_lock:
self.read_count.value -= 1
self.has_read_lock.remove(os.getpid())
return u, v, w, temp, salt
def __call__(self, proc, active):
self.active = active
if self.usebathy is True:
try:
self._bathymetry = Bathymetry(file=self.bathy)
except Exception:
logger.exception("Could not load Bathymetry file: %s, using no Bathymetry for this run!" % self.bathy)
self.usebathy = False
self._shoreline = None
if self.useshore is True:
self._shoreline = Shoreline(path=self.shoreline_path, feature_name=self.shoreline_feature, point=self.release_location_centroid, spatialbuffer=0.25)
# Make sure we are not starting on land. Raises exception if we are.
self._shoreline.intersect(start_point=self.release_location_centroid, end_point=self.release_location_centroid)
if self.active.value is True:
while self.get_data.value is True:
logger.info("Waiting for DataController to start...")
timer.sleep(5)
pass
# Initialize commondataset of local cache, then
# close the related netcdf file
try:
if self.caching is True:
with self.read_lock:
self.read_count.value += 1
self.has_read_lock.append(os.getpid())
self.dataset = CommonDataset.open(self.hydrodataset)
self.dataset.closenc()
except StandardError:
logger.warn("No source dataset: %s. Particle exiting" % self.hydrodataset)
raise
finally:
if self.caching is True:
with self.read_lock:
self.read_count.value -= 1
self.has_read_lock.remove(os.getpid())
# Calculate datetime at every timestep
modelTimestep, newtimes = AsaTransport.get_time_objects_from_model_timesteps(self.times, start=self.start_time)
if self.time_method == 'interp':
time_indices = self.timevar.nearest_index(newtimes, select='before')
elif self.time_method == 'nearest':
time_indices = self.timevar.nearest_index(newtimes)
else:
logger.warn("Method for computing u,v,w,temp,salt not supported!")
try:
assert len(newtimes) == len(time_indices)
except AssertionError:
logger.error("Time index mismatch: the number of datetimes must equal the number of time indices")
raise
# loop over timesteps
# We don't loop over the last time index because
# we query at time index i and then set the particle's
# location using the following 'newtime' object.
for loop_i, i in enumerate(time_indices[0:-1]):
if self.active.value is False:
raise ValueError("Particle exiting due to Failure.")
newloc = None
# Get the variable data required by the models
if self.time_method == 'nearest':
u, v, w, temp, salt = self.data_nearest(i, newtimes[loop_i])
elif self.time_method == 'interp':
u, v, w, temp, salt = self.data_interp(i, newtimes[loop_i])
else:
logger.warn("Method for computing u,v,w,temp,salt is unknown. Only 'nearest' and 'interp' are supported.")
# Get the bathy value at the particles location
if self.usebathy is True:
bathymetry_value = self._bathymetry.get_depth(self.part.location)
else:
bathymetry_value = -999999999999999
# Age the particle by the modelTimestep (seconds)
# 'Age' meaning the amount of time it has been forced.
self.part.age(seconds=modelTimestep[loop_i])
# loop over models - sort these in the order you want them to run
for model in self.models:
movement = model.move(self.part, u, v, w, modelTimestep[loop_i], temperature=temp, salinity=salt, bathymetry_value=bathymetry_value)
newloc = Location4D(latitude=movement['latitude'], longitude=movement['longitude'], depth=movement['depth'], time=newtimes[loop_i+1])
logger.debug("%s - moved %.3f meters (horizontally) and %.3f meters (vertically) by %s with data from %s" % (self.part.logstring(), movement['distance'], movement['vertical_distance'], model.__class__.__name__, newtimes[loop_i].isoformat()))
if newloc:
self.boundary_interaction(particle=self.part, starting=self.part.location, ending=newloc,
distance=movement['distance'], angle=movement['angle'],
azimuth=movement['azimuth'], reverse_azimuth=movement['reverse_azimuth'],
vertical_distance=movement['vertical_distance'], vertical_angle=movement['vertical_angle'])
logger.debug("%s - was forced by %s and is now at %s" % (self.part.logstring(), model.__class__.__name__, self.part.location.logstring()))
self.part.note = self.part.outputstring()
# Each timestep, save the particle's status and environmental variables.
# This keeps fields such as temp, salt, halted, settled, and dead matched up with the number of timesteps
self.part.save()
self.dataset.closenc()
# We won't pull data for the last entry in locations, but we need to populate it with fill data.
self.part.fill_environment_gap()
if self.usebathy is True:
self._bathymetry.close()
if self.useshore is True:
self._shoreline.close()
return self.part
def boundary_interaction(self, **kwargs):
"""
Handles shoreline, bathymetry, and sea-surface interactions, updating the particle and its ending location in place.
"""
particle = kwargs.pop('particle')
starting = kwargs.pop('starting')
ending = kwargs.pop('ending')
# shoreline
if self.useshore:
intersection_point = self._shoreline.intersect(start_point=starting.point, end_point=ending.point)
if intersection_point:
# Set the intersection point.
hitpoint = Location4D(point=intersection_point['point'], time=starting.time + (ending.time - starting.time))
particle.location = hitpoint
# This relies on the shoreline to put the particle in water and not on shore.
resulting_point = self._shoreline.react(start_point=starting,
end_point=ending,
hit_point=hitpoint,
reverse_distance=self.reverse_distance,
feature=intersection_point['feature'],
distance=kwargs.get('distance'),
angle=kwargs.get('angle'),
azimuth=kwargs.get('azimuth'),
reverse_azimuth=kwargs.get('reverse_azimuth'))
ending.latitude = resulting_point.latitude
ending.longitude = resulting_point.longitude
ending.depth = resulting_point.depth
logger.debug("%s - hit the shoreline at %s. Setting location to %s." % (particle.logstring(), hitpoint.logstring(), ending.logstring()))
# bathymetry
if self.usebathy:
if not particle.settled:
bintersect = self._bathymetry.intersect(start_point=starting, end_point=ending)
if bintersect:
pt = self._bathymetry.react(type='reverse', start_point=starting, end_point=ending)
logger.debug("%s - hit the bottom at %s. Setting location to %s." % (particle.logstring(), ending.logstring(), pt.logstring()))
ending.latitude = pt.latitude
ending.longitude = pt.longitude
ending.depth = pt.depth
# sea-surface
if self.usesurface:
if ending.depth > 0:
logger.debug("%s - rose out of the water. Setting depth to 0." % particle.logstring())
ending.depth = 0
particle.location = ending
return
| asascience-open/paegan-transport | paegan/transport/parallel_manager.py | Python | gpl-3.0 | 47,626 | ["NetCDF"] | 1cc4136abb8570f38d7773cd5c047a113db21b613381a4467fc46224a6da4830 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
import json
from nose.plugins.attrib import attr
from datetime import datetime, timedelta
import ddt
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from ..helpers import UniqueCourseTest, EventsTestMixin
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.lms.create_mode import ModeCreationPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.lms.courseware import CoursewarePage, CoursewareSequentialTabPage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.lms.problem import ProblemPage
from ...pages.common.logout import LogoutPage
from ...pages.lms.track_selection import TrackSelectionPage
from ...pages.lms.pay_and_verify import PaymentAndVerificationFlow, FakePaymentPage
from ...pages.lms.dashboard import DashboardPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
class CoursewareTest(UniqueCourseTest):
"""
Test courseware.
"""
USERNAME = "STUDENT_TESTER"
EMAIL = "student101@example.com"
def setUp(self):
super(CoursewareTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
self.course_outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Install a course with sections/problems, tabs, updates, and handouts
self.course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1')
)
),
XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
XBlockFixtureDesc('problem', 'Test Problem 2')
)
)
).install()
# Auto-auth register for the course.
self._auto_auth(self.USERNAME, self.EMAIL, False)
def _goto_problem_page(self):
"""
Open problem page with assertion.
"""
self.courseware_page.visit()
self.problem_page = ProblemPage(self.browser) # pylint: disable=attribute-defined-outside-init
self.assertEqual(self.problem_page.problem_name, 'Test Problem 1')
def _create_breadcrumb(self, index):
""" Create breadcrumb """
return ['Test Section {}'.format(index), 'Test Subsection {}'.format(index), 'Test Problem {}'.format(index)]
def _auto_auth(self, username, email, staff):
"""
Logout and login with given credentials.
"""
AutoAuthPage(self.browser, username=username, email=email,
course_id=self.course_id, staff=staff).visit()
def test_courseware(self):
"""
Test courseware when the most recently visited subsection becomes unpublished.
"""
# Visit problem page as a student.
self._goto_problem_page()
# Logout and login as a staff user.
LogoutPage(self.browser).visit()
self._auto_auth("STAFF_TESTER", "staff101@example.com", True)
# Visit course outline page in studio.
self.course_outline.visit()
# Set release date for subsection in future.
self.course_outline.change_problem_release_date()
# Logout and login as a student.
LogoutPage(self.browser).visit()
self._auto_auth(self.USERNAME, self.EMAIL, False)
# Visit courseware as a student.
self.courseware_page.visit()
# Problem name should be "Test Problem 2".
self.assertEqual(self.problem_page.problem_name, 'Test Problem 2')
def test_course_tree_breadcrumb(self):
"""
Scenario: Correct course tree breadcrumb is shown.
Given that I am a registered user
And I visit my courseware page
Then I should see correct course tree breadcrumb
"""
self.courseware_page.visit()
xblocks = self.course_fix.get_nested_xblocks(category="problem")
for index in range(1, len(xblocks) + 1):
self.course_nav.go_to_section('Test Section {}'.format(index), 'Test Subsection {}'.format(index))
courseware_page_breadcrumb = self.courseware_page.breadcrumb
expected_breadcrumb = self._create_breadcrumb(index) # pylint: disable=no-member
self.assertEqual(courseware_page_breadcrumb, expected_breadcrumb)
@ddt.ddt
class ProctoredExamTest(UniqueCourseTest):
"""
Test proctored and timed exams.
"""
USERNAME = "STUDENT_TESTER"
EMAIL = "student101@example.com"
def setUp(self):
super(ProctoredExamTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.course_outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Install a course with sections/problems, tabs, updates, and handouts
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_advanced_settings({
"enable_proctored_exams": {"value": "true"}
})
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1')
)
)
).install()
self.track_selection_page = TrackSelectionPage(self.browser, self.course_id)
self.payment_and_verification_flow = PaymentAndVerificationFlow(self.browser, self.course_id)
self.immediate_verification_page = PaymentAndVerificationFlow(
self.browser, self.course_id, entry_point='verify-now'
)
self.upgrade_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='upgrade')
self.fake_payment_page = FakePaymentPage(self.browser, self.course_id)
self.dashboard_page = DashboardPage(self.browser)
self.problem_page = ProblemPage(self.browser)
# Add a verified mode to the course
ModeCreationPage(
self.browser, self.course_id, mode_slug=u'verified', mode_display_name=u'Verified Certificate',
min_price=10, suggested_prices='10,20'
).visit()
# Auto-auth register for the course.
self._auto_auth(self.USERNAME, self.EMAIL, False)
def _auto_auth(self, username, email, staff):
"""
Logout and login with given credentials.
"""
AutoAuthPage(self.browser, username=username, email=email,
course_id=self.course_id, staff=staff).visit()
def _login_as_a_verified_user(self):
"""
Log in as a verified user.
"""
self._auto_auth(self.USERNAME, self.EMAIL, False)
# The track selection page cannot be visited directly; see the other tests for any prerequisites.
# Navigate to the track selection page
self.track_selection_page.visit()
# Enter the payment and verification flow by choosing to enroll as verified
self.track_selection_page.enroll('verified')
# Proceed to the fake payment page
self.payment_and_verification_flow.proceed_to_payment()
# Submit payment
self.fake_payment_page.submit_payment()
def test_can_create_proctored_exam_in_studio(self):
"""
Given that I am a staff member
When I visit the course outline page in studio.
And open the subsection edit dialog
Then I can view all settings related to Proctored and timed exams
"""
LogoutPage(self.browser).visit()
self._auto_auth("STAFF_TESTER", "staff101@example.com", True)
self.course_outline.visit()
self.course_outline.open_subsection_settings_dialog()
self.assertTrue(self.course_outline.proctoring_items_are_displayed())
def test_proctored_exam_flow(self):
"""
Given that I am a staff member on the exam settings section
select advanced settings tab
When I make the exam proctored.
And I login as a verified student.
And visit the courseware as a verified student.
Then I can see an option to take the exam as a proctored exam.
"""
LogoutPage(self.browser).visit()
self._auto_auth("STAFF_TESTER", "staff101@example.com", True)
self.course_outline.visit()
self.course_outline.open_subsection_settings_dialog()
self.course_outline.select_advanced_tab()
self.course_outline.make_exam_proctored()
LogoutPage(self.browser).visit()
self._login_as_a_verified_user()
self.courseware_page.visit()
self.assertTrue(self.courseware_page.can_start_proctored_exam)
@ddt.data(True, False)
def test_timed_exam_flow(self, hide_after_due):
"""
Given that I am a staff member on the exam settings section
select advanced settings tab
When I make the exam timed.
And I login as a verified student.
And visit the courseware as a verified student.
And I start the timed exam
Then I am taken to the exam with a timer bar showing
When I finish the exam
Then I see the exam submitted dialog in place of the exam
When I log back into studio as a staff member
And change the problem's due date to be in the past
And log back in as the original verified student
Then I see the exam or message in accordance with the hide_after_due setting
"""
LogoutPage(self.browser).visit()
self._auto_auth("STAFF_TESTER", "staff101@example.com", True)
self.course_outline.visit()
self.course_outline.open_subsection_settings_dialog()
self.course_outline.select_advanced_tab()
self.course_outline.make_exam_timed(hide_after_due=hide_after_due)
LogoutPage(self.browser).visit()
self._login_as_a_verified_user()
self.courseware_page.visit()
self.courseware_page.start_timed_exam()
self.assertTrue(self.courseware_page.is_timer_bar_present)
self.courseware_page.stop_timed_exam()
self.assertTrue(self.courseware_page.has_submitted_exam_message())
LogoutPage(self.browser).visit()
self._auto_auth("STAFF_TESTER", "staff101@example.com", True)
self.course_outline.visit()
last_week = (datetime.today() - timedelta(days=7)).strftime("%m/%d/%Y")
self.course_outline.change_problem_due_date(last_week)
LogoutPage(self.browser).visit()
self._auto_auth(self.USERNAME, self.EMAIL, False)
self.courseware_page.visit()
self.assertEqual(self.courseware_page.has_submitted_exam_message(), hide_after_due)
def test_field_visibility_with_all_exam_types(self):
"""
Given that I am a staff member
And I have visited the course outline page in studio.
And the subsection edit dialog is open
select advanced settings tab
For each of None, Timed, Proctored, and Practice exam types
The time allotted, review rules, and hide after due fields have proper visibility
None: False, False, False
Timed: True, False, True
Proctored: True, True, False
Practice: True, False, False
"""
LogoutPage(self.browser).visit()
self._auto_auth("STAFF_TESTER", "staff101@example.com", True)
self.course_outline.visit()
self.course_outline.open_subsection_settings_dialog()
self.course_outline.select_advanced_tab()
self.course_outline.select_none_exam()
self.assertFalse(self.course_outline.time_allotted_field_visible())
self.assertFalse(self.course_outline.exam_review_rules_field_visible())
self.assertFalse(self.course_outline.hide_after_due_field_visible())
self.course_outline.select_timed_exam()
self.assertTrue(self.course_outline.time_allotted_field_visible())
self.assertFalse(self.course_outline.exam_review_rules_field_visible())
self.assertTrue(self.course_outline.hide_after_due_field_visible())
self.course_outline.select_proctored_exam()
self.assertTrue(self.course_outline.time_allotted_field_visible())
self.assertTrue(self.course_outline.exam_review_rules_field_visible())
self.assertFalse(self.course_outline.hide_after_due_field_visible())
self.course_outline.select_practice_exam()
self.assertTrue(self.course_outline.time_allotted_field_visible())
self.assertFalse(self.course_outline.exam_review_rules_field_visible())
self.assertFalse(self.course_outline.hide_after_due_field_visible())
class CoursewareMultipleVerticalsTest(UniqueCourseTest, EventsTestMixin):
"""
Test courseware with multiple verticals
"""
USERNAME = "STUDENT_TESTER"
EMAIL = "student101@example.com"
def setUp(self):
super(CoursewareMultipleVerticalsTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.course_outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Install a course with sections/problems, tabs, updates, and handouts
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 1,1').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data='<problem>problem 1 dummy body</problem>'),
XBlockFixtureDesc('html', 'html 1', data="<html>html 1 dummy body</html>"),
XBlockFixtureDesc('problem', 'Test Problem 2', data="<problem>problem 2 dummy body</problem>"),
XBlockFixtureDesc('html', 'html 2', data="<html>html 2 dummy body</html>"),
),
XBlockFixtureDesc('sequential', 'Test Subsection 1,2').add_children(
XBlockFixtureDesc('problem', 'Test Problem 3', data='<problem>problem 3 dummy body</problem>'),
),
XBlockFixtureDesc(
'sequential', 'Test HIDDEN Subsection', metadata={'visible_to_staff_only': True}
).add_children(
XBlockFixtureDesc('problem', 'Test HIDDEN Problem', data='<problem>hidden problem</problem>'),
),
),
XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 2,1').add_children(
XBlockFixtureDesc('problem', 'Test Problem 4', data='<problem>problem 4 dummy body</problem>'),
),
),
XBlockFixtureDesc('chapter', 'Test HIDDEN Section', metadata={'visible_to_staff_only': True}).add_children(
XBlockFixtureDesc('sequential', 'Test HIDDEN Subsection'),
),
).install()
# Auto-auth register for the course.
AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
course_id=self.course_id, staff=False).visit()
self.courseware_page.visit()
self.course_nav = CourseNavPage(self.browser)
def test_navigation_buttons(self):
# start in first section
self.assert_navigation_state('Test Section 1', 'Test Subsection 1,1', 0, next_enabled=True, prev_enabled=False)
# next takes us to next tab in sequential
self.courseware_page.click_next_button_on_top()
self.assert_navigation_state('Test Section 1', 'Test Subsection 1,1', 1, next_enabled=True, prev_enabled=True)
# go to last sequential position
self.courseware_page.go_to_sequential_position(4)
self.assert_navigation_state('Test Section 1', 'Test Subsection 1,1', 3, next_enabled=True, prev_enabled=True)
# next takes us to next sequential
self.courseware_page.click_next_button_on_bottom()
self.assert_navigation_state('Test Section 1', 'Test Subsection 1,2', 0, next_enabled=True, prev_enabled=True)
# next takes us to next chapter
self.courseware_page.click_next_button_on_top()
self.assert_navigation_state('Test Section 2', 'Test Subsection 2,1', 0, next_enabled=False, prev_enabled=True)
# previous takes us to previous chapter
self.courseware_page.click_previous_button_on_top()
self.assert_navigation_state('Test Section 1', 'Test Subsection 1,2', 0, next_enabled=True, prev_enabled=True)
# previous takes us to last tab in previous sequential
self.courseware_page.click_previous_button_on_bottom()
self.assert_navigation_state('Test Section 1', 'Test Subsection 1,1', 3, next_enabled=True, prev_enabled=True)
# previous takes us to previous tab in sequential
self.courseware_page.click_previous_button_on_bottom()
self.assert_navigation_state('Test Section 1', 'Test Subsection 1,1', 2, next_enabled=True, prev_enabled=True)
# test UI events emitted by navigation
filter_sequence_ui_event = lambda event: event.get('name', '').startswith('edx.ui.lms.sequence.')
sequence_ui_events = self.wait_for_events(event_filter=filter_sequence_ui_event, timeout=2)
legacy_events = [ev for ev in sequence_ui_events if ev['event_type'] in {'seq_next', 'seq_prev', 'seq_goto'}]
nonlegacy_events = [ev for ev in sequence_ui_events if ev not in legacy_events]
self.assertTrue(all('old' in json.loads(ev['event']) for ev in legacy_events))
self.assertTrue(all('new' in json.loads(ev['event']) for ev in legacy_events))
self.assertFalse(any('old' in json.loads(ev['event']) for ev in nonlegacy_events))
self.assertFalse(any('new' in json.loads(ev['event']) for ev in nonlegacy_events))
self.assert_events_match(
[
{
'event_type': 'seq_next',
'event': {
'old': 1,
'new': 2,
'current_tab': 1,
'tab_count': 4,
'widget_placement': 'top',
}
},
{
'event_type': 'seq_goto',
'event': {
'old': 2,
'new': 4,
'current_tab': 2,
'target_tab': 4,
'tab_count': 4,
'widget_placement': 'top',
}
},
{
'event_type': 'edx.ui.lms.sequence.next_selected',
'event': {
'current_tab': 4,
'tab_count': 4,
'widget_placement': 'bottom',
}
},
{
'event_type': 'edx.ui.lms.sequence.next_selected',
'event': {
'current_tab': 1,
'tab_count': 1,
'widget_placement': 'top',
}
},
{
'event_type': 'edx.ui.lms.sequence.previous_selected',
'event': {
'current_tab': 1,
'tab_count': 1,
'widget_placement': 'top',
}
},
{
'event_type': 'edx.ui.lms.sequence.previous_selected',
'event': {
'current_tab': 1,
'tab_count': 1,
'widget_placement': 'bottom',
}
},
{
'event_type': 'seq_prev',
'event': {
'old': 4,
'new': 3,
'current_tab': 4,
'tab_count': 4,
'widget_placement': 'bottom',
}
},
],
sequence_ui_events
)
def test_outline_selected_events(self):
self.course_nav.go_to_section('Test Section 1', 'Test Subsection 1,2')
self.course_nav.go_to_section('Test Section 2', 'Test Subsection 2,1')
# test UI events emitted by navigating via the course outline
filter_selected_events = lambda event: event.get('name', '') == 'edx.ui.lms.outline.selected'
selected_events = self.wait_for_events(event_filter=filter_selected_events, timeout=2)
# note: target_url is tested in unit tests, as the url changes here with every test (it includes GUIDs).
self.assert_events_match(
[
{
'event_type': 'edx.ui.lms.outline.selected',
'name': 'edx.ui.lms.outline.selected',
'event': {
'target_name': 'Test Subsection 1,2 ',
'widget_placement': 'accordion',
}
},
{
'event_type': 'edx.ui.lms.outline.selected',
'name': 'edx.ui.lms.outline.selected',
'event': {
'target_name': 'Test Subsection 2,1 ',
'widget_placement': 'accordion',
}
},
],
selected_events
)
def test_link_clicked_events(self):
"""
Given that I am a user in the courseware
When I navigate via the left-hand nav
Then a link clicked event is logged
"""
self.course_nav.go_to_section('Test Section 1', 'Test Subsection 1,2')
self.course_nav.go_to_section('Test Section 2', 'Test Subsection 2,1')
filter_link_clicked = lambda event: event.get('name', '') == 'edx.ui.lms.link_clicked'
link_clicked_events = self.wait_for_events(event_filter=filter_link_clicked, timeout=2)
self.assertEqual(len(link_clicked_events), 2)
def assert_navigation_state(
self, section_title, subsection_title, subsection_position, next_enabled, prev_enabled
):
"""
Verifies that the navigation state is as expected.
"""
self.assertTrue(self.course_nav.is_on_section(section_title, subsection_title))
self.assertEquals(self.courseware_page.sequential_position, subsection_position)
self.assertEquals(self.courseware_page.is_next_button_enabled, next_enabled)
self.assertEquals(self.courseware_page.is_previous_button_enabled, prev_enabled)
def test_tab_position(self):
# Test that using the position in the URL directs to the correct tab in courseware.
self.course_nav.go_to_section('Test Section 1', 'Test Subsection 1,1')
subsection_url = self.course_nav.active_subsection_url
url_part_list = subsection_url.split('/')
self.assertEqual(len(url_part_list), 9)
course_id = url_part_list[4]
chapter_id = url_part_list[-3]
subsection_id = url_part_list[-2]
problem1_page = CoursewareSequentialTabPage(
self.browser,
course_id=course_id,
chapter=chapter_id,
subsection=subsection_id,
position=1
).visit()
self.assertIn('problem 1 dummy body', problem1_page.get_selected_tab_content())
html1_page = CoursewareSequentialTabPage(
self.browser,
course_id=course_id,
chapter=chapter_id,
subsection=subsection_id,
position=2
).visit()
self.assertIn('html 1 dummy body', html1_page.get_selected_tab_content())
problem2_page = CoursewareSequentialTabPage(
self.browser,
course_id=course_id,
chapter=chapter_id,
subsection=subsection_id,
position=3
).visit()
self.assertIn('problem 2 dummy body', problem2_page.get_selected_tab_content())
html2_page = CoursewareSequentialTabPage(
self.browser,
course_id=course_id,
chapter=chapter_id,
subsection=subsection_id,
position=4
).visit()
self.assertIn('html 2 dummy body', html2_page.get_selected_tab_content())
@attr('a11y')
def test_courseware_a11y(self):
"""
Run accessibility audit for the problem type.
"""
self.course_nav.go_to_section('Test Section 1', 'Test Subsection 1,1')
# Set the scope to the sequence navigation
self.courseware_page.a11y_audit.config.set_scope(
include=['div.sequence-nav'])
self.courseware_page.a11y_audit.config.set_rules({
"ignore": [
'section', # TODO: AC-491
],
})
self.courseware_page.a11y_audit.check_for_accessibility_errors()
class ProblemStateOnNavigationTest(UniqueCourseTest):
"""
Test courseware with problems in multiple verticals
"""
USERNAME = "STUDENT_TESTER"
EMAIL = "student101@example.com"
problem1_name = 'MULTIPLE CHOICE TEST PROBLEM 1'
problem2_name = 'MULTIPLE CHOICE TEST PROBLEM 2'
def setUp(self):
super(ProblemStateOnNavigationTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Install a course with section, tabs and multiple choice problems.
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 1,1').add_children(
self.create_multiple_choice_problem(self.problem1_name),
self.create_multiple_choice_problem(self.problem2_name),
),
),
).install()
# Auto-auth register for the course.
AutoAuthPage(
self.browser, username=self.USERNAME, email=self.EMAIL,
course_id=self.course_id, staff=False
).visit()
self.courseware_page.visit()
self.problem_page = ProblemPage(self.browser)
def create_multiple_choice_problem(self, problem_name):
"""
Return the Multiple Choice Problem Descriptor, given the name of the problem.
"""
factory = MultipleChoiceResponseXMLFactory()
xml_data = factory.build_xml(
question_text='The correct answer is Choice 2',
choices=[False, False, True, False],
choice_names=['choice_0', 'choice_1', 'choice_2', 'choice_3']
)
return XBlockFixtureDesc(
'problem',
problem_name,
data=xml_data,
metadata={'rerandomize': 'always'}
)
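    # Editor's note (hedged): the factory above renders the question and
    # choices into capa problem XML; 'rerandomize': 'always' is presumably set
    # so that any unwanted re-rendering during navigation would show up as
    # changed content in the state-preservation tests below.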
def go_to_tab_and_assert_problem(self, position, problem_name):
"""
Go to the sequential tab at the given position and assert that we are on the problem whose name is given as a parameter.
Args:
position: Position of the sequential tab
problem_name: Name of the problem
"""
self.courseware_page.go_to_sequential_position(position)
self.problem_page.wait_for_element_presence(
self.problem_page.CSS_PROBLEM_HEADER,
'wait for problem header'
)
self.assertEqual(self.problem_page.problem_name, problem_name)
def test_perform_problem_check_and_navigate(self):
"""
Scenario:
I go to sequential position 1
Facing problem1, I select 'choice_1'
Then I click check button
Then I go to sequential position 2
Then I come back to sequential position 1 again
Facing problem1, I observe that the problem1 content is not
outdated across the sequence navigation
"""
# Go to sequential position 1 and assert that we are on problem 1.
self.go_to_tab_and_assert_problem(1, self.problem1_name)
# Update problem 1's content state by clicking check button.
self.problem_page.click_choice('choice_choice_1')
self.problem_page.click_check()
self.problem_page.wait_for_expected_status('label.choicegroup_incorrect', 'incorrect')
# Save problem 1's content state as we're about to switch units in the sequence.
problem1_content_before_switch = self.problem_page.problem_content
# Go to sequential position 2 and assert that we are on problem 2.
self.go_to_tab_and_assert_problem(2, self.problem2_name)
# Come back to our original unit in the sequence and assert that the content hasn't changed.
self.go_to_tab_and_assert_problem(1, self.problem1_name)
problem1_content_after_coming_back = self.problem_page.problem_content
self.assertEqual(problem1_content_before_switch, problem1_content_after_coming_back)
def test_perform_problem_save_and_navigate(self):
"""
Scenario:
I go to sequential position 1
Facing problem1, I select 'choice_1'
Then I click save button
Then I go to sequential position 2
Then I come back to sequential position 1 again
Facing problem1, I observe that the problem1 content is not
outdated across the sequence navigation
"""
# Go to sequential position 1 and assert that we are on problem 1.
self.go_to_tab_and_assert_problem(1, self.problem1_name)
# Update problem 1's content state by clicking save button.
self.problem_page.click_choice('choice_choice_1')
self.problem_page.click_save()
self.problem_page.wait_for_expected_status('div.capa_alert', 'saved')
# Save problem 1's content state as we're about to switch units in the sequence.
problem1_content_before_switch = self.problem_page.problem_content
# Go to sequential position 2 and assert that we are on problem 2.
self.go_to_tab_and_assert_problem(2, self.problem2_name)
# Come back to our original unit in the sequence and assert that the content hasn't changed.
self.go_to_tab_and_assert_problem(1, self.problem1_name)
problem1_content_after_coming_back = self.problem_page.problem_content
self.assertIn(problem1_content_after_coming_back, problem1_content_before_switch)
def test_perform_problem_reset_and_navigate(self):
"""
Scenario:
I go to sequential position 1
Facing problem1, I select 'choice_1'
Then I perform the check and reset actions
Then I go to sequential position 2
Then I come back to sequential position 1 again
Facing problem1, I observe that the problem1 content is not
outdated across the sequence navigation
"""
# Go to sequential position 1 and assert that we are on problem 1.
self.go_to_tab_and_assert_problem(1, self.problem1_name)
# Update problem 1's content state by performing a reset operation.
self.problem_page.click_choice('choice_choice_1')
self.problem_page.click_check()
self.problem_page.wait_for_expected_status('label.choicegroup_incorrect', 'incorrect')
self.problem_page.click_reset()
self.problem_page.wait_for_expected_status('span.unanswered', 'unanswered')
# Save problem 1's content state as we're about to switch units in the sequence.
problem1_content_before_switch = self.problem_page.problem_content
# Go to sequential position 2 and assert that we are on problem 2.
self.go_to_tab_and_assert_problem(2, self.problem2_name)
# Come back to our original unit in the sequence and assert that the content hasn't changed.
self.go_to_tab_and_assert_problem(1, self.problem1_name)
problem1_content_after_coming_back = self.problem_page.problem_content
self.assertEqual(problem1_content_before_switch, problem1_content_after_coming_back)
| waheedahmed/edx-platform | common/test/acceptance/tests/lms/test_lms_courseware.py | Python | agpl-3.0 | 33,212 | ["VisIt"] | 1a31f63995307114d0253b5a6bcc8f23bf1d461dcc1d1ab95c8a95edbcf75017 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module is intended to be used to compute Pourbaix diagrams
of arbitrary compositions and formation energies. If you use
this module in your work, please consider citing the following:
General formalism for solid-aqueous equilibria from DFT:
Persson et al., DOI: 10.1103/PhysRevB.85.235438
Decomposition maps, or Pourbaix hull diagrams
Singh et al., DOI: 10.1021/acs.chemmater.7b03980
Fast computation of many-element Pourbaix diagrams:
Patel et al., https://arxiv.org/abs/1909.00035 (submitted)
"""
import logging
import numpy as np
import itertools
import re
from copy import deepcopy
from functools import cmp_to_key, partial, lru_cache
from monty.json import MSONable, MontyDecoder
from multiprocessing import Pool
import warnings
from scipy.spatial import ConvexHull, HalfspaceIntersection
try:
from scipy.special import comb
except ImportError:
from scipy.misc import comb
from pymatgen.util.coord import Simplex
from pymatgen.util.string import latexify
from pymatgen.util.plotting import pretty_plot
from pymatgen.core.periodic_table import Element
from pymatgen.core.composition import Composition
from pymatgen.core.ion import Ion
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.analysis.reaction_calculator import Reaction, ReactionError
from pymatgen.analysis.phase_diagram import PhaseDiagram, PDEntry
from tqdm import tqdm
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.4"
__maintainer__ = "Joseph Montoya"
__credits__ = "Arunima Singh, Joseph Montoya, Anjli Patel"
__email__ = "joseph.montoya@tri.global"
__status__ = "Production"
__date__ = "Nov 1, 2012"
logger = logging.getLogger(__name__)
MU_H2O = -2.4583
PREFAC = 0.0591
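# Editor's note: PREFAC is the Nernstian prefactor 2.303*R*T/F, approximately
# 0.0591 V at 298.15 K; MU_H2O appears to be the chemical potential of water
# (eV) used to reference the formation energies in the energy property below.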
# TODO: Revise to more closely reflect PDEntry, invoke from energy/composition
# TODO: PourbaixEntries depend implicitly on having entry energies be
# formation energies, should be a better way to get from raw energies
# TODO: uncorrected_energy is a bit of a misnomer, but not sure what to rename
class PourbaixEntry(MSONable):
"""
An object encompassing all data relevant to a solid or ion
in a pourbaix diagram. Each bulk solid/ion has an energy
g of the form: e = e0 + 0.0591 * log10(conc) - nH2O * mu_H2O
+ 0.0591 * (nH - 2nO) * pH + (nH - 2nO - q) * V
Note that the energies corresponding to the input entries
should be formation energies with respect to hydrogen and
oxygen gas in order for the pourbaix diagram formalism to
work. This may be changed to be more flexible in the future.
Args:
entry (ComputedEntry/ComputedStructureEntry/PDEntry/IonEntry): An
entry object
"""
def __init__(self, entry, entry_id=None, concentration=1e-6):
self.entry = entry
if isinstance(entry, IonEntry):
self.concentration = concentration
self.phase_type = "Ion"
self.charge = entry.ion.charge
else:
self.concentration = 1.0
self.phase_type = "Solid"
self.charge = 0.0
self.uncorrected_energy = entry.energy
if entry_id is not None:
self.entry_id = entry_id
elif hasattr(entry, "entry_id") and entry.entry_id:
self.entry_id = entry.entry_id
else:
self.entry_id = None
@property
def npH(self):
return self.entry.composition.get("H", 0.) \
- 2 * self.entry.composition.get("O", 0.)
@property
def nH2O(self):
return self.entry.composition.get("O", 0.)
@property
def nPhi(self):
return self.npH - self.charge
@property
def name(self):
if self.phase_type == "Solid":
return self.entry.composition.reduced_formula + "(s)"
elif self.phase_type == "Ion":
return self.entry.name
@property
def energy(self):
"""
returns energy
Returns (float): total energy of the pourbaix
entry (at pH, V = 0 vs. SHE)
"""
# Note: this implicitly depends on formation energies as input
return self.uncorrected_energy + self.conc_term - (MU_H2O * self.nH2O)
@property
def energy_per_atom(self):
"""
energy per atom of the pourbaix entry
Returns (float): energy per atom
"""
return self.energy / self.composition.num_atoms
def energy_at_conditions(self, pH, V):
"""
Get free energy for a given pH and V
Args:
pH (float): pH at which to evaluate free energy
V (float): voltage at which to evaluate free energy
Returns:
free energy at conditions
"""
return self.energy + self.npH * PREFAC * pH + self.nPhi * V
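# Editor's worked example (hypothetical numbers): an entry with
# energy = -1.0 eV, npH = 2 and nPhi = 1 evaluates at pH = 7, V = 0.5 V to
#     -1.0 + 2 * 0.0591 * 7 + 1 * 0.5 = 0.3274 eV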
def get_element_fraction(self, element):
"""
Gets the elemental fraction of a given non-OH element
Args:
element (Element or str): string or element corresponding
to element to get from composition
Returns:
fraction of element / sum(all non-OH elements)
"""
return self.composition.get(element) * self.normalization_factor
@property
def normalized_energy(self):
"""
Returns:
energy normalized by number of non H or O atoms, e. g.
for Zn2O6, energy / 2 or for AgTe3(OH)3, energy / 4
"""
return self.energy * self.normalization_factor
def normalized_energy_at_conditions(self, pH, V):
"""
Energy at an electrochemical condition, compatible with
numpy arrays for pH/V input
Args:
pH (float): pH at condition
V (float): applied potential at condition
Returns:
energy normalized by number of non-O/H atoms at condition
"""
return self.energy_at_conditions(pH, V) * self.normalization_factor
@property
def conc_term(self):
"""
Returns the concentration contribution to the free energy,
and should only be present when there are ions in the entry
"""
return PREFAC * np.log10(self.concentration)
# TODO: not sure if these are strictly necessary with refactor
def as_dict(self):
"""
Returns dict which contains Pourbaix Entry data.
Note that the pH, voltage, H2O factors are always calculated when
constructing a PourbaixEntry object.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
if isinstance(self.entry, IonEntry):
d["entry_type"] = "Ion"
else:
d["entry_type"] = "Solid"
d["entry"] = self.entry.as_dict()
d["concentration"] = self.concentration
d["entry_id"] = self.entry_id
return d
@classmethod
def from_dict(cls, d):
"""
Invokes a PourbaixEntry from a dict representation.
"""
entry_type = d["entry_type"]
if entry_type == "Ion":
entry = IonEntry.from_dict(d["entry"])
else:
entry = PDEntry.from_dict(d["entry"])
entry_id = d["entry_id"]
concentration = d["concentration"]
return PourbaixEntry(entry, entry_id, concentration)
@property
def normalization_factor(self):
"""
Sum of number of atoms minus the number of H and O in composition
"""
return 1.0 / (self.num_atoms - self.composition.get('H', 0)
- self.composition.get('O', 0))
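# Editor's worked example (taken from the normalized_energy docstring above):
# AgTe3(OH)3 has 10 atoms, of which 3 are H and 3 are O, so the
# normalization factor is 1 / (10 - 3 - 3) = 1/4.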
@property
def composition(self):
"""
Returns composition
"""
return self.entry.composition
@property
def num_atoms(self):
"""
Return number of atoms in current formula. Useful for normalization
"""
return self.composition.num_atoms
def __repr__(self):
return "Pourbaix Entry : {} with energy = {:.4f}, npH = {}, nPhi = {}, nH2O = {}, entry_id = {} ".format(
self.entry.composition, self.energy, self.npH, self.nPhi, self.nH2O, self.entry_id)
def __str__(self):
return self.__repr__()
class MultiEntry(PourbaixEntry):
"""
PourbaixEntry-like object for constructing multi-elemental Pourbaix
diagrams.
"""
def __init__(self, entry_list, weights=None):
"""
Initializes a MultiEntry.
Args:
entry_list ([PourbaixEntry]): List of component PourbaixEntries
weights ([float]): Weights associated with each entry. Default is None
"""
if weights is None:
self.weights = [1.0] * len(entry_list)
else:
self.weights = weights
self.entry_list = entry_list
@lru_cache()
def __getattr__(self, item):
"""
Because most of the attributes here are just weighted
sums over the entry_list (using the entry weights), we save
some space by having a set of conditionals to define the attributes
"""
# Attributes that are weighted averages of entry attributes
if item in ["energy", "npH", "nH2O", "nPhi", "conc_term",
"composition", "uncorrected_energy"]:
# TODO: Composition could be changed for compat with sum
if item == "composition":
start = Composition({})
else:
start = 0
return sum([getattr(e, item) * w
for e, w in zip(self.entry_list, self.weights)], start)
# Attributes that are just lists of entry attributes
elif item in ["entry_id", "phase_type"]:
return [getattr(e, item) for e in self.entry_list]
# normalization_factor, num_atoms should work from superclass
return self.__getattribute__(item)
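# Editor's illustration: with entry energies of -1.0 and -2.0 eV and the
# default weights [1.0, 1.0], MultiEntry.energy returns the weighted sum
# -3.0 rather than the mean.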
@property
def name(self):
"""
MultiEntry name, i. e. the name of each entry joined by ' + '
"""
return " + ".join([e.name for e in self.entry_list])
def __repr__(self):
return "Multiple Pourbaix Entry: energy = {:.4f}, npH = {}, nPhi = {}, nH2O = {}, entry_id = {}, species: {}" \
.format(self.energy, self.npH, self.nPhi, self.nH2O, self.entry_id, self.name)
def __str__(self):
return self.__repr__()
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entry_list": [e.as_dict() for e in self.entry_list],
"weights": self.weights}
@classmethod
def from_dict(cls, d):
entry_list = [PourbaixEntry.from_dict(e) for e in d.get("entry_list")]
return cls(entry_list, d.get("weights"))
# TODO: this class isn't particularly useful in its current form, could be
# refactored to include information about the reference solid
class IonEntry(PDEntry):
"""
Object similar to PDEntry, but contains an Ion object instead of a
Composition object.
Args:
ion: Ion object
energy: Energy for composition.
name: Optional parameter to name the entry. Defaults to the
chemical formula.
.. attribute:: name
A name for the entry. This is the string shown in the phase diagrams.
By default, this is the reduced formula for the composition, but can be
set to some other string for display purposes.
"""
def __init__(self, ion, energy, name=None, attribute=None):
self.ion = ion
# Auto-assign name
name = name if name else self.ion.reduced_formula
super(IonEntry, self).__init__(
composition=ion.composition, energy=energy, name=name,
attribute=attribute)
@classmethod
def from_dict(cls, d):
"""
Returns an IonEntry object from a dict.
"""
return IonEntry(Ion.from_dict(d["ion"]), d["energy"], d.get("name"),
d.get("attribute"))
def as_dict(self):
"""
Creates a dict of composition, energy, and ion name
"""
d = {"ion": self.ion.as_dict(), "energy": self.energy,
"name": self.name}
return d
def __repr__(self):
return "IonEntry : {} with energy = {:.4f}".format(self.composition,
self.energy)
def __str__(self):
return self.__repr__()
def ion_or_solid_comp_object(formula):
"""
Returns either an ion object or composition object given
a formula.
Args:
        formula: String formula, e.g. for an ion: NaOH(aq), Na[+];
            for a solid: Fe2O3(s), Fe(s), Na2O
Returns:
Composition/Ion object
"""
m = re.search(r"\[([^\[\]]+)\]|\(aq\)", formula)
if m:
comp_obj = Ion.from_formula(formula)
elif re.search(r"\(s\)", formula):
comp_obj = Composition(formula[:-3])
else:
comp_obj = Composition(formula)
return comp_obj
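# Quick illustration, not part of the original module, of the parsing
# convention above: bracketed charges or an "(aq)" suffix yield an Ion,
# while an "(s)" suffix or a bare formula yields a Composition.
def _demo_ion_or_solid():
    print(type(ion_or_solid_comp_object("Fe[3+]")))    # Ion
    print(type(ion_or_solid_comp_object("NaOH(aq)")))  # Ion
    print(type(ion_or_solid_comp_object("Fe2O3(s)")))  # Composition
    print(type(ion_or_solid_comp_object("Na2O")))      # Composition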
ELEMENTS_HO = {Element('H'), Element('O')}
# TODO: the solids filter breaks some of the functionality of the
# heatmap plotter, because the reference states for decomposition
# don't include oxygen/hydrogen in the OER/HER regions
# TODO: create a from_phase_diagram class method for non-formation energy
# invocation
# TODO: invocation from a MultiEntry entry list could be a bit more robust
# TODO: serialization is still a bit rough around the edges
class PourbaixDiagram(MSONable):
"""
Class to create a Pourbaix diagram from entries
Args:
entries ([PourbaixEntry] or [MultiEntry]): Entries list
containing Solids and Ions or a list of MultiEntries
comp_dict ({str: float}): Dictionary of compositions,
            defaults to equal parts of each element
conc_dict ({str: float}): Dictionary of ion concentrations,
defaults to 1e-6 for each element
filter_solids (bool): applying this filter to a pourbaix
diagram ensures all included phases are filtered by
stability on the compositional phase diagram. This
breaks some of the functionality of the analysis,
though, so use with caution.
nproc (int): number of processes to generate multientries with
in parallel. Defaults to None (serial processing)
"""
def __init__(self, entries, comp_dict=None, conc_dict=None,
filter_solids=False, nproc=None):
entries = deepcopy(entries)
# Get non-OH elements
self.pbx_elts = set(itertools.chain.from_iterable(
[entry.composition.elements for entry in entries]))
self.pbx_elts = list(self.pbx_elts - ELEMENTS_HO)
self.dim = len(self.pbx_elts) - 1
# Process multientry inputs
if isinstance(entries[0], MultiEntry):
self._processed_entries = entries
# Extract individual entries
single_entries = list(set(itertools.chain.from_iterable(
[e.entry_list for e in entries])))
self._unprocessed_entries = single_entries
self._filtered_entries = single_entries
self._conc_dict = None
self._elt_comp = {k: v for k, v in entries[0].composition.items()
if k not in ELEMENTS_HO}
self._multielement = True
# Process single entry inputs
else:
# Set default conc/comp dicts
if not comp_dict:
comp_dict = {elt.symbol: 1. / len(self.pbx_elts) for elt in self.pbx_elts}
if not conc_dict:
conc_dict = {elt.symbol: 1e-6 for elt in self.pbx_elts}
self._conc_dict = conc_dict
self._elt_comp = comp_dict
self.pourbaix_elements = self.pbx_elts
solid_entries = [entry for entry in entries
if entry.phase_type == "Solid"]
ion_entries = [entry for entry in entries
if entry.phase_type == "Ion"]
# If a conc_dict is specified, override individual entry concentrations
for entry in ion_entries:
ion_elts = list(set(entry.composition.elements) - ELEMENTS_HO)
# TODO: the logic here for ion concentration setting is in two
# places, in PourbaixEntry and here, should be consolidated
if len(ion_elts) == 1:
entry.concentration = conc_dict[ion_elts[0].symbol] \
* entry.normalization_factor
elif len(ion_elts) > 1 and not entry.concentration:
raise ValueError("Elemental concentration not compatible "
"with multi-element ions")
self._unprocessed_entries = solid_entries + ion_entries
if not len(solid_entries + ion_entries) == len(entries):
raise ValueError("All supplied entries must have a phase type of "
"either \"Solid\" or \"Ion\"")
if filter_solids:
# O is 2.46 b/c pbx entry finds energies referenced to H2O
entries_HO = [ComputedEntry('H', 0), ComputedEntry('O', 2.46)]
solid_pd = PhaseDiagram(solid_entries + entries_HO)
solid_entries = list(set(solid_pd.stable_entries) - set(entries_HO))
self._filtered_entries = solid_entries + ion_entries
if len(comp_dict) > 1:
self._multielement = True
self._processed_entries = self._preprocess_pourbaix_entries(
self._filtered_entries, nproc=nproc)
else:
self._processed_entries = self._filtered_entries
self._multielement = False
self._stable_domains, self._stable_domain_vertices = \
self.get_pourbaix_domains(self._processed_entries)
def _convert_entries_to_points(self, pourbaix_entries):
"""
Args:
pourbaix_entries ([PourbaixEntry]): list of pourbaix entries
to process into vectors in nph-nphi-composition space
Returns:
list of vectors, [[nph, nphi, e0, x1, x2, ..., xn-1]]
corresponding to each entry in nph-nphi-composition space
"""
vecs = [[entry.npH, entry.nPhi, entry.energy] +
[entry.composition.get(elt) for elt in self.pbx_elts[:-1]]
for entry in pourbaix_entries]
vecs = np.array(vecs)
norms = np.transpose([[entry.normalization_factor
for entry in pourbaix_entries]])
vecs *= norms
return vecs
def _get_hull_in_nph_nphi_space(self, entries):
"""
Generates convex hull of pourbaix diagram entries in composition,
npH, and nphi space. This enables filtering of multi-entries
such that only compositionally stable combinations of entries
are included.
Args:
entries ([PourbaixEntry]): list of PourbaixEntries to construct
the convex hull
Returns: list of entries and stable facets corresponding to that
list of entries
"""
ion_entries = [entry for entry in entries
if entry.phase_type == "Ion"]
solid_entries = [entry for entry in entries
if entry.phase_type == "Solid"]
# Pre-filter solids based on min at each composition
logger.debug("Pre-filtering solids by min energy at each composition")
sorted_entries = sorted(
solid_entries, key=lambda x: (x.composition.reduced_composition,
x.entry.energy_per_atom))
grouped_by_composition = itertools.groupby(
sorted_entries, key=lambda x: x.composition.reduced_composition)
min_entries = [list(grouped_entries)[0]
for comp, grouped_entries in grouped_by_composition]
min_entries += ion_entries
logger.debug("Constructing nph-nphi-composition points for qhull")
vecs = self._convert_entries_to_points(min_entries)
maxes = np.max(vecs[:, :3], axis=0)
extra_point = np.concatenate(
[maxes, np.ones(self.dim) / self.dim], axis=0)
# Add padding for extra point
pad = 1000
extra_point[2] += pad
points = np.concatenate([vecs, np.array([extra_point])], axis=0)
logger.debug("Constructing convex hull in nph-nphi-composition space")
hull = ConvexHull(points, qhull_options="QJ i")
# Create facets and remove top
facets = [facet for facet in hull.simplices
if not len(points) - 1 in facet]
if self.dim > 1:
logger.debug("Filtering facets by pourbaix composition")
valid_facets = []
for facet in facets:
comps = vecs[facet][:, 3:]
full_comps = np.concatenate([
comps, 1 - np.sum(comps, axis=1).reshape(len(comps), 1)], axis=1)
                # Ensure a compositional interior point exists in the simplex
if np.linalg.matrix_rank(full_comps) > self.dim:
valid_facets.append(facet)
else:
valid_facets = facets
return min_entries, valid_facets
def _preprocess_pourbaix_entries(self, entries, nproc=None):
"""
Generates multi-entries for pourbaix diagram
Args:
entries ([PourbaixEntry]): list of PourbaixEntries to preprocess
into MultiEntries
nproc (int): number of processes to be used in parallel
treatment of entry combos
Returns:
([MultiEntry]) list of stable MultiEntry candidates
"""
# Get composition
tot_comp = Composition(self._elt_comp)
min_entries, valid_facets = self._get_hull_in_nph_nphi_space(entries)
        combos = []
        for facet in valid_facets:
            for n in range(1, self.dim + 2):
                these_combos = []
                for combo in itertools.combinations(facet, n):
                    these_entries = [min_entries[idx] for idx in combo]
                    these_combos.append(frozenset(these_entries))
                combos.append(these_combos)
        # Deduplicate the entry combinations and convert back to lists
        all_combos = [list(combo) for combo in
                      set(itertools.chain.from_iterable(combos))]
multi_entries = []
# Parallel processing of multi-entry generation
if nproc is not None:
f = partial(self.process_multientry, prod_comp=tot_comp)
with Pool(nproc) as p:
multi_entries = list(tqdm(p.imap(f, all_combos),
total=len(all_combos)))
multi_entries = list(filter(bool, multi_entries))
else:
# Serial processing of multi-entry generation
for combo in tqdm(all_combos):
multi_entry = self.process_multientry(combo, prod_comp=tot_comp)
if multi_entry:
multi_entries.append(multi_entry)
return multi_entries
def _generate_multielement_entries(self, entries, nproc=None):
"""
Create entries for multi-element Pourbaix construction.
This works by finding all possible linear combinations
of entries that can result in the specified composition
from the initialized comp_dict.
Args:
entries ([PourbaixEntries]): list of pourbaix entries
to process into MultiEntries
nproc (int): number of processes to be used in parallel
treatment of entry combos
"""
N = len(self._elt_comp) # No. of elements
total_comp = Composition(self._elt_comp)
# generate all combinations of compounds that have all elements
entry_combos = [itertools.combinations(
entries, j + 1) for j in range(N)]
entry_combos = itertools.chain.from_iterable(entry_combos)
entry_combos = filter(lambda x: total_comp < MultiEntry(x).composition,
entry_combos)
# Generate and filter entries
processed_entries = []
total = sum([comb(len(entries), j + 1)
for j in range(N)])
if total > 1e6:
warnings.warn("Your pourbaix diagram includes {} entries and may "
"take a long time to generate.".format(total))
# Parallel processing of multi-entry generation
if nproc is not None:
f = partial(self.process_multientry, prod_comp=total_comp)
with Pool(nproc) as p:
processed_entries = list(tqdm(p.imap(f, entry_combos),
total=total))
processed_entries = list(filter(bool, processed_entries))
# Serial processing of multi-entry generation
else:
for entry_combo in entry_combos:
processed_entry = self.process_multientry(entry_combo, total_comp)
if processed_entry is not None:
processed_entries.append(processed_entry)
return processed_entries
@staticmethod
def process_multientry(entry_list, prod_comp, coeff_threshold=1e-4):
"""
Static method for finding a multientry based on
a list of entries and a product composition.
Essentially checks to see if a valid aqueous
reaction exists between the entries and the
product composition and returns a MultiEntry
with weights according to the coefficients if so.
Args:
entry_list ([Entry]): list of entries from which to
create a MultiEntry
prod_comp (Composition): composition constraint for setting
weights of MultiEntry
coeff_threshold (float): threshold of stoichiometric
coefficients to filter, if weights are lower than
this value, the entry is not returned
"""
dummy_oh = [Composition("H"), Composition("O")]
try:
            # Get balanced reaction coefficients; every entry must appear as a
            # reactant with a coefficient magnitude above the threshold
# Note that we get reduced compositions for solids and non-reduced
# compositions for ions because ions aren't normalized due to
# their charge state.
entry_comps = [e.composition for e in entry_list]
rxn = Reaction(entry_comps + dummy_oh, [prod_comp])
coeffs = -np.array([rxn.get_coeff(comp) for comp in entry_comps])
# Return None if reaction coeff threshold is not met
if (coeffs > coeff_threshold).all():
return MultiEntry(entry_list, weights=coeffs.tolist())
else:
return None
except ReactionError:
return None
@staticmethod
def get_pourbaix_domains(pourbaix_entries, limits=None):
"""
        Returns a set of pourbaix stable domains (i.e. polygons) in
pH-V space from a list of pourbaix_entries
This function works by using scipy's HalfspaceIntersection
function to construct all of the 2-D polygons that form the
boundaries of the planes corresponding to individual entry
gibbs free energies as a function of pH and V. Hyperplanes
of the form a*pH + b*V + 1 - g(0, 0) are constructed and
supplied to HalfspaceIntersection, which then finds the
boundaries of each pourbaix region using the intersection
points.
Args:
pourbaix_entries ([PourbaixEntry]): Pourbaix entries
with which to construct stable pourbaix domains
limits ([[float]]): limits in which to do the pourbaix
analysis
Returns:
Returns a dict of the form {entry: [boundary_points]}.
The list of boundary points are the sides of the N-1
dim polytope bounding the allowable ph-V range of each entry.
"""
if limits is None:
limits = [[-2, 16], [-4, 4]]
# Get hyperplanes
hyperplanes = [np.array([-PREFAC * entry.npH, -entry.nPhi,
0, -entry.energy]) * entry.normalization_factor
for entry in pourbaix_entries]
hyperplanes = np.array(hyperplanes)
hyperplanes[:, 2] = 1
max_contribs = np.max(np.abs(hyperplanes), axis=0)
g_max = np.dot(-max_contribs, [limits[0][1], limits[1][1], 0, 1])
# Add border hyperplanes and generate HalfspaceIntersection
border_hyperplanes = [[-1, 0, 0, limits[0][0]],
[1, 0, 0, -limits[0][1]],
[0, -1, 0, limits[1][0]],
[0, 1, 0, -limits[1][1]],
[0, 0, -1, 2 * g_max]]
hs_hyperplanes = np.vstack([hyperplanes, border_hyperplanes])
interior_point = np.average(limits, axis=1).tolist() + [g_max]
hs_int = HalfspaceIntersection(hs_hyperplanes, np.array(interior_point))
# organize the boundary points by entry
pourbaix_domains = {entry: [] for entry in pourbaix_entries}
for intersection, facet in zip(hs_int.intersections,
hs_int.dual_facets):
for v in facet:
if v < len(pourbaix_entries):
this_entry = pourbaix_entries[v]
pourbaix_domains[this_entry].append(intersection)
# Remove entries with no pourbaix region
pourbaix_domains = {k: v for k, v in pourbaix_domains.items() if v}
pourbaix_domain_vertices = {}
for entry, points in pourbaix_domains.items():
points = np.array(points)[:, :2]
# Initial sort to ensure consistency
points = points[np.lexsort(np.transpose(points))]
center = np.average(points, axis=0)
points_centered = points - center
# Sort points by cross product of centered points,
# isn't strictly necessary but useful for plotting tools
points_centered = sorted(points_centered,
key=cmp_to_key(lambda x, y: x[0] * y[1] - x[1] * y[0]))
points = points_centered + center
# Create simplices corresponding to pourbaix boundary
simplices = [Simplex(points[indices])
for indices in ConvexHull(points).simplices]
pourbaix_domains[entry] = simplices
pourbaix_domain_vertices[entry] = points
return pourbaix_domains, pourbaix_domain_vertices
def find_stable_entry(self, pH, V):
"""
        Finds the stable entry at a given pH, V condition.
        Args:
            pH (float): pH at which to find the stable entry
            V (float): V at which to find the stable entry
        Returns:
            (PourbaixEntry or MultiEntry): entry with the minimum
            normalized energy at the given pH, V condition
        """
energies_at_conditions = [e.normalized_energy_at_conditions(pH, V)
for e in self.stable_entries]
return self.stable_entries[np.argmin(energies_at_conditions)]
def get_decomposition_energy(self, entry, pH, V):
"""
Finds decomposition to most stable entries in eV/atom,
supports vectorized inputs for pH and V
Args:
entry (PourbaixEntry): PourbaixEntry corresponding to
compound to find the decomposition for
pH (float, [float]): pH at which to find the decomposition
V (float, [float]): voltage at which to find the decomposition
Returns:
Decomposition energy for the entry, i. e. the energy above
the "pourbaix hull" in eV/atom at the given conditions
"""
# Check composition consistency between entry and Pourbaix diagram:
pbx_comp = Composition(self._elt_comp).fractional_composition
entry_pbx_comp = Composition(
{elt: coeff for elt, coeff in entry.composition.items()
if elt not in ELEMENTS_HO}).fractional_composition
if entry_pbx_comp != pbx_comp:
raise ValueError("Composition of stability entry does not match "
"Pourbaix Diagram")
entry_normalized_energy = entry.normalized_energy_at_conditions(pH, V)
hull_energy = self.get_hull_energy(pH, V)
decomposition_energy = entry_normalized_energy - hull_energy
# Convert to eV/atom instead of eV/normalized formula unit
decomposition_energy /= entry.normalization_factor
decomposition_energy /= entry.composition.num_atoms
return decomposition_energy
def get_hull_energy(self, pH, V):
"""
Gets the minimum energy of the pourbaix "basin" that is formed
from the stable pourbaix planes. Vectorized.
Args:
pH (float or [float]): pH at which to find the hull energy
V (float or [float]): V at which to find the hull energy
Returns:
(float or [float]) minimum pourbaix energy at conditions
"""
all_gs = np.array([e.normalized_energy_at_conditions(
pH, V) for e in self.stable_entries])
base = np.min(all_gs, axis=0)
return base
def get_stable_entry(self, pH, V):
"""
Gets the stable entry at a given pH, V condition
Args:
pH (float): pH at a given condition
V (float): V at a given condition
Returns:
(PourbaixEntry or MultiEntry): pourbaix or multi-entry
            corresponding to the minimum energy entry at a given
pH, V condition
"""
all_gs = np.array([e.normalized_energy_at_conditions(
pH, V) for e in self.stable_entries])
return self.stable_entries[np.argmin(all_gs)]
@property
def stable_entries(self):
"""
Returns the stable entries in the Pourbaix diagram.
"""
return list(self._stable_domains.keys())
@property
def unstable_entries(self):
"""
Returns all unstable entries in the Pourbaix diagram
"""
return [e for e in self.all_entries if e not in self.stable_entries]
@property
def all_entries(self):
"""
Return all entries used to generate the pourbaix diagram
"""
return self._processed_entries
@property
def unprocessed_entries(self):
"""
Return unprocessed entries
"""
return self._unprocessed_entries
def as_dict(self, include_unprocessed_entries=False):
if include_unprocessed_entries:
entries = [e.as_dict() for e in self._unprocessed_entries]
else:
entries = [e.as_dict() for e in self._processed_entries]
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entries": entries,
"comp_dict": self._elt_comp,
"conc_dict": self._conc_dict}
return d
@classmethod
def from_dict(cls, d):
decoded_entries = MontyDecoder().process_decoded(d['entries'])
return cls(decoded_entries, d.get('comp_dict'),
d.get('conc_dict'))
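# End-to-end usage sketch, not part of the original module. It assumes the
# caller already has a list of PourbaixEntry objects (e.g. fetched for the
# Fe-O-H system from an external database); the queried conditions are
# arbitrary illustrative values.
def _demo_pourbaix_diagram(pourbaix_entries):
    pbx = PourbaixDiagram(pourbaix_entries, filter_solids=True)
    stable = pbx.stable_entries
    print("Stable entries:", [e.name for e in stable])
    # Decomposition energy (eV/atom) of one stable entry at pH 7 and 0 V
    print(pbx.get_decomposition_energy(stable[0], pH=7, V=0))
    # get_hull_energy supports vectorized pH/V inputs
    print(pbx.get_hull_energy(pH=np.linspace(0, 14, 5), V=0.0))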
class PourbaixPlotter:
"""
    A plotter class for Pourbaix diagrams.
Args:
pourbaix_diagram (PourbaixDiagram): A PourbaixDiagram object.
"""
def __init__(self, pourbaix_diagram):
self._pbx = pourbaix_diagram
def show(self, *args, **kwargs):
"""
Shows the pourbaix plot
Args:
*args: args to get_pourbaix_plot
**kwargs: kwargs to get_pourbaix_plot
Returns:
None
"""
plt = self.get_pourbaix_plot(*args, **kwargs)
plt.show()
def get_pourbaix_plot(self, limits=None, title="",
label_domains=True, plt=None):
"""
Plot Pourbaix diagram.
Args:
limits: 2D list containing limits of the Pourbaix diagram
of the form [[xlo, xhi], [ylo, yhi]]
title (str): Title to display on plot
label_domains (bool): whether to label pourbaix domains
plt (pyplot): Pyplot instance for plotting
Returns:
plt (pyplot) - matplotlib plot object with pourbaix diagram
"""
if limits is None:
limits = [[-2, 16], [-3, 3]]
plt = plt or pretty_plot(16)
xlim = limits[0]
ylim = limits[1]
h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
[xlim[1], -xlim[1] * PREFAC]])
o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
[xlim[1], -xlim[1] * PREFAC + 1.23]])
neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
ax = plt.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
lw = 3
plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
for entry, vertices in self._pbx._stable_domain_vertices.items():
center = np.average(vertices, axis=0)
x, y = np.transpose(np.vstack([vertices, vertices[0]]))
plt.plot(x, y, 'k-', linewidth=lw)
if label_domains:
plt.annotate(generate_entry_label(entry), center, ha='center',
va='center', fontsize=20, color="b").draggable()
plt.xlabel("pH")
plt.ylabel("E (V)")
plt.title(title, fontsize=20, fontweight='bold')
return plt
    def plot_entry_stability(self, entry, pH_range=None, pH_resolution=100,
                             V_range=None, V_resolution=100, e_hull_max=1,
                             cmap='RdYlBu_r', **kwargs):
        """
        Plots the stability, i.e. the decomposition energy, of an entry
        across the Pourbaix diagram as a heatmap.
        Args:
            entry (PourbaixEntry): entry to plot the stability of
            pH_range ([float]): pH range of the plot, defaults to [-2, 16]
            pH_resolution (int): number of pH grid points
            V_range ([float]): voltage range of the plot, defaults to [-3, 3]
            V_resolution (int): number of voltage grid points
            e_hull_max (float): upper limit (eV/atom) of the stability colorbar
            cmap (str): matplotlib colormap name
            **kwargs: kwargs passed through to get_pourbaix_plot
        """
        if pH_range is None:
            pH_range = [-2, 16]
        if V_range is None:
            V_range = [-3, 3]
# plot the Pourbaix diagram
plt = self.get_pourbaix_plot(**kwargs)
pH, V = np.mgrid[pH_range[0]:pH_range[1]:pH_resolution * 1j, V_range[0]:V_range[1]:V_resolution * 1j]
stability = self._pbx.get_decomposition_energy(entry, pH, V)
# Plot stability map
plt.pcolor(pH, V, stability, cmap=cmap, vmin=0, vmax=e_hull_max)
cbar = plt.colorbar()
cbar.set_label("Stability of {} (eV/atom)".format(
generate_entry_label(entry)))
# Set ticklabels
# ticklabels = [t.get_text() for t in cbar.ax.get_yticklabels()]
# ticklabels[-1] = '>={}'.format(ticklabels[-1])
# cbar.ax.set_yticklabels(ticklabels)
return plt
def domain_vertices(self, entry):
"""
Returns the vertices of the Pourbaix domain.
Args:
entry: Entry for which domain vertices are desired
Returns:
list of vertices
"""
return self._pbx._stable_domain_vertices[entry]
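# Plotting sketch, not part of the original module; assumes matplotlib is
# available and `pbx` is a previously constructed PourbaixDiagram. The axis
# limits and e_hull_max are arbitrary illustrative choices.
def _demo_plotter(pbx):
    plotter = PourbaixPlotter(pbx)
    # Labeled diagram over a custom pH/V window
    plotter.get_pourbaix_plot(limits=[[-2, 16], [-2, 2]]).show()
    # Heatmap of the decomposition energy of one stable entry
    plotter.plot_entry_stability(pbx.stable_entries[0], e_hull_max=0.5).show()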
def generate_entry_label(entry):
"""
Generates a label for the pourbaix plotter
Args:
entry (PourbaixEntry or MultiEntry): entry to get a label for
"""
if isinstance(entry, MultiEntry):
return " + ".join([latexify_ion(latexify(e.name)) for e in entry.entry_list])
else:
return latexify_ion(latexify(entry.name))
def latexify_ion(formula):
return re.sub(r"()\[([^)]*)\]", r"\1$^{\2}$", formula)
|
fraricci/pymatgen
|
pymatgen/analysis/pourbaix_diagram.py
|
Python
|
mit
| 39,890
|
[
"pymatgen"
] |
923ed68cfa12db8805fbefa5d90a862a887bbf22f1ced978e04ced37502f8a66
|
"""
Representations and Inference for Logic. (Chapters 7-9, 12)
Covers both Propositional and First-Order Logic. First we have four
important data types:
KB Abstract class holds a knowledge base of logical expressions
KB_Agent Abstract class subclasses agents.Agent
Expr A logical expression, imported from utils.py
substitution Implemented as a dictionary of var:value pairs, {x:1, y:x}
Be careful: some functions take an Expr as argument, and some take a KB.
Logical expressions can be created with Expr or expr, both imported from
utils; expr adds the capability to write a string that uses
the connectives ==>, <==, <=>, or <=/=>. But be careful: these have the
operator precedence of commas; you may need to add parens to make precedence work.
See logic.ipynb for examples.
Then we implement various functions for doing logical inference:
pl_true Evaluate a propositional logical sentence in a model
tt_entails Say if a statement is entailed by a KB
pl_resolution Do resolution on propositional sentences
dpll_satisfiable See if a propositional sentence is satisfiable
WalkSAT Try to find a solution for a set of clauses
And a few other functions:
to_cnf Convert to conjunctive normal form
unify Do unification of two FOL sentences
diff, simp Symbolic differentiation and simplification
"""
import heapq
import itertools
import random
from collections import defaultdict, Counter
import networkx as nx
from agents import Agent, Glitter, Bump, Stench, Breeze, Scream
from csp import parse_neighbors, UniversalDict
from search import astar_search, PlanRoute
from utils import remove_all, unique, first, probability, isnumber, issequence, Expr, expr, subexpressions, extend
class KB:
"""A knowledge base to which you can tell and ask sentences.
To create a KB, first subclass this class and implement
tell, ask_generator, and retract. Why ask_generator instead of ask?
The book is a bit vague on what ask means --
For a Propositional Logic KB, ask(P & Q) returns True or False, but for an
FOL KB, something like ask(Brother(x, y)) might return many substitutions
such as {x: Cain, y: Abel}, {x: Abel, y: Cain}, {x: George, y: Jeb}, etc.
So ask_generator generates these one at a time, and ask either returns the
first one or returns False."""
def __init__(self, sentence=None):
if sentence:
self.tell(sentence)
def tell(self, sentence):
"""Add the sentence to the KB."""
raise NotImplementedError
def ask(self, query):
"""Return a substitution that makes the query true, or, failing that, return False."""
return first(self.ask_generator(query), default=False)
def ask_generator(self, query):
"""Yield all the substitutions that make query true."""
raise NotImplementedError
def retract(self, sentence):
"""Remove sentence from the KB."""
raise NotImplementedError
class PropKB(KB):
"""A KB for propositional logic. Inefficient, with no indexing."""
def __init__(self, sentence=None):
super().__init__(sentence)
self.clauses = []
def tell(self, sentence):
"""Add the sentence's clauses to the KB."""
self.clauses.extend(conjuncts(to_cnf(sentence)))
def ask_generator(self, query):
"""Yield the empty substitution {} if KB entails query; else no results."""
if tt_entails(Expr('&', *self.clauses), query):
yield {}
def ask_if_true(self, query):
"""Return True if the KB entails query, else return False."""
for _ in self.ask_generator(query):
return True
return False
def retract(self, sentence):
"""Remove the sentence's clauses from the KB."""
for c in conjuncts(to_cnf(sentence)):
if c in self.clauses:
self.clauses.remove(c)
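# Usage sketch, not part of the original file: tell a couple of sentences to
# a PropKB and query it. Since ask_generator relies on tt_entails, queries
# are decided by truth-table enumeration over the KB's symbols.
def _demo_prop_kb():
    kb = PropKB()
    kb.tell(expr('P ==> Q'))
    kb.tell(expr('P'))
    print(kb.ask_if_true(expr('Q')))   # True: Q is entailed by P and P ==> Q
    print(kb.ask_if_true(expr('~Q')))  # False: ~Q is not entailed
    kb.retract(expr('P'))
    print(kb.ask_if_true(expr('Q')))   # False once P is retracted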
# ______________________________________________________________________________
def KBAgentProgram(kb):
"""
[Figure 7.1]
A generic logical knowledge-based agent program.
"""
steps = itertools.count()
def program(percept):
t = next(steps)
kb.tell(make_percept_sentence(percept, t))
action = kb.ask(make_action_query(t))
kb.tell(make_action_sentence(action, t))
return action
def make_percept_sentence(percept, t):
return Expr('Percept')(percept, t)
def make_action_query(t):
return expr('ShouldDo(action, {})'.format(t))
def make_action_sentence(action, t):
return Expr('Did')(action[expr('action')], t)
return program
def is_symbol(s):
"""A string s is a symbol if it starts with an alphabetic char.
>>> is_symbol('R2D2')
True
"""
return isinstance(s, str) and s[:1].isalpha()
def is_var_symbol(s):
"""A logic variable symbol is an initial-lowercase string.
>>> is_var_symbol('EXE')
False
"""
return is_symbol(s) and s[0].islower()
def is_prop_symbol(s):
"""A proposition logic symbol is an initial-uppercase string.
>>> is_prop_symbol('exe')
False
"""
return is_symbol(s) and s[0].isupper()
def variables(s):
"""Return a set of the variables in expression s.
>>> variables(expr('F(x, x) & G(x, y) & H(y, z) & R(A, z, 2)')) == {x, y, z}
True
"""
return {x for x in subexpressions(s) if is_variable(x)}
def is_definite_clause(s):
"""Returns True for exprs s of the form A & B & ... & C ==> D,
where all literals are positive. In clause form, this is
~A | ~B | ... | ~C | D, where exactly one clause is positive.
>>> is_definite_clause(expr('Farmer(Mac)'))
True
"""
if is_symbol(s.op):
return True
elif s.op == '==>':
antecedent, consequent = s.args
return is_symbol(consequent.op) and all(is_symbol(arg.op) for arg in conjuncts(antecedent))
else:
return False
def parse_definite_clause(s):
"""Return the antecedents and the consequent of a definite clause."""
assert is_definite_clause(s)
if is_symbol(s.op):
return [], s
else:
antecedent, consequent = s.args
return conjuncts(antecedent), consequent
# Useful constant Exprs used in examples and code:
A, B, C, D, E, F, G, P, Q, a, x, y, z, u = map(Expr, 'ABCDEFGPQaxyzu')
# ______________________________________________________________________________
def tt_entails(kb, alpha):
"""
[Figure 7.10]
Does kb entail the sentence alpha? Use truth tables. For propositional
kb's and sentences. Note that the 'kb' should be an Expr which is a
conjunction of clauses.
>>> tt_entails(expr('P & Q'), expr('Q'))
True
"""
assert not variables(alpha)
symbols = list(prop_symbols(kb & alpha))
return tt_check_all(kb, alpha, symbols, {})
def tt_check_all(kb, alpha, symbols, model):
"""Auxiliary routine to implement tt_entails."""
if not symbols:
if pl_true(kb, model):
result = pl_true(alpha, model)
assert result in (True, False)
return result
else:
return True
else:
P, rest = symbols[0], symbols[1:]
return (tt_check_all(kb, alpha, rest, extend(model, P, True)) and
tt_check_all(kb, alpha, rest, extend(model, P, False)))
def prop_symbols(x):
"""Return the set of all propositional symbols in x."""
if not isinstance(x, Expr):
return set()
elif is_prop_symbol(x.op):
return {x}
else:
return {symbol for arg in x.args for symbol in prop_symbols(arg)}
def constant_symbols(x):
"""Return the set of all constant symbols in x."""
if not isinstance(x, Expr):
return set()
elif is_prop_symbol(x.op) and not x.args:
return {x}
else:
return {symbol for arg in x.args for symbol in constant_symbols(arg)}
def predicate_symbols(x):
"""Return a set of (symbol_name, arity) in x.
All symbols (even functional) with arity > 0 are considered."""
if not isinstance(x, Expr) or not x.args:
return set()
pred_set = {(x.op, len(x.args))} if is_prop_symbol(x.op) else set()
pred_set.update({symbol for arg in x.args for symbol in predicate_symbols(arg)})
return pred_set
def tt_true(s):
"""Is a propositional sentence a tautology?
>>> tt_true('P | ~P')
True
"""
s = expr(s)
return tt_entails(True, s)
def pl_true(exp, model={}):
"""Return True if the propositional logic expression is true in the model,
and False if it is false. If the model does not specify the value for
every proposition, this may return None to indicate 'not obvious';
this may happen even when the expression is tautological.
>>> pl_true(P, {}) is None
True
"""
if exp in (True, False):
return exp
op, args = exp.op, exp.args
if is_prop_symbol(op):
return model.get(exp)
elif op == '~':
p = pl_true(args[0], model)
if p is None:
return None
else:
return not p
elif op == '|':
result = False
for arg in args:
p = pl_true(arg, model)
if p is True:
return True
if p is None:
result = None
return result
elif op == '&':
result = True
for arg in args:
p = pl_true(arg, model)
if p is False:
return False
if p is None:
result = None
return result
p, q = args
if op == '==>':
return pl_true(~p | q, model)
elif op == '<==':
return pl_true(p | ~q, model)
pt = pl_true(p, model)
if pt is None:
return None
qt = pl_true(q, model)
if qt is None:
return None
if op == '<=>':
return pt == qt
elif op == '^': # xor or 'not equivalent'
return pt != qt
else:
raise ValueError('Illegal operator in logic expression' + str(exp))
# ______________________________________________________________________________
# Convert to Conjunctive Normal Form (CNF)
def to_cnf(s):
"""
[Page 253]
Convert a propositional logical sentence to conjunctive normal form.
That is, to the form ((A | ~B | ...) & (B | C | ...) & ...)
>>> to_cnf('~(B | C)')
(~B & ~C)
"""
    s = expr(s)
s = eliminate_implications(s) # Steps 1, 2 from p. 253
s = move_not_inwards(s) # Step 3
return distribute_and_over_or(s) # Step 4
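# Worked illustration, not part of the original file, tracing the three CNF
# conversion steps on a small biconditional sentence.
def _demo_to_cnf_steps():
    s = expr('B <=> (P1 | P2)')
    s = eliminate_implications(s)  # (B | ~(P1 | P2)) & ((P1 | P2) | ~B)
    s = move_not_inwards(s)        # (B | (~P1 & ~P2)) & ((P1 | P2) | ~B)
    s = distribute_and_over_or(s)  # ((~P1 | B) & (~P2 | B) & (P1 | P2 | ~B))
    print(s)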
def eliminate_implications(s):
"""Change implications into equivalent form with only &, |, and ~ as logical operators."""
s = expr(s)
if not s.args or is_symbol(s.op):
return s # Atoms are unchanged.
args = list(map(eliminate_implications, s.args))
a, b = args[0], args[-1]
if s.op == '==>':
return b | ~a
elif s.op == '<==':
return a | ~b
elif s.op == '<=>':
return (a | ~b) & (b | ~a)
elif s.op == '^':
assert len(args) == 2 # TODO: relax this restriction
return (a & ~b) | (~a & b)
else:
assert s.op in ('&', '|', '~')
return Expr(s.op, *args)
def move_not_inwards(s):
"""Rewrite sentence s by moving negation sign inward.
>>> move_not_inwards(~(A | B))
(~A & ~B)
"""
s = expr(s)
if s.op == '~':
def NOT(b):
return move_not_inwards(~b)
a = s.args[0]
if a.op == '~':
return move_not_inwards(a.args[0]) # ~~A ==> A
if a.op == '&':
return associate('|', list(map(NOT, a.args)))
if a.op == '|':
return associate('&', list(map(NOT, a.args)))
return s
elif is_symbol(s.op) or not s.args:
return s
else:
return Expr(s.op, *list(map(move_not_inwards, s.args)))
def distribute_and_over_or(s):
"""Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in CNF.
>>> distribute_and_over_or((A & B) | C)
((A | C) & (B | C))
"""
s = expr(s)
if s.op == '|':
s = associate('|', s.args)
if s.op != '|':
return distribute_and_over_or(s)
if len(s.args) == 0:
return False
if len(s.args) == 1:
return distribute_and_over_or(s.args[0])
conj = first(arg for arg in s.args if arg.op == '&')
if not conj:
return s
others = [a for a in s.args if a is not conj]
rest = associate('|', others)
return associate('&', [distribute_and_over_or(c | rest)
for c in conj.args])
elif s.op == '&':
return associate('&', list(map(distribute_and_over_or, s.args)))
else:
return s
def associate(op, args):
"""Given an associative op, return an expression with the same
meaning as Expr(op, *args), but flattened -- that is, with nested
instances of the same op promoted to the top level.
>>> associate('&', [(A&B),(B|C),(B&C)])
(A & B & (B | C) & B & C)
>>> associate('|', [A|(B|(C|(A&B)))])
(A | B | C | (A & B))
"""
args = dissociate(op, args)
if len(args) == 0:
return _op_identity[op]
elif len(args) == 1:
return args[0]
else:
return Expr(op, *args)
_op_identity = {'&': True, '|': False, '+': 0, '*': 1}
def dissociate(op, args):
"""Given an associative op, return a flattened list result such
that Expr(op, *result) means the same as Expr(op, *args).
>>> dissociate('&', [A & B])
[A, B]
"""
result = []
def collect(subargs):
for arg in subargs:
if arg.op == op:
collect(arg.args)
else:
result.append(arg)
collect(args)
return result
def conjuncts(s):
"""Return a list of the conjuncts in the sentence s.
>>> conjuncts(A & B)
[A, B]
>>> conjuncts(A | B)
[(A | B)]
"""
return dissociate('&', [s])
def disjuncts(s):
"""Return a list of the disjuncts in the sentence s.
>>> disjuncts(A | B)
[A, B]
>>> disjuncts(A & B)
[(A & B)]
"""
return dissociate('|', [s])
# ______________________________________________________________________________
def pl_resolution(kb, alpha):
"""
[Figure 7.12]
Propositional-logic resolution: say if alpha follows from KB.
>>> pl_resolution(horn_clauses_KB, A)
True
"""
clauses = kb.clauses + conjuncts(to_cnf(~alpha))
new = set()
while True:
n = len(clauses)
pairs = [(clauses[i], clauses[j])
for i in range(n) for j in range(i + 1, n)]
for (ci, cj) in pairs:
resolvents = pl_resolve(ci, cj)
if False in resolvents:
return True
new = new.union(set(resolvents))
if new.issubset(set(clauses)):
return False
for c in new:
if c not in clauses:
clauses.append(c)
def pl_resolve(ci, cj):
"""Return all clauses that can be obtained by resolving clauses ci and cj."""
clauses = []
for di in disjuncts(ci):
for dj in disjuncts(cj):
if di == ~dj or ~di == dj:
clauses.append(associate('|', unique(remove_all(di, disjuncts(ci)) + remove_all(dj, disjuncts(cj)))))
return clauses
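# Small illustration, not part of the original file: resolving (A | B) with
# (~B | C) on the complementary pair B/~B yields the single resolvent (A | C).
def _demo_pl_resolve():
    print(pl_resolve(A | B, ~B | C))  # [(A | C)]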
# ______________________________________________________________________________
class PropDefiniteKB(PropKB):
"""A KB of propositional definite clauses."""
def tell(self, sentence):
"""Add a definite clause to this KB."""
assert is_definite_clause(sentence), "Must be definite clause"
self.clauses.append(sentence)
def ask_generator(self, query):
"""Yield the empty substitution if KB implies query; else nothing."""
if pl_fc_entails(self.clauses, query):
yield {}
def retract(self, sentence):
self.clauses.remove(sentence)
def clauses_with_premise(self, p):
"""Return a list of the clauses in KB that have p in their premise.
This could be cached away for O(1) speed, but we'll recompute it."""
return [c for c in self.clauses if c.op == '==>' and p in conjuncts(c.args[0])]
def pl_fc_entails(kb, q):
"""
[Figure 7.15]
Use forward chaining to see if a PropDefiniteKB entails symbol q.
>>> pl_fc_entails(horn_clauses_KB, expr('Q'))
True
"""
count = {c: len(conjuncts(c.args[0])) for c in kb.clauses if c.op == '==>'}
inferred = defaultdict(bool)
agenda = [s for s in kb.clauses if is_prop_symbol(s.op)]
while agenda:
p = agenda.pop()
if p == q:
return True
if not inferred[p]:
inferred[p] = True
for c in kb.clauses_with_premise(p):
count[c] -= 1
if count[c] == 0:
agenda.append(c.args[1])
return False
"""
[Figure 7.13]
Simple inference in a wumpus world example
"""
wumpus_world_inference = expr('(B11 <=> (P12 | P21)) & ~B11')
"""
[Figure 7.16]
Propositional Logic Forward Chaining example
"""
horn_clauses_KB = PropDefiniteKB()
for clause in ['P ==> Q',
'(L & M) ==> P',
'(B & L) ==> M',
'(A & P) ==> L',
'(A & B) ==> L',
'A', 'B']:
horn_clauses_KB.tell(expr(clause))
"""
Definite clauses KB example
"""
definite_clauses_KB = PropDefiniteKB()
for clause in ['(B & F) ==> E',
'(A & E & F) ==> G',
'(B & C) ==> F',
'(A & B) ==> D',
'(E & F) ==> H',
               '(H & I) ==> J',
'A', 'B', 'C']:
definite_clauses_KB.tell(expr(clause))
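# Query sketch, not part of the original file: forward chaining on the
# definite clauses KB above derives G from the facts A, B and C, while J
# stays underivable because I is never asserted.
def _demo_forward_chaining():
    print(pl_fc_entails(definite_clauses_KB, expr('G')))  # True
    print(pl_fc_entails(definite_clauses_KB, expr('J')))  # False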
# ______________________________________________________________________________
# Heuristics for SAT Solvers
def no_branching_heuristic(symbols, clauses):
return first(symbols), True
def min_clauses(clauses):
min_len = min(map(lambda c: len(c.args), clauses), default=2)
return filter(lambda c: len(c.args) == (min_len if min_len > 1 else 2), clauses)
def moms(symbols, clauses):
"""
MOMS (Maximum Occurrence in clauses of Minimum Size) heuristic
Returns the literal with the most occurrences in all clauses of minimum size
"""
scores = Counter(l for c in min_clauses(clauses) for l in prop_symbols(c))
return max(symbols, key=lambda symbol: scores[symbol]), True
def momsf(symbols, clauses, k=0):
"""
MOMS alternative heuristic
If f(x) the number of occurrences of the variable x in clauses with minimum size,
we choose the variable maximizing [f(x) + f(-x)] * 2^k + f(x) * f(-x)
Returns x if f(x) >= f(-x) otherwise -x
"""
scores = Counter(l for c in min_clauses(clauses) for l in disjuncts(c))
P = max(symbols,
key=lambda symbol: (scores[symbol] + scores[~symbol]) * pow(2, k) + scores[symbol] * scores[~symbol])
return P, True if scores[P] >= scores[~P] else False
def posit(symbols, clauses):
"""
Freeman's POSIT version of MOMs
Counts the positive x and negative x for each variable x in clauses with minimum size
Returns x if f(x) >= f(-x) otherwise -x
"""
scores = Counter(l for c in min_clauses(clauses) for l in disjuncts(c))
P = max(symbols, key=lambda symbol: scores[symbol] + scores[~symbol])
return P, True if scores[P] >= scores[~P] else False
def zm(symbols, clauses):
"""
Zabih and McAllester's version of MOMs
Counts the negative occurrences only of each variable x in clauses with minimum size
"""
scores = Counter(l for c in min_clauses(clauses) for l in disjuncts(c) if l.op == '~')
return max(symbols, key=lambda symbol: scores[~symbol]), True
def dlis(symbols, clauses):
"""
DLIS (Dynamic Largest Individual Sum) heuristic
Choose the variable and value that satisfies the maximum number of unsatisfied clauses
Like DLCS but we only consider the literal (thus Cp and Cn are individual)
"""
scores = Counter(l for c in clauses for l in disjuncts(c))
P = max(symbols, key=lambda symbol: scores[symbol])
return P, True if scores[P] >= scores[~P] else False
def dlcs(symbols, clauses):
"""
DLCS (Dynamic Largest Combined Sum) heuristic
Cp the number of clauses containing literal x
Cn the number of clauses containing literal -x
Here we select the variable maximizing Cp + Cn
Returns x if Cp >= Cn otherwise -x
"""
scores = Counter(l for c in clauses for l in disjuncts(c))
P = max(symbols, key=lambda symbol: scores[symbol] + scores[~symbol])
return P, True if scores[P] >= scores[~P] else False
def jw(symbols, clauses):
"""
Jeroslow-Wang heuristic
    For each literal l compute J(l) = sum of 2^(-|c|) over the clauses c containing l
Return the literal maximizing J
"""
scores = Counter()
for c in clauses:
for l in prop_symbols(c):
scores[l] += pow(2, -len(c.args))
return max(symbols, key=lambda symbol: scores[symbol]), True
def jw2(symbols, clauses):
"""
Two Sided Jeroslow-Wang heuristic
    Computes J for both polarities of each variable and scores it by J(x) + J(-x)
Returns x if J(x) >= J(-x) otherwise -x
"""
scores = Counter()
for c in clauses:
for l in disjuncts(c):
scores[l] += pow(2, -len(c.args))
P = max(symbols, key=lambda symbol: scores[symbol] + scores[~symbol])
return P, True if scores[P] >= scores[~P] else False
# ______________________________________________________________________________
# DPLL-Satisfiable [Figure 7.17]
def dpll_satisfiable(s, branching_heuristic=no_branching_heuristic):
"""Check satisfiability of a propositional sentence.
This differs from the book code in two ways: (1) it returns a model
rather than True when it succeeds; this is more useful. (2) The
function find_pure_symbol is passed a list of unknown clauses, rather
than a list of all clauses and the model; this is more efficient.
>>> dpll_satisfiable(A |'<=>'| B) == {A: True, B: True}
True
"""
return dpll(conjuncts(to_cnf(s)), prop_symbols(s), {}, branching_heuristic)
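# Usage sketch, not part of the original file: any branching heuristic above
# can be plugged into dpll_satisfiable. The returned model may differ between
# heuristics, but each one satisfies the sentence.
def _demo_dpll_heuristics():
    s = (A | B) & (~A | C) & (~C | B)
    for heuristic in (no_branching_heuristic, moms, jw2):
        model = dpll_satisfiable(s, branching_heuristic=heuristic)
        print(pl_true(s, model))  # True for every heuristic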
def dpll(clauses, symbols, model, branching_heuristic=no_branching_heuristic):
"""See if the clauses are true in a partial model."""
unknown_clauses = [] # clauses with an unknown truth value
for c in clauses:
val = pl_true(c, model)
if val is False:
return False
if val is None:
unknown_clauses.append(c)
if not unknown_clauses:
return model
P, value = find_pure_symbol(symbols, unknown_clauses)
if P:
return dpll(clauses, remove_all(P, symbols), extend(model, P, value), branching_heuristic)
P, value = find_unit_clause(clauses, model)
if P:
return dpll(clauses, remove_all(P, symbols), extend(model, P, value), branching_heuristic)
P, value = branching_heuristic(symbols, unknown_clauses)
return (dpll(clauses, remove_all(P, symbols), extend(model, P, value), branching_heuristic) or
dpll(clauses, remove_all(P, symbols), extend(model, P, not value), branching_heuristic))
def find_pure_symbol(symbols, clauses):
"""Find a symbol and its value if it appears only as a positive literal
(or only as a negative) in clauses.
>>> find_pure_symbol([A, B, C], [A|~B,~B|~C,C|A])
(A, True)
"""
for s in symbols:
found_pos, found_neg = False, False
for c in clauses:
if not found_pos and s in disjuncts(c):
found_pos = True
if not found_neg and ~s in disjuncts(c):
found_neg = True
if found_pos != found_neg:
return s, found_pos
return None, None
def find_unit_clause(clauses, model):
"""Find a forced assignment if possible from a clause with only 1
variable not bound in the model.
>>> find_unit_clause([A|B|C, B|~C, ~A|~B], {A:True})
(B, False)
"""
for clause in clauses:
P, value = unit_clause_assign(clause, model)
if P:
return P, value
return None, None
def unit_clause_assign(clause, model):
"""Return a single variable/value pair that makes clause true in
the model, if possible.
>>> unit_clause_assign(A|B|C, {A:True})
(None, None)
>>> unit_clause_assign(B|~C, {A:True})
(None, None)
>>> unit_clause_assign(~A|~B, {A:True})
(B, False)
"""
P, value = None, None
for literal in disjuncts(clause):
sym, positive = inspect_literal(literal)
if sym in model:
if model[sym] == positive:
return None, None # clause already True
elif P:
return None, None # more than 1 unbound variable
else:
P, value = sym, positive
return P, value
def inspect_literal(literal):
"""The symbol in this literal, and the value it should take to
make the literal true.
>>> inspect_literal(P)
(P, True)
>>> inspect_literal(~P)
(P, False)
"""
if literal.op == '~':
return literal.args[0], False
else:
return literal, True
# ______________________________________________________________________________
# CDCL - Conflict-Driven Clause Learning with 1UIP Learning Scheme,
# 2WL Lazy Data Structure, VSIDS Branching Heuristic & Restarts
def no_restart(conflicts, restarts, queue_lbd, sum_lbd):
return False
def luby(conflicts, restarts, queue_lbd, sum_lbd, unit=512):
    # in the state of the art, tested with unit values 1, 2, 4, 6, 8, 12, 16, 32, 64, 128, 256 and 512
def _luby(i):
k = 1
while True:
if i == (1 << k) - 1:
return 1 << (k - 1)
elif (1 << (k - 1)) <= i < (1 << k) - 1:
return _luby(i - (1 << (k - 1)) + 1)
k += 1
return unit * _luby(restarts) == len(queue_lbd)
def glucose(conflicts, restarts, queue_lbd, sum_lbd, x=100, k=0.7):
    # in the state of the art, tested with (x, k) as (50, 0.8) and (100, 0.7)
    # restart if there have been at least x conflicts since the last restart and the average LBD of
    # the last x learnt clauses is at least k times higher than the average LBD of all learnt clauses
return len(queue_lbd) >= x and sum(queue_lbd) / len(queue_lbd) * k > sum_lbd / conflicts
def cdcl_satisfiable(s, vsids_decay=0.95, restart_strategy=no_restart):
"""
>>> cdcl_satisfiable(A |'<=>'| B) == {A: True, B: True}
True
"""
clauses = TwoWLClauseDatabase(conjuncts(to_cnf(s)))
symbols = prop_symbols(s)
scores = Counter()
G = nx.DiGraph()
model = {}
dl = 0
conflicts = 0
restarts = 1
sum_lbd = 0
queue_lbd = []
while True:
conflict = unit_propagation(clauses, symbols, model, G, dl)
if conflict:
if dl == 0:
return False
conflicts += 1
dl, learn, lbd = conflict_analysis(G, dl)
queue_lbd.append(lbd)
sum_lbd += lbd
backjump(symbols, model, G, dl)
clauses.add(learn, model)
scores.update(l for l in disjuncts(learn))
for symbol in scores:
scores[symbol] *= vsids_decay
if restart_strategy(conflicts, restarts, queue_lbd, sum_lbd):
backjump(symbols, model, G)
queue_lbd.clear()
restarts += 1
else:
if not symbols:
return model
dl += 1
assign_decision_literal(symbols, model, scores, G, dl)
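# Usage sketch, not part of the original file: cdcl_satisfiable accepts a
# restart strategy; luby and glucose are drop-in alternatives to no_restart.
def _demo_cdcl_restarts():
    s = (A | B) & (~A | C) & (~C | B)
    model = cdcl_satisfiable(s, restart_strategy=luby)
    print(pl_true(s, model))  # True: a satisfying model was found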
def assign_decision_literal(symbols, model, scores, G, dl):
P = max(symbols, key=lambda symbol: scores[symbol] + scores[~symbol])
value = True if scores[P] >= scores[~P] else False
symbols.remove(P)
model[P] = value
G.add_node(P, val=value, dl=dl)
def unit_propagation(clauses, symbols, model, G, dl):
def check(c):
if not model or clauses.get_first_watched(c) == clauses.get_second_watched(c):
return True
w1, _ = inspect_literal(clauses.get_first_watched(c))
if w1 in model:
return c in (clauses.get_neg_watched(w1) if model[w1] else clauses.get_pos_watched(w1))
w2, _ = inspect_literal(clauses.get_second_watched(c))
if w2 in model:
return c in (clauses.get_neg_watched(w2) if model[w2] else clauses.get_pos_watched(w2))
def unit_clause(watching):
w, p = inspect_literal(watching)
G.add_node(w, val=p, dl=dl)
G.add_edges_from(zip(prop_symbols(c) - {w}, itertools.cycle([w])), antecedent=c)
symbols.remove(w)
model[w] = p
def conflict_clause(c):
G.add_edges_from(zip(prop_symbols(c), itertools.cycle('K')), antecedent=c)
while True:
bcp = False
for c in filter(check, clauses.get_clauses()):
# we need only visit each clause when one of its two watched literals is assigned to 0 because, until
# this happens, we can guarantee that there cannot be more than n-2 literals in the clause assigned to 0
first_watched = pl_true(clauses.get_first_watched(c), model)
second_watched = pl_true(clauses.get_second_watched(c), model)
if first_watched is None and clauses.get_first_watched(c) == clauses.get_second_watched(c):
unit_clause(clauses.get_first_watched(c))
bcp = True
break
elif first_watched is False and second_watched is not True:
if clauses.update_second_watched(c, model):
bcp = True
else:
# if the only literal with a non-zero value is the other watched literal then
if second_watched is None: # if it is free, then the clause is a unit clause
unit_clause(clauses.get_second_watched(c))
bcp = True
break
else: # else (it is False) the clause is a conflict clause
conflict_clause(c)
return True
elif second_watched is False and first_watched is not True:
if clauses.update_first_watched(c, model):
bcp = True
else:
# if the only literal with a non-zero value is the other watched literal then
if first_watched is None: # if it is free, then the clause is a unit clause
unit_clause(clauses.get_first_watched(c))
bcp = True
break
else: # else (it is False) the clause is a conflict clause
conflict_clause(c)
return True
if not bcp:
return False
def conflict_analysis(G, dl):
conflict_clause = next(G[p]['K']['antecedent'] for p in G.pred['K'])
P = next(node for node in G.nodes() - 'K' if G.nodes[node]['dl'] == dl and G.in_degree(node) == 0)
first_uip = nx.immediate_dominators(G, P)['K']
G.remove_node('K')
conflict_side = nx.descendants(G, first_uip)
while True:
for l in prop_symbols(conflict_clause).intersection(conflict_side):
antecedent = next(G[p][l]['antecedent'] for p in G.pred[l])
conflict_clause = pl_binary_resolution(conflict_clause, antecedent)
# the literal block distance is calculated by taking the decision levels from variables of all
# literals in the clause, and counting how many different decision levels were in this set
lbd = [G.nodes[l]['dl'] for l in prop_symbols(conflict_clause)]
if lbd.count(dl) == 1 and first_uip in prop_symbols(conflict_clause):
return 0 if len(lbd) == 1 else heapq.nlargest(2, lbd)[-1], conflict_clause, len(set(lbd))
def pl_binary_resolution(ci, cj):
for di in disjuncts(ci):
for dj in disjuncts(cj):
if di == ~dj or ~di == dj:
return pl_binary_resolution(associate('|', remove_all(di, disjuncts(ci))),
associate('|', remove_all(dj, disjuncts(cj))))
return associate('|', unique(disjuncts(ci) + disjuncts(cj)))
def backjump(symbols, model, G, dl=0):
delete = {node for node in G.nodes() if G.nodes[node]['dl'] > dl}
G.remove_nodes_from(delete)
for node in delete:
del model[node]
symbols |= delete
class TwoWLClauseDatabase:
def __init__(self, clauses):
self.__twl = {}
self.__watch_list = defaultdict(lambda: [set(), set()])
for c in clauses:
self.add(c, None)
def get_clauses(self):
return self.__twl.keys()
def set_first_watched(self, clause, new_watching):
if len(clause.args) > 2:
self.__twl[clause][0] = new_watching
def set_second_watched(self, clause, new_watching):
if len(clause.args) > 2:
self.__twl[clause][1] = new_watching
def get_first_watched(self, clause):
if len(clause.args) == 2:
return clause.args[0]
if len(clause.args) > 2:
return self.__twl[clause][0]
return clause
def get_second_watched(self, clause):
if len(clause.args) == 2:
return clause.args[-1]
if len(clause.args) > 2:
return self.__twl[clause][1]
return clause
def get_pos_watched(self, l):
return self.__watch_list[l][0]
def get_neg_watched(self, l):
return self.__watch_list[l][1]
def add(self, clause, model):
self.__twl[clause] = self.__assign_watching_literals(clause, model)
w1, p1 = inspect_literal(self.get_first_watched(clause))
w2, p2 = inspect_literal(self.get_second_watched(clause))
self.__watch_list[w1][0].add(clause) if p1 else self.__watch_list[w1][1].add(clause)
if w1 != w2:
self.__watch_list[w2][0].add(clause) if p2 else self.__watch_list[w2][1].add(clause)
def remove(self, clause):
w1, p1 = inspect_literal(self.get_first_watched(clause))
w2, p2 = inspect_literal(self.get_second_watched(clause))
del self.__twl[clause]
self.__watch_list[w1][0].discard(clause) if p1 else self.__watch_list[w1][1].discard(clause)
if w1 != w2:
self.__watch_list[w2][0].discard(clause) if p2 else self.__watch_list[w2][1].discard(clause)
def update_first_watched(self, clause, model):
# if a non-zero literal different from the other watched literal is found
found, new_watching = self.__find_new_watching_literal(clause, self.get_first_watched(clause), model)
if found: # then it will replace the watched literal
w, p = inspect_literal(self.get_second_watched(clause))
self.__watch_list[w][0].remove(clause) if p else self.__watch_list[w][1].remove(clause)
self.set_second_watched(clause, new_watching)
w, p = inspect_literal(new_watching)
self.__watch_list[w][0].add(clause) if p else self.__watch_list[w][1].add(clause)
return True
def update_second_watched(self, clause, model):
# if a non-zero literal different from the other watched literal is found
found, new_watching = self.__find_new_watching_literal(clause, self.get_second_watched(clause), model)
if found: # then it will replace the watched literal
w, p = inspect_literal(self.get_first_watched(clause))
self.__watch_list[w][0].remove(clause) if p else self.__watch_list[w][1].remove(clause)
self.set_first_watched(clause, new_watching)
w, p = inspect_literal(new_watching)
self.__watch_list[w][0].add(clause) if p else self.__watch_list[w][1].add(clause)
return True
def __find_new_watching_literal(self, clause, other_watched, model):
# if a non-zero literal different from the other watched literal is found
if len(clause.args) > 2:
for l in disjuncts(clause):
if l != other_watched and pl_true(l, model) is not False:
# then it is returned
return True, l
return False, None
def __assign_watching_literals(self, clause, model=None):
if len(clause.args) > 2:
if model is None or not model:
return [clause.args[0], clause.args[-1]]
else:
return [next(l for l in disjuncts(clause) if pl_true(l, model) is None),
next(l for l in disjuncts(clause) if pl_true(l, model) is False)]
# ______________________________________________________________________________
# Walk-SAT [Figure 7.18]
def WalkSAT(clauses, p=0.5, max_flips=10000):
"""Checks for satisfiability of all clauses by randomly flipping values of variables
>>> WalkSAT([A & ~A], 0.5, 100) is None
True
"""
# Set of all symbols in all clauses
symbols = {sym for clause in clauses for sym in prop_symbols(clause)}
# model is a random assignment of true/false to the symbols in clauses
model = {s: random.choice([True, False]) for s in symbols}
for i in range(max_flips):
satisfied, unsatisfied = [], []
for clause in clauses:
(satisfied if pl_true(clause, model) else unsatisfied).append(clause)
if not unsatisfied: # if model satisfies all the clauses
return model
clause = random.choice(unsatisfied)
if probability(p):
sym = random.choice(list(prop_symbols(clause)))
else:
# Flip the symbol in clause that maximizes number of sat. clauses
def sat_count(sym):
                # Return the number of clauses satisfied after flipping the symbol.
model[sym] = not model[sym]
count = len([clause for clause in clauses if pl_true(clause, model)])
model[sym] = not model[sym]
return count
sym = max(prop_symbols(clause), key=sat_count)
model[sym] = not model[sym]
# If no solution is found within the flip limit, we return failure
return None
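# Usage sketch, not part of the original file: WalkSAT takes a list of
# clauses rather than a single sentence, so split a sentence with conjuncts
# first. It may return None on satisfiable input if max_flips is exhausted.
def _demo_walksat():
    clauses = conjuncts(to_cnf((A | B) & (~A | C)))
    model = WalkSAT(clauses, p=0.5, max_flips=1000)
    print(model)  # a satisfying assignment, or None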
# ______________________________________________________________________________
# Map Coloring SAT Problems
def MapColoringSAT(colors, neighbors):
"""Make a SAT for the problem of coloring a map with different colors
for any two adjacent regions. Arguments are a list of colors, and a
dict of {region: [neighbor,...]} entries. This dict may also be
specified as a string of the form defined by parse_neighbors."""
if isinstance(neighbors, str):
neighbors = parse_neighbors(neighbors)
colors = UniversalDict(colors)
    clauses = []
    visited = set()
    for state in neighbors.keys():
clause = [expr(state + '_' + c) for c in colors[state]]
clauses.append(clause)
for t in itertools.combinations(clause, 2):
clauses.append([~t[0], ~t[1]])
        # visited is shared across states so each adjacency pair is constrained once
        adj = set(neighbors[state]) - visited
        visited.add(state)
for n_state in adj:
for col in colors[n_state]:
clauses.append([expr('~' + state + '_' + col), expr('~' + n_state + '_' + col)])
return associate('&', map(lambda c: associate('|', c), clauses))
australia_sat = MapColoringSAT(list('RGB'), """SA: WA NT Q NSW V; NT: WA Q; NSW: Q V; T: """)
france_sat = MapColoringSAT(list('RGBY'),
"""AL: LO FC; AQ: MP LI PC; AU: LI CE BO RA LR MP; BO: CE IF CA FC RA
AU; BR: NB PL; CA: IF PI LO FC BO; CE: PL NB NH IF BO AU LI PC; FC: BO
CA LO AL RA; IF: NH PI CA BO CE; LI: PC CE AU MP AQ; LO: CA AL FC; LR:
MP AU RA PA; MP: AQ LI AU LR; NB: NH CE PL BR; NH: PI IF CE NB; NO:
PI; PA: LR RA; PC: PL CE LI AQ; PI: NH NO CA IF; PL: BR NB CE PC; RA:
AU BO FC PA LR""")
usa_sat = MapColoringSAT(list('RGBY'),
"""WA: OR ID; OR: ID NV CA; CA: NV AZ; NV: ID UT AZ; ID: MT WY UT;
UT: WY CO AZ; MT: ND SD WY; WY: SD NE CO; CO: NE KA OK NM; NM: OK TX AZ;
ND: MN SD; SD: MN IA NE; NE: IA MO KA; KA: MO OK; OK: MO AR TX;
TX: AR LA; MN: WI IA; IA: WI IL MO; MO: IL KY TN AR; AR: MS TN LA;
LA: MS; WI: MI IL; IL: IN KY; IN: OH KY; MS: TN AL; AL: TN GA FL;
MI: OH IN; OH: PA WV KY; KY: WV VA TN; TN: VA NC GA; GA: NC SC FL;
PA: NY NJ DE MD WV; WV: MD VA; VA: MD DC NC; NC: SC; NY: VT MA CT NJ;
NJ: DE; DE: MD; MD: DC; VT: NH MA; MA: NH RI CT; CT: RI; ME: NH;
HI: ; AK: """)
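# Usage sketch, not part of the original file: the map-coloring sentences
# above can be handed to any SAT solver in this module; australia_sat is
# small enough for plain DPLL.
def _demo_map_coloring():
    model = dpll_satisfiable(australia_sat)
    # Symbols are named region_color (e.g. WA_R); show the chosen colors
    print(sorted(str(var) for var, val in model.items() if val))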
# ______________________________________________________________________________
# Expr functions for WumpusKB and HybridWumpusAgent
def facing_east(time):
return Expr('FacingEast', time)
def facing_west(time):
return Expr('FacingWest', time)
def facing_north(time):
return Expr('FacingNorth', time)
def facing_south(time):
return Expr('FacingSouth', time)
def wumpus(x, y):
return Expr('W', x, y)
def pit(x, y):
return Expr('P', x, y)
def breeze(x, y):
return Expr('B', x, y)
def stench(x, y):
return Expr('S', x, y)
def wumpus_alive(time):
return Expr('WumpusAlive', time)
def have_arrow(time):
return Expr('HaveArrow', time)
def percept_stench(time):
return Expr('Stench', time)
def percept_breeze(time):
return Expr('Breeze', time)
def percept_glitter(time):
return Expr('Glitter', time)
def percept_bump(time):
return Expr('Bump', time)
def percept_scream(time):
return Expr('Scream', time)
def move_forward(time):
return Expr('Forward', time)
def shoot(time):
return Expr('Shoot', time)
def turn_left(time):
return Expr('TurnLeft', time)
def turn_right(time):
return Expr('TurnRight', time)
def ok_to_move(x, y, time):
return Expr('OK', x, y, time)
def location(x, y, time=None):
if time is None:
return Expr('L', x, y)
else:
return Expr('L', x, y, time)
# Symbols
def implies(lhs, rhs):
return Expr('==>', lhs, rhs)
def equiv(lhs, rhs):
return Expr('<=>', lhs, rhs)
# Helper Function
def new_disjunction(sentences):
t = sentences[0]
for i in range(1, len(sentences)):
t |= sentences[i]
return t
# ______________________________________________________________________________
class WumpusKB(PropKB):
"""
    Create a Knowledge Base that contains the atemporal "Wumpus physics" and temporal rules with time zero.
"""
def __init__(self, dimrow):
super().__init__()
self.dimrow = dimrow
self.tell(~wumpus(1, 1))
self.tell(~pit(1, 1))
for y in range(1, dimrow + 1):
for x in range(1, dimrow + 1):
pits_in = list()
wumpus_in = list()
if x > 1: # West room exists
pits_in.append(pit(x - 1, y))
wumpus_in.append(wumpus(x - 1, y))
if y < dimrow: # North room exists
pits_in.append(pit(x, y + 1))
wumpus_in.append(wumpus(x, y + 1))
if x < dimrow: # East room exists
pits_in.append(pit(x + 1, y))
wumpus_in.append(wumpus(x + 1, y))
if y > 1: # South room exists
pits_in.append(pit(x, y - 1))
wumpus_in.append(wumpus(x, y - 1))
self.tell(equiv(breeze(x, y), new_disjunction(pits_in)))
self.tell(equiv(stench(x, y), new_disjunction(wumpus_in)))
# Rule that describes existence of at least one Wumpus
wumpus_at_least = list()
for x in range(1, dimrow + 1):
for y in range(1, dimrow + 1):
wumpus_at_least.append(wumpus(x, y))
self.tell(new_disjunction(wumpus_at_least))
# Rule that describes existence of at most one Wumpus
for i in range(1, dimrow + 1):
for j in range(1, dimrow + 1):
for u in range(1, dimrow + 1):
for v in range(1, dimrow + 1):
if i != u or j != v:
self.tell(~wumpus(i, j) | ~wumpus(u, v))
# Temporal rules at time zero
self.tell(location(1, 1, 0))
for i in range(1, dimrow + 1):
for j in range(1, dimrow + 1):
self.tell(implies(location(i, j, 0), equiv(percept_breeze(0), breeze(i, j))))
self.tell(implies(location(i, j, 0), equiv(percept_stench(0), stench(i, j))))
if i != 1 or j != 1:
self.tell(~location(i, j, 0))
self.tell(wumpus_alive(0))
self.tell(have_arrow(0))
self.tell(facing_east(0))
self.tell(~facing_north(0))
self.tell(~facing_south(0))
self.tell(~facing_west(0))
def make_action_sentence(self, action, time):
actions = [move_forward(time), shoot(time), turn_left(time), turn_right(time)]
# plans store action names as strings; lift them to Exprs so the structural
# comparison below can succeed ('is' identity would always fail here)
if isinstance(action, str):
action = Expr(action, time)
for a in actions:
if action == a:
self.tell(action)
else:
self.tell(~a)
def make_percept_sentence(self, percept, time):
# Glitter, Bump, Stench, Breeze, Scream
flags = [0, 0, 0, 0, 0]
# Things perceived
if isinstance(percept, Glitter):
flags[0] = 1
self.tell(percept_glitter(time))
elif isinstance(percept, Bump):
flags[1] = 1
self.tell(percept_bump(time))
elif isinstance(percept, Stench):
flags[2] = 1
self.tell(percept_stench(time))
elif isinstance(percept, Breeze):
flags[3] = 1
self.tell(percept_breeze(time))
elif isinstance(percept, Scream):
flags[4] = 1
self.tell(percept_scream(time))
# Things not perceived
for i in range(len(flags)):
if flags[i] == 0:
if i == 0:
self.tell(~percept_glitter(time))
elif i == 1:
self.tell(~percept_bump(time))
elif i == 2:
self.tell(~percept_stench(time))
elif i == 3:
self.tell(~percept_breeze(time))
elif i == 4:
self.tell(~percept_scream(time))
def add_temporal_sentences(self, time):
if time == 0:
return
t = time - 1
# current location rules
for i in range(1, self.dimrow + 1):
for j in range(1, self.dimrow + 1):
self.tell(implies(location(i, j, time), equiv(percept_breeze(time), breeze(i, j))))
self.tell(implies(location(i, j, time), equiv(percept_stench(time), stench(i, j))))
s = list()
# successor-state axiom: the agent is at (i, j) now iff it was already here and
# did not (successfully) move forward, or it moved in from an adjacent square
s.append(location(i, j, t) & (~move_forward(t) | percept_bump(time)))
if i != 1:
s.append(location(i - 1, j, t) & facing_east(t) & move_forward(t))
if i != self.dimrow:
s.append(location(i + 1, j, t) & facing_west(t) & move_forward(t))
if j != 1:
s.append(location(i, j - 1, t) & facing_north(t) & move_forward(t))
if j != self.dimrow:
s.append(location(i, j + 1, t) & facing_south(t) & move_forward(t))
# add the successor-state axiom for location i, j
self.tell(equiv(location(i, j, time), new_disjunction(s)))
# add sentence about safety of location i,j
self.tell(equiv(ok_to_move(i, j, time), ~pit(i, j) & ~wumpus(i, j) & wumpus_alive(time)))
# Rules about current orientation
a = facing_north(t) & turn_right(t)
b = facing_south(t) & turn_left(t)
c = facing_east(t) & ~turn_left(t) & ~turn_right(t)
s = equiv(facing_east(time), a | b | c)
self.tell(s)
a = facing_north(t) & turn_left(t)
b = facing_south(t) & turn_right(t)
c = facing_west(t) & ~turn_left(t) & ~turn_right(t)
s = equiv(facing_west(time), a | b | c)
self.tell(s)
a = facing_east(t) & turn_left(t)
b = facing_west(t) & turn_right(t)
c = facing_north(t) & ~turn_left(t) & ~turn_right(t)
s = equiv(facing_north(time), a | b | c)
self.tell(s)
a = facing_west(t) & turn_left(t)
b = facing_east(t) & turn_right(t)
c = facing_south(t) & ~turn_left(t) & ~turn_right(t)
s = equiv(facing_south(time), a | b | c)
self.tell(s)
# Rules about last action
self.tell(equiv(move_forward(t), ~turn_right(t) & ~turn_left(t)))
# Rule about the arrow
self.tell(equiv(have_arrow(time), have_arrow(t) & ~shoot(t)))
# Rule about Wumpus (dead or alive)
self.tell(equiv(wumpus_alive(time), wumpus_alive(t) & ~percept_scream(time)))
def ask_if_true(self, query):
return pl_resolution(self, query)
# ______________________________________________________________________________
class WumpusPosition:
def __init__(self, x, y, orientation):
self.X = x
self.Y = y
self.orientation = orientation
def get_location(self):
return self.X, self.Y
def set_location(self, x, y):
self.X = x
self.Y = y
def get_orientation(self):
return self.orientation
def set_orientation(self, orientation):
self.orientation = orientation
def __eq__(self, other):
return (other.get_location() == self.get_location() and
other.get_orientation() == self.get_orientation())
def __hash__(self):
# defining __eq__ disables the default hash, but positions are stored in
# sets (see plan_shot), so hash on the same fields used for equality
return hash((self.X, self.Y, self.orientation))
# ______________________________________________________________________________
class HybridWumpusAgent(Agent):
"""
[Figure 7.20]
An agent for the wumpus world that does logical inference.
"""
def __init__(self, dimensions):
self.dimrow = dimensions
self.kb = WumpusKB(self.dimrow)
self.t = 0
self.plan = list()
self.current_position = WumpusPosition(1, 1, 'UP')
super().__init__(self.execute)
def execute(self, percept):
self.kb.make_percept_sentence(percept, self.t)
self.kb.add_temporal_sentences(self.t)
temp = list()
for i in range(1, self.dimrow + 1):
for j in range(1, self.dimrow + 1):
if self.kb.ask_if_true(location(i, j, self.t)):
temp.append(i)
temp.append(j)
if self.kb.ask_if_true(facing_north(self.t)):
self.current_position = WumpusPosition(temp[0], temp[1], 'UP')
elif self.kb.ask_if_true(facing_south(self.t)):
self.current_position = WumpusPosition(temp[0], temp[1], 'DOWN')
elif self.kb.ask_if_true(facing_west(self.t)):
self.current_position = WumpusPosition(temp[0], temp[1], 'LEFT')
elif self.kb.ask_if_true(facing_east(self.t)):
self.current_position = WumpusPosition(temp[0], temp[1], 'RIGHT')
safe_points = list()
for i in range(1, self.dimrow + 1):
for j in range(1, self.dimrow + 1):
if self.kb.ask_if_true(ok_to_move(i, j, self.t)):
safe_points.append([i, j])
if self.kb.ask_if_true(percept_glitter(self.t)):
goals = list()
goals.append([1, 1])
self.plan.append('Grab')
actions = self.plan_route(self.current_position, goals, safe_points)
self.plan.extend(actions)
self.plan.append('Climb')
if len(self.plan) == 0:
unvisited = list()
for i in range(1, self.dimrow + 1):
for j in range(1, self.dimrow + 1):
for k in range(self.t):
if self.kb.ask_if_true(location(i, j, k)):
unvisited.append([i, j])
unvisited_and_safe = list()
for u in unvisited:
for s in safe_points:
if u not in unvisited_and_safe and s == u:
unvisited_and_safe.append(u)
temp = self.plan_route(self.current_position, unvisited_and_safe, safe_points)
self.plan.extend(temp)
if len(self.plan) == 0 and self.kb.ask_if_true(have_arrow(self.t)):
possible_wumpus = list()
for i in range(1, self.dimrow + 1):
for j in range(1, self.dimrow + 1):
if not self.kb.ask_if_true(wumpus(i, j)):
possible_wumpus.append([i, j])
temp = self.plan_shot(self.current_position, possible_wumpus, safe_points)
self.plan.extend(temp)
if len(self.plan) == 0:
not_unsafe = list()
for i in range(1, self.dimrow + 1):
for j in range(1, self.dimrow + 1):
if not self.kb.ask_if_true(ok_to_move(i, j, self.t)):
not_unsafe.append([i, j])
temp = self.plan_route(self.current_position, not_unsafe, safe_points)
self.plan.extend(temp)
if len(self.plan) == 0:
start = list()
start.append([1, 1])
temp = self.plan_route(self.current_position, start, safe_points)
self.plan.extend(temp)
self.plan.append('Climb')
action = self.plan[0]
self.plan = self.plan[1:]
self.kb.make_action_sentence(action, self.t)
self.t += 1
return action
def plan_route(self, current, goals, allowed):
problem = PlanRoute(current, goals, allowed, self.dimrow)
return astar_search(problem).solution()
def plan_shot(self, current, goals, allowed):
shooting_positions = set()
for loc in goals:
x = loc[0]
y = loc[1]
for i in range(1, self.dimrow + 1):
if i < x:
shooting_positions.add(WumpusPosition(i, y, 'EAST'))
if i > x:
shooting_positions.add(WumpusPosition(i, y, 'WEST'))
if i < y:
shooting_positions.add(WumpusPosition(x, i, 'NORTH'))
if i > y:
shooting_positions.add(WumpusPosition(x, i, 'SOUTH'))
# Can't have a shooting position from any of the rooms the Wumpus could reside
orientations = ['EAST', 'WEST', 'NORTH', 'SOUTH']
for loc in goals:
for orientation in orientations:
# discard, not remove: a goal square need not have been added above
shooting_positions.discard(WumpusPosition(loc[0], loc[1], orientation))
actions = list()
actions.extend(self.plan_route(current, shooting_positions, allowed))
actions.append('Shoot')
return actions
# ______________________________________________________________________________
def SAT_plan(init, transition, goal, t_max, SAT_solver=cdcl_satisfiable):
"""
[Figure 7.22]
Convert a planning problem to a satisfiability problem by translating it to a CNF sentence.
>>> transition = {'A': {'Left': 'A', 'Right': 'B'}, 'B': {'Left': 'A', 'Right': 'C'}, 'C': {'Left': 'B', 'Right': 'C'}}
>>> SAT_plan('A', transition, 'C', 1) is None
True
"""
# Functions used by SAT_plan
def translate_to_SAT(init, transition, goal, time):
clauses = []
states = [state for state in transition]
# Symbol claiming state s at time t
state_counter = itertools.count()
for s in states:
for t in range(time + 1):
state_sym[s, t] = Expr('S_{}'.format(next(state_counter)))
# Add initial state axiom
clauses.append(state_sym[init, 0])
# Add goal state axiom (written as a plain if/else for readability)
if isinstance(goal, Expr):
clauses.append(state_sym[first(clause[0] for clause in state_sym
if set(conjuncts(clause[0])).issuperset(conjuncts(goal))), time])
else:
clauses.append(state_sym[goal, time])
# All possible transitions
transition_counter = itertools.count()
for s in states:
for action in transition[s]:
s_ = transition[s][action]
for t in range(time):
# Action 'action' taken from state 's' at time 't' to reach 's_'
action_sym[s, action, t] = Expr('T_{}'.format(next(transition_counter)))
# Change the state from s to s_
clauses.append(action_sym[s, action, t] | '==>' | state_sym[s, t])
clauses.append(action_sym[s, action, t] | '==>' | state_sym[s_, t + 1])
# Allow only one state at any time
for t in range(time + 1):
# must be a state at any time
clauses.append(associate('|', [state_sym[s, t] for s in states]))
for s in states:
for s_ in states[states.index(s) + 1:]:
# for each pair of states s, s_ only one is possible at time t
clauses.append((~state_sym[s, t]) | (~state_sym[s_, t]))
# Restrict to one transition per timestep
for t in range(time):
# list of possible transitions at time t
transitions_t = [tr for tr in action_sym if tr[2] == t]
# make sure at least one of the transitions happens
clauses.append(associate('|', [action_sym[tr] for tr in transitions_t]))
for tr in transitions_t:
for tr_ in transitions_t[transitions_t.index(tr) + 1:]:
# there cannot be two transitions tr and tr_ at time t
clauses.append(~action_sym[tr] | ~action_sym[tr_])
# Combine the clauses to form the cnf
return associate('&', clauses)
def extract_solution(model):
true_transitions = [t for t in action_sym if model[action_sym[t]]]
# Sort transitions based on time, which is the 3rd element of the tuple
true_transitions.sort(key=lambda x: x[2])
return [action for s, action, time in true_transitions]
# Body of SAT_plan algorithm
for t in range(t_max + 1):
# dictionaries to help extract the solution from model
state_sym = {}
action_sym = {}
cnf = translate_to_SAT(init, transition, goal, t)
model = SAT_solver(cnf)
if model is not False:
return extract_solution(model)
return None
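# Usage sketch: with the transition system from the doctest above, a horizon of
# two steps is enough to reach 'C' from 'A':
#     SAT_plan('A', transition, 'C', 2)  # -> ['Right', 'Right']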
# ______________________________________________________________________________
def unify(x, y, s={}):
"""
[Figure 9.1]
Unify expressions x,y with substitution s; return a substitution that
would make x,y equal, or None if x,y can not unify. x and y can be
variables (e.g. Expr('x')), constants, lists, or Exprs.
>>> unify(x, 3, {})
{x: 3}
"""
if s is None:
return None
elif x == y:
return s
elif is_variable(x):
return unify_var(x, y, s)
elif is_variable(y):
return unify_var(y, x, s)
elif isinstance(x, Expr) and isinstance(y, Expr):
return unify(x.args, y.args, unify(x.op, y.op, s))
elif isinstance(x, str) or isinstance(y, str):
return None
elif issequence(x) and issequence(y) and len(x) == len(y):
if not x:
return s
return unify(x[1:], y[1:], unify(x[0], y[0], s))
else:
return None
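# A compound example: unify(expr('F(x, y)'), expr('F(G(z), 3)')) returns
# {x: G(z), y: 3}, while unify(expr('F(x)'), expr('G(x)')) is None because the
# operators F and G differ.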
def is_variable(x):
"""A variable is an Expr with no args and a lowercase symbol as the op."""
return isinstance(x, Expr) and not x.args and x.op[0].islower()
def unify_var(var, x, s):
if var in s:
return unify(s[var], x, s)
elif x in s:
return unify(var, s[x], s)
elif occur_check(var, x, s):
return None
else:
new_s = extend(s, var, x)
cascade_substitution(new_s)
return new_s
def occur_check(var, x, s):
"""Return true if variable var occurs anywhere in x
(or in subst(s, x), if s has a binding for x)."""
if var == x:
return True
elif is_variable(x) and x in s:
return occur_check(var, s[x], s)
elif isinstance(x, Expr):
return (occur_check(var, x.op, s) or
occur_check(var, x.args, s))
elif isinstance(x, (list, tuple)):
return first(e for e in x if occur_check(var, e, s))
else:
return False
def subst(s, x):
"""Substitute the substitution s into the expression x.
>>> subst({x: 42, y:0}, F(x) + y)
(F(42) + 0)
"""
if isinstance(x, list):
return [subst(s, xi) for xi in x]
elif isinstance(x, tuple):
return tuple([subst(s, xi) for xi in x])
elif not isinstance(x, Expr):
return x
elif is_var_symbol(x.op):
return s.get(x, x)
else:
return Expr(x.op, *[subst(s, arg) for arg in x.args])
def cascade_substitution(s):
"""This method allows to return a correct unifier in normal form
and perform a cascade substitution to s.
For every mapping in s perform a cascade substitution on s.get(x)
and if it is replaced with a function ensure that all the function
terms are correct updates by passing over them again.
>>> s = {x: y, y: G(z)}
>>> cascade_substitution(s)
>>> s == {x: G(z), y: G(z)}
True
"""
for x in s:
s[x] = subst(s, s.get(x))
if isinstance(s.get(x), Expr) and not is_variable(s.get(x)):
# Ensure Function Terms are correct updates by passing over them again
s[x] = subst(s, s.get(x))
def unify_mm(x, y, s={}):
"""Unify expressions x,y with substitution s using an efficient rule-based
unification algorithm by Martelli & Montanari; return a substitution that
would make x,y equal, or None if x,y can not unify. x and y can be
variables (e.g. Expr('x')), constants, lists, or Exprs.
>>> unify_mm(x, 3, {})
{x: 3}
"""
set_eq = extend(s, x, y)
s = set_eq.copy()
while True:
trans = 0
for x, y in set_eq.items():
if x == y:
# if x = y this mapping is deleted (rule b)
del s[x]
elif not is_variable(x) and is_variable(y):
# if x is not a variable and y is a variable, rewrite it as y = x in s (rule a)
if s.get(y, None) is None:
s[y] = x
del s[x]
else:
# if a mapping already exist for variable y then apply
# variable elimination (there is a chance to apply rule d)
s[x] = vars_elimination(y, s)
elif not is_variable(x) and not is_variable(y):
# in which case x and y are not variables, if the two root function symbols
# are different, stop with failure, else apply term reduction (rule c)
if x.op == y.op and len(x.args) == len(y.args):
term_reduction(x, y, s)
del s[x]
else:
return None
elif isinstance(y, Expr):
# in which case x is a variable and y is a function or a variable (e.g. F(z) or y),
# if y is a function, we must check if x occurs in y, then stop with failure, else
# try to apply variable elimination to y (rule d)
if occur_check(x, y, s):
return None
s[x] = vars_elimination(y, s)
if y == s.get(x):
trans += 1
else:
trans += 1
if trans == len(set_eq):
# if no transformation has been applied, stop with success
return s
set_eq = s.copy()
def term_reduction(x, y, s):
"""Apply term reduction to x and y if both are functions and the two root function
symbols are equals (e.g. F(x1, x2, ..., xn) and F(x1', x2', ..., xn')) by returning
a new mapping obtained by replacing x: y with {x1: x1', x2: x2', ..., xn: xn'}
"""
for i in range(len(x.args)):
if x.args[i] in s:
s[s.get(x.args[i])] = y.args[i]
else:
s[x.args[i]] = y.args[i]
def vars_elimination(x, s):
"""Apply variable elimination to x: if x is a variable and occurs in s, return
the term mapped by x, else if x is a function recursively applies variable
elimination to each term of the function."""
if not isinstance(x, Expr):
return x
if is_variable(x):
return s.get(x, x)
return Expr(x.op, *[vars_elimination(arg, s) for arg in x.args])
def standardize_variables(sentence, dic=None):
"""Replace all the variables in sentence with new variables."""
if dic is None:
dic = {}
if not isinstance(sentence, Expr):
return sentence
elif is_var_symbol(sentence.op):
if sentence in dic:
return dic[sentence]
else:
v = Expr('v_{}'.format(next(standardize_variables.counter)))
dic[sentence] = v
return v
else:
return Expr(sentence.op, *[standardize_variables(a, dic) for a in sentence.args])
standardize_variables.counter = itertools.count()
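# Example sketch: standardize_variables(expr('Knows(John, x) & Knows(x, y)'))
# renames both variables consistently, e.g. Knows(John, v_0) & Knows(v_0, v_1)
# (the v_N indices depend on how far the shared counter has advanced).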
# ______________________________________________________________________________
def parse_clauses_from_dimacs(dimacs_cnf):
"""Converts a string into CNF clauses according to the DIMACS format used in SAT competitions"""
return map(lambda c: associate('|', c),
map(lambda c: [expr('~X' + str(abs(l))) if l < 0 else expr('X' + str(l)) for l in c],
map(lambda line: map(int, line.split()),
filter(None, ' '.join(
filter(lambda line: line[0] not in ('c', 'p'),
filter(None, dimacs_cnf.strip().replace('\t', ' ').split('\n')))).split(' 0')))))
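# Example sketch: a small DIMACS body parses into Expr clauses:
#     dimacs = "c toy instance\np cnf 2 2\n1 -2 0\n-1 2 0"
#     list(parse_clauses_from_dimacs(dimacs))  # -> [X1 | ~X2, ~X1 | X2]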
# ______________________________________________________________________________
class FolKB(KB):
"""A knowledge base consisting of first-order definite clauses.
>>> kb0 = FolKB([expr('Farmer(Mac)'), expr('Rabbit(Pete)'),
... expr('(Rabbit(r) & Farmer(f)) ==> Hates(f, r)')])
>>> kb0.tell(expr('Rabbit(Flopsie)'))
>>> kb0.retract(expr('Rabbit(Pete)'))
>>> kb0.ask(expr('Hates(Mac, x)'))[x]
Flopsie
>>> kb0.ask(expr('Wife(Pete, x)'))
False
"""
def __init__(self, clauses=None):
super().__init__()
self.clauses = [] # inefficient: no indexing
if clauses:
for clause in clauses:
self.tell(clause)
def tell(self, sentence):
if is_definite_clause(sentence):
self.clauses.append(sentence)
else:
raise Exception('Not a definite clause: {}'.format(sentence))
def ask_generator(self, query):
return fol_bc_ask(self, query)
def retract(self, sentence):
self.clauses.remove(sentence)
def fetch_rules_for_goal(self, goal):
return self.clauses
def fol_fc_ask(kb, alpha):
"""
[Figure 9.3]
A simple forward-chaining algorithm.
"""
# TODO: improve efficiency
kb_consts = list({c for clause in kb.clauses for c in constant_symbols(clause)})
def enum_subst(p):
query_vars = list({v for clause in p for v in variables(clause)})
for assignment_list in itertools.product(kb_consts, repeat=len(query_vars)):
theta = {x: y for x, y in zip(query_vars, assignment_list)}
yield theta
# check if we can answer without new inferences
for q in kb.clauses:
phi = unify_mm(q, alpha)
if phi is not None:
yield phi
while True:
new = []
for rule in kb.clauses:
p, q = parse_definite_clause(rule)
for theta in enum_subst(p):
if set(subst(theta, p)).issubset(set(kb.clauses)):
q_ = subst(theta, q)
if all([unify_mm(x, q_) is None for x in kb.clauses + new]):
new.append(q_)
phi = unify_mm(q_, alpha)
if phi is not None:
yield phi
if not new:
break
for clause in new:
kb.tell(clause)
return None
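# Usage sketch: with crime_kb (defined below), forward chaining derives the
# textbook conclusion:
#     next(fol_fc_ask(crime_kb, expr('Criminal(x)')))  # -> {x: West}
# Note that fol_fc_ask tells newly inferred facts back into the KB as it runs.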
def fol_bc_ask(kb, query):
"""
[Figure 9.6]
A simple backward-chaining algorithm for first-order logic.
KB should be an instance of FolKB, and query an atomic sentence.
"""
return fol_bc_or(kb, query, {})
def fol_bc_or(kb, goal, theta):
for rule in kb.fetch_rules_for_goal(goal):
lhs, rhs = parse_definite_clause(standardize_variables(rule))
for theta1 in fol_bc_and(kb, lhs, unify_mm(rhs, goal, theta)):
yield theta1
def fol_bc_and(kb, goals, theta):
if theta is None:
pass
elif not goals:
yield theta
else:
first, rest = goals[0], goals[1:]
for theta1 in fol_bc_or(kb, subst(theta, first), theta):
for theta2 in fol_bc_and(kb, rest, theta1):
yield theta2
# A simple KB that defines the relevant conditions of the Wumpus World as in Figure 7.4.
# See Sec. 7.4.3
wumpus_kb = PropKB()
P11, P12, P21, P22, P31, B11, B21 = expr('P11, P12, P21, P22, P31, B11, B21')
wumpus_kb.tell(~P11)
wumpus_kb.tell(B11 | '<=>' | (P12 | P21))
wumpus_kb.tell(B21 | '<=>' | (P11 | P22 | P31))
wumpus_kb.tell(~B11)
wumpus_kb.tell(B21)
test_kb = FolKB(map(expr, ['Farmer(Mac)',
'Rabbit(Pete)',
'Mother(MrsMac, Mac)',
'Mother(MrsRabbit, Pete)',
'(Rabbit(r) & Farmer(f)) ==> Hates(f, r)',
'(Mother(m, c)) ==> Loves(m, c)',
'(Mother(m, r) & Rabbit(r)) ==> Rabbit(m)',
'(Farmer(f)) ==> Human(f)',
# Note that this order of conjuncts
# would result in infinite recursion:
# '(Human(h) & Mother(m, h)) ==> Human(m)'
'(Mother(m, h) & Human(h)) ==> Human(m)']))
crime_kb = FolKB(map(expr, ['(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)',
'Owns(Nono, M1)',
'Missile(M1)',
'(Missile(x) & Owns(Nono, x)) ==> Sells(West, x, Nono)',
'Missile(x) ==> Weapon(x)',
'Enemy(x, America) ==> Hostile(x)',
'American(West)',
'Enemy(Nono, America)']))
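# Backward chaining reaches the same conclusion declaratively; mirroring the
# FolKB doctest style above:
#     crime_kb.ask(expr('Criminal(x)'))[x]  # -> West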
# ______________________________________________________________________________
# Example application (not in the book).
# You can use the Expr class to do symbolic differentiation. This used to be
# a part of AI; now it is considered a separate field, Symbolic Algebra.
def diff(y, x):
"""Return the symbolic derivative, dy/dx, as an Expr.
However, you probably want to simplify the results with simp.
>>> diff(x * x, x)
((x * 1) + (x * 1))
"""
if y == x:
return 1
elif not y.args:
return 0
else:
u, op, v = y.args[0], y.op, y.args[-1]
if op == '+':
return diff(u, x) + diff(v, x)
elif op == '-' and len(y.args) == 1:
return -diff(u, x)
elif op == '-':
return diff(u, x) - diff(v, x)
elif op == '*':
return u * diff(v, x) + v * diff(u, x)
elif op == '/':
return (v * diff(u, x) - u * diff(v, x)) / (v * v)
elif op == '**' and isnumber(v):  # power rule with a constant exponent
return v * u ** (v - 1) * diff(u, x)
elif op == '**':
return (v * u ** (v - 1) * diff(u, x) +
u ** v * Expr('log')(u) * diff(v, x))
elif op == 'log':
return diff(u, x) / u
else:
raise ValueError('Unknown op: {} in diff({}, {})'.format(op, y, x))
def simp(x):
"""Simplify the expression x."""
if isnumber(x) or not x.args:
return x
args = list(map(simp, x.args))
u, op, v = args[0], x.op, args[-1]
if op == '+':
if v == 0:
return u
if u == 0:
return v
if u == v:
return 2 * u
if u == -v or v == -u:
return 0
elif op == '-' and len(args) == 1:
if u.op == '-' and len(u.args) == 1:
return u.args[0] # --y ==> y
elif op == '-':
if v == 0:
return u
if u == 0:
return -v
if u == v:
return 0
if u == -v or v == -u:
return 0
elif op == '*':
if u == 0 or v == 0:
return 0
if u == 1:
return v
if v == 1:
return u
if u == v:
return u ** 2
elif op == '/':
if u == 0:
return 0
if v == 0:
return Expr('Undefined')
if u == v:
return 1
if u == -v or v == -u:
return 0
elif op == '**':
if u == 0:
return 0
if v == 0:
return 1
if u == 1:
return 1
if v == 1:
return u
elif op == 'log':
if u == 1:
return 0
else:
raise ValueError('Unknown op: ' + op)
# If we fall through to here, we can not simplify further
return Expr(op, *args)
def d(y, x):
"""Differentiate and then simplify.
>>> d(x * x - x, x)
((2 * x) - 1)
"""
return simp(diff(y, x))
|
aimacode/aima-python
|
logic.py
|
Python
|
mit
| 74,057
|
[
"VisIt"
] |
6256ee9a00b76d00d3c92e6007794349e6619cc4433b77ce1f98f9c2ddab1691
|
from itertools import cycle
import random
import math
import sys
from Net4 import *
import pygame
from pygame.locals import *
from pickle import HIGHEST_PROTOCOL
from pickle import dump
FPS = float("inf")
SCREENWIDTH = 288
SCREENHEIGHT = 512
# amount by which base can maximum shift to left
PIPEGAPSIZE = 100 # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79
# image, sound and hitmask dicts
IMAGES, SOUNDS, HITMASKS = {}, {}, {}
POPULATION_SIZE = 50
GRAPHICS = True
TEXT = True
networks = [Network([[],[],[],[],[]]) for i in range(POPULATION_SIZE)]
def KillNets(networks, fitnesses, grad=2):
# Python 3: zip() is lazy and (fitness, network) tuples may not be orderable,
# so sort an explicit list by fitness, ascending
yx = sorted(zip(fitnesses, networks), key=lambda pair: pair[0])
s = [x for (y, x) in yx]
for i in range(POPULATION_SIZE // 2):  # integer division under Python 3
rand = random.random()**grad
index = int(rand*len(s))
s.pop(index)
return s
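# Culling sketch: after the ascending fitness sort, rand**grad is biased toward
# 0, so pop(index) preferentially removes low-fitness networks; a larger grad
# makes the selection pressure stronger.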
def DivNets(networks):
output = []
for network in networks:
parent, child = DivNet(network)
output.extend([parent, child])
return output
for network in networks:
network.nextupdates = set(list(network.neurons))
for i in range(5):
# 'mutations' is presumably the mutation-function -> probability map from Net4
mutation = random.choice(list(mutations.keys()))
if random.random() < mutations[mutation]:
mutation(network)
# list of all possible players (tuple of 3 positions of flap)
PLAYERS_LIST = (
# red bird
(
'assets/sprites/redbird-upflap.png',
'assets/sprites/redbird-midflap.png',
'assets/sprites/redbird-downflap.png',
),
# blue bird
(
'assets/sprites/bluebird-upflap.png',
'assets/sprites/bluebird-midflap.png',
'assets/sprites/bluebird-downflap.png',
),
# yellow bird
(
'assets/sprites/yellowbird-upflap.png',
'assets/sprites/yellowbird-midflap.png',
'assets/sprites/yellowbird-downflap.png',
),
)
# list of backgrounds
BACKGROUNDS_LIST = (
'assets/sprites/background-day.png',
'assets/sprites/background-night.png',
)
# list of pipes
PIPES_LIST = (
'assets/sprites/pipe-green.png',
'assets/sprites/pipe-red.png',
)
def main():
global SCREEN, FPSCLOCK, networks, GRAPHICS, TEXT, FPS
pygame.init()
FPSCLOCK = pygame.time.Clock()
SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
pygame.display.set_caption('Flappy Bird')
# numbers sprites for score display
IMAGES['numbers'] = (
pygame.image.load('assets/sprites/0.png').convert_alpha(),
pygame.image.load('assets/sprites/1.png').convert_alpha(),
pygame.image.load('assets/sprites/2.png').convert_alpha(),
pygame.image.load('assets/sprites/3.png').convert_alpha(),
pygame.image.load('assets/sprites/4.png').convert_alpha(),
pygame.image.load('assets/sprites/5.png').convert_alpha(),
pygame.image.load('assets/sprites/6.png').convert_alpha(),
pygame.image.load('assets/sprites/7.png').convert_alpha(),
pygame.image.load('assets/sprites/8.png').convert_alpha(),
pygame.image.load('assets/sprites/9.png').convert_alpha()
)
# game over sprite
IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha()
# message sprite for welcome screen
IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha()
# base (ground) sprite
IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha()
# sounds
# test the prefix: 'win' in sys.platform would also match 'darwin' (macOS)
if sys.platform.startswith('win'):
soundExt = '.wav'
else:
soundExt = '.ogg'
SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt)
SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt)
SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt)
SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt)
SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt)
gen = 0
while True:
org = 0
fit = []
for network in networks:
if TEXT:
print "Generation "+str(gen)+" Organism "+str(org)
network.nextupdates = set(network.neurons)
for neuron in network.neurons:
neuron.initiallastout = neuron.lastout
fitness = Test(network)
for neuron in network.neurons:
neuron.lastout = neuron.initiallastout
if TEXT:
print "Fitness: "+str(fitness)
fit.append(fitness)
org += 1
maxfitness = max(fit)
if TEXT:
print "--------"
print "Gen Fitness: "+str(maxfitness)
print "--------"
with open("NETWORK", "wb") as output:
dump(networks[fit.index(maxfitness)], output, HIGHEST_PROTOCOL)
networks = KillNets(networks, fit)
networks = DivNets(networks)
random.shuffle(networks)
gen += 1
def Test(network):
# select random background sprites
randBg = 0
IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert()
# select random player sprites
randPlayer = 0
IMAGES['player'] = (
pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(),
)
# select random pipe sprites
pipeindex = random.randint(0, len(PIPES_LIST) - 1)
IMAGES['pipe'] = (
pygame.transform.rotate(
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180),
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(),
)
# hitmask for pipes
HITMASKS['pipe'] = (
getHitmask(IMAGES['pipe'][0]),
getHitmask(IMAGES['pipe'][1]),
)
# hitmask for player
HITMASKS['player'] = (
getHitmask(IMAGES['player'][0]),
getHitmask(IMAGES['player'][1]),
getHitmask(IMAGES['player'][2]),
)
return mainGame({"playery":25, "basex": 50, "playerIndexGen":cycle([0, 1, 2, 1])}, network)
def showWelcomeAnimation():
"""Shows welcome screen animation of flappy bird"""
# index of player to blit on screen
playerIndex = 0
playerIndexGen = cycle([0, 1, 2, 1])
# iterator used to change playerIndex after every 5th iteration
loopIter = 0
playerx = int(SCREENWIDTH * 0.2)
playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2)
messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) / 2)
messagey = int(SCREENHEIGHT * 0.12)
basex = 0
# amount by which base can maximum shift to left
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# player shm for up-down motion on welcome screen
playerShmVals = {'val': 0, 'dir': 1}
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
# make first flap sound and return values for mainGame
SOUNDS['wing'].play()
return {
'playery': playery + playerShmVals['val'],
'basex': basex,
'playerIndexGen': playerIndexGen,
}
# adjust playery, playerIndex, basex
if (loopIter + 1) % 5 == 0:
playerIndex = next(playerIndexGen)  # Python 3 iterator protocol
loopIter = (loopIter + 1) % 30
basex = -((-basex + 4) % baseShift)
playerShm(playerShmVals)
# draw sprites
SCREEN.blit(IMAGES['background'], (0,0))
SCREEN.blit(IMAGES['player'][playerIndex],
(playerx, playery + playerShmVals['val']))
SCREEN.blit(IMAGES['message'], (messagex, messagey))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
pygame.display.update()
FPSCLOCK.tick(FPS)
def mainGame(movementInfo, network):
global GRAPHICS, TEXT, FPS
fitness = 0
score = playerIndex = loopIter = 0
playerIndexGen = movementInfo['playerIndexGen']
playerx, playery = int(SCREENWIDTH * 0.2), SCREENHEIGHT // 2 - 100
basex = movementInfo['basex']
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# get new pipes to seed the upperPipes/lowerPipes lists (newPipe2 is unused)
newPipe1 = getRandomPipe()
newPipe2 = getRandomPipe()
# list of upper pipes
upperPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']},
]
# list of lowerpipe
lowerPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']},
]
pipeVelX = -4
# player velocity, max velocity, downward acceleration, acceleration on flap
playerVelY = 0 # player's velocity along Y, default same as playerFlapped
playerMaxVelY = 10 # max vel along Y, max descend speed
playerMinVelY = -8 # min vel along Y, max ascend speed
playerAccY = 1 # player's downward acceleration
playerFlapAcc = -9 # player's speed on flapping
playerFlapped = False # True when player flaps
threshold = True
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_s:
FPS = 30
elif event.key == K_f:
FPS = float("inf")
elif event.key == K_t:
TEXT = not TEXT
elif event.key == K_g:
GRAPHICS = not GRAPHICS
if len(network.neurons) >= 1:
if network.neurons[0].lastout <= 0.5:
threshold = True
elif network.neurons[0].lastout > 0.5 and threshold:
if playery > -2 * IMAGES['player'][0].get_height():
playerVelY = playerFlapAcc
playerFlapped = True
SOUNDS['wing'].play()
threshold = False
for ind in network.inputs1:
network.neurons[ind].lastout = (lowerPipes[-1]["y"]/2.0+upperPipes[-1]["y"]/2.0)/SCREENHEIGHT
for ind in network.inputs2:
network.neurons[ind].lastout = float(playery)/SCREENHEIGHT
# check for crash here
crashTest = checkCrash({'x': playerx, 'y': playery, 'index': playerIndex},
upperPipes, lowerPipes)
if crashTest[0]:
return fitness
# check for score
playerMidPos = playerx + IMAGES['player'][0].get_width() / 2
for pipe in upperPipes:
pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2
if pipeMidPos <= playerMidPos < pipeMidPos + 4:
score += 1
SOUNDS['point'].play()
newPipe = getRandomPipe()
upperPipes.append(newPipe[0])
lowerPipes.append(newPipe[1])
# playerIndex basex change
if (loopIter + 1) % 3 == 0:
playerIndex = next(playerIndexGen)
loopIter = (loopIter + 1) % 30
basex = -((-basex + 100) % baseShift)
# player's movement
if playerVelY < playerMaxVelY and not playerFlapped:
playerVelY += playerAccY
if playerFlapped:
playerFlapped = False
playerHeight = IMAGES['player'][playerIndex].get_height()
playery += min(playerVelY, BASEY - playery - playerHeight)
# move pipes to left
for uPipe, lPipe in zip(upperPipes, lowerPipes):
uPipe['x'] += pipeVelX
lPipe['x'] += pipeVelX
# add new pipe when first pipe is about to touch left of screen
# remove first pipe if it's out of the screen
if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width():
upperPipes.pop(0)
lowerPipes.pop(0)
# draw sprites
if GRAPHICS:
SCREEN.blit(IMAGES['background'], (0,0))
for uPipe, lPipe in zip(upperPipes, lowerPipes):
SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
# print score so player overlaps the score
showScore(score)
SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery))
fitness += 1
if GRAPHICS:
pygame.display.update()
FPSCLOCK.tick(FPS)
network.Step()
def showGameOverScreen(crashInfo):
"""crashes the player down and shows gameover image"""
score = crashInfo['score']
playerx = SCREENWIDTH * 0.2
playery = crashInfo['y']
playerHeight = IMAGES['player'][0].get_height()
playerVelY = crashInfo['playerVelY']
playerAccY = 2
basex = crashInfo['basex']
upperPipes, lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes']
# play hit and die sounds
SOUNDS['hit'].play()
if not crashInfo['groundCrash']:
SOUNDS['die'].play()
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
if playery + playerHeight >= BASEY - 1:
return
# player y shift
if playery + playerHeight < BASEY - 1:
playery += min(playerVelY, BASEY - playery - playerHeight)
# player velocity change
if playerVelY < 15:
playerVelY += playerAccY
# draw sprites
SCREEN.blit(IMAGES['background'], (0,0))
for uPipe, lPipe in zip(upperPipes, lowerPipes):
SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
showScore(score)
SCREEN.blit(IMAGES['player'][1], (playerx,playery))
FPSCLOCK.tick(FPS)
pygame.display.update()
def playerShm(playerShm):
"""oscillates the value of playerShm['val'] between 8 and -8"""
if abs(playerShm['val']) == 8:
playerShm['dir'] *= -1
if playerShm['dir'] == 1:
playerShm['val'] += 1
else:
playerShm['val'] -= 1
def getRandomPipe():
"""returns a randomly generated pipe"""
# y of gap between upper and lower pipe
gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE))
gapY += int(BASEY * 0.2)
pipeHeight = IMAGES['pipe'][0].get_height()
pipeX = SCREENWIDTH + 10
return [
{'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe
{'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe
]
def showScore(score):
"""displays score in center of screen"""
scoreDigits = [int(x) for x in list(str(score))]
totalWidth = 0 # total width of all numbers to be printed
for digit in scoreDigits:
totalWidth += IMAGES['numbers'][digit].get_width()
Xoffset = (SCREENWIDTH - totalWidth) / 2
for digit in scoreDigits:
SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1))
Xoffset += IMAGES['numbers'][digit].get_width()
def checkCrash(player, upperPipes, lowerPipes):
"""returns True if player collders with base or pipes."""
pi = player['index']
player['w'] = IMAGES['player'][0].get_width()
player['h'] = IMAGES['player'][0].get_height()
# if player crashes into ground
if player['y'] + player['h'] >= BASEY - 1:
return [True, True]
if player['y'] + player['h'] <= 0:
return [True, True]
else:
playerRect = pygame.Rect(player['x'], player['y'],
player['w'], player['h'])
pipeW = IMAGES['pipe'][0].get_width()
pipeH = IMAGES['pipe'][0].get_height()
for uPipe, lPipe in zip(upperPipes, lowerPipes):
# upper and lower pipe rects
uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)
lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)
# player and upper/lower pipe hitmasks
pHitMask = HITMASKS['player'][pi]
uHitmask = HITMASKS['pipe'][0]
lHitmask = HITMASKS['pipe'][1]
# if bird collided with upipe or lpipe
uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)
lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)
if uCollide or lCollide:
return [True, False]
return [False, False]
def pixelCollision(rect1, rect2, hitmask1, hitmask2):
"""Checks if two objects collide and not just their rects"""
rect = rect1.clip(rect2)
if rect.width == 0 or rect.height == 0:
return False
x1, y1 = rect.x - rect1.x, rect.y - rect1.y
x2, y2 = rect.x - rect2.x, rect.y - rect2.y
for x in range(rect.width):
for y in range(rect.height):
if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]:
return True
return False
def getHitmask(image):
"""returns a hitmask using an image's alpha."""
mask = []
for x in range(image.get_width()):
mask.append([])
for y in range(image.get_height()):
mask[x].append(bool(image.get_at((x,y))[3]))
return mask
if __name__ == '__main__':
main()
|
computergeek314/FlapPy-Network
|
flappy.py
|
Python
|
mit
| 15,318
|
[
"NEURON"
] |
2fea58b81a037740453ae7f93236ac652b0362026fd533c224db824d1ccb25a6
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import copy
from functools import wraps
import numpy as np
import pyLikelihood as pyLike
from SrcModel import SourceModel
from AnalysisBase import AnalysisBase
from LikelihoodState import LikelihoodState
import pyIrfLoader
pyIrfLoader.Loader_go()
_funcFactory = pyLike.SourceFactory_funcFactory()
import BinnedAnalysis
import SummedLikelihood
from fermipy import utils
from fermipy import model_utils
evtype_string = {
4: 'PSF0',
8: 'PSF1',
16: 'PSF2',
32: 'PSF3'
}
def bitmask_to_bits(mask):
bits = []
for i in range(32):
if mask & (2**i):
bits += [2**i]
return bits
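# Example: bitmask_to_bits(12) -> [4, 8], i.e. the PSF0 and PSF1 event types in
# the evtype_string table above.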
DEFAULT_SCALE_DICT = {'value': 1000.0,
'scale': 1.0, 'min': 0.001, 'max': 1000.0}
DEFAULT_NORM_DICT = {'value': 1E-12, 'scale': 1.0, 'min': 1E-5, 'max': 1000.0}
DEFAULT_INTEGRAL_DICT = {'value': 1E-6,
'scale': 1.0, 'min': 1E-5, 'max': 1000.0}
DEFAULT_INDEX_DICT = {'value': 2.0, 'scale': -1.0, 'min': 0.0, 'max': 5.0}
FUNCTION_NORM_PARS = {}
FUNCTION_PAR_NAMES = {}
FUNCTION_DEFAULT_PARS = {
'PowerLaw': {
'Index': DEFAULT_INDEX_DICT,
'Scale': DEFAULT_SCALE_DICT,
'Prefactor': DEFAULT_NORM_DICT},
'PowerLaw2': {
'Index': DEFAULT_INDEX_DICT,
'LowerLimit': {'value': 100.0, 'scale': 1.0, 'min': 20.0, 'max': 1000000.},
'UpperLimit': {'value': 100000.0, 'scale': 1.0, 'min': 20.0, 'max': 1000000.},
'Integral': DEFAULT_INTEGRAL_DICT},
'BrokenPowerLaw': {
'Index1': DEFAULT_INDEX_DICT,
'Index2': DEFAULT_INDEX_DICT,
'BreakValue': DEFAULT_SCALE_DICT,
'Prefactor': DEFAULT_NORM_DICT},
'BrokenPowerLaw2': {
'Index1': DEFAULT_INDEX_DICT,
'Index2': DEFAULT_INDEX_DICT,
'LowerLimit': {'value': 100.0, 'scale': 1.0, 'min': 20.0, 'max': 1000000.},
'UpperLimit': {'value': 100000.0, 'scale': 1.0, 'min': 20.0, 'max': 1000000.},
'BreakValue': DEFAULT_SCALE_DICT,
'Integral': DEFAULT_INTEGRAL_DICT},
'BPLExpCutoff': {
'Index1': DEFAULT_INDEX_DICT,
'Index2': DEFAULT_INDEX_DICT,
'BreakValue': DEFAULT_SCALE_DICT,
'Prefactor': DEFAULT_NORM_DICT},
'SmoothBrokenPowerLaw': {
'Index1': DEFAULT_INDEX_DICT,
'Index2': DEFAULT_INDEX_DICT,
'BreakValue': DEFAULT_SCALE_DICT,
'Prefactor': DEFAULT_NORM_DICT,
'Beta': {'value': 0.2, 'scale': 1.0, 'min': 0.01, 'max': 10.0}},
'PLSuperExpCutoff': {
'Cutoff': DEFAULT_SCALE_DICT,
'Index1': {'value': 2.0, 'scale': -1.0, 'min': 0.0, 'max': 5.0},
'Index2': {'value': 1.0, 'scale': 1.0, 'min': 0.0, 'max': 2.0},
'Prefactor': DEFAULT_NORM_DICT,
},
'LogParabola': {
'norm': DEFAULT_NORM_DICT,
'alpha': {'value': 2.0, 'scale': 1.0, 'min': -5.0, 'max': 5.0},
'beta': {'value': 0.0, 'scale': 1.0, 'min': -2.0, 'max': 2.0},
'Eb': DEFAULT_SCALE_DICT},
'SpatialMap': {
'Prefactor': {'value': 1.0, 'scale': 1.0, 'min': 1.0, 'max': 1.0}},
'ConstantValue': {
'Normalization': {'value': 1.0, 'scale': 1.0, 'min': 1E-5, 'max': 1000.0}},
'FileFunction': {
'Normalization': {'value': 1.0, 'scale': 1.0, 'min': 1E-5, 'max': 1000.0}},
'Gaussian': {
'Mean': {'value': 1000.0, 'scale': 1.0, 'min': 1E-5, 'max': 1E5},
'Sigma': {'value': 100.0, 'scale': 1.0, 'min': 10., 'max': 1E5},
'Prefactor': DEFAULT_NORM_DICT},
}
def init_function_pars():
global FUNCTION_PAR_NAMES
global FUNCTION_NORM_PARS
global FUNCTION_DEFAULT_PARS
FUNCTION_PAR_NAMES = {}
FUNCTION_NORM_PARS = {}
funcFactory = pyLike.SourceFactory_funcFactory()
names = pyLike.StringVector()
funcFactory.getFunctionNames(names)
for fname in names:
pars = FUNCTION_DEFAULT_PARS.setdefault(fname, {})
par_names = FUNCTION_PAR_NAMES.setdefault(fname, [])
if 'EblAtten' in fname and fname[len('EblAtten::'):] in FUNCTION_DEFAULT_PARS:
pars.update(FUNCTION_DEFAULT_PARS[fname[len('EblAtten::'):]])
fn = funcFactory.create(fname)
try:
FUNCTION_NORM_PARS[fname] = fn.normPar().getName()
except Exception:
FUNCTION_NORM_PARS[fname] = None
params = pyLike.ParameterVector()
fn.getParams(params)
for i, p in enumerate(params):
pname = p.getName()
par_names += [pname]
if pname == 'Scale':
pars.setdefault(pname, DEFAULT_SCALE_DICT)
elif pname == 'Prefactor':
pars.setdefault(pname, DEFAULT_NORM_DICT)
else:
pars.setdefault(pname, {})
bounds = p.getBounds()
par_dict = dict(name=pname,
value=p.getValue(),
min=bounds[0],
max=bounds[1],
scale=1.0,
free=False)
par_dict.update(copy.deepcopy(pars[pname]))
par_dict['name'] = pname
pars[pname] = par_dict
def get_function_par_names(function_type):
if not FUNCTION_NORM_PARS:
init_function_pars()
if function_type not in FUNCTION_PAR_NAMES:
raise Exception('Invalid Function Type: %s' % function_type)
return copy.deepcopy(FUNCTION_PAR_NAMES[function_type])
def get_function_norm_par_name(function_type):
if not FUNCTION_NORM_PARS:
init_function_pars()
return FUNCTION_NORM_PARS[function_type]
def get_function_defaults(function_type):
if not FUNCTION_NORM_PARS:
init_function_pars()
return copy.deepcopy(FUNCTION_DEFAULT_PARS[function_type])
def build_piecewise_powerlaw(fn, spectral_pars):
ppl = pyLike.PiecewisePowerLaw.cast(fn)
index_l = spectral_pars['IndexL']['value']
index_h = spectral_pars['IndexH']['value']
i = 0
energies = pyLike.DoubleVector()
dndes = pyLike.DoubleVector()
while True:
try:
energy = spectral_pars['Energy%i'%i]['value']
dnde = spectral_pars['dNdE%i'%i]['value']
energies.push_back(energy)
dndes.push_back(dnde)
i += 1
except KeyError:
break
ppl.addParams(index_l, index_h, dndes, energies)
def create_spectrum_from_dict(spectrum_type, spectral_pars, fn=None):
"""Create a Function object from a parameter dictionary.
Parameters
----------
spectrum_type : str
String identifying the spectrum type (e.g. PowerLaw).
spectral_pars : dict
Dictionary of spectral parameters.
"""
if fn is None:
fn = pyLike.SourceFactory_funcFactory().create(str(spectrum_type))
if spectrum_type == 'PiecewisePowerLaw':
build_piecewise_powerlaw(fn, spectral_pars)
for k, v in spectral_pars.items():
v.setdefault('scale', 1.0)
v.setdefault('min', v['value'] * 1E-3)
v.setdefault('max', v['value'] * 1E3)
par = fn.getParam(str(k))
vmin = min(float(v['value']), float(v['min']))
vmax = max(float(v['value']), float(v['max']))
par.setValue(float(v['value']))
par.setBounds(vmin, vmax)
par.setScale(float(v['scale']))
if 'free' in v and int(v['free']) != 0:
par.setFree(True)
else:
par.setFree(False)
fn.setParam(par)
return fn
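# Usage sketch (hypothetical parameter values; keys mirror FUNCTION_DEFAULT_PARS):
#     pars = {'Prefactor': {'value': 1e-12, 'scale': 1.0, 'free': 1},
#             'Index': {'value': 2.0, 'scale': -1.0},
#             'Scale': {'value': 1000.0}}
#     fn = create_spectrum_from_dict('PowerLaw', pars)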
def gtlike_spectrum_to_dict(spectrum):
""" Convert a pyLikelihood object to a python dictionary which can
be easily saved to a file."""
parameters = pyLike.ParameterVector()
spectrum.getParams(parameters)
d = dict(spectrum_type=spectrum.genericName())
for p in parameters:
pname = p.getName()
pval = p.getTrueValue()
perr = abs(p.error() * p.getScale()) if p.isFree() else np.nan
d[pname] = np.array([pval, perr])
if d['spectrum_type'] == 'FileFunction':
ff = pyLike.FileFunction_cast(spectrum)
d['file'] = ff.filename()
return d
def gtlike_spectrum_to_vectors(spectrum):
""" Convert a pyLikelihood object to a python dictionary which can
be easily saved to a file."""
parameters = pyLike.ParameterVector()
spectrum.getParams(parameters)
npar = max(parameters.size(), 10)
o = {'param_names': np.zeros(npar, dtype='S32'),
'param_values': np.empty(npar, dtype=float) * np.nan,
'param_errors': np.empty(npar, dtype=float) * np.nan,
}
for i, p in enumerate(parameters):
o['param_names'][i] = p.getName()
o['param_values'][i] = p.getTrueValue()
perr = abs(p.error() * p.getScale()) if p.isFree() else np.nan
o['param_errors'][i] = perr
return o
def get_function_pars_dict(fn):
pars = get_function_pars(fn)
pars_dict = {p['name']: p for p in pars}
return pars_dict
def get_function_pars(fn):
"""Extract the parameters of a pyLikelihood function object
(value, scale, bounds).
Parameters
----------
fn : pyLikelihood.Function
Returns
-------
pars : list
"""
pars = []
par_names = pyLike.StringVector()
fn.getParamNames(par_names)
for pname in par_names:
par = fn.getParam(pname)
bounds = par.getBounds()
perr = par.error() if par.isFree() else np.nan
pars += [dict(name=pname,
value=par.getValue(),
error=perr,
min=bounds[0],
max=bounds[1],
free=par.isFree(),
scale=par.getScale())]
return pars
def get_params_dict(like):
params = get_params(like)
params_dict = {}
for p in params:
params_dict.setdefault(p['src_name'], [])
params_dict[p['src_name']] += [p]
return params_dict
def get_params(like):
params = []
for src_name in like.sourceNames():
src = like[src_name].src
spars, ppars = get_source_pars(src)
for p in spars:
p['src_name'] = src_name
params += [p]
for p in ppars:
p['src_name'] = src_name
params += [p]
return params
def get_priors(like):
"""Extract priors from a likelihood object."""
npar = len(like.params())
vals = np.ones(npar)
errs = np.ones(npar)
has_prior = np.array([False] * npar)
for i, p in enumerate(like.params()):
prior = like[i].log_prior()
if prior is None:
continue
par_names = pyLike.StringVector()
prior.getParamNames(par_names)
if 'Mean' not in par_names:
raise Exception('Failed to find Mean in prior parameters.')
if 'Sigma' not in par_names:
raise Exception('Failed to find Sigma in prior parameters.')
for t in par_names:
if t == 'Mean':
vals[i] = prior.parameter(t).getValue()
if t == 'Sigma':
errs[i] = prior.parameter(t).getValue()
has_prior[i] = True
return vals, errs, has_prior
def get_source_pars(src):
"""Extract the parameters associated with a pyLikelihood Source object.
"""
fnmap = src.getSrcFuncs()
keys = fnmap.keys()
if 'Position' in keys:
ppars = get_function_pars(src.getSrcFuncs()[str('Position')])
elif 'SpatialDist' in keys:
ppars = get_function_pars(src.getSrcFuncs()[str('SpatialDist')])
else:
raise Exception('Failed to extract spatial parameters.')
fn = src.getSrcFuncs()[str('Spectrum')]
spars = get_function_pars(fn)
for i, p in enumerate(ppars):
ppars[i]['is_norm'] = False
for i, p in enumerate(spars):
if fn.normPar().getName() == p['name']:
spars[i]['is_norm'] = True
else:
spars[i]['is_norm'] = False
return spars, ppars
def savefreestate(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
free_params = self.get_free_param_vector()
o = func(self, *args, **kwargs)
self.set_free_param_vector(free_params)
return o
return wrapper
def savestate(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
saved_state = LikelihoodState(self.like)
o = func(self, *args, **kwargs)
saved_state.restore()
return o
return wrapper
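# Usage sketch: both decorators are intended for methods of an analysis object
# that exposes get/set_free_param_vector or a .like attribute, e.g.
#     @savestate
#     def profile_source(self, name):
#         ...  # any fits performed here are rolled back on return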
class FreeParameterState(object):
def __init__(self, gta):
self._gta = gta
self._free = gta.get_free_param_vector()
def restore(self):
self._gta.set_free_param_vector(self._free)
class SourceMapState(object):
def __init__(self, like, names):
self._srcmaps = {}
self._like = like
for name in names:
self._srcmaps[name] = []
for c in self._like.components:
self._srcmaps[name] += [c.logLike.sourceMap(str(name)).model()]
def restore(self):
for name in self._srcmaps.keys():
for i, c in enumerate(self._like.components):
c.logLike.setSourceMapImage(str(name),
self._srcmaps[name][i])
class SummedLikelihood(SummedLikelihood.SummedLikelihood):
def nFreeParams(self):
"""Count the number of free parameters in the active model."""
nF = 0
pars = self.params()
for par in pars:
if par.isFree():
nF += 1
return nF
def optimize(self, verbosity=3, tol=None, optimizer=None, optObject=None):
self._syncParams()
if optimizer is None:
optimizer = self.optimizer
if tol is None:
tol = self.tol
if optObject is None:
optFactory = pyLike.OptimizerFactory_instance()
myOpt = optFactory.create(optimizer, self.logLike)
else:
myOpt = optObject
myOpt.find_min_only(verbosity, tol, self.tolType)
self.saveBestFit()
def Ts2(self, srcName, reoptimize=False, approx=True,
tol=None, MaxIterations=10, verbosity=0):
srcName = str(srcName)
if verbosity > 0:
print("*** Start Ts_dl ***")
source_attributes = self.components[0].getExtraSourceAttributes()
self.syncSrcParams()
freeParams = pyLike.DoubleVector()
self.components[0].logLike.getFreeParamValues(freeParams)
logLike1 = -self()
for comp in self.components:
comp.scaleSource(srcName, 1E-10)
comp._ts_src = comp.logLike.getSource(srcName)
free_flag = comp._ts_src.spectrum().normPar().isFree()
if reoptimize:
comp._ts_src.spectrum().normPar().setFree(False)
self.syncSrcParams()
logLike0 = -self()
if tol is None:
tol = self.tol
if reoptimize:
if verbosity > 0:
print("** Do reoptimize")
optFactory = pyLike.OptimizerFactory_instance()
myOpt = optFactory.create(self.optimizer, self.composite)
Niter = 1
while Niter <= MaxIterations:
try:
myOpt.find_min(0, tol)
break
except RuntimeError as e:
print(e)
if verbosity > 0:
print("** Iteration :", Niter)
Niter += 1
else:
if approx:
try:
self._renorm()
except ZeroDivisionError:
pass
self.syncSrcParams()
logLike0 = max(-self(), logLike0)
Ts_value = 2 * (logLike1 - logLike0)
for comp in self.components:
comp.scaleSource(srcName, 1E10)
if reoptimize:
comp._ts_src.spectrum().normPar().setFree(free_flag)
self.syncSrcParams(srcName)
comp.logLike.setFreeParamValues(freeParams)
comp.model = SourceModel(comp.logLike)
for src in source_attributes:
comp.model[src].__dict__.update(source_attributes[src])
self.model = self.components[0].model
return Ts_value
def _renorm(self, factor=None):
if factor is None:
freeNpred, totalNpred = self._npredValues()
deficit = self.total_nobs() - totalNpred
self.renormFactor = 1. + deficit / freeNpred
else:
self.renormFactor = factor
if self.renormFactor < 1:
self.renormFactor = 1
srcNames = self.sourceNames()
for src in srcNames:
if src == self.components[0]._ts_src.getName():
continue
parameter = self.normPar(src)
if (parameter.isFree() and
self.components[0]._isDiffuseOrNearby(src)):
oldValue = parameter.getValue()
newValue = oldValue * self.renormFactor
# ensure new value is within parameter bounds
xmin, xmax = parameter.getBounds()
if xmin <= newValue <= xmax:
parameter.setValue(newValue)
class BinnedAnalysis(BinnedAnalysis.BinnedAnalysis):
def __init__(self, binnedData, srcModel=None, optimizer='Drmngb',
use_bl2=False, verbosity=0, psfcorr=True, convolve=True,
resample=True, resamp_fact=2, minbinsz=0.1, wmap=None):
AnalysisBase.__init__(self)
if srcModel is None:
srcModel, optimizer = self._srcDialog()
self.binnedData = binnedData
self.srcModel = srcModel
self.optimizer = optimizer
if use_bl2:
self.logLike = pyLike.BinnedLikelihood2(binnedData.countsMap,
binnedData.observation,
binnedData.srcMaps,
True, psfcorr, convolve,
resample,
resamp_fact,
minbinsz)
else:
if wmap is None or wmap == "none":
self.logLike = pyLike.BinnedLikelihood(binnedData.countsMap,
binnedData.observation,
binnedData.srcMaps,
True, psfcorr, convolve,
resample,
resamp_fact,
minbinsz)
self._wmap = None
else:
self._wmap = pyLike.WcsMapLibrary.instance().wcsmap(wmap, "")
self._wmap.setInterpolation(False)
self._wmap.setExtrapolation(True)
self.logLike = pyLike.BinnedLikelihood(binnedData.countsMap,
self._wmap,
binnedData.observation,
binnedData.srcMaps,
True, psfcorr, convolve,
resample,
resamp_fact,
minbinsz)
self.verbosity = verbosity
self.logLike.initOutputStreams()
self.logLike.readXml(srcModel, _funcFactory, False, True, False)
self.model = SourceModel(self.logLike, srcModel)
self.energies = np.array(self.logLike.energies())
self.e_vals = np.sqrt(self.energies[:-1] * self.energies[1:])
self.nobs = self.logLike.countsSpectrum()
self.sourceFitPlots = []
self.sourceFitResids = []
def scaleSource(self, srcName, scale):
src = self.logLike.getSource(srcName)
old_scale = src.spectrum().normPar().getScale()
src.spectrum().normPar().setScale(old_scale * scale)
self.logLike.syncParams()
def Ts2(self, srcName, reoptimize=False, approx=True,
tol=None, MaxIterations=10, verbosity=0):
"""Computes the TS value for a source indicated by "srcName."
If "reoptimize=True" is selected this function will reoptimize
the model up to "MaxIterations" given the tolerance "tol"
(default is the tolerance selected for the overall fit). If
"appox=True" is selected (the default) it will renormalize the
model (see _renorm).
"""
saved_state = LikelihoodState(self)
if verbosity > 0:
print("*** Start Ts_dl ***")
source_attributes = self.getExtraSourceAttributes()
self.logLike.syncParams()
src = self.logLike.getSource(srcName)
self._ts_src = src
freeParams = pyLike.DoubleVector()
self.logLike.getFreeParamValues(freeParams)
logLike1 = self.logLike.value()
self.scaleSource(srcName, 1E-10)
logLike0 = self.logLike.value()
if tol is None:
tol = self.tol
if reoptimize:
if verbosity > 0:
print("** Do reoptimize")
optFactory = pyLike.OptimizerFactory_instance()
myOpt = optFactory.create(self.optimizer, self.logLike)
Niter = 1
while Niter <= MaxIterations:
try:
myOpt.find_min(0, tol)
break
except RuntimeError as e:
print(e)
if verbosity > 0:
print("** Iteration :", Niter)
Niter += 1
else:
if approx:
try:
self._renorm()
except ZeroDivisionError:
pass
self.logLike.syncParams()
logLike0 = max(self.logLike.value(), logLike0)
Ts_value = 2 * (logLike1 - logLike0)
self.scaleSource(srcName, 1E10)
self.logLike.setFreeParamValues(freeParams)
self.model = SourceModel(self.logLike)
for src in source_attributes:
self.model[src].__dict__.update(source_attributes[src])
saved_state.restore()
self.logLike.value()
return Ts_value
def _isDiffuseOrNearby(self, srcName):
if (self[srcName].src.getType() in ['Diffuse','Composite'] or
self._ts_src.getType() in ['Diffuse','Composite']):
return True
elif self._separation(self._ts_src, self[srcName].src) < self.maxdist:
return True
return False
|
jefemagril/fermipy
|
fermipy/gtutils.py
|
Python
|
bsd-3-clause
| 22,978
|
[
"Gaussian"
] |
703d999f037accfdea9dc70f0539cbb7296eb6db8619db21c1d2abc112ea69c6
|
# Copyright (c) 2016,2017,2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Contains a collection of generally useful calculation tools."""
import functools
from inspect import signature
from operator import itemgetter
import warnings
import numpy as np
from numpy.core.numeric import normalize_axis_index
import numpy.ma as ma
from scipy.spatial import cKDTree
import xarray as xr
from ..cbook import broadcast_indices, result_type
from ..interpolate import interpolate_1d, log_interpolate_1d
from ..package_tools import Exporter
from ..units import check_units, concatenate, units
from ..xarray import check_axis, preprocess_xarray
exporter = Exporter(globals())
UND = 'UND'
UND_ANGLE = -999.
DIR_STRS = (
'N', 'NNE', 'NE', 'ENE',
'E', 'ESE', 'SE', 'SSE',
'S', 'SSW', 'SW', 'WSW',
'W', 'WNW', 'NW', 'NNW',
UND
) # note the order matters!
MAX_DEGREE_ANGLE = 360 * units.degree
BASE_DEGREE_MULTIPLIER = 22.5 * units.degree
DIR_DICT = {dir_str: i * BASE_DEGREE_MULTIPLIER for i, dir_str in enumerate(DIR_STRS)}
DIR_DICT[UND] = np.nan
@exporter.export
@preprocess_xarray
def resample_nn_1d(a, centers):
"""Return one-dimensional nearest-neighbor indexes based on user-specified centers.
Parameters
----------
a : array-like
1-dimensional array of numeric values from which to
extract indexes of nearest-neighbors
centers : array-like
1-dimensional array of numeric values representing a subset of values to approximate
Returns
-------
An array of indexes representing values closest to given array values
"""
ix = []
for center in centers:
index = (np.abs(a - center)).argmin()
if index not in ix:
ix.append(index)
return ix
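# Editor's note: an illustrative sketch (hypothetical values, not from the
# original source). Each center maps to the index of its nearest value in `a`:
#   resample_nn_1d(np.array([0., 1., 2., 3.]), np.array([0.9, 2.2]))  # -> [1, 2]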
@exporter.export
@preprocess_xarray
def nearest_intersection_idx(a, b):
"""Determine the index of the point just before two lines with common x values.
Parameters
----------
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
Returns
-------
An array of indexes representing the index of the values
just before the intersection(s) of the two lines.
"""
# Difference in the two y-value sets
difference = a - b
# Determine the point just before the intersection of the lines
# Will return multiple points for multiple intersections
sign_change_idx, = np.nonzero(np.diff(np.sign(difference)))
return sign_change_idx
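# Editor's note: an illustrative sketch (hypothetical values). With
# a = np.array([0., 2., 4.]) and b = np.array([3., 3., 3.]), the sign of a - b
# flips between indices 1 and 2, so the function returns array([1]).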
@exporter.export
@preprocess_xarray
@units.wraps(('=A', '=B'), ('=A', '=B', '=B', None, None))
def find_intersections(x, a, b, direction='all', log_x=False):
"""Calculate the best estimate of intersection.
Calculates the best estimates of the intersection of two y-value
data sets that share a common x-value set.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
direction : string, optional
specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),
or 'decreasing' (b becoming greater than a). Defaults to 'all'.
log_x : bool, optional
Use logarithmic interpolation along the `x` axis (i.e. for finding intersections
in pressure coordinates). Default is False.
Returns
-------
A tuple (x, y) of array-like with the x and y coordinates of the
intersections of the lines.
"""
# Change x to logarithmic if log_x=True
if log_x is True:
x = np.log(x)
# Find the index of the points just before the intersection(s)
nearest_idx = nearest_intersection_idx(a, b)
next_idx = nearest_idx + 1
# Determine the sign of the change
sign_change = np.sign(a[next_idx] - b[next_idx])
# x-values around each intersection
_, x0 = _next_non_masked_element(x, nearest_idx)
_, x1 = _next_non_masked_element(x, next_idx)
# y-values around each intersection for the first line
_, a0 = _next_non_masked_element(a, nearest_idx)
_, a1 = _next_non_masked_element(a, next_idx)
# y-values around each intersection for the second line
_, b0 = _next_non_masked_element(b, nearest_idx)
_, b1 = _next_non_masked_element(b, next_idx)
# Calculate the x-intersection. This comes from finding the equations of the two lines,
# one through (x0, a0) and (x1, a1) and the other through (x0, b0) and (x1, b1),
# finding their intersection, and reducing with a bunch of algebra.
delta_y0 = a0 - b0
delta_y1 = a1 - b1
intersect_x = (delta_y1 * x0 - delta_y0 * x1) / (delta_y1 - delta_y0)
# Calculate the y-intersection of the lines. Just plug the x above into the equation
# for the line through the a points. One could solve for y like x above, but this
# causes weirder unit behavior and seems a little less good numerically.
intersect_y = ((intersect_x - x0) / (x1 - x0)) * (a1 - a0) + a0
# If there are no intersections, return early
if len(intersect_x) == 0:
return intersect_x, intersect_y
# Return x to linear if log_x is True
if log_x is True:
intersect_x = np.exp(intersect_x)
# Check for duplicates
duplicate_mask = (np.ediff1d(intersect_x, to_end=1) != 0)
# Make a mask based on the direction of sign change desired
if direction == 'increasing':
mask = sign_change > 0
elif direction == 'decreasing':
mask = sign_change < 0
elif direction == 'all':
return intersect_x[duplicate_mask], intersect_y[duplicate_mask]
else:
raise ValueError('Unknown option for direction: {0}'.format(str(direction)))
return intersect_x[mask & duplicate_mask], intersect_y[mask & duplicate_mask]
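# Editor's note: a minimal worked sketch with hypothetical inputs. The lines
# a = 2x and b = 3 sampled on x = [0, 1, 2] cross at (1.5, 3.0):
#   find_intersections(np.array([0., 1., 2.]) * units.m,
#                      np.array([0., 2., 4.]) * units.K,
#                      np.array([3., 3., 3.]) * units.K)
#   # -> (array([1.5]) in meters, array([3.]) in kelvin)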
def _next_non_masked_element(a, idx):
"""Return the next non masked element of a masked array.
If an array is masked, return the next non-masked element (if the given index is masked).
If no other unmasked points are after the given masked point, returns none.
Parameters
----------
a : array-like
1-dimensional array of numeric values
idx : integer
index of requested element
Returns
-------
Index of next non-masked element and next non-masked element
"""
try:
next_idx = idx + a[idx:].mask.argmin()
if ma.is_masked(a[next_idx]):
return None, None
else:
return next_idx, a[next_idx]
except (AttributeError, TypeError, IndexError):
return idx, a[idx]
def _delete_masked_points(*arrs):
"""Delete masked points from arrays.
Takes arrays and removes masked points to help with calculations and plotting.
Parameters
----------
arrs : one or more array-like
source arrays
Returns
-------
arrs : one or more array-like
arrays with masked elements removed
"""
if any(hasattr(a, 'mask') for a in arrs):
keep = ~functools.reduce(np.logical_or, (np.ma.getmaskarray(a) for a in arrs))
return tuple(ma.asarray(a[keep]) for a in arrs)
else:
return arrs
@exporter.export
@preprocess_xarray
def reduce_point_density(points, radius, priority=None):
r"""Return a mask to reduce the density of points in irregularly-spaced data.
This function is used to down-sample a collection of scattered points (e.g. surface
data), returning a mask that can be used to select the points from one or more arrays
(e.g. arrays of temperature and dew point). The points selected can be controlled by
providing an array of ``priority`` values (e.g. rainfall totals to ensure that
stations with higher precipitation remain in the mask). The points and radius can be
specified with units. If none are provided, meters are assumed.
Parameters
----------
points : (N, K) array-like
N locations of the points in K dimensional space
radius : `pint.Quantity` or float
Minimum radius allowed between points. If units are not provided, meters is assumed.
priority : (N,) array-like, optional
If given, this should have the same length as ``points``; these values will
be used to control selection priority for points.
Returns
-------
(N,) array-like of boolean values indicating whether points should be kept. This
can be used directly to index numpy arrays to return only the desired points.
Examples
--------
>>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.)
array([ True, False, True])
>>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.,
... priority=np.array([0.1, 0.9, 0.3]))
array([False, True, False])
"""
# Handle input with units. Assume meters if units are not specified
if hasattr(radius, 'units'):
radius = radius.to('m').m
if hasattr(points, 'units'):
points = points.to('m').m
# Handle 1D input
if points.ndim < 2:
points = points.reshape(-1, 1)
# Make a kd-tree to speed searching of data.
tree = cKDTree(points)
# Need to use sorted indices rather than sorting the position
# so that the keep mask matches *original* order.
if priority is not None:
# Need to sort the locations in decreasing priority.
sorted_indices = np.argsort(priority)[::-1]
else:
# Take advantage of iterator nature of range here to avoid making big lists
sorted_indices = range(len(points))
# Keep all points initially
keep = np.ones(len(points), dtype=bool)  # np.bool is removed in newer numpy
# Loop over all the potential points
for ind in sorted_indices:
# Only proceed if we haven't already excluded this point
if keep[ind]:
# Find the neighbors and eliminate them
neighbors = tree.query_ball_point(points[ind], radius)
keep[neighbors] = False
# We just removed ourselves, so undo that
keep[ind] = True
return keep
def _get_bound_pressure_height(pressure, bound, height=None, interpolate=True):
"""Calculate the bounding pressure and height in a layer.
Given pressure, optional heights and a bound, return either the closest pressure/height
or interpolated pressure/height. If no heights are provided, a standard atmosphere
([NOAA1976]_) is assumed.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressures
bound : `pint.Quantity`
Bound to retrieve (in pressure or height)
height : `pint.Quantity`, optional
Atmospheric heights associated with the pressure levels. Defaults to using
heights calculated from ``pressure`` assuming a standard atmosphere.
interpolate : boolean, optional
Interpolate the bound or return the nearest. Defaults to True.
Returns
-------
`pint.Quantity`
The bound pressure and height.
"""
# avoid circular import if basic.py ever imports something from tools.py
from .basic import height_to_pressure_std, pressure_to_height_std
# Make sure pressure is monotonically decreasing
sort_inds = np.argsort(pressure)[::-1]
pressure = pressure[sort_inds]
if height is not None:
height = height[sort_inds]
# Bound is given in pressure
if bound.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}:
# If the bound is in the pressure data, we know the pressure bound exactly
if bound in pressure:
bound_pressure = bound
# If we have heights, we know the exact height value, otherwise return standard
# atmosphere height for the pressure
if height is not None:
bound_height = height[pressure == bound_pressure]
else:
bound_height = pressure_to_height_std(bound_pressure)
# If bound is not in the data, return the nearest or interpolated values
else:
if interpolate:
bound_pressure = bound # Use the user specified bound
if height is not None: # Interpolate heights from the height data
bound_height = log_interpolate_1d(bound_pressure, pressure, height)
else: # If not heights given, use the standard atmosphere
bound_height = pressure_to_height_std(bound_pressure)
else: # No interpolation, find the closest values
idx = (np.abs(pressure - bound)).argmin()
bound_pressure = pressure[idx]
if height is not None:
bound_height = height[idx]
else:
bound_height = pressure_to_height_std(bound_pressure)
# Bound is given in height
elif bound.dimensionality == {'[length]': 1.0}:
# If there is height data, see if we have the bound or need to interpolate/find nearest
if height is not None:
if bound in height: # Bound is in the height data
bound_height = bound
bound_pressure = pressure[height == bound]
else: # Bound is not in the data
if interpolate:
bound_height = bound
# Need to cast back to the input type since interp (up to at least numpy
# 1.13) always returns float64. This can cause problems for upstream users,
# e.g. leading something like np.append() to upcast.
bound_pressure = (np.interp(np.atleast_1d(bound.m), height.m,
pressure.m).astype(result_type(bound))
* pressure.units)
else:
idx = (np.abs(height - bound)).argmin()
bound_pressure = pressure[idx]
bound_height = height[idx]
else: # Don't have heights, so assume a standard atmosphere
bound_height = bound
bound_pressure = height_to_pressure_std(bound)
# If interpolation is on, this is all we need, if not, we need to go back and
# find the pressure closest to this and refigure the bounds
if not interpolate:
idx = (np.abs(pressure - bound_pressure)).argmin()
bound_pressure = pressure[idx]
bound_height = pressure_to_height_std(bound_pressure)
# Bound has invalid units
else:
raise ValueError('Bound must be specified in units of length or pressure.')
# If the bound is out of the range of the data, we shouldn't extrapolate
if not (_greater_or_close(bound_pressure, np.nanmin(pressure.m) * pressure.units)
and _less_or_close(bound_pressure, np.nanmax(pressure.m) * pressure.units)):
raise ValueError('Specified bound is outside pressure range.')
if height is not None and not (_less_or_close(bound_height,
np.nanmax(height.m) * height.units)
and _greater_or_close(bound_height,
np.nanmin(height.m) * height.units)):
raise ValueError('Specified bound is outside height range.')
return bound_pressure, bound_height
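# Editor's note: a hypothetical usage sketch (values are illustrative only).
# With no heights supplied, the standard atmosphere provides the height:
#   p = np.array([1000., 900., 850.]) * units.hPa
#   bound_p, bound_h = _get_bound_pressure_height(p, 925. * units.hPa)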
@exporter.export
@preprocess_xarray
@check_units('[length]')
def get_layer_heights(height, depth, *args, bottom=None, interpolate=True, with_agl=False):
"""Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer using
the height only.
Parameters
----------
height : array-like
Atmospheric height
depth : `pint.Quantity`
The thickness of the layer
args : array-like
Atmospheric variable(s) measured at the given pressures
bottom : `pint.Quantity`, optional
The bottom of the layer
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
with_agl : bool, optional
Returns the height as above ground level by subtracting the minimum height in the
provided height. Defaults to False.
Returns
-------
`pint.Quantity, pint.Quantity`
The height and data variables of the layer
"""
# Make sure pressure and datavars are the same length
for datavar in args:
if len(height) != len(datavar):
raise ValueError('Height and data variables must have the same length.')
# If we want things in AGL, subtract the minimum height from all height values
if with_agl:
sfc_height = np.min(height)
height = height - sfc_height
# If the bottom is not specified, make it the surface
if bottom is None:
bottom = height[0]
# Make heights and arguments base units
height = height.to_base_units()
bottom = bottom.to_base_units()
# Calculate the top of the layer
top = bottom + depth
ret = [] # returned data variables in layer
# Ensure heights are sorted in ascending order
sort_inds = np.argsort(height)
height = height[sort_inds]
# Mask based on top and bottom
inds = _greater_or_close(height, bottom) & _less_or_close(height, top)
heights_interp = height[inds]
# Interpolate heights at bounds if necessary and sort
if interpolate:
# If we don't have the bottom or top requested, append them
if top not in heights_interp:
heights_interp = units.Quantity(np.sort(np.append(heights_interp.m, top.m)),
height.units)
if bottom not in heights_interp:
heights_interp = units.Quantity(np.sort(np.append(heights_interp.m, bottom.m)),
height.units)
ret.append(heights_interp)
for datavar in args:
# Ensure that things are sorted in ascending order
datavar = datavar[sort_inds]
if interpolate:
# Interpolate for the possibly missing bottom/top values
datavar_interp = interpolate_1d(heights_interp, height, datavar)
datavar = datavar_interp
else:
datavar = datavar[inds]
ret.append(datavar)
return ret
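# Editor's note: a hypothetical usage sketch (illustrative values). Subset a
# 250 m deep layer measured above ground level:
#   h = np.array([10., 100., 200., 300.]) * units.m
#   t = units.Quantity(np.array([20., 19., 18., 17.]), 'degC')
#   h_layer, t_layer = get_layer_heights(h, 250 * units.m, t, with_agl=True)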
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def get_layer(pressure, *args, height=None, bottom=None, depth=100 * units.hPa,
interpolate=True):
r"""Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer. The
bottom of the layer can be specified with a pressure or height above the surface
pressure. The bottom defaults to the surface pressure. The depth of the layer can be
specified in terms of pressure or height above the bottom of the layer. If the top and
bottom of the layer are not in the data, they are interpolated by default.
Parameters
----------
pressure : array-like
Atmospheric pressure profile
args : array-like
Atmospheric variable(s) measured at the given pressures
height: array-like, optional
Atmospheric heights corresponding to the given pressures. Defaults to using
heights calculated from ``pressure`` assuming a standard atmosphere [NOAA1976]_.
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure. Defaults
to the highest pressure or lowest height given.
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer.
Defaults to 100 hPa.
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
Returns
-------
`pint.Quantity, pint.Quantity`
The pressure and data variables of the layer
"""
# If we get the depth kwarg, but it's None, set it to the default as well
if depth is None:
depth = 100 * units.hPa
# Make sure pressure and datavars are the same length
for datavar in args:
if len(pressure) != len(datavar):
raise ValueError('Pressure and data variables must have the same length.')
# If the bottom is not specified, make it the surface pressure
if bottom is None:
bottom = np.nanmax(pressure.m) * pressure.units
bottom_pressure, bottom_height = _get_bound_pressure_height(pressure, bottom,
height=height,
interpolate=interpolate)
# Calculate the top in whatever units depth is in
if depth.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}:
top = bottom_pressure - depth
elif depth.dimensionality == {'[length]': 1}:
top = bottom_height + depth
else:
raise ValueError('Depth must be specified in units of length or pressure')
top_pressure, _ = _get_bound_pressure_height(pressure, top, height=height,
interpolate=interpolate)
ret = [] # returned data variables in layer
# Ensure pressures are sorted in ascending order
sort_inds = np.argsort(pressure)
pressure = pressure[sort_inds]
# Mask based on top and bottom pressure
inds = (_less_or_close(pressure, bottom_pressure)
& _greater_or_close(pressure, top_pressure))
p_interp = pressure[inds]
# Interpolate pressures at bounds if necessary and sort
if interpolate:
# If we don't have the bottom or top requested, append them
if not np.any(np.isclose(top_pressure, p_interp)):
p_interp = np.sort(np.append(p_interp.m, top_pressure.m)) * pressure.units
if not np.any(np.isclose(bottom_pressure, p_interp)):
p_interp = np.sort(np.append(p_interp.m, bottom_pressure.m)) * pressure.units
ret.append(p_interp[::-1])
for datavar in args:
# Ensure that things are sorted in ascending order
datavar = datavar[sort_inds]
if interpolate:
# Interpolate for the possibly missing bottom/top values
datavar_interp = log_interpolate_1d(p_interp, pressure, datavar)
datavar = datavar_interp
else:
datavar = datavar[inds]
ret.append(datavar[::-1])
return ret
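# Editor's note: a hypothetical usage sketch (illustrative values). Extract the
# lowest 100 hPa of a sounding, interpolating the layer top if needed:
#   p = np.array([1000., 950., 900., 850.]) * units.hPa
#   t = units.Quantity(np.array([20., 17., 14., 11.]), 'degC')
#   p_layer, t_layer = get_layer(p, t, depth=100 * units.hPa)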
@exporter.export
@preprocess_xarray
def find_bounding_indices(arr, values, axis, from_below=True):
"""Find the indices surrounding the values within arr along axis.
Returns a set of above, below, good. Above and below are lists of arrays of indices.
These lists are formulated such that they can be used directly to index into a numpy
array and get the expected results (no extra slices or ellipsis necessary). `good` is
a boolean array indicating the "columns" that actually had values to bound the desired
value(s).
Parameters
----------
arr : array-like
Array to search for values
values : array-like
One or more values to search for in `arr`
axis : int
The dimension of `arr` along which to search.
from_below : bool, optional
Whether to search from "below" (i.e. low indices to high indices). If `False`,
the search will instead proceed from high indices to low indices. Defaults to `True`.
Returns
-------
above : list of arrays
List of broadcasted indices to the location above the desired value
below : list of arrays
List of broadcasted indices to the location below the desired value
good : array
Boolean array indicating where the search found proper bounds for the desired value
"""
# The shape of generated indices is the same as the input, but with the axis of interest
# replaced by the number of values to search for.
indices_shape = list(arr.shape)
indices_shape[axis] = len(values)
# Storage for the found indices and the mask for good locations
indices = np.empty(indices_shape, dtype=int)  # np.int/np.bool are removed in newer numpy
good = np.empty(indices_shape, dtype=bool)
# Used to put the output in the proper location
take = make_take(arr.ndim, axis)
# Loop over all of the values and for each, see where the value would be found from a
# linear search
for level_index, value in enumerate(values):
# Look for changes in the value of the test for <= value in consecutive points
# Taking abs() because we only care if there is a flip, not which direction.
switches = np.abs(np.diff((arr <= value).astype(int), axis=axis))
# Good points are those where it's not just 0's along the whole axis
good_search = np.any(switches, axis=axis)
if from_below:
# Look for the first switch; need to add 1 to the index since argmax is giving the
# index within the difference array, which is one smaller.
index = switches.argmax(axis=axis) + 1
else:
# Generate a list of slices to reverse the axis of interest so that searching from
# 0 to N is starting at the "top" of the axis.
arr_slice = [slice(None)] * arr.ndim
arr_slice[axis] = slice(None, None, -1)
# Same as above, but we use the slice to come from the end; then adjust those
# indices to measure from the front.
index = arr.shape[axis] - 1 - switches[tuple(arr_slice)].argmax(axis=axis)
# Set all indices where the results are not good to 0
index[~good_search] = 0
# Put the results in the proper slice
store_slice = take(level_index)
indices[store_slice] = index
good[store_slice] = good_search
# Create index values for broadcasting arrays
above = broadcast_indices(arr, indices, arr.ndim, axis)
below = broadcast_indices(arr, indices - 1, arr.ndim, axis)
return above, below, good
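# Editor's note: an illustrative sketch (hypothetical values). For a profile
# increasing along axis 0, searching for 2.5 brackets it between index 2
# (above) and index 1 (below), with good == True for that column:
#   above, below, good = find_bounding_indices(np.array([[1.], [2.], [3.]]),
#                                              [2.5], axis=0)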
def _greater_or_close(a, value, **kwargs):
r"""Compare values for greater or close to boolean masks.
Returns a boolean mask for values greater than or equal to a target within a specified
absolute or relative tolerance (as in :func:`numpy.isclose`).
Parameters
----------
a : array-like
Array of values to be compared
value : float
Comparison value
Returns
-------
array-like
Boolean array where values are greater than or nearly equal to value.
"""
return (a > value) | np.isclose(a, value, **kwargs)
def _less_or_close(a, value, **kwargs):
r"""Compare values for less or close to boolean masks.
Returns a boolean mask for values less than or equal to a target within a specified
absolute or relative tolerance (as in :func:`numpy.isclose`).
Parameters
----------
a : array-like
Array of values to be compared
value : float
Comparison value
Returns
-------
array-like
Boolean array where values are less than or nearly equal to value.
"""
return (a < value) | np.isclose(a, value, **kwargs)
def make_take(ndims, slice_dim):
"""Generate a take function to index in a particular dimension."""
def take(indexer):
return tuple(indexer if slice_dim % ndims == i else slice(None) # noqa: S001
for i in range(ndims))
return take
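# Editor's note: an illustrative sketch (hypothetical values). make_take builds
# an indexer that slices only the requested dimension:
#   take = make_take(3, 1)
#   take(slice(0, 2))  # -> (slice(None), slice(0, 2), slice(None))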
@exporter.export
@preprocess_xarray
def lat_lon_grid_deltas(longitude, latitude, y_dim=-2, x_dim=-1, **kwargs):
r"""Calculate the actual delta between grid points that are in latitude/longitude format.
Parameters
----------
longitude : array_like
array of longitudes defining the grid. If not a `pint.Quantity`, assumed to be in
degrees.
latitude : array_like
array of latitudes defining the grid. If not a `pint.Quantity`, assumed to be in
degrees.
y_dim : int
axis number for the y dimension, defaults to -2.
x_dim : int
axis number for the x dimension, defaults to -1.
kwargs
Other keyword arguments to pass to :class:`~pyproj.Geod`
Returns
-------
dx, dy:
at least two dimensional arrays of signed deltas between grid points in the x and y
direction
Notes
-----
Accepts 1D, 2D, or higher arrays for latitude and longitude
Assumes [..., Y, X] dimension order for input and output, unless keyword arguments `y_dim`
and `x_dim` are otherwise specified.
"""
from pyproj import Geod
# Inputs must be the same number of dimensions
if latitude.ndim != longitude.ndim:
raise ValueError('Latitude and longitude must have the same number of dimensions.')
# If we were given 1D arrays, make a mesh grid
if latitude.ndim < 2:
longitude, latitude = np.meshgrid(longitude, latitude)
# pyproj requires ndarrays, not Quantities
try:
longitude = longitude.m_as('degrees')
latitude = latitude.m_as('degrees')
except AttributeError:
longitude = np.asarray(longitude)
latitude = np.asarray(latitude)
# Determine dimension order for offset slicing
take_y = make_take(latitude.ndim, y_dim)
take_x = make_take(latitude.ndim, x_dim)
geod_args = {'ellps': 'sphere'}
if kwargs:
geod_args = kwargs
g = Geod(**geod_args)
forward_az, _, dy = g.inv(longitude[take_y(slice(None, -1))],
latitude[take_y(slice(None, -1))],
longitude[take_y(slice(1, None))],
latitude[take_y(slice(1, None))])
dy[(forward_az < -90.) | (forward_az > 90.)] *= -1
forward_az, _, dx = g.inv(longitude[take_x(slice(None, -1))],
latitude[take_x(slice(None, -1))],
longitude[take_x(slice(1, None))],
latitude[take_x(slice(1, None))])
dx[(forward_az < 0.) | (forward_az > 180.)] *= -1
return dx * units.meter, dy * units.meter
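# Editor's note: a hypothetical usage sketch (illustrative values). 1D lat/lon
# arrays are meshed internally; dx and dy come back as 2D arrays in meters:
#   lon = np.array([-105., -104., -103.])
#   lat = np.array([40., 41.])
#   dx, dy = lat_lon_grid_deltas(lon, lat)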
@exporter.export
@preprocess_xarray
def azimuth_range_to_lat_lon(azimuths, ranges, center_lon, center_lat, **kwargs):
"""Convert azimuth and range locations in a polar coordinate system to lat/lon coordinates.
Pole refers to the origin of the coordinate system.
Parameters
----------
azimuths : array_like
array of azimuths defining the grid. If not a `pint.Quantity`,
assumed to be in degrees.
ranges : array_like
array of range distances from the pole. Typically in meters.
center_lat : float
The latitude of the pole in decimal degrees
center_lon : float
The longitude of the pole in decimal degrees
kwargs
arbitrary keyword arguments to pass to pyproj.Geod (e.g. 'ellps')
Returns
-------
lon, lat : 2D arrays of longitudes and latitudes corresponding to original locations
Notes
-----
Credit to Brian Blaylock for the original implementation.
"""
from pyproj import Geod
geod_args = {'ellps': 'sphere'}
if kwargs:
geod_args = kwargs
g = Geod(**geod_args)
rng2d, az2d = np.meshgrid(ranges, azimuths)
lats = np.full(az2d.shape, center_lat)
lons = np.full(az2d.shape, center_lon)
lon, lat, _ = g.fwd(lons, lats, az2d, rng2d)
return lon, lat
@exporter.export
def grid_deltas_from_dataarray(f, kind='default'):
"""Calculate the horizontal deltas between grid points of a DataArray.
Calculate the signed delta distance between grid points of a DataArray in the horizontal
directions, using actual (real distance) or nominal (in projection space) deltas.
Parameters
----------
f : `xarray.DataArray`
Parsed DataArray (MetPy's crs coordinate must be available for kind="actual")
kind : str
Type of grid delta to calculate. "actual" returns true distances as calculated from
longitude and latitude via `lat_lon_grid_deltas`. "nominal" returns horizontal
differences in the data's coordinate space, either in degrees (for lat/lon CRS) or
meters (for y/x CRS). "default" behaves like "actual" for datasets with a lat/lon CRS
and like "nominal" for all others. Defaults to "default".
Returns
-------
dx, dy:
arrays of signed deltas between grid points in the x and y directions with dimensions
matching those of `f`.
See Also
--------
lat_lon_grid_deltas
"""
# Determine behavior
if kind == 'default' and f.metpy.crs['grid_mapping_name'] == 'latitude_longitude':
kind = 'actual'
elif kind == 'default':
kind = 'nominal'
elif kind not in ('actual', 'nominal'):
raise ValueError('"kind" argument must be specified as "default", "actual", or '
'"nominal"')
if kind == 'actual':
# Get latitude/longitude coordinates and find dim order
latitude, longitude = xr.broadcast(*f.metpy.coordinates('latitude', 'longitude'))
try:
y_dim = latitude.metpy.find_axis_number('y')
x_dim = latitude.metpy.find_axis_number('x')
except AttributeError:
warnings.warn('y and x dimensions unable to be identified. Assuming [..., y, x] '
'dimension order.')
y_dim, x_dim = -2, -1
# Obtain grid deltas as xarray Variables
(dx_var, dx_units), (dy_var, dy_units) = (
(xr.Variable(dims=latitude.dims, data=deltas.magnitude), deltas.units)
for deltas in lat_lon_grid_deltas(longitude, latitude, y_dim=y_dim, x_dim=x_dim,
initstring=f.metpy.cartopy_crs.proj4_init))
else:
# Obtain y/x coordinate differences
y, x = f.metpy.coordinates('y', 'x')
dx_var = x.diff(x.dims[0]).variable
dx_units = units(x.attrs.get('units'))
dy_var = y.diff(y.dims[0]).variable
dy_units = units(y.attrs.get('units'))
# Broadcast to input and attach units
dx = dx_var.set_dims(f.dims, shape=[dx_var.sizes[dim] if dim in dx_var.dims else 1
for dim in f.dims]).data * dx_units
dy = dy_var.set_dims(f.dims, shape=[dy_var.sizes[dim] if dim in dy_var.dims else 1
for dim in f.dims]).data * dy_units
return dx, dy
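# Editor's note: a hypothetical sketch. For a parsed DataArray `data` on a
# projected y/x grid, kind='nominal' simply differences the y/x coordinates:
#   dx, dy = grid_deltas_from_dataarray(data, kind='nominal')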
def xarray_derivative_wrap(func):
"""Decorate the derivative functions to make them work nicely with DataArrays.
This will automatically determine if the coordinates can be pulled directly from the
DataArray, or if a call to lat_lon_grid_deltas is needed.
"""
@functools.wraps(func)
def wrapper(f, **kwargs):
if 'x' in kwargs or 'delta' in kwargs:
# Use the usual DataArray to pint.Quantity preprocessing wrapper
return preprocess_xarray(func)(f, **kwargs)
elif isinstance(f, xr.DataArray):
# Get axis argument, defaulting to first dimension
axis = f.metpy.find_axis_name(kwargs.get('axis', 0))
# Initialize new kwargs with the axis number
new_kwargs = {'axis': f.get_axis_num(axis)}
if check_axis(f[axis], 'time'):
# Time coordinate, need to get time deltas
new_kwargs['delta'] = f[axis].metpy.time_deltas
elif check_axis(f[axis], 'longitude'):
# Longitude coordinate, need to get grid deltas
new_kwargs['delta'], _ = grid_deltas_from_dataarray(f)
elif check_axis(f[axis], 'latitude'):
# Latitude coordinate, need to get grid deltas
_, new_kwargs['delta'] = grid_deltas_from_dataarray(f)
else:
# General coordinate, use as is
new_kwargs['x'] = f[axis].metpy.unit_array
# Calculate and return result as a DataArray
result = func(f.metpy.unit_array, **new_kwargs)
return xr.DataArray(result, coords=f.coords, dims=f.dims)
else:
# Error
raise ValueError('Must specify either "x" or "delta" for value positions when "f" '
'is not a DataArray.')
return wrapper
@exporter.export
@xarray_derivative_wrap
def first_derivative(f, **kwargs):
"""Calculate the first derivative of a grid of values.
Works for both regularly-spaced data and grids with varying spacing.
Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray` with
attached coordinate and projection information. If `f` is an `xarray.DataArray`, and `x` or
`delta` are given, `f` will be converted to a `pint.Quantity` and the derivative returned
as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are given, the attached
coordinate information belonging to `axis` will be used and the derivative will be returned
as an `xarray.DataArray`.
This uses 3 points to calculate the derivative, using forward or backward at the edges of
the grid as appropriate, and centered elsewhere. The irregular spacing is handled
explicitly, using the formulation as specified by [Bowen2005]_.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
axis : int or str, optional
The array axis along which to take the derivative. If `f` is ndarray-like, must be an
integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate
dimension name or the axis type) or integer (referring to axis number), unless using
implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults
to 0. For reference, the current standard axis types are 'time', 'vertical', 'y', and
'x'.
x : array-like, optional
The coordinate values corresponding to the grid points in `f`.
delta : array-like, optional
Spacing between the grid points in `f`. Should be one item less than the size
of `f` along `axis`.
Returns
-------
array-like
The first derivative calculated along the selected axis.
See Also
--------
second_derivative
"""
n, axis, delta = _process_deriv_args(f, kwargs)
take = make_take(n, axis)
# First handle centered case
slice0 = take(slice(None, -2))
slice1 = take(slice(1, -1))
slice2 = take(slice(2, None))
delta_slice0 = take(slice(None, -1))
delta_slice1 = take(slice(1, None))
combined_delta = delta[delta_slice0] + delta[delta_slice1]
delta_diff = delta[delta_slice1] - delta[delta_slice0]
center = (- delta[delta_slice1] / (combined_delta * delta[delta_slice0]) * f[slice0]
+ delta_diff / (delta[delta_slice0] * delta[delta_slice1]) * f[slice1]
+ delta[delta_slice0] / (combined_delta * delta[delta_slice1]) * f[slice2])
# Fill in "left" edge with forward difference
slice0 = take(slice(None, 1))
slice1 = take(slice(1, 2))
slice2 = take(slice(2, 3))
delta_slice0 = take(slice(None, 1))
delta_slice1 = take(slice(1, 2))
combined_delta = delta[delta_slice0] + delta[delta_slice1]
big_delta = combined_delta + delta[delta_slice0]
left = (- big_delta / (combined_delta * delta[delta_slice0]) * f[slice0]
+ combined_delta / (delta[delta_slice0] * delta[delta_slice1]) * f[slice1]
- delta[delta_slice0] / (combined_delta * delta[delta_slice1]) * f[slice2])
# Now the "right" edge with backward difference
slice0 = take(slice(-3, -2))
slice1 = take(slice(-2, -1))
slice2 = take(slice(-1, None))
delta_slice0 = take(slice(-2, -1))
delta_slice1 = take(slice(-1, None))
combined_delta = delta[delta_slice0] + delta[delta_slice1]
big_delta = combined_delta + delta[delta_slice1]
right = (delta[delta_slice1] / (combined_delta * delta[delta_slice0]) * f[slice0]
- combined_delta / (delta[delta_slice0] * delta[delta_slice1]) * f[slice1]
+ big_delta / (combined_delta * delta[delta_slice1]) * f[slice2])
return concatenate((left, center, right), axis=axis)
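# Editor's note: a worked sketch with hypothetical values. For f = x**2 sampled
# at unit spacing, the 3-point scheme is exact and returns 2x:
#   first_derivative(np.array([0., 1., 4., 9.]), delta=1.)  # -> [0., 2., 4., 6.]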
@exporter.export
@xarray_derivative_wrap
def second_derivative(f, **kwargs):
"""Calculate the second derivative of a grid of values.
Works for both regularly-spaced data and grids with varying spacing.
Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray` with
attached coordinate and projection information. If `f` is an `xarray.DataArray`, and `x` or
`delta` are given, `f` will be converted to a `pint.Quantity` and the derivative returned
as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are given, the attached
coordinate information belonging to `axis` will be used and the derivative will be returned
as an `xarray.DataArray`.
This uses 3 points to calculate the derivative, using forward or backward at the edges of
the grid as appropriate, and centered elsewhere. The irregular spacing is handled
explicitly, using the formulation as specified by [Bowen2005]_.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
axis : int or str, optional
The array axis along which to take the derivative. If `f` is ndarray-like, must be an
integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate
dimension name or the axis type) or integer (referring to axis number), unless using
implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults
to 0. For reference, the current standard axis types are 'time', 'vertical', 'y', and
'x'.
x : array-like, optional
The coordinate values corresponding to the grid points in `f`.
delta : array-like, optional
Spacing between the grid points in `f`. There should be one item less than the size
of `f` along `axis`.
Returns
-------
array-like
The second derivative calculated along the selected axis.
See Also
--------
first_derivative
"""
n, axis, delta = _process_deriv_args(f, kwargs)
take = make_take(n, axis)
# First handle centered case
slice0 = take(slice(None, -2))
slice1 = take(slice(1, -1))
slice2 = take(slice(2, None))
delta_slice0 = take(slice(None, -1))
delta_slice1 = take(slice(1, None))
combined_delta = delta[delta_slice0] + delta[delta_slice1]
center = 2 * (f[slice0] / (combined_delta * delta[delta_slice0])
- f[slice1] / (delta[delta_slice0] * delta[delta_slice1])
+ f[slice2] / (combined_delta * delta[delta_slice1]))
# Fill in "left" edge
slice0 = take(slice(None, 1))
slice1 = take(slice(1, 2))
slice2 = take(slice(2, 3))
delta_slice0 = take(slice(None, 1))
delta_slice1 = take(slice(1, 2))
combined_delta = delta[delta_slice0] + delta[delta_slice1]
left = 2 * (f[slice0] / (combined_delta * delta[delta_slice0])
- f[slice1] / (delta[delta_slice0] * delta[delta_slice1])
+ f[slice2] / (combined_delta * delta[delta_slice1]))
# Now the "right" edge
slice0 = take(slice(-3, -2))
slice1 = take(slice(-2, -1))
slice2 = take(slice(-1, None))
delta_slice0 = take(slice(-2, -1))
delta_slice1 = take(slice(-1, None))
combined_delta = delta[delta_slice0] + delta[delta_slice1]
right = 2 * (f[slice0] / (combined_delta * delta[delta_slice0])
- f[slice1] / (delta[delta_slice0] * delta[delta_slice1])
+ f[slice2] / (combined_delta * delta[delta_slice1]))
return concatenate((left, center, right), axis=axis)
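# Editor's note: a worked sketch with hypothetical values. For f = x**2 the
# second derivative is constant, including at the edges:
#   second_derivative(np.array([0., 1., 4., 9.]), delta=1.)  # -> [2., 2., 2., 2.]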
@exporter.export
def gradient(f, **kwargs):
"""Calculate the gradient of a grid of values.
Works for both regularly-spaced data, and grids with varying spacing.
Either `coordinates` or `deltas` must be specified, or `f` must be given as an
`xarray.DataArray` with attached coordinate and projection information. If `f` is an
`xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a
`pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if
neither `coordinates` nor `deltas` are given, the attached coordinate information belonging
to `axis` will be used and the gradient will be returned as a tuple of `xarray.DataArray`.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
coordinates : array-like, optional
Sequence of arrays containing the coordinate values corresponding to the
grid points in `f` in axis order.
deltas : array-like, optional
Sequence of arrays or scalars that specify the spacing between the grid points in `f`
in axis order. There should be one item less than the size of `f` along the applicable
axis.
axes : sequence, optional
Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to
`pint.Quantity` is not used) or integers that specify the array axes along which to
take the derivatives. Defaults to all axes of `f`. If given, and used with
`coordinates` or `deltas`, its length must be less than or equal to that of the
`coordinates` or `deltas` given. In general, each axis can be an axis number
(integer), dimension coordinate name (string) or a standard axis type (string). The
current standard axis types are 'time', 'vertical', 'y', and 'x'.
Returns
-------
tuple of array-like
The first derivative calculated along each specified axis of the original array
See Also
--------
laplacian, first_derivative
Notes
-----
If this function is used without the `axes` parameter, the length of `coordinates` or
`deltas` (as applicable) should match the number of dimensions of `f`.
"""
pos_kwarg, positions, axes = _process_gradient_args(f, kwargs)
return tuple(first_derivative(f, axis=axis, **{pos_kwarg: positions[ind]})
for ind, axis in enumerate(axes))
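# Editor's note: a worked sketch with hypothetical values. For a 1D array this
# reduces to first_derivative along the single axis, returned as a 1-tuple:
#   gradient(np.array([0., 1., 4., 9.]), deltas=(1.,))  # -> (array([0., 2., 4., 6.]),)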
@exporter.export
def laplacian(f, **kwargs):
"""Calculate the laplacian of a grid of values.
Works for both regularly-spaced data, and grids with varying spacing.
Either `coordinates` or `deltas` must be specified, or `f` must be given as an
`xarray.DataArray` with attached coordinate and projection information. If `f` is an
`xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a
`pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if
neither `coordinates` nor `deltas` are given, the attached coordinate information belonging
to `axis` will be used and the gradient will be returned as a tuple of `xarray.DataArray`.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
coordinates : array-like, optional
The coordinate values corresponding to the grid points in `f`
deltas : array-like, optional
Spacing between the grid points in `f`. There should be one item less than the size
of `f` along the applicable axis.
axes : sequence, optional
Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to
`pint.Quantity` is not used) or integers that specify the array axes along which to
take the derivatives. Defaults to all axes of `f`. If given, and used with
`coordinates` or `deltas`, its length must be less than or equal to that of the
`coordinates` or `deltas` given. In general, each axis can be an axis number
(integer), dimension coordinate name (string) or a standard axis type (string). The
current standard axis types are 'time', 'vertical', 'y', and 'x'.
Returns
-------
array-like
The laplacian
See Also
--------
gradient, second_derivative
Notes
-----
If this function is used without the `axes` parameter, the length of `coordinates` or
`deltas` (as applicable) should match the number of dimensions of `f`.
"""
pos_kwarg, positions, axes = _process_gradient_args(f, kwargs)
derivs = [second_derivative(f, axis=axis, **{pos_kwarg: positions[ind]})
for ind, axis in enumerate(axes)]
return sum(derivs)
def _broadcast_to_axis(arr, axis, ndim):
"""Handle reshaping coordinate array to have proper dimensionality.
This puts the values along the specified axis.
"""
if arr.ndim == 1 and arr.ndim < ndim:
new_shape = [1] * ndim
new_shape[axis] = arr.size
arr = arr.reshape(*new_shape)
return arr
def _process_gradient_args(f, kwargs):
"""Handle common processing of arguments for gradient and gradient-like functions."""
axes = kwargs.get('axes', range(f.ndim))
def _check_length(positions):
if 'axes' in kwargs and len(positions) < len(axes):
raise ValueError('Length of "coordinates" or "deltas" cannot be less than that '
'of "axes".')
elif 'axes' not in kwargs and len(positions) != len(axes):
raise ValueError('Length of "coordinates" or "deltas" must match the number of '
'dimensions of "f" when "axes" is not given.')
if 'deltas' in kwargs:
if 'coordinates' in kwargs or 'x' in kwargs:
raise ValueError('Cannot specify both "coordinates" and "deltas".')
_check_length(kwargs['deltas'])
return 'delta', kwargs['deltas'], axes
elif 'coordinates' in kwargs:
_check_length(kwargs['coordinates'])
return 'x', kwargs['coordinates'], axes
elif isinstance(f, xr.DataArray):
return 'pass', axes, axes # only the axis argument matters
else:
raise ValueError('Must specify either "coordinates" or "deltas" for value positions '
'when "f" is not a DataArray.')
def _process_deriv_args(f, kwargs):
"""Handle common processing of arguments for derivative functions."""
n = f.ndim
axis = normalize_axis_index(kwargs.get('axis', 0), n)
if f.shape[axis] < 3:
raise ValueError('f must have at least 3 points along the desired axis.')
if 'delta' in kwargs:
if 'x' in kwargs:
raise ValueError('Cannot specify both "x" and "delta".')
delta = np.atleast_1d(kwargs['delta'])
if delta.size == 1:
diff_size = list(f.shape)
diff_size[axis] -= 1
delta_units = getattr(delta, 'units', None)
delta = np.broadcast_to(delta, diff_size, subok=True)
if not hasattr(delta, 'units') and delta_units is not None:
delta = delta * delta_units
else:
delta = _broadcast_to_axis(delta, axis, n)
elif 'x' in kwargs:
x = _broadcast_to_axis(kwargs['x'], axis, n)
delta = np.diff(x, axis=axis)
else:
raise ValueError('Must specify either "x" or "delta" for value positions.')
return n, axis, delta
@exporter.export
@preprocess_xarray
def parse_angle(input_dir):
"""Calculate the meteorological angle from directional text.
Works for abbreviations or whole words (E -> 90 | South -> 180)
and can also parse 22.5 degree angles such as ESE/East South East.
Parameters
----------
input_dir : string or array-like
Directional text such as west, [south-west, ne], etc
Returns
-------
`pint.Quantity`
The angle in degrees
"""
if isinstance(input_dir, str):
# abb_dirs = abbreviated directions
abb_dirs = _clean_direction([_abbrieviate_direction(input_dir)])
elif hasattr(input_dir, '__len__'): # handle np.array, pd.Series, list, and array-like
input_dir_str = ','.join(_clean_direction(input_dir, preprocess=True))
abb_dir_str = _abbrieviate_direction(input_dir_str)
abb_dirs = _clean_direction(abb_dir_str.split(','))
else: # handle unrecognizable scalar
return np.nan
return itemgetter(*abb_dirs)(DIR_DICT)
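# Editor's note: an illustrative sketch (hypothetical inputs). Abbreviated and
# spelled-out directions both resolve to 22.5-degree multiples:
#   parse_angle('NE')          # -> 45 degrees
#   parse_angle('south west')  # -> 225 degrees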
def _clean_direction(dir_list, preprocess=False):
"""Handle None if preprocess, else handles anything not in DIR_STRS."""
if preprocess: # primarily to remove None from list so ','.join works
return [UND if not isinstance(the_dir, str) else the_dir
for the_dir in dir_list]
else:  # remove extraneous abbreviated directions
return [UND if the_dir not in DIR_STRS else the_dir
for the_dir in dir_list]
def _abbrieviate_direction(ext_dir_str):
"""Convert extended (non-abbrievated) directions to abbrieviation."""
return (ext_dir_str
.upper()
.replace('_', '')
.replace('-', '')
.replace(' ', '')
.replace('NORTH', 'N')
.replace('EAST', 'E')
.replace('SOUTH', 'S')
.replace('WEST', 'W')
)
@exporter.export
@preprocess_xarray
def angle_to_direction(input_angle, full=False, level=3):
"""Convert the meteorological angle to directional text.
Works for angles greater than or equal to 360 (360 -> N | 405 -> NE)
and rounds to the nearest angle (355 -> N | 404 -> NNE)
Parameters
----------
input_angle : numeric or array-like numeric
Angles such as 0, 25, 45, 360, 410, etc
full : boolean
True returns full text (South), False returns abbreviated text (S)
level : int
Level of detail (3 = N/NNE/NE/ENE/E... 2 = N/NE/E/SE... 1 = N/E/S/W)
Returns
-------
direction
The directional text
"""
try: # strip units temporarily
origin_units = input_angle.units
input_angle = input_angle.m
except AttributeError: # no units associated
origin_units = units.degree
if not hasattr(input_angle, '__len__') or isinstance(input_angle, str):
input_angle = [input_angle]
scalar = True
else:
scalar = False
# clean any numeric strings, negatives, and None
# does not handle strings containing alphabetic characters
input_angle = np.array(input_angle).astype(float)
with np.errstate(invalid='ignore'): # warns about the np.nan
input_angle[np.where(input_angle < 0)] = np.nan
input_angle = input_angle * origin_units
# normalizer used for angles > 360 degree to normalize between 0 - 360
normalizer = np.array(input_angle.m / MAX_DEGREE_ANGLE.m, dtype=int)
norm_angles = abs(input_angle - MAX_DEGREE_ANGLE * normalizer)
if level == 3:
nskip = 1
elif level == 2:
nskip = 2
elif level == 1:
nskip = 4
else:
err_msg = 'Level of complexity cannot be less than 1 or greater than 3!'
raise ValueError(err_msg)
angle_dict = {i * BASE_DEGREE_MULTIPLIER.m * nskip: dir_str
for i, dir_str in enumerate(DIR_STRS[::nskip])}
angle_dict[MAX_DEGREE_ANGLE.m] = 'N' # handle edge case of 360.
angle_dict[UND_ANGLE] = UND
# round to the nearest angles for dict lookup
# 0.001 is subtracted so there's an equal number of dir_str from
# np.arange(0, 360, 22.5), or else some dir_str will be preferred
# without the 0.001, level=2 would yield:
# ['N', 'N', 'NE', 'E', 'E', 'E', 'SE', 'S', 'S',
# 'S', 'SW', 'W', 'W', 'W', 'NW', 'N']
# with the -0.001, level=2 would yield:
# ['N', 'N', 'NE', 'NE', 'E', 'E', 'SE', 'SE',
# 'S', 'S', 'SW', 'SW', 'W', 'W', 'NW', 'NW']
multiplier = np.round(
(norm_angles / BASE_DEGREE_MULTIPLIER / nskip) - 0.001).m
round_angles = (multiplier * BASE_DEGREE_MULTIPLIER.m * nskip)
round_angles[np.where(np.isnan(round_angles))] = UND_ANGLE
dir_str_arr = itemgetter(*round_angles)(angle_dict) # for array
if full:
dir_str_arr = ','.join(dir_str_arr)
dir_str_arr = _unabbrieviate_direction(dir_str_arr)
if not scalar:
dir_str = dir_str_arr.split(',')
else:
dir_str = dir_str_arr.replace(',', ' ')
else:
dir_str = dir_str_arr
return dir_str
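# Editor's note: an illustrative sketch (hypothetical inputs). Angles are
# normalized to [0, 360) and rounded to the nearest direction at the level:
#   angle_to_direction(45)            # -> 'NE'
#   angle_to_direction(405, level=2)  # -> 'NE'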
def _unabbrieviate_direction(abb_dir_str):
"""Convert abbrieviated directions to non-abbrieviated direction."""
return (abb_dir_str
.upper()
.replace(UND, 'Undefined ')
.replace('N', 'North ')
.replace('E', 'East ')
.replace('S', 'South ')
.replace('W', 'West ')
.replace(' ,', ',')
).strip()
def _remove_nans(*variables):
"""Remove NaNs from arrays that cause issues with calculations.
Takes a variable number of array arguments and returns them in the same
order, with elements removed wherever any of the input arrays is NaN.
"""
mask = None
for v in variables:
if mask is None:
mask = np.isnan(v)
else:
mask |= np.isnan(v)
# Mask everyone with that joint mask
ret = []
for v in variables:
ret.append(v[~mask])
return ret
def wrap_output_like(**wrap_kwargs):
"""Wrap the output from a function to be like some other data object type.
Wraps given data to match the units/coordinates/object type of another array. Currently
supports:
- As input (output from wrapped function):
* ``pint.Quantity``
* ``xarray.DataArray``
* any type wrappable by ``pint.Quantity``
- As matched output (final returned value):
* ``pint.Quantity``
* ``xarray.DataArray`` wrapping a ``pint.Quantity``
(if matched output is not one of these types, we instead treat the match as if it were a
dimensionless Quantity.)
This wrapping/conversion follows these rules:
- If match_unit is False, for output of Quantity or DataArray respectively,
* ndarray becomes dimensionless Quantity or unitless DataArray with matching coords
* Quantity is unchanged or becomes DataArray with input units and output coords
* DataArray is converted to Quantity by accessor or is unchanged
- If match_unit is True, for output of Quantity or DataArray respectively, with a given
unit,
* ndarray becomes Quantity or DataArray (with matching coords) with output unit
* Quantity is converted to output unit, then returned or converted to DataArray with
matching coords
* DataArray has its units converted via the accessor, then converted to Quantity via
the accessor or returned
The output to match can be specified two ways:
- Using the `argument` keyword argument, the output is taken from argument of that name
from the wrapped function's signature
- Using the `other` keyword argument, the output is given directly
Parameters
----------
argument : str
specify the name of a single argument from the function signature from which
to take the other data object
other : `numpy.ndarray` or `pint.Quantity` or `xarray.DataArray`
specify the other data object directly
match_unit : bool
if True and other data object has units, convert output to those units
(defaults to False)
Notes
-----
This can be extended in the future to support:
- ``units.wraps``-like behavior
- Python scalars vs. NumPy scalars (Issue #1209)
- dask (and other duck array) compatibility
- dimensionality reduction (particularly with xarray)
See Also
--------
preprocess_xarray
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Determine other
if 'other' in wrap_kwargs:
other = wrap_kwargs['other']
elif 'argument' in wrap_kwargs:
other = signature(func).bind(*args, **kwargs).arguments[
wrap_kwargs['argument']]
else:
raise ValueError('Must specify keyword "other" or "argument".')
# Get result from wrapped function
result = func(*args, **kwargs)
# Proceed with wrapping rules
if wrap_kwargs.get('match_unit', False):
return _wrap_output_like_matching_units(result, other)
else:
return _wrap_output_like_not_matching_units(result, other)
return wrapper
return decorator
def _wrap_output_like_matching_units(result, match):
"""Convert result to be like match with matching units for output wrapper."""
if isinstance(match, xr.DataArray):
output_xarray = True
match_units = match.metpy.units
elif isinstance(match, units.Quantity):
output_xarray = False
match_units = match.units
else:
output_xarray = False
match_units = ''
if isinstance(result, xr.DataArray):
result = result.metpy.convert_units(match_units)
return result.metpy.quantify() if output_xarray else result.metpy.unit_array
else:
result = result.m_as(match_units) if isinstance(result, units.Quantity) else result
if output_xarray:
return xr.DataArray(
units.Quantity(result, match_units),
dims=match.dims,
coords=match.coords
)
else:
return units.Quantity(result, match_units)
def _wrap_output_like_not_matching_units(result, match):
"""Convert result to be like match without matching units for output wrapper."""
output_xarray = isinstance(match, xr.DataArray)
if isinstance(result, xr.DataArray):
return result.metpy.quantify() if output_xarray else result.metpy.unit_array
else:
if isinstance(result, units.Quantity):
result_magnitude = result.magnitude
result_units = str(result.units)
else:
result_magnitude = result
result_units = ''
if output_xarray:
return xr.DataArray(
units.Quantity(result_magnitude, result_units),
dims=match.dims,
coords=match.coords
)
else:
return units.Quantity(result_magnitude, result_units)
|
ShawnMurd/MetPy
|
src/metpy/calc/tools.py
|
Python
|
bsd-3-clause
| 62,642
|
[
"Brian"
] |
8c6798968650a30b0fc09c36a3acd38bd318872ce4ae2c8370e93024e68f514d
|
from ..utils import *
##
# Minions
class GVG_023:
"Goblin Auto-Barber"
play = Buff(FRIENDLY_WEAPON, "GVG_023a")
GVG_023a = buff(atk=1)
class GVG_025:
"One-eyed Cheat"
events = Summon(CONTROLLER, PIRATE - SELF).on(Stealth(SELF))
class GVG_027:
"Iron Sensei"
events = OWN_TURN_END.on(Buff(RANDOM(FRIENDLY_MINIONS + MECH - SELF), "GVG_027e"))
GVG_027e = buff(+2, +2)
class GVG_028:
"Trade Prince Gallywix"
events = Play(OPPONENT, SPELL - ID("GVG_028t")).on(
Give(CONTROLLER, Copy(Play.CARD)),
Give(OPPONENT, "GVG_028t")
)
class GVG_028t:
play = ManaThisTurn(CONTROLLER, 1)
class GVG_088:
"Ogre Ninja"
events = FORGETFUL
##
# Spells
class GVG_022:
"Tinker's Sharpsword Oil"
play = Buff(FRIENDLY_WEAPON, "GVG_022a")
combo = Buff(FRIENDLY_WEAPON, "GVG_022a"), Buff(RANDOM_FRIENDLY_CHARACTER, "GVG_022b")
GVG_022a = buff(atk=3) # Weapon
GVG_022b = buff(atk=3) # Minion
class GVG_047:
"Sabotage"
play = Destroy(RANDOM_ENEMY_MINION)
combo = Destroy(ENEMY_WEAPON | RANDOM_ENEMY_MINION)
##
# Weapons
class GVG_024:
"Cogmaster's Wrench"
update = Find(FRIENDLY_MINIONS + MECH) & Refresh(SELF, {GameTag.ATK: +2})
|
beheh/fireplace
|
fireplace/cards/gvg/rogue.py
|
Python
|
agpl-3.0
| 1,148
|
[
"TINKER"
] |
759fd92b93faf055cc8e7a8c60627200d0704e4c4501baf4673a58b1d89ee501
|
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
from collections import deque
import logging
import os
import re
import nltk
import commoncode
from textcode import analysis
from cluecode import copyrights_hint
COPYRIGHT_TRACE = 0
logger = logging.getLogger(__name__)
if os.environ.get('SCANCODE_COPYRIGHT_DEBUG'):
import sys
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
COPYRIGHT_TRACE = 0
"""
Detect and collect copyright statements.
The process consists of:
- prepare and cleanup text
- identify regions of text that may contain copyright (using hints)
- tag the text for parts-of-speech (POS) to identify various copyright
statements parts such as dates, names ("named entities"), etc. This is done
using NLTK POS tagging
- feed the tagged text to a parsing grammar describing actual copyright
statements
- yield copyright statements, years, holder and authors with start and end line
from the parse tree, eventually performing some minor cleanups.
"""
def detect_copyrights(location):
"""
Yield tuples of:
(copyrights list, authors list, years list, holders list, start line, end line)
detected in file at location.
"""
detector = CopyrightDetector()
for numbered_lines in candidate_lines(analysis.text_lines(location)):
detected = detector.detect(numbered_lines)
cp, auth, yr, hold, _start, _end = detected
if any([cp, auth, yr, hold]):
yield detected
def detect(location):
"""
Return lists of detected copyrights, authors, years and holders
in file at location.
WARNING: Deprecated legacy entry point.
"""
copyrights = []
copyrights_extend = copyrights.extend
authors = []
authors_extend = authors.extend
years = []
years_extend = years.extend
holders = []
holders_extend = holders.extend
for cp, auth, yr, hold, _start, _end in detect_copyrights(location):
copyrights_extend(cp)
authors_extend(auth)
years_extend(yr)
holders_extend(hold)
return copyrights, authors, years, holders
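# Editor's note: a hypothetical usage sketch (the path is illustrative). The
# legacy entry point aggregates everything detect_copyrights() yields:
#   copyrights, authors, years, holders = detect('/path/to/some/file.c')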
_YEAR = (r'('
'19[6-9][0-9]' # 1960 to 1999
'|'
'20[0-1][0-9]' # 2000 to 2019
')')
_YEAR_SHORT = (r'('
'[6-9][0-9]' # 19-60 to 19-99
'|'
'[0-1][0-9]' # 20-00 to 20-19
')')
_YEAR_YEAR = (r'('
# fixme v ....the underscore below is suspicious
'19[6-9][0-9][\.,\-]_[6-9][0-9]' # 1960-99
'|'
'19[6-9][0-9][\.,\-]+[0-9]' # 1998-9
'|'
'20[0-1][0-9][\.,\-]+[0-1][0-9]' # 2001-16 or 2012-04
'|'
'200[0-9][\.,\-]+[0-9]' # 2001-4 not 2012
')')
_PUNCT = (r'('
'['
'\W' # not a word (word includes underscore)
'\D' # not a digit
'\_' # underscore
'i' # oddity
'\?'
']'
'|'
'\&nbsp' # html entities are sometimes double escaped
')*') # repeated 0 or more times
_YEAR_PUNCT = _YEAR + _PUNCT
_YEAR_YEAR_PUNCT = _YEAR_YEAR + _PUNCT
_YEAR_SHORT_PUNCT = _YEAR_SHORT + _PUNCT
_YEAR_OR_YEAR_YEAR_WITH_PUNCT = (r'(' +
_YEAR_PUNCT +
'|' +
_YEAR_YEAR_PUNCT +
')')
_YEAR_THEN_YEAR_SHORT = (r'(' +
_YEAR_OR_YEAR_YEAR_WITH_PUNCT +
'(' +
_YEAR_SHORT_PUNCT +
')*' +
')')
pats = [
_YEAR,
_YEAR_SHORT,
_YEAR_YEAR,
_PUNCT,
_YEAR_OR_YEAR_YEAR_WITH_PUNCT
]
# FIXME: multi-tokens patterns are likely not behaving as expected
# FIXME: patterns could be greatly simplified
patterns = [
# TODO: this needs to be simplified:
# TODO: in NLTK 3.0 this will fail because of this bug:
# https://github.com/nltk/nltk/issues/1025
# JUNK are things to ignore
# All Rights Reserved. should be a terminator/delimiter.
(r'^([Aa]ll [Rr]ights? [Rr]eserved|ALL RIGHTS? RESERVED|[Aa]ll|ALL)$', 'JUNK'),
(r'^([Rr]eserved|RESERVED)[,]?$', 'JUNK'),
# found in crypto certificates and LDAP
(r'^(O=|OU=|XML)$', 'JUNK'),
(r'^(Parser|Dual|Crypto|NO|PART|[Oo]riginall?y?|[Rr]epresentations?\.?)$', 'JUNK'),
(r'^(Refer|Apt|Agreement|Usage|Please|Based|Upstream|Files?|Filename:?|'
r'Description:?|Holder?s|HOLDER?S|[Pp]rocedures?|You|Everyone)$', 'JUNK'),
(r'^(Rights?|Unless|rant|Subject|Acknowledgements?|Special)$', 'JUNK'),
(r'^(LICEN[SC]E[EDS]?|Licen[sc]e[eds]?)$', 'TOIGNORE'),
(r'^(Derivative|Work|[Ll]icensable|[Ss]ince|[Ll]icen[cs]e[\.d]?|'
r'[Ll]icen[cs]ors?|under|COPYING)$', 'JUNK'),
(r'^(TCK|Use|[Rr]estrictions?|[Ii]ntroduction)$', 'JUNK'),
(r'^([Ii]ncludes?|[Vv]oluntary|[Cc]ontributions?|[Mm]odifications?)$', 'JUNK'),
(r'^(CONTRIBUTORS?|OTHERS?|Contributors?\:)$', 'JUNK'),
(r'^(Company:|For|File|Last|[Rr]eleased?|[Cc]opyrighting)$', 'JUNK'),
(r'^Authori.*$', 'JUNK'),
(r'^[Bb]uild$', 'JUNK'),
(r'^[Ss]tring$', 'JUNK'),
(r'^Implementation-Vendor$', 'JUNK'),
(r'^(dnl|rem|REM)$', 'JUNK'),
(r'^Implementation-Vendor$', 'JUNK'),
(r'^Supports$', 'JUNK'),
(r'^\.byte$', 'JUNK'),
(r'^[Cc]ontributed?$', 'JUNK'),
(r'^[Ff]unctions?$', 'JUNK'),
(r'^[Nn]otices?|[Mm]ust$', 'JUNK'),
(r'^ISUPPER?|ISLOWER$', 'JUNK'),
(r'^AppPublisher$', 'JUNK'),
(r'^DISCLAIMS?|SPECIFICALLY|WARRANT(Y|I)E?S?$', 'JUNK'),
(r'^(hispagestyle|Generic|Change|Add|Generic|Average|Taken|LAWS\.?|design|Driver)$', 'JUNK'),
# Windows XP
(r'^(Windows|XP|SP1|SP2|SP3|SP4)$', 'JUNK'),
(r'^example\.com$', 'JUNK'),
# C/C++
(r'^(template|struct|typedef|type|next|typename|namespace|type_of|begin|end)$', 'JUNK'),
# various trailing words that are junk
(r'^(?:Copyleft|LegalCopyright|AssemblyCopyright|Distributed|Report|'
r'Available|true|false|node|jshint|node\':true|node:true)$', 'JUNK'),
(r'^\$?LastChangedDate\$?$', 'YR'),
# Bare C char is COPYRIGHT SIGN
# (r'^C$', 'COPY'),
# exceptions to composed proper nouns, mostly debian copyright-related
# FIXME: may be lowercase instead?
(r'^(Title:?|Debianized-By:?|Upstream-Maintainer:?|Content-MD5)$', 'JUNK'),
(r'^(Upstream-Author:?|Packaged-By:?)$', 'JUNK'),
# NOT a copyright symbol (ie. "copyrighted."): treat as NN
(r'^[Cc](opyright(s|ed)?|OPYRIGHT(S|ED))\.$', 'NN'),
# copyright word or symbol
# note the leading @ .... this may be a source of problems
(r'.?(@?([Cc]opyright)s?:?|[Cc]opr\.?|[(][Cc][)]|(COPYRIGHT)S?:?)', 'COPY'),
# copyright in markup, until we strip markup: apache'>Copyright
(r'[A-Za-z0-9]+[\'">]+[Cc]opyright', 'COPY'),
# AT&T (the company), needs special handling
(r'^AT\&T[\.,]?$', 'COMP'),
# company suffix
(r'^([Ii]nc[.]?|[I]ncorporated|[Cc]ompany|Limited|LIMITED).?$', 'COMP'),
# company suffix
(r'^(INC(ORPORATED|[.])?|CORP(ORATION|[.])?|FOUNDATION|GROUP|COMPANY|'
r'[(]tm[)]).?$|[Ff]orum.?', 'COMP'),
# company suffix
(r'^([cC]orp(oration|[.])?|[fF]oundation|[Aa]lliance|Working|[Gg]roup|'
r'[Tt]echnolog(y|ies)|[Cc]ommunit(y|ies)|[Mm]icrosystems.?|[Pp]roject|'
r'[Tt]eams?|[Tt]ech).?$', 'COMP'),
(r"^Limited'?,?$", 'COMP'),
# company suffix : LLC, LTD, LLP followed by one extra char
(r'^([Ll][Ll][CcPp]|[Ll][Tt][Dd])\.,$', 'COMP'),
(r'^([Ll][Ll][CcPp]|[Ll][Tt][Dd])\.?,?$', 'COMP'),
(r'^([Ll][Ll][CcPp]|[Ll][Tt][Dd])\.$', 'COMP'),
(r'^L\.P\.$', 'COMP'),
# company suffix : SA, SAS, AG, AB, AS, CO, labs followed by a dot
(r'^(S\.?A\.?S?|Sas|sas|AG|AB|Labs?|[Cc][Oo]\.|Research|INRIA).?$', 'COMP'),
# (german) company suffix
(r'^[Gg][Mm][Bb][Hh].?$', 'COMP'),
# (italian) company suffix
(r'^[sS]\.[pP]\.[aA]\.?$', 'COMP'),
# (Laboratory) company suffix
(r'^(Labs?|Laboratory|Laboratories)\.?,?$', 'COMP'),
# (dutch and belgian) company suffix
(r'^[Bb]\.?[Vv]\.?|BVBA$', 'COMP'),
# university
(r'^\(?[Uu]niv(?:[.]|ersit(?:y|e|at?|ad?))\)?\.?$', 'UNI'),
# institutes
(r'^[Ii]nstitut(s|o|os|e|es|et|a|at|as|u|i)?$', 'NNP'),
# "holders" is considered as a common noun
(r'^([Hh]olders?|HOLDERS?|[Rr]espective)$', 'NN'),
# affiliates
(r'^[Aa]ffiliates?\.?$', 'NNP'),
# OU as in Org unit, found in some certificates
(r'^OU$', 'OU'),
# (r'^[Cc]ontributors?\.?', 'NN'),
# "authors" or "contributors" is interesting, and so a tag of its own
(r'^[Aa]uthors?\.?$', 'AUTH'),
(r'^[Aa]uthor\(s\)\.?$', 'AUTH'),
(r'^[Cc]ontribut(ors?|ing)\.?$', 'AUTH'),
# commiters is interesting, and so a tag of its own
(r'[Cc]ommitters\.??', 'COMMIT'),
# same for maintainers.
(r'^([Mm]aintainers?\.?|[Dd]evelopers?\.?)$', 'MAINT'),
# same for developed, etc...
(r'^(([Rr]e)?[Cc]oded|[Mm]odified|[Mm]ai?nt[ea]ine(d|r)|[Ww]ritten|[Dd]eveloped)$', 'AUTH2'),
# author
(r'@author', 'AUTH'),
# of
(r'^[Oo][Ff]|[Dd][Eei]$', 'OF'),
# in
(r'^(in|en)$', 'IN'),
# by
(r'^by$', 'BY'),
# following
(r'^following$', 'FOLLOW'),
# conjunction: and
(r'^([Aa]nd|&|[Uu]nd|ET|[Ee]t|at|and/or)$', 'CC'),
# conjunction: or. Even though or is not conjunctive ....
# (r'^or$', 'CC'),
# conjunction: or. Even though or is not conjunctive ....
# (r'^,$', 'CC'),
# ie. in things like "Copyright (c) 2012 John Li and others"
(r'^other?s\.?$', 'OTH'),
# in year ranges: dash, or 'to': "1990-1995", "1990/1995" or "1990 to 1995"
(r'^([-/]|to)$', 'DASH'),
# explicitly ignoring these words: FIXME: WHY?
(r'^([Tt]his|THIS|[Pp]ermissions?|PERMISSIONS?|All)$', 'NN'),
# Portions copyright .... are worth keeping
(r'[Pp]ortions?', 'PORTIONS'),
# in dutch/german names, like Marco van Basten, or Klemens von Metternich
# and Spanish/French Da Siva and De Gaulle
(r'^(([Vv][ao]n)|[Dd][aeu])$', 'VAN'),
# year or year ranges
# plain year with various leading and trailing punct
# dual or multi years 1994/1995. or 1994-1995
# 1987,88,89,90,91,92,93,94,95,96,98,99,2000,2001,2002,2003,2004,2006
# multi years
# dual years with second part abbreviated
# 1994/95. or 2002-04 or 1991-9
(r'^' + _PUNCT + _YEAR_OR_YEAR_YEAR_WITH_PUNCT + '+' +
'(' +
_YEAR_OR_YEAR_YEAR_WITH_PUNCT +
'|' +
_YEAR_THEN_YEAR_SHORT +
')*' + '$', 'YR'),
(r'^' + _PUNCT + _YEAR_OR_YEAR_YEAR_WITH_PUNCT + '+' +
'(' +
_YEAR_OR_YEAR_YEAR_WITH_PUNCT +
'|' +
_YEAR_THEN_YEAR_SHORT +
'|' +
_YEAR_SHORT_PUNCT +
')*' + '$', 'YR'),
# cardinal numbers
(r'^-?[0-9]+(.[0-9]+)?.?$', 'CD'),
# exceptions to proper nouns
(r'^(The|Commons|AUTHOR|software)$', 'NN'),
# composed proper nouns, ie. Jean-Claude or ST-Microelectronics
# FIXME: what about a variant with spaces around the dash?
(r'^[A-Z][a-zA-Z]*\s?[\-]\s?[A-Z]?[a-zA-Z]+.?$', 'NNP'),
# Countries abbreviations
(r'^U\.S\.A\.?$', 'NNP'),
# Places
(r'^\(?(?:Cambridge|Stockholm|Davis|Sweden|California)\)?,?.?$', 'NNP'),
# proper nouns with digits
(r'^[A-Z][a-z0-9]+.?$', 'NNP'),
# saxon genitive, ie. Philippe's
(r"^[A-Z][a-z]+[']s$", 'NNP'),
# Uppercase dotted name, ie. P.
(r"^([A-Z][.]?|[A-Z]+[\.])$", 'PN'),
# proper noun with some separator and trailing comma
(r"^[A-Z]+[.][A-Z][a-z]+[,]?$", 'NNP'),
# proper noun with apostrophe ': D'Orleans, D'Arcy, T'so, Ts'o
(r"^[A-Z][[a-z]?['][A-Z]?[a-z]+[,.]?$", 'NNP'),
# proper noun with apostrophe ': d'Itri
(r"^[a-z]['][A-Z]?[a-z]+[,\.]?$", 'NNP'),
# all CAPS word, at least 1 char long such as MIT, including an optional trailing comma or dot
(r'^[A-Z0-9]+[,]?$', 'CAPS'),
# all caps word 3 chars and more, enclosed in parens
(r'^\([A-Z0-9]{2,}\)$', 'CAPS'),
# proper noun:first CAP, including optional trailing comma
(r'^[A-Z][a-zA-Z0-9]+[,]?$', 'NNP'),
# email
(r'[a-zA-Z0-9\+_\-\.\%]+(@|at)[a-zA-Z0-9][a-zA-Z0-9\+_\-\.\%]*\.[a-zA-Z]{2,5}?', 'EMAIL'),
# email possibly in parens or brackets. The closing > or ) is optional
(r'[\<\(][a-zA-Z0-9\+_\-\.\%]+(@|at)[a-zA-Z0-9][a-zA-Z0-9\+_\-\.\%]*\.[a-zA-Z]{2,5}?[\>\)]?', 'EMAIL'),
# URLS such as ibm.com
# TODO: add more extensions?
(r'\(+[a-z0-9A-Z\-\.\_]+\.(com|net|info|org|us|mil|io|edu|co\.[a-z][a-z]|eu|biz)[\.\)]+$', 'URL'),
(r'<?a?.(href)?.\(?[a-z0-9A-Z\-\.\_]+\.(com|net|info|org|us|mil|io|edu|co\.[a-z][a-z]|eu|biz)[\.\)]?$', 'URL'),
# derived from regex in cluecode.finder
(r'<?a?.(href)?.('
r'(?:http|ftp|sftp)s?://[^\s<>\[\]"]+'
r'|(?:www|ftp)\.[^\s<>\[\]"]+'
r')\.?', 'URL'),
(r'^https?://[a-zA-Z0-9_\-]+(\.([a-zA-Z0-9_\-])+)+.?$', 'URL'),
# K.K. (a company suffix), needs special handling
(r'^K.K.,?$', 'NAME'),
# comma as a conjunction
(r'^,$', 'CC'),
# .\" is not a noun
(r'^\.\\\?"?$', 'JUNK'),
# Mixed cap nouns (rare) LeGrande
(r'^[A-Z][a-z]+[A-Z][a-z]+[\.\,]?$', 'MIXEDCAP'),
# nouns (default)
(r'.+', 'NN'),
]
# Comments in the Grammar are lines that start with #
grammar = """
YR-RANGE: {<YR>+ <CC>+ <YR>} #20
YR-RANGE: {<YR> <DASH>* <YR|CD>+} #30
YR-RANGE: {<CD>? <YR>+} #40
YR-RANGE: {<YR>+ } #50
YR-AND: {<CC>? <YR>+ <CC>+ <YR>} #60
YR-RANGE: {<YR-AND>+} #70
NAME: {<NN|NNP> <CC> <URL>} #80
NAME: {<NNP> <VAN|OF> <NN*> <NNP>} #90
NAME: {<NNP> <PN> <VAN> <NNP>} #100
# by the netfilter coreteam <coreteam@netfilter.org>
NAME: {<BY> <NN>+ <EMAIL>} #110
# Kaleb S. KEITHLEY
NAME: {<NNP> <PN> <CAPS>} #120
DASHCAPS: {<DASH> <CAPS>}
# INRIA - CIRAD - INRA
COMPANY: { <COMP> <DASHCAPS>+} #1280
# the Regents of the University of California
COMPANY: {<BY>? <NN> <NNP> <OF> <NN> <UNI> <OF> <COMPANY|NAME|NAME2|NAME3><COMP>?} #130
# Corporation/COMP for/NN National/NNP Research/COMP Initiatives/NNP
COMPANY: {<COMP> <NN> <NNP> <COMP> <NNP>} #140
# Sun Microsystems, Inc. Mountain View
COMPANY: {<COMP> <COMP> <NNP><NNP>} #144
# AT&T Laboratories, Cambridge
COMPANY: {<COMP> <COMP> <NNP>} #145
# rare "Software in the public interest, Inc."
COMPANY: {<COMP> <CD> <COMP>} #170
COMPANY: {<NNP> <IN><NN> <NNP> <NNP>+<COMP>?} #180
COMPANY: {<NNP> <CC> <NNP> <COMP>} #200
COMPANY: {<NNP|CAPS> <NNP|CAPS>? <NNP|CAPS>? <NNP|CAPS>? <NNP|CAPS>? <NNP|CAPS>? <COMP> <COMP>?} #210
COMPANY: {<UNI|NNP> <VAN|OF> <NNP>+ <UNI>?} #220
COMPANY: {<NNP>+ <UNI>} #230
COMPANY: {<UNI> <OF> <NN|NNP>} #240
COMPANY: {<COMPANY> <CC> <COMPANY>} #250
COMPANY: {<COMP>+} #260
COMPANY: {<COMPANY> <CC> <NNP>+} #270
# AIRVENT SAM s.p.a - RIMINI(ITALY)
COMPANY: {<COMPANY> <DASH> <NNP|NN> <EMAIL>?} #290
# Typical names
#John Robert LoVerso
NAME: {<NNP> <NNP> <MIXEDCAP>} #340
NAME: {<NNP|PN>+ <NNP>+} #350
NAME: {<NNP> <PN>? <NNP>+} #360
NAME: {<NNP> <NNP>} #370
NAME: {<NNP> <NN> <EMAIL>} #390
NAME: {<NNP> <PN|VAN>? <PN|VAN>? <NNP>} #400
NAME: {<NNP> <NN> <NNP>} #410
NAME: {<NNP> <COMMIT>} #420
# the LGPL VGABios developers Team
NAME: {<NN>? <NNP> <MAINT> <COMP>} #440
# Debian Qt/KDE Maintainers
NAME: {<NNP> <NN>? <MAINT>} #460
NAME: {<NN> <NNP> <ANDCO>} #470
NAME: {<NN>? <NNP> <CC> <NAME>} #480
NAME: {<NN>? <NNP> <OF> <NN>? <NNP> <NNP>?} #490
NAME: {<NAME> <CC> <NAME>} #500
COMPANY: {<NNP> <IN> <NN>? <COMPANY>} #510
NAME2: {<NAME> <EMAIL>} #530
NAME3: {<YR-RANGE> <NAME2|COMPANY>+} #540
NAME: {<NAME|NAME2>+ <OF> <NNP> <OF> <NN>? <COMPANY>} #550
NAME: {<NAME|NAME2>+ <CC|OF>? <NAME|NAME2|COMPANY>} #560
NAME3: {<YR-RANGE> <NAME>+} #570
NAME: {<NNP> <OF> <NNP>} #580
NAME: {<NAME> <NNP>} #590
NAME: {<NN|NNP|CAPS>+ <CC> <OTH>} #600
NAME: {<NNP> <CAPS>} #610
NAME: {<CAPS> <DASH>? <NNP|NAME>} #620
NAME: {<NNP> <CD> <NNP>} #630
NAME: {<COMP> <NAME>+} #640
NAME: {<NNP|CAPS>+ <AUTH>} #660
NAME: {<VAN|OF> <NAME>} #680
NAME: {<NAME3> <COMP>} #690
# more names
NAME: {<NNP> <NAME>} #710
NAME: {<CC>? <IN> <NAME|NNP>} #720
NAME: {<NAME><UNI>} #730
NAME: { <NAME> <IN> <NNP> <CC|IN>+ <NNP>} #740
# Companies
COMPANY: {<NAME|NAME2|NAME3|NNP>+ <OF> <NN>? <COMPANY|COMP>} #770
COMPANY: {<NNP> <COMP> <COMP>} #780
COMPANY: {<NN>? <COMPANY|NAME|NAME2> <CC> <COMPANY|NAME|NAME2>} #790
COMPANY: {<COMP|NNP> <NN> <COMPANY> <NNP>+} #800
COMPANY: {<COMPANY> <CC> <AUTH>} #810
COMPANY: {<NN> <COMP>+} #820
COMPANY: {<URL>} #830
COMPANY: {<COMPANY> <COMP>} #840
# The Regents of the University of California
NAME: {<NN> <NNP> <OF> <NN> <COMPANY>} #870
# Trailing Authors
COMPANY: {<NAME|NAME2|NAME3|NNP>+ <AUTH>} #900
# Jeffrey C. Foo
COMPANY: {<PN> <COMPANY>} #910
# "And" some name
ANDCO: {<CC> <NNP> <NNP>+} #930
ANDCO: {<CC> <OTH>} #940
ANDCO: {<CC> <NN> <NAME>+} #950
ANDCO: {<CC> <COMPANY|NAME|NAME2|NAME3>+} #960
COMPANY: {<COMPANY|NAME|NAME2|NAME3> <ANDCO>+} #970
NAME: {<NNP> <ANDCO>+} #980
NAME: {<BY> <NN> <AUTH>} #1000
# NetGroup, Politecnico di Torino (Italy)
COMPANY: {<NNP> <COMPANY> <NN>} #1030
# Arizona Board of Regents (University of Arizona)
NAME: {<COMPANY> <OF> <NN|NNP>} #1060
# The Regents of the University of California
NAME: {<NAME> <COMPANY>} #1090
# John Doe and Myriam Doe
NAME: {<NAME|NNP> <CC> <NNP|NAME>} #1120
# International Business Machines Corporation and others
COMPANY: {<COMPANY> <CC> <OTH>} #1150
COMPANY: {<NAME3> <CC> <OTH>} #1160
# Nara Institute of Science and Technology.
COMPANY: {<NNP> <COMPANY> <CC> <COMP>} #1190
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
COMPANY: {<NNP> <COMPANY> <NAME>} #1220
# Bio++ Development Team
COMPANY: {<NN> <COMPANY>} #1250
# Institut en recherche ....
COMPANY: {<NNP> <IN> <NN>+ <COMPANY>} #1310
# OU OISTE Foundation
COMPANY: {<OU> <COMPANY>} #1340
# NETLABS, Temple University
COMPANY: {<CAPS> <COMPANY>} #1370
# XZY emails
COMPANY: {<COMPANY> <EMAIL>+} #1400
# "And" some name
ANDCO: {<CC>+ <NN> <NNP>+<UNI|COMP>?} #1430
ANDCO: {<CC>+ <NNP> <NNP>+<UNI|COMP>?} #1440
ANDCO: {<CC>+ <COMPANY|NAME|NAME2|NAME3>+<UNI|COMP>?} #1450
COMPANY: {<COMPANY|NAME|NAME2|NAME3> <ANDCO>+} #1460
COMPANY: {<COMPANY><COMPANY>+} #1480
# Oracle and/or its affiliates.
NAME: {<NNP> <ANDCO>} #1410
# Various forms of copyright statements
COPYRIGHT: {<COPY> <NAME> <COPY> <YR-RANGE>} #1510
COPYRIGHT: {<COPY> <COPY>? <BY>? <COMPANY|NAME*|YR-RANGE>* <BY>? <EMAIL>+} #1530
COPYRIGHT: {<COPY> <COPY>? <NAME|NAME2|NAME3> <CAPS> <YR-RANGE>} #1550
#Copyright . 2008 Mycom Pany, inc.
COPYRIGHT: {<COPY> <NN> <NAME3>} #1560
COPYRIGHT: {<COPY> <COPY>? <NAME|NAME2|NAME3>+ <YR-RANGE>*} #1570
COPYRIGHT: {<COPY> <COPY>? <CAPS|NNP>+ <CC> <NN> <COPY> <YR-RANGE>?} #1590
COPYRIGHT: {<COPY> <COPY>? <BY>? <COMPANY|NAME*>+ <YR-RANGE>*} #1610
COPYRIGHT: {<NNP>? <COPY> <COPY>? (<YR-RANGE>+ <BY>? <NN>? <COMPANY|NAME|NAME2>+ <EMAIL>?)+} #1630
COPYRIGHT: {<COPY> <COPY>? <NN> <NAME> <YR-RANGE>} #1650
COPYRIGHT: {<COPY>+ <BY> <NAME|NAME2|NAME3>+} #1670
COPYRIGHT: {<COPY> <COPY> <COMP>+} #1690
COPYRIGHT: {<COPY> <COPY> <NN>+ <COMPANY|NAME|NAME2>+} #1710
COPYRIGHT: {<COPY> <COPY>? <NN> <NN>? <COMP> <YR-RANGE>?} #1730
COPYRIGHT: {<COPY> <COPY>? <NN> <NN>? <COMP> <YR-RANGE>?} #1750
COPYRIGHT: {<COPY> <NN> <NN>? <COMPANY> <YR-RANGE>?} #1760
COPYRIGHT: {<COPY> <COPY>? <YR-RANGE|NNP> <CAPS|BY>? <NNP|YR-RANGE|NAME>+} #1780
COPYRIGHT: {<COPY> <COPY> <NNP>+} #1800
# Copyright (c) 2016 Project Admins foobar
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE>+ <COMP> <NNP> <NN>} #1830
# Copyright (c) 1995, 1996 The President and Fellows of Harvard University
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <NN> <NNP> <ANDCO>} #1860
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <NN> <AUTH>} #1880
# Copyright 1999, 2000 - D.T.Shield.
# Copyright (c) 1999, 2000 - D.T.Shield.
COPYRIGHT2: {<COPY> <COPY>? <YR-RANGE> <DASH> <NN>} #1920
# Copyright 2007-2010 the original author or authors.
# Copyright (c) 2007-2010 the original author or authors.
COPYRIGHT2: {<COPY>+ <YR-RANGE> <NN> <JUNK> <AUTH> <NN> <AUTH>} #1960
#(c) 2017 The Chromium Authors
COPYRIGHT2: {<COPY> <COPY>? <YR-RANGE> <NN> <NNP> <NN>} #1990
# Copyright (C) Research In Motion Limited 2010. All rights reserved.
COPYRIGHT2: {<COPYRIGHT> <COMPANY> <YR-RANGE>} #2020
# Copyright (c) 1999 Computer Systems and Communication Lab,
# Institute of Information Science, Academia Sinica.
COPYRIGHT2: {<COPYRIGHT> <COMPANY> <COMPANY>} #2060
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <BY> <NN> <NN> <NAME>} #2080
COPYRIGHT2: {<COPY> <YR-RANGE> <BY> <NN> <NN> <NAME>} #2090
COPYRIGHT2: {<COPY> <COPY><NN>? <COPY> <YR-RANGE> <BY> <NN>} #2110
COPYRIGHT2: {<COPY> <NN>? <COPY> <YR-RANGE> <BY> <NN>} #2120
COPYRIGHT2: {<COPY> <COPY>? <NN> <YR-RANGE> <BY> <NAME>} #2140
COPYRIGHT2: {<COPY> <COPY>? <YR-RANGE> <DASH> <BY>? <NAME2|NAME>} #2160
COPYRIGHT2: {<COPY> <COPY>? <YR-RANGE> <NNP> <NAME>} #2180
# Copyright (c) 2012-2016, Project contributors
COPYRIGHT2: {<COPY> <COPY>? <YR-RANGE> <COMP> <AUTH>} #2210
COPYRIGHT2: {<COPY>+ <YR-RANGE> <COMP>} #2230
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE>+ <CAPS>? <MIXEDCAP>} #2240
COPYRIGHT2: {<NAME> <COPY> <YR-RANGE>} #2260
COPYRIGHT2: {<COPY> <COPY>? <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>*} #2280
COPYRIGHT2: {<COPY> <COPY>? <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>* <COMPANY>} #2300
COPYRIGHT2: {<COPY> <COPY>? <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>* <DASH> <COMPANY>} #2320
COPYRIGHT2: {<NNP|NAME|COMPANY> <COPYRIGHT2>} #2340
COPYRIGHT: {<COPYRIGHT> <NN> <COMPANY>} #2360
COPYRIGHT: {<COPY> <COPY>? <BY>? <NN> <COMPANY>} #2380
COPYRIGHT: {<COMPANY> <NN> <NAME> <COPYRIGHT2>} #2400
COPYRIGHT: {<COPYRIGHT2> <COMP> <COMPANY>} #2410
COPYRIGHT: {<COMPANY> <NN> <COPYRIGHT2>} #2420
COPYRIGHT: {<COPYRIGHT2> <NNP> <CC> <COMPANY>} #2430
COPYRIGHT: {<COPYRIGHT2> <NAME|NAME2|NAME3>+} #2860
# copyrights in the style of Scilab/INRIA
COPYRIGHT: {<NNP> <NN> <COPY> <NNP>} #2460
COPYRIGHT: {<NNP> <COPY> <NNP>} #2470
# Copyright or Copr. 2006 INRIA - CIRAD - INRA
COPYRIGHT: {<COPY> <NN> <COPY> <YR-RANGE>+ <COMPANY>+} #2500
#Copyright or Copr. CNRS
COPYRIGHT: {<COPY> <NN> <COPY> <CAPS>} #2530
#Copyright or Copr. CNRS
COPYRIGHT: {<COPY> <NN> <COPY> <COPYRIGHT>} #2560
COPYRIGHT: {<COPYRIGHT|COPYRIGHT2> <COMPANY>+ <NAME>*} #2580
# at iClick, Inc., software copyright (c) 1999
COPYRIGHT: {<ANDCO> <NN> <COPYRIGHT2>} #2590
# portions copyright
COPYRIGHT: {<PORTIONS> <COPYRIGHT|COPYRIGHT2>} #2610
#copyright notice (3dfx Interactive, Inc. 1999), (notice is JUNK)
COPYRIGHT: {<COPY> <JUNK> <COMPANY> <YR-RANGE>} #2620
# Authors
AUTH: {<AUTH2>+ <BY>} #2640
AUTHOR: {<AUTH>+ <NN>? <COMPANY|NAME|YR-RANGE>* <BY>? <EMAIL>+} #2650
AUTHOR: {<AUTH>+ <NN>? <COMPANY|NAME|NAME2>+ <YR-RANGE>*} #2660
AUTHOR: {<AUTH>+ <YR-RANGE>+ <BY>? <COMPANY|NAME|NAME2>+} #2670
AUTHOR: {<AUTH>+ <YR-RANGE|NNP> <NNP|YR-RANGE>+} #2680
AUTHOR: {<AUTH>+ <NN|CAPS>? <YR-RANGE>+} #2690
AUTHOR: {<COMPANY|NAME|NAME2>+ <AUTH>+ <YR-RANGE>+} #2700
AUTHOR: {<YR-RANGE> <NAME|NAME2>+} #2710
AUTHOR: {<NAME2>+} #2720
AUTHOR: {<AUTHOR> <CC> <NN>? <AUTH>} #2730
AUTHOR: {<BY> <EMAIL>} #2740
ANDAUTH: {<CC> <AUTH|NAME>+} #2750
AUTHOR: {<AUTHOR> <ANDAUTH>+} #2760
# Compounded statements usings authors
# found in some rare cases with a long list of authors.
COPYRIGHT: {<COPY> <BY> <AUTHOR>+ <YR-RANGE>*} #2800
COPYRIGHT: {<AUTHOR> <COPYRIGHT2>} #2820
COPYRIGHT: {<AUTHOR> <YR-RANGE>} #2830
COPYRIGHT: {<COPYRIGHT> <NAME3>} #2850
"""
def strip_numbers(s):
"""
Return a string removing words made only of numbers. If there is an
exception or s is not a string, return s as-is.
"""
if s:
s = u' '.join([x for x in s.split(' ') if not x.isdigit()])
return s
def strip_some_punct(s):
"""
Return a string stripped from some leading and trailing punctuations.
"""
if s:
s = s.strip(''','"}{-_:;&''')
s = s.lstrip('.>)]')
s = s.rstrip('<([')
return s
def fix_trailing_space_dot(s):
"""
Return a string stripped from some leading and trailing punctuations.
"""
if s and s.endswith(' .'):
s = s[:-2] + '.'
return s
def strip_unbalanced_parens(s, parens='()'):
"""
Return a string where unbalanced parenthesis are replaced with a space.
`paren` is a pair of characters to balance such as (), <>, [] , {}.
For instance:
>>> strip_unbalanced_parens('This is a super string', '()')
'This is a super string'
>>> strip_unbalanced_parens('This is a super(c) string', '()')
'This is a super(c) string'
>>> strip_unbalanced_parens('This ((is a super(c) string))', '()')
'This ((is a super(c) string))'
>>> strip_unbalanced_parens('This )(is a super(c) string)(', '()')
'This (is a super(c) string) '
>>> strip_unbalanced_parens(u'This )(is a super(c) string)(', '()')
u'This (is a super(c) string) '
>>> strip_unbalanced_parens('This )(is a super(c) string)(', '()')
'This (is a super(c) string) '
>>> strip_unbalanced_parens('This )((is a super(c) string)((', '()')
'This (is a super(c) string) '
>>> strip_unbalanced_parens('This ) is', '()')
'This is'
>>> strip_unbalanced_parens('This ( is', '()')
'This is'
>>> strip_unbalanced_parens('This )) is', '()')
'This is'
>>> strip_unbalanced_parens('This (( is', '()')
'This is'
>>> strip_unbalanced_parens('(', '()')
' '
>>> strip_unbalanced_parens(')', '()')
' '
"""
start, end = parens
if not start in s and not end in s:
return s
unbalanced = []
unbalanced_append = unbalanced.append
stack = []
stack_append = stack.append
stack_pop = stack.pop
for i, c in enumerate(s):
if c == start:
stack_append((i, c,))
elif c == end:
try:
stack_pop()
except IndexError:
unbalanced_append((i, c,))
unbalanced.extend(stack)
pos_to_del = set([i for i, c in unbalanced])
cleaned = [c if i not in pos_to_del else ' ' for i, c in enumerate(s)]
return type(s)('').join(cleaned)
def strip_all_unbalanced_parens(s):
"""
Return a string where unbalanced parenthesis are replaced with a space.
Strips (), <>, [] and {}.
"""
c = strip_unbalanced_parens(s, '()')
c = strip_unbalanced_parens(c, '<>')
c = strip_unbalanced_parens(c, '[]')
c = strip_unbalanced_parens(c, '{}')
return c
def refine_copyright(c):
"""
Refine a detected copyright string.
FIXME: the grammar should not allow this to happen.
"""
c = strip_some_punct(c)
c = fix_trailing_space_dot(c)
c = strip_all_unbalanced_parens(c)
# FIXME: this should be in the grammar, but is hard to get there right
# these are often artifacts of markup
c = c.replace('Copyright Copyright', 'Copyright')
c = c.replace('Copyright copyright', 'Copyright')
c = c.replace('copyright copyright', 'Copyright')
c = c.replace('copyright Copyright', 'Copyright')
c = c.replace('copyright\'Copyright', 'Copyright')
c = c.replace('copyright"Copyright', 'Copyright')
c = c.replace('copyright\' Copyright', 'Copyright')
c = c.replace('copyright" Copyright', 'Copyright')
s = c.split()
# fix trailing garbage captured by the grammar
last_word = s[-1]
if last_word.lower() in ('parts', 'any', '0', '1'):
s = s[:-1]
# this is hard to catch otherwise, unless we split the author
# vs copyright grammar in two. Note that AUTHOR and Authors should be kept
last_word = s[-1]
if last_word.lower() == 'author' and last_word not in ('AUTHOR', 'AUTHORS', 'Authors',) :
s = s[:-1]
s = u' '.join(s)
return s.strip()
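# e.g. refine_copyright('Copyright Copyright 2001 Foo parts') -> 'Copyright 2001 Foo'
# (illustrative: the doubled marker is collapsed and the trailing junk word dropped)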
def refine_author(c):
"""
Refine a detected author.
FIXME: the grammar should not allow this to happen.
"""
c = strip_some_punct(c)
c = strip_numbers(c)
c = strip_all_unbalanced_parens(c)
c = c.split()
# FIXME: also split comma separated lists: gthomas, sorin@netappi.com, andrew.lunn@ascom.che.g.
# strip prefixes.
# NOTE: prefixes are hard to catch otherwise, unless we split the
# author vs copyright grammar in two
prefixes = set([
'author',
'authors',
'author(s)',
'authored',
'contributor',
'contributors',
'contributor(s)',
'by',
'developed',
'written',
'created',
])
while c and c[0].lower() in prefixes:
c = c[1:]
c = u' '.join(c)
return c.strip()
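# e.g. refine_author('by John Doe 2001') -> 'John Doe'
# (illustrative: the 'by' prefix and the bare number are stripped)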
def refine_date(c):
"""
Refine a detected date or date range.
FIXME: the grammar should not allow this to happen.
"""
return strip_some_punct(c)
def is_junk(c):
"""
Return True if string `c` is a junk copyright that cannot be resolved
otherwise by parsing with a grammar.
It would be best not to have to resort to this, but this is practical.
"""
junk = set([
'copyrighted by their authors',
'copyrighted by their authors.',
'copyright holder or other authorized',
'copyright holder who authorizes',
'copyright holder has authorized',
'copyright holder nor the author',
'copyright holder(s) or the author(s)',
'copyright owner or entity authorized',
'copyright owner or contributors',
'copyright for a new language file should be exclusivly the authors',
'copyright holder or said author',
'copyright holder, or any author',
'copyrighted material, only this license, or another one contracted with the authors',
'copyright notices, authorship',
'copyright holder means the original author(s)',
"copyright notice. timevar.def's author",
"copyright holder or simply that it is author-maintained'.",
"copyright holder or simply that is author-maintained'.",
'(c) if you bring a patent claim against any contributor',
'copyright-check writable-files m4-check author_mark_check',
# 'copyrighting it yourself or claiming authorship'
])
return c.lower() in junk
class CopyrightDetector(object):
"""
Class to detect copyrights and authorship.
"""
def __init__(self):
self.tagger = nltk.RegexpTagger(patterns)
self.chunker = nltk.RegexpParser(grammar, trace=COPYRIGHT_TRACE)
@staticmethod
def as_str(node):
"""
Return a parse tree node as a space-normalized string.
"""
node_string = ' '.join(k for k, _ in node.leaves())
return u' '.join(node_string.split())
def detect(self, numbered_lines):
"""
Return a sequence of tuples (copyrights, authors, years, holders)
detected in a sequence of numbered line tuples.
"""
numbered_lines = list(numbered_lines)
numbers = [n for n, _l in numbered_lines]
start_line = min(numbers)
end_line = max(numbers)
# logger.debug('CopyrightDetector:detect:lines numbers: %(start_line)d->%(end_line)d' % locals())
tokens = self.get_tokens(numbered_lines)
# we accumulate detected items in these synchronized lists
# this could be a single list of namedtuples
# or a list of dicts instead
copyrights, authors, years, holders = [], [], [], []
if not tokens:
return copyrights, authors, years, holders, None, None
# OPTIMIZED
copyrights_append = copyrights.append
authors_append = authors.append
years_append = years.append
holders_append = holders.append
# first, POS tag each token using token regexes
tagged_text = self.tagger.tag(tokens)
logger.debug('CopyrightDetector:tagged_text: ' + str(tagged_text))
# then build a parse tree based on tagged tokens
tree = self.chunker.parse(tagged_text)
logger.debug('CopyrightDetector:parse tree: ' + str(tree))
# OPTIMIZED
nltk_tree_Tree = nltk.tree.Tree
CopyrightDetector_as_str = CopyrightDetector.as_str
def collect_year_and_holder(detected_copyright):
"""
Walk the a parse sub-tree starting with the `detected_copyright`
node collecting all years and holders.
"""
for copyr in detected_copyright:
if isinstance(copyr, nltk_tree_Tree):
# logger.debug('n: ' + str(copyr))
node_text = CopyrightDetector_as_str(copyr)
copyr_label = copyr.label()
if 'YR-RANGE' in copyr_label:
years_append(refine_date(node_text))
elif 'NAME' == copyr_label or 'COMPANY' in copyr_label:
# FIXME : this would wreck things like 23andme
# where a company name contains numbers
holders_append(refine_author(node_text))
# logger.debug('CopyrightDetector: node_text: ' + node_text)
else:
collect_year_and_holder(copyr)
# then walk the parse tree, collecting copyrights, years and authors
for tree_node in tree:
if isinstance(tree_node, nltk_tree_Tree):
node_text = CopyrightDetector_as_str(tree_node)
tree_node_label = tree_node.label()
if 'COPYRIGHT' in tree_node_label:
if node_text and node_text.strip():
refined = refine_copyright(node_text)
if not is_junk(refined):
copyrights_append(refined)
collect_year_and_holder(tree_node)
elif tree_node_label == 'AUTHOR':
authors_append(refine_author(node_text))
return copyrights, authors, years, holders, start_line, end_line
def get_tokens(self, numbered_lines):
"""
Return an iterable of tokens from lines of text.
"""
tokens = []
tokens_append = tokens.append
# simple tokenization: spaces and some punctuation
splitter = re.compile('[\\t =;]+').split
for _line_number, line in numbered_lines:
line = line.strip()
if line:
line = prepare_text_line(line)
if line :
line = strip_markup(line)
if line and line.strip():
for tok in splitter(line):
# strip trailing quotes and ignore empties
tok = tok.strip("' ")
if not tok:
continue
# strip trailing colons: why?
tok = tok.rstrip(':').strip()
# strip leading @: : why?
tok = tok.lstrip('@').strip()
if tok and tok not in (':',):
tokens_append(tok)
logger.debug('CopyrightDetector:tokens: ' + repr(tokens))
return tokens
def is_candidate(line):
"""
Return True if a line is a candidate line for copyright detection
"""
line = line.lower()
line = prepare_text_line(line)
if has_content(line):
if copyrights_hint.years(line):
logger.debug('is_candidate: year in line:\n%(line)r' % locals())
return True
else:
logger.debug('is_candidate: NOT year in line:\n%(line)r' % locals())
for marker in copyrights_hint.statement_markers:
if marker in line:
logger.debug('is_candidate: %(marker)r in line:\n%(line)r' % locals())
return True
def has_content(line):
"""
Return True if a line has some content, ignoring white space, digit and
punctuation.
"""
return re.sub(r'\W+', '', line)
def is_all_rights_reserved(line):
"""
Return True if a line ends with "all rights reserved"-like statements.
"""
line = prepare_text_line(line)
# remove any non-character
line = re.sub(r'\W+', '', line)
line = line.strip()
line = line.lower()
return line.endswith(('rightreserved', 'rightsreserved'))
def candidate_lines(lines):
"""
Yield lists of candidate lines where each list element is a tuple of
(line number, line text).
A candidate line is a line of text that may contain copyright statements.
A few lines before and after a candidate line are also included.
"""
candidates = deque()
candidates_append = candidates.append
candidates_clear = candidates.clear
previous = None
# used as a state and line counter
in_copyright = 0
for line_number, line in enumerate(lines):
# the first line number is ONE, not zero
numbered_line = (line_number + 1, line)
if is_candidate(line):
# the state is now "in copyright"
in_copyright = 2
# we keep one line before a candidate line if any
if previous:
candidates_append(previous)
previous = None
# we keep the candidate line and yield if we reached the end
# of a statement
candidates_append(numbered_line)
if is_all_rights_reserved(line):
yield list(candidates)
candidates_clear()
in_copyright = 0
else:
if in_copyright:
# if the previous line was a candidate
# then we keep one line after that candidate line
if has_content(line):
candidates_append(numbered_line)
# and decrement our state
in_copyright -= 1
else:
if candidates:
yield list(candidates)
candidates_clear()
in_copyright = 0
else:
# if are neither a candidate line nor the line just after
# then we yield the accumulated lines if any
if candidates:
yield list(candidates)
candidates_clear()
# and we keep track of this line as "previous"
if has_content(line):
previous = numbered_line
else:
previous = None
# finally
if candidates:
yield list(candidates)
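# Illustrative sketch (not part of the original module): candidate_lines groups
# numbered lines around copyright hints. The helper name, sample lines and the
# expected grouping are indicative; the exact result depends on the markers in
# cluecode.copyrights_hint.
def _demo_candidate_lines():
    lines = [
        'def foo():',
        '# Copyright (c) 2002 Jane Doe. All rights reserved.',
        '# unrelated trailing comment',
    ]
    # expected: one group holding line 1 (kept as context) and line 2, yielded
    # early because line 2 ends with an "all rights reserved" statement
    return list(candidate_lines(lines))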
def strip_markup(text):
"""
Strip markup tags from text.
"""
html_tag_regex = re.compile(
r'<'
r'[(--)\?\!\%\/]?'
r'[a-zA-Z0-9#\"\=\s\.\;\:\%\&?!,\+\*\-_\/]+'
r'\/?>',
re.MULTILINE | re.UNICODE
)
if text:
text = re.sub(html_tag_regex, ' ', text)
return text
COMMON_WORDS = set([
'Unicode',
'Modified',
'NULL',
'FALSE', 'False',
'TRUE', 'True',
'Last',
'Predefined',
'If',
'Standard',
'Version', 'Versions',
'Package', 'PACKAGE',
'Powered',
'Licensed', 'License', 'License.', 'Licensee', 'License:', 'License-Alias:',
'Legal',
'Entity',
'Indemnification.',
'AS', 'IS',
'See',
'This',
'Java',
'DoubleClick',
'DOM', 'SAX', 'URL',
'Operating System',
'Original Software',
'Berkeley Software Distribution',
'Software Release', 'Release',
'IEEE Std',
'BSD',
'POSIX',
'Derivative Works',
'Intellij IDEA',
'README', 'NEWS',
'ChangeLog', 'CHANGElogger', 'Changelog',
'Redistribution',
])
def lowercase_well_known_word(text):
"""
Return text with certain words lowercased.
Rationale: some common words can start with a capital letter and be mistaken
for a named entity because capitalized words are often company names.
"""
lines = []
lines_append = lines.append
for line in text.splitlines(True):
words = []
words_append = words.append
for word in line.split():
if word in COMMON_WORDS:
word = word.lower()
words_append(word)
lines_append(' '.join(words))
return '\n'.join(lines)
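# e.g. lowercase_well_known_word('Licensed under BSD') -> 'licensed under bsd'
# (illustrative; 'under' is left alone because it is not in COMMON_WORDS)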
# FIXME: instead of using functions, use plain re and let the re cache do its work
def IGNORED_PUNCTUATION_RE():
return re.compile(r'[*#"%\[\]\{\}`]+', re.I | re.M | re.U)
def ASCII_LINE_DECO_RE():
return re.compile(r'[-_=!\\*]{2,}')
def ASCII_LINE_DECO2_RE():
return re.compile(r'/{3,}')
def WHITESPACE_RE():
return re.compile(r' +')
def MULTIQUOTES_RE():
return re.compile(r"\'{2,}")
# TODO: add debian <s> </s> POS name taggings
def DEBIAN_COPYRIGHT_TAGS_RE():
return re.compile(r"(\<s\>|\<s\\/>)")
def prepare_text_line(line):
"""
Prepare a line of text for copyright detection.
"""
re_sub = re.sub
# FIXME: maintain the original character positions
# strip whitespace
line = line.strip()
# strip comment markers
# common comment characters
line = line.strip('\\/*#%;')
# uncommon comment line prefix in DOS batch files
line = re_sub('^rem\s+', ' ', line)
line = re_sub('^\@rem\s+', ' ', line)
# uncommon comment line prefix in autotools am/in
line = re_sub('^dnl\s+', ' ', line)
# uncommon comment line prefix in man pages
line = re_sub('^\.\\\\"', ' ', line)
# uncommon pipe chars in some ascii art
line = line.replace('|', ' ')
# normalize copyright signs and spacing around them
line = line.replace('(C)', ' (c) ')
line = line.replace('(c)', ' (c) ')
# the case of \251 is tested by 'weirdencoding.h'
line = line.replace(u'\251', u' (c) ')
line = line.replace('&copy;', ' (c) ')
line = line.replace('&#169;', ' (c) ')
line = line.replace('&#xa9;', ' (c) ')
line = line.replace(u'\xa9', ' (c) ')
# FIXME: what is \xc2???
line = line.replace(u'\xc2', '')
# TODO: add more HTML entities replacements
# see http://www.htmlhelp.com/reference/html40/entities/special.html
# convert html entities for CR and LF to space
line = line.replace(u'&#13;&#10;', ' ')
line = line.replace(u'&#13;', ' ')
line = line.replace(u'&#10;', ' ')
# normalize (possibly repeated) quotes to unique single quote '
# backticks ` and "
line = line.replace(u'`', "'")
line = line.replace(u'"', "'")
line = re.sub(MULTIQUOTES_RE(), "'", line)
# quotes to space? but t'so will be wrecked
# line = line.replace(u"'", ' ')
# some trailing garbage ')
line = line.replace("')", ' ')
# note that we do not replace the debian tag by a space: we remove it
# TODO: use POS tag: (r'^(?:\<s\>).*(?:\<s\\/>)$', 'NAME'),
line = re_sub(DEBIAN_COPYRIGHT_TAGS_RE(), '', line)
line = re_sub(IGNORED_PUNCTUATION_RE(), ' ', line)
# tabs to spaces
line = line.replace('\t', ' ')
# normalize spaces around commas
line = line.replace(' , ', ', ')
# remove ASCII "line decorations"
# such as in --- or === or !!! or *****
line = re_sub(ASCII_LINE_DECO_RE(), ' ', line)
line = re_sub(ASCII_LINE_DECO2_RE(), ' ', line)
# Replace escaped literal \0 \n \r \t that may exist as-is by a space
# such as in code literals: a="\\n some text"
line = line.replace('\\r', ' ')
line = line.replace('\\n', ' ')
line = line.replace('\\t', ' ')
line = line.replace('\\0', ' ')
# TODO: Why?
# replace contiguous spaces with only one occurrence
# line = re.sub(WHITESPACE_RE(), ' ', text)
# normalize to ascii text
line = commoncode.text.toascii(line)
# logger.debug("ascii_only_text: " + text)
# strip verbatim back slash and comment signs again at both ends of a line
# FIXME: this is done at the start of this function already
line = line.strip('\\/*#%;')
# normalize to use only LF as line endings so we can split correctly
# and keep line endings
line = commoncode.text.unixlinesep(line)
# why?
line = lowercase_well_known_word(line)
return line
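# Illustrative normalization (not in the original module):
# prepare_text_line('** Copyright (C) 1999 Foo **') -> 'Copyright (c) 1999 Foo'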
|
yashdsaraf/scancode-toolkit
|
src/cluecode/copyrights.py
|
Python
|
apache-2.0
| 47,037
|
[
"VisIt"
] |
5de2bc2698e148080de271a0e992dd6d14a963334a0ce46be9bd718ad3e133b3
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me via: http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from enum import Enum
__all__ = ['eReturnCode']
class eReturnCode(Enum):
Nothing = -2
Error = 1
Success = 0
NotFound = -3
Unknown = -4
def __str__(self):
return str(self.value)
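# Illustrative usage (not part of the original module): the __str__ override
# makes return codes render as their numeric value, e.g.
# str(eReturnCode.Success) == '0' and str(eReturnCode.Error) == '1'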
|
thica/ORCA-Remote
|
src/ORCA/actions/ReturnCode.py
|
Python
|
gpl-3.0
| 1,130
|
[
"ORCA"
] |
52e4d693bfbc34874a7faf8460e4dc1292d4124971aa2bb10cfe518ec3cf5b49
|
#!/home/epicardi/bin/python27/bin/python
# Copyright (c) 2013-2014 Ernesto Picardi <ernesto.picardi@uniba.it>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys, os, time, math, random, getopt, operator, string, errno
try: import pysam
except: sys.exit('Pysam module not found.')
from multiprocessing import Process, Queue
from Queue import Empty
version='1.0'
pid=str(os.getpid()+random.randint(0,999999999))
def usage():
print """
USAGE: python REDItoolKnown.py [options]
Options:
-i BAM file
-I Sort input BAM file
-f Reference in fasta file
-l List of known RNA editing events
-C Base interval to explore [100000]
-k List of chromosomes to skip separated by comma or file
-t Number of threads [1]
-o Output folder [rediFolder_%s]
-F Internal folder name [null]
-c Min. read coverage [10]
-Q Fastq offset value [33]
-q Min. quality score [25]
-m Min. mapping quality score [20]
-O Min. homopolymeric length [5]
-s Infer strand (for strand oriented reads) [1]
-g Strand inference type 1:maxValue 2:useConfidence [1]
-x Strand confidence [0.70]
-S Strand correction
-G Infer strand by gff annotation (must be sorted, otherwise use -X)
-X Sort annotation files
-K File with positions to exclude
-e Exclude multi hits
-d Exclude duplicates
-p Use paired concordant reads only
-u Consider mapping quality
-T Trim x bases up and y bases down per read [0-0]
-B Blat folder for correction
-U Remove substitutions in homopolymeric regions
-v Min. num. of reads supporting the variation [3]
-n Min. editing frequency [0.1]
-E Exclude positions with multiple changes
-P File containing splice sites annotations
-r Num. of bases near splice sites to explore [4]
-h Print this help
"""%(pid)
try:
opts, args = getopt.getopt(sys.argv[1:], "i:f:k:t:o:c:Q:q:m:O:s:edpuT:B:Sv:n:EP:r:hIXG:K:l:C:F:x:g:U")
except getopt.GetoptError as err:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
corrstr=0
strconf=0.70 # strand confidence
useconf=0
bamfile=''
fastafile=''
sortbam=0
kfile=''
nochrs=[]
NCPU=1
infolder=''
outfolder_='rediFolder_%s' %(pid)
MINCOV=10
QVAL=33
MQUAL=25
MAPQ=20
homo=5
rmpv = '0-0'
rmp = [int(x) for x in rmpv.split('-')]
getstrand=0 # take the strand into account
exh=0 # exclude multiple hits
exd=0 # exclude duplicates
conc=0 # if paired-end reads are present, use only concordant pairs
mq=0 # take mapping quality into account
rmnuc=0 # trim nucleotides at the read ends; tied to rmp and rmpv
blatr=0 # apply the Blat correction
blatfolder=''
rmsh=0 # remove substitutions in homopolymers of length >= homo
vnuc=3 # minimum number of bases supporting the variation
mmf=0.1 # minimum frequency of the variation
exms=0 # exclude multiple substitutions
exss=0 # exclude intronic positions within nss nucleotides of splice sites
nss=4 # intronic bases to explore around each splice site
splicefile='' #'splicesites.hg18.sorted.txt'
#custsub=0 # use custom distribution
#custfile='' # custom distribution file
#sigsites=0 # select significant sites
#test = 'bh' # select statistical test
usubs=[x+y for x in 'ACGT' for y in 'ACGT' if x!=y] # use these substitutions [default all]
#sval=0.05 # significant value
annfile='' # use annotation file for strand correction and features
sortann=0 # sort annotation file
uann=0 # use annotation
exfile='' # use annotations to exclude positions
expos=0 #
chunckval=100000
unchange1=1
unchange2=0
for o, a in opts:
if o == "-h":
usage()
sys.exit()
elif o == "-i": bamfile=a
elif o == "-f": fastafile=a
elif o == "-l": kfile=a
elif o == "-k":
if os.path.exists(a):
f=open(a)
nochrs=[x.strip() for x in f if x.strip()!='']
f.close()
else: nochrs=[x for x in a.split(',') if x.strip()!='']
elif o == "-t": NCPU=int(a)
elif o == "-F": infolder=a
elif o == "-o": outfolder_=a
elif o == "-c": MINCOV=int(a)
elif o == "-Q": QVAL=int(a)
elif o == "-q": MQUAL=int(a)
elif o == "-m": MAPQ=int(a)
elif o == "-O": homo=int(a)
elif o == "-x": strconf=float(a)
elif o == "-g":
if a=='2': useconf=1
elif o == "-s":
getstrand=1
if int(a)==1: unchange1,unchange2=1,0
elif int(a)==0: unchange1,unchange2=0,0
elif int(a)==2: unchange1,unchange2=0,1
elif int(a)==12: unchange1,unchange2=1,1
elif o == "-U": usubs=[x.upper() for x in a.split(',') if i.strip()!='']
elif o == "-e": exh=1
elif o == "-d": exd=1
elif o == "-p": conc=1
elif o == "-I": sortbam=1
elif o == "-X": sortann=1
elif o == "-C": chunckval=int(a)
elif o == "-u": mq=1
elif o == "-T":
rmpv = a
try:
rmp = [int(x) for x in rmpv.split('-')]
rmnuc=1
except: rmnuc=0
elif o == "-B":
blatfolder=a
if os.path.exists(blatfolder): blatr=1
elif o == "-S": corrstr=1
elif o == "-U": rmsh=1
elif o == "-v": vnuc=int(a)
elif o == "-n": mmf=float(a)
elif o == "-E": exms=1
elif o == "-P":
splicefile=a
if os.path.exists(splicefile): exss=1
elif o == "-K":
exfile=a
if os.path.exists(exfile): expos=1
elif o == "-r": nss=int(a)
elif o == "-G":
annfile=a
uann=1
else:
assert False, "Unhandled Option"
#######
commandLine=' '.join(sys.argv[1:])
script_time=time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))
params=[]
#Input parameters
params.append('REDItoolKnown version %s\n' %(version))
params.append('User command line: %s\n' %(commandLine))
params.append('Analysis ID: %s\n' %(pid))
params.append('Analysis time: %s\n' %(script_time))
params.append('-i --> BAM file: %s\n' %(bamfile))
params.append('-f --> Reference file: %s\n' %(fastafile))
params.append('-I --> Sort input BAM file: %i\n' %(sortbam))
params.append('-l --> File with known RNA editing positions: %s\n' %(kfile))
params.append('-X --> Sort annotation files: %i\n' %(sortann))
params.append('-k --> Regions to exclude: %s\n' %(','.join(nochrs)))
params.append('-t --> Number of working threads: %i\n' %(NCPU))
params.append('-C --> Base interval to explore: %i\n' %(chunckval))
params.append('-o --> Output folder: %s\n' %(outfolder_))
params.append('-F --> Internal folder name: %s\n' %(infolder))
params.append('-c --> Min. per base coverage: %i\n' %(MINCOV))
params.append('-Q --> FastQ offset value: %i\n' %(QVAL))
params.append('-q --> Min. per base quality: %i\n' %(MQUAL))
params.append('-m --> Min. mapping quality: %i\n' %(MAPQ))
params.append('-O --> Min. homopolymeric length: %i\n' %(homo))
params.append('-s --> Infer strand: %i - %i-%i\n' %(getstrand,unchange1,unchange2))
params.append('-g --> Use confidence: %i\n' %(useconf))
params.append('-x --> Strand confidence: %.2f\n' %(strconf))
params.append('-S --> Strand correction : %i\n' %(corrstr))
params.append('-G --> GFF annotation to infer strand: %s\n' %(annfile))
params.append('-e --> Exclude multi hits: %i\n' %(exh))
params.append('-d --> Exclude duplicates: %i\n' %(exd))
params.append('-p --> Use paired concordant reads only: %i\n' %(conc))
params.append('-u --> Consider mapping quality: %i\n' %(mq))
params.append('-T --> Trim x bases up and y bases down per read: %i - %i-%i\n' %(rmnuc,rmp[0],rmp[1]))
params.append('-B --> Blat folder for correction: %s\n' %(blatfolder))
params.append('-U --> Remove substitutions in homopolymeric regions: %i\n' %(rmsh))
params.append('-v --> Min. num. of reads supporting the variation: %i\n' %(vnuc))
params.append('-n --> Min. editing frequency: %.2f\n' %(mmf))
params.append('-E --> Exclude positions with multiple changes: %i\n' %(exms))
params.append('-P --> File containing splice sites annotations: %s\n' %(splicefile))
params.append('-r --> Num. of bases near splice sites to explore: %i\n' %(nss))
params.append('-K --> File with positions to exclude: %s\n' %(exfile))
#######
def pid_exists(pid):
"""Check whether pid exists in the current process table."""
if pid < 0:
return False
try:
os.kill(pid, 0)
except OSError, e:
return e.errno == errno.EPERM
else:
return True
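# e.g. pid_exists(os.getpid()) -> True; pid_exists(-1) -> False (illustrative)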
def get_no(pvalue,siglevel,ngenes): # No Correction
lista=[]
pp=siglevel
y=0
for i in pvalue:
p=i[0]
if p<=siglevel:
lista.append(i)
y+=1
return lista,y,pp
def get_b(pvalue,siglevel,ngenes): # Bonferroni
pvalue.sort()
lista=[]
y=0
#bcorr=siglevel/ngenes
pp=1.0
for i in pvalue:
p=i[0]*ngenes
if p<=siglevel:
lista.append(i)
#lista[i[1]]=i[0]
y+=1
if p<pp: pp=p
#print "Passed:",y,pp
return lista,y,pp
def get_bh(pvalue,siglevel,ngenes): # B-H
pvalue.sort()
#print ngenes
lista=[]
x=1
y=0
p=0
for i in pvalue:
nf=i[0]*ngenes
fdr=nf/x
if fdr<=siglevel:
#dic[i[1]]=i[0]
lista.append(i)
p=i[0]
y+=1
x+=1
#print "Passed:",y,p
return lista,y,p
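# Illustrative check (not in the original script): Bonferroni scales every
# p-value by the number of tests; Benjamini-Hochberg scales by ngenes/rank.
# With pvalue=[(0.001,'x'),(0.03,'y'),(0.2,'z')], siglevel=0.05, ngenes=3:
# get_b keeps only (0.001,'x') (0.001*3=0.003 passes, 0.03*3=0.09 fails),
# while get_bh also keeps (0.03,'y') (0.001*3/1=0.003 and 0.03*3/2=0.045 pass).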
def getTail(pp):
if ftail=='l': return pp.left_tail
elif ftail=='r': return pp.right_tail
elif ftail=='t': return pp.two_tail
def getDicSS(dicp): # dicp = dictionary of substitution frequencies
dicpp={}
for i in dicp:
if i[0]!=i[1]:
dicpp[i]=1-dicp[i]
return dicpp
def getFreads(bases):
fread={'A':0,'C':0,'G':0,'T':0}
for i in range(4):
if i==0: fread['A']=bases[i]
elif i==1: fread['C']=bases[i]
elif i==2: fread['G']=bases[i]
elif i==3: fread['T']=bases[i]
return fread
def getSub(ref,fread,dics):
#fread={A,C,G,T}
nref=fread[ref.upper()]
sub=[(ref.upper()+i,nref,fread[i]) for i in fread if i!=ref.upper() and fread[i]!=0]
allsub=' '.join([x[0] for x in sub])
# a list like [('AT', 50, 10), ('AG', 50, 2)]
res=[] #[(int(dics[i[0]]*(i[1]+i[2])),((i[1]+i[2])-exp1),pvalue(i[1],i[2],int(dics[i[0]]*(i[1]+i[2])),((i[1]+i[2])-exp1))) for i in sub]
for i in sub:
#if binomial:
# pval=bdtrc(i[2],i[1]+i[2],(1.0-dics[i[0]]))
# #pval=Bprob(i[2],i[1]+i[2],(1.0-dics[i[0]]))
# #print i[2],i[1]+i[2],(1.0-dics[i[0]]),pval
# obs1,obs2,exp1,exp2=0,0,0,0
obs1=i[1]
obs2=i[2]
exp1=int(dics[i[0]]*(i[1]+i[2]))
exp2=((i[1]+i[2]) - exp1)
pval=pvalue(obs1,obs2,exp1,exp2)
pval=getTail(pval)
res.append((i[0],obs1,obs2,exp1,exp2,str(pval)))
if len(res)==1: return res[0][5] #,allsub,fread
elif len(res) > 1:
rr=[float(x[-1]) for x in res]
idx=rr.index(min(rr))
return res[idx][5] #,allsub,fread
else: return '1.0' #,0,0
def BaseCount(seq,ref):
b={'A':0,'C':0,'G':0,'T':0}
subs=[]
subv=[]
for i in seq.upper():
if b.has_key(i): b[i]+=1
for i in b:
if not b.has_key(ref): continue
if b[i]!=0 and i!=ref:
vv=float(b[i])/(b[i]+b[ref])
subv.append((b[i],vv,ref+i))
subv.sort()
subv.reverse()
for i in subv:
if i[0]>=vnuc and i[1]>=mmf: subs.append(i[2])
freq=0.0
if len(subs)==0: subs.append('-')
else: freq=subv[0][1]
return sum(b.values()),[b['A'],b['C'],b['G'],b['T']],' '.join(subs),'%.2f'%(freq)
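# Illustrative trace (not in the original script): with the defaults vnuc=3 and
# mmf=0.1, BaseCount('AAAAGGG', 'A') -> (7, [4, 0, 3, 0], 'AG', '0.43'); the
# three G bases pass both the min-support and the min-frequency thresholds.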
def meanq(v,n):
try:m=float(v)/n
except: m=0.0
return '%.2f'%(m)
def rmHomo(sequp,seqdw,gh,ref):
if len(sequp)==0 and len(seqdw)==0: return 0
up,dw=0,0
for i in seqdw:
if i==ref:dw+=1
else:break
for i in sequp[::-1]:
if i==ref:up+=1
else:break
if up+dw+1 >= gh : return 1
return 0
def prop(tot,va):
try: av=float(va)/tot
except: av=0.0
return av
def vstand(strand):
vv=[(strand.count('+'),'+'),(strand.count('-'),'-'),(strand.count('*'),'*')]
if vv[0][0]==0 and vv[1][0]==0: return '*'
if useconf:
totvv=sum([x[0] for x in vv[:2]])
if prop(totvv,vv[0][0])>=strconf: return '+'
if prop(totvv,vv[1][0])>=strconf: return '-'
return '*'
else:
if vv[0][0]==vv[1][0] and vv[2][0]==0: return '+'
return max(vv)[1]
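# Illustrative behaviour (not in the original script): with useconf=0,
# vstand('++-') returns '+' by majority vote; with useconf=1 and strconf=0.70
# it returns '*', since neither strand reaches the 0.70 threshold (2/3 ~ 0.67).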
def comp(s):
a={'A':'T','T':'A','C':'G','G':'C'}
ss=''
for i in s.upper():
if a.has_key(i): ss+=a[i]
else: ss+='N'
return ss
def whereis(program):
for path in os.environ.get('PATH', '').split(':'):
if os.path.exists(os.path.join(path, program)) and not os.path.isdir(os.path.join(path, program)): return 1
return 0
def vstrand(lista):
if len(lista)==0: return '2'
p=lista.count('+')
m=lista.count('-')
if p==len(lista): return '1'
elif m==len(lista): return '0'
else: return '2'
def getd(lines): #fixed error in reading strand 6/3/2014
d={}
for i in lines:
l=(i.strip('\n\r')).split('\t')
if len(l)>=3:
if l[2]=='+': strand='1'
elif l[2]=='-': strand='0'
elif l[2] in '012': strand=l[2]
else: strand='2'
else: strand='2'
d[int(l[1])]=strand
return d
def normByStrand(seq_,strand_,squal_,mystrand_):
st='+'
if mystrand_=='0': st='-'
seq,qual,squal='',0,''
for i in range(len(seq_)):
if strand_[i]==st:
seq+=seq_[i]
qual+=ord(squal_[i])-QVAL
squal+=squal_[i]
return seq,qual,squal
def normByBlat(seq_,strand_,squal_,blatc_):
seq,qual,squal,strand='',0,'',''
for i in range(len(seq_)):
if blatc_[i]=='1':
seq+=seq_[i]
qual+=ord(squal_[i])-QVAL
squal+=squal_[i]
strand=strand_[i]
return seq,qual,squal,strand
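# Illustrative trace (not in the original script): with QVAL=33,
# normByBlat('ACG', '++-', 'III', '101') keeps only the positions flagged '1'
# and returns ('AG', 80, 'II', '-'); each kept 'I' adds ord('I')-33 = 40.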
def testBlat(blc):
if blc.count('1') > blc.count('0'): return 1
return 0
#######
script_time=time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))
sys.stderr.write("Script time --> START: %s\n"%(script_time))
sys.stderr.write("Analysis ID: %s\n"%(pid))
if not os.path.exists(bamfile):
usage()
sys.exit('BAM file %s not found.' %(bamfile))
if sortbam:
sys.stderr.write('Sorting BAM file.\n')
pysam.sort(bamfile,'sorted_%s'%(pid))
os.rename(bamfile,bamfile+'_old')
os.rename('sorted_%s.bam'%(pid),bamfile)
sys.stderr.write('Indexing BAM file.\n')
pysam.index(bamfile)
if not os.path.exists(bamfile+'.bai') and not sortbam:
sys.stderr.write('Indexing BAM file.\n')
pysam.index(bamfile)
if not os.path.exists(fastafile):
usage()
sys.exit('Fasta file %s not found.' %(fastafile))
if not os.path.exists(fastafile+'.fai'):
sys.stderr.write('Indexing Fasta file.\n')
pysam.faidx(fastafile)
if not os.path.exists(kfile): sys.exit('File containing RNA editing positions not found.')
if sortann:
if not whereis('grep'): sys.exit('grep command not found.')
if not whereis('sort'): sys.exit('sort command not found.')
sys.stderr.write('Sorting file with known editing positions.\n')
scmd='grep -v ^"chrom" %s | grep -v "^[[:space:]]*$" | sort -k1,1 -k2,2n > %s' %(kfile,'positions_%s'%(pid))
os.system(scmd)
os.rename(kfile,kfile+'_old')
os.rename('positions_%s'%(pid),kfile)
if not os.path.exists(kfile+'.tbi'):
sys.stderr.write('Indexing file with known positions.\n')
kfile=pysam.tabix_index(kfile, seq_col=0, start_col=1, end_col=1)
# Format for tabfile with positions:
# chr start strand
##################################
# check reference names
rrefs={}
ridxinfo=pysam.idxstats(bamfile)
for j in ridxinfo:
l=(j.strip()).split('\t')
if l[0]=='*': continue
if int(l[2])+int(l[3]) > 0: rrefs[l[0]]=int(l[1])
frefs=[]
fidxinfo=open(fastafile+'.fai')
for j in fidxinfo:
l=(j.strip()).split('\t')
if l[0]=='': continue
frefs.append(l[0])
fidxinfo.close()
# in rna-seq
rnof=[]
for i in rrefs.keys():
if i not in frefs: sys.stderr.write('WARNING: Region %s in RNA-Seq not found in reference file.\n' %(i))
##################################
if uann:
getstrand=0
if not os.path.exists(annfile):
usage()
sys.exit('Annotation file %s not found.' %(annfile))
if sortann:
if not whereis('grep'): sys.exit('grep command not found.')
if not whereis('sort'): sys.exit('sort command not found.')
sys.stderr.write('Sorting annotation file.\n')
scmd='grep ^"#" %s; grep -v ^"#" %s | sort -k1,1 -k4,4n > %s' %(annfile,annfile,'annotation_%s'%(pid))
os.system(scmd)
os.rename(annfile,annfile+'_old')
os.rename('annotation_%s'%(pid),annfile)
if not os.path.exists(annfile+'.tbi'):
sys.stderr.write('Indexing annotation file.\n')
annfile=pysam.tabix_index(annfile, preset='gff')
if expos:
if not os.path.exists(exfile):
usage()
sys.exit('File %s not found.' %(exfile))
if sortann:
if not whereis('grep'): sys.exit('grep command not found.')
if not whereis('sort'): sys.exit('sort command not found.')
sys.stderr.write('Sorting file.\n')
scmd='grep ^"#" %s; grep -v ^"#" %s | sort -k1,1 -k4,4n > %s' %(exfile,exfile,'exfile_%s'%(pid))
os.system(scmd)
os.rename(exfile,exfile+'_old')
os.rename('exfile_%s'%(pid),exfile)
if not os.path.exists(exfile+'.tbi'):
sys.stderr.write('Indexing %s file.\n' %(exfile))
exfile=pysam.tabix_index(exfile, preset='gff')
#mainbam=pysam.Samfile(bamfile,"rb")
#regions=mainbam.references
#regionslens=mainbam.lengths
#mainbam.close()
#dicregions=dict([(regions[x],regionslens[x]) for x in range(len(regions))])
#chrs=[x for x in regions if x not in nochrs]
dicregions=dict(rrefs.items())
chrs=[x for x in dicregions.keys() if x not in nochrs]
sys.stderr.write('Analysis on %i regions.\n' %(len(chrs)))
if infolder!='': outfolder=os.path.join(outfolder_,'known_%s_%s' %(infolder,pid))
else: outfolder=os.path.join(outfolder_,'known_%s' %(pid))
if not os.path.exists(outfolder):
splitfolder=os.path.split(outfolder)
if not os.path.exists(splitfolder[0]): os.mkdir(splitfolder[0])
os.mkdir(outfolder)
outtable=os.path.join(outfolder,'outTable_%s' %(pid))
#write command line and input parameters
f=open(os.path.join(outfolder,'parameters.txt'),'w')
f.writelines(params)
f.close()
def exploreBAM(myinput):
inputs=myinput.split('$')
chr,bamfile=inputs[0],inputs[1]
outfile=os.path.join(outfolder,'table_%s_%s'%(chr,pid))
#outfile2=os.path.join(outfolder,'subs_%s_%s'%(chr,pid))
d,di={},{}
bam=pysam.Samfile(bamfile,"rb")
fasta=pysam.Fastafile(fastafile)
ktabix=pysam.Tabixfile(kfile)
lenregion=dicregions[chr]
if uann: tabix=pysam.Tabixfile(annfile)
if expos: extabix=pysam.Tabixfile(exfile)
out=open(outfile,'w')
#if not custsub:
# dsubs=dict([(x+y, 0) for x in 'ACGT' for y in 'ACGT'])
# out2=open(outfile2,'w')
#header='Region\tPosition\tReference\tCoverage\tMeanQuality\tBaseCount\tSubs\tFrequency\n'
#out.write(header)
sys.stderr.write('Started analysis on region: %s\n'%(chr))
if blatr:
badblat=os.path.join(blatfolder,'blatseqs_%s.bad'%(chr))
if os.path.exists(badblat):
sys.stderr.write('Using Blat mapping for region %s\n'%(chr))
f=open(badblat)
for i in f:
l=(i.strip()).split()
d[l[0]+'_'+l[1]]=int(l[1])
f.close()
sys.stderr.write('Found %i reads for region %s\n'%(len(d),chr))
if exss:
if os.path.exists(splicefile):
sys.stderr.write('Loading known splice sites for region %s\n'%(chr))
f=open(splicefile)
for i in f:
l=(i.strip()).split()
if l[0]!=chr: continue
st,tp,cc=l[4],l[3],int(l[1])
if st=='+' and tp=='D':
for j in range(nss): di[cc+(j+1)]=0
if st=='+' and tp=='A':
for j in range(nss): di[cc-(j+1)]=0
if st=='-' and tp=='D':
for j in range(nss): di[cc-(j+1)]=0
if st=='-' and tp=='A':
for j in range(nss): di[cc+(j+1)]=0
f.close()
sys.stderr.write('Loaded %i positions for %s\n'%(len(di),chr))
if chr in ktabix.contigs:
for kpos in range(0,lenregion,chunckval):
startk,endk=kpos,(kpos+chunckval)-1
kres=[kk for kk in ktabix.fetch(reference=chr,start=startk,end=endk)]
if len(kres)==0: continue
kdic=getd(kres)
#print kdic
# else explore bam to find exact positions
for pileupcolumn in bam.pileup(chr,startk,endk):
if not startk<=pileupcolumn.pos<=endk: continue
if not kdic.has_key(pileupcolumn.pos+1): continue
ref=fasta.fetch(chr,pileupcolumn.pos,pileupcolumn.pos+1).upper()
seq,qual,strand,squal,blatc='',0,'','',''
if rmsh:
if ((pileupcolumn.pos+1)-homo)-1 < 0: sequp=''
else: sequp=(fasta.fetch(chr,((pileupcolumn.pos+1)-homo)-1,(pileupcolumn.pos+1)-1)).upper()
seqdw=(fasta.fetch(chr,pileupcolumn.pos+1,(pileupcolumn.pos+1)+homo)).upper()
				for pileupread in pileupcolumn.pileups: # for each base in the pileup column
s,q,t,qq=pileupread.alignment.seq[pileupread.qpos].upper(),ord(pileupread.alignment.qual[pileupread.qpos])-QVAL,'*',pileupread.alignment.qual[pileupread.qpos]
					# exclude intronic positions near splice sites
if exss and di.has_key(pileupcolumn.pos+1): continue
# multiple hit
if exh and pileupread.alignment.is_secondary: continue
# duplicates
if exd and pileupread.alignment.is_duplicate: continue
					# if paired-end
					if conc and pileupread.alignment.is_paired:
						# if the mates are not concordant
						if not pileupread.alignment.is_proper_pair: continue
						# if concordant but mapped in the same orientation
flag=pileupread.alignment.flag
if pileupread.alignment.is_duplicate: flag=flag-1024
if pileupread.alignment.is_secondary: flag=flag-256
if flag in [67,131,115,179]: continue
# mapping quality
if mq and pileupread.alignment.mapq < MAPQ: continue
					#if base quality >= the minimum required quality
if q >= MQUAL and pileupcolumn.pos in pileupread.alignment.positions:
#tags=dict(pileupread.alignment.tags)
						#infer the strand for each position
if getstrand:
							#use the mapping info if the library is strand-oriented
if pileupread.alignment.is_read1:
if unchange1:
if pileupread.alignment.is_reverse: t='-'
else: t='+'
else:
if pileupread.alignment.is_reverse: t='+'
else: t='-'
elif pileupread.alignment.is_read2:
if unchange2:
if pileupread.alignment.is_reverse: t='-'
else: t='+'
else:
if pileupread.alignment.is_reverse: t='+'
else: t='-'
else: # for single ends
if unchange1:
if pileupread.alignment.is_reverse: t='-'
else: t='+'
else:
if pileupread.alignment.is_reverse: t='+'
else: t='-'
if rmnuc:
							#rlen=pileupread.alignment.rlen #pileupread.alignment.qlen #length of this specific read
							#print rlen,pileupread.qpos,pileupread.alignment.qstart,pileupread.alignment.qend
							# check whether the nucleotide should be trimmed at the read ends, within the x-y range
							# handle the forward strand
							#qp=pileupread.qpos #pileupread.qpos-pileupread.alignment.qstart
							#print pileupread.qpos,pileupread.alignment.rlen,len(pileupread.alignment.seq)
							#if pileupread.alignment.is_reverse:
							#	if (rlen-qp)-1 < rmp[0]:continue
							#	if (rlen-qp)-1 > ((rlen)-rmp[1])-1: continue
							#else:
							#	if qp<rmp[0]:continue
							#	if qp>(rlen-rmp[1])-1: continue
							rlen=pileupread.alignment.rlen #pileupread.alignment.qlen #length of this specific read
							qp=pileupread.qpos #pileupread.qpos-pileupread.alignment.qstart
if pileupread.alignment.is_reverse:
if qp>(rlen-rmp[0])-1: continue
if qp<rmp[1]:continue
else:
if qp<rmp[0]:continue
if qp>(rlen-rmp[1])-1: continue
						# if the read this base belongs to does not map uniquely with Blat
if blatr:
rt=0
if pileupread.alignment.is_read1: rt=1
elif pileupread.alignment.is_read2: rt=2
rname=pileupread.alignment.qname+'_%i'%(rt)
if d.has_key(rname): blatc+='0' #continue
else: blatc+='1'
						# if the base differs from the reference
						# discard if in a homopolymeric region
if rmsh and rmHomo(sequp,seqdw,homo,ref): continue
seq+=s
qual+=q
strand+=t
squal+=qq
if seq.strip()!='':
if blatr:
if testBlat(blatc): seq,qual,squal,strand=normByBlat(seq,strand,squal,blatc)
else: continue
#print pileupcolumn.pos+1,seq,squal
#mystrand=kdic[pileupcolumn.pos+1]
#print mystrand
try: mystrand=kdic[pileupcolumn.pos+1]
except: mystrand='2'
#print chr,pileupcolumn.pos+1,seq,strand, mystrand
if uann and not getstrand:
if chr in tabix.contigs:
sres=[kk.strand for kk in tabix.fetch(reference=chr,start=(pileupcolumn.pos),end=(pileupcolumn.pos+1),parser=pysam.asGTF())]
mystrand=vstrand(sres)
if getstrand and not uann:
mystr=vstand(strand)
if mystr=='-': mystrand='0'
elif mystr=='+': mystrand='1'
else: mystrand='2'
if mystrand=='0':
seq=comp(seq)
ref=comp(ref)
#if getstrand and mystrand in ['1','0'] and not useconf: seq,qual,squal=normByStrand(seq,strand,squal,mystrand)
if getstrand and mystrand in ['1','0'] and corrstr: seq,qual,squal=normByStrand(seq,strand,squal,mystrand)
if uann and mystrand in ['1','0'] and corrstr: seq,qual,squal=normByStrand(seq,strand,squal,mystrand)
#if not getstrand and not uann and mystrand in ['1','0']: seq,qual,squal=normByStrand(seq,strand,squal,mystrand)
#print chr,pileupcolumn.pos+1,seq,strand,mystrand
cov,bcomp,subs,freq=BaseCount(seq,ref)
if cov < MINCOV: continue
if exms and subs.count(' ')>0: continue
mqua=meanq(qual,len(seq))
if expos:
if chr in extabix.contigs:
exres=[kk for kk in extabix.fetch(reference=chr,start=(pileupcolumn.pos),end=(pileupcolumn.pos+1))]
if len(exres)>0: continue
line='\t'.join([chr,str(pileupcolumn.pos+1),ref,mystrand,str(cov),(mqua),str(bcomp),subs,freq])+'\n'
out.write(line)
bam.close()
fasta.close()
ktabix.close()
out.close()
if uann: tabix.close()
if expos: extabix.close()
sys.stderr.write('Job completed for region: %s\n'%(chr))
def addPvalue(myinput2): # not used here
inputs=myinput2.split('$')
f=open(inputs[0])
subs=eval((f.readline()).strip())
f.close()
dsubs={}
for i in subs: dsubs[i]=float(subs[i])/sum(subs.values())
dsubss=getDicSS(dsubs)
#print dsubss
o=open(inputs[2],'w')
f=open(inputs[1])
for i in f:
l=(i.strip()).split('\t')
if i.strip()=='': continue
#if i.startswith('Region'):
# l.append('Pvalue')
# o.write('\t'.join(l)+'\n')
# continue
if l[6]!='-': pval=getSub(l[2],getFreads(eval(l[6])),dsubss)
else: pval='1.0'
l.append(pval)
o.write('\t'.join(l)+'\n')
o.close()
def do_work(q):
while True:
try:
x=q.get(block=False)
exploreBAM(x)
except Empty:
break
work_queue = Queue()
for i in chrs:
strinput=i+'$'+bamfile
work_queue.put(strinput)
processes=[Process(target=do_work, args=(work_queue,)) for i in range(NCPU)]
for t in processes:
t.start()
for t in processes:
t.join()
time.sleep(0.5)
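# Note on the pattern above: each worker drains the shared Queue until it
# raises Empty, so a free Process immediately picks up the next region. A
# minimal standalone sketch of the same idiom (names are illustrative, not
# part of this script):
#
#   from multiprocessing import Process, Queue
#   from Queue import Empty  # Python 2 stdlib
#
#   def worker(q):
#       while True:
#           try: item = q.get(block=False)
#           except Empty: break
#           handle(item)  # hypothetical per-item work
#
#   q = Queue()
#   for item in jobs: q.put(item)
#   procs = [Process(target=worker, args=(q,)) for _ in range(4)]
#   for p in procs: p.start()
#   for p in procs: p.join()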
#
head='Region\tPosition\tReference\tStrand\tCoverage-q%i\tMeanQ\tBaseCount[A,C,G,T]\tAllSubs\tFrequency\n' %(MQUAL)
sys.stderr.write('Merging Tables.\n')
o=open(outtable,'w')
o.write(head)
for i in chrs:
#tabfile=os.path.join(outfolder,'outTable_%s_%s' %(i,pid))
tabfile=os.path.join(outfolder,'table_%s_%s' %(i,pid))
if os.path.exists(tabfile):
f=open(tabfile)
for j in f: o.write(j)
f.close()
os.remove(tabfile)
#os.remove(intabfile)
o.close()
#if sigsites:
# sys.stderr.write('Selecting significant sites.\n')
# outsig=os.path.join(outfolder,'outTableSig_%s' %(pid))
# f=open(outtable)
# o=open(outsig,'w')
# o.write(head)
# allv=[]
# for i in f:
# if i.startswith('Region'): continue
# if i.strip()=='': continue
# l=(i.strip()).split('\t')
# if l[7]=='-': continue
# if l[7] not in usubs: continue
# pp=float(l[9])
# allv.append((pp,i))
# if test=='bh': rr=get_bh(allv,sval,len(allv))
# elif test=='bo': rr=get_b(allv,sval,len(allv))
# else: rr=get_no(allv,sval,len(allv))
# for i in rr[0]: o.write(i[1])
# f.close()
# o.close()
sys.stderr.write('Results saved on %s\n'%(outtable))
#if sigsites: sys.stderr.write('Significant sites saved on %s\n'%(outsig))
script_time=time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))
sys.stderr.write("Script time --> END: %s\n"%(script_time))
|
RNAEDITINGPLUS/main
|
node/reditools-1.0.4/reditools/REDItoolKnown.py
|
Python
|
apache-2.0
| 28,181
|
[
"pysam"
] |
4b47f9d3732bb33170b1ed9bf33f36d429c8b5e1ebb8ddde16c86558daf317f9
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unsmuggle_url,
)
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
class SenateISVPIE(InfoExtractor):
_COMM_MAP = [
["ag", "76440", "http://ag-f.akamaihd.net"],
["aging", "76442", "http://aging-f.akamaihd.net"],
["approps", "76441", "http://approps-f.akamaihd.net"],
["armed", "76445", "http://armed-f.akamaihd.net"],
["banking", "76446", "http://banking-f.akamaihd.net"],
["budget", "76447", "http://budget-f.akamaihd.net"],
["cecc", "76486", "http://srs-f.akamaihd.net"],
["commerce", "80177", "http://commerce1-f.akamaihd.net"],
["csce", "75229", "http://srs-f.akamaihd.net"],
["dpc", "76590", "http://dpc-f.akamaihd.net"],
["energy", "76448", "http://energy-f.akamaihd.net"],
["epw", "76478", "http://epw-f.akamaihd.net"],
["ethics", "76449", "http://ethics-f.akamaihd.net"],
["finance", "76450", "http://finance-f.akamaihd.net"],
["foreign", "76451", "http://foreign-f.akamaihd.net"],
["govtaff", "76453", "http://govtaff-f.akamaihd.net"],
["help", "76452", "http://help-f.akamaihd.net"],
["indian", "76455", "http://indian-f.akamaihd.net"],
["intel", "76456", "http://intel-f.akamaihd.net"],
["intlnarc", "76457", "http://intlnarc-f.akamaihd.net"],
["jccic", "85180", "http://jccic-f.akamaihd.net"],
["jec", "76458", "http://jec-f.akamaihd.net"],
["judiciary", "76459", "http://judiciary-f.akamaihd.net"],
["rpc", "76591", "http://rpc-f.akamaihd.net"],
["rules", "76460", "http://rules-f.akamaihd.net"],
["saa", "76489", "http://srs-f.akamaihd.net"],
["smbiz", "76461", "http://smbiz-f.akamaihd.net"],
["srs", "75229", "http://srs-f.akamaihd.net"],
["uscc", "76487", "http://srs-f.akamaihd.net"],
["vetaff", "76462", "http://vetaff-f.akamaihd.net"],
["arch", "", "http://ussenate-f.akamaihd.net/"]
]
_IE_NAME = 'senate.gov'
_VALID_URL = r'http://www\.senate\.gov/isvp/\?(?P<qs>.+)'
_TESTS = [{
'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png',
'info_dict': {
'id': 'judiciary031715',
'ext': 'flv',
'title': 'Integrated Senate Video Player',
'thumbnail': 're:^https?://.*\.(?:jpg|png)$',
}
}, {
'url': 'http://www.senate.gov/isvp/?type=live&comm=commerce&filename=commerce011514.mp4&auto_play=false',
'info_dict': {
'id': 'commerce011514',
'ext': 'flv',
'title': 'Integrated Senate Video Player'
}
}, {
'url': 'http://www.senate.gov/isvp/?type=arch&comm=intel&filename=intel090613&hc_location=ufi',
# checksum differs each time
'info_dict': {
'id': 'intel090613',
'ext': 'mp4',
'title': 'Integrated Senate Video Player'
}
}]
@staticmethod
def _search_iframe_url(webpage):
mobj = re.search(
r"<iframe[^>]+src=['\"](?P<url>http://www\.senate\.gov/isvp/\?[^'\"]+)['\"]",
webpage)
if mobj:
return mobj.group('url')
def _get_info_for_comm(self, committee):
for entry in self._COMM_MAP:
if entry[0] == committee:
return entry[1:]
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
qs = compat_parse_qs(re.match(self._VALID_URL, url).group('qs'))
if not qs.get('filename') or not qs.get('type') or not qs.get('comm'):
raise ExtractorError('Invalid URL', expected=True)
        video_id = re.sub(r'\.mp4$', '', qs['filename'][0])
webpage = self._download_webpage(url, video_id)
if smuggled_data.get('force_title'):
title = smuggled_data['force_title']
else:
title = self._html_search_regex(r'<title>([^<]+)</title>', webpage, video_id)
poster = qs.get('poster')
thumbnail = poster[0] if poster else None
video_type = qs['type'][0]
committee = video_type if video_type == 'arch' else qs['comm'][0]
stream_num, domain = self._get_info_for_comm(committee)
formats = []
if video_type == 'arch':
filename = video_id if '.' in video_id else video_id + '.mp4'
formats = [{
# All parameters in the query string are necessary to prevent a 403 error
'url': compat_urlparse.urljoin(domain, filename) + '?v=3.1.0&fp=&r=&g=',
}]
else:
hdcore_sign = '?hdcore=3.1.0'
url_params = (domain, video_id, stream_num)
f4m_url = '%s/z/%s_1@%s/manifest.f4m' % url_params + hdcore_sign
m3u8_url = '%s/i/%s_1@%s/master.m3u8' % url_params
for entry in self._extract_f4m_formats(f4m_url, video_id, f4m_id='f4m'):
                # URLs without the extra param induce a 404 error
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.append(entry)
for entry in self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', m3u8_id='m3u8'):
mobj = re.search(r'(?P<tag>(?:-p|-b)).m3u8', entry['url'])
if mobj:
entry['format_id'] += mobj.group('tag')
formats.append(entry)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
}
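# Usage note (illustrative sketch, not asserted to be this extractor's
# documented interface): another extractor that finds an embedded Senate ISVP
# player in a page can delegate to this IE via the iframe helper:
#
#   iframe_url = SenateISVPIE._search_iframe_url(webpage)
#   if iframe_url:
#       return self.url_result(iframe_url, 'SenateISVP')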
|
marxin/youtube-dl
|
youtube_dl/extractor/senateisvp.py
|
Python
|
unlicense
| 5,890
|
[
"EPW"
] |
16b164574f7945d0a3356e4cabb0985d88207a1dfdc96a276caa454b118fe0f5
|
import pymatgen
structure = pymatgen.Structure(
lattice = pymatgen.core.lattice.Lattice([[.0,.5,.5],[.5,.0,.5],[.5,.5,.0]]).scale(5.4309456887829**3/4),
species= ['Si', 'Si'],
coords = [3*[-.125], 3*[.125]],
)
structure.to(filename='Si.cif')
structure.to(filename='Si.json')
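# Reading the structure back is symmetric (sketch; assumes the files written
# above exist in the working directory):
# silicon = pymatgen.Structure.from_file('Si.cif')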
|
trangel/OPTpy
|
examples/data/structures/write-Si-structure.py
|
Python
|
gpl-3.0
| 294
|
[
"pymatgen"
] |
480c38a3050ee0d624b9d231d73c91b5ab1b2f2cd9e213ea765e2ecc56229fb3
|
# PYTHON script to write and submit PBS jobs to the queue
import optparse
import sys
import string
import os
import stat
# User inputted values and options
parser = optparse.OptionParser(description="submit GROMACS grompp and mdrun commands to cluster queue.")
parser.add_option("-f", "--fileprefix", help="the full prefix of the files of concern")
parser.add_option("-s", "--states", help="the number of alchemical states (including endpoints). Default = 16", default = 16, type = int)
parser.add_option("-G", "--gromacs", help="location of GROMACS binaries, default = /h3/n1/shirtsgroup/gromacs_46/Install_v462/bin", default = '/h3/n1/shirtsgroup/gromacs_46/Install_v462/bin')
(options, args) = parser.parse_args()
#Check for -f user input value (necessary)
datafile_prefix = options.fileprefix
if datafile_prefix is None:
sys.exit("No data file prefix given, exiting.")
num_states = options.states
gromacs_loc = options.gromacs
#Create dictionary with parameter values for modifying generic job file
parameterdict = {'LOCATION' : gromacs_loc, 'NAME' : datafile_prefix}
#Loop over lambda states
for x in range(num_states):
os.system('%(gromacs_loc)s/grompp_d -f %(datafile_prefix)s.%(x)s.mdp -c %(datafile_prefix)s.gro -p %(datafile_prefix)s.top -o %(datafile_prefix)s.%(x)s.tpr -maxwarn 2' % locals())
print 'finished'
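# For x = 0 and a file prefix of 'mysys' (a hypothetical example value), the
# command built above expands to:
# /h3/n1/shirtsgroup/gromacs_46/Install_v462/bin/grompp_d -f mysys.0.mdp -c mysys.gro -p mysys.top -o mysys.0.tpr -maxwarn 2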
|
hainm/open-forcefield-group
|
reweighting_optimization/scripts/grompp.py
|
Python
|
gpl-2.0
| 1,340
|
[
"Gromacs"
] |
f5a09ba0f2a953c753555afdfa6ba8a789912bd8ee2d368c0e42b36a524e422e
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2013 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
"""Search extentions add columns and tables to search dialogs"""
class SearchExtension(object):
"""A SearchExtension is intended to add extra columns to a SearchDialog.
Suppose you have the following dialog:
#>>> class ProductSearch(SearchDialog):
#... search_spec = ProductsView
#...
#... def get_columns(self):
#... return [Column('name')]
    This is the default search, and it has only one column. The user has the
    optical plugin installed, and this plugin adds a few more properties to a
    product, like the color of the product.
    A search extension for this dialog would be defined as:
#>>> class OpticalProductSearchExtention(SearchExtension):
#... spec_joins = [
#... LeftJoin(OpticalProduct, OpticalProduct.product_id == Product.id)
#... ]
#...
#... spec_attributes = dict(
#... color=OpticalProduct.color
#... )
#...
#... def get_columns(self):
#... return [Column('color')]
Then, the plugin should also connect to the event
:class:`stoqlib.gui.events.SearchDialogSetupSearchEvent` and when the
    desired dialog is being set up, it should attach the extension:
#>>> dialog = ProductSearch()
#>>> dialog.add_extention(OpticalProductSearchExtention())
"""
#: A list of table joins that will be added to the query of the search
spec_joins = []
#: A dictionary of the columns that should be queried. Normally columns from
#: the tables specified in the spec_joins above
spec_attributes = {}
def attach(self, search):
"""Attaches this extension to the given search dialog
This will replace the viewable of the dialog by another one that is a
subclass of the original, extended with the desired tables and columns,
defined in the spec_joins and spec_attributes.
"""
search.search_spec = search.search_spec.extend_viewable(self.spec_attributes,
self.spec_joins)
search.add_columns(self.get_columns())
def get_columns(self): # pragma no cover
"""Returns the extra columns that should be added in the search dialog.
If the column is not already present in the original viewable of the
search dialog, it should be specified in the spec_attributes above
"""
raise NotImplementedError
|
andrebellafronte/stoq
|
stoqlib/gui/search/searchextension.py
|
Python
|
gpl-2.0
| 3,316
|
[
"VisIt"
] |
fad4edf11e22c24f3eae46be07cbf977b3ff0aa8ddeac643f732a3e4c13eaf6e
|
# Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the ResetLaunchConfigurations action class."""
from launch import LaunchContext
from launch.actions import ResetLaunchConfigurations
from launch.substitutions import LaunchConfiguration
def test_reset_launch_configurations_constructors():
"""Test the constructors for ResetLaunchConfigurations class."""
ResetLaunchConfigurations()
ResetLaunchConfigurations({})
ResetLaunchConfigurations({'foo': 'FOO', 'bar': 'BAR'})
def test_reset_launch_configurations_execute():
"""Test the execute() of the ResetLaunchConfigurations class."""
# Clear all existing launch configurations without initializer for
# launch_configurations
lc1 = LaunchContext()
assert len(lc1.launch_configurations) == 0
lc1.launch_configurations['foo'] = 'FOO'
lc1.launch_configurations['bar'] = 'BAR'
assert len(lc1.launch_configurations) == 2
ResetLaunchConfigurations().visit(lc1)
assert len(lc1.launch_configurations) == 0
# Clear all existing launch configurations with initializer for
# launch_configurations = None
lc2 = LaunchContext()
assert len(lc2.launch_configurations) == 0
lc2.launch_configurations['foo'] = 'FOO'
lc2.launch_configurations['bar'] = 'BAR'
assert len(lc2.launch_configurations) == 2
ResetLaunchConfigurations(launch_configurations=None).visit(lc2)
assert len(lc2.launch_configurations) == 0
# Clear all existing launch configurations with initializer for
# launch_configurations = {}
lc3 = LaunchContext()
assert len(lc3.launch_configurations) == 0
lc3.launch_configurations['foo'] = 'FOO'
lc3.launch_configurations['bar'] = 'BAR'
assert len(lc3.launch_configurations) == 2
ResetLaunchConfigurations(launch_configurations={}).visit(lc3)
assert len(lc3.launch_configurations) == 0
# Pass through an existing launch configuration
lc4 = LaunchContext()
assert len(lc4.launch_configurations) == 0
lc4.launch_configurations['foo'] = 'FOO'
lc4.launch_configurations['bar'] = 'BAR'
assert len(lc4.launch_configurations) == 2
ResetLaunchConfigurations(launch_configurations={'foo': LaunchConfiguration('foo')}).visit(lc4)
assert len(lc4.launch_configurations) == 1
assert lc4.launch_configurations['foo'] == 'FOO'
assert 'bar' not in lc4.launch_configurations.keys()
# Add a launch configuration that did not exist
lc5 = LaunchContext()
assert len(lc5.launch_configurations) == 0
lc5.launch_configurations['foo'] = 'FOO'
lc5.launch_configurations['bar'] = 'BAR'
assert len(lc5.launch_configurations) == 2
ResetLaunchConfigurations(launch_configurations={'baz': 'BAZ'}).visit(lc5)
assert len(lc5.launch_configurations) == 1
assert lc5.launch_configurations['baz'] == 'BAZ'
assert 'foo' not in lc5.launch_configurations.keys()
assert 'bar' not in lc5.launch_configurations.keys()
# Overwrite an existing launch configuration
lc6 = LaunchContext()
assert len(lc6.launch_configurations) == 0
lc6.launch_configurations['foo'] = 'FOO'
lc6.launch_configurations['bar'] = 'BAR'
assert len(lc6.launch_configurations) == 2
ResetLaunchConfigurations(launch_configurations={'foo': 'OOF'}).visit(lc6)
assert len(lc6.launch_configurations) == 1
assert lc6.launch_configurations['foo'] == 'OOF'
assert 'bar' not in lc6.launch_configurations.keys()
|
ros2/launch
|
launch/test/launch/actions/test_reset_launch_configurations.py
|
Python
|
apache-2.0
| 3,998
|
[
"VisIt"
] |
cc169dda51facb41927f6888f65efa00646c3622da6556a15fef47c1ac97d44e
|
"""
Class to help creating a source detection pipeline with sep
by creating 'steps' with different configurations.
"""
import os
import numpy as np
from scipy import ndimage
from astropy.table import Table
from astropy.convolution import Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma, gaussian_sigma_to_fwhm
import sep
from . import utils
from .utils import check_kwargs_defaults, check_random_state
from .log import logger
__all__ = ['SepStepperBase', 'SepLsstStepper', 'sep_ellipse_mask']
def _byteswap(arr):
"""
If array is in big-endian byte order (as astropy.io.fits
always returns), swap to little-endian for SEP.
"""
if arr.dtype.byteorder=='>':
arr = arr.byteswap().newbyteorder()
return arr
def sep_ellipse_mask(sources, image_shape, scale=5.0):
logger.info('building ellipse mask')
mask = np.zeros(image_shape, dtype=bool)
sep.mask_ellipse(mask, sources['x'], sources['y'], sources['a'],
sources['b'], sources['theta'], scale)
logger.info('{:.2f}% of patch masked'.format(100 * mask.sum()/mask.size))
return mask
class SepStepperBase(object):
SEP_EXTRACT_DEFAULTS = dict(
thresh=5.0, minarea=10, deblend_nthresh=32,
deblend_cont=0.005, clean=True, clean_param=1.0,
filter_type='conv', filter_num_fwhm=1.0
)
SEP_BACK_DEFAULTS = dict(bw=64, bh=64, fw=3, fh=3, fthresh=0.0)
def __init__(self, config_fn=None, config={}):
if config_fn is not None:
config = utils.read_config(config_fn)
self.extract_pixstack = config.pop('extract_pixstack', 300000)
sep.set_extract_pixstack(self.extract_pixstack)
self.step_kws = config
self.sources = {}
def setup_image(self, image_object, **kwargs):
"""
        Must set the following attributes:
- image
- noise_image (ndarray or number)
- psf_fwhm (pixels)
"""
raise NotImplementedError
def _measure(self, img, sources, mask=None):
logger.info('measuring source parameters')
        # HACK: issues with numerical precision;
        # theta must satisfy -pi/2 <= theta <= pi/2
        sources['theta'][np.abs(np.abs(sources['theta']) - np.pi/2) < 1e-6] = np.pi/2
for p in ['x', 'y', 'a', 'b', 'theta']:
sources = sources[~np.isnan(sources[p])]
# calculate "AUTO" parameters
kronrad, krflag = sep.kron_radius(
img, sources['x'], sources['y'], sources['a'], sources['b'],
sources['theta'], 6.0, mask=mask)
flux, fluxerr, flag = sep.sum_ellipse(
img, sources['x'], sources['y'], sources['a'], sources['b'],
sources['theta'], 2.5*kronrad, subpix=5, mask=mask)
flag |= krflag # combine flags into 'flag'
sources = sources[~np.isnan(flux)]
flux = flux[~np.isnan(flux)]
sources = sources[flux > 0]
flux = flux[flux > 0]
mag_auto = utils.zpt - 2.5*np.log10(flux)
r, flag = sep.flux_radius(
img, sources['x'], sources['y'], 6.*sources['a'], 0.5,
normflux=flux, subpix=5, mask=mask)
sources['mag_auto'] = mag_auto
sources['flux_auto'] = flux
sources['flux_radius'] = r * utils.pixscale
# approximate fwhm
r_squared = sources['a']**2 + sources['b']**2
sources['fwhm'] = 2 * np.sqrt(np.log(2) * r_squared) * utils.pixscale
q = sources['b'] / sources['a']
area = np.pi * q * sources['flux_radius']**2
sources['mu_ave_auto'] = sources['mag_auto'] + 2.5 * np.log10(2*area)
area_arcsec = np.pi * (self.psf_fwhm/2)**2 * utils.pixscale**2
flux, fluxerr, flag = sep.sum_circle(
img, sources['x'], sources['y'], self.psf_fwhm/2,
subpix=5, mask=mask)
flux[flux<=0] = np.nan
mu_0 = utils.zpt - 2.5*np.log10(flux / area_arcsec)
sources['mu_0_aper'] = mu_0
return sources
def apply_mask(self, image, mask, inplace=True):
img = image if inplace else image.copy()
if type(self.noise_image) == np.ndarray:
img[mask] = self.noise_image[mask]
else:
img[mask] = self.noise_image
return img
def run(self, step_name, mask=None):
try:
img = self.image.copy()
except AttributeError:
logger.error('You must setup image before running pipeline!')
exit(1)
logger.info('running ' + step_name)
kws = self.step_kws[step_name].copy()
sep_extract_kws = kws.pop('sep_extract_kws', {})
sep_extract_kws = check_kwargs_defaults(sep_extract_kws,
self.SEP_EXTRACT_DEFAULTS)
sep_back_kws = kws.pop('sep_back_kws', {})
sep_back_kws = check_kwargs_defaults(sep_back_kws,
self.SEP_BACK_DEFAULTS)
if mask is not None:
logger.info('applying mask')
sep_extract_kws['mask'] = mask
sep_back_kws['mask'] = mask
# define gaussian kernel for detection
# TODO: make filter shape optional
num_fwhm = sep_extract_kws.pop('filter_num_fwhm', 1)
kernel_fwhm = self.psf_fwhm * num_fwhm
logger.info('smoothing with kernel with fwhm = {:.2f} arcsec'.\
format(kernel_fwhm * utils.pixscale))
kern = Gaussian2DKernel(kernel_fwhm * gaussian_fwhm_to_sigma,
mode='oversample')
kern.normalize()
sep_extract_kws['filter_kernel'] = kern.array
# estimate background and subtract from image
bkg = sep.Background(img, **sep_back_kws)
img_sub = img - bkg
# extract sources
logger.info('detecting with a threshold of {} x background'.\
format(sep_extract_kws['thresh']))
sources, segmap = sep.extract(
img_sub, err=bkg.rms(), segmentation_map=True, **sep_extract_kws)
sources = Table(sources)
sources = sources[sources['flux'] > 0]
logger.info('found {} sources'.format(len(sources)))
if kws['do_measure']:
sources = self._measure(img, sources, mask)
sources['seg_label'] = np.arange(1, len(sources) + 1)
self.sources[step_name] = sources
return sources, segmap
class SepLsstStepper(SepStepperBase):
def setup_image(self, exposure, random_state=None):
self.image = exposure.getImage().getArray()
self.psf_fwhm = utils.get_psf_sigma(exposure) * gaussian_sigma_to_fwhm
self.noise_image = utils.make_noise_image(exposure.getMaskedImage(),
random_state)
|
johnnygreco/hugs
|
hugs/sep_stepper.py
|
Python
|
mit
| 6,819
|
[
"Gaussian"
] |
5cf1072e6336ecf99c6ad47909975bbdb5417f9631c0f5cb650702c7c12b1350
|
#!/usr/bin/env python
#
# Copyright 2006 Google Inc. All Rights Reserved.
"""Calculates Javascript dependencies without requiring Google3.
It iterates over a number of search paths and builds a dependency tree. With
the inputs provided, it walks the dependency tree and outputs all the files
required for compilation.
"""
try:
import distutils.version
except ImportError:
# distutils is not available in all environments
distutils = None
import logging
import optparse
import os
import re
import subprocess
import sys
req_regex = re.compile('goog\.require\s*\(\s*[\'\"]([^\)]+)[\'\"]\s*\)')
prov_regex = re.compile('goog\.provide\s*\(\s*[\'\"]([^\)]+)[\'\"]\s*\)')
ns_regex = re.compile('^ns:((\w+\.)*(\w+))$')
version_regex = re.compile('[\.0-9]+')
def IsValidFile(ref):
"""Returns true if the provided reference is a file and exists."""
return os.path.isfile(ref)
def IsJsFile(ref):
"""Returns true if the provided reference is a Javascript file."""
return ref.endswith('.js')
def IsNamespace(ref):
"""Returns true if the provided reference is a namespace."""
return re.match(ns_regex, ref) is not None
def IsDirectory(ref):
"""Returns true if the provided reference is a directory."""
return os.path.isdir(ref)
def ExpandDirectories(refs):
"""Expands any directory references into inputs.
Description:
Looks for any directories in the provided references. Found directories
are recursively searched for .js files, which are then added to the result
list.
Args:
refs: a list of references such as files, directories, and namespaces
Returns:
A list of references with directories removed and replaced by any
.js files that are found in them. Also, the paths will be normalized.
"""
result = []
for ref in refs:
if IsDirectory(ref):
# Disable 'Unused variable' for subdirs
# pylint: disable-msg=W0612
for (directory, subdirs, filenames) in os.walk(ref):
for filename in filenames:
if IsJsFile(filename):
result.append(os.path.join(directory, filename))
else:
result.append(ref)
return map(os.path.normpath, result)
class DependencyInfo(object):
"""Represents a dependency that is used to build and walk a tree."""
def __init__(self, filename):
self.filename = filename
self.provides = []
self.requires = []
def __str__(self):
return '%s Provides: %s Requires: %s' % (self.filename,
repr(self.provides),
repr(self.requires))
def BuildDependenciesFromFiles(files):
"""Build a list of dependencies from a list of files.
Description:
Takes a list of files, extracts their provides and requires, and builds
out a list of dependency objects.
Args:
files: a list of files to be parsed for goog.provides and goog.requires.
Returns:
A list of dependency objects, one for each file in the files argument.
"""
result = []
filenames = set()
for filename in files:
if filename in filenames:
continue
# Python 3 requires the file encoding to be specified
if (sys.version_info[0] < 3):
file_handle = open(filename, 'r')
else:
file_handle = open(filename, 'r', encoding='utf8')
dep = DependencyInfo(filename)
try:
for line in file_handle:
if re.match(req_regex, line):
dep.requires.append(re.search(req_regex, line).group(1))
if re.match(prov_regex, line):
dep.provides.append(re.search(prov_regex, line).group(1))
finally:
file_handle.close()
result.append(dep)
filenames.add(filename)
return result
def BuildDependencyHashFromDependencies(deps):
"""Builds a hash for searching dependencies by the namespaces they provide.
Description:
Dependency objects can provide multiple namespaces. This method enumerates
the provides of each dependency and adds them to a hash that can be used
to easily resolve a given dependency by a namespace it provides.
Args:
deps: a list of dependency objects used to build the hash.
Raises:
    Exception: If multiple files try to provide the same namespace.
Returns:
A hash table { namespace: dependency } that can be used to resolve a
dependency by a namespace it provides.
"""
dep_hash = {}
for dep in deps:
for provide in dep.provides:
if provide in dep_hash:
raise Exception('Duplicate provide (%s) in (%s, %s)' % (
provide,
dep_hash[provide].filename,
dep.filename))
dep_hash[provide] = dep
return dep_hash
def CalculateDependencies(paths, inputs):
"""Calculates the dependencies for given inputs.
Description:
This method takes a list of paths (files, directories) and builds a
searchable data structure based on the namespaces that each .js file
provides. It then parses through each input, resolving dependencies
against this data structure. The final output is a list of files,
including the inputs, that represent all of the code that is needed to
compile the given inputs.
Args:
paths: the references (files, directories) that are used to build the
dependency hash.
inputs: the inputs (files, directories, namespaces) that have dependencies
that need to be calculated.
Raises:
Exception: if a provided input is invalid.
Returns:
A list of all files, including inputs, that are needed to compile the given
inputs.
"""
deps = BuildDependenciesFromFiles(paths + inputs)
search_hash = BuildDependencyHashFromDependencies(deps)
result_list = []
seen_list = []
for input_file in inputs:
if IsNamespace(input_file):
namespace = re.search(ns_regex, input_file).group(1)
if namespace not in search_hash:
raise Exception('Invalid namespace (%s)' % namespace)
input_file = search_hash[namespace].filename
if not IsValidFile(input_file) or not IsJsFile(input_file):
raise Exception('Invalid file (%s)' % input_file)
seen_list.append(input_file)
file_handle = open(input_file, 'r')
try:
for line in file_handle:
if re.match(req_regex, line):
require = re.search(req_regex, line).group(1)
ResolveDependencies(require, search_hash, result_list, seen_list)
finally:
file_handle.close()
result_list.append(input_file)
# All files depend on base.js, so put it first.
base_js_path = FindClosureBasePath(paths)
if base_js_path:
result_list.insert(0, base_js_path)
else:
logging.warning('Closure Library base.js not found.')
return result_list
def FindClosureBasePath(paths):
"""Given a list of file paths, return Closure base.js path, if any.
Args:
paths: A list of paths.
Returns:
The path to Closure's base.js file including filename, if found.
"""
for path in paths:
pathname, filename = os.path.split(path)
if filename == 'base.js':
f = open(path)
is_base = False
# Sanity check that this is the Closure base file. Check that this
# is where goog is defined.
for line in f:
if line.startswith('var goog = goog || {};'):
is_base = True
break
f.close()
if is_base:
return path
def ResolveDependencies(require, search_hash, result_list, seen_list):
"""Takes a given requirement and resolves all of the dependencies for it.
Description:
A given requirement may require other dependencies. This method
recursively resolves all dependencies for the given requirement.
Raises:
Exception: when require does not exist in the search_hash.
Args:
require: the namespace to resolve dependencies for.
search_hash: the data structure used for resolving dependencies.
result_list: a list of filenames that have been calculated as dependencies.
This variable is the output for this function.
seen_list: a list of filenames that have been 'seen'. This is required
      for the dependency->dependent ordering.
"""
if require not in search_hash:
raise Exception('Missing provider for (%s)' % require)
dep = search_hash[require]
if not dep.filename in seen_list:
seen_list.append(dep.filename)
for sub_require in dep.requires:
ResolveDependencies(sub_require, search_hash, result_list, seen_list)
result_list.append(dep.filename)
def GetDepsLine(dep, base_path):
"""Returns a JS string for a dependency statement in the deps.js file.
Args:
dep: The dependency that we're printing.
base_path: The path to Closure's base.js including filename.
"""
return 'goog.addDependency("%s", %s, %s);' % (
os.path.normpath(
GetRelpath(dep.filename, base_path)).replace('\\', '\\\\'),
dep.provides, dep.requires)
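# For a dependency providing ['myapp.Main'] and requiring ['goog.dom'], the
# emitted line looks like this (the path is an illustrative relative path):
#   goog.addDependency("../../app/main.js", ['myapp.Main'], ['goog.dom']);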
def GetRelpath(path, start):
"""Return a relative path to |path| from |start|."""
# NOTE: Python 2.6 provides os.path.relpath, which has almost the same
# functionality as this function. Since we want to support 2.4, we have
# to implement it manually. :(
path_list = os.path.abspath(os.path.normpath(path)).split(os.sep)
start_list = os.path.abspath(
os.path.normpath(os.path.dirname(start))).split(os.sep)
common_prefix_count = 0
for i in range(0, min(len(path_list), len(start_list))):
if path_list[i] != start_list[i]:
break
common_prefix_count += 1
return os.sep.join(['..'] * (len(start_list) - common_prefix_count) +
path_list[common_prefix_count:])
def PrintLine(msg, out):
out.write(msg)
out.write('\n')
def PrintDeps(source_paths, deps, out):
"""Print out a deps.js file from a list of source paths.
Args:
source_paths: Paths that we should generate dependency info for.
deps: Paths that provide dependency info. Their dependency info should
not appear in the deps file.
out: The output file.
Returns:
True on success, false if it was unable to find the base path
to generate deps relative to.
"""
base_path = FindClosureBasePath(source_paths + deps)
if not base_path:
return False
PrintLine('// This file was autogenerated by calcdeps.py', out)
excludesSet = set(deps)
for dep in BuildDependenciesFromFiles(source_paths + deps):
if not dep.filename in excludesSet:
PrintLine(GetDepsLine(dep, base_path), out)
return True
def PrintScript(source_paths, out):
for index, dep in enumerate(source_paths):
PrintLine('// Input %d' % index, out)
f = open(dep, 'r')
PrintLine(f.read(), out)
f.close()
def GetJavaVersion():
"""Returns the string for the current version of Java installed."""
proc = subprocess.Popen(['java', '-version'], stderr=subprocess.PIPE)
proc.wait()
version_line = proc.stderr.read().splitlines()[0]
return version_regex.search(version_line).group()
def FilterByExcludes(options, files):
"""Filters the given files by the exlusions specified at the command line.
Args:
options: The flags to calcdeps.
files: The files to filter.
Returns:
A list of files.
"""
excludes = []
if options.excludes:
excludes = ExpandDirectories(options.excludes)
excludesSet = set(excludes)
return [i for i in files if not i in excludesSet]
def GetPathsFromOptions(options):
"""Generates the path files from flag options.
Args:
options: The flags to calcdeps.
Returns:
A list of files in the specified paths. (strings).
"""
search_paths = options.paths
if not search_paths:
search_paths = ['.'] # Add default folder if no path is specified.
search_paths = ExpandDirectories(search_paths)
return FilterByExcludes(options, search_paths)
def GetInputsFromOptions(options):
"""Generates the inputs from flag options.
Args:
options: The flags to calcdeps.
Returns:
A list of inputs (strings).
"""
inputs = options.inputs
if not inputs: # Parse stdin
logging.info('No inputs specified. Reading from stdin...')
inputs = filter(None, [line.strip('\n') for line in sys.stdin.readlines()])
logging.info('Scanning files...')
inputs = ExpandDirectories(inputs)
return FilterByExcludes(options, inputs)
def Compile(compiler_jar_path, source_paths, out, flags=None):
"""Prepares command-line call to Closure compiler.
Args:
compiler_jar_path: Path to the Closure compiler .jar file.
source_paths: Source paths to build, in order.
flags: A list of additional flags to pass on to Closure compiler.
"""
args = ['java', '-jar', compiler_jar_path]
seen = {}
for path in source_paths:
if path not in seen:
seen[path] = 1
args += ['--js', path]
if flags:
args += flags
logging.info('Compiling with the following command: %s', ' '.join(args))
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
(stdoutdata, stderrdata) = proc.communicate()
if proc.returncode != 0:
logging.error('JavaScript compilation failed.')
sys.exit(1)
else:
out.write(stdoutdata)
def main():
"""The entrypoint for this script."""
logging.basicConfig(format='calcdeps.py: %(message)s', level=logging.INFO)
usage = 'usage: %prog [options] arg'
parser = optparse.OptionParser(usage)
parser.add_option('-i',
'--input',
dest='inputs',
action='append',
help='The inputs to calculate dependencies for. Valid '
'values can be files, directories, or namespaces '
'(ns:goog.net.XhrLite). Only relevant to "list" and '
'"script" output.')
parser.add_option('-p',
'--path',
dest='paths',
action='append',
help='The paths that should be traversed to build the '
'dependencies.')
parser.add_option('-d',
'--dep',
dest='deps',
action='append',
help='Directories or files that should be traversed to '
'find required dependencies for the deps file. '
'Does not generate dependency information for names '
'provided by these files. Only useful in "deps" mode.')
parser.add_option('-e',
'--exclude',
dest='excludes',
action='append',
help='Files or directories to exclude from the --path '
'and --input flags')
parser.add_option('-o',
'--output_mode',
dest='output_mode',
action='store',
default='list',
help='The type of output to generate from this script. '
'Options are "list" for a list of filenames, "script" '
'for a single script containing the contents of all the '
'file, "deps" to generate a deps.js file for all '
'paths, or "compiled" to produce compiled output with '
'the Closure compiler.')
parser.add_option('-c',
'--compiler_jar',
dest='compiler_jar',
action='store',
help='The location of the Closure compiler .jar file.')
parser.add_option('-f',
'--compiler_flag',
                    '--compiler_flags', # for backwards compatibility
dest='compiler_flags',
action='append',
help='Additional flag to pass to the Closure compiler. '
'May be specified multiple times to pass multiple flags.')
parser.add_option('--output_file',
dest='output_file',
action='store',
help=('If specified, write output to this path instead of '
'writing to standard output.'))
(options, args) = parser.parse_args()
search_paths = GetPathsFromOptions(options)
if options.output_file:
out = open(options.output_file, 'w')
else:
out = sys.stdout
if options.output_mode == 'deps':
result = PrintDeps(search_paths, ExpandDirectories(options.deps or []), out)
if not result:
logging.error('Could not find Closure Library in the specified paths')
sys.exit(1)
return
inputs = GetInputsFromOptions(options)
logging.info('Finding Closure dependencies...')
deps = CalculateDependencies(search_paths, inputs)
output_mode = options.output_mode
if output_mode == 'script':
PrintScript(deps, out)
elif output_mode == 'list':
# Just print out a dep per line
for dep in deps:
PrintLine(dep, out)
elif output_mode == 'compiled':
# Make sure a .jar is specified.
if not options.compiler_jar:
logging.error('--compiler_jar flag must be specified if --output is '
'"compiled"')
sys.exit(1)
# User friendly version check.
if distutils and not (distutils.version.LooseVersion(GetJavaVersion()) >
distutils.version.LooseVersion('1.6')):
logging.error('Closure Compiler requires Java 1.6 or higher.')
logging.error('Please visit http://www.java.com/getjava')
sys.exit(1)
Compile(options.compiler_jar, deps, out, options.compiler_flags)
else:
logging.error('Invalid value for --output flag.')
sys.exit(1)
if __name__ == '__main__':
main()
|
jmt4/Selenium2
|
third_party/closure/bin/calcdeps.py
|
Python
|
apache-2.0
| 17,507
|
[
"VisIt"
] |
1c4068c78a2e49fed6d9a8ef3046dfac3947fe7c59eeec4c1ed87c1358059303
|
import espresso
import logging
from math import sqrt
system, integrator = espresso.standard_system.LennardJones(1000, (20,20,20), dt=0.00001, temperature = 1.0)
# logging.getLogger("ExtAnalyze").setLevel(logging.INFO)
print "warming up ..."
capForce = espresso.integrator.CapForce(system, capForce=10000.0)
integrator.addExtension(capForce)
integrator.run(50000)
capForce.disconnect()
print "equilibrating ..."
integrator.dt=0.005
integrator.run(50000)
PressureTensor = espresso.analysis.PressureTensor(system)
# interval between measurements
interval = 10
ExtAnalyzePressureTensor = espresso.integrator.ExtAnalyze(PressureTensor, interval=interval)
integrator.addExtension(ExtAnalyzePressureTensor)
print "starting integration ... measuring pressure tensor every ", interval, " steps"
PressureTensor.reset()
integrator.run(10000)
average_PressureTensor = PressureTensor.getAverageValue()
print "average Pressure Tensor = ", average_PressureTensor[:6]
print " std deviation = ", average_PressureTensor[6:]
print "number of measurements = ", PressureTensor.getNumberOfMeasurements()
|
BackupTheBerlios/espressopp
|
examples/analyze_during_integration/analyze_during_integration.py
|
Python
|
gpl-3.0
| 1,099
|
[
"ESPResSo"
] |
5b31188f4a229f62bd47815556f6a1abb779c8cafd106f5285b1e627745c2933
|
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import rand, randn, permutation, multivariate_normal
from shogun import BinaryLabels, RealFeatures, IndexBlock, IndexBlockGroup, FeatureBlockLogisticRegression
def generate_synthetic_logistic_data(n, p, L, blk_nnz, gcov, nstd):
# Generates synthetic data for the logistic regression, using the example
# from [Friedman10]
# n : # of observations
# p : # of predictors
# L : # of blocks
# blk_nnz : # of non-zero coefs. in each block
# gcov : correlation within groups
# nstd : standard deviation of the added noise
# size of each block (assumed to be an integer)
pl = p / L
# generating the coefficients (betas)
coefs = np.zeros((p, 1))
for (i, nnz) in enumerate(blk_nnz):
blkcoefs = np.zeros((pl, 1))
blkcoefs[0:nnz] = np.sign(rand(nnz, 1) - 0.5)
coefs[pl * i:pl * (i + 1)] = permutation(blkcoefs)
# generating the predictors
mu = np.zeros(p)
gsigma = gcov * np.ones((pl, pl))
np.fill_diagonal(gsigma, 1.0)
Sigma = np.kron(np.eye(L), gsigma)
# the predictors come from a standard Gaussian multivariate distribution
X = multivariate_normal(mu, Sigma, n)
# linear function of the explanatory variables in X, plus noise
t = np.dot(X, coefs) + randn(n, 1) * nstd
# applying the logit
Pr = 1 / (1 + np.exp(-t))
# The response variable y[i] is a Bernoulli random variable taking
# value 1 with probability Pr[i]
y = rand(n, 1) <= Pr
# we want each _column_ in X to represent a feature vector
# y and coefs should be also 1D arrays
return X.T, y.flatten(), coefs.flatten()
def misclassified_groups(est_coefs, true_coefs, L):
# Compute the number of groups that are misclassified, i.e. the ones with
# at least one non-zero coefficient whose estimated coefficients are all
# set to zero, or viceversa, as explained in [Friedman10]
# est_coefs : coefficients estimated by the FBLR
# true_coefs : the original coefficients of our synthetic example
# L : number of blocks
p = est_coefs.shape[0] # number of predictors
pl = p / L
est_nz = est_coefs != 0
true_nz = true_coefs != 0
est_blk_nzcount = np.array([sum(est_nz[pl * i:pl * (i + 1)]) for i in xrange(L)])
true_blk_nzcount = np.array([sum(true_nz[pl * i:pl * (i + 1)]) for i in xrange(L)])
return np.sum(np.logical_xor(est_blk_nzcount == 0, true_blk_nzcount == 0))
def misclassified_features(est_coefs, true_coefs):
# Compute the number of individual coefficients that are misclassified,
# i.e. estimated to be zero when the true coefficient is nonzero or
# vice-versa, as explained in [Friedman10]
# est_coefs : coefficients estimated by the FBLR
# true_coefs : the original coefficients of our synthetic example
return np.sum(np.logical_xor(est_coefs == 0, true_coefs == 0))
def compute_misclassifications(cls, true_coefs, L, rel_z):
# Try the given classifier with different values of relative regularization
# parameters, store the coefficients and compute the number of groups
# and features misclassified.
# INPUTS:
# - cls : the classifier to try
# - true_coefs : the original coefficients of our synthetic example
# - L : number of blocks
# - rel_z : regularization values to try, they will be in [0,1]
# OUTPUTS:
# - est_coefs : array with the estimated coefficients, each row for a
# different value of regularization
# - misc_groups, misc_feats : see above
num_z = rel_z.shape[0]
est_coefs = np.zeros((num_z, true_coefs.shape[0]))
misc_groups = np.zeros(num_z)
misc_feats = np.zeros(num_z)
for (i, z) in enumerate(rel_z):
cls.set_z(z)
cls.train()
est_coefs[i, :] = cls.get_w()
misc_groups[i] = misclassified_groups(est_coefs[i, :], true_coefs, L)
misc_feats[i] = misclassified_features(est_coefs[i, :], true_coefs)
return est_coefs, misc_groups, misc_feats
if __name__ == '__main__':
print('FeatureBlockLogisticRegression example')
np.random.seed(956) # reproducible results
# default parameters from [Friedman10]
n = 200
p = 100
L = 10
blk_nnz = [10, 8, 6, 4, 2, 1]
gcov = 0.2
nstd = 0.4
# range of (relative) regularization values to try
min_z = 0
max_z = 1
num_z = 21
# get the data
X, y, true_coefs = generate_synthetic_logistic_data(n, p, L, blk_nnz, gcov, nstd)
# here each column represents a feature vector
features = RealFeatures(X)
# we have to convert the labels to +1/-1
labels = BinaryLabels(np.sign(y.astype(int) - 0.5))
# SETTING UP THE CLASSIFIERS
# CLASSIFIER 1: group LASSO
# build the feature blocks and add them to the block group
pl = p / L
block_group = IndexBlockGroup()
for i in xrange(L):
block_group.add_block(IndexBlock(pl * i, pl * (i + 1)))
cls_gl = FeatureBlockLogisticRegression(0.0, features, labels, block_group)
# with set_regularization(1), the parameter z will indicate the fraction of
# the maximum regularization to use, and so z is in [0,1]
# (reference: SLEP manual)
cls_gl.set_regularization(1)
cls_gl.set_q(2.0) # it is the default anyway...
# CLASSIFIER 2: LASSO (illustrating group lasso with all group sizes = 1)
block_group_ones = IndexBlockGroup()
for i in xrange(p):
block_group_ones.add_block(IndexBlock(i, i + 1))
cls_l = FeatureBlockLogisticRegression(0.0, features, labels, block_group_ones)
cls_l.set_regularization(1)
cls_l.set_q(2.0)
# trying with different values of (relative) regularization parameters
rel_z = np.linspace(min_z, max_z, num_z)
coefs_gl, miscgp_gl, miscft_gl = compute_misclassifications(cls_gl, true_coefs, L, rel_z)
coefs_l, miscgp_l, miscft_l = compute_misclassifications(cls_l, true_coefs, L, rel_z)
# Find the best regularization for each classifier
# for the group lasso: the one that gives the fewest groups misclassified
best_z_gl = np.argmin(miscgp_gl)
# for the lasso: the one that gives the fewest features misclassified
best_z_l = np.argmin(miscft_l)
# plot the true coefs. and the signs of the estimated coefs.
fig = plt.figure()
for (coefs, best_z, name, pos) in zip([coefs_gl, coefs_l], [best_z_gl, best_z_l], ['Group lasso', 'Lasso'], [0, 1]):
ax = plt.subplot2grid((4, 2), (pos, 0), colspan=2)
plt.hold(True)
plt.plot(xrange(p), np.sign(coefs[best_z, :]), 'o', markeredgecolor='none', markerfacecolor='g')
plt.plot(xrange(p), true_coefs, '^', markersize=7, markeredgecolor='r', markerfacecolor='none', markeredgewidth=1)
plt.xticks(xrange(0, p + pl, pl))
plt.yticks([-1, 0, 1])
plt.xlim((-1, p + 1))
plt.ylim((-2, 2))
plt.grid(True)
# plt.legend(('estimated', 'true'), loc='best')
plt.title(name)
plt.xlabel('Predictor [triangles=true coefs], best reg. value = %.2f' % rel_z[best_z])
plt.ylabel('Coefficient')
ax = plt.subplot2grid((4, 2), (2, 0), rowspan=2)
plt.plot(rel_z, miscgp_gl, 'ro-', rel_z, miscgp_l, 'bo-')
plt.legend(('Group lasso', 'Lasso'), loc='best')
plt.title('Groups misclassified')
plt.xlabel('Relative regularization parameter')
plt.ylabel('# of groups misclassified')
ax = plt.subplot2grid((4, 2), (2, 1), rowspan=2)
plt.plot(rel_z, miscft_gl, 'ro-', rel_z, miscft_l, 'bo-')
plt.legend(('Group lasso', 'Lasso'), loc='best')
plt.title('Features misclassified')
plt.xlabel('Relative regularization parameter')
plt.ylabel('# of features misclassified')
plt.tight_layout(1.2, 0, 0)
plt.show()
|
besser82/shogun
|
examples/undocumented/python/graphical/group_lasso.py
|
Python
|
bsd-3-clause
| 7,789
|
[
"Gaussian"
] |
bc91af7b943ffbdb8e9c5d8ea9daee9c8b52bc03d1d72c3c97c6462e7ee05bf5
|
# -*- coding: utf8
"""Random Projection transformers
Random Projections are a simple and computationally efficient way to
reduce the dimensionality of the data by trading a controlled amount
of accuracy (as additional variance) for faster processing times and
smaller model sizes.
The dimensions and distribution of Random Projections matrices are
controlled so as to preserve the pairwise distances between any two
samples of the dataset.
The main theoretical result behind the efficiency of random projection is the
`Johnson-Lindenstrauss lemma (quoting Wikipedia)
<https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_:
In mathematics, the Johnson-Lindenstrauss lemma is a result
concerning low-distortion embeddings of points from high-dimensional
into low-dimensional Euclidean space. The lemma states that a small set
of points in a high-dimensional space can be embedded into a space of
much lower dimension in such a way that distances between the points are
nearly preserved. The map used for the embedding is at least Lipschitz,
and can even be taken to be an orthogonal projection.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>,
# Arnaud Joly <a.joly@ulg.ac.be>
# License: BSD 3 clause
from __future__ import division
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.testing import assert_equal
import scipy.sparse as sp
from .base import BaseEstimator, TransformerMixin
from .externals import six
from .externals.six.moves import xrange
from .utils import check_random_state
from .utils.extmath import safe_sparse_dot
from .utils.random import sample_without_replacement
from .utils.validation import check_array
from .exceptions import DataDimensionalityWarning
from .exceptions import NotFittedError
__all__ = ["SparseRandomProjection",
"GaussianRandomProjection",
"johnson_lindenstrauss_min_dim"]
def johnson_lindenstrauss_min_dim(n_samples, eps=0.1):
"""Find a 'safe' number of components to randomly project to
The distortion introduced by a random projection `p` only changes the
distance between two points by a factor (1 +- eps) in an euclidean space
with good probability. The projection `p` is an eps-embedding as defined
by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features], eps is in ]0, 1[ and p is a projection by a random Gaussian
N(0, 1) matrix with shape [n_components, n_features] (or a sparse
Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
Note that the number of dimensions is independent of the original
number of features but instead depends on the size of the dataset:
the larger the dataset, the higher is the minimal dimensionality of
an eps-embedding.
Read more in the :ref:`User Guide <johnson_lindenstrauss>`.
Parameters
----------
n_samples : int or numpy array of int greater than 0,
Number of samples. If an array is given, it will compute
a safe number of components array-wise.
eps : float or numpy array of float in ]0,1[, optional (default=0.1)
Maximum distortion rate as defined by the Johnson-Lindenstrauss lemma.
If an array is given, it will compute a safe number of components
array-wise.
Returns
-------
n_components : int or numpy array of int,
The minimal number of components to guarantee with good probability
an eps-embedding with n_samples.
Examples
--------
>>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)
663
>>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])
array([ 663, 11841, 1112658])
>>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1)
array([ 7894, 9868, 11841])
References
----------
.. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
.. [2] Sanjoy Dasgupta and Anupam Gupta, 1999,
"An elementary proof of the Johnson-Lindenstrauss Lemma."
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654
"""
eps = np.asarray(eps)
n_samples = np.asarray(n_samples)
if np.any(eps <= 0.0) or np.any(eps >= 1):
raise ValueError(
"The JL bound is defined for eps in ]0, 1[, got %r" % eps)
    if np.any(n_samples <= 0):
raise ValueError(
"The JL bound is defined for n_samples greater than zero, got %r"
% n_samples)
denominator = (eps ** 2 / 2) - (eps ** 3 / 3)
return (4 * np.log(n_samples) / denominator).astype(np.int)
def _check_density(density, n_features):
"""Factorize density check according to Li et al."""
if density == 'auto':
density = 1 / np.sqrt(n_features)
elif density <= 0 or density > 1:
raise ValueError("Expected density in range ]0, 1], got: %r"
% density)
return density
def _check_input_size(n_components, n_features):
"""Factorize argument checking for random matrix generation"""
if n_components <= 0:
raise ValueError("n_components must be strictly positive, got %d" %
n_components)
    if n_features <= 0:
        raise ValueError("n_features must be strictly positive, got %d" %
                         n_features)
def gaussian_random_matrix(n_components, n_features, random_state=None):
""" Generate a dense Gaussian random matrix.
The components of the random matrix are drawn from
N(0, 1.0 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
random_state : int, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Returns
-------
components : numpy array of shape [n_components, n_features]
The generated Gaussian random matrix.
See Also
--------
GaussianRandomProjection
sparse_random_matrix
"""
_check_input_size(n_components, n_features)
rng = check_random_state(random_state)
components = rng.normal(loc=0.0,
scale=1.0 / np.sqrt(n_components),
size=(n_components, n_features))
return components
def sparse_random_matrix(n_components, n_features, density='auto',
random_state=None):
"""Generalized Achlioptas random sparse matrix for random projection
Setting density to 1 / 3 will yield the original matrix by Dimitris
Achlioptas while setting a lower value will yield the generalization
by Ping Li et al.
If we note :math:`s = 1 / density`, the components of the random matrix are
drawn from:
    - -sqrt(s) / sqrt(n_components) with probability 1 / (2s)
    - 0 with probability 1 - 1 / s
    - +sqrt(s) / sqrt(n_components) with probability 1 / (2s)
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
density : float in range ]0, 1] or 'auto', optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Returns
-------
components: numpy array or CSR matrix with shape [n_components, n_features]
        The generated sparse random matrix.
See Also
--------
SparseRandomProjection
gaussian_random_matrix
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
http://www.cs.ucsc.edu/~optas/papers/jl.pdf
"""
_check_input_size(n_components, n_features)
density = _check_density(density, n_features)
rng = check_random_state(random_state)
if density == 1:
# skip index generation if totally dense
components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1
return 1 / np.sqrt(n_components) * components
else:
# Generate location of non zero elements
indices = []
offset = 0
indptr = [offset]
for i in xrange(n_components):
# find the indices of the non-zero components for row i
n_nonzero_i = rng.binomial(n_features, density)
indices_i = sample_without_replacement(n_features, n_nonzero_i,
random_state=rng)
indices.append(indices_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
# Among non zero components the probability of the sign is 50%/50%
data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1
# build the CSR structure by concatenating the rows
components = sp.csr_matrix((data, indices, indptr),
shape=(n_components, n_features))
return np.sqrt(1 / density) / np.sqrt(n_components) * components
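# Illustrative sketch (not part of the original module): the realized density
# of the generated matrix should match the requested one, and its non-zero
# entries take only the two values +/- sqrt(1 / density) / sqrt(n_components).
def _demo_sparse_density(n_components=100, n_features=500, density=0.1):
    A = sparse_random_matrix(n_components, n_features, density=density,
                             random_state=0)
    realized = A.nnz / float(n_components * n_features)
    expected = np.sqrt(1. / density) / np.sqrt(n_components)
    assert abs(realized - density) < 0.01
    assert np.allclose(np.abs(A.data), expected)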
class BaseRandomProjection(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class for random projections.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self, n_components='auto', eps=0.1, dense_output=False,
random_state=None):
self.n_components = n_components
self.eps = eps
self.dense_output = dense_output
self.random_state = random_state
self.components_ = None
self.n_components_ = None
@abstractmethod
def _make_random_matrix(n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
def fit(self, X, y=None):
"""Generate a sparse random projection matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
Training set: only the shape is used to find optimal random
matrix dimensions based on the theory referenced in the
            aforementioned papers.
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
self
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples, n_features = X.shape
if self.n_components == 'auto':
self.n_components_ = johnson_lindenstrauss_min_dim(
n_samples=n_samples, eps=self.eps)
if self.n_components_ <= 0:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is invalid' % (
self.eps, n_samples, self.n_components_))
elif self.n_components_ > n_features:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is larger than the original space with '
'n_features=%d' % (self.eps, n_samples, self.n_components_,
n_features))
else:
if self.n_components <= 0:
raise ValueError("n_components must be greater than 0, got %s"
% self.n_components_)
elif self.n_components > n_features:
warnings.warn(
"The number of components is higher than the number of"
" features: n_features < n_components (%s < %s)."
"The dimensionality of the problem will not be reduced."
% (n_features, self.n_components),
DataDimensionalityWarning)
self.n_components_ = self.n_components
# Generate a projection matrix of size [n_components, n_features]
self.components_ = self._make_random_matrix(self.n_components_,
n_features)
# Check contract
assert_equal(
self.components_.shape,
(self.n_components_, n_features),
            err_msg=('An error has occurred: the self.components_ matrix '
                     'does not have the proper shape.'))
return self
def transform(self, X, y=None):
"""Project the data by using matrix product with the random matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
The input data to project into a smaller dimensional space.
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
X_new : numpy array or scipy sparse of shape [n_samples, n_components]
Projected array.
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
if self.components_ is None:
raise NotFittedError('No random projection matrix had been fit.')
if X.shape[1] != self.components_.shape[1]:
raise ValueError(
                'Impossible to perform projection: '
'X at fit stage had a different number of features. '
'(%s != %s)' % (X.shape[1], self.components_.shape[1]))
X_new = safe_sparse_dot(X, self.components_.T,
dense_output=self.dense_output)
return X_new
class GaussianRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through Gaussian random projection
The components of the random matrix are drawn from N(0, 1 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
        It should be noted that the Johnson-Lindenstrauss lemma can yield
        very conservative estimates of the required number of components
        as it makes no assumption about the structure of the dataset.
eps : strictly positive float, optional (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Attributes
----------
    n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : numpy array of shape [n_components, n_features]
Random matrix used for the projection.
See Also
--------
SparseRandomProjection
"""
def __init__(self, n_components='auto', eps=0.1, random_state=None):
super(GaussianRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=True,
random_state=random_state)
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
return gaussian_random_matrix(n_components,
n_features,
random_state=random_state)
class SparseRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through sparse random projection
Sparse random matrix is an alternative to dense random
projection matrix that guarantees similar embedding quality while being
much more memory efficient and allowing faster computation of the
projected data.
If we note `s = 1 / density` the components of the random matrix are
drawn from:
    - -sqrt(s) / sqrt(n_components) with probability 1 / (2s)
    - 0 with probability 1 - 1 / s
    - +sqrt(s) / sqrt(n_components) with probability 1 / (2s)
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
        It should be noted that the Johnson-Lindenstrauss lemma can yield
        very conservative estimates of the required number of components
        as it makes no assumption about the structure of the dataset.
density : float in range ]0, 1], optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
    eps : strictly positive float, optional (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
dense_output : boolean, optional (default=False)
If True, ensure that the output of the random projection is a
dense numpy array even if the input and random projection matrix
are both sparse. In practice, if the number of components is
small the number of zero components in the projected data will
be very small and it will be more CPU and memory efficient to
use a dense representation.
If False, the projected data uses a sparse representation if
the input is sparse.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Attributes
----------
    n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : CSR matrix with shape [n_components, n_features]
Random matrix used for the projection.
    density_ : float in range 0.0 - 1.0
        Concrete density computed when density = "auto".
See Also
--------
GaussianRandomProjection
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
https://users.soe.ucsc.edu/~optas/papers/jl.pdf
"""
def __init__(self, n_components='auto', density='auto', eps=0.1,
dense_output=False, random_state=None):
super(SparseRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=dense_output,
random_state=random_state)
self.density = density
self.density_ = None
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
self.density_ = _check_density(self.density, n_features)
return sparse_random_matrix(n_components,
n_features,
density=self.density_,
random_state=random_state)
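# Illustrative sketch (not part of the original module): reducing a random
# dataset with the automatic, eps-controlled number of components.
if __name__ == '__main__':
    rng = np.random.RandomState(42)
    X = rng.rand(100, 10000)
    transformer = SparseRandomProjection(eps=0.5, random_state=42)
    X_new = transformer.fit_transform(X)
    print(X_new.shape)  # (100, n_components_) with n_components_ << 10000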
|
RPGOne/scikit-learn
|
sklearn/random_projection.py
|
Python
|
bsd-3-clause
| 22,132
|
[
"Gaussian"
] |
0e705e12538608979caf606d67fffb7bbbb5d865831c6310c5f4869ebd28d633
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Stephane Charette <stephanecharette@gmail.com>
# Contribution 2009 by Bob Ham <rah@bash.sh>
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Generate an hourglass graph using the GraphViz generator.
/Reports/GraphViz/Hourglass Graph
"""
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.errors import ReportError
from gramps.gen.plug.menu import (PersonOption, BooleanOption, NumberOption,
EnumeratedListOption)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.datehandler import get_date
from gramps.gen.utils.db import get_birth_or_fallback, get_death_or_fallback
#------------------------------------------------------------------------
#
# Constant options items
#
#------------------------------------------------------------------------
_COLORS = [ { 'name' : _("B&W outline"), 'value' : "outline" },
{ 'name' : _("Colored outline"), 'value' : "colored" },
{ 'name' : _("Color fill"), 'value' : "filled" }]
#------------------------------------------------------------------------
#
# HourGlassReport
#
#------------------------------------------------------------------------
class HourGlassReport(Report):
"""
An hourglass report displays ancestors and descendants of a center person.
"""
def __init__(self, database, options, user):
"""
Create HourGlass object that produces the report.
"""
Report.__init__(self, database, options, user)
# Would be nice to get rid of these 2 hard-coded arrays of colours
# and instead allow the user to pick-and-choose whatever colour they
# want. When/if this is done, take a look at the colour-selection
# widget and code used in the FamilyLines graph.
colored = {
'male': 'dodgerblue4',
'female': 'deeppink',
'unknown': 'black',
'family': 'darkgreen'
}
filled = {
'male': 'lightblue',
'female': 'lightpink',
'unknown': 'lightgray',
'family': 'lightyellow'
}
self.__db = database
self.__used_people = []
menu = options.menu
self.max_descend = menu.get_option_by_name('maxdescend').get_value()
self.max_ascend = menu.get_option_by_name('maxascend').get_value()
pid = menu.get_option_by_name('pid').get_value()
self.center_person = database.get_person_from_gramps_id(pid)
        if self.center_person is None:
raise ReportError(_("Person %s is not in the Database") % pid )
self.colorize = menu.get_option_by_name('color').get_value()
if self.colorize == 'colored':
self.colors = colored
elif self.colorize == 'filled':
self.colors = filled
self.roundcorners = menu.get_option_by_name('roundcorners').get_value()
def write_report(self):
"""
Generate the report.
"""
self.add_person(self.center_person)
self.traverse_up(self.center_person, 1)
self.traverse_down(self.center_person, 1)
def traverse_down(self, person, gen):
"""
Recursively find the descendants of the given person.
"""
if gen > self.max_descend:
return
for family_handle in person.get_family_handle_list():
family = self.__db.get_family_from_handle(family_handle)
self.add_family(family)
self.doc.add_link( person.get_gramps_id(), family.get_gramps_id() )
for child_ref in family.get_child_ref_list():
child_handle = child_ref.get_reference_handle()
if child_handle not in self.__used_people:
# Avoid going down paths twice when descendant cousins marry
self.__used_people.append(child_handle)
child = self.__db.get_person_from_handle(child_handle)
self.add_person(child)
self.doc.add_link(family.get_gramps_id(),
child.get_gramps_id() )
self.traverse_down(child, gen+1)
def traverse_up(self, person, gen):
"""
Recursively find the ancestors of the given person.
"""
if gen > self.max_ascend:
return
family_handle = person.get_main_parents_family_handle()
if family_handle:
family = self.__db.get_family_from_handle(family_handle)
family_id = family.get_gramps_id()
self.add_family(family)
self.doc.add_link( family_id, person.get_gramps_id(),
head='none', tail='normal' )
father_handle = family.get_father_handle()
if father_handle and father_handle not in self.__used_people:
self.__used_people.append(father_handle)
father = self.__db.get_person_from_handle(father_handle)
self.add_person(father)
self.doc.add_link( father.get_gramps_id(), family_id,
head='none', tail='normal' )
self.traverse_up(father, gen+1)
mother_handle = family.get_mother_handle()
if mother_handle and mother_handle not in self.__used_people:
self.__used_people.append(mother_handle)
mother = self.__db.get_person_from_handle( mother_handle )
self.add_person( mother )
self.doc.add_link( mother.get_gramps_id(), family_id,
head='none', tail='normal' )
self.traverse_up( mother, gen+1 )
def add_person(self, person):
"""
Add a person to the Graph. The node id will be the person's gramps id.
"""
p_id = person.get_gramps_id()
name = name_displayer.display_formal(person)
birth_evt = get_birth_or_fallback(self.__db, person)
if birth_evt:
birth = get_date(birth_evt)
else:
birth = ""
death_evt = get_death_or_fallback(self.__db, person)
if death_evt:
death = get_date(death_evt)
else:
death = ""
label = "%s \\n(%s - %s)" % (name, birth, death)
(shape, style, color, fill) = self.get_gender_style(person)
self.doc.add_node(p_id, label, shape, color, style, fill)
def add_family(self, family):
"""
Add a family to the Graph. The node id will be the family's gramps id.
"""
family_id = family.get_gramps_id()
label = ""
marriage = ReportUtils.find_marriage(self.__db, family)
if marriage:
label = get_date(marriage)
color = ""
fill = ""
style = "solid"
if self.colorize == 'colored':
color = self.colors['family']
elif self.colorize == 'filled':
fill = self.colors['family']
style = "filled"
self.doc.add_node(family_id, label, "ellipse", color, style, fill)
def get_gender_style(self, person):
"return gender specific person style"
gender = person.get_gender()
shape = "box"
style = "solid"
color = ""
fill = ""
if gender == person.FEMALE and self.roundcorners:
style = "rounded"
elif gender == person.UNKNOWN:
shape = "hexagon"
if self.colorize == 'colored':
if gender == person.MALE:
color = self.colors['male']
elif gender == person.FEMALE:
color = self.colors['female']
else:
color = self.colors['unknown']
elif self.colorize == 'filled':
style += ",filled"
if gender == person.MALE:
fill = self.colors['male']
elif gender == person.FEMALE:
fill = self.colors['female']
else:
fill = self.colors['unknown']
return(shape, style, color, fill)
#------------------------------------------------------------------------
#
# HourGlassOptions
#
#------------------------------------------------------------------------
class HourGlassOptions(MenuReportOptions):
"""
Defines options for the HourGlass report.
"""
def __init__(self, name, dbase):
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
"""
Create all the menu options for this report.
"""
category_name = _("Options")
pid = PersonOption(_("Center Person"))
pid.set_help(_("The Center person for the graph"))
menu.add_option(category_name, "pid", pid)
max_gen = NumberOption(_('Max Descendant Generations'), 10, 1, 15)
max_gen.set_help(_("The number of generations of descendants to "
"include in the graph"))
menu.add_option(category_name, "maxdescend", max_gen)
max_gen = NumberOption(_('Max Ancestor Generations'), 10, 1, 15)
max_gen.set_help(_("The number of generations of ancestors to "
"include in the graph"))
menu.add_option(category_name, "maxascend", max_gen)
################################
category_name = _("Graph Style")
################################
color = EnumeratedListOption(_("Graph coloring"), "filled")
for i in range( 0, len(_COLORS) ):
color.add_item(_COLORS[i]["value"], _COLORS[i]["name"])
color.set_help(_("Males will be shown with blue, females "
"with red. If the sex of an individual "
"is unknown it will be shown with gray."))
menu.add_option(category_name, "color", color)
roundedcorners = BooleanOption( # see bug report #2180
_("Use rounded corners"), False)
roundedcorners.set_help(
_("Use rounded corners to differentiate "
"between women and men."))
menu.add_option(category_name, "roundcorners", roundedcorners)
|
Forage/Gramps
|
gramps/plugins/graph/gvhourglass.py
|
Python
|
gpl-2.0
| 11,649
|
[
"Brian"
] |
55caf4ff1d6d000525a629abfd5c55aed8589ebaca11a6b65156259b862da9b7
|
# Copyright Yair Benita Y.Benita@pharm.uu.nl
# Biopython (http://biopython.org) license applies
"""Codon adaption indxes, including Sharp and Li (1987) E. coli index.
Currently this module only defines a single codon adaption index from
Sharp & Li, Nucleic Acids Res. 1987.
"""
__docformat__ = "restructuredtext en"
SharpEcoliIndex = {
'GCA': 0.586, 'GCC': 0.122, 'GCG': 0.424, 'GCT': 1, 'AGA': 0.004, 'AGG': 0.002, 'CGA': 0.004,
'CGC': 0.356, 'CGG': 0.004, 'CGT': 1, 'AAC': 1, 'AAT': 0.051, 'GAC': 1, 'GAT': 0.434, 'TGC': 1,
'TGT': 0.5, 'CAA': 0.124, 'CAG': 1, 'GAA': 1, 'GAG': 0.259, 'GGA': 0.01, 'GGC': 0.724, 'GGG': 0.019,
'GGT': 1, 'CAC': 1, 'CAT': 0.291, 'ATA': 0.003, 'ATC': 1, 'ATT': 0.185, 'CTA': 0.007, 'CTC': 0.037,
'CTG': 1, 'CTT': 0.042, 'TTA': 0.02, 'TTG': 0.02, 'AAA': 1, 'AAG': 0.253, 'ATG': 1, 'TTC': 1, 'TTT': 0.296,
'CCA': 0.135, 'CCC': 0.012, 'CCG': 1, 'CCT': 0.07, 'AGC': 0.41, 'AGT': 0.085, 'TCA': 0.077, 'TCC': 0.744,
'TCG': 0.017, 'TCT': 1, 'ACA': 0.076, 'ACC': 1, 'ACG': 0.099, 'ACT': 0.965, 'TGG': 1, 'TAC': 1, 'TAT': 0.239,
'GTA': 0.495, 'GTC': 0.066, 'GTG': 0.221, 'GTT': 1}
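# Illustrative sketch (not part of the original module): the Codon Adaptation
# Index (CAI) of a coding sequence is the geometric mean of the relative
# adaptiveness values above (Sharp & Li, 1987). The sequence is a made-up
# example; Biopython's Bio.SeqUtils.CodonUsage module has the full version.
if __name__ == '__main__':
    import math
    seq = "ATGGCACGTAAGTTG"  # hypothetical coding sequence
    codons = [seq[i:i + 3] for i in range(0, len(seq), 3)]
    log_sum = 0.0
    for codon in codons:
        log_sum += math.log(SharpEcoliIndex[codon])
    cai = math.exp(log_sum / len(codons))
    print("CAI = %.3f" % cai)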
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/SeqUtils/CodonUsageIndices.py
|
Python
|
gpl-2.0
| 1,108
|
[
"Biopython"
] |
38e887c3b87bfbf70451b5690dc5a925804b275bffd16f5e2c87a21ca91557cc
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This module is responsible for loading the data (via the **loaddata** module
# in the datamodel directory) and then calling the necessary algorithms to
# predict the score.
# Note that it acts as an intermediary, producing results by interacting with
# all the other files.
# library packages
import pickle
# third party packages
import numpy as np
# local files
from ..datamodel import loaddata, nitems, nusers
from . import cf, ann, convert
from . import hyperparam_loc
from . import RBM_user
class Predict:
"""
    It is responsible for predicting the score a user would give to an item,
    using different techniques.
    No matter which technique is used, the data must be loaded first, so it
    makes sense to load the data in the __init__ method. Each technique uses
    a different algorithm to compute the prediction, so each method has its
    own separate implementation.
"""
def __init__(self):
self.data = loaddata.Data()
self.data.load_data()
@staticmethod
def scale(l):
l = l[0]
no, val = 1, l[0]
for i, x in enumerate(l):
if x > val:
no = i + 1
val = x
return no
@staticmethod
def f(rating):
"""
converts the rating to a format for a five output neuron
:param rating: its a rating value given to a movie item
:type rating: int
"""
l = [0, 0, 0, 0, 0]
l[rating - 1] = 1
return l
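# Illustrative sketch (not part of the original module): `f` one-hot encodes a
# 1-5 rating for the five output neurons, and `scale` inverts it by returning
# the 1-based index of the largest activation, so scale([f(r)]) == r.
def _demo_rating_encoding():
    assert Predict.f(4) == [0, 0, 0, 1, 0]
    assert Predict.scale([Predict.f(4)]) == 4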
class PredictNeuralNetwork(Predict):
def __init__(self):
Predict.__init__(self)
self.rating_matrix = self.data.get_rating_matrix_with_nan()
self.correlation_matrix = cf.Correlation().pearson(self.rating_matrix)
def create_training_examples_with_item(self, ratings):
"""
:param ratings: list of tuples with item_id at 0th index and rating at 1th index
:type ratings: list
:return: list of tuples of training examples and each training example contains item feature and rating given to that item
:rtype: list
"""
feature = []
for item_id, rating in ratings:
feature.append((np.array(list(self.data.get_item_by_id(item_id).get_genres().values())), np.array(self.f(rating[0]))))
return feature
def create_training_examples_with_item_and_user_rating(self):
"""
"""
raise NotImplementedError
def training_and_test_for_an_user_with_item(self, user_id):
"""
This method roughly does the following:
1. find how many movies user_name_of(user_id) has rated
2. divide the number of ratings to 80% (for training) and 20% (for test)
        3. create ndarray both from 80% and 20% of the information separately
4. train it
5. test it
:param user_id: the id of the user for which you wanted to train and test
:type user_id: int
"""
ratings_by_user_id = list(self.data.get_user_by_id(user_id).get_movie_rating().items())
# find how many movies user_name_of(user_id) has rated
nratings_by_user_id = len(ratings_by_user_id)
# divide the number of ratings to 80% (for training) and 20% (for test)
nratings_for_train = int(nratings_by_user_id * .8)
nratings_for_test = nratings_by_user_id - nratings_for_train
ratings_for_train = ratings_by_user_id[:nratings_for_train]
ratings_for_test = ratings_by_user_id[nratings_for_train:]
# create ndarray both from 80% and 20% of the information separately
train = self.create_training_examples_with_item(ratings_for_train)
test = self.create_training_examples_with_item(ratings_for_test)
# train it
NN = ann.Neural_Network()
NN.backpropagation(train, 700, .05)
#for feature, y in test:
#print(convert.f_inverse_cap(list(NN.feedforward(feature)[0])), convert.f_inverse(list(y)))
# test it
whole = self.create_training_examples_with_item(list(map(lambda x: (x, [1, None]), range(1, nitems + 1))))
ans = []
for feature, y in whole:
ans.append(convert.f_inverse_cap(list(NN.feedforward(feature)[0])))
return ans
def training_and_test_for_an_user_with_item_and_user_rating(self, user_id):
"""
This method roughly does the following:
1. sort neighbors according to the similarity
2. find how many movies user_name_of(user_id) has rated
3. divide the number of ratings to 80% (for training) and 20% (for test)
        4. create ndarray both from 80% and 20% of the information separately
5. train it
6. test it
:param user_id: the id of the user for which you wanted to train and test
:type user_id: int
"""
raise NotImplementedError
class PredictRBM(Predict):
def __init__(self):
Predict.__init__(self)
self.rating_matrix = self.data.get_rating_matrix_with_zero()
# create the RBM
self.rbm = RBM_user.RBM_User(self.rating_matrix.shape[1] - 1, 500)
def load_hyperparameters(self):
"""
loads the hyperparameters from the file which has been saved by training the RBM model
"""
self.hyperparam = pickle.load(open(hyperparam_loc, 'rb'))
self.rbm.bvisible = self.hyperparam[0]
self.rbm.weights = self.hyperparam[1]
self.rbm.bhidden = self.hyperparam[2]
def train_rbm(self):
"""
Train the rbm by contrastive divergence
"""
t = RBM_user.Trainer(self.data, self.rating_matrix, self.rbm)
t.train(1000, 1, 0.05)
def predict(self, user_id, movie_id = None):
"""
Predict how much rating the user will give
:param user_id: id of a particular user
:type user_id: int
:param movie_id: id of a particular movie
:type movie_id: int
"""
user_vector = self.rating_matrix[user_id][1:]
h = self.rbm.positive_phase(user_vector)
v, h = self.rbm.negative_phase(h)
        if movie_id is not None: return v[0][movie_id]
else: return v[0]
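if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module): assumes the
    # rating data and a previously trained hyperparameter pickle at
    # `hyperparam_loc` are available on disk.
    predictor = PredictRBM()
    predictor.load_hyperparameters()
    print(predictor.predict(7))  # reconstructed rating vector for user 7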
|
sagnik17/Movie-Recommendation-System
|
mrs/recsys/predict.py
|
Python
|
gpl-3.0
| 6,251
|
[
"NEURON"
] |
b3fbe04959f35e0a0cbf261032052449cc5b650a3143d1521d67ef7e1fc0824b
|
import os
import json
import Airfoil
import SplineRefine
import TrailingEdge
import Meshing
import Connect
from Settings import DATAPATH
import logging
logger = logging.getLogger(__name__)
class Batch:
def __init__(self, app, batch_controlfile, __version__):
self.app = app
self.app.mainwindow = self
self.load_batch_control(batch_controlfile)
stars = 50
message_stars = stars*'*'
print('\n' + message_stars)
message = '{:*^{stars}}'.format(' PYAERO batch meshing ', stars=stars)
print(message)
message = '{:*^{stars}}'.format(' v' + __version__ + ' ', stars=stars)
print(message)
logger.info(message)
print(message_stars + '\n')
def load_batch_control(self, batch_controlfile):
with open(batch_controlfile, 'r') as f:
self.batch_control = json.load(f)
def run_batch(self):
# loop all airfoils
airfoil_path = self.batch_control['Airfoils']['path']
mesh_path = self.batch_control['Output formats']['path']
output_formats = self.batch_control['Output formats']['formats']
print('Airfoil path is', airfoil_path)
print('Mesh output path is', mesh_path, '\n')
airfoils = self.batch_control['Airfoils']['names']
trailing_edges = self.batch_control['Airfoils']['trailing_edges']
message = 'Airfoils to mesh:'
print(message)
logger.info(message)
for airfoil in airfoils:
message = f' --> {airfoil}'
print(message)
logger.info(message)
print('\n')
for i, airfoil in enumerate(airfoils):
message = f'Starting batch meshing for airfoil {airfoil}'
print(message)
logger.info(message)
# load airfoil
basename = os.path.splitext(airfoil)[0]
self.airfoil = Airfoil.Airfoil(basename)
self.airfoil.readContour(os.path.join(airfoil_path, airfoil), '#')
# spline and refine
refinement = self.batch_control['Airfoil contour refinement']
refine = SplineRefine.SplineRefine()
refine.doSplineRefine(tolerance=refinement['Refinement tolerance'],
points=refinement['Number of points on spline'],
ref_te=refinement['Refine trailing edge old'],
ref_te_n=refinement['Refine trailing edge new'],
ref_te_ratio=refinement['Refine trailing edge ratio'])
# trailing edge
if trailing_edges[i] == 'yes':
self.app.mainwindow.airfoil.has_TE = True
te = self.batch_control['Airfoil trailing edge']
trailing = TrailingEdge.TrailingEdge()
trailing.trailingEdge(blend=te['Upper side blending length'] / 100.0,
ex=te['Upper blending polynomial exponent'],
thickness=te['Trailing edge thickness relative to chord'],
side='upper')
trailing.trailingEdge(blend=te['Lower side blending length'] / 100.0,
ex=te['Lower blending polynomial exponent'],
thickness=te['Trailing edge thickness relative to chord'],
side='lower')
# make mesh
wind_tunnel = Meshing.Windtunnel()
contour = self.app.mainwindow.airfoil.spline_data[0]
# mesh around airfoil
acm = self.batch_control['Airfoil contour mesh']
wind_tunnel.AirfoilMesh(name='block_airfoil',
contour=contour,
divisions=acm['Divisions normal to airfoil'],
ratio=acm['Cell growth rate'],
thickness=acm['1st cell layer thickness'])
# mesh at trailing edge
tem = self.batch_control['Airfoil trailing edge mesh']
wind_tunnel.TrailingEdgeMesh(name='block_TE',
te_divisions=tem['Divisions at trailing edge'],
thickness=tem['1st cell layer thickness'],
divisions=tem['Divisions downstream'],
ratio=tem['Cell growth rate'])
# mesh tunnel airfoil
tam = self.batch_control['Windtunnel mesh airfoil']
wind_tunnel.TunnelMesh(name='block_tunnel',
tunnel_height=tam['Windtunnel height'],
divisions_height=tam['Divisions of tunnel height'],
ratio_height=tam['Cell thickness ratio'],
dist=tam['Distribution biasing'])
# mesh tunnel wake
twm = self.batch_control['Windtunnel mesh wake']
wind_tunnel.TunnelMeshWake(name='block_tunnel_wake',
tunnel_wake=twm['Windtunnel wake'],
divisions=twm['Divisions in the wake'],
ratio=twm['Cell thickness ratio'],
spread=twm['Equalize vertical wake line at'] / 100.0)
# connect mesh blocks
connect = Connect.Connect(None)
vertices, connectivity, _ = \
connect.connectAllBlocks(wind_tunnel.blocks)
# add mesh to Wind-tunnel instance
wind_tunnel.mesh = vertices, connectivity
# generate cell to edge connectivity from mesh
wind_tunnel.makeLCE()
# generate boundaries from mesh connectivity
wind_tunnel.makeBoundaries()
message = f'Finished batch meshing for airfoil {airfoil}'
print(message)
logger.info(message)
# export mesh
message = f'Starting mesh export for airfoil {airfoil}'
print(message)
logger.info(message)
for output_format in output_formats:
extension = {'FLMA': '.flma',
'SU2': '.su2',
'GMSH': '.msh',
'VTK': '.vtk',
'CGNS': '.cgns',
'ABAQUS': '.inp'}
mesh_name = os.path.join(mesh_path, basename + extension[output_format])
getattr(Meshing.BlockMesh, 'write'+output_format)(wind_tunnel, name=mesh_name)
message = f'Finished mesh export for airfoil {airfoil} to {mesh_name}'
print(message)
logger.info(message)
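if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): driving a batch
    # run from the command line. The minimal app stub and the control-file
    # argument are assumptions; PyAero normally constructs Batch itself.
    import sys
    class _AppStub(object):
        """Stands in for the PyAero application object."""
        pass
    batch = Batch(_AppStub(), sys.argv[1], '0.0-demo')
    batch.run_batch()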
|
chiefenne/PyAero
|
src/BatchMode.py
|
Python
|
mit
| 7,028
|
[
"VTK"
] |
00cd3626f7056237ef694f3876ceebc92831ee2797c3e384333dc70299e3370d
|
__author__ = 'Mark Dickinson'
__source__ = 'http://code.activestate.com/recipes/578507-strongly-connected-components-of-a-directed-graph/'
def strongly_connected_components_path(vertices, edges):
"""
Find the strongly connected components of a directed graph.
Uses a recursive linear-time algorithm described by Gabow [1]_ to find all
strongly connected components of a directed graph.
Parameters
----------
vertices : iterable
A sequence or other iterable of vertices. Each vertex should be
hashable.
edges : mapping
Dictionary (or mapping) that maps each vertex v to an iterable of the
vertices w that are linked to v by a directed edge (v, w).
Returns
-------
components : iterator
An iterator that yields sets of vertices. Each set produced gives the
vertices of one strongly connected component.
Raises
------
RuntimeError
If the graph is deep enough that the algorithm exceeds Python's
recursion limit.
Notes
-----
The algorithm has running time proportional to the total number of vertices
and edges. It's practical to use this algorithm on graphs with hundreds of
thousands of vertices and edges.
The algorithm is recursive. Deep graphs may cause Python to exceed its
recursion limit.
`vertices` will be iterated over exactly once, and `edges[v]` will be
iterated over exactly once for each vertex `v`. `edges[v]` is permitted to
specify the same vertex multiple times, and it's permissible for `edges[v]`
to include `v` itself. (In graph-theoretic terms, loops and multiple edges
are permitted.)
References
----------
.. [1] Harold N. Gabow, "Path-based depth-first search for strong and
biconnected components," Inf. Process. Lett. 74 (2000) 107--114.
.. [2] Robert E. Tarjan, "Depth-first search and linear graph algorithms,"
SIAM J.Comput. 1 (2) (1972) 146--160.
Examples
--------
Example from Gabow's paper [1]_.
>>> vertices = [1, 2, 3, 4, 5, 6]
>>> edges = {1: [2, 3], 2: [3, 4], 3: [], 4: [3, 5], 5: [2, 6], 6: [3, 4]}
>>> for scc in strongly_connected_components_path(vertices, edges):
... print(scc)
...
set([3])
set([2, 4, 5, 6])
set([1])
Example from Tarjan's paper [2]_.
>>> vertices = [1, 2, 3, 4, 5, 6, 7, 8]
>>> edges = {1: [2], 2: [3, 8], 3: [4, 7], 4: [5],
... 5: [3, 6], 6: [], 7: [4, 6], 8: [1, 7]}
>>> for scc in strongly_connected_components_path(vertices, edges):
... print(scc)
...
set([6])
set([3, 4, 5, 7])
set([8, 1, 2])
"""
identified = set()
stack = []
index = {}
boundaries = []
def dfs(v):
index[v] = len(stack)
stack.append(v)
boundaries.append(index[v])
for w in edges[v]:
if w not in index:
# For Python >= 3.3, replace with "yield from dfs(w)"
for scc in dfs(w):
yield scc
elif w not in identified:
while index[w] < boundaries[-1]:
boundaries.pop()
if boundaries[-1] == index[v]:
boundaries.pop()
scc = set(stack[index[v]:])
del stack[index[v]:]
identified.update(scc)
yield scc
for v in vertices:
if v not in index:
# For Python >= 3.3, replace with "yield from dfs(v)"
for scc in dfs(v):
yield scc
def strongly_connected_components_tree(vertices, edges):
"""
Find the strongly connected components of a directed graph.
Uses a recursive linear-time algorithm described by Tarjan [2]_ to find all
strongly connected components of a directed graph.
Parameters
----------
vertices : iterable
A sequence or other iterable of vertices. Each vertex should be
hashable.
edges : mapping
Dictionary (or mapping) that maps each vertex v to an iterable of the
vertices w that are linked to v by a directed edge (v, w).
Returns
-------
components : iterator
An iterator that yields sets of vertices. Each set produced gives the
vertices of one strongly connected component.
Raises
------
RuntimeError
If the graph is deep enough that the algorithm exceeds Python's
recursion limit.
Notes
-----
The algorithm has running time proportional to the total number of vertices
and edges. It's practical to use this algorithm on graphs with hundreds of
thousands of vertices and edges.
The algorithm is recursive. Deep graphs may cause Python to exceed its
recursion limit.
`vertices` will be iterated over exactly once, and `edges[v]` will be
iterated over exactly once for each vertex `v`. `edges[v]` is permitted to
specify the same vertex multiple times, and it's permissible for `edges[v]`
to include `v` itself. (In graph-theoretic terms, loops and multiple edges
are permitted.)
References
----------
.. [1] Harold N. Gabow, "Path-based depth-first search for strong and
biconnected components," Inf. Process. Lett. 74 (2000) 107--114.
.. [2] Robert E. Tarjan, "Depth-first search and linear graph algorithms,"
SIAM J.Comput. 1 (2) (1972) 146--160.
Examples
--------
Example from Gabow's paper [1]_.
>>> vertices = [1, 2, 3, 4, 5, 6]
>>> edges = {1: [2, 3], 2: [3, 4], 3: [], 4: [3, 5], 5: [2, 6], 6: [3, 4]}
>>> for scc in strongly_connected_components_tree(vertices, edges):
... print(scc)
...
set([3])
set([2, 4, 5, 6])
set([1])
Example from Tarjan's paper [2]_.
>>> vertices = [1, 2, 3, 4, 5, 6, 7, 8]
>>> edges = {1: [2], 2: [3, 8], 3: [4, 7], 4: [5],
... 5: [3, 6], 6: [], 7: [4, 6], 8: [1, 7]}
>>> for scc in strongly_connected_components_tree(vertices, edges):
... print(scc)
...
set([6])
set([3, 4, 5, 7])
set([8, 1, 2])
"""
identified = set()
stack = []
index = {}
lowlink = {}
def dfs(v):
index[v] = len(stack)
stack.append(v)
lowlink[v] = index[v]
for w in edges[v]:
if w not in index:
# For Python >= 3.3, replace with "yield from dfs(w)"
for scc in dfs(w):
yield scc
lowlink[v] = min(lowlink[v], lowlink[w])
elif w not in identified:
lowlink[v] = min(lowlink[v], lowlink[w])
if lowlink[v] == index[v]:
scc = set(stack[index[v]:])
del stack[index[v]:]
identified.update(scc)
yield scc
for v in vertices:
if v not in index:
# For Python >= 3.3, replace with "yield from dfs(v)"
for scc in dfs(v):
yield scc
def strongly_connected_components_iterative(vertices, edges):
"""
This is a non-recursive version of strongly_connected_components_path.
See the docstring of that function for more details.
Examples
--------
Example from Gabow's paper [1]_.
>>> vertices = [1, 2, 3, 4, 5, 6]
>>> edges = {1: [2, 3], 2: [3, 4], 3: [], 4: [3, 5], 5: [2, 6], 6: [3, 4]}
>>> for scc in strongly_connected_components_iterative(vertices, edges):
... print(scc)
...
set([3])
set([2, 4, 5, 6])
set([1])
Example from Tarjan's paper [2]_.
>>> vertices = [1, 2, 3, 4, 5, 6, 7, 8]
>>> edges = {1: [2], 2: [3, 8], 3: [4, 7], 4: [5],
... 5: [3, 6], 6: [], 7: [4, 6], 8: [1, 7]}
>>> for scc in strongly_connected_components_iterative(vertices, edges):
... print(scc)
...
set([6])
set([3, 4, 5, 7])
set([8, 1, 2])
"""
identified = set()
stack = []
index = {}
boundaries = []
for v in vertices:
if v not in index:
to_do = [('VISIT', v)]
while to_do:
operation_type, v = to_do.pop()
if operation_type == 'VISIT':
index[v] = len(stack)
stack.append(v)
boundaries.append(index[v])
to_do.append(('POSTVISIT', v))
# We reverse to keep the search order identical to that of
# the recursive code; the reversal is not necessary for
# correctness, and can be omitted.
to_do.extend(
reversed([('VISITEDGE', w) for w in edges[v]]))
elif operation_type == 'VISITEDGE':
if v not in index:
to_do.append(('VISIT', v))
elif v not in identified:
while index[v] < boundaries[-1]:
boundaries.pop()
else:
# operation_type == 'POSTVISIT'
if boundaries[-1] == index[v]:
boundaries.pop()
scc = set(stack[index[v]:])
del stack[index[v]:]
identified.update(scc)
yield scc
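if __name__ == '__main__':
    # Illustrative sketch (not part of the original recipe): all three
    # implementations yield the same components, in the same order, on the
    # example graph from Gabow's paper.
    vertices = [1, 2, 3, 4, 5, 6]
    edges = {1: [2, 3], 2: [3, 4], 3: [], 4: [3, 5], 5: [2, 6], 6: [3, 4]}
    paths = list(strongly_connected_components_path(vertices, edges))
    trees = list(strongly_connected_components_tree(vertices, edges))
    iters = list(strongly_connected_components_iterative(vertices, edges))
    assert paths == trees == iters == [{3}, {2, 4, 5, 6}, {1}]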
|
yipeipei/peppy
|
graph/scc.py
|
Python
|
mit
| 9,280
|
[
"VisIt"
] |
adf686c199f5a303045acefad94ab1644164e10160505ec9abb9a3e55cb29828
|
"""Set of plotting aids for Holoviews."""
from __future__ import division
import holoviews as hv
import numpy as np
import scipy
import matplotlib.pyplot as plt
# get colors
colors = hv.core.options.Cycle.default_cycles['default_colors']
Ncolors = len(colors)
def plot_vector(x, lim=1):
"""Plot the vector x.
Parameters
----------
x : list or array
2D array with x and y coordinate
lim : float
Plotting limit
"""
xs = [0, x[0]]
ys = [0, x[1]]
path = hv.Curve(zip(xs, ys), extents=(-lim, -lim, lim, lim))
point = hv.Scatter(zip(xs[1:], ys[1:]))
return path*point
def plot_vector_3D(x, lim=1, color='black'):
"""Plot the 3D vector x.
Parameters
----------
x : list or array
2D array with x and y coordinate
lim : float
Plotting limit
color : whatever plotting tool accepts
Color of the vector
"""
xs = [0, x[0]]
ys = [0, x[1]]
zs = [0, x[2]]
path = hv.Path3D(zip(xs, ys, zs), extents=(-lim, -lim, -lim, lim, lim, lim)).opts(color=color)
point = hv.Scatter3D(zip(xs[1:], ys[1:], zs[1:])).opts(size=3, color=color)
return path*point
def plot3d(traj, ntraj=1, labels=['x', 'y', 'z'], ax=None):
"""Plot the 3d trajectory in traj in 3D.
Parameters
----------
traj : array
3D array for plotting, should be datapoints*dimensions.
ntraj : int, optional
In how many trajectories to split up the data (can be useful to see
the time dependence of the trajectory)
labels : list of strings, optional
What labels to put on the axes. By default is ['x', 'y', 'z']
ax : matplotlib axis object, optional
If None, will make a new axis.
Example
-------
To plot three 3d subplots, do something like the following:
fig = plt.figure()
ax = fig.add_subplot(131, projection='3d')
plot3d(delayed[0,::1,:], labels=['x(t)','x(t-tau)','x(t-2tau)'], ax=ax)
ax = fig.add_subplot(132, projection='3d')
plot3d(delayed[1,::1,:], labels=['y(t)','y(t-tau)','y(t-2tau)'], ax=ax)
ax = fig.add_subplot(133, projection='3d')
plot3d(delayed[2,::1,:], labels=['z(t)','z(t-tau)','z(t-2tau)'], ax=ax)
    To have interactive 3d plots in Jupyter, use %matplotlib nbagg
"""
# define 3d plot
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# if necessary, split up data, then plot it
data_split = np.array_split(traj, ntraj)
for n in range(ntraj):
xs = data_split[n][:, 0]
ys = data_split[n][:, 1]
zs = data_split[n][:, 2]
ax.plot3D(xs, ys, zs, alpha=1)
# set labels
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
ax.set_zlabel(labels[2])
# ax.set_title('[x,y,z] = ' +str(var*180/pi))
plt.show()
def whisk_compare(Points, labels=None, ref=0, vdims=['score'], group='Bars',
tests=None):
"""Show datapoints as a whisker plot for averages of all distributions.
Show the significance between ref and all other datapoints.
Parameters
----------
Points : list
list of arrays of points to be compared. As [array1, array2, ...].
labels : list of strings
Labels of the distributions, list of form ['points1','points2']
ref : int, optional
Which distribution to use as a reference to test significance of
other distributions against.
vdims : Holoviews dimension object, optional
Should be of form ['label']. Gives the y-axis label.
group : string, optional
The group that the Bars object should belong to
tests : list of string, optional
TO BE IMPLEMENTED
Which tests to use, in order of distributions (skipping the reference
distribution). Should be a list of strings, with the following options:
'ttest_rel' : paired t-test
'ttest_ind' : independent t-test
The list should be of length len(Points)-1
Default test is pairwise test.
Returns
-------
Holoviews Overlay
Whisker plot
"""
ndists = len(Points)
if labels is None:
labels = ['points'+str(i) for i in range(ndists)]
# make box plots
groups = []
for i in range(ndists):
npoints = len(Points[i])
groups += [labels[i] for j in range(npoints)]
allpoints = np.concatenate(Points)
whisk = hv.BoxWhisker((groups, allpoints),
kdims=[' '], vdims=vdims)
# calculate and plot significance
sig_plot = hv.Overlay()
rnge = np.arange(ndists, dtype=int)
rnge = rnge[rnge != ref]
count = 0
for i in rnge:
points1 = Points[ref]
points2 = Points[i]
sign = scipy.stats.wilcoxon(points1, points2)[1]
fontsize = 20
offset = 0.02
if sign < 0.0005:
sig_text = '***'
fontsize = 20
offset = 0.03
elif sign < 0.005:
sig_text = '**'
elif sign < 0.05:
sig_text = '*'
else:
fontsize = 15
offset = 0.03
sig_text = 'n.s.'
# plot significance
maxpoint = max(allpoints)
xref = ref+1
xcom = 1+i
y = maxpoint+.1*(ndists-count-3)
sig_plot *= hv.Curve(zip([xref, xcom], [y, y]),
extents=(0, -maxpoint/3, 5, maxpoint+0.2),
group='significance')(style={'color': 'k'})
sig_plot *= hv.Curve(zip([xref, xref], [y, y-.02]),
group='significance')(style={'color': 'k'})
sig_plot *= hv.Curve(zip([xcom, xcom], [y, y-.02]),
group='significance')(style={'color': 'k'})
xloc = (ref+2+i)/2
yloc = maxpoint+offset+.1*(ndists-count-3)
text = hv.Text(xloc, yloc, sig_text, fontsize=fontsize)
sig_plot *= text
count += 1
return whisk*sig_plot
def bar_compare(Points, labels=None, ref=0, vdims=['score'], group='Bars',
tests=None):
"""Show datapoints and bars for averages of all distributions in Points.
Show the significance between ref and all other datapoints.
Parameters
----------
Points : list
list of arrays of points to be compared. As [array1, array2, ...].
labels : list of strings
Labels of the distributions, list of form ['points1','points2']
ref : int, optional
Which distribution to use as a reference to test significance of
other distributions against.
vdims : Holoviews dimension object, optional
Should be of form ['label']. Gives the y-axis label.
group : string, optional
The group that the Bars object should belong to
tests : list of string, optional
TO BE IMPLEMENTED
Which tests to use, in order of distributions (skipping the reference
distribution). Should be a list of strings, with the following options:
'ttest_rel' : paired t-test
'ttest_ind' : independent t-test
The list should be of length len(Points)-1
Default test is pairwise test.
Returns
-------
Holoviews Overlay
Bars with points overlay
"""
ndists = len(Points)
if labels is None:
labels = ['points'+str(i) for i in range(ndists)]
# make bars
data = [(labels[i], np.mean(Points[i])) for i in range(ndists)]
bars = hv.Bars(data, kdims=[hv.Dimension(' ')], vdims=vdims)
# make points
points_plot = hv.Overlay()
for i in range(ndists):
y = Points[i]
x = np.ones(len(y))*i+.5
points_plot *= hv.Scatter(zip(x, y))
# calculate and plot significance
sig_plot = hv.Overlay()
rnge = np.arange(ndists, dtype=int)
rnge = rnge[rnge != ref]
for i in rnge:
points1 = Points[ref]
points2 = Points[i]
sign = scipy.stats.ttest_rel(points1, points2)[1]
fontsize = 20
offset = 0.175
if sign < 0.0005:
sig_text = 'p<0.0005'
elif sign < 0.005:
sig_text = '**'
elif sign < 0.05:
sig_text = '*'
else:
fontsize = 15
offset = 0.2
sig_text = 'n.s.'
# plot significance
maxpoint = max([max(points1), max(points2)])
sig_plot *= hv.Curve(zip([ref+0.5, .5+i],
[maxpoint+0.15*(ref+0.5-i),
maxpoint+0.15*(ref+0.5-i)]),
extents=(0, 0, 2, 1),
group='significance')(style={'color': 'k'})
sig_plot *= hv.Curve(zip([ref+0.5, ref+0.5],
[maxpoint+0.15*(ref+0.5-i),
maxpoint+0.15*(ref+0.5-i)-.05]),
extents=(0, 0, 2, 1),
group='significance')(style={'color': 'k'})
sig_plot *= hv.Curve(zip([.5+i, .5+i],
[maxpoint+0.15*(ref+0.5-i),
maxpoint+0.15*(ref+0.5-i)-.05]),
extents=(0, 0, 2, 1),
group='significance')(style={'color': 'k'})
xloc = (ref+1.5+i)/2
yloc = maxpoint+offset+.15*(ref-0.5-i)
text = hv.Text(xloc, yloc, sig_text, fontsize=fontsize)
sig_plot *= text
return bars*points_plot*sig_plot
def plotspikes(spiketimes, yoffset, dt, text_offset=None):
"""Plot the spikes in spiketimes.
Also indicates if there are more than one spike within distance dt.
Parameters
----------
spiketimes : array
Array of spiketimes
yoffset : array
The y-offset for plotting spikes
dt : float
Distance dt at which spikes should be indicated as a group
text_offset : array, optional
If given (as [dx,dy]), this gives the offset of the text indicating
the number of spikes, relative to the first spike in a series.
If None, will set it to [-dt/2, 0]
Returns
-------
holoviews Scatter object
Shows the spikes. Can be interfaced with as any normal Scatter object.
"""
# get groups of spikes
small_diffs = np.diff(spiketimes) <= dt
start_ends = np.diff(np.concatenate([[0], small_diffs, [0]]))
starts = np.where(start_ends == 1)[0]
ends = np.where(start_ends == -1)[0]
numbers = ends-starts + 1 # number of spikes per start
# plot spikes
fig = hv.Scatter(zip(spiketimes, np.ones(len(spiketimes))*yoffset))
# plot spike numbers
if text_offset is None:
dx, dy = [-dt/2, 0]
else:
dx, dy = text_offset
for i, start in enumerate(starts):
fig *= hv.Text(spiketimes[start]+dx, yoffset+dy, str(numbers[i]))
return fig
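# Illustrative sketch (not part of the original module) of the run-detection
# trick used in `plotspikes`: inter-spike intervals <= dt are marked in a
# boolean mask, and a diff over the zero-padded mask gives the start and end
# of each group of near-coincident spikes.
def _demo_spike_groups():
    spiketimes = np.array([0.0, 0.1, 0.15, 1.0, 2.0, 2.05])
    dt = 0.2
    small_diffs = np.diff(spiketimes) <= dt
    start_ends = np.diff(np.concatenate([[0], small_diffs, [0]]))
    starts = np.where(start_ends == 1)[0]
    ends = np.where(start_ends == -1)[0]
    numbers = ends - starts + 1
    assert list(starts) == [0, 4] and list(numbers) == [3, 2]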
def hist(x, bins=10, group='hist'):
"""Make a Holoviews histogram for the data provided.
Parameters
----------
x : array
data
bins : int or array, optional (default: 10)
        bins as normally provided to numpy.histogram
Returns
-------
Holoviews.histogram
Holoviews histogram object
"""
counts, edges = np.histogram(x, bins)
return hv.Histogram(counts, edges, group=group)
def ScaleBars(x=0, y=0, scalex=1, scaley=1, labeldx=0.035,
labeldy=2, labelx='x', labely='y', w=1, color='k'):
"""Make scalebars using HoloViews' Curve object, and puts them at (x,y).
Parameters
----------
x : float (0)
The x start position
y : float (0)
The y start position
labeldx,labeldy : floats
The offsets of the x and y labels compared to the scale bars ends.
The offset is away from the middle
scalex : float (1)
The scale of the horizontal scalebar
scaley : float (1)
The scale of the vertical scalebar
labelx : string ('x')
The label for the x scale
labely : string ('y')
The label for the y scale
w : float (1)
Width of the bars
color : string ('k')
Color of scale
Returns
-------
A holoviews Curve object with both the horizontal and vertical
scalebars
"""
# define horizontal scalebar
bar_x = hv.Curve(zip([x, x+scalex], [y, y]))(style={'color': color,
'linewidth': w})
# define horizontal label
label_x = hv.Text(x+scalex/2, y-labeldx, labelx)(style={'color': color})
# define vertical scalebar
bar_y = hv.Curve(zip([x, x], [y, y+scaley]))(style={'color': color,
'linewidth': w})
# define vertical label
label_y = hv.Text(x-labeldy, y+scaley/2, labely,
rotation=90)(style={'color': color})
# return overlay
return bar_x*label_x*bar_y*label_y
def show_connectivity(W):
""" Illustrate the connectivity matrix W, with a simple graph.
On the left will be all source neurons, on the right all target neurons.
Parameters
----------
W : array
The weight matrix (should be a square matrix)
Output
------
Holoviews overlay
The plotting object
"""
# plot neurons on left and right
N, M = W.shape
mx = np.max([N, M])
shiftmx = (mx-1)/2.
shiftN = (N-1)/2.
shiftM = (M-1)/2.
extents = (-.1, -.1*mx-shiftmx, 1.1, mx-1+.1*mx-shiftmx)
opts = hv.opts.Scatter(color='k', size=100/mx)
out = hv.Scatter(zip(np.zeros(N), np.arange(N)-shiftN),
extents=extents).opts(opts)
out *= hv.Scatter(zip(np.ones(M), np.arange(M)-shiftM),
extents=extents).opts(opts)
# add forward connections
for i in range(N):
for j in range(M):
w = W[i, j]
alpha = abs(w)/abs(W).max()
if w < 0:
opts = hv.opts.Curve(color='b', alpha=alpha)
elif w > 0:
opts = hv.opts.Curve(color='r', alpha=alpha)
else:
opts = hv.opts.Curve(color='k', alpha=alpha)
out *= hv.Curve(zip([0, 1], [i-shiftN, j-shiftM])).opts(opts)
return out
def rectangle(x=0, y=0, width=1, height=1):
"""Gives an array with a rectangle shape, for the plot_frs_block func."""
return np.array([(x,y), (x+width, y), (x+width, y+height), (x, y+height)])
def plot_frs_blocks(frs, norm, unit=''):
""" Plots the firing rates in frs as blocks, and adds a star where the minima are
Parameters
----------
frs : array
2D array with firing rate sizes
norm : float
normalization constant (frs will be scaled as 0.4*frs/norm)
unit : string
what unit to use on axes. Need to do own formatting, for example ' (deg)' will make the
x-label 'Center orientation (deg)'
Returns
-------
Holoviews Overlay
The elements plotted
"""
# init output
out = hv.Overlay()
# normalize frs
baseheight=.4
frs = baseheight*frs/norm
# draw boxes
Nx, Ny = frs.shape
for i in range(Ny):
for j in range(Nx):
rect = rectangle(j-0.4,i*0.5-frs[j,i]/2,0.8,frs[j,i])
out *= hv.Polygons([rect],extents=(-0.6,-0.25,3.5,1.1),kdims=['Surround orientation'+unit,'Center orientation'+unit])
# add stars
argmins = np.argmin(frs,axis=0)
out*= hv.Scatter(zip(argmins,[0,0.5,1]),label='Strongest suppression')
# add scalebar
fr = int(norm/2)
height = baseheight*fr/norm
out*= hv.Curve(zip([0,0],[0,height]))
out*= hv.Text(0, height, str(fr)+'Hz')
return out
def plot_bounds_z2(D, offset=(0,0,0), length=1, group='Curve', beta=0):
    ''' Plots the bounding box, for a given offset in the z direction.
    For the offset in z, the net size of the bounding box is calculated.
Parameters
----------
D : array
Decoding weights, 3D
offset : list/tuple/array
[x,y,z] offset of the bounding box
length : float
        length of the bounding box vertices
    beta : float
        network cost parameter (an added parameter, assumed 0 if not given)
Returns
-------
HoloViews overlay
The boundary box, offset by 'offset'
HoloViews overlay
        The vectors determining the boundary box, offset by 'offset'
'''
    # infer the number of neurons from the decoder
    N = D.shape[1]
    # get offsets
    x, y, z = offset
    # get thresholds for 2D box at height 0
    D2 = D[:2, :]
    Omeg = np.dot(D2.T, D2) + np.identity(N)*beta
    T = np.diag(Omeg)
    # T = np.sqrt(2)*T
# plot projection vectors and bounding box
angle = np.pi/2
rotation = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
projectVs = hv.Overlay()
bounds = hv.Overlay()
for i in range(N):
v = np.copy(D2[:, i])
norm = np.linalg.norm(v)
v/= norm
scale = T[i]/norm**1-z
if scale<0: scale=0
prop = scale*norm**1/T[i]
v*= scale
projectVs *= hv.Curve(zip([x, x+v[0]], [y, y+v[1]]), group=group)
v90 = np.dot(rotation, v)
v90*= length*prop/np.linalg.norm(v90)
bounds *= hv.Curve(zip([x+v[0]+v90[0], x+v[0]-v90[0]],
[y+v[1]+v90[1], y+v[1]-v90[1]]), group=group)
return bounds, projectVs
def plot_bounds(D, beta=0, offset=(0,0), length=1, widths=None, alphas=None):
''' Plots spike coding network bounding box in 2D.
Parameters
----------
D : array
Decoding weights
beta : float
network cost parameter
offset : list/tuple/array
[x,y] offset of the bounding box
length : float
length of the bounding box vertices
widths : array
        Array of linewidths for each vertex
    alphas : array
        Array of alphas for each vertex
Returns
-------
HoloViews overlay
The boundary box, offset by 'offset'
HoloViews overlay
        The vectors determining the boundary box, offset by 'offset'
'''
    # infer some parameters
    N = D.shape[1]
    if widths is None: widths = np.ones(N)*2
    if alphas is None: alphas = np.ones(N)
# get offsets
x, y = offset
# get thresholds
Omeg = np.dot(D.T, D) + np.identity(N)*beta
T = np.diag(Omeg)/2
# plot projection vectors and bounding box
angle = np.pi/2
rotation = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
projectVs = hv.Overlay()
bounds = hv.Overlay()
for i in range(N):
v = np.copy(D[:, i])
norm = np.linalg.norm(v)
v/= norm**2
v*= T[i]
projectVs *= hv.Curve(zip([x, x+v[0]], [y, y+v[1]]))
v90 = np.dot(rotation, v)
v90*= length/np.linalg.norm(v90)
optsbokeh = hv.opts.Curve(backend='bokeh', line_width=widths[i],
alpha=alphas[i], color=colors[i%len(colors)])
optsmat = hv.opts.Curve(linewidth=widths[i], backend='matplotlib',
alpha=alphas[i], color=colors[i%len(colors)])
bounds *= hv.Curve(zip([x+v[0]+v90[0], x+v[0]-v90[0]],
[y+v[1]+v90[1], y+v[1]-v90[1]]),
kdims='x1 error', vdims='x2 error').opts(
optsbokeh, optsmat)
return bounds, projectVs
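# Illustrative sketch (not part of the original module): for a decoder whose
# columns are unit vectors and beta = 0, the thresholds T_i = (D^T D)_ii / 2
# reduce to 1/2, so every face of the bounding box sits at distance 1/2 from
# the origin along its neuron's decoding direction.
def _demo_box_thresholds():
    angles = np.linspace(0, 2 * np.pi, 6, endpoint=False)
    D = np.vstack([np.cos(angles), np.sin(angles)])  # 2 x 6 unit decoder
    T = np.diag(np.dot(D.T, D)) / 2
    assert np.allclose(T, 0.5)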
def plot_bounds_z(D, T, beta=0, offset=(0,0,0), length=1, group='Curve',
widths=None, alphas=None):
    ''' Plots the bounding box, for a given offset in the z direction.
    This function is specifically for bounding boxes which are cone-shaped.
    For the offset in z, the net size of the bounding box is calculated.
Parameters
----------
D : array
Decoding weights, 3D
T : array
array of spiking thresholds
beta : float
network cost parameter
offset : list/tuple/array
[x,y,z] offset of the bounding box
length : float
length of the bounding box vertices
widths : array
Array of linewidths for each vertice
alphas : array
Array of alphas for each vertice
Returns
-------
HoloViews overlay
The boundary box, offset by 'offset'
HoloViews overlay
        The vectors determining the boundary box, offset by 'offset'
'''
# infer some parameters
N = D.shape[1]
if widths is None: widths = np.ones(N)*2
    if alphas is None: alphas = np.ones(N)
# get offsets
x, y, z = offset
# get thresholds for 2D box at height 0
D2 = D[:2, :]
# plot projection vectors and bounding box
angle = np.pi/2
rotation = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
projectVs = hv.Overlay()
bounds = hv.Overlay()
for i in range(N):
v = np.copy(D2[:, i])
norm = np.linalg.norm(v)
v/= norm
scale = T[i]/norm**1-z
if scale<0: scale=0
prop = scale*norm**1/T[i]
v*= scale
projectVs *= hv.Curve(zip([x, x+v[0]], [y, y+v[1]]), group=group)
v90 = np.dot(rotation, v)
v90*= length*prop/np.linalg.norm(v90)
        optsbokeh = hv.opts.Curve(backend='bokeh', line_width=widths[i],
                                  alpha=alphas[i], color=colors[i%len(colors)])
        optsmat = hv.opts.Curve(linewidth=widths[i], backend='matplotlib',
                                alpha=alphas[i], color=colors[i%len(colors)])
        bounds *= hv.Curve(zip([x+v[0]+v90[0], x+v[0]-v90[0]],
                               [y+v[1]+v90[1], y+v[1]-v90[1]]),
                           group=group).opts(optsbokeh, optsmat)
return bounds, projectVs
def animate_error_box_2D(D, beta, E, x, o, Tstart=0, Tend=None,
boundlength=0.5, trail_length=40, step_size=10,
spike_tau=.3, dt=0.01):
"""For spike coding networks (SCNs), animates the error inside bounding box.
Parameters
----------
D : array
2D array which is the SCN decoding matrix
beta : float
SCN cost parameter
E : array
2D array of the error
x : array
2D array of the actual stimulus
o : array
N by nT array of 0s and 1s indicating spikes
    Tstart : int
Starting timestep
Tend : int
Final timestep (if None, will use final timestep)
boundlength : float
How long to make each bounding edge
trail_length : int
How many timesteps to use for the error trail
step_size : int
How many timesteps to skip for each frame
spike_tau/dt : floats
Determine time constant on increased line thickness with spikes
Output
------
Holoviews HoloMap
"""
# get some parameters
if Tend is None: Tend = E.shape[1]
    framenums = range(Tstart, Tend, step_size)
N = D.shape[1]
Omeg = np.dot(D.T, D) + np.identity(N)*beta
T = np.diag(Omeg)/2
    # turn spikes into line thicknesses
s = np.zeros(o.shape)
for i in range(o.shape[1]-1):
s[:, i+1]=s[:, i]+dt*(-s[:, i]/spike_tau+o[:, i]/dt)
# based on spiking, determine bound widths (so for a spike, a cell's
# bound changes size)
widths = {f: np.ones(D.shape[1])*3 for f in framenums}
alphas = {f: np.ones(D.shape[1]) for f in framenums}
alpha_max = 0
# for f in framenums:
# widths[f] += s[:, f]*2
# alphas[f] += s[:, f]
# if alphas[f].max() > alpha_max:
# alpha_max = alphas[f].max()
# for f in framenums:
# alphas[f]/=alpha_max
# Define the animation frames
frames = {f: hv.Scatter(zip([E[0, f]], [E[1, f]]),
kdims='x1 error', vdims='x2 error').opts(color='k')
for f in framenums}
    frames = {f: frames[f]*hv.Curve(E[:2, max(0, f+1-trail_length):f+1].T).opts(
                  color='k')
              for f in framenums}
frames = {f: frames[f]*hv.Scatter(
zip([x[0, f]], [x[1, f]])
).opts(color='w', marker='x')
for f in framenums}
frames = {f: frames[f]*plot_bounds(D, beta, (0, 0),
length=boundlength,
widths=widths[f],
alphas=alphas[f])[0]
for f in framenums}
# return animation
return hv.HoloMap(frames)*hv.Scatter(zip([0], [0]), group='origin')
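# Hedged usage sketch (illustrative; E, x and o are assumed outputs of an SCN
# simulation defined elsewhere): build the animation and scrub through it with
# the HoloMap slider.
#   anim = animate_error_box_2D(D, beta=0.1, E=E, x=x, o=o, step_size=20)
#   anim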
def animate_error_box_z(D, beta, E, x, o, Tstart=0, Tend=None,
boundlength=0.5, trail_length=40, step_size=10,
spike_tau=.3, dt=0.01):
"""For spike coding networks (SCNs), animates the error inside bounding box.
Parameters
----------
D : array
2D array which is the SCN decoding matrix
beta : float
SCN cost parameter
E : array
3D array of the error
x : array
2D array of the actual stimulus
o : array
N by nT array of 0s and 1s indicating spikes
Tstart : int
Starting timestep
Tend : int
Final timestep (if None, will use final timestep)
boundlength : float
How long to make each bounding edge
trail_length : int
How many timesteps to use for the error trail
step_size : int
How many timesteps to skip for each frame
spike_tau/dt : floats
Determine time constant on increased line thickness with spikes
Output
------
Holoviews HoloMap
"""
# get some parameters
if Tend is None: Tend = E.shape[1]
    framenums = range(Tstart, Tend, step_size)
N = D.shape[1]
Omeg = np.dot(D.T, D) + np.identity(N)*beta
T = np.diag(Omeg)/2
    # turn spikes into line thicknesses
s = np.zeros(o.shape)
for i in range(o.shape[1]-1):
s[:, i+1]=s[:, i]+dt*(-s[:, i]/spike_tau+o[:, i]/dt)
# based on spiking, determine bound widths (so for a spike, a cell's
# bound changes size)
widths = {f: np.ones(D.shape[1])*2 for f in framenums}
alphas = {f: np.ones(D.shape[1]) for f in framenums}
alpha_max = 0
for f in framenums:
widths[f] += s[:, f]*2
alphas[f] += s[:, f]
if alphas[f].max() > alpha_max:
alpha_max = alphas[f].max()
for f in framenums:
alphas[f]/=alpha_max
# Define the animation frames
frames = {f: hv.Scatter(zip([E[0, f]], [E[1, f]]),
kdims='x1 error', vdims='x2 error')
for f in framenums}
    frames = {f: frames[f]*hv.Curve(E[:2, max(0, f+1-trail_length):f+1].T)
              for f in framenums}
frames = {f: frames[f]*hv.VLine(x[0, f])*hv.HLine(x[1, f])
for f in framenums}
frames = {f: frames[f]*plot_bounds_z(D, T, beta, (0, 0, E[2, f]),
length=boundlength,
widths=widths[f],
alphas=alphas[f])[0]
for f in framenums}
# return animation
return hv.HoloMap(frames)*hv.Scatter(zip([0], [0]), group='origin')
def animate_signal_tracking(x, x_, times, Tstart=0, Tend=None, step_size=10):
"""For spike coding networks (SCNs), animates signal tracking.
Parameters
----------
x : array
2D array of the actual stimulus
x_ : array
2D array of the estimated stimulus
times : array
array of times
Tstart : int
Starting timestep
Tend : int
Final timestep (if None, will use final timestep)
step_size : int
How many timesteps to skip for each frame
Output
------
Holoviews HoloMap
"""
    # get some parameters
    if Tend is None: Tend = x.shape[1]
    framenums = range(Tstart, Tend, step_size)
    # Define the animation frames
    style_x = {'color':'k', 'alpha':0.5, 'linestyle':'--'}
    style_x_ = {'color':'k', 'alpha':1}
    # within a frame plot every point; frame stepping is handled by framenums
    plot_step = 1
    frames = {f: hv.Curve(
                  zip(times[0:f:plot_step], x[0, 0:f:plot_step])
                  ).opts(**style_x) for f in framenums}
    frames = {f: frames[f]*hv.Curve(
                  zip(times[0:f:plot_step], x_[0, 0:f:plot_step])
                  ).opts(**style_x_) for f in framenums}
    for s in range(1, x.shape[0]):
        frames = {f: frames[f]*hv.Curve(
                      zip(times[0:f:plot_step], x[s, 0:f:plot_step])
                      ).opts(**style_x) for f in framenums}
        frames = {f: frames[f]*hv.Curve(
                      zip(times[0:f:plot_step], x_[s, 0:f:plot_step])
                      ).opts(**style_x_) for f in framenums}
# return animation
return hv.HoloMap(frames)
def spike_plot(o, times, base_offset, offset, colors=colors):
"""Plots a set of neurons' spikes, given a 2d array of 0's and 1's.
Parameters
----------
o : array
2D-array of 0's and 1's (1's being spikes),
of size (n_cells, n_timepoints)
times : array
array of times
base_offset : float
y-axis offset of all spikes
offset : float
y-axis offset between each row of spikes
colors : list of strings
Color list to cycle through when plotting the neurons
Returns
-------
Holoviews Overlay
An overlay with all the spikes shown
"""
# make spike plot animation
out = hv.Overlay()
for i in range(o.shape[0]):
spiketimes = times[np.where(o[i, :]==1)[0]]
if len(spiketimes)>0:
opts = hv.opts.Scatter(color=colors[i%len(colors)])
out *= hv.Scatter(
zip(spiketimes, np.ones(len(spiketimes))*offset*i+base_offset),
kdims='Time (s)',
vdims='Neuron', group='spikes').opts(opts)
else:
opts = hv.opts.Scatter(color='w', alpha=0)
out *= hv.Scatter([],
kdims='Time (s)',
vdims='Neuron', group='spikes').opts(opts)
return out
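# Hedged usage sketch (illustrative): raster of 3 randomly spiking neurons
# over 1 s, rows stacked one unit apart starting at y=0.
#   o = (np.random.rand(3, 1000) < 0.01).astype(int)
#   times = np.linspace(0, 1, 1000)
#   raster = spike_plot(o, times, base_offset=0, offset=1)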
def spike_anim(o, times, base_offset, offset, Tstart=0, Tend=None,
step_size=10):
"""Animates a set of neurons' spikes, given a 2d array of 0's and 1's.
Parameters
----------
o : array
2D-array of 0's and 1's (1's being spikes),
of size (n_cells, n_timepoints)
times : array
array of times
base_offset : float
y-axis offset of all spikes
offset : float
y-axis offset between each row of spikes
Tstart : int
Starting timestep
Tend : int
Final timestep (if None, will use final timestep)
step_size : int
How many timesteps to skip for each frame
Returns
-------
Holoviews Overlay
An overlay with all the spikes shown
"""
# get some parameters
if Tend is None: Tend = o.shape[1]
    framenums = range(Tstart, Tend, step_size)
# Define the animation frames
frames = {f: hv.Curve([])*spike_plot(
o[:, 0:f], times[0:f], base_offset, offset
) for f in framenums}
# return animation
return hv.HoloMap(frames)
def GiveBoundLines(D, ref, lim):
"""Gives the boundary lines for all neurons with weights D (assuming a 2d encoded variable)
Parameters
----------
D : array
Standard SCN decoding matrix (for 2D signals)
ref : float
Which value the voltage should be at (usually the threshold)
    lim : float
What limit to use for x- and y- axes
Returns
-------
array
N by 2 array with x-coordinates
array
N by 2 array with y-coordinates
"""
N = D.shape[1]
Dzeros = D==0
D[Dzeros]=1e-20
xy = np.linspace(-lim, lim, 2)
# find naive coordinates (x to y coords)
Xs = np.array([xy for n in range(N)])
Ys = np.array([(ref-D[0, n]*xy)/D[1, n] for n in range(N)])
# for all out of bounds (oob), find y to x coords instead
oob_x, oob_y = np.where(abs(Ys)>lim)
for i in range(len(oob_x)):
nx, ny = oob_x[i], oob_y[i]
y = np.sign(Ys[nx, ny])*lim
Xs[nx, ny] = (ref-D[1, nx]*y)/D[0, nx]
Ys[nx, ny] = y
D[Dzeros]=0
return Xs, Ys
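# Hedged usage sketch (illustrative): for an identity decoder the two bounds
# are (numerically) the vertical line x=1 and the horizontal line y=1.
#   D = np.array([[1., 0.], [0., 1.]])
#   Xs, Ys = GiveBoundLines(D, ref=1.0, lim=2.0)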
def findIntersection(Xs, Ys):
"""Finds the intersection between two lines.
From https://stackoverflow.com/questions/20677795/how-do-i-compute-the-intersection-point-of-two-lines-in-python
Parameters
----------
Xs, Ys : arrays
2 by 2 arrays, first dimension is neurons, second x and y axis
Returns
-------
np.array
the x and y coordinates of the intersection
"""
x1,x2 = Xs[0, :]
x3,x4 = Xs[1, :]
y1,y2 = Ys[0, :]
y3,y4 = Ys[1, :]
px= ( (x1*y2-y1*x2)*(x3-x4)-(x1-x2)*(x3*y4-y3*x4) ) / ( (x1-x2)*(y3-y4)-(y1-y2)*(x3-x4) )
py= ( (x1*y2-y1*x2)*(y3-y4)-(y1-y2)*(x3*y4-y3*x4) ) / ( (x1-x2)*(y3-y4)-(y1-y2)*(x3-x4) )
return np.array([px, py])
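# Worked example (added for illustration): the lines y=x, through (0,0) and
# (1,1), and y=1-x, through (0,1) and (1,0), intersect at (0.5, 0.5).
#   Xs = np.array([[0., 1.], [0., 1.]])
#   Ys = np.array([[0., 1.], [1., 0.]])
#   findIntersection(Xs, Ys)  # -> array([0.5, 0.5])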
def findAllIntersects(D, ref, lim):
"""Finds all intersections for point sets Xs and Ys.
Only returns the inner intersections.
Parameters
----------
D : array
Standard SCN decoding matrix (for 2D signals)
ref : float
Which value the voltage should be at (usually the threshold)
lim : float
What limit to use for x- and y- axes
Output
------
array
2 by n array, where n is the number of intersections. These are the cross-section coordinates.
array
n 2 by 2 array, where n is the number intersections. These are the line segments. These are sorted by neuron.
"""
# get bounding lines
Xs, Ys = GiveBoundLines(D, ref, lim)
# get intersects
N = Xs.shape[0]
intersects = np.array([findIntersection(Xs[[n1, n2], :], Ys[[n1, n2], :]) for n1 in range(N) for n2 in range(n1+1,N)])
    # throw away any that are past threshold, so can't be part of the bounding box
    past_threshold = np.sum(np.dot(D.T, intersects.T) > ref + 1e-14, axis=0)  # small tolerance for floating point errors
intersects = intersects[past_threshold==0, :]
# order by orientation, and by neuron number
sort = np.argsort(np.arctan2(intersects[:, 0], intersects[:, 1]))
intersects = intersects[sort, :]
# find linesegments
intersects_lines = np.array([[intersects[n1%N, :], intersects[(n1+1)%N, :]] for n1 in range(-1,N-1)])
return intersects, intersects_lines
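# Hedged usage sketch (illustrative): three unit-norm decoders 120 degrees
# apart yield a triangular bounding box; `pts` holds its 3 corners and
# `segs` one edge per neuron.
#   angles = np.arange(3)*2*np.pi/3
#   D = np.vstack([np.cos(angles), np.sin(angles)])
#   pts, segs = findAllIntersects(D, ref=1.0, lim=5.0)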
def plot_bounding_curves(D, ref, lim, offsets=None, linewidths=None):
"""Plots a bounding box from a set of lines.
Parameters
----------
D : array
Standard SCN decoding matrix (for 2D signals)
ref : float
Which value the voltage should be at (usually the threshold)
lim : float
What limit to use for x- and y- axes
offsets : array
[x, y] offsets
linewidths : array
Linewidths for each neural bound
Returns
-------
Holoviews Overlay
An overlay plot
"""
# give default offsets
if offsets is None:
offsets = [0, 0]
if linewidths is None:
opts = [hv.opts.Curve()]*D.shape[1]
else:
opts = [hv.opts.Curve(linewidth=linewidths[n]) for n in range(D.shape[1])]
# get intersect lines
intersects, intersect_lines = findAllIntersects(D, ref, lim)
N = intersect_lines.shape[0]
fig = hv.Overlay()
for n in range(N):
x = intersect_lines[n, :, 0]+offsets[0]
y = intersect_lines[n, :, 1]+offsets[1]
fig *= hv.Curve(zip(x, y), extents=(-lim, -lim, lim, lim)).opts(opts[n])
return fig
def plot_bounding_curves_z(intersect_lines, height):
"""Plots a bounding box from a set of lines in 3D at particular height.
Parameters
----------
intersect_lines : array
Array as returned by findAllIntersects()
Returns
-------
Holoviews Overlay
An overlay plot
"""
N = intersect_lines.shape[0]
fig = hv.Overlay()
for n in range(N):
x = intersect_lines[n, :, 0]*abs(1-height)
y = intersect_lines[n, :, 1]*abs(1-height)
z = np.ones(len(x))*height
fig *= hv.Path3D((x, y, z)).opts(hv.opts.Path3D(line_width=3))
return fig
def plot_cone_frame(D, ref, lim, depth, extents=None):
"""Plots a 3D bounding-cone using HoloViews.
Parameters
----------
D : array
Decoding weights, 3D
ref : float
Which value the voltage should be at (usually the threshold)
    lim : float
What limit to use for x- and y- axes for determining bounding box
depth : float
Until what depth to plot the bounding cone
extents : tuple
Plotting extents (xmin, ymin, zmin, xmax, ymax, zmax)
Returns
-------
Holoviews Overlay
3D plotting object, only works in matplotlib or plotly
"""
# find intersection points
    intersects, intersects_lines = findAllIntersects(D[:2, :], ref, lim)
N = D.shape[1]
# plot the contour lines
fig = hv.Overlay()
for n in range(N):
x = np.concatenate([[0], intersects_lines[n, :, 0]*(1+depth)])
y = np.concatenate([[0], intersects_lines[n, :, 1]*(1+depth)])
z = np.ones(len(x))*-depth
z[0] = ref
fig*=hv.Path3D((x, y, z), extents=extents).opts(color='gray')
return fig
def plot_spike_3D(o, E, D, ref, lim, color=None):
""" Plots a spike on 3D structure.
Inputs
------
o, E, D, ref, lim : TODO
TODO
color : list
List of colors to use"""
if color is None:
color = colors
Ncolors = len(color)
N = D.shape[1]
intersects, intersect_lines = findAllIntersects(D[:2, :], ref, lim)
zs = E[-1, :]
spike_ids, spike_times = np.where(o)
fig = hv.Overlay()
for i, time in enumerate(spike_times):
n = spike_ids[i]
x = intersect_lines[(n)%N, :, 0]*abs(1-zs[time])
y = intersect_lines[(n)%N, :, 1]*abs(1-zs[time])
z = np.ones(len(x))*zs[time]
fig *= hv.Path3D((x, y, z)).opts(hv.opts.Path3D(line_width=3, color=color[n%Ncolors]))
return fig
def plot_stim_and_ests(x, x_, times, offset, Mplot, colors=None, backend='bokeh'):
"""Plots the stimulus and the estimates on top of eachother with offsets.
Parameters
----------
x, x_ : arrays
Input and outputs
times : array
Time points for x-axis
offset : float
The offset between each curve (per stimulus dimension)
Mplot : int
How many dimensions to plot
    colors : list of color strings or codes
        Optionally give a list of colors instead of the default colors
    backend : str
        Plotting backend, 'bokeh' or 'matplotlib'
Outputs
-------
Holoviews Overlay
The curves plotted together
"""
if colors is None:
colors = hv.core.options.Cycle.default_cycles['default_colors']
ncolors = len(colors)
fig = hv.Overlay()
for m in range(Mplot):
if backend=='bokeh':
fig *= hv.Curve(zip(times, x_[m, :]-m*offset), kdims='Time',
group='readout').opts(color=colors[m%ncolors])
fig *= hv.Curve(zip(times, x[m, :-1]-m*offset), kdims='Time',
group='input').opts(
hv.opts.Curve(color='k',line_dash='dashed'))
elif backend=='matplotlib':
fig *= hv.Curve(zip(times, x_[m, :]-m*offset), kdims='Time',
group='readout').opts(color=colors[m%ncolors])
fig *= hv.Curve(zip(times, x[m, :-1]-m*offset), kdims='Time',
group='input').opts(
hv.opts.Curve(color='k',linestyle='--'))
else:
raise NotImplementedError('No other backend explicitly implemented. Plotly might work with either though.')
return fig
|
swkeemink/PythonTools
|
swktools/plotting.py
|
Python
|
gpl-2.0
| 40,079
|
[
"NEURON"
] |
8be2681f5690b8f5b8795aba40b6137718bdb1d82c09e63419b7b78d2cd7bf4d
|
#!/usr/bin/python
#prompt-core-processes
#daniel.antony.pass@googlemail.com
###################################################################
## This script runs the backbone of prompt. External packages ##
## are called from here but additional scripts which are refered ##
## to can be found in the ./scripts folder. ##
###################################################################
import os
import sys
import re
from subprocess import call
import shutil
import subprocess
from Bio import SeqIO
from Bio.Blast.Applications import NcbiblastnCommandline
indir = sys.argv[1]
sample = sys.argv[2]
tmpdir = sys.argv[3]
database = sys.argv[4]
blast_homology = sys.argv[5]
multicore_no = sys.argv[6]
script_dir = sys.argv[7]
run_mode = sys.argv[8]
webdir = sys.argv[9]
in_fasta = sample + ".fas"
orig_stdout = sys.stdout
bufsize = 1
f = open(webdir + 'tmp/log.txt', 'a', bufsize)
sys.stdout = f
seq_no = 0
lengths = []
taxa_list = ("refseq","species","genus","family","class")
divider = "<br>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~<br>"
try:
    open_fasta = open(indir + in_fasta, "rU")
except IOError:
    print "Cannot find input: " + indir + in_fasta
    sys.exit(1)
def main():
process_infiles(open_fasta)
if (run_mode == "both") or (run_mode == "processing_only"):
process_sample()
if (run_mode == "both") or (run_mode == "analysis_only"):
analyse_sample()
pop_website()
def blastn(fas, db):
print("blasting cdhit sequences against the " + db + " database")
blast_in = (fas)
blast_out = (tmpdir + "blast_files/" + sample + ".blast")
blastn_cline = NcbiblastnCommandline(query=blast_in, db=db, evalue=0.001, outfmt=6, out=blast_out, num_threads=multicore_no)
stdout, stderr = blastn_cline()
blast_err_log = open(tmpdir + "blast_err.txt", "w")
blast_stdout_log = open(tmpdir + "blast_stdout.txt", "w")
blast_err_log.write(stderr)
blast_stdout_log.write(stdout)
return blast_out
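# Illustrative note (added): with outfmt=6 the .blast file is the standard
# 12-column tab-separated BLAST table, one hit per line
# (qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore),
# which is what the downstream filter_blast.py step is assumed to consume.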
def cdhit(fas):
cdhit_in = (indir + fas)
global cdhit_out
cdhit_out = (tmpdir + "cdhit_files/" + fas + "_cdhitout.fa")
cd_stdout = (tmpdir + "cdhit_files/ cdhit_stdout.txt")
cd_stderr = (tmpdir + "cdhit_files/ cdhit_sterr.txt")
call(["cd-hit-454","-i",cdhit_in,"-o",cdhit_out, "-c", "0.99"])
#subprocess.Popen(["cd-hit-454","-i",cdhit_in,"-o",cdhit_out], stdout=subprocess.PIPE)
call([script_dir + "/parse_cd-hit.py", cdhit_out + ".clstr", tmpdir])
global cdhit_clusters
cdhit_clusters = (cdhit_out + ".clstr.parse")
return cdhit_out
def process_infiles(open_fasta):
#process infiles
print divider
print "Processing infile for sample" + sample
for record in SeqIO.parse(open_fasta, "fasta"):
lengths.append(len(record.seq))
global seq_no
seq_no = len(lengths)
print "Average length of", seq_no, "input sequences is ", sum(lengths)/seq_no, "base pairs"
def process_sample():
#cd-hit
print divider
print "Passing to CD-HIT for data reduction"
cdhit_fas = cdhit(in_fasta)
#do blast
print divider
print "Passing to blast for taxonomic assignment"
blast_out = blastn(cdhit_fas, database)
print divider
print "Filtering Blast results at " + str(blast_homology) + "% required database match"
print "Filtering " + blast_out
call([script_dir + "/filter_blast.py", blast_out, str(blast_homology)])
def analyse_sample():
cdhit_cluster_file = tmpdir + "cdhit_files/" + in_fasta + "_cdhitout.fa.clstr.parse"
#Create abundance files
print divider
print "Converting blast output into abundance files"
call(["mkdir", tmpdir + "abundance_files/" + sample])
call(["perl", script_dir + "create_abundance_files.pl", str(sample) + ".blast_filter", str(seq_no), tmpdir, cdhit_cluster_file])
#build web files
print divider
print "converting output files into html webfiles"
call(["mkdir", tmpdir + "html_files/" + sample])
for level in taxa_list:
print "Generating html file: " + sample + ":" + level
call(["perl", script_dir + "/generate_html.pl", tmpdir, script_dir, sample, level])
def pop_website():
#Populate website
print divider
print "Copying files to the webserver"
copyDirectory(tmpdir + 'html_files/' + sample, webdir + '/analyses/pie/NGS/' + sample)
copyDirectory(tmpdir + 'abundance_files/' + sample, webdir + '/analyses/abun/NGS/' + sample)
def copyDirectory(src, dest):
try:
shutil.copytree(src, dest)
# Directories are the same
except shutil.Error as e:
print('Directory not copied. Error: %s' % e)
# Any error saying that the directory doesn't exist
except OSError as e:
print('Directory not copied. Error: %s' % e)
if __name__ == "__main__":
main()
|
passdan/prompt
|
scripts/prompt_sample.py
|
Python
|
gpl-3.0
| 4,862
|
[
"BLAST"
] |
6183f3800cc3ec61ffd5782f185e91445693578e772ee7fef8a4d66d5b98ca8c
|
import os
import sys
import pkgutil
def LoadFolder(inputFolderPath):
infilePathList=[]
for root, dirs, files in os.walk(inputFolderPath):
for file in files:
infilePathList.append(inputFolderPath+file)
return infilePathList
def Loader(mainPath0):
flag=0
mainEntryPath=mainPath0
mainPath=os.path.dirname(mainEntryPath)
    if not mainPath.endswith('/'):
        mainPath=mainPath+'/'
fileList=LoadFolder(mainPath)
# Check path configure file
if mainPath+'PathConfigure.log' in fileList:
with open(mainPath+'PathConfigure.log','r') as inf:
tempList=inf.readlines()
if len(tempList)==2:
if os.path.exists(tempList[0].replace('\n','')):
if os.path.exists(tempList[1].replace('\n','')):
pass
else:
flag = 1
print('Pkcombu path is not configured correctly.\nExit.')
return 1
else:
flag = 1
print('eMolFrag path is not configured correctly.\nExit.')
return 1
else:
flag = 1
                print('Path configuration is not correct.\nExit.')
return 1
else:
flag = 1
print('Cannot find path configure file.\nExit.')
return 1
# Check main entry eMolFrag.py
if mainEntryPath in fileList:
pass
else:
flag = 1
print('Cannot find entrance: eMolFrag.py.\nExit.')
return 1
# Check RDKit
load1=pkgutil.find_loader('rdkit')
#load2=pkgutil.find_loader('rdkit.Chem')
if load1 == None:
print('Cannot find RDKit.\nExit.')
flag = 1
return 1
else:
load2=pkgutil.find_loader('rdkit.Chem')
if load2 == None:
print('RDKit is not properly installed.\nExit.')
flag = 1
return 1
else:
pass
# Check pkcombu
# Already checked previously.
# Check function lib
    for script in ('chopRDKit02.py', 'combineLinkers01.py', 'mol-ali-04.py',
                   'rmRedLinker03.py', 'rmRedRigid01.py'):
        if not os.path.exists(mainPath+script):
            flag = 1
            print('Cannot find part of script files.\nExit.')
            return 1
    return flag
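# Hedged usage sketch (illustrative; the path below is an assumption):
#   if Loader('/opt/eMolFrag/eMolFrag.py') != 0:
#       sys.exit('Dependency checks failed.')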
|
liutairan/eMolFrag
|
Old versions/eMolFrag_2016_09_09_01/loader.py
|
Python
|
gpl-3.0
| 2,968
|
[
"RDKit"
] |
8988925ce3aca2dcea04ec68ec6c664225807f88a21520ed117b9844fc2d3812
|
#!/usr/bin/env python
"""
scanFAAM.py
===========
Holds the scanFAAM function that is used to work out the first and last
index of real data (i.e. non-missing data in a load of FAAM flight data).
It is used in conjunction with nappy to reduce the size of output files
by missing out beginning and end periods that hold only misssing values.
Usage
=====
scanFAAM.py -f <filename> [-m <missing_value>]
Where:
------
filename - path to a FAAM NetCDF file
missing_value - missing value to use for variables.
"""
import os, sys, cdms, getopt
def scanFAAM(fileName=None, vars=None, nth=4, missingValuesToUse=(-9999., -32767.)):
"""
Scans every 'nth' variable in the list of variables (or found in the
file and gets the first and last index of the first (time) dimension
that holds real (non-missing) values.
"""
    if not isinstance(missingValuesToUse, tuple):
        missingValuesToUse=(missingValuesToUse,)
startList=[]
endList=[]
start=None
end=None
if not fileName and not vars:
raise "You must provide either a file name or a list of cdms variables."
if fileName:
f=cdms.open(fileName)
vars=f.listvariables()
for var in vars:
if type(var)!=type(""):
id=var.id
else:
id=var
if id[-4:]=="FLAG" or id=="Time":
continue
if type(var)==type(""):
var=f(var)
        step=1000
        # reset the search state for each variable, otherwise the first
        # variable's result would be reused for all later ones
        start, end = None, None
        while (start, end)==(None, None):
(start, end)=findMissing(var, step, missingValuesToUse)
step=step/2
startList.append(start)
endList.append(end)
print "Start/End index: %s %s:%s" % (id, start, end)
startMin=min(startList)
endMax=max(endList)
return (startMin, endMax)
def findMissing(var, step, missingValuesToUse):
"""
Returns the (start, end) tuple for a given variable where
they are indices of an array where missing values end and begin.
"""
start=None
end=None
i0=0
sh=var.shape
iend=sh[0]-1
print var.id, step
for miss in missingValuesToUse:
for i in range(i0, iend, step):
if var[i][0]==miss:
start=i
break
for e in range(iend, i0, -step):
if var[e][0]==miss:
end=e
break
return (start, end)
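# Hedged usage example (illustrative): the caller halves the stride until
# findMissing lands on a missing-value region, e.g.
#   (start, end) = findMissing(var, 1000, (-9999.,))
#   # (None, None) means no missing value was hit at this stride; the caller
#   # then retries with step/2.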
if __name__=="__main__":
argList=sys.argv[1:]
args=getopt.getopt(argList, "f:m:")
fileName=None
missingValue=None
for arg,value in args[0]:
if arg=="-f":
fileName=value
elif arg=="-m":
missingValue=float(value)
    if missingValue is None:
        # keep the function's default missing values when -m is not given
        scanFAAM(fileName=fileName)
    else:
        scanFAAM(fileName=fileName, missingValuesToUse=(missingValue,))
|
eufarn7sp/egads
|
egads/thirdparty/nappy/contrib/aircraft/scanFAAM.py
|
Python
|
gpl-3.0
| 2,764
|
[
"NetCDF"
] |
ee260333361adb6856a0bf5b03b93b947c52d4fdbf6a9fe15104d095030f8395
|
"""Tests for the Elk-M1 Control integration."""
|
turbokongen/home-assistant
|
tests/components/elkm1/__init__.py
|
Python
|
apache-2.0
| 48
|
[
"Elk"
] |
111fbb4814dce8d2b51bb8838717a7361c51569246345aa5c0312671f4c3f7c6
|
import math
from numpy import array, full, ones_like
import util
from tvtk.api import tvtk
from mayavi import mlab
from traits.api import HasTraits, Instance, on_trait_change
from traitsui.api import View, Item
from mayavi.core.ui.api import MayaviScene, MlabSceneModel, SceneEditor
from mayavi.core.api import Engine
from callout import Callout
from const import GYPSY_PINK
"""
Created on Wed Nov 27 10:37:08 2013
"""
__author__ = 'brandon.corfman'
# noinspection SpellCheckingInspection
__doc__ = '''
Plot a target scene using JMAE input and output files.
The AVs in AVFILE are plotted as small red spheres.
Target surfaces are plotted as wireframe quads.
Blast volumes are plotted as spheres or double cylinders with sphere caps.
Matrix is a VTK rectilinear grid that can display either fixed- or exponential-size cells.
Sample/burst points are displayed as small white spheres.
'''
######################################################################
class Visualization(HasTraits):
scene = Instance(MlabSceneModel)
view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene),
resizable=True, show_label=False),
resizable=True)
def __init__(self, **traits):
super(HasTraits, self).__init__(**traits)
self.engine = Engine()
self.engine.start()
def _scene_default(self):
return MlabSceneModel(engine=self.engine)
@on_trait_change('scene.activated')
def update_plot(self):
pass
class Plotter(Visualization):
def __init__(self, model):
super(Plotter, self).__init__()
self.scale_defl, self.scale_range = 0.0, 0.0
self.plot = None
self.target = None
self.model = model
self.rotation = 0
self.sel_x = []
self.sel_y = []
self.sel_z = []
self.burstpoint_array = None
self.burstpoint_glyphs = None
self.outline = None
self.axes = None
self.selected_az = None
self.radius_points = None
self.pid = None
self.rgrid = None
self.wgrid = None
self.rgrid_array = None
self.mtx_callout = None
self.mun_callout = None
self.av_callouts = []
self.access_obj = None
self.lut_table = None
def plot_av(self):
""" Plot fragment vulnerable AVs as points (spheres) on the 3D scene."""
# TODO: plot AVs based on interpolation like JMAE (not just the nearest ones)
model = self.model
x, y, z, sz, color = [], [], [], [], []
for i in model.frag_ids:
x.append(model.comps[i].x)
y.append(model.comps[i].y)
z.append(model.comps[i].z)
sz.append(0.3)
color.append(1.0)
title = '{0} ({1},{2},{3})'.format(model.comps[i].name, x[-1], y[-1], z[-1])
callout = Callout(title, justification='center', font_size=9, color=(1, 1, 1),
position=(x[-1], y[-1], z[-1] + 0.5))
callout.visible = False
self.av_callouts.append(callout)
self.scene.add_actor(callout.actor)
pts = self.scene.mlab.quiver3d([x], [y], [z], [sz], [sz], [sz], name='component AV', colormap='blue-red',
scalars=color, mode='sphere', scale_factor=1)
pts.module_manager.scalar_lut_manager.reverse_lut = True
pts.glyph.color_mode = 'color_by_scalar'
pts.glyph.glyph_source.glyph_source.center = (0, 0, 0)
# noinspection SpellCheckingInspection
def plot_srf_file(self):
""" Reformat target model surfaces as a numpy array, and display them as wireframe polygons on the 3D scene. """
model = self.model
polys = array([[4 * i, 4 * i + 1, 4 * i + 2, 4 * i + 3] for i in range(len(model.surfaces) // 4)])
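        # (added comment) `polys` groups every 4 consecutive surface points
        # into one quad: polygon i indexes points [4i, 4i+1, 4i+2, 4i+3]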
poly_obj = tvtk.PolyData(points=model.surfaces, polys=polys)
self.target = mlab.pipeline.surface(poly_obj, name='target')
self.target.actor.property.representation = 'wireframe'
self.target.actor.property.color = (0, 0, 0)
# save this table for later in case of frag zone plotting
self.lut_table = self.target.module_manager.scalar_lut_manager.lut
# noinspection SpellCheckingInspection
def plot_matrix_file(self):
""" Show matrix as a VTK rectilinear grid at the munition burst height. """
model = self.model
# Define rectilinear grid according to the matrix gridlines.
# Set the single Z coordinate in the elevation array equal to the munition burst height.
elevations = full(1, model.burst_height)
x_dim, y_dim, z_dim = len(model.gridlines_range), len(model.gridlines_defl), len(elevations)
self.rgrid = tvtk.RectilinearGrid(x_coordinates=model.gridlines_range, y_coordinates=model.gridlines_defl,
z_coordinates=elevations, dimensions=(x_dim, y_dim, z_dim))
# Grid colors are displayed using an additional array (PKs).
# T transposes the 2D PK array to match the gridline cells and then
# ravel() flattens the 2D array to a 1D array for VTK use as scalars.
self.rgrid.cell_data.scalars = model.pks.T.ravel()
self.rgrid.cell_data.scalars.name = 'pks'
self.rgrid.cell_data.update() # refreshes the grid now that a new array has been added.
p = tvtk.Property(color=(0, 0, 0)) # color only matters if we are using wireframe, but I left it in for ref.
# this method puts the surface in the Mayavi pipeline so the user can change it.
surf = self.scene.mlab.pipeline.surface(self.rgrid, name='matrix')
surf.actor.actor.property = p
surf.actor.update_data()
# gridlines added at JJS request, but they make the scene look fairly bad, so I'm commenting them out.
# wgrid_height = full(1, model.burst_height+0.1)
# self.wgrid = tvtk.RectilinearGrid(x_coordinates=model.gridlines_range, y_coordinates=model.gridlines_defl,
# z_coordinates=wgrid_height, dimensions=(x_dim, y_dim, z_dim))
# surf = self.scene.mlab.pipeline.surface(self.wgrid, name='gridlines')
# wf = tvtk.Property(representation='wireframe', color=(0.5, 0.5, 0.5))
# surf.actor.actor.property = wf
# surf.actor.update_data()
# give PK colorbar a range between 0 and 1. The default is to use the min/max values in the array,
# which would give us a custom range every time and make it harder for the user to consistently identify what
# the colors mean.
surf.module_manager.scalar_lut_manager.use_default_range = False
surf.module_manager.scalar_lut_manager.data_range = array([0., 1.])
self.scene.mlab.colorbar(surf, title='Cell Pk', orientation='vertical')
# Put max and min gridline coordinates in the upper-right corner of the matrix.
# Also, scale the text to a readable size.
sz = max(1, int(abs(model.gridlines_range[-1] - model.gridlines_range[0]) / 100))
spacing = max(5, sz)
text = 'Matrix range: (%5.1f, %5.1f)\nMatrix defl: (%5.1f, %5.1f)' % (model.mtx_extent_range[0],
model.mtx_extent_range[1],
model.mtx_extent_defl[0],
model.mtx_extent_defl[1])
self.mtx_callout = Callout(text, justification='left', font_size=18, color=(1, 1, 1),
position=(model.gridlines_range[-1], model.gridlines_defl[0], 4 * spacing))
self.scene.add_actor(self.mtx_callout.actor)
def plot_blast_volumes(self):
model = self.model
p = tvtk.Property(opacity=0.25, color=GYPSY_PINK)
for bidx in model.blast_ids:
comp = model.comps[bidx]
r1, r2, r3, z1, z2 = model.blast_vol[bidx]
if r1 == 0 and r2 == 0 and z1 == 0:
# blast sphere
source_obj = mlab.pipeline.builtin_surface()
source_obj.source = 'sphere'
source_obj.data_source.center = (comp.x, comp.y, z2)
source_obj.data_source.radius = r3
source_obj.data_source.phi_resolution = 50
source_obj.data_source.theta_resolution = 50
# adding TVTK poly to Mayavi pipeline will do all the rest of the setup necessary to view the volume
surf = mlab.pipeline.surface(source_obj, name='blast sphere %s' % comp.name)
surf.actor.actor.property = p # add color
else:
# double cylinder merged with sphere cap
cap = tvtk.SphereSource(center=(0, 0, 0), radius=r3, start_theta=0,
end_theta=180, phi_resolution=50, theta_resolution=50)
t = tvtk.Transform()
t.translate(comp.x, comp.y, z2)
t.rotate_x(90.0)
cap_tf = tvtk.TransformPolyDataFilter(input_connection=cap.output_port, transform=t)
cap_tf.update()
upper_cyl_height = r3 + z2 - z1
source_obj = tvtk.AppendPolyData()
if upper_cyl_height > 0.0: # handle both upper cylinder with sphere cap, plus lower cylinder
upper_cyl = tvtk.CylinderSource(center=(0, 0, 0), radius=r2, height=upper_cyl_height, resolution=50)
t = tvtk.Transform()
t.translate(comp.x, comp.y, upper_cyl_height / 2.0)
t.rotate_x(90.0)
upper_tf = tvtk.TransformPolyDataFilter(input_connection=upper_cyl.output_port, transform=t)
upper_tf.update()
tri1 = tvtk.TriangleFilter(input_connection=cap_tf.output_port)
tri2 = tvtk.TriangleFilter(input_connection=upper_tf.output_port)
cap_slice = tvtk.BooleanOperationPolyDataFilter()
cap_slice.operation = 'difference'
cap_slice.add_input_connection(0, tri1.output_port)
cap_slice.add_input_connection(1, tri2.output_port)
cap_slice.update()
lower_cyl = tvtk.CylinderSource(center=(0, 0, 0), radius=r1, height=z1, resolution=50)
t = tvtk.Transform()
t.translate(comp.x, comp.y, z1 / 2.0 + 0.01)
t.rotate_x(90.0)
lower_tf = tvtk.TransformPolyDataFilter(input_connection=lower_cyl.output_port, transform=t)
lower_tf.update()
tri3 = tvtk.TriangleFilter(input_connection=cap_slice.output_port)
tri4 = tvtk.TriangleFilter(input_connection=lower_tf.output_port)
upper_cyl_plus_cap = tvtk.BooleanOperationPolyDataFilter()
upper_cyl_plus_cap.operation = 'difference'
upper_cyl_plus_cap.add_input_connection(0, tri3.output_port)
upper_cyl_plus_cap.add_input_connection(1, tri4.output_port)
upper_cyl_plus_cap.update()
source_obj.add_input_connection(upper_cyl_plus_cap.output_port)
source_obj.add_input_connection(lower_tf.output_port)
source_obj.update()
else: # lower cylinder only, intersected with sphere cap
lower_cyl = tvtk.CylinderSource(center=(0, 0, 0), radius=r1, height=z1, resolution=50)
t = tvtk.Transform()
t.translate(comp.x, comp.y, z1 / 2)
t.rotate_x(90.0)
lower_tf = tvtk.TransformPolyDataFilter(input_connection=lower_cyl.output_port, transform=t)
lower_tf.update()
tri1 = tvtk.TriangleFilter(input_connection=cap_tf.output_port)
tri2 = tvtk.TriangleFilter(input_connection=lower_tf.output_port)
boolean_op = tvtk.BooleanOperationPolyDataFilter()
boolean_op.operation = 'difference'
boolean_op.add_input_connection(0, tri1.output_port)
boolean_op.add_input_connection(1, tri2.output_port)
boolean_op.update()
source_obj.add_input_connection(lower_tf.output_port)
source_obj.add_input_connection(boolean_op.output_port)
source_obj.update()
# adding TVTK poly to Mayavi pipeline will do all the rest of the setup necessary to view the volume
surf = mlab.pipeline.surface(source_obj.output, name='blast volume %s' % comp.name)
surf.actor.actor.property = p # add color
def plot_munition(self):
""" Plot an arrow showing direction of incoming munition and display text showing angle of fall,
attack azimuth and terminal velocity. """
model = self.model
# calculate scaling size for matrix range and deflection text.
# allow for a missing matrix file by checking to see whether gridlines exist first.
if model.gridlines_range:
sz = max(1, int(abs(model.gridlines_range[-1] - model.gridlines_range[0]) / 1000),
int(abs(model.gridlines_defl[-1] - model.gridlines_defl[0]) / 1000))
else:
sz = 1
# position arrow position outside of target, using both maximum radius and matrix offset.
line_scale = 15
zloc = model.burst_height + line_scale * math.sin(math.radians(model.aof))
if not model.az_averaging:
xv, yv, zv = util.rotate_pt_around_yz_axes(1.0, 0.0, 0.0, model.aof, model.attack_az)
# rotate unit vector into position of munition attack_az and aof
xloc, yloc, _ = util.rotate_pt_around_yz_axes(-1.0, 0.0, 0.0, model.aof, model.attack_az)
xloc *= model.volume_radius
yloc *= model.volume_radius
# rotate arrow into correct position
mlab.quiver3d([xloc], [yloc], [zloc], [xv], [yv], [zv], color=(1, 1, 1), reset_zoom=False,
scale_factor=15, name='munition')
# label arrow with text describing terminal conditions
            format_str = '{0} deg AOF\n{1} deg attack azimuth\n{2} ft/s terminal velocity\n{3} ft. burst height'
label = format_str.format(model.aof, model.attack_az, model.term_vel, model.burst_height)
self.mun_callout = Callout(label, justification='left', font_size=14, color=(1, 1, 1),
position=(xloc, yloc, zloc + 3))
self.scene.add_actor(self.mun_callout.actor)
else: # azimuth averaged case
# plot arrows showing incoming AOF for each azimuth
for az in range(0, 360, int(model.attack_az)):
xv, yv, zv = util.rotate_pt_around_yz_axes(1.0, 0.0, 0.0, model.aof, az)
# rotate arrow into correct position
xloc, yloc, _ = util.rotate_pt_around_yz_axes(-1.0, 0.0, 0.0, model.aof, az)
xloc *= model.volume_radius
yloc *= model.volume_radius
self.scene.mlab.quiver3d([xloc], [yloc], [zloc], [xv], [yv], [zv], color=(1, 1, 1), reset_zoom=False,
scale_factor=15, name='munition %d deg' % az)
if az == 0:
# display one callout showing terminal conditions above the 0 degree azimuth arrow.
format_str = '{0} deg AOF\nAvg attack az - {1} deg inc.\n{2} ft/s terminal velocity\n'
                    format_str += '{3} ft. burst height'
label = format_str.format(model.aof, model.attack_az, model.term_vel, model.burst_height)
self.mun_callout = Callout(label, justification='left', font_size=14, color=(1, 1, 1),
position=(xloc, yloc, zloc + 3))
self.scene.add_actor(self.mun_callout.actor)
def plot_detail(self):
""" Plot burstpoints or sample points from the detail file."""
self.sel_x, self.sel_y, self.sel_z = [], [], []
points = self.radius_points
az = self.selected_az
for _, key in enumerate(points):
self.sel_x.append(points[key][az][0])
self.sel_y.append(points[key][az][1])
self.sel_z.append(points[key][az][2])
# setting the scalars here is necessary to avoid VTK error: "Algorithm vtkAssignAttribute returned failure
# for request: vtkInformation". See https://github.com/enthought/mayavi/issues/3
if self.burstpoint_glyphs is None:
self.burstpoint_glyphs = self.scene.mlab.points3d(self.sel_x, self.sel_y, self.sel_z, ones_like(self.sel_x),
color=(1, 1, 1), scale_factor=0.75)
else:
self.burstpoint_glyphs.mlab_source.set(x=self.sel_x, y=self.sel_y, z=self.sel_z,
scalars=ones_like(self.sel_x))
# Here, we grab the points describing the individual glyph, to figure
# out how many points are in an individual glyph.
self.burstpoint_array = self.burstpoint_glyphs.glyph.glyph_source.glyph_source.output.points.to_array()
@on_trait_change('scene.activated')
def update_plot(self):
""" Called after Mayavi window has been initialized, so 3D scene is ready to be graphed. """
model = self.model
# noinspection PyProtectedMember
self.scene.scene_editor._tool_bar.setVisible(False)
self.scene.disable_render = True # generate scene more quickly by temporarily turning off rendering
if model.pks is not None:
self.plot_matrix_file() # matrix can be plotted if it was read in
self.plot_srf_file()
if model.blast_ids:
self.plot_blast_volumes()
self.plot_av()
self.plot_munition()
if model.sample_loc:
self.plot_detail()
self.axes = self.scene.mlab.orientation_axes(figure=self.scene.mayavi_scene)
self.axes.visible = False
self.scene.disable_render = False # reinstate display
super(Plotter, self).update_plot()
self.reset_view()
def reset_view(self):
""" Puts 3D camera back in default position. """
self.scene.mlab.view(azimuth=315, elevation=83, distance=self.model.volume_radius * 6, focalpoint=(0, 0, 20))
def top_view(self):
self.scene.mlab.view(azimuth=270, elevation=0, distance=self.model.volume_radius * 12, focalpoint=(0, 0, 20))
def save_view_to_file(self, filename):
self.scene.mlab.savefig(filename, figure=self.scene.mayavi_scene)
def show_axes(self, state):
""" Shows/hides 3D axis legend on the window. """
self.axes.visible = state
def get_camera(self):
return self.scene.camera
def update_point_detail(self, az, points):
""" Called when view selections are changed, and associated azimuth and sample/burst points need to
be updated on next scene refresh. """
self.selected_az = az
self.radius_points = points
def set_av_callouts_visible(self, is_visible):
""" Show/hide component AV callouts """
for c in self.av_callouts:
c.visible = is_visible
|
bcorfman/stage
|
plot3d.py
|
Python
|
mit
| 19,592
|
[
"BLAST",
"Mayavi",
"VTK"
] |
bf20a4e02cca054a3652d3b1b96a9cfa2aae117f205fb90c34bd13f0e06d910c
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 20 11:42:42 2015
@author: admin
"""
import h5py
import os
import numpy as np
def getSignals(fileName,env):
path=env.path
hdf5=h5py.File(os.path.join(path,fileName+'.h5'),'r')
#channel=tdms_file.object('PXI M6251','Lang_U')
liste=[]
hdf5.visit(liste.append)
liste2=[x for x in liste if isinstance(hdf5[x],h5py.Dataset)]
print liste2
hdf5.close()
return liste2
def getAttributes(fileName,env):
path=env.path
hdf5=h5py.File(os.path.join(path,fileName+'.h5'),'r')
#channel=tdms_file.object('PXI M6251','Lang_U')
liste=[]
hdf5.visit(liste.append)
liste2=[hdf5[x].attrs.keys() for x in liste if isinstance(hdf5[x],h5py.Group)]
hdf5.close()
return liste2
def getOverview(fileName,item,env):
path=env.path
hdf5=h5py.File(os.path.join(path,fileName+'.h5'),'r')
if hdf5[item].parent.name=='/S7':
data=np.array(hdf5[item])[-100:-1]
time=np.array(hdf5['S7/Time'])[-100:-1]/1000
sampling=1/(time[1]-time[0])
    else:
        data=np.array(hdf5[item])
        sampling=hdf5[item].parent.attrs['sampling']
        time=np.linspace(0,len(data)-1,num=len(data))/(sampling)
    hdf5.close()
    return time,data
def getData(fileName,item,env):
path=env.path
hdf5=h5py.File(os.path.join(path,fileName+'.h5'),'r')
par=hdf5[item].parent.name
if par=='/S7':
data=np.array(hdf5[item])[-100:-1]
time=np.array(hdf5[par[1:]+'/Time'])[-100:-1]/1000
sampling=1/(time[1]-time[0])
elif par=='/Process':
data=np.array(hdf5[item])
time=np.array(hdf5[par[1:]+'/Time'])
sampling=1/(time[1]-time[0])
else:
data=np.array(hdf5[item])
sampling=np.abs(hdf5[item].parent.attrs['sampling'])
time=np.linspace(0,len(data)-1,num=len(data))/(sampling)
hdf5.close()
return time,data,sampling
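# Hedged usage sketch (illustrative; the shot number and dataset path are
# assumptions): `env` only needs a .path attribute pointing at the folder
# holding the .h5 files.
#   time, data, fs = getData('12345', 'PXI M6251/Lang_U', env)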
def saveData(fileName,item,data,env):
path=env.path
hdf5=h5py.File(os.path.join(path,fileName+'.h5'),'a')
group=hdf5['Process']
    try:
        del group[item]
    except KeyError:
        pass
group.create_dataset(item,data=data,compression="gzip")
hdf5.close()
return
def getDataProcess(fileName,item,env):
path=env.path
hdf5=h5py.File(os.path.join(path,fileName+'.h5'),'r')
# group=hdf5['Process']
a=np.array(hdf5[item])
hdf5.close()
return a
def saveAttr(fileName,attributName,data,env):
path=env.path
hdf5=h5py.File(os.path.join(path,fileName+'.h5'),'a')
group=hdf5['Process']
group.attrs[attributName]=data
hdf5.close()
def getAttr(fileName,attributName,env):
path=env.path
hdf5=h5py.File(os.path.join(path,fileName+'.h5'),'r')
group=hdf5['Process']
aa=group.attrs[attributName]
hdf5.close()
return aa
def getAttrlist(fileName,env):
path=env.path
hdf5=h5py.File(os.path.join(path,fileName+'.h5'),'r')
group=hdf5['Process']
string=''
for x,y in zip(group.attrs.iterkeys(),group.attrs.itervalues()):
string=string+str(x)+' : '+str(y)+'\n'
hdf5.close()
return string
def getMultiData(fileName,item1,item2,env):
path=env.path
hdf5=h5py.File(os.path.join(path,fileName+'.h5'),'r')
    data1=np.array(hdf5[item1])
    data2=np.array(hdf5[item2])
    sampling=np.abs(hdf5[item1].parent.attrs['sampling'])
    time=np.linspace(0,len(data1)-1,num=len(data1))/(sampling)
    hdf5.close()
    return data1,data2,time,sampling
def getSummaryData(fileList,env):
path=env.path
shot=dict()
for fileName in fileList:
shotnumber=int(fileName[0:-5])
shot[shotnumber]=[fileName]
print os.path.join(path,fileName+'.h5')
hdf5=h5py.File(os.path.join(path,fileName+'.h5'),'r')
group=hdf5['Process']
        for attr, default in (('date', ''), ('program', ''),
                              ('programdesc', ''), ('positionSpace', 0.),
                              ('Gas', ''), ('densityHP', 0.),
                              ('densityLP', 0.), ('pressure', 0.),
                              ('magnetic', 0.), ('comment', '')):
            try:
                shot[shotnumber].append(group.attrs[attr])
            except KeyError:
                shot[shotnumber].append(default)
hdf5.close()
return shot
#def name_dataset(name,obj):
# if isinstance(obj,h5py.Dataset):
# return obj
# for name in hdf5:
# return [hdf5[name].items() for name in hdf5]
|
albanatita/data-process
|
readHdf5.py
|
Python
|
gpl-2.0
| 5,726
|
[
"VisIt"
] |
032b24c5c46ea2ec15bb2312598789cad870ee6f9a847870f27401ce85b651a2
|
#!/usr/bin/python
# Htsnapp - create snapshots of websites from urls, nmap xml, and txr files.
# Author - Nikhil Sreekumar
# Contact: roo7break@gmail.com / @roo7break
# Supports urls, files with urls, and nmap.xml files
# Primary source for screenshot code:
# PyWebShot - create webpage thumbnails. Originally based on
# http://burtonini.com/computing/screenshot-tng.py
# Ben Dowling - http://www.coderholic.com
# License:
# HTsnapp is licensed under the GNU-General Public License version 3 and later.
# Please visit http://www.gnu.org/copyleft/gpl.html for more information
import urlparse
import os
import sys
try:
import gtk
from optparse import OptionParser
import xml.dom.minidom as xmll
import gtk.gdk as gdk
import gobject
import gtkmozembed
except ImportError:
print "Required modules are not installed. Please run with '-i or --install' with root privs"
class PyWebShot:
def __init__(self, urls, screen, thumbnail, delay, outfile, location):
self.parent = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.parent.set_border_width(10)
self.urls = urls
self.delay = delay
self.location = location
# Get resoltion information
(x,y) = screen.split('x')
x = int(x)
y = int(y)
(t_x, t_y) = thumbnail.split('x')
t_x = int(t_x)
t_y = int(t_y)
# Calculate the x scale factor
scale_x = float(t_x) / x
scale_y = float(t_y) / y
self.t_x = t_x
self.t_y = t_y
self.scale = scale_x
self.widget = gtkmozembed.MozEmbed()
self.widget.set_size_request(x + 18, y)
# Connect signal
self.widget.connect("net_stop", self.on_net_stop)
if outfile:
(self.outfile_base, ignore) = outfile.split('.png')
else:
self.outfile_base = None
self.parent.add(self.widget)
self.url_num = 0
self.load_next_url()
self.parent.show_all()
def load_next_url(self):
#print len(self.urls)
#print self.url_num
if self.url_num > len(self.urls) - 1:
gtk.main_quit()
return
self.current_url = self.urls[self.url_num]
self.countdown = self.delay
print "Loading " + self.current_url + "..."
self.url_num += 1
self.widget.load_url(self.current_url)
def on_net_stop(self, data = None):
if self.delay > 0: gobject.timeout_add(1500,self.do_countdown,self)
else: self.do_countdown()
def do_countdown(self, data = None):
self.countdown -= 1
if(self.countdown > 0):
return True
else:
self.screenshot()
self.load_next_url()
return False
def screenshot(self, data = None):
window = self.widget.window
(x,y,width,height,depth) = window.get_geometry()
width -= 16
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,False,8,width,height)
pixbuf.get_from_drawable(window,self.widget.get_colormap(),0,0,0,0,width,height)
thumbnail = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,False,8,self.t_x,self.t_y)
pixbuf.scale(thumbnail, 0, 0, self.t_x, self.t_y, 0, 0, self.scale, self.scale, gdk.INTERP_HYPER)
if self.outfile_base:
if len(self.urls) == 1:
filename = "%s.png" % (self.outfile_base)
else:
filename = "%s-%d.png" % (self.outfile_base, self.url_num)
else:
parts = urlparse.urlsplit(self.current_url)
            # SplitResult is immutable, so build the filename from a local
            # copy; ':' is replaced with '_' for filename support
            netloc = parts.netloc.replace(':', '_')
            filename = netloc + parts.path.replace('/', '.') + ".png"
os.chdir(self.location)
thumbnail.save(filename,"png")
print "saved as " + filename
return True
def __windowExit(widget, data=None):
gtk.main_quit()
class HtSnapp(PyWebShot):
def __init__(self, option):
# Process input received from user
# options.screen - Screen resolution
# options.thumbnail - Thumbnail size
# options.delay - Delay for page load
# options.filename - Output filename. Default is by hostname
# options.xml - Nmap XML file
# options.note - File input with hosts
# options.url - URL or URLs (separated by comma)
# options.location - Directory location to save all the files
print '''++++++++++++ Htsnapp (beta):: by Nikhil 'roo7break' Sreekumar ++++++++++++
contact: roo7break@gmail.com
'''
option = option[0]
self.max_pro = 5 # Max mumber of processes
if option.instal:
if self.installation():
print 'Installation complete'
else:
print 'Some error occured during installation.'
sys.exit(1)
if not os.path.exists(option.location):
os.makedirs(option.location)
        # PyWebShot.__init__ returns None, so its result cannot signal
        # success; just call it and let exceptions surface
        if option.xml: # If there is value in XML
            nurl = self.nmapparser(option.xml)
            PyWebShot.__init__(self, urls=nurl, screen=option.screen, thumbnail=option.thumbnail, delay=option.delay, outfile=option.filename, location=option.location)
            print "All done with xml"
        elif option.note: # If there is value in file
            lurl = self.fileparse(option.note)
            PyWebShot.__init__(self, urls=lurl, screen=option.screen, thumbnail=option.thumbnail, delay=option.delay, outfile=option.filename, location=option.location)
            print "All done with files"
        else:
            PyWebShot.__init__(self, urls=option.url, screen=option.screen, thumbnail=option.thumbnail, delay=option.delay, outfile=option.filename, location=option.location)
            print "All done with urls"
def installation(self):
print "Installing WebKit libraries"
os.system('sudo apt-get install python-webkit')
print "------ Done ------"
print "Installing XML libraries"
os.system('sudo apt-get install python-libxml2')
print "------ Done ------"
print "Installing Embedded GTK libraries.."
os.system('sudo apt-get install python-gtkmozembed')
print "------ Done ------"
def nmapparser(self, fxml):
# Parse nmap xml for targets (http service)
try:
fp = xmll.parse(fxml)
except Exception,e:
print "Error ", e
return []
listt = []
for hst in fp.getElementsByTagName('host'):
for prt in hst.getElementsByTagName('port'):
port = prt.getAttributeNode('portid').value
for info in hst.getElementsByTagName('address'):
typee = info.getAttributeNode('addrtype').value
if typee == 'ipv4':
ip = info.getAttributeNode('addr').value
for ser in prt.getElementsByTagName('service'):
if 'http' in ser.getAttributeNode('name').value:
if port == '80':
listt.append(ip)
elif port == '443':
listt.append('https://' + ip)
else:
listt.append(ip + ":" + port)
return listt
def fileparse(self, files):
# Read file for targets in format ipaddress:port
fl = open(files, 'r')
listt = []
for target in fl.readlines():
target = target.strip('\n')
if ":" in target:
if target.split(":")[1] == '443':
listt.append('https://' + target)
else:
listt.append(target)
else:
listt.append(target)
return listt
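    # Hedged example (added for illustration): a targets file holds one
    # host[:port] per line; entries on port 443 gain an https:// scheme.
    #   10.0.0.1:443   ->  https://10.0.0.1:443
    #   10.0.0.2:8080  ->  10.0.0.2:8080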
if __name__ == "__main__":
parser = OptionParser(version='htsnapp v0.2', description = '''++++++++++++ Htsnapp (beta):: by Nikhil 'roo7break' Sreekumar ++++++++++++''')
parser.add_option('-s', dest = 'screen', action='store', type='string', help='Screen resolution at which to capture the webpage (default %default)', default="1024x769")
parser.add_option('-t', dest = 'thumbnail', action='store', type='string', help='Thumbnail resolution (default %default)', default="350x200")
parser.add_option('-d', dest = 'delay', action='store', type='int', help='Delay in seconds to wait after page load before taking the screenshot (default %default)', default=0)
parser.add_option('-f', dest = 'filename', action='store', type='string', help='PNG output filename with .png extension, otherwise default is based on url name and given a .png extension')
parser.add_option('-x', dest = 'xml', action='store', type='string', help='Nmap XML file to parse', default='')
parser.add_option('-r', dest = 'range', action='store', type='string', help='IP range to sweep. Provide port number', default='')
parser.add_option('-p', dest = 'port', action='store', type='string', help='Port number to sweep on range', default='80')
parser.add_option('-n', dest = 'note', action='store', type='string', help='File with targets:port', default='')
parser.add_option('-u', dest = 'url', action='store', type='string', help='Provide single url or mulitple urls (separated by comma. no spaces.)', default='')
parser.add_option('-l', dest = 'location', action='store', type='string', help='Directory location to save files', default=os.getcwd())
parser.add_option('-i', dest = 'instal', action='store_true', help='Install required dependencies')
    options = parser.parse_args()
    temp = []
    if options[0].url:
        temp = options[0].url.split(',')
        options[0].url = [x.strip() for x in temp]
        print options[0].url
    # validate targets before launching the GTK work
    if not (options[0].url or options[0].xml or options[0].note):
        parser.error('No targets specified')
    snapp = HtSnapp(options)
    options = options[0]
    os.chdir(options.location)
    gtk.main()
|
roo7break/HtSnapp
|
htsnapp.py
|
Python
|
gpl-3.0
| 8,944
|
[
"VisIt"
] |
a6b2f4e11318dd5ee22b4761b4274bf4cc676524a10ce53c866e5e60600958a0
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ImmunizationHistory.tetanus_toxoid1'
db.add_column(u'patient_immunizationhistory', 'tetanus_toxoid1',
self.gf('django.db.models.fields.DateTimeField')(null=True),
keep_default=False)
# Adding field 'ImmunizationHistory.tetanus_toxoid2'
db.add_column(u'patient_immunizationhistory', 'tetanus_toxoid2',
self.gf('django.db.models.fields.DateTimeField')(null=True),
keep_default=False)
# Adding field 'ImmunizationHistory.tetanus_toxoid3'
db.add_column(u'patient_immunizationhistory', 'tetanus_toxoid3',
self.gf('django.db.models.fields.DateTimeField')(null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ImmunizationHistory.tetanus_toxoid1'
db.delete_column(u'patient_immunizationhistory', 'tetanus_toxoid1')
# Deleting field 'ImmunizationHistory.tetanus_toxoid2'
db.delete_column(u'patient_immunizationhistory', 'tetanus_toxoid2')
# Deleting field 'ImmunizationHistory.tetanus_toxoid3'
db.delete_column(u'patient_immunizationhistory', 'tetanus_toxoid3')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.additionalpatientinformation': {
'Meta': {'object_name': 'AdditionalPatientInformation'},
'alcohol': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cigarettes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cooking_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'literate': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'other_harmful_substances': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'psychological_stress': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'toilet_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'patient.familymedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'FamilyMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.guardian': {
'Meta': {'object_name': 'Guardian'},
'contact_number': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'home_address': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'relation': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.gynaecologicalhistory': {
'Meta': {'object_name': 'GynaecologicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_last_pap_smear': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method_of_birth_control': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'previous_surgery': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PreviousSurgery']"}),
'result_pap_smear': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.immunizationhistory': {
'Meta': {'object_name': 'ImmunizationHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'tetanus_toxoid1': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'tetanus_toxoid2': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'tetanus_toxoid3': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'vaccination': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.laboratorytest': {
'Meta': {'object_name': 'LaboratoryTest'},
'blood_group': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'hemoglobin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'serological_test_for_syphilis': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'urinalysis': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.medicalhistory': {
'Meta': {'object_name': 'MedicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'family_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.FamilyMedicalHistory']"}),
'gynaecological_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.GynaecologicalHistory']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immunization_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ImmunizationHistory']"}),
'menstrual_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.MenstrualHistory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'obstetric_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ObstetricHistory']"}),
'past_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PastMedicalHistory']"}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'present_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PresentMedicalHistory']"})
},
u'patient.menstrualhistory': {
'Meta': {'object_name': 'MenstrualHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'day_of_visit': ('django.db.models.fields.DateField', [], {}),
'expected_date_of_delivery': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_menstrual_periods': ('django.db.models.fields.DateField', [], {}),
'menstrual_cycle': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'poa_by_lmp': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.obstetrichistory': {
'Meta': {'object_name': 'ObstetricHistory'},
'check_if_you_have_been_miscarriages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'check_if_you_have_been_pregnant': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_previous_obstetric_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PreviousObstetricHistory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.pastmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PastMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.patientinformation': {
'Meta': {'object_name': 'PatientInformation'},
'address': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'marital_status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'operator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
u'patient.prescription': {
'Meta': {'object_name': 'Prescription'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_prescription': ('django.db.models.fields.TextField', [], {}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.presentmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PresentMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.previousobstetrichistory': {
'Meta': {'object_name': 'PreviousObstetricHistory'},
'age_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'birth_weight': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length_of_pregnancy': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'obstetrical_operation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'periods_of_exclusive_feeding': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'problems': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'types_of_delivery': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.DateField', [], {})
},
u'patient.previoussurgery': {
'Meta': {'object_name': 'PreviousSurgery'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'endometriosis': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'fibrocystic_breasts': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'ovarian_cysts': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'uterine_fibroids': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'patient.report': {
'Meta': {'object_name': 'Report'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'diabetis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hiv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pregnancy': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.routinecheckup': {
'Meta': {'object_name': 'Routinecheckup'},
'abdominal_changes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'blood_pressure': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'chest_and_heart_auscultation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'fetal_movement': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_examiner': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'symptom_events': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'uterine_height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'vaginal_examination': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'visit': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'weight': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.signanaemia': {
'Meta': {'object_name': 'Signanaemia'},
'conjunctiva': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fingernails': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'oral_mucosa': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pale_complexion': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'shortness_of_breath': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tip_of_tongue': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.ultrasoundscanning': {
'AC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'BPD': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'CRL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'FL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'HC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'Meta': {'object_name': 'UltrasoundScanning'},
'amount_of_amniotic_fluid': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'gestation_age': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_examiner': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'position_of_the_baby': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'position_of_the_placenta': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'saved_ultrasound_image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['patient']
|
aazhbd/medical_info01
|
patient/migrations/0017_auto__add_field_immunizationhistory_tetanus_toxoid1__add_field_immuniz.py
|
Python
|
bsd-3-clause
| 30,488
|
[
"VisIt"
] |
72256e63d9cc3e3e70043d2e594489d7361cbddb57b37c1dd90159219fbecd9d
|
from __future__ import division, generators
import math, sys, warnings, datetime, new
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
import matplotlib.axis as maxis
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as mdates
import matplotlib.font_manager as font_manager
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.mlab as mlab
import matplotlib.patches as mpatches
import matplotlib.quiver as mquiver
import matplotlib.scale as mscale
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
iterable = cbook.iterable
is_string_like = cbook.is_string_like
def _process_plot_format(fmt):
"""
Process a matlab(TM) style color/line style format string. Return a
(*linestyle*, *marker*, *color*) tuple as a result of the processing;
components not given in *fmt* fall back to rcParams defaults or 'None'.
Example format strings include:
* 'ko': black circles
* '.b': blue dots
* 'r--': red dashed lines
.. seealso::
:func:`~matplotlib.Line2D.lineStyles` and
:func:`~matplotlib.pyplot.colors`:
for all possible styles and color format strings.
"""
linestyle = None
marker = None
color = None
# Is fmt just a colorspec?
try:
color = mcolors.colorConverter.to_rgb(fmt)
return linestyle, marker, color # Yes.
except ValueError:
pass # No, not just a color.
# handle the multi char special cases and strip them from the
# string
if fmt.find('--')>=0:
linestyle = '--'
fmt = fmt.replace('--', '')
if fmt.find('-.')>=0:
linestyle = '-.'
fmt = fmt.replace('-.', '')
if fmt.find(' ')>=0:
linestyle = 'None'
fmt = fmt.replace(' ', '')
chars = [c for c in fmt]
for c in chars:
if c in mlines.lineStyles:
if linestyle is not None:
raise ValueError(
'Illegal format string "{0!s}"; two linestyle symbols'.format(fmt))
linestyle = c
elif c in mlines.lineMarkers:
if marker is not None:
raise ValueError(
'Illegal format string "{0!s}"; two marker symbols'.format(fmt))
marker = c
elif c in mcolors.colorConverter.colors:
if color is not None:
raise ValueError(
'Illegal format string "{0!s}"; two color symbols'.format(fmt))
color = c
else:
raise ValueError(
'Unrecognized character %c in format string' % c)
if linestyle is None and marker is None:
linestyle = rcParams['lines.linestyle']
if linestyle is None:
linestyle = 'None'
if marker is None:
marker = 'None'
return linestyle, marker, color
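# A sketch of typical return values (derived from the logic above; a bare
# colorspec short-circuits before the 'None' fallbacks are applied):
#   _process_plot_format('r--')  # -> ('--', 'None', 'r')
#   _process_plot_format('ko')   # -> ('None', 'o', 'k')
#   _process_plot_format('b')    # -> (None, None, (0.0, 0.0, 1.0))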
def set_default_color_cycle(clist):
"""
Change the default cycle of colors that will be used by the plot
command. This must be called before creating the
:class:`Axes` to which it will apply; it will
apply to all future axes.
*clist* is a sequence of mpl color specifiers
"""
_process_plot_var_args.defaultColors = clist[:]
rcParams['lines.color'] = clist[0]
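# Usage sketch: make every future Axes cycle red, green, blue first
# (per the docstring, this must run before the Axes is created):
#   set_default_color_cycle(['r', 'g', 'b'])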
class _process_plot_var_args:
"""
Process variable length arguments to the plot command, so that
plot commands like the following are supported::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* groups is allowed
"""
defaultColors = ['b','g','r','c','m','y','k']
def __init__(self, axes, command='plot'):
self.axes = axes
self.command = command
self._clear_color_cycle()
def _clear_color_cycle(self):
self.colors = _process_plot_var_args.defaultColors[:]
# if the default line color is a color format string, move it up
# in the queue
try: ind = self.colors.index(rcParams['lines.color'])
except ValueError:
self.firstColor = rcParams['lines.color']
else:
self.colors[0], self.colors[ind] = self.colors[ind], self.colors[0]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def set_color_cycle(self, clist):
self.colors = clist[:]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def _get_next_cycle_color(self):
if self.count==0:
color = self.firstColor
else:
color = self.colors[int(self.count % self.Ncolors)]
self.count += 1
return color
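# Cycling sketch: with the seven default colors, the eighth call wraps
# around (count % Ncolors), so the eighth line reuses the first color 'b'.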
def __call__(self, *args, **kwargs):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
xunits = kwargs.pop( 'xunits', self.axes.xaxis.units)
yunits = kwargs.pop( 'yunits', self.axes.yaxis.units)
if xunits!=self.axes.xaxis.units:
self.axes.xaxis.set_units(xunits)
if yunits!=self.axes.yaxis.units:
self.axes.yaxis.set_units(yunits)
ret = self._grab_next_args(*args, **kwargs)
return ret
def set_lineprops(self, line, **kwargs):
assert self.command == 'plot', 'set_lineprops only works with "plot"'
for key, val in kwargs.items():
funcName = "set_{0!s}".format(key)
if not hasattr(line,funcName):
raise TypeError, 'There is no line property "{0!s}"'.format(key)
func = getattr(line,funcName)
func(val)
def set_patchprops(self, fill_poly, **kwargs):
assert self.command == 'fill', 'set_patchprops only works with "fill"'
for key, val in kwargs.items():
funcName = "set_{0!s}".format(key)
if not hasattr(fill_poly,funcName):
raise TypeError, 'There is no patch property "{0!s}"'.format(key)
func = getattr(fill_poly,funcName)
func(val)
def _xy_from_y(self, y):
if self.axes.yaxis is not None:
b = self.axes.yaxis.update_units(y)
if b: return np.arange(len(y)), y, False
if not ma.isMaskedArray(y):
y = np.asarray(y)
if len(y.shape) == 1:
y = y[:,np.newaxis]
nr, nc = y.shape
x = np.arange(nr)
if len(x.shape) == 1:
x = x[:,np.newaxis]
return x,y, True
def _xy_from_xy(self, x, y):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
bx = self.axes.xaxis.update_units(x)
by = self.axes.yaxis.update_units(y)
# right now multicol is not supported if either x or y are
# unit enabled but this can be fixed..
if bx or by: return x, y, False
x = ma.asarray(x)
y = ma.asarray(y)
if len(x.shape) == 1:
x = x[:,np.newaxis]
if len(y.shape) == 1:
y = y[:,np.newaxis]
nrx, ncx = x.shape
nry, ncy = y.shape
assert nrx == nry, 'Dimensions of x and y are incompatible'
if ncx == ncy:
return x, y, True
if ncx == 1:
x = np.repeat(x, ncy, axis=1)
if ncy == 1:
y = np.repeat(y, ncx, axis=1)
assert x.shape == y.shape, 'Dimensions of x and y are incompatible'
return x, y, True
def _plot_1_arg(self, y, **kwargs):
assert self.command == 'plot', 'fill needs at least 2 arguments'
ret = []
x, y, multicol = self._xy_from_y(y)
if multicol:
for j in xrange(y.shape[1]):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y[:,j],
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
else:
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
return ret
def _plot_2_args(self, tup2, **kwargs):
ret = []
if is_string_like(tup2[1]):
assert self.command == 'plot', ('fill needs at least 2 non-string '
'arguments')
y, fmt = tup2
x, y, multicol = self._xy_from_y(y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
if multicol:
for j in xrange(y.shape[1]):
makeline(x[:,j], y[:,j])
else:
makeline(x, y)
return ret
else:
x, y = tup2
x, y, multicol = self._xy_from_xy(x, y)
def makeline(x, y):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
facecolor = self._get_next_cycle_color()
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _plot_3_args(self, tup3, **kwargs):
ret = []
x, y, fmt = tup3
x, y, multicol = self._xy_from_xy(x, y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
facecolor = color
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _grab_next_args(self, *args, **kwargs):
remaining = args
while 1:
if len(remaining)==0: return
if len(remaining)==1:
for seg in self._plot_1_arg(remaining[0], **kwargs):
yield seg
remaining = []
continue
if len(remaining)==2:
for seg in self._plot_2_args(remaining, **kwargs):
yield seg
remaining = []
continue
if len(remaining)==3:
if not is_string_like(remaining[2]):
raise ValueError, 'third arg must be a format string'
for seg in self._plot_3_args(remaining, **kwargs):
yield seg
remaining=[]
continue
if is_string_like(remaining[2]):
for seg in self._plot_3_args(remaining[:3], **kwargs):
yield seg
remaining=remaining[3:]
else:
for seg in self._plot_2_args(remaining[:2], **kwargs):
yield seg
remaining=remaining[2:]
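# Argument-grouping sketch: _grab_next_args peels off (x, y) pairs or
# (x, y, fmt) triples from left to right, so
#   plot(t1, s1, 'ko', t2, s2, 'r--')
# is consumed as (t1, s1, 'ko') and then (t2, s2, 'r--').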
class Axes(martist.Artist):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
name = "rectilinear"
_shared_x_axes = cbook.Grouper()
_shared_y_axes = cbook.Grouper()
def __str__(self):
return "Axes({0:g},{1:g};{2:g}x{3:g})".format(*tuple(self._position.bounds))
def __init__(self, fig, rect,
axisbg = None, # defaults to rc axes.facecolor
frameon = True,
sharex=None, # use Axes instance's xaxis info
sharey=None, # use Axes instance's yaxis info
label='',
**kwargs
):
"""
Build an :class:`Axes` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*adjustable* [ 'box' | 'datalim' ]
*alpha* float: the alpha transparency
*anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',
'NW', 'W' ]
*aspect* [ 'auto' | 'equal' | aspect_ratio ]
*autoscale_on* [ *True* | *False* ] whether or not to
autoscale the *viewlim*
*axis_bgcolor* any matplotlib color, see
:func:`~matplotlib.pyplot.colors`
*axisbelow* draw the grids and ticks below the other
artists
*cursor_props* a (*float*, *color*) tuple
*figure* a :class:`~matplotlib.figure.Figure`
instance
*frame_on* a boolean - draw the axes frame
*label* the axes label
*navigate* [ *True* | *False* ]
*navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation
toolbar button status
*position* [left, bottom, width, height] in
class:`~matplotlib.figure.Figure` coords
*sharex* an class:`~matplotlib.axes.Axes` instance
to share the x-axis with
*sharey* an class:`~matplotlib.axes.Axes` instance
to share the y-axis with
*title* the title string
*visible* [ *True* | *False* ] whether the axes is
visible
*xlabel* the xlabel
*xlim* (*xmin*, *xmax*) view limits
*xscale* [{scale!s}]
*xticklabels* sequence of strings
*xticks* sequence of floats
*ylabel* the ylabel strings
*ylim* (*ymin*, *ymax*) view limits
*yscale* [{scale!s}]
*yticklabels* sequence of strings
*yticks* sequence of floats
================ =========================================
""".format(**{'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])})
martist.Artist.__init__(self)
if isinstance(rect, mtransforms.Bbox):
self._position = rect
else:
self._position = mtransforms.Bbox.from_bounds(*rect)
self._originalPosition = self._position.frozen()
self.set_axes(self)
self.set_aspect('auto')
self._adjustable = 'box'
self.set_anchor('C')
self._sharex = sharex
self._sharey = sharey
if sharex is not None:
self._shared_x_axes.join(self, sharex)
if sharex._adjustable == 'box':
sharex._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
if sharey is not None:
self._shared_y_axes.join(self, sharey)
if sharey._adjustable == 'box':
sharey._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
self.set_label(label)
self.set_figure(fig)
# this call may differ for non-separable axes, e.g. polar
self._init_axis()
if axisbg is None: axisbg = rcParams['axes.facecolor']
self._axisbg = axisbg
self._frameon = frameon
self._axisbelow = rcParams['axes.axisbelow']
self._hold = rcParams['axes.hold']
self._connected = {} # a dict from events to (id, func)
self.cla()
# funcs used to format x and y - fall back on major formatters
self.fmt_xdata = None
self.fmt_ydata = None
self.set_cursor_props((1,'k')) # set the cursor properties for axes
self._cachedRenderer = None
self.set_navigate(True)
self.set_navigate_mode(None)
if len(kwargs): martist.setp(self, **kwargs)
if self.xaxis is not None:
self._xcid = self.xaxis.callbacks.connect('units finalize',
self.relim)
if self.yaxis is not None:
self._ycid = self.yaxis.callbacks.connect('units finalize',
self.relim)
def get_window_extent(self, *args, **kwargs):
'''
get the axes bounding box in display space; *args* and
*kwargs* are empty
'''
return self.bbox
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
self._update_transScale()
def set_figure(self, fig):
"""
Set the class:`~matplotlib.axes.Axes` figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)
#these will be updated later as data is added
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
self._set_lim_and_transforms()
def _set_lim_and_transforms(self):
"""
set the *dataLim* and *viewLim*
:class:`~matplotlib.transforms.Bbox` attributes and the
*transScale*, *transData*, *transLimits* and *transAxes*
transformations.
"""
self.transAxes = mtransforms.BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
# An affine transformation on the data, generally to limit the
# range of the axes
self.transLimits = mtransforms.BboxTransformFrom(
mtransforms.TransformedBbox(self.viewLim, self.transScale))
# The parentheses are important for efficiency here -- they
# group the last two (which are usually affines) separately
# from the first (which, with log-scaling can be non-affine).
self.transData = self.transScale + (self.transLimits + self.transAxes)
self._xaxis_transform = mtransforms.blended_transform_factory(
self.axes.transData, self.axes.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.axes.transAxes, self.axes.transData)
def get_xaxis_transform(self):
"""
Get the transformation used for drawing x-axis labels, ticks
and gridlines. The x-direction is in data coordinates and the
y-direction is in axis coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing x-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in data coordinates
and the y-direction is in axis coordinates. Returns a
3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,
self.figure.dpi_scale_trans),
"top", "center")
def get_xaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary x-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in data
coordinates and the y-direction is in axis coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, pad_points / 72.0,
self.figure.dpi_scale_trans),
"bottom", "center")
def get_yaxis_transform(self):
"""
Get the transformation used for drawing y-axis labels, ticks
and gridlines. The x-direction is in axis coordinates and the
y-direction is in data coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing y-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in axis coordinates
and the y-direction is in data coordinates. Returns a 3-tuple
of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "right")
def get_yaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary y-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in axis
coordinates and the y-direction is in data coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "left")
def _update_transScale(self):
self.transScale.set(
mtransforms.blended_transform_factory(
self.xaxis.get_transform(), self.yaxis.get_transform()))
if hasattr(self, "lines"):
for line in self.lines:
line._transformed_path.invalidate()
def get_position(self, original=False):
'Return a copy of the axes rectangle as a Bbox'
if original:
return self._originalPosition.frozen()
else:
return self._position.frozen()
def set_position(self, pos, which='both'):
"""
Set the axes position with::
pos = [left, bottom, width, height]
in relative 0,1 coords, or *pos* can be a
:class:`~matplotlib.transforms.Bbox`
There are two position variables: one which is ultimately
used, but which may be modified by :meth:`apply_aspect`, and a
second which is the starting point for :meth:`apply_aspect`.
Optional keyword arguments:
*which*
========== ====================
value description
========== ====================
'active' to change the first
'original' to change the second
'both' to change both
========== ====================
"""
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
if which in ('both', 'active'):
self._position.set(pos)
if which in ('both', 'original'):
self._originalPosition.set(pos)
def reset_position(self):
'Make the original position the active position'
pos = self.get_position(original=True)
self.set_position(pos, which='active')
def _set_artist_props(self, a):
'set the boilerplate props for artists added to axes'
a.set_figure(self.figure)
if not a.is_transform_set():
a.set_transform(self.transData)
a.set_axes(self)
def _gen_axes_patch(self):
"""
Returns the patch used to draw the background of the axes. It
is also used as the clipping path for any data elements on the
axes.
In the standard axes, this is a rectangle, but in other
projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
def cla(self):
'Clear the current axes'
# Note: this is called by Axes.__init__()
self.xaxis.cla()
self.yaxis.cla()
self.ignore_existing_data_limits = True
self.callbacks = cbook.CallbackRegistry(('xlim_changed',
'ylim_changed'))
if self._sharex is not None:
# major and minor are class instances with
# locator and formatter attributes
self.xaxis.major = self._sharex.xaxis.major
self.xaxis.minor = self._sharex.xaxis.minor
x0, x1 = self._sharex.get_xlim()
self.set_xlim(x0, x1, emit=False)
self.xaxis.set_scale(self._sharex.xaxis.get_scale())
else:
self.xaxis.set_scale('linear')
if self._sharey is not None:
self.yaxis.major = self._sharey.yaxis.major
self.yaxis.minor = self._sharey.yaxis.minor
y0, y1 = self._sharey.get_ylim()
self.set_ylim(y0, y1, emit=False)
self.yaxis.set_scale(self._sharey.yaxis.get_scale())
else:
self.yaxis.set_scale('linear')
self._autoscaleon = True
self._update_transScale() # needed?
self._get_lines = _process_plot_var_args(self)
self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
self._gridOn = rcParams['axes.grid']
self.lines = []
self.patches = []
self.texts = []
self.tables = []
self.artists = []
self.images = []
self.legend_ = None
self.collections = [] # collection.Collection instances
self.grid(self._gridOn)
props = font_manager.FontProperties(size=rcParams['axes.titlesize'])
self.titleOffsetTrans = mtransforms.ScaledTranslation(
0.0, 5.0 / 72.0, self.figure.dpi_scale_trans)
self.title = mtext.Text(
x=0.5, y=1.0, text='',
fontproperties=props,
verticalalignment='bottom',
horizontalalignment='center',
)
self.title.set_transform(self.transAxes + self.titleOffsetTrans)
self.title.set_clip_box(None)
self._set_artist_props(self.title)
# the patch draws the background of the axes. we want this to
# be below the other artists; the axesPatch name is
# deprecated. We use the frame to draw the edges so we are
# setting the edgecolor to None
self.patch = self.axesPatch = self._gen_axes_patch()
self.patch.set_figure(self.figure)
self.patch.set_facecolor(self._axisbg)
self.patch.set_edgecolor('None')
self.patch.set_linewidth(0)
self.patch.set_transform(self.transAxes)
# the frame draws the border around the axes and we want this
# above. this is a place holder for a more sophisticated
# artist that might just draw a left, bottom frame, or a
# centered frame, etc the axesFrame name is deprecated
self.frame = self.axesFrame = self._gen_axes_patch()
self.frame.set_figure(self.figure)
self.frame.set_facecolor('none')
self.frame.set_edgecolor(rcParams['axes.edgecolor'])
self.frame.set_linewidth(rcParams['axes.linewidth'])
self.frame.set_transform(self.transAxes)
self.frame.set_zorder(2.5)
self.axison = True
self.xaxis.set_clip_path(self.patch)
self.yaxis.set_clip_path(self.patch)
self._shared_x_axes.clean()
self._shared_y_axes.clean()
def clear(self):
'clear the axes'
self.cla()
def set_color_cycle(self, clist):
"""
Set the color cycle for any future plot commands on this Axes.
clist is a list of mpl color specifiers.
"""
self._get_lines.set_color_cycle(clist)
def ishold(self):
'return the HOLD status of the axes'
return self._hold
def hold(self, b=None):
"""
call signature::
hold(b=None)
Set the hold state. If *hold* is *None* (default), toggle the
*hold* state. Else set the *hold* state to boolean value *b*.
Examples:
* toggle hold:
>>> hold()
* turn hold on:
>>> hold(True)
* turn hold off
>>> hold(False)
When hold is True, subsequent plot commands will be added to
the current axes. When hold is False, the current axes and
figure will be cleared on the next plot command
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def get_aspect(self):
return self._aspect
def set_aspect(self, aspect, adjustable=None, anchor=None):
"""
*aspect*
======== ================================================
value description
======== ================================================
'auto' automatic; fill position rectangle with data
'normal' same as 'auto'; deprecated
'equal' same scaling from data to plot units for x and y
num a circle will be stretched such that the height
is num times the width. aspect=1 is the same as
aspect='equal'.
======== ================================================
*adjustable*
========= ============================
value description
========= ============================
'box' change physical size of axes
'datalim' change xlim or ylim
========= ============================
*anchor*
===== =====================
value description
===== =====================
'C' centered
'SW' lower left corner
'S' middle of bottom edge
'SE' lower right corner
etc.
===== =====================
"""
if aspect in ('normal', 'auto'):
self._aspect = 'auto'
elif aspect == 'equal':
self._aspect = 'equal'
else:
self._aspect = float(aspect) # raise ValueError if necessary
if adjustable is not None:
self.set_adjustable(adjustable)
if anchor is not None:
self.set_anchor(anchor)
def get_adjustable(self):
return self._adjustable
def set_adjustable(self, adjustable):
"""
ACCEPTS: [ 'box' | 'datalim' ]
"""
if adjustable in ('box', 'datalim'):
if self in self._shared_x_axes or self in self._shared_y_axes:
if adjustable == 'box':
raise ValueError(
'adjustable must be "datalim" for shared axes')
self._adjustable = adjustable
else:
raise ValueError('argument must be "box", or "datalim"')
def get_anchor(self):
return self._anchor
def set_anchor(self, anchor):
"""
*anchor*
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:
self._anchor = anchor
else:
raise ValueError('argument must be among {0!s}'.format(
', '.join(mtransforms.Bbox.coefs.keys())))
def get_data_ratio(self):
"""
Returns the aspect ratio of the raw data.
This method is intended to be overridden by new projection
types.
"""
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
return ysize/xsize
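# Sketch: with xbound (0, 10) and ybound (0, 5) this returns 5/10 = 0.5;
# the 1e-30 floor only guards against zero-sized bounds.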
def apply_aspect(self, position=None):
'''
Use :attr:`_aspect` and :attr:`_adjustable` to modify the
axes box or the view limits.
'''
if position is None:
position = self.get_position(original=True)
aspect = self.get_aspect()
if aspect == 'auto':
self.set_position( position , which='active')
return
if aspect == 'equal':
A = 1
else:
A = aspect
#Ensure at drawing time that any Axes involved in axis-sharing
# does not have its position changed.
if self in self._shared_x_axes or self in self._shared_y_axes:
if self._adjustable == 'box':
self._adjustable = 'datalim'
warnings.warn(
'shared axes: "adjustable" is being changed to "datalim"')
figW,figH = self.get_figure().get_size_inches()
fig_aspect = figH/figW
if self._adjustable == 'box':
box_aspect = A * self.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
self.set_position(pb1.anchored(self.get_anchor(), pb), 'active')
return
# reset active to original in case it had been changed
# by prior use of 'box'
self.set_position(position, which='active')
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
l,b,w,h = position.bounds
box_aspect = fig_aspect * (h/w)
data_ratio = box_aspect / A
y_expander = (data_ratio*xsize/ysize - 1.0)
#print 'y_expander', y_expander
# If y_expander > 0, the dy/dx viewLim ratio needs to increase
if abs(y_expander) < 0.005:
#print 'good enough already'
return
dL = self.dataLim
xr = 1.05 * dL.width
yr = 1.05 * dL.height
xmarg = xsize - xr
ymarg = ysize - yr
Ysize = data_ratio * xsize
Xsize = ysize / data_ratio
Xmarg = Xsize - xr
Ymarg = Ysize - yr
xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help.
ym = 0
#print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax
#print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize
changex = (self in self._shared_y_axes
and self not in self._shared_x_axes)
changey = (self in self._shared_x_axes
and self not in self._shared_y_axes)
if changex and changey:
warnings.warn("adjustable='datalim' cannot work with shared "
"x and y axes")
return
if changex:
adjust_y = False
else:
#print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg
if xmarg > xm and ymarg > ym:
adjy = ((Ymarg > 0 and y_expander < 0)
or (Xmarg < 0 and y_expander > 0))
else:
adjy = y_expander > 0
#print 'y_expander, adjy', y_expander, adjy
adjust_y = changey or adjy #(Ymarg > xmarg)
if adjust_y:
yc = 0.5*(ymin+ymax)
y0 = yc - Ysize/2.0
y1 = yc + Ysize/2.0
self.set_ybound((y0, y1))
#print 'New y0, y1:', y0, y1
#print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize
else:
xc = 0.5*(xmin+xmax)
x0 = xc - Xsize/2.0
x1 = xc + Xsize/2.0
self.set_xbound((x0, x1))
#print 'New x0, x1:', x0, x1
#print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0)
def axis(self, *v, **kwargs):
'''
Convenience method for manipulating the x and y view limits
and the aspect ratio of the plot.
*kwargs* are passed on to :meth:`set_xlim` and
:meth:`set_ylim`
'''
if len(v)==1 and is_string_like(v[0]):
s = v[0].lower()
if s=='on': self.set_axis_on()
elif s=='off': self.set_axis_off()
elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'):
self.set_autoscale_on(True)
self.set_aspect('auto')
self.autoscale_view()
# self.apply_aspect()
if s=='equal':
self.set_aspect('equal', adjustable='datalim')
elif s == 'scaled':
self.set_aspect('equal', adjustable='box', anchor='C')
self.set_autoscale_on(False) # Req. by Mark Bakker
elif s=='tight':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
elif s == 'image':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
self.set_aspect('equal', adjustable='box', anchor='C')
else:
raise ValueError('Unrecognized string %s to axis; '
'try on or off' % s)
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
try: v[0]
except IndexError:
emit = kwargs.get('emit', True)
xmin = kwargs.get('xmin', None)
xmax = kwargs.get('xmax', None)
xmin, xmax = self.set_xlim(xmin, xmax, emit)
ymin = kwargs.get('ymin', None)
ymax = kwargs.get('ymax', None)
ymin, ymax = self.set_ylim(ymin, ymax, emit)
return xmin, xmax, ymin, ymax
v = v[0]
if len(v) != 4:
raise ValueError('v must contain [xmin xmax ymin ymax]')
self.set_xlim([v[0], v[1]])
self.set_ylim([v[2], v[3]])
return v
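# Usage sketch (values illustrative):
#   ax.axis('off')             # hide axis lines, ticks and labels
#   ax.axis('equal')           # equal data scaling via adjustable='datalim'
#   ax.axis([0, 10, -1, 1])    # set xmin, xmax, ymin, ymax directly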
def get_child_artists(self):
"""
Return a list of artists the axes contains.
.. deprecated:: 0.98
"""
raise DeprecationWarning('Use get_children instead')
def get_frame(self):
'Return the axes Rectangle frame'
warnings.warn('use ax.patch instead', DeprecationWarning)
return self.patch
def get_legend(self):
'Return the legend.Legend instance, or None if no legend is defined'
return self.legend_
def get_images(self):
'return a list of Axes images contained by the Axes'
return cbook.silent_list('AxesImage', self.images)
def get_lines(self):
'Return a list of lines contained by the Axes'
return cbook.silent_list('Line2D', self.lines)
def get_xaxis(self):
'Return the XAxis instance'
return self.xaxis
def get_xgridlines(self):
'Get the x grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines())
def get_xticklines(self):
'Get the xtick lines as a list of Line2D instances'
return cbook.silent_list('Line2D xtickline', self.xaxis.get_ticklines())
def get_yaxis(self):
'Return the YAxis instance'
return self.yaxis
def get_ygridlines(self):
'Get the y grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines())
def get_yticklines(self):
'Get the ytick lines as a list of Line2D instances'
return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines())
#### Adding and tracking artists
def has_data(self):
'''Return *True* if any artists have been added to axes.
This should not be used to determine whether the *dataLim*
needs to be updated, and may not actually be useful for
anything.
'''
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches))>0
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the axes'
a.set_axes(self)
self.artists.append(a)
self._set_artist_props(a)
a.set_clip_path(self.patch)
a._remove_method = lambda h: self.artists.remove(h)
def add_collection(self, collection, autolim=True):
'''
add a :class:`~matplotlib.collections.Collection` instance
to the axes
'''
label = collection.get_label()
if not label:
collection.set_label('collection{0:d}'.format(len(self.collections)))
self.collections.append(collection)
self._set_artist_props(collection)
collection.set_clip_path(self.patch)
if autolim:
if collection._paths and len(collection._paths):
self.update_datalim(collection.get_datalim(self.transData))
collection._remove_method = lambda h: self.collections.remove(h)
def add_line(self, line):
'''
Add a :class:`~matplotlib.lines.Line2D` to the list of plot
lines
'''
self._set_artist_props(line)
line.set_clip_path(self.patch)
self._update_line_limits(line)
if not line.get_label():
line.set_label('_line{0:d}'.format(len(self.lines)))
self.lines.append(line)
line._remove_method = lambda h: self.lines.remove(h)
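# Usage sketch (illustrative, not part of the library): add_line() is
# the low-level path used by plot(). Assumes ax is an existing Axes
# instance (eg. created as in the sketch after axis() above).
#
#   import matplotlib.lines as mlines
#   line = mlines.Line2D([0, 1, 2], [0, 1, 4], color='r')
#   ax.add_line(line)         # updates the data limits only
#   ax.autoscale_view()       # pull the view limits up to the data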
def _update_line_limits(self, line):
p = line.get_path()
if p.vertices.size > 0:
self.dataLim.update_from_path(p, self.ignore_existing_data_limits,
updatex=line.x_isdata,
updatey=line.y_isdata)
self.ignore_existing_data_limits = False
def add_patch(self, p):
"""
Add a :class:`~matplotlib.patches.Patch` *p* to the list of
axes patches; the clipbox will be set to the Axes clipping
box. If the transform is not set, it will be set to
:attr:`transData`.
"""
self._set_artist_props(p)
p.set_clip_path(self.patch)
self._update_patch_limits(p)
self.patches.append(p)
p._remove_method = lambda h: self.patches.remove(h)
def _update_patch_limits(self, patch):
'update the data limits for patch *p*'
# hist can add zero height Rectangles, which is useful to keep
# the bins, counts and patches lined up, but it throws off log
# scaling. We'll ignore rects with zero height or width in
# the auto-scaling
if (isinstance(patch, mpatches.Rectangle) and
(patch.get_width()==0 or patch.get_height()==0)):
return
vertices = patch.get_path().vertices
if vertices.size > 0:
xys = patch.get_patch_transform().transform(vertices)
if patch.get_data_transform() != self.transData:
transform = (patch.get_data_transform() +
self.transData.inverted())
xys = transform.transform(xys)
self.update_datalim(xys, updatex=patch.x_isdata,
updatey=patch.y_isdata)
def add_table(self, tab):
'''
Add a :class:`~matplotlib.tables.Table` instance to the
list of axes tables
'''
self._set_artist_props(tab)
self.tables.append(tab)
tab.set_clip_path(self.patch)
tab._remove_method = lambda h: self.tables.remove(h)
def relim(self):
'recompute the data limits based on current artists'
# Collections are deliberately not supported (yet); see
# the TODO note in artists.py.
self.dataLim.ignore(True)
self.ignore_existing_data_limits = True
for line in self.lines:
self._update_line_limits(line)
for p in self.patches:
self._update_patch_limits(p)
def update_datalim(self, xys, updatex=True, updatey=True):
'Update the data lim bbox with seq of xy tups or equiv. 2-D array'
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(xys) and not len(xys): return
if not ma.isMaskedArray(xys):
xys = np.asarray(xys)
self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
updatex=updatex, updatey=updatey)
self.ignore_existing_data_limits = False
def update_datalim_numerix(self, x, y):
'Update the data lim bbox with seq of xy tups'
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(x) and not len(x): return
self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)
self.ignore_existing_data_limits = False
def update_datalim_bounds(self, bounds):
'''
Update the datalim to include the given
:class:`~matplotlib.transforms.Bbox` *bounds*
'''
self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
'look for unit *kwargs* and update the axis instances as necessary'
if self.xaxis is None or self.yaxis is None: return
#print 'processing', self.get_geometry()
if xdata is not None:
# we only need to update if there is nothing set yet.
if not self.xaxis.have_units():
self.xaxis.update_units(xdata)
#print '\tset from xdata', self.xaxis.units
if ydata is not None:
# we only need to update if there is nothing set yet.
if not self.yaxis.have_units():
self.yaxis.update_units(ydata)
#print '\tset from ydata', self.yaxis.units
# process kwargs 2nd since these will override default units
if kwargs is not None:
xunits = kwargs.pop( 'xunits', self.xaxis.units)
if xunits!=self.xaxis.units:
#print '\tkw setting xunits', xunits
self.xaxis.set_units(xunits)
# If the units being set imply a different converter,
# we need to update.
if xdata is not None:
self.xaxis.update_units(xdata)
yunits = kwargs.pop('yunits', self.yaxis.units)
if yunits!=self.yaxis.units:
#print '\tkw setting yunits', yunits
self.yaxis.set_units(yunits)
# If the units being set imply a different converter,
# we need to update.
if ydata is not None:
self.yaxis.update_units(ydata)
def in_axes(self, mouseevent):
'''
return *True* if the given *mouseevent* (in display coords)
is in the Axes
'''
return self.patch.contains(mouseevent)[0]
def get_autoscale_on(self):
"""
Get whether autoscaling is applied on plot commands
"""
return self._autoscaleon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
ACCEPTS: [ *True* | *False* ]
"""
self._autoscaleon = b
def autoscale_view(self, tight=False, scalex=True, scaley=True):
"""
autoscale the view limits using the data limits. You can
selectively autoscale only a single axis, eg, the xaxis by
setting *scaley* to *False*. The autoscaling preserves any
axis direction reversal that has already been done.
"""
# if image data only just use the datalim
if not self._autoscaleon: return
if scalex:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = bb.intervalx
if scaley:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = bb.intervaly
if (tight or (len(self.images)>0 and
len(self.lines)==0 and
len(self.patches)==0)):
if scalex:
self.set_xbound(x0, x1)
if scaley:
self.set_ybound(y0, y1)
return
if scalex:
XL = self.xaxis.get_major_locator().view_limits(x0, x1)
self.set_xbound(XL)
if scaley:
YL = self.yaxis.get_major_locator().view_limits(y0, y1)
self.set_ybound(YL)
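# Usage sketch (illustrative, not part of the library): selective
# autoscaling with autoscale_view(). Assumes ax is an existing Axes
# with some data already plotted.
#
#   ax.set_xlim(0, 10)                            # hand-set x window
#   ax.autoscale_view(scalex=False, scaley=True)  # rescale y only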
#### Drawing
def draw(self, renderer=None, inframe=False):
"Draw everything (plot lines, axes, labels)"
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible(): return
renderer.open_group('axes')
self.apply_aspect()
# the patch draws the background rectangle -- the frame below
# will draw the edges
if self.axison and self._frameon:
self.patch.draw(renderer)
artists = []
if len(self.images)<=1 or renderer.option_image_nocomposite():
for im in self.images:
im.draw(renderer)
else:
# make a composite image blending alpha
# list of (mimage.Image, ox, oy)
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag),0,0)
for im in self.images if im.get_visible()]
l, b, r, t = self.bbox.extents
width = mag*((round(r) + 0.5) - (round(l) - 0.5))
height = mag*((round(t) + 0.5) - (round(b) - 0.5))
im = mimage.from_images(height,
width,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
# composite images need special args so they will not
# respect z-order for now
renderer.draw_image(
round(l), round(b), im, self.bbox,
self.patch.get_path(),
self.patch.get_transform())
artists.extend(self.collections)
artists.extend(self.patches)
artists.extend(self.lines)
artists.extend(self.texts)
artists.extend(self.artists)
if self.axison and not inframe:
if self._axisbelow:
self.xaxis.set_zorder(0.5)
self.yaxis.set_zorder(0.5)
else:
self.xaxis.set_zorder(2.5)
self.yaxis.set_zorder(2.5)
artists.extend([self.xaxis, self.yaxis])
if not inframe: artists.append(self.title)
artists.extend(self.tables)
if self.legend_ is not None:
artists.append(self.legend_)
# the frame draws the edges around the axes patch -- we
# decouple these so the patch can be in the background and the
# frame in the foreground.
if self.axison and self._frameon:
artists.append(self.frame)
dsu = [ (a.zorder, i, a) for i, a in enumerate(artists)
if not a.get_animated() ]
dsu.sort()
for zorder, i, a in dsu:
a.draw(renderer)
renderer.close_group('axes')
self._cachedRenderer = renderer
def draw_artist(self, a):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def redraw_in_frame(self):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
self.draw(self._cachedRenderer, inframe=True)
def get_renderer_cache(self):
return self._cachedRenderer
def __draw_animate(self):
# ignore for now; broken
if self._lastRenderer is None:
raise RuntimeError('You must first call ax.draw()')
dsu = [(a.zorder, a) for a in self.animated.keys()]
dsu.sort()
renderer = self._lastRenderer
renderer.blit()
for tmp, a in dsu:
a.draw(renderer)
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the axes rectangle patch is drawn
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the axes rectangle patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._frameon = b
def get_axisbelow(self):
"""
Get whether axis below is true or not
"""
return self._axisbelow
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below most artists
ACCEPTS: [ *True* | *False* ]
"""
self._axisbelow = b
def grid(self, b=None, **kwargs):
"""
call signature::
grid(self, b=None, **kwargs)
Set the axes grids on or off; *b* is a boolean
If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If
*kwargs* are supplied, it is assumed that you want a grid and *b*
is thus set to *True*
*kwargs* are used to set the grid line properties, eg::
ax.grid(color='r', linestyle='-', linewidth=2)
Valid :class:`~matplotlib.lines.Line2D` kwargs are
%(Line2D)s
"""
if len(kwargs): b = True
self.xaxis.grid(b, **kwargs)
self.yaxis.grid(b, **kwargs)
grid.__doc__ = cbook.dedent(grid.__doc__) % martist.kwdocd
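# Usage sketch (illustrative, not part of the library): grid() toggles
# the grid, and any kwargs force it on. Assumes ax is an existing Axes.
#
#   ax.grid(True)                                   # plain grid
#   ax.grid(color='r', linestyle='-', linewidth=2)  # styled; b -> True
#   ax.grid(False)                                  # off again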
def ticklabel_format(self, **kwargs):
"""
Convenience method for manipulating the ScalarFormatter
used by default for linear axes.
Optional keyword arguments:
============ =====================================
Keyword Description
============ =====================================
*style* [ 'sci' (or 'scientific') | 'plain' ]
plain turns off scientific notation
*scilimits* (m, n), pair of integers; if *style*
is 'sci', scientific notation will
be used for numbers outside the range
10\ :sup:`m` to 10\ :sup:`n`.
Use (0,0) to include all numbers.
*axis* [ 'x' | 'y' | 'both' ]
============ =====================================
Only the major ticks are affected.
If the method is called when the
:class:`~matplotlib.ticker.ScalarFormatter` is not the
:class:`~matplotlib.ticker.Formatter` being used, an
:exc:`AttributeError` will be raised.
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
axis = kwargs.pop('axis', 'both').lower()
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
raise NotImplementedError, "comma style remains to be added"
elif style == '':
sb = None
else:
raise ValueError, "%s is not a valid style value"
try:
if sb is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_scientific(sb)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_powerlimits(scilimits)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def set_axis_off(self):
"""turn off the axis"""
self.axison = False
def set_axis_on(self):
"""turn on the axis"""
self.axison = True
def get_axis_bgcolor(self):
'Return the axis background color'
return self._axisbg
def set_axis_bgcolor(self, color):
"""
set the axes background color
ACCEPTS: any matplotlib color - see
:func:`~matplotlib.pyplot.colors`
"""
self._axisbg = color
self.patch.set_facecolor(color)
### data limits, ticks, tick labels, and formatting
def invert_xaxis(self):
"Invert the x-axis."
left, right = self.get_xlim()
self.set_xlim(right, left)
def xaxis_inverted(self):
'Returns True if the x-axis is inverted.'
left, right = self.get_xlim()
return right < left
def get_xbound(self):
"""
Returns the x-axis numerical bounds where::
lowerBound < upperBound
"""
left, right = self.get_xlim()
if left < right:
return left, right
else:
return right, left
def set_xbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the x-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_xbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.xaxis_inverted():
if lower < upper:
self.set_xlim(upper, lower)
else:
self.set_xlim(lower, upper)
else:
if lower < upper:
self.set_xlim(lower, upper)
else:
self.set_xlim(upper, lower)
def get_xlim(self):
"""
Get the x-axis range [*xmin*, *xmax*]
"""
return tuple(self.viewLim.intervalx)
def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs):
"""
call signature::
set_xlim(self, *args, **kwargs)
Set the limits for the xaxis
Returns the current xlimits as a length 2 tuple: [*xmin*, *xmax*]
Examples::
set_xlim((valmin, valmax))
set_xlim(valmin, valmax)
set_xlim(xmin=1) # xmax unchanged
set_xlim(xmax=1) # xmin unchanged
Keyword arguments:
*xmin*: scalar
the min of the xlim
*xmax*: scalar
the max of the xlim
*emit*: [ True | False ]
notify observers of lim change
ACCEPTS: len(2) sequence of floats
"""
if xmax is None and iterable(xmin):
xmin,xmax = xmin
self._process_unit_info(xdata=(xmin, xmax))
if xmin is not None:
xmin = self.convert_xunits(xmin)
if xmax is not None:
xmax = self.convert_xunits(xmax)
old_xmin,old_xmax = self.get_xlim()
if xmin is None: xmin = old_xmin
if xmax is None: xmax = old_xmax
xmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False)
xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax)
self.viewLim.intervalx = (xmin, xmax)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.viewLim.intervalx, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return xmin, xmax
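# Usage sketch (illustrative, not part of the library): the different
# calling forms of set_xlim(). Assumes ax is an existing Axes.
#
#   ax.set_xlim(0, 10)
#   ax.set_xlim((0, 10))        # same thing, as a sequence
#   ax.set_xlim(xmax=8)         # keep xmin, change only xmax
#   ax.set_xlim(10, 0)          # reversed order gives an inverted axis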
def get_xscale(self):
"""Return the xaxis scale string (one of the names returned by
:func:`matplotlib.scale.get_scale_names`)."""
return self.xaxis.get_scale()
def set_xscale(self, value, **kwargs):
"""
call signature::
set_xscale(value)
Set the scaling of the x-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.xaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_xscale.__doc__ = cbook.dedent(set_xscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_xticks(self, minor=False):
'Return the x ticks as a list of locations'
return self.xaxis.get_ticklocs(minor=minor)
def set_xticks(self, ticks, minor=False):
"""
Set the x ticks with list of *ticks*
ACCEPTS: sequence of floats
"""
return self.xaxis.set_ticks(ticks, minor=minor)
def get_xmajorticklabels(self):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_majorticklabels())
def get_xminorticklabels(self):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_minorticklabels())
def get_xticklabels(self, minor=False):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_ticklabels(minor=minor))
def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_xticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the xtick labels with list of strings *labels*. Return a
list of axis text instances.
*kwargs* set the :class:`~matplotlib.text.Text` properties.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.xaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_xticklabels.__doc__ = cbook.dedent(
set_xticklabels.__doc__) % martist.kwdocd
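# Usage sketch (illustrative, not part of the library): fix the tick
# locations first so the labels line up with them. Assumes ax is an
# existing Axes; rotation is a Text property passed through as a kwarg.
#
#   ax.set_xticks([0, 1, 2, 3])
#   ax.set_xticklabels(['zero', 'one', 'two', 'three'], rotation=45)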
def invert_yaxis(self):
"Invert the y-axis."
left, right = self.get_ylim()
self.set_ylim(right, left)
def yaxis_inverted(self):
'Returns True if the y-axis is inverted.'
left, right = self.get_ylim()
return right < left
def get_ybound(self):
"Return y-axis numerical bounds in the form of lowerBound < upperBound"
left, right = self.get_ylim()
if left < right:
return left, right
else:
return right, left
def set_ybound(self, lower=None, upper=None):
"""Set the lower and upper numerical bounds of the y-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_ybound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.yaxis_inverted():
if lower < upper:
self.set_ylim(upper, lower)
else:
self.set_ylim(lower, upper)
else:
if lower < upper:
self.set_ylim(lower, upper)
else:
self.set_ylim(upper, lower)
def get_ylim(self):
"""
Get the y-axis range [*ymin*, *ymax*]
"""
return tuple(self.viewLim.intervaly)
def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs):
"""
call signature::
set_ylim(self, *args, **kwargs)
Set the limits for the yaxis; v = [ymin, ymax]::
set_ylim((valmin, valmax))
set_ylim(valmin, valmax)
set_ylim(ymin=1) # ymax unchanged
set_ylim(ymax=1) # ymin unchanged
Keyword arguments:
*ymin*: scalar
the min of the ylim
*ymax*: scalar
the max of the ylim
*emit*: [ True | False ]
notify observers of lim change
Returns the current ylimits as a length 2 tuple
ACCEPTS: len(2) sequence of floats
"""
if ymax is None and iterable(ymin):
ymin,ymax = ymin
if ymin is not None:
ymin = self.convert_yunits(ymin)
if ymax is not None:
ymax = self.convert_yunits(ymax)
old_ymin,old_ymax = self.get_ylim()
if ymin is None: ymin = old_ymin
if ymax is None: ymax = old_ymax
ymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False)
ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax)
self.viewLim.intervaly = (ymin, ymax)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.viewLim.intervaly, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return ymin, ymax
def get_yscale(self):
"""Return the yaxis scale string (one of the names returned by
:func:`matplotlib.scale.get_scale_names`)."""
return self.yaxis.get_scale()
def set_yscale(self, value, **kwargs):
"""
call signature::
set_yscale(value)
Set the scaling of the y-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.yaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_yscale.__doc__ = cbook.dedent(set_yscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_yticks(self, minor=False):
'Return the y ticks as a list of locations'
return self.yaxis.get_ticklocs(minor=minor)
def set_yticks(self, ticks, minor=False):
"""
Set the y ticks with list of *ticks*
ACCEPTS: sequence of floats
Keyword arguments:
*minor*: [ False | True ]
Sets the minor ticks if True
"""
return self.yaxis.set_ticks(ticks, minor=minor)
def get_ymajorticklabels(self):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_majorticklabels())
def get_yminorticklabels(self):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_minorticklabels())
def get_yticklabels(self, minor=False):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_ticklabels(minor=minor))
def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_yticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the ytick labels with list of strings *labels*. Return a list of
:class:`~matplotlib.text.Text` instances.
*kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.yaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_yticklabels.__doc__ = cbook.dedent(
set_yticklabels.__doc__) % martist.kwdocd
def xaxis_date(self, tz=None):
"""Sets up x-axis ticks and labels that treat the x data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
xmin, xmax = self.dataLim.intervalx
if xmin==0.:
# no data has been added - let's set the default datalim.
# We should probably use a better proxy for whether the
# datalim has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(xdata=(dmin, dmax))
dmin, dmax = self.convert_xunits([dmin, dmax])
self.viewLim.intervalx = dmin, dmax
self.dataLim.intervalx = dmin, dmax
locator = self.xaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.xaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervalx[0]==0.:
self.viewLim.intervalx = tuple(self.dataLim.intervalx)
locator.refresh()
formatter = self.xaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.xaxis.set_major_formatter(formatter)
def yaxis_date(self, tz=None):
"""Sets up y-axis ticks and labels that treat the y data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
ymin, ymax = self.dataLim.intervaly
if ymin==0.:
# no data has been added - let's set the default datalim.
# We should probably use a better proxy for whether the
# datalim has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(ydata=(dmin, dmax))
dmin, dmax = self.convert_yunits([dmin, dmax])
self.viewLim.intervaly = dmin, dmax
self.dataLim.intervaly = dmin, dmax
locator = self.yaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.yaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervaly[0]==0.:
self.viewLim.intervaly = tuple(self.dataLim.intervaly)
locator.refresh()
formatter = self.yaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.yaxis.set_major_formatter(formatter)
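# Usage sketch (illustrative, not part of the library): xaxis_date() is
# normally triggered through plot_date(), but can be called directly
# after plotting date2num'd values. Assumes ax is an existing Axes.
#
#   import datetime
#   import matplotlib.dates as mdates
#   days = [datetime.date(2009, 1, d) for d in (1, 2, 3)]
#   ax.plot(mdates.date2num(days), [1, 4, 9], 'o')
#   ax.xaxis_date()    # install a date locator/formatter on the x axis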
def format_xdata(self, x):
"""
Return *x* string formatted. This function will use the attribute
self.fmt_xdata if it is callable, else will fall back on the xaxis
major formatter
"""
try: return self.fmt_xdata(x)
except TypeError:
func = self.xaxis.get_major_formatter().format_data_short
val = func(x)
return val
def format_ydata(self, y):
"""
Return y string formatted. This function will use the
:attr:`fmt_ydata` attribute if it is callable, else will fall
back on the yaxis major formatter
"""
try: return self.fmt_ydata(y)
except TypeError:
func = self.yaxis.get_major_formatter().format_data_short
val = func(y)
return val
def format_coord(self, x, y):
'return a format string formatting the *x*, *y* coord'
if x is None:
x = '???'
if y is None:
y = '???'
xs = self.format_xdata(x)
ys = self.format_ydata(y)
return 'x={0!s}, y={1!s}'.format(xs, ys)
#### Interactive manipulation
def can_zoom(self):
"""
Return *True* if this axes support the zoom box
"""
return True
def get_navigate(self):
"""
Get whether the axes responds to navigation commands
"""
return self._navigate
def set_navigate(self, b):
"""
Set whether the axes responds to navigation toolbar commands
ACCEPTS: [ True | False ]
"""
self._navigate = b
def get_navigate_mode(self):
"""
Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
"""
return self._navigate_mode
def set_navigate_mode(self, b):
"""
Set the navigation toolbar button status;
.. warning::
this is not a user-API function.
"""
self._navigate_mode = b
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
self._pan_start = cbook.Bunch(
lim = self.viewLim.frozen(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
bbox = self.bbox.frozen(),
x = x,
y = y
)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
del self._pan_start
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
def format_deltas(key, dx, dy):
if key=='control':
if(abs(dx)>abs(dy)):
dy = dx
else:
dx = dy
elif key=='x':
dy = 0
elif key=='y':
dx = 0
elif key=='shift':
if 2*abs(dx) < abs(dy):
dx=0
elif 2*abs(dy) < abs(dx):
dy=0
elif(abs(dx)>abs(dy)):
dy=dy/abs(dy)*abs(dx)
else:
dx=dx/abs(dx)*abs(dy)
return (dx,dy)
p = self._pan_start
dx = x - p.x
dy = y - p.y
if dx == 0 and dy == 0:
return
if button == 1:
dx, dy = format_deltas(key, dx, dy)
result = p.bbox.translated(-dx, -dy) \
.transformed(p.trans_inverse)
elif button == 3:
try:
dx = -dx / float(self.bbox.width)
dy = -dy / float(self.bbox.height)
dx, dy = format_deltas(key, dx, dy)
if self.get_aspect() != 'auto':
dx = 0.5 * (dx + dy)
dy = dx
alpha = np.power(10.0, (dx, dy))
start = p.trans_inverse.transform_point((p.x, p.y))
lim_points = p.lim.get_points()
result = start + alpha * (lim_points - start)
result = mtransforms.Bbox(result)
except OverflowError:
warnings.warn('Overflow while panning')
return
self.set_xlim(*result.intervalx)
self.set_ylim(*result.intervaly)
def get_cursor_props(self):
"""
return the cursor properties as a (*linewidth*, *color*)
tuple, where *linewidth* is a float and *color* is an RGBA
tuple
"""
return self._cursorProps
def set_cursor_props(self, *args):
"""
Set the cursor property as::
ax.set_cursor_props(linewidth, color)
or::
ax.set_cursor_props((linewidth, color))
ACCEPTS: a (*float*, *color*) tuple
"""
if len(args)==1:
lw, c = args[0]
elif len(args)==2:
lw, c = args
else:
raise ValueError('args must be a (linewidth, color) tuple')
c = mcolors.colorConverter.to_rgba(c)
self._cursorProps = lw, c
def connect(self, s, func):
"""
Register observers to be notified when certain events occur. The
callback function must have the following signature::
func(ax) # where ax is the instance making the callback.
The following events can be connected to:
'xlim_changed','ylim_changed'
The connection id is returned - you can use this with
disconnect to disconnect from the axes event
"""
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def disconnect(self, cid):
'disconnect from the Axes event.'
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def get_children(self):
'return a list of child artists'
children = []
children.append(self.xaxis)
children.append(self.yaxis)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.tables)
children.extend(self.artists)
children.extend(self.images)
if self.legend_ is not None:
children.append(self.legend_)
children.extend(self.collections)
children.append(self.title)
children.append(self.patch)
children.append(self.frame)
return children
def contains(self,mouseevent):
"""Test whether the mouse event occured in the axes.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
return self.patch.contains(mouseevent)
def pick(self, *args):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if mouseevent is over
the artist and the artist has picker set
"""
if len(args)>1:
raise DeprecationWarning('New pick API implemented -- '
'see API_CHANGES in the src distribution')
martist.Artist.pick(self,args[0])
def __pick(self, x, y, trans=None, among=None):
"""
Return the artist under point that is closest to the *x*, *y*.
If *trans* is *None*, *x* and *y* are in window coords,
(0,0 = lower left). Otherwise, *trans* is a
:class:`~matplotlib.transforms.Transform` that specifies the
coordinate system of *x*, *y*.
The selection of artists from amongst which the pick function
finds an artist can be narrowed using the optional keyword
argument *among*. If provided, this should be either a sequence
of permitted artists or a function taking an artist as its
argument and returning a true value if and only if that artist
can be selected.
Note this algorithm calculates distance to the vertices of the
polygon, so if you want to pick a patch, click on the edge!
"""
# MGDTODO: Needs updating
if trans is not None:
xywin = trans.transform_point((x,y))
else:
xywin = x,y
def dist_points(p1, p2):
'return the distance between two points'
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x1-x2)**2+(y1-y2)**2)
def dist_x_y(p1, x, y):
'*x* and *y* are arrays; return the distance to the closest point'
x1, y1 = p1
return min(np.sqrt((x-x1)**2+(y-y1)**2))
def dist(a):
if isinstance(a, Text):
bbox = a.get_window_extent()
l,b,w,h = bbox.bounds
verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)
xt, yt = zip(*verts)
elif isinstance(a, Patch):
path = a.get_path()
tverts = a.get_transform().transform_path(path)
xt, yt = zip(*tverts)
elif isinstance(a, mlines.Line2D):
xdata = a.get_xdata(orig=False)
ydata = a.get_ydata(orig=False)
xt, yt = a.get_transform().numerix_x_y(xdata, ydata)
return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))
artists = self.lines + self.patches + self.texts
if callable(among):
artists = filter(among, artists)
elif iterable(among):
amongd = {k: 1 for k in among}
artists = [a for a in artists if a in amongd]
elif among is None:
pass
else:
raise ValueError('among must be callable or iterable')
if not len(artists): return None
ds = [ (dist(a),a) for a in artists]
ds.sort()
return ds[0][1]
#### Labelling
def get_title(self):
"""
Get the title text string.
"""
return self.title.get_text()
def set_title(self, label, fontdict=None, **kwargs):
"""
call signature::
set_title(label, fontdict=None, **kwargs):
Set the title for the axes.
kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how overrides and the optional args work
"""
default = {
'fontsize':rcParams['axes.titlesize'],
'verticalalignment' : 'bottom',
'horizontalalignment' : 'center'
}
self.title.set_text(label)
self.title.update(default)
if fontdict is not None: self.title.update(fontdict)
self.title.update(kwargs)
return self.title
set_title.__doc__ = cbook.dedent(set_title.__doc__) % martist.kwdocd
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
def set_xlabel(self, xlabel, fontdict=None, **kwargs):
"""
call signature::
set_xlabel(xlabel, fontdict=None, **kwargs)
Set the label for the xaxis.
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how overrides and the optional args work
"""
label = self.xaxis.get_label()
label.set_text(xlabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_xlabel.__doc__ = cbook.dedent(set_xlabel.__doc__) % martist.kwdocd
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
def set_ylabel(self, ylabel, fontdict=None, **kwargs):
"""
call signature::
set_ylabel(ylabel, fontdict=None, **kwargs)
Set the label for the yaxis
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how overrides and the optional args work
"""
label = self.yaxis.get_label()
label.set_text(ylabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_ylabel.__doc__ = cbook.dedent(set_ylabel.__doc__) % martist.kwdocd
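# Usage sketch (illustrative, not part of the library): title and axis
# labels; fontdict overrides the defaults, extra kwargs are Text
# properties. Assumes ax is an existing Axes.
#
#   ax.set_title('Response', fontdict={'fontsize': 14})
#   ax.set_xlabel('time (s)')
#   ax.set_ylabel('amplitude', color='g')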
def text(self, x, y, s, fontdict=None,
withdash=False, **kwargs):
"""
call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text in string *s* to axis at location *x*, *y*, data
coordinates.
Keyword arguments:
*fontdict*:
A dictionary to override the default text properties.
If *fontdict* is *None*, the defaults are determined by your rc
parameters.
*withdash*: [ False | True ]
Creates a :class:`~matplotlib.text.TextWithDash` instance
instead of a :class:`~matplotlib.text.Text` instance.
Individual keyword arguments can be used to override any given
parameter::
text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
text(0.5, 0.5,'matplotlib',
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes)
You can put a rectangular box around the text instance (eg. to
set a background color) by using the keyword *bbox*. *bbox* is
a dictionary of :class:`matplotlib.patches.Rectangle`
properties. For example::
text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
Valid kwargs are :class:`matplotlib.text.Text` properties:
%(Text)s
"""
default = {
'verticalalignment' : 'bottom',
'horizontalalignment' : 'left',
#'verticalalignment' : 'top',
'transform' : self.transData,
}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if withdash:
t = mtext.TextWithDash(
x=x, y=y, text=s,
)
else:
t = mtext.Text(
x=x, y=y, text=s,
)
self._set_artist_props(t)
t.update(default)
if fontdict is not None: t.update(fontdict)
t.update(kwargs)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
#if t.get_clip_on(): t.set_clip_box(self.bbox)
if 'clip_on' in kwargs: t.set_clip_box(self.bbox)
return t
text.__doc__ = cbook.dedent(text.__doc__) % martist.kwdocd
def annotate(self, *args, **kwargs):
"""
call signature::
annotate(s, xy, xytext=None, xycoords='data',
textcoords='data', arrowprops=None, **kwargs)
Keyword arguments:
%(Annotation)s
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
if 'clip_on' in kwargs: a.set_clip_path(self.patch)
self.texts.append(a)
return a
annotate.__doc__ = cbook.dedent(annotate.__doc__) % martist.kwdocd
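# Usage sketch (illustrative, not part of the library): annotate a data
# point with text and an arrow; both xy and xytext are in data
# coordinates here. Assumes ax is an existing Axes.
#
#   ax.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
#               arrowprops=dict(facecolor='black', shrink=0.05))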
#### Lines and spans
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhline(y=0, xmin=0, xmax=1, **kwargs)
Axis Horizontal Line
Draw a horizontal line at *y* from *xmin* to *xmax*. With the
default values of *xmin* = 0 and *xmax* = 1, this line will
always span the horizontal extent of the axes, regardless of
the xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the horizontal extent is
in axes coords: 0=left, 0.5=middle, 1.0=right but the *y*
location is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red hline at *y* = 0 that spans the xrange
>>> axhline(linewidth=4, color='r')
* draw a default hline at *y* = 1 that spans the xrange
>>> axhline(y=1)
* draw a default hline at *y* = .5 that spans the middle half of
the xrange
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
yy = self.convert_yunits( y )
scaley = (yy<ymin) or (yy>ymax)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)
l.x_isdata = False
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
axhline.__doc__ = cbook.dedent(axhline.__doc__) % martist.kwdocd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvline(x=0, ymin=0, ymax=1, **kwargs)
Axis Vertical Line
Draw a vertical line at *x* from *ymin* to *ymax*. With the
default values of *ymin* = 0 and *ymax* = 1, this line will
always span the vertical extent of the axes, regardless of the
ylim settings, even if you change them, eg. with the
:meth:`set_ylim` command. That is, the vertical extent is in
axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location
is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red vline at *x* = 0 that spans the yrange
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange
>>> axvline(x=1)
* draw a default vline at *x* = .5 that spans the middle half of
the yrange
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
xx = self.convert_xunits( x )
scalex = (xx<xmin) or (xx>xmax)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)
l.y_isdata = False
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
axvline.__doc__ = cbook.dedent(axvline.__doc__) % martist.kwdocd
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)
Axis Horizontal Span.
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, eg. with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
"""
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.x_isdata = False
self.add_patch(p)
return p
axhspan.__doc__ = cbook.dedent(axhspan.__doc__) % martist.kwdocd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)
Axis Vertical Span.
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xmin* to *xmax*. With
the default values of *ymin* = 0 and *ymax* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, eg. with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
1.0=top but the *x* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.y_isdata = False
self.add_patch(p)
return p
axvspan.__doc__ = cbook.dedent(axvspan.__doc__) % martist.kwdocd
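# Usage sketch (illustrative, not part of the library): spans mix data
# and axes coordinates -- axhspan() takes y in data units, axvspan()
# takes x in data units; the other direction is a 0-1 axes fraction.
# Assumes ax is an existing Axes.
#
#   ax.axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
#   ax.axvspan(1.25, 1.55, ymin=0.1, ymax=0.9, facecolor='g', alpha=0.5)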
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)
Plot horizontal lines at each *y* from *xmin* to *xmax*.
Returns the :class:`~matplotlib.collections.LineCollection`
that was added.
Required arguments:
*y*:
a 1-D numpy array or iterable.
*xmin* and *xmax*:
can be scalars or ``len(x)`` numpy arrays. If they are
scalars, then the respective values are constant, else the
widths of the lines are determined by *xmin* and *xmax*.
Optional keyword arguments:
*colors*:
a line collections color argument, either a single color
or a ``len(y)`` list of colors
*linestyles*:
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
**Example:**
.. plot:: mpl_examples/pylab_examples/hline_demo.py
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('hlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
# We do the conversion first since not all unitized data is uniform
y = self.convert_yunits( y )
xmin = self.convert_xunits( xmin )
xmax = self.convert_xunits( xmax )
if not iterable(y): y = [y]
if not iterable(xmin): xmin = [xmin]
if not iterable(xmax): xmax = [xmax]
y = np.asarray(y)
xmin = np.asarray(xmin)
xmax = np.asarray(xmax)
if len(xmin)==1:
xmin = np.resize( xmin, y.shape )
if len(xmax)==1:
xmax = np.resize( xmax, y.shape )
if len(xmin)!=len(y):
raise ValueError('xmin and y are unequal sized sequences')
if len(xmax)!=len(y):
raise ValueError('xmax and y are unequal sized sequences')
verts = [ ((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
hlines.__doc__ = cbook.dedent(hlines.__doc__)
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
vlines(x, ymin, ymax, colors='k', linestyles='solid')
Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin*
or *ymax* can be scalars or len(*x*) numpy arrays. If they are
scalars, then the respective values are constant, else the
heights of the lines are determined by *ymin* and *ymax*.
*colors*
a line collections color args, either a single color
or a len(*x*) list of colors
*linestyles*
one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
Returns the :class:`matplotlib.collections.LineCollection`
that was added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('vlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits( x )
ymin = self.convert_yunits( ymin )
ymax = self.convert_yunits( ymax )
if not iterable(x): x = [x]
if not iterable(ymin): ymin = [ymin]
if not iterable(ymax): ymax = [ymax]
x = np.asarray(x)
ymin = np.asarray(ymin)
ymax = np.asarray(ymax)
if len(ymin)==1:
ymin = np.resize( ymin, x.shape )
if len(ymax)==1:
ymax = np.resize( ymax, x.shape )
if len(ymin)!=len(x):
raise ValueError('ymin and x are unequal sized sequences')
if len(ymax)!=len(x):
raise ValueError('ymax and x are unequal sized sequences')
Y = np.array([ymin, ymax]).T
verts = [ ((thisx, thisymin), (thisx, thisymax))
for thisx, (thisymin, thisymax) in zip(x,Y)]
#print 'creating line collection'
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min( x )
maxx = max( x )
miny = min( min(ymin), min(ymax) )
maxy = max( max(ymin), max(ymax) )
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
vlines.__doc__ = cbook.dedent(vlines.__doc__) % martist.kwdocd
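# Usage sketch (illustrative, not part of the library): hlines()/vlines()
# draw a whole LineCollection in one call; scalar or length-1 bounds are
# broadcast against the coordinate array. Assumes ax is an existing Axes.
#
#   import numpy as np
#   x = np.arange(5.0)
#   ax.vlines(x, [0], x ** 2, colors='b')        # stems from 0 to x**2
#   ax.hlines([1.0, 4.0], 0, 4, linestyles='dashed')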
#### Basic plotting
def plot(self, *args, **kwargs):
"""
Plot lines and/or markers to the
:class:`~matplotlib.axes.Axes`. *args* is a variable length
argument, allowing for multiple *x*, *y* pairs with an
optional format string. For example, each of the following is
legal::
plot(x, y) # plot x and y using default line style and color
plot(x, y, 'bo') # plot x and y using blue circle markers
plot(y) # plot y using x as index array 0..N-1
plot(y, 'r+') # ditto, but with red plusses
If *x* and/or *y* is 2-dimensional, then the corresponding columns
will be plotted.
An arbitrary number of *x*, *y*, *fmt* groups can be
specified, as in::
a.plot(x1, y1, 'g^', x2, y2, 'g-')
Return value is a list of lines that were added.
The following format string characters are accepted to control
the line style or marker:
================ ===============================
character description
================ ===============================
'-' solid line style
'--' dashed line style
'-.' dash-dot line style
':' dotted line style
'.' point marker
',' pixel marker
'o' circle marker
'v' triangle_down marker
'^' triangle_up marker
'<' triangle_left marker
'>' triangle_right marker
'1' tri_down marker
'2' tri_up marker
'3' tri_left marker
'4' tri_right marker
's' square marker
'p' pentagon marker
'*' star marker
'h' hexagon1 marker
'H' hexagon2 marker
'+' plus marker
'x' x marker
'D' diamond marker
'd' thin_diamond marker
'|' vline marker
'_' hline marker
================ ===============================
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``). Of these, the
string specifications can be used in place of a ``fmt`` group,
but the tuple forms can be used only as ``kwargs``.
Line styles and colors are combined in a single format string, as in
``'bo'`` for blue circles.
The *kwargs* can be used to set line properties (any property that has
a ``set_*`` method). You can use this to set a line label (for auto
legends), linewidth, antialiasing, marker face color, etc. Here is an
example::
plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
plot([1,2,3], [1,4,9], 'rs', label='line 2')
axis([0, 4, 0, 10])
legend()
If you make multiple lines with one plot command, the kwargs
apply to all those lines, e.g.::
plot(x1, y1, x2, y2, antialiased=False)
Neither line will be antialiased.
You do not need to use format strings, which are just
abbreviations. All of the line properties can be controlled
by keyword arguments. For example, you can set the color,
marker, linestyle, and markercolor with::
plot(x, y, color='green', linestyle='dashed', marker='o',
markerfacecolor='blue', markersize=12)
See :class:`~matplotlib.lines.Line2D` for details.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
kwargs *scalex* and *scaley*, if defined, are passed on to
:meth:`~matplotlib.axes.Axes.autoscale_view` to determine
whether the *x* and *y* axes are autoscaled; the default is
*True*.
"""
scalex = kwargs.pop( 'scalex', True)
scaley = kwargs.pop( 'scaley', True)
if not self._hold: self.cla()
lines = []
for line in self._get_lines(*args, **kwargs):
self.add_line(line)
lines.append(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
plot.__doc__ = cbook.dedent(plot.__doc__) % martist.kwdocd
def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False,
**kwargs):
"""
call signature::
plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ None | timezone string ]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ True | False ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ False | True ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
:class:`matplotlib.ticker.AutoDateLocator` (if the tick
locator is not already set to a
:class:`matplotlib.ticker.DateLocator` instance) and the
default tick formatter to
:class:`matplotlib.ticker.AutoDateFormatter` (if the tick
formatter is not already set to a
:class:`matplotlib.ticker.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates`:
for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange`:
for help on creating the required floating point
dates.
"""
if not self._hold: self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
plot_date.__doc__ = cbook.dedent(plot_date.__doc__) % martist.kwdocd
def loglog(self, *args, **kwargs):
"""
call signature::
loglog(*args, **kwargs)
Make a plot with log scaling on the *x* and *y* axis.
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
base of the *x*/*y* logarithm
*subsx*/*subsy*: [ None | sequence ]
the location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
"""
if not self._hold: self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
loglog.__doc__ = cbook.dedent(loglog.__doc__) % martist.kwdocd
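# Usage sketch (illustrative, not part of the library): the log-scale
# helpers; basex/basey set the logarithm base. Assumes ax and ax2 are
# existing Axes instances.
#
#   import numpy as np
#   t = np.arange(1.0, 100.0)
#   ax.loglog(t, t ** 2, basex=10)
#   ax2.semilogx(t, np.log(t))     # log scaling on the x axis only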
def semilogx(self, *args, **kwargs):
"""
call signature::
semilogx(*args, **kwargs)
Make a plot with log scaling on the *x* axis.
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
base of the *x* logarithm
*subsx*: [ None | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basex': kwargs.pop( 'basex', 10),
'subsx': kwargs.pop( 'subsx', None),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogx.__doc__ = cbook.dedent(semilogx.__doc__) % martist.kwdocd
def semilogy(self, *args, **kwargs):
"""
call signature::
semilogy(*args, **kwargs)
Make a plot with log scaling on the *y* axis.
:func:`semilogy` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ None | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogy.__doc__ = cbook.dedent(semilogy.__doc__) % martist.kwdocd
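    # Illustrative sketch (comments only): exponential decay appears as a
    # straight line on semilogy axes; ``ax`` is assumed as above.
    #
    #     import numpy as np
    #     t = np.arange(0.01, 20.0, 0.01)
    #     ax.semilogy(t, np.exp(-t / 5.0))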
def acorr(self, x, **kwargs):
"""
call signature::
acorr(x, normed=False, detrend=mlab.detrend_none, usevlines=False,
maxlags=None, **kwargs)
        Plot the autocorrelation of *x*. If *normed* = *True*,
        normalize the data by the autocorrelation at 0-th lag. *x* is
        detrended by the *detrend* callable (default: no detrending).
Data are plotted as ``plot(lags, c, **kwargs)``
        If *usevlines* is *False*, the return value is a tuple
        (*lags*, *c*, *line*, *b*) where:
          - *lags* are a length 2*maxlags+1 lag vector
          - *c* is the 2*maxlags+1 auto correlation vector
          - *line* is a :class:`~matplotlib.lines.Line2D` instance
            returned by :meth:`plot`
          - *b* is *None*
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`numpy.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
*maxlags* is a positive integer detailing the number of lags
to show. The default value of *None* will return all
:math:`2 \mathrm{len}(x) - 1` lags.
        If *usevlines* is *True*, the return value is a tuple
        (*lags*, *c*, *linecol*, *b*) where
          - *linecol* is the
            :class:`~matplotlib.collections.LineCollection`
          - *b* is the *x*-axis.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`: For documentation on
valid kwargs.
        **Example:**
        :func:`~matplotlib.pyplot.xcorr` above, and
        :func:`~matplotlib.pyplot.acorr` below:
        .. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
acorr.__doc__ = cbook.dedent(acorr.__doc__) % martist.kwdocd
def xcorr(self, x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, maxlags=None, **kwargs):
"""
call signature::
xcorr(x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, **kwargs):
        Plot the cross correlation between *x* and *y*. If *normed* =
        *True*, normalize the data by the cross correlation at 0-th
        lag. *x* and *y* are detrended by the *detrend* callable
        (default: no detrending). *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
        If *usevlines* is *False*, the return value is a tuple
        (*lags*, *c*, *line*, *b*) where:
          - *lags* are a length ``2*maxlags+1`` lag vector
          - *c* is the ``2*maxlags+1`` cross correlation vector
          - *line* is a :class:`~matplotlib.lines.Line2D` instance
            returned by :func:`~matplotlib.pyplot.plot`
          - *b* is *None*
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
        In that case the return value is a tuple (*lags*, *c*,
        *linecol*, *b*) where *linecol* is the
        :class:`matplotlib.collections.LineCollection` instance and
        *b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
        **Example:**
        :func:`~matplotlib.pyplot.xcorr` above, and
        :func:`~matplotlib.pyplot.acorr` below:
        .. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
            raise ValueError('maxlags must be None or strictly '
                             'positive < %d'%Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
xcorr.__doc__ = cbook.dedent(xcorr.__doc__) % martist.kwdocd
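    # Illustrative sketch (comments only): cross-correlating two noise
    # series and keeping 50 lags on each side; ``ax`` is assumed to be an
    # Axes instance created elsewhere.
    #
    #     import numpy as np
    #     x, y = np.random.randn(2, 1000)
    #     lags, c, linecol, b = ax.xcorr(x, y, usevlines=True,
    #                                    maxlags=50, normed=True)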
def legend(self, *args, **kwargs):
"""
call signature::
legend(*args, **kwargs)
Place a legend on the current axes at location *loc*. Labels are a
sequence of strings and *loc* can be a string or an integer specifying
the legend location.
To make a legend with existing lines::
legend()
:meth:`legend` by itself will try and build a legend using the label
property of the lines/patches/collections. You can set the label of
a line by doing::
plot(x, y, label='my data')
or::
          line.set_label('my data')
If label is set to '_nolegend_', the item will not be shown in
legend.
To automatically generate the legend from labels::
legend( ('label1', 'label2', 'label3') )
To make a legend for a list of lines and labels::
legend( (line1, line2, line3), ('label1', 'label2', 'label3') )
To make a legend at a given location, using a location argument::
legend( ('label1', 'label2', 'label3'), loc='upper left')
or::
legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2)
The location codes are
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
        If none of these locations are suitable, loc can be a 2-tuple
        giving x,y in axes coords, ie::
loc = 0, 1 # left top
loc = 0.5, 0.5 # center
Keyword arguments:
*isaxes*: [ True | False ]
Indicates that this is an axes legend
*numpoints*: integer
The number of points in the legend line, default is 4
*prop*: [ None | FontProperties ]
A :class:`matplotlib.font_manager.FontProperties`
instance, or *None* to use rc settings.
*pad*: [ None | scalar ]
The fractional whitespace inside the legend border, between 0 and 1.
If *None*, use rc settings.
*markerscale*: [ None | scalar ]
The relative size of legend markers vs. original. If *None*, use rc
settings.
*shadow*: [ None | False | True ]
If *True*, draw a shadow behind legend. If *None*, use rc settings.
*labelsep*: [ None | scalar ]
The vertical space between the legend entries. If *None*, use rc
settings.
*handlelen*: [ None | scalar ]
The length of the legend lines. If *None*, use rc settings.
*handletextsep*: [ None | scalar ]
The space between the legend line and legend text. If *None*, use rc
settings.
*axespad*: [ None | scalar ]
The border between the axes and legend edge. If *None*, use rc
settings.
**Example:**
.. plot:: mpl_examples/api/legend_demo.py
"""
def get_handles():
handles = self.lines[:]
handles.extend(self.patches)
handles.extend([c for c in self.collections
if isinstance(c, mcoll.LineCollection)])
handles.extend([c for c in self.collections
if isinstance(c, mcoll.RegularPolyCollection)])
return handles
if len(args)==0:
handles = []
labels = []
for handle in get_handles():
label = handle.get_label()
if (label is not None and
label != '' and not label.startswith('_')):
handles.append(handle)
labels.append(label)
if len(handles) == 0:
warnings.warn("No labeled objects found. "
"Use label='...' kwarg on individual plots.")
return None
elif len(args)==1:
# LABELS
labels = args[0]
handles = [h for h, label in zip(get_handles(), labels)]
elif len(args)==2:
if is_string_like(args[1]) or isinstance(args[1], int):
# LABELS, LOC
labels, loc = args
handles = [h for h, label in zip(get_handles(), labels)]
kwargs['loc'] = loc
else:
# LINES, LABELS
handles, labels = args
elif len(args)==3:
# LINES, LABELS, LOC
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend')
handles = cbook.flatten(handles)
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
return self.legend_
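    # Illustrative sketch (comments only), mirroring the docstring's own
    # patterns: label lines at plot time and let legend() collect them;
    # ``ax`` is assumed to be an Axes instance created elsewhere.
    #
    #     line1, = ax.plot([1, 2, 3], [1, 4, 9], label='signal')
    #     line2, = ax.plot([1, 2, 3], [2, 3, 4], label='noise')
    #     ax.legend(loc='upper left')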
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
'''
call signature::
step(x, y, *args, **kwargs)
Make a step plot. Additional keyword args to :func:`step` are the same
as those for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*where*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i]
If 'post', that interval has level y[i+1]
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
'''
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['linestyle'] = 'steps-' + where
return self.plot(x, y, *args, **kwargs)
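    # Illustrative sketch (comments only): *where* controls on which side of
    # each x value the level changes; ``ax`` is assumed as above.
    #
    #     import numpy as np
    #     x = np.arange(10)
    #     ax.step(x, np.sqrt(x), where='mid')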
def bar(self, left, height, width=0.8, bottom=None,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False,
**kwargs
):
"""
call signature::
bar(left, height, width=0.8, bottom=0,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False)
Make a bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*left*, *height*, *width*, and *bottom* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ===============================================
Argument Description
======== ===============================================
*left* the x coordinates of the left sides of the bars
*height* the heights of the bars
======== ===============================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*width* the widths of the bars
*bottom* the y coordinates of the bottom edges of
the bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*orientation* 'vertical' | 'horizontal'
*log* [False|True] False (default) leaves the
orientation axis as-is; True sets it to
log scale
=============== ==========================================
For vertical bars, *align* = 'edge' aligns bars by their left
edges in left, while *align* = 'center' interprets these
values as the *x* coordinates of the bar centers. For
horizontal bars, *align* = 'edge' aligns bars by their bottom
edges in bottom, while *align* = 'center' interprets these
values as the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Other optional kwargs:
%(Rectangle)s
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
"""
if not self._hold: self.cla()
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
bottom = [1e-100]
adjust_ylim = True
else:
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
left = [1e-100]
adjust_xlim = True
else:
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
            raise ValueError('invalid orientation: {0!s}'.format(orientation))
# do not convert to array here as unit info is lost
#left = np.asarray(left)
#height = np.asarray(height)
#width = np.asarray(width)
#bottom = np.asarray(bottom)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) < nbars:
edgecolor *= nbars
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*nbars
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*nbars
        # Input validation: raise ValueError rather than using assert,
        # so the checks survive under ``python -O``.
        if len(left) != nbars:
            raise ValueError("argument 'left' must be {0:d} or scalar".format(nbars))
        if len(height) != nbars:
            raise ValueError("argument 'height' must be {0:d} or scalar".format(nbars))
        if len(width) != nbars:
            raise ValueError("argument 'width' must be {0:d} or scalar".format(nbars))
        if len(bottom) != nbars:
            raise ValueError("argument 'bottom' must be {0:d} or scalar".format(nbars))
if yerr is not None and len(yerr)!=nbars:
raise ValueError(
"bar() argument 'yerr' must be len({0!s}) or scalar".format(nbars))
if xerr is not None and len(xerr)!=nbars:
raise ValueError(
"bar() argument 'xerr' must be len({0!s}) or scalar".format(nbars))
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
xconv = self.xaxis.converter
if xconv is not None:
units = self.xaxis.get_units()
left = xconv.convert( left, units )
width = xconv.convert( width, units )
if self.yaxis is not None:
yconv = self.yaxis.converter
if yconv is not None :
units = self.yaxis.get_units()
bottom = yconv.convert( bottom, units )
height = yconv.convert( height, units )
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i]/2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i]/2. for i in xrange(len(bottom))]
else:
            raise ValueError('invalid alignment: {0!s}'.format(align))
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h<0:
b += h
h = abs(h)
if w<0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label=label
)
label = '_nolegend_'
r.update(kwargs)
#print r.get_label(), label, 'label' in kwargs
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l+0.5*w for l, w in zip(left, width)]
y = [b+h for b,h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l+w for l,w in zip(left, width)]
y = [b+0.5*h for b,h in zip(bottom, height)]
self.errorbar(
x, y,
yerr=yerr, xerr=xerr,
fmt=None, ecolor=ecolor, capsize=capsize)
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = np.amin(width[width!=0]) # filter out the 0 width rects
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = np.amin(height[height!=0]) # filter out the 0 height rects
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
return patches
bar.__doc__ = cbook.dedent(bar.__doc__) % martist.kwdocd
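    # Illustrative sketch (comments only): a stacked bar chart built from two
    # bar() calls via the *bottom* kwarg, as the docstring suggests; ``ax``
    # is assumed to be an Axes instance created elsewhere.
    #
    #     import numpy as np
    #     ind = np.arange(5)
    #     men = [20, 35, 30, 35, 27]
    #     women = [25, 32, 34, 20, 25]
    #     ax.bar(ind, men, width=0.35, color='r')
    #     ax.bar(ind, women, width=0.35, color='y', bottom=men)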
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
call signature::
barh(bottom, width, height=0.8, left=0, **kwargs)
Make a horizontal bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*bottom*, *width*, *height*, and *left* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ======================================================
Argument Description
======== ======================================================
*bottom* the vertical positions of the bottom edges of the bars
*width* the lengths of the bars
======== ======================================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*height* the heights (thicknesses) of the bars
*left* the x coordinates of the left edges of the
bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*log* [False|True] False (default) leaves the
horizontal axis as-is; True sets it to log
scale
=============== ==========================================
Setting *align* = 'edge' aligns bars by their bottom edges in
bottom, while *align* = 'center' interprets these values as
the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
barh as the basis for stacked bar charts, or candlestick
plots.
other optional kwargs:
%(Rectangle)s
"""
patches = self.bar(left=left, height=height, width=width, bottom=bottom,
orientation='horizontal', **kwargs)
return patches
barh.__doc__ = cbook.dedent(barh.__doc__) % martist.kwdocd
def broken_barh(self, xranges, yrange, **kwargs):
"""
call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xmin*, *xwidth*)
*yrange* sequence of (*ymin*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
these can either be a single argument, ie::
facecolors = 'black'
or a sequence of arguments for the various bars, ie::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
broken_barh.__doc__ = cbook.dedent(broken_barh.__doc__) % martist.kwdocd
def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-'):
"""
call signature::
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
        using *markerfmt*. A horizontal line at 0 is plotted using
*basefmt*.
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
`this document`__ for details
:file:`examples/pylab_examples/stem_plot.py`:
for a demo
__ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html
"""
remember_hold=self._hold
if not self._hold: self.cla()
self.hold(True)
markerline, = self.plot(x, y, markerfmt)
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx,thisx], [0, thisy], linefmt)
stemlines.append(l)
baseline, = self.plot([np.amin(x), np.amax(x)], [0,0], basefmt)
self.hold(remember_hold)
return markerline, stemlines, baseline
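    # Illustrative sketch (comments only); ``ax`` is assumed as above.
    #
    #     import numpy as np
    #     x = np.linspace(0.1, 2 * np.pi, 10)
    #     markerline, stemlines, baseline = ax.stem(x, np.cos(x))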
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1):
r"""
call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False)
Make a pie chart of array *x*. The fractional area of each
wedge is given by x/sum(x). If sum(x) <= 1, then the values
of x give the fractional area directly and the array will not
be normalized.
Keyword arguments:
*explode*: [ None | len(x) sequence ]
If not *None*, is a len(*x*) array which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ None | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ None | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ None | format string | format function ]
If not *None*, is a string or function used to label the
wedges with their numeric value. The label will be placed inside
the wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
*pctdistance*: scalar
The ratio between the center of each pie slice and the
start of the text generated by *autopct*. Ignored if
*autopct* is *None*; default is 0.6.
*labeldistance*: scalar
The radial distance at which the pie labels are drawn
*shadow*: [ False | True ]
Draw a shadow beneath the pie.
The pie chart will probably look best if the figure and axes are
square. Eg.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
Return value:
If *autopct* is None, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), where *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
"""
self.set_frame_on(False)
x = np.asarray(x).astype(np.float32)
sx = float(x.sum())
if sx>1: x = np.divide(x,sx)
if labels is None: labels = ['']*len(x)
if explode is None: explode = [0]*len(x)
assert(len(x)==len(labels))
assert(len(x)==len(explode))
if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0,0
radius = 1
theta1 = 0
i = 0
texts = []
slices = []
autotexts = []
for frac, label, expl in cbook.safezip(x,labels, explode):
x, y = center
theta2 = theta1 + frac
thetam = 2*math.pi*0.5*(theta1+theta2)
x += expl*math.cos(thetam)
y += expl*math.sin(thetam)
w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2,
facecolor=colors[i%len(colors)])
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder(0.9*w.get_zorder())
self.add_patch(shad)
xt = x + labeldistance*radius*math.cos(thetam)
yt = y + labeldistance*radius*math.sin(thetam)
            label_alignment = 'left' if xt > 0 else 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center')
texts.append(t)
if autopct is not None:
xt = x + pctdistance*radius*math.cos(thetam)
yt = y + pctdistance*radius*math.sin(thetam)
if is_string_like(autopct):
s = autopct%(100.*frac)
elif callable(autopct):
s = autopct(100.*frac)
else:
raise TypeError(
'autopct must be callable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center')
autotexts.append(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None: return slices, texts
else: return slices, texts, autotexts
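    # Illustrative sketch (comments only): wedge labels plus percentage text
    # via *autopct*; ``ax`` is assumed as above (a square axes looks best).
    #
    #     fracs = [15, 30, 45, 10]
    #     labels = ['Frogs', 'Hogs', 'Dogs', 'Logs']
    #     ax.pie(fracs, labels=labels, autopct='%1.1f%%', shadow=True)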
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, **kwargs):
"""
call signature::
errorbar(x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False)
Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
Vertical errorbars are plotted if *yerr* is not *None*.
Horizontal errorbars are plotted if *xerr* is not *None*.
*x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
single error bar at *x*, *y*.
Optional keyword arguments:
*xerr*/*yerr*: [ scalar | N, Nx1, Nx2 array-like ]
If a scalar number, len(N) array-like object, or an Nx1 array-like
object, errorbars are drawn +/- value.
If a rank-1, Nx2 Numpy array, errorbars are drawn at -column1 and
+column2
*fmt*: '-'
The plot format symbol for *y*. If *fmt* is *None*, just plot the
errorbars with no line symbols. This can be useful for creating a
bar plot with errorbars.
*ecolor*: [ None | mpl color ]
a matplotlib color arg which gives the color the errorbar lines; if
*None*, use the marker color.
*elinewidth*: scalar
the linewidth of the errorbar lines. If *None*, use the linewidth.
*capsize*: scalar
the size of the error bar caps in points
*barsabove*: [ True | False ]
if *True*, will plot the errorbars above the plot
symbols. Default is below.
*lolims*/*uplims*/*xlolims*/*xuplims*: [ False | True ]
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*.
All other keyword arguments are passed on to the plot command for the
markers, so you can add additional key=value pairs to control the
errorbar markers. For example, this code makes big red squares with
thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s',
mfc='red', mec='green', ms=20, mew=4)
        where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
        property names, *markerfacecolor*, *markeredgecolor*, *markersize*
        and *markeredgewidth*.
valid kwargs for the marker properties are
%(Line2D)s
Return value is a length 3 tuple. The first element is the
:class:`~matplotlib.lines.Line2D` instance for the *y* symbol
lines. The second element is a list of error bar cap lines,
the third element is a list of
:class:`~matplotlib.collections.LineCollection` instances for
the horizontal and vertical error ranges.
**Example:**
.. plot:: mpl_examples/pylab_examples/errorbar_demo.py
"""
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold: self.cla()
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*len(y)
l0 = None
if barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
barcols = []
caplines = []
lines_kw = {'label':'_nolegend_'}
if elinewidth:
lines_kw['linewidth'] = elinewidth
else:
if 'linewidth' in kwargs:
lines_kw['linewidth']=kwargs['linewidth']
if 'lw' in kwargs:
lines_kw['lw']=kwargs['lw']
if 'transform' in kwargs:
lines_kw['transform'] = kwargs['transform']
# arrays fine here, they are booleans and hence not units
if not iterable(lolims):
lolims = np.asarray([lolims]*len(x), bool)
else: lolims = np.asarray(lolims, bool)
if not iterable(uplims): uplims = np.array([uplims]*len(x), bool)
else: uplims = np.asarray(uplims, bool)
if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool)
else: xlolims = np.asarray(xlolims, bool)
if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool)
else: xuplims = np.asarray(xuplims, bool)
def xywhere(xs, ys, mask):
"""
return xs[mask], ys[mask] where mask is True but xs and
ys are not arrays
"""
assert len(xs)==len(ys)
assert len(xs)==len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
if capsize > 0:
plot_kw = {
'ms':2*capsize,
'label':'_nolegend_'}
if 'markeredgewidth' in kwargs:
plot_kw['markeredgewidth']=kwargs['markeredgewidth']
if 'mew' in kwargs:
plot_kw['mew']=kwargs['mew']
if 'transform' in kwargs:
plot_kw['transform'] = kwargs['transform']
if xerr is not None:
if (iterable(xerr) and len(xerr)==2 and
iterable(xerr[0]) and iterable(xerr[1])):
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[0])]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[1])]
else:
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
barcols.append( self.hlines(y, left, right, **lines_kw ) )
if capsize > 0:
if xlolims.any():
# can't use numpy logical indexing since left and
# y are lists
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend(
self.plot(leftlo, ylo, ls='None',
marker=mlines.CARETLEFT, **plot_kw) )
xlolims = ~xlolims
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(left, y, 'k|', **plot_kw) )
if xuplims.any():
rightup, yup = xywhere(right, y, xuplims)
caplines.extend(
self.plot(rightup, yup, ls='None',
marker=mlines.CARETRIGHT, **plot_kw) )
xuplims = ~xuplims
rightup, yup = xywhere(right, y, xuplims)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(right, y, 'k|', **plot_kw) )
if yerr is not None:
if (iterable(yerr) and len(yerr)==2 and
iterable(yerr[0]) and iterable(yerr[1])):
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[0])]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[1])]
else:
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
barcols.append( self.vlines(x, lower, upper, **lines_kw) )
if capsize > 0:
if lolims.any():
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend(
self.plot(xlo, lowerlo, ls='None',
marker=mlines.CARETDOWN, **plot_kw) )
lolims = ~lolims
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, lower, 'k_', **plot_kw) )
if uplims.any():
xup, upperup = xywhere(x, upper, uplims)
caplines.extend(
self.plot(xup, upperup, ls='None',
marker=mlines.CARETUP, **plot_kw) )
uplims = ~uplims
xup, upperup = xywhere(x, upper, uplims)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, upper, 'k_', **plot_kw) )
if not barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
if ecolor is None:
if l0 is None:
ecolor = self._get_lines._get_next_cycle_color()
else:
ecolor = l0.get_color()
for l in barcols:
l.set_color(ecolor)
for l in caplines:
l.set_color(ecolor)
self.autoscale_view()
return (l0, caplines, barcols)
errorbar.__doc__ = cbook.dedent(errorbar.__doc__) % martist.kwdocd
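    # Illustrative sketch (comments only): symmetric scalar errors in both
    # directions; ``ax`` is assumed to be an Axes instance created elsewhere.
    #
    #     import numpy as np
    #     x = np.arange(0.1, 4.0, 0.5)
    #     ax.errorbar(x, np.exp(-x), yerr=0.1, xerr=0.2, fmt='s', ecolor='g')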
def boxplot(self, x, notch=0, sym='b+', vert=1, whis=1.5,
positions=None, widths=None):
"""
call signature::
boxplot(x, notch=0, sym='+', vert=1, whis=1.5,
positions=None, widths=None)
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
- *notch* = 0 (default) produces a rectangular box plot.
- *notch* = 1 will produce a notched box plot
*sym* (default 'b+') is the default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
- *vert* = 1 (default) makes the boxes vertical.
- *vert* = 0 makes horizontal boxes. This seems goofy, but
that's how Matlab did it.
        *whis* (default 1.5) defines the length of the whiskers as
        a function of the interquartile range. They extend to the
        most extreme data point within ( ``whis*(75%-25%)`` ) data range.
*positions* (default 1,2,...,n) sets the horizontal positions of
the boxes. The ticks and limits are automatically set to match
the positions.
*widths* is either a scalar or a vector and sets the width of
each box. The default is 0.5, or ``0.15*(distance between extreme
positions)`` if that is smaller.
*x* is an array or a sequence of vectors.
Returns a dictionary mapping each component of the boxplot
to a list of the :class:`matplotlib.lines.Line2D`
instances created.
**Example:**
.. plot:: pyplots/boxplot_demo.py
"""
if not self._hold: self.cla()
holdStatus = self._hold
whiskers, caps, boxes, medians, fliers = [], [], [], [], []
# convert x to a list of vectors
if hasattr(x, 'shape'):
if len(x.shape) == 1:
if hasattr(x[0], 'shape'):
x = list(x)
else:
x = [x,]
elif len(x.shape) == 2:
nr, nc = x.shape
if nr == 1:
x = [x]
elif nc == 1:
x = [x.ravel()]
else:
x = [x[:,i] for i in xrange(nc)]
else:
raise ValueError, "input x can have no more than 2 dimensions"
if not hasattr(x[0], '__len__'):
x = [x]
col = len(x)
# get some plot info
if positions is None:
positions = range(1, col + 1)
if widths is None:
distance = max(positions) - min(positions)
widths = min(0.15*max(distance,1.0), 0.5)
if isinstance(widths, float) or isinstance(widths, int):
widths = np.ones((col,), float) * widths
# loop through columns, adding each to plot
self.hold(True)
for i,pos in enumerate(positions):
d = np.ravel(x[i])
row = len(d)
# get median and quartiles
q1, med, q3 = mlab.prctile(d,[25,50,75])
# get high extreme
iq = q3 - q1
hi_val = q3 + whis*iq
wisk_hi = np.compress( d <= hi_val , d )
if len(wisk_hi) == 0:
wisk_hi = q3
else:
wisk_hi = max(wisk_hi)
# get low extreme
lo_val = q1 - whis*iq
wisk_lo = np.compress( d >= lo_val, d )
if len(wisk_lo) == 0:
wisk_lo = q1
else:
wisk_lo = min(wisk_lo)
# get fliers - if we are showing them
flier_hi = []
flier_lo = []
flier_hi_x = []
flier_lo_x = []
if len(sym) != 0:
flier_hi = np.compress( d > wisk_hi, d )
flier_lo = np.compress( d < wisk_lo, d )
flier_hi_x = np.ones(flier_hi.shape[0]) * pos
flier_lo_x = np.ones(flier_lo.shape[0]) * pos
# get x locations for fliers, whisker, whisker cap and box sides
box_x_min = pos - widths[i] * 0.5
box_x_max = pos + widths[i] * 0.5
wisk_x = np.ones(2) * pos
cap_x_min = pos - widths[i] * 0.25
cap_x_max = pos + widths[i] * 0.25
cap_x = [cap_x_min, cap_x_max]
# get y location for median
med_y = [med, med]
# calculate 'regular' plot
if notch == 0:
# make our box vectors
box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]
box_y = [q1, q1, q3, q3, q1 ]
# make our median line vectors
med_x = [box_x_min, box_x_max]
# calculate 'notch' plot
else:
notch_max = med + 1.57*iq/np.sqrt(row)
notch_min = med - 1.57*iq/np.sqrt(row)
if notch_max > q3:
notch_max = q3
if notch_min < q1:
notch_min = q1
# make our notched box vectors
box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,
box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,
box_x_min ]
box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,
med, notch_min, q1]
# make our median line vectors
med_x = [cap_x_min, cap_x_max]
med_y = [med, med]
# vertical or horizontal plot?
if vert:
def doplot(*args):
return self.plot(*args)
else:
def doplot(*args):
shuffled = []
for i in xrange(0, len(args), 3):
shuffled.extend([args[i+1], args[i], args[i+2]])
return self.plot(*shuffled)
whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',
wisk_x, [q3, wisk_hi], 'b--'))
caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',
cap_x, [wisk_lo, wisk_lo], 'k-'))
boxes.extend(doplot(box_x, box_y, 'b-'))
medians.extend(doplot(med_x, med_y, 'r-'))
fliers.extend(doplot(flier_hi_x, flier_hi, sym,
flier_lo_x, flier_lo, sym))
# fix our axes/ticks up a little
if 1 == vert:
setticks, setlim = self.set_xticks, self.set_xlim
else:
setticks, setlim = self.set_yticks, self.set_ylim
newlimits = min(positions)-0.5, max(positions)+0.5
setlim(newlimits)
setticks(positions)
# reset hold status
self.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers)
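    # Illustrative sketch (comments only): one box per sequence; the returned
    # dict maps component names to lists of Line2D instances; ``ax`` is
    # assumed as above.
    #
    #     import numpy as np
    #     data = [np.random.normal(0, std, 100) for std in (1, 2, 3)]
    #     result = ax.boxplot(data, notch=0, sym='b+', vert=1, whis=1.5)
    #     medians = result['medians']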
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
faceted=True, verts=None,
**kwargs):
"""
call signatures::
scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
verts=None, **kwargs)
Make a scatter plot of *x* versus *y*, where *x*, *y* are 1-D
sequences of the same length, *N*.
Keyword arguments:
*s*:
size in points^2. It is a scalar or an array of the same
length as *x* and *y*.
*c*:
a color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however.
*marker*:
can be one of:
===== ==============
Value Description
===== ==============
's' square
'o' circle
'^' triangle up
'>' triangle right
'v' triangle down
'<' triangle left
'd' diamond
'p' pentagram
'h' hexagon
'8' octagon
'+' plus
'x' cross
===== ==============
The marker can also be a tuple (*numsides*, *style*,
*angle*), which will create a custom, regular symbol.
*numsides*:
the number of sides
*style*:
the style of the regular symbol:
===== =============================================
Value Description
===== =============================================
0 a regular polygon
1 a star-like symbol
2 an asterisk
3 a circle (*numsides* and *angle* is ignored)
===== =============================================
*angle*:
the angle of rotation of the symbol
Finally, *marker* can be (*verts*, 0): *verts* is a
sequence of (*x*, *y*) vertices for a custom scatter
symbol. Alternatively, use the kwarg combination
*marker* = *None*, *verts* = *verts*.
Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Other keyword arguments: the color mapping and normalization
arguments will be used only if *c* is an array of floats.
*cmap*: [ None | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``image.cmap``. *cmap* is only used if *c*
is an array of floats.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0, 1. If *None*, use the default
:func:`normalize`. *norm* is only used if *c* is an array
of floats.
*vmin*/*vmax*:
*vmin* and *vmax* are used in conjunction with norm to
normalize luminance data. If either are None, the min and
max of the color array *C* is used. Note if you pass a
*norm* instance, your settings for *vmin* and *vmax* will
be ignored.
*alpha*: 0 <= scalar <= 1
The alpha value for the patches
*linewidths*: [ None | scalar | sequence ]
If *None*, defaults to (lines.linewidth,). Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Optional kwargs control the
:class:`~matplotlib.collections.Collection` properties; in
particular:
*edgecolors*:
'none' to plot faces with no outlines
*facecolors*:
'none' to plot unfilled outlines
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
A :class:`~matplotlib.collections.Collection` instance is
returned.
"""
if not self._hold: self.cla()
syms = { # a dict from symbol to (numsides, angle)
's' : (4,math.pi/4.0,0), # square
'o' : (20,3,0), # circle
'^' : (3,0,0), # triangle up
'>' : (3,math.pi/2.0,0), # triangle right
'v' : (3,math.pi,0), # triangle down
'<' : (3,3*math.pi/2.0,0), # triangle left
'd' : (4,0,0), # diamond
'p' : (5,0,0), # pentagram
'h' : (6,0,0), # hexagon
'8' : (8,0,0), # octagon
'+' : (4,0,2), # plus
'x' : (4,math.pi/4.0,2) # cross
}
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
if is_string_like(c) or cbook.is_sequence_of_strings(c):
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
else:
sh = np.shape(c)
# The inherent ambiguity is resolved in favor of color
# mapping, not interpretation as rgb or rgba:
if len(sh) == 1 and sh[0] == len(x):
colors = None # use cmap, norm after collection is created
else:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
if not iterable(s):
scales = (s,)
else:
scales = s
if faceted:
edgecolors = None
else:
edgecolors = 'none'
warnings.warn(
'''replace "faceted=False" with "edgecolors='none'"''',
DeprecationWarning) #2008/04/18
sym = None
symstyle = 0
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
if is_string_like(marker):
# the standard way to define symbols using a string character
sym = syms.get(marker)
if sym is None and verts is None:
raise ValueError('Unknown marker symbol to scatter')
            numsides, rotation, symstyle = sym
elif iterable(marker):
# accept marker to be:
# (numsides, style, [angle])
# or
# (verts[], style, [angle])
if len(marker)<2 or len(marker)>3:
raise ValueError('Cannot create markersymbol from marker')
if cbook.is_numlike(marker[0]):
# (numsides, style, [angle])
if len(marker)==2:
numsides, rotation = marker[0], 0.
elif len(marker)==3:
numsides, rotation = marker[0], marker[2]
sym = True
if marker[1] in (1,2):
symstyle = marker[1]
else:
verts = np.asarray(marker[0])
if sym is not None:
if symstyle==0:
collection = mcoll.RegularPolyCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==1:
collection = mcoll.StarPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==2:
collection = mcoll.AsteriskPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==3:
collection = mcoll.CircleCollection(
scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
else:
rescale = np.sqrt(max(verts[:,0]**2+verts[:,1]**2))
verts /= rescale
collection = mcoll.PolyCollection(
(verts,), scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
collection.set_transform(mtransforms.IdentityTransform())
collection.set_alpha(alpha)
collection.update(kwargs)
if colors is None:
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
temp_x = x
temp_y = y
minx = np.amin(temp_x)
maxx = np.amax(temp_x)
miny = np.amin(temp_y)
maxy = np.amax(temp_y)
w = maxx-minx
h = maxy-miny
# the pad is a little hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
scatter.__doc__ = cbook.dedent(scatter.__doc__) % martist.kwdocd
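    # Illustrative sketch (comments only): per-point sizes and colormapped
    # scalar colors; ``ax`` is assumed to be an Axes instance created
    # elsewhere.
    #
    #     import numpy as np
    #     N = 100
    #     x, y, colors = np.random.rand(3, N)
    #     sizes = 100 * np.random.rand(N)
    #     ax.scatter(x, y, s=sizes, c=colors, marker='o', alpha=0.75)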
def hexbin(self, x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none',
reduce_C_function = np.mean,
**kwargs):
"""
call signature::
hexbin(x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
                 alpha=1.0, linewidths=None, edgecolors='none',
reduce_C_function = np.mean,
**kwargs)
Make a hexagonal binning plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is None
        (the default), this is a histogram of the number of occurrences
        of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i],y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to numpy's mean function (np.mean). (If *C* is
specified, it must also be a 1-D sequence of the same length
as *x* and *y*.)
*x*, *y* and/or *C* may be masked arrays, in which case only
unmasked points will be plotted.
Optional keyword arguments:
*gridsize*: [ 100 | integer ]
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
*bins*: [ None | 'log' | integer | sequence ]
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
            If 'log', use a logarithmic scale for the color
            map. Internally, :math:`\log_{10}(i+1)` is used to
            determine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
*xscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the horizontal axis.
        *yscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the vertical axis.
Other keyword arguments controlling color mapping and normalization
arguments:
*cmap*: [ None | Colormap ]
a :class:`matplotlib.cm.Colormap` instance. If *None*,
defaults to rc ``image.cmap``.
*norm*: [ None | Normalize ]
:class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1.
*vmin*/*vmax*: scalar
*vmin* and *vmax* are used in conjunction with *norm* to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. Note if you pass a norm instance, your settings
for *vmin* and *vmax* will be ignored.
*alpha*: scalar
the alpha value for the patches
        *linewidths*: [ None | scalar ]
            If *None*, defaults to rc lines.linewidth.
Other keyword arguments controlling the Collection properties:
*edgecolors*: [ None | mpl color | color sequence ]
If 'none', draws the edges in the same color as the fill color.
This is the default, as it avoids unsightly unpainted pixels
between the hexagons.
If *None*, draws the outlines in the default color.
If a matplotlib color arg or sequence of rgba tuples, draws the
outlines in the specified color.
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
The return value is a
:class:`~matplotlib.collections.PolyCollection` instance; use
:meth:`~matplotlib.collection.PolyCollection.get_array` on
this :class:`~matplotlib.collections.PolyCollection` to get
the counts in each hexagon.
**Example:**
.. plot:: mpl_examples/pylab_examples/hexbin_demo.py
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx/math.sqrt(3))
# Count the number of data in each hexagon
x = np.array(x, float)
y = np.array(y, float)
if xscale=='log':
x = np.log10(x)
if yscale=='log':
y = np.log10(y)
xmin = np.amin(x)
xmax = np.amax(x)
ymin = np.amin(y)
ymax = np.amax(y)
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
sx = (xmax-xmin) / nx
sy = (ymax-ymin) / ny
x = (x-xmin)/sx
y = (y-ymin)/sy
ix1 = np.round(x).astype(int)
iy1 = np.round(y).astype(int)
ix2 = np.floor(x).astype(int)
iy2 = np.floor(y).astype(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1*ny1+nx2*ny2
d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2
d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2
bdist = (d1<d2)
if C is None:
accum = np.zeros(n)
# Create appropriate views into "accum" array.
lattice1 = accum[:nx1*ny1]
lattice2 = accum[nx1*ny1:]
lattice1.shape = (nx1,ny1)
lattice2.shape = (nx2,ny2)
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]]+=1
else:
lattice2[ix2[i], iy2[i]]+=1
else:
# create accumulation arrays
lattice1 = np.empty((nx1,ny1),dtype=object)
for i in xrange(nx1):
for j in xrange(ny1):
lattice1[i,j] = []
lattice2 = np.empty((nx2,ny2),dtype=object)
for i in xrange(nx2):
for j in xrange(ny2):
lattice2[i,j] = []
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]].append( C[i] )
else:
lattice2[ix2[i], iy2[i]].append( C[i] )
for i in xrange(nx1):
for j in xrange(ny1):
vals = lattice1[i,j]
if len(vals):
lattice1[i,j] = reduce_C_function( vals )
else:
lattice1[i,j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
vals = lattice2[i,j]
if len(vals):
lattice2[i,j] = reduce_C_function( vals )
else:
lattice2[i,j] = np.nan
accum = np.hstack((
lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
px = xmin + sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
py = ymin + sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
polygons = np.zeros((6, n, 2), float)
polygons[:,:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1)
polygons[:,:nx1*ny1,1] = np.tile(np.arange(ny1), nx1)
polygons[:,nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2)
polygons[:,nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5
if C is not None:
# remove accumulation bins with no data
polygons = polygons[:,good_idxs,:]
accum = accum[good_idxs]
polygons = np.transpose(polygons, axes=[1,0,2])
polygons[:,:,0] *= sx
polygons[:,:,1] *= sy
polygons[:,:,0] += px
polygons[:,:,1] += py
if xscale=='log':
polygons[:,:,0] = 10**(polygons[:,:,0])
xmin = 10**xmin
xmax = 10**xmax
self.set_xscale('log')
if yscale=='log':
polygons[:,:,1] = 10**(polygons[:,:,1])
ymin = 10**ymin
ymax = 10**ymax
self.set_yscale('log')
if edgecolors=='none':
edgecolors = 'face'
collection = mcoll.PolyCollection(
polygons,
edgecolors = edgecolors,
linewidths = linewidths,
transOffset = self.transData,
)
# Transform accum if needed
if bins=='log':
accum = np.log10(accum+1)
        elif bins is not None:
if not iterable(bins):
minimum, maximum = min(accum), max(accum)
bins-=1 # one less edge than bins
bins = minimum + (maximum-minimum)*np.arange(bins)/bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(accum)
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_alpha(alpha)
collection.update(kwargs)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
hexbin.__doc__ = cbook.dedent(hexbin.__doc__) % martist.kwdocd
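    # Illustrative sketch (comments only): log-binned counts of a large point
    # cloud; per the docstring, the counts are recovered from the returned
    # PolyCollection with get_array(). ``ax`` is assumed as above.
    #
    #     import numpy as np
    #     x = np.random.standard_normal(100000)
    #     y = 2.0 + 3.0 * x + 4.0 * np.random.standard_normal(100000)
    #     pc = ax.hexbin(x, y, gridsize=50, bins='log')
    #     counts = pc.get_array()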
def arrow(self, x, y, dx, dy, **kwargs):
"""
call signature::
arrow(x, y, dx, dy, **kwargs)
Draws arrow on specified axis from (*x*, *y*) to (*x* + *dx*,
*y* + *dy*).
Optional kwargs control the arrow properties:
%(FancyArrow)s
**Example:**
.. plot:: mpl_examples/pylab_examples/arrow_demo.py
"""
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_artist(a)
return a
arrow.__doc__ = cbook.dedent(arrow.__doc__) % martist.kwdocd
def quiverkey(self, *args, **kw):
qk = mquiver.QuiverKey(*args, **kw)
self.add_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
if not self._hold: self.cla()
q = mquiver.Quiver(self, *args, **kw)
self.add_collection(q, False)
self.update_datalim(q.XY)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
**Example:**
.. plot:: mpl_examples/pylab_examples/barb_demo.py
"""
if not self._hold: self.cla()
b = mquiver.Barbs(self, *args, **kw)
self.add_collection(b)
self.update_datalim(b.get_offsets())
self.autoscale_view()
return b
barbs.__doc__ = cbook.dedent(barbs.__doc__) % {
'barbs_doc': mquiver.Barbs.barbs_doc}
def fill(self, *args, **kwargs):
"""
call signature::
fill(*args, **kwargs)
Plot filled polygons. *args* is a variable length argument,
allowing for multiple *x*, *y* pairs with an optional color
format string; see :func:`~matplotlib.pyplot.plot` for details
on the argument parsing. For example, to plot a polygon with
vertices at *x*, *y* in blue.::
ax.fill(x,y, 'b' )
An arbitrary number of *x*, *y*, *color* groups can be specified::
ax.fill(x1, y1, 'g', x2, y2, 'r')
Return value is a list of :class:`~matplotlib.patches.Patch`
instances that were added.
The same color strings that :func:`~matplotlib.pyplot.plot`
supports are supported by the fill format string.
If you would like to fill below a curve, eg. shade a region
between 0 and *y* along *x*, use :meth:`fill_between`
The *closed* kwarg will close the polygon when *True* (default).
kwargs control the Polygon properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/fill_demo.py
"""
if not self._hold: self.cla()
patches = []
for poly in self._get_patches_for_fill(*args, **kwargs):
self.add_patch( poly )
patches.append( poly )
self.autoscale_view()
return patches
fill.__doc__ = cbook.dedent(fill.__doc__) % martist.kwdocd
def fill_between(self, x, y1, y2=0, where=None, **kwargs):
"""
call signature::
fill_between(x, y1, y2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *y1* and *y2* where
``where==True``
*x*
an N length np array of the x data
*y1*
an N length scalar or np array of the x data
*y2*
an N length scalar or np array of the x data
*where*
if None, default to fill between everywhere. If not None,
it is a a N length numpy boolean array and the fill will
only happen over the regions where ``where==True``
*kwargs*
keyword args passed on to the :class:`PolyCollection`
kwargs control the Polygon properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_between.py
"""
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = np.asarray(self.convert_xunits(x))
y1 = np.asarray(self.convert_yunits(y1))
y2 = np.asarray(self.convert_yunits(y2))
if not cbook.iterable(y1):
y1 = np.ones_like(x)*y1
if not cbook.iterable(y2):
y2 = np.ones_like(x)*y2
if where is None:
where = np.ones(len(x), np.bool)
where = np.asarray(where)
assert( (len(x)==len(y1)) and (len(x)==len(y2)) and len(x)==len(where))
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
theseverts = []
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2*N+2, 2), np.float)
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
X[0] = xslice[0], y2slice[0]
X[N+1] = xslice[-1], y2slice[-1]
X[1:N+1,0] = xslice
X[1:N+1,1] = y1slice
X[N+2:,0] = xslice[::-1]
X[N+2:,1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection)
self.autoscale_view()
return collection
fill_between.__doc__ = cbook.dedent(fill_between.__doc__) % martist.kwdocd
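# A minimal usage sketch for fill_between (illustrative only; the names
# t and s are arbitrary): shade the region between a curve and zero,
# restricted by a boolean mask.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   t = np.linspace(0.0, 2.0, 200)
#   s = np.sin(2*np.pi*t)
#   ax = plt.figure().add_subplot(111)
#   ax.plot(t, s, 'k-')
#   ax.fill_between(t, s, 0, where=s>=0)  # fill only where s is non-negative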
#### plotting z(x,y): imshow, pcolor and relatives, contour
def imshow(self, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=1.0, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
call signature::
imshow(X, cmap=None, norm=None, aspect=None, interpolation=None,
alpha=1.0, vmin=None, vmax=None, origin=None, extent=None,
**kwargs)
Display the image in *X* on the current axes. *X* may be a float
array, a uint8 array or a PIL image. If *X* is an array, *X*
can have the following shapes:
* MxN -- luminance (grayscale, float array only)
* MxNx3 -- RGB (float or uint8 array)
* MxNx4 -- RGBA (float or uint8 array)
The value for each component of MxNx3 and MxNx4 float arrays should be
in the range 0.0 to 1.0; MxN float arrays may be normalised.
An :class:`matplotlib.image.AxesImage` instance is returned.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance, eg. cm.jet.
If *None*, default to rc ``image.cmap`` value.
*cmap* is ignored when *X* has RGB(A) information
*aspect*: [ None | 'auto' | 'equal' | scalar ]
If 'auto', changes the image aspect ratio to match that of the axes
If 'equal', and *extent* is *None*, changes the axes
aspect ratio to match that of the image. If *extent* is
not *None*, the axes aspect ratio is changed to match that
of the extent.
If *None*, default to rc ``image.aspect`` value.
*interpolation*:
Acceptable values are *None*, 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos',
If *interpolation* is *None*, default to rc
``image.interpolation``. See also the *filternorm* and
*filterrad* parameters
*norm*: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance; if
*None*, defaults to :func:`normalize`. This scales
luminance -> 0-1
*norm* is only used for an MxN float array.
*vmin*/*vmax*: [ None | scalar ]
Used to scale a luminance image to 0-1. If either is
*None*, the min and max of the luminance values will be
used. Note if *norm* is not *None*, the settings for
*vmin* and *vmax* will be ignored.
*alpha*: scalar
The alpha blending value, between 0 (transparent) and 1 (opaque)
*origin*: [ None | 'upper' | 'lower' ]
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. If *None*, default to rc ``image.origin``.
*extent*: [ None | scalars (left, right, bottom, top) ]
Data values of the axes. The default assigns zero-based row,
column indices to the *x*, *y* centers of the pixels.
*shape*: [ None | scalars (columns, rows) ]
For raw buffer images
*filternorm*:
A parameter for the antigrain image resize filter. From the
antigrain documentation, if *filternorm* = 1, the filter normalizes
integer values and corrects the rounding errors. It doesn't do
anything with the source floating point values, it corrects only
integers according to the rule of 1.0 which means that any sum of
pixel weights must be equal to 1.0. So, the filter function must
produce a graph of the proper shape.
*filterrad*:
The filter radius for filters that have a radius
parameter, i.e. when interpolation is one of: 'sinc',
'lanczos' or 'blackman'
Additional kwargs are :class:`~matplotlib.artist.Artist` properties:
%(Artist)s
**Example:**
.. plot:: mpl_examples/pylab_examples/image_demo.py
"""
if not self._hold: self.cla()
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
if aspect is None: aspect = rcParams['image.aspect']
self.set_aspect(aspect)
im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
self._set_artist_props(im)
im.set_clip_path(self.patch)
#if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
im.set_url(url)
xmin, xmax, ymin, ymax = im.get_extent()
corners = (xmin, ymin), (xmax, ymax)
self.update_datalim(corners)
if self._autoscaleon:
self.set_xlim((xmin, xmax))
self.set_ylim((ymin, ymax))
self.images.append(im)
return im
imshow.__doc__ = cbook.dedent(imshow.__doc__) % martist.kwdocd
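# A minimal usage sketch for imshow (illustrative only): clip the color
# scaling with vmin/vmax and map the pixels onto explicit data coordinates
# with extent.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   Z = np.random.rand(20, 30)                  # MxN luminance array
#   ax = plt.figure().add_subplot(111)
#   im = ax.imshow(Z, interpolation='nearest', vmin=0.2, vmax=0.8,
#                  extent=(0.0, 3.0, 0.0, 2.0)) # (left, right, bottom, top)
#   plt.colorbar(im)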
def _pcolorargs(self, funcname, *args):
if len(args)==1:
C = args[0]
numRows, numCols = C.shape
X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) )
elif len(args)==3:
X, Y, C = args
else:
raise TypeError(
'Illegal arguments to {0!s}; see help({1!s})'.format(funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
if len(X.shape) != 2 or X.shape[0] == 1:
x = X.reshape(1,Nx)
X = x.repeat(Ny, axis=0)
if len(Y.shape) != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y inputs to {0!s}; see help({1!s})'.format(
funcname, funcname))
return X, Y, C
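# A sketch of the 1-D to 2-D expansion performed by _pcolorargs above;
# the reshape/repeat pair is equivalent to np.meshgrid (illustrative only).
#
#   import numpy as np
#   x = np.arange(4)                          # length Nx
#   y = np.arange(3)                          # length Ny
#   X = x.reshape(1, 4).repeat(3, axis=0)     # shape (Ny, Nx)
#   Y = y.reshape(3, 1).repeat(4, axis=1)     # shape (Ny, Nx)
#   XX, YY = np.meshgrid(x, y)
#   assert (X == XX).all() and (Y == YY).all()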
def pcolor(self, *args, **kwargs):
"""
call signatures::
pcolor(C, **kwargs)
pcolor(X, Y, C, **kwargs)
Create a pseudocolor plot of a 2-D array.
*C* is the array of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of
the colored quadrilaterals; the quadrilateral for C[i,j] has
corners at::
(X[i, j], Y[i, j]),
(X[i, j+1], Y[i, j+1]),
(X[i+1, j], Y[i+1, j]),
(X[i+1, j+1], Y[i+1, j+1]).
Ideally the dimensions of *X* and *Y* should be one greater
than those of *C*; if the dimensions are the same, then the
last row and column of *C* will be ignored.
Note that the column index corresponds to the
*x*-coordinate, and the row index corresponds to *y*; for
details, see the :ref:`Grid Orientation
<axes-pcolor-grid-orientation>` section below.
If either or both of *X* and *Y* are 1-D arrays or column vectors,
they will be expanded as needed into the appropriate 2-D arrays,
making a rectangular grid.
*X*, *Y* and *C* may be masked arrays. If either C[i, j], or one
of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
[i, j+1],[i+1, j+1]) is masked, nothing is plotted.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If *None*, use
rc settings.
norm: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance is used
to scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If *None*, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collection.Collection`
instance.
.. _axes-pcolor-grid-orientation:
The grid orientation follows the Matlab(TM) convention: an
array *C* with shape (*nrows*, *ncolumns*) is plotted with
the column number as *X* and the row number as *Y*, increasing
up; hence it is plotted the way the array would be printed,
except that the *Y* axis is reversed. That is, *C* is taken
as *C*(*y*, *x*).
Similarly for :func:`~matplotlib.pyplot.meshgrid`::
x = np.arange(5)
y = np.arange(3)
X, Y = meshgrid(x,y)
is equivalent to:
X = array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Y = array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2]])
so if you have::
C = rand( len(x), len(y))
then you need::
pcolor(X, Y, C.T)
or::
pcolor(C.T)
Matlab :func:`pcolor` always discards the last row and column
of *C*, but matplotlib displays the last row and column if *X* and
*Y* are not specified, or if *X* and *Y* have one more row and
column than *C*.
kwargs can be used to control the
:class:`~matplotlib.collection.PolyCollection` properties:
%(PolyCollection)s
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
X, Y, C = self._pcolorargs('pcolor', *args)
Ny, Nx = X.shape
# convert to MA, if necessary.
C = ma.asarray(C)
X = ma.asarray(X)
Y = ma.asarray(Y)
mask = ma.getmaskarray(X)+ma.getmaskarray(Y)
xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1]
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask
newaxis = np.newaxis
compress = np.compress
ravelmask = (mask==0).ravel()
X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel())
Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel())
X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel())
Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel())
X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel())
Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel())
X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel())
Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel())
npoly = len(X1)
xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis],
X2[:,newaxis], Y2[:,newaxis],
X3[:,newaxis], Y3[:,newaxis],
X4[:,newaxis], Y4[:,newaxis],
X1[:,newaxis], Y1[:,newaxis]),
axis=1)
verts = xy.reshape((npoly, 5, 2))
#verts = zip(zip(X1,Y1),zip(X2,Y2),zip(X3,Y3),zip(X4,Y4))
C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel())
if shading == 'faceted':
edgecolors = (0,0,0,1),
linewidths = (0.25,)
else:
edgecolors = 'face'
linewidths = (1.0,)
kwargs.setdefault('edgecolors', edgecolors)
kwargs.setdefault('antialiaseds', (0,))
kwargs.setdefault('linewidths', linewidths)
collection = mcoll.PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
x = X.compressed()
y = Y.compressed()
minx = np.amin(x)
maxx = np.amax(x)
miny = np.amin(y)
maxy = np.amax(y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolor.__doc__ = cbook.dedent(pcolor.__doc__) % martist.kwdocd
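# A usage sketch of the grid-orientation convention described above
# (illustrative only): C indexed as C[x, y] must be transposed so that
# rows map to y.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   x = np.arange(5)
#   y = np.arange(3)
#   X, Y = np.meshgrid(x, y)
#   C = np.random.rand(len(x), len(y))
#   ax = plt.figure().add_subplot(111)
#   ax.pcolor(X, Y, C.T)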
def pcolormesh(self, *args, **kwargs):
"""
call signatures::
pcolormesh(C)
pcolormesh(X, Y, C)
pcolormesh(C, **kwargs)
*C* may be a masked array, but *X* and *Y* may not. Masked
array support is implemented via *cmap* and *norm*; in
contrast, :func:`~matplotlib.pyplot.pcolor` simply does not
draw quadrilaterals with masked colors or vertices.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If None, use
rc settings.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1. If None, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If None, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collection.QuadMesh`
object.
kwargs can be used to control the
:class:`matplotlib.collections.QuadMesh`
properties:
%(QuadMesh)s
.. seealso::
:func:`~matplotlib.pyplot.pcolor`:
For an explanation of the grid orientation and the
expansion of 1-D *X* and/or *Y* to 2-D arrays.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
edgecolors = kwargs.pop('edgecolors', 'None')
antialiased = kwargs.pop('antialiased', False)
X, Y, C = self._pcolorargs('pcolormesh', *args)
Ny, Nx = X.shape
# convert to one dimensional arrays
C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at
# lower left corner
X = X.ravel()
Y = Y.ravel()
coords = np.zeros(((Nx * Ny), 2), dtype=float)
coords[:, 0] = X
coords[:, 1] = Y
if shading == 'faceted' or edgecolors != 'None':
showedges = 1
else:
showedges = 0
collection = mcoll.QuadMesh(
Nx - 1, Ny - 1, coords, showedges,
antialiased=antialiased) # kwargs are not used
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
minx = np.amin(X)
maxx = np.amax(X)
miny = np.amin(Y)
maxy = np.amax(Y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolormesh.__doc__ = cbook.dedent(pcolormesh.__doc__) % martist.kwdocd
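# A minimal pcolormesh sketch with a masked array (illustrative only);
# masked cells are handled through the colormap rather than being dropped.
#
#   import numpy as np
#   import numpy.ma as ma
#   import matplotlib.pyplot as plt
#   Z = np.random.rand(10, 12)
#   Zm = ma.masked_where(Z < 0.1, Z)          # mask the smallest values
#   ax = plt.figure().add_subplot(111)
#   mesh = ax.pcolormesh(Zm)
#   plt.colorbar(mesh)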
def pcolorfast(self, *args, **kwargs):
"""
pseudocolor plot of a 2-D array
Experimental; this is a version of pcolor that
does not draw lines, that provides the fastest
possible rendering with the Agg backend, and that
can handle any quadrilateral grid.
Call signatures::
pcolorfast(C, **kwargs)
pcolorfast(xr, yr, C, **kwargs)
pcolorfast(x, y, C, **kwargs)
pcolorfast(X, Y, C, **kwargs)
C is the 2D array of color values corresponding to quadrilateral
cells. Let (nr, nc) be its shape. C may be a masked array.
``pcolorfast(C, **kwargs)`` is equivalent to
``pcolorfast([0,nc], [0,nr], C, **kwargs)``
*xr*, *yr* specify the ranges of *x* and *y* corresponding to the
rectangular region bounding *C*. If::
xr = [x0, x1]
and::
yr = [y0,y1]
then *x* goes from *x0* to *x1* as the second index of *C* goes
from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of
cell (0,0), and (*x1*, *y1*) is the outermost corner of cell
(*nr*-1, *nc*-1). All cells are rectangles of the same size.
This is the fastest version.
*x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,
giving the x and y boundaries of the cells. Hence the cells are
rectangular but the grid may be nonuniform. The speed is
intermediate. (The grid is checked, and if found to be
uniform the fast version is used.)
*X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify
the (x,y) coordinates of the corners of the colored
quadrilaterals; the quadrilateral for C[i,j] has corners at
(X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),
(X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
Note that the column index corresponds to the x-coordinate,
and the row index corresponds to y; for details, see
the "Grid Orientation" section below.
Optional keyword arguments:
*cmap*: [ None | Colormap ]
A cm Colormap instance from cm. If None, use rc settings.
*norm*: [ None | Normalize ]
An mcolors.Normalize instance is used to scale luminance data to
0,1. If None, defaults to normalize()
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with norm to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. If you pass a norm instance, *vmin* and *vmax*
will be ignored.
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is an image if a regular or rectangular grid
is specified, and a QuadMesh collection in the general
quadrilateral case.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
C = args[-1]
nr, nc = C.shape
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and
np.ptp(dy) < 0.01*np.abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
style = "quadmesh"
else:
raise TypeError("arguments do not match valid signatures")
else:
raise TypeError("need 1 argument or 3 arguments")
if style == "quadmesh":
# convert to one dimensional arrays
# This should also be moved to the QuadMesh class
C = ma.ravel(C) # data point in each cell is value
# at lower left corner
X = x.ravel()
Y = y.ravel()
Nx = nc+1
Ny = nr+1
# The following needs to be cleaned up; the renderer
# requires separate contiguous arrays for X and Y,
# but the QuadMesh class requires the 2D array.
coords = np.empty(((Nx * Ny), 2), np.float64)
coords[:, 0] = X
coords[:, 1] = Y
# The QuadMesh class can also be changed to
# handle relevant superclass kwargs; the initializer
# should do much more than it does now.
collection = mcoll.QuadMesh(nc, nr, coords, 0)
collection.set_alpha(alpha)
collection.set_array(C)
collection.set_cmap(cmap)
collection.set_norm(norm)
self.add_collection(collection)
xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
ret = collection
else:
# One of the image styles:
xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(self, cmap, norm,
interpolation='nearest',
origin='lower',
extent=(xl, xr, yb, yt),
**kwargs)
im.set_data(C)
im.set_alpha(alpha)
self.images.append(im)
ret = im
if style == "pcolorimage":
im = mimage.PcolorImage(self, x, y, C,
cmap=cmap,
norm=norm,
alpha=alpha,
**kwargs)
self.images.append(im)
ret = im
self._set_artist_props(ret)
if vmin is not None or vmax is not None:
ret.set_clim(vmin, vmax)
else:
ret.autoscale_None()
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self.autoscale_view(tight=True)
return ret
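# A sketch of the three pcolorfast call styles (illustrative only); the
# fastest "image" path is used for ranges or uniformly spaced boundaries.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   C = np.random.rand(20, 30)                # nr=20, nc=30
#   ax = plt.figure().add_subplot(111)
#   ax.pcolorfast(C)                          # image style, index coordinates
#   ax.pcolorfast([0, 3.0], [0, 2.0], C)      # image style, bounding box
#   x = np.linspace(0, 3.0, 31)**2            # nonuniform edges, length nc+1
#   y = np.linspace(0, 2.0, 21)               # uniform edges, length nr+1
#   ax.pcolorfast(x, y, C)                    # pcolorimage style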
def contour(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = False
return mcontour.ContourSet(self, *args, **kwargs)
contour.__doc__ = mcontour.ContourSet.contour_doc
def contourf(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = True
return mcontour.ContourSet(self, *args, **kwargs)
contourf.__doc__ = mcontour.ContourSet.contour_doc
def clabel(self, CS, *args, **kwargs):
return CS.clabel(*args, **kwargs)
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
def table(self, **kwargs):
"""
call signature::
table(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
Add a table to the current axes. Returns a
:class:`matplotlib.table.Table` instance. For finer grained
control over tables, use the :class:`~matplotlib.table.Table`
class and add it to the axes with
:meth:`~matplotlib.axes.Axes.add_table`.
Thanks to John Gill for providing the class and table.
kwargs control the :class:`~matplotlib.table.Table`
properties:
%(Table)s
"""
return mtable.table(self, **kwargs)
table.__doc__ = cbook.dedent(table.__doc__) % martist.kwdocd
def twinx(self):
"""
call signature::
ax = twinx()
create a twin of Axes for generating a plot with a shared
x-axis but independent y axis. The y-axis of self will have
ticks on left and the returned axes will have ticks on the
right
"""
ax2 = self.figure.add_axes(self.get_position(True), sharex=self,
frameon=False)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position('right')
self.yaxis.tick_left()
return ax2
def twiny(self):
"""
call signature::
ax = twiny()
create a twin of Axes for generating a plot with a shared
y-axis but independent x axis. The x-axis of self will have
ticks on bottom and the returned axes will have ticks on the
top
"""
ax2 = self.figure.add_axes(self.get_position(True), sharey=self,
frameon=False)
ax2.xaxis.tick_top()
ax2.xaxis.set_label_position('top')
self.xaxis.tick_bottom()
return ax2
def get_shared_x_axes(self):
'Return a copy of the shared axes Grouper object for x axes'
return self._shared_x_axes
def get_shared_y_axes(self):
'Return a copy of the shared axes Grouper object for y axes'
return self._shared_y_axes
#### Data analysis
def hist(self, x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs):
"""
call signature::
hist(x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs)
Compute and draw the histogram of *x*. The return value is a
tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
[*patches0*, *patches1*,...]) if the input contains multiple
data.
Keyword arguments:
*bins*:
Either an integer number of bins or a sequence giving the
bins. *x* are the data to be binned. *x* can be an array,
a 2D array with multiple data in its columns, or a list of
arrays with data of different length. Note that if *bins*
is an integer, *bins* + 1 bin edges
will be returned, compatible with the semantics of
:func:`numpy.histogram` with the *new* = True argument.
Unequally spaced bins are supported if *bins* is a sequence.
*range*:
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, *range* is (x.min(), x.max()).
Range has no effect if *bins* is a sequence.
If *bins* is a sequence or *range* is specified, autoscaling is
set off (*autoscale_on* is set to *False*) and the xaxis limits
are set to encompass the full specified bin range.
*normed*:
If *True*, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
``n/(len(x)*dbin)``. In a probability density, the integral of
the histogram should be 1; you can verify that with a
trapezoidal integration of the probability density function::
pdf, bins, patches = ax.hist(...)
print np.sum(pdf * np.diff(bins))
*cumulative*:
If *True*, then a histogram is computed where each bin
gives the counts in that bin plus all bins for smaller values.
The last bin gives the total number of datapoints. If *normed*
is also *True* then the histogram is normalized such that the
last bin equals 1. If *cumulative* evaluates to less than 0
(e.g. -1), the direction of accumulation is reversed. In this
case, if *normed* is also *True*, then the histogram is normalized
such that the first bin equals 1.
*histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given, the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
*align*: ['left' | 'mid' | 'right' ]
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
*orientation*: [ 'horizontal' | 'vertical' ]
If 'horizontal', :func:`~matplotlib.pyplot.barh` will be
used for bar-type histograms and the *bottom* kwarg will be
the left edges.
*rwidth*:
The relative width of the bars as a fraction of the bin
width. If *None*, automatically compute the width. Ignored
if *histtype* = 'step' or 'stepfilled'.
*log*:
If *True*, the histogram axis will be set to a log scale.
If *log* is *True* and *x* is a 1D array, empty bins will
be filtered out and only the non-empty (*n*, *bins*,
*patches*) will be returned.
kwargs are used to update the properties of the hist
:class:`~matplotlib.patches.Rectangle` instances:
%(Rectangle)s
You can use labels for your histogram, and only the first
:class:`~matplotlib.patches.Rectangle` gets the label (the
others get the magic string '_nolegend_'). This will make the
histograms work in the intuitive way for bar charts::
ax.hist(10+2*np.random.randn(1000), label='men')
ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)
ax.legend()
**Example:**
.. plot:: mpl_examples/pylab_examples/histogram_demo.py
"""
if not self._hold: self.cla()
# NOTE: the range keyword overwrites the built-in func range !!!
# needs to be fixed in with numpy !!!
if kwargs.get('width') is not None:
raise DeprecationWarning(
'hist now uses the rwidth to give relative width '
'and not absolute width')
try:
# make sure a copy is created: don't use asarray
x = np.transpose(np.array(x))
if len(x.shape)==1:
x.shape = (1,x.shape[0])
elif len(x.shape)==2 and x.shape[1]<x.shape[0]:
warnings.warn('2D hist should be nsamples x nvariables; '
'this looks transposed')
except ValueError:
# multiple hist with data of different length
if iterable(x[0]) and not is_string_like(x[0]):
tx = []
for i in xrange(len(x)):
tx.append( np.array(x[i]) )
x = tx
else:
raise ValueError('Cannot use provided data to create a histogram')
# Check whether bins or range are given explicitly. In that
# case do not autoscale axes.
binsgiven = (cbook.iterable(bins) or range is not None)
# check the version of the numpy
if np.__version__ < "1.3": # version 1.1 and 1.2
hist_kwargs = dict(range=range,
normed=bool(normed), new=True)
else: # version 1.3 and later, drop new=True
hist_kwargs = dict(range=range,
normed=bool(normed))
n = []
for i in xrange(len(x)):
# this will automatically overwrite bins,
# so that each histogram uses the same bins
m, bins = np.histogram(x[i], bins, **hist_kwargs)
n.append(m)
if cumulative:
slc = slice(None)
if cbook.is_numlike(cumulative) and cumulative < 0:
slc = slice(None,None,-1)
if normed:
n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
else:
n = [m[slc].cumsum()[slc] for m in n]
patches = []
if histtype.startswith('bar'):
totwidth = np.diff(bins)
stacked = False
if rwidth is not None: dr = min(1., max(0., rwidth))
elif len(n)>1: dr = 0.8
else: dr = 1.0
if histtype=='bar':
width = dr*totwidth/len(n)
dw = width
if len(n)>1:
boffset = -0.5*dr*totwidth*(1.-1./len(n))
else:
boffset = 0.0
elif histtype=='barstacked':
width = dr*totwidth
boffset, dw = 0.0, 0.0
stacked = True
else:
raise ValueError('invalid histtype: {0!s}'.format(histtype))
if align == 'mid' or align == 'edge':
boffset += 0.5*totwidth
elif align == 'right':
boffset += totwidth
elif align != 'left' and align != 'center':
raise ValueError('invalid align: {0!s}'.format(align))
if orientation == 'horizontal':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.barh(bins[:-1]+boffset, m, height=width,
left=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
elif orientation == 'vertical':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.bar(bins[:-1]+boffset, m, width=width,
bottom=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
else:
raise ValueError('invalid orientation: {0!s}'.format(orientation))
elif histtype.startswith('step'):
x = np.zeros( 2*len(bins), np.float )
y = np.zeros( 2*len(bins), np.float )
x[0::2], x[1::2] = bins, bins
if align == 'left' or align == 'center':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
elif align != 'mid' and align != 'edge':
raise ValueError('invalid align: {0!s}'.format(align))
if log:
y[0],y[-1] = 1e-100, 1e-100
if orientation == 'horizontal':
self.set_xscale('log')
elif orientation == 'vertical':
self.set_yscale('log')
fill = False
if histtype == 'stepfilled':
fill = True
elif histtype != 'step':
raise ValueError('invalid histtype: {0!s}'.format(histtype))
for m in n:
y[1:-1:2], y[2::2] = m, m
if orientation == 'horizontal':
x,y = y,x
elif orientation != 'vertical':
raise ValueError('invalid orientation: {0!s}'.format(orientation))
color = self._get_lines._get_next_cycle_color()
if fill:
patches.append( self.fill(x, y,
closed=False, facecolor=color) )
else:
patches.append( self.fill(x, y,
closed=False, edgecolor=color, fill=False) )
# adopted from adjust_x/ylim part of the bar method
if orientation == 'horizontal':
xmin, xmax = 0, self.dataLim.intervalx[1]
for m in n:
xmin = np.amin(m[m!=0]) # filter out the 0 height bins
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
elif orientation == 'vertical':
ymin, ymax = 0, self.dataLim.intervaly[1]
for m in n:
ymin = np.amin(m[m!=0]) # filter out the 0 height bins
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
else:
raise ValueError('invalid histtype: {0!s}'.format(histtype))
label = kwargs.pop('label', '')
for patch in patches:
for p in patch:
p.update(kwargs)
p.set_label(label)
label = '_nolegend_'
if binsgiven:
self.set_autoscale_on(False)
if orientation == 'vertical':
self.autoscale_view(scalex=False, scaley=True)
XL = self.xaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_xbound(XL)
else:
self.autoscale_view(scalex=True, scaley=False)
YL = self.yaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_ybound(YL)
if len(n)==1:
return n[0], bins, cbook.silent_list('Patch', patches[0])
else:
return n, bins, cbook.silent_list('Lists of Patches', patches)
hist.__doc__ = cbook.dedent(hist.__doc__) % martist.kwdocd
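# A minimal hist sketch with two labelled datasets (illustrative only);
# reusing the returned bins keeps both histograms on the same edges.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   ax = plt.figure().add_subplot(111)
#   n, bins, patches = ax.hist(10 + 2*np.random.randn(1000), bins=30,
#                              label='men')
#   ax.hist(12 + 3*np.random.randn(1000), bins=bins, label='women', alpha=0.5)
#   ax.legend()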
def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The power spectral density by Welch's average periodogram
method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute *Pxx*, with a
scaling to correct for power loss due to windowing. *Fs* is the
sampling frequency.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*Pxx*, *freqs*).
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
is returned.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/psd_demo.py
"""
if not self._hold: self.cla()
pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to,
sides, scale_by_freq)
pxx.shape = len(freqs),
freqs += Fc
if scale_by_freq in (None, True):
psd_units = 'dB/Hz'
else:
psd_units = 'dB'
self.plot(freqs, 10*np.log10(pxx), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Power Spectral Density ({0!s})'.format(psd_units))
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
logi = int(np.log10(intv))
if logi==0: logi=.1
step = 10*logi
#print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxx, freqs
psd_doc_dict = dict()
psd_doc_dict.update(martist.kwdocd)
psd_doc_dict.update(mlab.kwdocd)
psd_doc_dict['PSD'] = cbook.dedent(psd_doc_dict['PSD'])
psd.__doc__ = cbook.dedent(psd.__doc__) % psd_doc_dict
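# A minimal psd sketch (illustrative only): a noisy 5 Hz sinusoid sampled
# at Fs=100 Hz should produce a spectral peak near 5 Hz.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   Fs = 100.0
#   t = np.arange(0, 10, 1.0/Fs)
#   x = np.sin(2*np.pi*5*t) + 0.5*np.random.randn(len(t))
#   ax = plt.figure().add_subplot(111)
#   Pxx, freqs = ax.psd(x, NFFT=256, Fs=Fs)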
def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
Returns the tuple (*Pxy*, *freqs*). *P* is the cross spectrum
(complex valued), and :math:`10\log_{10}|P_{xy}|` is
plotted.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the Line2D properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/csd_demo.py
.. seealso:
:meth:`psd`
For a description of the optional parameters.
"""
if not self._hold: self.cla()
pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
pxy.shape = len(freqs),
# pxy is complex
freqs += Fc
self.plot(freqs, 10*np.log10(np.absolute(pxy)), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Cross Spectrum Magnitude (dB)')
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
# mirror psd(): avoid a zero tick step when the interval is small
logi = int(np.log10(intv))
if logi==0: logi = .1
step = 10*logi
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxy, freqs
csd.__doc__ = cbook.dedent(csd.__doc__) % psd_doc_dict
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,
window = mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
Plot the coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
The return value is a tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector.
kwargs are applied to the lines.
References:
* Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D`
properties of the coherence plot:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/cohere_demo.py
"""
if not self._hold: self.cla()
cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap,
scale_by_freq)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
cohere.__doc__ = cbook.dedent(cohere.__doc__) % psd_doc_dict
def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None):
"""
call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None)
Compute a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the PSD of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the y extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.cm.Colormap` instance; if *None* use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`mlab.specgram`
Return value is (*Pxx*, *freqs*, *bins*, *im*):
- *bins* are the time points the spectrogram is calculated over
- *freqs* is an array of frequencies
- *Pxx* is a len(times) x len(freqs) array of power
- *im* is a :class:`matplotlib.image.AxesImage` instance
Note: If *x* is real (i.e. non-complex), only the positive
spectrum is shown. If *x* is complex, both positive and
negative parts of the spectrum are shown. This can be
overridden using the *sides* keyword argument.
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
"""
if not self._hold: self.cla()
Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend,
window, noverlap, pad_to, sides, scale_by_freq)
Z = 10. * np.log10(Pxx)
Z = np.flipud(Z)
if xextent is None: xextent = 0, np.amax(bins)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent)
self.axis('auto')
return Pxx, freqs, bins, im
specgram.__doc__ = cbook.dedent(specgram.__doc__) % psd_doc_dict
del psd_doc_dict #So that this does not become an Axes attribute
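# A minimal specgram sketch (illustrative only): a signal whose frequency
# increases with time shows up as a rising ridge in the spectrogram.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   Fs = 1000.0
#   t = np.arange(0, 5, 1.0/Fs)
#   x = np.sin(2*np.pi*(50 + 20*t)*t)
#   ax = plt.figure().add_subplot(111)
#   Pxx, freqs, bins, im = ax.specgram(x, NFFT=256, Fs=Fs, noverlap=128)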
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs):
"""
call signature::
spy(Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs)
``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.
If *precision* is 0, any non-zero value will be plotted;
else, values of :math:`|Z| > precision` will be plotted.
For :class:`scipy.sparse.spmatrix` instances, there is a
special case: if *precision* is 'present', any value present in
the array will be plotted, even if it is identically zero.
The array will be plotted as it would be printed, with
the first index (row) increasing down and the second
index (column) increasing to the right.
By default aspect is 'equal', so that each array element
occupies a square space; set the aspect kwarg to 'auto'
to allow the plot to fill the plot box, or to any scalar
number to specify the aspect ratio of an array element
directly.
Two plotting styles are available: image or marker. Both
are available for full arrays, but only the marker style
works for :class:`scipy.sparse.spmatrix` instances.
If *marker* and *markersize* are *None*, an image will be
returned and any remaining kwargs are passed to
:func:`~matplotlib.pyplot.imshow`; else, a
:class:`~matplotlib.lines.Line2D` object will be returned with
the value of marker determining the marker type, and any
remaining kwargs passed to the
:meth:`~matplotlib.axes.Axes.plot` method.
If *marker* and *markersize* are *None*, useful kwargs include:
* *cmap*
* *alpha*
.. seealso::
:func:`~matplotlib.pyplot.imshow`
For controlling colors, e.g. cyan background and red marks,
use::
cmap = mcolors.ListedColormap(['c','r'])
If *marker* or *markersize* is not *None*, useful kwargs include:
* *marker*
* *markersize*
* *color*
Useful values for *marker* include:
* 's' square (default)
* 'o' circle
* '.' point
* ',' pixel
.. seealso::
:func:`~matplotlib.pyplot.plot`
"""
if precision is None:
precision = 0
warnings.warn("Use precision=0 instead of None", DeprecationWarning)
# 2008/10/03
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.absolute(Z)>precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
extent=extent, origin='upper', **kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.absolute(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.absolute(Z)>precision
y, x = np.nonzero(nonzero)
if marker is None: marker = 's'
if markersize is None: markersize = 10
marks = mlines.Line2D(x, y, linestyle='None',
marker=marker, markersize=markersize, **kwargs)
self.add_line(marks)
nr, nc = Z.shape
self.set_xlim(xmin=-0.5, xmax=nc-0.5)
self.set_ylim(ymin=nr-0.5, ymax=-0.5)
self.set_aspect(aspect)
ret = marks
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return ret
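# A minimal spy sketch (illustrative only): image style for a dense array,
# marker style when a marker or markersize is given.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   Z = np.random.rand(30, 30)
#   Z[Z < 0.9] = 0                            # keep roughly 10% non-zero
#   ax = plt.figure().add_subplot(111)
#   ax.spy(Z)                                 # image style
#   ax2 = plt.figure().add_subplot(111)
#   ax2.spy(Z, marker='.', markersize=5)      # marker style via plot()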
def matshow(self, Z, **kwargs):
'''
Plot a matrix or array as an image.
The matrix will be shown the way it would be printed,
with the first row at the top. Row and column numbering
is zero-based.
Argument:
*Z* anything that can be interpreted as a 2-D array
kwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`.
:meth:`matshow` sets defaults for *extent*, *origin*,
*interpolation*, and *aspect*; use care in overriding the
*extent* and *origin* kwargs, because they interact. (Also,
if you want to change them, you probably should be using
imshow directly in your own version of matshow.)
Returns: an :class:`matplotlib.image.AxesImage` instance.
'''
Z = np.asarray(Z)
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
kw = {'extent': extent,
'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal'} # (already the imshow default)
kw.update(kwargs)
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return im
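# A minimal matshow sketch (illustrative only): the first row of the array
# is drawn at the top, matching the printed layout.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   A = np.arange(12).reshape(3, 4)
#   ax = plt.figure().add_subplot(111)
#   im = ax.matshow(A)
#   plt.colorbar(im)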
class SubplotBase:
"""
Base class for subplots, which are :class:`Axes` instances with
additional methods to facilitate generating and manipulating a set
of :class:`Axes` within a figure.
"""
def __init__(self, fig, *args, **kwargs):
"""
*fig* is a :class:`matplotlib.figure.Figure` instance.
*args* is the tuple (*numRows*, *numCols*, *plotNum*), where
the array of subplots in the figure has dimensions *numRows*,
*numCols*, and where *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
"""
self.figure = fig
if len(args)==1:
s = str(args[0])
if len(s) != 3:
raise ValueError('Argument to subplot must be 3 digits long')
rows, cols, num = map(int, s)
elif len(args)==3:
rows, cols, num = args
else:
raise ValueError( 'Illegal argument to subplot')
total = rows*cols
num -= 1 # convert from matlab to python indexing
# ie num in range(0,total)
if num >= total:
raise ValueError( 'Subplot number exceeds total subplots')
self._rows = rows
self._cols = cols
self._num = num
self.update_params()
# _axes_class is set in the subplot_class_factory
self._axes_class.__init__(self, fig, self.figbox, **kwargs)
def get_geometry(self):
'get the subplot geometry, eg 2,2,3'
return self._rows, self._cols, self._num+1
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
'change subplot geometry, eg. from 1,1,1 to 2,2,3'
self._rows = numrows
self._cols = numcols
self._num = num-1
self.update_params()
self.set_position(self.figbox)
def update_params(self):
'update the subplot position from fig.subplotpars'
rows = self._rows
cols = self._cols
num = self._num
pars = self.figure.subplotpars
left = pars.left
right = pars.right
bottom = pars.bottom
top = pars.top
wspace = pars.wspace
hspace = pars.hspace
totWidth = right-left
totHeight = top-bottom
figH = totHeight/(rows + hspace*(rows-1))
sepH = hspace*figH
figW = totWidth/(cols + wspace*(cols-1))
sepW = wspace*figW
rowNum, colNum = divmod(num, cols)
figBottom = top - (rowNum+1)*figH - rowNum*sepH
figLeft = left + colNum*(figW + sepW)
self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,
figW, figH)
self.rowNum = rowNum
self.colNum = colNum
self.numRows = rows
self.numCols = cols
if 0:
print 'rcn', rows, cols, num
print 'lbrt', left, bottom, right, top
print 'self.figBottom', self.figBottom
print 'self.figLeft', self.figLeft
print 'self.figW', self.figW
print 'self.figH', self.figH
print 'self.rowNum', self.rowNum
print 'self.colNum', self.colNum
print 'self.numRows', self.numRows
print 'self.numCols', self.numCols
def is_first_col(self):
return self.colNum==0
def is_first_row(self):
return self.rowNum==0
def is_last_row(self):
return self.rowNum==self.numRows-1
def is_last_col(self):
return self.colNum==self.numCols-1
# COVERAGE NOTE: Never used internally or from examples
def label_outer(self):
"""
set the visible property on ticklabels so xticklabels are
visible only if the subplot is in the last row and yticklabels
are visible only if the subplot is in the first column
"""
lastrow = self.is_last_row()
firstcol = self.is_first_col()
for label in self.get_xticklabels():
label.set_visible(lastrow)
for label in self.get_yticklabels():
label.set_visible(firstcol)
_subplot_classes = {}
def subplot_class_factory(axes_class=None):
# This makes a new class that inherits from SubplotBase and the
# given axes_class (which is assumed to be a subclass of Axes).
# This is perhaps a little bit roundabout to make a new class on
# the fly like this, but it means that a new Subplot class does
# not have to be created for every type of Axes.
if axes_class is None:
axes_class = Axes
new_class = _subplot_classes.get(axes_class)
if new_class is None:
new_class = new.classobj("{0!s}Subplot".format((axes_class.__name__)),
(SubplotBase, axes_class),
{'_axes_class': axes_class})
_subplot_classes[axes_class] = new_class
return new_class
# This is provided for backward compatibility
Subplot = subplot_class_factory()
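# A usage sketch for subplot_class_factory (illustrative only): the factory
# mixes SubplotBase with an Axes subclass and caches the result per class.
#
#   import matplotlib.figure
#   fig = matplotlib.figure.Figure()
#   cls = subplot_class_factory()             # defaults to the plain Axes
#   ax = cls(fig, 2, 2, 3)                    # subplot 3 of a 2x2 grid
#   assert ax.get_geometry() == (2, 2, 3)
#   assert cls is subplot_class_factory()     # cached, same class object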
martist.kwdocd['Axes'] = martist.kwdocd['Subplot'] = martist.kwdoc(Axes)
"""
# this is some discarded code I was using to find the minimum positive
# data point for some log scaling fixes. I realized there was a
# cleaner way to do it, but am keeping this around as an example for
# how to get the data out of the axes. Might want to make something
# like this a method one day, or better yet make get_verts an Artist
# method
minx, maxx = self.get_xlim()
if minx<=0 or maxx<=0:
# find the min pos value in the data
xs = []
for line in self.lines:
xs.extend(line.get_xdata(orig=False))
for patch in self.patches:
xs.extend([x for x,y in patch.get_verts()])
for collection in self.collections:
xs.extend([x for x,y in collection.get_verts()])
posx = [x for x in xs if x>0]
if len(posx):
minx = min(posx)
maxx = max(posx)
# warning, probably breaks inverted axis
self.set_xlim((0.1*minx, maxx))
"""
| runt18/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/axes.py | Python | agpl-3.0 | 260242 | ["Gaussian"] | 75c311883e3b4721870ffb3efea7bc0c4695a4670a93317f9c00d73c004d72c1 |
import numpy as np
import openpnm as op
from tqdm import tqdm
__all__ = [
'plot_connections',
'plot_coordinates',
'plot_networkx',
'plot_vpython',
'plot_tutorial',
'plot_network_jupyter',
'generate_voxel_image',
]
def plot_connections(network,
throats=None,
ax=None,
size_by=None,
color_by=None,
cmap='jet',
color='b',
alpha=1.0,
linestyle='solid',
linewidth=1,
**kwargs): # pragma: no cover
r"""
Produce a 3D plot of the network topology.
This shows how throats connect for quick visualization without having
to export data to view in Paraview.
Parameters
----------
network : GenericNetwork
The network whose topological connections to plot
throats : array_like (optional)
The list of throats to plot if only a sub-sample is desired. This is
useful for inspecting a small region of the network. If no throats are
specified then all throats are shown.
ax : Matplotlib axis handle (optional)
If ``ax`` is supplied, then the topology will be overlaid on this
plot. This makes it possible to combine coordinates and connections,
and to color throats differently, for instance.
size_by : array_like (optional)
An ndarray of throat values (e.g. alg['throat.rate']). These
values are used to scale the ``linewidth``, so if the lines are too
thin, then increase ``linewidth``.
color_by : str or array_like (optional)
An ndarray of throat values (e.g. alg['throat.rate']).
cmap : str or cmap object (optional)
The matplotlib colormap to use if specifying a throat property
for ``color_by``
color : str (optional)
A matplotlib named color (e.g. 'r' for red).
alpha : float (optional)
The transparency of the lines, with 1 being solid and 0 being invisible
linestyle : str (optional)
Can be one of {'solid', 'dashed', 'dashdot', 'dotted'}. Default is
'solid'.
linewidth : float (optional)
Controls the thickness of drawn lines. Default is 1. If a value is
provided for ``size_by``, those values are used to scale the
``linewidth``.
**kwargs : dict
All other keyword arguments are passed on to the ``Line3DCollection``
class of matplotlib, so check their documentation for additional
formatting options.
Returns
-------
lc : LineCollection or Line3DCollection
Matplotlib object containing the lines representing the throats.
Notes
-----
To create a single plot containing both pore coordinates and throats,
consider creating an empty figure and then pass the ``ax`` object as
an argument to ``plot_connections`` and ``plot_coordinates``.
Otherwise, each call to either of these methods creates a new figure.
See Also
--------
plot_coordinates
Examples
--------
>>> import openpnm as op
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
>>> mpl.use('Agg')
>>> pn = op.network.Cubic(shape=[10, 10, 3])
>>> pn.add_boundary_pores()
>>> Ts = pn.throats('*boundary', mode='not') # find internal throats
>>> fig, ax = plt.subplots() # create empty figure
>>> _ = op.topotools.plot_connections(network=pn,
... throats=Ts) # plot internal throats
>>> Ts = pn.throats('*boundary') # find boundary throats
>>> _ = op.topotools.plot_connections(network=pn,
... throats=Ts,
... ax=ax,
... color='r') # plot boundary throats in red
"""
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import colors as mcolors
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import LineCollection
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from openpnm.topotools import dimensionality
Ts = network.Ts if throats is None else network._parse_indices(throats)
dim = dimensionality(network)
ThreeD = True if dim.sum() == 3 else False
# Add a dummy axis for 1D networks
if dim.sum() == 1:
dim[np.argwhere(~dim)[0]] = True
if "fig" in kwargs.keys():
raise Exception("'fig' argument is deprecated, use 'ax' instead.")
if ax is None:
fig, ax = plt.subplots()
else:
# The next line is necessary if ax was created using plt.subplots()
fig, ax = ax.get_figure(), ax.get_figure().gca()
if ThreeD and ax.name != '3d':
fig.delaxes(ax)
ax = fig.add_subplot(111, projection='3d')
# Collect coordinates
Ps = np.unique(network['throat.conns'][Ts])
X, Y, Z = network['pore.coords'][Ps].T
xyz = network["pore.coords"][:, dim]
P1, P2 = network["throat.conns"][Ts].T
throat_pos = np.column_stack((xyz[P1], xyz[P2])).reshape((Ts.size, 2, dim.sum()))
# Deal with optional style related arguments
if 'c' in kwargs.keys():
color = kwargs.pop('c')
color = mcolors.to_rgb(color) + tuple([alpha])
# Override colors with color_by if given
if color_by is not None:
color = cm.get_cmap(name=cmap)(color_by / color_by.max())
color[:, 3] = alpha
if size_by is not None:
linewidth = size_by / size_by.max() * linewidth
if ThreeD:
lc = Line3DCollection(throat_pos, colors=color, cmap=cmap,
linestyles=linestyle, linewidths=linewidth,
antialiaseds=np.ones_like(network.Ts), **kwargs)
else:
lc = LineCollection(throat_pos, colors=color, cmap=cmap,
linestyles=linestyle, linewidths=linewidth,
antialiaseds=np.ones_like(network.Ts), **kwargs)
ax.add_collection(lc)
_scale_axes(ax=ax, X=X, Y=Y, Z=Z)
_label_axes(ax=ax, X=X, Y=Y, Z=Z)
fig.tight_layout()
return lc
def plot_coordinates(network,
pores=None,
ax=None,
size_by=None,
color_by=None,
cmap='jet',
color='r',
alpha=1.0,
marker='o',
markersize=10,
**kwargs): # pragma: no cover
r"""
Produce a 3D plot showing specified pore coordinates as markers.
Parameters
----------
network : GenericNetwork
The network whose topological connections to plot.
pores : array_like (optional)
The list of pores to plot if only a sub-sample is desired. This is
useful for inspecting a small region of the network. If no pores
are specified then all are shown.
ax : Matplotlib axis handle
If ``ax`` is supplied, then the coordinates will be overlaid.
This enables the plotting of multiple different sets of pores as
well as throat connections from ``plot_connections``.
size_by : str or array_like
An ndarray of pore values (e.g. alg['pore.concentration']). These
values are normalized and scaled by ``markersize``.
color_by : str or array_like
An ndarray of pore values (e.g. alg['pore.concentration']).
cmap : str or cmap object
The matplotlib colormap to use if specifying a pore property
for ``color_by``
color : str
A matplotlib named color (e.g. 'r' for red).
alpha : float
The transparency of the lines, with 1 being solid and 0 being invisible
marker : str (optional)
The marker to use. The default is a circle. Options are explained
`here <https://matplotlib.org/3.2.1/api/markers_api.html>`_
markersize : scalar
Controls the size of the markers; default is 10. This value is used to scale
the ``size_by`` argument if given.
**kwargs
All other keyword arguments are passed on to the ``scatter``
function of matplotlib, so check their documentation for additional
formatting options.
Returns
-------
pc : PathCollection
Matplotlib object containing the markers representing the pores.
Notes
-----
To create a single plot containing both pore coordinates and throats,
consider creating an empty figure and then pass the ``ax`` object as
an argument to ``plot_connections`` and ``plot_coordinates``.
Otherwise, each call to either of these methods creates a new figure.
See Also
--------
plot_connections
Examples
--------
>>> import openpnm as op
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
>>> mpl.use('Agg')
>>> pn = op.network.Cubic(shape=[10, 10, 3])
>>> pn.add_boundary_pores()
>>> Ps = pn.pores('internal') # find internal pores
>>> fig, ax = plt.subplots() # create empty figure
>>> _ = op.topotools.plot_coordinates(network=pn,
... pores=Ps,
... color='b',
... ax=ax) # plot internal pores
>>> Ps = pn.pores('*boundary') # find boundary pores
>>> _ = op.topotools.plot_coordinates(network=pn,
... pores=Ps,
... color='r',
... ax=ax) # plot boundary pores in red
"""
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from openpnm.topotools import dimensionality
Ps = network.Ps if pores is None else network._parse_indices(pores)
dim = dimensionality(network)
ThreeD = True if dim.sum() == 3 else False
# Add a dummy axis for 1D networks
if dim.sum() == 1:
dim[np.argwhere(~dim)[0]] = True
# Add 2 dummy axes for 0D networks (1 pore only)
if dim.sum() == 0:
dim[[0, 1]] = True
if "fig" in kwargs.keys():
raise Exception("'fig' argument is deprecated, use 'ax' instead.")
if ax is None:
fig, ax = plt.subplots()
else:
# The next line is necessary if ax was created using plt.subplots()
fig, ax = ax.get_figure(), ax.get_figure().gca()
if ThreeD and ax.name != '3d':
fig.delaxes(ax)
ax = fig.add_subplot(111, projection='3d')
# Collect specified coordinates
X, Y, Z = network['pore.coords'][Ps].T
# The bounding box for the figure is the entire network (this prevents
# overwriting the axes limits of an existing figure)
Xl, Yl, Zl = network['pore.coords'].T
# Parse formatting kwargs
if 'c' in kwargs.keys():
color = kwargs.pop('c')
if 's' in kwargs.keys():
markersize = kwargs.pop('s')
if color_by is not None:
color = cm.get_cmap(name=cmap)(color_by / color_by.max())
if size_by is not None:
markersize = size_by / size_by.max() * markersize
if ThreeD:
sc = ax.scatter(X, Y, Z, c=color, s=markersize, marker=marker, alpha=alpha, **kwargs)
_scale_axes(ax=ax, X=Xl, Y=Yl, Z=Zl)
else:
_X, _Y = np.column_stack((X, Y, Z))[:, dim].T
sc = ax.scatter(_X, _Y, c=color, s=markersize, marker=marker, alpha=alpha, **kwargs)
_scale_axes(ax=ax, X=Xl, Y=Yl, Z=np.zeros_like(Yl))
_label_axes(ax=ax, X=Xl, Y=Yl, Z=Zl)
fig.tight_layout()
return sc
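# -----------------------------------------------------------------------------
# Editor's sketch (not part of the library API): a minimal example of
# overlaying pores and throats on one set of axes, as recommended in the
# Notes of ``plot_coordinates``. The network and styling below are
# illustrative only.
def _demo_overlay_pores_and_throats():  # pragma: no cover
    import matplotlib.pyplot as plt
    pn = op.network.Cubic(shape=[5, 5, 1])
    fig, ax = plt.subplots()
    plot_connections(network=pn, ax=ax, color='k')  # throats as lines
    plot_coordinates(network=pn, ax=ax, color='b', markersize=30)  # pores
    return fig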
def _label_axes(ax, X, Y, Z):
labels = ["X", "Y", "Z"]
dim = np.zeros(3, dtype=bool)
for i, arr in enumerate([X, Y, Z]):
if np.unique(arr).size > 1:
dim[i] = True
# Add a dummy axis for 1D networks
if dim.sum() == 1:
dim[np.argwhere(~dim)[0]] = True
# Add 2 dummy axes for 0D networks (1 pore only)
if dim.sum() == 0:
dim[[0, 1]] = True
dim_idx = np.argwhere(dim).squeeze()
ax.set_xlabel(labels[dim_idx[0]])
ax.set_ylabel(labels[dim_idx[1]])
if hasattr(ax, "set_zlim"):
ax.set_zlabel("Z")
def _scale_axes(ax, X, Y, Z):
max_range = np.ptp([X, Y, Z]).max() / 2
mid_x = (X.max() + X.min()) * 0.5
mid_y = (Y.max() + Y.min()) * 0.5
mid_z = (Z.max() + Z.min()) * 0.5
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
if hasattr(ax, "set_zlim"):
ax.set_zlim(mid_z - max_range, mid_z + max_range)
else:
ax.axis("equal")
def plot_networkx(network,
plot_throats=True,
labels=None,
colors=None,
scale=1,
ax=None,
alpha=1.0): # pragma: no cover
r"""
Creates a pretty 2d plot for 2d OpenPNM networks.
Parameters
----------
network : GenericNetwork
plot_throats : bool, optional
Plots throats as well as pores, if True.
labels : list, optional
List of OpenPNM labels
colors : list, optional
List of corresponding colors to the given `labels`.
scale : float, optional
Scale factor for size of pores.
ax : matplotlib.Axes, optional
Matplotlib axes object
alpha : float, optional
Transparency value, 1 is opaque and 0 is transparent
"""
import matplotlib.pyplot as plt
from matplotlib.collections import PathCollection
from networkx import Graph, draw_networkx_nodes, draw_networkx_edges
from openpnm.topotools import dimensionality
dims = dimensionality(network)
if dims.sum() > 2:
raise Exception("NetworkX plotting only works for 2D networks.")
temp = network['pore.coords'].T[dims].squeeze()
if dims.sum() == 1:
x = temp
y = np.zeros_like(x)
if dims.sum() == 2:
x, y = temp
try:
node_size = scale * network['pore.diameter']
except KeyError:
node_size = np.ones_like(x) * scale * 0.5
G = Graph()
pos = {network.Ps[i]: [x[i], y[i]] for i in range(network.Np)}
if not np.isfinite(node_size).all():
node_size[~np.isfinite(node_size)] = np.nanmin(node_size)
node_color = np.array(['k'] * len(network.Ps))
if labels:
if not isinstance(labels, list):
labels = [labels]
if not isinstance(colors, list):
colors = [colors]
if len(labels) != len(colors):
raise Exception('len(colors) must be equal to len(labels)!')
for label, color in zip(labels, colors):
node_color[network.pores(label)] = color
if ax is None:
fig, ax = plt.subplots()
ax.set_aspect('equal', adjustable='datalim')
offset = node_size.max() * 0.5
ax.set_xlim((x.min() - offset, x.max() + offset))
ax.set_ylim((y.min() - offset, y.max() + offset))
ax.axis("off")
# Keep track of already plotted nodes
temp = [id(item) for item in ax.collections if isinstance(item, PathCollection)]
# Plot pores
gplot = draw_networkx_nodes(G, ax=ax, pos=pos, nodelist=network.Ps.tolist(),
alpha=alpha, node_color=node_color, edgecolors=node_color,
node_size=node_size)
# (Optionally) Plot throats
if plot_throats:
draw_networkx_edges(G, pos=pos, edge_color='k', alpha=alpha,
edgelist=network['throat.conns'].tolist(), ax=ax)
spi = 2700  # obtained by trial and error
figwidth, figheight = ax.get_figure().get_size_inches()
figsize_ratio = figheight / figwidth
data_ratio = ax.get_data_ratio()
corr = min(figsize_ratio / data_ratio, 1)
xrange = np.ptp(ax.get_xlim())
markersize = np.atleast_1d((corr*figwidth)**2 / xrange**2 * node_size**2 * spi)
for item in ax.collections:
if isinstance(item, PathCollection) and id(item) not in temp:
item.set_sizes(markersize)
return gplot
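# -----------------------------------------------------------------------------
# Editor's sketch (not part of the library API): minimal usage of
# ``plot_networkx`` on a 2D network. Requires networkx; since the network
# below has no 'pore.diameter', node sizes fall back to ``scale * 0.5``.
def _demo_plot_networkx():  # pragma: no cover
    pn = op.network.Cubic(shape=[6, 4, 1], spacing=1.0)
    return plot_networkx(pn, plot_throats=True, scale=1)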
def plot_vpython(network,
Psize='pore.diameter',
Tsize='throat.diameter',
Pcolor=None,
Tcolor=None,
cmap='jet',
**kwargs): # pragma: no cover
r"""
Quickly visualize a network in 3D using VPython.
Parameters
----------
network : GenericNetwork
The network to visualize.
Psize : str (default = 'pore.diameter')
The dictionary key pointing to the pore property by which sphere
diameters should be scaled
Tsize : str (default = 'throat.diameter')
The dictionary key pointing to the throat property by which cylinder
diameters should be scaled
Pcolor : str
The dictionary key pointing to the pore property which will control
the sphere colors. The default is None, which results in a bright
red for all pores.
Tcolor : str
The dictionary key pointing to the throat property which will control
the cylinder colors. The default is None, which results in a uniform
pale blue for all throats.
cmap : str or Matplotlib colormap object (default is 'jet')
The color map to use when converting pore and throat properties to
RGB colors. Can either be a string indicating which color map to
fetch from matplotlib.cmap, or an actual cmap object.
kwargs : dict
Any additional kwargs that are received are passed to the VPython
``canvas`` object. Default options are:
*'height' = 500* - Height of canvas
*'width' = 800* - Width of canvas
*'background' = [1, 1, 1]* - Sets the background color of canvas
*'ambient' = [0.2, 0.2, 0.3]* - Sets the brightness of lighting
Returns
-------
canvas : VPython Canvas object
The canvas object containing the generated scene. The object has
several useful methods.
Notes
-----
**Important**
a) This does not work in Spyder. It should only be called from a
Jupyter Notebook.
b) This is only meant for relatively small networks. For proper
visualization use Paraview.
"""
import matplotlib.pyplot as plt
try:
from vpython import canvas, vec, sphere, cylinder
except ModuleNotFoundError:
raise Exception('VPython must be installed to use this function')
if isinstance(cmap, str):
cmap = getattr(plt.cm, cmap)
if Pcolor is None:
Pcolor = [vec(230/255, 57/255, 0/255)]*network.Np
else:
a = cmap(network[Pcolor]/network[Pcolor].max())
Pcolor = [vec(row[0], row[1], row[2]) for row in a]
if Tcolor is None:
Tcolor = [vec(51/255, 153/255, 255/255)]*network.Nt
else:
a = cmap(network[Tcolor]/network[Tcolor].max())
Tcolor = [vec(row[0], row[1], row[2]) for row in a]
# Set default values for canvas properties
if 'background' not in kwargs.keys():
kwargs['background'] = vec(1.0, 1.0, 1.0)
if 'height' not in kwargs.keys():
kwargs['height'] = 500
if 'width' not in kwargs.keys():
kwargs['width'] = 800
# Parse any given values for canvas properties
for item in kwargs.keys():
try:
kwargs[item] = vec(*kwargs[item])
except TypeError:
pass
scene = canvas(title=network.name, **kwargs)
for p in network.Ps:
r = network[Psize][p]/2
xyz = network['pore.coords'][p]
c = Pcolor[p]
sphere(pos=vec(*xyz), radius=r, color=c,
shininess=.5)
for t in network.Ts:
head = network['throat.endpoints.head'][t]
tail = network['throat.endpoints.tail'][t]
v = tail - head
r = network[Tsize][t]
L = np.sqrt(np.sum((head-tail)**2))
c = Tcolor[t]
cylinder(pos=vec(*head), axis=vec(*v), opacity=1, size=vec(L, r, r),
color=c)
return scene
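# -----------------------------------------------------------------------------
# Editor's sketch (not part of the library API): ``plot_vpython`` reads
# 'pore.diameter', 'throat.diameter' and 'throat.endpoints' arrays, so this
# example assigns simple placeholder values by hand. Assumes vpython is
# installed and that the call is made from a Jupyter notebook.
def _demo_plot_vpython():  # pragma: no cover
    pn = op.network.Cubic(shape=[3, 3, 3], spacing=1.0)
    pn['pore.diameter'] = np.ones(pn.Np) * 0.5
    pn['throat.diameter'] = np.ones(pn.Nt) * 0.2
    conns = pn['throat.conns']
    pn['throat.endpoints.head'] = pn['pore.coords'][conns[:, 0]]
    pn['throat.endpoints.tail'] = pn['pore.coords'][conns[:, 1]]
    return plot_vpython(pn)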
def plot_tutorial(network,
font_size=12,
line_width=2,
node_color='b',
edge_color='r',
node_size=500): # pragma: no cover
r"""
Generate a network plot suitable for tutorials and explanations.
Parameters
----------
network : GenericNetwork
The network to plot, should be 2D, since the z-coordinate will be
ignored.
font_size : int
Size of font to use for labels.
line_width : int
Thickness of edge lines and node borders.
node_color : str
Color of node border.
edge_color : str
Color of edge lines.
node_size : int
Size of node circle.
Returns
-------
g : NetworkX plot object
"""
import networkx as nx
import matplotlib.pyplot as plt
from openpnm.io import to_networkx
G = to_networkx(network=network)
pos = {i: network['pore.coords'][i, 0:2] for i in network.Ps}
labels = {i: i for i in network.Ps}
edge_labels = {tuple(network['throat.conns'][i, :]): i for i in network.Ts}
gplot = nx.draw_networkx_nodes(G, pos,
node_size=node_size,
node_color='w',
edgecolors=node_color,
linewidths=line_width)
nx.draw_networkx_edges(
G, pos, width=line_width, edge_color=edge_color)
nx.draw_networkx_labels(
G, pos, labels=labels, font_size=font_size, font_color='k')
nx.draw_networkx_edge_labels(
G, pos, edge_labels=edge_labels, font_size=font_size, font_color='k')
# Prettify the figure (margins, etc.)
plt.axis('off')
ax = plt.gca()
ax.margins(0.1, 0.1)
ax.set_aspect("equal")
fig = plt.gcf()
fig.tight_layout()
dims = op.topotools.dimensionality(network)
xy_range = network.coords.ptp(axis=0)[dims]
aspect_ratio = xy_range[0] / xy_range[1]
fig.set_size_inches(5, 5 / aspect_ratio)
return gplot
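# -----------------------------------------------------------------------------
# Editor's sketch (not part of the library API): ``plot_tutorial`` needs only
# a small 2D network, since the z-coordinate is ignored.
def _demo_plot_tutorial():  # pragma: no cover
    pn = op.network.Cubic(shape=[4, 3, 1])
    return plot_tutorial(pn, font_size=10, node_size=400)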
def plot_network_jupyter(network,
node_color=0,
edge_color=0,
node_size=1,
node_scale=20,
edge_scale=5,
colormap='viridis'):
r"""
Visualize a network in 3D using Plotly.
The pores and throats are scaled and colored by their properties.
The final figure can be rotated and zoomed.
Parameters
----------
network : GenericNetwork
The network to visualize
node_color : ndarray
An array of values used for coloring the pores. If not given, the
lowest value of the employed colormap is assigned to all markers.
edge_color : ndarray
An array of values used for coloring the throats. If not given, the
lowest value of the employed colormap is assigned to all lines.
node_size : ndarray
An array of values controlling the size of the markers. If not given,
all markers will be the same size.
node_scale : scalar
A scalar used to resize the markers
edge_scale : scalar
A scalar used to scale the line thickness
colormap : str
The colormap to use
Returns
-------
fig : Plotly graph object
The graph object containing the generated plots. The object has
several useful methods.
Notes
-----
**Important**
a) This does not work in Spyder. It should only be called from a
Jupyter Notebook.
b) This is only meant for relatively small networks. For proper
visualization use Paraview.
"""
try:
import plotly.graph_objects as go
except ImportError:
raise Exception('Plotly is not installed. '
'Please install Plotly using "pip install plotly"')
# Get xyz coords for points
x_nodes, y_nodes, z_nodes = network.coords.T
node_size = np.ones(network.Np)*node_size
node_color = np.ones(network.Np)*node_color
edge_color = np.ones(network.Nt)*edge_color
node_labels = [str(i) + ': ' + str(x) for i, x in enumerate(zip(node_size, node_color))]
edge_labels = [str(i) + ': ' + str(x) for i, x in enumerate(edge_color)]
# Create edges and nodes coordinates
N = network.Nt*3
x_edges = np.zeros(N)
x_edges[np.arange(0, N, 3)] = network.coords[network.conns[:, 0]][:, 0]
x_edges[np.arange(1, N, 3)] = network.coords[network.conns[:, 1]][:, 0]
x_edges[np.arange(2, N, 3)] = np.nan
y_edges = np.zeros(N)
y_edges[np.arange(0, N, 3)] = network.coords[network.conns[:, 0]][:, 1]
y_edges[np.arange(1, N, 3)] = network.coords[network.conns[:, 1]][:, 1]
y_edges[np.arange(2, N, 3)] = np.nan
z_edges = np.zeros(N)
z_edges[np.arange(0, N, 3)] = network.coords[network.conns[:, 0]][:, 2]
z_edges[np.arange(1, N, 3)] = network.coords[network.conns[:, 1]][:, 2]
z_edges[np.arange(2, N, 3)] = np.nan
# Create plotly's Scatter3d object for pores and throats
trace_edges = go.Scatter3d(x=x_edges,
y=y_edges,
z=z_edges,
mode='lines',
line=dict(color=edge_color,
width=edge_scale,
colorscale=colormap),
text=edge_labels, hoverinfo='text')
trace_nodes = go.Scatter3d(x=x_nodes,
y=y_nodes,
z=z_nodes,
mode='markers',
marker=dict(symbol='circle',
size=node_size*node_scale,
color=node_color,
colorscale=colormap,
line=dict(color='black', width=0.5)),
text=node_labels, hoverinfo='text')
axis = dict(showbackground=False,
showline=False,
zeroline=False,
showgrid=False,
showticklabels=False,
title='')
layout = go.Layout(width=650,
height=625,
showlegend=False,
scene=dict(xaxis=dict(axis),
yaxis=dict(axis),
zaxis=dict(axis),),
margin=dict(t=100),
hovermode='closest')
data = [trace_edges, trace_nodes]
fig = go.Figure(data=data, layout=layout)
return fig
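# -----------------------------------------------------------------------------
# Editor's sketch (not part of the library API): minimal usage of
# ``plot_network_jupyter``. Requires plotly; ``fig.show()`` renders the
# interactive scene inside a Jupyter notebook.
def _demo_plot_network_jupyter():  # pragma: no cover
    pn = op.network.Cubic(shape=[4, 4, 4])
    fig = plot_network_jupyter(pn, node_size=1, node_scale=20)
    # fig.show()
    return fig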
def _generate_voxel_image(network, pore_shape, throat_shape, max_dim=200):
r"""
Generates a 3d numpy array from an OpenPNM network
Parameters
----------
network : OpenPNM GenericNetwork
Network from which voxel image is to be generated
pore_shape : str
Shape of pores in the network, valid choices are "sphere", "cube"
throat_shape : str
Shape of throats in the network, valid choices are "cylinder", "cuboid"
max_dim : int
Number of voxels in the largest dimension of the network
Returns
-------
im : ndarray
Voxelated image corresponding to the given pore network model
Notes
-----
(1) The generated voxel image is labeled with 0s, 1s and 2s signifying
solid phase, pores, and throats respectively.
"""
from skimage.morphology import cube, ball
from porespy.tools import overlay, insert_cylinder
xyz = network["pore.coords"]
cn = network["throat.conns"]
# Offset the bounding box from the network by a fixed amount
delta = network["pore.diameter"].mean() / 2
if isinstance(network, op.network.Cubic):
try:
delta = op.topotools.get_spacing(network).mean() / 2
except AttributeError:
delta = network.spacing.mean() / 2
# Shift everything to avoid out-of-bounds
extra_clearance = int(max_dim * 0.05)
# Transform points to satisfy origin at (0, 0, 0)
xyz0 = xyz.min(axis=0) - delta
xyz += -xyz0
res = (xyz.ptp(axis=0).max() + 2 * delta) / max_dim
shape = np.rint((xyz.max(axis=0) + delta) / res).astype(int) + 2 * extra_clearance
# Transforming from real coords to matrix coords
xyz = np.rint(xyz / res).astype(int) + extra_clearance
pore_radi = np.rint(network["pore.diameter"] * 0.5 / res).astype(int)
throat_radi = np.rint(network["throat.diameter"] * 0.5 / res).astype(int)
im_pores = np.zeros(shape, dtype=np.uint8)
im_throats = np.zeros_like(im_pores)
if pore_shape == "cube":
pore_elem = cube
rp = pore_radi * 2 + 1 # +1 since num_voxel must be odd
rp_max = int(2 * round(delta / res)) + 1
if pore_shape == "sphere":
pore_elem = ball
rp = pore_radi
rp_max = int(round(delta / res))
if throat_shape == "cuboid":
raise Exception("Not yet implemented, try 'cylinder'.")
# Generating voxels for pores
for i, pore in enumerate(tqdm(network.Ps)):
elem = pore_elem(rp[i])
try:
im_pores = overlay(im1=im_pores, im2=elem, c=xyz[i])
except ValueError:
elem = pore_elem(rp_max)
im_pores = overlay(im1=im_pores, im2=elem, c=xyz[i])
# Get rid of pore overlaps
im_pores[im_pores > 0] = 1
# Generating voxels for throats
for i, throat in enumerate(tqdm(network.Ts)):
try:
im_throats = insert_cylinder(
im_throats, r=throat_radi[i], xyz0=xyz[cn[i, 0]], xyz1=xyz[cn[i, 1]])
except ValueError:
im_throats = insert_cylinder(
im_throats, r=rp_max, xyz0=xyz[cn[i, 0]], xyz1=xyz[cn[i, 1]])
# Get rid of throat overlaps
im_throats[im_throats > 0] = 1
# Subtract pore-throat overlap from throats
im_throats = (im_throats.astype(bool) * ~im_pores.astype(bool)).astype(np.uint8)
im = im_pores * 1 + im_throats * 2
return im[extra_clearance:-extra_clearance,
extra_clearance:-extra_clearance,
extra_clearance:-extra_clearance]
def generate_voxel_image(network, pore_shape="sphere", throat_shape="cylinder",
max_dim=None, rtol=0.1):
r"""
Generate a voxel image from a GenericNetwork
Parameters
----------
network : OpenPNM GenericNetwork
Network from which voxel image is to be generated
pore_shape : str
Shape of pores in the network, valid choices are "sphere", "cube"
throat_shape : str
Shape of throats in the network, valid choices are "cylinder", "cuboid"
max_dim : int
Number of voxels in the largest dimension of the network
rtol : float
Stopping criterion for finding the smallest voxel image such that
further increasing the number of voxels in each dimension by 25% would
improve the predicted porosity of the image by less than ``rtol``
Returns
-------
im : ndarray
Voxelated image corresponding to the given pore network model
Notes
-----
(1) The generated voxelated image is labeled with 0s, 1s and 2s signifying
solid phase, pores, and throats respectively.
(2) If max_dim is not provided, the method calculates it such that
further increasing it does not change the porosity appreciably.
"""
# If max_dim is provided, generate voxel image using max_dim
if max_dim is not None:
return _generate_voxel_image(
network, pore_shape, throat_shape, max_dim=max_dim)
# If max_dim is not provided, search for the smallest max_dim that gives a
# converged porosity estimate
max_dim = 200
err = 100
eps_old = 200
while err > rtol:
im = _generate_voxel_image(
network, pore_shape, throat_shape, max_dim=max_dim)
eps = im.astype(bool).sum() / np.prod(im.shape)
err = abs(1 - eps / eps_old)
eps_old = eps
max_dim = int(max_dim * 1.25)
return im
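# -----------------------------------------------------------------------------
# Editor's sketch (not part of the library API): voxelization reads
# 'pore.diameter' and 'throat.diameter', assigned here as placeholders.
# Requires porespy and scikit-image.
def _demo_generate_voxel_image():  # pragma: no cover
    pn = op.network.Cubic(shape=[3, 3, 3], spacing=1.0)
    pn['pore.diameter'] = np.ones(pn.Np) * 0.5
    pn['throat.diameter'] = np.ones(pn.Nt) * 0.2
    return generate_voxel_image(pn, pore_shape='sphere',
                                throat_shape='cylinder', max_dim=80)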
|
PMEAL/OpenPNM
|
openpnm/topotools/_plottools.py
|
Python
|
mit
| 32,286
|
[
"ParaView"
] |
2b9428b60338b3e7b39277eaded6410e9df40eb0f015e294868526a0854d1744
|
#!/usr/bin/env python2.7
# pySCLikeSync.py by GameplayJDK,
# Copyright 2014 (c) GameplayJDK.de,
# All rights reserved until further notice
import sys
import soundcloud
import urllib
import os
import glob
import mutagen.mp3
from mutagen.id3 import TIT2
# Prefix for all console output
pre = "[pySCLikeSync] "
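# Example invocation (editor's note; every value below is a placeholder):
#   python2.7 pySCLikeSync.py CLIENT_ID CLIENT_SECRET you@example.com PASSWORD target_user 50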
# INFO: never show your account information to others!
try:
# The ClientID
cli_id = sys.argv[1]
# The ClientSecret
cli_se = sys.argv[2]
# Your email address:
usr_nm = sys.argv[3]
# Your password:
usr_pw = sys.argv[4]
# The name of the target user
tr_user = sys.argv[5]
# The limit of tracks to load
tr_limit = sys.argv[6]
# *** +Python code ***
print pre+"Signing in as client..."
client = soundcloud.Client(client_id=cli_id,client_secret=cli_se,username=usr_nm,password=usr_pw)
print pre+"Signed in."
print pre+"Loading liked tracks... ('http://www.soundcloud.com/"+tr_user+"/likes')"
tracks = client.get('/users/'+tr_user+'/favorites', limit=tr_limit)
print pre+"Loaded '"+str(len(tracks))+"' tracks."
print pre+"Scanning for existing tracks..."
count = 0
toskip = []
if os.path.isfile("track_id_store"):
lines = open("track_id_store", 'r')
for line in lines.readlines():
toskip.append(line.strip("\n"))
count = count+1
print pre+"'"+str(count)+"': "+line.strip("\n")
lines.close()
track_id_store = open("track_id_store", 'a')
print pre+"Scanned. Found '"+str(count)+"' existing tracks."
count = 0
print pre+"Downloading new tracks..."
for track in tracks:
try:
t_name = track.title
t_url = client.get(track.stream_url, allow_redirects=False).location
t_id = track.id
if str(t_id) not in toskip:
try:
print pre+"Trying to download '"+t_url+"' to '"+t_name+"'..."
urllib.urlretrieve(t_url, t_name+".mp3")
count = count+1
track_id_store.write(str(t_id)+"\n")
track_id_store.flush()
print pre+"Successfully downloaded '"+t_url+"' to '"+t_name+".mp3'."
try:
audio = mutagen.mp3.MP3(t_name+".mp3")
audio["TIT2"] = TIT2(encoding=3, text=[t_name])
print pre+"Successfully set ID3 tag for title."
except:
print pre+"Unable to set ID3 tag for title. (This is likely being caused by an internal mutagen error or a corrupted import)."
except:
if os.path.isfile(t_name+".mp3"):
os.remove(t_name+".mp3")
print pre+"Failed to download '"+t_url+"' to '"+t_name+".mp3'."
else:
print pre+"Skipped download of '"+t_url+"' to '"+t_name+".mp3'."
except:
print pre+"Failed to download track. (This is likely being caused by a httpStatusCode-404 of a removed track or by a failed attempt to remove an incomplete download file)."
track_id_store.close()
print pre+"Downloaded '"+str(count)+"/"+str(len(tracks))+"' tracks."
count = 0
globbed = glob.glob("*.mp3")
print pre+"Tracks in '"+os.getcwd()+"': ("+str(len(globbed))+")"
for mp3f in globbed:
print pre+"- "+mp3f
# *** -Python code ***
except Exception, ex:
print pre+"Failed to run the program properly!"
print pre+"Please report this error ('"+str(ex)+"') to the official issue tracker at 'https://github.com/GameplayJDK/pySCLikeSync/issues' or view the README.md at 'https://github.com/GameplayJDK/pySCLikeSync/blob/master/README.md' and visit the wiki at 'https://github.com/GameplayJDK/pySCLikeSync/wiki' to read about common errors."
|
GameplayJDK/pySCLikeSync
|
src/pySCLikeSync-v1.3.py
|
Python
|
gpl-3.0
| 3,820
|
[
"VisIt"
] |
0f01c224167df5b2db4a7634411c186e6b6d91ccc6b30f70884733ada09c4216
|
from copy import deepcopy
from glob import glob
from os import path
import dask.array as da
import numpy as np
from parcels.field import Field, DeferredArray
from parcels.field import NestedField
from parcels.field import SummedField
from parcels.field import VectorField
from parcels.grid import Grid
from parcels.gridset import GridSet
from parcels.grid import GridCode
from parcels.tools.converters import TimeConverter, convert_xarray_time_units
from parcels.tools.statuscodes import TimeExtrapolationError
from parcels.tools.loggers import logger
import functools
try:
from mpi4py import MPI
except ImportError:
MPI = None
__all__ = ['FieldSet']
class FieldSet(object):
"""FieldSet class that holds hydrodynamic data needed to execute particles
:param U: :class:`parcels.field.Field` object for zonal velocity component
:param V: :class:`parcels.field.Field` object for meridional velocity component
:param fields: Dictionary of additional :class:`parcels.field.Field` objects
"""
def __init__(self, U, V, fields=None):
self.gridset = GridSet()
self.completed = False
if U:
self.add_field(U, 'U')
self.time_origin = self.U.grid.time_origin if isinstance(self.U, Field) else self.U[0].grid.time_origin
if V:
self.add_field(V, 'V')
# Add additional fields as attributes
if fields:
for name, field in fields.items():
self.add_field(field, name)
self.compute_on_defer = None
@staticmethod
def checkvaliddimensionsdict(dims):
for d in dims:
if d not in ['lon', 'lat', 'depth', 'time']:
raise NameError('%s is not a valid key in the dimensions dictionary' % d)
@classmethod
def from_data(cls, data, dimensions, transpose=False, mesh='spherical',
allow_time_extrapolation=None, time_periodic=False, **kwargs):
"""Initialise FieldSet object from raw data
:param data: Dictionary mapping field names to numpy arrays.
Note that at least 'U' and 'V' numpy arrays need to be given, and that
the built-in Advection kernels assume that U and V are in m/s
1. If data shape is [xdim, ydim], [xdim, ydim, zdim], [xdim, ydim, tdim] or [xdim, ydim, zdim, tdim],
whichever is relevant for the dataset, use the flag transpose=True
2. If data shape is [ydim, xdim], [zdim, ydim, xdim], [tdim, ydim, xdim] or [tdim, zdim, ydim, xdim],
use the flag transpose=False (default value)
3. If data has any other shape, you first need to reorder it
:param dimensions: Dictionary mapping field dimensions (lon,
lat, depth, time) to numpy arrays.
Note that dimensions can also be a dictionary of dictionaries if
dimension names are different for each variable
(e.g. dimensions['U'], dimensions['V'], etc).
:param transpose: Boolean whether to transpose data on read-in
:param mesh: String indicating the type of mesh coordinates and
units used during velocity interpolation, see also `this tutorial <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb>`_:
1. spherical (default): Lat and lon in degree, with a
correction for zonal velocity U near the poles.
2. flat: No conversion, lat/lon are assumed to be in m.
:param allow_time_extrapolation: boolean whether to allow for extrapolation
(i.e. beyond the last available time snapshot)
Default is False if dimensions includes time, else True
:param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)
This flag overrides allow_time_extrapolation and sets it to False
Usage examples
==============
* `Analytical advection <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_analyticaladvection.ipynb>`_
* `Diffusion <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_diffusion.ipynb>`_
* `Interpolation <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_interpolation.ipynb>`_
* `Unit converters <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb>`_
"""
fields = {}
for name, datafld in data.items():
# Use dimensions[name] if dimensions is a dict of dicts
dims = dimensions[name] if name in dimensions else dimensions
cls.checkvaliddimensionsdict(dims)
if allow_time_extrapolation is None:
allow_time_extrapolation = False if 'time' in dims else True
lon = dims['lon']
lat = dims['lat']
depth = np.zeros(1, dtype=np.float32) if 'depth' not in dims else dims['depth']
time = np.zeros(1, dtype=np.float64) if 'time' not in dims else dims['time']
time = np.array(time) if not isinstance(time, np.ndarray) else time
if isinstance(time[0], np.datetime64):
time_origin = TimeConverter(time[0])
time = np.array([time_origin.reltime(t) for t in time])
else:
time_origin = TimeConverter(0)
grid = Grid.create_grid(lon, lat, depth, time, time_origin=time_origin, mesh=mesh)
if 'creation_log' not in kwargs.keys():
kwargs['creation_log'] = 'from_data'
fields[name] = Field(name, datafld, grid=grid, transpose=transpose,
allow_time_extrapolation=allow_time_extrapolation, time_periodic=time_periodic, **kwargs)
u = fields.pop('U', None)
v = fields.pop('V', None)
return cls(u, v, fields=fields)
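# --- Usage sketch (editor's note, not library code) --------------------------
# Building a FieldSet from plain numpy arrays; all names are illustrative
# and the arrays are shaped [ydim, xdim] (i.e. transpose=False):
#
#   import numpy as np
#   from parcels import FieldSet
#   U = np.zeros((4, 6), dtype=np.float32)
#   V = np.ones((4, 6), dtype=np.float32)
#   dims = {'lon': np.arange(6), 'lat': np.arange(4)}
#   fset = FieldSet.from_data({'U': U, 'V': V}, dims, mesh='flat')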
def add_field(self, field, name=None):
"""Add a :class:`parcels.field.Field` object to the FieldSet
:param field: :class:`parcels.field.Field` object to be added
:param name: Name of the :class:`parcels.field.Field` object to be added
For usage examples see the following tutorials:
* `Nested Fields <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_NestedFields.ipynb>`_
* `Unit converters <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb>`_
"""
if self.completed:
raise RuntimeError("FieldSet has already been completed. Are you trying to add a Field after you've created the ParticleSet?")
name = field.name if name is None else name
if hasattr(self, name): # check if Field with same name already exists when adding new Field
raise RuntimeError("FieldSet already has a Field with name '%s'" % name)
if isinstance(field, SummedField):
setattr(self, name, field)
field.name = name
for fld in field:
self.gridset.add_grid(fld)
fld.fieldset = self
elif isinstance(field, NestedField):
setattr(self, name, field)
for fld in field:
self.gridset.add_grid(fld)
fld.fieldset = self
elif isinstance(field, list):
raise NotImplementedError('FieldLists have been replaced by SummedFields. Use the + operator instead of []')
else:
setattr(self, name, field)
if (isinstance(field.data, DeferredArray) or isinstance(field.data, da.core.Array)) and len(self.get_fields()) > 0:
# ==== check for inhabiting the same grid, and homogenise the grid chunking ==== #
g_set = field.grid
grid_chunksize = field.field_chunksize
dFiles = field.dataFiles
is_processed_grid = False
is_same_grid = False
for fld in self.get_fields(): # avoid re-processing/overwriting existing and working fields
if type(fld) in [VectorField, NestedField, SummedField] or fld.dataFiles is None:
continue
if fld.grid == g_set:
is_processed_grid |= True
break
if not is_processed_grid:
for fld in self.get_fields():
if type(fld) in [VectorField, NestedField, SummedField] or fld.dataFiles is None:
continue
procdims = fld.dimensions
procinds = fld.indices
procpaths = fld.dataFiles
nowpaths = field.dataFiles
if procdims == field.dimensions and procinds == field.indices:
is_same_grid = False
if field.grid.mesh == fld.grid.mesh:
is_same_grid = True
else:
is_same_grid = True
for dim in ['lon', 'lat', 'depth', 'time']:
if dim in field.dimensions.keys() and dim in fld.dimensions.keys():
is_same_grid &= (field.dimensions[dim] == fld.dimensions[dim])
fld_g_dims = [fld.grid.tdim, fld.grid.zdim, fld.grid.ydim, fld.grid.xdim]
field_g_dims = [field.grid.tdim, field.grid.zdim, field.grid.ydim, field.grid.xdim]
for i in range(0, len(fld_g_dims)):
is_same_grid &= (field_g_dims[i] == fld_g_dims[i])
if is_same_grid:
g_set = fld.grid
# ==== check here that the dims of field_chunksize are the same ==== #
if g_set.master_chunksize is not None:
res = False
if (isinstance(field.field_chunksize, tuple) and isinstance(g_set.master_chunksize, tuple)) or (isinstance(field.field_chunksize, dict) and isinstance(g_set.master_chunksize, dict)):
res |= functools.reduce(lambda i, j: i and j, map(lambda m, k: m == k, field.field_chunksize, g_set.master_chunksize), True)
else:
res |= (field.field_chunksize == g_set.master_chunksize)
if res:
grid_chunksize = g_set.master_chunksize
if field.grid.master_chunksize is not None:
logger.warning_once("Trying to initialize a shared grid with different chunking sizes - action prohibited. Replacing requested field_chunksize with grid's master chunksize.")
else:
raise ValueError("Conflict between grids of the same fieldset chunksize and requested field chunksize as well as the chunked name dimensions - Please apply the same chunksize to all fields in a shared grid!")
if procpaths == nowpaths:
dFiles = fld.dataFiles
break
if is_same_grid:
if field.grid != g_set:
field.grid = g_set
if field.field_chunksize != grid_chunksize:
field.field_chunksize = grid_chunksize
if field.dataFiles != dFiles:
field.dataFiles = dFiles
self.gridset.add_grid(field)
field.fieldset = self
def add_vector_field(self, vfield):
"""Add a :class:`parcels.field.VectorField` object to the FieldSet
:param vfield: :class:`parcels.field.VectorField` object to be added
"""
setattr(self, vfield.name, vfield)
for v in vfield.__dict__.values():
if isinstance(v, Field) and (v not in self.get_fields()):
self.add_field(v)
vfield.fieldset = self
if isinstance(vfield, NestedField):
for f in vfield:
f.fieldset = self
def check_complete(self):
assert self.U, 'FieldSet does not have a Field named "U"'
assert self.V, 'FieldSet does not have a Field named "V"'
for attr, value in vars(self).items():
if type(value) is Field:
assert value.name == attr, 'Field %s.name (%s) is not consistent' % (value.name, attr)
def check_velocityfields(U, V, W):
if (U.interp_method == 'cgrid_velocity' and V.interp_method != 'cgrid_velocity') or \
(U.interp_method != 'cgrid_velocity' and V.interp_method == 'cgrid_velocity'):
raise ValueError("If one of U,V.interp_method='cgrid_velocity', the other should be too")
if 'linear_invdist_land_tracer' in [U.interp_method, V.interp_method]:
raise NotImplementedError("interp_method='linear_invdist_land_tracer' is not implemented for U and V Fields")
if U.interp_method == 'cgrid_velocity':
if U.grid.xdim == 1 or U.grid.ydim == 1 or V.grid.xdim == 1 or V.grid.ydim == 1:
raise NotImplementedError('C-grid velocities require longitude and latitude dimensions at least length 2')
if U.gridindexingtype not in ['nemo', 'mitgcm', 'mom5', 'pop']:
raise ValueError("Field.gridindexing has to be one of 'nemo', 'mitgcm', 'mom5' or 'pop'")
if U.gridindexingtype == 'mitgcm' and U.grid.gtype in [GridCode.CurvilinearZGrid, GridCode.CurvilinearSGrid]:
raise NotImplementedError('Curvilinear Grids are not implemented for mitgcm-style grid indexing. '
'If you have a use-case for this, please let us know by filing an Issue on github')
if V.gridindexingtype != U.gridindexingtype or (W and W.gridindexingtype != U.gridindexingtype):
raise ValueError('Not all velocity Fields have the same gridindexingtype')
if isinstance(self.U, (SummedField, NestedField)):
w = self.W if hasattr(self, 'W') else [None]*len(self.U)
for U, V, W in zip(self.U, self.V, w):
check_velocityfields(U, V, W)
else:
W = self.W if hasattr(self, 'W') else None
check_velocityfields(self.U, self.V, W)
for g in self.gridset.grids:
g.check_zonal_periodic()
if len(g.time) == 1:
continue
assert isinstance(g.time_origin.time_origin, type(self.time_origin.time_origin)), 'time origins of different grids must have the same type'
g.time = g.time + self.time_origin.reltime(g.time_origin)
if g.defer_load:
g.time_full = g.time_full + self.time_origin.reltime(g.time_origin)
g.time_origin = self.time_origin
if not hasattr(self, 'UV'):
if isinstance(self.U, SummedField):
self.add_vector_field(SummedField('UV', self.U, self.V))
elif isinstance(self.U, NestedField):
self.add_vector_field(NestedField('UV', self.U, self.V))
else:
self.add_vector_field(VectorField('UV', self.U, self.V))
if not hasattr(self, 'UVW') and hasattr(self, 'W'):
if isinstance(self.U, SummedField):
self.add_vector_field(SummedField('UVW', self.U, self.V, self.W))
elif isinstance(self.U, NestedField):
self.add_vector_field(NestedField('UVW', self.U, self.V, self.W))
else:
self.add_vector_field(VectorField('UVW', self.U, self.V, self.W))
ccode_fieldnames = []
counter = 1
for fld in self.get_fields():
if fld.name not in ccode_fieldnames:
fld.ccode_name = fld.name
else:
fld.ccode_name = fld.name + str(counter)
counter += 1
ccode_fieldnames.append(fld.ccode_name)
for f in self.get_fields():
if type(f) in [VectorField, NestedField, SummedField] or f.dataFiles is None:
continue
if f.grid.depth_field is not None:
if f.grid.depth_field == 'not_yet_set':
raise ValueError("If depth dimension is set at 'not_yet_set', it must be added later using Field.set_depth_from_field(field)")
if not f.grid.defer_load:
depth_data = f.grid.depth_field.data
f.grid.depth = depth_data if isinstance(depth_data, np.ndarray) else np.array(depth_data)
self.completed = True
@classmethod
def parse_wildcards(cls, paths, filenames, var):
if not isinstance(paths, list):
paths = sorted(glob(str(paths)))
if len(paths) == 0:
notfound_paths = filenames[var] if isinstance(filenames, dict) and var in filenames else filenames
raise IOError("FieldSet files not found: %s" % str(notfound_paths))
for fp in paths:
if not path.exists(fp):
raise IOError("FieldSet file not found: %s" % str(fp))
return paths
@classmethod
def from_netcdf(cls, filenames, variables, dimensions, indices=None, fieldtype=None,
mesh='spherical', timestamps=None, allow_time_extrapolation=None, time_periodic=False,
deferred_load=True, field_chunksize='auto', **kwargs):
"""Initialises FieldSet object from NetCDF files
:param filenames: Dictionary mapping variables to file(s). The
filepath may contain wildcards to indicate multiple files
or be a list of files.
filenames can be a list [files], a dictionary {var:[files]},
a dictionary {dim:[files]} (if lon, lat and/or depth are not stored in the same files as the data),
or a dictionary of dictionaries {var:{dim:[files]}}.
time values are read from filenames['data']
:param variables: Dictionary mapping variables to variable names in the netCDF file(s).
Note that the built-in Advection kernels assume that U and V are in m/s
:param dimensions: Dictionary mapping data dimensions (lon,
lat, depth, time, data) to dimensions in the netCDF file(s).
Note that dimensions can also be a dictionary of dictionaries if
dimension names are different for each variable
(e.g. dimensions['U'], dimensions['V'], etc).
:param indices: Optional dictionary of indices for each dimension
to read from file(s), to allow for reading of subset of data.
Default is to read the full extent of each dimension.
Note that negative indices are not allowed.
:param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.
(either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)
:param mesh: String indicating the type of mesh coordinates and
units used during velocity interpolation, see also `this tutorial <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb>`_:
1. spherical (default): Lat and lon in degree, with a
correction for zonal velocity U near the poles.
2. flat: No conversion, lat/lon are assumed to be in m.
:param timestamps: list of lists or array of arrays containing the timestamps for
each of the files in filenames. Outer list/array corresponds to files, inner
array corresponds to indices within files.
Default is None if dimensions includes time.
:param allow_time_extrapolation: boolean whether to allow for extrapolation
(i.e. beyond the last available time snapshot)
Default is False if dimensions includes time, else True
:param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)
This flag overrides allow_time_extrapolation and sets it to False
:param deferred_load: boolean whether to only pre-load data (in deferred mode) or
fully load them (default: True). It is advised to defer loading the data, since
Parcels then manages memory better during particle set execution.
deferred_load=False is however sometimes necessary for plotting the fields.
:param interp_method: Method for interpolation. Options are 'linear' (default), 'nearest',
'linear_invdist_land_tracer', 'cgrid_velocity', 'cgrid_tracer' and 'bgrid_velocity'
:param gridindexingtype: The type of gridindexing. Either 'nemo' (default) or 'mitgcm' are supported.
See also the Grid indexing documentation on oceanparcels.org
:param field_chunksize: size of the chunks in dask loading
:param netcdf_engine: engine to use for netcdf reading in xarray. Default is 'netcdf',
but in cases where this doesn't work, setting netcdf_engine='scipy' could help
For usage examples see the following tutorials:
* `Basic Parcels setup <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/parcels_tutorial.ipynb>`_
* `Argo floats <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_Argofloats.ipynb>`_
* `Timestamps <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_timestamps.ipynb>`_
* `Time-evolving depth dimensions <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_timevaryingdepthdimensions.ipynb>`_
"""
# Ensure that times are not provided both in netcdf file and in 'timestamps'.
if timestamps is not None and 'time' in dimensions:
logger.warning_once("Time already provided, defaulting to dimensions['time'] over timestamps.")
timestamps = None
fields = {}
if 'creation_log' not in kwargs.keys():
kwargs['creation_log'] = 'from_netcdf'
for var, name in variables.items():
# Resolve all matching paths for the current variable
paths = filenames[var] if type(filenames) is dict and var in filenames else filenames
if type(paths) is not dict:
paths = cls.parse_wildcards(paths, filenames, var)
else:
for dim, p in paths.items():
paths[dim] = cls.parse_wildcards(p, filenames, var)
# Use dimensions[var] and indices[var] if either of them is a dict of dicts
dims = dimensions[var] if var in dimensions else dimensions
cls.checkvaliddimensionsdict(dims)
inds = indices[var] if (indices and var in indices) else indices
fieldtype = fieldtype[var] if (fieldtype and var in fieldtype) else fieldtype
chunksize = field_chunksize[var] if (field_chunksize and var in field_chunksize) else field_chunksize
grid = None
grid_chunksize = chunksize
dFiles = None
# check if grid has already been processed (i.e. if other fields have same filenames, dimensions and indices)
for procvar, _ in fields.items():
procdims = dimensions[procvar] if procvar in dimensions else dimensions
procinds = indices[procvar] if (indices and procvar in indices) else indices
procpaths = filenames[procvar] if isinstance(filenames, dict) and procvar in filenames else filenames
nowpaths = filenames[var] if isinstance(filenames, dict) and var in filenames else filenames
if procdims == dims and procinds == inds:
if 'depth' in dims and dims['depth'] == 'not_yet_set':
break
processedGrid = False
if ((not isinstance(filenames, dict)) or filenames[procvar] == filenames[var]):
processedGrid = True
elif isinstance(filenames[procvar], dict):
processedGrid = True
for dim in ['lon', 'lat', 'depth']:
if dim in dimensions:
processedGrid *= filenames[procvar][dim] == filenames[var][dim]
if processedGrid:
grid = fields[procvar].grid
# ==== check here that the dims of field_chunksize are the same ==== #
if grid.master_chunksize is not None:
res = False
if (isinstance(chunksize, tuple) and isinstance(grid.master_chunksize, tuple)) or (isinstance(chunksize, dict) and isinstance(grid.master_chunksize, dict)):
res |= functools.reduce(lambda i, j: i and j, map(lambda m, k: m == k, chunksize, grid.master_chunksize), True)
else:
res |= (chunksize == grid.master_chunksize)
if res:
grid_chunksize = grid.master_chunksize
logger.warning_once("Trying to initialize a shared grid with different chunking sizes - action prohibited. Replacing requested field_chunksize with grid's master chunksize.")
else:
raise ValueError("Conflict between grids of the same fieldset chunksize and requested field chunksize as well as the chunked name dimensions - Please apply the same chunksize to all fields in a shared grid!")
if procpaths == nowpaths:
dFiles = fields[procvar].dataFiles
break
fields[var] = Field.from_netcdf(paths, (var, name), dims, inds, grid=grid, mesh=mesh, timestamps=timestamps,
allow_time_extrapolation=allow_time_extrapolation,
time_periodic=time_periodic, deferred_load=deferred_load,
fieldtype=fieldtype, field_chunksize=grid_chunksize, dataFiles=dFiles, **kwargs)
u = fields.pop('U', None)
v = fields.pop('V', None)
return cls(u, v, fields=fields)
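# --- Usage sketch (editor's note, not library code) --------------------------
# Hypothetical file, variable and dimension names; only the call structure
# matters here:
#
#   filenames = {'U': 'ocean_u_*.nc', 'V': 'ocean_v_*.nc'}
#   variables = {'U': 'uo', 'V': 'vo'}
#   dimensions = {'lon': 'longitude', 'lat': 'latitude', 'time': 'time'}
#   fset = FieldSet.from_netcdf(filenames, variables, dimensions)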
@classmethod
def from_nemo(cls, filenames, variables, dimensions, indices=None, mesh='spherical',
allow_time_extrapolation=None, time_periodic=False,
tracer_interp_method='cgrid_tracer', field_chunksize='auto', **kwargs):
"""Initialises FieldSet object from NetCDF files of Curvilinear NEMO fields.
See `here <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_nemo_curvilinear.ipynb>`_
for a detailed tutorial on the setup for 2D NEMO fields and `here <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_nemo_3D.ipynb>`_
for the tutorial on the setup for 3D NEMO fields.
See `here <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/documentation_indexing.ipynb>`_
for a more detailed explanation of the different methods that can be used for c-grid datasets.
:param filenames: Dictionary mapping variables to file(s). The
filepath may contain wildcards to indicate multiple files,
or be a list of files.
filenames can be a list [files], a dictionary {var:[files]},
a dictionary {dim:[files]} (if lon, lat and/or depth are not stored in the same files as the data),
or a dictionary of dictionaries {var:{dim:[files]}}
time values are read from filenames['data']
:param variables: Dictionary mapping variables to variable names in the netCDF file(s).
Note that the built-in Advection kernels assume that U and V are in m/s
:param dimensions: Dictionary mapping data dimensions (lon,
lat, depth, time, data) to dimensions in the netCDF file(s).
Note that dimensions can also be a dictionary of dictionaries if
dimension names are different for each variable.
Watch out: NEMO is discretised on a C-grid:
U and V velocities are not located on the same nodes (see https://www.nemo-ocean.eu/doc/node19.html ).
+-----------------------------+-----------------------------+-----------------------------+
| | V[k,j+1,i+1] | |
+-----------------------------+-----------------------------+-----------------------------+
|U[k,j+1,i] |W[k:k+2,j+1,i+1],T[k,j+1,i+1]|U[k,j+1,i+1] |
+-----------------------------+-----------------------------+-----------------------------+
| | V[k,j,i+1] + |
+-----------------------------+-----------------------------+-----------------------------+
To interpolate U, V velocities on the C-grid, Parcels needs to read the f-nodes,
which are located on the corners of the cells.
(for indexing details: https://www.nemo-ocean.eu/doc/img360.png )
In 3D, the depth is the one corresponding to W nodes
The gridindexingtype is set to 'nemo'. See also the Grid indexing documentation on oceanparcels.org
:param indices: Optional dictionary of indices for each dimension
to read from file(s), to allow for reading of subset of data.
Default is to read the full extent of each dimension.
Note that negative indices are not allowed.
:param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.
(either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)
:param mesh: String indicating the type of mesh coordinates and
units used during velocity interpolation, see also `this tutorial <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb>`_:
1. spherical (default): Lat and lon in degree, with a
correction for zonal velocity U near the poles.
2. flat: No conversion, lat/lon are assumed to be in m.
:param allow_time_extrapolation: boolean whether to allow for extrapolation
(i.e. beyond the last available time snapshot)
Default is False if dimensions includes time, else True
:param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)
This flag overrides allow_time_extrapolation and sets it to False
:param tracer_interp_method: Method for interpolation of tracer fields. It is recommended to use 'cgrid_tracer' (default)
Note that in the case of from_nemo() and from_c_grid_dataset(), the velocity fields default to 'cgrid_velocity'
:param field_chunksize: size of the chunks in dask loading
"""
if 'creation_log' not in kwargs.keys():
kwargs['creation_log'] = 'from_nemo'
if kwargs.pop('gridindexingtype', 'nemo') != 'nemo':
raise ValueError("gridindexingtype must be 'nemo' in FieldSet.from_nemo(). Use FieldSet.from_c_grid_dataset otherwise")
fieldset = cls.from_c_grid_dataset(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic,
allow_time_extrapolation=allow_time_extrapolation, tracer_interp_method=tracer_interp_method,
field_chunksize=field_chunksize, gridindexingtype='nemo', **kwargs)
if hasattr(fieldset, 'W'):
fieldset.W.set_scaling_factor(-1.)
return fieldset
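# --- Usage sketch (editor's note, not library code) --------------------------
# A typical NEMO setup, where U and V live in separate files and lon/lat are
# read from a mesh file holding the f-points; all paths and variable names
# below are placeholders:
#
#   mesh = 'nemo_output/mesh_mask.nc'
#   filenames = {'U': {'lon': mesh, 'lat': mesh, 'data': 'nemo_output/U*.nc'},
#                'V': {'lon': mesh, 'lat': mesh, 'data': 'nemo_output/V*.nc'}}
#   variables = {'U': 'uo', 'V': 'vo'}
#   dimensions = {'lon': 'glamf', 'lat': 'gphif', 'time': 'time_counter'}
#   fset = FieldSet.from_nemo(filenames, variables, dimensions)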
@classmethod
def from_mitgcm(cls, filenames, variables, dimensions, indices=None, mesh='spherical',
allow_time_extrapolation=None, time_periodic=False,
tracer_interp_method='cgrid_tracer', field_chunksize='auto', **kwargs):
"""Initialises FieldSet object from NetCDF files of MITgcm fields.
All parameters and keywords are exactly the same as for FieldSet.from_nemo(), except that
gridindexing is set to 'mitgcm' for grids that have the shape
_________________V[k,j+1,i]__________________
| |
| |
U[k,j,i] W[k-1:k,j,i], T[k,j,i] U[k,j,i+1]
| |
| |
|_________________V[k,j,i]____________________|
(For indexing details: https://mitgcm.readthedocs.io/en/latest/algorithm/algorithm.html#spatial-discretization-of-the-dynamical-equations)
"""
if 'creation_log' not in kwargs.keys():
kwargs['creation_log'] = 'from_mitgcm'
if kwargs.pop('gridindexingtype', 'mitgcm') != 'mitgcm':
raise ValueError("gridindexingtype must be 'mitgcm' in FieldSet.from_mitgcm(). Use FieldSet.from_c_grid_dataset otherwise")
fieldset = cls.from_c_grid_dataset(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic,
allow_time_extrapolation=allow_time_extrapolation, tracer_interp_method=tracer_interp_method,
field_chunksize=field_chunksize, gridindexingtype='mitgcm', **kwargs)
if hasattr(fieldset, 'W'):
fieldset.W.set_scaling_factor(-1.)
return fieldset
@classmethod
def from_c_grid_dataset(cls, filenames, variables, dimensions, indices=None, mesh='spherical',
allow_time_extrapolation=None, time_periodic=False,
tracer_interp_method='cgrid_tracer', gridindexingtype='nemo', field_chunksize='auto', **kwargs):
"""Initialises FieldSet object from NetCDF files of Curvilinear NEMO fields.
See `here <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/documentation_indexing.ipynb>`_
for a more detailed explanation of the different methods that can be used for c-grid datasets.
:param filenames: Dictionary mapping variables to file(s). The
filepath may contain wildcards to indicate multiple files,
or be a list of files.
filenames can be a list [files], a dictionary {var:[files]},
a dictionary {dim:[files]} (if lon, lat and/or depth are not stored in the same files as the data),
or a dictionary of dictionaries {var:{dim:[files]}}
time values are read from filenames['data']
:param variables: Dictionary mapping variables to variable
names in the netCDF file(s).
:param dimensions: Dictionary mapping data dimensions (lon,
lat, depth, time, data) to dimensions in the netCDF file(s).
Note that dimensions can also be a dictionary of dictionaries if
dimension names are different for each variable.
Watch out: NEMO is discretised on a C-grid:
U and V velocities are not located on the same nodes (see https://www.nemo-ocean.eu/doc/node19.html ).
+-----------------------------+-----------------------------+-----------------------------+
| | V[k,j+1,i+1] | |
+-----------------------------+-----------------------------+-----------------------------+
|U[k,j+1,i] |W[k:k+2,j+1,i+1],T[k,j+1,i+1]|U[k,j+1,i+1] |
+-----------------------------+-----------------------------+-----------------------------+
| | V[k,j,i+1] + |
+-----------------------------+-----------------------------+-----------------------------+
To interpolate U, V velocities on the C-grid, Parcels needs to read the f-nodes,
which are located on the corners of the cells.
(for indexing details: https://www.nemo-ocean.eu/doc/img360.png )
In 3D, the depth is the one corresponding to W nodes.
:param indices: Optional dictionary of indices for each dimension
to read from file(s), to allow for reading of subset of data.
Default is to read the full extent of each dimension.
Note that negative indices are not allowed.
:param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.
(either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)
:param mesh: String indicating the type of mesh coordinates and
units used during velocity interpolation:
1. spherical (default): Lat and lon in degree, with a
correction for zonal velocity U near the poles.
2. flat: No conversion, lat/lon are assumed to be in m.
:param allow_time_extrapolation: boolean whether to allow for extrapolation
(i.e. beyond the last available time snapshot)
Default is False if dimensions includes time, else True
:param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)
This flag overrides allow_time_extrapolation and sets it to False
:param tracer_interp_method: Method for interpolation of tracer fields. It is recommended to use 'cgrid_tracer' (default)
Note that in the case of from_nemo() and from_c_grid_dataset(), the velocity fields default to 'cgrid_velocity'
:param gridindexingtype: The type of gridindexing. Set to 'nemo' in FieldSet.from_nemo()
See also the Grid indexing documentation on oceanparcels.org
:param field_chunksize: size of the chunks in dask loading
"""
if 'U' in dimensions and 'V' in dimensions and dimensions['U'] != dimensions['V']:
raise ValueError("On a C-grid, the dimensions of velocities should be the corners (f-points) of the cells, so the same for U and V. "
"See also https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/documentation_indexing.ipynb")
if 'U' in dimensions and 'W' in dimensions and dimensions['U'] != dimensions['W']:
raise ValueError("On a C-grid, the dimensions of velocities should be the corners (f-points) of the cells, so the same for U, V and W. "
"See also https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/documentation_indexing.ipynb")
interp_method = {}
for v in variables:
if v in ['U', 'V', 'W']:
interp_method[v] = 'cgrid_velocity'
else:
interp_method[v] = tracer_interp_method
if 'creation_log' not in kwargs.keys():
kwargs['creation_log'] = 'from_c_grid_dataset'
return cls.from_netcdf(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic,
allow_time_extrapolation=allow_time_extrapolation, interp_method=interp_method,
field_chunksize=field_chunksize, gridindexingtype=gridindexingtype, **kwargs)
@classmethod
def from_pop(cls, filenames, variables, dimensions, indices=None, mesh='spherical',
allow_time_extrapolation=None, time_periodic=False,
tracer_interp_method='bgrid_tracer', field_chunksize='auto', depth_units='m', **kwargs):
"""Initialises FieldSet object from NetCDF files of POP fields.
It is assumed that the velocities in the POP fields are in cm/s.
:param filenames: Dictionary mapping variables to file(s). The
filepath may contain wildcards to indicate multiple files,
or be a list of files.
filenames can be a list [files], a dictionary {var:[files]},
a dictionary {dim:[files]} (if lon, lat and/or depth are not stored in the same files as the data),
or a dictionary of dictionaries {var:{dim:[files]}}
time values are read from filenames['data']
:param variables: Dictionary mapping variables to variable names in the netCDF file(s).
Note that the built-in Advection kernels assume that U and V are in m/s
:param dimensions: Dictionary mapping data dimensions (lon,
lat, depth, time, data) to dimensions in the netCDF file(s).
Note that dimensions can also be a dictionary of dictionaries if
dimension names are different for each variable.
Watch out: POP is discretised on a B-grid:
U and V velocity nodes are not co-located with the W velocity and T tracer nodes (see http://www.cesm.ucar.edu/models/cesm1.0/pop2/doc/sci/POPRefManual.pdf ).
+-----------------------------+-----------------------------+-----------------------------+
|U[k,j+1,i],V[k,j+1,i] | |U[k,j+1,i+1],V[k,j+1,i+1] |
+-----------------------------+-----------------------------+-----------------------------+
| |W[k:k+2,j+1,i+1],T[k,j+1,i+1]| |
+-----------------------------+-----------------------------+-----------------------------+
|U[k,j,i],V[k,j,i] | +U[k,j,i+1],V[k,j,i+1] |
+-----------------------------+-----------------------------+-----------------------------+
In 2D: U and V nodes are on the cell vertices and interpolated bilinearly, as on an A-grid.
T node is at the cell centre and interpolated constant per cell, as on a C-grid.
In 3D: U and V nodes are at the middle of the cell vertical edges,
They are interpolated bilinearly (independently of z) in the cell.
W nodes are at the centre of the horizontal interfaces.
They are interpolated linearly (as a function of z) in the cell.
T node is at the cell centre, and constant per cell.
Note that Parcels assumes that the length of the depth dimension (at the W-points)
is one larger than the size of the velocity and tracer fields in the depth dimension.
:param indices: Optional dictionary of indices for each dimension
to read from file(s), to allow for reading of subset of data.
Default is to read the full extent of each dimension.
Note that negative indices are not allowed.
:param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.
(either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)
:param mesh: String indicating the type of mesh coordinates and
units used during velocity interpolation, see also `this tutorial <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb>`_:
1. spherical (default): Lat and lon in degree, with a
correction for zonal velocity U near the poles.
2. flat: No conversion, lat/lon are assumed to be in m.
:param allow_time_extrapolation: boolean whether to allow for extrapolation
(i.e. beyond the last available time snapshot)
Default is False if dimensions includes time, else True
:param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)
This flag overrides allow_time_extrapolation and sets it to False
:param tracer_interp_method: Method for interpolation of tracer fields. It is recommended to use 'bgrid_tracer' (default)
Note that in the case of from_pop() and from_b_grid_dataset(), the velocity fields default to 'bgrid_velocity'
:param field_chunksize: size of the chunks in dask loading
:param depth_units: The units of the vertical dimension. Default in Parcels is 'm',
but many POP outputs are in 'cm'
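Example of a minimal call (illustrative only; the file pattern and the
variable/dimension names below are hypothetical, not part of the API):
    filenames = {'U': 'pop_out_*.nc', 'V': 'pop_out_*.nc', 'W': 'pop_out_*.nc'}
    variables = {'U': 'UVEL', 'V': 'VVEL', 'W': 'WVEL'}
    dimensions = {'lon': 'ULONG', 'lat': 'ULAT', 'depth': 'w_dep', 'time': 'time'}
    fieldset = FieldSet.from_pop(filenames, variables, dimensions, depth_units='cm')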
"""
if 'creation_log' not in kwargs.keys():
kwargs['creation_log'] = 'from_pop'
fieldset = cls.from_b_grid_dataset(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic,
allow_time_extrapolation=allow_time_extrapolation, tracer_interp_method=tracer_interp_method,
field_chunksize=field_chunksize, gridindexingtype='pop', **kwargs)
if hasattr(fieldset, 'U'):
fieldset.U.set_scaling_factor(0.01) # cm/s to m/s
if hasattr(fieldset, 'V'):
fieldset.V.set_scaling_factor(0.01) # cm/s to m/s
if hasattr(fieldset, 'W'):
if depth_units == 'm':
fieldset.W.set_scaling_factor(-0.01) # cm/s to m/s and change the W direction
logger.warning_once("Parcels assumes depth in POP output to be in 'm'. Use depth_units='cm' if the output depth is in 'cm'.")
elif depth_units == 'cm':
fieldset.W.set_scaling_factor(-1.) # change the W direction but keep W in cm/s because depth is in cm
else:
raise SyntaxError("'depth_units' has to be 'm' or 'cm'")
return fieldset
@classmethod
def from_mom5(cls, filenames, variables, dimensions, indices=None, mesh='spherical',
allow_time_extrapolation=None, time_periodic=False,
tracer_interp_method='bgrid_tracer', field_chunksize='auto', **kwargs):
"""Initialises FieldSet object from NetCDF files of MOM5 fields.
:param filenames: Dictionary mapping variables to file(s). The
filepath may contain wildcards to indicate multiple files,
or be a list of files.
filenames can be a list [files], a dictionary {var:[files]},
a dictionary {dim:[files]} (if lon, lat, depth and/or time are not stored in the same files as the data),
or a dictionary of dictionaries {var:{dim:[files]}}.
Time values are taken from filenames[data].
:param variables: Dictionary mapping variables to variable names in the netCDF file(s).
Note that the built-in Advection kernels assume that U and V are in m/s
:param dimensions: Dictionary mapping data dimensions (lon,
lat, depth, time, data) to dimensions in the netCDF file(s).
Note that dimensions can also be a dictionary of dictionaries if
dimension names are different for each variable.
U[k,j+1,i],V[k,j+1,i] ____________________U[k,j+1,i+1],V[k,j+1,i+1]
| |
| W[k-1:k+1,j+1,i+1],T[k,j+1,i+1] |
| |
U[k,j,i],V[k,j,i] ________________________U[k,j,i+1],V[k,j,i+1]
In 2D: U and V nodes are on the cell vertices and interpolated bilinearly as on an A-grid.
T node is at the cell centre and interpolated constant per cell as on a C-grid.
In 3D: U and V nodes are at the middle of the cell vertical edges,
They are interpolated bilinearly (independently of z) in the cell.
W nodes are at the centre of the horizontal interfaces, but below the U and V.
They are interpolated linearly (as a function of z) in the cell.
Note that W is normally directed upward in MOM5, but Parcels requires W
in the positive z-direction (downward) so W is multiplied by -1.
T node is at the cell centre, and constant per cell.
:param indices: Optional dictionary of indices for each dimension
to read from file(s), to allow for reading of subset of data.
Default is to read the full extent of each dimension.
Note that negative indices are not allowed.
:param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.
(either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)
:param mesh: String indicating the type of mesh coordinates and
units used during velocity interpolation, see also https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb:
1. spherical (default): Lat and lon in degrees, with a
correction for zonal velocity U near the poles.
2. flat: No conversion, lat/lon are assumed to be in m.
:param allow_time_extrapolation: boolean whether to allow for extrapolation
(i.e. beyond the last available time snapshot)
Default is False if dimensions includes time, else True
:param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)
This flag overrides the allow_time_extrapolation and sets it to False
:param tracer_interp_method: Method for interpolation of tracer fields. It is recommended to use 'bgrid_tracer' (default)
Note that in the case of from_mom5() and from_bgrid(), the velocity fields default to 'bgrid_velocity'
:param field_chunksize: size of the chunks in dask loading
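Example of a minimal call (illustrative only; the file pattern and the
variable/dimension names below are hypothetical, not part of the API):
    filenames = {'U': 'mom5_out_*.nc', 'V': 'mom5_out_*.nc', 'W': 'mom5_out_*.nc'}
    variables = {'U': 'u', 'V': 'v', 'W': 'wt'}
    dimensions = {'lon': 'xu_ocean', 'lat': 'yu_ocean', 'depth': 'sw_ocean', 'time': 'time'}
    fieldset = FieldSet.from_mom5(filenames, variables, dimensions)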
"""
if 'creation_log' not in kwargs.keys():
kwargs['creation_log'] = 'from_mom5'
fieldset = cls.from_b_grid_dataset(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic,
allow_time_extrapolation=allow_time_extrapolation, tracer_interp_method=tracer_interp_method,
field_chunksize=field_chunksize, gridindexingtype='mom5', **kwargs)
if hasattr(fieldset, 'W'):
fieldset.W.set_scaling_factor(-1)
return fieldset
@classmethod
def from_b_grid_dataset(cls, filenames, variables, dimensions, indices=None, mesh='spherical',
allow_time_extrapolation=None, time_periodic=False,
tracer_interp_method='bgrid_tracer', field_chunksize='auto', **kwargs):
"""Initialises FieldSet object from NetCDF files of Bgrid fields.
:param filenames: Dictionary mapping variables to file(s). The
filepath may contain wildcards to indicate multiple files,
or be a list of files.
filenames can be a list [files], a dictionary {var:[files]},
a dictionary {dim:[files]} (if lon, lat, depth and/or time are not stored in the same files as the data),
or a dictionary of dictionaries {var:{dim:[files]}}.
Time values are taken from filenames[data].
:param variables: Dictionary mapping variables to variable
names in the netCDF file(s).
:param dimensions: Dictionary mapping data dimensions (lon,
lat, depth, time, data) to dimensions in the netCDF file(s).
Note that dimensions can also be a dictionary of dictionaries if
dimension names are different for each variable.
U and V velocity nodes are not co-located with the W velocity and T tracer nodes (see http://www.cesm.ucar.edu/models/cesm1.0/pop2/doc/sci/POPRefManual.pdf ).
+-----------------------------+-----------------------------+-----------------------------+
|U[k,j+1,i],V[k,j+1,i] | |U[k,j+1,i+1],V[k,j+1,i+1] |
+-----------------------------+-----------------------------+-----------------------------+
| |W[k:k+2,j+1,i+1],T[k,j+1,i+1]| |
+-----------------------------+-----------------------------+-----------------------------+
|U[k,j,i],V[k,j,i] | +U[k,j,i+1],V[k,j,i+1] |
+-----------------------------+-----------------------------+-----------------------------+
In 2D: U and V nodes are on the cell vertices and interpolated bilinearly as on an A-grid.
T node is at the cell centre and interpolated constant per cell as on a C-grid.
In 3D: U and V nodes are at the middle of the cell vertical edges,
They are interpolated bilinearly (independently of z) in the cell.
W nodes are at the centre of the horizontal interfaces.
They are interpolated linearly (as a function of z) in the cell.
T node is at the cell centre, and constant per cell.
:param indices: Optional dictionary of indices for each dimension
to read from file(s), to allow for reading of subset of data.
Default is to read the full extent of each dimension.
Note that negative indices are not allowed.
:param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.
(either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)
:param mesh: String indicating the type of mesh coordinates and
units used during velocity interpolation:
1. spherical (default): Lat and lon in degrees, with a
correction for zonal velocity U near the poles.
2. flat: No conversion, lat/lon are assumed to be in m.
:param allow_time_extrapolation: boolean whether to allow for extrapolation
(i.e. beyond the last available time snapshot)
Default is False if dimensions includes time, else True
:param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)
This flag overrides the allow_time_extrapolation and sets it to False
:param tracer_interp_method: Method for interpolation of tracer fields. It is recommended to use 'bgrid_tracer' (default)
Note that in the case of from_pop() and from_bgrid(), the velocity fields default to 'bgrid_velocity'
:param field_chunksize: size of the chunks in dask loading
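Example of a minimal call (illustrative only; all names are hypothetical).
Because velocities live on the cell corners, U and V must be given the
same dimensions dictionary:
    dims = {'lon': 'lonc', 'lat': 'latc', 'depth': 'depthw', 'time': 'time'}
    fieldset = FieldSet.from_b_grid_dataset('bgrid_*.nc',
                                            variables={'U': 'uo', 'V': 'vo'},
                                            dimensions={'U': dims, 'V': dims})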
"""
if 'U' in dimensions and 'V' in dimensions and dimensions['U'] != dimensions['V']:
raise ValueError("On a B-grid, the dimensions of velocities should be the (top) corners of the grid cells, so the same for U and V. "
"See also https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/documentation_indexing.ipynb")
if 'U' in dimensions and 'W' in dimensions and dimensions['U'] != dimensions['W']:
raise ValueError("On a B-grid, the dimensions of velocities should be the (top) corners of the grid cells, so the same for U, V and W. "
"See also https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/documentation_indexing.ipynb")
interp_method = {}
for v in variables:
if v in ['U', 'V']:
interp_method[v] = 'bgrid_velocity'
elif v in ['W']:
interp_method[v] = 'bgrid_w_velocity'
else:
interp_method[v] = tracer_interp_method
if 'creation_log' not in kwargs.keys():
kwargs['creation_log'] = 'from_b_grid_dataset'
return cls.from_netcdf(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic,
allow_time_extrapolation=allow_time_extrapolation, interp_method=interp_method,
field_chunksize=field_chunksize, **kwargs)
@classmethod
def from_parcels(cls, basename, uvar='vozocrtx', vvar='vomecrty', indices=None, extra_fields=None,
allow_time_extrapolation=None, time_periodic=False, deferred_load=True,
field_chunksize='auto', **kwargs):
"""Initialises FieldSet data from NetCDF files using the Parcels FieldSet.write() conventions.
:param basename: Base name of the file(s); may contain
wildcards to indicate multiple files.
:param indices: Optional dictionary of indices for each dimension
to read from file(s), to allow for reading of subset of data.
Default is to read the full extent of each dimension.
Note that negative indices are not allowed.
:param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.
(either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)
:param extra_fields: Extra fields to read beyond U and V
:param allow_time_extrapolation: boolean whether to allow for extrapolation
(i.e. beyond the last available time snapshot)
Default is False if dimensions includes time, else True
:param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)
This flag overrides the allow_time_extrapolation and sets it to False
:param deferred_load: boolean whether to only pre-load data (in deferred mode) or
fully load them (default: True). It is advised to deferred-load the data, since in
that case Parcels manages memory better during particle set execution.
deferred_load=False is however sometimes necessary for plotting the fields.
:param field_chunksize: size of the chunks in dask loading
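Example of a minimal call (illustrative only; the basename is hypothetical).
This reads e.g. myfieldsU.nc and myfieldsV.nc, as written by FieldSet.write():
    fieldset = FieldSet.from_parcels('myfields')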
"""
if extra_fields is None:
extra_fields = {}
if 'creation_log' not in kwargs.keys():
kwargs['creation_log'] = 'from_parcels'
dimensions = {}
default_dims = {'lon': 'nav_lon', 'lat': 'nav_lat',
'depth': 'depth', 'time': 'time_counter'}
extra_fields.update({'U': uvar, 'V': vvar})
for vars in extra_fields:
dimensions[vars] = deepcopy(default_dims)
dimensions[vars]['depth'] = 'depth%s' % vars.lower()
filenames = dict([(v, str("%s%s.nc" % (basename, v)))
for v in extra_fields.keys()])
return cls.from_netcdf(filenames, indices=indices, variables=extra_fields,
dimensions=dimensions, allow_time_extrapolation=allow_time_extrapolation,
time_periodic=time_periodic, deferred_load=deferred_load,
field_chunksize=field_chunksize, **kwargs)
@classmethod
def from_xarray_dataset(cls, ds, variables, dimensions, mesh='spherical', allow_time_extrapolation=None,
time_periodic=False, **kwargs):
"""Initialises FieldSet data from xarray Datasets.
:param ds: xarray Dataset.
Note that the built-in Advection kernels assume that U and V are in m/s
:param variables: Dictionary mapping parcels variable names to data variables in the xarray Dataset.
:param dimensions: Dictionary mapping data dimensions (lon,
lat, depth, time, data) to dimensions in the xarray Dataset.
Note that dimensions can also be a dictionary of dictionaries if
dimension names are different for each variable
(e.g. dimensions['U'], dimensions['V'], etc).
:param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.
(either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)
:param mesh: String indicating the type of mesh coordinates and
units used during velocity interpolation, see also `this tutorial <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb>`_:
1. spherical (default): Lat and lon in degrees, with a
correction for zonal velocity U near the poles.
2. flat: No conversion, lat/lon are assumed to be in m.
:param allow_time_extrapolation: boolean whether to allow for extrapolation
(i.e. beyond the last available time snapshot)
Default is False if dimensions includes time, else True
:param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)
This flag overrides the allow_time_extrapolation and sets it to False
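Example of a minimal call (illustrative only; ds is assumed to be an
xarray.Dataset with data variables 'uo' and 'vo'):
    fieldset = FieldSet.from_xarray_dataset(
        ds, variables={'U': 'uo', 'V': 'vo'},
        dimensions={'lon': 'longitude', 'lat': 'latitude', 'time': 'time'})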
"""
fields = {}
if 'creation_log' not in kwargs.keys():
kwargs['creation_log'] = 'from_xarray_dataset'
if 'time' in dimensions:
if 'units' not in ds[dimensions['time']].attrs and 'Unit' in ds[dimensions['time']].attrs:
# Fix DataArrays that have time.Unit instead of expected time.units
convert_xarray_time_units(ds, dimensions['time'])
for var, name in variables.items():
dims = dimensions[var] if var in dimensions else dimensions
cls.checkvaliddimensionsdict(dims)
fields[var] = Field.from_xarray(ds[name], var, dims, mesh=mesh, allow_time_extrapolation=allow_time_extrapolation,
time_periodic=time_periodic, **kwargs)
u = fields.pop('U', None)
v = fields.pop('V', None)
return cls(u, v, fields=fields)
def get_fields(self):
"""Returns a list of all the :class:`parcels.field.Field` and :class:`parcels.field.VectorField`
objects associated with this FieldSet"""
fields = []
for v in self.__dict__.values():
if type(v) in [Field, VectorField]:
if v not in fields:
fields.append(v)
elif type(v) in [NestedField, SummedField]:
if v not in fields:
fields.append(v)
for v2 in v:
if v2 not in fields:
fields.append(v2)
return fields
def add_constant(self, name, value):
"""Add a constant to the FieldSet. Note that all constants are
stored as 32-bit floats. While constants can be updated during
execution in SciPy mode, they can not be updated in JIT mode.
Tutorials using fieldset.add_constant:
`Analytical advection <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_analyticaladvection.ipynb>`_
`Diffusion <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_diffusion.ipynb>`_
`Periodic boundaries <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_periodic_boundaries.ipynb>`_
:param name: Name of the constant
:param value: Value of the constant (stored as 32-bit float)
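Example (illustrative): fieldset.add_constant('Kh_zonal', 10.) makes the
value available inside kernels as fieldset.Kh_zonal.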
"""
setattr(self, name, value)
def add_periodic_halo(self, zonal=False, meridional=False, halosize=5):
"""Add a 'halo' to all :class:`parcels.field.Field` objects in a FieldSet,
through extending the Field (and lon/lat) by copying a small portion
of the field on one side of the domain to the other.
:param zonal: Create a halo in zonal direction (boolean)
:param meridional: Create a halo in meridional direction (boolean)
:param halosize: size of the halo (in grid points). Default is 5 grid points
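Example (illustrative): fieldset.add_periodic_halo(zonal=True) copies the
5 westernmost grid columns to the east side of every field and vice versa.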
"""
for grid in self.gridset.grids:
grid.add_periodic_halo(zonal, meridional, halosize)
for attr, value in iter(self.__dict__.items()):
if isinstance(value, Field):
value.add_periodic_halo(zonal, meridional, halosize)
def write(self, filename):
"""Write FieldSet to NetCDF file using NEMO convention
:param filename: Basename of the output fileset"""
if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
logger.info("Generating FieldSet output with basename: %s" % filename)
if hasattr(self, 'U'):
self.U.write(filename, varname='vozocrtx')
if hasattr(self, 'V'):
self.V.write(filename, varname='vomecrty')
for v in self.get_fields():
if (v.name != 'U') and (v.name != 'V'):
v.write(filename)
def advancetime(self, fieldset_new):
"""Replace oldest time on FieldSet with new FieldSet
:param fieldset_new: FieldSet snapshot with which the oldest time has to be replaced"""
logger.warning_once("Fieldset.advancetime() is deprecated.\n \
Parcels deals automatically with loading only 3 time steps simultaneously \
such that the total allocated memory remains limited.")
advance = 0
for gnew in fieldset_new.gridset.grids:
gnew.advanced = False
for fnew in fieldset_new.get_fields():
if isinstance(fnew, VectorField):
continue
f = getattr(self, fnew.name)
gnew = fnew.grid
if not gnew.advanced:
g = f.grid
advance2 = g.advancetime(gnew)
if advance2*advance < 0:
raise RuntimeError("Some Fields of the Fieldset are advanced forward and other backward")
advance = advance2
gnew.advanced = True
f.advancetime(fnew, advance == 1)
def computeTimeChunk(self, time, dt):
"""Load a chunk of three data time steps into the FieldSet.
This is used when FieldSet uses data imported from netcdf,
with default option deferred_load. The loaded time steps are at or immediately before time
and the two time steps immediately following time if dt is positive (and inversely for negative dt)
:param time: Time around which the FieldSet chunks are to be loaded. Time is provided as a double, relative to FieldSet.time_origin
:param dt: time step of the integration scheme
"""
signdt = np.sign(dt)
nextTime = np.infty if dt > 0 else -np.infty
for g in self.gridset.grids:
g.update_status = 'not_updated'
for f in self.get_fields():
if type(f) in [VectorField, NestedField, SummedField] or not f.grid.defer_load:
continue
if f.grid.update_status == 'not_updated':
nextTime_loc = f.grid.computeTimeChunk(f, time, signdt)
if time == nextTime_loc and signdt != 0:
raise TimeExtrapolationError(time, field=f, msg='In fset.computeTimeChunk')
nextTime = min(nextTime, nextTime_loc) if signdt >= 0 else max(nextTime, nextTime_loc)
for f in self.get_fields():
if type(f) in [VectorField, NestedField, SummedField] or not f.grid.defer_load or f.dataFiles is None:
continue
g = f.grid
if g.update_status == 'first_updated': # First load of data
if f.data is not None and not isinstance(f.data, DeferredArray):
if not isinstance(f.data, list):
f.data = None
else:
for i in range(len(f.data)):
del f.data[i, :]
lib = np if f.field_chunksize in [False, None] else da
if f.gridindexingtype == 'pop' and g.zdim > 1:
zd = g.zdim - 1
else:
zd = g.zdim
data = lib.empty((g.tdim, zd, g.ydim-2*g.meridional_halo, g.xdim-2*g.zonal_halo), dtype=np.float32)
f.loaded_time_indices = range(3)
for tind in f.loaded_time_indices:
for fb in f.filebuffers:
if fb is not None:
fb.close()
fb = None
data = f.computeTimeChunk(data, tind)
data = f.rescale_and_set_minmax(data)
if(isinstance(f.data, DeferredArray)):
f.data = DeferredArray()
f.data = f.reshape(data)
if not f.chunk_set:
f.chunk_setup()
if len(g.load_chunk) > 0:
g.load_chunk = np.where(g.load_chunk == 2, 1, g.load_chunk)
g.load_chunk = np.where(g.load_chunk == 3, 0, g.load_chunk)
elif g.update_status == 'updated':
lib = np if isinstance(f.data, np.ndarray) else da
if f.gridindexingtype == 'pop' and g.zdim > 1:
zd = g.zdim - 1
else:
zd = g.zdim
data = lib.empty((g.tdim, zd, g.ydim-2*g.meridional_halo, g.xdim-2*g.zonal_halo), dtype=np.float32)
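# Rotate the three-slice time buffer: when integrating forward, drop the
# oldest slice and load the new snapshot into index 2; backward integration
# mirrors this by loading the new snapshot into index 0.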
if signdt >= 0:
f.loaded_time_indices = [2]
if f.filebuffers[0] is not None:
f.filebuffers[0].close()
f.filebuffers[0] = None
f.filebuffers[:2] = f.filebuffers[1:]
data = f.computeTimeChunk(data, 2)
else:
f.loaded_time_indices = [0]
if f.filebuffers[2] is not None:
f.filebuffers[2].close()
f.filebuffers[2] = None
f.filebuffers[1:] = f.filebuffers[:2]
data = f.computeTimeChunk(data, 0)
data = f.rescale_and_set_minmax(data)
if signdt >= 0:
data = f.reshape(data)[2:, :]
if lib is da:
f.data = lib.concatenate([f.data[1:, :], data], axis=0)
else:
if not isinstance(f.data, DeferredArray):
if isinstance(f.data, list):
del f.data[0, :]
else:
f.data[0, :] = None
f.data[:2, :] = f.data[1:, :]
f.data[2, :] = data
else:
data = f.reshape(data)[0:1, :]
if lib is da:
f.data = lib.concatenate([data, f.data[:2, :]], axis=0)
else:
if not isinstance(f.data, DeferredArray):
if isinstance(f.data, list):
del f.data[2, :]
else:
f.data[2, :] = None
f.data[1:, :] = f.data[:2, :]
f.data[0, :] = data
g.load_chunk = np.where(g.load_chunk == 3, 0, g.load_chunk)
if isinstance(f.data, da.core.Array) and len(g.load_chunk) > 0:
if signdt >= 0:
for block_id in range(len(g.load_chunk)):
if g.load_chunk[block_id] == 2:
if f.data_chunks[block_id] is None:
# file chunks were never loaded.
# happens when field not called by kernel, but shares a grid with another field called by kernel
break
block = f.get_block(block_id)
f.data_chunks[block_id][0] = None
f.data_chunks[block_id][:2] = f.data_chunks[block_id][1:]
f.data_chunks[block_id][2] = np.array(f.data.blocks[(slice(3),)+block][2])
else:
for block_id in range(len(g.load_chunk)):
if g.load_chunk[block_id] == 2:
if f.data_chunks[block_id] is None:
# file chunks were never loaded.
# happens when field not called by kernel, but shares a grid with another field called by kernel
break
block = f.get_block(block_id)
f.data_chunks[block_id][2] = None
f.data_chunks[block_id][1:] = f.data_chunks[block_id][:2]
f.data_chunks[block_id][0] = np.array(f.data.blocks[(slice(3),)+block][0])
# do user-defined computations on fieldset data
if self.compute_on_defer:
self.compute_on_defer(self)
# update time varying grid depth
for f in self.get_fields():
if type(f) in [VectorField, NestedField, SummedField] or not f.grid.defer_load or f.dataFiles is None:
continue
if f.grid.depth_field is not None:
depth_data = f.grid.depth_field.data
f.grid.depth = depth_data if isinstance(depth_data, np.ndarray) else np.array(depth_data)
if abs(nextTime) == np.infty or np.isnan(nextTime): # Second happens when dt=0
return nextTime
else:
nSteps = int((nextTime - time) / dt)
if nSteps == 0:
return nextTime
else:
return time + nSteps * dt
|
OceanPARCELS/parcels
|
parcels/fieldset.py
|
Python
|
mit
| 76,063
|
[
"NetCDF"
] |
b29bcd0e6cfd910c9ece8f8f4e4e012f6a6813cd0c31229393ef05181599fc5f
|
"""Convert to and from Roman numerals
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/05/05 21:57:20 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
#Define exceptions
class RomanError(Exception): pass
class OutOfRangeError(RomanError): pass
class NotIntegerError(RomanError): pass
class InvalidRomanNumeralError(RomanError): pass
def toRoman(n):
"""convert integer to Roman numeral"""
pass
def fromRoman(s):
"""convert Roman numeral to integer"""
pass
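# A minimal sketch of how toRoman() could be implemented with greedy
# subtraction. This is not part of the stage-1 file above (which leaves the
# functions stubbed on purpose); the _sketch_* names are hypothetical:
_SKETCH_ROMAN_MAP = (('M', 1000), ('CM', 900), ('D', 500), ('CD', 400),
                     ('C', 100), ('XC', 90), ('L', 50), ('XL', 40),
                     ('X', 10), ('IX', 9), ('V', 5), ('IV', 4), ('I', 1))
def _sketch_toRoman(n):
    """convert integer to Roman numeral (illustrative sketch)"""
    result = ''
    for numeral, value in _SKETCH_ROMAN_MAP:
        while n >= value:
            result += numeral
            n -= value
    return result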
|
tapomayukh/projects_in_python
|
sandbox_tapo/src/refs/diveintopython-pdf-5.4/diveintopython-5.4/py/roman/stage1/roman1.py
|
Python
|
mit
| 719
|
[
"VisIt"
] |
38e6811fd7c889c27e1a79dce7bac26339afa9ff8be21a5593563d9c85136663
|
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import base64
import datetime
import imp
import json
import os
import shlex
import zipfile
import random
import re
from io import BytesIO
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins import module_utils_loader
from ansible.plugins.shell.powershell import async_watchdog, async_wrapper, become_wrapper, leaf_exec
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.executor import action_write_locks
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'
# module_common is relative to module_utils, so fix the path
_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
ANSIBALLZ_TEMPLATE = u'''%(shebang)s
%(coding)s
ANSIBALLZ_WRAPPER = True # For test-module script to tell this is a ANSIBALLZ_WRAPPER
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import os.path
import sys
import __main__
# For some distros and python versions we pick up this script in the temporary
# directory. This leads to problems when the ansible module masks a python
# library that another import needs. We have not figured out what about the
# specific distros and python versions causes this to behave differently.
#
# Tested distros:
# Fedora23 with python3.4 Works
# Ubuntu15.10 with python2.7 Works
# Ubuntu15.10 with python3.4 Fails without this
# Ubuntu16.04.1 with python3.5 Fails without this
# To test on another platform:
# * use the copy module (since this shadows the stdlib copy module)
# * Turn off pipelining
# * Make sure that the destination file does not exist
# * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
# This will traceback in shutil. Looking at the complete traceback will show
# that shutil is importing copy which finds the ansible module instead of the
# stdlib module
scriptdir = None
try:
scriptdir = os.path.dirname(os.path.abspath(__main__.__file__))
except (AttributeError, OSError):
# Some platforms don't set __file__ when reading from stdin
# OSX raises OSError if using abspath() in a directory we don't have
# permission to read.
pass
if scriptdir is not None:
sys.path = [p for p in sys.path if p != scriptdir]
import base64
import shutil
import zipfile
import tempfile
import subprocess
if sys.version_info < (3,):
bytes = str
PY3 = False
else:
unicode = str
PY3 = True
try:
# Python-2.6+
from io import BytesIO as IOStream
except ImportError:
# Python < 2.6
from StringIO import StringIO as IOStream
ZIPDATA = """%(zipdata)s"""
def invoke_module(module, modlib_path, json_params):
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
os.environ['PYTHONPATH'] = ':'.join((modlib_path, pythonpath))
else:
os.environ['PYTHONPATH'] = modlib_path
p = subprocess.Popen([%(interpreter)s, module], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate(json_params)
if not isinstance(stderr, (bytes, unicode)):
stderr = stderr.read()
if not isinstance(stdout, (bytes, unicode)):
stdout = stdout.read()
if PY3:
sys.stderr.buffer.write(stderr)
sys.stdout.buffer.write(stdout)
else:
sys.stderr.write(stderr)
sys.stdout.write(stdout)
return p.returncode
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine.
#
# The subcommands in this function make it easier to debug ansiballz
# modules. Here's the basic steps:
#
# Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
# to save the module file remotely::
# $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
#
# Part of the verbose output will tell you where on the remote machine the
# module was written to::
# [...]
# <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
# PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
# ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
# LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
# [...]
#
# Login to the remote machine and run the module file via from the previous
# step with the explode subcommand to extract the module payload into
# source files::
# $ ssh host1
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
# Module expanded into:
# /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
#
# You can now edit the source files to instrument the code or experiment with
# different parameter values. When you're ready to run the code you've modified
# (instead of the code from the actual zipped module), use the execute subcommand like this::
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
# Okay to use __file__ here because we're running from a kept file
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
script_path = os.path.join(basedir, 'ansible_module_%(ansible_module)s.py')
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
# at the code on the remote machine for debugging it in that
# environment
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'wb')
f.write(z.read(filename))
f.close()
# write the args file
f = open(args_path, 'wb')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%%s' %% basedir)
exitcode = 0
elif command == 'execute':
# Execute the exploded code instead of executing the module from the
# embedded ZIPDATA. This allows people to easily run their modified
# code on the remote machine to see how changes will affect it.
# This differs slightly from default Ansible execution of Python modules
# as it passes the arguments to the module via a file instead of stdin.
# Set pythonpath to the debug dir
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
os.environ['PYTHONPATH'] = ':'.join((basedir, pythonpath))
else:
os.environ['PYTHONPATH'] = basedir
p = subprocess.Popen([%(interpreter)s, script_path, args_path],
env=os.environ, shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if not isinstance(stderr, (bytes, unicode)):
stderr = stderr.read()
if not isinstance(stdout, (bytes, unicode)):
stdout = stdout.read()
if PY3:
sys.stderr.buffer.write(stderr)
sys.stdout.buffer.write(stdout)
else:
sys.stderr.write(stderr)
sys.stdout.write(stdout)
return p.returncode
elif command == 'excommunicate':
# This attempts to run the module in-process (by importing a main
# function and then calling it). It is not the way ansible generally
# invokes the module so it won't work in every case. It is here to
# aid certain debuggers which work better when the code doesn't change
# from one process to another but there may be problems that occur
# when using this that are only artifacts of how we're invoking here,
# not actual bugs (as they don't affect the real way that we invoke
# ansible modules)
# stub the args and python path
sys.argv = ['%(ansible_module)s', args_path]
sys.path.insert(0, basedir)
from ansible_module_%(ansible_module)s import main
main()
print('WARNING: Module returned to wrapper instead of exiting')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
if __name__ == '__main__':
#
# See comments in the debug() method for information on debugging
#
ANSIBALLZ_PARAMS = %(params)s
if PY3:
ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
try:
# There's a race condition with the controller removing the
# remote_tmpdir and this module executing under async. So we cannot
# store this in remote_tmpdir (use system tempdir instead)
temp_path = tempfile.mkdtemp(prefix='ansible_')
zipped_mod = os.path.join(temp_path, 'ansible_modlib.zip')
modlib = open(zipped_mod, 'wb')
modlib.write(base64.b64decode(ZIPDATA))
modlib.close()
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
else:
z = zipfile.ZipFile(zipped_mod, mode='r')
module = os.path.join(temp_path, 'ansible_module_%(ansible_module)s.py')
f = open(module, 'wb')
f.write(z.read('ansible_module_%(ansible_module)s.py'))
f.close()
# When installed via setuptools (including python setup.py install),
# ansible may be installed with an easy-install.pth file. That file
# may load the system-wide install of ansible rather than the one in
# the module. sitecustomize is the only way to override that setting.
z = zipfile.ZipFile(zipped_mod, mode='a')
# py3: zipped_mod will be text, py2: it's bytes. Need bytes at the end
sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% zipped_mod
sitecustomize = sitecustomize.encode('utf-8')
# Use a ZipInfo to work around zipfile limitation on hosts with
# clocks set to a pre-1980 year (for instance, Raspberry Pi)
zinfo = zipfile.ZipInfo()
zinfo.filename = 'sitecustomize.py'
zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i)
z.writestr(zinfo, sitecustomize)
z.close()
exitcode = invoke_module(module, zipped_mod, ANSIBALLZ_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except OSError:
# tempdir creation probably failed
pass
sys.exit(exitcode)
'''
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
for line in source.splitlines():
l = line.strip()
if not l or l.startswith(u'#'):
continue
buf.append(line)
return u'\n'.join(buf)
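# Example (illustrative): _strip_comments(u"# note\n\nx = 1\n") returns u"x = 1",
# dropping the comment line and the blank line while keeping code lines as-is.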
if C.DEFAULT_KEEP_REMOTE_FILES:
# Keep comments when KEEP_REMOTE_FILES is set. That way users will see
# the comments with some nice usage instructions
ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE
else:
# ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size
ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE)
class ModuleDepFinder(ast.NodeVisitor):
# Caveats:
# This code currently does not handle:
# * relative imports from py2.6+ from . import urls
IMPORT_PREFIX_SIZE = len('ansible.module_utils.')
def __init__(self, *args, **kwargs):
"""
Walk the ast tree for the python module.
Save submodule[.submoduleN][.identifier] into self.submodules
self.submodules will end up with tuples like:
- ('basic',)
- ('urls', 'fetch_url')
- ('database', 'postgres')
- ('database', 'postgres', 'quote')
It's up to calling code to determine whether the final element of the
dotted strings are module names or something else (function, class, or
variable names)
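Example (illustrative): visiting a module that contains
'from ansible.module_utils.urls import fetch_url' adds the tuple
('urls', 'fetch_url') to self.submodules.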
"""
super(ModuleDepFinder, self).__init__(*args, **kwargs)
self.submodules = set()
def visit_Import(self, node):
# import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
for alias in (a for a in node.names if a.name.startswith('ansible.module_utils.')):
py_mod = alias.name[self.IMPORT_PREFIX_SIZE:]
py_mod = tuple(py_mod.split('.'))
self.submodules.add(py_mod)
self.generic_visit(node)
def visit_ImportFrom(self, node):
# Special case: six requires special handling
# because of its import logic
if node.names[0].name == '_six':
self.submodules.add(('_six',))
elif node.module.startswith('ansible.module_utils'):
where_from = node.module[self.IMPORT_PREFIX_SIZE:]
if where_from:
# from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
py_mod = tuple(where_from.split('.'))
for alias in node.names:
self.submodules.add(py_mod + (alias.name,))
else:
# from ansible.module_utils import MODLIB [,MODLIB2] [as asname]
for alias in node.names:
self.submodules.add((alias.name,))
self.generic_visit(node)
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
fd = open(path, 'rb')
data = fd.read()
fd.close()
return data
def _get_shebang(interpreter, task_vars, args=tuple()):
"""
Note that this is not an ideal API:
Returns None instead of always returning a shebang line. Doing it this
way allows the caller to decide to use the shebang it read from the
file rather than trust that we reformatted what they already have
correctly.
"""
interpreter_config = u'ansible_%s_interpreter' % os.path.basename(interpreter).strip()
if interpreter_config not in task_vars:
return (None, interpreter)
interpreter = task_vars[interpreter_config].strip()
shebang = u'#!' + interpreter
if args:
shebang = shebang + u' ' + u' '.join(args)
return (shebang, interpreter)
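# Example (illustrative): with task_vars = {'ansible_python_interpreter': '/opt/py/bin/python'},
# _get_shebang(u'/usr/bin/python', task_vars) returns
# (u'#!/opt/py/bin/python', u'/opt/py/bin/python'); without that task_vars
# entry it returns (None, u'/usr/bin/python').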
def recursive_finder(name, data, py_module_names, py_module_cache, zf):
"""
Using ModuleDepFinder, make sure we have all of the module_utils files that
the module and its module_utils files need.
"""
# Parse the module and find the imports of ansible.module_utils
tree = ast.parse(data)
finder = ModuleDepFinder()
finder.visit(tree)
#
# Determine which of the imports we've found are modules (vs class, function,
# or variable names) for packages
#
normalized_modules = set()
# Loop through the imports that we've found to normalize them
# Exclude paths that match with paths we've already processed
# (Have to exclude them a second time once the paths are processed)
module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
module_utils_paths.append(_MODULE_UTILS_PATH)
for py_module_name in finder.submodules.difference(py_module_names):
module_info = None
if py_module_name[0] == 'six':
# Special case the python six library because it messes up the
# import process in an incompatible way
module_info = imp.find_module('six', module_utils_paths)
py_module_name = ('six',)
idx = 0
elif py_module_name[0] == '_six':
# Special case the python six library because it messes up the
# import process in an incompatible way
module_info = imp.find_module('_six', [os.path.join(p, 'six') for p in module_utils_paths])
py_module_name = ('six', '_six')
idx = 0
else:
# Check whether either the last or the second to last identifier is
# a module name
for idx in (1, 2):
if len(py_module_name) < idx:
break
try:
module_info = imp.find_module(py_module_name[-idx],
[os.path.join(p, *py_module_name[:-idx]) for p in module_utils_paths])
break
except ImportError:
continue
# Could not find the module. Construct a helpful error message.
if module_info is None:
msg = ['Could not find imported module support code for %s. Looked for' % (name,)]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
# Found a byte-compiled file rather than source. We cannot send byte-compiled
# files over the wire as the python version might be different.
# imp.find_module seems to prefer to return source packages so we just
# error out if imp.find_module returns byte compiled files (This is
# fragile as it depends on undocumented imp.find_module behaviour)
if module_info[2][2] not in (imp.PY_SOURCE, imp.PKG_DIRECTORY):
msg = ['Could not find python source for imported module support code for %s. Looked for' % name]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
if idx == 2:
# We've determined that the last portion was an identifier and
# thus, not part of the module name
py_module_name = py_module_name[:-1]
# If not already processed then we've got work to do
if py_module_name not in py_module_names:
# If not in the cache, then read the file into the cache
# We already have a file handle for the module open so it makes
# sense to read it now
if py_module_name not in py_module_cache:
if module_info[2][2] == imp.PKG_DIRECTORY:
# Read the __init__.py instead of the module file as this is
# a python package
normalized_name = py_module_name + ('__init__',)
normalized_path = os.path.join(os.path.join(module_info[1], '__init__.py'))
normalized_data = _slurp(normalized_path)
else:
normalized_name = py_module_name
normalized_path = module_info[1]
normalized_data = module_info[0].read()
module_info[0].close()
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
# Make sure that all the packages that this module is a part of
# are also added
for i in range(1, len(py_module_name)):
py_pkg_name = py_module_name[:-i] + ('__init__',)
if py_pkg_name not in py_module_names:
pkg_dir_info = imp.find_module(py_pkg_name[-1],
[os.path.join(p, *py_pkg_name[:-1]) for p in module_utils_paths])
normalized_modules.add(py_pkg_name)
py_module_cache[py_pkg_name] = (_slurp(pkg_dir_info[1]), pkg_dir_info[1])
#
# iterate through all of the ansible.module_utils* imports that we haven't
# already checked for new imports
#
# set of modules that we haven't added to the zipfile
unprocessed_py_module_names = normalized_modules.difference(py_module_names)
for py_module_name in unprocessed_py_module_names:
py_module_path = os.path.join(*py_module_name)
py_module_file_name = '%s.py' % py_module_path
zf.writestr(os.path.join("ansible/module_utils",
py_module_file_name), py_module_cache[py_module_name][0])
display.vvv("Using module_utils file %s" % py_module_cache[py_module_name][1])
# Add the names of the files we're scheduling to examine in the loop to
# py_module_names so that we don't re-examine them in the next pass
# through recursive_finder()
py_module_names.update(unprocessed_py_module_names)
for py_module_file in unprocessed_py_module_names:
recursive_finder(py_module_file, py_module_cache[py_module_file][0], py_module_names, py_module_cache, zf)
# Save memory; the file won't have to be read again for this ansible module.
del py_module_cache[py_module_file]
def _is_binary(b_module_data):
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = b_module_data[:1024]
return bool(start.translate(None, textchars))
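# Example (illustrative): _is_binary(b'\x00\x01ELF') is True because the NUL
# bytes survive the translate() call, whereas _is_binary(b'#!/usr/bin/python\n')
# is False since every byte falls in the text range.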
def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, module_compression):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_substyle = module_style = 'old'
# module_style is something important to calling code (ActionBase). It
# determines how arguments are formatted (json vs k=v) and whether
# a separate arguments file needs to be sent over the wire.
# module_substyle is extra information that's useful internally. It tells
# us what we have to look to substitute in the module files and whether
# we're using module replacer or ansiballz to format the module itself.
if _is_binary(b_module_data):
module_substyle = module_style = 'binary'
elif REPLACER in b_module_data:
# Do REPLACER before from ansible.module_utils because we need to make sure
# we substitute "from ansible.module_utils.basic" for REPLACER
module_style = 'new'
module_substyle = 'python'
b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
elif b'from ansible.module_utils.' in b_module_data:
module_style = 'new'
module_substyle = 'python'
elif REPLACER_WINDOWS in b_module_data or b'#Requires -Module' in b_module_data:
module_style = 'new'
module_substyle = 'powershell'
elif REPLACER_JSONARGS in b_module_data:
module_style = 'new'
module_substyle = 'jsonargs'
elif b'WANT_JSON' in b_module_data:
module_substyle = module_style = 'non_native_want_json'
shebang = None
# Neither old-style, non_native_want_json nor binary modules should be modified
# except for the shebang line (Done by modify_module)
if module_style in ('old', 'non_native_want_json', 'binary'):
return b_module_data, module_style, shebang
output = BytesIO()
py_module_names = set()
if module_substyle == 'python':
params = dict(ANSIBLE_MODULE_ARGS=module_args,)
python_repred_params = repr(json.dumps(params))
try:
compression_method = getattr(zipfile, module_compression)
except AttributeError:
display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
compression_method = zipfile.ZIP_STORED
lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))
zipdata = None
# Optimization -- don't lock if the module has already been cached
if os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
zipdata = open(cached_module_filename, 'rb').read()
else:
if module_name in action_write_locks.action_write_locks:
display.debug('ANSIBALLZ: Using lock for %s' % module_name)
lock = action_write_locks.action_write_locks[module_name]
else:
# If the action plugin directly invokes the module (instead of
# going through a strategy) then we don't have a cross-process
# Lock specifically for this module. Use the "unexpected
# module" lock instead
display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
lock = action_write_locks.action_write_locks[None]
display.debug('ANSIBALLZ: Acquiring lock')
with lock:
display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
# Check that no other process has created this while we were
# waiting for the lock
if not os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: Creating module')
# Create the module zip data
zipoutput = BytesIO()
zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
# Note: If we need to import from release.py first,
# remember to catch all exceptions: https://github.com/ansible/ansible/issues/16523
zf.writestr('ansible/__init__.py',
b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n__version__="' +
to_bytes(__version__) + b'"\n__author__="' +
to_bytes(__author__) + b'"\n')
zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n')
zf.writestr('ansible_module_%s.py' % module_name, b_module_data)
py_module_cache = { ('__init__',): (b'', '[builtin]') }
recursive_finder(module_name, b_module_data, py_module_names, py_module_cache, zf)
zf.close()
zipdata = base64.b64encode(zipoutput.getvalue())
# Write the assembled module to a temp file (write to temp
# so that no one looking for the file reads a partially
# written file)
if not os.path.exists(lookup_path):
# Note -- if we have a global function to setup, that would
# be a better place to run this
os.makedirs(lookup_path)
display.debug('ANSIBALLZ: Writing module')
with open(cached_module_filename + '-part', 'wb') as f:
f.write(zipdata)
# Rename the file into its final position in the cache so
# future users of this module can read it off the
# filesystem instead of constructing from scratch.
display.debug('ANSIBALLZ: Renaming module')
os.rename(cached_module_filename + '-part', cached_module_filename)
display.debug('ANSIBALLZ: Done creating module')
if zipdata is None:
display.debug('ANSIBALLZ: Reading module after lock')
# Another process wrote the file while we were waiting for
# the write lock. Go ahead and read the data from disk
# instead of re-creating it.
try:
zipdata = open(cached_module_filename, 'rb').read()
except IOError:
raise AnsibleError('A different worker process failed to create module file.'
' Look at traceback for that process for debugging information.')
zipdata = to_text(zipdata, errors='surrogate_or_strict')
shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars)
if shebang is None:
shebang = u'#!/usr/bin/python'
# Enclose the parts of the interpreter in quotes because we're
# substituting it into the template as a Python string
interpreter_parts = interpreter.split(u' ')
interpreter = u"'{0}'".format(u"', '".join(interpreter_parts))
now=datetime.datetime.utcnow()
output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
zipdata=zipdata,
ansible_module=module_name,
params=python_repred_params,
shebang=shebang,
interpreter=interpreter,
coding=ENCODING_STRING,
year=now.year,
month=now.month,
day=now.day,
hour=now.hour,
minute=now.minute,
second=now.second,
)))
b_module_data = output.getvalue()
elif module_substyle == 'powershell':
# Powershell/winrm don't actually make use of shebang so we can
# safely set this here. If we let the fallback code handle this
# it can fail in the presence of the UTF8 BOM commonly added by
# Windows text editors
shebang = u'#!powershell'
# powershell wrapper build is currently handled in build_windows_module_payload, called in action
# _configure_module after this function returns.
elif module_substyle == 'jsonargs':
module_args_json = to_bytes(json.dumps(module_args))
# these strings could be included in a third-party module but
# officially they were included in the 'basic' snippet for new-style
# python modules (which has been replaced with something else in
# ansiballz) If we remove them from jsonargs-style module replacer
# then we can remove them everywhere.
python_repred_args = to_bytes(repr(module_args_json))
b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
# The main event -- substitute the JSON args string into the module
b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)
facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)
return (b_module_data, module_style, shebang)
def modify_module(module_name, module_path, module_args, task_vars=dict(), module_compression='ZIP_STORED'):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of
properties not available here.
"""
with open(module_path, 'rb') as f:
# read in the module source
b_module_data = f.read()
(b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, module_compression)
if module_style == 'binary':
return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
elif shebang is None:
lines = b_module_data.split(b"\n", 1)
if lines[0].startswith(b"#!"):
shebang = lines[0].strip()
args = shlex.split(str(shebang[2:]))
interpreter = args[0]
interpreter = to_bytes(interpreter)
new_shebang = to_bytes(_get_shebang(interpreter, task_vars, args[1:])[0], errors='surrogate_or_strict', nonstring='passthru')
if new_shebang:
lines[0] = shebang = new_shebang
if os.path.basename(interpreter).startswith(b'python'):
lines.insert(1, to_bytes(ENCODING_STRING))
else:
# No shebang, assume a binary module?
pass
b_module_data = b"\n".join(lines)
else:
shebang = to_bytes(shebang, errors='surrogate_or_strict')
return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
def build_windows_module_payload(module_name, module_path, b_module_data, module_args, task_vars, task, play_context, environment):
exec_manifest = dict(
module_entry=to_text(base64.b64encode(b_module_data)),
powershell_modules=dict(),
module_args=module_args,
actions=['exec'],
environment=environment
)
exec_manifest['exec'] = to_text(base64.b64encode(to_bytes(leaf_exec)))
if task.async > 0:
exec_manifest["actions"].insert(0, 'async_watchdog')
exec_manifest["async_watchdog"] = to_text(base64.b64encode(to_bytes(async_watchdog)))
exec_manifest["actions"].insert(0, 'async_wrapper')
exec_manifest["async_wrapper"] = to_text(base64.b64encode(to_bytes(async_wrapper)))
exec_manifest["async_jid"] = str(random.randint(0, 999999999999))
exec_manifest["async_timeout_sec"] = task.async
if play_context.become and play_context.become_method=='runas':
exec_manifest["actions"].insert(0, 'become')
exec_manifest["become_user"] = play_context.become_user
exec_manifest["become_password"] = play_context.become_pass
exec_manifest["become"] = to_text(base64.b64encode(to_bytes(become_wrapper)))
lines = b_module_data.split(b'\n')
module_names = set()
requires_module_list = re.compile(r'(?i)^#requires \-module(?:s?) (.+)')
for line in lines:
# legacy, equivalent to #Requires -Modules powershell
if REPLACER_WINDOWS in line:
module_names.add(b'powershell')
# TODO: add #Requires checks for Ansible.ModuleUtils.X
for m in module_names:
m = to_text(m)
exec_manifest["powershell_modules"][m] = to_text(
base64.b64encode(
to_bytes(
_slurp(os.path.join(_MODULE_UTILS_PATH, m + ".ps1"))
)
)
)
# FUTURE: smuggle this back as a dict instead of serializing here; the connection plugin may need to modify it
b_module_data = json.dumps(exec_manifest)
return b_module_data
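# Example (illustrative) shape of the serialized manifest (values elided):
#
#   {"module_entry": "<b64>", "powershell_modules": {"powershell": "<b64>"},
#    "module_args": {...}, "actions": ["exec"], "environment": {...},
#    "exec": "<b64>"}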
|
andreaso/ansible
|
lib/ansible/executor/module_common.py
|
Python
|
gpl-3.0
| 39,245
|
[
"VisIt"
] |
25e0978e47ab59110de44faba64f1668695bcdfb5cfad02fe05b67d23af7f702
|
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
from __future__ import print_function
import os
import re
import pickle
import shutil
import mx_javacompliance
from os.path import join, exists, dirname, basename, isdir, islink
from collections import defaultdict
from zipfile import ZipFile
import mx
# Temporary imports and (re)definitions while porting mx from Python 2 to Python 3
import sys
import itertools
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
class JavaModuleDescriptor(mx.Comparable):
"""
Describes a Java module. This class closely mirrors ``java.lang.module.ModuleDescriptor``.
:param str name: the name of the module
:param dict exports: dict from a package defined by this module to the modules it's exported to. An
empty list denotes an unqualified export.
:param dict requires: dict from a module dependency to the modifiers of the dependency
:param dict concealedRequires: dict from a module dependency to its concealed packages required by this module
:param set uses: the service types used by this module
:param dict provides: dict from a service name to the set of providers of the service defined by this module
:param iterable packages: the packages defined by this module
:param set conceals: the packages defined but not exported to anyone by this module
:param str jarpath: path to module jar file
:param JARDistribution dist: distribution from which this module was derived
:param Library lib: library from which this module was derived
:param list modulepath: list of `JavaModuleDescriptor` objects for the module dependencies of this module
:param dict alternatives: name to JavaModuleDescriptor for alternative definitions of the module. If this
is an alternative itself, then the dict has a single entry mapping its alternative name to None.
:param bool boot: specifies if this module is in the boot layer
:param JDKConfig jdk: the JDK containing this module
"""
def __init__(self, name, exports, requires, uses, provides, packages=None, concealedRequires=None,
jarpath=None, dist=None, lib=None, modulepath=None, alternatives=None, boot=False, jdk=None, opens=None):
self.name = name
self.exports = exports
self.requires = requires
self.concealedRequires = concealedRequires if concealedRequires else {}
self.uses = frozenset(uses)
self.opens = opens if opens else {}
self.provides = provides
exportedPackages = frozenset(exports.keys())
self.packages = exportedPackages if packages is None else frozenset(packages)
assert len(exports) == 0 or exportedPackages.issubset(self.packages), exportedPackages - self.packages
self.conceals = self.packages - exportedPackages
self.jarpath = jarpath
self.dist = dist
self.lib = lib
self.modulepath = modulepath
self.alternatives = alternatives
self.boot = boot
self.jdk = jdk
if not self.dist and not self.jarpath and not self.jdk:
mx.abort('JavaModuleDescriptor requires at least one of the "dist", "jarpath" or "jdk" attributes: ' + self.name)
def __str__(self):
return 'module:' + self.name
def __repr__(self):
return self.__str__()
def __cmp__(self, other):
assert isinstance(other, JavaModuleDescriptor)
return (self.name > other.name) - (self.name < other.name)
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return isinstance(other, JavaModuleDescriptor) and self.name == other.name
def get_jmod_path(self, respect_stripping=True, alt_module_info_name=None):
"""
Gets the path to the .jmod file corresponding to this module descriptor.
:param bool respect_stripping: Specifies whether or not to return a path
to a stripped .jmod file if this module is based on a dist
"""
if respect_stripping and self.dist is not None:
assert alt_module_info_name is None, 'alternate modules not supported for stripped dist ' + self.dist.name
return join(dirname(self.dist.path), self.name + '.jmod')
if self.dist is not None:
qualifier = '_' + alt_module_info_name if alt_module_info_name else ''
return join(dirname(self.dist.original_path()), self.name + qualifier + '.jmod')
if self.jarpath:
return join(dirname(self.jarpath), self.name + '.jmod')
assert self.jdk, self.name
p = join(self.jdk.home, 'jmods', self.name + '.jmod')
assert exists(p), p
return p
@staticmethod
def load(dist, jdk, fatalIfNotCreated=True, pickled_path=None):
"""
Unpickles the module descriptor corresponding to a given distribution.
:param str dist: the distribution for which to read the pickled object
:param JDKConfig jdk: used to resolve pickled references to JDK modules
:param bool fatalIfNotCreated: specifies whether to abort if a descriptor has not been created yet
"""
if not pickled_path:
_, pickled_path, _ = get_java_module_info(dist, fatalIfNotModule=True) # pylint: disable=unpacking-non-sequence
if not exists(pickled_path):
if fatalIfNotCreated:
mx.abort(pickled_path + ' does not exist')
else:
return None
with open(pickled_path, 'rb') as fp:
jmd = pickle.load(fp)
jdkmodules = {m.name: m for m in jdk.get_modules()}
resolved = []
for name in jmd.modulepath:
if name.startswith('dist:'):
distName = name[len('dist:'):]
resolved.append(as_java_module(mx.distribution(distName), jdk))
elif name.startswith('lib:'):
libName = name[len('lib:'):]
resolved.append(get_library_as_module(mx.dependency(libName), jdk))
else:
resolved.append(jdkmodules[name])
jmd.modulepath = resolved
jmd.dist = mx.distribution(jmd.dist)
if jmd.alternatives:
alternatives = {}
for alt_name, value in jmd.alternatives.items():
if value is not None:
alt_pickled_path = JavaModuleDescriptor._get_alt_pickled_path(pickled_path, alt_name)
value = JavaModuleDescriptor.load(dist, jdk, fatalIfNotCreated=fatalIfNotCreated, pickled_path=alt_pickled_path)
alternatives[alt_name] = value
jmd.alternatives = alternatives
if not os.path.isabs(jmd.jarpath):
jmd.jarpath = join(dirname(pickled_path), jmd.jarpath)
return jmd
def _get_alternative_name(self):
if self.alternatives and len(self.alternatives) == 1:
alt_name, jmd = next(iter(self.alternatives.items()))
if jmd is None:
return alt_name
return None
@staticmethod
def _get_alt_pickled_path(pickled_path, alt_name):
assert pickled_path.endswith('.jar.pickled')
return pickled_path[:-len('.jar.pickled')] + '-' + alt_name + '.jar.pickled'
def save(self):
"""
Pickles this module descriptor to a file if it corresponds to a distribution.
Otherwise, does nothing.
:return: the path to which this module descriptor was pickled or None
"""
dist = self.dist
if not dist:
# Don't pickle a JDK module
return None
_, pickled_path, _ = get_java_module_info(dist, fatalIfNotModule=True) # pylint: disable=unpacking-non-sequence
assert pickled_path.endswith('.pickled')
alt_name = self._get_alternative_name()
if alt_name:
pickled_path = JavaModuleDescriptor._get_alt_pickled_path(pickled_path, alt_name)
modulepath = self.modulepath
jarpath = self.jarpath
alternatives = self.alternatives
self.modulepath = []
for m in modulepath:
if m.dist:
pickled_name = 'dist:' + m.dist.name
elif m.lib:
pickled_name = 'lib:' + m.lib.suite.name + ':' + m.lib.name
else:
pickled_name = m.name
self.modulepath.append(pickled_name)
self.dist = dist.name
self.jarpath = os.path.relpath(jarpath, dirname(pickled_path))
if self.alternatives:
self.alternatives = {alt_name : None if v is None else alt_name for alt_name, v in self.alternatives.items()}
try:
with mx.SafeFileCreation(pickled_path) as sfc, open(sfc.tmpPath, 'wb') as f:
pickle.dump(self, f)
finally:
# Restore fields that were modified for pickling
self.modulepath = modulepath
self.dist = dist
self.jarpath = jarpath
self.alternatives = alternatives
def as_module_info(self, extras_as_comments=True):
"""
Gets this module descriptor expressed as the contents of a ``module-info.java`` file.
:param bool extras_as_comments: whether to emit comments documenting attributes not supported
by the module-info.java format
"""
out = StringIO()
print('module ' + self.name + ' {', file=out)
for dependency, modifiers in sorted(self.requires.items()):
modifiers_string = (' '.join(sorted(modifiers)) + ' ') if len(modifiers) != 0 else ''
print(' requires ' + modifiers_string + dependency + ';', file=out)
for source, targets in sorted(self.exports.items()):
targets_string = (' to ' + ', '.join(sorted(targets))) if len(targets) != 0 else ''
print(' exports ' + source + targets_string + ';', file=out)
for use in sorted(self.uses):
print(' uses ' + use + ';', file=out)
for opens in sorted(self.opens):
print(' opens ' + opens + ';', file=out)
for service, providers in sorted(self.provides.items()):
print(' provides ' + service + ' with ' + ', '.join((p for p in providers)) + ';', file=out)
if extras_as_comments:
for pkg in sorted(self.conceals):
print(' // conceals: ' + pkg, file=out)
if self.jarpath:
print(' // jarpath: ' + self.jarpath.replace('\\', '\\\\'), file=out)
if self.dist:
print(' // dist: ' + self.dist.name, file=out)
if self.modulepath:
print(' // modulepath: ' + ', '.join([jmd.name for jmd in self.modulepath]), file=out)
if self.concealedRequires:
for dependency, packages in sorted(self.concealedRequires.items()):
for package in sorted(packages):
print(' // concealed-requires: ' + dependency + '/' + package, file=out)
print('}', file=out)
return out.getvalue()
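    # Example (illustrative) of the generated contents for a hypothetical module:
    #
    #   module org.example.mod {
    #       requires transitive java.logging;
    #       exports org.example.api;
    #       uses org.example.spi.Service;
    #       provides org.example.spi.Service with org.example.impl.ServiceImpl;
    #   }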
def get_package_visibility(self, package, importer):
"""
Gets the visibility of `package` in this module.
:param str package: a package name
:param str importer: the name of the module importing the package (use "<unnamed>" or None for the unnamed module)
:return: if `package` is in this module, then return 'concealed' or 'exported' depending on the
visibility of the package with respect to `importer` otherwise return None
"""
targets = self.exports.get(package, None)
if targets is not None:
if len(targets) == 0 or importer in targets:
return 'exported'
return 'concealed'
elif package in self.conceals:
return 'concealed'
def collect_required_exports(self, required_exports):
"""
Adds required exports information that is needed to use this module to `required_exports`.
:param defaultdict(set) required_exports: dict where required exports information of this module should be added
"""
concealedRequires = self.concealedRequires
for module_name, packages in concealedRequires.items():
for package_name in packages:
required_exports[(module_name, package_name)].add(self)
def lookup_package(modulepath, package, importer):
"""
Searches `modulepath` for the module defining `package`.
:param list modulepath: an iterable of `JavaModuleDescriptors`
:param str package: a package name
:param str importer: the name of the module importing the package (use "<unnamed>" or None for the unnamed module)
:return: if the package is found, then a tuple containing the defining module
and a value of 'concealed' or 'exported' denoting the visibility of the package.
Otherwise (None, None) is returned.
"""
for jmd in modulepath:
visibility = jmd.get_package_visibility(package, importer)
if visibility is not None:
return jmd, visibility
return (None, None)
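# Example (illustrative; assumes `jmd` exports package 'com.foo' unqualified):
#
# >>> lookup_package([jmd], 'com.foo', '<unnamed>')
# (jmd, 'exported')
# >>> lookup_package([jmd], 'com.bar', '<unnamed>')
# (None, None)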
def get_module_deps(dist):
"""
Gets the JAR distributions and their constituent Java projects whose artifacts (i.e., class files and
resources) are the input to the Java module jar created by `make_java_module` for a given distribution.
:return: the set of `JARDistribution` objects and their constituent `JavaProject` transitive
dependencies denoted by the ``moduledeps`` attribute
"""
if dist.suite.getMxCompatibility().moduleDepsEqualDistDeps():
return dist.archived_deps()
if not hasattr(dist, '.module_deps'):
roots = getattr(dist, 'moduledeps', [])
if not roots:
return roots
for root in roots:
if not root.isJARDistribution():
mx.abort('moduledeps can only include JAR distributions: {}\n'
'Try updating to mxversion >= 5.34.4 where `moduledeps` is not needed.'.format(root), context=dist)
moduledeps = []
def _visit(dep, edges):
if dep is not dist:
if dep.isJavaProject() or dep.isJARDistribution():
if dep not in moduledeps:
moduledeps.append(dep)
else:
mx.abort('modules can only include JAR distributions and Java projects: {}\n'
'Try updating to mxversion >= 5.34.4 where `moduledeps` is not needed.'.format(dep), context=dist)
def _preVisit(dst, edge):
return not dst.isJreLibrary() and not dst.isJdkLibrary()
mx.walk_deps(roots, preVisit=_preVisit, visit=_visit)
setattr(dist, '.module_deps', moduledeps)
return getattr(dist, '.module_deps')
def as_java_module(dist, jdk, fatalIfNotCreated=True):
"""
Gets the Java module created from a given distribution.
:param JARDistribution dist: a distribution that defines a Java module
:param JDKConfig jdk: a JDK with a version >= 9 that can be used to resolve references to JDK modules
:param bool fatalIfNotCreated: specifies whether to abort if a descriptor has not been created yet
:return: the descriptor for the module
:rtype: `JavaModuleDescriptor`
"""
if not hasattr(dist, '.javaModule'):
jmd = JavaModuleDescriptor.load(dist, jdk, fatalIfNotCreated)
if jmd:
setattr(dist, '.javaModule', jmd)
return jmd
return getattr(dist, '.javaModule')
def get_module_name(dist):
"""
Gets the name of the module defined by `dist`.
"""
if dist.suite.getMxCompatibility().moduleDepsEqualDistDeps():
module_name = getattr(dist, 'moduleName', None)
mi = getattr(dist, 'moduleInfo', None)
if mi is not None:
if module_name:
mx.abort('The "moduleName" and "moduleInfo" attributes are mutually exclusive', context=dist)
module_name = mi.get('name', None)
if module_name is None:
mx.abort('The "moduleInfo" attribute requires either a "name" sub-attribute', context=dist)
elif module_name is not None and len(module_name) == 0:
mx.abort('"moduleName" attribute cannot be empty', context=dist)
else:
if not get_module_deps(dist):
return None
module_name = dist.name.replace('_', '.').lower()
return module_name
def get_java_module_info(dist, fatalIfNotModule=False):
"""
Gets the metadata for the module derived from `dist`.
:param JARDistribution dist: a distribution possibly defining a module
:param bool fatalIfNotModule: specifies whether to abort if `dist` does not define a module
:return: None if `dist` does not define a module otherwise a tuple containing
the name of the module, the descriptor pickle path, and finally the path to the
(unstripped) modular jar file
"""
if not dist.isJARDistribution():
if fatalIfNotModule:
mx.abort('Distribution ' + dist.name + ' is not a JARDistribution')
return None
module_name = get_module_name(dist)
if not module_name:
if fatalIfNotModule:
mx.abort('Distribution ' + dist.name + ' does not define a module')
return None
return module_name, dist.original_path() + '.pickled', dist.original_path()
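# Example (illustrative): for a module-defining JARDistribution the result is
#   ('org.example.mod', '/path/to/dist.jar.pickled', '/path/to/dist.jar')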
def get_library_as_module(dep, jdk):
"""
Converts a (modular or non-modular) jar library to a module descriptor.
:param Library dep: a library dependency
:param JDKConfig jdk: a JDK with a version >= 9 that can be used to describe the module
:return: a module descriptor
"""
assert dep.isLibrary()
def is_valid_module_name(name):
identRE = re.compile(r"^[A-Za-z][A-Za-z0-9]*$")
return all(identRE.match(ident) for ident in name.split('.'))
if hasattr(dep, 'moduleName'):
moduleName = dep.moduleName
else:
moduleName = jdk.get_automatic_module_name(dep.path)
if not is_valid_module_name(moduleName):
mx.abort("Invalid identifier in automatic module name derived for library {}: {} (path: {})".format(dep.name, moduleName, dep.path))
dep.moduleName = moduleName
modulesDir = mx.ensure_dir_exists(join(mx.primary_suite().get_output_root(), 'modules'))
cache = join(modulesDir, moduleName + '.desc')
fullpath = dep.get_path(resolve=True)
save = False
if not exists(cache) or mx.TimeStampFile(fullpath).isNewerThan(cache) or mx.TimeStampFile(__file__).isNewerThan(cache):
out = mx.LinesOutputCapture()
err = mx.LinesOutputCapture()
rc = mx.run([jdk.java, '--module-path', fullpath, '--describe-module', moduleName], out=out, err=err, nonZeroIsFatal=False)
lines = out.lines
if rc != 0:
mx.abort("java --describe-module {} failed. Please verify the moduleName attribute of {}.\nstdout:\n{}\nstderr:\n{}".format(moduleName, dep.name, "\n".join(lines), "\n".join(err.lines)))
save = True
else:
with open(cache) as fp:
lines = fp.read().splitlines()
assert lines and lines[0].startswith(moduleName), (dep.name, moduleName, lines)
accepted_modifiers = set(['transitive'])
requires = {}
exports = {}
provides = {}
opens = {}
uses = set()
packages = set()
for line in lines[1:]:
parts = line.strip().split()
assert len(parts) >= 2, '>>>'+line+'<<<'
if parts[0:2] == ['qualified', 'exports']:
parts = parts[1:]
a = parts[0]
if a == 'requires':
module = parts[1]
modifiers = parts[2:]
requires[module] = set(m for m in modifiers if m in accepted_modifiers)
elif a == 'exports':
source = parts[1]
if len(parts) > 2:
assert parts[2] == 'to'
targets = parts[3:]
else:
targets = []
exports[source] = targets
elif a == 'uses':
uses.update(parts[1:])
        elif a == 'opens':
            # 'opens' is a dict keyed by package name; dict.update() on a list
            # of package-name strings would raise, so record each one explicitly
            for p in parts[1:]:
                opens.setdefault(p, [])
elif a == 'contains':
packages.update(parts[1:])
elif a == 'provides':
assert len(parts) >= 4 and parts[2] == 'with'
service = parts[1]
providers = parts[3:]
provides.setdefault(service, []).extend(providers)
else:
mx.abort('Cannot parse module descriptor line: ' + str(parts))
packages.update(exports.keys())
if save:
try:
with open(cache, 'w') as fp:
fp.write('\n'.join(lines) + '\n')
except IOError as e:
mx.warn('Error writing to ' + cache + ': ' + str(e))
os.remove(cache)
return JavaModuleDescriptor(moduleName, exports, requires, uses, provides, packages, jarpath=fullpath, opens=opens, lib=dep)
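# Example (illustrative) of `java --module-path ... --describe-module` output
# consumed by the parser above (module and package names are hypothetical):
#
#   org.example.lib file:///path/to/lib.jar
#   requires java.base mandated
#   exports org.example.api
#   contains org.example.internal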
_versioned_prefix = 'META-INF/versions/'
_special_versioned_prefix = 'META-INF/_versions/' # used for versioned services
_versioned_re = re.compile(r'META-INF/_?versions/([1-9][0-9]*)/(.+)')
_javamodule_buildlevel = None
def make_java_module(dist, jdk, archive, javac_daemon=None, alt_module_info_name=None):
"""
Creates a Java module from a distribution.
This updates the jar (or exploded jar) by adding `module-info` classes.
The `META-INF` directory can not be versioned. However, we make an exception here for `META-INF/services`:
if different versions should have different service providers, a `META-INF/_versions/<version>/META-INF/services`
directory can be used (note the `_` before `versions`).
These service provider declarations will be used to build the versioned module-info files and the
`META-INF/_versions/<version>` directories will be removed from the archive.
This is done using a separate versioning directory so that the JAR can be a valid multi-release JAR before this
transformation.
input:
com/foo/MyProvider.class # JDK 8 or earlier specific provider
META-INF/services/com.foo.MyService # Contains: com.foo.MyProvider
META-INF/_versions/9/META-INF/services/com.foo.MyService # Contains: com.foo.MyProvider
META-INF/versions/9/com/foo/MyProvider.class # JDK 9 and 10 specific provider
META-INF/_versions/11/META-INF/services/com.foo.MyService # Contains: provides com.foo.MyService with com.foo.MyProvider
META-INF/versions/11/com/foo/MyProvider.class # JDK 11 and later specific provider
output:
com/foo/MyProvider.class # JDK 8 or earlier specific provider
META-INF/services/com.foo.MyService # Contains: com.foo.MyProvider
META-INF/versions/9/module-info.class # Contains: provides com.foo.MyService with com.foo.MyProvider
META-INF/versions/9/com/foo/MyProvider.class # JDK 9 and 10 specific provider
META-INF/versions/11/module-info.class # Contains: provides com.foo.MyService with com.foo.MyProvider
META-INF/versions/11/com/foo/MyProvider.class # JDK 11 and later specific provider
:param JARDistribution dist: the distribution from which to create a module
:param JDKConfig jdk: a JDK with a version >= 9 that can be used to compile the module-info class
:param _Archive archive: info about the jar being converted to a module
:param CompilerDaemon javac_daemon: compiler daemon (if not None) to use for compiling module-info.java
:param str alt_module_info_name: name of alternative module descriptor in `dist` (in the attribute "moduleInfo:" + `alt_module_info_name`)
:return: the `JavaModuleDescriptor` for the latest version of the created Java module
"""
info = get_java_module_info(dist)
if info is None:
return None
from mx_jardistribution import _FileContentsSupplier, _Archive, _staging_dir_suffix
times = []
with mx.Timer('total', times):
moduleName, _, module_jar = info # pylint: disable=unpacking-non-sequence
exports = {}
requires = {}
opens = {}
concealedRequires = {}
base_uses = set()
modulepath = list()
with mx.Timer('requires', times):
if dist.suite.getMxCompatibility().moduleDepsEqualDistDeps():
module_deps = dist.archived_deps()
for dep in mx.classpath_entries(dist, includeSelf=False):
if dep.isJARDistribution():
jmd = as_java_module(dep, jdk)
modulepath.append(jmd)
requires[jmd.name] = {jdk.get_transitive_requires_keyword()}
elif (dep.isJdkLibrary() or dep.isJreLibrary()) and dep.is_provided_by(jdk):
pass
elif dep.isLibrary():
jmd = get_library_as_module(dep, jdk)
modulepath.append(jmd)
requires[jmd.name] = set()
else:
mx.abort(dist.name + ' cannot depend on ' + dep.name + ' as it does not define a module')
else:
module_deps = get_module_deps(dist)
jdk_modules = list(jdk.get_modules())
java_projects = [d for d in module_deps if d.isJavaProject()]
java_libraries = [d for d in module_deps if d.isLibrary()]
# Collect packages in the module first
with mx.Timer('packages', times):
module_packages = set()
for project in java_projects:
module_packages.update(project.defined_java_packages())
# Collect the required modules denoted by the dependencies of each project
entries = mx.classpath_entries(project, includeSelf=False)
for e in entries:
e_module_name = e.get_declaring_module_name()
if e_module_name and e_module_name != moduleName:
requires.setdefault(e_module_name, set())
for library in java_libraries:
module_packages.update(library.defined_java_packages())
def _parse_packages_spec(packages_spec, available_packages, project_scope):
"""
Parses a packages specification against a set of available packages:
"org.graalvm.foo,org.graalvm.bar" -> set("org.graalvm.foo", "org.graalvm.bar")
"<package-info>" -> set of all entries in `available_packages` denoting a package with a package-info.java file
"org.graalvm.*" -> set of all entries in `available_packages` that start with "org.graalvm."
"org.graalvm.compiler.code" -> set("org.graalvm.compiler.code")
:param dict available_packages: map from package names to JavaCompliance values
:return dict: entries from `available_packages` selected by `packages_spec`
"""
if not packages_spec:
mx.abort('exports attribute cannot have entry with empty packages specification', context=dist)
res = set()
for spec in packages_spec.split(','):
if spec.endswith('*'):
prefix = spec[0:-1]
selection = set(p for p in available_packages if p.startswith(prefix))
if not selection:
mx.abort('The export package specifier "{}" does not match any of {}'.format(spec, available_packages), context=dist)
res.update(selection)
elif spec == '<package-info>':
if not isinstance(project_scope, mx.Project):
mx.abort('The export package specifier "<package-info>" can only be used in a project, not a distribution', context=dist)
res.update(mx._find_packages(project_scope, onlyPublic=True))
else:
if spec not in module_packages:
mx.abort('Cannot export package {0} from {1} as it is not defined by any project in the module {1}'.format(spec, moduleName), context=dist)
if project_scope and spec not in available_packages and project_scope.suite.requiredMxVersion >= mx.VersionSpec("5.226.1"):
mx.abort('Package {} in "exports" attribute not defined by project {}'.format(spec, project_scope), context=project_scope)
res.add(spec)
return res
def _process_exports(export_specs, available_packages, project_scope=None):
unqualified_exports = []
for export in export_specs:
if ' to ' in export:
splitpackage = export.split(' to ')
packages_spec = splitpackage[0].strip()
targets = [n.strip() for n in splitpackage[1].split(',')]
if not targets:
mx.abort('exports attribute must have at least one target for qualified export', context=dist)
for p in _parse_packages_spec(packages_spec, available_packages, project_scope):
exports.setdefault(p, set()).update(targets)
else:
unqualified_exports.append(export)
for unqualified_export in unqualified_exports:
for p in _parse_packages_spec(unqualified_export, available_packages, project_scope):
exports[p] = set()
module_info = getattr(dist, 'moduleInfo', None)
alt_module_info = None
if alt_module_info_name is not None:
assert not archive.exploded, archive
assert isinstance(alt_module_info_name, str)
alt_module_info_attr_name = 'moduleInfo:' + alt_module_info_name
alt_module_info = getattr(dist, alt_module_info_attr_name, None)
if alt_module_info is None or not isinstance(alt_module_info, dict):
mx.abort('"{}" attribute must be a dictionary'.format(alt_module_info_attr_name), context=dist)
if module_info is None:
mx.abort('"{}" attribute found but required "moduleInfo" attribute is missing'.format(alt_module_info_attr_name), context=dist)
invalid = [k for k in alt_module_info.keys() if k != 'exports']
if invalid:
mx.abort('Sub-attribute(s) "{}" of "{}" attribute not supported. Only "exports" is currently supported.'.format('", "'.join(invalid), alt_module_info_attr_name), context=dist)
alt_module_jar = join(dirname(module_jar), basename(module_jar)[:-len('.jar')] + '-' + alt_module_info_name + '.jar')
alt_module_src_zip = alt_module_jar[:-len('.jar')] + '.src.zip'
module_src_zip = module_jar[:-len('.jar')] + '.src.zip'
def replicate(src, dst):
"""
Replicates `src` at `dst`.
If `src` does not exist, `dst` is deleted.
If `exploded` is True, `src` is assumed to be a directory and it is deep copied to `dst`,
otherwise `src` is assumed to be a normal file and is copied to `dst`.
"""
if isdir(dst) and not islink(dst):
mx.rmtree(dst)
elif exists(dst):
os.remove(dst)
if exists(src):
if isdir(src):
mx.copytree(src, dst, symlinks=True)
else:
shutil.copy(src, dst)
replicate(module_jar, alt_module_jar)
replicate(module_jar + _staging_dir_suffix, alt_module_jar + _staging_dir_suffix)
replicate(module_src_zip, alt_module_src_zip)
module_jar = alt_module_jar
module_jar_staging_dir = module_jar + _staging_dir_suffix
alternatives = {alt_module_info_name : None}
elif not archive.exploded:
alt_module_info_names = [key[len('moduleInfo:'):] for key in dir(dist) if key.startswith('moduleInfo:')]
alternatives = {
name : make_java_module(dist, jdk, archive, javac_daemon=javac_daemon, alt_module_info_name=name)
for name in alt_module_info_names
}
module_jar_staging_dir = module_jar + _staging_dir_suffix
else:
alternatives = {}
module_jar_staging_dir = module_jar
mx.log('Building Java module {} ({}) from {}'.format(moduleName, basename(module_jar), dist.name))
if module_info:
for entry in module_info.get("requires", []):
parts = entry.split()
qualifiers = parts[0:-1]
name = parts[-1]
requires.setdefault(name, set()).update(qualifiers)
base_uses.update(module_info.get('uses', []))
_process_exports((alt_module_info or module_info).get('exports', []), module_packages)
opens = module_info.get('opens', {})
requires_concealed = module_info.get('requiresConcealed', None)
if requires_concealed is not None:
parse_requiresConcealed_attribute(jdk, requires_concealed, concealedRequires, None, dist, modulepath)
enhanced_module_usage_info = dist.suite.getMxCompatibility().enhanced_module_usage_info()
with mx.Timer('projects', times):
for project in java_projects:
base_uses.update(getattr(project, 'uses', []))
for m in getattr(project, 'runtimeDeps', []):
requires.setdefault(m, set()).add('static')
if not enhanced_module_usage_info:
# In the absence of "requiresConcealed" and "requires" attributes, the import statements
# in the Java sources need to be scanned to determine what modules are
# required and what concealed packages are used.
allmodules = modulepath + jdk_modules
for pkg in itertools.chain(project.imported_java_packages(projectDepsOnly=False), getattr(project, 'imports', [])):
# Only consider packages not defined by the module we're creating. This handles the
# case where we're creating a module that will upgrade an existing upgradeable
# module in the JDK such as jdk.internal.vm.compiler.
if pkg not in module_packages:
module, visibility = lookup_package(allmodules, pkg, moduleName)
if module and module.name != moduleName:
requires.setdefault(module.name, set())
if visibility != 'exported':
assert visibility == 'concealed'
concealedRequires.setdefault(module.name, set()).add(pkg)
else:
for module, packages in project.get_concealed_imported_packages(jdk).items():
concealedRequires.setdefault(module, set()).update(packages)
for module in getattr(project, 'requires', []):
requires.setdefault(module, set())
if not module_info:
# If neither an "exports" nor distribution-level "moduleInfo" attribute is present,
# all packages are exported.
default_exported_java_packages = [] if module_info else project.defined_java_packages()
_process_exports(getattr(project, 'exports', default_exported_java_packages), project.defined_java_packages(), project)
if enhanced_module_usage_info:
with mx.Timer('libraries', times):
for library in java_libraries:
base_uses.update(getattr(library, 'uses', []))
for m in getattr(library, 'runtimeDeps', []):
requires.setdefault(m, set()).add('static')
requires_concealed = getattr(library, 'requiresConcealed', None)
if requires_concealed is not None:
concealed = {}
parse_requiresConcealed_attribute(jdk, requires_concealed, concealed, None, library)
for module, packages in concealed.items():
concealedRequires.setdefault(module, set()).update(packages)
for module in getattr(library, 'requires', []):
requires.setdefault(module, set())
if hasattr(library, 'exports'):
_process_exports(getattr(library, 'exports'), library.defined_java_packages(), library)
if not module_info:
mx.warn("Module {} re-packages library {} but doesn't have a `moduleInfo` attribute. Note that library packages are not auto-exported")
build_directory = mx.ensure_dir_exists(module_jar + ".build")
try:
files_to_remove = set()
# To compile module-info.java, all classes it references must either be given
# as Java source files or already exist as class files in the output directory.
# This is due to the constraint that all the classes in a module must be in
# a single directory (or jar).
# As such, the jar file for each constituent distribution must be unpacked
# in the output directory.
# Set of ints representing version numbers
versions = set()
# List of 4-tuples representing a versioned resource:
# str arcname: name of resource within its archive
# _ArchiveEntry entry: describes the contents of the resource
# int version: earliest Java version in which resource is valid
# str unversioned_name: name of the resource in a version-flattened archive
versioned = []
# List of 2-tuples representing a versioned resource:
# str arcname: name of resource within its archive
# _ArchiveEntry entry: describes the contents of the resource
unversioned = []
for arcname, entry in archive.entries.items():
m = _versioned_re.match(arcname)
if m:
version = int(m.group(1))
versions.add(version)
if version > jdk.javaCompliance.value:
# Ignore resource whose version is too high
continue
unversioned_name = m.group(2)
if not archive.exploded:
if unversioned_name.startswith('META-INF/services/'):
files_to_remove.add(arcname)
elif unversioned_name.startswith('META-INF/'):
mx.abort("META-INF resources can not be versioned and will make modules fail to load ({}).".format(arcname))
versioned.append((arcname, entry, version, unversioned_name))
else:
unversioned.append((arcname, entry))
if archive.exploded:
jmod_version = None
all_versions = [str(jdk.javaCompliance.value)]
else:
# Ensure that created .jmod is compatible with the default JDK
default_jdk = mx.get_jdk(tag=mx.DEFAULT_JDK_TAG)
try:
jmod_version = str(max(v for v in versions if v <= default_jdk.javaCompliance.value))
except ValueError:
jmod_version = None if default_jdk.javaCompliance < '9' else 'common'
# Sort versions in increasing order as expected by the rest of the code
all_versions = [str(v) for v in sorted(versions)]
if '9' not in all_versions:
# 9 is the first version that supports modules and can be versioned in the JAR:
# if there is no `META-INF/versions/9` then we should add a `module-info.class`
# to the root of the JAR so that the module works on JDK 9.
all_versions = ['common'] + all_versions
assert jmod_version is None or jmod_version in all_versions
for version in all_versions:
restore_files = {}
with mx.Timer('jmd@' + version, times):
uses = base_uses.copy()
provides = {}
int_version = int(version) if version != 'common' else -1
# Modify staging directory in-situ
dest_dir = module_jar_staging_dir
if not archive.exploded:
def create_missing_dirs(path):
if not exists(path):
create_missing_dirs(dirname(path))
os.mkdir(path)
_Archive.create_jdk_8268216(path)
def sync_file(src, dst, restore_files):
"""
Ensures that `dst` points at or contains the same contents as `src`.
:param dict restore_files: map from `dst` to a callable that will restore its original
content or to None should `dst` be deleted once the module-info.class has
been produced
"""
while islink(src):
src = os.readlink(src)
if not mx.can_symlink():
mx.ensure_dir_exists(dirname(dst))
if exists(dst):
restore_files[dst] = _FileContentsSupplier(dst, eager=True).restore
os.remove(dst)
else:
restore_files[dst] = None
shutil.copy(src, dst)
else:
if exists(dst):
if islink(dst):
target = os.readlink(dst)
if target == src:
return
if mx.is_windows() and target.startswith('\\\\?\\') and target[4:] == src:
# os.readlink was changed in python 3.8 to include a \\?\ prefix on Windows
return
restore_files[dst] = lambda: os.symlink(target, dst)
else:
restore_files[dst] = _FileContentsSupplier(dst, eager=True).restore
os.remove(dst)
else:
restore_files[dst] = None
create_missing_dirs(dirname(dst))
os.symlink(src, dst)
# Put versioned resources into their non-versioned locations
for arcname, entry, entry_version, unversioned_name in versioned:
if entry_version > int_version:
continue
if arcname.startswith(_special_versioned_prefix):
if not unversioned_name.startswith('META-INF/services'):
raise mx.abort("The special versioned directory ({}) is only supported for META-INF/services files. Got {}".format(_special_versioned_prefix, name))
if unversioned_name:
dst = join(dest_dir, unversioned_name)
sync_file(entry.staged, dst, restore_files)
services_dir = join(dest_dir, 'META-INF', 'services')
if exists(services_dir):
for servicePathName in os.listdir(services_dir):
if servicePathName == _Archive.jdk_8268216:
continue
# While a META-INF provider configuration file must use a fully qualified binary
# name[1] of the service, a provides directive in a module descriptor must use
# the fully qualified non-binary name[2] of the service.
#
# [1] https://docs.oracle.com/javase/9/docs/api/java/util/ServiceLoader.html
# [2] https://docs.oracle.com/javase/9/docs/api/java/lang/module/ModuleDescriptor.Provides.html#service--
service = servicePathName.replace('$', '.')
assert '/' not in service
with open(join(services_dir, servicePathName)) as fp:
serviceContent = fp.read()
provides.setdefault(service, set()).update(provider.replace('$', '.') for provider in serviceContent.splitlines())
# Service types defined in the module are assumed to be used by the module
serviceClassfile = service.replace('.', '/') + '.class'
if exists(join(dest_dir, serviceClassfile)):
uses.add(service)
def exported_package_exists(p):
package_exists = exists(join(dest_dir, p.replace('.', os.sep)))
if not package_exists and dist.suite.getMxCompatibility().enforce_spec_compliant_exports():
pp = [proj for proj in java_projects if p in proj.defined_java_packages()][0]
dist.abort('Modular multi-release JARs cannot export packages defined only by versioned projects: '
'{} is defined by {} with multiReleaseJarVersion={}'.format(p, pp, pp.multiReleaseJarVersion))
return package_exists
# Exports of modular multi-release JARs must be exactly the same in all versions,
# but for backward compatibility we tolerate version-specific exports.
exports_clean = {p: exports[p] for p in exports if exported_package_exists(p)}
requires_clean = {}
for required_module_spec, requires_directives in requires.items():
if '@' in required_module_spec:
module_name, java_compliance = required_module_spec.split('@', 1)
module_java_compliance = mx_javacompliance.JavaCompliance(java_compliance)
if module_java_compliance not in jdk.javaCompliance:
continue
else:
module_name = required_module_spec
requires_clean[module_name] = requires_directives
jmd = JavaModuleDescriptor(moduleName, exports_clean, requires_clean, uses, provides, packages=module_packages, concealedRequires=concealedRequires,
jarpath=module_jar, dist=dist, modulepath=modulepath, alternatives=alternatives, opens=opens)
# Compile module-info.class
module_info_java = join(dest_dir, 'module-info.java')
with open(module_info_java, 'w') as fp:
print(jmd.as_module_info(), file=fp)
with mx.Timer('compile@' + version, times):
def safe_path_arg(p):
r"""
Return `p` with all `\` characters replaced with `\\`, all spaces replaced
with `\ ` and the result enclosed in double quotes.
"""
return '"{}"'.format(p.replace('\\', '\\\\').replace(' ', '\\ '))
javac_args = ['-d', safe_path_arg(dest_dir)]
modulepath_jars = [m.jarpath for m in modulepath if m.jarpath]
# TODO we should rather use the right JDK
javac_args += ['-target', version if version != 'common' else '9', '-source', version if version != 'common' else '9']
# The --system=none and --limit-modules options are used to support distribution defined modules
# that override non-upgradeable modules in the source JDK (e.g. org.graalvm.sdk is part of a
# GraalVM JDK). This means --module-path needs to contain the jmods for the JDK modules.
javac_args.append('--system=none')
if requires_clean:
javac_args.append('--limit-modules=' + ','.join(requires_clean.keys()))
jdk_jmods = (mx.get_opts().jmods_dir or join(jdk.home, 'jmods'))
if not exists(jdk_jmods):
mx.abort('Missing directory containing JMOD files: ' + jdk_jmods)
modulepath_jars.extend((join(jdk_jmods, m) for m in os.listdir(jdk_jmods) if m.endswith('.jmod')))
javac_args.append('--module-path=' + safe_path_arg(os.pathsep.join(modulepath_jars)))
if concealedRequires:
for module, packages in concealedRequires.items():
for package in packages:
javac_args.append('--add-exports=' + module + '/' + package + '=' + moduleName)
# https://blogs.oracle.com/darcy/new-javac-warning-for-setting-an-older-source-without-bootclasspath
# Disable the "bootstrap class path not set in conjunction with -source N" warning
# as we're relying on the Java compliance of project to correctly specify a JDK range
# providing the API required by the project. Also disable the warning about unknown
# modules in qualified exports (not sure how to avoid these since we build modules
# separately).
javac_args.append('-Xlint:-options,-module')
javac_args.append(safe_path_arg(module_info_java))
# Convert javac args to @args file
javac_args_file = mx._derived_path(dest_dir, '.javac_args')
with open(javac_args_file, 'w') as fp:
fp.write(os.linesep.join(javac_args))
javac_args = ['@' + javac_args_file]
if javac_daemon:
javac_daemon.compile(javac_args)
else:
mx.run([jdk.javac] + javac_args, cmdlinefile=dest_dir + '.cmdline')
# Create .jmod for module
if version == jmod_version:
assert not archive.exploded
class HideDirectory(object):
def __init__(self, dirpath):
self.dirpath = dirpath
self.tmp_dirpath = None
def __enter__(self):
if exists(self.dirpath):
self.tmp_dirpath = join(build_directory, '{}_{}.{}'.format(version, basename(self.dirpath), os.getpid()))
os.rename(self.dirpath, self.tmp_dirpath)
def __exit__(self, exc_type, exc_value, traceback):
if self.tmp_dirpath:
os.rename(self.tmp_dirpath, self.dirpath)
# Temporarily move META-INF/services and META-INF/versions out of dest_dir
# so that they do not end up in the jmod.
with HideDirectory(join(dest_dir, 'META-INF', 'services')), HideDirectory(join(dest_dir, 'META-INF', 'versions')):
jmod_path = jmd.get_jmod_path(respect_stripping=False, alt_module_info_name=alt_module_info_name)
if exists(jmod_path):
os.remove(jmod_path)
jdk_jmod = join(default_jdk.home, 'jmods', basename(jmod_path))
jmod_args = ['create', '--class-path=' + dest_dir]
if not dist.is_stripped():
# There is a ProGuard bug that corrupts the ModuleTarget
# attribute of module-info.class.
target_os = mx.get_os()
target_os = 'macos' if target_os == 'darwin' else target_os
target_arch = mx.get_arch()
jmod_args.append('--target-platform={}-{}'.format(target_os, target_arch))
if exists(jdk_jmod):
with ZipFile(jdk_jmod, 'r') as zf:
# Copy commands and legal notices (if any) from JDK version of the module
for jmod_dir, jmod_option in (('bin', '--cmds'), ('legal', '--legal-notices')):
entries = [name for name in zf.namelist() if name.startswith(jmod_dir + '/')]
if entries:
extracted_dir = join(dest_dir, jmod_dir)
assert not exists(extracted_dir), extracted_dir
zf.extractall(dest_dir, entries)
entries_dir = mx._derived_path(dest_dir, '.' + jmod_dir)
if exists(entries_dir):
shutil.rmtree(entries_dir)
os.rename(extracted_dir, entries_dir)
jmod_args.extend([jmod_option, join(entries_dir)])
mx.run([default_jdk.exe_path('jmod')] + jmod_args + [jmod_path])
with mx.Timer('jar@' + version, times):
if not archive.exploded:
# Append the module-info.class
module_info_arc_dir = '' if version == 'common' else _versioned_prefix + version + '/'
with ZipFile(module_jar, 'a') as zf:
module_info_class = join(dest_dir, 'module-info.class')
arcname = module_info_arc_dir + basename(module_info_class)
zf.write(module_info_class, arcname)
os.remove(module_info_class)
if restore_files:
for dst, restore in restore_files.items():
os.remove(dst)
if restore is not None:
restore()
if files_to_remove:
with mx.Timer('cleanup', times), mx.SafeFileCreation(module_jar) as sfc:
with ZipFile(module_jar, 'r') as inzf, ZipFile(sfc.tmpPath, 'w', inzf.compression) as outzf:
for info in inzf.infolist():
if info.filename not in files_to_remove:
outzf.writestr(info, inzf.read(info))
finally:
if not mx.get_opts().verbose:
# Preserve build directory so that javac command can be re-executed
# by cutting and pasting verbose output.
mx.rmtree(build_directory)
jmd.save()
mx.logv('[' + moduleName + ' times: ' + ', '.join(['{}={:.3f}s'.format(name, secs) for name, secs in sorted(times, key=lambda pair: pair[1], reverse=True)]) + ']')
assert version == (str(max(versions)) if versions else str(jdk.javaCompliance.value) if archive.exploded else 'common')
return jmd
def get_transitive_closure(roots, observable_modules):
"""
Gets the transitive closure of the dependencies of a set of root modules
(i.e. `roots`) with respect to a set of observable modules (i.e. `observable_modules`)
:param iterable roots: the roots modules (JavaModulesDescriptors or module names) for
which the transitive closure is being requested
:param iterable observable_modules: set of modules in which the transitive dependencies must exist
"""
name_to_module = {m.name : m for m in observable_modules}
transitive_closure = set()
def lookup_module(name):
m = name_to_module.get(name, None)
if m is None:
mx.abort('{} is not in the set of observable modules {}'.format(name, list(name_to_module.keys())))
return m
def add_transitive(mod):
if mod not in transitive_closure:
transitive_closure.add(mod)
for name in mod.requires.keys():
add_transitive(lookup_module(name))
for root in roots:
if isinstance(root, str):
root = lookup_module(root)
add_transitive(root)
return transitive_closure
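# Example (illustrative): given descriptors jmd_a (which requires 'b') and
# jmd_b in the observable set, closing over the root module name 'a' yields
# both modules:
#
# >>> get_transitive_closure(['a'], [jmd_a, jmd_b])
# {jmd_a, jmd_b}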
def parse_requiresConcealed_attribute(jdk, value, result, importer, context, modulepath=None):
"""
Parses the "requiresConcealed" attribute value in `value` and updates `result`
which is a dict from module name to set of package names.
:param str importer: the name of the module importing the packages ("<unnamed>" or None denotes the unnamed module)
:param context: context value to use when reporting errors
:return: `result`
"""
if value is None:
return result
all_modules = (modulepath or []) + list(jdk.get_modules())
if not isinstance(value, dict):
mx.abort('"requiresConcealed" attribute must be a dict', context=context)
for module, packages in value.items():
if '@' in module:
module, java_compliance = module.split('@', 1)
java_compliance = mx_javacompliance.JavaCompliance(java_compliance, context=context)
if java_compliance not in jdk.javaCompliance:
continue
matches = [jmd for jmd in all_modules if jmd.name == module]
if not matches:
mx.abort('Module {} in "requiresConcealed" attribute does not exist in {}'.format(module, jdk), context=context)
jmd = matches[0]
package_set = result.setdefault(module, set())
if packages == '*':
star = True
packages = jmd.packages
else:
star = False
if not isinstance(packages, list):
mx.abort('Packages for module {} in "requiresConcealed" attribute must be either "*" or a list of package names'.format(module), context=context)
for package in packages:
if package.endswith('?'):
optional = True
package = package[0:-1]
else:
optional = False
visibility = jmd.get_package_visibility(package, importer)
if visibility == 'concealed':
package_set.add(package)
elif visibility == 'exported':
if not star:
suffix = '' if not importer else ' from module {}'.format(importer)
mx.warn('Package {} is not concealed in module {}{}'.format(package, module, suffix), context=context)
elif not optional:
m, _ = lookup_package(all_modules, package, importer)
suffix = '' if not m else ' but in module {}'.format(m.name)
mx.abort('Package {} is not defined in module {}{}'.format(package, module, suffix), context=context)
return result
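# Example (illustrative) of a "requiresConcealed" attribute value accepted by
# the parser above (module and package names are hypothetical):
#
#   {
#       "java.base": ["jdk.internal.misc"],
#       "jdk.internal.vm.ci@19..21": "*",             # only applied when the JDK compliance matches
#       "jdk.compiler": ["com.sun.tools.javac.api?"], # trailing '?' marks the package as optional
#   }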
def requiredExports(distributions, jdk):
"""
Collects requiredExports information for all passed-in distributions that are modules. The structure of this
information is described in the return value documentation.
:param distributions: list of Distribution objects that should be looked through for requiredExports information
:param JDKConfig jdk: a JDK with a version >= 9 that can be used to compile the module-info class
:return: A dictionary with (module_name, package_name) keys and values that are sets of `JavaModuleDescriptors` that require the export
described by the given key. For example: ('java.base', 'jdk.internal.module'): set([module:org.graalvm.nativeimage.pointsto,
module:org.graalvm.nativeimage.builder]) means that module java.base needs to be updated to export (i.e. --add-exports)
jdk.internal.module to the modules org.graalvm.nativeimage.pointsto and org.graalvm.nativeimage.builder.
"""
def _opt_as_java_module(dist):
if not get_java_module_info(dist, fatalIfNotModule=False):
return None
return as_java_module(dist, jdk, fatalIfNotCreated=False)
required_exports = defaultdict(set)
for dist in distributions:
target_module = _opt_as_java_module(dist)
if target_module:
target_module.collect_required_exports(required_exports)
return required_exports
|
graalvm/mx
|
mx_javamodules.py
|
Python
|
gpl-2.0
| 62,825
|
[
"VisIt"
] |
ef3502161090002fc8039a9159b97ad2254a0ec3b53c6bf98f9632208cf3f050
|
# Setup customisation for gpaw/cuda
#
# NB: variables such as library_dirs, include_dirs and define_macros are
# pre-defined by GPAW's setup machinery before this file is executed, which
# is why they are extended with '+=' below.
import os
# compiler and linker
compiler = 'gcc'
mpicompiler = 'mpicc'
mpilinker = 'mpicc'
extra_compile_args = ['-std=c99']
# libraries
libraries = ['z']
# cuda
library_dirs += [os.environ['CUDALIB'], './c/cuda']
include_dirs += [os.environ['CUDADIR'] + '/include']
libraries += ['gpaw-cuda', 'cublas', 'cudart', 'stdc++']
# libxc
library_dirs += [os.environ['LIBXCDIR'] + '/lib']
include_dirs += [os.environ['LIBXCDIR'] + '/include']
libraries += ['xc']
# GPAW defines
define_macros += [('GPAW_NO_UNDERSCORE_CBLACS', '1')]
define_macros += [('GPAW_NO_UNDERSCORE_CSCALAPACK', '1')]
define_macros += [("GPAW_ASYNC",1)]
define_macros += [("GPAW_MPI2",1)]
define_macros += [('GPAW_CUDA', '1')]
# ScaLAPACK
scalapack = True
# HDF5
hdf5 = False
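# Example (illustrative) environment expected by this customisation
# (the actual install paths are site-specific):
#   export CUDADIR=/usr/local/cuda
#   export CUDALIB=$CUDADIR/lib64
#   export LIBXCDIR=/appl/opt/libxc/3.0.0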
|
mlouhivu/build-recipes
|
gpaw-bundle/examples/taito-gpu-2017-01/setup/customize-cuda.py
|
Python
|
mit
| 785
|
[
"GPAW"
] |
3657a300ed3ac201de9d5b94f4a8776bb69a73474935b6f9ebcf6d9e0edb81db
|
from contextlib import closing
import os
import subprocess
import argweaver
import argweaver.smc
from compbio import arglib

# Python 3 compatibility: 'basestring' (used in iter_layout_smc below) only
# exists on Python 2
try:
    basestring
except NameError:
    basestring = str
def inorder_tree(tree):
queue = [("queue", tree.root)]
while queue:
cmd, node = queue.pop()
if cmd == "visit":
yield node
elif cmd == "queue":
if node.is_leaf():
yield node
else:
queue.extend(
[("queue", node.children[1]),
("visit", node),
("queue", node.children[0])])
def layout_tree_leaves(tree, age_func=lambda node: node.age):
layout = {}
y = 0
for node in inorder_tree(tree):
if node.is_leaf():
layout[node.name] = y
else:
y += (age_func(node) / 1e3) + 1
vals = layout.values()
mid = (max(vals) + min(vals)) / 2.0
for k, v in layout.items():
layout[k] = (v - mid)
return layout
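# Example (illustrative): for a small tree the returned layout maps each leaf
# name to a vertical offset centered on zero, e.g. {'a': -2.0, 'b': 0.0, 'c': 2.0}.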
def orient_tree(tree, last_tree, recomb_node):
"""
Flip nodes in tree to match last_tree.
"""
order = dict((n, i) for i, n in enumerate(last_tree.leaf_names()))
# make all leaves of recomb node have max order
for leaf in recomb_node.leaf_names():
order[leaf] = len(order)
# flip nodes in tree
for node in tree.postorder():
if node.is_leaf():
continue
assert len(node.children) == 2
if order[node.children[0].name] > order[node.children[1].name]:
node.children = [node.children[1], node.children[0]]
order[node.name] = min(order[node.children[0].name],
order[node.children[1].name])
# flip node above subtree if needed
inorder = dict((n.name, i) for i, n in enumerate(inorder_tree(last_tree)))
node = tree[recomb_node.name].parent
if inorder[node.children[0].name] > inorder[node.children[1].name]:
node.children = [node.children[1], node.children[0]]
def iter_layout_smc(smc, names=None):
age_func = lambda node: node.data["age"]
last_tree = None
spr = None
for item in smc:
if item["tag"] == "NAMES":
names = item["names"]
if item["tag"] == "SPR":
spr = item
elif item["tag"] == "TREE":
assert names is not None
tree = item["tree"]
if isinstance(tree, basestring):
raise Exception("Trees need to be parsed")
block = [item["start"]-1, item["end"]]
if last_tree:
orient_tree(tree, last_tree, last_tree[spr["recomb_node"]])
layout = layout_tree_leaves(tree, age_func=age_func)
layout2 = dict((name, layout[i]) for i, name in enumerate(names))
yield block, layout2
last_tree = tree
class ArgLayout(object):
"""
"""
def __init__(self):
self.chrom = "chr"
self.blocks = []
self.leaf_layout = []
def layout_smc(self, smc):
names = smc.header["names"]
self.chrom = smc.header["chrom"]
self.blocks = []
self.leaf_layout = []
for block, leaf_layout in iter_layout_smc(smc, names=names):
self.blocks.append(block)
self.leaf_layout.append(leaf_layout)
def read(self, filename):
self.blocks = []
self.leaf_layout = []
for block, leaf_layout in iter_arg_layout(filename):
self.chrom = block[0]
self.blocks.append([block[1] - 1, block[2]])
self.leaf_layout.append(leaf_layout)
def iter_arg_layout(filename):
"""
Iterate through an ARG layout file.
"""
with closing(argweaver.open_stream(filename, compress='bgzip')) as infile:
for line in infile:
tokens = line.rstrip().split("\t")
block = [tokens[0], int(tokens[1]), int(tokens[2])]
leaf_layout = {}
for i in range(3, len(tokens), 2):
leaf_layout[tokens[i]] = float(tokens[i+1])
yield block, leaf_layout
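# Example (illustrative) of one tab-separated line accepted by the parser above:
#
#   chr1    1       5000    sample1 -2.0    sample2 0.0     sample3 2.0
#
# i.e. chrom, start, end, then alternating leaf-name / y-coordinate pairs.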
def index_arg_layout(filename):
subprocess.call(["tabix", "-s", "1", "-b", "2", "-e", "3", "-f", filename])
def query_arg_layout(filename, chrom, start, end):
cmd = ["tabix", filename, "%s:%d-%d" % (chrom, start, end)]
null = open(os.devnull, 'w')
infile = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=null).stdout
return iter_arg_layout(infile)
def layout_arg(arg, start=None, end=None):
if start is None:
start = arg.start
if end is None:
end = arg.end
tree = arg.get_marginal_tree(start)
arglib.remove_single_lineages(tree)
last_pos = start
blocks = []
leaf_layout = []
layout_func = layout_tree_leaves
for spr in arglib.iter_arg_sprs(
arg, start=start, end=end, use_leaves=True):
blocks.append([last_pos, spr[0]])
leaf_layout.append(layout_func(tree))
inorder = dict((n, i) for i, n in enumerate(inorder_tree(tree)))
# determine SPR nodes
rnode = arglib.arg_lca(tree, spr[1][0], spr[0])
cnode = arglib.arg_lca(tree, spr[2][0], spr[0])
# determine best side for adding new sister
left = (inorder[rnode] < inorder[cnode])
# apply spr
arglib.apply_spr(tree, rnode, spr[1][1], cnode, spr[2][1], spr[0])
# adjust sister
rindex = rnode.parents[0].children.index(rnode)
if left and rindex != 0:
rnode.parents[0].children.reverse()
last_pos = spr[0]
blocks.append([last_pos, end])
leaf_layout.append(layout_func(tree))
return blocks, leaf_layout
|
mdrasmus/argweaver
|
argweaver/vis.py
|
Python
|
mit
| 5,663
|
[
"VisIt"
] |
9dd1575bc07bfa9574bae72e6a2f2ec5f7a63696f2264e260529b98df18c6b7a
|
#!/usr/bin/env python
#
# Data manager for reference data for the 'humann2' Galaxy tools
import json
import optparse
import os
import subprocess
import sys
# Utility functions for interacting with Galaxy JSON
def read_input_json(jsonfile):
"""Read the JSON supplied from the data manager tool
Returns a tuple (param_dict,extra_files_path)
'param_dict' is an arbitrary dictionary of parameters
input into the tool; 'extra_files_path' is the path
to a directory where output files must be put for the
receiving data manager to pick them up.
NB the directory pointed to by 'extra_files_path'
doesn't exist initially, it is the job of the script
to create it if necessary.
"""
    with open(jsonfile) as fh:
        params = json.loads(fh.read())
return (params['param_dict'],
params['output_data'][0]['extra_files_path'])
# Utility functions for creating data table dictionaries
#
# Example usage:
# >>> d = create_data_tables_dict()
# >>> add_data_table(d,'my_data')
# >>> add_data_table_entry(d,'my_data',dict(dbkey='hg19',value='human'))
# >>> add_data_table_entry(d,'my_data',dict(dbkey='mm9',value='mouse'))
# >>> print str(json.dumps(d))
def create_data_tables_dict():
"""Return a dictionary for storing data table information
Returns a dictionary that can be used with 'add_data_table'
and 'add_data_table_entry' to store information about a
data table. It can be converted to JSON to be sent back to
the data manager.
"""
d = {}
d['data_tables'] = {}
return d
def add_data_table(d, table):
"""Add a data table to the data tables dictionary
Creates a placeholder for a data table called 'table'.
"""
d['data_tables'][table] = []
def add_data_table_entry(d, table, entry):
"""Add an entry to a data table
Appends an entry to the data table 'table'. 'entry'
should be a dictionary where the keys are the names of
columns in the data table.
Raises an exception if the named data table doesn't
exist.
"""
try:
d['data_tables'][table].append(entry)
except KeyError:
raise Exception("add_data_table_entry: no table '%s'" % table)
def download_metaphlan2_db(data_tables, build, table_name, target_dir):
"""Download MetaPhlAn2 database
Creates references to the specified file(s) on the Galaxy
server in the appropriate data table (determined from the
file extension).
The 'data_tables' dictionary should have been created using
the 'create_data_tables_dict' and 'add_data_table' functions.
    Arguments:
    data_tables: a dictionary containing the data table info
    build: build identifier used as the dbkey (e.g. "v20")
    table_name: name of the table
    target_dir: directory to put copy or link to the data file
"""
cmd = "download_metaphlan2_db.py --output %s" % (target_dir)
db_dir = os.path.join(target_dir, build)
subprocess.check_call(cmd, shell=True)
os.rename(os.path.join(target_dir, "db_v20"), db_dir)
add_data_table_entry(
data_tables,
table_name,
dict(
dbkey=build,
value="mpa_v20_m200",
name="MetaPhlAn2 clade-specific marker genes",
path=db_dir))
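# Illustrative sketch (not part of the original tool) of the structure this
# builds, assuming target_dir is '/tmp/db':
#
# {'data_tables': {'metaphlan2_database': [
#     {'dbkey': 'v20',
#      'value': 'mpa_v20_m200',
#      'name': 'MetaPhlAn2 clade-specific marker genes',
#      'path': '/tmp/db/v20'}]}}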
if __name__ == "__main__":
print("Starting...")
# Read command line
parser = optparse.OptionParser(description='Download MetaPhlan2 database')
parser.add_option('--database', help="Database name")
options, args = parser.parse_args()
print("args : %s" % args)
# Check for JSON file
if len(args) != 1:
        sys.stderr.write("Need to supply JSON file name\n")
sys.exit(1)
jsonfile = args[0]
# Read the input JSON
params, target_dir = read_input_json(jsonfile)
# Make the target directory
print("Making %s" % target_dir)
os.mkdir(target_dir)
# Set up data tables dictionary
data_tables = create_data_tables_dict()
add_data_table(data_tables, "metaphlan2_database")
# Fetch data from specified data sources
if options.database == "db_v20":
download_metaphlan2_db(
data_tables,
"v20",
"metaphlan2_database",
target_dir)
# Write output JSON
print("Outputting JSON")
print(str(json.dumps(data_tables)))
    with open(jsonfile, 'w') as out:
out.write(json.dumps(data_tables))
print("Done.")
|
mblue9/tools-iuc
|
data_managers/data_manager_metaphlan2_database_downloader/data_manager/data_manager_metaphlan2_download.py
|
Python
|
mit
| 4,332
|
[
"Galaxy"
] |
bc8d90a29524d35c2f3ffda24ccaf61083275242e6668150d940c4789f8a7476
|
# encoding: utf-8
"""
A lightweight Traits like module.
This is designed to provide a lightweight, simple, pure Python version of
many of the capabilities of enthought.traits. This includes:
* Validation
* Type specification with defaults
* Static and dynamic notification
* Basic predefined types
* An API that is similar to enthought.traits
We don't support:
* Delegation
* Automatic GUI generation
* A full set of trait types. Most importantly, we don't provide container
traits (list, dict, tuple) that can trigger notifications if their
contents change.
* API compatibility with enthought.traits
There are also some important difference in our design:
* enthought.traits does not validate default values. We do.
We choose to create this module because we need these capabilities, but
we need them to be pure Python so they work in all Python implementations,
including Jython and IronPython.
Authors:
* Brian Granger
* Enthought, Inc. Some of the code in this file comes from enthought.traits
and is licensed under the BSD license. Also, many of the ideas also come
from enthought.traits even though our implementation is very different.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import inspect
import re
import sys
import types
from types import FunctionType
try:
from types import ClassType, InstanceType
ClassTypes = (ClassType, type)
except:
ClassTypes = (type,)
from .importstring import import_item
from IPython.utils import py3compat
SequenceTypes = (list, tuple, set, frozenset)
#-----------------------------------------------------------------------------
# Basic classes
#-----------------------------------------------------------------------------
class NoDefaultSpecified ( object ): pass
NoDefaultSpecified = NoDefaultSpecified()
class Undefined ( object ): pass
Undefined = Undefined()
class TraitError(Exception):
pass
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def class_of ( object ):
""" Returns a string containing the class name of an object with the
correct indefinite article ('a' or 'an') preceding it (e.g., 'an Image',
'a PlotValue').
"""
if isinstance( object, basestring ):
return add_article( object )
return add_article( object.__class__.__name__ )
def add_article ( name ):
""" Returns a string containing the correct indefinite article ('a' or 'an')
prefixed to the specified string.
"""
if name[:1].lower() in 'aeiou':
return 'an ' + name
return 'a ' + name
def repr_type(obj):
""" Return a string representation of a value and its type for readable
error messages.
"""
the_type = type(obj)
if (not py3compat.PY3) and the_type is InstanceType:
# Old-style class.
the_type = obj.__class__
msg = '%r %r' % (obj, the_type)
return msg
def parse_notifier_name(name):
"""Convert the name argument to a list of names.
Examples
--------
>>> parse_notifier_name('a')
['a']
>>> parse_notifier_name(['a','b'])
['a', 'b']
>>> parse_notifier_name(None)
['anytrait']
"""
if isinstance(name, str):
return [name]
elif name is None:
return ['anytrait']
elif isinstance(name, (list, tuple)):
for n in name:
assert isinstance(n, str), "names must be strings"
return name
class _SimpleTest:
def __init__ ( self, value ): self.value = value
def __call__ ( self, test ):
return test == self.value
def __repr__(self):
return "<SimpleTest(%r)" % self.value
def __str__(self):
return self.__repr__()
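# Example usage (illustrative sketch, not from the original module):
# _SimpleTest wraps a value so it can be called like a predicate, which is
# how traits()/class_traits() below filter on metadata values.
# >>> t = _SimpleTest(5)
# >>> t(5)
# True
# >>> t(6)
# False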
def getmembers(object, predicate=None):
"""A safe version of inspect.getmembers that handles missing attributes.
This is useful when there are descriptor based attributes that for
some reason raise AttributeError even though they exist. This happens
in zope.inteface with the __provides__ attribute.
"""
results = []
for key in dir(object):
try:
value = getattr(object, key)
except AttributeError:
pass
else:
if not predicate or predicate(value):
results.append((key, value))
results.sort()
return results
#-----------------------------------------------------------------------------
# Base TraitType for all traits
#-----------------------------------------------------------------------------
class TraitType(object):
"""A base class for all trait descriptors.
Notes
-----
Our implementation of traits is based on Python's descriptor
    protocol. This class is the base class for all such descriptors. The
only magic we use is a custom metaclass for the main :class:`HasTraits`
class that does the following:
1. Sets the :attr:`name` attribute of every :class:`TraitType`
instance in the class dict to the name of the attribute.
2. Sets the :attr:`this_class` attribute of every :class:`TraitType`
instance in the class dict to the *class* that declared the trait.
This is used by the :class:`This` trait to allow subclasses to
accept superclasses for :class:`This` values.
"""
metadata = {}
default_value = Undefined
info_text = 'any value'
def __init__(self, default_value=NoDefaultSpecified, **metadata):
"""Create a TraitType.
"""
if default_value is not NoDefaultSpecified:
self.default_value = default_value
if len(metadata) > 0:
if len(self.metadata) > 0:
self._metadata = self.metadata.copy()
self._metadata.update(metadata)
else:
self._metadata = metadata
else:
self._metadata = self.metadata
self.init()
def init(self):
pass
def get_default_value(self):
"""Create a new instance of the default value."""
return self.default_value
def instance_init(self, obj):
"""This is called by :meth:`HasTraits.__new__` to finish init'ing.
Some stages of initialization must be delayed until the parent
:class:`HasTraits` instance has been created. This method is
called in :meth:`HasTraits.__new__` after the instance has been
created.
        This method triggers the creation and validation of default values
        and also things like the resolution of str given class names in
        :class:`Type` and :class:`Instance`.
Parameters
----------
obj : :class:`HasTraits` instance
The parent :class:`HasTraits` instance that has just been
created.
"""
self.set_default_value(obj)
def set_default_value(self, obj):
"""Set the default value on a per instance basis.
This method is called by :meth:`instance_init` to create and
validate the default value. The creation and validation of
default values must be delayed until the parent :class:`HasTraits`
class has been instantiated.
"""
# Check for a deferred initializer defined in the same class as the
# trait declaration or above.
mro = type(obj).mro()
meth_name = '_%s_default' % self.name
for cls in mro[:mro.index(self.this_class)+1]:
if meth_name in cls.__dict__:
break
else:
# We didn't find one. Do static initialization.
dv = self.get_default_value()
newdv = self._validate(obj, dv)
obj._trait_values[self.name] = newdv
return
# Complete the dynamic initialization.
obj._trait_dyn_inits[self.name] = cls.__dict__[meth_name]
def __get__(self, obj, cls=None):
"""Get the value of the trait by self.name for the instance.
Default values are instantiated when :meth:`HasTraits.__new__`
is called. Thus by the time this method gets called either the
default value or a user defined value (they called :meth:`__set__`)
is in the :class:`HasTraits` instance.
"""
if obj is None:
return self
else:
try:
value = obj._trait_values[self.name]
except KeyError:
# Check for a dynamic initializer.
if self.name in obj._trait_dyn_inits:
value = obj._trait_dyn_inits[self.name](obj)
# FIXME: Do we really validate here?
value = self._validate(obj, value)
obj._trait_values[self.name] = value
return value
else:
raise TraitError('Unexpected error in TraitType: '
'both default value and dynamic initializer are '
'absent.')
except Exception:
# HasTraits should call set_default_value to populate
# this. So this should never be reached.
raise TraitError('Unexpected error in TraitType: '
'default value not set properly')
else:
return value
def __set__(self, obj, value):
new_value = self._validate(obj, value)
old_value = self.__get__(obj)
if old_value != new_value:
obj._trait_values[self.name] = new_value
obj._notify_trait(self.name, old_value, new_value)
def _validate(self, obj, value):
if hasattr(self, 'validate'):
return self.validate(obj, value)
elif hasattr(self, 'is_valid_for'):
valid = self.is_valid_for(value)
if valid:
return value
else:
raise TraitError('invalid value for type: %r' % value)
elif hasattr(self, 'value_for'):
return self.value_for(value)
else:
return value
def info(self):
return self.info_text
def error(self, obj, value):
if obj is not None:
e = "The '%s' trait of %s instance must be %s, but a value of %s was specified." \
% (self.name, class_of(obj),
self.info(), repr_type(value))
else:
e = "The '%s' trait must be %s, but a value of %r was specified." \
% (self.name, self.info(), repr_type(value))
raise TraitError(e)
def get_metadata(self, key):
return getattr(self, '_metadata', {}).get(key, None)
def set_metadata(self, key, value):
getattr(self, '_metadata', {})[key] = value
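# Illustrative sketch (not part of the original module): a minimal custom
# trait only needs a default_value, an info_text and a validate() method,
# since _validate() above dispatches to validate() when it is defined.
# >>> class NonNegativeInt(TraitType):
# ...     default_value = 0
# ...     info_text = 'a non-negative int'
# ...     def validate(self, obj, value):
# ...         if isinstance(value, int) and value >= 0:
# ...             return value
# ...         self.error(obj, value)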
#-----------------------------------------------------------------------------
# The HasTraits implementation
#-----------------------------------------------------------------------------
class MetaHasTraits(type):
"""A metaclass for HasTraits.
This metaclass makes sure that any TraitType class attributes are
instantiated and sets their name attribute.
"""
def __new__(mcls, name, bases, classdict):
"""Create the HasTraits class.
This instantiates all TraitTypes in the class dict and sets their
:attr:`name` attribute.
"""
# print "MetaHasTraitlets (mcls, name): ", mcls, name
# print "MetaHasTraitlets (bases): ", bases
# print "MetaHasTraitlets (classdict): ", classdict
for k,v in classdict.iteritems():
if isinstance(v, TraitType):
v.name = k
elif inspect.isclass(v):
if issubclass(v, TraitType):
vinst = v()
vinst.name = k
classdict[k] = vinst
return super(MetaHasTraits, mcls).__new__(mcls, name, bases, classdict)
def __init__(cls, name, bases, classdict):
"""Finish initializing the HasTraits class.
This sets the :attr:`this_class` attribute of each TraitType in the
class dict to the newly created class ``cls``.
"""
for k, v in classdict.iteritems():
if isinstance(v, TraitType):
v.this_class = cls
super(MetaHasTraits, cls).__init__(name, bases, classdict)
class HasTraits(object):
__metaclass__ = MetaHasTraits
def __new__(cls, **kw):
# This is needed because in Python 2.6 object.__new__ only accepts
# the cls argument.
new_meth = super(HasTraits, cls).__new__
if new_meth is object.__new__:
inst = new_meth(cls)
else:
inst = new_meth(cls, **kw)
inst._trait_values = {}
inst._trait_notifiers = {}
inst._trait_dyn_inits = {}
# Here we tell all the TraitType instances to set their default
# values on the instance.
for key in dir(cls):
# Some descriptors raise AttributeError like zope.interface's
# __provides__ attributes even though they exist. This causes
# AttributeErrors even though they are listed in dir(cls).
try:
value = getattr(cls, key)
except AttributeError:
pass
else:
if isinstance(value, TraitType):
value.instance_init(inst)
return inst
def __init__(self, **kw):
# Allow trait values to be set using keyword arguments.
# We need to use setattr for this to trigger validation and
# notifications.
for key, value in kw.iteritems():
setattr(self, key, value)
def _notify_trait(self, name, old_value, new_value):
# First dynamic ones
callables = self._trait_notifiers.get(name,[])
more_callables = self._trait_notifiers.get('anytrait',[])
callables.extend(more_callables)
# Now static ones
try:
cb = getattr(self, '_%s_changed' % name)
except:
pass
else:
callables.append(cb)
# Call them all now
for c in callables:
# Traits catches and logs errors here. I allow them to raise
if callable(c):
argspec = inspect.getargspec(c)
nargs = len(argspec[0])
# Bound methods have an additional 'self' argument
# I don't know how to treat unbound methods, but they
# can't really be used for callbacks.
if isinstance(c, types.MethodType):
offset = -1
else:
offset = 0
if nargs + offset == 0:
c()
elif nargs + offset == 1:
c(name)
elif nargs + offset == 2:
c(name, new_value)
elif nargs + offset == 3:
c(name, old_value, new_value)
else:
raise TraitError('a trait changed callback '
'must have 0-3 arguments.')
else:
raise TraitError('a trait changed callback '
'must be callable.')
def _add_notifiers(self, handler, name):
if not self._trait_notifiers.has_key(name):
nlist = []
self._trait_notifiers[name] = nlist
else:
nlist = self._trait_notifiers[name]
if handler not in nlist:
nlist.append(handler)
def _remove_notifiers(self, handler, name):
if self._trait_notifiers.has_key(name):
nlist = self._trait_notifiers[name]
try:
index = nlist.index(handler)
except ValueError:
pass
else:
del nlist[index]
def on_trait_change(self, handler, name=None, remove=False):
"""Setup a handler to be called when a trait changes.
This is used to setup dynamic notifications of trait changes.
Static handlers can be created by creating methods on a HasTraits
subclass with the naming convention '_[traitname]_changed'. Thus,
to create static handler for the trait 'a', create the method
_a_changed(self, name, old, new) (fewer arguments can be used, see
below).
Parameters
----------
handler : callable
A callable that is called when a trait changes. Its
signature can be handler(), handler(name), handler(name, new)
or handler(name, old, new).
name : list, str, None
If None, the handler will apply to all traits. If a list
of str, handler will apply to all names in the list. If a
str, the handler will apply just to that name.
remove : bool
If False (the default), then install the handler. If True
            then uninstall it.
"""
if remove:
names = parse_notifier_name(name)
for n in names:
self._remove_notifiers(handler, n)
else:
names = parse_notifier_name(name)
for n in names:
self._add_notifiers(handler, n)
@classmethod
def class_trait_names(cls, **metadata):
"""Get a list of all the names of this classes traits.
This method is just like the :meth:`trait_names` method, but is unbound.
"""
return cls.class_traits(**metadata).keys()
@classmethod
def class_traits(cls, **metadata):
"""Get a list of all the traits of this class.
This method is just like the :meth:`traits` method, but is unbound.
The TraitTypes returned don't know anything about the values
        that the various HasTraits instances are holding.
This follows the same algorithm as traits does and does not allow
for any simple way of specifying merely that a metadata name
exists, but has any value. This is because get_metadata returns
None if a metadata key doesn't exist.
"""
traits = dict([memb for memb in getmembers(cls) if \
isinstance(memb[1], TraitType)])
if len(metadata) == 0:
return traits
for meta_name, meta_eval in metadata.items():
if type(meta_eval) is not FunctionType:
metadata[meta_name] = _SimpleTest(meta_eval)
result = {}
for name, trait in traits.items():
for meta_name, meta_eval in metadata.items():
if not meta_eval(trait.get_metadata(meta_name)):
break
else:
result[name] = trait
return result
def trait_names(self, **metadata):
"""Get a list of all the names of this classes traits."""
return self.traits(**metadata).keys()
def traits(self, **metadata):
"""Get a list of all the traits of this class.
The TraitTypes returned don't know anything about the values
        that the various HasTraits instances are holding.
This follows the same algorithm as traits does and does not allow
for any simple way of specifying merely that a metadata name
exists, but has any value. This is because get_metadata returns
None if a metadata key doesn't exist.
"""
traits = dict([memb for memb in getmembers(self.__class__) if \
isinstance(memb[1], TraitType)])
if len(metadata) == 0:
return traits
for meta_name, meta_eval in metadata.items():
if type(meta_eval) is not FunctionType:
metadata[meta_name] = _SimpleTest(meta_eval)
result = {}
for name, trait in traits.items():
for meta_name, meta_eval in metadata.items():
if not meta_eval(trait.get_metadata(meta_name)):
break
else:
result[name] = trait
return result
def trait_metadata(self, traitname, key):
"""Get metadata values for trait by key."""
try:
trait = getattr(self.__class__, traitname)
except AttributeError:
raise TraitError("Class %s does not have a trait named %s" %
(self.__class__.__name__, traitname))
else:
return trait.get_metadata(key)
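# Illustrative usage sketch (assumes the Int trait defined later in this
# module): static and dynamic notification on a HasTraits subclass.
# >>> class Point(HasTraits):
# ...     x = Int(0)
# ...     def _x_changed(self, name, old, new):
# ...         print 'static handler:', old, '->', new
# >>> p = Point()
# >>> p.on_trait_change(lambda name, new: None, 'x')  # dynamic handler
# >>> p.x = 3
# static handler: 0 -> 3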
#-----------------------------------------------------------------------------
# Actual TraitTypes implementations/subclasses
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# TraitTypes subclasses for handling classes and instances of classes
#-----------------------------------------------------------------------------
class ClassBasedTraitType(TraitType):
"""A trait with error reporting for Type, Instance and This."""
def error(self, obj, value):
kind = type(value)
if (not py3compat.PY3) and kind is InstanceType:
msg = 'class %s' % value.__class__.__name__
else:
msg = '%s (i.e. %s)' % ( str( kind )[1:-1], repr( value ) )
if obj is not None:
e = "The '%s' trait of %s instance must be %s, but a value of %s was specified." \
% (self.name, class_of(obj),
self.info(), msg)
else:
e = "The '%s' trait must be %s, but a value of %r was specified." \
% (self.name, self.info(), msg)
raise TraitError(e)
class Type(ClassBasedTraitType):
"""A trait whose value must be a subclass of a specified class."""
def __init__ (self, default_value=None, klass=None, allow_none=True, **metadata ):
"""Construct a Type trait
A Type trait specifies that its values must be subclasses of
a particular class.
If only ``default_value`` is given, it is used for the ``klass`` as
well.
Parameters
----------
default_value : class, str or None
The default value must be a subclass of klass. If an str,
the str must be a fully specified class name, like 'foo.bar.Bah'.
The string is resolved into real class, when the parent
:class:`HasTraits` class is instantiated.
klass : class, str, None
Values of this trait must be a subclass of klass. The klass
may be specified in a string like: 'foo.bar.MyClass'.
The string is resolved into real class, when the parent
:class:`HasTraits` class is instantiated.
allow_none : boolean
Indicates whether None is allowed as an assignable value. Even if
``False``, the default value may be ``None``.
"""
if default_value is None:
if klass is None:
klass = object
elif klass is None:
klass = default_value
if not (inspect.isclass(klass) or isinstance(klass, basestring)):
raise TraitError("A Type trait must specify a class.")
self.klass = klass
self._allow_none = allow_none
super(Type, self).__init__(default_value, **metadata)
def validate(self, obj, value):
"""Validates that the value is a valid object instance."""
try:
if issubclass(value, self.klass):
return value
except:
if (value is None) and (self._allow_none):
return value
self.error(obj, value)
def info(self):
""" Returns a description of the trait."""
if isinstance(self.klass, basestring):
klass = self.klass
else:
klass = self.klass.__name__
result = 'a subclass of ' + klass
if self._allow_none:
return result + ' or None'
return result
def instance_init(self, obj):
self._resolve_classes()
super(Type, self).instance_init(obj)
def _resolve_classes(self):
if isinstance(self.klass, basestring):
self.klass = import_item(self.klass)
if isinstance(self.default_value, basestring):
self.default_value = import_item(self.default_value)
def get_default_value(self):
return self.default_value
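# Example (illustrative sketch): values of a Type trait must be classes,
# and are checked with issubclass() against klass.
# >>> class App(HasTraits):
# ...     container = Type(dict)
# >>> a = App()
# >>> a.container = dict      # accepted: dict is a subclass of dict
# >>> a.container = list      # raises TraitError: not a subclass of dict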
class DefaultValueGenerator(object):
"""A class for generating new default value instances."""
def __init__(self, *args, **kw):
self.args = args
self.kw = kw
def generate(self, klass):
return klass(*self.args, **self.kw)
class Instance(ClassBasedTraitType):
"""A trait whose value must be an instance of a specified class.
The value can also be an instance of a subclass of the specified class.
"""
def __init__(self, klass=None, args=None, kw=None,
allow_none=True, **metadata ):
"""Construct an Instance trait.
This trait allows values that are instances of a particular
class or its sublclasses. Our implementation is quite different
from that of enthough.traits as we don't allow instances to be used
for klass and we handle the ``args`` and ``kw`` arguments differently.
Parameters
----------
klass : class, str
The class that forms the basis for the trait. Class names
can also be specified as strings, like 'foo.bar.Bar'.
args : tuple
Positional arguments for generating the default value.
kw : dict
Keyword arguments for generating the default value.
allow_none : bool
Indicates whether None is allowed as a value.
Default Value
-------------
If both ``args`` and ``kw`` are None, then the default value is None.
If ``args`` is a tuple and ``kw`` is a dict, then the default is
        created as ``klass(*args, **kw)``. If only one of ``args`` or ``kw``
        is None, it is replaced by ``()`` or ``{}`` respectively.
"""
self._allow_none = allow_none
if (klass is None) or (not (inspect.isclass(klass) or isinstance(klass, basestring))):
raise TraitError('The klass argument must be a class'
' you gave: %r' % klass)
self.klass = klass
# self.klass is a class, so handle default_value
if args is None and kw is None:
default_value = None
else:
if args is None:
# kw is not None
args = ()
elif kw is None:
# args is not None
kw = {}
if not isinstance(kw, dict):
raise TraitError("The 'kw' argument must be a dict or None.")
if not isinstance(args, tuple):
raise TraitError("The 'args' argument must be a tuple or None.")
default_value = DefaultValueGenerator(*args, **kw)
super(Instance, self).__init__(default_value, **metadata)
def validate(self, obj, value):
if value is None:
if self._allow_none:
return value
self.error(obj, value)
if isinstance(value, self.klass):
return value
else:
self.error(obj, value)
def info(self):
if isinstance(self.klass, basestring):
klass = self.klass
else:
klass = self.klass.__name__
result = class_of(klass)
if self._allow_none:
return result + ' or None'
return result
def instance_init(self, obj):
self._resolve_classes()
super(Instance, self).instance_init(obj)
def _resolve_classes(self):
if isinstance(self.klass, basestring):
self.klass = import_item(self.klass)
def get_default_value(self):
"""Instantiate a default value instance.
This is called when the containing HasTraits classes'
:meth:`__new__` method is called to ensure that a unique instance
is created for each HasTraits instance.
"""
dv = self.default_value
if isinstance(dv, DefaultValueGenerator):
return dv.generate(self.klass)
else:
return dv
class This(ClassBasedTraitType):
"""A trait for instances of the class containing this trait.
    Because of how and when class bodies are executed, the ``This``
    trait can only have a default value of None. Because of this, and
    because we always validate default values, ``allow_none`` is *always* true.
"""
info_text = 'an instance of the same type as the receiver or None'
def __init__(self, **metadata):
super(This, self).__init__(None, **metadata)
def validate(self, obj, value):
# What if value is a superclass of obj.__class__? This is
# complicated if it was the superclass that defined the This
# trait.
if isinstance(value, self.this_class) or (value is None):
return value
else:
self.error(obj, value)
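# Example (illustrative sketch): This accepts instances of the declaring
# class or its subclasses, so it suits tree-like structures.
# >>> class Node(HasTraits):
# ...     parent = This()
# >>> n, m = Node(), Node()
# >>> n.parent = m            # accepted: m is a Node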
#-----------------------------------------------------------------------------
# Basic TraitTypes implementations/subclasses
#-----------------------------------------------------------------------------
class Any(TraitType):
default_value = None
info_text = 'any value'
class Int(TraitType):
"""An int trait."""
default_value = 0
info_text = 'an int'
def validate(self, obj, value):
if isinstance(value, int):
return value
self.error(obj, value)
class CInt(Int):
"""A casting version of the int trait."""
def validate(self, obj, value):
try:
return int(value)
except:
self.error(obj, value)
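# Example (illustrative sketch): Int rejects values that are not already
# ints, while the casting CInt coerces them with int().
# >>> class S(HasTraits):
# ...     a = Int(0)
# ...     b = CInt(0)
# >>> s = S()
# >>> s.b = '5'               # cast to 5
# >>> s.a = '5'               # raises TraitError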
if py3compat.PY3:
Long, CLong = Int, CInt
Integer = Int
else:
class Long(TraitType):
"""A long integer trait."""
default_value = 0L
info_text = 'a long'
def validate(self, obj, value):
if isinstance(value, long):
return value
if isinstance(value, int):
return long(value)
self.error(obj, value)
class CLong(Long):
"""A casting version of the long integer trait."""
def validate(self, obj, value):
try:
return long(value)
except:
self.error(obj, value)
class Integer(TraitType):
"""An integer trait.
Longs that are unnecessary (<= sys.maxint) are cast to ints."""
default_value = 0
info_text = 'an integer'
def validate(self, obj, value):
if isinstance(value, int):
return value
elif isinstance(value, long):
# downcast longs that fit in int:
# note that int(n > sys.maxint) returns a long, so
# we don't need a condition on this cast
return int(value)
self.error(obj, value)
class Float(TraitType):
"""A float trait."""
default_value = 0.0
info_text = 'a float'
def validate(self, obj, value):
if isinstance(value, float):
return value
if isinstance(value, int):
return float(value)
self.error(obj, value)
class CFloat(Float):
"""A casting version of the float trait."""
def validate(self, obj, value):
try:
return float(value)
except:
self.error(obj, value)
class Complex(TraitType):
"""A trait for complex numbers."""
default_value = 0.0 + 0.0j
info_text = 'a complex number'
def validate(self, obj, value):
if isinstance(value, complex):
return value
if isinstance(value, (float, int)):
return complex(value)
self.error(obj, value)
class CComplex(Complex):
"""A casting version of the complex number trait."""
def validate (self, obj, value):
try:
return complex(value)
except:
self.error(obj, value)
# We should always be explicit about whether we're using bytes or unicode, both
# for Python 3 conversion and for reliable unicode behaviour on Python 2. So
# we don't have a Str type.
class Bytes(TraitType):
"""A trait for byte strings."""
default_value = b''
info_text = 'a string'
def validate(self, obj, value):
if isinstance(value, bytes):
return value
self.error(obj, value)
class CBytes(Bytes):
"""A casting version of the byte string trait."""
def validate(self, obj, value):
try:
return bytes(value)
except:
self.error(obj, value)
class Unicode(TraitType):
"""A trait for unicode strings."""
default_value = u''
info_text = 'a unicode string'
def validate(self, obj, value):
if isinstance(value, unicode):
return value
if isinstance(value, bytes):
return unicode(value)
self.error(obj, value)
class CUnicode(Unicode):
"""A casting version of the unicode trait."""
def validate(self, obj, value):
try:
return unicode(value)
except:
self.error(obj, value)
class ObjectName(TraitType):
"""A string holding a valid object name in this version of Python.
This does not check that the name exists in any scope."""
info_text = "a valid object identifier in Python"
if py3compat.PY3:
# Python 3:
coerce_str = staticmethod(lambda _,s: s)
else:
# Python 2:
def coerce_str(self, obj, value):
"In Python 2, coerce ascii-only unicode to str"
if isinstance(value, unicode):
try:
return str(value)
except UnicodeEncodeError:
self.error(obj, value)
return value
def validate(self, obj, value):
value = self.coerce_str(obj, value)
if isinstance(value, str) and py3compat.isidentifier(value):
return value
self.error(obj, value)
class DottedObjectName(ObjectName):
"""A string holding a valid dotted object name in Python, such as A.b3._c"""
def validate(self, obj, value):
value = self.coerce_str(obj, value)
if isinstance(value, str) and py3compat.isidentifier(value, dotted=True):
return value
self.error(obj, value)
class Bool(TraitType):
"""A boolean (True, False) trait."""
default_value = False
info_text = 'a boolean'
def validate(self, obj, value):
if isinstance(value, bool):
return value
self.error(obj, value)
class CBool(Bool):
"""A casting version of the boolean trait."""
def validate(self, obj, value):
try:
return bool(value)
except:
self.error(obj, value)
class Enum(TraitType):
"""An enum that whose value must be in a given sequence."""
def __init__(self, values, default_value=None, allow_none=True, **metadata):
self.values = values
self._allow_none = allow_none
super(Enum, self).__init__(default_value, **metadata)
def validate(self, obj, value):
if value is None:
if self._allow_none:
return value
if value in self.values:
return value
self.error(obj, value)
def info(self):
""" Returns a description of the trait."""
result = 'any of ' + repr(self.values)
if self._allow_none:
return result + ' or None'
return result
class CaselessStrEnum(Enum):
"""An enum of strings that are caseless in validate."""
def validate(self, obj, value):
if value is None:
if self._allow_none:
return value
if not isinstance(value, basestring):
self.error(obj, value)
for v in self.values:
if v.lower() == value.lower():
return v
self.error(obj, value)
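# Example (illustrative sketch): CaselessStrEnum matches case-insensitively
# but stores the canonical value from the sequence.
# >>> class C(HasTraits):
# ...     mode = CaselessStrEnum(('Fast', 'Slow'), default_value='Fast')
# >>> c = C()
# >>> c.mode = 'slow'
# >>> c.mode
# 'Slow'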
class Container(Instance):
"""An instance of a container (list, set, etc.)
To be subclassed by overriding klass.
"""
klass = None
_valid_defaults = SequenceTypes
_trait = None
def __init__(self, trait=None, default_value=None, allow_none=True,
**metadata):
"""Create a container trait type from a list, set, or tuple.
The default value is created by doing ``List(default_value)``,
which creates a copy of the ``default_value``.
``trait`` can be specified, which restricts the type of elements
in the container to that TraitType.
If only one arg is given and it is not a Trait, it is taken as
``default_value``:
``c = List([1,2,3])``
Parameters
----------
trait : TraitType [ optional ]
the type for restricting the contents of the Container. If unspecified,
types are not checked.
default_value : SequenceType [ optional ]
The default value for the Trait. Must be list/tuple/set, and
will be cast to the container type.
allow_none : Bool [ default True ]
Whether to allow the value to be None
**metadata : any
further keys for extensions to the Trait (e.g. config)
"""
istrait = lambda t: isinstance(t, type) and issubclass(t, TraitType)
# allow List([values]):
if default_value is None and not istrait(trait):
default_value = trait
trait = None
if default_value is None:
args = ()
elif isinstance(default_value, self._valid_defaults):
args = (default_value,)
else:
raise TypeError('default value of %s was %s' %(self.__class__.__name__, default_value))
if istrait(trait):
self._trait = trait()
self._trait.name = 'element'
elif trait is not None:
raise TypeError("`trait` must be a Trait or None, got %s"%repr_type(trait))
super(Container,self).__init__(klass=self.klass, args=args,
allow_none=allow_none, **metadata)
def element_error(self, obj, element, validator):
e = "Element of the '%s' trait of %s instance must be %s, but a value of %s was specified." \
% (self.name, class_of(obj), validator.info(), repr_type(element))
raise TraitError(e)
def validate(self, obj, value):
value = super(Container, self).validate(obj, value)
if value is None:
return value
value = self.validate_elements(obj, value)
return value
def validate_elements(self, obj, value):
validated = []
if self._trait is None or isinstance(self._trait, Any):
return value
for v in value:
try:
v = self._trait.validate(obj, v)
except TraitError:
self.element_error(obj, v, self._trait)
else:
validated.append(v)
return self.klass(validated)
class List(Container):
"""An instance of a Python list."""
klass = list
def __init__(self, trait=None, default_value=None, minlen=0, maxlen=sys.maxint,
allow_none=True, **metadata):
"""Create a List trait type from a list, set, or tuple.
The default value is created by doing ``List(default_value)``,
which creates a copy of the ``default_value``.
``trait`` can be specified, which restricts the type of elements
in the container to that TraitType.
If only one arg is given and it is not a Trait, it is taken as
``default_value``:
``c = List([1,2,3])``
Parameters
----------
trait : TraitType [ optional ]
the type for restricting the contents of the Container. If unspecified,
types are not checked.
default_value : SequenceType [ optional ]
The default value for the Trait. Must be list/tuple/set, and
will be cast to the container type.
minlen : Int [ default 0 ]
The minimum length of the input list
maxlen : Int [ default sys.maxint ]
The maximum length of the input list
allow_none : Bool [ default True ]
Whether to allow the value to be None
**metadata : any
further keys for extensions to the Trait (e.g. config)
"""
self._minlen = minlen
self._maxlen = maxlen
super(List, self).__init__(trait=trait, default_value=default_value,
allow_none=allow_none, **metadata)
def length_error(self, obj, value):
e = "The '%s' trait of %s instance must be of length %i <= L <= %i, but a value of %s was specified." \
% (self.name, class_of(obj), self._minlen, self._maxlen, value)
raise TraitError(e)
def validate_elements(self, obj, value):
length = len(value)
if length < self._minlen or length > self._maxlen:
self.length_error(obj, value)
return super(List, self).validate_elements(obj, value)
class Set(Container):
"""An instance of a Python set."""
klass = set
class Tuple(Container):
"""An instance of a Python tuple."""
klass = tuple
def __init__(self, *traits, **metadata):
"""Tuple(*traits, default_value=None, allow_none=True, **medatata)
Create a tuple from a list, set, or tuple.
Create a fixed-type tuple with Traits:
``t = Tuple(Int, Str, CStr)``
        would have length 3, with Int, Str, CStr for each element.
If only one arg is given and it is not a Trait, it is taken as
default_value:
``t = Tuple((1,2,3))``
Otherwise, ``default_value`` *must* be specified by keyword.
Parameters
----------
*traits : TraitTypes [ optional ]
            the type for restricting the contents of the Tuple. If unspecified,
types are not checked. If specified, then each positional argument
corresponds to an element of the tuple. Tuples defined with traits
are of fixed length.
default_value : SequenceType [ optional ]
The default value for the Tuple. Must be list/tuple/set, and
will be cast to a tuple. If `traits` are specified, the
`default_value` must conform to the shape and type they specify.
allow_none : Bool [ default True ]
Whether to allow the value to be None
**metadata : any
further keys for extensions to the Trait (e.g. config)
"""
default_value = metadata.pop('default_value', None)
allow_none = metadata.pop('allow_none', True)
istrait = lambda t: isinstance(t, type) and issubclass(t, TraitType)
# allow Tuple((values,)):
if len(traits) == 1 and default_value is None and not istrait(traits[0]):
default_value = traits[0]
traits = ()
if default_value is None:
args = ()
elif isinstance(default_value, self._valid_defaults):
args = (default_value,)
else:
raise TypeError('default value of %s was %s' %(self.__class__.__name__, default_value))
self._traits = []
for trait in traits:
t = trait()
t.name = 'element'
self._traits.append(t)
if self._traits and default_value is None:
# don't allow default to be an empty container if length is specified
args = None
super(Container,self).__init__(klass=self.klass, args=args,
allow_none=allow_none, **metadata)
def validate_elements(self, obj, value):
if not self._traits:
# nothing to validate
return value
if len(value) != len(self._traits):
e = "The '%s' trait of %s instance requires %i elements, but a value of %s was specified." \
% (self.name, class_of(obj), len(self._traits), repr_type(value))
raise TraitError(e)
validated = []
for t,v in zip(self._traits, value):
try:
v = t.validate(obj, v)
except TraitError:
self.element_error(obj, v, t)
else:
validated.append(v)
return tuple(validated)
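# Example (illustrative sketch): a Tuple declared with traits is fixed
# length and validates each element against its positional trait.
# >>> class T(HasTraits):
# ...     pair = Tuple(Int, Unicode, default_value=(0, u''))
# >>> t = T()
# >>> t.pair = (1, u'one')    # accepted
# >>> t.pair = (1, 2)         # raises TraitError: element 2 is not unicode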
class Dict(Instance):
"""An instance of a Python dict."""
def __init__(self, default_value=None, allow_none=True, **metadata):
"""Create a dict trait type from a dict.
The default value is created by doing ``dict(default_value)``,
which creates a copy of the ``default_value``.
"""
if default_value is None:
args = ((),)
elif isinstance(default_value, dict):
args = (default_value,)
elif isinstance(default_value, SequenceTypes):
args = (default_value,)
else:
raise TypeError('default value of Dict was %s' % default_value)
super(Dict,self).__init__(klass=dict, args=args,
allow_none=allow_none, **metadata)
class TCPAddress(TraitType):
"""A trait for an (ip, port) tuple.
This allows for both IPv4 IP addresses as well as hostnames.
"""
default_value = ('127.0.0.1', 0)
info_text = 'an (ip, port) tuple'
def validate(self, obj, value):
if isinstance(value, tuple):
if len(value) == 2:
if isinstance(value[0], basestring) and isinstance(value[1], int):
port = value[1]
if port >= 0 and port <= 65535:
return value
self.error(obj, value)
class CRegExp(TraitType):
"""A casting compiled regular expression trait.
Accepts both strings and compiled regular expressions. The resulting
attribute will be a compiled regular expression."""
info_text = 'a regular expression'
def validate(self, obj, value):
try:
return re.compile(value)
except:
self.error(obj, value)
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/utils/traitlets.py
|
Python
|
lgpl-3.0
| 46,762
|
[
"Brian"
] |
0765b55785a24d5756499c97cb6b279dfcd3f1b9baa2632759ff6bb0c2e02232
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Queries the PubChem database using a compound name (e.g. 1,3,5-hexatriene)
to obtain a molecule string that can be passed to Molecule. ::
results = getPubChemObj("1,3,5-hexatriene")
Results is an array of results from PubChem matches to your query.
for entry in results:
entry["CID"] => PubChem compound identifer
entry["IUPAC"] => IUPAC name for the resulting compound
entry["PubChemObj"] => instance of PubChemObj for this compound
entry["PubChemObj"].getMoleculeString() => returns a string compatible
with Psi4's Molecule creation
"""
try:
# Python 2 syntax
from urllib2 import urlopen, Request
from urllib2 import quote
from urllib2 import URLError
except ImportError:
# Python 3 syntax
from urllib.request import urlopen, Request
from urllib.parse import quote
from urllib.error import URLError
import xml.etree.ElementTree as ET
import json
import time
import gzip
import re
import sys
import os
from psi4.driver.p4util.exceptions import *
class PubChemObj(object):
def __init__(self, cid, mf, iupac):
self.url = 'http://pubchem.ncbi.nlm.nih.gov/summary/summary.cgi'
self.cid = cid
self.mf = mf
self.iupac = iupac
self.natom = 0
self.dataSDF = ''
def __str__(self):
return "%17d %s\n" % (self.cid, self.iupac)
def getSDF(self):
"""Function to return the SDF (structure-data file) of the PubChem object."""
if (len(self.dataSDF) == 0):
def extract_xml_keyval(xml, key):
""" A useful helper function for parsing a single key from XML. """
try:
# Python 2.6 (ElementTree 1.2 API)
matches = xml.getiterator(key)
except:
# Python 2.7 (ElementTree 1.3 API)
matches = list(xml.iter(key))
if len(matches) == 0:
return None
elif len(matches) == 1:
return matches[0].text
else:
print(matches)
raise ValidationError("""PubChem: too many matches found %d""" % (len(matches)))
url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/%d/SDF?record_type=3d' % self.cid
req = Request(url, headers={'Accept' : 'chemical/x-mdl-sdfile'})
try:
self.dataSDF = urlopen(req).read().decode('utf-8')
except URLError as e:
msg = "Unable to open\n\n%s\n\ndue to the error\n\n%s\n\n" %(url, e)
msg += "It is possible that 3D information does not exist for this molecule in the PubChem database\n"
print(msg)
raise ValidationError(msg)
return self.dataSDF
def name(self):
"""Function to return the IUPAC name of the PubChem object."""
return self.iupac
def getCartesian(self):
"""Function to return a string of the atom symbol and XYZ
coordinates of the PubChem object.
"""
try:
sdfText = self.getSDF()
except Exception as e:
raise e
# Find
# NA NB CONSTANT
# 14 13 0 0 0 0 0 0 0999 V2000
m = re.search(r'^\s*(\d+)\s+(?:\d+\s+){8}V2000$', sdfText, re.MULTILINE)
self.natom = 0
if (m):
self.natom = int(m.group(1))
if self.natom == 0:
raise ValidationError("PubChem: Cannot find the number of atoms. 3D data doesn't appear\n" +
"to be available for %s.\n" % self.iupac)
lines = re.split('\n', sdfText)
# 3.7320 -0.2500 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
NUMBER = "((?:[-+]?\\d*\\.\\d+(?:[DdEe][-+]?\\d+)?)|(?:[-+]?\\d+\\.\\d*(?:[DdEe][-+]?\\d+)?))"
atom_re = re.compile(r'^\s*' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*(\w+)(?:\s+\d+){12}')
molecule_string = "PubchemInput\n"
atom_count = 0
for line in lines:
if (not line or line.isspace()):
continue
atom_match = atom_re.match(line)
if atom_match:
x = float(atom_match.group(1))
y = float(atom_match.group(2))
z = float(atom_match.group(3))
sym = atom_match.group(4)
atom_count = atom_count + 1
molecule_string += "%s %10.6f %10.6f %10.6f\n" % (sym, x, y, z)
if (atom_count == self.natom):
break
return molecule_string
def getXYZFile(self):
"""Function to obtain preferentially a molecule string
through getCartesian() or a query string otherwise.
"""
try:
temp = self.getCartesian()
except Exception as e:
raise
molstr = "%d\n%s\n%s" % (self.natom, self.iupac, temp)
return molstr
def getMoleculeString(self):
"""Function to obtain a molecule string through
getCartesian() or fail.
"""
try:
return self.getCartesian()
except Exception as e:
            return str(e)
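# Illustrative usage sketch (not from the original module; requires network
# access, and CID 962 for water is an assumption made for the example):
# >>> water = PubChemObj(962, 'H2O', 'oxidane')
# >>> print(water.getXYZFile())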
def getPubChemResults(name):
"""Function to query the PubChem database for molecules matching the
input string. Builds a PubChem object if found.
"""
print("\tSearching PubChem database for %s" % (name))
url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/name/%s/property/IUPACName,MolecularFormula/JSON' % quote(name)
try:
response = urlopen(url)
except URLError as e:
msg = "\tPubchemError\n%s\n\treceived when trying to open\n\t%s\n" % (str(e), url)
msg += "\tCheck your internet connection, and the above URL, and try again.\n"
raise ValidationError(msg)
data = json.loads(response.read().decode('utf-8'))
results = []
for d in data['PropertyTable']['Properties']:
        pubobj = PubChemObj(d['CID'], d['MolecularFormula'], d['IUPACName'])
results.append(pubobj)
print("\tFound %d result%s" % (len(results), "" if len(results)==1 else "s"))
return results
if __name__ == "__main__":
try:
#obj = getPubChemResults("1-methoxy-4-[(E)-prop-1-enyl]benzene")
#obj = getPubChemResults("sodium benzenesulfonate")
obj = getPubChemResults("4-[bis(4-hydroxyphenyl)methyl]phenol")
except Exception as e:
        print(e)
for r in obj:
print(r)
print(r.getMoleculeString())
|
rmcgibbo/psi4public
|
psi4/driver/pubchem.py
|
Python
|
lgpl-3.0
| 7,634
|
[
"Psi4"
] |
56ca4dc8e3ef087a799f7b5ac5f97cfcc62778c43fd3b2fb1864cc63f5a34383
|
""" Transformers to perform common preprocessing steps.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD
import numpy as np
import scipy.sparse as sp
from ..utils import check_arrays
from ..base import BaseEstimator, TransformerMixin
from ._preprocessing import inplace_csr_row_normalize_l1
from ._preprocessing import inplace_csr_row_normalize_l2
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std dev for centering, scaling
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asanyarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
if isinstance(std_, np.ndarray):
std_[std_ == 0.0] = 1.0
elif std_ == 0.:
std_ = 1.
else:
std_ = None
return mean_, std_
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Scaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if sp.issparse(X):
raise NotImplementedError(
"Scaling is not yet implement for sparse matrices")
X = np.asanyarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
if with_std:
Xr /= std_
return X
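# Example (illustrative sketch): each feature is centered and scaled to
# unit variance, so column means become ~0 and column stds become ~1.
# >>> scale(np.array([[1., 2.], [3., 4.]]))
# array([[-1., -1.],
#        [ 1.,  1.]])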
class Scaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
    Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
if sp.issparse(X):
raise NotImplementedError(
"Scaling is not yet implement for sparse matrices")
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
copy = copy if copy is not None else self.copy
if sp.issparse(X):
raise NotImplementedError(
"Scaling is not yet implement for sparse matrices")
X = np.asanyarray(X)
if copy:
X = X.copy()
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
copy = copy if copy is not None else self.copy
if sp.issparse(X):
raise NotImplementedError(
"Scaling is not yet implement for sparse matrices")
X = np.asanyarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
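# Illustrative usage sketch (X_train and X_test are hypothetical arrays):
# fit the statistics once, reuse them on new data, and invert if needed.
# >>> scaler = Scaler().fit(X_train)
# >>> Xt = scaler.transform(X_test)
# >>> X_back = scaler.inverse_transform(Xt)   # recovers X_test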
def normalize(X, norm='l2', axis=1, copy=True):
"""Normalize a dataset along any axis
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_arrays(X, sparse_format=sparse_format, copy=copy)[0]
if axis == 0:
X = X.T
if sp.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)[:, np.newaxis]
norms[norms == 0.0] = 1.0
elif norm == 'l2':
norms = np.sqrt(np.sum(X ** 2, axis=1))[:, np.newaxis]
norms[norms == 0.0] = 1.0
X /= norms
if axis == 0:
X = X.T
return X
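# Example (illustrative sketch): row-wise l2 normalization, so each row
# ends up with unit Euclidean norm.
# >>> normalize(np.array([[3., 4.], [1., 0.]]))
# array([[ 0.6,  0.8],
#        [ 1. ,  0. ]])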
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Parameters
----------
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Note
----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
The lower bound that triggers feature values to be replaced by 1.0.
copy : boolean, optional, default is True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_arrays(X, sparse_format='csr', copy=copy)[0]
if sp.issparse(X):
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
# FIXME: if enough values became 0, it may be worth changing
# the sparsity structure
X.data[not_cond] = 0
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
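# Example (illustrative sketch): values strictly above the threshold
# (0.0 by default) become 1, everything else becomes 0.
# >>> binarize(np.array([[0.5, -1.2], [0.0, 3.0]]))
# array([[ 1.,  0.],
#        [ 0.,  1.]])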
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
The default threshold is 0.0 so that any non-zero values are set to 1.0
and zeros are left untouched.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
    feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modeled using the Bernoulli
distribution in a Bayesian setting).
Parameters
----------
threshold : float, optional (0.0 by default)
The lower bound that triggers feature values to be replaced by 1.0.
copy : boolean, optional, default is True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
            unnecessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
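# A minimal sketch (editorial addition): with the default threshold of 0.0,
# strictly positive values become 1.0 and everything else becomes 0.0, so
# negative entries are zeroed as well.
#
#     >>> import numpy as np
#     >>> X = np.array([[1.5, 0.0, -2.0]])
#     >>> Binarizer().fit(X).transform(X)
#     array([[ 1.,  0.,  0.]])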
def _is_multilabel(y):
return isinstance(y[0], tuple) or isinstance(y[0], list)
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
Examples
--------
>>> from sklearn import preprocessing
>>> clf = preprocessing.LabelBinarizer()
>>> clf.fit([1, 2, 6, 4, 2])
LabelBinarizer()
>>> clf.classes_
array([1, 2, 4, 6])
>>> clf.transform([1, 6])
array([[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.]])
>>> clf.fit_transform([(1, 2), (3,)])
array([[ 1., 1., 0.],
[ 0., 0., 1.]])
>>> clf.classes_
array([1, 2, 3])
"""
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
self : returns an instance of self.
"""
self.multilabel = _is_multilabel(y)
if self.multilabel:
# concatenation of the sub-sequences
self.classes_ = np.unique(reduce(lambda a, b: a + b, y))
else:
self.classes_ = np.unique(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
Y : numpy array of shape [n_samples, n_classes]
"""
if len(self.classes_) == 2:
Y = np.zeros((len(y), 1))
else:
Y = np.zeros((len(y), len(self.classes_)))
y_is_multilabel = _is_multilabel(y)
if y_is_multilabel and not self.multilabel:
raise ValueError("The object was not fitted with multilabel input!")
elif self.multilabel:
if not _is_multilabel(y):
raise ValueError("y should be a list of label lists/tuples,"
"got %r" % (y,))
# inverse map: label => column index
imap = dict((v, k) for k, v in enumerate(self.classes_))
for i, label_tuple in enumerate(y):
for label in label_tuple:
Y[i, imap[label]] = 1
return Y
elif len(self.classes_) == 2:
Y[y == self.classes_[1], 0] = 1
return Y
elif len(self.classes_) >= 2:
for i, k in enumerate(self.classes_):
Y[y == k, i] = 1
return Y
else:
# Only one class, returns a matrix with all 0s.
return Y
def inverse_transform(self, Y, threshold=0):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array of shape [n_samples, n_classes]
Target values.
threshold : float
Threshold used to decide whether to assign the positive class or the
negative class in the binary case. Use 0.5 when Y contains
probabilities.
Returns
-------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
        Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
if self.multilabel:
Y = np.array(Y > 0, dtype=int)
return [tuple(self.classes_[np.flatnonzero(Y[i])])
for i in range(Y.shape[0])]
if len(Y.shape) == 1 or Y.shape[1] == 1:
y = np.array(Y.ravel() > threshold, dtype=int)
else:
y = Y.argmax(axis=1)
return self.classes_[y]
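# A round-trip sketch (editorial addition): inverse_transform maps the binary
# indicator matrix back to the original labels, taking the argmax per row in
# the multi-class case.
#
#     >>> lb = LabelBinarizer().fit([1, 2, 6, 4, 2])
#     >>> Y = lb.transform([1, 6])
#     >>> lb.inverse_transform(Y)
#     array([1, 6])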
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
This is equivalent to centering phi(X) with
sklearn.preprocessing.Scaler(with_std=False).
"""
def fit(self, K):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, copy=True):
"""Center kernel
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
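# Editorial note (not in the original file): for a linear kernel
# K = X.dot(X.T), centering K with KernelCenterer is equivalent to centering
# the columns of X first and then forming the kernel:
#
#     >>> import numpy as np
#     >>> X = np.random.RandomState(0).rand(5, 3)
#     >>> K = X.dot(X.T)
#     >>> Kc = KernelCenterer().fit(K).transform(K)
#     >>> Xc = X - X.mean(axis=0)
#     >>> np.allclose(Kc, Xc.dot(Xc.T))
#     True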
|
ominux/scikit-learn
|
sklearn/preprocessing/__init__.py
|
Python
|
bsd-3-clause
| 20,823
|
[
"Gaussian"
] |
e90c25bb1391b5fae55f9712ba4e8def2eae80edeb618b58af0178f29427de5e
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2008, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
import Globals
from Products.ZenModel.ZenPack import ZenPack as ZenPackBase
from Products.ZenUtils.Utils import unused
unused(Globals)
class ZenPack(ZenPackBase):
def install(self, app):
"""Custom install method for this ZenPack."""
self.pre_install(app)
super(ZenPack, self).install(app)
def pre_install(self, app):
"""Perform steps that should be done before default install."""
devices = app.zport.dmd.Devices
events = app.zport.dmd.Events
# /Network/Switch/HP device class is a prerequisite.
devices.createOrganizer('/Network/Switch/HP')
# /Net/traps event class is a prerequisite.
events.createOrganizer('/Net/traps')
|
zenoss/ZenPacks.networking.HPProcurve
|
ZenPacks/networking/HPProcurve/__init__.py
|
Python
|
gpl-2.0
| 1,240
|
[
"VisIt"
] |
d223b220a10838396efe4ebdf10300c77d0c52e95e0e563994e459ab600ccb47
|
import sys
from pybotvac import (
Account,
Neato,
OAuthSession,
PasswordlessSession,
PasswordSession,
Vorwerk,
)
# Set email and password if you plan to use password authentication.
# Set Client ID and Secret if you plan to use OAuth2.
# If you plan to use email OTP, all you need to do is specify your email and a Client ID.
email = "Your email"
password = "Your password"
client_id = "Your client it"
client_secret = "Your client secret"
redirect_uri = "Your redirect URI"
# Set your vendor
vendor = Neato()
##########################
# Authenticate via Email and Password
##########################
# session = PasswordSession(email=email, password=password, vendor=vendor)
# account = Account(session)
##########################
# Authenticate via OAuth2
##########################
session = OAuthSession(
client_id=client_id,
client_secret=client_secret,
redirect_uri=redirect_uri,
vendor=vendor,
)
authorization_url = session.get_authorization_url()
print("Visit: " + authorization_url)
authorization_response = input("Enter the full callback URL: ")
token = session.fetch_token(authorization_response)
account = Account(session)
##########################
# Authenticate via One Time Password
##########################
# session = PasswordlessSession(client_id=client_id, vendor=vendor)
# session.send_email_otp(email)
# code = input("Enter the code: ")
# session.fetch_token_passwordless(email, code)
# account = Account(session)
print("Robots:")
for robot in account.robots:
print(robot)
print()
print("State:\n", robot.state)
print()
print("Schedule enabled:", robot.schedule_enabled)
print("Disabling schedule")
robot.schedule_enabled = False
print("Schedule enabled:", robot.schedule_enabled)
print("Enabling schedule")
robot.schedule_enabled = True
print("Schedule enabled:", robot.schedule_enabled)
print()
|
stianaske/pybotvac
|
sample/sample.py
|
Python
|
mit
| 1,923
|
[
"VisIt"
] |
c3fffb76ce5d2cf82a902e72453df1f2330108cbfee1d2d71c273fb072043579
|
#!/home/epicardi/bin/python27/bin/python
# Copyright (c) 2013-2014 Ernesto Picardi <ernesto.picardi@uniba.it>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys, os, getopt, time, random, heapq, shutil
from tempfile import gettempdir
from itertools import islice, cycle
from collections import namedtuple
from operator import itemgetter
try: import pysam
except: sys.exit('Pysam module not found.')
version='1.0'
pid=str(os.getpid()+random.randint(0,999999999))
def usage():
print """
USAGE: python tableToTabix.py [options]
Options:
-i TAB-delimited file
-s Sequence name column [1]
-c Start column [4]
-e End column (can be identical to -c) [5]
-m Skip lines starting with [#]
-0 Zero based coordinates
-S Do not sort input file (sort by default)
-b Buffer size (as number of lines) [32000]
-t Temporary directory to use (multiple -t may be used)
-u Save an uncompressed GFF copy (add _copy suffix)
-h Print this help
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "i:Sb:t:hus:c:e:m:0",["help"])
if len(opts)==0:
usage()
sys.exit(2)
except getopt.GetoptError as err:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
GFFfile=''
buffer_size=32000
tempdirs=[]
sort=1
mc=0 # save an uncompressed GFF copy, default no
scol=0 # sequence column name
bcol=3 # start column
ecol=4 # end column
schar='#' # skip lines starting with this character
zcoord=False # zero based coordinates
for o, a in opts:
if o in ("-h","--help"):
usage()
sys.exit()
elif o == "-i":
GFFfile=a
outfile='.'.join(GFFfile.split('.')[:-1])+'.sorted.gff'
if not os.path.exists(GFFfile):
usage()
sys.exit('GFF file not found')
elif o == "-b": buffer_size=int(a)
elif o == "-t": tempdirs.append(a)
elif o == "-S": sort=0
elif o == "-u": mc=1
elif o == "-m": schar=a
elif o == "-s": scol=int(a)-1
elif o == "-c": bcol=int(a)-1
elif o == "-e": ecol=int(a)-1
elif o == "-0": zcoord=True
else:
assert False, "Unhandled Option"
Keyed = namedtuple("Keyed", ["key", "obj"])
key_=eval('lambda line : (%s)' %('line[:]'))
def gk(key,obj):
ik=itemgetter(scol,bcol,ecol)(obj.split('\t'))
return key((ik[0],int(ik[1]),int(ik[2])))
def merge(key=None, *iterables):
# based on code posted by Scott David Daniels in c.l.p.
# http://groups.google.com/group/comp.lang.python/msg/484f01f1ea3c832d
#print iterables
if key is None:
keyed_iterables = iterables
else:
keyed_iterables = [(Keyed(gk(key,obj), obj) for obj in iterable) for iterable in iterables]
#print keyed_iterables
for element in heapq.merge(*keyed_iterables):
yield element.obj
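# Editorial sketch (not original code): heapq.merge performs a k-way merge of
# already-sorted iterables in a single pass, which is what lets batch_sort
# below sort chunks that fit in memory and then merge the chunk files:
#
#     >>> import heapq
#     >>> list(heapq.merge([1, 4, 7], [2, 5], [3, 6]))
#     [1, 2, 3, 4, 5, 6, 7]
#
# Each chunk line is wrapped in a Keyed tuple above so that comparisons use
# the (sequence, start, end) key rather than the raw text of the line.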
def batch_sort(input, output, key=None, buffer_size=32000, tempdirs=None):
if tempdirs is None:
tempdirs = []
if not tempdirs:
tempdirs.append(gettempdir())
chunks = []
xx=0
try:
with open(input,'rb',64*1024) as input_file:
input_iterator = iter(input_file)
for tempdir in cycle(tempdirs):
current_chunk2=[]
for j in islice(input_iterator,buffer_size):
if j.startswith('Region'): continue
if j.startswith(schar): continue
l=(j.strip()).split('\t')
l[bcol]=int(l[bcol])
l[ecol]=int(l[ecol])
current_chunk2.append(l)
current_chunk3=[]
for j in sorted(current_chunk2, key=itemgetter(scol,bcol,ecol)):
j[bcol]=str(j[bcol])
j[ecol]=str(j[ecol])
current_chunk3.append('\t'.join(j)+'\n')
xx+=len(current_chunk3)
if not current_chunk3: break
sys.stdout.write("Loaded and sorted %i lines.\n"%(xx))
output_chunk = open(os.path.join(tempdir,'%06i_%s'%(len(chunks),pid)),'w+b',64*1024)
chunks.append(output_chunk)
output_chunk.writelines(current_chunk3)
output_chunk.flush()
output_chunk.seek(0)
sys.stdout.write("Merging from %i files.\n"%(len(chunks)))
with open(output,'wb',64*1024) as output_file:
output_file.writelines(merge(key, *chunks))
finally:
for chunk in chunks:
try:
chunk.close()
os.remove(chunk.name)
except Exception:
pass
script_time=time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))
sys.stdout.write("Script time --> START: %s\n"%(script_time))
if sort:
sys.stdout.write("Sorting GFF file...\n")
batch_sort(GFFfile,outfile,key_,buffer_size,tempdirs)
GFFfile=outfile
if mc:
copyfile=GFFfile+'_copy'
shutil.copyfile(GFFfile,copyfile)
sys.stdout.write("A copy of uncompressed GFF file has been saved on %s.\n" %(copyfile))
sys.stdout.write("Indexing GFF file...\n")
GFFfile=pysam.tabix_index(GFFfile,seq_col=scol, start_col=bcol, end_col=ecol, zerobased=zcoord)
sys.stdout.write("Tabix file saved on %s.\n" %(GFFfile))
sys.stdout.write("Indices saved on %s.tbi.\n" %(GFFfile))
script_time=time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))
sys.stdout.write("Script time --> END: %s\n"%(script_time))
|
RNAEDITINGPLUS/main
|
node/reditools-1.0.4/reditools/tableToTabix.py
|
Python
|
apache-2.0
| 5,802
|
[
"pysam"
] |
9f304c4f8d5e0d238507680e629f014fbcbe0f8c934e4ef770ee787ac9b74d5a
|
#!/usr/bin/env python2
desc="""Fetch all entries from SRA for given taxid.
Save the biggest run for each SAMPLE (SRS) and date. Paired first, if any.
Note, it runs fastq-dump in the background. Make sure you have enough free cores;)
DEPENDENCIES:
Biopython
"""
epilog="""Author:
l.p.pryszcz+git@gmail.com
Barcelona/Warsaw, 2/10/2012
"""
changelog="""
1.1:
- argparse
-
"""
import argparse, os, sys
from datetime import datetime
#from optparse import OptionParser
from ftplib import FTP
from Bio import Entrez
import xml.etree.ElementTree as ET
def srr2info( srr ):
"""Return info for SRR entry
- experiment id
- submission id
- project id
- biosample id
- run date
- bases
- insert size
- insert std
- reads orientation
"""
'''
for child in root[0]: print child.tag, child.attrib
EXPERIMENT {'center_name': 'BI', 'alias': '74116.WR23613.Solexa-42619.62C7UAAXX100916.P', 'accession': 'SRX026545'}
SUBMISSION {'submission_date': '2009-06-01T02:01:25Z', 'lab_name': 'Genome Sequencing', 'submission_comment': 'Produced by user cristyn on Sun May 31 22:01:25 EDT 2009', 'alias': 'BI.Streptococcus_pyogenes_Pathogenomics', 'center_name': 'BI', 'accession': 'SRA008647'}
STUDY {'center_name': 'BI', 'alias': 'Fusarium_oxysporum_Diversity_RNA_Sequencing_multi_isolate', 'accession': 'SRP002351'}
SAMPLE {'center_name': 'BI', 'alias': '74336.0', 'accession': 'SRS190364'}
RUN_SET {}
root[0][0].keys()
['center_name', 'alias', 'accession']
'''
#search NCBI
result = Entrez.read( Entrez.esearch(db="sra",term=srr ) )
if not result['IdList']:
sys.stderr.write( " Entrez Error: No results for %s\n" % srr )
return
elif len(result['IdList'])>1:
sys.stderr.write( " Entrez Warning: Multiple hits for %s: %s\n" % (srr,",".join(result['IdList'])) )
#fetch info from NCBI
xml = Entrez.efetch( db="sra",id=result['IdList'][0] ).read()
root = ET.fromstring(xml)#; print xml
#get experiment
EXPERIMENT = root[0].find("EXPERIMENT")
srx = EXPERIMENT.attrib['accession']
#get submission
s = root[0].find("SUBMISSION")
sra = s.attrib['accession']
#get accession
s = root[0].find("STUDY")
srp = s.attrib['accession']
#get accession
s = root[0].find("SAMPLE")
srs = s.attrib['accession']
''' <Element 'RUN_SET' at 0x31a1590>
RUN{'run_date': '2010-09-16T04:00:00Z',
'center_name': 'BI',
'total_bases': '3035446384',
'run_center': 'BI',
'accession': 'SRR190806',
'total_spots': '19970042',
'cluster_name': 'public',
'alias': 'BI.PE.100916_SL-XDX_00019_FC62C7UAAXX.7.srf',
'instrument_name': 'SL-XDX',
'published': '2011-04-28 14:48:11',
'static_data_available': '1',
'is_public': 'true',
'load_done': 'true',
'size': '1839421668'}
'''
#run data
s = root[0].find('RUN_SET') #it's within RUN_SET
date = s[0].attrib['run_date']
bases = s[0].attrib['total_bases']
#LIBRARY_LAYOUT - maybe try to simplify it
isize=istdv=orient = 0
DESIGN = EXPERIMENT.find("DESIGN") # [2][2][4][0].attrib#; print layout
LIBRARY_DESCRIPTOR = DESIGN.find("LIBRARY_DESCRIPTOR")
LIBRARY_LAYOUT = LIBRARY_DESCRIPTOR.find("LIBRARY_LAYOUT")
PAIRED = LIBRARY_LAYOUT.find("PAIRED")
if PAIRED is not None:
layout = PAIRED.attrib
isize = layout['NOMINAL_LENGTH'] # NOMINAL_LENGTH="476"
orient = layout['ORIENTATION'] #ORIENTATION="5\'3\'-3\'5\'
istdv = layout['NOMINAL_SDEV'] ##PAIRED NOMINAL_SDEV="149.286"
return ( srx,sra,srp,srs,date,bases,isize,istdv,orient )
def xml2data( child,taxid2srs,verbose ):
""" """
#get experiment
EXPERIMENT = child.find("EXPERIMENT")
srx = EXPERIMENT.attrib['accession']
#get submission
s = child.find("SUBMISSION")
sra = s.attrib['accession']
#get accession
s = child.find("STUDY")
srp = s.attrib['accession']
#get accession
for SAMPLE in child.findall("SAMPLE"):
#if SAMPLE.attrib['accession']!=
srs = SAMPLE.attrib['accession']
#get taxid
SAMPLE_NAME = SAMPLE.find("SAMPLE_NAME")
TAXON_ID = SAMPLE_NAME.find("TAXON_ID")
taxid = int(TAXON_ID.text)
SCIENTIFIC_NAME = SAMPLE_NAME.find("SCIENTIFIC_NAME")
#malformed xml?
if SCIENTIFIC_NAME is None:
return taxid2srs
strain = SCIENTIFIC_NAME.text
#get strain tag - this may cause problems with non-ENA accessions!
SAMPLE_ATTRIBUTES = SAMPLE.find("SAMPLE_ATTRIBUTES")
if SAMPLE_ATTRIBUTES is None:
continue
for SAMPLE_ATTRIBUTE in SAMPLE_ATTRIBUTES.findall("SAMPLE_ATTRIBUTE"):
#print SAMPLE_ATTRIBUTE.find("TAG").text
if SAMPLE_ATTRIBUTE.find("TAG").text == "strain":
#print SAMPLE_ATTRIBUTE.find("VALUE")
strain += " %s" % SAMPLE_ATTRIBUTE.find("VALUE").text
break
if strain!="unidentified organism":
break
#LIBRARY_LAYOUT - maybe try to simplify it
isize=istdv=orient = 0
DESIGN = EXPERIMENT.find("DESIGN") # [2][2][4][0].attrib#; print layout
LIBRARY_DESCRIPTOR = DESIGN.find("LIBRARY_DESCRIPTOR")
LIBRARY_LAYOUT = LIBRARY_DESCRIPTOR.find("LIBRARY_LAYOUT")
PAIRED = LIBRARY_LAYOUT.find("PAIRED")
if PAIRED is not None:
layout = PAIRED.attrib
isize = 0
if 'NOMINAL_LENGTH' in layout:
isize = float( layout['NOMINAL_LENGTH'] ) # NOMINAL_LENGTH="476"
elif not verbose:
sys.stderr.write( " %s: Paired run and no or zero insert. Skipped!\n" % srx )
istdv = 0
if 'NOMINAL_SDEV' in layout:
istdv = float( layout['NOMINAL_SDEV'] ) ##PAIRED NOMINAL_SDEV="149.286"
elif verbose:
sys.stderr.write( " %s: Paired run and no NOMINAL_SDEV\n" % srx )
orient = ""
if 'ORIENTATION' in layout:
orient = layout['ORIENTATION'] #ORIENTATION="5\'3\'-3\'5\'
elif verbose:
sys.stderr.write( " %s: Paired run and no orientation\n" % srx )
#run data
runs = []
RUN_SET = child.find('RUN_SET') #it's within RUN_SET
for RUN in RUN_SET.findall("RUN"):
srr = RUN.attrib['accession']
date = ""
if 'run_date' in RUN.attrib:
date = RUN.attrib['run_date']
elif verbose:
sys.stderr.write( " %s (%s): No run date!\n" % (srx,srr) )
bases = 0
if 'total_bases' in RUN.attrib:
bases = int( RUN.attrib['total_bases'] )
elif verbose:
sys.stderr.write( " %s (%s): No total bases!\n" % (srx,srr) )
runs.append( (srr,bases,date) )
#store data
childdata = ( strain,taxid,srx,srp,isize,istdv,orient,runs )
if verbose:
sys.stderr.write( " %s: %s: %s\n" % (taxid,srs,str(childdata)) )
if not taxid in taxid2srs:
taxid2srs[taxid] = {}
if not srs in taxid2srs[taxid]:
taxid2srs[taxid][srs] = []
taxid2srs[taxid][srs].append( childdata )
return taxid2srs
def taxid2runs(outfn, taxid, verbose, term, db="sra", retmode="xml", retmax=10**6):
"""Return info from SRA for given taxid. """
taxid2srs = {}
#search NCBI
if verbose:
sys.stderr.write("Query: %s\n" % term)
result = Entrez.read( Entrez.esearch( db=db,term=term,retmax=retmax ) )
ids = result['IdList']
if not ids:
sys.stderr.write( " Entrez Error: No results for %s\n" % taxid )
return
if verbose:
sys.stderr.write( "Downloading %s entries from NCBI %s database...\n" % ( len(ids),db ) )
#post NCBI query
'''search_handle = Entrez.epost( db,id=",".join( ids ) )
search_results = Entrez.read( search_handle )
webenv,query_key = search_results["WebEnv"], search_results["QueryKey"]
#fetch info from NCBI
xml = Entrez.efetch( db=db,retmode=retmode,retmax=retmax,webenv=webenv,query_key=query_key ).read()
root = ET.fromstring( xml )
for child in root:
#update dict
srs2exp = xml2data( child,srs2exp,verbose )'''
for id in ids:
xml = Entrez.efetch( db=db,retmode=retmode,id=id ).read()#; print xml
root = ET.fromstring( xml )
child = root[0]
taxid2srs = xml2data( child,taxid2srs,verbose )
#print output
out = open( outfn,"w" )
header = "#Strain\tTaxid\tSample\tExperiment\tProject\tInsert size\tOrientation\tRun\tBases\tDate\n"
out.write( header )
sys.stderr.write( "Saving SRA info to: %s\n" % outfn )
for taxid in taxid2srs:
for srs in taxid2srs[taxid]:
for strain,taxid,srx,srp,isize,istdv,orient,runs in taxid2srs[taxid][srs]:
for srr,bases,date in runs:
line = u"%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (strain,taxid,srs,srx,srp,isize,orient,srr,bases,date)
out.write( line.encode('ascii', 'xmlcharrefreplace') )
out.close()
return taxid2srs
def srr2fastq( odir,srr,srs,srx,ncbitimestamp,isize,ftpdomain,verbose ):
"""Fetch file."""
#connect to ftp
ftp = FTP(ftpdomain)
ftp.login('anonymous', '')
#create outdir
outdir = os.path.join( odir,srs,srx )
if not os.path.isdir( outdir ):
os.makedirs( outdir )
#get nice date timestamp
date = datetime.strptime( ncbitimestamp,"%Y-%m-%dT%H:%M:%SZ" )
tstamp = date.strftime("%Y%m%d")
#ftp://ftp-trace.ncbi.nlm.nih.gov/sra/sra-instant/reads/ByRun/sra/SRR/SRR540/SRR540367
remotefpath = "sra/sra-instant/reads/ByRun/sra/SRR/%s/%s/%s.sra" % ( srr[:6],srr,srr )
localfpath = os.path.join( outdir,"%s_%s.sra" % ( tstamp,srr) )
if not os.path.isfile( localfpath ):
if verbose:
sys.stdout.write( " %s > %s\n" % ( remotefpath,localfpath ) )
#fetch file
lfile = open( localfpath,"w" )
ftp.retrbinary( "RETR %s" % remotefpath, lfile.write )
lfile.close()
else:
if verbose:
sys.stdout.write( " File exists: %s\n" % ( localfpath, ) )
#get paired if insert size fastq
if isize:
cmd = "fastq-dump --gzip --split-3 -O %s %s 2>&1 > %s.log &" % ( outdir,localfpath,localfpath )
fqfn1 = "%s_1.fastq.gz" % localfpath[:-4]
fqfn2 = "%s_2.fastq.gz" % localfpath[:-4]
if not os.path.isfile(fqfn1) or not os.path.isfile(fqfn2):
if verbose:
sys.stderr.write( " %s\n" % cmd )
os.system( cmd )
else:
cmd = "fastq-dump --gzip -O %s %s 2>&1 > %s.log &" % ( outdir,localfpath,localfpath )
fqfn1 = "%s.fastq.gz" % localfpath[:-4]
if not os.path.isfile(fqfn1): # or not os.path.isfile(fqfn2):
if verbose:
sys.stderr.write( " %s\n" % cmd )
os.system( cmd )
def get_runs( taxid2srs,ftpdomain,orientth,maxisize,paired,minbases,verbose ):
"""Select the best run for each uniq taxid-srs-date combination
"""
if verbose:
sys.stderr.write( "Fetching best run for each uniq taxid-srs-date combination...\n" )
#select the best run for each uniq taxid-srs-date combination
for taxid in taxid2srs:
for srs in taxid2srs[taxid]:
date2runs={}
for strain,taxid,srx,srp,isize,istdv,orient,runs in taxid2srs[taxid][srs]:
#check if paired
if paired:
if not isize:
continue
#skip if wrong orientation
if orientth and orientth!=orient:
continue
#skip big insert size or not paired
if maxisize:
if isize>maxisize:
continue
#add runs passed filtering
for srr,bases,date in runs:
#skip if too small yield
if bases < minbases*10**6:
continue
if date not in date2runs:
date2runs[date]=[]
date2runs[date].append( (srr,srx,srp,isize,bases) )
#process best run for each uniq taxid-srs-date combination
for date in date2runs:
#
fltruns = filter( lambda x: x[3]!=0, date2runs[date] )
if not fltruns:
fltruns = date2runs[date]
#sort by size
bestrun = sorted( fltruns,key=lambda x: x[-1],reverse=True )[0]
#print bestrun,date2runs[date]
srr,srx,srp,isize,bases = bestrun
#fetch
odir = "taxid%s" % taxid
srr2fastq( odir,srr,srs,srx,date,isize,ftpdomain,verbose )
def main():
usage = "%(prog)s -v"
parser = argparse.ArgumentParser(usage=usage, description=desc, epilog=epilog)
parser.add_argument("-v", dest="verbose", default=False, action="store_true", help="verbose")
parser.add_argument('--version', action='version', version='1.1')
parser.add_argument("-d", "--download", default=False, action="store_true",
help="download SRA files")
parser.add_argument("-t", "--taxid", type=int, required=True,
help="taxid of interest " )
parser.add_argument("-f", dest="ftp", default="ftp-trace.ncbi.nih.gov",
help="ftp server address [%(default)s]" )
parser.add_argument("-e", "--email", default="lpryszcz@crg.es", type=str,
help="email address [%(default)s]" )
parser.add_argument("-o", dest="orient", default="5'3'-3'5'",
help="orientation [%(default)s]" )
parser.add_argument("-m", dest="maxisize", default=1000, type=int,
help="max allowed insert [%(default)s]" )
parser.add_argument("-b", dest="minbases", default=600, type=int,
help="min Mbases in run [%(default)s Mbases -> 10x for 60Mb genome]" )
parser.add_argument("-p", "--paired", default=False, action="store_true",
help="fetch only paired runs" )
parser.add_argument("-t", "--term", default="txid%s[organism] AND dna_data[filter] AND sra_public[filter] AND type_genome[filter]"
help="NCBI term [%(default)s]" )
term = % taxid
o = parser.parse_args()
if o.verbose:
sys.stderr.write( "Options: %s\n" % str(o) )
Entrez.email = o.email
#get all runs for taxid
outfn = "sra.tsv"
    taxid2srs = taxid2runs(outfn, o.taxid, o.verbose, o.term % o.taxid)
if o.download:
#fetch best srr
get_runs(taxid2srs, o.ftp, o.orient, o.maxisize, o.paired, o.minbases, o.verbose)
if __name__=='__main__':
t0 = datetime.now()
main()
dt = datetime.now()-t0
sys.stderr.write( "#Time elapsed: %s\n" % dt )
|
lpryszcz/bin
|
taxid2sra.py
|
Python
|
gpl-3.0
| 15,103
|
[
"Biopython"
] |
0e02698442997ebbdd96f099d680cdb3329cd4c81ee680ff16315d8dfdc1840d
|
from __future__ import print_function
import os
import numpy as np
from astropy.table import Table
from astropy.io import fits
from .. import fileio
from .starpop import StarPop
from .asts import parse_pipeline
from ..angst_tables import angst_data
from ..astronomy_utils import mag2Mag
__all__ = ['Galaxy']
class Galaxy(StarPop):
'''angst and angrrr galaxy object'''
def __init__(self, fname):
self.base, self.name = os.path.split(fname)
StarPop.__init__(self)
# name spaces
self.load_data(fname)
def load_data(self, fname):
if fname.endswith('fits'):
self.data = fits.getdata(fname)
else:
self.data = Table.read(fname)
self.target, self.filters = parse_pipeline(fname)
def trgb_av_dmod(self, filt):
'''returns trgb, av, dmod from angst table'''
return angst_data.get_tab5_trgb_av_dmod(self.target, filt)
def check_column(self, column, loud=False):
vomit = ''
if loud:
            vomit = ', '.join(self.data.dtype.names)
assert column.upper() in self.data.dtype.names, \
'{} not found. {}'.format(column, vomit)
def absmag(self, column, filt, photsys=None, dmod=None, Av=None):
self.check_column(column)
if dmod is None:
_, av, dmod = self.trgb_av_dmod(filt)
if Av is None:
Av = av
if photsys is None:
if 'ACS' in column.upper():
photsys = 'acs_wfc'
elif 'IR' in column.upper():
photsys = 'wfc3ir'
return mag2Mag(self.data[column], filt, photsys=photsys, dmod=dmod, Av=Av)
|
philrosenfield/ResolvedStellarPops
|
galaxies/galaxy.py
|
Python
|
bsd-3-clause
| 1,664
|
[
"Galaxy"
] |
fbd0e51f96061d1a83752dac4c068cddf5604a85b59fbe886e3d0cdb29dfca7a
|
# -*- Mode: Python; coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
from zope.interface import implementer
from stoqlib.database.migration import PluginSchemaMigration
from stoqlib.lib.interfaces import IPlugin
from stoqlib.lib.pluginmanager import register_plugin
@implementer(IPlugin)
class ECFPlugin(object):
name = u'ecf'
def __init__(self):
self.ui = None
#
# IPlugin
#
def get_migration(self):
return PluginSchemaMigration(self.name, 'ecf', 'sql',
['*.sql', '*.py'])
def get_tables(self):
return [('ecfdomain', ["ECFPrinter", "DeviceConstant",
"FiscalSaleHistory", "ECFDocumentHistory"])]
def activate(self):
# Do this in a nested import so we can import the plugin without
# importing gtk.
from ecf.ecfui import ECFUI
self.ui = ECFUI()
def get_dbadmin_commands(self):
return []
def handle_dbadmin_command(self, command, options, args):
assert False
register_plugin(ECFPlugin)
|
tiagocardosos/stoq
|
plugins/ecf/ecfplugin.py
|
Python
|
gpl-2.0
| 1,914
|
[
"VisIt"
] |
3e5d0055e121231f1c061862eb508e59d605eb5fdc77fdc5e5490c94a7e1e9fb
|
from constants import *
from operator import itemgetter
import web
import os
import codecs
import urllib
from models import *
# Form Handlers
LoginForm = web.form.Form(
web.form.Textbox('ra', web.form.notnull, Class="form-control"),
web.form.Password('senha', web.form.notnull, Class="form-control"),
web.form.Button('login', Class="btn btn-primary"),
)
RegisterForm = web.form.Form(
web.form.Textbox('RA', web.form.notnull, Class="form-control"),
web.form.Textbox('Nome', web.form.notnull, Class="form-control"),
web.form.Textbox('E-mail', web.form.notnull, Class="form-control"),
web.form.Password('Senha', web.form.notnull, Class="form-control"),
web.form.Button('Login', Class="btn btn-primary"),
)
SearchForm = web.form.Form(
web.form.Textbox('Busca', Class="form-control"),
)
ForgottenForm = web.form.Form(
web.form.Textbox('email', web.form.notnull, Class="form-control"),
web.form.Button('Enviar', Class="btn btn-primary"),
)
ConfirmationForm = web.form.Form(
web.form.Textbox('Codigo de confirmacao', web.form.notnull, Class="form-control"),
web.form.Button('Submeter', Class="btn btn-primary"),
)
UserForm = web.form.Form(
web.form.Textbox('RA', web.form.notnull, Class="form-control"),
web.form.Textbox('Nome', web.form.notnull, Class="form-control"),
web.form.Password('Current', Class="form-control"),
web.form.Password('New', Class="form-control"),
web.form.Password('Repeat', Class="form-control"),
web.form.Button('Login', Class="btn btn-primary"),
)
semesters = []
teachers = []
subjects = []
def UpdateLists():
S = sessionmaker(bind=DB)()
SemestersList = S.query(Semester).order_by(Semester.id)
TeachersList = S.query(Teacher).order_by(Teacher.name)
SubjectsList = S.query(Subject).order_by(Subject.code)
for Line in SemestersList:
sem = '%s semestre de %s' % (Line.sem, Line.year)
if (Line.id,sem) not in semesters:
semesters.insert(-1,(Line.id,sem))
for Line in TeachersList:
t = '%s' % (Line.name)
if (Line.id,t) not in teachers:
teachers.insert(-1,(Line.id,t))
for Line in SubjectsList:
sub = '%s %s' % (Line.code, Line.name)
if (Line.id,sub) not in subjects:
subjects.insert(-1,(Line.id,sub))
    # sort in place (a bare sorted(...) call would discard its result)
    semesters.sort(key=itemgetter(1))
    teachers.sort(key=itemgetter(1))
    subjects.sort(key=itemgetter(1))
DeleteTeacher = web.form.Form(
web.form.Dropdown('Professores', args = teachers),
web.form.Button('Submeter', Class="btn btn-primary"))
DeleteSemester = web.form.Form(
web.form.Dropdown('id', args = semesters),
web.form.Button('Submeter', Class="btn btn-primary"))
DeleteSubject = web.form.Form(
web.form.Radio('id', args = subjects),
web.form.Button('Submeter', Class="btn btn-primary"))
AddOffering = web.form.Form(
web.form.Dropdown('Semestre', args = semesters),
web.form.Dropdown('Disciplina', args = subjects),
web.form.Dropdown('Professor', args = teachers),
web.form.Textbox('Turma', web.form.notnull),
web.form.Textbox('Matriculados', web.form.notnull),
web.form.Button('Submeter', Class="btn btn-primary"))
AddSemester = web.form.Form(
web.form.Dropdown('Semestre', [('1','primeiro'), ('2','segundo')]),
web.form.Textbox('Ano', web.form.notnull),
web.form.Button('Submeter', Class="btn btn-primary"))
AddTeacher = web.form.Form(
web.form.Textbox('Nome', web.form.notnull),
web.form.Button('Submeter', Class="btn btn-primary"))
AddSubject = web.form.Form(
web.form.Textbox('Codigo', web.form.notnull),
web.form.Textbox('Nome', web.form.notnull),
web.form.Textbox('Creditos', web.form.notnull),
web.form.Textbox('Ementa', web.form.notnull),
web.form.Button('Submeter', Class="btn btn-primary"))
RateOffering = web.form.Form(
web.form.Textbox('Respostas'),
web.form.Textbox('Coluna11'),
web.form.Textbox('Coluna12'),
web.form.Textbox('Coluna13'),
web.form.Textbox('Coluna14'),
web.form.Textbox('Coluna15'),
web.form.Textbox('Coluna16'),
web.form.Button('Submeter', Class="btn btn-primary"))
#MyForm = web.form.Form(
#form.Textbox("boe"),
#form.Textbox("bax",
#form.notnull,
#form.regexp('\d+', 'Must be a digit'),
#form.Validator('Must be more than 5', lambda x:int(x)>5)),
#form.Textarea('moe'),
#form.Checkbox('curly'),
#web.form.Button('Submeter1', Class="btn btn-primary")),
|
rodrigosurita/gda
|
forms.py
|
Python
|
gpl-3.0
| 4,488
|
[
"MOE"
] |
c28366a9f6349dcbbae45e4113142b350e73184f5314be94d3384d7f88f95619
|
#Copyright (c) 2014, Ben Goodrich
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.#
#
#2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
#THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from nnet import *
import numpy as np
import cudamat as cm
#from cudamat import learn as cl
class net_cuda(net):
def __init__(self,layer,step_size=None,dropout=None):
#TODO: should probably put cudamat initialization elsewhere
#in case it is used by more than one network
cm.cuda_set_device(0)
cm.init()
super(net_cuda,self).__init__(layer,step_size,dropout)
def initialize_weights(self):
super(net_cuda,self).initialize_weights()
for index,l in enumerate(self.layer):
l.weights = cm.CUDAMatrix(l.weights)
def zero_gradients(self):
#TODO: make empty matrix and set it to 0
for l in self.layer:
l.gradient = cm.CUDAMatrix(np.zeros(l.weights.shape))
@property
def input(self):
return self._input
@input.setter
def input(self,value):
self._input = value
self._input = cm.CUDAMatrix(np.append(value,np.ones((1,value.shape[1])),axis=0))
@input.deleter
def input(self):
del self._input
def feed_forward(self,input=None):
#optionally allow passing input as an argument
if input is not None:
self.input = input
for index,l in enumerate(self.layer):
if(index == 0):
input = self.input
else:
input = self.layer[index-1].output
l.input = input
#print(str(index) + " " + str(l.weights.shape) + " " + str(l.input.shape))
l.weighted_sums = cm.dot(l.weights,l.input)
#apply activation function
if(l.activation == 'squash'):
pass
                #TODO: write kernel for this
#l.output = l.weighted_sums / (1+np.abs(l.weighted_sums))
elif(l.activation == 'sigmoid'):
l.output = l.weighted_sums.apply_sigmoid()
#elif(l.activation == 'linear_rectifier'):
# l.output = np.maximum(0,l.weighted_sums)
else: #base case is linear
l.output = l.weighted_sums
#if(l.dropout is not None and self.train == True):
# if(l.dropout == 0.5):
# l.output = l.output*np.random.randint(0,2,l.output.shape);
# else:
# l.output = l.output*np.random.binomial(1,l.dropout,l.output.shape);
#elif(l.dropout is not None and self.train == False):
# l.output = l.output*(1.0 - l.dropout);
self.output = self.layer[len(self.layer)-1].output
self.output.copy_to_host()
self.output = self.output.numpy_array
self.output = self.output[0:-1,:]
def back_propagate(self,error=None):
if(error is not None):
            self.error = error
#python doesn't easily allow reversed(enumerate()) - use this instead
for l in reversed(self.layer):
#if we're on the last layer
#print(str(index));
if(l.index == len(self.layer)-1):
delta_temp = cm.CUDAMatrix(np.append(self.error,np.zeros((1,self.error.shape[1])),axis=0))
else:
#Possible TODO?: is there a way to get rid of this transpose? it is slow to have to do this
#delta_temp = cm.empty((self.layer[l.index+1].weights.shape[1],self.layer[l.index+1].weights.shape[0]))
#delta_temp = self.layer[l.index+1].weights.transpose()
self.layer[l.index+1].weights.set_trans(True);
delta_temp = cm.dot(self.layer[l.index+1].weights,self.layer[l.index+1].delta);
self.layer[l.index+1].weights.set_trans(False);
if(l.activation == 'squash'):
pass
#l.activation_derivative = 1.0/((1+np.abs(l.weighted_sums)**2))
elif(l.activation == 'sigmoid'):
#l.activation_derivative = cm.empty(l.output.shape);
l.output.apply_logistic_deriv(l.output)
l.activation_derivative = l.output
#elif(l.activation == 'linear_rectifier'):
#1 if greater than 0, 0 otherwise.
#This stores them as bools - but it doesn't matter
#l.activation_derivative = np.greater(l.output,0);
else: #base case is linear
l.activation_derivative = cm.empty(l.output.shape);
                l.activation_derivative.assign_scalar(1.0)
#bottom row of activation derivative is the bias 'neuron'
l.delta = cm.empty(delta_temp.shape)
l.activation_derivative.mult(delta_temp,target=l.delta)
#calculate weight gradient
#input_t = cm.empty((l.input.shape[1],l.input.shape[0]))
#input_t = l.input.transpose()
l.input.set_trans(True)
l.gradient.add_dot(l.delta,l.input);
l.input.set_trans(False)
self.epoch_size = self.epoch_size + self.input.shape[1];
def update_weights(self):
for l in reversed(self.layer):
#l.weight_change = -l.step_size*l.gradient/self.epoch_size;
l.gradient.mult(-l.step_size/self.epoch_size)
l.weights.add(l.gradient);
l.gradient.assign_scalar(0.0);
self.epoch_size = 0;
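# Editorial sketch (not part of the original toolkit): update_weights applies
# a plain averaged-gradient descent step. The equivalent numpy update for one
# layer, with a gradient accumulated over epoch_size samples, would be:
#
#     >>> import numpy as np
#     >>> weights = np.zeros((2, 2)); gradient = np.ones((2, 2))
#     >>> step_size, epoch_size = 0.1, 4
#     >>> weights += -step_size * gradient / epoch_size
#     >>> weights
#     array([[-0.025, -0.025],
#            [-0.025, -0.025]])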
|
bbitmaster/python_nn_toolkit
|
nnet_toolkit/nnet_cuda.py
|
Python
|
bsd-2-clause
| 5,709
|
[
"NEURON"
] |
fc7285a0c0a9c6b36f13b5984dca8e7346080e6ffdd4e4803d05b985d3e7d7c2
|
#!/usr/bin/env python
from __future__ import print_function, division #, unicode_literals
import os
import sys
import imp
from os.path import join as pj, abspath as absp, exists as pexists, basename, splitext, isfile
#try:
# import tests as abitests
#except ImportError:
# Add the directory [...]/abinit/tests to $PYTHONPATH
pack_dir, x = os.path.split(absp(__file__))
pack_dir, x = os.path.split(pack_dir)
sys.path.insert(0,pack_dir)
import tests as abitests
del pack_dir, x
def tests_from_pymod(pymod_path, abenv):
# Import the module
mod_name = basename(pymod_path).split(".py")[0]
module = imp.load_source(mod_name, pymod_path)
# Inspect the module and build the list of tests.
tests = list()
test_gen = getattr(module,"abinit_test_generator", None)
if test_gen is not None:
tests.append( PythonTest(abenv, test_gen()) )
suite_gen = getattr(module, "abinit_suite_generator", None)
if suite_gen is not None:
for test_gen in suite_gen():
tests.append( PythonTest(abenv, test_gen) )
assert tests
return tests
class PythonTest(object):
_attrbs = [
"test_func",
#"pyscript"
#"name",
]
def __init__(self, abenv, dictionary):
self.abenv = abenv
for k in PythonTest._attrbs:
self.__dict__[k] = dictionary.get(k, None)
assert self.test_func is not None
self.info = self.test_func.__doc__
assert self.info is not None
# TODO: Import conditions.
def __str__(self):
return "\n".join( [ str(k) + " : " + str(v) for k,v in self.__dict__.items()] )
def run(self, workdir=None):
if workdir is None: workdir = os.path.abspath(os.curdir)
self.workdir = workdir
        print(self.info + "...", end="")
#stderr_fname = pj(workdir, self.name + ".stderr")
#stdout_fname = pj(workdir, self.name + ".stdout")
#self.stderr = open(stderr_fname, "w")
#self.stdout = open(stdout_fname, "w")
from StringIO import StringIO
self.stdout, self.stderr = StringIO(), StringIO()
sys.stdout, sys.stderr = self.stdout, self.stderr
try:
exit_status = self.test_func(self.abenv)
except:
import traceback
var = traceback.format_exc()
#print(self.name + ": " + var)
print(var)
exit_status = 99
self.stdout.seek(0)
self.stderr.seek(0)
#self.stderr.close()
#self.stdin.close()
# Reinstate stdout and stderr.
sys.stdout, sys.stderr = sys.__stdout__ , sys.__stderr__
#print("stdout:")
#print(self.stdout.read())
#print( "stderr:")
#print( self.stderr.read())
#print( "done")
if exit_status != 0: # Check reference files.
print(" (Comparing with reference files)")
#FIXME: I need to change the name of the reference files.
#print self.stdout.read()
#print self.stderr.read()
ref_dir = self.abenv.apath_of("tests/abirules/Refs")
#for idx, ext in enumerate(["", ".stdout", ".stderr"]):
#for idx, ext in enumerate([""]):
for idx, ext in enumerate([]):
#ref_file = pj(ref_dir, self.name + ext)
ref_file = pj(ref_dir, "t01.out")
if not isfile(ref_file): continue
fh = open(ref_file, "r")
ref_lines = fh.readlines()
fh.close()
if idx == 0: from_lines = self.stdout.readlines()
if idx == 1: from_lines = self.stderr.readlines()
import difflib
diff = difflib.unified_diff(from_lines, ref_lines) #, fromfile, tofile, fromdate, todate, n=n)
diff = list(diff)
                for l in diff: print(l)
exit_status = 0
if len(diff) != 0: exit_status = 1
self.exit_status = exit_status
if exit_status != 0:
print("FAILED")
else:
print("SUCCESS")
return self.exit_status
def run_abirules_suite():
abenv = abitests.abenv
import abirules
script_dir = abenv.apath_of("special/scripts/")
scripts = [ absp(pj(script_dir,s)) for s in abirules.pyscripts ]
exit_status = 0
for script in scripts:
tests = tests_from_pymod(script, abenv)
for test in tests:
retval = test.run()
if retval != 0 : exit_status = retval
return exit_status
def run_buildsys_suite():
abenv = abitests.abenv
import buildsys
script_dir = abenv.apath_of("special/scripts/")
scripts = [ absp(pj(script_dir,s)) for s in buildsys.pyscripts ]
exit_status = 0
for script in scripts:
tests = tests_from_pymod(script, abenv)
for test in tests:
retval = test.run()
if retval != 0 : exit_status = retval
return exit_status
if __name__ == "__main__":
#print 30*"=" + " buildsys_tests " + 30*"="
#run_buildsys_suite()
print(30*"=" + " abirules_tests " + 30*"=")
run_abirules_suite()
|
SamKChang/abinit-7.10.5_multipole
|
tests/pytests.py
|
Python
|
gpl-3.0
| 4,694
|
[
"ABINIT"
] |
717b64da7871dfa8301bc1ef1820b611ddb391c812380e5dbcc08b39297009fe
|
import numpy as np
import netCDF4 as ncd
FILL_VALUE = None
def new(filename):
'''
Return the netcdf4-python rootgroup for a new netcdf file
for writing.
'''
return ncd.Dataset(filename, 'w', clobber=False)
def add_coordinates(nc, dict_of_dims):
'''
Create dimensions in netcdf file nc.
    >> add_coordinates(nc, {"time": 5040})
'''
# Loop through keys, and add each as a dimension, with the
# cooresponding dict value tuple as the representative
# shape of the dimension.
for dimname in dict_of_dims.iterkeys():
t = nc.createDimension(dimname, size=dict_of_dims[dimname])
nc.sync()
def add_variable(nc, varname, data, dims, compress=False, fill=FILL_VALUE):
'''
Thin wrapper for easily adding data to netcdf variable with just
    the variable name, the current array of values, and a tuple with
    the corresponding dimension names.
'''
v = nc.createVariable(varname, data.dtype, dimensions=dims, zlib=compress, fill_value=fill)
v[:] = data
nc.sync()
def add_scalar(nc, varname, data, compress=False, fill=FILL_VALUE):
'''
Functionality to simply add a scalar variable to a netcdf file,
expects a numpy array of length 1 for the data argument.
'''
v = nc.createVariable(varname, data.dtype, zlib=compress, fill_value=fill)
v[:] = data
nc.sync()
def add_attribute(nc, key, value, var=None):
'''
Take in a single attname:value pair and write into the global
or variable namespace
'''
if var==None:
nc.setncattr(key, value)
else:
if not key in set([ "_FillValue", "_ChunkSize" ]):
nc.variables[var].setncattr(key, value)
nc.sync()
def add_attributes(nc, attrs, var=None):
'''
Take in a dict of attname:value pairs and write the whole dict
into the global or variable namespace
'''
if var==None:
nc.setncatts(attrs)
else:
nc.variables[var].setncatts(attrs)
nc.sync()
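# Minimal end-to-end sketch (editorial addition; 'example.nc' and the
# variable names are hypothetical, not from the original module):
#
#     >>> import numpy as np
#     >>> nc = new('example.nc')
#     >>> add_coordinates(nc, {'time': 10})
#     >>> add_variable(nc, 'temp', np.arange(10.0), ('time',))
#     >>> add_attribute(nc, 'units', 'K', var='temp')
#     >>> nc.close()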
|
asascience-open/paegan
|
paegan/cdm/writer.py
|
Python
|
gpl-3.0
| 1,992
|
[
"NetCDF"
] |
6e9c24ac3801087fca16339e943fa3be8f30eef79484751d3162c95df95313fc
|
# ################################################################
#
# Active Particles on Curved Spaces (APCS)
#
# Author: Silke Henkes
#
# ICSMB, Department of Physics
# University of Aberdeen
# Author: Rastko Sknepnek
#
# Division of Physics
# School of Engineering, Physics and Mathematics
# University of Dundee
#
# (c) 2013, 2014
#
# This program cannot be used, copied, or modified without
# explicit permission of the author.
#
# ################################################################
# Integrator code for batch processing of full data runs (incorporating parts of earlier analysis scripts)
# Data interfacing
from read_data import *
from read_param import *
# Pre-existing analysis scripts
from nematic_analysis import *
#from glob import glob
# This is the structured data file hierarchy. Replace as appropriate (do not go the Yaouen way and fully automatize ...)
basefolder='/home/silke/Documents/CurrentProjects/Rastko/nematic/data/'
#basefolder = '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/J_1_0_v0_1_0/'
#outfolder= '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/J_1_0_v0_1_0/'
outfolder = '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/'
#v0val=['0.3','0.5','0.7','1.5','2.0','3.0','7.0','10.0']
v0val=['0.3','0.5','0.7','1.5']
sigma=1
rval=['8.0']
nstep=10100000
nsave=5000
nsnap=int(nstep/nsave)
#skip=835
skip=0
for r in rval:
for v0 in v0val:
#param = Param(basefolder)
files = sorted(glob(basefolder+'R_'+ r+ '/v0_' + v0 + '/sphere_*.dat'))[skip:]
defects=np.zeros((len(files),12))
ndefect=np.zeros((len(files),1))
u=0
for f in files:
print f
outname =outfolder +'R_'+ r+ '/v0_' + v0 + '/frame_data' + str(u)+'.vtk'
defects0,ndefect0=getDefects(f,float(r),sigma,outname,False,False)
defects[u,0:3]=defects0[0,:]
defects[u,3:6]=defects0[1,:]
defects[u,6:9]=defects0[2,:]
defects[u,9:12]=defects0[3,:]
ndefect[u]=ndefect0
#outname = '.'.join((f).split('.')[:-1]) + '_defects.vtk'
#outname =outfolder +'R_'+ r+ '/v0_' + v0 + '/frame_defects' + str(u)+'.vtk'
#print outname
#writeDefects(defects0,ndefect0,outname)
u+=1
outfile2=outfolder + 'defects_v0_' + v0 + '_R_'+ r+ '.dat'
np.savetxt(outfile2,np.concatenate((ndefect,defects),axis=1),fmt='%12.6g', header='ndefect defects')
|
sknepneklab/SAMoS
|
analysis/batch_nematic/batch_analyze_nematic_R8a.py
|
Python
|
gpl-3.0
| 2,379
|
[
"VTK"
] |
551f8ffc53ace3fa812b479f510d3798652b82912ffc21a84da48367a607375a
|
#!/usr/bin/python
"""
PYCAL v1.01
changelog:
-
TODO view with the whole month -> cal..
TODO use the colour system for something
TODO support for multiple calendars
TODO a -C option for the config file... and then the absolute path
TODO indicate more clearly where it stores the todo file, and also handle the config import when running from the iPhone
TODO on add, give a proper error when no event is given
TODO what if there is a False? i.e. a False option for every function call and a decent error msg
TODO rm option or a replace option? -> replace only the event?
TODO asterisk or recurring events
TODO update system from gcal?
TODO bar-style interface for ls?? (low prio)
"""
import os
import fileinput, sys
import datetime
from datetime import date
import sys
#TODO: include file with birthdays etc...
from config import *
#TODO: asterisk (recurring events)
def isnumber(testval):
try:
int(testval)
return True
except ValueError:
return False
def extract_timepoints_fromline(line):
return line[:line.find(" ")]
def extract_startendpoint(timepointsstring):
    #takes the argument and splits it if necessary into the start and end point.
if timepointsstring.count("-") == 1:
split_place = timepointsstring.find("-")
start_point = timepointsstring[:split_place]
end_point = timepointsstring[split_place+1:]
else:
#print("Incorrect number of times specified")
return timepointsstring, timepointsstring;
#fixed here
return start_point, end_point;
def extract_daymonthyear(datestring):
#takes the datestring and turns it into day month year
#00.00.00 00.00 00
day=""
month=""
year=""
datearray = datestring.split(".")
len_datearray=len(datearray)
if len_datearray < 4:
day = datearray[0]
else:
print("Too much dots in date specification")
return False;
if len_datearray > 1:
month = datearray[1]
if len_datearray == 3 :
year = datearray[2]
return day, month, year;
def extract_hourminutes(hourminutesstring):
#takes a format of 0000 and extracts an hour and minutes from it.
hour = hourminutesstring[:2]
minutes = hourminutesstring[2:]
return hour, minutes;
def fill_zero(x):
if len(x) == 1:
x = "0"+x
return x;
def create_date(day, month, year):
day= str(day)
if len(day) == 1:
day = "0" + day
month =str(month)
if len(month) ==1:
month = "0"+ month
year = str(year)
day = fill_zero(day)
month = fill_zero(month)
return day + '.' + month + '.' + year;
def checkday(day):
if 0 < day < 32:
return True;
else:
print("ERROR: Day is out of boundries")
return False;
def checkmonth(month):
if 0 < month < 13:
return True;
else:
print("ERROR: Month is out of boundries")
return False;
####Function Usage beyond this line
def checkandfill_blankdate(day, month, year, time, followday, followmonth, followyear, followtime):
    #input check; turn every filled-in value into a definite date
start = False
if followday == "":
        #in the start case; otherwise it is simply the start date piped through as the end
start = True
followdate = datetime.datetime.now()
followday = int(followdate.strftime("%d"))
followmonth = int(followdate.strftime("%m"))
followyear = int(followdate.strftime("%y"))
followtime = 0
    #At this point there is a followtime for everything.
time = int(time)
followtime = int(followtime)
if year =="":
if month == "":
if day == "":
if time <= followtime:
#extra if
if start != True:
followday = followday+1
day = followday
else:
time = int(time)
if isnumber(day):
day = int(day)
if checkday(day) == False:
return False;
if day < followday:
followmonth= followmonth + 1
else:
return False;
month = followmonth
if month > 12:
month = 1
followyear = followyear + 1
else:
time = int(time)
if isnumber(day):
day = int(day)
if checkday(day) == False:
return False;
else:
return False;
if isnumber(month):
month = int(month)
if checkmonth(month) == False:
return False;
if month < followmonth:
followyear= followyear + 1
else:
return False;
nextmonth = month + 1
nextmonth_year = followyear
if nextmonth > 12:
nextmonth = 1
nextmonth_year = nextmonth_year + 1
if day > (date(nextmonth_year, nextmonth , 1) - date(followyear, month, 1)).days:
day = 1
month = month + 1
if month > 12:
month = 1
followyear = followyear + 1
year = followyear
else:
if isnumber(day):
day = int(day)
if checkday(day) == False:
return False;
else:
return False;
if isnumber(month):
month = int(month)
if checkmonth(month) == False:
return False;
else:
return False;
if isnumber(year):
year = int(year)
"""print(day, month, year, time)
print(followday, followmonth, followyear, followtime)
continue_test = False
if year > followyear:
continue_test = True
elif year == followyear:
if month > followmonth:
continue_test = True
elif month == followmonth:
if day > followday:
continue_test = True
elif day == followday:
if time > followtime:
continue_test = True
if continue_test!=True:
print("ERROR: Event ends before it starts")
return False;"""
return day, month, year;
def extract_time(timestring):
#takes the timesrting and makes it a good formatted (0000) time
len_timestring = len(timestring)
if len_timestring ==4:
time = timestring
elif len_timestring==2:
time = timestring + "00"
elif len_timestring==3:
time = "0" + timestring
elif len_timestring == 1:
time = "0" + timestring + "00"
elif len_timestring==0:
time = "0000"
else:
print("Specified time-format too long")
return False;
return time;
def extract_datetime(pointstring):
#datetimestring = uit het eerste argument alles voor de -
#this function processes everything to a Type of event, day, month, year and time
pointstring_count=pointstring.count(":")
if pointstring_count==0:
time = extract_time("")
date = pointstring
elif pointstring_count==1:
split_place = pointstring.find(":")
date = pointstring[:split_place]
time = extract_time(pointstring[split_place+1:])
else:
print("Incorrect format of time")
return False;
return date, time;
def filter_lines_date(calendar, day, month, year, ask_date):
ls = []
f = open(calendar, 'r')
line_nr = 1
for line in f:
add = False
add_start = False
check = False
#TODO: here it is not possible to execute the line beneath
(start_point, end_point) = extract_startendpoint(extract_timepoints_fromline(line))
(start_date, start_time) = extract_datetime(start_point)
(end_date, end_time) = extract_datetime(end_point)
(start_day, start_month, start_year) = extract_daymonthyear(start_date)
(end_day, end_month, end_year) = extract_daymonthyear(end_date)
event = line[line.find(" ")+1:-1]
if isnumber(start_day):
if isnumber(end_day):
if isnumber(start_month):
if isnumber(end_month):
if isnumber(start_year):
if isnumber(end_year):
f_start_day = int(start_day)
f_end_day = int(end_day)
f_start_month = int(start_month)
f_end_month = int(end_month)
f_start_year = int(start_year)
f_end_year = int(end_year)
check = True
if check == True:
if f_start_year < year < f_end_year:
add=True
else:
if f_start_year < year:
add_start=True
elif f_start_year == year:
if f_start_month < month:
add_start=True
elif f_start_month == month:
if f_start_day <= day:
add_start=True
if add_start==True:
if f_end_year > year:
add=True
elif f_end_year == year:
if f_end_month > month:
add=True
elif f_end_month == month:
if f_end_day >=day:
add=True
#else:
#print("error in file: line:" + str(line_nr))
if add==True:
ls.append([line_nr, start_date, start_time, end_date, end_time, event])
high_line_nr = line_nr
line_nr = line_nr+1
ls.sort(key=lambda x: x[4])
ls.sort(key=lambda x: x[3])
ls.sort(key=lambda x: x[2])
ls.sort(key=lambda x: x[1])
len_ls=len(ls)
ls_nr = 0
#TODO
    #more efficient below: compare with the current date to drop as much as possible, replace the ls[ls_nr] lookups with shorter versions and possibly add the drawings
while ls_nr < len_ls:
if ls[ls_nr][1] == ls[ls_nr][3]:
print(str(ls[ls_nr][0]) + " "*(len(str(high_line_nr))-len(str(ls[ls_nr][0]))) +"| " + ls[ls_nr][2] + "-" + ls[ls_nr][4] + " |" + ls[ls_nr][5])
elif ask_date == ls[ls_nr][1]:
print(str(ls[ls_nr][0]) + " "*(len(str(high_line_nr))-len(str(ls[ls_nr][0]))) + "| " + ls[ls_nr][2] + "-" + ls[ls_nr][3] + ":" + ls[ls_nr][4] + " |" + ls[ls_nr][5])
elif ask_date == ls[ls_nr][3]:
print(str(ls[ls_nr][0]) + " "*(len(str(high_line_nr))-len(str(ls[ls_nr][0]))) + "| " + ls[ls_nr][1] + ":" + ls[ls_nr][2] + "-" + ls[ls_nr][4] + " |" + ls[ls_nr][5])
else:
print(str(ls[ls_nr][0]) + " "*(len(str(high_line_nr))-len(str(ls[ls_nr][0]))) + "| " + ls[ls_nr][1] + ":" + ls[ls_nr][2] + "-" + ls[ls_nr][3] + ":" + ls[ls_nr][4] + " |" + ls[ls_nr][5])
ls_nr += 1
f.close()
return
def list_events_day(calendar, date):
if date == "today":
translatedate = datetime.datetime.now()
day =int(translatedate.strftime("%d"))
month=int(translatedate.strftime("%m"))
year=int(translatedate.strftime("%y"))
date = create_date(day, month, year)
print("Today ~" + translatedate.strftime("%a") + " " + date + ':')
filter_lines_date(calendar, day, month, year, date)
elif date == "0":
translatedate = datetime.datetime.now()
day =int(translatedate.strftime("%d"))
month=int(translatedate.strftime("%m"))
year=int(translatedate.strftime("%y"))
date = create_date(day, month, year)
print("Today ~" + translatedate.strftime("%a") + " " + date + ':')
filter_lines_date(calendar, day, month, year, date)
elif date[0] == "+":
add = int(date[1:])
translatedate = datetime.datetime.now()+ (datetime.timedelta(days=1))*add
day =int(translatedate.strftime("%d"))
month=int(translatedate.strftime("%m"))
year=int(translatedate.strftime("%y"))
date = create_date(day, month, year)
#print("+"+ str(add) + " ~" + translatedate.strftime("%a") + " " + date + ':')
os.system("echo -e '\033[1;34m+"+ str(add) + " ~" + translatedate.strftime("%a") + " " + date + ":\033[0m'")
#Make this print green? Or blue?
filter_lines_date(calendar, day, month, year, date)
elif date[0] == "-":
add = int(date[1:])
translatedate = datetime.datetime.now()- (datetime.timedelta(days=1))*add
day =int(translatedate.strftime("%d"))
month=int(translatedate.strftime("%m"))
year=int(translatedate.strftime("%y"))
date = create_date(day, month, year)
#print("-"+ str(add) + " ~" + translatedate.strftime("%a") + " " + date + ':')
os.system("echo -e '\033[1;34m-"+ str(add) + " ~" + translatedate.strftime("%a") + " " + date + ":\033[0m'")
filter_lines_date(calendar, day, month, year, date)
###here accept a range given with "-", resolve it via the point strings, then increment translatedate and compare
elif date.count("-") == 1:
split_place = date.find("-")
start_point = date[:split_place]
end_point = date[split_place+1:]
else:
(start_date, start_time) = extract_datetime(date)
(start_day, start_month, start_year) = extract_daymonthyear(start_date)
day, month, year = checkandfill_blankdate(start_day, start_month, start_year, start_time, "", "", "", "")
date = create_date(day, month, year)
print(date + ':')
##TODO convert the date into the real date here
filter_lines_date(calendar, day, month, year, date)
"""elif date = "monday":
elif date = "mon":
elif date = "tuesday":
elif date = "tue":
elif date = "wednesday":
elif date = "wed":
elif date = "thurday":
elif date = "thu":
elif date = "friday":
elif date = "fri":
elif date = "saturday":
elif date = "sat":
elif date = "sunday":
elif date = "sun":
else:
"""
return;
def add_event(date, event, calendar):
(start_point, end_point) = extract_startendpoint(date)
(start_date, start_time) = extract_datetime(start_point)
(end_date, end_time) = extract_datetime(end_point)
(start_day, start_month, start_year) = extract_daymonthyear(start_date)
(end_day, end_month, end_year) = extract_daymonthyear(end_date)
start_day, start_month, start_year = checkandfill_blankdate(start_day, start_month, start_year, start_time, "", "", "", "")
start_date=create_date(start_day, start_month, start_year)
end_day, end_month, end_year = checkandfill_blankdate(end_day, end_month, end_year, end_time, start_day, start_month, start_year, start_time)
end_date=create_date(end_day, end_month, end_year)
#write the line below to the calendar file
append = start_date + ":" + start_time + "-" + end_date + ":" + end_time + " " + event
print("ADDED: " +"'"+ append + "'" + " to " + calendar)
print("ON LINE:")
os.system('echo $(($(wc -l < ' + calendar+ ')+1))')
f = open(calendar, "a")
f.write(append + "\n")
f.close()
def delete_event(calendar, line_nr):
line_nr= int(line_nr)
for line in fileinput.input(calendar, inplace=1, backup='.orig'):
if fileinput.lineno() == line_nr:
pass
else:
print(line[:-1])
fileinput.close()
print("DELETED:")
os.system('grep -F -x -v -f '+ calendar + ' ' +calendar+'.orig')
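# (added note) the grep above prints every line of the .orig backup that no
# longer appears verbatim in the updated file, i.e. exactly the deleted event.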
#def search_event(calendar,
####SCRIPT beyond this line
len_sysargv = len(sys.argv)
if len_sysargv == 1:
list_events_day(calendarfile , "today")
#print today as in day format
elif sys.argv[1] == "ls":
if len_sysargv == 2:
#list all coming events; measure the width, and if a printed line exceeds it, subtract it from the amount of screen space left (+ 1 line reserved for the next command)
#start by printing the next 5
list_events_day(calendarfile , "today")
elif len_sysargv == 3:
list_events_day(calendarfile , sys.argv[2])
else:
print("too much arguments")
"""elif sys.argv[1] = "list":
#same as above"""
elif sys.argv[1] == "r":
if len_sysargv == 2:
for d in range(0,7):
list_events_day(calendarfile , '+'+str(d))
if len_sysargv == 3:
for d in range(0,int(sys.argv[2])):
list_events_day(calendarfile, '+'+str(d))
elif len_sysargv != 2:
if sys.argv[1] == "a":
arg_event = 3
event = sys.argv[arg_event]
arg_event = 4
while arg_event < len_sysargv:
event = event + " " + sys.argv[arg_event]
arg_event = arg_event+1
add_event(sys.argv[2], event, calendarfile)
#add a new event
# argv[2] is always the time and argv[3] is the event; glue them together into the file, not that hard?
"""elif sys.argv[1] = "add":
elif sys.argv[1] = "d":
elif sys.argv[1] = "day":
#Whole day
start_hour = 0000
#function above check that the time is bigger -> otherwise its the next day
end_hour = 0000 """
elif sys.argv[1] =="rm":
line_nr=sys.argv[2]
delete_event(calendarfile, line_nr)
elif sys.argv[1] =="s":
search_term=sys.argv[2]
search_event(calendarfile, search_term)
|
polarsbear/pycal
|
calTOBESURE.py
|
Python
|
gpl-2.0
| 17,653
|
[
"Elk"
] |
8bf720f9d4fba79b597e879e03a847e5c7334561c89726b90ffdf5fc5f5cc591
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Multivariate autoregressive model (vector autoregression).
Implements the following model (num_blocks = max(ar_order, ma_order + 1)):
y(t, 1) = \sum_{i=1}^{ar_order} ar_coefs[i] * y(t - 1, i)
y(t, i) = y(t - 1, i - 1) + ma_coefs[i - 1] * e(t) for 1 < i < num_blocks
y(t, num_blocks) = y(t - 1, num_blocks - 1) + e(t)
Where e(t) are Gaussian with zero mean and learned covariance.
Each element of ar_coefs and ma_coefs is a [num_features x num_features]
matrix. Each y(t, i) is a vector of length num_features. Indices in the above
equations are one-based. Initial conditions y(0, i) come from prior state (which
may either be learned or left as a constant with high prior covariance).
If ar_order > ma_order, the observation model is:
y(t, 1) + observation_noise(t)
If ma_order >= ar_order, it is (to observe the moving average component):
y(t, 1) + y(t, num_blocks) + observation_noise(t)
Where observation_noise(t) are Gaussian with zero mean and learned covariance.
This implementation uses a formulation which puts all of the autoregressive
coefficients in the transition equation for the observed component, which
enables learning using truncated backpropagation. Noise is not applied directly
to the observed component (with the exception of standard observation noise),
which further aids learning of the autoregressive coefficients when VARMA is in
an ensemble with other models (in which case having an observation noise term is
usually unavoidable).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
class VARMA(state_space_model.StateSpaceModel):
"""A VARMA model implementation as a special case of the state space model."""
def __init__(self,
autoregressive_order,
moving_average_order,
configuration=state_space_model.StateSpaceModelConfiguration()):
"""Construct a VARMA model.
The size of the latent state for this model is:
num_features * max(autoregressive_order, moving_average_order + 1)
Square matrices of this size are constructed and multiplied.
Args:
autoregressive_order: The maximum autoregressive lag.
moving_average_order: The maximum moving average lag, after which
transient deviations are expected to return to their long-term mean.
configuration: A StateSpaceModelConfiguration object.
"""
self.ar_order = autoregressive_order
self.ma_order = moving_average_order
self.state_num_blocks = max(autoregressive_order, moving_average_order + 1)
super(VARMA, self).__init__(configuration=configuration)
self.state_dimension = self.state_num_blocks * self.num_features
def _define_parameters(self, observation_transition_tradeoff_log=None):
with variable_scope.variable_scope(self._variable_scope):
# TODO(allenl): Evaluate parameter transformations for AR/MA coefficients
# which improve interpretability/stability.
self.ar_coefs = variable_scope.get_variable(
name="ar_coefs",
shape=[self.num_features, self.num_features, self.ar_order],
dtype=self.dtype,
initializer=init_ops.zeros_initializer())
self.ma_coefs = variable_scope.get_variable(
name="ma_coefs",
initializer=array_ops.tile(
linalg_ops.eye(self.num_features, dtype=self.dtype)[None, :, :],
[self.ma_order, 1, 1]),
dtype=self.dtype)
super(VARMA, self)._define_parameters(
observation_transition_tradeoff_log=observation_transition_tradeoff_log)
def get_state_transition(self):
"""Construct state transition matrix from VARMA parameters.
Returns:
the state transition matrix. It has shape
[self.state_dimension, self.state_dimension].
"""
# Pad any unused AR blocks with zeros. The extra state is necessary if
# ma_order >= ar_order.
ar_coefs_padded = array_ops.reshape(
array_ops.pad(self.ar_coefs,
[[0, 0], [0, 0],
[0, self.state_num_blocks - self.ar_order]]),
[self.num_features, self.state_dimension])
shift_matrix = array_ops.pad(
linalg_ops.eye(
(self.state_num_blocks - 1) * self.num_features, dtype=self.dtype),
[[0, 0], [0, self.num_features]])
return array_ops.concat([ar_coefs_padded, shift_matrix], axis=0)
def get_noise_transform(self):
"""Construct state noise transform matrix from VARMA parameters.
Returns:
the state noise transform matrix. It has shape
[self.state_dimension, self.num_features].
"""
# Noise is broadcast, through the moving average coefficients, to
# un-observed parts of the latent state.
ma_coefs_padded = array_ops.reshape(
array_ops.pad(self.ma_coefs,
[[self.state_num_blocks - 1 - self.ma_order, 0], [0, 0],
[0, 0]]),
[(self.state_num_blocks - 1) * self.num_features, self.num_features],
name="noise_transform")
# Deterministically apply noise to the oldest component.
return array_ops.concat(
[ma_coefs_padded,
linalg_ops.eye(self.num_features, dtype=self.dtype)],
axis=0)
def get_observation_model(self, times):
"""Construct observation model matrix from VARMA parameters.
Args:
times: A [batch size] vector indicating the times observation models are
requested for. Unused.
Returns:
the observation model matrix. It has shape
[self.num_features, self.state_dimension].
"""
del times # StateSpaceModel will broadcast along the batch dimension
if self.ar_order > self.ma_order or self.state_num_blocks < 2:
return array_ops.pad(
linalg_ops.eye(self.num_features, dtype=self.dtype),
[[0, 0], [0, self.num_features * (self.state_num_blocks - 1)]],
name="observation_model")
else:
# Add a second observed component which "catches" the accumulated moving
# average errors as they reach the end of the state. If ar_order >
# ma_order, this is unnecessary, since accumulated errors cycle naturally.
return array_ops.concat(
[
array_ops.pad(
linalg_ops.eye(self.num_features, dtype=self.dtype),
[[0, 0], [0,
self.num_features * (self.state_num_blocks - 2)]]),
linalg_ops.eye(self.num_features, dtype=self.dtype)
],
axis=1,
name="observation_model")
def get_state_transition_noise_covariance(
self, minimum_initial_variance=1e-5):
# Most state space models use only an explicit observation noise term to
# model deviations from expectations, and so a low initial transition noise
# parameter is helpful there. Since deviations from expectations are also
# modeled as transition noise in VARMA, we set its initial value based on a
# slight over-estimate of the empirical observation noise.
if self._input_statistics is not None:
feature_variance = self._scale_variance(
self._input_statistics.series_start_moments.variance)
initial_transition_noise_scale = math_ops.log(
math_ops.maximum(
math_ops.reduce_mean(feature_variance), minimum_initial_variance))
else:
initial_transition_noise_scale = 0.
state_noise_transform = ops.convert_to_tensor(
self.get_noise_transform(), dtype=self.dtype)
state_noise_dimension = state_noise_transform.get_shape()[1].value
return math_utils.variable_covariance_matrix(
state_noise_dimension, "state_transition_noise",
dtype=self.dtype,
initial_overall_scale_log=initial_transition_noise_scale)
|
benoitsteiner/tensorflow-xsmm
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py
|
Python
|
apache-2.0
| 8,893
|
[
"Gaussian"
] |
848a8c4a70100f4a2079d3533f4b707b33fcccd8880b35e2f7398f64d028c219
|
#!/usr/bin/env python
"""
Chuckchi_Winds_NARR_model_prep.py
Retrieve NARR winds for one location
Icy Cape Line, Ckip2
Latitude = 70.8401 Longitude = 163.2054
Filter NARR winds with a triangular filter (1/4, 1/2, 1/4) and output every 3hrs
Provide U, V
Save in EPIC NetCDF standard
"""
#System Stack
import datetime
import sys
import csv
#Science Stack
import numpy as np
from netCDF4 import Dataset
# User Stack
import utilities.haversine as sphered
from utilities import ncutilities as ncutil
# Visual Stack
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, shiftgrid
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2014, 01, 13)
__modified__ = datetime.datetime(2014, 01, 13)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'NARR','station_1','3hr filtered', 'U,V','Winds', 'Chuckchi'
"""------------------------General Modules-------------------------------------------"""
def from_netcdf(infile):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
ncdata = ncutil.ncreadfile_dic(nchandle, params)
ncutil.ncclose(nchandle)
return (ncdata, params)
def from_netcdf_1dsplice(infile, height_ind, lat_ind, lon_ind):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
print "Parameters available: "
print params
ncdata = ncutil.ncreadfile_dic_slice(nchandle, params, height_ind=height_ind, lat_ind=lat_ind, lon_ind=lon_ind)
ncutil.ncclose(nchandle)
return ncdata
def latlon_grid(infile):
nchandle = ncutil.ncopen(infile)
lat_lon = ncutil.get_geocoords(nchandle)
ncutil.ncclose(nchandle)
return (lat_lon)
def csvread(ifile):
date, time, uwnd, vwnd, atemp, bpress = [], [], [], [], [], []
with open(ifile, 'rb') as csv_file:
csv_reader = csv.reader(csv_file)
next(csv_reader) #skip header
""" DAT TIM WU WV AT BP """
for row in csv_reader:
try:
r0,r1,r2,r3,r4,r5,r6 = row[0].strip().split()
except ValueError:
r0,r1,r2,r3,r4,r5 = row[0].strip().split()
date.append(r0)
time.append(r1)
uwnd.append(r2)
vwnd.append(r3)
return {'DAT': np.array(date, int), 'TIM':np.array(time, float), 'WU':np.array(uwnd, float),\
'WV':np.array(vwnd, float)}
def write2epic( file_name, stationid, time, lat_lon, data ):
ncinstance = ncutil.EPIC_NC(savefile=file_name)
ncinstance.file_create()
ncinstance.sbeglobal_atts()
ncinstance.PMELglobal_atts(Station_Name=stationid, file_name=( __file__.split('/')[-1]) )
ncinstance.dimension_init(len_time=len(time[0]))
ncinstance.variable_init()
ncinstance.add_coord_data(time1=time[0], time2=time[1], latitude=lat_lon[0], longitude=-1 * lat_lon[1], \
depth_level=10. )
ncinstance.add_data('WU_422', data[0])
ncinstance.add_data('WV_423', data[1])
ncinstance.add_data('AT_21', data[2])
ncinstance.close()
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
""" Ingest EPIC date or NCEP Date and provide python serial date"""
if file_flag == 'EPIC':
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
try: #if input is an array
python_time = [None] * len(file_time)
for i, val in enumerate(file_time):
pyday = file_time[i] - offset
pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time[i] = (pyday + pyfrac)
except TypeError: #scalar input, len() is undefined
pyday = file_time - offset
pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time = (pyday + pyfrac)
elif file_flag in ('NARR', 'NCEP'):
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
else:
print "time flag not recognized"
sys.exit()
return np.array(python_time)
def pydate2EPIC(file_time):
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
time1 = np.floor(file_time) + offset #truncate to get day and add 2440000 for true julian day
time2 = ( file_time - np.floor(file_time) ) * (1000. * 60. * 60.* 24.) #milliseconds since 0000GMT
return(time1, time2)
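# (added check) datetime.date(1968, 5, 23).toordinal() is 718576, so
# offset = 2440000 - 718576 = 1721424 and file_time = 718576.0 round-trips
# to (time1, time2) = (2440000, 0.0), the EPIC reference epoch.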
"---"
def triangle_smoothing(data_in):
weights=np.array([0.25,0.5,0.25])
filtered_data = np.convolve(data_in,np.array(weights),'same') #edge effects
return filtered_data
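# (added example) np.convolve([0., 4., 0.], [0.25, 0.5, 0.25], 'same')
# returns [1., 2., 1.]: each sample keeps half its value and borrows a quarter
# from each neighbour; the first and last samples are edge-affected.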
"""------------------------- Topo Modules -------------------------------------------"""
def etopo5_data():
""" read in etopo5 topography/bathymetry. """
file = '/Volumes/WDC_internal/Users/bell/in_and_outbox/MapGrids/etopo5.nc'
etopodata = Dataset(file)
topoin = etopodata.variables['bath'][:]
lons = etopodata.variables['X'][:]
lats = etopodata.variables['Y'][:]
etopodata.close()
topoin,lons = shiftgrid(0.,topoin,lons,start=False) # -360 -> 0
lons, lats = np.meshgrid(lons, lats)
return(topoin, lats, lons)
"""------------------------- Main Modules -------------------------------------------"""
### list of files
NARR = '/Volumes/WDC_internal/Users/bell/Data_Local/Reanalysis_Files/NARR/3hourly/'
infile = [NARR + 'uwnd.10m.1994.nc'] #used just to get grid sections
### Grab grid points for future slicing - assume grid is same in all model output
lat_lon = latlon_grid(infile[0])
"""
C1 70.8305, 163.1195
C2 71.2162, 164.3008
C3 71.8191, 165.9820
"""
station_name = [ 'C2',]
sta_lat = [71.2162,]
sta_long = [164.3008,]
#Find NARR nearest point to moorings - haversine formula
station_1 = sphered.nearest_point([sta_lat[0],-1 * sta_long[0]],lat_lon['lat'],lat_lon['lon'], '2d')
station_1_modelpt = [lat_lon['lat'][station_1[3],station_1[4]],lat_lon['lon'][station_1[3],station_1[4]]]
print "station_1 nearest point to %s, %s which is lat:%s , lon:%s" \
% (sta_lat[0], sta_long[0], station_1_modelpt[0], station_1_modelpt[1])
#loop over all requested data
years = range(2015,2016,1)
for yy in years:
# retrieve only these location's data
# uwnd
infile = NARR + 'uwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
station_1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
#filter data
station_1u_f = triangle_smoothing(station_1_data['uwnd'])
# retrieve only these location's data
# vwnd
infile = NARR + 'vwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
station_1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
#filter data
station_1v_f = triangle_smoothing(station_1_data['vwnd'])
# retrieve only these location's data
# sfc air temp
infile = NARR + 'air.2m.'+ str(yy) + '.nc'
print "Working on file " + infile
station_1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
station_1at = station_1_data['air'] - 273.15 #Kelvin -> Celsius
#convert to EPIC time
pydate = date2pydate(station_1_data['time'], file_flag='NARR')
epic_time, epic_time1 = pydate2EPIC(pydate)
# output u,v wind components from model grid points
save_to_nc = True
if save_to_nc:
# write to NetCDF
outfile = 'data/NARR_' + station_name[0] + '_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
write2epic( outfile, station_name[0], [epic_time, epic_time1], station_1_modelpt, [station_1u_f, station_1v_f, station_1at])
plot_geoloc = True
if plot_geoloc:
(topoin, elats, elons) = etopo5_data()
fig = plt.figure()
ax = plt.subplot(111)
m = Basemap(resolution='i',projection='merc', llcrnrlat=55, \
urcrnrlat=75,llcrnrlon=-180,urcrnrlon=-145, lat_ts=60)
# Mooring Data
x_moor, y_moor = m([-1. * sta_long[0],],sta_lat)
x_close, y_close = m([station_1_modelpt[1],], [station_1_modelpt[0],])
#ETOPO 5 contour data
ex, ey = m(elons, elats)
CS = m.contourf(ex,ey,topoin, levels=range(250,5000,250), cmap='gray_r', alpha=.75) #colors='black'
CS = m.contour(ex,ey,topoin, levels=range(250,5000,250), linewidths=0.2, colors='black', alpha=.75) #
CS = m.contour(ex,ey,topoin, levels=[-1000, -200, -100], linestyles='--', linewidths=0.2, colors='black', alpha=.75) #
#plot points
m.scatter(x_close,y_close,20,marker='+',color='b')
m.scatter(x_moor,y_moor,20,marker='o',color='g')
m.drawcountries(linewidth=0.5)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(60,75,2.),labels=[1,0,0,0],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw parallels
m.drawmeridians(np.arange(-180,-145,2.),labels=[0,0,0,1],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw meridians
#m.fillcontinents(color='black')
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]) )
plt.savefig('images/Chuckchi_region.png', bbox_inches='tight', dpi = (100))
plt.close()
|
shaunwbell/FOCI_Analysis
|
ReanalysisRetreival_orig/Chuckchi_Winds/Chuckchi_WindsSFCtemp_NARR_model_prep.py
|
Python
|
mit
| 9,929
|
[
"NetCDF"
] |
352070b7cc466585d12e0ac0e4f25f6e36038bb9ad1a08b7aa83adebd44bece2
|
# -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
outputDir = '/Users/seb/Desktop/Geometry-diskout-multi-contour/'
inputFile = '/Users/seb/Downloads/ParaViewData-3.98.1/Data/disk_out_ref.ex2'
# -----------------------------------------------------------------------------
from paraview import simple
from tonic.paraview.dataset_builder import *
# -----------------------------------------------------------------------------
# Pipeline creation
# -----------------------------------------------------------------------------
reader = simple.OpenDataFile(inputFile)
reader.PointVariables = ['Temp', 'V', 'Pres', 'AsH3', 'GaMe3', 'CH4', 'H2']
clip = simple.Clip(Input=reader)
clip.ClipType.Normal = [0.0, 1.0, 0.0]
streamLines = simple.StreamTracer(
Input = reader,
SeedType="High Resolution Line Source",
Vectors = ['POINTS', 'V'],
MaximumStreamlineLength = 20.16)
streamLines.SeedType.Point2 = [5.75, 5.75, 10.15999984741211]
streamLines.SeedType.Point1 = [-5.75, -5.75, -10.0]
streamTubes = simple.Tube(Input=streamLines, Radius = 0.2)
contourFilter = simple.Contour( Input = reader,
PointMergeMethod = "Uniform Binning",
ContourBy = 'AsH3',
Isosurfaces = [0.1],
ComputeScalars = 1)
contourValues = [ 0.09 + float(x)*0.01 for x in range(9) ]
sections = {
"LookupTables": {
"AsH3": {
"range": [
0.0804768,
0.184839
],
"preset": "wildflower"
},
"Pres": {
"range": [
0.00678552,
0.0288185
],
"preset": "cool"
},
"Temp": {
"range": [
293.15,
913.15
],
"preset": "spectralflip"
}
}
}
sceneDescription = {
'scene': [
{
'name': 'Stream lines',
'source': streamTubes,
'colors': {
'Pres': {'location': 'POINT_DATA' },
'Temp': {'location': 'POINT_DATA' }
}
},{
'name': 'Clip',
'source': clip,
'colors': {
'Pres': {'location': 'POINT_DATA' },
'Temp': {'location': 'POINT_DATA' }
}
},{
'name': 'Contour AsH3',
'source': contourFilter,
'colors': {
'AsH3': {'location': 'POINT_DATA' },
'Pres': {'location': 'POINT_DATA' },
'Temp': {'location': 'POINT_DATA' }
}
}
]
}
# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
# Create Image Builder
dsb = GeometryDataSetBuilder(outputDir, sceneDescription, sections=sections)
dsb.getDataHandler().registerArgument(priority=1, name='contour', values=contourValues, ui='slider', loop='modulo')
dsb.getDataHandler().registerArgument(priority=2, name='clip', values=range(5), ui='slider', loop='modulo')
dsb.start()
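# (added note) The nested loops below visit every (clip, contour) combination;
# each registered argument above is presumably exposed in the viewer as a
# slider whose values wrap around (loop='modulo') at the end of the list.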
for v in dsb.getDataHandler().clip:
for c in dsb.getDataHandler().contour:
clip.ClipType.Origin = [0.0, float(2*v) - 5.0, 0.0]
contourFilter.Isosurfaces = [ c ]
dsb.writeData()
dsb.stop()
|
Kitware/tonic-data-generator
|
scripts/paraview/samples/Geometry-multicontour-diskout.py
|
Python
|
bsd-3-clause
| 3,466
|
[
"ParaView"
] |
e7d029dc34cb9e791b9b2c92e553f3d916fea40cffa864c4de655e8cabe0ad01
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = "0.8.6"
__author__ = "Pierre Legrand (pierre.legrand@synchrotron-soleil.fr)"
__date__ = "20-12-2013"
__copyright__ = "Copyright (c) 2006-2013 Pierre Legrand"
__license__ = "New BSD http://www.opensource.org/licenses/bsd-license.php"
# The environment variable XDSHOME, if set, defines the place where the xds
# executables will be searched. The parallelized execs (xds_par, xscale_par)
# will be used by default.
import sys
import os
atom_names = ['Ag', 'Al', 'Ar', 'As', 'Au', 'B', 'Ba', 'Be', 'Bi', 'Br',
'C', 'Ca', 'Cd', 'Ce', 'Cl', 'Co', 'Cr', 'Cs', 'Cu', 'Dy', 'Er', 'Eu', 'F',
'Fe', 'Ga', 'Gd', 'Ge', 'H', 'He', 'Hf', 'Hg', 'Ho', 'I', 'In', 'Ir', 'K',
'Kr', 'La', 'Li', 'Lu', 'Mg', 'Mn', 'Mo', 'N', 'Na', 'Nb', 'Nd', 'Ne',
'Ni', 'O', 'Os', 'P', 'Pb', 'Pd', 'Pm', 'Pr', 'Pt', 'Pu', 'Rb', 'Re',
'Rh', 'Rn', 'Ru', 'S', 'Sb', 'Sc', 'Se', 'Si', 'Sm', 'Sn', 'Sr', 'Ta',
'Tb', 'Tc', 'Te', 'Th', 'Ti', 'Tl', 'Tm', 'U', 'V', 'W', 'Xe', 'Y',
'Yb', 'Zn', 'Zr']
options = ["CCP4","CCP4F","CNS","SHELX","SOLVE","EPMR","CRANK",
"AMORE","SHARP","PHASER","REPLACE"]
usage = """
USAGE : %s FILE OPTIONS [free_hkl_to_inherit] [nSites] [atomType] EXPORT_MODE \n
EXPORT_MODE can be one of these:\n
%s\n
FILE is a XDS or XSCALE reflection file (usually XDS_ASCII.HKL).\n
OPTIONS:
-a force anomal output (Friedel's law = False)
-n force normal output (Friedel's law = True)
-m force merged output
-u force unmerged output
-f force generation of free reflection flag
-nf force no generation of free reflection flag
Default is keeping the XDS input file settings.
-l label, or -l=label
In case of CCP4 export, give a new label columns.
The defaults labels: FP, SIGFP, DANO, SIGDANO, ISYM with
-l pk or -l=pk becomes:
FP_pk, SIGFP_pk, DANO_pk ... in CCP4 mode
FP_pk, SIGFP_pk, F(+)_pk, SIGF(+)_pk, ... in PHASER mode
free_hkl_to_inherit: is a reflection file containing a previously
selected set of free reflection to be kept in the newly
exported reflection file for calculation of unbiased Rfree.
The accepted file format are: SHELX, CNS and MTZ (with the
following labels: FP=FP SIGFP=SIGFP FREE=FreeR_flag).\n
nSites: integer describing the number of heavy atom sites expected.\n
atomType: Symbol of the heavy atom type expected. Only one or two
letters symbols are recognised (like I, Se, S, Hg).\n
NOTE:
i) Keywords free_hkl_to_inherit, nSites, atomType and EXPORT_MODE
can be given in any order.
ii) Cell parameters, space group number and wavelength are taken
from the XDS reflection file header.
iii) If Friedel's law == False and the heavy atom type is not given, a
guess is made based on the atom-type edge closest to the wavelength.
iv) All the exported files are created in a new local directory named
after the requested mode (./ccp4, ./phaser, ./solve...).
v) In most modes, custom bash scripts are created to allow a rapid
interactive exploration of data processing.
EXAMPLES:
xdsconv.py XDS_ASCII.HKL shelx 12 Se
xdsconv.py solve XDS_ASCII.HKL Se 12
xdsconv.py 12 Se phaser XDS_ASCII.HKL
xdsconv.py XDS_ASCII.HKL ccp4 -n FreeR_reference.mtz
xdsconv.py XDS_ASCII.HKL ccp4 Se 12 -l=peak FreeR_reference.mtz
"""
fmt_inp = """
<== Input file: %(file_name_in)s
<<< Space group number: %(spgn_in)s
<<< Cell parameters: %(cell_str)s
<<< Friedel's law: %(friedel_in)s
<<< Merge: %(merge_in)s
<<< Template: %(ID)s
<<< Wavelength %(wavelength).4f
"""
fmt_outp = """==> Output file: %(file_name_out)s
==> Output mode: %(mode)s
>>> Friedel's law: %(friedel_out)s
>>> Merge: %(merge_out)s
>>> Resolution limit: %(res_low).2f - %(res_high).2f Angstroem
"""
fmt_outp_ha = """
>>> From Wavelength %(wavelength).4f
??? Guessed Atom Edge %(ha_name)s
??? F' %(fp).2f
??? F" %(fpp).2f
"""
xdsconv_script = """UNIT_CELL_CONSTANTS= %(cell_str)s
SPACE_GROUP_NUMBER= %(spgn_in)s
INPUT_FILE=%(file_name_in)s %(file_type)s %(res_low).2f %(res_high).2f
OUTPUT_FILE=%(dir_mode)s%(file_name_out)s %(mode_out)s FRIEDEL'S_LAW=%(friedel_out)s MERGE=%(merge_out)s
"""
shelxc_script = """SAD %(file_name_out)s
CELL %(cell_str)s
SPAG %(spg_name)s
NTRY 1000
FIND %(num_sites)d
SFAC %(ha_name)s
MIND -3.5
MAXM 2
"""
shelxall_script = """#!/bin/sh
name="NORM"
nsites=6
pats="yes"
res_range="3.3 3.5 3.8 4.0 4.4 5.0"
search_sites_shelx () {
ID=$1 # Identifier
RES=$2 # High resolution limit
NSITES=$3 # Number of sites
PATS=$4 # Patterson seeding option
shelxc ${ID} << EOFC > ${ID}_shelxc.log
SAD %(file_name_out)s
CELL %(cell_str)s
SPAG %(spg_name)s
SHEL 999 ${RES}
NTRY 1000
FIND ${NSITES}
SFAC %(ha_name)s
ESEL 1.3
MIND -3.0 1
MAXM 4
EOFC
if [ $PATS != "yes" ] ; then
#grep -v PATS ${ID}_fa.ins > ${ID}_nopats_fa.ins
sed -e 's/^PATS/WEED 0.3\\nSKIP 0.5/' ${ID}_fa.ins > ${ID}_nopats_fa.ins
cp ${ID}.hkl ${ID}_nopats.hkl
cp ${ID}_fa.hkl ${ID}_nopats_fa.hkl
shelxd ${ID}_nopats_fa > ${ID}_nopats_shelxd.log
else
shelxd ${ID}_fa > ${ID}_shelxd.log
fi
}
if [ $pats != "yes" ] ; then
PN="NOPATS"
else
PN="PATS"
fi
# loop to start batch processes with different resolution limits
for res in ${res_range} ; do
id="${name}_${PN}_E1.3_N${nsites}_D${res}"
search_sites_shelx ${id} ${res} ${nsites} ${pats} &
done
# end of the loop.
"""
sitcom_script ="""#!/bin/sh
do_sitcom () {
ID=$1 # Identifier
NSITES=$2 # Number of sites
PATS=$3 # Patterson seeding option
if [ $PATS = "yes" ] ; then
cat << EOFS > ${ID}_sitcom.inp
unit_cell %(cell_str)s
space_group %(spgn_in)s
str_name ${ID}
deriv_atyp %(ha_name)s
#
# TAG WEIGHT FILE N(SOL) N(SITES)
#
read_set ${ID} 1.0 ${ID}_fa.lst 10 ${NSITES}
#read_set SHELXD2 1.0 a4_fa.lst 10 ${nsites}
#read_sol HYSS 1.0 a3_fa.pdb
#read_sol SHELXD5 1.0 a2_fa.pdb
EOFS
sitcom < ${ID}_sitcom.inp > ${ID}_sitcom.log
cat << EOFS >> tmp.all_sitcom.inp
read_sol ${ID} 1.0 ${ID}_fa.pdb ${NSITES}
EOFS
else
cat << EOFS > ${ID}_nopats_sitcom.inp
unit_cell %(cell_str)s
space_group %(spgn_in)s
str_name ${ID}_nopats
deriv_atyp %(ha_name)s
#
# TAG WEIGHT FILE N(SOL) N(SITES)
#
read_set ${ID}np 1.0 ${ID}_nopats_fa.lst 10 ${NSITES}
#read_set SHELXD2 1.0 a4_fa.lst 10 ${nsites}
#read_sol HYSS 1.0 a3_fa.pdb
#read_sol SHELXD5 1.0 a2_fa.pdb
EOFS
sitcom < ${ID}_nopats_sitcom.inp > ${ID}_nopats_sitcom.log
cat << EOFS >> tmp.all_sitcom.inp
read_sol ${ID}np 1.0 ${ID}_nopats_fa.pdb ${NSITES}
EOFS
fi
}
id="aaa"
nsites=%(num_sites)d
pats="yes"
cat << EOFS > tmp.all_sitcom.inp
unit_cell %(cell_str)s
space_group %(spgn_in)s
str_name ${id}
deriv_atyp %(ha_name)s
#
# TAG WEIGHT FILE N(SOL) N(SITES)
#
EOFS
# loop to start batch processes with different resolution limits
for res in 3.2 3.5 4.0 4.4 ; do
do_sitcom ${id}${res} ${nsites} ${pats}
done
# end of the loop
# compare all solutions
if [ $pats = "yes" ] ; then
mv tmp.all_sitcom.inp ${id}_all_sitcom.inp
sitcom < ${id}_all_sitcom.inp > ${id}_all_sitcom.log
else
mv tmp.all_sitcom.inp ${id}_all_np_sitcom.inp
sitcom < ${id}_all_np_sitcom.inp > ${id}_all_np_sitcom.log
fi
"""
xprep_script = """../%(file_name_in)s
X\nY\n\nS\nI
%(cns_sg)s
Y
A\nA\nA
100\n
%(ident)s\nY\n
%(ha_name)s\n4\n%(wavelength)s\n3.0\n
E\nP\nR\n15 3
P\n%(ident)s.pattX.ps\nX\n\n\n
P\n%(ident)s.pattY.ps\nY\n\n\n
P\n%(ident)s.pattZ.ps\nZ\n\n\n
E\nQ
"""
f2mtz_script = """
TITLE data from XDS
FILE %(file_name_out)s
SYMMETRY %(spgn_in)s
CELL %(cell_str)s
LABOUT H K L FP%(lbl)s SIGFP%(lbl)s %(cinp_ano)s %(free_lbl)s
CTYPOUT H H H F Q %(cinp_ano2)s %(free_code)s
NAME PROJECT %(ID)s CRYSTAL %(ID)s DATASET d%(ID)s
END
"""
f2mtz_phaser_script = """
TITLE data from XDS
FILE %(file_name_out)s
SYMMETRY %(spgn_in)s
CELL %(cell_str)s
LABOUT H K L FP%(lbl)s SIGFP%(lbl)s F(+)%(lbl)s SIGF(+)%(lbl)s F(-)%(lbl)s SIGF(-)%(lbl)s %(free_lbl)s
CTYPOUT H H H F Q G L G L %(free_code)s
END
"""
scala_script = """#!/bin/bash
function run_scala() {
pointless XDSIN XDS_ASCII.HKL \\
HKLOUT XDS_pointless_correct.mtz > XDS_pointless_correct.log
scala hklin XDS_pointless_correct.mtz hklout XDS_scala_correct.mtz \\
scales SCALA.scales \\
rogues SCALA.rogues \\
rogueplot SCALA.rogueplot \\
correlplot SCALA.correlplot \\
normplot SCALA.norm \\
anomplot SCALA.anom \\
> XDS_scala.log << eof-1
cycles 0
bin 12
scales constant # batch scaling is generally poorer than smoothed
anomalous on
eof-1
}
function run_aimless() {
pointless XDSIN XDS_ASCII.HKL \\
HKLOUT XDS_pointless_correct.mtz > XDS_pointless_correct.log
aimless hklin XDS_pointless_correct.mtz hklout XDS_aimless_correct.mtz \\
scales AIMLESS.scales \\
rogues AIMLESS.rogues \\
rogueplot AIMLESS.rogueplot \\
correlplot AIMLESS.correlplot \\
normplot AIMLESS.norm \\
anomplot AIMLESS.anom \\
> XDS_aimless.log << eof-2
cycles 0
bin 12
scales constant # batch scaling is generally poorer than smoothed
eof-2
}
#run_scala
run_aimless
"""
cad_crank_script = """
cad HKLIN1 temp.mtz HKLOUT output_file_name.mtz<<EOF
LABIN FILE 1 ALL
XNAME FILE 1 ALL=XDS
DWAVELENGTH FILE 1 XDS %(wavelength)s
END
"""
f2mtz_sharp_script = """
TITLE data from XDS
FILE %(file_name_out)s
SYMMETRY %(spgn_in)s
CELL %(cell_str)s
LABOUT H K L FMID SMID DANO SANO ISYM
CTYPOUT H H H F Q D Q Y
END
"""
solve_script = """#!/bin/sh
set noclobber
# run solve
solve_giant << eof-solve > solve.out
symfile %(spg_name)s.sym
cell %(cell_str)s
resolution %(res_low).3f %(res_high).3f
logfile ./solve.logfile
readformatted
unmerged
read_intensities
checksolve
mad_atom %(ha_name)s
lambda 1
label sad wavelength = %(wavelength)s
rawmadfile %(file_name_out)s
fixscattfactors
fprprv_mad %(fpp)s
nsolsite_deriv %(num_sites)d
# Add your known sites here
#xyz 0.3690 0.1194 0.0178
#xyz 0.9238 0.0377 0.0699
acceptance 0.30
!nres 200
#addsolve
SAD
eof-solve
# run resolve
resolve << eof-resolve > resolve.out
!solvent_content 0.30
!seq_file protein.seq
eof-resolve
fft HKLIN resolve.mtz MAPOUT resolve.ccp4map << eof-fft > resolve_fft.out
TITLE from resolve
LABIN F1=FP PHI=PHIM W=FOMM
END
eof-fft
"""
phaser_script = """#!/bin/bash
# script written by xdsconv.py (pierre.legrand at synchrotron-soleil.fr)
label=""
scat="%(ha_name)s"
PARTIAL_MODEL_OPTION=""
solvent_content=0.5
parrot_cycles=5
parrot_resolution=1.0
function usage () {
echo
echo " $0 [-s x] ha_sites.pdb [partial_model.pdb]"
echo " -l label, --label mtz columns label"
echo " -a, --anom anomalous scatterer"
echo " -s solvc, --solvent solvc set the solvent content"
echo " -n ncycles, --parrot_cycles number of parrot cycles"
echo " -r resol, --resolution parrot resolution cutoff"
echo " -h, --help print this help"
echo
}
while [ $# -gt 0 ]; do
case "$1" in
-h | --help | -help )
usage
exit 0 ;;
-l | --label )
label="_$2"
shift ; shift ;;
-a | --anom )
scat=$2
echo "INFO: Using anomalous scatterer: $scat"
shift; shift ;;
-s | --solvent )
solvent_content=$2
echo "INFO: Using a solvent fraction of: $solvent_content"
shift; shift ;;
-r | --resolution )
parrot_resolution=$2
echo "INFO: Cuting high resolution for parrot to: $2"
shift; shift ;;
-n | --parrot_cycles )
parrot_cycles=$2
echo "INFO: Number of parrot parrot cycles set to: $2"
shift; shift ;;
* )
hatom_pdb=$1
ID=`basename $hatom_pdb .pdb`
break ;;
esac
done
if [[ $# -eq 2 ]];then
echo "Using partial model: $2"
PARTIAL_MODEL_OPTION="PART PDB $2 ID 100"
echo $PARTIAL_MODEL_OPTION
fi
function run_phaser() {
phaser << eof > phaser_auto_${ID}.log
MODE EP_AUTO
TITLe SAD phasing of ${ID} with %(num_sites)d ${scat}
HKLIn %(last_name)s
#COMPosition PROTein SEQ PROT.seq NUM 2
COMPosition BY SOLVent
COMPosition PERCentage $solvent_content
CRYStal ${ID} DATAset sad LABIn F+=F(+)${label} SIG+=SIGF(+)${label} F-=F(-)${label} SIG-=SIGF(-)${label}
WAVElength %(wavelength)f
LLGComplete COMPLETE ON # CLASH 3.8
ATOM CRYStal ${ID} PDB $hatom_pdb SCATtering ${scat}
ATOM CHANge SCATterer ${scat}
ROOT ${ID}_auto
${PARTIAL_MODEL_OPTION}
eof
}
function run_parrot() {
hand=$1
if [ $hand = "ori" ] ; then
parrID=${ID}_auto
elif [ $hand = "inv" ] ; then
parrID=${ID}_auto.hand
fi
cparrot \\
-mtzin-wrk ${parrID}.mtz \\
-pdbin-wrk-ha ${parrID}.pdb \\
-colin-wrk-fo "/*/*/[FP${label},SIGFP${label}]" \\
-colin-wrk-hl "/*/*/[HLA,HLB,HLC,HLD]" \\
-colin-wrk-fc "/*/*/[FWT,PHWT]" \\
-colin-wrk-free "/*/*/[FreeR_flag]" \\
-mtzout ${parrID}_parrot_${solvent_content}_${parrot_cycles}.mtz \\
-colout parrot \\
-solvent-flatten \\
-histogram-match \\
-cycles ${parrot_cycles} \\
-resolution ${parrot_resolution} \\
-solvent-content ${solvent_content} \\
-ncs-average \\
> cparrot_${parrID}_${solvent_content}_${parrot_cycles}.log
# -ncs-mask-filter-radius 22 \\
}
run_phaser
run_parrot ori
test -f ${ID}_auto.hand.mtz && run_parrot inv
"""
replace_script = """#!/bin/bash
cat << eof > RF_SELF_3.inp
title ordinary self RF by slow RF from xdsme
!
print RF_SELF_3.prt
!
polar xzk
euler zyz
orthog axabz
!
acell %(cell_str)s
asymmetry %(spg_name)s
aobsfile data.hkl
acutoff 1.0 1.0 0.0
aformat 3i6, 2e10.3
! reading F power=2
! reading I power=1
apower 2
origin true
!
cutoff 0.25
!
resolution 40.0 3.5
boxsize 3 3 3
radius 20.0
geval 2
!
self true
cross false
fast true
!
sangle polar
oangle polar xyk
!
! This sets the search limits automatically
!
slimit 2 270 90 3
!
mapfile RF_SELF_3.map
peak 3.0 50
!
cntf RF_SELF_3.ps
cntl 3 9 0.5 1
cntl 1.5 2.5 0.5 4
!
stop
eof
glrf < RF_SELF_3.inp > RF_SELF.log
"""
crossec_script = """
ATOM %(ha_name)s
NWAV 1 %(wavelength)s
END
"""
cns_par = """a, b, c, alpha, beta, gamma = %(cns_cell)s
sg = %(cns_sg)s
low_res, high_res = %(res_low).3f %(res_high).3f
reflection_infile_1 = %(file_name_out)s
obs_f, obs_sigf, = "fobs", "sigma"
test_set, test_flag = "test", 1
"""
cad_script = """LABIN FILE 1 ALL
DWAVE FILE 1 %(ID)s d%(ID)s %(wavelength).5f\nEND"""
cad2_script = """LABIN FILE 1 ALL
LABIN FILE 2 %(cad_ano)s
DWAVE FILE 2 %(ID)s d%(ID)s %(wavelength).5f\nEND"""
mtzutils_script = "END\n"
mtz2various_script = """#!/bin/bash
rm -f free_refl_shelx_F3.tmp
labelF=$(mtzdmp $1 | grep " F " | head -1 | awk '{print $12 }')
labelSIGF=$(mtzdmp $1 | grep " Q " | head -1 | awk '{print $12 }')
labelFREE=$(mtzdmp $1 | grep " I " | head -1 | awk '{print $12 }')
echo "using labels \\"$labelF $labelSIGF $labelFREE\\""
mtz2various hklin $1 hklout free_refl_shelx_F3.tmp << eof > mtz2shelx.log
LABIN FP=$labelF SIGFP=$labelSIGF FREE=$labelFREE
OUTPUT SHELX
eof
grep -v " 0 0 0" free_refl_shelx_F3.tmp > free_refl_shelx_F3.hkl
echo " 0 0 0 0. 0.00 0" >> free_refl_shelx_F3.hkl
rm -f free_refl_shelx_F3.tmp
"""
HAd = {0.72227:("U ",92,"L3"),0.72766:("Y ",39,"K "),0.76973:("Sr",38,"K "),
0.81554:("Rb",37,"K "),0.86552:("Kr",36,"K "),0.92040:("Br",35,"K "),
0.92340:("Bi",83,"L3"),0.95073:("Pb",82,"L3"),0.97974:("Se",34,"K "),
1.00910:("Hg",80,"L3"),1.04000:("Au",79,"L3"),1.04500:("As",33,"K "),
1.07230:("Pt",78,"L3"),1.10580:("Ir",77,"L3"),1.11658:("Ge",32,"K "),
1.14080:("Os",76,"L3"),1.17730:("Re",75,"L3"),1.19580:("Ga",31,"K "),
1.21550:("W ",74,"L3"),1.25530:("Ta",73,"L3"),1.28340:("Zn",30,"K "),
1.29720:("Hf",72,"L3"),1.34050:("Lu",71,"L3"),1.38059:("Cu",29,"K "),
1.38620:("Yb",70,"L3"),1.43340:("Tm",69,"L3"),1.48350:("Er",68,"L3"),
1.48807:("Ni",28,"K "),1.53680:("Ho",67,"L3"),1.59160:("Dy",66,"L3"),
1.60815:("Co",27,"K "),1.64970:("Tb",65,"L3"),1.71170:("Gd",64,"L3"),
1.74346:("Fe",26,"K "),1.77610:("Eu",63,"L3"),1.84570:("Sm",62,"L3")}
# A dictionary containing all sg_number:sg_name from the spacegroup.lib
cns_sg_lib = {1:"P1",2:"P-1",3:"P2",4:"P2(1)",5:"C2",6:"PM",7:"PC",8:"CM",
9:"CC",10:"P2/M",11:"P2(1)/M",12:"C2/M",13:"P2/C",14:"P2(1)/C",15:"C2/C",
16:"P222",17:"P222(1)",18:"P2(1)2(1)2",19:"P2(1)2(1)2(1)",20:"C222(1)",
21:"C222",22:"F222",23:"I222",24:"I2(1)2(1)2(1)",25:"PMM2",26:"PMC2(1)",
27:"PCC2",28:"PMA2",29:"PCA2(1)",30:"PNC2",31:"PMN2(1)",32:"PBA2",
33:"PNA2(1)",34:"PNN2",35:"CMM2",36:"CMC2(1)",37:"CCC2",38:"AMM2",
39:"ABM2",40:"AMA2",41:"ABA2",42:"FMM2",43:"FDD2",44:"IMM2",45:"IBA2",
46:"IMA2",47:"PMMM",48:"PNNN",49:"PCCM",50:"PBAN",51:"PMMA",52:"PNNA",
53:"PMNA",54:"PCCA",55:"PBAM",56:"PCCN",57:"PBCM",58:"PNNM",59:"PMMN",
60:"PBCN",61:"PBCA",62:"PNMA",63:"CMCM",64:"CMCA",65:"CMMM",13065:"AMMM",
66:"CCCM",67:"CMMA",68:"CCCA",69:"FMMM",70:"FDDD",71:"IMMM",72:"IBAM",
73:"IBCA",74:"IMMA",75:"P4",76:"P4(1)",77:"P4(2)",78:"P4(3)",79:"I4",
80:"I4(1)",81:"P-4",82:"I-4",83:"P4/M",84:"P4(2)/M",85:"P4/N",86:"P4(2)/N",
87:"I4/M",88:"I4(1)/A",89:"P422",90:"P42(1)2",91:"P4(1)22",92:"P4(1)2(1)2",
93:"P4(2)22",94:"P4(2)2(1)2",95:"P4(3)22",96:"P4(3)2(1)2",97:"I422",
98:"I4(1)22",99:"P4MM",100:"P4BM",101:"P4(2)CM",102:"P4(2)NM",103:"P4CC",
104:"P4NC",105:"P4(2)MC",106:"P4(2)BC",107:"I4MM",108:"I4CM",109:"I4(1)MD",
110:"I4(1)CD",111:"P-42M",112:"P-42C",113:"P-42(1)M",114:"P-42(1)C",
115:"P-4M2",116:"P-4C2",117:"P-4B2",118:"P-4N2",119:"I-4M2",120:"I-4C2",
121:"I-42M",122:"I-42D",123:"P4/MMM",124:"P4/MCC",125:"P4/NBM",126:"P4/NNC",
127:"P4/MBM",128:"P4/MNC",129:"P4/NMM",130:"P4/NCC",131:"P4(2)/MMC",
132:"P4(2)/MCM",133:"P4(2)/NBC",134:"P4(2)/NNM",135:"P4(2)/MBC",
136:"P4(2)/MNM",137:"P4(2)/NMC",138:"P4(2)/NCM",139:"I4/MMM",140:"I4/MCM",
141:"I4(1)/AMD",142:"I4(1)/ACD",143:"P3",144:"P3(1)",145:"P3(2)",146:"R3",
20146:"R3R",147:"P-3",148:"R-3",20148:"R-3R",149:"P312",150:"P321",
151:"P3(1)12",152:"P3(1)21",153:"P3(2)12",154:"P3(2)21",155:"R32",
20155:"R32R",156:"P3M1",157:"P31M",158:"P3C1",159:"P31C",160:"R3M",
20160:"R3MR",161:"R3C",20161:"R3CR",162:"P-31M",163:"P-31C",164:"P-3M1",
165:"P-3C1",166:"R-3M",20166:"R-3MR",167:"R-3C",20167:"R-3CR",168:"P6",
169:"P6(1)",170:"P6(5)",171:"P6(2)",172:"P6(4)",173:"P6(3)",174:"P-6",
175:"P6/M",176:"P6(3)/M",177:"P622",178:"P6(1)22",179:"P6(5)22",
180:"P6(2)22",181:"P6(4)22",182:"P6(3)22",183:"P6MM",184:"P6CC",
185:"P6(3)CM",186:"P6(3)MC",187:"P-6M2",188:"P-6C2",189:"P-62M",
190:"P-62C",191:"P6/MMM",192:"P6/MCC",193:"P6(3)/MCM",194:"P6(3)/MMC",
195:"P23",196:"F23",197:"I23",198:"P2(1)3",199:"I2(1)3",200:"PM-3",
201:"PN-3",202:"FM-3",203:"FD-3",204:"IM-3",205:"PA-3",206:"IA-3",
207:"P432",208:"P4(2)32",209:"F432",210:"F4(1)32",211:"I432",
212:"P4(3)32",213:"P4(1)32",214:"I4(1)32",215:"P-43M",216:"F-43M",
217:"I-43M",218:"P-43N",219:"F-43C",220:"I-43D",221:"PM-3M",
222:"PN-3N",223:"PM-3N",224:"PN-3M",225:"FM-3M",226:"FM-3C",227:"FD-3M",
228:"FD-3C",229:"IM-3M",230:"IA-3D"}
amore_symops = {
1: ((1, 0, 'P1', 'PG1', 'TRICLINIC'),'x,y,z * end'),
2: ((2, 0, 'P-1', 'PG1BAR', 'TRICLINIC'),'x,y,z * -x,-y,-z * end'),
3: ((2, 0, 'P2', 'PG2', 'MONOCLINIC'),'x,y,z * -x,y,-z * end'),
4: ((2, 0, 'P21', 'PG2', 'MONOCLINIC'),'x,y,z * -x,1/2+y,-z * end'),
5: ((2, 1, 'C2', 'PG2', 'MONOCLINIC'),'x,y,z * -x,y,-z * 1/2,1/2,0 * end'),
6: ((2, 0, 'PM', 'PGM', 'MONOCLINIC'),'x,y,z * x,-y,z * end'),
7: ((2, 0, 'PC', 'PGM', 'MONOCLINIC'),'x,y,z * x,-y,1/2+z * end'),
8: ((2, 1, 'CM', 'PGM', 'MONOCLINIC'),'x,y,z * x,-y,z * 1/2,1/2,0 * end'),
9: ((2, 1, 'CC', 'PGM', 'MONOCLINIC'),'x,y,z * x,-y,1/2+z * 1/2,1/2,0 * end'),
10: ((4, 0, 'P2/M', 'PG2/M', 'MONOCLINIC'),'x,y,z * -x,y,-z * -x,-y,-z * x,-y,z * end'),
11: ((4, 0, 'P21/M', 'PG2/M', 'MONOCLINIC'),'x,y,z * -x,1/2+y,-z * -x,-y,-z * x,1/2-y,z * end'),
12: ((4, 1, 'C2/M', 'PG2/M', 'MONOCLINIC'),'x,y,z * -x,y,-z * -x,-y,-z * x,-y,z * 1/2,1/2,0 * end'),
13: ((4, 0, 'P2/C', 'PG2/M', 'MONOCLINIC'),'x,y,z * -x,y,1/2-z * -x,-y,-z * x,-y,1/2+z * end'),
14: ((4, 0, 'P21/C', 'PG2/M', 'MONOCLINIC'),'x,y,z * -x,-y,-z * -x,1/2+y,1/2-z * x,1/2-y,1/2+z * end'),
15: ((4, 1, 'C2/C', 'PG2/M', 'MONOCLINIC'),'x,y,z * -x,y,1/2-z * -x,-y,-z * x,-y,1/2+z * 1/2,1/2,0 * end'),
16: ((4, 0, 'P222', 'PG222', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * end'),
17: ((4, 0, 'P2221', 'PG222', 'ORTHORHOMBIC'),'x,y,z * -x,-y,1/2+z * -x,y,1/2-z * x,-y,-z * end'),
18: ((4, 0, 'P21212', 'PG222', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * 1/2-x,1/2+y,-z * 1/2+x,1/2-y,-z * end'),
19: ((4, 0, 'P212121', 'PG222', 'ORTHORHOMBIC'),'x,y,z * 1/2-x,-y,1/2+z * -x,1/2+y,1/2-z * 1/2+x,1/2-y,-z * end'),
20: ((4, 1, 'C2221', 'PG222', 'ORTHORHOMBIC'),'x,y,z * -x,-y,1/2+z * -x,y,1/2-z * x,-y,-z * 1/2,1/2,0 * end'),
21: ((4, 1, 'C222', 'PG222', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * 1/2,1/2,0 * end'),
22: ((4, 3, 'F222', 'PG222', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
23: ((4, 1, 'I222', 'PG222', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * 1/2,1/2,1/2 * end'),
24: ((4, 1, 'I212121', 'PG222', 'ORTHORHOMBIC'),'x,y,z * 1/2-x,-y,1/2+z * -x,1/2+y,1/2-z * 1/2+x,1/2-y,-z * 1/2,1/2,1/2 * end'),
25: ((4, 0, 'PMM2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * x,-y,z * -x,y,z * end'),
26: ((4, 0, 'PMC21', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,1/2+z * x,-y,1/2+z * -x,y,z * end'),
27: ((4, 0, 'PCC2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * x,-y,1/2+z * -x,y,1/2+z * end'),
28: ((4, 0, 'PMA2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * 1/2+x,-y,z * 1/2-x,y,z * end'),
29: ((4, 0, 'PCA21', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,1/2+z * 1/2+x,-y,z * 1/2-x,y,1/2+z * end'),
30: ((4, 0, 'PNC2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * x,1/2-y,1/2+z * -x,1/2+y,1/2+z * end'),
31: ((4, 0, 'PMN21', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * 1/2-x,-y,1/2+z * 1/2+x,-y,1/2+z * -x,y,z * end'),
32: ((4, 0, 'PBA2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * 1/2+x,1/2-y,z * 1/2-x,1/2+y,z * end'),
33: ((4, 0, 'PNA21', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,1/2+z * 1/2+x,1/2-y,z * 1/2-x,1/2+y,1/2+z * end'),
34: ((4, 0, 'PNN2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * 1/2+x,1/2-y,1/2+z * 1/2-x,1/2+y,1/2+z * end'),
35: ((4, 1, 'CMM2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * x,-y,z * -x,y,z * 1/2,1/2,0 * end'),
36: ((4, 1, 'CMC21', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,1/2+z * x,-y,1/2+z * -x,y,z * 1/2,1/2,0 * end'),
37: ((4, 1, 'CCC2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * x,-y,1/2+z * -x,y,1/2+z * 1/2,1/2,0 * end'),
38: ((4, 1, 'AMM2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * x,-y,z * -x,y,z * 0,1/2,1/2 * end'),
39: ((4, 1, 'ABM2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * x,1/2-y,z * -x,1/2+y,z * 0,1/2,1/2 * end'),
40: ((4, 1, 'AMA2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * 1/2+x,-y,z * 1/2-x,y,z * 0,1/2,1/2 * end'),
41: ((4, 1, 'ABA2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * 1/2+x,1/2-y,z * 1/2-x,1/2+y,z * 0,1/2,1/2 * end'),
42: ((4, 3, 'FMM2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * x,-y,z * -x,y,z * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
43: ((4, 3, 'FDD2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * 1/4+x,1/4-y,1/4+z * 1/4-x,1/4+y,1/4+z * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
44: ((4, 1, 'IMM2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * x,-y,z * -x,y,z * 1/2,1/2,1/2 * end'),
45: ((4, 1, 'IBA2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * 1/2+x,1/2-y,z * 1/2-x,1/2+y,z * 1/2,1/2,1/2 * end'),
46: ((4, 1, 'IMA2', 'PGMM2', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * 1/2+x,-y,z * 1/2-x,y,z * 1/2,1/2,1/2 * end'),
47: ((8, 0, 'PMMM', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * -x,-y,-z * x,y,-z * x,-y,z * -x,y,z * end'),
48: ((8, 0, 'PNNN', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * 1/2-x,1/2-y,1/2-z * 1/2+x,1/2+y,1/2-z * 1/2+x,1/2-y,1/2+z * 1/2-x,1/2+y,1/2+z * end'),
49: ((8, 0, 'PCCM', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * -x,y,1/2-z * x,-y,1/2-z * -x,-y,-z * x,y,-z * x,-y,1/2+z * -x,y,1/2+z * end'),
50: ((8, 0, 'PBAN', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * 1/2-x,1/2-y,-z * 1/2+x,1/2+y,-z * 1/2+x,1/2-y,z * 1/2-x,1/2+y,z * end'),
51: ((8, 0, 'PMMA', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * 1/2-x,-y,z * -x,y,-z * 1/2+x,-y,-z * -x,-y,-z * 1/2+x,y,-z * x,-y,z * 1/2-x,y,z * end'),
52: ((8, 0, 'PNNA', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * 1/2-x,-y,z * 1/2-x,1/2+y,1/2-z * x,1/2-y,1/2-z * -x,-y,-z * 1/2+x,y,-z * 1/2+x,1/2-y,1/2+z * -x,1/2+y,1/2+z * end'),
53: ((8, 0, 'PMNA', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * 1/2-x,-y,1/2+z * 1/2-x,y,1/2-z * x,-y,-z * -x,-y,-z * 1/2+x,y,1/2-z * 1/2+x,-y,1/2+z * -x,y,z * end'),
54: ((8, 0, 'PCCA', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * 1/2-x,-y,z * -x,y,1/2-z * 1/2+x,-y,1/2-z * -x,-y,-z * 1/2+x,y,-z * x,-y,1/2+z * 1/2-x,y,1/2+z * end'),
55: ((8, 0, 'PBAM', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * 1/2-x,1/2+y,-z * 1/2+x,1/2-y,-z * -x,-y,-z * x,y,-z * 1/2+x,1/2-y,z * 1/2-x,1/2+y,z * end'),
56: ((8, 0, 'PCCN', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * 1/2-x,1/2-y,z * -x,1/2+y,1/2-z * 1/2+x,-y,1/2-z * -x,-y,-z * 1/2+x,1/2+y,-z * x,1/2-y,1/2+z * 1/2-x,y,1/2+z * end'),
57: ((8, 0, 'PBCM', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,-y,1/2+z * -x,1/2+y,1/2-z * x,1/2-y,-z * -x,-y,-z * x,y,1/2-z * x,1/2-y,1/2+z * -x,1/2+y,z * end'),
58: ((8, 0, 'PNNM', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * 1/2-x,1/2+y,1/2-z * 1/2+x,1/2-y,1/2-z * -x,-y,-z * x,y,-z * 1/2+x,1/2-y,1/2+z * 1/2-x,1/2+y,1/2+z * end'),
59: ((8, 0, 'PMMN', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * 1/2-x,1/2+y,-z * 1/2+x,1/2-y,-z * 1/2-x,1/2-y,-z * 1/2+x,1/2+y,-z * x,-y,z * -x,y,z * end'),
60: ((8, 0, 'PBCN', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * 1/2-x,1/2-y,1/2+z * -x,y,1/2-z * 1/2+x,1/2-y,-z * -x,-y,-z * 1/2+x,1/2+y,1/2-z * x,-y,1/2+z * 1/2-x,1/2+y,z * end'),
61: ((8, 0, 'PBCA', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * 1/2-x,-y,1/2+z * -x,1/2+y,1/2-z * 1/2+x,1/2-y,-z * -x,-y,-z * 1/2+x,y,1/2-z * x,1/2-y,1/2+z * 1/2-x,1/2+y,z * end'),
62: ((8, 0, 'PNMA', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * 1/2-x,-y,1/2+z * -x,1/2+y,-z * 1/2+x,1/2-y,1/2-z * -x,-y,-z * 1/2+x,y,1/2-z * x,1/2-y,z * 1/2-x,1/2+y,1/2+z * end'),
63: ((8, 1, 'CMCM', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,-y,1/2+z * -x,y,1/2-z * x,-y,-z * -x,-y,-z * x,y,1/2-z * x,-y,1/2+z * -x,y,z * 1/2,1/2,0 * end'),
64: ((8, 1, 'CMCA', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,1/2-y,1/2+z * -x,1/2+y,1/2-z * x,-y,-z * -x,-y,-z * x,1/2+y,1/2-z * x,1/2-y,1/2+z * -x,y,z * 1/2,1/2,0 * end'),
65: ((8, 1, 'CMMM', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * -x,-y,-z * x,y,-z * x,-y,z * -x,y,z * 1/2,1/2,0 * end'),
66: ((8, 1, 'CCCM', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * -x,y,1/2-z * x,-y,1/2-z * -x,-y,-z * x,y,-z * x,-y,1/2+z * -x,y,1/2+z * 1/2,1/2,0 * end'),
67: ((8, 1, 'CMMA', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,1/2-y,z * -x,1/2+y,-z * x,-y,-z * -x,-y,-z * x,1/2+y,-z * x,1/2-y,z * -x,y,z * 1/2,1/2,0 * end'),
68: ((8, 1, 'CCCA', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * 1/2-x,1/2-y,z * -x,y,-z * 1/2+x,1/2-y,-z * -x,1/2-y,1/2-z * 1/2+x,y,1/2-z * x,1/2-y,1/2+z * 1/2-x,y,1/2+z * 1/2,1/2,0 * end'),
69: ((8, 3, 'FMMM', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * -x,-y,-z * x,y,-z * x,-y,z * -x,y,z * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
70: ((8, 3, 'FDDD', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * 1/4-x,1/4-y,1/4-z * 1/4+x,1/4+y,1/4-z * 1/4+x,1/4-y,1/4+z * 1/4-x,1/4+y,1/4+z * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
71: ((8, 1, 'IMMM', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * -x,-y,-z * x,y,-z * x,-y,z * -x,y,z * 1/2,1/2,1/2 * end'),
72: ((8, 1, 'IBAM', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,-y,z * 1/2-x,1/2+y,-z * 1/2+x,1/2-y,-z * -x,-y,-z * x,y,-z * 1/2+x,1/2-y,z * 1/2-x,1/2+y,z * 1/2,1/2,1/2 * end'),
73: ((8, 1, 'IBCA', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * 1/2-x,-y,1/2+z * -x,1/2+y,1/2-z * 1/2+x,1/2-y,-z * -x,-y,-z * 1/2+x,y,1/2-z * x,1/2-y,1/2+z * 1/2-x,1/2+y,z * 1/2,1/2,1/2 * end'),
74: ((8, 1, 'IMMA', 'PGMMM', 'ORTHORHOMBIC'),'x,y,z * -x,1/2-y,z * -x,1/2+y,-z * x,-y,-z * -x,-y,-z * x,1/2+y,-z * x,1/2-y,z * -x,y,z * 1/2,1/2,1/2 * end'),
75: ((4, 0, 'P4', 'PG4', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * end'),
76: ((4, 0, 'P41', 'PG4', 'TETRAGONAL'),'x,y,z * -x,-y,1/2+z * -y,x,1/4+z * y,-x,3/4+z * end'),
77: ((4, 0, 'P42', 'PG4', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,1/2+z * y,-x,1/2+z * end'),
78: ((4, 0, 'P43', 'PG4', 'TETRAGONAL'),'x,y,z * -x,-y,1/2+z * -y,x,3/4+z * y,-x,1/4+z * end'),
79: ((4, 1, 'I4', 'PG4', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * 1/2,1/2,1/2 * end'),
80: ((4, 1, 'I41', 'PG4', 'TETRAGONAL'),'x,y,z * 1/2-x,1/2-y,1/2+z * -y,1/2+x,1/4+z * 1/2+y,-x,3/4+z * 1/2,1/2,1/2 * end'),
81: ((4, 0, 'P-4', 'PG4BAR', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,-z * y,-x,-z * end'),
82: ((4, 1, 'I-4', 'PG4BAR', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,-z * y,-x,-z * 1/2,1/2,1/2 * end'),
83: ((8, 0, 'P4/M', 'PG4/M', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * -x,-y,-z * x,y,-z * y,-x,-z * -y,x,-z * end'),
84: ((8, 0, 'P42/M', 'PG4/M', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,1/2+z * y,-x,1/2+z * -x,-y,-z * x,y,-z * y,-x,1/2-z * -y,x,1/2-z * end'),
85: ((8, 0, 'P4/N', 'PG4/M', 'TETRAGONAL'),'x,y,z * -x,-y,z * 1/2-y,1/2+x,z * 1/2+y,1/2-x,z * 1/2-x,1/2-y,-z * 1/2+x,1/2+y,-z * y,-x,-z * -y,x,-z * end'),
86: ((8, 0, 'P42/N', 'PG4/M', 'TETRAGONAL'),'x,y,z * -x,-y,z * 1/2-y,1/2+x,1/2+z * 1/2+y,1/2-x,1/2+z * 1/2-x,1/2-y,1/2-z * 1/2+x,1/2+y,1/2-z * y,-x,-z * -y,x,-z * end'),
87: ((8, 1, 'I4/M', 'PG4/M', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * -x,-y,-z * x,y,-z * y,-x,-z * -y,x,-z * 1/2,1/2,1/2 * end'),
88: ((8, 1, 'I41/A', 'PG4/M', 'TETRAGONAL'),'x,y,z * 1/2-x,1/2-y,1/2+z * -y,1/2+x,1/4+z * 1/2+y,-x,3/4+z * -x,1/2-y,1/4-z * 1/2+x,y,3/4-z * y,-x,-z * 1/2-y,1/2+x,1/2-z * 1/2,1/2,1/2 * end'),
89: ((8, 0, 'P422', 'PG422', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * -x,y,-z * x,-y,-z * y,x,-z * -y,-x,-z * end'),
90: ((8, 0, 'P4212', 'PG422', 'TETRAGONAL'),'x,y,z * -x,-y,z * 1/2-y,1/2+x,z * 1/2+y,1/2-x,z * 1/2-x,1/2+y,-z * 1/2+x,1/2-y,-z * y,x,-z * -y,-x,-z * end'),
91: ((8, 0, 'P4122', 'PG422', 'TETRAGONAL'),'x,y,z * -x,-y,1/2+z * -y,x,1/4+z * y,-x,3/4+z * -x,y,-z * x,-y,1/2-z * y,x,3/4-z * -y,-x,1/4-z * end'),
92: ((8, 0, 'P41212', 'PG422', 'TETRAGONAL'),'x,y,z * -x,-y,1/2+z * 1/2-y,1/2+x,1/4+z * 1/2+y,1/2-x,3/4+z * 1/2-x,1/2+y,1/4-z * 1/2+x,1/2-y,3/4-z * y,x,-z * -y,-x,1/2-z * end'),
93: ((8, 0, 'P4222', 'PG422', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,1/2+z * y,-x,1/2+z * -x,y,-z * x,-y,-z * y,x,1/2-z * -y,-x,1/2-z * end'),
94: ((8, 0, 'P42212', 'PG422', 'TETRAGONAL'),'x,y,z * -x,-y,z * 1/2-y,1/2+x,1/2+z * 1/2+y,1/2-x,1/2+z * 1/2-x,1/2+y,1/2-z * 1/2+x,1/2-y,1/2-z * y,x,-z * -y,-x,-z * end'),
95: ((8, 0, 'P4322', 'PG422', 'TETRAGONAL'),'x,y,z * -x,-y,1/2+z * -y,x,3/4+z * y,-x,1/4+z * -x,y,-z * x,-y,1/2-z * y,x,1/4-z * -y,-x,3/4-z * end'),
96: ((8, 0, 'P43212', 'PG422', 'TETRAGONAL'),'x,y,z * -x,-y,1/2+z * 1/2-y,1/2+x,3/4+z * 1/2+y,1/2-x,1/4+z * 1/2-x,1/2+y,3/4-z * 1/2+x,1/2-y,1/4-z * y,x,-z * -y,-x,1/2-z * end'),
97: ((8, 1, 'I422', 'PG422', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * -x,y,-z * x,-y,-z * y,x,-z * -y,-x,-z * 1/2,1/2,1/2 * end'),
98: ((8, 1, 'I4122', 'PG422', 'TETRAGONAL'),'x,y,z * 1/2-x,1/2-y,1/2+z * -y,1/2+x,1/4+z * 1/2+y,-x,3/4+z * 1/2-x,y,3/4-z * x,1/2-y,1/4-z * 1/2+y,1/2+x,1/2-z * -y,-x,-z * 1/2,1/2,1/2 * end'),
99: ((8, 0, 'P4MM', 'PG4MM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * x,-y,z * -x,y,z * -y,-x,z * y,x,z * end'),
100: ((8, 0, 'P4BM', 'PG4MM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * 1/2+x,1/2-y,z * 1/2-x,1/2+y,z * 1/2-y,1/2-x,z * 1/2+y,1/2+x,z * end'),
101: ((8, 0, 'P42CM', 'PG4MM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,1/2+z * y,-x,1/2+z * x,-y,1/2+z * -x,y,1/2+z * -y,-x,z * y,x,z * end'),
102: ((8, 0, 'P42NM', 'PG4MM', 'TETRAGONAL'),'x,y,z * -x,-y,z * 1/2-y,1/2+x,1/2+z * 1/2+y,1/2-x,1/2+z * 1/2+x,1/2-y,1/2+z * 1/2-x,1/2+y,1/2+z * -y,-x,z * y,x,z * end'),
103: ((8, 0, 'P4CC', 'PG4MM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * x,-y,1/2+z * -x,y,1/2+z * -y,-x,1/2+z * y,x,1/2+z * end'),
104: ((8, 0, 'P4NC', 'PG4MM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * 1/2+x,1/2-y,1/2+z * 1/2-x,1/2+y,1/2+z * 1/2-y,1/2-x,1/2+z * 1/2+y,1/2+x,1/2+z * end'),
105: ((8, 0, 'P42MC', 'PG4MM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,1/2+z * y,-x,1/2+z * x,-y,z * -x,y,z * -y,-x,1/2+z * y,x,1/2+z * end'),
106: ((8, 0, 'P42BC', 'PG4MM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,1/2+z * y,-x,1/2+z * 1/2+x,1/2-y,z * 1/2-x,1/2+y,z * 1/2-y,1/2-x,1/2+z * 1/2+y,1/2+x,1/2+z * end'),
107: ((8, 1, 'I4MM', 'PG4MM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * x,-y,z * -x,y,z * -y,-x,z * y,x,z * 1/2,1/2,1/2 * end'),
108: ((8, 1, 'I4CM', 'PG4MM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * x,-y,1/2+z * -x,y,1/2+z * -y,-x,1/2+z * y,x,1/2+z * 1/2,1/2,1/2 * end'),
109: ((8, 1, 'I41MD', 'PG4MM', 'TETRAGONAL'),'x,y,z * 1/2-x,1/2-y,1/2+z * -y,1/2+x,1/4+z * 1/2+y,-x,3/4+z * x,-y,z * 1/2-x,1/2+y,1/2+z * -y,1/2-x,1/4+z * 1/2+y,x,3/4+z * 1/2,1/2,1/2 * end'),
110: ((8, 1, 'I41CD', 'PG4MM', 'TETRAGONAL'),'x,y,z * 1/2-x,1/2-y,1/2+z * -y,1/2+x,1/4+z * 1/2+y,-x,3/4+z * x,-y,1/2+z * 1/2-x,1/2+y,z * -y,1/2-x,3/4+z * 1/2+y,x,1/4+z * 1/2,1/2,1/2 * end'),
111: ((8, 0, 'P-42M', 'PG4BAR2M', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,-z * y,-x,-z * -x,y,-z * x,-y,-z * -y,-x,z * y,x,z * end'),
112: ((8, 0, 'P-42C', 'PG4BAR2M', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,-z * y,-x,-z * -x,y,1/2-z * x,-y,1/2-z * -y,-x,1/2+z * y,x,1/2+z * end'),
113: ((8, 0, 'P-421M', 'PG4BAR2M', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,-z * y,-x,-z * 1/2-x,1/2+y,-z * 1/2+x,1/2-y,-z * 1/2-y,1/2-x,z * 1/2+y,1/2+x,z * end'),
114: ((8, 0, 'P-421C', 'PG4BAR2M', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,-z * y,-x,-z * 1/2-x,1/2+y,1/2-z * 1/2+x,1/2-y,1/2-z * 1/2-y,1/2-x,1/2+z * 1/2+y,1/2+x,1/2+z * end'),
115: ((8, 0, 'P-4M2', 'PG4BARM2', 'TETRAGONAL'),'x,y,z * -x,-y,z * y,-x,-z * -y,x,-z * x,-y,z * -x,y,z * y,x,-z * -y,-x,-z * end'),
116: ((8, 0, 'P-4C2', 'PG4BARM2', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,-z * y,-x,-z * x,-y,1/2+z * -x,y,1/2+z * y,x,1/2-z * -y,-x,1/2-z * end'),
117: ((8, 0, 'P-4B2', 'PG4BARM2', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,-z * y,-x,-z * 1/2+x,1/2-y,z * 1/2-x,1/2+y,z * 1/2+y,1/2+x,-z * 1/2-y,1/2-x,-z * end'),
118: ((8, 0, 'P-4N2', 'PG4BARM2', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,-z * y,-x,-z * 1/2+x,1/2-y,1/2+z * 1/2-x,1/2+y,1/2+z * 1/2+y,1/2+x,1/2-z * 1/2-y,1/2-x,1/2-z * end'),
119: ((8, 1, 'I-4M2', 'PG4BARM2', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,-z * y,-x,-z * x,-y,z * -x,y,z * y,x,-z * -y,-x,-z * 1/2,1/2,1/2 * end'),
120: ((8, 1, 'I-4C2', 'PG4BARM2', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,-z * y,-x,-z * x,-y,1/2+z * -x,y,1/2+z * y,x,1/2-z * -y,-x,1/2-z * 1/2,1/2,1/2 * end'),
121: ((8, 1, 'I-42M', 'PG4BAR2M', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,-z * y,-x,-z * -x,y,-z * x,-y,-z * -y,-x,z * y,x,z * 1/2,1/2,1/2 * end'),
122: ((8, 1, 'I-42D', 'PG4BAR2M', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,-z * y,-x,-z * 1/2-x,y,3/4-z * 1/2+x,-y,3/4-z * 1/2-y,-x,3/4+z * 1/2+y,x,3/4+z * 1/2,1/2,1/2 * end'),
123: ((16, 0, 'P4/MMM', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * -x,y,-z * x,-y,-z * y,x,-z * -y,-x,-z * -x,-y,-z * x,y,-z * y,-x,-z * -y,x,-z * x,-y,z * -x,y,z * -y,-x,z * y,x,z * end'),
124: ((16, 0, 'P4/MCC', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * -x,y,1/2-z * x,-y,1/2-z * y,x,1/2-z * -y,-x,1/2-z * -x,-y,-z * x,y,-z * y,-x,-z * -y,x,-z * x,-y,1/2+z * -x,y,1/2+z * -y,-x,1/2+z * y,x,1/2+z * end'),
125: ((16, 0, 'P4/NBM', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * -x,y,-z * x,-y,-z * y,x,-z * -y,-x,-z * 1/2-x,1/2-y,-z * 1/2+x,1/2+y,-z * 1/2+y,1/2-x,-z * 1/2-y,1/2+x,-z * 1/2+x,1/2-y,z * 1/2-x,1/2+y,z * 1/2-y,1/2-x,z * 1/2+y,1/2+x,z * end'),
126: ((16, 0, 'P4/NNC', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * -x,y,-z * x,-y,-z * y,x,-z * -y,-x,-z * 1/2-x,1/2-y,1/2-z * 1/2+x,1/2+y,1/2-z * 1/2+y,1/2-x,1/2-z * 1/2-y,1/2+x,1/2-z * 1/2+x,1/2-y,1/2+z * 1/2-x,1/2+y,1/2+z * 1/2-y,1/2-x,1/2+z * 1/2+y,1/2+x,1/2+z * end'),
127: ((16, 0, 'P4/MBM', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * 1/2-x,1/2+y,-z * 1/2+x,1/2-y,-z * 1/2+y,1/2+x,-z * 1/2-y,1/2-x,-z * -x,-y,-z * x,y,-z * y,-x,-z * -y,x,-z * 1/2+x,1/2-y,z * 1/2-x,1/2+y,z * 1/2-y,1/2-x,z * 1/2+y,1/2+x,z * end'),
128: ((16, 0, 'P4/MNC', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * 1/2-x,1/2+y,1/2-z * 1/2+x,1/2-y,1/2-z * 1/2+y,1/2+x,1/2-z * 1/2-y,1/2-x,1/2-z * -x,-y,-z * x,y,-z * y,-x,-z * -y,x,-z * 1/2+x,1/2-y,1/2+z * 1/2-x,1/2+y,1/2+z * 1/2-y,1/2-x,1/2+z * 1/2+y,1/2+x,1/2+z * end'),
129: ((16, 0, 'P4/NMM', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * 1/2-y,1/2+x,z * 1/2+y,1/2-x,z * 1/2-x,1/2+y,-z * 1/2+x,1/2-y,-z * y,x,-z * -y,-x,-z * 1/2-x,1/2-y,-z * 1/2+x,1/2+y,-z * y,-x,-z * -y,x,-z * x,-y,z * -x,y,z * 1/2-y,1/2-x,z * 1/2+y,1/2+x,z * end'),
130: ((16, 0, 'P4/NCC', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * 1/2-y,1/2+x,z * 1/2+y,1/2-x,z * 1/2-x,1/2+y,1/2-z * 1/2+x,1/2-y,1/2-z * y,x,1/2-z * -y,-x,1/2-z * 1/2-x,1/2-y,-z * 1/2+x,1/2+y,-z * y,-x,-z * -y,x,-z * x,-y,1/2+z * -x,y,1/2+z * 1/2-y,1/2-x,1/2+z * 1/2+y,1/2+x,1/2+z * end'),
131: ((16, 0, 'P42/MMC', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,1/2+z * y,-x,1/2+z * -x,y,-z * x,-y,-z * y,x,1/2-z * -y,-x,1/2-z * -x,-y,-z * x,y,-z * y,-x,1/2-z * -y,x,1/2-z * x,-y,z * -x,y,z * -y,-x,1/2+z * y,x,1/2+z * end'),
132: ((16, 0, 'P42/MCM', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,1/2+z * y,-x,1/2+z * -x,y,1/2-z * x,-y,1/2-z * y,x,-z * -y,-x,-z * -x,-y,-z * x,y,-z * y,-x,1/2-z * -y,x,1/2-z * x,-y,1/2+z * -x,y,1/2+z * -y,-x,z * y,x,z * end'),
133: ((16, 0, 'P42/NBC', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * 1/2-y,1/2+x,1/2+z * 1/2+y,1/2-x,1/2+z * -x,y,1/2-z * x,-y,1/2-z * 1/2+y,1/2+x,-z * 1/2-y,1/2-x,-z * 1/2-x,1/2-y,1/2-z * 1/2+x,1/2+y,1/2-z * y,-x,-z * -y,x,-z * 1/2+x,1/2-y,z * 1/2-x,1/2+y,z * -y,-x,1/2+z * y,x,1/2+z * end'),
134: ((16, 0, 'P42/NNM', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * 1/2-y,1/2+x,1/2+z * 1/2+y,1/2-x,1/2+z * -x,y,-z * x,-y,-z * 1/2+y,1/2+x,1/2-z * 1/2-y,1/2-x,1/2-z * 1/2-x,1/2-y,1/2-z * 1/2+x,1/2+y,1/2-z * y,-x,-z * -y,x,-z * 1/2+x,1/2-y,1/2+z * 1/2-x,1/2+y,1/2+z * -y,-x,z * y,x,z * end'),
135: ((16, 0, 'P42/MBC', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,1/2+z * y,-x,1/2+z * 1/2-x,1/2+y,-z * 1/2+x,1/2-y,-z * 1/2+y,1/2+x,1/2-z * 1/2-y,1/2-x,1/2-z * -x,-y,-z * x,y,-z * y,-x,1/2-z * -y,x,1/2-z * 1/2+x,1/2-y,z * 1/2-x,1/2+y,z * 1/2-y,1/2-x,1/2+z * 1/2+y,1/2+x,1/2+z * end'),
136: ((16, 0, 'P42/MNM', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * 1/2-y,1/2+x,1/2+z * 1/2+y,1/2-x,1/2+z * 1/2-x,1/2+y,1/2-z * 1/2+x,1/2-y,1/2-z * y,x,-z * -y,-x,-z * -x,-y,-z * x,y,-z * 1/2+y,1/2-x,1/2-z * 1/2-y,1/2+x,1/2-z * 1/2+x,1/2-y,1/2+z * 1/2-x,1/2+y,1/2+z * -y,-x,z * y,x,z * end'),
137: ((16, 0, 'P42/NMC', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * 1/2-y,1/2+x,1/2+z * 1/2+y,1/2-x,1/2+z * 1/2-x,1/2+y,1/2-z * 1/2+x,1/2-y,1/2-z * y,x,-z * -y,-x,-z * 1/2-x,1/2-y,1/2-z * 1/2+x,1/2+y,1/2-z * y,-x,-z * -y,x,-z * x,-y,z * -x,y,z * 1/2-y,1/2-x,1/2+z * 1/2+y,1/2+x,1/2+z * end'),
138: ((16, 0, 'P42/NCM', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * 1/2-y,1/2+x,1/2+z * 1/2+y,1/2-x,1/2+z * 1/2-x,1/2+y,-z * 1/2+x,1/2-y,-z * y,x,1/2-z * -y,-x,1/2-z * 1/2-x,1/2-y,1/2-z * 1/2+x,1/2+y,1/2-z * y,-x,-z * -y,x,-z * x,-y,1/2+z * -x,y,1/2+z * 1/2-y,1/2-x,z * 1/2+y,1/2+x,z * end'),
139: ((16, 1, 'I4/MMM', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * -x,y,-z * x,-y,-z * y,x,-z * -y,-x,-z * -x,-y,-z * x,y,-z * y,-x,-z * -y,x,-z * x,-y,z * -x,y,z * -y,-x,z * y,x,z * 1/2,1/2,1/2 * end'),
140: ((16, 1, 'I4/MCM', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * -x,-y,z * -y,x,z * y,-x,z * -x,y,1/2-z * x,-y,1/2-z * y,x,1/2-z * -y,-x,1/2-z * -x,-y,-z * x,y,-z * y,-x,-z * -y,x,-z * x,-y,1/2+z * -x,y,1/2+z * -y,-x,1/2+z * y,x,1/2+z * 1/2,1/2,1/2 * end'),
141: ((16, 1, 'I41/AMD', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * 1/2-x,1/2-y,1/2+z * -y,1/2+x,1/4+z * 1/2+y,-x,3/4+z * 1/2-x,y,3/4-z * x,1/2-y,1/4-z * 1/2+y,1/2+x,1/2-z * -y,-x,-z * -x,1/2-y,1/4-z * 1/2+x,y,3/4-z * y,-x,-z * 1/2-y,1/2+x,1/2-z * 1/2+x,1/2-y,1/2+z * -x,y,z * 1/2-y,-x,3/4+z * y,1/2+x,1/4+z * 1/2,1/2,1/2 * end'),
142: ((16, 1, 'I41/ACD', 'PG4/MMM', 'TETRAGONAL'),'x,y,z * 1/2-x,1/2-y,1/2+z * -y,1/2+x,1/4+z * 1/2+y,-x,3/4+z * 1/2-x,y,1/4-z * x,1/2-y,3/4-z * 1/2+y,1/2+x,-z * -y,-x,1/2-z * -x,1/2-y,1/4-z * 1/2+x,y,3/4-z * y,-x,-z * 1/2-y,1/2+x,1/2-z * 1/2+x,1/2-y,z * -x,y,1/2+z * 1/2-y,-x,1/4+z * y,1/2+x,3/4+z * 1/2,1/2,1/2 * end'),
143: ((3, 0, 'P3', 'PG3', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * end'),
144: ((3, 0, 'P31', 'PG3', 'TRIGONAL'),'x,y,z * -y,x-y,1/3+z * y-x,-x,2/3+z * end'),
145: ((3, 0, 'P32', 'PG3', 'TRIGONAL'),'x,y,z * -y,x-y,2/3+z * y-x,-x,1/3+z * end'),
146: ((3, 2, 'R3', 'PG3', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * 2/3,1/3,1/3 * 1/3,2/3,2/3 * end'),
146.1: ((3, 0, 'R3_R', 'PG3', 'RHOMBOHEDRAL'),'x,y,z * z,x,y * y,z,x * end'),
147: ((6, 0, 'P-3', 'PG3BAR', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,-z * y,y-x,-z * x-y,x,-z * end'),
148: ((6, 2, 'R-3', 'PG3BAR', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,-z * y,y-x,-z * x-y,x,-z * 2/3,1/3,1/3 * 1/3,2/3,2/3 * end'),
148.1: ((6, 0, 'R-3_R', 'PG3BAR', 'RHOMBOHEDRAL'),'x,y,z * z,x,y * y,z,x * -x,-y,-z * -z,-x,-y * -y,-z,-x * end'),
149: ((6, 0, 'P312', 'PG312', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -y,-x,-z * y-x,y,-z * x,x-y,-z * end'),
150: ((6, 0, 'P321', 'PG321', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * y,x,-z * x-y,-y,-z * -x,y-x,-z * end'),
151: ((6, 0, 'P3112', 'PG312', 'TRIGONAL'),'x,y,z * -y,x-y,1/3+z * y-x,-x,2/3+z * -y,-x,2/3-z * y-x,y,1/3-z * x,x-y,-z * end'),
152: ((6, 0, 'P3121', 'PG321', 'TRIGONAL'),'x,y,z * -y,x-y,1/3+z * y-x,-x,2/3+z * y,x,-z * x-y,-y,2/3-z * -x,y-x,1/3-z * end'),
153: ((6, 0, 'P3212', 'PG312', 'TRIGONAL'),'x,y,z * -y,x-y,2/3+z * y-x,-x,1/3+z * -y,-x,1/3-z * y-x,y,2/3-z * x,x-y,-z * end'),
154: ((6, 0, 'P3221', 'PG321', 'TRIGONAL'),'x,y,z * -y,x-y,2/3+z * y-x,-x,1/3+z * y,x,-z * x-y,-y,1/3-z * -x,y-x,2/3-z * end'),
155: ((6, 2, 'R32', 'PG32', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * y,x,-z * x-y,-y,-z * -x,y-x,-z * 2/3,1/3,1/3 * 1/3,2/3,2/3 * end'),
155.1: ((6, 0, 'R32_R', 'PG32', 'RHOMBOHEDRAL'),'x,y,z * z,x,y * y,z,x * -y,-x,-z * -x,-z,-y * -z,-y,-x * end'),
156: ((6, 0, 'P3M1', 'PG3M1', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -y,-x,z * y-x,y,z * x,x-y,z * end'),
157: ((6, 0, 'P31M', 'PG31M', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * y,x,z * x-y,-y,z * -x,y-x,z * end'),
158: ((6, 0, 'P3C1', 'PG3M1', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -y,-x,1/2+z * y-x,y,1/2+z * x,x-y,1/2+z * end'),
159: ((6, 0, 'P31C', 'PG31M', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * y,x,1/2+z * x-y,-y,1/2+z * -x,y-x,1/2+z * end'),
160: ((6, 2, 'R3M', 'PG3M', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -y,-x,z * y-x,y,z * x,x-y,z * 2/3,1/3,1/3 * 1/3,2/3,2/3 * end'),
160.1: ((6, 0, 'R3M_R', 'PG3M', 'RHOMBOHEDRAL'),'x,y,z * z,x,y * y,z,x * y,x,z * x,z,y * z,y,x * end'),
161: ((6, 2, 'R3C', 'PG3M', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -y,-x,1/2+z * y-x,y,1/2+z * x,x-y,1/2+z * 2/3,1/3,1/3 * 1/3,2/3,2/3 * end'),
161.1: ((6, 0, 'R3C_R', 'PG3M', 'RHOMBOHEDRAL'),'x,y,z * z,x,y * y,z,x * 1/2+y,1/2+x,1/2+z * 1/2+x,1/2+z,1/2+y * 1/2+z,1/2+y,1/2+x * end'),
162: ((12, 0, 'P-31M', 'PG3BAR1M', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -y,-x,-z * y-x,y,-z * x,x-y,-z * -x,-y,-z * y,y-x,-z * x-y,x,-z * y,x,z * x-y,-y,z * -x,y-x,z * end'),
163: ((12, 0, 'P-31C', 'PG3BAR1M', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -y,-x,1/2-z * y-x,y,1/2-z * x,x-y,1/2-z * -x,-y,-z * y,y-x,-z * x-y,x,-z * y,x,1/2+z * x-y,-y,1/2+z * -x,y-x,1/2+z * end'),
164: ((12, 0, 'P-3M1', 'PG3BARM1', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * y,x,-z * x-y,-y,-z * -x,y-x,-z * -x,-y,-z * y,y-x,-z * x-y,x,-z * -y,-x,z * y-x,y,z * x,x-y,z * end'),
165: ((12, 0, 'P-3C1', 'PG3BARM1', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * y,x,1/2-z * x-y,-y,1/2-z * -x,y-x,1/2-z * -x,-y,-z * y,y-x,-z * x-y,x,-z * -y,-x,1/2+z * y-x,y,1/2+z * x,x-y,1/2+z * end'),
166: ((12, 2, 'R-3M', 'PG3BARM', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * y,x,-z * x-y,-y,-z * -x,y-x,-z * -x,-y,-z * y,y-x,-z * x-y,x,-z * -y,-x,z * y-x,y,z * x,x-y,z * 2/3,1/3,1/3 * 1/3,2/3,2/3 * end'),
166.1: ((12, 0, 'R-3M_R', 'PG3BARM', 'RHOMBOHEDRAL'),'x,y,z * z,x,y * y,z,x * -y,-x,-z * -x,-z,-y * -z,-y,-x * -x,-y,-z * -z,-x,-y * -y,-z,-x * y,x,z * x,z,y * z,y,x * end'),
167: ((12, 2, 'R-3C', 'PG3BARM', 'TRIGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * y,x,1/2-z * x-y,-y,1/2-z * -x,y-x,1/2-z * -x,-y,-z * y,y-x,-z * x-y,x,-z * -y,-x,1/2+z * y-x,y,1/2+z * x,x-y,1/2+z * 2/3,1/3,1/3 * 1/3,2/3,2/3 * end'),
167.1: ((12, 0, 'R-3C_R', 'PG3BARM', 'RHOMBOHEDRAL'),'x,y,z * z,x,y * y,z,x * 1/2-y,1/2-x,1/2-z * 1/2-x,1/2-z,1/2-y * 1/2-z,1/2-y,1/2-x * -x,-y,-z * -z,-x,-y * -y,-z,-x * 1/2+y,1/2+x,1/2+z * 1/2+x,1/2+z,1/2+y * 1/2+z,1/2+y,1/2+x * end'),
168: ((6, 0, 'P6', 'PG6', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,z * y,y-x,z * x-y,x,z * end'),
169: ((6, 0, 'P61', 'PG6', 'HEXAGONAL'),'x,y,z * -y,x-y,1/3+z * y-x,-x,2/3+z * -x,-y,1/2+z * y,y-x,5/6+z * x-y,x,1/6+z * end'),
170: ((6, 0, 'P65', 'PG6', 'HEXAGONAL'),'x,y,z * -y,x-y,2/3+z * y-x,-x,1/3+z * -x,-y,1/2+z * y,y-x,1/6+z * x-y,x,5/6+z * end'),
171: ((6, 0, 'P62', 'PG6', 'HEXAGONAL'),'x,y,z * -y,x-y,2/3+z * y-x,-x,1/3+z * -x,-y,z * y,y-x,2/3+z * x-y,x,1/3+z * end'),
172: ((6, 0, 'P64', 'PG6', 'HEXAGONAL'),'x,y,z * -y,x-y,1/3+z * y-x,-x,2/3+z * -x,-y,z * y,y-x,1/3+z * x-y,x,2/3+z * end'),
173: ((6, 0, 'P63', 'PG6', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,1/2+z * y,y-x,1/2+z * x-y,x,1/2+z * end'),
174: ((6, 0, 'P-6', 'PG6BAR', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * x,y,-z * -y,x-y,-z * y-x,-x,-z * end'),
175: ((12, 0, 'P6/M', 'PG6/M', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,z * y,y-x,z * x-y,x,z * -x,-y,-z * y,y-x,-z * x-y,x,-z * x,y,-z * -y,x-y,-z * y-x,-x,-z * end'),
176: ((12, 0, 'P63/M', 'PG6/M', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,1/2+z * y,y-x,1/2+z * x-y,x,1/2+z * -x,-y,-z * y,y-x,-z * x-y,x,-z * x,y,1/2-z * -y,x-y,1/2-z * y-x,-x,1/2-z * end'),
177: ((12, 0, 'P622', 'PG622', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,z * y,y-x,z * x-y,x,z * y,x,-z * x-y,-y,-z * -x,y-x,-z * -y,-x,-z * y-x,y,-z * x,x-y,-z * end'),
178: ((12, 0, 'P6122', 'PG622', 'HEXAGONAL'),'x,y,z * -y,x-y,1/3+z * y-x,-x,2/3+z * -x,-y,1/2+z * y,y-x,5/6+z * x-y,x,1/6+z * y,x,1/3-z * x-y,-y,-z * -x,y-x,2/3-z * -y,-x,5/6-z * y-x,y,1/2-z * x,x-y,1/6-z * end'),
179: ((12, 0, 'P6522', 'PG622', 'HEXAGONAL'),'x,y,z * -y,x-y,2/3+z * y-x,-x,1/3+z * -x,-y,1/2+z * y,y-x,1/6+z * x-y,x,5/6+z * y,x,2/3-z * x-y,-y,-z * -x,y-x,1/3-z * -y,-x,1/6-z * y-x,y,1/2-z * x,x-y,5/6-z * end'),
180: ((12, 0, 'P6222', 'PG622', 'HEXAGONAL'),'x,y,z * -y,x-y,2/3+z * y-x,-x,1/3+z * -x,-y,z * y,y-x,2/3+z * x-y,x,1/3+z * y,x,2/3-z * x-y,-y,-z * -x,y-x,1/3-z * -y,-x,2/3-z * y-x,y,-z * x,x-y,1/3-z * end'),
181: ((12, 0, 'P6422', 'PG622', 'HEXAGONAL'),'x,y,z * -y,x-y,1/3+z * y-x,-x,2/3+z * -x,-y,z * y,y-x,1/3+z * x-y,x,2/3+z * y,x,1/3-z * x-y,-y,-z * -x,y-x,2/3-z * -y,-x,1/3-z * y-x,y,-z * x,x-y,2/3-z * end'),
182: ((12, 0, 'P6322', 'PG622', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,1/2+z * y,y-x,1/2+z * x-y,x,1/2+z * y,x,-z * x-y,-y,-z * -x,y-x,-z * -y,-x,1/2-z * y-x,y,1/2-z * x,x-y,1/2-z * end'),
183: ((12, 0, 'P6MM', 'PG6MM', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,z * y,y-x,z * x-y,x,z * -y,-x,z * y-x,y,z * x,x-y,z * y,x,z * x-y,-y,z * -x,y-x,z * end'),
184: ((12, 0, 'P6CC', 'PG6MM', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,z * y,y-x,z * x-y,x,z * -y,-x,1/2+z * y-x,y,1/2+z * x,x-y,1/2+z * y,x,1/2+z * x-y,-y,1/2+z * -x,y-x,1/2+z * end'),
185: ((12, 0, 'P63CM', 'PG6MM', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,1/2+z * y,y-x,1/2+z * x-y,x,1/2+z * -y,-x,1/2+z * y-x,y,1/2+z * x,x-y,1/2+z * y,x,z * x-y,-y,z * -x,y-x,z * end'),
186: ((12, 0, 'P63MC', 'PG6MM', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,1/2+z * y,y-x,1/2+z * x-y,x,1/2+z * -y,-x,z * y-x,y,z * x,x-y,z * y,x,1/2+z * x-y,-y,1/2+z * -x,y-x,1/2+z * end'),
187: ((12, 0, 'P-6M2', 'PG6BARM2', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * x,y,-z * -y,x-y,-z * y-x,-x,-z * -y,-x,z * y-x,y,z * x,x-y,z * -y,-x,-z * y-x,y,-z * x,x-y,-z * end'),
188: ((12, 0, 'P-6C2', 'PG6BARM2', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * x,y,1/2-z * -y,x-y,1/2-z * y-x,-x,1/2-z * -y,-x,1/2+z * y-x,y,1/2+z * x,x-y,1/2+z * -y,-x,-z * y-x,y,-z * x,x-y,-z * end'),
189: ((12, 0, 'P-62M', 'PG6BAR2M', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * x,y,-z * -y,x-y,-z * y-x,-x,-z * y,x,-z * x-y,-y,-z * -x,y-x,-z * y,x,z * x-y,-y,z * -x,y-x,z * end'),
190: ((12, 0, 'P-62C', 'PG6BAR2M', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * x,y,1/2-z * -y,x-y,1/2-z * y-x,-x,1/2-z * y,x,-z * x-y,-y,-z * -x,y-x,-z * y,x,1/2+z * x-y,-y,1/2+z * -x,y-x,1/2+z * end'),
191: ((24, 0, 'P6/MMM', 'PG6/MMM', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,z * y,y-x,z * x-y,x,z * y,x,-z * x-y,-y,-z * -x,y-x,-z * -y,-x,-z * y-x,y,-z * x,x-y,-z * -x,-y,-z * y,y-x,-z * x-y,x,-z * x,y,-z * y-x,-x,-z * -y,x-y,-z * -y,-x,z * y-x,y,z * x,x-y,z * y,x,z * x-y,-y,z * -x,y-x,z * end'),
192: ((24, 0, 'P6/MCC', 'PG6/MMM', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,z * y,y-x,z * x-y,x,z * y,x,1/2-z * x-y,-y,1/2-z * -x,y-x,1/2-z * -y,-x,1/2-z * y-x,y,1/2-z * x,x-y,1/2-z * -x,-y,-z * y,y-x,-z * x-y,x,-z * x,y,-z * y-x,-x,-z * -y,x-y,-z * -y,-x,1/2+z * y-x,y,1/2+z * x,x-y,1/2+z * y,x,1/2+z * x-y,-y,1/2+z * -x,y-x,1/2+z * end'),
193: ((24, 0, 'P63/MCM', 'PG6/MMM', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,1/2+z * y,y-x,1/2+z * x-y,x,1/2+z * y,x,1/2-z * x-y,-y,1/2-z * -x,y-x,1/2-z * -y,-x,-z * y-x,y,-z * x,x-y,-z * -x,-y,-z * y,y-x,-z * x-y,x,-z * x,y,1/2-z * y-x,-x,1/2-z * -y,x-y,1/2-z * -y,-x,1/2+z * y-x,y,1/2+z * x,x-y,1/2+z * y,x,z * x-y,-y,z * -x,y-x,z * end'),
194: ((24, 0, 'P63/MMC', 'PG6/MMM', 'HEXAGONAL'),'x,y,z * -y,x-y,z * y-x,-x,z * -x,-y,1/2+z * y,y-x,1/2+z * x-y,x,1/2+z * y,x,-z * x-y,-y,-z * -x,y-x,-z * -y,-x,1/2-z * y-x,y,1/2-z * x,x-y,1/2-z * -x,-y,-z * y,y-x,-z * x-y,x,-z * x,y,1/2-z * y-x,-x,1/2-z * -y,x-y,1/2-z * -y,-x,z * y-x,y,z * x,x-y,z * y,x,1/2+z * x-y,-y,1/2+z * -x,y-x,1/2+z * end'),
195: ((12, 0, 'P23', 'PG23', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * end'),
196: ((12, 3, 'F23', 'PG23', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
197: ((12, 1, 'I23', 'PG23', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * 1/2,1/2,1/2 * end'),
198: ((12, 0, 'P213', 'PG23', 'CUBIC'),'x,y,z * 1/2-x,-y,1/2+z * -x,1/2+y,1/2-z * 1/2+x,1/2-y,-z * z,x,y * 1/2+z,1/2-x,-y * 1/2-z,-x,1/2+y * -z,1/2+x,1/2-y * y,z,x * -y,1/2+z,1/2-x * 1/2+y,1/2-z,-x * 1/2-y,-z,1/2+x * end'),
199: ((12, 1, 'I213', 'PG23', 'CUBIC'),'x,y,z * 1/2-x,-y,1/2+z * -x,1/2+y,1/2-z * 1/2+x,1/2-y,-z * z,x,y * 1/2+z,1/2-x,-y * 1/2-z,-x,1/2+y * -z,1/2+x,1/2-y * y,z,x * -y,1/2+z,1/2-x * 1/2+y,1/2-z,-x * 1/2-y,-z,1/2+x * 1/2,1/2,1/2 * end'),
200: ((24, 0, 'PM-3', 'PGM3BAR', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * -x,-y,-z * x,y,-z * x,-y,z * -x,y,z * -z,-x,-y * -z,x,y * z,x,-y * z,-x,y * -y,-z,-x * y,-z,x * -y,z,x * y,z,-x * end'),
201: ((24, 0, 'PN-3', 'PGM3BAR', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * 1/2-x,1/2-y,1/2-z * 1/2+x,1/2+y,1/2-z * 1/2+x,1/2-y,1/2+z * 1/2-x,1/2+y,1/2+z * 1/2-z,1/2-x,1/2-y * 1/2-z,1/2+x,1/2+y * 1/2+z,1/2+x,1/2-y * 1/2+z,1/2-x,1/2+y * 1/2-y,1/2-z,1/2-x * 1/2+y,1/2-z,1/2+x * 1/2-y,1/2+z,1/2+x * 1/2+y,1/2+z,1/2-x * end'),
202: ((24, 3, 'FM-3', 'PGM3BAR', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * -x,-y,-z * x,y,-z * x,-y,z * -x,y,z * -z,-x,-y * -z,x,y * z,x,-y * z,-x,y * -y,-z,-x * y,-z,x * -y,z,x * y,z,-x * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
203: ((24, 3, 'FD-3', 'PGM3BAR', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * 1/4-x,1/4-y,1/4-z * 1/4+x,1/4+y,1/4-z * 1/4+x,1/4-y,1/4+z * 1/4-x,1/4+y,1/4+z * 1/4-z,1/4-x,1/4-y * 1/4-z,1/4+x,1/4+y * 1/4+z,1/4+x,1/4-y * 1/4+z,1/4-x,1/4+y * 1/4-y,1/4-z,1/4-x * 1/4+y,1/4-z,1/4+x * 1/4-y,1/4+z,1/4+x * 1/4+y,1/4+z,1/4-x * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
204: ((24, 1, 'IM-3', 'PGM3BAR', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * -x,-y,-z * x,y,-z * x,-y,z * -x,y,z * -z,-x,-y * -z,x,y * z,x,-y * z,-x,y * -y,-z,-x * y,-z,x * -y,z,x * y,z,-x * 1/2,1/2,1/2 * end'),
205: ((24, 0, 'PA-3', 'PGM3BAR', 'CUBIC'),'x,y,z * 1/2-x,-y,1/2+z * -x,1/2+y,1/2-z * 1/2+x,1/2-y,-z * z,x,y * 1/2+z,1/2-x,-y * 1/2-z,-x,1/2+y * -z,1/2+x,1/2-y * y,z,x * -y,1/2+z,1/2-x * 1/2+y,1/2-z,-x * 1/2-y,-z,1/2+x * -x,-y,-z * 1/2+x,y,1/2-z * x,1/2-y,1/2+z * 1/2-x,1/2+y,z * -z,-x,-y * 1/2-z,1/2+x,y * 1/2+z,x,1/2-y * z,1/2-x,1/2+y * -y,-z,-x * y,1/2-z,1/2+x * 1/2-y,1/2+z,x * 1/2+y,z,1/2-x * end'),
206: ((24, 1, 'IA-3', 'PGM3BAR', 'CUBIC'),'x,y,z * 1/2-x,-y,1/2+z * -x,1/2+y,1/2-z * 1/2+x,1/2-y,-z * z,x,y * 1/2+z,1/2-x,-y * 1/2-z,-x,1/2+y * -z,1/2+x,1/2-y * y,z,x * -y,1/2+z,1/2-x * 1/2+y,1/2-z,-x * 1/2-y,-z,1/2+x * -x,-y,-z * 1/2+x,y,1/2-z * x,1/2-y,1/2+z * 1/2-x,1/2+y,z * -z,-x,-y * 1/2-z,1/2+x,y * 1/2+z,x,1/2-y * z,1/2-x,1/2+y * -y,-z,-x * y,1/2-z,1/2+x * 1/2-y,1/2+z,x * 1/2+y,z,1/2-x * 1/2,1/2,1/2 * end'),
207: ((24, 0, 'P432', 'PG432', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * y,x,-z * -y,-x,-z * y,-x,z * -y,x,z * x,z,-y * -x,z,y * -x,-z,-y * x,-z,y * z,y,-x * z,-y,x * -z,y,x * -z,-y,-x * end'),
208: ((24, 0, 'P4232', 'PG432', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * 1/2+y,1/2+x,1/2-z * 1/2-y,1/2-x,1/2-z * 1/2+y,1/2-x,1/2+z * 1/2-y,1/2+x,1/2+z * 1/2+x,1/2+z,1/2-y * 1/2-x,1/2+z,1/2+y * 1/2-x,1/2-z,1/2-y * 1/2+x,1/2-z,1/2+y * 1/2+z,1/2+y,1/2-x * 1/2+z,1/2-y,1/2+x * 1/2-z,1/2+y,1/2+x * 1/2-z,1/2-y,1/2-x * end'),
209: ((24, 3, 'F432', 'PG432', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * y,x,-z * -y,-x,-z * y,-x,z * -y,x,z * x,z,-y * -x,z,y * -x,-z,-y * x,-z,y * z,y,-x * z,-y,x * -z,y,x * -z,-y,-x * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
210: ((24, 3, 'F4132', 'PG432', 'CUBIC'),'x,y,z * -x,1/2-y,1/2+z * 1/2-x,1/2+y,-z * 1/2+x,-y,1/2-z * z,x,y * 1/2+z,-x,1/2-y * -z,1/2-x,1/2+y * 1/2-z,1/2+x,-y * y,z,x * 1/2-y,1/2+z,-x * 1/2+y,-z,1/2-x * -y,1/2-z,1/2+x * 3/4+y,1/4+x,3/4-z * 1/4-y,1/4-x,1/4-z * 1/4+y,3/4-x,3/4+z * 3/4-y,3/4+x,1/4+z * 3/4+x,1/4+z,3/4-y * 3/4-x,3/4+z,1/4+y * 1/4-x,1/4-z,1/4-y * 1/4+x,3/4-z,3/4+y * 3/4+z,1/4+y,3/4-x * 1/4+z,3/4-y,3/4+x * 3/4-z,3/4+y,1/4+x * 1/4-z,1/4-y,1/4-x * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
211: ((24, 1, 'I432', 'PG432', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * y,x,-z * -y,-x,-z * y,-x,z * -y,x,z * x,z,-y * -x,z,y * -x,-z,-y * x,-z,y * z,y,-x * z,-y,x * -z,y,x * -z,-y,-x * 1/2,1/2,1/2 * end'),
212: ((24, 0, 'P4332', 'PG432', 'CUBIC'),'x,y,z * 1/2-x,-y,1/2+z * -x,1/2+y,1/2-z * 1/2+x,1/2-y,-z * z,x,y * 1/2+z,1/2-x,-y * 1/2-z,-x,1/2+y * -z,1/2+x,1/2-y * y,z,x * -y,1/2+z,1/2-x * 1/2+y,1/2-z,-x * 1/2-y,-z,1/2+x * 1/4+y,3/4+x,3/4-z * 1/4-y,1/4-x,1/4-z * 3/4+y,3/4-x,1/4+z * 3/4-y,1/4+x,3/4+z * 1/4+x,3/4+z,3/4-y * 3/4-x,1/4+z,3/4+y * 1/4-x,1/4-z,1/4-y * 3/4+x,3/4-z,1/4+y * 1/4+z,3/4+y,3/4-x * 3/4+z,3/4-y,1/4+x * 3/4-z,1/4+y,3/4+x * 1/4-z,1/4-y,1/4-x * end'),
213: ((24, 0, 'P4132', 'PG432', 'CUBIC'),'x,y,z * 1/2-x,-y,1/2+z * -x,1/2+y,1/2-z * 1/2+x,1/2-y,-z * z,x,y * 1/2+z,1/2-x,-y * 1/2-z,-x,1/2+y * -z,1/2+x,1/2-y * y,z,x * -y,1/2+z,1/2-x * 1/2+y,1/2-z,-x * 1/2-y,-z,1/2+x * 3/4+y,1/4+x,1/4-z * 3/4-y,3/4-x,3/4-z * 1/4+y,1/4-x,3/4+z * 1/4-y,3/4+x,1/4+z * 3/4+x,1/4+z,1/4-y * 1/4-x,3/4+z,1/4+y * 3/4-x,3/4-z,3/4-y * 1/4+x,1/4-z,3/4+y * 3/4+z,1/4+y,1/4-x * 1/4+z,1/4-y,3/4+x * 1/4-z,3/4+y,1/4+x * 3/4-z,3/4-y,3/4-x * end'),
214: ((24, 1, 'I4132', 'PG432', 'CUBIC'),'x,y,z * 1/2-x,-y,1/2+z * -x,1/2+y,1/2-z * 1/2+x,1/2-y,-z * z,x,y * 1/2+z,1/2-x,-y * 1/2-z,-x,1/2+y * -z,1/2+x,1/2-y * y,z,x * -y,1/2+z,1/2-x * 1/2+y,1/2-z,-x * 1/2-y,-z,1/2+x * 3/4+y,1/4+x,1/4-z * 3/4-y,3/4-x,3/4-z * 1/4+y,1/4-x,3/4+z * 1/4-y,3/4+x,1/4+z * 3/4+x,1/4+z,1/4-y * 1/4-x,3/4+z,1/4+y * 3/4-x,3/4-z,3/4-y * 1/4+x,1/4-z,3/4+y * 3/4+z,1/4+y,1/4-x * 1/4+z,1/4-y,3/4+x * 1/4-z,3/4+y,1/4+x * 3/4-z,3/4-y,3/4-x * 1/2,1/2,1/2 * end'),
215: ((24, 0, 'P-43M', 'PG4BAR3M', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * y,x,z * -y,-x,z * y,-x,-z * -y,x,-z * x,z,y * -x,z,-y * -x,-z,y * x,-z,-y * z,y,x * z,-y,-x * -z,y,-x * -z,-y,x * end'),
216: ((24, 3, 'F-43M', 'PG4BAR3M', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * y,x,z * -y,-x,z * y,-x,-z * -y,x,-z * x,z,y * -x,z,-y * -x,-z,y * x,-z,-y * z,y,x * z,-y,-x * -z,y,-x * -z,-y,x * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
217: ((24, 1, 'I-43M', 'PG4BAR3M', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * y,x,z * -y,-x,z * y,-x,-z * -y,x,-z * x,z,y * -x,z,-y * -x,-z,y * x,-z,-y * z,y,x * z,-y,-x * -z,y,-x * -z,-y,x * 1/2,1/2,1/2 * end'),
218: ((24, 0, 'P-43N', 'PG4BAR3M', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * 1/2+y,1/2+x,1/2+z * 1/2-y,1/2-x,1/2+z * 1/2+y,1/2-x,1/2-z * 1/2-y,1/2+x,1/2-z * 1/2+x,1/2+z,1/2+y * 1/2-x,1/2+z,1/2-y * 1/2-x,1/2-z,1/2+y * 1/2+x,1/2-z,1/2-y * 1/2+z,1/2+y,1/2+x * 1/2+z,1/2-y,1/2-x * 1/2-z,1/2+y,1/2-x * 1/2-z,1/2-y,1/2+x * end'),
219: ((24, 3, 'F-43C', 'PG4BAR3M', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * 1/2+y,1/2+x,1/2+z * 1/2-y,1/2-x,1/2+z * 1/2+y,1/2-x,1/2-z * 1/2-y,1/2+x,1/2-z * 1/2+x,1/2+z,1/2+y * 1/2-x,1/2+z,1/2-y * 1/2-x,1/2-z,1/2+y * 1/2+x,1/2-z,1/2-y * 1/2+z,1/2+y,1/2+x * 1/2+z,1/2-y,1/2-x * 1/2-z,1/2+y,1/2-x * 1/2-z,1/2-y,1/2+x * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
220: ((24, 1, 'I-43D', 'PG4BAR3M', 'CUBIC'),'x,y,z * 1/2-x,-y,1/2+z * -x,1/2+y,1/2-z * 1/2+x,1/2-y,-z * z,x,y * 1/2+z,1/2-x,-y * 1/2-z,-x,1/2+y * -z,1/2+x,1/2-y * y,z,x * -y,1/2+z,1/2-x * 1/2+y,1/2-z,-x * 1/2-y,-z,1/2+x * 1/4+y,1/4+x,1/4+z * 1/4-y,3/4-x,3/4+z * 3/4+y,1/4-x,3/4-z * 3/4-y,3/4+x,1/4-z * 1/4+x,1/4+z,1/4+y * 3/4-x,3/4+z,1/4-y * 1/4-x,3/4-z,3/4+y * 3/4+x,1/4-z,3/4-y * 1/4+z,1/4+y,1/4+x * 3/4+z,1/4-y,3/4-x * 3/4-z,3/4+y,1/4-x * 1/4-z,3/4-y,3/4+x * 1/2,1/2,1/2 * end'),
221: ((48, 0, 'PM-3M', 'PGM3BARM', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * y,x,-z * -y,-x,-z * y,-x,z * -y,x,z * x,z,-y * -x,z,y * -x,-z,-y * x,-z,y * z,y,-x * z,-y,x * -z,y,x * -z,-y,-x * -x,-y,-z * x,y,-z * x,-y,z * -x,y,z * -z,-x,-y * -z,x,y * z,x,-y * z,-x,y * -y,-z,-x * y,-z,x * -y,z,x * y,z,-x * -y,-x,z * y,x,z * -y,x,-z * y,-x,-z * -x,-z,y * x,-z,-y * x,z,y * -x,z,-y * -z,-y,x * -z,y,-x * z,-y,-x * z,y,x * end'),
222: ((48, 0, 'PN-3N', 'PGM3BARM', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * y,x,-z * -y,-x,-z * y,-x,z * -y,x,z * x,z,-y * -x,z,y * -x,-z,-y * x,-z,y * z,y,-x * z,-y,x * -z,y,x * -z,-y,-x * 1/2-x,1/2-y,1/2-z * 1/2+x,1/2+y,1/2-z * 1/2+x,1/2-y,1/2+z * 1/2-x,1/2+y,1/2+z * 1/2-z,1/2-x,1/2-y * 1/2-z,1/2+x,1/2+y * 1/2+z,1/2+x,1/2-y * 1/2+z,1/2-x,1/2+y * 1/2-y,1/2-z,1/2-x * 1/2+y,1/2-z,1/2+x * 1/2-y,1/2+z,1/2+x * 1/2+y,1/2+z,1/2-x * 1/2-y,1/2-x,1/2+z * 1/2+y,1/2+x,1/2+z * 1/2-y,1/2+x,1/2-z * 1/2+y,1/2-x,1/2-z * 1/2-x,1/2-z,1/2+y * 1/2+x,1/2-z,1/2-y * 1/2+x,1/2+z,1/2+y * 1/2-x,1/2+z,1/2-y * 1/2-z,1/2-y,1/2+x * 1/2-z,1/2+y,1/2-x * 1/2+z,1/2-y,1/2-x * 1/2+z,1/2+y,1/2+x * end'),
223: ((48, 0, 'PM-3N', 'PGM3BARM', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * 1/2+y,1/2+x,1/2-z * 1/2-y,1/2-x,1/2-z * 1/2+y,1/2-x,1/2+z * 1/2-y,1/2+x,1/2+z * 1/2+x,1/2+z,1/2-y * 1/2-x,1/2+z,1/2+y * 1/2-x,1/2-z,1/2-y * 1/2+x,1/2-z,1/2+y * 1/2+z,1/2+y,1/2-x * 1/2+z,1/2-y,1/2+x * 1/2-z,1/2+y,1/2+x * 1/2-z,1/2-y,1/2-x * -x,-y,-z * x,y,-z * x,-y,z * -x,y,z * -z,-x,-y * -z,x,y * z,x,-y * z,-x,y * -y,-z,-x * y,-z,x * -y,z,x * y,z,-x * 1/2-y,1/2-x,1/2+z * 1/2+y,1/2+x,1/2+z * 1/2-y,1/2+x,1/2-z * 1/2+y,1/2-x,1/2-z * 1/2-x,1/2-z,1/2+y * 1/2+x,1/2-z,1/2-y * 1/2+x,1/2+z,1/2+y * 1/2-x,1/2+z,1/2-y * 1/2-z,1/2-y,1/2+x * 1/2-z,1/2+y,1/2-x * 1/2+z,1/2-y,1/2-x * 1/2+z,1/2+y,1/2+x * end'),
224: ((48, 0, 'PN-3M', 'PGM3BARM', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * 1/2+y,1/2+x,1/2-z * 1/2-y,1/2-x,1/2-z * 1/2+y,1/2-x,1/2+z * 1/2-y,1/2+x,1/2+z * 1/2+x,1/2+z,1/2-y * 1/2-x,1/2+z,1/2+y * 1/2-x,1/2-z,1/2-y * 1/2+x,1/2-z,1/2+y * 1/2+z,1/2+y,1/2-x * 1/2+z,1/2-y,1/2+x * 1/2-z,1/2+y,1/2+x * 1/2-z,1/2-y,1/2-x * 1/2-x,1/2-y,1/2-z * 1/2+x,1/2+y,1/2-z * 1/2+x,1/2-y,1/2+z * 1/2-x,1/2+y,1/2+z * 1/2-z,1/2-x,1/2-y * 1/2-z,1/2+x,1/2+y * 1/2+z,1/2+x,1/2-y * 1/2+z,1/2-x,1/2+y * 1/2-y,1/2-z,1/2-x * 1/2+y,1/2-z,1/2+x * 1/2-y,1/2+z,1/2+x * 1/2+y,1/2+z,1/2-x * -y,-x,z * y,x,z * -y,x,-z * y,-x,-z * -x,-z,y * x,-z,-y * x,z,y * -x,z,-y * -z,-y,x * -z,y,-x * z,-y,-x * z,y,x * end'),
225: ((48, 3, 'FM-3M', 'PGM3BARM', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * y,x,-z * -y,-x,-z * y,-x,z * -y,x,z * x,z,-y * -x,z,y * -x,-z,-y * x,-z,y * z,y,-x * z,-y,x * -z,y,x * -z,-y,-x * -x,-y,-z * x,y,-z * x,-y,z * -x,y,z * -z,-x,-y * -z,x,y * z,x,-y * z,-x,y * -y,-z,-x * y,-z,x * -y,z,x * y,z,-x * -y,-x,z * y,x,z * -y,x,-z * y,-x,-z * -x,-z,y * x,-z,-y * x,z,y * -x,z,-y * -z,-y,x * -z,y,-x * z,-y,-x * z,y,x * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
226: ((48, 3, 'FM-3C', 'PGM3BARM', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * 1/2+y,1/2+x,1/2-z * 1/2-y,1/2-x,1/2-z * 1/2+y,1/2-x,1/2+z * 1/2-y,1/2+x,1/2+z * 1/2+x,1/2+z,1/2-y * 1/2-x,1/2+z,1/2+y * 1/2-x,1/2-z,1/2-y * 1/2+x,1/2-z,1/2+y * 1/2+z,1/2+y,1/2-x * 1/2+z,1/2-y,1/2+x * 1/2-z,1/2+y,1/2+x * 1/2-z,1/2-y,1/2-x * -x,-y,-z * x,y,-z * x,-y,z * -x,y,z * -z,-x,-y * -z,x,y * z,x,-y * z,-x,y * -y,-z,-x * y,-z,x * -y,z,x * y,z,-x * 1/2-y,1/2-x,1/2+z * 1/2+y,1/2+x,1/2+z * 1/2-y,1/2+x,1/2-z * 1/2+y,1/2-x,1/2-z * 1/2-x,1/2-z,1/2+y * 1/2+x,1/2-z,1/2-y * 1/2+x,1/2+z,1/2+y * 1/2-x,1/2+z,1/2-y * 1/2-z,1/2-y,1/2+x * 1/2-z,1/2+y,1/2-x * 1/2+z,1/2-y,1/2-x * 1/2+z,1/2+y,1/2+x * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
227: ((48, 3, 'FD-3M', 'PGM3BARM', 'CUBIC'),'x,y,z * -x,1/2-y,1/2+z * 1/2-x,1/2+y,-z * 1/2+x,-y,1/2-z * z,x,y * 1/2+z,-x,1/2-y * -z,1/2-x,1/2+y * 1/2-z,1/2+x,-y * y,z,x * 1/2-y,1/2+z,-x * 1/2+y,-z,1/2-x * -y,1/2-z,1/2+x * 3/4+y,1/4+x,3/4-z * 1/4-y,1/4-x,1/4-z * 1/4+y,3/4-x,3/4+z * 3/4-y,3/4+x,1/4+z * 3/4+x,1/4+z,3/4-y * 3/4-x,3/4+z,1/4+y * 1/4-x,1/4-z,1/4-y * 1/4+x,3/4-z,3/4+y * 3/4+z,1/4+y,3/4-x * 1/4+z,3/4-y,3/4+x * 3/4-z,3/4+y,1/4+x * 1/4-z,1/4-y,1/4-x * 1/4-x,1/4-y,1/4-z * 1/4+x,3/4+y,3/4-z * 3/4+x,3/4-y,1/4+z * 3/4-x,1/4+y,3/4+z * 1/4-z,1/4-x,1/4-y * 3/4-z,1/4+x,3/4+y * 1/4+z,3/4+x,3/4-y * 3/4+z,3/4-x,1/4+y * 1/4-y,1/4-z,1/4-x * 3/4+y,3/4-z,1/4+x * 3/4-y,1/4+z,3/4+x * 1/4+y,3/4+z,3/4-x * 1/2-y,-x,1/2+z * y,x,z * -y,1/2+x,1/2-z * 1/2+y,1/2-x,-z * 1/2-x,-z,1/2+y * 1/2+x,1/2-z,-y * x,z,y * -x,1/2+z,1/2-y * 1/2-z,-y,1/2+x * -z,1/2+y,1/2-x * 1/2+z,1/2-y,-x * z,y,x * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
228: ((48, 3, 'FD-3C', 'PGM3BARM', 'CUBIC'),'x,y,z * -x,1/2-y,1/2+z * 1/2-x,1/2+y,-z * 1/2+x,-y,1/2-z * z,x,y * 1/2+z,-x,1/2-y * -z,1/2-x,1/2+y * 1/2-z,1/2+x,-y * y,z,x * 1/2-y,1/2+z,-x * 1/2+y,-z,1/2-x * -y,1/2-z,1/2+x * 3/4+y,1/4+x,3/4-z * 1/4-y,1/4-x,1/4-z * 1/4+y,3/4-x,3/4+z * 3/4-y,3/4+x,1/4+z * 3/4+x,1/4+z,3/4-y * 3/4-x,3/4+z,1/4+y * 1/4-x,1/4-z,1/4-y * 1/4+x,3/4-z,3/4+y * 3/4+z,1/4+y,3/4-x * 1/4+z,3/4-y,3/4+x * 3/4-z,3/4+y,1/4+x * 1/4-z,1/4-y,1/4-x * 3/4-x,3/4-y,3/4-z * 3/4+x,1/4+y,1/4-z * 1/4+x,1/4-y,3/4+z * 1/4-x,3/4+y,1/4+z * 3/4-z,3/4-x,3/4-y * 1/4-z,3/4+x,1/4+y * 3/4+z,1/4+x,1/4-y * 1/4+z,1/4-x,3/4+y * 3/4-y,3/4-z,3/4-x * 1/4+y,1/4-z,3/4+x * 1/4-y,3/4+z,1/4+x * 3/4+y,1/4+z,1/4-x * -y,1/2-x,z * 1/2+y,1/2+x,1/2+z * 1/2-y,x,-z * y,-x,1/2-z * -x,1/2-z,y * x,-z,1/2-y * 1/2+x,1/2+z,1/2+y * 1/2-x,z,-y * -z,1/2-y,x * 1/2-z,y,-x * z,-y,1/2-x * 1/2+z,1/2+y,1/2+x * 0,1/2,1/2 * 1/2,0,1/2 * 1/2,1/2,0 * end'),
229: ((48, 1, 'IM-3M', 'PGM3BARM', 'CUBIC'),'x,y,z * -x,-y,z * -x,y,-z * x,-y,-z * z,x,y * z,-x,-y * -z,-x,y * -z,x,-y * y,z,x * -y,z,-x * y,-z,-x * -y,-z,x * y,x,-z * -y,-x,-z * y,-x,z * -y,x,z * x,z,-y * -x,z,y * -x,-z,-y * x,-z,y * z,y,-x * z,-y,x * -z,y,x * -z,-y,-x * -x,-y,-z * x,y,-z * x,-y,z * -x,y,z * -z,-x,-y * -z,x,y * z,x,-y * z,-x,y * -y,-z,-x * y,-z,x * -y,z,x * y,z,-x * -y,-x,z * y,x,z * -y,x,-z * y,-x,-z * -x,-z,y * x,-z,-y * x,z,y * -x,z,-y * -z,-y,x * -z,y,-x * z,-y,-x * z,y,x * 1/2,1/2,1/2 * end'),
230: ((48, 1, 'IA-3D', 'PGM3BARM', 'CUBIC'),'x,y,z * 1/2-x,-y,1/2+z * -x,1/2+y,1/2-z * 1/2+x,1/2-y,-z * z,x,y * 1/2+z,1/2-x,-y * 1/2-z,-x,1/2+y * -z,1/2+x,1/2-y * y,z,x * -y,1/2+z,1/2-x * 1/2+y,1/2-z,-x * 1/2-y,-z,1/2+x * 3/4+y,1/4+x,1/4-z * 3/4-y,3/4-x,3/4-z * 1/4+y,1/4-x,3/4+z * 1/4-y,3/4+x,1/4+z * 3/4+x,1/4+z,1/4-y * 1/4-x,3/4+z,1/4+y * 3/4-x,3/4-z,3/4-y * 1/4+x,1/4-z,3/4+y * 3/4+z,1/4+y,1/4-x * 1/4+z,1/4-y,3/4+x * 1/4-z,3/4+y,1/4+x * 3/4-z,3/4-y,3/4-x * -x,-y,-z * 1/2+x,y,1/2-z * x,1/2-y,1/2+z * 1/2-x,1/2+y,z * -z,-x,-y * 1/2-z,1/2+x,y * 1/2+z,x,1/2-y * z,1/2-x,1/2+y * -y,-z,-x * y,1/2-z,1/2+x * 1/2-y,1/2+z,x * 1/2+y,z,1/2-x * 1/4-y,3/4-x,3/4+z * 1/4+y,1/4+x,1/4+z * 3/4-y,3/4+x,1/4-z * 3/4+y,1/4-x,3/4-z * 1/4-x,3/4-z,3/4+y * 3/4+x,1/4-z,3/4-y * 1/4+x,1/4+z,1/4+y * 3/4-x,3/4+z,1/4-y * 1/4-z,3/4-y,3/4+x * 3/4-z,3/4+y,1/4-x * 3/4+z,1/4-y,3/4-x * 1/4+z,1/4+y,1/4+x * 1/2,1/2,1/2 * end')}
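# Each entry above is ((n_symops, n_centering, spacegroup, pointgroup,
# crystal_system), ops): ops is a '*'-separated list of symmetry operators,
# followed by n_centering centering translations (e.g. '1/2,1/2,1/2' for I
# lattices) and terminated by 'end'. This layout is inferred from the
# entries themselves.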
def opReadCl(filename):
f = open(filename)
r = f.read()
f.close()
return r
def opWriteCl(filename, _str):
f = open(filename,"w")
f.write(_str)
f.close()
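# Minimal file helpers: opReadCl returns the full content of filename,
# opWriteCl (over)writes _str to filename.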
def guessHA(wavelength):
"Trying to find the closest HA edge in HAd from wavelength"
diffs = map(lambda x: abs(x - wavelength), HAd.keys())
return HAd[HAd.keys()[diffs.index(min(diffs))]]
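# get_crossec writes a crossec input file and runs the CCP4 'crossec'
# utility to look up the anomalous scattering factors f' and f'' for the
# chosen heavy atom at the working wavelength; (0., 0.) is the fallback
# when crossec is unavailable or its output cannot be parsed.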
def get_crossec(P):
idx = -1
opWriteCl("crossec.inp", crossec_script % vars(P))
try:
os.system("crossec<crossec.inp>crossec.out")
txt = opReadCl("crossec.out")
idx = txt.index(" $$\n<B>")
except: pass
    if idx != -1:
        try:
            fp, fpp = map(float, txt[idx-23:idx-2].split())
            return fp, fpp
        except: pass
    # Always return a (fp, fpp) tuple, even when crossec failed or its
    # output could not be parsed.
    return 0., 0.
class Dumy:
pass
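# Dumy is a bare attribute container used below as the parameter object P/XC.
# DoMode drives one export mode: __init__ prepares the XDSCONV.INP input for
# the chosen mode, run() executes xdsconv, and post_run() writes the
# per-program helper scripts (f2mtz/cad, shelxc, solve, phaser, ...).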
class DoMode:
def __init__(self, P):
self.mode = P.mode
self.dir_mode = self.mode.lower()+"/"
P.dir_mode = self.dir_mode
P.spg_name = (xupy.SPGlib[int(P.spgn_in)][0]).lower()
if P.res_high == 0.0:
try:
                # It will fail here if Numeric is not installed
                # or for old versions of XDS
from xdsHKLinfos import get_info
infos = get_info(P.file_name_in)
P.res_high = infos["res_high"]
P.res_low = infos["res_low"]
except:
pass
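        # Heavy-atom bookkeeping: an explicit ha_name wins; otherwise the
        # closest edge is guessed from the wavelength. In both cases f'/f''
        # are obtained through crossec, else they default to 0.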
if P.wavelength:
self.wavelength = P.wavelength
else:
self.wavelength = 1.5418
if P.ha_name:
self.HA = P.ha_name
P.ha_name = self.HA
P.fp, P.fpp = get_crossec(P)
elif P.wavelength:
self.HA = guessHA(self.wavelength)
P.ha_name = self.HA[0]
P.fp, P.fpp = get_crossec(P)
else:
P.ha_name = "Unknown"
P.fp, P.fpp = 0., 0.
self.HA = "Unknown"
if self.mode == "CNS":
self.name_ext = ".cv"
P.mode_out = self.mode
elif self.mode == "SHELX":
self.name_ext = "_F4.hkl"
#P.free_out = "GENERATE_FRACTION_OF_TEST_REFLECTIONS=0.05\n"
P.mode_out = self.mode
elif self.mode == "SOLVE":
self.name_ext = ".hkli"
P.mode_out = "NONE"
            P.merge_out = "FALSE"
P.friedel_out = "TRUE"
elif self.mode == "EPMR":
self.name_ext = ".hkl"
P.mode_out = "FALL"
            P.merge_out = "TRUE"
P.friedel_out = "TRUE"
elif self.mode == "AMORE":
P.free_out = ""
P.free_lbl = ""
P.free_code = ""
self.name_ext = ".HKL"
P.mode_out = "CCP4"
            P.merge_out = "TRUE"
P.friedel_out = "TRUE"
elif self.mode == "REPLACE":
P.free_out = ""
P.free_lbl = ""
P.free_code = ""
self.name_ext = ".hkl"
P.mode_out = "CCP4"
            P.merge_out = "TRUE"
P.friedel_out = "TRUE"
elif self.mode == "CRANK":
self.name_ext = ".mtz"
P.mode_out = "CCP4_I"
            P.merge_out = "TRUE"
P.friedel_out = "FALSE"
elif self.mode == "CCP4":
self.name_ext = ".mtz"
P.mode_out = self.mode
if P.friedel_out == "FALSE":
P.cinp_ano = "DANO%(lbl)s SIGDANO%(lbl)s ISYM%(lbl)s" \
% vars(P)
P.cinp_ano2 = "D Q Y"
else: P.cinp_ano = P.cinp_ano2 = ""
P.merge_out = "TRUE"
elif self.mode == "CCP4F":
self.name_ext = ".mtz"
P.mode_out = "CCP4"
if P.friedel_out == "FALSE":
P.cinp_ano = "DANO%(lbl)s SIGDANO%(lbl)s ISYM%(lbl)s" \
% vars(P)
P.cad_ano = "E1=DANO%(lbl)s E2=SIGDANO%(lbl)s E3=ISYM%(lbl)s" \
% vars(P)
P.cinp_ano2 = "D Q Y"
else: P.cinp_ano = P.cad_ano = P.cinp_ano2 = ""
P.merge_out = "TRUE"
P.file_name_out = "F2MTZ.HKL.TMP2"
opWriteCl("XDSCONV2.INP", xdsconv_script % vars(P) + P.free_out)
opWriteCl("f2mtz.inp2", f2mtz_script % vars(P))
opWriteCl("run_simple_scale.sh", scala_script)
P.mode_out = "CCP4_F"
if P.friedel_out == "FALSE":
P.cinp_ano = "F(+)%(lbl)s SIGF(+)%(lbl)s F(-)%(lbl)s SIGF(-)%(lbl)s" \
% vars(P)
P.cinp_ano2 = "G L G L"
else: P.cinp_ano = P.cinp_ano2 = ""
P.merge_out = "TRUE"
# LABOUT H K L FP SIGFP F(+) SIGF(+) F(-) SIGF(-) FreeRflag
# CTYPOUT H H H F Q G L G L X
elif self.mode == "PHASER":
self.name_ext = ".mtz"
P.mode_out = "CCP4_F"
if P.friedel_out == "FALSE":
P.cinp_ano = "F(+)%(lbl)s SIGF(+)%(lbl)s F(-)%(lbl)s SIGF(-)%(lbl)s" \
% vars(P)
P.cinp_ano2 = "G L G L"
else: P.cinp_ano = P.cinp_ano2 = ""
P.merge_out = "TRUE"
elif self.mode == "SHARP":
self.name_ext = ".mtz"
P.mode_out = "CCP4"
if P.friedel_out != "FALSE":
print "Error, no Anomalous information found from XDS."
sys.exit()
P.merge_out = "TRUE"
P.ident = ".".join(P.file_name_in.split(".")[:-1])
#P.file_name_out = P.ident+"_"+P.ha_name+self.name_ext
#P.file_name_out = P.ident + self.name_ext
P.file_name_out = P.ID + self.name_ext
if self.mode == "CCP4" or self.mode == "CCP4F" or \
self.mode == "CRANK" or self.mode == "SHARP" or self.mode == "PHASER" :
self.last_name = P.file_name_out
P.file_name_out = "F2MTZ.HKL.TMP"
P.last_name = self.last_name
if self.mode != "SOLVE":
opWriteCl("XDSCONV.INP", xdsconv_script % vars(P) + P.free_out)
def run(self):
ls = os.listdir(".")
if not self.dir_mode[:-1] in ls:
os.makedirs(self.dir_mode)
if self.mode not in ("SOLVE",):
toexec = os.path.join(xupy.XDSHOME,"xdsconv")
xupy.exec_prog(toexec, stdout="xdsconv.log")
if self.mode == "CCP4F":
os.system("mv XDSCONV2.INP XDSCONV.INP")
os.system("mv f2mtz.inp2 %s" % self.dir_mode)
toexec = os.path.join(xupy.XDSHOME,"xdsconv")
xupy.exec_prog(toexec, stdout="xdsconv_dano.log")
def post_run(self, P):
if self.mode == "SHELX":
# run xprepx
            P.cns_sg = cns_sg_lib[int(P.spgn_in)]
opWriteCl("%s/xprep.inp" % P.dir_mode, xprep_script % vars(P))
opWriteCl("%s/shelxc.inp" % P.dir_mode, shelxc_script % vars(P))
opWriteCl("%s/run_shelx.sh" % P.dir_mode, shelxall_script % vars(P))
os.system("chmod a+x %s/run_shelx.sh" % P.dir_mode)
opWriteCl("%s/run_sitcom.sh" % P.dir_mode, sitcom_script % vars(P))
os.system("chmod a+x %s/run_sitcom.sh" % P.dir_mode)
#os.system("cd %s;xprep<xprep.inp>xprep.log" % P.dir_mode)
os.system("cd %s;shelxc XX1 <shelxc.inp>shelxc.log" % P.dir_mode)
elif self.mode == "CNS":
#file_name_out = P.dirmode+dirname+".cv"
            P.cns_sg = cns_sg_lib[int(P.spgn_in)]
P.cns_cell = 6*"%.2f, " % tuple(P.cell)
opWriteCl("%s/cns_xtal.par" % P.dir_mode, cns_par % vars(P))
elif self.mode == "CCP4":
opWriteCl("%s/f2mtz.inp" % P.dir_mode, f2mtz_script % vars(P))
opWriteCl("%s/cad.inp" % P.dir_mode, cad_script % vars(P))
os.system("cd %s;f2mtz hklout TMP.MTZ<f2mtz.inp >f2mtz.log" % P.dir_mode)
os.system("cd %s;cad hklin1 TMP.MTZ hklout %s <cad.inp >cad.log"
% (P.dir_mode, self.last_name))
os.system("rm -f F2MTZ.INP ccp4/TMP.MTZ ccp4/F2MTZ.HKL.TMP")
elif self.mode == "CCP4F":
opWriteCl("%s/f2mtz.inp" % P.dir_mode, f2mtz_script % vars(P))
opWriteCl("%s/cad.inp" % P.dir_mode, cad2_script % vars(P))
os.system("cd %s;f2mtz hklout TMP.MTZ<f2mtz.inp >f2mtz.log" % P.dir_mode)
os.system("cd %s;f2mtz hklout TMP2.MTZ<f2mtz.inp2 >f2mtz2.log" % P.dir_mode)
os.system("cd %s;cad hklin1 TMP.MTZ hklin2 TMP2.MTZ hklout %s <cad.inp >cad.log"
% (P.dir_mode, self.last_name))
os.system("rm -f F2MTZ.INP ccp4f/TMP*.MTZ ccp4f/F2MTZ.HKL.TMP*")
elif self.mode == "CRANK":
#opWriteCl("%s/f2mtz.inp" % P.dir_mode, f2mtz_script % vars(P))
opWriteCl("cad.inp", cad_crank_script % vars(P))
os.system("f2mtz HKLOUT temp.mtz< F2MTZ.INP>f2mtz.log")
os.system("bash cad.inp >cad.log")
os.system("cd %s; mv ../output_file_name.mtz %s" % \
(P.dir_mode, self.last_name))
os.system("rm -f F2MTZ.INP temp.mtz crank/F2MTZ.HKL.TMP")
elif self.mode == "PHASER":
opWriteCl("%s/f2mtz.inp" % P.dir_mode, f2mtz_phaser_script % vars(P))
opWriteCl("%s/cad.inp" % P.dir_mode, cad_script % vars(P))
os.system("cd %s;f2mtz hklout TMP.MTZ<f2mtz.inp >f2mtz.log" % P.dir_mode)
os.system("cd %s;cad hklin1 TMP.MTZ hklout %s <cad.inp >cad.log"
% (P.dir_mode, self.last_name))
os.system("rm -f F2MTZ.INP phaser/TMP.MTZ phaser/F2MTZ.HKL.TMP")
opWriteCl("%s/run_phaser.sh" % P.dir_mode, phaser_script % vars(P))
os.chmod("%s/run_phaser.sh" % P.dir_mode, 0755)
elif self.mode == "SHARP":
opWriteCl("%s/f2mtz.inp" % P.dir_mode, f2mtz_sharp_script % vars(P))
opWriteCl("%s/cad.inp" % P.dir_mode, cad_script)
os.system("cd %s;f2mtz hklout TMP.MTZ<f2mtz.inp >f2mtz.log" % P.dir_mode)
os.system("cd %s;cad hklin1 TMP.MTZ hklout %s <cad.inp >cad.log"
% (P.dir_mode, self.last_name))
os.system("rm -f F2MTZ.INP ccp4/TMP.MTZ ccp4/F2MTZ.HKL.TMP")
#elif self.mode == "CRANK":
#opWriteCl("%s/mtzutils.inp" % P.dir_mode, mtzutils_script)
#os.system("sed -e 's/J *Q *J *Q$/K M K M/' F2MTZ.INP> F2MTZS.INP")
#os.system("f2mtz hklout %s/TMP.MTZ < F2MTZS.INP > %s/f2mtz.log" % \
#(P.dir_mode, P.dir_mode))
#os.system("cd %s;mtzutils hklin TMP.MTZ hklout %s <mtzutils.inp >mtzutils.log"
#% (P.dir_mode, "crank.mtz"))
#os.system("rm -f F2MTZ.INP crank/TMP.MTZ ccp4/F2MTZ.HKL.TMP")
elif self.mode == "SOLVE":
os.system("grep -v \! %s > solve/%s" % \
(P.file_name_in, P.file_name_out))
P.spg_name = (xupy.SPGlib[int(P.spgn_in)][0]).lower()
opWriteCl("%s/run_solve.sh" % P.dir_mode, solve_script % vars(P))
os.chmod("%s/run_solve.sh" % P.dir_mode, 0755)
elif self.mode == "EPMR":
opWriteCl("%s/cell" % P.dir_mode, "%(cell_str)s %(spgn_in)s\n" % vars(P))
elif self.mode == "REPLACE":
t = """awk '{gsub(",","");print}' """
t += """%(dir_mode)s/%(file_name_out)s >> %(dir_mode)s/data.hkl"""
os.system(t % vars(P))
opWriteCl("%s/run_glrf_self.sh" % P.dir_mode, replace_script % vars(P))
os.system("chmod a+x %(dir_mode)s/run_glrf_self.sh" % vars(P))
os.system("rm -f %(dir_mode)s/%(file_name_out)s" % vars(P))
elif self.mode == "AMORE":
os.system('echo "CELL %(cell_str)s" > format.dat'%vars(P))
os.system('echo "FORMAT (3I6,2E10.3)" >> format.dat')
os.system("cat format.dat > %(dir_mode)s/hkl.d" % vars(P))
t = """awk '{gsub(",","");print}' """
t += """%(dir_mode)s/%(file_name_out)s >> %(dir_mode)s/hkl.d"""
os.system(t % vars(P))
os.system("rm -f format.dat %(dir_mode)s/%(file_name_out)s" % vars(P))
afmt = " * xds *\n%(cell_str)s\n%(symop)s 0\n95. 0.\n15 3.5\n1 1\n"
P.symop = amore_symops[int(P.spgn_in)][1]
opWriteCl("%s/data.d" % P.dir_mode, afmt % vars(P))
if __name__ == '__main__':
import xupy
# Default options
__format_out = [] #"CCP4"
__atom_name = ""
__num_sites = 0
__xds_input = ""
__free_refl_input = ""
__free_refl_type = ""
__force_anom = False
__force_norm = False
__force_merge = False
__force_unmerge = False
__force_free = False
__force_no_free = False
__label = ""
argp_fmt = "<<< %-24s %s"
progname = sys.argv[0].split("/")[-1]
if (len(sys.argv) == 1) or ("-h" in sys.argv) \
or ("--help" in sys.argv) or ("-help" in sys.argv):
print usage % (progname, "|".join(options))
sys.exit(1)
args = sys.argv[1:]
print "\n<== OPTIONS:"
for arg in args:
nonnum = [i for i in arg if i not in "1234567890"]
if nonnum == []:
try:
n = int(arg)
__num_sites = n
print argp_fmt % ("nSites:", n)
except:
pass
elif arg.count("-l="):
__label = "_"+arg[3:]
elif arg.count("-l"):
__label = "_"+str(args[args.index("-l") + 1])
elif arg == "-f":
__force_free = True
elif arg == "-nf":
__force_no_free = True
elif arg == "-a":
__force_anom = True
elif arg == "-n":
__force_norm = True
elif arg == "-u":
__force_unmerge = True
elif arg == "-m":
__force_merge = True
        # Getting output format
elif arg.upper() in options:
__format_out.append(arg.upper())
print argp_fmt % ("Export mode:", arg.upper())
        # Getting atom type
elif arg.title() in atom_names:
print argp_fmt % ("atomType:", arg)
__atom_name = arg.title()
# Identifying file type
elif os.path.isfile(arg):
try:
f = open(arg)
s = f.read(65)
f.close()
ss = []
try:
ss = s.split()
ss = map(float, ss)
except:
pass
if "!FORMAT=XDS_ASCII" in s:
__xds_input = arg
elif (("NREFlection=" in s) and ("ANOMalous=" in s)):
__free_refl_input = arg
__free_refl_type = "CNS"
elif (len(ss)>= 12) and (abs(ss[5]) == 1):
__free_refl_input = arg
__free_refl_type = "SHELX"
elif (s[:3] == "MTZ") and (arg[-4:].lower() == ".mtz"):
__free_refl_input = arg
__free_refl_type = "CCP4"
else:
print "\nWarning: Can't define the file type",
print "for argument '%s'\n" % arg
except:
pass
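    # File-type sniffing above (illustrative summary): an "!FORMAT=XDS_ASCII"
    # header marks the XDS reflection input; "NREFlection="/"ANOMalous="
    # marks a CNS free-reflection file; a first line of >= 12 numbers with
    # +/-1 in column 6 is taken as SHELX; an "MTZ" magic string plus a .mtz
    # extension as CCP4.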
    # If the input file for the free reflection set is CCP4, it needs to be
    # converted to SHELX format before being fed to xdsconv.
if __free_refl_input:
print argp_fmt % ("free_hkl_to_inherit:", __free_refl_input),
print "[%s format]." % ( __free_refl_type)
if __free_refl_type == "CCP4":
print " --> Converting CCP4 free reflections to SHELX format."
script = open("mtz2shelx_free.sh", "w")
script.write(mtz2various_script)
script.close()
os.system("sh mtz2shelx_free.sh %s" % __free_refl_input)
__free_refl_input = "free_refl_shelx_F3.hkl"
__free_refl_type = "SHELX"
#
if __force_anom and __force_norm:
print "Warning: Umbiguous options specification (-a and -n), keeping -a."
__force_norm == False
# test input file type of inherited reflections
if __free_refl_input:
#__force_free = False
xdsconv_script += "INHERIT_TEST_REFLECTIONS_FROM_FILE=%s %s\n" % \
(__free_refl_input, __free_refl_type)
#### Default values
XC = Dumy()
XC.file_type = "XDS_ASCII"
XC.friedel_out = ""
XC.free_out = "GENERATE_FRACTION_OF_TEST_REFLECTIONS=0.05\n"
XC.free_lbl = "FreeR_flag"
XC.free_code = "X"
XC.merge_out = ""
XC.cell = ""
XC.lbl = ""
XC.ha_name = __atom_name
XC.num_sites = __num_sites
if not __format_out: __format_out = ["CCP4"]
XC.modes = __format_out
#XC.wavelength = 1.5418111
if __force_anom: XC.friedel_out = "FALSE"
if __force_norm: XC.friedel_out = "TRUE"
if __force_merge: XC.merge_out = "TRUE"
if __force_unmerge: XC.merge_out = "FALSE"
if __force_free:
XC.free_out = "GENERATE_FRACTION_OF_TEST_REFLECTIONS=0.05\n"
XC.free_lbl = "FreeR_flag"
XC.free_code = "X"
if __force_no_free:
XC.free_out = ""
XC.free_lbl = ""
XC.free_code = ""
if __label: XC.lbl = __label
XC.dirname = os.path.split(os.getcwd())[1]
XC.file_name_in = sys.argv[1]
sys.argv.remove(XC.file_name_in)
H = xupy.read_xdsascii_head(XC.file_name_in)
XC.spgn_in = H["sym"]
XC.cell = map(float, H["cell"].split())
XC.cell_str = 6*"%.2f " % tuple(XC.cell)
XC.friedel_in = H["friedels_law"]
XC.merge_in = H["merge"]
if H["template_name"]:
XC.ID = H["template_name"]
while XC.ID[-1] == "_":
XC.ID = XC.ID[:-1]
else:
XC.ID = "XSCALEa"
XC.res_high = H["include_resolution"][1]
XC.res_low = H["include_resolution"][0]
if not H["wavelength"]:
        # Try to catch the wavelength from the input reflection file
if H["inputfile_name"] \
and os.path.exists(H["inputfile_name"]) \
and H["inputfile_type"] == "XDS_ASCII":
_h = xupy.read_xdsascii_head(H["inputfile_name"])
if _h["wavelength"] : XC.wavelength = _h["wavelength"]
else:
XC.wavelength = H["wavelength"]
if H["friedels_law"] == "TRUE" and XC.friedel_out == "FALSE":
print "\n>>> WARNING! The input file does not contain Friedel's mate."
if H["merge"] == "TRUE" and XC.merge_out == "FALSE":
print "\n>>> WARNING! The input file does not unmerged reflections."
if not XC.friedel_out: XC.friedel_out = H["friedels_law"]
if not XC.merge_out: XC.merge_out = H["merge"]
# Depending on the chosen mode, predefine suffix_name, merge_out, friedel_out...
print fmt_inp % vars(XC)
#-----------------------------
modes = XC.modes[:]
for mode in modes:
XC.mode = mode
E = DoMode(XC)
print fmt_outp % vars(XC)
if XC.friedel_in == "FALSE":
print fmt_outp_ha % vars(XC)
E.run()
E.post_run(XC)
| jsburg/xdsme | XDS/xdsconv.py | Python | bsd-3-clause | 86,399 | ["CRYSTAL"] | d88d54cc16754b5302343c4c0674ba19b54a6131e75bf3d17052a68f1b881f1c |
from tendrl.ceph_integration.manager.request_factory import RequestFactory
from tendrl.ceph_integration.manager.user_request import OsdMapModifyingRequest
from tendrl.ceph_integration.manager.user_request import PgCreatingRequest
from tendrl.ceph_integration.manager.user_request import PoolCreatingRequest
from tendrl.ceph_integration.types import Config
from tendrl.ceph_integration.types import OsdMap
from tendrl.commons.event import Event
from tendrl.commons.message import Message
# Valid values for the 'var' argument to 'ceph osd pool set'
POOL_PROPERTIES = ["size", "min_size", "crash_replay_interval",
"pg_num", "pgp_num", "crush_ruleset", "hashpspool"]
# In Ceph versions that predate mon_osd_max_split_count, assume it is set to this value
LEGACY_MON_OSD_MAX_SPLIT_COUNT = "32"
class PoolRequestFactory(RequestFactory):
def _resolve_pool(self, pool_id):
osd_map = NS.state_sync_thread.get_sync_object(OsdMap)
return osd_map.pools_by_id[pool_id]
def _pool_attribute_commands(self, pool_name, attributes):
commands = []
for var in POOL_PROPERTIES:
if var in attributes:
val = attributes[var]
# Special case for hashpspool, accepts 'true' from firefly
# onwards but requires 0 or 1 for dumpling, so just use the
# old style.
if isinstance(val, bool):
val = 1 if val else 0
commands.append(('osd pool set', {
'pool': pool_name,
'var': var,
'val': val
}))
# Quota setting ('osd pool set-quota') is separate to the main 'set'
# operation
for attr_name, set_name in [
('quota_max_bytes', 'max_bytes'),
('quota_max_objects', 'max_objects')
]:
if attr_name in attributes:
commands.append(('osd pool set-quota', {
'pool': pool_name,
'field': set_name,
# set-quota wants a string in case it has units in
'val': attributes[attr_name].__str__()
}))
# Renames come last (the preceding commands reference the pool by its
# old name)
if 'name' in attributes:
commands.append(('osd pool rename', {
"srcpool": pool_name,
"destpool": attributes['name']
}))
return commands
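        # Illustrative only: _pool_attribute_commands('rbd',
        # {'size': 3, 'name': 'rbd2'}) returns
        #   [('osd pool set', {'pool': 'rbd', 'var': 'size', 'val': 3}),
        #    ('osd pool rename', {'srcpool': 'rbd', 'destpool': 'rbd2'})]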
def delete(self, pool_id):
# Resolve pool ID to name
pool_name = self._resolve_pool(pool_id)['pool_name']
# TODO(Rohan) perhaps the REST API should have something in the body to
# make it slightly harder to accidentally delete a pool, to respect
# the severity of this operation since we're hiding the
# --yes-i-really-really-want-to
# stuff here
# TODO(Rohan) handle errors in a way that caller can show to a user,
# e.g.
# if the name is wrong we should be sending a structured errors dict
# that they can use to associate the complaint with the 'name' field.
commands = [
('osd pool delete',
{'pool': pool_name,
'pool2': pool_name,
'sure': '--yes-i-really-really-mean-it'
}
)
]
return OsdMapModifyingRequest(
"Deleting pool '{name}'".format(name=pool_name),
commands
)
def _pool_min_size(self, req_size, req_min_size):
'''Find an appropriate "min_size" parameter for a pool create operation
req_size is requested pool size; 0 means "use osd_pool_default_size"
req_min_size is requested min size
Used in both create and update
'''
ceph_config = NS.state_sync_thread.get_sync_object_data(Config)
        size = req_size or int(ceph_config.get('osd_pool_default_size', 0))
        min_size = req_min_size or \
            int(ceph_config.get('osd_pool_default_min_size', 0))
if min_size:
ret_min_size = min(min_size, size)
else:
ret_min_size = size - size / 2
Event(
Message(
priority="info",
publisher=NS.publisher_id,
payload={"message": '_pool_min_size: size %d, min_size %d, '
'ret %d' % (size, min_size, ret_min_size)
}
)
)
return ret_min_size
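        # Worked example (values assumed): req_size=3 and req_min_size=0 with
        # no configured default min size gives min_size=0, so ret_min_size =
        # 3 - 3/2 = 2 under Python 2 integer division; req_min_size=5 with
        # size=3 is clamped back to min(5, 3) = 3.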
def update(self, pool_id, attributes):
osd_map = NS.state_sync_thread.get_sync_object(OsdMap)
pool = self._resolve_pool(pool_id)
pool_name = pool['pool_name']
# Recalculate/clamp min_size if it or size is updated
if 'size' in attributes or 'min_size' in attributes:
size = attributes.get('size', pool['size'])
min_size = attributes.get('min_size', pool['min_size'])
attributes['min_size'] = self._pool_min_size(size, min_size)
if 'pg_num' in attributes:
# Special case when setting pg_num: have to do some extra work
# to wait for PG creation between setting these two fields.
final_pg_count = attributes['pg_num']
if 'pgp_num' in attributes:
pgp_num = attributes['pgp_num']
del attributes['pgp_num']
else:
pgp_num = attributes['pg_num']
del attributes['pg_num']
pre_create_commands = self._pool_attribute_commands(
pool_name, attributes)
# This setting is new in Ceph Firefly, where it defaults to 32.
# For older revisions, we simply pretend that the setting exists
# with a default setting.
mon_osd_max_split_count = int(
NS.state_sync_thread.get_sync_object_data(Config).get(
'mon_osd_max_split_count',
LEGACY_MON_OSD_MAX_SPLIT_COUNT
)
)
initial_pg_count = pool['pg_num']
n_osds = min(initial_pg_count, len(osd_map.osds_by_id))
# The rules about creating PGs:
# where N_osds = min(old_pg_count, osd_count)
# the number of new PGs divided by N_osds may not be greater than
# mon_osd_max_split_count
block_size = mon_osd_max_split_count * n_osds
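            # Illustrative numbers: growing a 64-PG pool on a 10-OSD cluster
            # with mon_osd_max_split_count=32 gives n_osds = min(64, 10) = 10
            # and block_size = 320, i.e. pg_num may be raised by at most 320
            # PGs per step until final_pg_count is reached.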
return PgCreatingRequest(
"Growing pool '{name}' to {size} PGs".format(
name=pool_name, size=final_pg_count),
pre_create_commands,
pool_id, pool_name, pgp_num,
initial_pg_count, final_pg_count, block_size)
else:
commands = self._pool_attribute_commands(pool_name, attributes)
if not commands:
raise NotImplementedError(attributes)
# TODO(Rohan) provide some machine-readable indication of which
# objects are affected by a particular request.
# Perhaps subclass Request for each type of object, and have
# that subclass provide both the patches->commands mapping and
# the human readable and machine readable descriptions of it?
# Objects may be decorated with 'id' from use in a bulk PATCH,
# but we don't want anything from this point onwards to see that.
if 'id' in attributes:
del attributes['id']
return OsdMapModifyingRequest(
"Modifying pool '{name}' ({attrs})".format(
name=pool_name, attrs=", ".join(
"%s=%s" % (k, v) for k, v in attributes.items())
),
commands
)
def create(self, attributes):
if 'type' in attributes and attributes['type'] == "erasure":
commands = [(
'osd pool create',
{
'pool': attributes['name'],
'pg_num': attributes['pg_num'],
'pool_type': attributes['type'],
'erasure_code_profile': attributes['erasure_code_profile']
}
)]
else:
commands = [(
'osd pool create',
{
'pool': attributes['name'],
'pg_num': attributes['pg_num'],
}
)]
# Calculate appropriate min_size, including default if none given
req_size = attributes.get('size', 0)
req_min_size = attributes.get('min_size', 0)
attributes['min_size'] = self._pool_min_size(req_size, req_min_size)
# Which attributes must we set after the initial create?
post_create_attrs = attributes.copy()
del post_create_attrs['name']
del post_create_attrs['pg_num']
if 'pgp_num' in post_create_attrs:
del post_create_attrs['pgp_num']
if 'pool_type' in post_create_attrs:
del post_create_attrs['pool_type']
if 'erasure_code_profile' in post_create_attrs:
del post_create_attrs['erasure_code_profile']
commands.extend(self._pool_attribute_commands(
attributes['name'],
post_create_attrs
))
Event(
Message(
priority="debug",
publisher=NS.publisher_id,
payload={"message": "Post-create attributes: %s" %
post_create_attrs
}
)
)
Event(
Message(
priority="debug",
publisher=NS.publisher_id,
payload={"message": "Commands: %s" % commands}
)
)
return PoolCreatingRequest(
"Creating pool '{name}'".format(name=attributes['name']),
attributes['name'], commands)
| r0h4n/ceph-integration | tendrl/ceph_integration/manager/pool_request_factory.py | Python | lgpl-2.1 | 9,966 | ["Firefly"] | 2fac6bc93644a1b78b4c6e5cbebc0c5554f37b31849c31fa8e5cd219f4ec78db |
|
#-------------------------------------------------------------------------------
# rbtlib: resource.py
#
# Get a named tuple response from a URL.
#-------------------------------------------------------------------------------
# The MIT License (MIT)
# Copyright (c) 2016 Brian Minard
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#-------------------------------------------------------------------------------
import collections
from functools import wraps
import stat
from types import DictionaryType, DictType, ListType
class BadContentType(Exception):
"""HTTP response contains unexpected content type."""
content_type = {
'accounts': 'application/vnd.reviewboard.org.hosting-service+json',
'api_tokens': 'application/json',
'archived_review_requests': 'application/json',
'blocks': 'application/json',
'branches': 'application/vnd.reviewboard.org.repository+json',
'commits': 'application/vnd.reviewboard.org.repository+json',
'changes': 'application/vnd.reviewboard.org.review-request-changes+json',
'create': 'application/vnd.reviewboard.org.review-request+json',
'default_reviewers': 'application/vnd.reviewboard.org.default-reviewers+json',
'delete': 'application/json',
'depends_on': 'application/json',
'diff_context': 'application/json',
'diff_file_attachments': 'application/json',
'diff_validation': 'application/json',
'diffs': 'application/vnd.reviewboard.org.diffs+json',
'draft': 'application/json',
'extensions': 'application/json',
'file_attachments': 'application/json',
'groups': 'application/vnd.reviewboard.org.review-groups+json',
'hosting_services': 'application/vnd.reviewboard.org.hosting-services+json',
'hosting_service_accounts': 'application/vnd.reviewboard.org.hosting-service-accounts+json',
'info': 'application/vnd.reviewboard.org.server-info+json',
'last_update': 'application/json',
'muted_review_requests': 'application/json',
'remote_repositories': 'application/vnd.reviewboard.org.hosting-service-account+json',
'repository': 'application/vnd.reviewboard.org.repository+json',
'repositories': 'application/vnd.reviewboard.org.repositories+json',
'review_requests': 'application/vnd.reviewboard.org.review-requests+json',
'review_request': 'application/vnd.reviewboard.org.review-request+json',
'reviews': 'application/json',
'root': 'application/vnd.reviewboard.org.root+json',
'screenshots': 'application/json',
'search': 'application/vnd.reviewboard.org.search+json',
'session': 'application/vnd.reviewboard.org.session+json',
'submitter': 'application/json',
'target_groups': 'application/json',
'target_people': 'application/json',
'update': 'application/json',
'users': 'application/vnd.reviewboard.org.users+json',
'validation': 'application/json',
'watched': 'application/json',
'webhooks': 'application/vnd.reviewboard.org.webhooks+json',
}
class Resource(object):
"""Generalized resource.
Provide uniform getter and setter methods over HTTP to Review Board. To
achieve this, the methods are decorated to handle the response at different
levels:
    - sending HTTP commands (e.g., GET and POST)
- handling the HTTP response (e.g., content type and status code)
- payload conversion to a whole-part hierarchy
    The whole-part hierarchy is contained in a nested named tuple holding the
    response returned by Review Board.
The composite contains a copy of the JSON response from Review Board. This
permits clients to access the response using a named tuple or dictionary.
Attributes:
session: HTTP session.
name: Resource name.
"""
def __init__(self, session, name):
super(Resource, self).__init__()
self._session = session
self._name = name
@property
def name(self):
return self._name
def composite(fetch):
"""Build the whole-part hierarchy from the HTTP response.
Args:
fetch: a function object defining an HTTP command.
Returns:
A function object generating a composite from the obtained data.
"""
@wraps(fetch)
def _composite(self, href, query_dict = dict()):
"""Generate the composite object from the HTTP response.
Args:
href: A hypertext reference used by the HTTP command.
query_dict: A dictionary containing HTTP command parameters.
Returns:
A composite object containing the whole-part hierarchy.
"""
response = fetch(self, href, query_dict)
assert type(response) is DictType or type(response) is DictionaryType
return self.component(self._name, response, { 'json': response })
return _composite
    def construct_dict_from_http_response(fetch):
"""Decorator returning a dictionary containing the HTTP response.
Args:
fetch: a function object defining an HTTP command.
Returns:
A dictionary containing the payload returned by the HTTP command.
"""
@wraps(fetch)
        def _construct_dict_from_http_response(self, href, query_dict = dict()):
"""Construct a dictionary from the HTTP response.
Args:
href: A hypertext reference used by the HTTP command.
query_dict: A dictionary containing HTTP command parameters.
Returns:
A dictionary defining the response to the HTTP command.
"""
return fetch(self, href, query_dict).json()
        return _construct_dict_from_http_response
def validate_http_content_type(fetch):
"""Decorator validating the response's HTTP content type.
Args:
fetch: a function object defining an HTTP command.
Returns:
A function object for checking the HTTP Content-Type.
"""
@wraps(fetch)
def _validate_http_content_type(self, href, query_dict = dict()):
"""Validate the HTTP content type.
Args:
href: A hypertext reference used by the HTTP command.
query_dict: A dictionary containing HTTP command parameters.
Returns:
A dictionary defining the response to the HTTP command.
Raises:
BadContentType: The expected and returned HTTP content
type do not match.
"""
response = fetch(self, href, query_dict)
response.raise_for_status()
if content_type[self._name] != response.headers['Content-Type']:
raise BadContentType(href, response.headers['Content-Type'],
content_type[self._name], self._name)
return response
return _validate_http_content_type
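    # How the decorators compose on the HTTP verbs below (innermost first):
    # the raw session call runs, validate_http_content_type checks the
    # Content-Type header, construct_dict_from_http_response decodes the JSON
    # payload, composite builds the named-tuple hierarchy, and stat.is_valid
    # (presumably a package-local helper resolved via Python 2 implicit
    # relative import, since the standard-library stat module has no
    # is_valid) checks the Review Board 'stat' field. A hypothetical call:
    #
    #   root = Resource(session, 'root')
    #   info = root.get('http://rb.example.com/api/')
    #   info.json['stat']   # -> 'ok' on success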
@stat.is_valid
@composite
    @construct_dict_from_http_response
@validate_http_content_type
def delete(self, href, query_dict = dict()):
"""Execute HTTP DELETE command using session parameters.
Args:
href: A hypertext reference used by the HTTP command.
query_dict: A dictionary containing HTTP command parameters.
Returns:
The HTTP response to the HTTP command.
"""
return self._session.delete(href, data = query_dict)
@stat.is_valid
@composite
    @construct_dict_from_http_response
@validate_http_content_type
def get(self, href, query_dict = dict()):
"""Execute HTTP GET command using session parameters.
Args:
href: A hypertext reference used by the HTTP command.
query_dict: A dictionary containing HTTP command parameters.
Returns:
The HTTP response to the HTTP command.
"""
return self._session.get(href, params = query_dict)
@stat.is_valid
@composite
    @construct_dict_from_http_response
@validate_http_content_type
def post(self, href, query_dict = dict()):
"""Execute HTTP POST command using session parameters.
Args:
href: A hypertext reference used by the HTTP command.
query_dict: A dictionary containing HTTP command parameters.
Returns:
The HTTP response to the HTTP command.
"""
return self._session.post(href, data = query_dict)
@stat.is_valid
@composite
    @construct_dict_from_http_response
@validate_http_content_type
def put(self, href, query_dict = dict()):
"""Execute HTTP PUT command using session parameters.
Args:
href: A hypertext reference used by the HTTP command.
query_dict: A dictionary containing HTTP command parameters.
Returns:
The HTTP response to the HTTP command.
"""
return self._session.put(href, data = query_dict)
def list_component(self, name, response):
"""Construct a list component from the response.
Args:
name: the key string identifying the list in the response.
response: a list.
Returns:
A dictionary containing list components.
"""
args = list()
for element in response:
if type(element) is DictType or type(element) is DictionaryType:
args.append(self.component(name, element))
else:
args.append(name)
return args
def replace(self, s):
"""Replace forbidden characters in named tuple names and field names.
Args:
s: a string.
Returns:
A string with the required substitutions.
"""
return s.replace("-", "_")
def component(self, name, response, extra_args = dict()):
"""Build the whole-part hierarchy making up the composite object.
Recursively create a whole-part hierarchy of the response. Extra
arguments can be embedded within the whole-part hierarchy at this level
only.
Args:
name: the component name.
response: the response defining the component.
extra_args: arguments to add to the component.
Returns:
A named tuple comprising the whole-part hierarchy contained with the
response along with any extra arguments.
"""
field_names = ' '.join(list(extra_args) + list(response))
args = dict(extra_args)
for x in response:
y = self.replace(x)
if type(response[x]) is DictType or type(response[x]) is DictionaryType:
args[y] = self.component(x, response[x])
elif type(response[x]) is ListType:
args[y] = self.list_component(x, response[x])
else:
args[y] = response[x]
tuple_descriptor = collections.namedtuple(self.replace(name),
self.replace(field_names))
return tuple_descriptor(**args)
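    # A minimal illustration (hypothetical data) of what component() builds:
    #
    #   r = Resource(session, 'review_request')
    #   t = r.component('review_request', {'id': 42, 'time-added': 'now'})
    #   t.id          # -> 42
    #   t.time_added  # -> 'now' ('-' is replaced by '_' in field names)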
class ResourceFactory(Resource):
"""Resource specialization by URL and HTTP command.
    Permit resource class instances to behave as functions. This separates
    resource construction from use, relegating construction to the library
    and leaving client programs to focus on consuming resource outputs.
Attributes:
session: HTTP session.
name: Resource name.
url: Resource URL.
        method: the HTTP method required by the resource.
"""
def __init__(self, session, name, url, method):
"""Construct the resource.
Use the method to define a closure on the URL for the HTTP command.
"""
super(ResourceFactory, self).__init__(session, name)
if 'GET' == method:
self._fetch = lambda query_dict = dict(): self.get(url, query_dict)
elif 'POST' == method:
self._fetch = lambda data_dict = dict(): self.post(url, data_dict)
else:
assert 0, "unknown HTTP {0} command needed by {1} resource".format(method, name)
def __call__(self, query_dict = dict()):
"""Use the getter to populate the resource.
The parent resource includes the entire Review Board response and an
instance of the Resource class for any linked resource.
The top-level component is an amalgamation of the composite
defined by the Review Board response and ResourceFactory objects
for each child resource in that response.
The Review Board response is not altered.
Args:
query_dict: the payload provided to the HTTP command.
Returns:
A named tuple comprising the whole-part hierarchy containing the
HTTP command response and ResourceFactory objects for each child
resource.
"""
response = self._fetch(query_dict)
args = dict()
        if getattr(response, 'links', None) is not None:
for links in response.links:
link_name = type(links).__name__
if 'self' == link_name:
args[link_name] = ResourceFactory(self._session, self.name,
links.href, links.method)
else:
args[link_name] = ResourceFactory(self._session, link_name,
links.href, links.method)
resource_tuple = self.component(self._name, args)
tuple_descriptor = collections.namedtuple(self._name,
resource_tuple._fields + response._fields)
return tuple_descriptor(*(resource_tuple + response))
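# A hypothetical end-to-end sketch (URL and link names assumed, not part of
# this module): construct the root resource, call it to fetch, then follow
# links exposed as ResourceFactory attributes on the returned tuple.
#
#   root = ResourceFactory(session, 'root', 'http://rb.example.com/api/', 'GET')()
#   review_requests = root.review_requests()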
|
bminard/rbtlib
|
rbtlib/resource/resource.py
|
Python
|
mit
| 14,819
|
[
"Brian"
] |
d125a7e0b721574655adb2948c2964038ab5c5d5dd84dfed0d546074db4e7a87
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains the definition of some objects used in the chemenv package.
"""
from __future__ import division, unicode_literals
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import is_anion_cation_bond
STATS_ENV_PAPER = 'David Waroquiers, Xavier Gonze, Gian-Marco Rignanese, Cathrin Welker-Nieuwoudt, Frank Rosowski,\n' \
'Michael Goebel, Stephan Schenk, Peter Degelmann, Rute Andre, Robert Glaum, and Geoffroy Hautier,\n' \
'"Statistical analysis of coordination environments in oxides",\n' \
'Chem. Mater., 2017, 29 (19), pp 8346–8360,\n' \
'DOI: 10.1021/acs.chemmater.7b02766\n'
def chemenv_citations():
out = ''
out += '\nIf you use the ChemEnv tool for your research, please consider citing the following reference(s) :\n'
out += '==================================================================================================\n'
out += STATS_ENV_PAPER
return out
class AdditionalConditions():
NO_ADDITIONAL_CONDITION = 0
ONLY_ANION_CATION_BONDS = 1
NO_ELEMENT_TO_SAME_ELEMENT_BONDS = 2
ONLY_ANION_CATION_BONDS_AND_NO_ELEMENT_TO_SAME_ELEMENT_BONDS = 3
ONLY_ELEMENT_TO_OXYGEN_BONDS = 4
    # Short versions
NONE = NO_ADDITIONAL_CONDITION
NO_AC = NO_ADDITIONAL_CONDITION
ONLY_ACB = ONLY_ANION_CATION_BONDS
NO_E2SEB = NO_ELEMENT_TO_SAME_ELEMENT_BONDS
ONLY_ACB_AND_NO_E2SEB = ONLY_ANION_CATION_BONDS_AND_NO_ELEMENT_TO_SAME_ELEMENT_BONDS
ONLY_E2OB = ONLY_ELEMENT_TO_OXYGEN_BONDS
    # Dictionary mapping each condition integer to its description
CONDITION_DESCRIPTION = {NO_ADDITIONAL_CONDITION: 'No additional condition',
ONLY_ANION_CATION_BONDS: 'Only anion-cation bonds',
NO_ELEMENT_TO_SAME_ELEMENT_BONDS: 'No element-element bonds (same elements)',
ONLY_ANION_CATION_BONDS_AND_NO_ELEMENT_TO_SAME_ELEMENT_BONDS: 'Only anion-cation bonds and'
' no element-element bonds'
' (same elements)',
ONLY_ELEMENT_TO_OXYGEN_BONDS: 'Only element-oxygen bonds'}
ALL = [NONE, ONLY_ACB, NO_E2SEB, ONLY_ACB_AND_NO_E2SEB, ONLY_E2OB]
def check_condition(self, condition, structure, parameters):
if condition == self.NONE:
return True
elif condition == self.ONLY_ACB:
valences = parameters['valences']
ii = parameters['site_index']
jj = parameters['neighbor_index']
return is_anion_cation_bond(valences, ii, jj)
elif condition == self.NO_E2SEB:
ii = parameters['site_index']
jj = parameters['neighbor_index']
elmts_ii = [sp.symbol for sp in structure[ii].species_and_occu]
elmts_jj = [sp.symbol for sp in structure[jj].species_and_occu]
return len(set(elmts_ii) & set(elmts_jj)) == 0
elif condition == self.ONLY_ACB_AND_NO_E2SEB:
valences = parameters['valences']
ii = parameters['site_index']
jj = parameters['neighbor_index']
elmts_ii = [sp.symbol for sp in structure[ii].species_and_occu]
elmts_jj = [sp.symbol for sp in structure[jj].species_and_occu]
return len(set(elmts_ii) & set(elmts_jj)) == 0 and is_anion_cation_bond(valences, ii, jj)
elif condition == self.ONLY_E2OB:
ii = parameters['site_index']
jj = parameters['neighbor_index']
elmts_ii = [sp.symbol for sp in structure[ii].species_and_occu]
elmts_jj = [sp.symbol for sp in structure[jj].species_and_occu]
return ('O' in elmts_jj and 'O' not in elmts_ii) or ('O' in elmts_ii and 'O' not in elmts_jj)
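# A minimal usage sketch (hypothetical inputs; 'structure' is a pymatgen
# Structure and 'valences' a list of site valences):
#
#   ac = AdditionalConditions()
#   params = {'valences': valences, 'site_index': 0, 'neighbor_index': 1}
#   ac.check_condition(AdditionalConditions.ONLY_ACB, structure, params)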
|
johnson1228/pymatgen
|
pymatgen/analysis/chemenv/utils/defs_utils.py
|
Python
|
mit
| 4,296
|
[
"pymatgen"
] |
8e7f5e7b5aaaf0583b42c0523561055890536c572caaab2172fa21ee422b6818
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RRsamtools(RPackage):
"""This package provides an interface to the 'samtools', 'bcftools', and
'tabix' utilities (see 'LICENCE') for manipulating SAM (Sequence
Alignment / Map), FASTA, binary variant call (BCF) and compressed
indexed tab-delimited (tabix) files."""
homepage = "https://bioconductor.org/packages/Rsamtools/"
url = "https://bioconductor.org/packages/3.5/bioc/src/contrib/Rsamtools_1.28.0.tar.gz"
list_url = homepage
version('1.28.0', '313bceac68edec07f2ee466520461d35')
depends_on('r-genomeinfodb', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-xvector', type=('build', 'run'))
depends_on('r-zlibbioc', type=('build', 'run'))
depends_on('r-bitops', type=('build', 'run'))
depends_on('r-biocparallel', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@1.28.0')
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-rsamtools/package.py
|
Python
|
lgpl-2.1
| 2,380
|
[
"Bioconductor"
] |
648519784a9ac3a142fb04ec1e9bad3cff87ed4a98b1f5bdb6f8cb02eaad0014
|
# -*- coding: utf-8 -*-
# @Author: YangZhou
# @Date: 2017-06-17 13:34:52
# @Last Modified by: YangZhou
# @Last Modified time: 2017-06-26 16:31:11
import aces.tools as tl
from ase import Atoms
from aces import default
from aces.io.vasp import writevasp
from ase import io
from aces.tools.Units import Units
from aces import config
from ase.dft.kpoints import ibz_points
from aces.io.lammps.lammpsdata import lammpsdata
import numpy as np
from aces.env import PROJHOME
import atomic
class Material:
def __init__(self, opt={}):
# all the values needed
        # units may be changed by opt but must be usable before the rest of opt is merged
self.__dict__ = dict(self.__dict__,
**default.default)
if 'units' in opt:
self.units = opt['units']
self.units = Units(self.units)
self.elements = ['C', 'N', 'B']
self.set_parameters()
self.__dict__ = dict(self.__dict__, **opt)
self.super_setup()
def __getattr__(self, attr):
if attr == "dim":
return tl.toString(self.supercell)
if attr == "cores":
return int(self.nodes) * int(self.procs)
    # to be overridden
def set_parameters(self):
pass
def super_setup(self):
self.units = Units(self.units)
self.prepare_lammps()
self.prepare_phonts()
self.bandpoints = ibz_points['fcc']
self.bandpoints['X'] = [.5, 0, 0]
self.bandpoints['Y'] = [0, 0.5, 0]
self.bandpath = ['Gamma', 'X', 'Y', 'Gamma']
self.premitive = np.eye(3)
if not self.useS3:
self.supercell3 = self.supercell
self.setup()
if self.atomfile:
atoms = io.read(
str(PROJHOME + "/data/" + self.atomfile), format="vasp")
self.atoms = atoms.repeat([self.latx, self.laty, self.latz])
# self.atoms.center()
else:
self.atoms = self.lmp_structure()
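        # For low-dimensional systems, zero the forces and velocities along
        # the constrained directions so atoms stay on the 1D line / in the 2D
        # plane ('fix setforce' / 'velocity set' with NULL leave the
        # unconstrained components untouched in LAMMPS).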
if self.dimension == 1:
self.masses += "\nfix 1d all setforce NULL 0. 0.\nvelocity all set NULL 0.0 0.0 units box"
elif self.dimension == 2:
self.masses += "\nfix 1d all setforce NULL NULL 0.\nvelocity all set NULL NULL 0.0 units box"
    # to be overridden
def setup(self):
pass
def prepare_lammps(self):
self.potential = 'pair_style tersoff\npair_coeff * * %s/BNC.tersoff %s' % (
config.lammpspot, ' '.join(self.elements))
self.dump = "dump_modify dump1 element %s" % (' '.join(self.elements))
masses = atomic.getMassFromLabel(self.elements)
self.masses = '\n'.join(["mass %d %f" % (i + 1, mass)
for i, mass in enumerate(masses)])
m = self
units = self.units
m.kb = units.boltz
m.nktv = units.nktv2p
if(m.method == "nvt"):
m.xp = 0
m.dtime = m.timestep * 100
m.tcfactor = units.tcfactor
m.excNum = m.aveRate / m.excRate
m.swapEnergyRate = m.swapEnergy / (m.excRate * m.timestep)
def prepare_phonts(self):
masses = atomic.getMassFromLabel(self.elements)
self.phontsmasses = '\n'.join(
["%s %f 0.0" % (label, mass) for label, mass in zip(self.elements, masses)])
def structure(self):
self.write()
    # to be overridden
def lmp_structure(self):
atoms = Atoms()
return atoms
def write(self):
self.watoms(self.atoms)
def watoms(self, atoms):
# atoms.write("structure.xyz")
writevasp(atoms)
# write_vasp("POSCAR",atoms,sort="True",direct=True,vasp5=True)
self.POSCAR2data()
# if len(atoms) < 1000:
# atoms.write('structure.png')
def writeatoms(self, atoms, label='atoms'):
tl.mkcd(label)
self.watoms(atoms)
tl.cd('..')
def getatomicstyle(self):
a = "atom_style atomic"
if self.creatbonds > 0.0:
a = "atom_style bond\natom_modify sort 0 1.\ncomm_modify cutoff 2.0 "
return a
def POSCAR2data(self):
atoms = io.read('POSCAR')
m = self
atoms.set_pbc([m.xp, m.yp, m.zp])
# debug(atoms.cell)
a = lammpsdata(atoms, self.elements)
rot = a.writedata(filename="structure", creatbonds=self.creatbonds)
d, p, d1, p1 = rot
np.savetxt('POSCARrot', np.r_[d, p, d1, p1])
# debug(rot)
return rot
def atoms_from_dump(self, filename):
from atomic import atoms_from_dump as afd
atoms = afd(filename=filename, elements=self.elements)
m = self
atoms.set_pbc([m.xp, m.yp, m.zp])
return atoms
def dump2POSCAR(self, dumpname, poscar='POSCAR', rotate=True):
atoms = self.atoms_from_dump(dumpname)
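        # Undo the cell rotations recorded by POSCAR2data (saved in
        # 'POSCARrot'): apply them in reverse order with negated angles so the
        # dumped configuration maps back onto the original POSCAR orientation.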
if rotate:
rot = np.loadtxt(tl.dirname(dumpname) + '/POSCARrot')
d, p, d1, p1 = rot[:3], rot[3], rot[4:7], rot[7]
atoms.rotate(d1, -p1, rotate_cell=True)
atoms.rotate(d, -p, rotate_cell=True)
# write_vasp(poscar,atoms,sort="True",direct=True,vasp5=True)
writevasp(atoms, poscar)
return atoms
def getboxrange(self):
file = open("range")
for i in range(5):
file.next()
xlo, xhi = map(float, file.next().split()[:2])
ylo, yhi = map(float, file.next().split()[:2])
zlo, zhi = map(float, file.next().split()[:2])
return (xlo, xhi, ylo, yhi, zlo, zhi)
def getxrange(self):
file = open('minimize.xyz')
n = file.next().split()[0]
n = int(n)
file.next()
xmin = 100000
xmax = -100000
ymin = 100000
ymax = -100000
zmin = 100000
zmax = -100000
for i in range(n):
label, x, y, z = file.next().split()
x, y, z = map(float, [x, y, z])
xmin = min(x, xmin)
xmax = max(x, xmax)
ymin = min(y, ymin)
ymax = max(y, ymax)
zmin = min(z, zmin)
zmax = max(z, zmax)
return (xmin, xmax, ymin, ymax, zmin, zmax)
def postMini(self):
xlo, xhi, ylo, yhi, zlo, zhi = self.getboxrange()
xlo0, xhi0, ylo0, yhi0, zlo0, zhi0 = self.getxrange()
if(self.xp == 0):
xlo = xlo0
xhi = xhi0
if(self.yp == 0):
ylo = ylo0
yhi = yhi0
if(self.zp == 0):
zlo = zlo0
zhi = zhi0
lx = xhi - xlo
ly = yhi - ylo
lz = zhi - zlo
if(self.enforceThick):
self.zfactor = lz / self.thick
else:
self.zfactor = 1
self.S = ly * lz
self.box = (xlo, xhi, ylo, yhi, zlo, zhi, lx, ly, lz)
|
vanceeasleaf/aces
|
aces/materials/__init__.py
|
Python
|
gpl-2.0
| 6,769
|
[
"ASE",
"LAMMPS",
"VASP"
] |
df74c2fc77799cb20a12924441763e26c74dda6bf300b507a040e88adf780f4a
|
from gpaw import GPAW, PW
from ase.lattice import bulk
si = bulk('Si')
si.calc = GPAW(mode=PW(200), kpts={'size': (2, 2, 2), 'gamma': True})
si.get_potential_energy()
si.calc.diagonalize_full_hamiltonian()
si.calc.write('Si_gs', 'all')
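# Si_g0w0_ppa.py (assumed to ship alongside this test) should populate
# 'ks_gap' and 'qp_gap' in the dict passed to execfile; the asserts check
# the Kohn-Sham gap and the G0W0 quasiparticle gap of bulk silicon.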
dct = {}
execfile('Si_g0w0_ppa.py', dct)
assert abs(dct['ks_gap'] - 0.443) < 0.01
assert abs(dct['qp_gap'] - 1.046) < 0.01
|
robwarm/gpaw-symm
|
doc/exercises/gw/test.py
|
Python
|
gpl-3.0
| 359
|
[
"ASE",
"GPAW"
] |
e7befae5b53d7a9291a5ee5c6e6a072e305f2bd6d2e0e287f101ae57e1c576be
|
#!/usr/bin/env python
# Run this test like so:
# $ vtkpython TestTextureGlyph.py -D $VTK_DATA_ROOT \
# -B $VTK_DATA_ROOT/Baseline/Graphics/
#
# $ vtkpython TestTextureGlyph.py --help
# provides more details on other options.
import os
import os.path
import vtk
from vtk.test import Testing
class TestTextureGlyph(Testing.vtkTest):
def testGlyphs(self):
"""Test if texturing of the glyphs works correctly."""
# The Glyph
cs = vtk.vtkCubeSource()
cs.SetXLength(2.0); cs.SetYLength(1.0); cs.SetZLength(0.5)
# Create input point data.
pts = vtk.vtkPoints()
pts.InsertPoint(0, (1,1,1))
pts.InsertPoint(1, (0,0,0))
pts.InsertPoint(2, (-1,-1,-1))
polys = vtk.vtkCellArray()
polys.InsertNextCell(1)
polys.InsertCellPoint(0)
polys.InsertNextCell(1)
polys.InsertCellPoint(1)
polys.InsertNextCell(1)
polys.InsertCellPoint(2)
pd = vtk.vtkPolyData()
pd.SetPoints(pts)
pd.SetPolys(polys)
# Orient the glyphs as per vectors.
vec = vtk.vtkFloatArray()
vec.SetNumberOfComponents(3)
vec.InsertTuple3(0, 1, 0, 0)
vec.InsertTuple3(1, 0, 1, 0)
vec.InsertTuple3(2, 0, 0, 1)
pd.GetPointData().SetVectors(vec)
# The glyph filter.
g = vtk.vtkGlyph3D()
g.SetScaleModeToDataScalingOff()
g.SetVectorModeToUseVector()
g.SetInput(pd)
g.SetSource(cs.GetOutput())
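        # Note: SetInput()/SetSource() is the pre-VTK 6 pipeline API used by
        # this test; newer VTK versions use SetInputData()/SetSourceData()
        # or port connections instead.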
m = vtk.vtkPolyDataMapper()
m.SetInputConnection(g.GetOutputPort())
a = vtk.vtkActor()
a.SetMapper(m)
# The texture.
img_file = os.path.join(Testing.VTK_DATA_ROOT, "Data",
"masonry.bmp")
img_r = vtk.vtkBMPReader()
img_r.SetFileName(img_file)
t = vtk.vtkTexture()
t.SetInputConnection(img_r.GetOutputPort())
t.InterpolateOn()
a.SetTexture(t)
# Renderer, RenderWindow etc.
ren = vtk.vtkRenderer()
ren.SetBackground(0.5, 0.5, 0.5)
ren.AddActor(a)
        ren.ResetCamera()
cam = ren.GetActiveCamera()
cam.Azimuth(-90)
cam.Zoom(1.4)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
rwi = vtk.vtkRenderWindowInteractor()
rwi.SetRenderWindow(renWin)
rwi.Initialize()
rwi.Render()
# Compare the images and test.
img_file = "TestTextureGlyph.png"
Testing.compareImage(renWin, Testing.getAbsImagePath(img_file))
# Interact if necessary.
if Testing.isInteractive():
rwi.Start()
if __name__ == "__main__":
Testing.main([(TestTextureGlyph, 'test')])
|
naucoin/VTKSlicerWidgets
|
Graphics/Testing/Python/TestTextureGlyph.py
|
Python
|
bsd-3-clause
| 2,758
|
[
"VTK"
] |
59ada863efdb3edb6e1cc8fa0ee08d51b1b8521b95894cecf8f07e1765922b03
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@domain.com'), ('Full Name', 'anotheremail@domain.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities).
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('bn', gettext_noop('Bengali')),
('bg', gettext_noop('Bulgarian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('es', gettext_noop('Spanish')),
('et', gettext_noop('Estonian')),
('es-ar', gettext_noop('Argentinean Spanish')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('hu', gettext_noop('Hungarian')),
('he', gettext_noop('Hebrew')),
('hr', gettext_noop('Croatian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('ko', gettext_noop('Korean')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('lv', gettext_noop('Latvian')),
('lt', gettext_noop('Lithuanian')),
('mk', gettext_noop('Macedonian')),
('nl', gettext_noop('Dutch')),
('no', gettext_noop('Norwegian')),
('pl', gettext_noop('Polish')),
    ('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sr', gettext_noop('Serbian')),
('sv', gettext_noop('Swedish')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('tr', gettext_noop('Turkish')),
('uk', gettext_noop('Ukrainian')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various e-mails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link e-mails.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info.
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
DATABASE_OPTIONS = {} # Set to empty dictionary for default.
# Host for sending e-mail.
EMAIL_HOST = 'localhost'
# Port for sending e-mail.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
# 'django.core.context_processors.request',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Default e-mail address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is an admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# 404s that may be ignored.
IGNORABLE_404_STARTS = ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
IGNORABLE_404_ENDS = ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', 'favicon.ico', '.php')
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Path to the "jing" executable -- needed to validate XMLFields
JING_PATH = "/usr/bin/jing"
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
MONTH_DAY_FORMAT = 'F j'
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The User-Agent string to use when checking for URL validity through the
# isExistingURL validator.
from django import get_version
URL_VALIDATOR_USER_AGENT = "Django/%s (http://www.djangoproject.com)" % get_version()
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
)
############
# SESSIONS #
############
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".lawrence.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
#########
# CACHE #
#########
# The cache backend to use. See the docstring in django.core.cache for the
# possible values.
CACHE_BACKEND = 'locmem://'
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in the
# 'hasNoProfanities' validator. All of these should be in lowercase.
PROFANITIES_LIST = ('asshat', 'asshead', 'asshole', 'cunt', 'fuck', 'gook', 'nigger', 'shit')
# The group ID that designates which users are banned.
# Set to None if you're not using it.
COMMENTS_BANNED_USERS_GROUP = None
# The group ID that designates which users can moderate comments.
# Set to None if you're not using it.
COMMENTS_MODERATORS_GROUP = None
# The group ID that designates the users whose comments should be e-mailed to MANAGERS.
# Set to None if you're not using it.
COMMENTS_SKETCHY_USERS_GROUP = None
# The system will e-mail MANAGERS the first COMMENTS_FIRST_FEW comments by each
# user. Set this to 0 if you want to disable it.
COMMENTS_FIRST_FEW = 0
# A tuple of IP addresses that have been banned from participating in various
# Django-powered features.
BANNED_IPS = ()
##################
# AUTHENTICATION #
##################
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
###########
# TESTING #
###########
# The name of the method to use to invoke the test suite
TEST_RUNNER = 'django.test.simple.run_tests'
# The name of the database to use for testing purposes.
# If None, a name of 'test_' + DATABASE_NAME will be assumed
TEST_DATABASE_NAME = None
# Strings used to set the character set and collation order for the test
# database. These values are passed literally to the server, so they are
# backend-dependent. If None, no special settings are sent (system defaults are
# used).
TEST_DATABASE_CHARSET = None
TEST_DATABASE_COLLATION = None
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
|
AloneRoad/Inforlearn
|
vendor/django/conf/global_settings.py
|
Python
|
apache-2.0
| 14,586
|
[
"VisIt"
] |
2bef68d8eab6ac886f0ff9523127ff25cd1a2f8b7c24778522df43b8d2d7b161
|
''' <h1> Library for specular and off-specular x-ray reflectivity</h1>
interdiff is a model for specular and off-specular simulations including
the effects of interdiffusion in the calculations. The specular simulation
is conducted with Parratt's recursion formula. The off-specular, diffuse
calculations are done with the distorted-wave Born approximation (DWBA) as
derived by Holy and with the extensions by Wormington to include
diffuse interfaces.
<h2>Classes</h2>
<h3>Layer</h3>
<code> Layer(d = 0.0, f = 0.0+0.0J, dens = 1.0, sigmai = 0.0, sigmar = 0.0)</code>
<dl>
<dt><code><b>d</b></code></dt>
<dd>The thickness of the layer in AA (Angstroms = 1e-10m)</dd>
<dt><code><b>f</b></code></dt>
<dd>The x-ray scattering length per formula unit in electrons. To be strict it is the
number of Thomson scattering lengths for each formula unit.</dd>
<dt><code><b>dens</b></code></dt>
<dd>The density of formula units per cubic Angstrom. Note the units!</dd>
<dt><code><b>sigmai</b></code></dt>
<dd>The root mean square <em>interdiffusion</em> of the top interface of the layer in Angstroms.</dd>
<dt><code><b>sigmar</b></code></dt>
<dd>The root mean square <em>roughness</em> of the top interface of the layer in Angstroms.</dd>
</dl>
<h3>Stack</h3>
<code> Stack(Layers = [], Repetitions = 1)</code>
<dl>
<dt><code><b>Layers</b></code></dt>
<dd>A <code>list</code> consisting of <code>Layer</code>s in the stack;
the first item is the layer closest to the bottom</dd>
<dt><code><b>Repetitions</b></code></dt>
<dd>The number of repetitions of the stack</dd>
</dl>
<h3>Sample</h3>
<code> Sample(Stacks = [], Ambient = Layer(), Substrate = Layer(), eta_z = 10.0,
eta_x = 10.0, h = 1.0)</code>
<dl>
<dt><code><b>Stacks</b></code></dt>
<dd>A <code>list</code> consisting of <code>Stack</code>s in the sample;
the first item is the stack closest to the bottom</dd>
<dt><code><b>Ambient</b></code></dt>
<dd>A <code>Layer</code> describing the ambient (environment above the sample).
Only the scattering lengths and density of the layer are used.</dd>
<dt><code><b>Substrate</b></code></dt>
<dd>A <code>Layer</code> describing the substrate (environment below the sample).
Only the scattering lengths, density and roughness of the layer are used.</dd>
<dt><code><b>eta_z</b></code></dt>
<dd>The out-of-plane (vertical) correlation length of the roughness
in the sample. Given in AA. </dd>
<dt><code><b>eta_x</b></code></dt>
<dd>The in-plane global correlation length (it is assumed equal for all layers).
Given in AA.</dd>
<dt><code><b>h</b></code></dt>
<dd>The jaggedness parameter, should be between 0 and 1.0. This describes
how jagged the interfaces are. This is also a global parameter for all
interfaces.</dd>
</dl>
<h3>Instrument</h3>
<code>Instrument(wavelength = 1.54, coords = 'tth',
I0 = 1.0, res = 0.001, restype = 'no conv', respoints = 5, resintrange = 2,
beamw = 0.01, footype = 'no corr', samplelen = 10.0, taylor_n = 1)</code>
<dl>
<dt><code><b>wavelength</b></code></dt>
<dd>The wavelength of the radiation given in AA (Angstroms)</dd>
<dt><code><b>coords</b></code></dt>
<dd>The coordinates of the data given to the SimSpecular function.
The available alternatives are: 'q' or 'tth'. Alternatively the numbers
0 (q) or 1 (tth) can be used.</dd>
<dt><code><b>I0</b></code></dt>
<dd>The incident intensity (a scaling factor)</dd>
<dt><code><b>Ibkg</b></code></dt>
<dd>The background intensity. Added as a constant value to the calculated
reflectivity</dd>
<dt><code><b>res</b></code></dt>
<dd>The resolution of the instrument given in the coordinates of
<code>coords</code>. This assumes a Gaussian resolution function and
<code>res</code> is the standard deviation of that Gaussian.</dd>
<dt><code><b>restype</b></code></dt>
<dd>Describes the type of the resolution calculated. One of the alternatives:
'no conv', 'fast conv', 'full conv and varying res.' or 'fast conv + varying res.'.
The respective numbers 0-3 also work. Note that fast convolution only allows
a single value in res whereas the others can also take an array with the
same length as the x-data (varying resolution)</dd>
<dt><code><b>respoints</b></code></dt>
<dd>The number of points to include in the resolution calculation. This is only
used for 'full conv and varying res.' and 'fast conv + varying res.'</dd>
<dt><code><b>resintrange</b></code></dt>
<dd>Number of standard deviations to integrate the resolution function times
the reflectivity over</dd>
<dt><code><b>footype</b></code></dt>
<dd>Which type of footprint correction is to be applied to the simulation.
One of: 'no corr', 'gauss beam' or 'square beam'. Alternatively,
the number 0-2 are also valid. The different choices are self explanatory.
</dd>
<dt><code><b>beamw</b></code></dt>
<dd>The width of the beam given in mm. For 'gauss beam' it should be
the standard deviation. For 'square beam' it is the full width of the beam.</dd>
<dt><code><b>samplelen</b></code></dt>
<dd>The length of the sample given in mm</dd>
<dt><code><b>taylor_n</b></code></dt>
<dd>The number of terms taken into account in the Taylor expansion of
the Fourier integral of the correlation function. More terms give a more
accurate calculation but are also much slower.</dd>
'''
#import lib.paratt as Paratt
try:
import lib.paratt_weave as Paratt
except StandardError,S:
print 'Not using inline c code for reflectivity calcs - can not import module'
print S
import lib.paratt as Paratt
__offspec__ = True
try:
import lib.offspec2_weave
except Exception,S:
print 'Failed to import: offspec2_weave, No off-specular simulations possible'
print S
__offspec__ = False
from numpy import *
from scipy.special import erf
from lib.instrument import *
# Preamble to define the parameters needed for the models outlined below:
ModelID='MingInterdiff'
# Automatic loading of parameters possible by including this list
__pars__ = ['Layer', 'Stack', 'Sample', 'Instrument']
# Used for making choices in the GUI
instrument_string_choices = {'coords': ['q','tth'],\
'restype': ['no conv', 'fast conv',\
'full conv and varying res.', 'fast conv + varying res.'],\
'footype': ['no corr', 'gauss beam', 'square beam']}
InstrumentParameters={'wavelength':1.54,'coords':'tth','I0':1.0,'res':0.001,\
'restype':'no conv','respoints':5,'resintrange':2,'beamw':0.01,'footype': 'no corr',\
'samplelen':10.0, 'Ibkg': 0.0, 'taylor_n': 1}
# Coordinates=1 => two-theta input
# Coordinates=0 => Q input
# Res: stddev of resolution
# ResType 0: No resolution convolution
#         1: Fast convolution
#         2: Full convolution + varying resolution
#         3: Fast convolution, varying resolution
# ResPoints: Number of points for the convolution, only valid for ResolutionType=2
# ResIntrange: Number of standard deviations to integrate over, default 2
# Parameters for footprint corrections
# Footype: 0: No corrections for footprint
#          1: Correction for Gaussian beam => Beamw given in mm and stddev
#          2: Correction for square profile => Beamw given in full width mm
# Samlen = sample length in mm.
LayerParameters = {'sigmai':0.0, 'sigmar':0.0, 'dens':1.0, 'd':0.0,\
'f':0.0+1.0j}
StackParameters = {'Layers':[], 'Repetitions':1}
SampleParameters = {'Stacks':[], 'Ambient':None, 'Substrate':None, 'h':1.0,\
'eta_z':10.0, 'eta_x':10.0}
def Specular(TwoThetaQz, sample, instrument):
# preamble to get it working with my class interface
restype = instrument.getRestype()
if restype == 2 or restype == instrument_string_choices['restype'][2]:
(TwoThetaQz,weight) = ResolutionVector(TwoThetaQz[:], \
instrument.getRes(), instrument.getRespoints(),\
range=instrument.getResintrange())
if instrument.getCoords() == 1 or\
instrument.getCoords() == instrument_string_choices['coords'][1]:
theta = TwoThetaQz/2
elif instrument.getCoords() == 0 or\
instrument.getCoords() == instrument_string_choices['coords'][0]:
theta = arcsin(TwoThetaQz/4/pi*instrument.getWavelength())*180./pi
lamda = instrument.getWavelength()
parameters = sample.resolveLayerParameters()
dens = array(parameters['dens'], dtype = complex64)
#print [type(f) for f in parameters['f']]
f = array(parameters['f'], dtype = complex64)
re = 2.82e-13*1e2/1e-10
n = 1 - dens*re*lamda**2/2/pi*f*1e-4
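    # n = 1 - r_e * lambda^2 * dens * f / (2*pi): the standard x-ray index of
    # refraction from the scattering length density. The trailing 1e-4
    # completes the unit conversion of r_e to Angstroms
    # (0.282 * 1e-4 = 2.82e-5 AA, the classical electron radius).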
d = array(parameters['d'], dtype = float64)
#d = d[1:-1]
sigmar = array(parameters['sigmar'], dtype = float64)
#sigmar = sigmar[:-1]
sigmai = array(parameters['sigmai'], dtype = float64)
#sigmai = sigmai[:-1]
sigma = sqrt(sigmai**2 + sigmar**2)
#print sigma
R = Paratt.Refl(theta, lamda, n, d, sigma)*instrument.getI0()
#FootprintCorrections
foocor = 1.0
footype = instrument.getFootype()
beamw = instrument.getBeamw()
samlen = instrument.getSamplelen()
if footype == 0 or footype == instrument_string_choices['footype'][0]:
foocor = 1.0
elif footype == 1 or footype == instrument_string_choices['footype'][1]:
foocor = GaussIntensity(theta, samlen/2.0, samlen/2.0, beamw)
elif footype == 2 or footype == instrument_string_choices['footype'][2]:
foocor = SquareIntensity(theta, samlen, beamw)
else:
        raise ValueError('Variable footype has an invalid value')
if restype == 0 or restype == instrument_string_choices['restype'][0]:
R = R[:]*foocor
elif restype == 1 or restype == instrument_string_choices['restype'][1]:
R = ConvoluteFast(TwoThetaQz,R[:]*foocor, instrument.getRes(),\
range = instrument.getResintrange())
elif restype == 2 or restype == instrument_string_choices['restype'][2]:
R = ConvoluteResolutionVector(TwoThetaQz,R[:]*foocor, weight)
elif restype == 3 or restype == instrument_string_choices['restype'][3]:
R = ConvoluteFastVar(TwoThetaQz,R[:]*foocor, instrument.getRes(),\
range = instrument.getResintrange())
else:
        raise ValueError('Variable restype has an invalid value')
return R + instrument.getIbkg()
def OffSpecularMingInterdiff(TwoThetaQz, ThetaQx, sample, instrument):
lamda = instrument.getWavelength()
if instrument.getCoords() == 1 or\
instrument.getCoords() == instrument_string_choices['coords'][1]:
alphaR1 = ThetaQx
betaR1 = TwoThetaQz - ThetaQx
qx = 2*pi/lamda*(cos(alphaR1*pi/180) - cos(betaR1*pi/180))
qz = 2*pi/lamda*(sin(alphaR1*pi/180) + sin(betaR1*pi/180))
else:
qz = TwoThetaQz
qx = ThetaQx
#print qx
#print qz
parameters = sample.resolveLayerParameters()
def toarray(a, code):
a = list(a)
a.reverse()
return array(a, dtype = code)
dens = array(parameters['dens'], dtype = complex64)
f = array(parameters['f'], dtype = complex64)
re = 2.82e-13*1e2/1e-10
n = 1 - dens*re*lamda**2/2/pi*f*1e-4
n = toarray(n, code = complex64)
sigmar = toarray(parameters['sigmar'], code = float64)
sigmar = sigmar[1:]
#print sigmar
sigmai = toarray(parameters['sigmai'],code = float64)
sigmai = sigmai[1:] + 1e-5
#print sigmai
d=toarray(parameters['d'], code = float64)
d=r_[0, d[1:-1]]
#print d
z = -cumsum(d)
#print z
eta = sample.getEta_x()
#print eta
h = sample.getH()
#print h
eta_z = sample.getEta_z()
#print eta_z
if __offspec__:
(I, alpha, omega) = lib.offspec2_weave.DWBA_Interdiff(qx, qz, lamda, n, z,\
sigmar, sigmai, eta, h, eta_z, d,\
taylor_n = instrument.getTaylor_n())
else:
I=ones(len(qx*qz))
return real(I)*instrument.getI0() + instrument.getIbkg()
def SLD_calculations(z, sample, inst):
    ''' Calculates the scattering length density at the positions z
'''
parameters = sample.resolveLayerParameters()
dens = array(parameters['dens'], dtype = complex64)
f = array(parameters['f'], dtype = complex64)
sld = dens*f
d_sld = sld[:-1] - sld[1:]
d = array(parameters['d'], dtype = float64)
d = d[1:-1]
# Include one extra element - the zero pos (substrate/film interface)
int_pos = cumsum(r_[0,d])
sigmar = array(parameters['sigmar'], dtype = float64)
sigmar = sigmar[:-1]
sigmai = array(parameters['sigmai'], dtype = float64)
sigmai = sigmai[:-1]
sigma = sqrt(sigmai**2 + sigmar**2)+1e-7
    if z is None:
z = arange(-sigma[0]*5, int_pos.max()+sigma[-1]*5, 0.5)
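    # Each interface at int_pos[i] contributes an error-function step of rms
    # width sigma[i]; summing the weighted steps and adding the terminating
    # layer's SLD gives the laterally averaged depth profile.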
rho = sum(d_sld*(0.5 - 0.5*erf((z[:,newaxis]-int_pos)/sqrt(2.)/sigma)), 1) + sld[-1]
return {'real sld': real(rho), 'imag sld': imag(rho), 'z':z}
SimulationFunctions = {'Specular':Specular,\
'OffSpecular':OffSpecularMingInterdiff,\
'SLD': SLD_calculations}
import lib.refl as Refl
(Instrument, Layer, Stack, Sample) = Refl.MakeClasses(InstrumentParameters,\
LayerParameters,StackParameters,\
SampleParameters, SimulationFunctions, ModelID)
if __name__=='__main__':
Fe=Layer(d=10,sigmar=3.0,n=1-2.247e-5+2.891e-6j)
Si=Layer(d=15,sigmar=3.0,n=1-7.577e-6+1.756e-7j)
sub=Layer(sigmar=3.0,n=1-7.577e-6+1.756e-7j)
amb=Layer(n=1.0,sigmar=1.0)
stack=Stack(Layers=[Fe,Si],Repetitions=20)
sample=Sample(Stacks=[stack],Ambient=amb,Substrate=sub,eta_z=500.0,eta_x=100.0)
print sample
inst=Instrument(Wavelength=1.54,Coordinates=1)
|
jackey-qiu/genx
|
models/interdiff.py
|
Python
|
gpl-3.0
| 13,802
|
[
"Gaussian"
] |
dece41702b319d38b80308daee4c23f6726d83b80fa6ed53257fc05900643d8d
|
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2010-2013 Francois Beaune, Jupiter Jazz Limited
# Copyright (c) 2014-2017 Francois Beaune, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function
from distutils import archive_util, dir_util
from stat import *
from subprocess import *
from xml.etree.ElementTree import ElementTree
import glob
import os
import platform
import re
import shutil
import stat
import subprocess
import sys
import time
import traceback
import zipfile
#--------------------------------------------------------------------------------------------------
# Constants.
#--------------------------------------------------------------------------------------------------
VERSION = "2.4.5"
SETTINGS_FILENAME = "appleseed.package.configuration.xml"
#--------------------------------------------------------------------------------------------------
# Utility functions.
#--------------------------------------------------------------------------------------------------
def info(message):
print(" " + message)
def progress(message):
print(" " + message + "...")
def fatal(message):
print("Fatal: " + message + ". Aborting.")
if sys.exc_info()[0]:
print(traceback.format_exc())
sys.exit(1)
def exe(filepath):
return filepath + ".exe" if os.name == "nt" else filepath
def safe_delete_file(path):
try:
if os.path.exists(path):
os.remove(path)
except OSError:
fatal("Failed to delete file '" + path + "'")
def on_rmtree_error(func, path, exc_info):
# path contains the path of the file that couldn't be removed.
# Let's just assume that it's read-only and unlink it.
os.chmod(path, stat.S_IWRITE)
os.unlink(path)
def safe_delete_directory(path):
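    # Retry the removal a few times, presumably to tolerate transient file
    # locks (e.g. antivirus or indexing services on Windows briefly holding
    # freshly written files open).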
Attempts = 10
for attempt in range(Attempts):
try:
if os.path.exists(path):
shutil.rmtree(path, onerror=on_rmtree_error)
return
except OSError:
if attempt < Attempts - 1:
time.sleep(0.5)
else:
fatal("Failed to delete directory '" + path + "'")
def safe_make_directory(path):
if not os.path.isdir(path):
os.makedirs(path)
def pushd(path):
old_path = os.getcwd()
os.chdir(path)
return old_path
def extract_zip_file(zip_path, output_path):
zf = zipfile.ZipFile(zip_path)
zf.extractall(output_path)
zf.close()
def copy_glob(input_pattern, output_path):
for input_file in glob.glob(input_pattern):
shutil.copy(input_file, output_path)
def make_writable(filepath):
os.chmod(filepath, S_IRUSR | S_IWUSR)
#--------------------------------------------------------------------------------------------------
# Settings.
#--------------------------------------------------------------------------------------------------
class Settings:
def load(self):
print("Loading settings from " + SETTINGS_FILENAME + "...")
tree = ElementTree()
try:
tree.parse(SETTINGS_FILENAME)
except IOError:
fatal("Failed to load configuration file '" + SETTINGS_FILENAME + "'")
self.load_values(tree)
self.print_summary()
def load_values(self, tree):
self.platform = self.__get_required(tree, "platform")
self.configuration = self.__get_required(tree, "configuration")
self.appleseed_path = self.__get_required(tree, "appleseed_path")
self.appleseed_headers_path = self.__get_required(tree, "appleseed_headers_path")
self.qt_runtime_path = self.__get_required(tree, "qt_runtime_path")
self.platform_runtime_path = self.__get_required(tree, "platform_runtime_path")
self.package_output_path = self.__get_required(tree, "package_output_path")
def print_summary(self):
print("")
print(" Platform: " + self.platform)
print(" Configuration: " + self.configuration)
print(" Path to appleseed: " + self.appleseed_path)
print(" Path to appleseed headers: " + self.appleseed_headers_path)
print(" Path to Qt runtime: " + self.qt_runtime_path)
if os.name == "nt":
print(" Path to platform runtime: " + self.platform_runtime_path)
print(" Output directory: " + self.package_output_path)
print("")
def __get_required(self, tree, key):
value = tree.findtext(key)
if value is None:
fatal("Missing value \"{0}\" in configuration file".format(key))
return value
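# For reference, a minimal settings file matching the keys read by Settings
# above might look like this (root tag and values are illustrative only):
#
#   <settings>
#       <platform>linux</platform>
#       <configuration>Ship</configuration>
#       <appleseed_path>/path/to/appleseed</appleseed_path>
#       <appleseed_headers_path>/path/to/appleseed/src/appleseed</appleseed_headers_path>
#       <qt_runtime_path>/path/to/qt/lib</qt_runtime_path>
#       <platform_runtime_path>/path/to/platform/runtime</platform_runtime_path>
#       <package_output_path>/path/to/packages</package_output_path>
#   </settings>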
#--------------------------------------------------------------------------------------------------
# Package information.
#--------------------------------------------------------------------------------------------------
class PackageInfo:
def __init__(self, settings):
self.settings = settings
def load(self):
print("Loading package information...")
self.retrieve_git_tag()
self.build_package_path()
self.print_summary()
def retrieve_git_tag(self):
old_path = pushd(self.settings.appleseed_path)
self.version = Popen("git describe --long", stdout=PIPE, shell=True).stdout.read().strip()
os.chdir(old_path)
def build_package_path(self):
package_name = "appleseed-" + self.version + "-" + self.settings.platform + ".zip"
self.package_path = os.path.join(self.settings.package_output_path, self.version, package_name)
def print_summary(self):
print("")
print(" Version: " + self.version)
print(" Package path: " + self.package_path)
print("")
#--------------------------------------------------------------------------------------------------
# Base package builder.
#--------------------------------------------------------------------------------------------------
class PackageBuilder:
def __init__(self, settings, package_info):
self.settings = settings
self.package_info = package_info
def build_package(self):
print("Building package:")
print("")
self.orchestrate()
print("")
print("The package was successfully built.")
def orchestrate(self):
self.remove_leftovers()
self.retrieve_sandbox_from_git_repository()
self.deploy_sandbox_to_stage()
self.cleanup_stage()
self.add_local_binaries_to_stage()
self.add_local_libraries_to_stage()
self.add_headers_to_stage()
self.add_shaders_to_stage()
self.add_scripts_to_stage()
self.add_local_schema_files_to_stage()
self.add_text_files_to_stage()
self.add_dummy_files_into_empty_directories()
self.disable_system_qt_plugins()
self.alter_stage()
self.build_final_zip_file()
self.remove_stage()
def remove_leftovers(self):
progress("Removing leftovers from previous invocations")
safe_delete_directory("appleseed")
safe_delete_file("sandbox.zip")
safe_delete_file(self.package_info.package_path)
def retrieve_sandbox_from_git_repository(self):
progress("Retrieving sandbox from Git repository")
old_path = pushd(os.path.join(self.settings.appleseed_path, "sandbox"))
self.run("git archive --format=zip --output=" + os.path.join(old_path, "sandbox.zip") + " --worktree-attributes HEAD")
os.chdir(old_path)
def deploy_sandbox_to_stage(self):
progress("Deploying sandbox to staging directory")
extract_zip_file("sandbox.zip", "appleseed/")
safe_delete_file("sandbox.zip")
def cleanup_stage(self):
progress("Cleaning up staging directory")
# Remove API reference documentation.
safe_delete_directory("appleseed/documentation/apireference")
# Remove the test suite.
safe_delete_directory("appleseed/tests/test scenes")
# Remove voluminous unit tests/benchmarks data.
safe_delete_file("appleseed/tests/unit benchmarks/inputs/test_knn_particles.bin")
safe_delete_file("appleseed/tests/unit benchmarks/inputs/test_knn_photons.bin")
# Remove the devkit which we ship separately.
safe_delete_directory("appleseed/extras/devkit")
def add_local_binaries_to_stage(self):
progress("Adding local binaries to staging directory")
safe_make_directory("appleseed/bin")
dir_util.copy_tree(os.path.join(self.settings.appleseed_path, "sandbox/bin", self.settings.configuration), "appleseed/bin/")
shutil.copy(os.path.join(self.settings.appleseed_path, "sandbox/bin", exe("maketx")), "appleseed/bin/")
shutil.copy(os.path.join(self.settings.appleseed_path, "sandbox/bin", exe("oslc")), "appleseed/bin/")
shutil.copy(os.path.join(self.settings.appleseed_path, "sandbox/bin", exe("oslinfo")), "appleseed/bin/")
def add_local_libraries_to_stage(self):
progress("Adding local libraries to staging directory")
safe_make_directory("appleseed/lib")
dir_util.copy_tree(os.path.join(self.settings.appleseed_path, "sandbox/lib", self.settings.configuration), "appleseed/lib/")
#
# This method is used by the Mac and Linux package builders.
# It requires the following members to be defined:
#
# self.shared_lib_ext
# self.get_dependencies_for_file()
#
def add_unix_dependencies_to_stage(self):
# Get shared libs needed by binaries.
bin_libs = set()
for dirpath, dirnames, filenames in os.walk("appleseed/bin"):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext != ".py" and ext != ".conf":
libs = self.get_dependencies_for_file(os.path.join("appleseed/bin", filename))
bin_libs = bin_libs.union(libs)
# Get shared libs needed by appleseed.python.
for dirpath, dirnames, filenames in os.walk("appleseed/lib"):
appleseedpython_shared_lib = "_appleseedpython" + self.shared_lib_ext
if appleseedpython_shared_lib in filenames:
libs = self.get_dependencies_for_file(os.path.join(dirpath, appleseedpython_shared_lib))
bin_libs = bin_libs.union(libs)
# Get shared libs needed by libraries.
lib_libs = set()
for lib in bin_libs:
libs = self.get_dependencies_for_file(lib)
lib_libs = lib_libs.union(libs)
all_libs = bin_libs.union(lib_libs)
if False:
# Print dependencies.
info(" Dependencies:")
for lib in all_libs:
info(" " + lib)
# Copy needed libs to lib directory.
dest_dir = os.path.join("appleseed", "lib/")
for lib in all_libs:
# The library might already exist, but without writing rights.
lib_name = os.path.basename(lib)
dest_path = os.path.join(dest_dir, lib_name)
if not os.path.exists(dest_path):
progress(" Copying {0} to {1}".format(lib, dest_dir))
shutil.copy(lib, dest_dir)
make_writable(dest_path)
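    # A minimal sketch of the contract described above, mirroring what the
    # Mac and Linux builders below provide (class name illustrative only):
    #
    #     class SomeUnixPackageBuilder(PackageBuilder):
    #         def __init__(self, settings, package_info):
    #             PackageBuilder.__init__(self, settings, package_info)
    #             self.shared_lib_ext = ".so"  # ".dylib" on Mac
    #
    #         def get_dependencies_for_file(self, filename):
    #             return set()  # e.g. parse ldd/otool output here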
def add_headers_to_stage(self):
progress("Adding headers to staging directory")
# appleseed headers.
safe_make_directory("appleseed/include")
ignore_files = shutil.ignore_patterns("*.cpp", "*.c", "*.xsd", "snprintf", "version.h.in")
shutil.copytree(os.path.join(self.settings.appleseed_headers_path, "foundation"), "appleseed/include/foundation", ignore=ignore_files)
shutil.copytree(os.path.join(self.settings.appleseed_headers_path, "main"), "appleseed/include/main", ignore=ignore_files)
shutil.copytree(os.path.join(self.settings.appleseed_headers_path, "renderer"), "appleseed/include/renderer", ignore=ignore_files)
def add_shaders_to_stage(self):
progress("Adding shaders to staging directory")
shutil.rmtree("appleseed/shaders")
shutil.copytree(os.path.join(self.settings.appleseed_path, "sandbox/shaders"), "appleseed/shaders")
def add_scripts_to_stage(self):
progress("Adding scripts to staging directory")
shutil.copy("convertmany.py", "appleseed/bin/")
shutil.copy("rendermany.py", "appleseed/bin/")
shutil.copy("updatemany.py", "appleseed/bin/")
shutil.copy("rendernode.py", "appleseed/bin/")
shutil.copy("rendermanager.py", "appleseed/bin/")
shutil.copy("mitsuba2appleseed.py", "appleseed/bin/")
def add_local_schema_files_to_stage(self):
progress("Adding local schema files to staging directory")
safe_make_directory("appleseed/schemas")
copy_glob(os.path.join(self.settings.appleseed_path, "sandbox/schemas/*.xsd"), "appleseed/schemas/")
def add_text_files_to_stage(self):
progress("Adding LICENSE.txt and README.md files")
shutil.copy(os.path.join(self.settings.appleseed_path, "LICENSE.txt"), "appleseed/")
shutil.copy(os.path.join(self.settings.appleseed_path, "README.md"), "appleseed/")
def add_dummy_files_into_empty_directories(self):
progress("Adding dummy files to preserve empty directories")
for dirpath, dirnames, filenames in os.walk("."):
if len(dirnames) == 0 and len(filenames) == 0:
self.create_preserve_file(dirpath)
def disable_system_qt_plugins(self):
progress("Disabling system's Qt plugins")
with open("appleseed/bin/qt.conf", "w") as f:
pass
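        # An empty qt.conf next to the executables makes Qt treat the
        # application directory as its prefix, so (presumably) the bundled
        # Qt libraries are used instead of any system-wide plugins.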
def create_preserve_file(self, path):
with open(os.path.join(path, "preserve.txt"), "w") as f:
f.write("This file allows to preserve this otherwise empty directory.\n")
# This method is overridden in the platform-specific builders below.
def alter_stage(self):
return
def build_final_zip_file(self):
progress("Building final zip file from staging directory")
package_base_path = os.path.splitext(self.package_info.package_path)[0]
archive_util.make_zipfile(package_base_path, "appleseed")
def remove_stage(self):
progress("Deleting staging directory...")
safe_delete_directory("appleseed")
def run(self, cmdline):
info("Running command line: {0}".format(cmdline))
os.system(cmdline)
def run_subprocess(self, cmdline):
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return p.returncode, out, err
#--------------------------------------------------------------------------------------------------
# Windows package builder.
#--------------------------------------------------------------------------------------------------
class WindowsPackageBuilder(PackageBuilder):
def alter_stage(self):
self.add_dependencies_to_stage()
def add_dependencies_to_stage(self):
progress("Windows-specific: Adding dependencies to staging directory")
self.copy_qt_framework("QtCore")
self.copy_qt_framework("QtGui")
copy_glob(os.path.join(self.settings.platform_runtime_path, "*"), "appleseed/bin/")
def copy_qt_framework(self, framework_name):
src_filepath = os.path.join(self.settings.qt_runtime_path, framework_name + "4" + ".dll")
dst_path = os.path.join("appleseed", "bin")
shutil.copy(src_filepath, dst_path)
#--------------------------------------------------------------------------------------------------
# Mac package builder.
#--------------------------------------------------------------------------------------------------
class MacPackageBuilder(PackageBuilder):
def __init__(self, settings, package_info):
PackageBuilder.__init__(self, settings, package_info)
self.shared_lib_ext = ".dylib"
self.system_libs_prefixes = ["/System/Library/", "/usr/lib/libcurl", "/usr/lib/libc++",
"/usr/lib/libbz2", "/usr/lib/libSystem", "usr/lib/libz",
"/usr/lib/libncurses"]
def alter_stage(self):
safe_delete_file("appleseed/bin/.DS_Store")
self.add_dependencies_to_stage()
self.fixup_binaries()
self.create_qt_conf_file()
os.rename("appleseed/bin/appleseed.studio", "appleseed/bin/appleseed-studio")
def add_dependencies_to_stage(self):
progress("Mac-specific: Adding dependencies to staging directory")
self.add_unix_dependencies_to_stage()
self.copy_qt_framework("QtCore")
self.copy_qt_framework("QtGui")
self.copy_qt_resources("QtGui")
self.copy_qt_framework("QtOpenGL")
def copy_qt_framework(self, framework_name):
framework_dir = framework_name + ".framework"
src_filepath = os.path.join(self.settings.qt_runtime_path, framework_dir, "Versions", "4", framework_name)
dest_path = os.path.join("appleseed", "lib", framework_dir, "Versions", "4")
safe_make_directory(dest_path)
shutil.copy(src_filepath, dest_path)
make_writable(os.path.join(dest_path, framework_name))
def copy_qt_resources(self, framework_name):
framework_dir = framework_name + ".framework"
src_path = os.path.join(self.settings.qt_runtime_path, framework_dir, "Versions", "4", "Resources")
dest_path = os.path.join("appleseed", "lib", framework_dir, "Resources")
shutil.copytree(src_path, dest_path)
def fixup_binaries(self):
progress("Mac-specific: Fixing up binaries")
self.set_libraries_ids()
self.set_qt_framework_ids()
self.change_library_paths_in_libraries()
self.change_library_paths_in_executables()
self.change_qt_framework_paths_in_qt_frameworks()
def set_libraries_ids(self):
for dirpath, dirnames, filenames in os.walk("appleseed/lib"):
for filename in filenames:
if os.path.splitext(filename)[1] == ".dylib":
lib_path = os.path.join(dirpath, filename)
self.set_library_id(lib_path, filename)
def set_qt_framework_ids(self):
self.set_library_id("appleseed/lib/QtCore.framework/Versions/4/QtCore", "QtCore.framework/Versions/4/QtCore")
self.set_library_id("appleseed/lib/QtGui.framework/Versions/4/QtGui", "QtGui.framework/Versions/4/QtGui")
self.set_library_id("appleseed/lib/QtOpenGL.framework/Versions/4/QtOpenGL", "QtOpenGL.framework/Versions/4/QtOpenGL")
def change_library_paths_in_libraries(self):
for dirpath, dirnames, filenames in os.walk("appleseed/lib"):
for filename in filenames:
if os.path.splitext(filename)[1] == ".dylib":
lib_path = os.path.join(dirpath, filename)
self.change_library_paths_in_binary(lib_path)
self.change_qt_framework_paths_in_binary(lib_path)
def change_library_paths_in_executables(self):
for dirpath, dirnames, filenames in os.walk("appleseed/bin"):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext != ".py" and ext != ".conf":
exe_path = os.path.join(dirpath, filename)
self.change_library_paths_in_binary(exe_path)
self.change_qt_framework_paths_in_binary(exe_path)
# Can be used on executables and dynamic libraries.
def change_library_paths_in_binary(self, bin_path):
for lib_path in self.get_dependencies_for_file(bin_path, fix_paths=False):
lib_name = os.path.basename(lib_path)
self.change_library_path(bin_path, lib_path, "@executable_path/../lib/" + lib_name)
# Can be used on executables and dynamic libraries.
def change_qt_framework_paths_in_binary(self, bin_path):
for fwk_path in self.get_qt_frameworks_for_file(bin_path):
fwk_name = re.search(r"(Qt.*)\.framework", fwk_path).group(1)
self.change_library_path(bin_path, fwk_path, "@executable_path/../lib/{0}.framework/Versions/4/{0}".format(fwk_name))
def change_qt_framework_paths_in_qt_frameworks(self):
self.change_qt_framework_paths_in_binary("appleseed/lib/QtCore.framework/Versions/4/QtCore")
self.change_qt_framework_paths_in_binary("appleseed/lib/QtGui.framework/Versions/4/QtGui")
self.change_qt_framework_paths_in_binary("appleseed/lib/QtOpenGL.framework/Versions/4/QtOpenGL")
def set_library_id(self, target, name):
self.run('install_name_tool -id "{0}" {1}'.format(name, target))
def change_library_path(self, target, old, new):
self.run('install_name_tool -change "{0}" "{1}" {2}'.format(old, new, target))
def get_dependencies_for_file(self, filename, fix_paths=True):
returncode, out, err = self.run_subprocess(["otool", "-L", filename])
if returncode != 0:
fatal("Failed to invoke otool(1) to get dependencies for {0}: {1}".format(filename, err))
libs = set()
for line in out.split("\n")[1:]: # skip the first line
line = line.strip()
# Ignore empty lines.
if len(line) == 0:
continue
# Parse the line.
m = re.match(r"(.*) \(compatibility version .*, current version .*\)", line)
if not m:
fatal("Failed to parse line from otool(1) output: " + line)
lib = m.group(1)
# Ignore libs relative to @loader_path.
if "@loader_path" in lib:
continue
# Ignore system libs.
if self.is_system_lib(lib):
continue
# Ignore Qt frameworks.
if re.search(r"Qt.*\.framework", lib):
continue
if fix_paths:
# Optionally search for libraries in other places.
if not os.path.exists(lib):
candidate = os.path.join("/usr/local/lib/", lib)
if os.path.exists(candidate):
lib = candidate
libs.add(lib)
return libs
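    # For reference, the otool -L lines parsed above typically look like:
    #
    #     /usr/lib/libz.1.dylib (compatibility version 1.0.0, current version 1.2.8)
    #
    # and the regular expression captures the library path before the
    # parenthesized version information.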
def get_qt_frameworks_for_file(self, filename, fix_paths=True):
returncode, out, err = self.run_subprocess(["otool", "-L", filename])
if returncode != 0:
fatal("Failed to invoke otool(1) to get dependencies for {0}: {1}".format(filename, err))
libs = set()
for line in out.split("\n")[1:]: # skip the first line
line = line.strip()
# Ignore empty lines.
if len(line) == 0:
continue
# Parse the line.
m = re.match(r"(.*) \(compatibility version .*, current version .*\)", line)
if not m:
fatal("Failed to parse line from otool(1) output: " + line)
lib = m.group(1)
if re.search(r"Qt.*\.framework", lib):
libs.add(lib)
return libs
def is_system_lib(self, lib):
for prefix in self.system_libs_prefixes:
if lib.startswith(prefix):
return True
return False
def create_qt_conf_file(self):
safe_make_directory("appleseed/bin/Contents/Resources")
open("appleseed/bin/Contents/Resources/qt.conf", "w").close()
#--------------------------------------------------------------------------------------------------
# Linux package builder.
#--------------------------------------------------------------------------------------------------
class LinuxPackageBuilder(PackageBuilder):
def __init__(self, settings, package_info):
PackageBuilder.__init__(self, settings, package_info)
self.shared_lib_ext = ".so"
self.system_libs_prefixes = ["linux", "librt", "libpthread", "libGL", "libX", "libselinux",
"libICE", "libSM", "libdl", "libm.so", "libgcc", "libc.so",
"/lib64/ld-linux-", "libstdc++", "libxcb", "libdrm", "libnsl",
"libuuid", "libgthread", "libglib", "libgobject", "libglapi",
"libffi", "libfontconfig", "libutil", "libpython",
"libxshmfence.so"]
def alter_stage(self):
self.make_executable(os.path.join("appleseed/bin", "maketx"))
self.make_executable(os.path.join("appleseed/bin", "oslc"))
self.make_executable(os.path.join("appleseed/bin", "oslinfo"))
self.add_dependencies_to_stage()
self.set_runtime_paths_on_binaries()
self.clear_runtime_paths_on_libraries()
def make_executable(self, filepath):
mode = os.stat(filepath)[ST_MODE]
mode |= S_IXUSR | S_IXGRP | S_IXOTH
os.chmod(filepath, mode)
def add_dependencies_to_stage(self):
progress("Linux-specific: Adding dependencies to staging directory")
self.add_unix_dependencies_to_stage()
def set_runtime_paths_on_binaries(self):
progress("Linux-specific: Setting runtime paths on binaries")
for dirpath, dirnames, filenames in os.walk("appleseed/bin"):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext != ".py" and ext != ".conf":
self.run("chrpath -r \$ORIGIN/../lib " + os.path.join("appleseed/bin", filename))
def clear_runtime_paths_on_libraries(self):
progress("Linux-specific: Clearing runtime paths on libraries")
for dirpath, dirnames, filenames in os.walk("appleseed/lib"):
for filename in filenames:
if os.path.splitext(filename)[1] == ".so":
self.run("chrpath -d " + os.path.join(dirpath, filename))
def get_dependencies_for_file(self, filename):
returncode, out, err = self.run_subprocess(["ldd", filename])
if returncode != 0:
fatal("Failed to invoke ldd(1) to get dependencies for {0}: {1}".format(filename, err))
libs = set()
for line in out.split("\n"):
line = line.strip()
# Ignore empty lines.
if len(line) == 0:
continue
# Ignore system libs.
if self.is_system_lib(line):
continue
libs.add(line.split()[2])
return libs
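    # For reference, the ldd lines parsed above typically look like:
    #
    #     libz.so.1 => /lib/x86_64-linux-gnu/libz.so.1 (0x00007f...)
    #
    # so line.split()[2] extracts the resolved library path.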
def is_system_lib(self, lib):
for prefix in self.system_libs_prefixes:
if lib.startswith(prefix):
return True
return False
#--------------------------------------------------------------------------------------------------
# Entry point.
#--------------------------------------------------------------------------------------------------
def main():
print("appleseed.package version " + VERSION)
print("")
settings = Settings()
package_info = PackageInfo(settings)
settings.load()
package_info.load()
if os.name == "nt":
package_builder = WindowsPackageBuilder(settings, package_info)
elif os.name == "posix" and platform.mac_ver()[0] != "":
package_builder = MacPackageBuilder(settings, package_info)
elif os.name == "posix" and platform.mac_ver()[0] == "":
package_builder = LinuxPackageBuilder(settings, package_info)
else:
fatal("Unsupported platform: " + os.name)
package_builder.build_package()
if __name__ == '__main__':
main()
|
aiivashchenko/appleseed
|
scripts/appleseed.package.py
|
Python
|
mit
| 28,765
|
[
"VisIt"
] |
8f3f657ed74a636185abf348ade768d297d82c50c44d21cc92f04dd12ab43319
|
from flatdict import FlatDict
from openpnm.io import Dict, GenericIO
from openpnm.utils import logging
logger = logging.getLogger(__name__)
class HDF5(GenericIO):
r"""
    The HDF5 (Hierarchical Data Format) file is good for high-performance,
    long-term data storage.
"""
@classmethod
def export_data(cls, network=None, phases=[], element=['pore', 'throat'],
filename='', interleave=True, flatten=False, categorize_by=[]):
r"""
Creates an HDF5 file containing data from the specified objects,
and categorized according to the given arguments.
Parameters
----------
network : GenericNetwork
The network containing the desired data
        phases : list of GenericPhase objects (optional, default is an empty list)
A list of phase objects whose data are to be included
element : str or list[str]
An indication of whether 'pore' and/or 'throat' data are desired.
The default is both.
interleave : bool (default is ``True``)
When ``True`` (default) the data from all Geometry objects (and
Physics objects if ``phases`` are given) is interleaved into
a single array and stored as a network property (or Phase
property for Physics data). When ``False``, the data for each
object are stored under their own dictionary key, the structuring
of which depends on the value of the ``flatten`` argument.
        flatten : bool (default is ``False``)
When ``True``, all objects are accessible from the top level
of the dictionary. When ``False`` objects are nested under their
parent object. If ``interleave`` is ``True`` this argument is
ignored.
categorize_by : str or list[str]
Indicates how the dictionaries should be organized. The list can
contain any, all or none of the following strings:
**'objects'** : If specified the dictionary keys will be stored
under a general level corresponding to their type (e.g.
'network/net_01/pore.all'). If ``interleave`` is ``True`` then
            the only categories are *network* and *phase*, since
*geometry* and *physics* data get stored under their respective
*network* and *phase*.
**'data'** : If specified the data arrays are additionally
categorized by ``label`` and ``property`` to separate *boolean*
from *numeric* data.
**'elements'** : If specified the data arrays are additionally
categorized by ``pore`` and ``throat``, meaning that the propnames
are no longer prepended by a 'pore.' or 'throat.'
"""
from h5py import File as hdfFile
project, network, phases = cls._parse_args(network=network,
phases=phases)
if filename == '':
filename = project.name
filename = cls._parse_filename(filename, ext='hdf')
dct = Dict.to_dict(network=network, phases=phases, element=element,
interleave=interleave, flatten=flatten,
categorize_by=categorize_by)
d = FlatDict(dct, delimiter='/')
f = hdfFile(filename, "w")
for item in d.keys():
tempname = '_'.join(item.split('.'))
arr = d[item]
if d[item].dtype == 'O':
logger.warning(item + ' has dtype object, will not write to file')
del d[item]
elif 'U' in str(arr[0].dtype):
pass
else:
f.create_dataset(name='/'+tempname, shape=arr.shape,
dtype=arr.dtype, data=arr)
return f
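# A minimal usage sketch (object names are placeholders): assuming pn is a
# GenericNetwork that already holds pore/throat data, something like
#
#     f = HDF5.export_data(network=pn, filename='my_network')
#     print(list(f.keys()))  # inspect the groups that were written
#     f.close()
#
# writes the arrays to 'my_network.hdf' and returns the open h5py file handle.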
def print_hdf5(f, flat=False):
r"""
Given an hdf5 file handle, prints to console in a human readable manner
Parameters
----------
f : file handle
The hdf5 file to print
flat : bool
Flag to indicate if print should be nested or flat. The default is
``flat==False`` resulting in a nested view.
"""
if flat is False:
def print_level(f, p='', indent='-'):
for item in f.keys():
if hasattr(f[item], 'keys'):
p = print_level(f[item], p=p, indent=indent + indent[0])
                elif indent[-1] != ' ':
                    # pad leaf entries so names don't butt against the dashes
                    indent = indent + ' '
p = indent + item + '\n' + p
return(p)
p = print_level(f)
print(p)
else:
f.visit(print)
def to_hdf5(network=None, phases=[], element=['pore', 'throat'],
filename='', interleave=True, flatten=False, categorize_by=[]):
return HDF5.export_data(network=network, phases=phases, element=element,
filename=filename, interleave=interleave,
flatten=flatten, categorize_by=categorize_by)
to_hdf5.__doc__ = HDF5.export_data.__doc__
|
PMEAL/OpenPNM
|
openpnm/io/_hdf5.py
|
Python
|
mit
| 5,056
|
[
"VisIt"
] |
9a6cf76d39a2e6c75fd872840a4fd025cf92911c9f6fddbfe9539076b3d179c8
|
import numpy as np
from galaxies import Galaxies
from galaxy_meta import GalaxyMeta
import matplotlib.pyplot as plt
class GalaxyTester():
def __init__(self, data, model):
self.data = data
self.model = model
self.galaxies = Galaxies()
self.test_ids = self.galaxies.get_flattened_test_ids()
self.meta = GalaxyMeta()
def test(self):
num_test = self.data.y_test.size
num_test_classes = np.unique(self.data.y_test).size
num_train_classes = np.unique(self.data.y).size
self.p, self.var = self.model.predict_y(self.data.x_test[:num_test])
self.Y_guess = np.argmax(self.p, axis=1)
# self.all_guesses = np.zeros(num_test_classes, dtype=np.int32)
# self.wrong_guesses = np.zeros(num_test_classes, dtype=np.int32)
# self.almost_correct = np.zeros(num_test_classes, dtype=np.int32)
# self.almost_wrong = np.zeros(num_test_classes, dtype=np.int32)
# self.unique_y = np.sort(np.unique(self.data.y))
        # 3x2 grids indexed as [real class][guessed class]; each cell collects
        # per-image statistics for that (real, guess) pair.
        def make_grid():
            return [[[], []] for _ in range(3)]
        self.idx = make_grid()
        self.prob = make_grid()
        self.variances = make_grid()
        self.objid = make_grid()
        self.cs = make_grid()
        self.el = make_grid()
        self.nvote = make_grid()
        self.edge = make_grid()
        self.disk = make_grid()
        self.merge = make_grid()
        self.acw = make_grid()
        self.spiral = make_grid()
        self.elliptical = make_grid()
for real in range(0, num_test_classes):
real_idx = self.data.y_test == real
for guess in range(0, num_train_classes):
guess_idx = self.Y_guess == guess
self.idx[real][guess] = real_idx & guess_idx
self.prob[real][guess] = self.p[self.idx[real][guess],:]
self.variances[real][guess] = self.var[self.idx[real][guess],:]
self.objid[real][guess] = self.test_ids[self.idx[real][guess]]
for objid in self.objid[real][guess]:
spiral, elliptical, unc, n, ra, dec, cs, el, merge, edge, acw, cw, disk = self.meta.find_by_id(objid, real)
                    self.cs[real][guess].append(cs)
                    self.el[real][guess].append(el)
                    self.nvote[real][guess].append(n)
                    self.edge[real][guess].append(edge)
                    self.disk[real][guess].append(disk)
                    self.merge[real][guess].append(merge)
                    self.acw[real][guess].append(acw)
                    self.spiral[real][guess].append(spiral)
                    self.elliptical[real][guess].append(elliptical)
print('For real: {} guess: {} ({} images):'.format(
real, guess, len(self.nvote[real][guess])
))
print('\tprob 0: {} ({})'.format(
np.average(self.prob[real][guess][:,0]),
np.std(self.prob[real][guess][:,0])
))
print('\tprob 1: {} ({})'.format(
np.average(self.prob[real][guess][:,1]),
np.std(self.prob[real][guess][:,1])
))
print('\tnvotes: {} ({})'.format(
np.average(self.nvote[real][guess]),
np.std(self.nvote[real][guess])
))
print('\tcs: {} ({})'.format(
np.average(self.cs[real][guess]),
np.std(self.cs[real][guess])
))
print('\tel: {} ({})'.format(
np.average(self.el[real][guess]),
np.std(self.el[real][guess])
))
print('\tspiral: {} ({})'.format(
np.average(self.spiral[real][guess]),
np.std(self.spiral[real][guess])
))
print('\telliptical: {} ({})'.format(
np.average(self.elliptical[real][guess]),
np.std(self.elliptical[real][guess])
))
print('\tedge: {} ({})'.format(
np.average(self.edge[real][guess]),
np.std(self.edge[real][guess])
))
print('\tdisk: {} ({})'.format(
np.average(self.disk[real][guess]),
np.std(self.disk[real][guess])
))
print('\tmerge: {} ({})'.format(
np.average(self.merge[real][guess]),
np.std(self.merge[real][guess])
))
print('\tacw: {} ({})'.format(
np.average(self.acw[real][guess]),
np.std(self.acw[real][guess])
))
labels = {0: 'Spiral', 1:'Elliptical', 2:'Uncertain'}
real = 0
not_real = 1
probs = 0
votes = self.cs
plt.errorbar(self.prob[real][real][:,probs].flatten()*100,
np.reshape(votes[real][real], len(votes[real][real]))*100,
xerr=self.variances[real][real][:,probs].flatten()*100,
errorevery=10,
fmt='.', label=labels[real])
plt.errorbar(self.prob[real][not_real][:,probs].flatten()*100,
np.reshape(votes[real][not_real], len(votes[real][not_real]))*100,
xerr=self.variances[real][not_real][:,probs].flatten()*100,
errorevery=10,
fmt='.', label=labels[not_real])
plt.title('Comparison of GP Model Classification Probability'+
                  '\nWith Galaxy Zoo Votes for ' + labels[real] + ' Galaxies')
plt.xlabel('GP Model ' + labels[probs] + ' Probability (%)')
plt.ylabel('Galaxy Zoo ' + labels[real] + ' Votes (%)')
fig = plt.gcf()
fig.savefig(labels[real] + '-' + labels[probs]+'.eps')
plt.legend(loc='lower right')
plt.show()
# plt.plot(self.prob[0][0][:,1].flatten()*100, np.reshape(self.el[0][0], len(self.el[0][0]))*100, '.', label='Spiral')
# plt.plot(self.prob[0][1][:,1].flatten()*100, np.reshape(self.el[0][1], len(self.el[0][1]))*100, '.', label='Elliptical')
# plt.title('Comparison of GP Model Classification Probability'+
        # '\nWith Galaxy Zoo Votes for Elliptical Galaxies')
# plt.xlabel('GP Model Elliptical Probability (%)')
# plt.ylabel('Galaxy Zoo Elliptical Votes (%)')
# fig = plt.gcf()
# fig.savefig('spiral-elliptical.eps')
# plt.legend()
# plt.show()
# plt.plot(self.prob[1][0][:,0].flatten()*100, np.reshape(self.cs[1][0], len(self.cs[1][0]))*100, '.', label='Spiral')
# plt.plot(self.prob[1][1][:,0].flatten()*100, np.reshape(self.cs[1][1], len(self.cs[1][1]))*100, '.', label='Elliptical')
# plt.title('Comparison of GP Model Classification Probability'+
        # '\nWith Galaxy Zoo Votes for Spiral Galaxies')
# plt.xlabel('GP Model Spiral Probability (%)')
# plt.ylabel('Galaxy Zoo Spiral Votes (%)')
# fig = plt.gcf()
# fig.savefig('elliptical-spiral.eps')
# plt.legend()
# plt.show()
real = 1
not_real = 0
probs = 1
votes = self.el
plt.errorbar(self.prob[real][real][:,probs].flatten()*100,
np.reshape(votes[real][real], len(votes[real][real]))*100,
xerr=self.variances[real][real][:,probs].flatten()*100,
errorevery=10,
fmt='.', label=labels[real])
plt.errorbar(self.prob[real][not_real][:,probs].flatten()*100,
np.reshape(votes[real][not_real], len(votes[real][not_real]))*100,
xerr=self.variances[real][not_real][:,probs].flatten()*100,
errorevery=10,
fmt='.', label=labels[not_real])
plt.title('Comparison of GP Model Classification Probability'+
                  '\nWith Galaxy Zoo Votes for ' + labels[real] + ' Galaxies')
plt.xlabel('GP Model ' + labels[probs] + ' Probability (%)')
plt.ylabel('Galaxy Zoo ' + labels[real] + ' Votes (%)')
fig = plt.gcf()
fig.savefig(labels[real] + '-' + labels[probs]+'.eps')
plt.legend(loc='lower right')
plt.show()
real = 2
not_probs = 0
probs = 1
votes = self.el
plt.errorbar(self.prob[real][probs][:,probs].flatten()*100,
np.reshape(votes[real][probs], len(votes[real][probs]))*100,
xerr=self.variances[real][probs][:,probs].flatten()*100,
errorevery=10,
fmt='.', label=labels[real])
plt.errorbar(self.prob[real][not_probs][:,probs].flatten()*100,
np.reshape(votes[real][not_probs], len(votes[real][not_probs]))*100,
xerr=self.variances[real][not_probs][:,probs].flatten()*100,
errorevery=10,
fmt='.', label=labels[not_probs])
plt.title('Comparison of GP Model Classification Probability'+
                  '\nWith Galaxy Zoo Votes for ' + labels[real] + ' Galaxies')
plt.xlabel('GP Model ' + labels[probs] + ' Probability (%)')
plt.ylabel('Galaxy Zoo ' + labels[real] + ' Votes (%)')
fig = plt.gcf()
fig.savefig(labels[real] + '-' + labels[probs]+'.eps')
plt.legend(loc='lower right')
plt.show()
# def plot_ratios(real, numerator, title):
# labels = {0: 'Spiral', 1:'Elliptical', 2:'Uncertain'}
# denom = 0
# if (numerator == 0):
# denom = 1
# plt.plot(self.prob[real][real]/self.prob[real][])
|
MorganR/gaussian-processes
|
galaxy_tester.py
|
Python
|
mit
| 10,084
|
[
"Galaxy"
] |
0e943c9cbe62bac3981ca55f789a2c6070f7b78734e84cea84fefd21fc1538ea
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Full CI solver for spin-free Hamiltonian. This solver can be used to compute
doublet, triplet,...
The CI wfn are stored as a 2D array [alpha,beta], where each row corresponds
to an alpha string. For each row (alpha string), there are
total-num-beta-strings of columns. Each column corresponds to a beta string.
Different FCI solvers are implemented to support different types of symmetry.
Symmetry
File Point group Spin singlet Real hermitian* Alpha/beta degeneracy
direct_spin0_symm Yes Yes Yes Yes
direct_spin1_symm Yes No Yes Yes
direct_spin0 No Yes Yes Yes
direct_spin1 No No Yes Yes
direct_uhf No No Yes No
direct_nosym No No No** Yes
* Real hermitian Hamiltonian implies (ij|kl) = (ji|kl) = (ij|lk) = (ji|lk)
** Hamiltonian is real but not hermitian, (ij|kl) != (ji|kl) ...
'''
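# For example, with norb = 4 orbitals and nelec = (2, 2) there are
# C(4, 2) = 6 alpha strings and 6 beta strings, so the CI vector described
# above is stored as a (6, 6) array:
#
#     na = cistring.num_strings(4, 2)  # 6
#     nb = cistring.num_strings(4, 2)  # 6
#     fcivec = numpy.zeros((na, nb))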
import sys
import ctypes
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.fci import cistring
from pyscf.fci import rdm
from pyscf.fci import spin_op
from pyscf.fci import addons
from pyscf.fci.spin_op import contract_ss
from pyscf.fci.addons import _unpack_nelec
from pyscf import __config__
libfci = lib.load_library('libfci')
def contract_1e(f1e, fcivec, norb, nelec, link_index=None):
'''Contract the 1-electron Hamiltonian with a FCI vector to get a new FCI
vector.
'''
fcivec = numpy.asarray(fcivec, order='C')
link_indexa, link_indexb = _unpack(norb, nelec, link_index)
na, nlinka = link_indexa.shape[:2]
nb, nlinkb = link_indexb.shape[:2]
assert(fcivec.size == na*nb)
f1e_tril = lib.pack_tril(f1e)
ci1 = numpy.zeros_like(fcivec)
libfci.FCIcontract_a_1e(f1e_tril.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ci1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
link_indexa.ctypes.data_as(ctypes.c_void_p),
link_indexb.ctypes.data_as(ctypes.c_void_p))
libfci.FCIcontract_b_1e(f1e_tril.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ci1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
link_indexa.ctypes.data_as(ctypes.c_void_p),
link_indexb.ctypes.data_as(ctypes.c_void_p))
return ci1
def contract_2e(eri, fcivec, norb, nelec, link_index=None):
r'''Contract the 4-index tensor eri[pqrs] with a FCI vector
.. math::
|output\rangle = E_{pq} E_{rs} eri_{pq,rs} |CI\rangle \\
E_{pq}E_{rs} = E_{pr,qs} + \delta_{qr} E_{ps} \\
E_{pq} = p^+ q + \bar{p}^+ \bar{q}
E_{pr,qs} = p^+ r^+ s q + \bar{p}^+ r^+ s \bar{q} + ...
:math:`p,q,...` means spin-up orbitals and :math:`\bar{p}, \bar{q}` means
spin-down orbitals.
Note the input argument eri is NOT the 2e hamiltonian tensor. 2e hamiltonian is
.. math::
h2e &= (pq|rs) E_{pr,qs} \\
&= (pq|rs) (E_{pq}E_{rs} - \delta_{qr} E_{ps}) \\
&= eri_{pq,rs} E_{pq}E_{rs} \\
So the relation between eri and hamiltonian (the 2e-integral tensor) is
.. math::
eri_{pq,rs} = (pq|rs) - (1/Nelec) \sum_q (pq|qs)
to restore the symmetry between pq and rs,
.. math::
eri_{pq,rs} = (pq|rs) - (.5/Nelec) [\sum_q (pq|qs) + \sum_p (pq|rp)]
See also :func:`direct_spin1.absorb_h1e`
'''
fcivec = numpy.asarray(fcivec, order='C')
eri = ao2mo.restore(4, eri, norb)
link_indexa, link_indexb = _unpack(norb, nelec, link_index)
na, nlinka = link_indexa.shape[:2]
nb, nlinkb = link_indexb.shape[:2]
assert(fcivec.size == na*nb)
ci1 = numpy.empty_like(fcivec)
libfci.FCIcontract_2e_spin1(eri.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ci1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
link_indexa.ctypes.data_as(ctypes.c_void_p),
link_indexb.ctypes.data_as(ctypes.c_void_p))
return ci1
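# Typical usage (sketch): contract_2e expects the "absorbed" integrals rather
# than the raw (pq|rs) tensor, so a Hamiltonian-vector product is formed as
#
#     h2e = absorb_h1e(h1e, eri, norb, nelec, .5)
#     hc = contract_2e(h2e, fcivec, norb, nelec)
#
# which is exactly what energy() and kernel_ms1() below do.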
def make_hdiag(h1e, eri, norb, nelec):
'''Diagonal Hamiltonian for Davidson preconditioner
'''
neleca, nelecb = _unpack_nelec(nelec)
h1e = numpy.asarray(h1e, order='C')
eri = ao2mo.restore(1, eri, norb)
occslsta = occslstb = cistring._gen_occslst(range(norb), neleca)
if neleca != nelecb:
occslstb = cistring._gen_occslst(range(norb), nelecb)
na = len(occslsta)
nb = len(occslstb)
hdiag = numpy.empty(na*nb)
jdiag = numpy.asarray(numpy.einsum('iijj->ij',eri), order='C')
kdiag = numpy.asarray(numpy.einsum('ijji->ij',eri), order='C')
c_h1e = h1e.ctypes.data_as(ctypes.c_void_p)
c_jdiag = jdiag.ctypes.data_as(ctypes.c_void_p)
c_kdiag = kdiag.ctypes.data_as(ctypes.c_void_p)
libfci.FCImake_hdiag_uhf(hdiag.ctypes.data_as(ctypes.c_void_p),
c_h1e, c_h1e, c_jdiag, c_jdiag, c_jdiag, c_kdiag, c_kdiag,
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(neleca), ctypes.c_int(nelecb),
occslsta.ctypes.data_as(ctypes.c_void_p),
occslstb.ctypes.data_as(ctypes.c_void_p))
return hdiag
def absorb_h1e(h1e, eri, norb, nelec, fac=1):
'''Modify 2e Hamiltonian to include 1e Hamiltonian contribution.
'''
if not isinstance(nelec, (int, numpy.number)):
nelec = sum(nelec)
h2e = ao2mo.restore(1, eri.copy(), norb)
f1e = h1e - numpy.einsum('jiik->jk', h2e) * .5
f1e = f1e * (1./(nelec+1e-100))
for k in range(norb):
h2e[k,k,:,:] += f1e
h2e[:,:,k,k] += f1e
return ao2mo.restore(4, h2e, norb) * fac
def pspace(h1e, eri, norb, nelec, hdiag=None, np=400):
    '''pspace Hamiltonian to improve the Davidson preconditioner. See CPL 169, 463.
'''
if norb > 63:
raise NotImplementedError('norb > 63')
neleca, nelecb = _unpack_nelec(nelec)
h1e = numpy.ascontiguousarray(h1e)
eri = ao2mo.restore(1, eri, norb)
nb = cistring.num_strings(norb, nelecb)
if hdiag is None:
hdiag = make_hdiag(h1e, eri, norb, nelec)
if hdiag.size < np:
addr = numpy.arange(hdiag.size)
else:
try:
addr = numpy.argpartition(hdiag, np-1)[:np].copy()
except AttributeError:
addr = numpy.argsort(hdiag)[:np].copy()
addra, addrb = divmod(addr, nb)
stra = cistring.addrs2str(norb, neleca, addra)
strb = cistring.addrs2str(norb, nelecb, addrb)
np = len(addr)
h0 = numpy.zeros((np,np))
libfci.FCIpspace_h0tril(h0.ctypes.data_as(ctypes.c_void_p),
h1e.ctypes.data_as(ctypes.c_void_p),
eri.ctypes.data_as(ctypes.c_void_p),
stra.ctypes.data_as(ctypes.c_void_p),
strb.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(np))
HERMITIAN_THRESHOLD = 1e-10
if (abs(h1e - h1e.T).max() < HERMITIAN_THRESHOLD and
abs(eri - eri.transpose(1,0,3,2)).max() < HERMITIAN_THRESHOLD):
# symmetric Hamiltonian
h0 = lib.hermi_triu(h0)
else:
# Fill the upper triangular part
h0 = numpy.asarray(h0, order='F')
h1e = numpy.asarray(h1e.T, order='C')
eri = numpy.asarray(eri.transpose(1,0,3,2), order='C')
libfci.FCIpspace_h0tril(h0.ctypes.data_as(ctypes.c_void_p),
h1e.ctypes.data_as(ctypes.c_void_p),
eri.ctypes.data_as(ctypes.c_void_p),
stra.ctypes.data_as(ctypes.c_void_p),
strb.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(np))
idx = numpy.arange(np)
h0[idx,idx] = hdiag[addr]
return addr, h0
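# The (addr, h0) pair returned above feeds make_pspace_precond() further down:
# addr indexes the selected determinants within the full CI vector and h0 is
# the dense Hamiltonian block over those determinants, which kernel_ms1()
# diagonalizes to build the preconditioner.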
# Be careful with a single-determinant initial guess: it may cause the
# preconditioner to diverge when the eigenvalue of the first Davidson
# iteration equals an element of hdiag.
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
ecore=0, **kwargs):
return _kfactory(FCISolver, h1e, eri, norb, nelec, ci0, level_shift,
tol, lindep, max_cycle, max_space, nroots,
davidson_only, pspace_size, ecore=ecore, **kwargs)
def _kfactory(Solver, h1e, eri, norb, nelec, ci0=None, level_shift=1e-3,
tol=1e-10, lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
davidson_only=False, pspace_size=400, ecore=0, **kwargs):
cis = Solver(None)
cis.level_shift = level_shift
cis.conv_tol = tol
cis.lindep = lindep
cis.max_cycle = max_cycle
cis.max_space = max_space
cis.nroots = nroots
cis.davidson_only = davidson_only
cis.pspace_size = pspace_size
unknown = {}
for k in kwargs:
if not hasattr(cis, k):
unknown[k] = kwargs[k]
setattr(cis, k, kwargs[k])
if unknown:
sys.stderr.write('Unknown keys %s for FCI kernel %s\n' %
(str(unknown.keys()), __name__))
e, c = cis.kernel(h1e, eri, norb, nelec, ci0, ecore=ecore, **unknown)
return e, c
def energy(h1e, eri, fcivec, norb, nelec, link_index=None):
'''Compute the FCI electronic energy for given Hamiltonian and FCI vector.
'''
h2e = absorb_h1e(h1e, eri, norb, nelec, .5)
ci1 = contract_2e(h2e, fcivec, norb, nelec, link_index)
return numpy.dot(fcivec.reshape(-1), ci1.reshape(-1))
def make_rdm1s(fcivec, norb, nelec, link_index=None):
'''Spin separated 1-particle density matrices.
The return values include two density matrices: (alpha,alpha), (beta,beta)
dm1[p,q] = <q^\dagger p>
    The convention is based on McWeeny's book, Eq (5.4.20).
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
if link_index is None:
neleca, nelecb = _unpack_nelec(nelec)
link_indexa = cistring.gen_linkstr_index(range(norb), neleca)
link_indexb = cistring.gen_linkstr_index(range(norb), nelecb)
link_index = (link_indexa, link_indexb)
rdm1a = rdm.make_rdm1_spin1('FCImake_rdm1a', fcivec, fcivec,
norb, nelec, link_index)
rdm1b = rdm.make_rdm1_spin1('FCImake_rdm1b', fcivec, fcivec,
norb, nelec, link_index)
return rdm1a, rdm1b
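# A quick sanity check of the convention above (sketch): for a normalized
# fcivec the trace of each spin component equals the corresponding electron
# count, e.g.
#
#     dm1a, dm1b = make_rdm1s(fcivec, norb, nelec)
#     neleca, nelecb = _unpack_nelec(nelec)
#     assert abs(numpy.trace(dm1a) - neleca) < 1e-9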
def make_rdm1(fcivec, norb, nelec, link_index=None):
'''Spin-traced one-particle density matrix
dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>
    The convention is based on McWeeny's book, Eq (5.4.20)
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
rdm1a, rdm1b = make_rdm1s(fcivec, norb, nelec, link_index)
return rdm1a + rdm1b
def make_rdm12s(fcivec, norb, nelec, link_index=None, reorder=True):
r'''Spin separated 1- and 2-particle density matrices.
The return values include two lists, a list of 1-particle density matrices
and a list of 2-particle density matrices. The density matrices are:
(alpha,alpha), (beta,beta) for 1-particle density matrices;
(alpha,alpha,alpha,alpha), (alpha,alpha,beta,beta),
(beta,beta,beta,beta) for 2-particle density matrices.
1pdm[p,q] = :math:`\langle q^\dagger p\rangle`;
2pdm[p,q,r,s] = :math:`\langle p^\dagger r^\dagger s q\rangle`.
Energy should be computed as
E = einsum('pq,qp', h1, 1pdm) + 1/2 * einsum('pqrs,pqrs', eri, 2pdm)
where h1[p,q] = <p|h|q> and eri[p,q,r,s] = (pq|rs)
'''
dm1a, dm2aa = rdm.make_rdm12_spin1('FCIrdm12kern_a', fcivec, fcivec,
norb, nelec, link_index, 1)
dm1b, dm2bb = rdm.make_rdm12_spin1('FCIrdm12kern_b', fcivec, fcivec,
norb, nelec, link_index, 1)
_, dm2ab = rdm.make_rdm12_spin1('FCItdm12kern_ab', fcivec, fcivec,
norb, nelec, link_index, 0)
if reorder:
dm1a, dm2aa = rdm.reorder_rdm(dm1a, dm2aa, inplace=True)
dm1b, dm2bb = rdm.reorder_rdm(dm1b, dm2bb, inplace=True)
return (dm1a, dm1b), (dm2aa, dm2ab, dm2bb)
def make_rdm12(fcivec, norb, nelec, link_index=None, reorder=True):
r'''Spin traced 1- and 2-particle density matrices.
1pdm[p,q] = :math:`\langle q_\alpha^\dagger p_\alpha \rangle +
\langle q_\beta^\dagger p_\beta \rangle`;
2pdm[p,q,r,s] = :math:`\langle p_\alpha^\dagger r_\alpha^\dagger s_\alpha q_\alpha\rangle +
\langle p_\beta^\dagger r_\alpha^\dagger s_\alpha q_\beta\rangle +
\langle p_\alpha^\dagger r_\beta^\dagger s_\beta q_\alpha\rangle +
\langle p_\beta^\dagger r_\beta^\dagger s_\beta q_\beta\rangle`.
Energy should be computed as
E = einsum('pq,qp', h1, 1pdm) + 1/2 * einsum('pqrs,pqrs', eri, 2pdm)
where h1[p,q] = <p|h|q> and eri[p,q,r,s] = (pq|rs)
'''
#(dm1a, dm1b), (dm2aa, dm2ab, dm2bb) = \
# make_rdm12s(fcivec, norb, nelec, link_index, reorder)
#return dm1a+dm1b, dm2aa+dm2ab+dm2ab.transpose(2,3,0,1)+dm2bb
dm1, dm2 = rdm.make_rdm12_spin1('FCIrdm12kern_sf', fcivec, fcivec,
norb, nelec, link_index, 1)
if reorder:
dm1, dm2 = rdm.reorder_rdm(dm1, dm2, inplace=True)
return dm1, dm2
def trans_rdm1s(cibra, ciket, norb, nelec, link_index=None):
r'''Spin separated transition 1-particle density matrices.
The return values include two density matrices: (alpha,alpha), (beta,beta).
See also function :func:`make_rdm1s`
1pdm[p,q] = :math:`\langle q^\dagger p \rangle`
'''
rdm1a = rdm.make_rdm1_spin1('FCItrans_rdm1a', cibra, ciket,
norb, nelec, link_index)
rdm1b = rdm.make_rdm1_spin1('FCItrans_rdm1b', cibra, ciket,
norb, nelec, link_index)
return rdm1a, rdm1b
def trans_rdm1(cibra, ciket, norb, nelec, link_index=None):
r'''Spin traced transition 1-particle transition density matrices.
1pdm[p,q] = :math:`\langle q_\alpha^\dagger p_\alpha \rangle
+ \langle q_\beta^\dagger p_\beta \rangle`
'''
rdm1a, rdm1b = trans_rdm1s(cibra, ciket, norb, nelec, link_index)
return rdm1a + rdm1b
def trans_rdm12s(cibra, ciket, norb, nelec, link_index=None, reorder=True):
r'''Spin separated 1- and 2-particle transition density matrices.
The return values include two lists, a list of 1-particle transition
density matrices and a list of 2-particle transition density matrices.
The density matrices are:
(alpha,alpha), (beta,beta) for 1-particle transition density matrices;
(alpha,alpha,alpha,alpha), (alpha,alpha,beta,beta),
(beta,beta,alpha,alpha), (beta,beta,beta,beta) for 2-particle transition
density matrices.
1pdm[p,q] = :math:`\langle q^\dagger p\rangle`;
2pdm[p,q,r,s] = :math:`\langle p^\dagger r^\dagger s q\rangle`.
'''
dm1a, dm2aa = rdm.make_rdm12_spin1('FCItdm12kern_a', cibra, ciket,
norb, nelec, link_index, 2)
dm1b, dm2bb = rdm.make_rdm12_spin1('FCItdm12kern_b', cibra, ciket,
norb, nelec, link_index, 2)
_, dm2ab = rdm.make_rdm12_spin1('FCItdm12kern_ab', cibra, ciket,
norb, nelec, link_index, 0)
_, dm2ba = rdm.make_rdm12_spin1('FCItdm12kern_ab', ciket, cibra,
norb, nelec, link_index, 0)
dm2ba = dm2ba.transpose(3,2,1,0)
if reorder:
dm1a, dm2aa = rdm.reorder_rdm(dm1a, dm2aa, inplace=True)
dm1b, dm2bb = rdm.reorder_rdm(dm1b, dm2bb, inplace=True)
return (dm1a, dm1b), (dm2aa, dm2ab, dm2ba, dm2bb)
def trans_rdm12(cibra, ciket, norb, nelec, link_index=None, reorder=True):
r'''Spin traced transition 1- and 2-particle transition density matrices.
1pdm[p,q] = :math:`\langle q^\dagger p\rangle`;
2pdm[p,q,r,s] = :math:`\langle p^\dagger r^\dagger s q\rangle`.
'''
#(dm1a, dm1b), (dm2aa, dm2ab, dm2ba, dm2bb) = \
# trans_rdm12s(cibra, ciket, norb, nelec, link_index, reorder)
#return dm1a+dm1b, dm2aa+dm2ab+dm2ba+dm2bb
dm1, dm2 = rdm.make_rdm12_spin1('FCItdm12kern_sf', cibra, ciket,
norb, nelec, link_index, 2)
if reorder:
dm1, dm2 = rdm.reorder_rdm(dm1, dm2, inplace=True)
return dm1, dm2
def _get_init_guess(na, nb, nroots, hdiag):
'''Initial guess is the single Slater determinant
'''
# The "nroots" lowest determinats based on energy expectation value.
ci0 = []
try:
addrs = numpy.argpartition(hdiag, nroots-1)[:nroots]
except AttributeError:
addrs = numpy.argsort(hdiag)[:nroots]
for addr in addrs:
x = numpy.zeros((na*nb))
x[addr] = 1
ci0.append(x.ravel())
# Add noise
ci0[0][0 ] += 1e-5
ci0[0][-1] -= 1e-5
return ci0
def get_init_guess(norb, nelec, nroots, hdiag):
'''Initial guess is the single Slater determinant
'''
neleca, nelecb = _unpack_nelec(nelec)
na = cistring.num_strings(norb, neleca)
nb = cistring.num_strings(norb, nelecb)
return _get_init_guess(na, nb, nroots, hdiag)
###############################################################
# direct-CI driver
###############################################################
def kernel_ms1(fci, h1e, eri, norb, nelec, ci0=None, link_index=None,
tol=None, lindep=None, max_cycle=None, max_space=None,
nroots=None, davidson_only=None, pspace_size=None,
max_memory=None, verbose=None, ecore=0, **kwargs):
if nroots is None: nroots = fci.nroots
if davidson_only is None: davidson_only = fci.davidson_only
if pspace_size is None: pspace_size = fci.pspace_size
if max_memory is None:
max_memory = fci.max_memory - lib.current_memory()[0]
log = logger.new_logger(fci, verbose)
nelec = _unpack_nelec(nelec, fci.spin)
assert(0 <= nelec[0] <= norb and 0 <= nelec[1] <= norb)
link_indexa, link_indexb = _unpack(norb, nelec, link_index)
na = link_indexa.shape[0]
nb = link_indexb.shape[0]
if max_memory < na*nb*6*8e-6:
log.warn('Not enough memory for FCI solver. '
'The minimal requirement is %.0f MB', na*nb*60e-6)
hdiag = fci.make_hdiag(h1e, eri, norb, nelec)
nroots = min(hdiag.size, nroots)
try:
addr, h0 = fci.pspace(h1e, eri, norb, nelec, hdiag, max(pspace_size,nroots))
if pspace_size > 0:
pw, pv = fci.eig(h0)
else:
pw = pv = None
if pspace_size >= na*nb and ci0 is None and not davidson_only:
            # Degenerate wavefunctions can break symmetry. The Davidson
            # iteration with a proper initial guess doesn't have this issue.
if na*nb == 1:
return pw[0]+ecore, pv[:,0].reshape(1,1)
elif nroots > 1:
civec = numpy.empty((nroots,na*nb))
civec[:,addr] = pv[:,:nroots].T
return pw[:nroots]+ecore, [c.reshape(na,nb) for c in civec]
elif abs(pw[0]-pw[1]) > 1e-12:
civec = numpy.empty((na*nb))
civec[addr] = pv[:,0]
return pw[0]+ecore, civec.reshape(na,nb)
except NotImplementedError:
addr = [0]
pw = pv = None
precond = fci.make_precond(hdiag, pw, pv, addr)
h2e = fci.absorb_h1e(h1e, eri, norb, nelec, .5)
def hop(c):
hc = fci.contract_2e(h2e, c, norb, nelec, (link_indexa,link_indexb))
return hc.ravel()
if ci0 is None:
if callable(getattr(fci, 'get_init_guess', None)):
ci0 = lambda: fci.get_init_guess(norb, nelec, nroots, hdiag)
else:
def ci0(): # lazy initialization to reduce memory footprint
x0 = []
for i in range(nroots):
x = numpy.zeros(na*nb)
x[addr[i]] = 1
x0.append(x)
return x0
elif not callable(ci0):
if isinstance(ci0, numpy.ndarray) and ci0.size == na*nb:
ci0 = [ci0.ravel()]
else:
ci0 = [x.ravel() for x in ci0]
# Add vectors if not enough initial guess is given
if len(ci0) < nroots:
if callable(getattr(fci, 'get_init_guess', None)):
ci0.extend(fci.get_init_guess(norb, nelec, nroots, hdiag)[len(ci0):])
else:
for i in range(len(ci0), nroots):
x = numpy.zeros(na*nb)
x[addr[i]] = 1
ci0.append(x)
if tol is None: tol = fci.conv_tol
if lindep is None: lindep = fci.lindep
if max_cycle is None: max_cycle = fci.max_cycle
if max_space is None: max_space = fci.max_space
tol_residual = getattr(fci, 'conv_tol_residual', None)
with lib.with_omp_threads(fci.threads):
#e, c = lib.davidson(hop, ci0, precond, tol=fci.conv_tol, lindep=fci.lindep)
e, c = fci.eig(hop, ci0, precond, tol=tol, lindep=lindep,
max_cycle=max_cycle, max_space=max_space, nroots=nroots,
max_memory=max_memory, verbose=log, follow_state=True,
tol_residual=tol_residual, **kwargs)
if nroots > 1:
return e+ecore, [ci.reshape(na,nb) for ci in c]
else:
return e+ecore, c.reshape(na,nb)
def make_pspace_precond(hdiag, pspaceig, pspaceci, addr, level_shift=0):
# precondition with pspace Hamiltonian, CPL, 169, 463
def precond(r, e0, x0, *args):
#h0e0 = h0 - numpy.eye(len(addr))*(e0-level_shift)
h0e0inv = numpy.dot(pspaceci/(pspaceig-(e0-level_shift)), pspaceci.T)
hdiaginv = 1/(hdiag - (e0-level_shift))
hdiaginv[abs(hdiaginv)>1e8] = 1e8
h0x0 = x0 * hdiaginv
#h0x0[addr] = numpy.linalg.solve(h0e0, x0[addr])
h0x0[addr] = numpy.dot(h0e0inv, x0[addr])
h0r = r * hdiaginv
#h0r[addr] = numpy.linalg.solve(h0e0, r[addr])
h0r[addr] = numpy.dot(h0e0inv, r[addr])
e1 = numpy.dot(x0, h0r) / numpy.dot(x0, h0x0)
x1 = r - e1*x0
#pspace_x1 = x1[addr].copy()
x1 *= hdiaginv
# pspace (h0-e0)^{-1} cause diverging?
#x1[addr] = numpy.linalg.solve(h0e0, pspace_x1)
return x1
return precond
def make_diag_precond(hdiag, pspaceig, pspaceci, addr, level_shift=0):
return lib.make_diag_precond(hdiag, level_shift)
class FCISolver(lib.StreamObject):
'''Full CI solver
Attributes:
verbose : int
Print level. Default value equals to :class:`Mole.verbose`.
max_cycle : int
Total number of iterations. Default is 100
        max_space : int
            Davidson iteration space size. Default is 12.
conv_tol : float
Energy convergence tolerance. Default is 1e-10.
level_shift : float
Level shift applied in the preconditioner to avoid singularity.
Default is 1e-3
davidson_only : bool
By default, the entire Hamiltonian matrix will be constructed and
diagonalized if the system is small (see attribute pspace_size).
Setting this parameter to True will enforce the eigenvalue
problems being solved by Davidson subspace algorithm. This flag
should be enabled when initial guess is given or particular spin
symmetry or point-group symmetry is required because the initial
            guess or symmetry are completely ignored in the direct diagonalization.
pspace_size : int
The dimension of Hamiltonian matrix over which Davidson iteration
algorithm will be used for the eigenvalue problem. Default is 400.
            This roughly corresponds to a (6e,6o) system.
nroots : int
Number of states to be solved. Default is 1, the ground state.
spin : int or None
Spin (2S = nalpha-nbeta) of the system. If this attribute is None,
spin will be determined by the argument nelec (number of electrons)
of the kernel function.
wfnsym : str or int
Symmetry of wavefunction. It is used only in direct_spin1_symm
and direct_spin0_symm solver.
Saved results
eci : float or a list of float
FCI energy(ies)
        ci : ndarray
FCI wfn vector(s)
converged : bool (or a list of bool for multiple roots)
Whether davidson iteration is converged
Examples:
>>> from pyscf import gto, scf, ao2mo, fci
>>> mol = gto.M(atom='Li 0 0 0; Li 0 0 1', basis='sto-3g')
>>> mf = scf.RHF(mol).run()
>>> h1 = mf.mo_coeff.T.dot(mf.get_hcore()).dot(mf.mo_coeff)
>>> eri = ao2mo.kernel(mol, mf.mo_coeff)
>>> cisolver = fci.direct_spin1.FCI(mol)
>>> e, ci = cisolver.kernel(h1, eri, h1.shape[1], mol.nelec, ecore=mol.energy_nuc())
>>> print(e)
-14.4197890826
'''
max_cycle = getattr(__config__, 'fci_direct_spin1_FCI_max_cycle', 100)
max_space = getattr(__config__, 'fci_direct_spin1_FCI_max_space', 12)
conv_tol = getattr(__config__, 'fci_direct_spin1_FCI_conv_tol', 1e-10)
conv_tol_residual = getattr(__config__, 'fci_direct_spin1_FCI_conv_tol_residual', None)
lindep = getattr(__config__, 'fci_direct_spin1_FCI_lindep', 1e-14)
# level shift in precond
level_shift = getattr(__config__, 'fci_direct_spin1_FCI_level_shift', 1e-3)
    # Force the diagonalization to use Davidson iteration. When the CI space
    # is small, the solver exactly diagonalizes the Hamiltonian, but that
    # solution ignores the initial guess. Setting davidson_only enforces
    # solving on the initial guess state.
davidson_only = getattr(__config__, 'fci_direct_spin1_FCI_davidson_only', False)
pspace_size = getattr(__config__, 'fci_direct_spin1_FCI_pspace_size', 400)
threads = getattr(__config__, 'fci_direct_spin1_FCI_threads', None)
lessio = getattr(__config__, 'fci_direct_spin1_FCI_lessio', False)
def __init__(self, mol=None):
if mol is None:
self.stdout = sys.stdout
self.verbose = logger.NOTE
self.max_memory = lib.param.MAX_MEMORY
else:
self.stdout = mol.stdout
self.verbose = mol.verbose
self.max_memory = mol.max_memory
self.mol = mol
self.nroots = 1
self.spin = None
# Initialize symmetry attributes for the compatibility with direct_spin1_symm
# solver. They are not used by direct_spin1 solver.
self.orbsym = None
self.wfnsym = None
self.converged = False
self.norb = None
self.nelec = None
self.eci = None
self.ci = None
keys = set(('max_cycle', 'max_space', 'conv_tol', 'lindep',
'level_shift', 'davidson_only', 'pspace_size', 'threads',
'lessio'))
self._keys = set(self.__dict__.keys()).union(keys)
@property
def e_tot(self):
return self.eci
@property
def nstates(self):
return self.nroots
@nstates.setter
def nstates(self, x):
self.nroots = x
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('******** %s ********', self.__class__)
log.info('max. cycles = %d', self.max_cycle)
log.info('conv_tol = %g', self.conv_tol)
log.info('davidson only = %s', self.davidson_only)
log.info('linear dependence = %g', self.lindep)
log.info('level shift = %g', self.level_shift)
log.info('max iter space = %d', self.max_space)
log.info('max_memory %d MB', self.max_memory)
log.info('nroots = %d', self.nroots)
log.info('pspace_size = %d', self.pspace_size)
log.info('spin = %s', self.spin)
return self
@lib.with_doc(absorb_h1e.__doc__)
def absorb_h1e(self, h1e, eri, norb, nelec, fac=1):
nelec = _unpack_nelec(nelec, self.spin)
return absorb_h1e(h1e, eri, norb, nelec, fac)
@lib.with_doc(make_hdiag.__doc__)
def make_hdiag(self, h1e, eri, norb, nelec):
nelec = _unpack_nelec(nelec, self.spin)
return make_hdiag(h1e, eri, norb, nelec)
@lib.with_doc(pspace.__doc__)
def pspace(self, h1e, eri, norb, nelec, hdiag=None, np=400):
nelec = _unpack_nelec(nelec, self.spin)
return pspace(h1e, eri, norb, nelec, hdiag, np)
@lib.with_doc(contract_1e.__doc__)
def contract_1e(self, f1e, fcivec, norb, nelec, link_index=None, **kwargs):
nelec = _unpack_nelec(nelec, self.spin)
return contract_1e(f1e, fcivec, norb, nelec, link_index, **kwargs)
@lib.with_doc(contract_2e.__doc__)
def contract_2e(self, eri, fcivec, norb, nelec, link_index=None, **kwargs):
nelec = _unpack_nelec(nelec, self.spin)
return contract_2e(eri, fcivec, norb, nelec, link_index, **kwargs)
def eig(self, op, x0=None, precond=None, **kwargs):
if isinstance(op, numpy.ndarray):
self.converged = True
return scipy.linalg.eigh(op)
self.converged, e, ci = \
lib.davidson1(lambda xs: [op(x) for x in xs],
x0, precond, lessio=self.lessio, **kwargs)
if kwargs['nroots'] == 1:
self.converged = self.converged[0]
e = e[0]
ci = ci[0]
return e, ci
def make_precond(self, hdiag, pspaceig, pspaceci, addr):
if pspaceig is None:
return make_diag_precond(hdiag, pspaceig, pspaceci, addr,
self.level_shift)
else:
return make_pspace_precond(hdiag, pspaceig, pspaceci, addr,
self.level_shift)
@lib.with_doc(get_init_guess.__doc__)
def get_init_guess(self, norb, nelec, nroots, hdiag):
return get_init_guess(norb, nelec, nroots, hdiag)
def kernel(self, h1e, eri, norb, nelec, ci0=None,
tol=None, lindep=None, max_cycle=None, max_space=None,
nroots=None, davidson_only=None, pspace_size=None,
orbsym=None, wfnsym=None, ecore=0, **kwargs):
if self.verbose >= logger.WARN:
self.check_sanity()
self.norb = norb
self.nelec = nelec
self.eci, self.ci = \
kernel_ms1(self, h1e, eri, norb, nelec, ci0, None,
tol, lindep, max_cycle, max_space, nroots,
davidson_only, pspace_size, ecore=ecore, **kwargs)
return self.eci, self.ci
@lib.with_doc(energy.__doc__)
def energy(self, h1e, eri, fcivec, norb, nelec, link_index=None):
nelec = _unpack_nelec(nelec, self.spin)
h2e = self.absorb_h1e(h1e, eri, norb, nelec, .5)
ci1 = self.contract_2e(h2e, fcivec, norb, nelec, link_index)
return numpy.dot(fcivec.reshape(-1), ci1.reshape(-1))
def spin_square(self, fcivec, norb, nelec):
nelec = _unpack_nelec(nelec, self.spin)
return spin_op.spin_square0(fcivec, norb, nelec)
spin_square.__doc__ = spin_op.spin_square0.__doc__
@lib.with_doc(make_rdm1s.__doc__)
def make_rdm1s(self, fcivec, norb, nelec, link_index=None):
nelec = _unpack_nelec(nelec, self.spin)
return make_rdm1s(fcivec, norb, nelec, link_index)
@lib.with_doc(make_rdm1.__doc__)
def make_rdm1(self, fcivec, norb, nelec, link_index=None):
nelec = _unpack_nelec(nelec, self.spin)
return make_rdm1(fcivec, norb, nelec, link_index)
@lib.with_doc(make_rdm12s.__doc__)
def make_rdm12s(self, fcivec, norb, nelec, link_index=None, reorder=True):
nelec = _unpack_nelec(nelec, self.spin)
return make_rdm12s(fcivec, norb, nelec, link_index, reorder)
@lib.with_doc(make_rdm12.__doc__)
def make_rdm12(self, fcivec, norb, nelec, link_index=None, reorder=True):
nelec = _unpack_nelec(nelec, self.spin)
return make_rdm12(fcivec, norb, nelec, link_index, reorder)
def make_rdm2(self, fcivec, norb, nelec, link_index=None, reorder=True):
        r'''Spin-traced 2-particle density matrix.

        NOTE the 2pdm is :math:`\langle p^\dagger q^\dagger s r\rangle` but
        stored as [p,r,q,s]
        '''
nelec = _unpack_nelec(nelec, self.spin)
return self.make_rdm12(fcivec, norb, nelec, link_index, reorder)[1]
@lib.with_doc(trans_rdm1s.__doc__)
def trans_rdm1s(self, cibra, ciket, norb, nelec, link_index=None):
nelec = _unpack_nelec(nelec, self.spin)
return trans_rdm1s(cibra, ciket, norb, nelec, link_index)
@lib.with_doc(trans_rdm1.__doc__)
def trans_rdm1(self, cibra, ciket, norb, nelec, link_index=None):
nelec = _unpack_nelec(nelec, self.spin)
return trans_rdm1(cibra, ciket, norb, nelec, link_index)
@lib.with_doc(trans_rdm12s.__doc__)
def trans_rdm12s(self, cibra, ciket, norb, nelec, link_index=None,
reorder=True):
nelec = _unpack_nelec(nelec, self.spin)
return trans_rdm12s(cibra, ciket, norb, nelec, link_index, reorder)
@lib.with_doc(trans_rdm12.__doc__)
def trans_rdm12(self, cibra, ciket, norb, nelec, link_index=None,
reorder=True):
nelec = _unpack_nelec(nelec, self.spin)
return trans_rdm12(cibra, ciket, norb, nelec, link_index, reorder)
def large_ci(self, fcivec, norb, nelec,
tol=getattr(__config__, 'fci_addons_large_ci_tol', .1),
return_strs=getattr(__config__, 'fci_addons_large_ci_return_strs', True)):
nelec = _unpack_nelec(nelec, self.spin)
return addons.large_ci(fcivec, norb, nelec, tol, return_strs)
def transform_ci_for_orbital_rotation(self, fcivec, norb, nelec, u):
nelec = _unpack_nelec(nelec, self.spin)
return addons.transform_ci_for_orbital_rotation(fcivec, norb, nelec, u)
def contract_ss(self, fcivec, norb, nelec):
from pyscf.fci import spin_op
nelec = _unpack_nelec(nelec, self.spin)
return spin_op.contract_ss(fcivec, norb, nelec)
def gen_linkstr(self, norb, nelec, tril=True, spin=None):
if spin is None:
spin = self.spin
neleca, nelecb = _unpack_nelec(nelec, spin)
if tril:
link_indexa = cistring.gen_linkstr_index_trilidx(range(norb), neleca)
link_indexb = cistring.gen_linkstr_index_trilidx(range(norb), nelecb)
else:
link_indexa = cistring.gen_linkstr_index(range(norb), neleca)
link_indexb = cistring.gen_linkstr_index(range(norb), nelecb)
return link_indexa, link_indexb
FCI = FCISolver
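# A minimal usage sketch for multiple roots (the molecule and basis below are
# arbitrary choices for illustration). With nroots > 1 the kernel returns a
# list of energies and a list of CI vectors instead of a single pair.
def _demo_multiroot():
    from pyscf import gto, scf, ao2mo
    mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='sto-3g')
    mf = scf.RHF(mol).run()
    h1 = mf.mo_coeff.T.dot(mf.get_hcore()).dot(mf.mo_coeff)
    eri = ao2mo.kernel(mol, mf.mo_coeff)
    cisolver = FCISolver(mol)
    cisolver.nroots = 3  # ground state plus two excited states
    e, ci = cisolver.kernel(h1, eri, h1.shape[1], mol.nelec,
                            ecore=mol.energy_nuc())
    return e  # list of 3 FCI energies, lowest first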
def _unpack(norb, nelec, link_index, spin=None):
if link_index is None:
neleca, nelecb = _unpack_nelec(nelec, spin)
link_indexa = link_indexb = cistring.gen_linkstr_index_trilidx(range(norb), neleca)
if neleca != nelecb:
link_indexb = cistring.gen_linkstr_index_trilidx(range(norb), nelecb)
return link_indexa, link_indexb
else:
return link_index
if __name__ == '__main__':
from functools import reduce
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
mol = gto.Mole()
mol.verbose = 0
mol.output = None#"out_h2o"
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 ,-1. )],
#['H', ( 0.,-0.5 ,-1. )],
#['H', ( 0.,-0.5 ,-0. )],
['H', ( 0.,-0. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0., 1. , 1. )],
]
mol.basis = {'H': 'sto-3g'}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
cis = FCISolver(mol)
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron - 2
h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))
eri = ao2mo.incore.general(m._eri, (m.mo_coeff,)*4, compact=False)
eri = eri.reshape(norb,norb,norb,norb)
nea = nelec//2 + 1
neb = nelec//2 - 1
nelec = (nea, neb)
e1 = cis.kernel(h1e, eri, norb, nelec, davidson_only=True)[0]
print(e1, e1 - -7.7466756526056004)
| gkc1000/pyscf | pyscf/fci/direct_spin1.py | Python | apache-2.0 | 38081 | ["PySCF"] | 139e323f07ef52a7119a374c18f1b658513ff9ad6d603d6f70b57ccf3d3a21fe |
#=== PHYSICS =========================================================================================
# 2D physics functions.
# Authors: Tom De Smedt, Giorgio Olivero (Vector class)
# License: BSD (see LICENSE.txt for details).
# Copyright (c) 2008 City In A Bottle (cityinabottle.org)
# http://cityinabottle.org/nodebox
# This module can benefit greatly from loading psyco.
from math import sqrt, pow
from math import sin, cos, atan2, degrees, radians, pi
from random import random
from heapq import heappush, heappop
from warnings import warn
# float("inf") doesn't work on windows.
INFINITE = 1e20
# This module is standalone; line(), ellipse() and Text.draw()
# must be implemented or patched by the host application:
def line(x1, y1, x2, y2, stroke=(0,0,0,1), strokewidth=1):
pass
def ellipse(x, y, width, height, fill=(0,0,0,1), stroke=None, strokewidth=1):
pass
class Text:
def __init__(self, string, **kwargs):
self.string = string
self.__dict__.update(kwargs)
def copy(self):
k = self.__dict__.copy()
k.pop("string")
return Text(self.string, **k)
def draw(self):
pass
#=====================================================================================================
#--- VECTOR ------------------------------------------------------------------------------------------
# A Euclidean vector (sometimes called a geometric or spatial vector, or - as here - simply a vector)
# is a geometric object that has both a magnitude (or length) and direction.
# A vector is frequently represented by a line segment with an arrow.
class Vector(object):
def __init__(self, x=0, y=0, z=0, length=None, angle=None):
""" A vector represents a direction and a magnitude (or length).
Vectors can be added, subtracted, multiplied, divided, flipped, and 2D rotated.
Vectors are used in physics to represent velocity and acceleration.
"""
self.x = float(x)
self.y = float(y)
self.z = float(z)
if length is not None:
self.length = length
if angle is not None:
self.angle = angle
def copy(self):
return Vector(self.x, self.y, self.z)
def __getitem__(self, i):
return (self.x, self.y, self.z)[i]
def __setitem__(self, i, v):
setattr(self, ("x", "y", "z")[i], float(v))
def _get_xyz(self):
return (self.x, self.y, self.z)
def _set_xyz(self, coords):
self.x = float(coords[0])
self.y = float(coords[1])
self.z = float(coords[2])
xyz = property(_get_xyz, _set_xyz)
def _get_xy(self):
return (self.x, self.y)
def _set_xy(self, coords):
self.x = float(coords[0])
self.y = float(coords[1])
xy = property(_get_xy, _set_xy)
def _get_length(self):
return sqrt(self.x**2 + self.y**2 + self.z**2)
def _set_length(self, n):
d = self.length or 1
self.x *= n/d
self.y *= n/d
self.z *= n/d
length = magnitude = property(_get_length, _set_length)
def distance(self, v):
""" Returns the distance between two vectors,
e.g. if two vectors would be two sides of a triangle, returns the third side.
"""
dx = v.x - self.x
dy = v.y - self.y
dz = v.z - self.z
return sqrt(dx**2 + dy**2 + dz**2)
def distance2(self, v):
# Squared distance, avoiding the costly root calculation.
return (v.x-self.x)**2 + (v.y-self.y)**2 + (v.z-self.z)**2
def normalize(self):
""" Normalizes the vector to a unit vector with length=1.
"""
d = self.length or 1
self.x /= d
self.y /= d
self.z /= d
def _normalized(self):
""" Yields a new vector that is the normalized vector of this vector.
"""
d = self.length
if d == 0:
return self.copy()
return Vector(self.x/d, self.y/d, self.z/d)
normalized = unit = property(_normalized)
def reverse(self):
""" Reverses the direction of the vector so it points in the opposite direction.
"""
self.x = -self.x
self.y = -self.y
self.z = -self.z
flip = reverse
def _reversed(self):
""" Yields a new vector pointing in the opposite direction of this vector.
"""
return Vector(-self.x, -self.y, -self.z)
reversed = flipped = inverse = property(_reversed)
    # v.normal, v.angle, v.rotate(), v.rotated() and v.angle_to() are defined in 2D.
    # v.in2D.rotate() is here for decorative purposes.
@property
def in2D(self):
return self
def _orthogonal(self):
""" Yields a new vector whose 2D angle is 90 degrees (perpendicular) to this vector.
In 3D, there would be many perpendicular vectors.
"""
return Vector(self.y, -self.x, self.z)
orthogonal = perpendicular = normal = property(_orthogonal)
def _get_angle(self):
""" Yields the 2D direction of the vector.
"""
return degrees(atan2(self.y, self.x))
def _set_angle(self, degrees):
d = self.length
self.x = cos(radians(degrees)) * d
self.y = sin(radians(degrees)) * d
angle = direction = property(_get_angle, _set_angle)
def rotate(self, degrees):
""" Rotates the direction of the vector in 2D.
"""
self.angle += degrees
def rotated(self, degrees):
""" Returns a copy of the vector with direction rotated in 2D.
"""
v = self.copy()
v.rotate(degrees)
return v
def angle_to(self, v):
""" Returns the 2D angle between two vectors.
"""
return degrees(atan2(v.y, v.x) - atan2(self.y, self.x))
angle_between = angle_to
# Arithmetic operators.
# + - * / returns new vector objects.
def __add__(self, v):
if isinstance(v, (int, float)):
return Vector(self.x+v, self.y+v, self.z+v)
return Vector(self.x+v.x, self.y+v.y, self.z+v.z)
def __sub__(self, v):
if isinstance(v, (int, float)):
return Vector(self.x-v, self.y-v, self.z-v)
return Vector(self.x-v.x, self.y-v.y, self.z-v.z)
def __mul__(self, v):
if isinstance(v, (int, float)):
return Vector(self.x*v, self.y*v, self.z*v)
return Vector(self.x*v.x, self.y*v.y, self.z*v.z)
    def __div__(self, v):
        if isinstance(v, (int, float)):
            return Vector(self.x/v, self.y/v, self.z/v)
        return Vector(self.x/v.x, self.y/v.y, self.z/v.z)
    __truediv__ = __div__  # Python 3 routes the / operator to __truediv__.
# += -= *= /= modify the vector coordinates in-place.
def __iadd__(self, v):
if isinstance(v, (int, float)):
self.x+=v; self.y+=v; self.z+=v; return self
self.x+=v.x; self.y+=v.y; self.z+=v.z; return self
def __isub__(self, v):
if isinstance(v, (int, float)):
self.x-=v; self.y-=v; self.z-=v; return self
self.x-=v.x; self.y-=v.y; self.z-=v.z; return self
def __imul__(self, v):
if isinstance(v, (int, float)):
self.x*=v; self.y*=v; self.z*=v; return self
self.x*=v.x; self.y*=v.y; self.z*=v.z; return self
    def __idiv__(self, v):
        if isinstance(v, (int, float)):
            self.x/=v; self.y/=v; self.z/=v; return self
        self.x/=v.x; self.y/=v.y; self.z/=v.z; return self
    __itruediv__ = __idiv__  # Python 3 routes the /= operator to __itruediv__.
def dot(self, v):
""" Returns a scalar that is the dot product between the two vectors.
"""
return self.x*v.x + self.y*v.y + self.z*v.z
def cross(self, v):
""" Returns a new vector that is the cross product between the two vectors.
"""
return Vector(self.y*v.z - self.z*v.y,
self.z*v.x - self.x*v.z,
self.x*v.y - self.y*v.x)
def __neg__(self):
return Vector(-self.x, -self.y, -self.z)
def __eq__(self, v):
return isinstance(v, Vector) and self.x == v.x and self.y == v.y and self.z == v.z
def __ne__(self, v):
return not self.__eq__(v)
def __repr__(self):
return "%s(%.2f, %.2f, %.2f)" % (self.__class__.__name__, self.x, self.y, self.z)
def draw(self, x, y):
""" Draws the vector in 2D (z-axis is ignored).
Set stroke() and strokewidth() first.
"""
ellipse(x, y, 4, 4)
line(x, y, x+self.x, y+self.y)
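# A minimal usage sketch of Vector arithmetic (all values are arbitrary):
def _demo_vector():
    v1 = Vector(3, 4)        # length 5.0, angle ~53.13 degrees
    v2 = Vector(1, 0)
    v3 = v1 + v2             # new Vector(4.00, 4.00, 0.00)
    v2.rotate(90)            # v2 now points along the positive y-axis
    v1.length = 1            # rescales v1 in-place to a unit vector
    return v3.dot(v2), v1.cross(v2)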
#=====================================================================================================
#--- FLOCKING ----------------------------------------------------------------------------------------
# Boids is an artificial life program, developed by Craig Reynolds in 1986,
# which simulates the flocking behavior of birds.
# Boids is an example of emergent behavior, the complexity of Boids arises
# from the interaction of individual agents adhering to a set of simple rules:
# - separation: steer to avoid crowding local flockmates,
# - alignment: steer towards the average heading of local flockmates,
# - cohesion: steer to move toward the average position of local flockmates.
# Unexpected behavior, such as splitting flocks and reuniting after avoiding obstacles,
# can be considered emergent. The boids framework is often used in computer graphics,
# providing realistic-looking representations of flocks of birds and other creatures,
# such as schools of fish or herds of animals.
_UID = 0
def _uid():
global _UID; _UID+=1; return _UID
class Boid:
def __init__(self, flock, x=0, y=0, z=0, sight=70, space=30):
""" An agent in a flock with an (x,y,z)-position subject to different forces.
- sight : radius of local flockmates when calculating cohesion and alignment.
- space : radius of personal space when calculating separation.
"""
self._id = _uid()
self.flock = flock
self.x = x
self.y = y
self.z = z
self.velocity = Vector(random()*2-1, random()*2-1, random()*2-1)
self.target = None # A target Vector towards which the boid will steer.
self.sight = sight # The radius of cohesion and alignment, and visible obstacles.
self.space = space # The radius of separation.
self.dodge = False # Avoiding an obstacle?
        self.crowd = 0     # Fraction (0.0-1.0) of flockmates within sight.
def __eq__(self, other):
# Comparing boids by id makes it significantly faster.
return isinstance(other, Boid) and self._id == other._id
def __ne__(self, other):
return not self.__eq__(other)
def copy(self):
b = Boid(self.flock, self.x, self.y, self.z, self.sight, self.space)
b.velocity = self.velocity.copy()
b.target = self.target
return b
@property
def heading(self):
""" The boid's heading as an angle in degrees.
"""
return self.velocity.angle
@property
def depth(self):
""" The boid's relative depth (0.0-1.0) in the flock's container box.
"""
return not self.flock.depth and 1.0 or max(0.0, min(1.0, self.z / self.flock.depth))
def near(self, boid, distance=50):
""" Returns True if the given boid is within distance.
"""
# Distance is measured in a box instead of a sphere for performance.
return abs(self.x - boid.x) < distance and \
abs(self.y - boid.y) < distance and \
abs(self.z - boid.z) < distance
def separation(self, distance=25):
""" Returns steering velocity (vx,vy,vz) to avoid crowding local flockmates.
"""
vx = vy = vz = 0.0
for b in self.flock:
if b != self:
if abs(self.x-b.x) < distance: vx += self.x - b.x
if abs(self.y-b.y) < distance: vy += self.y - b.y
if abs(self.z-b.z) < distance: vz += self.z - b.z
return vx, vy, vz
def alignment(self, distance=50):
""" Returns steering velocity (vx,vy,vz) towards the average heading of local flockmates.
"""
vx = vy = vz = n = 0.0
for b in self.flock:
if b != self and b.near(self, distance):
vx += b.velocity.x
vy += b.velocity.y
vz += b.velocity.z; n += 1
if n:
return (vx/n-self.velocity.x), (vy/n-self.velocity.y), (vz/n-self.velocity.z)
return vx, vy, vz
def cohesion(self, distance=40):
""" Returns steering velocity (vx,vy,vz) towards the average position of local flockmates.
"""
vx = vy = vz = n = 0.0
for b in self.flock:
if b != self and b.near(self, distance):
vx += b.x
vy += b.y
vz += b.z; n += 1
        # Calculate the fraction of flockmates within sight.
self.crowd = float(n) / (len(self.flock) or 1)
if n:
return (vx/n-self.x), (vy/n-self.y), (vz/n-self.z)
return vx, vy, vz
def avoidance(self):
""" Returns steering velocity (vx,vy,0) to avoid 2D obstacles.
The boid is not guaranteed to avoid collision.
"""
vx = vy = 0.0
self.dodge = False
for o in self.flock.obstacles:
dx = o.x - self.x
dy = o.y - self.y
d = sqrt(dx**2 + dy**2) # Distance to obstacle.
s = (self.sight + o.radius) # Visibility range.
if d < s:
self.dodge = True
                # The force grows quadratically from 0.0 to 1.0,
                # where 1.0 means the boid touches the obstacle circumference.
f = (d-o.radius) / (s-o.radius)
f = (1-f)**2
if d < o.radius:
f *= 4
#self.velocity.reverse()
vx -= dx * f
vy -= dy * f
return (vx, vy, 0)
def limit(self, speed=10.0):
""" Limits the boid's velocity (the boid can momentarily go very fast).
"""
v = self.velocity
m = max(abs(v.x), abs(v.y), abs(v.z)) or 1
if abs(v.x) > speed: v.x = v.x / m * speed
if abs(v.y) > speed: v.y = v.y / m * speed
if abs(v.z) > speed: v.z = v.z / m * speed
def update(self, separation=0.2, cohesion=0.2, alignment=0.6, avoidance=0.6, target=0.2, limit=15.0):
""" Updates the boid's velocity based on the cohesion, separation and alignment forces.
- separation: force that keeps boids apart.
- cohesion : force that keeps boids closer together.
- alignment : force that makes boids move in the same direction.
- avoidance : force that steers the boid away from obstacles.
- target : force that steers the boid towards a target vector.
- limit : maximum velocity.
"""
f = 0.1
m1, m2, m3, m4, m5 = separation*f, cohesion*f, alignment*f, avoidance*f, target*f
vx1, vy1, vz1 = self.separation(self.space)
vx2, vy2, vz2 = self.cohesion(self.sight)
vx3, vy3, vz3 = self.alignment(self.sight)
vx4, vy4, vz4 = self.avoidance()
vx5, vy5, vz5 = self.target and (
(self.target.x-self.x),
(self.target.y-self.y),
(self.target.z-self.z)) or (0,0,0)
self.velocity.x += m1*vx1 + m2*vx2 + m3*vx3 + m4*vx4 + m5*vx5
self.velocity.y += m1*vy1 + m2*vy2 + m3*vy3 + m4*vy4 + m5*vy5
self.velocity.z += m1*vz1 + m2*vz2 + m3*vz3 + m4*vz4 + m5*vz5
self.velocity.z = self.flock.depth and self.velocity.z or 0 # No z-axis for Flock.depth=0
self.limit(speed=limit)
self.x += self.velocity.x
self.y += self.velocity.y
self.z += self.velocity.z
def seek(self, vector):
""" Sets the given Vector as the boid's target.
"""
self.target = vector
def __repr__(self):
return "Boid(x=%.1f, y=%.1f, z=%.1f)" % (self.x, self.y, self.z)
class Obstacle:
def __init__(self, x=0, y=0, z=0, radius=10):
""" An obstacle with an (x, y, z) position and a radius.
Boids will steer around obstacles that the flock is aware of, and that they can see.
"""
self.x = x
self.y = y
self.z = z
self.radius = radius
def copy(self):
return Obstacle(self.x, self.y, self.z, self.radius)
def __repr__(self):
return "Obstacle(x=%.1f, y=%.1f, z=%.1f, radius=%.1f)" % (self.x, self.y, self.z, self.radius)
class Flock(list):
def __init__(self, amount, x, y, width, height, depth=100.0, obstacles=[]):
""" A flock of the given amount of boids, confined to a box.
Obstacles can be added to Flock.obstacles (boids will steer away from them).
"""
self.x = x
self.y = y
self.width = width
self.height = height
self.depth = depth
self.scattered = False
self.gather = 0.05
        self.obstacles = list(obstacles)  # Copy, so the mutable default argument is never shared.
for i in range(amount):
# Boids will originate from the center of the flocking area.
b = Boid(self,
self.x + 0.5 * (width or 0),
self.y + 0.5 * (height or 0),
0.5 * (depth or 0))
self.append(b)
@property
def boids(self):
return self
def copy(self):
f = Flock(0, self.x, self.y, self.width, self.height, self.depth)
f.scattered = self.scattered
f.gather = self.gather
f.obstacles = [o.copy() for o in self.obstacles]
for b in self:
f.append(b.copy())
return f
def seek(self, target):
""" Sets the target vector of all boids in the flock (None for no target).
"""
for b in self:
b.seek(target)
def sight(self, distance):
for b in self:
b.sight = distance
def space(self, distance):
for b in self:
b.space = distance
def constrain(self, force=1.0, teleport=False):
""" Keep the flock inside the rectangular flocking area.
The given force determines how fast the boids will swivel when near an edge.
Alternatively, with teleport=True boids that cross a 2D edge teleport to the opposite side.
"""
f = 5
def _teleport(b):
if b.x < self.x:
b.x = self.x + self.width
if b.x > self.x + self.width:
b.x = self.x
if b.y < self.y:
b.y = self.y + self.height
if b.y > self.y + self.height:
b.y = self.y
def _constrain(b):
if b.x < self.x:
b.velocity.x += force * f * random()
if b.x > self.x + self.width:
b.velocity.x -= force * f * random()
if b.y < self.y:
b.velocity.y += force * f * random()
if b.y > self.y + self.height:
b.velocity.y -= force * f * random()
for b in self:
if b.z < 0:
b.velocity.z += force * f * random()
if b.z > self.depth:
b.velocity.z -= force * f * random()
            # _teleport() returns None, so chaining "teleport and _teleport(b)
            # or _constrain(b)" ran both branches; use an explicit if/else.
            if teleport:
                _teleport(b)
            else:
                _constrain(b)
def scatter(self, gather=0.05):
""" Scatters the flock, until Flock.scattered=False.
Flock.gather is the chance (0.0-1.0, or True/False) that the flock will reunite by itself.
"""
self.scattered = True
self.gather = gather
def update(self, separation=0.2, cohesion=0.2, alignment=0.6, avoidance=0.6, target=0.2, limit=15.0, constrain=1.0, teleport=False):
""" Updates the boid velocities based on the given forces.
Different forces elicit different flocking behavior; fine-tuning them can be delicate.
"""
if self.scattered:
# When scattered, make the boid cohesion negative and diminish alignment.
self.scattered = (random() > self.gather)
cohesion = -0.01
alignment *= 0.25
for b in self:
b.update(separation, cohesion, alignment, avoidance, target, limit)
self.constrain(force=constrain, teleport=teleport)
def by_depth(self):
""" Returns the boids in the flock sorted by depth (z-axis).
"""
return sorted(self, key=lambda boid: boid.z)
def __repr__(self):
return "Flock(%s)" % repr(list(self))
flock = Flock
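# A minimal usage sketch: a small flock steering around one obstacle
# (coordinates and force weights are arbitrary):
def _demo_flock():
    f = Flock(40, x=0, y=0, width=500, height=500)
    f.obstacles.append(Obstacle(250, 250, radius=30))
    for frame in range(100):
        f.update(separation=0.2, cohesion=0.2, alignment=0.6)
    return f.by_depth()  # boids sorted back-to-front, e.g., for drawing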
#=== SYSTEM ==========================================================================================
# A computer graphics technique to simulate certain fuzzy phenomena,
# which are otherwise very hard to reproduce with conventional rendering techniques:
# fire, explosions, smoke, moving water, sparks, falling leaves, clouds, fog, snow, dust,
# meteor tails, hair, fur, grass, or abstract visual effects like glowing trails, magic spells.
#--- FORCE -------------------------------------------------------------------------------------------
class Force:
def __init__(self, particle1, particle2, strength=1.0, threshold=100.0):
""" An attractive or repulsive force that causes objects with a mass to accelerate.
A negative strength indicates an attractive force.
"""
self.particle1 = particle1
self.particle2 = particle2
self.strength = strength
self.threshold = threshold
def apply(self):
""" Applies the force between two particles, based on the distance and mass of the particles.
"""
# Distance has a minimum threshold to keep forces from growing too large,
# e.g. distance 100 divides force by 10000, distance 5 only by 25.
# Decreasing the threshold moves particles that are very close to each other away faster.
dx = self.particle2.x - self.particle1.x
dy = self.particle2.y - self.particle1.y
d = sqrt(dx*dx + dy*dy)
d = max(d, self.threshold)
# The force between particles increases according to their weight.
# The force decreases as distance between them increases.
f = 10.0 * -self.strength * self.particle1.mass * self.particle2.mass
f = f / (d*d)
fx = f * dx / d
fy = f * dy / d
self.particle1.force.x += fx
self.particle1.force.y += fy
self.particle2.force.x -= fx
self.particle2.force.y -= fy
def __repr__(self):
return "Force(strength=%.2f)" % self.strength
force = Force
#--- SPRING ------------------------------------------------------------------------------------------
class Spring:
def __init__(self, particle1, particle2, length, strength=1.0):
""" A force that exerts attractive resistance when its length changes.
A spring acts as a flexible (but secure) connection between two particles.
"""
self.particle1 = particle1
self.particle2 = particle2
self.strength = strength
self.length = length
self.snapped = False
def snap(self):
""" Breaks the connection between the two particles.
"""
self.snapped = True
def apply(self):
""" Applies the force between two particles.
"""
# Distance between two particles.
dx = self.particle2.x - self.particle1.x
dy = self.particle2.y - self.particle1.y
d = sqrt(dx*dx + dy*dy)
if d == 0:
return
# The attractive strength decreases for heavy particles.
# The attractive strength increases when the spring is stretched.
f = 10.0 * self.strength / (self.particle1.mass * self.particle2.mass)
f = f * (d - self.length)
fx = f * dx / d
fy = f * dy / d
self.particle1.force.x += fx
self.particle1.force.y += fy
self.particle2.force.x -= fx
self.particle2.force.y -= fy
def draw(self, **kwargs):
line(self.particle1.x, self.particle1.y,
self.particle2.x, self.particle2.y, **kwargs)
def __repr__(self):
return "Spring(strength='%.2f', length='%.2f')" % (self.strength, self.length)
spring = Spring
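# A minimal usage sketch: a stretched spring pulls two particles back together
# (positions and rest length are arbitrary):
def _demo_spring():
    p1 = Particle(0, 0)
    p2 = Particle(100, 0)
    s = Spring(p1, p2, length=50, strength=1.0)
    s.apply()                        # accumulates equal and opposite forces
    return p1.force.x, p2.force.x    # ~+5.0 and ~-5.0 respectively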
#--- PARTICLE ----------------------------------------------------------------------------------------
MASS = "mass"
class Particle:
def __init__(self, x, y, velocity=(0.0,0.0), mass=10.0, radius=10.0, life=None, fixed=False):
""" An object with a mass subjected to attractive and repulsive forces.
The object's velocity is an inherent force (e.g. a rocket propeller to escape gravity).
"""
self._id = _uid()
self.x = x + random()
self.y = y + random()
self.mass = mass
self.radius = radius == MASS and mass or radius
self.velocity = isinstance(velocity, tuple) and Vector(*velocity) or velocity
self.force = Vector(0.0, 0.0) # Force accumulator.
self.life = life
self._age = 0.0
self.dead = False
self.fixed = fixed
@property
def age(self):
# Yields the particle's age as a number between 0.0 and 1.0.
return self.life and min(1.0, float(self._age) / self.life) or 0.0
def draw(self, **kwargs):
r = self.radius * (1 - self.age)
ellipse(self.x, self.y, r*2, r*2, **kwargs)
def __eq__(self, other):
return isinstance(other, Particle) and self._id == other._id
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "Particle(x=%.1f, y=%.1f, radius=%.1f, mass=%.1f)" % (
self.x, self.y, self.radius, self.mass)
particle = Particle
#--- SYSTEM ------------------------------------------------------------------------------------------
class flist(list):
def __init__(self, system):
        # List of forces or springs that keeps System._dynamics in sync.
self.system = system
def insert(self, i, force):
list.insert(self, i, force)
self.system._dynamics.setdefault(force.particle1._id, []).append(force)
self.system._dynamics.setdefault(force.particle2._id, []).append(force)
def append(self, force):
self.insert(len(self), force)
def extend(self, forces):
for f in forces: self.append(f)
    def pop(self, i):
        f = list.pop(self, i)
        self.system._dynamics.pop(f.particle1._id, None)
        self.system._dynamics.pop(f.particle2._id, None)
        return f
def remove(self, force):
i = self.index(force); self.pop(i)
class System(object):
def __init__(self, gravity=(0,0), drag=0.0):
""" A collection of particles and the forces working on them.
"""
self.particles = []
self.emitters = []
self.forces = flist(self)
self.springs = flist(self)
self.gravity = isinstance(gravity, tuple) and Vector(*gravity) or gravity
self.drag = drag
self._dynamics = {} # Particle id linked to list of applied forces.
def __len__(self):
return len(self.particles)
def __iter__(self):
return iter(self.particles)
def __getitem__(self, i):
return self.particles[i]
def extend(self, x):
for x in x: self.append(x)
def append(self, x):
if isinstance(x, Particle) and not x in self.particles:
self.particles.append(x)
elif isinstance(x, Force):
self.forces.append(x)
elif isinstance(x, Spring):
self.springs.append(x)
elif isinstance(x, Emitter):
self.emitters.append(x)
self.extend(x.particles)
x.system = self
def _cross(self, f=lambda particle1, particle2: None, source=None, particles=[]):
# Applies function f to any two given particles in the list,
# or between source and any other particle if source is given.
P = particles or self.particles
for i, p1 in enumerate(P):
if source is None:
[f(p1, p2) for p2 in P[i+1:]]
else:
f(source, p1)
def force(self, strength=1.0, threshold=100, source=None, particles=[]):
""" The given force is applied between each two particles.
The effect this yields (with a repulsive force) is an explosion.
- source: one vs. all, apply the force to this particle with all others.
- particles: a list of particles to apply the force to (some vs. some or some vs. source).
            Be aware that 50 particles yield 1225 forces: O(n**2/2); or O(n) with source.
The force is applied to particles present in the system,
those added later on are not subjected to the force.
"""
f = lambda p1, p2: self.forces.append(Force(p1, p2, strength, threshold))
self._cross(f, source, particles)
def dynamics(self, particle, type=None):
""" Returns a list of forces working on the particle, optionally filtered by type (e.g. Spring).
"""
F = self._dynamics.get(isinstance(particle, Particle) and particle._id or particle, [])
F = [f for f in F if type is None or isinstance(f, type)]
return F
def limit(self, particle, m=None):
""" Limits the movement of the particle to m.
When repulsive particles are close to each other, their force can be very high.
This results in large movement steps, and gaps in the animation.
This can be remedied by limiting the total force.
"""
# The right way to do it requires 4x sqrt():
# if m and particle.force.length > m:
# particle.force.length = m
# if m and particle.velocity.length > m:
# particle.velocity.length = m
if m is not None:
for f in (particle.force, particle.velocity):
if abs(f.x) > m:
f.y *= m / abs(f.x)
f.x *= m / abs(f.x)
if abs(f.y) > m:
f.x *= m / abs(f.y)
f.y *= m / abs(f.y)
def update(self, limit=30):
""" Updates the location of the particles by applying all the forces.
"""
for e in self.emitters:
# Fire particles from emitters.
e.update()
for p in self.particles:
# Apply gravity. Heavier objects have a stronger attraction.
p.force.x = 0
p.force.y = 0
p.force.x += 0.1 * self.gravity.x * p.mass
p.force.y += 0.1 * -self.gravity.y * p.mass
for f in self.forces:
# Apply attractive and repulsive forces between particles.
if not f.particle1.dead and \
not f.particle2.dead:
f.apply()
for s in self.springs:
# Apply spring forces between particles.
if not s.particle1.dead and \
not s.particle2.dead and \
not s.snapped:
s.apply()
for p in self.particles:
if not p.fixed:
# Apply drag.
p.velocity.x *= 1.0 - min(1.0, self.drag)
p.velocity.y *= 1.0 - min(1.0, self.drag)
# Apply velocity.
p.force.x += p.velocity.x
p.force.y += p.velocity.y
# Limit the accumulated force and update the particle's position.
self.limit(p, limit)
p.x += p.force.x
p.y += p.force.y
if p.life:
# Apply lifespan.
p._age += 1
p.dead = p._age > p.life
@property
def dead(self):
# Yields True when all particles are dead (and we don't need to update anymore).
for p in self.particles:
if not p.dead: return False
return True
def draw(self, **kwargs):
""" Draws the system at the current iteration.
"""
for s in self.springs:
if not s.particle1.dead and \
not s.particle2.dead and \
not s.snapped:
s.draw(**kwargs)
for p in self.particles:
if not p.dead:
p.draw(**kwargs)
def __repr__(self):
return "System(particles=%i, forces=%i, springs=%i)" % \
(len(self.particles), len(self.forces), len(self.springs))
system = System
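# A minimal usage sketch: ten short-lived particles repulsing each other
# (an "explosion"; all values are arbitrary):
def _demo_system():
    s = System(gravity=(0, 1.0), drag=0.01)
    for i in range(10):
        s.append(Particle(100, 100, mass=10, life=50))
    s.force(strength=5.0, threshold=50)  # repulsive force between all pairs
    while not s.dead:                    # all particles expire after ~50 frames
        s.update(limit=30)
    return s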
# Notes:
# While this system is interesting for many effects, it is unstable.
# If for example very strong springs are applied, particles will start "shaking".
# This is because the forces are simply added to the particle's position instead of integrated.
# See also:
# http://local.wasp.uwa.edu.au/~pbourke/miscellaneous/particle/
# http://local.wasp.uwa.edu.au/~pbourke/miscellaneous/particle/particlelib.c
#def euler_derive(particle, dt=0.1):
# particle.x += particle.velocity.x * dt
# particle.y += particle.velocity.y * dt
# particle.velocity.x += particle.force.x / particle.mass * dt
# particle.velocity.y += particle.force.y / particle.mass * dt
# If this is applied, springs will need a velocity dampener:
#fx = f + 0.01 + (self.particle2.velocity.x - self.particle1.velocity.x) * dx / d
#fy = f + 0.01 + (self.particle2.velocity.y - self.particle1.velocity.y) * dy / d
# In pure Python this is slow, since only 1/10 of the force is applied each System.update().
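# A runnable version of the Euler step sketched above (an assumption drawn
# from the commented code, not part of the original integrator):
def euler_derive(particle, dt=0.1):
    # Integrate velocity into position, and force/mass into velocity,
    # instead of adding the raw force to the position.
    particle.x += particle.velocity.x * dt
    particle.y += particle.velocity.y * dt
    particle.velocity.x += particle.force.x / particle.mass * dt
    particle.velocity.y += particle.force.y / particle.mass * dt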
#--- EMITTER -----------------------------------------------------------------------------------------
class Emitter(object):
def __init__(self, x, y, angle=0, strength=1.0, spread=10):
""" A source that shoots particles in a given direction with a given strength.
"""
self.system = None # Set when appended to System.
self.particles = []
self.x = x
self.y = y
self.velocity = Vector(1, 1, length=strength, angle=angle)
self.spread = spread # Angle-of-view.
self._i = 0 # Current iteration.
def __len__(self):
return len(self.particles)
def __iter__(self):
return iter(self.particles)
def __getitem__(self, i):
return self.particles[i]
def extend(self, x, life=100):
for x in x: self.append(x, life)
def append(self, particle, life=100):
particle.life = particle.life or life
particle._age = particle.life
particle.dead = True
self.particles.append(particle)
if self.system is not None:
# Also append the particle to the system the emitter is part of.
self.system.append(particle)
def _get_angle(self):
return self.velocity.angle
def _set_angle(self, v):
self.velocity.angle = v
angle = property(_get_angle, _set_angle)
def _get_strength(self):
return self.velocity.length
def _set_strength(self, v):
self.velocity.length = max(v, 0.01)
strength = length = magnitude = property(_get_strength, _set_strength)
def update(self):
""" Update the system and respawn dead particles.
When a particle dies, it can be reused as a new particle fired from the emitter.
This is more efficient than creating a new Particle object.
"""
        self._i += 1 # Respawn occurs gradually.
        if not self.particles:
            return
        p = self.particles[self._i % len(self.particles)]
if p.dead:
p.x = self.x
p.y = self.y
p.velocity = self.velocity.rotated(self.spread * 0.5 * (random()*2-1))
p._age = 0
p.dead = False
emitter = Emitter
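# A minimal usage sketch: a fountain that keeps recycling 50 particles
# (position, angle and strength are arbitrary). Given the sign flip in
# System.update(), gravity=(0,-1.0) pulls particles down the screen, while
# angle=-90 fires them upwards in screen coordinates:
def _demo_emitter():
    s = System(gravity=(0, -1.0))
    e = Emitter(x=200, y=400, angle=-90, strength=8.0, spread=30)
    for i in range(50):
        e.append(Particle(e.x, e.y, radius=3), life=60)
    s.append(e)  # also registers the emitter's particles with the system
    for frame in range(200):
        s.update()
    return s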
#=== GRAPH ===========================================================================================
# Graph visualization is a way of representing information as diagrams of abstract graphs and networks.
# Automatic graph drawing has many important applications in software engineering,
# database and web design, networking, and in visual interfaces for many other domains.
#--- NODE --------------------------------------------------------------------------------------------
def deepcopy(o):
# A color can be represented as a tuple or as a nodebox.graphics.Color object,
# in which case it needs to be copied by invoking Color.copy().
if o is None:
return o
if hasattr(o, "copy"):
return o.copy()
if isinstance(o, (str, bool, int, float, complex)):
return o
if isinstance(o, (list, tuple, set)):
return o.__class__(deepcopy(v) for v in o)
if isinstance(o, dict):
        return dict((deepcopy(k), deepcopy(v)) for k, v in o.items())
raise Exception("don't know how to copy %s" % o.__class__.__name__)
class Node(object):
def __init__(self, id="", radius=5, **kwargs):
""" A node with a unique id in the graph.
Node.id is drawn as a text label, unless optional parameter text=False.
Optional parameters include: fill, stroke, strokewidth, text, font, fontsize, fontweight.
"""
self.graph = None
self.links = Links()
self.id = id
self._x = 0.0 # Calculated by Graph.layout.update().
self._y = 0.0 # Calculated by Graph.layout.update().
self.force = Vector(0.0, 0.0)
self.radius = radius
self.fixed = kwargs.pop("fixed", False)
self.fill = kwargs.pop("fill", None)
self.stroke = kwargs.pop("stroke", (0,0,0,1))
self.strokewidth = kwargs.pop("strokewidth", 1)
self.text = kwargs.get("text", True) and \
Text(isinstance(id, bytes) and id or str(id),
width = 85,
fill = kwargs.pop("text", (0,0,0,1)),
fontsize = kwargs.pop("fontsize", 11), **kwargs) or None
self._weight = None # Calculated by Graph.eigenvector_centrality().
self._centrality = None # Calculated by Graph.betweenness_centrality().
@property
def _distance(self):
# Graph.distance controls the (x,y) spacing between nodes.
return self.graph and float(self.graph.distance) or 1.0
def _get_x(self):
return self._x * self._distance
def _get_y(self):
return self._y * self._distance
def _set_x(self, v):
self._x = v / self._distance
def _set_y(self, v):
self._y = v / self._distance
x = property(_get_x, _set_x)
y = property(_get_y, _set_y)
@property
def edges(self):
""" Yields a list of edges from/to the node.
"""
return self.graph is not None \
and [e for e in self.graph.edges if self.id in (e.node1.id, e.node2.id)] \
or []
@property
def weight(self):
""" Yields eigenvector centrality as a number between 0.0-1.0.
"""
if self.graph and self._weight is None:
self.graph.eigenvector_centrality()
return self._weight
@property
def centrality(self):
""" Yields betweenness centrality as a number between 0.0-1.0.
"""
if self.graph and self._centrality is None:
self.graph.betweenness_centrality()
return self._centrality
def flatten(self, depth=1, traversable=lambda node, edge: True, _visited=None):
""" Recursively lists the node and nodes linked to it.
Depth 0 returns a list with the node.
Depth 1 returns a list with the node and all the directly linked nodes.
Depth 2 includes the linked nodes' links, and so on.
"""
_visited = _visited or {}
_visited[self.id] = (self, depth)
if depth >= 1:
for n in self.links:
if n.id not in _visited or _visited[n.id][1] < depth-1:
if traversable(self, self.links.edges[n.id]):
n.flatten(depth-1, traversable, _visited)
return [n for n,d in _visited.values()] # Fast, but not order-preserving.
def draw(self, weighted=False):
""" Draws the node as a circle with the given radius, fill, stroke and strokewidth.
Draws the node centrality as a shadow effect when weighted=True.
Draws the node text label.
Override this method in a subclass for custom drawing.
"""
# Draw the node weight as a shadow (based on node betweenness centrality).
        if weighted is not False:
w = 0.25 * 35
ellipse(
self.x,
self.y,
self.radius*2 + w,
self.radius*2 + w, fill=(0,0,0,0.2), stroke=None)
# Draw the node.
ellipse(
self.x,
self.y,
self.radius*2,
self.radius*2, fill=self.fill, stroke=self.stroke, strokewidth=self.strokewidth)
# Draw the node text label.
if self.text:
self.text.draw(
self.x + self.radius,
self.y + self.radius)
def contains(self, x, y):
""" Returns True if the given coordinates (x, y) are inside the node radius.
"""
return abs(self.x - x) < self.radius*2 and \
abs(self.y - y) < self.radius*2
def __repr__(self):
return "%s(id=%s)" % (self.__class__.__name__, repr(self.id))
def __eq__(self, node):
return isinstance(node, Node) and self.id == node.id
def __ne__(self, node):
return not self.__eq__(node)
class Links(list):
def __init__(self):
""" A list in which each node has an associated edge.
The edge() method returns the edge for a given node id.
"""
self.edges = dict()
def append(self, node, edge=None):
if node.id not in self.edges:
list.append(self, node)
self.edges[node.id] = edge
def remove(self, node):
list.remove(self, node)
self.edges.pop(node.id, None)
def edge(self, node):
return self.edges.get(isinstance(node, Node) and node.id or node)
#--- EDGE --------------------------------------------------------------------------------------------
coordinates = lambda x, y, d, a: (x + d*cos(radians(a)), y + d*sin(radians(a)))
class Edge(object):
def __init__(self, node1, node2, weight=0.0, length=1.0, type=None, stroke=(0,0,0,1), strokewidth=1):
""" A connection between two nodes.
Its weight indicates the importance (not the cost) of the connection.
Its type is useful in a semantic network (e.g. "is-a", "is-part-of", ...)
"""
self.node1 = node1
self.node2 = node2
self._weight = weight
self.length = length
self.type = type
self.stroke = stroke
self.strokewidth = strokewidth
def _get_weight(self):
return self._weight
def _set_weight(self, v):
self._weight = v
# Clear cached adjacency map in the graph, since edge weights have changed.
if self.node1.graph is not None:
self.node1.graph._adjacency = None
if self.node2.graph is not None:
self.node2.graph._adjacency = None
weight = property(_get_weight, _set_weight)
def draw(self, weighted=False, directed=False):
""" Draws the edge as a line with the given stroke and strokewidth (increased with Edge.weight).
Override this method in a subclass for custom drawing.
"""
w = weighted and self.weight or 0
line(
self.node1.x,
self.node1.y,
self.node2.x,
self.node2.y, stroke=self.stroke, strokewidth=self.strokewidth+w)
if directed:
self.draw_arrow(stroke=self.stroke, strokewidth=self.strokewidth+w)
def draw_arrow(self, **kwargs):
""" Draws the direction of the edge as an arrow on the rim of the receiving node.
"""
x0, y0 = self.node1.x, self.node1.y
x1, y1 = self.node2.x, self.node2.y
# Find the edge's angle based on node1 and node2 position.
a = degrees(atan2(y1-y0, x1-x0))
        # The arrow points to node2's rim instead of its center.
r = self.node2.radius
d = sqrt(pow(x1-x0, 2) + pow(y1-y0, 2))
x01, y01 = coordinates(x0, y0, d-r-1, a)
# Find the two other arrow corners under the given angle.
r = max(kwargs.get("strokewidth", 1) * 3, 6)
dx1, dy1 = coordinates(x01, y01, -r, a-20)
dx2, dy2 = coordinates(x01, y01, -r, a+20)
line(x01, y01, dx1, dy1, **kwargs)
line(x01, y01, dx2, dy2, **kwargs)
line(dx1, dy1, dx2, dy2, **kwargs)
def __repr__(self):
return "%s(id1=%s, id2=%s)" % (self.__class__.__name__, repr(self.node1.id), repr(self.node2.id))
#--- GRAPH -------------------------------------------------------------------------------------------
# Return value of Graph.shortest_paths().
# Dictionary values can be accessed by Node as well as by node id.
class nodedict(dict):
def __init__(self, graph, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)  # Populate from the given items.
self.graph = graph
def __contains__(self, node):
return dict.__contains__(self, self.graph.get(node, node))
def __getitem__(self, node):
return dict.__getitem__(self, isinstance(node, Node) and node or self.graph[node])
def get(self, node, default=None):
return dict.get(self, self.graph.get(node, node), default)
def unique(list):
u, b = [], {}
for item in list:
if item not in b: u.append(item); b[item]=True
return u
# Graph layouts:
SPRING = "spring"
# Graph node sort order:
WEIGHT, CENTRALITY = "weight", "centrality"
ALL = "all"
class Graph(dict):
def __init__(self, layout=SPRING, distance=10.0):
""" A network of nodes connected by edges that can be drawn with a given layout.
"""
self.nodes = [] # List of Node objects.
self.edges = [] # List of Edge objects.
self.root = None
self._adjacency = None # Cached adjacency() dict.
self.layout = layout==SPRING and GraphSpringLayout(self) or GraphLayout(self)
self.distance = distance
def __getitem__(self, id):
try:
return dict.__getitem__(self, id)
except KeyError:
raise Exception("no node with id '%s' in graph" % id)
def append(self, base, *args, **kwargs):
""" Appends a Node or Edge to the graph: Graph.append(Node, id="rabbit").
"""
kwargs["base"] = base
if issubclass(base, Node):
return self.add_node(*args, **kwargs)
if issubclass(base, Edge):
return self.add_edge(*args, **kwargs)
def add_node(self, id, *args, **kwargs):
""" Appends a new Node to the graph.
An optional base parameter can be used to pass a subclass of Node.
"""
n = kwargs.pop("base", Node)
n = isinstance(id, Node) and id or self.get(id) or n(id, *args, **kwargs)
if n.id not in self:
self.nodes.append(n)
self[n.id] = n; n.graph = self
self.root = kwargs.get("root", False) and n or self.root
# Clear adjacency cache.
self._adjacency = None
return n
def add_edge(self, id1, id2, *args, **kwargs):
""" Appends a new Edge to the graph.
An optional base parameter can be used to pass a subclass of Edge:
Graph.add_edge("cold", "winter", base=IsPropertyOf)
"""
# Create nodes that are not yet part of the graph.
n1 = self.add_node(id1)
n2 = self.add_node(id2)
# Creates an Edge instance.
# If an edge (in the same direction) already exists, yields that edge instead.
e1 = n1.links.edge(n2)
if e1 and e1.node1 == n1 and e1.node2 == n2:
return e1
e2 = kwargs.pop("base", Edge)
e2 = e2(n1, n2, *args, **kwargs)
self.edges.append(e2)
# Synchronizes Node.links:
# A.links.edge(B) yields edge A->B
# B.links.edge(A) yields edge B->A
n1.links.append(n2, edge=e2)
n2.links.append(n1, edge=e1 or e2)
# Clear adjacency cache.
self._adjacency = None
return e2
def remove(self, x):
""" Removes the given Node (and all its edges) or Edge from the graph.
Note: removing Edge a->b does not remove Edge b->a.
"""
if isinstance(x, Node) and x.id in self:
self.pop(x.id)
self.nodes.remove(x); x.graph = None
# Remove all edges involving the given node.
for e in list(self.edges):
if x in (e.node1, e.node2):
if x in e.node1.links: e.node1.links.remove(x)
if x in e.node2.links: e.node2.links.remove(x)
self.edges.remove(e)
if isinstance(x, Edge):
self.edges.remove(x)
# Clear adjacency cache.
self._adjacency = None
def node(self, id):
""" Returns the node in the graph with the given id.
"""
return self.get(id)
def edge(self, id1, id2):
""" Returns the edge between the nodes with given id1 and id2.
"""
return id1 in self and id2 in self and self[id1].links.edge(id2) or None
def paths(self, node1, node2, length=4, path=[]):
""" Returns a list of paths (shorter than or equal to given length) connecting the two nodes.
"""
if not isinstance(node1, Node):
node1 = self[node1]
if not isinstance(node2, Node):
node2 = self[node2]
return [[self[id] for id in p] for p in paths(self, node1.id, node2.id, length, path)]
def shortest_path(self, node1, node2, heuristic=None, directed=False):
""" Returns a list of nodes connecting the two nodes.
"""
if not isinstance(node1, Node):
node1 = self[node1]
if not isinstance(node2, Node):
node2 = self[node2]
try:
p = dijkstra_shortest_path(self, node1.id, node2.id, heuristic, directed)
p = [self[id] for id in p]
return p
except IndexError:
return None
def shortest_paths(self, node, heuristic=None, directed=False):
""" Returns a dictionary of nodes, each linked to a list of nodes (shortest path).
"""
if not isinstance(node, Node):
node = self[node]
p = nodedict(self)
        for id, path in dijkstra_shortest_paths(self, node.id, heuristic, directed).items():
p[self[id]] = path and [self[id] for id in path] or None
return p
def eigenvector_centrality(self, normalized=True, reversed=True, rating={}, iterations=100, tolerance=0.0001):
""" Calculates eigenvector centrality and returns a node => weight dictionary.
Node.weight is updated in the process.
Node.weight is higher for nodes with a lot of (indirect) incoming traffic.
"""
ec = eigenvector_centrality(self, normalized, reversed, rating, iterations, tolerance)
ec = nodedict(self, ((self[id], w) for id, w in ec.items()))
for n, w in ec.items():
n._weight = w
return ec
def betweenness_centrality(self, normalized=True, directed=False):
""" Calculates betweenness centrality and returns a node => weight dictionary.
Node.centrality is updated in the process.
Node.centrality is higher for nodes with a lot of passing traffic.
"""
bc = brandes_betweenness_centrality(self, normalized, directed)
bc = nodedict(self, ((self[id], w) for id, w in bc.items()))
for n, w in bc.items():
n._centrality = w
return bc
def sorted(self, order=WEIGHT, threshold=0.0):
""" Returns a list of nodes sorted by WEIGHT or CENTRALITY.
Nodes with a lot of traffic will be at the start of the list.
"""
o = lambda node: getattr(node, order)
nodes = ((o(n), n) for n in self.nodes if o(n) >= threshold)
        nodes = sorted(nodes, key=lambda x: x[0], reverse=True)  # Sort on weight only; Node objects are not orderable.
return [n for w, n in nodes]
def prune(self, depth=0):
""" Removes all nodes with less or equal links than depth.
"""
for n in (n for n in self.nodes if len(n.links) <= depth):
self.remove(n)
def fringe(self, depth=0):
""" For depth=0, returns the list of leaf nodes (nodes with only one connection).
For depth=1, returns the list of leaf nodes and their connected nodes, and so on.
"""
u = []; [u.extend(n.flatten(depth)) for n in self.nodes if len(n.links) == 1]
return unique(u)
@property
def density(self):
""" Yields the number of edges vs. the maximum number of possible edges.
For example, <0.35 => sparse, >0.65 => dense, 1.0 => complete.
"""
return 2.0*len(self.edges) / (len(self.nodes) * (len(self.nodes)-1))
@property
def is_complete(self):
return self.density == 1.0
@property
def is_dense(self):
return self.density > 0.65
@property
def is_sparse(self):
return self.density < 0.35
def split(self):
""" Returns the list of unconnected subgraphs.
"""
return partition(self)
def update(self, iterations=10, **kwargs):
""" Graph.layout.update() is called the given number of iterations.
"""
for i in range(iterations):
self.layout.update(**kwargs)
def draw(self, weighted=False, directed=False):
""" Draws all nodes and edges.
"""
for e in self.edges:
e.draw(weighted, directed)
for n in reversed(self.nodes): # New nodes (with Node._weight=None) first.
n.draw(weighted)
def node_at(self, x, y):
""" Returns the node at (x,y) or None.
"""
for n in self.nodes:
if n.contains(x, y): return n
def _add_node_copy(self, n, **kwargs):
# Magical fairy dust to copy subclasses of Node.
# We assume that the subclass constructor takes an optional "text" parameter
# (Text objects in NodeBox for OpenGL's implementation are expensive).
try:
new = self.add_node(n.id, root=kwargs.get("root",False), text=False)
except TypeError:
new = self.add_node(n.id, root=kwargs.get("root",False))
new.__class__ = n.__class__
        new.__dict__.update((k, deepcopy(v)) for k, v in n.__dict__.items()
if k not in ("graph", "links", "_x", "_y", "force", "_weight", "_centrality"))
def _add_edge_copy(self, e, **kwargs):
if kwargs.get("node1", e.node1).id not in self \
or kwargs.get("node2", e.node2).id not in self:
return
new = self.add_edge(
kwargs.get("node1", self[e.node1.id]),
kwargs.get("node2", self[e.node2.id]))
new.__class__ = e.__class__
        new.__dict__.update((k, deepcopy(v)) for k, v in e.__dict__.items()
if k not in ("node1", "node2"))
def copy(self, nodes=ALL):
""" Returns a copy of the graph with the given list of nodes (and connecting edges).
The layout will be reset.
"""
g = Graph(layout=None, distance=self.distance)
g.layout = self.layout.copy(graph=g)
for n in (nodes==ALL and self.nodes or (isinstance(n, Node) and n or self[n] for n in nodes)):
g._add_node_copy(n, root=self.root==n)
for e in self.edges:
g._add_edge_copy(e)
return g
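# A minimal usage sketch: build a small semantic graph, lay it out, and query
# the shortest path (node ids are arbitrary strings):
def _demo_graph():
    g = Graph()
    for id1, id2 in [("cat", "mammal"), ("dog", "mammal"),
                     ("mammal", "animal"), ("fish", "animal")]:
        g.add_edge(id1, id2)
    g.update(iterations=20)                # run the spring layout
    return g.shortest_path("cat", "fish")  # [cat, mammal, animal, fish] as Node objects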
#--- GRAPH LAYOUT ------------------------------------------------------------------------------------
# Graph drawing or graph layout, as a branch of graph theory,
# applies topology and geometry to derive two-dimensional representations of graphs.
class GraphLayout:
def __init__(self, graph):
""" Calculates node positions iteratively when GraphLayout.update() is called.
"""
self.graph = graph
self.iterations = 0
def update(self):
self.iterations += 1
def reset(self):
self.iterations = 0
for n in self.graph.nodes:
n._x = 0
n._y = 0
n.force = Vector(0,0)
@property
def bounds(self):
""" Returns a (x, y, width, height)-tuple of the approximate layout dimensions.
"""
x0, y0 = +INFINITE, +INFINITE
x1, y1 = -INFINITE, -INFINITE
for n in self.graph.nodes:
if (n.x < x0): x0 = n.x
if (n.y < y0): y0 = n.y
if (n.x > x1): x1 = n.x
if (n.y > y1): y1 = n.y
return (x0, y0, x1-x0, y1-y0)
    def copy(self, graph):
        return GraphLayout(graph)  # GraphLayout.__init__() takes the graph only.
class GraphSpringLayout(GraphLayout):
def __init__(self, graph):
""" A force-based layout in which edges are regarded as springs.
The forces are applied to the nodes, pulling them closer or pushing them apart.
"""
# Based on: http://snipplr.com/view/1950/graph-javascript-framework-version-001/
GraphLayout.__init__(self, graph)
self.k = 4.0 # Force constant.
self.force = 0.01 # Force multiplier.
self.repulsion = 15 # Maximum repulsive force radius.
def _distance(self, node1, node2):
# Yields a tuple with distances (dx, dy, d, d**2).
# Ensures that the distance is never zero (which deadlocks the animation).
dx = node2._x - node1._x
dy = node2._y - node1._y
d2 = dx*dx + dy*dy
if d2 < 0.01:
dx = random() * 0.1 + 0.1
dy = random() * 0.1 + 0.1
d2 = dx*dx + dy*dy
return dx, dy, sqrt(d2), d2
def _repulse(self, node1, node2):
# Updates Node.force with the repulsive force.
dx, dy, d, d2 = self._distance(node1, node2)
if d < self.repulsion:
f = self.k**2 / d2
node2.force.x += f * dx
node2.force.y += f * dy
node1.force.x -= f * dx
node1.force.y -= f * dy
def _attract(self, node1, node2, weight=0, length=1.0):
# Updates Node.force with the attractive edge force.
dx, dy, d, d2 = self._distance(node1, node2)
d = min(d, self.repulsion)
f = (d2 - self.k**2) / self.k * length
f *= weight * 0.5 + 1
f /= d
node2.force.x -= f * dx
node2.force.y -= f * dy
node1.force.x += f * dx
node1.force.y += f * dy
def update(self, weight=10.0, limit=0.5):
""" Updates the position of nodes in the graph.
The weight parameter determines the impact of edge weight.
The limit parameter determines the maximum movement each update().
"""
GraphLayout.update(self)
# Forces on all nodes due to node-node repulsions.
for i, n1 in enumerate(self.graph.nodes):
for j, n2 in enumerate(self.graph.nodes[i+1:]):
self._repulse(n1, n2)
# Forces on nodes due to edge attractions.
for e in self.graph.edges:
self._attract(e.node1, e.node2, weight*e.weight, 1.0/(e.length or 0.01))
# Move nodes by given force.
for n in self.graph.nodes:
if not n.fixed:
n._x += max(-limit, min(self.force * n.force.x, limit))
n._y += max(-limit, min(self.force * n.force.y, limit))
n.force.x = 0
n.force.y = 0
def copy(self, graph):
g = GraphSpringLayout(graph)
g.k, g.force, g.repulsion = self.k, self.force, self.repulsion
return g
#--- GRAPH TRAVERSAL ---------------------------------------------------------------------------------
def depth_first_search(node, visit=lambda node: False, traversable=lambda node, edge: True, _visited=None):
""" Visits all the nodes connected to the given root node, depth-first.
The visit function is called on each node.
Recursion will stop if it returns True, and subsequently dfs() will return True.
The traversable function takes the current node and edge,
and returns True if we are allowed to follow this connection to the next node.
        For example, the traversable for directed edges is as follows:
lambda node, edge: node == edge.node1
"""
stop = visit(node)
_visited = _visited or {}
_visited[node.id] = True
for n in node.links:
if stop: return True
if traversable(node, node.links.edge(n)) is False: continue
if not n.id in _visited:
stop = depth_first_search(n, visit, traversable, _visited)
return stop
dfs = depth_first_search
def breadth_first_search(node, visit=lambda node: False, traversable=lambda node, edge: True):
""" Visits all the nodes connected to the given root node, breadth-first.
"""
q = [node]
_visited = {}
while q:
node = q.pop(0)
if not node.id in _visited:
if visit(node):
return True
q.extend((n for n in node.links if traversable(node, node.links.edge(n)) is not False))
_visited[node.id] = True
return False
bfs = breadth_first_search
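# Example (sketch, commented out): print every node reachable from a root,
# depth-first, then stop a breadth-first search at a target id.
# add_edge() is assumed to create missing nodes, as elsewhere in this module.
#
#   g = Graph()
#   for id1, id2 in [("a", "b"), ("b", "c"), ("b", "d")]:
#       g.add_edge(id1, id2)
#   def show(n):
#       print(n.id)
#       return False # Keep traversing.
#   dfs(g["a"], visit=show)
#   found = bfs(g["a"], visit=lambda n: n.id == "d") # => True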
def paths(graph, id1, id2, length=4, path=None, _root=True):
    """ Returns a list of paths from node with id1 to node with id2.
        Only paths shorter than or equal to the given length are included.
        Uses a brute-force DFS approach (performance drops exponentially for longer paths).
    """
    path = path or []
    if len(path) >= length:
        return []
if id1 not in graph:
return []
if id1 == id2:
return [path + [id1]]
path = path + [id1]
p = []
s = set(path) # 5% speedup.
for node in graph[id1].links:
if node.id not in s:
p.extend(paths(graph, node.id, id2, length, path, False))
return _root and sorted(p, key=len) or p
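# Example (sketch, commented out): all paths of length <= 3 between two nodes.
#
#   g = Graph()
#   for id1, id2 in [("a", "b"), ("b", "c"), ("a", "c")]:
#       g.add_edge(id1, id2)
#   print(paths(g, "a", "c", length=3)) # => [["a", "c"], ["a", "b", "c"]]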
def edges(path):
""" Returns an iterator of Edge objects for the given list of nodes.
It yields None where two successive nodes are not connected.
"""
# For example, the distance (i.e., edge weight sum) of a path:
# sum(e.weight for e in edges(path))
return len(path) > 1 and (n.links.edge(path[i+1]) for i,n in enumerate(path[:-1])) or iter(())
#--- GRAPH THEORY ------------------------------------------------------------------------------------
def adjacency(graph, directed=False, reversed=False, stochastic=False, heuristic=None):
""" Returns a dictionary indexed by node id1's,
in which each value is a dictionary of connected node id2's linking to the edge weight.
If directed=True, edges go from id1 to id2, but not the other way.
If stochastic=True, all the weights for the neighbors of a given node sum to 1.
A heuristic function can be given that takes two node id's and returns
an additional cost for movement between the two nodes.
"""
# Caching a heuristic from a method won't work.
# Bound method objects are transient,
# i.e., id(object.method) returns a new value each time.
if graph._adjacency is not None and \
graph._adjacency[1:] == (directed, reversed, stochastic, heuristic and id(heuristic)):
return graph._adjacency[0]
    adj = {}
    for n in graph.nodes:
        adj[n.id] = {}
    for e in graph.edges:
        id1, id2 = not reversed and (e.node1.id, e.node2.id) or (e.node2.id, e.node1.id)
        adj[id1][id2] = 1.0 - 0.5 * e.weight
        if heuristic:
            adj[id1][id2] += heuristic(id1, id2)
        if not directed:
            adj[id2][id1] = adj[id1][id2]
    if stochastic:
        for id1 in adj:
            n = sum(adj[id1].values())
            for id2 in adj[id1]:
                adj[id1][id2] /= n
    # Cache the adjacency map: this makes dijkstra_shortest_path() 2x faster in repeated use.
    graph._adjacency = (adj, directed, reversed, stochastic, heuristic and id(heuristic))
    return adj
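# Example (sketch, commented out): the adjacency map is a plain dict of dicts,
# so the traversal cost of an edge can be looked up as adj[id1][id2].
#
#   g = Graph()
#   g.add_edge("a", "b")
#   A = adjacency(g)
#   print(A["a"]["b"]) # => 1.0 - 0.5 * e.weight for the a-b edge.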
def dijkstra_shortest_path(graph, id1, id2, heuristic=None, directed=False):
""" Dijkstra algorithm for finding the shortest path between two nodes.
Returns a list of node id's, starting with id1 and ending with id2.
Raises an IndexError between nodes on unconnected graphs.
"""
# Based on: Connelly Barnes, http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466
    def flatten(lst):
        # Flattens a linked list of the form (0, (1, (2, ()))).
        while len(lst) > 0:
            yield lst[0]
            lst = lst[1]
G = adjacency(graph, directed=directed, heuristic=heuristic)
q = [(0, id1, ())] # Heap of (cost, path_head, path_rest).
visited = set() # Visited nodes.
while True:
(cost1, n1, path) = heappop(q)
if n1 not in visited:
visited.add(n1)
if n1 == id2:
return list(flatten(path))[::-1] + [n1]
path = (n1, path)
            for (n2, cost2) in G[n1].items():
if n2 not in visited:
heappush(q, (cost1 + cost2, n2, path))
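# Example (sketch, commented out): shortest route between two node id's.
#
#   g = Graph()
#   for id1, id2 in [("a", "b"), ("b", "c"), ("a", "d"), ("d", "c")]:
#       g.add_edge(id1, id2)
#   print(dijkstra_shortest_path(g, "a", "c")) # => e.g., ["a", "b", "c"]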
def dijkstra_shortest_paths(graph, id, heuristic=None, directed=False):
""" Dijkstra algorithm for finding the shortest paths from the given node to all other nodes.
Returns a dictionary of node id's, each linking to a list of node id's (i.e., the path).
"""
# Based on: Dijkstra's algorithm for shortest paths modified from Eppstein.
# Based on: NetworkX 1.4.1: Aric Hagberg, Dan Schult and Pieter Swart.
# This is 5x faster than:
# for n in g: dijkstra_shortest_path(g, id, n.id)
W = adjacency(graph, directed=directed, heuristic=heuristic)
Q = [] # Use Q as a heap with (distance, node id)-tuples.
D = {} # Dictionary of final distances.
P = {} # Dictionary of paths.
P[id] = [id]
seen = {id: 0}
heappush(Q, (0, id))
while Q:
(dist, v) = heappop(Q)
if v in D: continue
D[v] = dist
        for w in W[v]:
vw_dist = D[v] + W[v][w]
if w not in D and (w not in seen or vw_dist < seen[w]):
seen[w] = vw_dist
heappush(Q, (vw_dist, w))
P[w] = P[v] + [w]
for n in graph:
if n not in P: P[n]=None
return P
def floyd_warshall_all_pairs_distance(graph, heuristic=None, directed=False):
""" Floyd-Warshall's algorithm for finding the path length for all pairs for nodes.
Returns a dictionary of node id's,
each linking to a dictionary of node id's linking to path length.
"""
    from collections import defaultdict
g = graph.keys()
d = defaultdict(lambda: defaultdict(lambda: 1e30)) # float('inf')
p = defaultdict(dict) # Predecessors.
for e in graph.edges:
u = e.node1.id
v = e.node2.id
w = 1.0 - 0.5 * e.weight
w = heuristic and heuristic(u, v) + w or w
d[u][v] = min(w, d[u][v])
d[u][u] = 0
p[u][v] = u
if not directed:
d[v][u] = min(w, d[v][u])
p[v][u] = v
for w in g:
dw = d[w]
for u in g:
du, duw = d[u], d[u][w]
for v in g:
# Performance optimization, assumes d[w][v] > 0.
#if du[v] > duw + dw[v]:
if du[v] > duw and du[v] > duw + dw[v]:
d[u][v] = duw + dw[v]
p[u][v] = p[w][v]
class pdict(dict):
def __init__(self, predecessors, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.predecessors = predecessors
    return pdict(p, ((u, dict((v, w) for v, w in d[u].items() if w < 1e30)) for u in d))
def predecessor_path(tree, u, v):
""" Returns the path between node u and node v as a list of node id's.
The given tree is the return value of floyd_warshall_all_pairs_distance().predecessors.
"""
def _traverse(u, v):
w = tree[u][v]
if w == u:
return []
return _traverse(u,w) + [w] + _traverse(w,v)
return [u] + _traverse(u,v) + [v]
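# Example (sketch, commented out): all-pairs path lengths, with the actual
# path recovered from the predecessor tree.
#
#   g = Graph()
#   for id1, id2 in [("a", "b"), ("b", "c")]:
#       g.add_edge(id1, id2)
#   d = floyd_warshall_all_pairs_distance(g)
#   print(d["a"]["c"]) # Path length a -> c.
#   print(predecessor_path(d.predecessors, "a", "c")) # => ["a", "b", "c"]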
def brandes_betweenness_centrality(graph, normalized=True, directed=False):
""" Betweenness centrality for nodes in the graph.
        Betweenness centrality is a measure of the number of shortest paths that pass through a node.
Nodes in high-density areas will get a good score.
"""
# Ulrik Brandes, A Faster Algorithm for Betweenness Centrality,
# Journal of Mathematical Sociology 25(2):163-177, 2001,
# http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
# Based on: Dijkstra's algorithm for shortest paths modified from Eppstein.
# Based on: NetworkX 1.0.1: Aric Hagberg, Dan Schult and Pieter Swart.
# http://python-networkx.sourcearchive.com/documentation/1.0.1/centrality_8py-source.html
W = adjacency(graph, directed=directed)
b = dict.fromkeys(graph, 0.0)
for id in graph:
Q = [] # Use Q as a heap with (distance, node id)-tuples.
D = {} # Dictionary of final distances.
P = {} # Dictionary of paths.
for n in graph: P[n]=[]
seen = {id: 0}
heappush(Q, (0, id, id))
S = []
E = dict.fromkeys(graph, 0) # sigma
E[id] = 1.0
while Q:
(dist, pred, v) = heappop(Q)
if v in D:
continue
D[v] = dist
S.append(v)
E[v] += E[pred]
for w in W[v]:
vw_dist = D[v] + W[v][w]
if w not in D and (w not in seen or vw_dist < seen[w]):
seen[w] = vw_dist
heappush(Q, (vw_dist, v, w))
P[w] = [v]
E[w] = 0.0
elif vw_dist == seen[w]: # Handle equal paths.
P[w].append(v)
E[w] += E[v]
d = dict.fromkeys(graph, 0.0)
for w in reversed(S):
for v in P[w]:
d[v] += (1.0 + d[w]) * E[v] / E[w]
if w != id:
b[w] += d[w]
# Normalize between 0.0 and 1.0.
m = normalized and max(b.values()) or 1
b = dict((id, w/m) for id, w in b.items())
return b
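# Example (sketch, commented out): the hub of a star-shaped graph lies on all
# shortest paths between the leaves, so it gets the highest (normalized) score.
#
#   g = Graph()
#   for id1, id2 in [("a", "b"), ("b", "c"), ("b", "d")]:
#       g.add_edge(id1, id2)
#   print(brandes_betweenness_centrality(g)) # "b" scores 1.0.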
def eigenvector_centrality(graph, normalized=True, reversed=True, rating={}, iterations=100, tolerance=0.0001):
""" Eigenvector centrality for nodes in the graph (cfr. Google's PageRank).
Eigenvector centrality is a measure of the importance of a node in a directed network.
It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes.
Nodes with no incoming connections have a score of zero.
If you want to measure outgoing connections, reversed should be False.
"""
# Based on: NetworkX, Aric Hagberg (hagberg@lanl.gov)
# http://python-networkx.sourcearchive.com/documentation/1.0.1/centrality_8py-source.html
# Note: much faster than betweenness centrality (which grows exponentially).
def normalize(vector):
w = 1.0 / (sum(vector.values()) or 1)
for node in vector:
vector[node] *= w
return vector
G = adjacency(graph, directed=True, reversed=reversed)
v = normalize(dict([(n, random()) for n in graph])) # Node ID => weight vector.
# Eigenvector calculation using the power iteration method: y = Ax.
# It has no guarantee of convergence.
for i in range(iterations):
v0 = v
v = dict.fromkeys(v0.keys(), 0)
for n1 in v:
for n2 in G[n1]:
v[n1] += 0.01 + v0[n2] * G[n1][n2] * rating.get(n1, 1)
normalize(v)
e = sum([abs(v[n]-v0[n]) for n in v]) # Check for convergence.
if e < len(G) * tolerance:
# Normalize between 0.0 and 1.0.
m = normalized and max(v.values()) or 1
v = dict((id, w/m) for id, w in v.items())
return v
warn("node weight is 0 because eigenvector_centrality() did not converge.", Warning)
return dict((n, 0) for n in G)
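# Example (sketch, commented out): in a directed chain a -> b -> c,
# node "c" receives the most (indirect) incoming weight.
#
#   g = Graph()
#   g.add_edge("a", "b")
#   g.add_edge("b", "c")
#   print(eigenvector_centrality(g)) # "c" scores highest.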
# a | b => all elements from a and all the elements from b.
# a & b => elements that appear in a as well as in b.
# a - b => elements that appear in a but not in b.
def union(a, b):
return list(set(a) | set(b))
def intersection(a, b):
return list(set(a) & set(b))
def difference(a, b):
return list(set(a) - set(b))
def partition(graph):
""" Returns a list of unconnected subgraphs.
"""
# Creates clusters of nodes and directly connected nodes.
# Iteratively merges two clusters if they overlap.
g = []
for n in graph.nodes:
        g.append(dict.fromkeys((x.id for x in n.flatten()), True))
for i in reversed(range(len(g))):
for j in reversed(range(i+1, len(g))):
if g[i] and g[j] and len(intersection(g[i], g[j])) > 0:
g[i] = union(g[i], g[j])
g[j] = []
g = [graph.copy(nodes=[graph[id] for id in n]) for n in g if n]
    g.sort(key=len, reverse=True)
return g
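# Example (sketch, commented out): two disconnected pairs yield two subgraphs.
#
#   g = Graph()
#   g.add_edge("a", "b")
#   g.add_edge("c", "d")
#   print([len(sub) for sub in partition(g)]) # => [2, 2]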
#--- GRAPH THEORY | CLIQUE ---------------------------------------------------------------------------
def is_clique(graph):
""" A clique is a set of nodes in which each node is connected to all other nodes.
"""
#for n1 in graph.nodes:
# for n2 in graph.nodes:
# if n1 != n2 and graph.edge(n1.id, n2.id) is None:
# return False
return graph.density == 1.0
def clique(graph, id):
""" Returns the largest possible clique for the node with given id.
"""
if isinstance(id, Node):
id = id.id
a = [id]
for n in graph.nodes:
try:
# Raises StopIteration if all nodes in the clique are connected to n:
            next(id for id in a if n.id == id or graph.edge(n.id, id) is None)
except StopIteration:
a.append(n.id)
return a
def cliques(graph, threshold=3):
""" Returns all cliques in the graph with at least the given number of nodes.
"""
a = []
for n in graph.nodes:
c = clique(graph, n.id)
if len(c) >= threshold:
c.sort()
if c not in a: a.append(c)
return a
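# Example (sketch, commented out): the triangle a-b-c is the only clique with
# at least three nodes; "d" hangs off the triangle and is excluded.
#
#   g = Graph()
#   for id1, id2 in [("a", "b"), ("b", "c"), ("a", "c"), ("c", "d")]:
#       g.add_edge(id1, id2)
#   print(cliques(g, threshold=3)) # => [["a", "b", "c"]]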
#--- GRAPH MAINTENANCE -------------------------------------------------------------------------------
# Utility commands for safe linking and unlinking of nodes,
# with respect for the surrounding nodes.
def unlink(graph, node1, node2=None):
""" Removes the edges between node1 and node2.
If only node1 is given, removes all edges to and from it.
This does not remove node1 from the graph.
"""
if not isinstance(node1, Node):
node1 = graph[node1]
if not isinstance(node2, Node) and node2 is not None:
node2 = graph[node2]
for e in list(graph.edges):
if node1 in (e.node1, e.node2) and node2 in (e.node1, e.node2, None):
graph.edges.remove(e)
try:
node1.links.remove(node2)
node2.links.remove(node1)
    except (AttributeError, ValueError): # node2 is None, or the nodes were not directly linked.
pass
def redirect(graph, node1, node2):
""" Connects all of node1's edges to node2 and unlinks node1.
"""
if not isinstance(node1, Node):
node1 = graph[node1]
if not isinstance(node2, Node):
node2 = graph[node2]
    for e in list(graph.edges): # Copy: _add_edge_copy() grows graph.edges while we iterate.
if node1 in (e.node1, e.node2):
if e.node1 == node1 and e.node2 != node2:
graph._add_edge_copy(e, node1=node2, node2=e.node2)
if e.node2 == node1 and e.node1 != node2:
graph._add_edge_copy(e, node1=e.node1, node2=node2)
unlink(graph, node1)
def cut(graph, node):
""" Unlinks the given node, but keeps edges intact by connecting the surrounding nodes.
If A, B, C, D are nodes and A->B, B->C, B->D, if we then cut B: A->C, A->D.
"""
if not isinstance(node, Node):
node = graph[node]
    for e in list(graph.edges): # Copy: _add_edge_copy() grows graph.edges while we iterate.
if node in (e.node1, e.node2):
for n in node.links:
if e.node1 == node and e.node2 != n:
graph._add_edge_copy(e, node1=n, node2=e.node2)
if e.node2 == node and e.node1 != n:
graph._add_edge_copy(e, node1=e.node1, node2=n)
unlink(graph, node)
def insert(graph, node, a, b):
""" Inserts the given node between node a and node b.
If A, B, C are nodes and A->B, if we then insert C: A->C, C->B.
"""
if not isinstance(node, Node):
node = graph[node]
if not isinstance(a, Node):
a = graph[a]
if not isinstance(b, Node):
b = graph[b]
    for e in list(graph.edges): # Copy: _add_edge_copy() grows graph.edges while we iterate.
if e.node1 == a and e.node2 == b:
graph._add_edge_copy(e, node1=a, node2=node)
graph._add_edge_copy(e, node1=node, node2=b)
if e.node1 == b and e.node2 == a:
graph._add_edge_copy(e, node1=b, node2=node)
graph._add_edge_copy(e, node1=node, node2=a)
unlink(graph, a, b)
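# Example (sketch, commented out): cut() removes a node's links but bridges
# its neighbors; insert() splices a node back into an existing edge.
#
#   g = Graph()
#   for id1, id2 in [("a", "b"), ("b", "c")]:
#       g.add_edge(id1, id2)
#   cut(g, "b") # b is unlinked; now a - c.
#   insert(g, "b", "a", "c") # Splice b back in: a - b - c.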
| pepsipepsi/nodebox_opengl_python3 | nodebox/graphics/physics.py | Python | bsd-3-clause | 77,201 | ["VisIt"] | c3d57ea8dabb8753f9f7fd5b578de89c6dbdc4047e97834554baf58aee3bd762 |
##
## Script that showcases a flight to the Hyades, proper motions, and
## a zoom-out of the galaxy.
##
from py4j.java_gateway import JavaGateway, GatewayParameters
gateway = JavaGateway(gateway_parameters=GatewayParameters(auto_convert=True))
gs = gateway.entry_point
## PREPPING
gs.disableInput()
gs.cameraStop()
gs.minimizeInterfaceWindow()
gs.setSmoothLodTransitions(True)
gs.setFov(60)
gs.setRotationCameraSpeed(20)
gs.setTurningCameraSpeed(20)
gs.setCameraSpeed(20)
gs.setSimulationPace(0)
gs.setStarBrightness(40.0)
gs.setStarSize(6.0)
gs.setBrightnessLevel(0)
gs.setContrastLevel(1)
gs.setAmbientLight(0)
gs.setBloom(0)
gs.setMotionBlur(False)
gs.setSaturationLevel(1.0)
gs.setVisibility("element.clusters", False)
gs.setVisibility("element.propermotions", False)
gs.setRotationCameraSpeed(40)
gs.setTurningCameraSpeed(30)
gs.setCameraSpeed(30)
gs.setVisibility('element.labels',False)
gs.setVisibility('element.constellations',False)
gs.setSimulationTime(2018,4,25,10,0,0,0)
gs.setRotationCameraSpeed(20)
gs.setTurningCameraSpeed(20)
gs.setCameraSpeed(30)
gs.setSaturationLevel(2)
gs.setVisibility('element.planets',True)
gs.setVisibility('element.moons',True)
gs.setCinematicCamera(True)
gs.setCameraFocusInstantAndGo("Sol")
gs.sleep(2)
gs.setCameraFocusInstant("Bellatrix")
gs.sleep(2)
gs.sleep(2)
gs.setCameraLock(True)
gs.setVisibility('element.labels',False)
gs.setVisibility('element.planets',False)
gs.setVisibility('element.constellations',True)
gs.sleep(7)
gs.setCameraFocus("78The2Tau")
gs.sleep(3)
gs.setVisibility('element.constellations',False)
gs.setCameraSpeed(0.5)
gs.sleep(5)
gs.setVisibility("element.clusters", False)
gs.goToObject("78The2Tau", 0.000007778, 25)
gs.setSaturationLevel(1.7)
gs.setCameraLock(True)
gs.sleep(7)
gs.startSimulationTime()
def frange(x, y, jump):
while x < y:
yield x
x += jump
for speed in frange(0.0, 0.01, 0.00025):
gs.cameraRotate(speed,0)
gs.sleep(0.1)
gs.sleep(20)
gs.cameraStop()
gs.setCameraLock(False)
gs.setCameraFree()
gs.sleep(5)
gs.setProperMotionsNumberFactor(2000)
gs.sleep(3)
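# Ramp the simulation pace up step by step so that the stellar proper motions
# become visible (assumption: setSimulationPace() sets the simulated-to-real
# time factor).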
for pace in frange(.1e11, .5e12, .1e11):
gs.setSimulationPace(pace)
gs.sleep(0.1)
gs.sleep(15)
gs.cameraRotate(0.1, 0.0)
gs.sleep(10)
gs.setCameraSpeed(2.0)
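# Fly backwards away from the Sun with slowly increasing thrust while easing
# the saturation back to 1.0; the loop bails out once getDistanceTo("Sol")
# exceeds a hard-coded threshold (assumed to be in Gaia Sky's internal
# distance units).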
fwdval = 0.1
saturation = 2.0
for t in range(0, 500000):
gs.cameraForward(-fwdval)
gs.setSaturationLevel(saturation)
gs.sleep(0.1)
saturation -= 0.01
if saturation <= 1.0:
saturation = 1.0
fwdval += 0.1
if fwdval >= 1.0:
fwdval = 1.0
if gs.getDistanceTo("Sol") > 6622825900000000000:
break
gs.sleep(5)
gs.cameraStop()
gs.stopSimulationTime()
gs.sleep(2)
##
## CLEAN UP AND FINISH
##
gs.setFrameOutput(False)
gs.enableInput()
gs.maximizeInterfaceWindow()
gs.setSimulationPace(1)
gs.setStarBrightness(27.0)
gs.setStarSize(8.0)
gs.setRotationCameraSpeed(40)
gs.setTurningCameraSpeed(24)
gs.setCameraSpeed(40)
gs.setBrightnessLevel(0)
gs.setContrastLevel(1)
gs.setAmbientLight(0)
gs.setBloom(0)
gs.setMotionBlur(False)
gs.setFov(60)
gs.setSaturationLevel(1.0)
gs.setVisibility('element.planets',True)
gs.setVisibility('element.moons',True)
# close gateway
gateway.close()
| ari-zah/gaiasky | assets/scripts/showcases/flight-hyades-mw.py | Python | lgpl-3.0 | 3,340 | ["Galaxy"] | f5ca800cf016e4491b67fbb77ce796a49ff8ba6d9bb1e302a38f7247de1e2a0f |