text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
#!/usr/bin/python
#
# Copyright 2015 John Kendrick
#
# This file is part of PDielec
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# You should have received a copy of the MIT License
# along with this program, if not see https://opensource.org/licenses/MIT
#
"""Read contents of a directory containing Crystal input and output files"""
import re
import math
import os
import numpy as np
from PDielec.Constants import amu, hartree2ev
from PDielec.UnitCell import UnitCell
from PDielec.GenericOutputReader import GenericOutputReader
from PDielec.IO import pdielec_io
class CrystalOutputReader(GenericOutputReader):
    """Read contents of a directory containing Crystal input and output files"""

    def __init__(self, filenames):
        # Initialise the generic reader state first, then record the
        # Crystal-specific settings used elsewhere in PDielec.
        GenericOutputReader.__init__(self, filenames)
        self.type = 'Crystal output'              # human-readable reader type
        self.hessian_symmetrisation = 'crystal'   # hessian symmetrisation scheme
        self._fractional_coordinates = []         # filled by _read_fractional_coordinates
        return
def _read_output_files(self):
"""Read the Crystal files in the directory"""
self.manage = {} # Empty the dictionary matching phrases
self.manage['masses'] = (re.compile(' ATOMS ISOTOPIC MASS'), self._read_masses)
self.manage['lattice'] = (re.compile(' DIRECT LATTICE VECTORS CARTESIAN COMPONENTS'), self._read_lattice_vectors)
self.manage['fractional'] = (re.compile(' ATOMS IN THE ASYMMETRIC UNIT'), self._read_fractional_coordinates)
self.manage['bornCharges'] = (re.compile(' ATOMIC BORN CHARGE TENSOR'), self._read_born_charges)
self.manage['eigenvectors'] = (re.compile(' NORMAL MODES NORMALIZ'), self._read_eigenvectors)
self.manage['staticIonic'] = (re.compile(' SUM TENSOR OF THE VIBRATIONAL CONTRIBUTIONS TO '), self._read_ionic_dielectric)
self.manage['epsilon'] = (re.compile(' SUSCEPTIBILITY '), self._read_epsilon)
self.manage['kpoints'] = (re.compile(' SHRINK\. FACT\.\('), self._read_kpoints)
self.manage['electrons'] = (re.compile(' N\. OF ELECTRONS'), self._read_electrons)
self.manage['energy'] = (re.compile(' TOTAL ENERGY\(DFT\)'), self._read_energy)
self.manage['energy2'] = (re.compile(' TOTAL ENERGY + DISP'), self._read_energy2)
self.manage['energy3'] = (re.compile(' *CENTRAL POINT'), self._read_energy3)
for f in self._outputfiles:
self._read_output_file(f)
return
def _read_energy(self, line):
self.final_free_energy = hartree2ev*float(line.split()[3])
self.final_energy_without_entropy = hartree2ev*float(line.split()[3])
def _read_energy2(self, line):
self.final_free_energy = hartree2ev*float(line.split()[5])
self.final_energy_without_entropy = hartree2ev*float(line.split()[5])
def _read_energy3(self, line):
self.final_free_energy = hartree2ev*float(line.split()[2])
self.final_energy_without_entropy = hartree2ev*float(line.split()[2])
def _read_electrons(self, line):
self.electrons = int(line.split()[5])
def _read_kpoints(self, line):
self.kpoint_grid = [ int(line.split()[2]), int(line.split()[3]), int(line.split()[4]) ]
self.kpoints = int(line.split()[12])
def _read_epsilon(self, line):
self.file_descriptor.readline()
self.file_descriptor.readline()
optical_dielectric = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
linea = self.file_descriptor.readline().split()
while len(linea) > 0:
component = linea[0]
# Problems with Crystal17 file format
if component == "0.000":
component = linea[1]
epsilon = float(linea[4])
else:
epsilon = float(linea[3])
if component == "XX": optical_dielectric[0][0] = epsilon
if component == "YY": optical_dielectric[1][1] = epsilon
if component == "ZZ": optical_dielectric[2][2] = epsilon
if component == "XY": optical_dielectric[0][1] = epsilon
if component == "XZ": optical_dielectric[0][2] = epsilon
if component == "YZ": optical_dielectric[1][2] = epsilon
# read the next line
linea = self.file_descriptor.readline().split()
# symmetrize
optical_dielectric[1][0] = optical_dielectric[0][1]
optical_dielectric[2][0] = optical_dielectric[0][2]
optical_dielectric[2][1] = optical_dielectric[1][2]
self.zerof_optical_dielectric = optical_dielectric
return
def _read_masses(self, line):
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
n = 2
self.masses_per_type = [ 0 for i in range(self.nspecies) ]
for i in range(self.nions):
if n > 11:
line = self.file_descriptor.readline()
n = 2
mass = float(line.split()[n])
self.masses.append(mass)
self.masses_per_type[self.atom_type_list[i]] = mass
n = n + 3
return
def _read_eigenvectors(self, line):
    """Read the normal modes, preferring HESSFREQ.DAT when it exists.

    HESSFREQ.DAT holds the full hessian at machine precision, whereas
    the modes printed in the output file are truncated, so the .DAT
    file is used whenever it is present.
    """
    hessfreq = self.open_directory + "/HESSFREQ.DAT"
    if os.path.isfile(hessfreq):
        self._read_hessfreq_dat(hessfreq)
    else:
        # Fall back to the lower-precision modes printed in the output file
        self._read_output_eigenvectors(line)
    return
def _read_hessfreq_dat(self, filename):
    """Read the hessian from HESSFREQ.DAT and build the dynamical matrix.

    The file holds the 3N x 3N hessian, four values per line.  Each
    element is mass weighted before being handed to
    self._dynamical_matrix, which symmetrises, projects, diagonalises
    and stores the frequencies and normal modes.

    :param filename: path to the HESSFREQ.DAT file
    """
    fd2 = pdielec_io(filename, 'r')
    nmodes = self.nions * 3
    # Create a mass weighting vector: 1/sqrt(m) repeated for x, y, z
    massweight = np.zeros(nmodes)
    index = 0
    for atom in range(self.nions):
        weight = 1.0 / math.sqrt(self.masses[atom] * amu)
        for _ in range(3):
            massweight[index] = weight
            index += 1
    # Read the hessian and store the mass-weighted matrix.
    # Split each line exactly once (the original code re-split the whole
    # line for every element, which is O(line length) per element).
    hessian = np.zeros((nmodes, nmodes))
    values = fd2.readline().split()
    pos = 0
    for i in range(nmodes):
        for j in range(nmodes):
            hessian[i, j] = massweight[i] * massweight[j] * float(values[pos])
            pos += 1
            if pos >= 4:
                # four entries per line - fetch the next one
                values = fd2.readline().split()
                pos = 0
    fd2.close()
    # symmetrise, project, diagonalise and store the frequencies and normal modes
    self._dynamical_matrix(hessian)
    return
def _read_output_eigenvectors(self, line):
    """Read the normal modes printed in the output file.

    The modes are printed in column blocks of up to 6: a block header
    carries the frequencies and the following rows carry the x/y/z
    displacement of each atom for every mode in the block.  The modes
    are mass weighted, normalised and stored in
    self.mass_weighted_normal_modes; the frequencies in self.frequencies.
    """
    self.file_descriptor.readline()
    self.frequencies = []
    self.mass_weighted_normal_modes = []
    n = 6
    nmodes = 3 * self.nions
    # NOTE(review): self.frequencies is re-initialised here; the assignment
    # above appears redundant but is kept byte-for-byte.
    self.frequencies = []
    # Loop over all the modes
    for j in range(nmodes):
        # They come in columns of 6 so after 6 modes we read the frequencies and initialise the 6 new modes
        n = n + 1
        if n >= 6:
            n = 0
            # Block header: frequencies for the (up to 6) modes of this block
            linef = self.file_descriptor.readline().split()[1:]
            linef = [float(f) for f in linef]
            self.file_descriptor.readline()
            atoms = []
            for f in linef:
                self.frequencies.append(f)
                atoms.append([])
            # Read through the XYZ components for each atom and store the mode in atoms
            for i in range(self.nions):
                # multiply by sqrt(mass) to form mass-weighted displacements
                mass = np.sqrt(self.masses[i])
                # The X row carries the atom index/label, so more fields are skipped
                linex = self.file_descriptor.readline().split()[4:]
                liney = self.file_descriptor.readline().split()[1:]
                linez = self.file_descriptor.readline().split()[1:]
                for a, x, y, z in zip(atoms, linex, liney, linez):
                    x = float(x) * mass
                    y = float(y) * mass
                    z = float(z) * mass
                    a.append([x, y, z])
            for a in atoms:
                self.mass_weighted_normal_modes.append(a)
            self.file_descriptor.readline()
        # end of if n >= 6
    # Normalise each mode so the sum of squared components is 1
    for i, mode in enumerate(self.mass_weighted_normal_modes):
        marray = np.array(mode)
        sumxyz = 0.0
        for atoms in marray:
            for xyz in atoms:
                sumxyz = sumxyz + xyz*xyz
        marray = marray / np.sqrt(sumxyz)
        self.mass_weighted_normal_modes[i] = marray.tolist()
    return
def _read_born_charges(self, line):
    """Read the Born charges, preferring BORN.DAT when it exists.

    Each row of the stored tensor refers to a given field direction and
    each column to an atomic displacement, i.e.
        [[a1x a1y a1z]
         [a2x a2y a2z]
         [a3x a3y a3z]]
    where 1, 2, 3 are the field directions and x, y, z the displacements.
    BORN.DAT holds the tensors at machine precision, whereas the values
    printed in the output file are truncated, so the .DAT file is used
    whenever it is present.
    """
    born_file = self.open_directory + "/BORN.DAT"
    if os.path.isfile(born_file):
        self._read_born_charges_from_born_dat(born_file)
    else:
        # Fall back to the lower-precision tensors printed in the output file
        self._read_born_charges_from_output(line)
    return
def _read_born_charges_from_born_dat(self, filename):
    """Read one 3x3 Born charge tensor per ion from BORN.DAT.

    The file stores the tensor with displacements as rows; it is
    transposed so that rows are field directions, matching the layout
    documented in _read_born_charges.

    :param filename: path to the BORN.DAT file
    """
    fd2 = pdielec_io(filename, 'r')
    self.born_charges = []
    for _ in range(self.nions):
        rows = []
        for _ in range(3):
            # Split each line once instead of three times
            values = fd2.readline().split()
            rows.append([float(values[0]), float(values[1]), float(values[2])])
        # Transpose so that rows become field directions
        self.born_charges.append(np.array(rows).T.tolist())
    fd2.close()
    return
def _read_born_charges_from_output(self, line):
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
self.born_charges = []
for i in range(self.nions):
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
b = []
b.append([float(line.split()[1]), float(line.split()[2]), float(line.split()[3])])
line = self.file_descriptor.readline()
b.append([float(line.split()[1]), float(line.split()[2]), float(line.split()[3])])
line = self.file_descriptor.readline()
b.append([float(line.split()[1]), float(line.split()[2]), float(line.split()[3])])
line = self.file_descriptor.readline()
self.born_charges.append(b)
line = self.file_descriptor.readline()
return
def _read_ionic_dielectric(self, line):
# Read the ionic contribution to the static dielectric
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
self.zerof_static_dielectric = []
self.zerof_static_dielectric.append([float(f) for f in line.split()[0:3]])
line = self.file_descriptor.readline()
self.zerof_static_dielectric.append([float(f) for f in line.split()[0:3]])
line = self.file_descriptor.readline()
self.zerof_static_dielectric.append([float(f) for f in line.split()[0:3]])
# a = np.array(ionic_dielectric)
# self.zerof_static_dielectric = a.tolist()
return
def _read_lattice_vectors(self, line):
    """Read the direct lattice vectors and store a new unit cell.

    The fractional coordinates and species are printed before the
    lattice vectors in the Crystal output, so the stored values from
    _read_fractional_coordinates are used to complete the new cell.
    """
    self.file_descriptor.readline()
    vectors = []
    for _ in range(3):
        # Split each line once instead of three times
        values = self.file_descriptor.readline().split()
        vectors.append([float(values[0]), float(values[1]), float(values[2])])
    avector, bvector, cvector = vectors
    self.unit_cells.append(UnitCell(avector, bvector, cvector))
    self.ncells = len(self.unit_cells)
    self.volume = self.unit_cells[-1].volume
    # The fractional coordinates are specified before the lattice vectors
    self.unit_cells[-1].set_fractional_coordinates(self._fractional_coordinates)
    self.unit_cells[-1].set_element_names(self.species_list)
    return
def _read_fractional_coordinates(self, line):
self.nions = int(line.split()[12])
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
self._fractional_coordinates = []
self.species = []
self.species_list = []
self.atom_type_list = []
for i in range(self.nions):
line = self.file_descriptor.readline()
species = line.split()[3].capitalize()
self.species_list.append(species)
if species not in self.species:
self.species.append(species)
species_index = self.species.index(species)
self.atom_type_list.append(species_index)
self._fractional_coordinates.append([float(line.split()[4]), float(line.split()[5]), float(line.split()[6])])
self.nspecies = len(self.species)
self.ions_per_type = [ 0 for i in range(self.nspecies) ]
for species_index in self.atom_type_list:
self.ions_per_type[species_index] += 1
return
| JohnKendrick/PDielec | PDielec/CrystalOutputReader.py | Python | mit | 14,704 | [
"CRYSTAL"
] | 6814bf8bc3fc907d56d36d37f949942bb5cea730c102b208bf2b0b9d14b52fe1 |
# Support for the Numato Galatea - PCI Express Spartan 6 Development Board
# https://numato.com/product/galatea-pci-express-spartan-6-fpga-development-board
from collections import OrderedDict
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform, iMPACT
from third_party.litex.litex.build.xilinx.programmer import XC3SProg
# I/O constraint list for the Numato Galatea board: each entry maps a named
# resource to FPGA pins, an I/O standard and optional Misc constraints.
_io = [
    # ---------------------- Clocks ---------------------------
    # Clock U18 - 100MHz - CMOS Crystal Oscillator
    # NET "CLK1" LOC = "AB13" | IOSTANDARD = "LVCMOS33" | Period = 100 MHz; # Bank = 0
    ("clk100", 0, Pins("G9"), IOStandard("LVCMOS33")),
    # Clock U19 - 100MHz - CMOS Crystal Oscillator
    # NET "CLK2" LOC = "Y13" | IOSTANDARD = "LVCMOS33" | Period = 100 MHz; # Bank = 2
    ("clk2", 0, Pins("Y13"), IOStandard("LVCMOS33")),
    # Clock U17 - 100MHz - CMOS Crystal Oscillator
    # NET "CLK3" LOC = "AB13" | IOSTANDARD = "LVCMOS33" | Period = 100 MHz; # Bank = 2
    ("clk3", 0, Pins("AB13"), IOStandard("LVCMOS33")),
    # Clock U21 SMA Clock
    # NET "EXT_CLK1" LOC = "U12" | IOSTANDARD = "LVCMOS33"; # Bank = 2
    # Clock U6 SMA Clock
    # NET "EXT_CLK2" LOC = "T12" | IOSTANDARD = "LVCMOS33"; # Bank = 2

    # SW1
    # NET "RST_N" LOC = "AB19" | IOSTANDARD = "LVCMOS33" | PULLUP ; # Bank = 2
    ("cpu_reset", 0, Pins("AB19"), IOStandard("LVCMOS33"), Misc("PULLUP")),

    # ---------------------- UART -----------------------------
    # NET "UART_RX" LOC = "V17" | IOSTANDARD = "LVCMOS33"; # Bank = 2
    # NET "UART_TX" LOC = "W18" | IOSTANDARD = "LVCMOS33"; # Bank = 2
    ("serial", 0,
        Subsignal("rx", Pins("V17"), IOStandard("LVCMOS33")),
        Subsignal("tx", Pins("W18"), IOStandard("LVCMOS33")),
    ),

    # SPI Flash #
    # NET "SPI_Flash_SCK"  LOC = "Y20"  | IOSTANDARD = "LVCMOS33"; # Bank = 2
    # NET "SPI_Flash_MISO" LOC = "T14"  | IOSTANDARD = "LVCMOS33"; # Bank = 2
    # NET "SPI_Flash_MOSI" LOC = "AB20" | IOSTANDARD = "LVCMOS33"; # Bank = 2
    # NET "SPI_Flash_SS"   LOC = "AA3"  | IOSTANDARD = "LVCMOS33"; # Bank = 2
    ## onBoard Quad-SPI Flash
    ## N25Q128A13ESE40E - 128 Mb QSPI flash memory
    # QSPI_FLASH_SCLK Y20  LVCMOS33 16 FAST
    # QSPI_FLASH_IO0  AB20 LVCMOS33 16 FAST
    # QSPI_FLASH_IO1  AA20 LVCMOS33 16 FAST
    # QSPI_FLASH_IO2  R13  LVCMOS33 16 FAST
    # QSPI_FLASH_IO3  T14  LVCMOS33 16 FAST
    # QSPI_FLASH_SS   AA3  LVCMOS33 16 FAST
    ("spiflash1x", 0,
        # NET "SPI_Flash_SCK" LOC = "Y20" | IOSTANDARD = "LVCMOS33"; # Bank = 2
        Subsignal("clk", Pins("Y20")),
        # NET "SPI_Flash_SS" LOC = "AA3" | IOSTANDARD = "LVCMOS33"; # Bank = 2
        Subsignal("cs_n", Pins("AA3")),
        # NET "SPI_Flash_MISO" LOC = "T14" | IOSTANDARD = "LVCMOS33"; # Bank = 2
        # NET "SPI_Flash_MOSI" LOC = "AB20" | IOSTANDARD = "LVCMOS33"; # Bank = 2
        #Subsignal("dq", Pins("AB20", "AA20", "R13", "T14")),
        IOStandard("LVCMOS33"), Misc("SLEW=FAST")
    ),

    ## DDR3 Chip:0
    # MT41J128M16JT-125:K - 16 Meg x 16 x 8 Banks - DDR3-1600 11-11-11
    # FBGA Code: D9PSL, Part Number: MT41J128M16 - http://www.micron.com/support/fbga
    ("ddram_clock", 0,
        Subsignal("p", Pins("K4")),
        Subsignal("n", Pins("K3")),
        IOStandard("DIFF_SSTL15_II"), Misc("IN_TERM=NONE")
    ),
    ("ddram", 0,
        Subsignal("cke", Pins("F2"), IOStandard("SSTL15_II")),
        Subsignal("ras_n", Pins("M5"), IOStandard("SSTL15_II")),
        Subsignal("cas_n", Pins("M4"), IOStandard("SSTL15_II")),
        Subsignal("we_n", Pins("H2"), IOStandard("SSTL15_II")),
        Subsignal("ba", Pins("J3 J1 H1"), IOStandard("SSTL15_II")),
        Subsignal("a", Pins("K2 K1 K5 M6 H3 L4 M3 K6 G3 G1 J4 E1 F1 J6 H5"), IOStandard("SSTL15_II")),
        Subsignal("dq", Pins(
            "R3 R1 P2 P1 L3 L1 M2 M1",
            "T2 T1 U3 U1 W3 W1 Y2 Y1"), IOStandard("SSTL15_II")),
        Subsignal("dqs", Pins("N3 V2"), IOStandard("DIFF_SSTL15_II")),
        Subsignal("dqs_n", Pins("N1 V1"), IOStandard("DIFF_SSTL15_II")),
        Subsignal("dm", Pins("N4 P3"), IOStandard("SSTL15_II")),
        Subsignal("odt", Pins("L6"), IOStandard("SSTL15_II")),
        Subsignal("reset_n", Pins("E3"), IOStandard("LVCMOS15")),
        Misc("SLEW=FAST"),
        Misc("VCCAUX_IO=HIGH")
    ),

    ## DDR3 Chip:1
    # MT41J128M16JT-125:K - 16 Meg x 16 x 8 Banks - DDR3-1600 11-11-11
    # FBGA Code: D9PSL, Part Number: MT41J128M16 - http://www.micron.com/support/fbga
    ("ddram_clock", 1,
        Subsignal("p", Pins("K20")),
        Subsignal("n", Pins("L19")),
        IOStandard("DIFF_SSTL15_II"), Misc("IN_TERM=NONE")
    ),
    ("ddram", 1,
        Subsignal("cke", Pins("F21"), IOStandard("SSTL15_II")),
        Subsignal("ras_n", Pins("K21"), IOStandard("SSTL15_II")),
        Subsignal("cas_n", Pins("K22"), IOStandard("SSTL15_II")),
        Subsignal("we_n", Pins("K19"), IOStandard("SSTL15_II")),
        Subsignal("ba", Pins("K17 L17 K18"), IOStandard("SSTL15_II")),
        Subsignal("a", Pins("H21 H22 G22 J20 H20 M20 M19 G20 E20 E22 J19 H19 F22 G19 F20"), IOStandard("SSTL15_II")),
        Subsignal("dq", Pins(
            "R20 R22 P21 P22 L20 L22 M21 M22",
            "T21 T22 U20 U22 W20 W22 Y21 Y22"), IOStandard("SSTL15_II")),
        Subsignal("dqs", Pins("N20 V21"), IOStandard("DIFF_SSTL15_II")),
        Subsignal("dqs_n", Pins("N22 V22"), IOStandard("DIFF_SSTL15_II")),
        Subsignal("dm", Pins("N19 P20"), IOStandard("SSTL15_II")),
        Subsignal("odt", Pins("J22"), IOStandard("SSTL15_II")),
        Subsignal("reset_n", Pins("H18"), IOStandard("LVCMOS15")),
        Misc("SLEW=FAST"),
        Misc("VCCAUX_IO=HIGH")
    ),
]

# No expansion connectors are defined for this platform
_connectors = []
class Platform(XilinxPlatform):
    """LiteX platform definition for the Numato Galatea board."""

    name = "galatea"
    default_clk_name = "clk100"
    default_clk_period = 10.0

    # W25Q128FVEIG - component U3
    # 128M (16M x 8) - 104MHz
    # Pretends to be a Micron N25Q128 (ID 0x0018ba20)
    # FIXME: Create a "spi flash module" object in the same way we have SDRAM
    # module objects.
    spiflash_model = "n25q128"
    spiflash_read_dummy_bits = 10
    spiflash_clock_div = 4
    spiflash_total_size = int((128/8)*1024*1024)  # 128Mbit
    spiflash_page_size = 256
    spiflash_sector_size = 0x10000

    # The Galatea has a XC6SLX45 which bitstream takes up ~12Mbit (1484472 bytes)
    # 0x200000 offset (16Mbit) gives plenty of space
    gateware_size = 0x200000

    def __init__(self, programmer="openocd"):
        # XC6SLX45T-3FGG484C
        XilinxPlatform.__init__(self, "xc6slx45t-fgg484-3", _io, _connectors)
        self.programmer = programmer

        # Configure the bitstream generator's pin pull behaviour
        pin_configs = (
            ('ProgPin', 'PullUp'),
            ('DonePin', 'PullUp'),
            ('TckPin', 'PullNone'),
            ('TdiPin', 'PullNone'),
            ('TdoPin', 'PullNone'),
            ('TmsPin', 'PullNone'),
            ('UnusedPin', 'PullNone'),
        )
        for pin, pull in pin_configs:
            self.toolchain.bitgen_opt += " -g %s:%s " % (pin, pull)

        # FPGA AUX is connected to the 3.3V supply
        self.add_platform_command("""CONFIG VCCAUX="3.3";""")

    def create_programmer(self):
        # Return a programmer object matching the requested backend
        if self.programmer == "xc3sprog":
            return XC3SProg(cable='xpc')
        if self.programmer == "impact":
            return iMPACT()
        raise ValueError("{} programmer is not supported".format(self.programmer))

    def do_finalize(self, fragment):
        XilinxPlatform.do_finalize(self, fragment)
        # Constrain the oscillator clock source; ignore the constraint if the
        # clock was never requested by the design.
        try:
            self.add_period_constraint(self.lookup_request("clk100"), 10.0)
        except ConstraintError:
            pass
| mithro/HDMI2USB-litex-firmware | platforms/galatea.py | Python | bsd-2-clause | 8,108 | [
"CRYSTAL"
] | dd9cd7fe9ea951287851dd7f3cbe785fcca02d335c37ff17dd7f2311dfa18676 |
# pylint: disable=bad-continuation
"""
Certificate HTML webview.
"""
import logging
from datetime import datetime
from uuid import uuid4
import six
import pytz
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse
from django.template import RequestContext
from django.utils import translation
from django.utils.encoding import smart_str
from eventtracking import tracker
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from badges.events.course_complete import get_completion_badge
from badges.utils import badges_enabled
from edxmako.shortcuts import render_to_response
from edxmako.template import Template
from lms.djangoapps.certificates.api import (
emit_certificate_event,
get_active_web_certificate,
get_certificate_footer_context,
get_certificate_header_context,
get_certificate_template,
get_certificate_url
)
from lms.djangoapps.certificates.models import (
CertificateGenerationCourseSetting,
CertificateHtmlViewConfiguration,
CertificateSocialNetworks,
CertificateStatuses,
GeneratedCertificate
)
from lms.djangoapps.certificates.permissions import PREVIEW_CERTIFICATES
from lms.djangoapps.courseware.courses import get_course_by_id
from openedx.core.djangoapps.catalog.utils import get_course_run_details
from openedx.core.djangoapps.certificates.api import certificates_viewable_for_course, display_date_for_certificate
from openedx.core.djangoapps.lang_pref.api import get_closest_released_language
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.lib.courses import course_image_url
from student.models import LinkedInAddToProfileConfiguration
from util import organizations_helpers as organization_api
from util.date_utils import strftime_localized
from util.views import handle_500
# Module-level logger
log = logging.getLogger(__name__)
# Shorthand for the translation function used throughout this module
_ = translation.ugettext
# Template rendered when a requested certificate is invalid or unavailable
INVALID_CERTIFICATE_TEMPLATE_PATH = 'certificates/invalid.html'
def get_certificate_description(mode, certificate_type, platform_name):
    """
    :return certificate_type_description on the basis of current mode

    :param mode: enrollment mode ('honor', 'verified' or 'xseries')
    :param certificate_type: display name of the certificate type
    :param platform_name: display name of the platform
    :return: translated description string, or None for any other mode
    """
    certificate_type_description = None
    if mode == 'honor':
        # Translators: This text describes the 'Honor' course certificate type.
        certificate_type_description = _(u"An {cert_type} certificate signifies that a "
                                         u"learner has agreed to abide by the honor code established by {platform_name} "
                                         u"and has completed all of the required tasks for this course under its "
                                         u"guidelines.").format(cert_type=certificate_type,
                                                                platform_name=platform_name)
    elif mode == 'verified':
        # Translators: This text describes the 'ID Verified' course certificate type, which is a higher level of
        # verification offered by edX. This type of verification is useful for professional education/certifications
        certificate_type_description = _(u"A {cert_type} certificate signifies that a "
                                         u"learner has agreed to abide by the honor code established by {platform_name} "
                                         u"and has completed all of the required tasks for this course under its "
                                         u"guidelines. A {cert_type} certificate also indicates that the "
                                         u"identity of the learner has been checked and "
                                         u"is valid.").format(cert_type=certificate_type,
                                                              platform_name=platform_name)
    elif mode == 'xseries':
        # Translators: This text describes the 'XSeries' course certificate type. An XSeries is a collection of
        # courses related to each other in a meaningful way, such as a specific topic or theme, or even an organization
        certificate_type_description = _(u"An {cert_type} certificate demonstrates a high level of "
                                         u"achievement in a program of study, and includes verification of "
                                         u"the student's identity.").format(cert_type=certificate_type)
    return certificate_type_description
def _update_certificate_context(context, course, user_certificate, platform_name):
    """
    Build up the certificate web view context using the provided values
    (Helper method to keep the view clean)

    :param context: dict being populated for the template render
    :param course: the course the certificate was earned in
    :param user_certificate: the GeneratedCertificate being displayed
    :param platform_name: display name of the platform
    """
    # Populate dynamic output values using the course/certificate data loaded above
    certificate_type = context.get('certificate_type')
    # Override the defaults with any mode-specific static values
    context['certificate_id_number'] = user_certificate.verify_uuid
    context['certificate_verify_url'] = u"{prefix}{uuid}{suffix}".format(
        prefix=context.get('certificate_verify_url_prefix'),
        uuid=user_certificate.verify_uuid,
        suffix=context.get('certificate_verify_url_suffix')
    )
    # Translators: The format of the date includes the full name of the month
    date = display_date_for_certificate(course, user_certificate)
    context['certificate_date_issued'] = _(u'{month} {day}, {year}').format(
        month=strftime_localized(date, "%B"),
        day=date.day,
        year=date.year
    )
    # Translators: This text represents the verification of the certificate
    context['document_meta_description'] = _(u'This is a valid {platform_name} certificate for {user_name}, '
                                             u'who participated in {partner_short_name} {course_number}').format(
        platform_name=platform_name,
        user_name=context['accomplishment_copy_name'],
        partner_short_name=context['organization_short_name'],
        course_number=context['course_number']
    )
    # Translators: This text is bound to the HTML 'title' element of the page and appears in the browser title bar
    context['document_title'] = _(u"{partner_short_name} {course_number} Certificate | {platform_name}").format(
        partner_short_name=context['organization_short_name'],
        course_number=context['course_number'],
        platform_name=platform_name
    )
    # Translators: This text fragment appears after the student's name (displayed in a large font) on the certificate
    # screen. The text describes the accomplishment represented by the certificate information displayed to the user
    context['accomplishment_copy_description_full'] = _(u"successfully completed, received a passing grade, and was "
                                                        u"awarded this {platform_name} {certificate_type} "
                                                        u"Certificate of Completion in ").format(
        platform_name=platform_name,
        certificate_type=context.get("certificate_type")
    )
    certificate_type_description = get_certificate_description(user_certificate.mode, certificate_type, platform_name)
    if certificate_type_description:
        context['certificate_type_description'] = certificate_type_description
    # Translators: This text describes the purpose (and therefore, value) of a course certificate
    context['certificate_info_description'] = _(u"{platform_name} acknowledges achievements through "
                                                u"certificates, which are awarded for course activities "
                                                u"that {platform_name} students complete.").format(
        platform_name=platform_name,
    )
def _update_context_with_basic_info(context, course_id, platform_name, configuration):
    """
    Updates context dictionary with basic info required before rendering simplest
    certificate templates.

    :param context: dict being populated for the template render
    :param course_id: id of the course the certificate belongs to
    :param platform_name: display name of the platform
    :param configuration: CertificateHtmlViewConfiguration settings dict
    """
    context['platform_name'] = platform_name
    context['course_id'] = course_id
    # Update the view context with the default ConfigurationModel settings
    context.update(configuration.get('default', {}))
    # Translators: 'All rights reserved' is a legal term used in copyrighting to protect published content
    reserved = _("All rights reserved")
    context['copyright_text'] = u'&copy; {year} {platform_name}. {reserved}.'.format(
        year=datetime.now(pytz.timezone(settings.TIME_ZONE)).year,
        platform_name=platform_name,
        reserved=reserved
    )
    # Translators: This text is bound to the HTML 'title' element of the page and appears
    # in the browser title bar when a requested certificate is not found or recognized
    context['document_title'] = _("Invalid Certificate")
    context['company_tos_urltext'] = _("Terms of Service &amp; Honor Code")
    # Translators: A 'Privacy Policy' is a legal document/statement describing a website's use of personal information
    context['company_privacy_urltext'] = _("Privacy Policy")
    # Translators: This line appears as a byline to a header image and describes the purpose of the page
    context['logo_subtitle'] = _("Certificate Validation")
    # Translators: Accomplishments describe the awards/certifications obtained by students on this platform
    context['accomplishment_copy_about'] = _(u'About {platform_name} Accomplishments').format(
        platform_name=platform_name
    )
    # Translators: This line appears on the page just before the generation date for the certificate
    context['certificate_date_issued_title'] = _("Issued On:")
    # Translators: The Certificate ID Number is an alphanumeric value unique to each individual certificate
    context['certificate_id_number_title'] = _('Certificate ID Number')
    context['certificate_info_title'] = _(u'About {platform_name} Certificates').format(
        platform_name=platform_name
    )
    context['certificate_verify_title'] = _(u"How {platform_name} Validates Student Certificates").format(
        platform_name=platform_name
    )
    # Translators: This text describes the validation mechanism for a certificate file (known as GPG security)
    context['certificate_verify_description'] = _(u'Certificates issued by {platform_name} are signed by a gpg key so '
                                                  u'that they can be validated independently by anyone with the '
                                                  u'{platform_name} public key. For independent verification, '
                                                  u'{platform_name} uses what is called a '
                                                  u'"detached signature"&quot;".').format(platform_name=platform_name)
    context['certificate_verify_urltext'] = _("Validate this certificate for yourself")
    # Translators: This text describes (at a high level) the mission and charter the edX platform and organization
    context['company_about_description'] = _(u"{platform_name} offers interactive online classes and MOOCs.").format(
        platform_name=platform_name)
    context['company_about_title'] = _(u"About {platform_name}").format(platform_name=platform_name)
    context['company_about_urltext'] = _(u"Learn more about {platform_name}").format(platform_name=platform_name)
    context['company_courselist_urltext'] = _(u"Learn with {platform_name}").format(platform_name=platform_name)
    context['company_careers_urltext'] = _(u"Work at {platform_name}").format(platform_name=platform_name)
    context['company_contact_urltext'] = _(u"Contact {platform_name}").format(platform_name=platform_name)
    # Translators: This text appears near the top of the certficate and describes the guarantee provided by edX
    context['document_banner'] = _(u"{platform_name} acknowledges the following student accomplishment").format(
        platform_name=platform_name
    )
def _update_course_context(request, context, course, course_key, platform_name):
    """
    Updates context dictionary with course info.

    :param request: HttpRequest, used to build the absolute course image URL
    :param context: dict being populated for the template render
    :param course: the course the certificate was earned in
    :param course_key: CourseKey for the course (kept for interface parity)
    :param platform_name: display name of the platform
    """
    context['full_course_image_url'] = request.build_absolute_uri(course_image_url(course))
    course_title_from_cert = context['certificate_data'].get('course_title', '')
    # A course title configured on the certificate overrides the course display name
    accomplishment_copy_course_name = course_title_from_cert if course_title_from_cert else course.display_name
    context['accomplishment_copy_course_name'] = accomplishment_copy_course_name
    course_number = course.display_coursenumber if course.display_coursenumber else course.number
    context['course_number'] = course_number
    if context['organization_long_name']:
        # Translators: This text represents the description of course
        context['accomplishment_copy_course_description'] = _(u'a course of study offered by {partner_short_name}, '
                                                              'an online learning initiative of '
                                                              '{partner_long_name}.').format(
            partner_short_name=context['organization_short_name'],
            partner_long_name=context['organization_long_name'],
            platform_name=platform_name)
    else:
        # Translators: This text represents the description of course
        context['accomplishment_copy_course_description'] = _('a course of study offered by '
                                                              '{partner_short_name}.').format(
            partner_short_name=context['organization_short_name'],
            platform_name=platform_name)
def _update_social_context(request, context, course, user, user_certificate, platform_name):
    """
    Updates context dictionary with info required for social sharing.

    :param request: HttpRequest, used to build the absolute share URL
    :param context: dict being populated for the template render
    :param course: the course the certificate was earned in
    :param user: the certificate owner (unused here; kept for interface parity)
    :param user_certificate: the GeneratedCertificate being shared
    :param platform_name: display name of the platform
    """
    share_settings = configuration_helpers.get_value("SOCIAL_SHARING_SETTINGS", settings.SOCIAL_SHARING_SETTINGS)
    context['facebook_share_enabled'] = share_settings.get('CERTIFICATE_FACEBOOK', False)
    context['facebook_app_id'] = configuration_helpers.get_value("FACEBOOK_APP_ID", settings.FACEBOOK_APP_ID)
    context['facebook_share_text'] = share_settings.get(
        'CERTIFICATE_FACEBOOK_TEXT',
        _(u"I completed the {course_title} course on {platform_name}.").format(
            course_title=context['accomplishment_copy_course_name'],
            platform_name=platform_name
        )
    )
    context['twitter_share_enabled'] = share_settings.get('CERTIFICATE_TWITTER', False)
    context['twitter_share_text'] = share_settings.get(
        'CERTIFICATE_TWITTER_TEXT',
        _(u"I completed a course at {platform_name}. Take a look at my certificate.").format(
            platform_name=platform_name
        )
    )
    share_url = request.build_absolute_uri(get_certificate_url(course_id=course.id, uuid=user_certificate.verify_uuid))
    context['share_url'] = share_url
    twitter_url = ''
    if context.get('twitter_share_enabled', False):
        twitter_url = 'https://twitter.com/intent/tweet?text={twitter_share_text}&url={share_url}'.format(
            twitter_share_text=smart_str(context['twitter_share_text']),
            share_url=six.moves.urllib.parse.quote_plus(smart_str(share_url))
        )
    context['twitter_url'] = twitter_url
    context['linked_in_url'] = None
    # If enabled, show the LinkedIn "add to profile" button
    # Clicking this button sends the user to LinkedIn where they
    # can add the certificate information to their profile.
    linkedin_config = LinkedInAddToProfileConfiguration.current()
    linkedin_share_enabled = share_settings.get('CERTIFICATE_LINKEDIN', linkedin_config.enabled)
    if linkedin_share_enabled:
        context['linked_in_url'] = linkedin_config.add_to_profile_url(
            course.id,
            course.display_name,
            user_certificate.mode,
            smart_str(share_url)
        )
def _update_context_with_user_info(context, user, user_certificate):
    """
    Updates context dictionary with user related info.

    Adds the learner's identity fields plus the localized accomplishment
    strings rendered on the certificate page.
    """
    fullname = user.profile.name
    username = user.username

    context.update({
        'username': username,
        'course_mode': user_certificate.mode,
        'accomplishment_user_id': user.id,
        'accomplishment_copy_name': fullname,
        'accomplishment_copy_username': username,
    })
    context['accomplishment_more_title'] = _(u"More Information About {user_name}'s Certificate:").format(
        user_name=fullname
    )
    # Translators: This line is displayed to a user who has completed a course and achieved a certification
    context['accomplishment_banner_opening'] = _(u"{fullname}, you earned a certificate!").format(
        fullname=fullname
    )
    # Translators: This line congratulates the user and instructs them to share their accomplishment on social networks
    context['accomplishment_banner_congrats'] = _("Congratulations! This page summarizes what "
                                                  "you accomplished. Show it off to family, friends, and colleagues "
                                                  "in your social and professional networks.")
    # Translators: This line leads the reader to understand more about the certificate that a student has been awarded
    context['accomplishment_copy_more_about'] = _(u"More about {fullname}'s accomplishment").format(
        fullname=fullname
    )
def _get_user_certificate(request, user, course_key, course, preview_mode=None):
    """
    Retrieves user's certificate from db. Creates one in case of preview mode.

    Returns None if there is no certificate generated for given user
    otherwise returns `GeneratedCertificate` instance.
    """
    if preview_mode:
        # Certificate is being previewed from studio: fabricate an unsaved
        # certificate object, but only for users with preview permission.
        if not request.user.has_perm(PREVIEW_CERTIFICATES, course):
            return None
        if course.certificate_available_date and not course.self_paced:
            modified_date = course.certificate_available_date
        else:
            modified_date = datetime.now().date()
        return GeneratedCertificate(
            mode=preview_mode,
            verify_uuid=six.text_type(uuid4().hex),
            modified_date=modified_date
        )

    if not certificates_viewable_for_course(course):
        return None

    # Certificate is being viewed by learner or public: look up a real,
    # downloadable certificate.
    try:
        return GeneratedCertificate.eligible_certificates.get(
            user=user,
            course_id=course_key,
            status=CertificateStatuses.downloadable
        )
    except GeneratedCertificate.DoesNotExist:
        return None
def _track_certificate_events(request, context, course, user, user_certificate):
    """
    Tracks web certificate view related events.

    Emits a badge ``evidence_visited`` event when the request carries the
    ``evidence_visit`` query parameter, and a certificate ``evidence_visited``
    event when someone other than the certificate owner views the page.
    """
    # Badge Request Event Tracking Logic
    course_key = course.location.course_key
    if 'evidence_visit' in request.GET:
        badge_class = get_completion_badge(course_key, user)
        if not badge_class:
            log.warning(u'Visit to evidence URL for badge, but badges not configured for course "%s"', course_key)
            badges = []
        else:
            badges = badge_class.get_for_user(user)
        if badges:
            # There should only ever be one of these.
            badge = badges[0]
            tracker.emit(
                'edx.badge.assertion.evidence_visited',
                {
                    'badge_name': badge.badge_class.display_name,
                    'badge_slug': badge.badge_class.slug,
                    'badge_generator': badge.backend,
                    'issuing_component': badge.badge_class.issuing_component,
                    'user_id': user.id,
                    'course_id': six.text_type(course_key),
                    'enrollment_mode': badge.badge_class.mode,
                    'assertion_id': badge.id,
                    'assertion_image_url': badge.image_url,
                    'assertion_json_url': badge.assertion_url,
                    'issuer': badge.data.get('issuer'),
                }
            )
        else:
            # Fix: use log.warning — log.warn is a deprecated alias, and the
            # rest of this function already uses log.warning.
            log.warning(
                u"Could not find badge for %s on course %s.",
                user.id,
                course_key,
            )
    # track certificate evidence_visited event for analytics when certificate_user and accessing_user are different
    if request.user and request.user.id != user.id:
        emit_certificate_event('evidence_visited', user, six.text_type(course.id), course, {
            'certificate_id': user_certificate.verify_uuid,
            'enrollment_mode': user_certificate.mode,
            'social_network': CertificateSocialNetworks.linkedin
        })
def _update_badge_context(context, course, user):
    """
    Updates context with badge info.

    Sets context['badge'] to the learner's completion badge, or None when
    badges are disabled or the learner has none.
    """
    context['badge'] = None
    if badges_enabled() and course.issue_badges:
        user_badges = get_completion_badge(course.location.course_key, user).get_for_user(user)
        if user_badges:
            # At most one completion badge is expected; take the first.
            context['badge'] = user_badges[0]
def _update_organization_context(context, course):
    """
    Updates context with organization related info.

    Prefers names from the organizations API when available, falling back to
    the course's own display_organization/org fields.
    """
    long_name = None
    logo = None
    short_name = course.display_organization or course.org
    # Appsembler: Avoid having it overridden by organization.short_name
    course_short_name = short_name
    # Appsembler: Allow certs app to pick smartly
    short_name_overridden = bool(course.display_organization)

    organizations = organization_api.get_course_organizations(course_id=course.id)
    if organizations:
        #TODO Need to add support for multiple organizations, Currently we are interested in the first one.
        first_org = organizations[0]
        long_name = first_org.get('name', long_name)
        short_name = first_org.get('short_name', short_name)
        logo = first_org.get('logo', None)

    context.update({
        'organization_long_name': long_name,
        'organization_short_name': short_name,
        'partner_short_name_overridden': short_name_overridden,
        'course_partner_short_name': course_short_name,
        'accomplishment_copy_course_org': short_name,
        'organization_logo': logo,
    })
def unsupported_url(request, user_id, course_id):
    """
    This view returns the un-supported url page aimed to let the user aware that
    url is no longer supported

    Arguments:
        request: Django request object.
        user_id: not used in the body; presumably kept to match the legacy
            URL pattern — confirm against the URL configuration.
        course_id: course run key string, passed to the invalid-certificate
            renderer for display/logging.
    """
    platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
    configuration = CertificateHtmlViewConfiguration.get_config()
    # Reuse the invalid-certificate renderer with a dedicated template.
    return _render_invalid_certificate(
        request, course_id, platform_name, configuration, cert_path='certificates/url_unsupported.html'
    )
@login_required
def render_preview_certificate(request, course_id):
    """
    This view renders the course certificate in preview mode

    Thin wrapper over `render_html_view`; the preview behavior itself is
    driven by the `preview` query parameter read there.
    """
    return render_html_view(request, six.text_type(course_id))
def render_cert_by_uuid(request, certificate_uuid):
    """
    This public view generates an HTML representation of the specified certificate

    Raises Http404 when no downloadable certificate matches the UUID.
    """
    try:
        cert = GeneratedCertificate.eligible_certificates.get(
            verify_uuid=certificate_uuid,
            status=CertificateStatuses.downloadable
        )
    except GeneratedCertificate.DoesNotExist:
        raise Http404
    return render_html_view(request, six.text_type(cert.course_id), cert)
@handle_500(
    template_path="certificates/server-error.html",
    test_func=lambda request: request.GET.get('preview', None)
)
def render_html_view(request, course_id, certificate=None):
    """
    This public view generates an HTML representation of the specified user and course
    If a certificate is not available, we display a "Sorry!" screen instead

    Arguments:
        request: Django request object.
        course_id: course run key string.
        certificate: optional `GeneratedCertificate`. When provided (e.g. by
            `render_cert_by_uuid`), its owner is rendered instead of
            `request.user`.
    """
    # When rendering by UUID, the certificate owner may differ from the viewer.
    user = certificate.user if certificate else request.user
    user_id = user.id
    preview_mode = request.GET.get('preview', None)
    platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
    configuration = CertificateHtmlViewConfiguration.get_config()
    # Kick the user back to the "Invalid" screen if the feature is disabled globally
    if not configuration_helpers.get_value('CERTIFICATES_HTML_VIEW',
                                           settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False)):
        return _render_invalid_certificate(request, course_id, platform_name, configuration)
    # Load the course and user objects
    try:
        course_key = CourseKey.from_string(course_id)
        course = get_course_by_id(course_key)
    # For any course or user exceptions, kick the user back to the "Invalid" screen
    except (InvalidKeyError, Http404) as exception:
        error_str = (
            u"Invalid cert: error finding course %s "
            u"Specific error: %s"
        )
        log.info(error_str, course_id, str(exception))
        return _render_invalid_certificate(request, course_id, platform_name, configuration)
    # Kick the user back to the "Invalid" screen if the feature is disabled for the course
    if not course.cert_html_view_enabled:
        log.info(
            u"Invalid cert: HTML certificates disabled for %s. User id: %d",
            course_id,
            user_id,
        )
        return _render_invalid_certificate(request, course_id, platform_name, configuration)
    # Load user's certificate
    user_certificate = _get_user_certificate(request, user, course_key, course, preview_mode)
    if not user_certificate:
        log.info(
            u"Invalid cert: User %d does not have eligible cert for %s.",
            user_id,
            course_id,
        )
        return _render_invalid_certificate(request, course_id, platform_name, configuration)
    # Get the active certificate configuration for this course
    # If we do not have an active certificate, we'll need to send the user to the "Invalid" screen
    # Passing in the 'preview' parameter, if specified, will return a configuration, if defined
    active_configuration = get_active_web_certificate(course, preview_mode)
    if active_configuration is None:
        log.info(
            u"Invalid cert: course %s does not have an active configuration. User id: %d",
            course_id,
            user_id,
        )
        return _render_invalid_certificate(request, course_id, platform_name, configuration)
    # Get data from Discovery service that will be necessary for rendering this Certificate.
    catalog_data = _get_catalog_data_for_course(course_key)
    # Determine whether to use the standard or custom template to render the certificate.
    custom_template = None
    custom_template_language = None
    if settings.FEATURES.get('CUSTOM_CERTIFICATE_TEMPLATES_ENABLED', False):
        log.info(u"Custom certificate for course %s", course_id)
        custom_template, custom_template_language = _get_custom_template_and_language(
            course.id,
            user_certificate.mode,
            catalog_data.pop('content_language', None)
        )
    # Determine the language that should be used to render the certificate.
    # For the standard certificate template, use the user language. For custom templates, use
    # the language associated with the template.
    user_language = translation.get_language()
    certificate_language = custom_template_language if custom_template else user_language
    log.info(
        u"certificate language is: %s for the course: %s",
        certificate_language,
        course_key
    )
    # Generate the certificate context in the correct language, then render the template.
    # NOTE: the order of the updates below matters — later updates intentionally
    # override keys set by earlier ones.
    with translation.override(certificate_language):
        context = {'user_language': user_language}
        _update_context_with_basic_info(context, course_id, platform_name, configuration)
        context['certificate_data'] = active_configuration
        # Append/Override the existing view context values with any mode-specific ConfigurationModel values
        context.update(configuration.get(user_certificate.mode, {}))
        # Append organization info
        _update_organization_context(context, course)
        # Append course info
        _update_course_context(request, context, course, course_key, platform_name)
        # Append course run info from discovery
        context.update(catalog_data)
        # Append user info
        _update_context_with_user_info(context, user, user_certificate)
        # Append social sharing info
        _update_social_context(request, context, course, user, user_certificate, platform_name)
        # Append/Override the existing view context values with certificate specific values
        _update_certificate_context(context, course, user_certificate, platform_name)
        # Append badge info
        _update_badge_context(context, course, user)
        # Add certificate header/footer data to current context
        context.update(get_certificate_header_context(is_secure=request.is_secure()))
        context.update(get_certificate_footer_context())
        # Append/Override the existing view context values with any course-specific static values from Advanced Settings
        context.update(course.cert_html_view_overrides)
        # Track certificate view events
        _track_certificate_events(request, context, course, user, user_certificate)
        # Render the certificate
        return _render_valid_certificate(request, context, custom_template)
def _get_catalog_data_for_course(course_key):
    """
    Retrieve data from the Discovery service necessary for rendering a certificate for a specific course.
    """
    certificate_settings = CertificateGenerationCourseSetting.get(course_key)
    if not certificate_settings:
        return {}

    # Only request the course-run fields the certificate configuration needs.
    fields = []
    if certificate_settings.language_specific_templates_enabled:
        fields.append('content_language')
    if certificate_settings.include_hours_of_effort:
        fields.extend(['weeks_to_complete', 'max_effort'])

    catalog_data = {}
    if fields:
        run_data = get_course_run_details(course_key, fields)
        weeks = run_data.get('weeks_to_complete')
        effort = run_data.get('max_effort')
        if weeks and effort:
            try:
                catalog_data['hours_of_effort'] = int(weeks) * int(effort)
            except ValueError:
                log.exception('Error occurred while parsing course run details')
        catalog_data['content_language'] = run_data.get('content_language')

    log.info(
        u"catalog data received for course: %s is : %s",
        course_key,
        catalog_data,
    )
    return catalog_data
def _get_custom_template_and_language(course_id, course_mode, course_language):
    """
    Return the custom certificate template, if any, that should be rendered for the provided course/mode/language
    combination, along with the language that should be used to render that template.
    """
    if course_language:
        closest_released_language = get_closest_released_language(course_language)
    else:
        closest_released_language = None
    log.info(
        u"closest released language for %s is %s and course language was: %s",
        course_id,
        closest_released_language,
        course_language
    )
    template = get_certificate_template(course_id, course_mode, closest_released_language)
    if not template:
        return (None, None)
    # Prefer the template's own language; language-agnostic templates fall
    # back to the viewer's current language.
    language = closest_released_language if template.language else translation.get_language()
    return (template, language)
def _render_invalid_certificate(request, course_id, platform_name, configuration,
                                cert_path=INVALID_CERTIFICATE_TEMPLATE_PATH):
    """
    Renders the invalid certificate view with default header and footer.
    """
    context = {}
    _update_context_with_basic_info(context, course_id, platform_name, configuration)
    # Shared branding used at the top and bottom of every certificate page.
    header = get_certificate_header_context(is_secure=request.is_secure())
    footer = get_certificate_footer_context()
    context.update(header)
    context.update(footer)
    return render_to_response(cert_path, context)
def _render_valid_certificate(request, context, custom_template=None):
    """
    Render the certificate page, using the custom Mako template when one is
    configured and the stock "valid" template otherwise.
    """
    if not custom_template:
        return render_to_response("certificates/valid.html", context)
    template = Template(
        custom_template.template,
        output_encoding='utf-8',
        input_encoding='utf-8',
        default_filters=['decode.utf8'],
        encoding_errors='replace',
    )
    return HttpResponse(template.render(RequestContext(request, context)))
| appsembler/edx-platform | lms/djangoapps/certificates/views/webview.py | Python | agpl-3.0 | 33,078 | [
"VisIt"
] | fe7fa2a55cb1bfb2db6b828bf51bfdd67e865d7bd99830bb1b0ed7c2861d4415 |
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exact moment matching.
Exact moment matching for decoupled Gaussian process with squared exponential
kernel. Equations based on the paper:
Deisenroth, Marc Peter, Marco F. Huber, and Uwe D. Hanebeck. "Analytic
moment-based Gaussian process filtering." Proceedings of the 26th annual
international conference on machine learning. ACM, 2009.
https://spiral.imperial.ac.uk:8443/bitstream/10044/1/12195/4/icml2009_finalCorrected.pdf
(final corrected)
Matlab code: https://github.com/ICL-SML/gp-adf.
Especially, the file gpPt.m, transition propagation.
A side note: The equations in the matlab is quite different from the equations
in the paper. But actually they are the same, we need to use matrix identity and
the fact that Lambda are diagonal.
Small modifications are necessary for decoupled Gaussian process, e.g., taking
into account different inducing points / bases for mean and covariance, and
sampling for mean bases in order to achieve linear-time complexity.
"""
# TODO(xinyanyan) Add the computation for input-output covariance, which will
# be needed for filtering, and when delta x is predicted.
# TODO(xinyanyan) Add support for different bases for mean and covariance.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import decoupled_gaussian_process
from decoupled_gaussian_process import utils
import tensorflow as tf
class ExactMomentMatchingPredictor(object):
  """Class for predicting a sequence of belief states.

  The prediction is based on exact moment matching and decouple Gaussian process
  model.
  """

  def __init__(self, models):
    """Initialization.

    Args:
      models: list of DecoupledGaussianProcess.

    Raises:
      ValueError: invalid models are passed.
    """
    # Validate up front so an invalid model fails at construction time.
    for model in models:
      if not isinstance(model,
                        decoupled_gaussian_process.DecoupledGaussianProcess):
        raise ValueError('Models include invalid model!')
    self.num_models = len(models)
    self.models = models

  def build_prediction(self, input_mean, input_cov):
    """Build exact moment matching predictions.

    Propagates a Gaussian input belief N(input_mean, input_cov) through the
    decoupled GP models with squared exponential kernel, following the
    Deisenroth et al. equations referenced in the module docstring.

    Args:
      input_mean: rank-1 tensor.
      input_cov: rank-2 tensor.

    Returns:
      tensors for mean and covariance.
    """
    # One scalar output mean per model; the output covariance is assembled
    # as a nested list forming a symmetric num_models x num_models matrix.
    output_mean = [None for _ in range(self.num_models)]
    output_cov = [
        [None for _ in range(self.num_models)] for _ in range(self.num_models)
    ]
    # k[i]: log kernel values between the input mean and model i's mean
    # bases; reused below when assembling the output covariance.
    k = [None for _ in range(self.num_models)]
    for i in range(self.num_models):
      # quad_form = input covariance + diag(squared length scales).
      quad_form = (
          input_cov + tf.diag(tf.exp(2.0 * self.models[i].log_length_scale)))
      coeff = (
          tf.exp(2.0 * self.models[i].log_signal_stddev) / tf.sqrt(
              tf.matrix_determinant(quad_form)) * tf.exp(
                  tf.reduce_sum(self.models[i].log_length_scale)))
      exp_distance = tf.exp(-0.5 * utils.build_squared_distance(
          self.models[i].bases.mean_bases,
          tf.reshape(input_mean, [1, -1]),
          matrix=quad_form,
          is_inverse=True))  # rank-2 column
      # Predictive mean: weighted sum of the exponentiated distances.
      output_mean[i] = coeff * tf.reduce_sum(
          exp_distance * self.models[i].bases.mean_stats)
      scaled_x = self.models[i].bases.mean_bases - input_mean
      scaled_x /= tf.exp(self.models[i].log_length_scale)
      # rank-1, (num of bases,)
      k[i] = (2.0 * self.models[i].log_signal_stddev -
              tf.reduce_sum(scaled_x * scaled_x, axis=1) / 2.0)
    for i in range(self.num_models):
      scaled_xi = self.models[i].bases.mean_bases - input_mean
      scaled_xi /= tf.exp(2.0 * self.models[i].log_length_scale)
      # Only the lower triangle (j <= i) is computed; it is mirrored below
      # since the output covariance is symmetric.
      for j in range(i + 1):
        quad_form = (
            input_cov * (tf.exp(-2.0 * self.models[i].log_length_scale) +
                         tf.exp(-2.0 * self.models[j].log_length_scale)) +
            tf.eye(tf.shape(input_cov)[0], dtype=utils.tf_float))
        coeff = 1.0 / tf.sqrt(tf.matrix_determinant(quad_form))
        scaled_xj = self.models[j].bases.mean_bases - input_mean
        scaled_xj /= tf.exp(2.0 * self.models[j].log_length_scale)
        exp_term = tf.reshape(k[i], [-1, 1]) + k[j]
        # Elementwise (l_i^2 * l_j^2) / (l_i^2 + l_j^2) over length scales.
        prod_over_sum = (
            tf.exp(2.0 * (self.models[i].log_length_scale +
                          self.models[j].log_length_scale)) /
            (tf.exp(2.0 * self.models[i].log_length_scale) +
             tf.exp(2.0 * self.models[j].log_length_scale)))
        exp_term += 0.5 * utils.build_squared_distance(
            scaled_xi, -scaled_xj, diagonal=prod_over_sum)
        exp_term -= 0.5 * utils.build_squared_distance(
            scaled_xi,
            -scaled_xj,
            matrix=quad_form,
            diagonal=prod_over_sum,
            is_inverse=True)
        exp_term = tf.exp(exp_term)
        multiplier_in_tr = tf.matmul(
            self.models[i].bases.mean_stats,
            self.models[j].bases.mean_stats,
            transpose_b=True)
        if i == j:
          # Diagonal blocks subtract the inverse-covariance correction term
          # (cf. gpPt.m from the GP-ADF Matlab code cited in the module
          # docstring).
          multiplier_in_tr -= self.models[i].common_terms.inverse_in_cov
        # Get centered cov.
        output_cov[i][j] = (
            coeff * tf.reduce_sum(multiplier_in_tr * exp_term) -
            output_mean[i] * output_mean[j])
        output_cov[j][i] = output_cov[i][j]
      # Marginal variance additionally includes the signal variance and the
      # likelihood (observation-noise) variance of model i.
      output_cov[i][i] += (
          tf.exp(2.0 * self.models[i].log_signal_stddev) +
          tf.exp(self.models[i].log_likelihood_variance))
    return output_mean, output_cov
| google/decoupled_gaussian_process | exact_moment_matching_predictor.py | Python | apache-2.0 | 5,936 | [
"ADF",
"Gaussian"
] | 205b840d36dda8b1accfbc7a6ff455b28f288a502ec9ed0321ece4d549afde58 |
# Copyright 2021, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`DPQuery`s for differentially private tree aggregation protocols.
`TreeCumulativeSumQuery` and `TreeResidualSumQuery` are `DPQuery`s for continual
online observation queries relying on `tree_aggregation`. 'Online' means that
the leaf nodes of the tree arrive one by one as the time proceeds. The core
logic of tree aggregation is implemented in `tree_aggregation.TreeAggregator`
and `tree_aggregation.EfficientTreeAggregator`.
Depending on the data streaming setting (single/multi-pass), the privacy
accounting method ((epsilon,delta)-DP/RDP/zCDP), and the restart strategy (see
`restart_query`), the DP bound can be computed by one of the public methods
in `analysis.tree_aggregation_accountant`.
For example, for a single-pass algorithm where a sample may appear at most once
in the querying process; if `get_noised_result` is called `steps` times, the
corresponding epsilon for a `target_delta` and `noise_multiplier` to achieve
(epsilon,delta)-DP can be computed as:
orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
rdp = compute_rdp_tree_restart(noise_multiplier, [steps], orders)
eps = rdp_accountant.get_privacy_spent(orders, rdp, target_delta)[0]
"""
import attr
import tensorflow as tf
from tensorflow_privacy.privacy.analysis import dp_event
from tensorflow_privacy.privacy.dp_query import dp_query
from tensorflow_privacy.privacy.dp_query import tree_aggregation
# TODO(b/193679963): define `RestartQuery` and move `RestartIndicator` to be
# in the same module.
class TreeCumulativeSumQuery(dp_query.SumAggregationDPQuery):
  """Returns private cumulative sums by clipping and adding correlated noise.

  Consider calling `get_noised_result` T times, and each (x_i, i=0,2,...,T-1) is
  the private value returned by `accumulate_record`, i.e. x_i = sum_{j=0}^{n-1}
  x_{i,j} where each x_{i,j} is a private record in the database. This class is
  intended to make multiple queries, which release privatized values of the
  cumulative sums s_i = sum_{k=0}^{i} x_k, for i=0,...,T-1.

  Each call to `get_noised_result` releases the next cumulative sum s_i, which
  is in contrast to the GaussianSumQuery that releases x_i. Noise for the
  cumulative sums is accomplished using the tree aggregation logic in
  `tree_aggregation`, which is proportional to log(T).

  Example usage:
    query = TreeCumulativeSumQuery(...)
    global_state = query.initial_global_state()
    params = query.derive_sample_params(global_state)
    for i, samples in enumerate(streaming_samples):
      sample_state = query.initial_sample_state(samples[0])
      # Compute x_i = sum_{j=0}^{n-1} x_{i,j}
      for j,sample in enumerate(samples):
        sample_state = query.accumulate_record(params, sample_state, sample)
      # noised_cumsum is privatized estimate of s_i
      noised_cumsum, global_state, event = query.get_noised_result(
        sample_state, global_state)

  Attributes:
    clip_fn: Callable that specifies clipping function. `clip_fn` receives two
      arguments: a flat list of vars in a record and a `clip_value` to clip the
      corresponding record, e.g. clip_fn(flat_record, clip_value).
    clip_value: float indicating the value at which to clip the record.
    record_specs: `Collection[tf.TensorSpec]` specifying shapes of records.
    tree_aggregator: `tree_aggregation.TreeAggregator` initialized with user
      defined `noise_generator`. `noise_generator` is a
      `tree_aggregation.ValueGenerator` to generate the noise value for a tree
      node. Noise standard deviation is specified outside the `dp_query` by the
      user when defining `noise_fn` and should have order
      O(clip_norm*log(T)/eps) to guarantee eps-DP.
  """

  @attr.s(frozen=True)
  class GlobalState(object):
    """Class defining global state for Tree sum queries.

    Attributes:
      tree_state: Current state of noise tree keeping track of current leaf and
        each level state.
      clip_value: The clipping value to be passed to clip_fn.
      samples_cumulative_sum: Noiseless cumulative sum of samples over time.
    """
    # Frozen attrs class: state transitions create new instances via
    # `attr.evolve` instead of mutating in place.
    tree_state = attr.ib()
    clip_value = attr.ib()
    samples_cumulative_sum = attr.ib()

  def __init__(self,
               record_specs,
               noise_generator,
               clip_fn,
               clip_value,
               use_efficient=True):
    """Initializes the `TreeCumulativeSumQuery`.

    Consider using `build_l2_gaussian_query` for the construction of a
    `TreeCumulativeSumQuery` with L2 norm clipping and Gaussian noise.

    Args:
      record_specs: A nested structure of `tf.TensorSpec`s specifying structure
        and shapes of records.
      noise_generator: `tree_aggregation.ValueGenerator` to generate the noise
        value for a tree node. Should be coupled with clipping norm to guarantee
        privacy.
      clip_fn: Callable that specifies clipping function. Input to clip is a
        flat list of vars in a record.
      clip_value: Float indicating the value at which to clip the record.
      use_efficient: Boolean indicating the usage of the efficient tree
        aggregation algorithm based on the paper "Efficient Use of
        Differentially Private Binary Trees".
    """
    self._clip_fn = clip_fn
    self._clip_value = clip_value
    self._record_specs = record_specs
    # Choose the tree aggregation implementation; the "efficient" variant
    # follows "Efficient Use of Differentially Private Binary Trees".
    if use_efficient:
      self._tree_aggregator = tree_aggregation.EfficientTreeAggregator(
          noise_generator)
    else:
      self._tree_aggregator = tree_aggregation.TreeAggregator(noise_generator)

  def initial_global_state(self):
    """Implements `tensorflow_privacy.DPQuery.initial_global_state`."""
    initial_tree_state = self._tree_aggregator.init_state()
    # The noiseless cumulative sum starts at zero for every record tensor.
    initial_samples_cumulative_sum = tf.nest.map_structure(
        lambda spec: tf.zeros(spec.shape), self._record_specs)
    return TreeCumulativeSumQuery.GlobalState(
        tree_state=initial_tree_state,
        clip_value=tf.constant(self._clip_value, tf.float32),
        samples_cumulative_sum=initial_samples_cumulative_sum)

  def derive_sample_params(self, global_state):
    """Implements `tensorflow_privacy.DPQuery.derive_sample_params`."""
    return global_state.clip_value

  def preprocess_record(self, params, record):
    """Implements `tensorflow_privacy.DPQuery.preprocess_record`.

    Args:
      params: `clip_value` for the record.
      record: The record to be processed.

    Returns:
      Structure of clipped tensors.
    """
    clip_value = params
    # Clip on the flattened list of tensors, then restore the nesting.
    record_as_list = tf.nest.flatten(record)
    clipped_as_list = self._clip_fn(record_as_list, clip_value)
    return tf.nest.pack_sequence_as(record, clipped_as_list)

  def get_noised_result(self, sample_state, global_state):
    """Implements `tensorflow_privacy.DPQuery.get_noised_result`.

    Updates tree state, and returns noised cumulative sum and updated state.
    Computes new cumulative sum, and returns its noised value. Grows tree state
    by one new leaf, and returns the new state.

    Args:
      sample_state: Sum of clipped records for this round.
      global_state: Global state with current sample's cumulative sum and tree
        state.

    Returns:
      A tuple of (noised_cumulative_sum, new_global_state).
    """
    new_cumulative_sum = tf.nest.map_structure(
        tf.add, global_state.samples_cumulative_sum, sample_state)
    # Tree aggregation supplies the correlated noise for the new cumulative
    # sum and advances the tree by one leaf.
    cumulative_sum_noise, new_tree_state = self._tree_aggregator.get_cumsum_and_update(
        global_state.tree_state)
    noised_cumulative_sum = tf.nest.map_structure(tf.add, new_cumulative_sum,
                                                  cumulative_sum_noise)
    new_global_state = attr.evolve(
        global_state,
        samples_cumulative_sum=new_cumulative_sum,
        tree_state=new_tree_state)
    # Privacy accounting for tree aggregation is performed externally (see
    # module docstring); no structured DpEvent is emitted here.
    event = dp_event.UnsupportedDpEvent()
    return noised_cumulative_sum, new_global_state, event

  def reset_state(self, noised_results, global_state):
    """Returns state after resetting the tree.

    This function will be used in `restart_query.RestartQuery` after calling
    `get_noised_result` when the restarting condition is met.

    Args:
      noised_results: Noised cumulative sum returned by `get_noised_result`.
      global_state: Updated global state returned by `get_noised_result`, which
        has current sample's cumulative sum and tree state for the next
        cumulative sum.

    Returns:
      New global state with current noised cumulative sum and restarted tree
      state for the next cumulative sum.
    """
    new_tree_state = self._tree_aggregator.reset_state(global_state.tree_state)
    # Note: the restarted state stores the *noised* cumulative sum, so future
    # releases remain consistent with what has already been published.
    return attr.evolve(
        global_state,
        samples_cumulative_sum=noised_results,
        tree_state=new_tree_state)

  @classmethod
  def build_l2_gaussian_query(cls,
                              clip_norm,
                              noise_multiplier,
                              record_specs,
                              noise_seed=None,
                              use_efficient=True):
    """Returns a query instance with L2 norm clipping and Gaussian noise.

    Args:
      clip_norm: Each record will be clipped so that it has L2 norm at most
        `clip_norm`.
      noise_multiplier: The effective noise multiplier for the sum of records.
        Noise standard deviation is `clip_norm*noise_multiplier`. The value can
        be used as the input of the privacy accounting functions in
        `analysis.tree_aggregation_accountant`.
      record_specs: A nested structure of `tf.TensorSpec`s specifying structure
        and shapes of records.
      noise_seed: Integer seed for the Gaussian noise generator. If `None`, a
        nondeterministic seed based on system time will be generated.
      use_efficient: Boolean indicating the usage of the efficient tree
        aggregation algorithm based on the paper "Efficient Use of
        Differentially Private Binary Trees".

    Raises:
      ValueError: If `clip_norm` is not positive or `noise_multiplier` is
        negative.
    """
    if clip_norm <= 0:
      raise ValueError(f'`clip_norm` must be positive, got {clip_norm}.')
    if noise_multiplier < 0:
      raise ValueError(
          f'`noise_multiplier` must be non-negative, got {noise_multiplier}.')
    gaussian_noise_generator = tree_aggregation.GaussianNoiseGenerator(
        noise_std=clip_norm * noise_multiplier,
        specs=record_specs,
        seed=noise_seed)

    def l2_clip_fn(record_as_list, clip_norm):
      # This `clip_norm` is the per-sample parameter passed through
      # `derive_sample_params`, shadowing the outer Python float.
      clipped_record, _ = tf.clip_by_global_norm(record_as_list, clip_norm)
      return clipped_record

    return cls(
        clip_fn=l2_clip_fn,
        clip_value=clip_norm,
        record_specs=record_specs,
        noise_generator=gaussian_noise_generator,
        use_efficient=use_efficient)
class TreeResidualSumQuery(dp_query.SumAggregationDPQuery):
"""Implements DPQuery for adding correlated noise through tree structure.
Clips and sums records in current sample x_i = sum_{j=0}^{n-1} x_{i,j};
returns the current sample adding the noise residual from tree aggregation.
The returned value is conceptually equivalent to the following: calculates
cumulative sum of samples over time s_i = sum_{k=0}^i x_i (instead of only
current sample) with added noise by tree aggregation protocol that is
  proportional to log(T), T being the number of times the query is called;
  returns the residual between the current noised cumsum noised(s_i) and the
  previous one noised(s_{i-1}) when the query is called.
This can be used as a drop-in replacement for `GaussianSumQuery`, and can
  offer stronger utility/privacy tradeoffs when amplification-via-sampling is not
  possible, or when privacy epsilon is relatively large. This may result in
more noise by a log(T) factor in each individual estimate of x_i, but if the
x_i are used in the underlying code to compute cumulative sums, the noise in
those sums can be less. That is, this allows us to adapt code that was written
to use a regular `SumQuery` to benefit from the tree aggregation protocol.
Combining this query with a SGD optimizer can be used to implement the
DP-FTRL algorithm in
"Practical and Private (Deep) Learning without Sampling or Shuffling".
Example usage:
query = TreeResidualSumQuery(...)
global_state = query.initial_global_state()
params = query.derive_sample_params(global_state)
for i, samples in enumerate(streaming_samples):
sample_state = query.initial_sample_state(samples[0])
# Compute x_i = sum_{j=0}^{n-1} x_{i,j}
for j,sample in enumerate(samples):
sample_state = query.accumulate_record(params, sample_state, sample)
# noised_sum is privatized estimate of x_i by conceptually postprocessing
# noised cumulative sum s_i
noised_sum, global_state, event = query.get_noised_result(
sample_state, global_state)
Attributes:
clip_fn: Callable that specifies clipping function. `clip_fn` receives two
arguments: a flat list of vars in a record and a `clip_value` to clip the
corresponding record, e.g. clip_fn(flat_record, clip_value).
clip_value: float indicating the value at which to clip the record.
record_specs: A nested structure of `tf.TensorSpec`s specifying structure
and shapes of records.
tree_aggregator: `tree_aggregation.TreeAggregator` initialized with user
defined `noise_generator`. `noise_generator` is a
`tree_aggregation.ValueGenerator` to generate the noise value for a tree
      node. Noise standard deviation is specified outside the `dp_query` by the
user when defining `noise_fn` and should have order
O(clip_norm*log(T)/eps) to guarantee eps-DP.
"""
@attr.s(frozen=True)
class GlobalState(object):
  """Class defining global state for Tree sum queries.

  Attributes:
    tree_state: Current state of noise tree keeping track of current leaf and
      each level state.
    clip_value: The clipping value to be passed to clip_fn.
    previous_tree_noise: Cumulative noise by tree aggregation from the
      previous time the query is called on a sample.
  """
  # Frozen (immutable): all updates must go through `attr.evolve`, which is
  # how `get_noised_result`/`reset_state` produce the next state.
  tree_state = attr.ib()
  clip_value = attr.ib()
  previous_tree_noise = attr.ib()
def __init__(self,
             record_specs,
             noise_generator,
             clip_fn,
             clip_value,
             use_efficient=True):
  """Initializes the `TreeResidualSumQuery`.

  Consider using `build_l2_gaussian_query` for the construction of a
  `TreeResidualSumQuery` with L2 norm clipping and Gaussian noise.

  Args:
    record_specs: A nested structure of `tf.TensorSpec`s specifying structure
      and shapes of records.
    noise_generator: `tree_aggregation.ValueGenerator` to generate the noise
      value for a tree node. Should be coupled with clipping norm to guarantee
      privacy.
    clip_fn: Callable that specifies clipping function. Input to clip is a
      flat list of vars in a record.
    clip_value: Float indicating the value at which to clip the record.
    use_efficient: Boolean indicating the usage of the efficient tree
      aggregation algorithm based on the paper "Efficient Use of
      Differentially Private Binary Trees".
  """
  self._clip_fn = clip_fn
  self._clip_value = clip_value
  self._record_specs = record_specs
  # The efficient aggregator averages several noise samples per node to
  # reduce variance; the plain aggregator adds one sample per node.
  if use_efficient:
    self._tree_aggregator = tree_aggregation.EfficientTreeAggregator(
        noise_generator)
  else:
    self._tree_aggregator = tree_aggregation.TreeAggregator(noise_generator)
def _zero_initial_noise(self):
  """Returns an all-zero structure shaped like `self._record_specs`."""
  def zeros_like_spec(spec):
    return tf.zeros(spec.shape)

  return tf.nest.map_structure(zeros_like_spec, self._record_specs)
def initial_global_state(self):
  """Implements `tensorflow_privacy.DPQuery.initial_global_state`."""
  # Fresh tree plus zero "previous" noise, so the first residual
  # subtracts nothing.
  fresh_tree_state = self._tree_aggregator.init_state()
  return TreeResidualSumQuery.GlobalState(
      tree_state=fresh_tree_state,
      clip_value=tf.constant(self._clip_value, tf.float32),
      previous_tree_noise=self._zero_initial_noise())
def derive_sample_params(self, global_state):
  """Implements `tensorflow_privacy.DPQuery.derive_sample_params`."""
  # The only per-sample parameter downstream code needs is the clip value.
  clip_value = global_state.clip_value
  return clip_value
def preprocess_record_l2_impl(self, params, record):
  """Clips the l2 norm, returning the clipped record and the l2 norm.

  Args:
    params: The parameters for the sample, i.e. the l2 clipping norm.
    record: The record to be processed.

  Returns:
    A tuple (preprocessed_records, l2_norm) where `preprocessed_records` is
    the structure of preprocessed tensors, and l2_norm is the total l2 norm
    before clipping.
  """
  flat_record = tf.nest.flatten(record)
  # `tf.clip_by_global_norm` also reports the pre-clipping global norm.
  clipped_flat, global_norm = tf.clip_by_global_norm(flat_record, params)
  clipped_record = tf.nest.pack_sequence_as(record, clipped_flat)
  return clipped_record, global_norm
def preprocess_record(self, params, record):
  """Implements `tensorflow_privacy.DPQuery.preprocess_record`.

  Args:
    params: `clip_value` for the record.
    record: The record to be processed.

  Returns:
    Structure of clipped tensors.
  """
  flat_record = tf.nest.flatten(record)
  # The user-supplied clip function operates on the flat list of tensors.
  clipped_flat = self._clip_fn(flat_record, params)
  return tf.nest.pack_sequence_as(record, clipped_flat)
def get_noised_result(self, sample_state, global_state):
  """Implements `tensorflow_privacy.DPQuery.get_noised_result`.

  Updates the tree state and returns the residual of the noised cumulative
  sum: x_i + noise(s_i) - noise(s_{i-1}).

  Args:
    sample_state: Sum of clipped records for this round.
    global_state: Global state with current samples cumulative sum and tree
      state.

  Returns:
    A tuple (noised_sum, new_global_state, event).
  """
  cumulative_noise, advanced_tree_state = (
      self._tree_aggregator.get_cumsum_and_update(global_state.tree_state))

  # Adding this round's cumulative noise and subtracting the previous one
  # leaves exactly the residual noise on the per-round sum.
  def residual(clipped_sum, noise_now, noise_prev):
    return clipped_sum + noise_now - noise_prev

  noised_sample = tf.nest.map_structure(residual, sample_state,
                                        cumulative_noise,
                                        global_state.previous_tree_noise)
  new_global_state = attr.evolve(
      global_state,
      previous_tree_noise=cumulative_noise,
      tree_state=advanced_tree_state)
  return noised_sample, new_global_state, dp_event.UnsupportedDpEvent()
def reset_state(self, noised_results, global_state):
  """Returns state after resetting the tree.

  Used by `restart_query.RestartQuery` after `get_noised_result` when the
  restarting condition is met.

  Args:
    noised_results: Noised results returned by `get_noised_result` (unused).
    global_state: Updated global state returned by `get_noised_result`, which
      records noise for the conceptual cumulative sum of the current leaf
      node, and tree state for the next conceptual cumulative sum.

  Returns:
    New global state with zero noise and restarted tree state.
  """
  del noised_results  # Unused.
  restarted_tree = self._tree_aggregator.reset_state(global_state.tree_state)
  return attr.evolve(
      global_state,
      previous_tree_noise=self._zero_initial_noise(),
      tree_state=restarted_tree)
def reset_l2_clip_gaussian_noise(self, global_state, clip_norm, stddev):
  """Returns a state with updated clip norm and Gaussian noise stddev.

  Only valid when the tree aggregator's value generator is a
  `tree_aggregation.GaussianNoiseGenerator`; the generator's seeds are
  preserved while its standard deviation is replaced.
  """
  generator = self._tree_aggregator.value_generator
  assert isinstance(generator, tree_aggregation.GaussianNoiseGenerator)
  old_generator_state = global_state.tree_state.value_generator_state
  new_generator_state = generator.make_state(old_generator_state.seeds,
                                             stddev)
  updated_tree_state = attr.evolve(
      global_state.tree_state, value_generator_state=new_generator_state)
  return attr.evolve(
      global_state, clip_value=clip_norm, tree_state=updated_tree_state)
@classmethod
def build_l2_gaussian_query(cls,
                            clip_norm,
                            noise_multiplier,
                            record_specs,
                            noise_seed=None,
                            use_efficient=True):
  """Returns `TreeResidualSumQuery` with L2 norm clipping and Gaussian noise.

  Args:
    clip_norm: Each record will be clipped so that it has L2 norm at most
      `clip_norm`.
    noise_multiplier: The effective noise multiplier for the sum of records.
      Noise standard deviation is `clip_norm*noise_multiplier`. The value can
      be used as the input of the privacy accounting functions in
      `analysis.tree_aggregation_accountant`.
    record_specs: A nested structure of `tf.TensorSpec`s specifying structure
      and shapes of records.
    noise_seed: Integer seed for the Gaussian noise generator. If `None`, a
      nondeterministic seed based on system time will be generated.
    use_efficient: Boolean indicating the usage of the efficient tree
      aggregation algorithm based on the paper "Efficient Use of
      Differentially Private Binary Trees".

  Raises:
    ValueError: If `clip_norm` or `noise_multiplier` is negative.
  """
  # Validate before constructing anything stateful.
  if clip_norm < 0:
    raise ValueError(f'`clip_norm` must be non-negative, got {clip_norm}.')
  if noise_multiplier < 0:
    raise ValueError(
        f'`noise_multiplier` must be non-negative, got {noise_multiplier}.')

  def l2_clip_fn(record_as_list, clip_norm):
    clipped_record, _ = tf.clip_by_global_norm(record_as_list, clip_norm)
    return clipped_record

  gaussian_noise_generator = tree_aggregation.GaussianNoiseGenerator(
      noise_std=clip_norm * noise_multiplier,
      specs=record_specs,
      seed=noise_seed)
  return cls(
      clip_fn=l2_clip_fn,
      clip_value=clip_norm,
      record_specs=record_specs,
      noise_generator=gaussian_noise_generator,
      use_efficient=use_efficient)
| tensorflow/privacy | tensorflow_privacy/privacy/dp_query/tree_aggregation_query.py | Python | apache-2.0 | 22,246 | [
"Gaussian"
] | 982d8609186c3ce1e783d929b2b46428da9c3da307e5deeba8eca627ecfa42cc |
from rdkit.Chem import AllChem as Chem
from rdkit import DataStructs
from collections import OrderedDict
import numpy
class FP:
    """Thin wrapper around a fingerprint vector.

    Exists so the numpy fingerprint array can be carried through code that
    expects an object rather than a bare array.
    """

    def __init__(self, fp):
        # Stored by reference; no copy is made.
        self.fp = fp

    def __str__(self):
        return str(self.fp)
def computeFP(x):
    """Return the radius-2, 2048-bit Morgan fingerprint of molecule `x`.

    The RDKit bit vector is converted to an int32 numpy array and wrapped
    in the `FP` container.
    """
    bit_vector = Chem.GetMorganFingerprintAsBitVect(x, 2, nBits=2048)
    as_array = numpy.zeros(len(bit_vector), numpy.int32)
    # ConvertToNumpyArray fills `as_array` in place from the bit vector.
    DataStructs.ConvertToNumpyArray(bit_vector, as_array)
    return FP(as_array)
def topNpreds(morgan_bnb, classes, m, fp, N=5):
    """Return the top-N predicted targets for one molecule.

    Args:
        morgan_bnb: Fitted classifier exposing `predict_proba`.
        classes: Sequence of target labels aligned with the classifier output.
        m: Identifier of the molecule being scored (echoed into each row).
        fp: 1-D fingerprint array; reshaped to a single-row 2-D input.
        N: Number of highest-probability targets to keep.

    Returns:
        List of (m, target, probability) tuples, highest probability first.
    """
    probabilities = list(morgan_bnb.predict_proba(fp.reshape(1, -1))[0])
    # Map each target label to its probability (later duplicates win, as in
    # a plain dict built from zip).
    per_target = dict(zip(classes, probabilities))
    ranked = sorted(per_target.items(), key=lambda item: item[1], reverse=True)
    return [(m, target, score) for target, score in ranked[:N]]
| chembl/target_predictions | utils.py | Python | apache-2.0 | 858 | [
"RDKit"
] | 3b19623d41b4404da3e60abce7d55e8f0c4dbab486aa9c8d1b3a09f26ab8a358 |
#!/usr/bin/python
########################################################################
# 12 Jan 2015
# Patrick Lombard, Centre for Stem Stem Research
# Core Bioinformatics Group
# University of Cambridge
# All right reserved.
########################################################################
import subprocess
import re, os, sys
import argparse
import tempfile
import pysam
import ConfigParser
def find_peak_coverage(combined, samples, pvals, sizes):
    """Compute reads-per-million coverage of each combined peak per sample.

    Runs `bedtools coverage` for every (pval, sample) pair and normalises the
    raw read count by the sample's total mapped reads (from `sizes`),
    scaled to reads per million.

    Args:
        combined: dict mapping pval -> path of combined/sorted peak BED file.
        samples: dict mapping sample name -> BAM file path.
        pvals: iterable of p-value thresholds to process.
        sizes: dict mapping sample name -> total mapped read count.

    Returns:
        Nested dict: data[pval][sample][(chrom, start, end)] -> normalised
        coverage (float, reads per million).
    """
    data = {}
    for pval in pvals:
        data[pval] = {}
        for sample in samples:
            data[pval][sample] = {}
            b = tempfile.NamedTemporaryFile(delete=False)
            b.close()
            command = "bedtools coverage -abam {} -b {} > {}\n".format(
                samples[sample], combined[pval], b.name)
            subprocess.call(command, shell=True)
            try:
                with open(b.name) as f:
                    for line in f:
                        word = line.rstrip().split("\t")
                        # Column 4 of bedtools coverage output is the read
                        # count overlapping the peak; normalise to RPM.
                        norm = int(word[3]) / float(sizes[sample]) * 1000000
                        key = (word[0], word[1], word[2])
                        if key in data[pval][sample]:
                            data[pval][sample][key] += norm
                        else:
                            data[pval][sample][key] = norm
            finally:
                # Fix: temporary files were created with delete=False and
                # never removed, leaking one file per (pval, sample) pair.
                os.unlink(b.name)
    return data
def sam_size(conditions):
    """Return total mapped-read counts per unique BAM file.

    Args:
        conditions: dict mapping peak name -> BAM file path.

    Returns:
        dict mapping BAM path -> total read count summed over the third
        column of `samtools idxstats` output.
    """
    sizes = {}
    for peak in conditions:
        bam_path = conditions[peak]
        if bam_path in sizes:
            continue  # Several peaks may share one BAM; count it once.
        per_ref_counts = [int(row.rstrip('\n').split('\t')[2])
                          for row in pysam.idxstats(bam_path)]
        sizes[bam_path] = reduce(lambda acc, val: acc + val, per_ref_counts)
    return sizes
def combine_all_peaks(conditions):
    # Merges per-sample 400bp peak BED files for each p-value threshold into
    # one sorted BED file; returns dict mapping pval -> sorted BED path.
    # NOTE(review): `samples` is not defined in this function or at module
    # level — as written this raises NameError at runtime; confirm where the
    # sample list was meant to come from.
    # NOTE(review): the temporary files `f`/`g` are created with delete=False
    # and never removed.
    data = {}
    for pval in conditions:
        f = tempfile.NamedTemporaryFile(delete=False)
        for sample in samples:
            # Expects files named "<sample>_<pval>_400bp.bed" in the cwd.
            name = sample + "_" + pval + "_400bp.bed"
            with open(name) as k:
                for line in k:
                    line = line.rstrip()
                    word = line.split("\t")
                    # Convert UCSC-style chromosome names to Ensembl style.
                    if word[0] == "chrM":
                        new_chr = "MT"
                    else:
                        new_chr = word[0].strip("chr")
                    f.write("{}\t{}\t{}\n".format(new_chr, word[1], word[2])),
        f.close()
        g = tempfile.NamedTemporaryFile(delete=False)
        # h = tempfile.NamedTemporaryFile(delete=False)
        g.close()
        # h.close()
        command1 = "sortBed -i {} > {}".format(f.name, g.name)
        #command2 = "mergeBed -i {} > {}".format(g.name, h.name)
        subprocess.call(command1, shell=True)
        # subprocess.call(command2, shell=True)
        data[pval] = g.name
    return data
def write_results(coverage):
    """Write one TSV per p-value threshold with per-sample peak coverage.

    Each output row is: chrom, start, end, then one normalised coverage
    value per sample (samples in sorted order).

    Args:
        coverage: nested dict coverage[pval][sample][(chrom, start, end)]
            -> normalised coverage value.
    """
    for pval in coverage:
        sample_names = sorted(coverage[pval])
        if not sample_names:
            continue
        # Generalisation: the original hard-coded one sample name
        # ("Etv5_Flag_KI20_N16h_ucsc") to enumerate the peaks. All samples
        # are scored against the same combined peak set, so any sample's
        # keys enumerate every peak; use the first for a stable order.
        peaks = sorted(coverage[pval][sample_names[0]])
        with open("{}_result.tsv".format(pval), "w") as output:
            for chrom, start, end in peaks:
                output.write("{}\t{}\t{}".format(chrom, start, end))
                for sample in sample_names:
                    output.write(
                        "\t{}".format(coverage[pval][sample][(chrom, start, end)]))
                output.write("\n")
def ConfigSectionMap(section, Config):
    """Return a dict of option -> value for one section of a ConfigParser.

    Options that cannot be read are reported and mapped to None rather than
    aborting the whole section.

    Args:
        section: name of the config section to read.
        Config: a ConfigParser instance that has already read its file.

    Returns:
        dict mapping option name -> string value (or None on read failure).
    """
    dict1 = {}
    for option in Config.options(section):
        try:
            dict1[option] = Config.get(section, option)
        # Fix: the original used a bare `except` and a dead branch calling
        # the undefined name `DebugPrint` (Config.get returns strings, so
        # `== -1` could never be true). Narrowed to Exception.
        except Exception:
            print("exception on %s!" % option)
            dict1[option] = None
    return dict1
def main():
    """Parse arguments, read the config and write per-pval coverage tables."""
    parser = argparse.ArgumentParser(description='Creates a table for coverage of a set of peaks. Still needs work\n')
    # Fix: the option was declared as ('-c', 'config.ini', ...), which is not
    # a valid argparse flag pair (argparse raises ValueError) and left
    # args["config"] undefined. Declare the long flag properly.
    parser.add_argument('-c', '--config', help='[Conditions] contains the peaks as keys and bam files as values')
    parser.add_argument('-o', '--output', help='Output file')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = vars(parser.parse_args())
    Config = ConfigParser.ConfigParser()
    Config.optionxform = str  # Preserve option-name case (sample names).
    Config.read(args["config"])
    # Fix: arguments were passed as (Config, "Conditions") but the signature
    # is ConfigSectionMap(section, Config).
    conditions = ConfigSectionMap("Conditions", Config)
    sizes = sam_size(conditions)
    combined = combine_all_peaks(conditions)
    # NOTE(review): `samples` and `pvals` are not defined anywhere in this
    # script; this call raises NameError as written — confirm the intended
    # source of these values (likely further config sections).
    coverage = find_peak_coverage(combined, samples, pvals, sizes)
    write_results(coverage)

main()
"pysam"
] | e02691a50d8f88f0c6c3126652efd11f8171fd412c7ef87d4f41094c139e8dfb |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyGpytorch(PythonPackage):
    """GPyTorch is a Gaussian process library implemented using PyTorch.
    GPyTorch is designed for creating scalable, flexible, and modular Gaussian
    process models with ease."""

    homepage = "https://gpytorch.ai/"
    # Source tarball is fetched from PyPI; sha256 below pins each release.
    pypi = "gpytorch/gpytorch-1.2.1.tar.gz"

    maintainers = ['adamjstewart']

    version('1.2.1', sha256='ddd746529863d5419872610af23b1a1b0e8a29742131c9d9d2b4f9cae3c90781')
    version('1.2.0', sha256='fcb216e0c1f128a41c91065766508e91e487d6ffadf212a51677d8014aefca84')
    version('1.1.1', sha256='76bd455db2f17af5425f73acfaa6d61b8adb1f07ad4881c0fa22673f84fb571a')

    # Build/runtime requirements; `when=` restricts a constraint to the
    # matching package versions.
    depends_on('python@3.6:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-torch@1.6:', when='@1.2:', type=('build', 'run'))
    depends_on('py-torch@1.5:', type=('build', 'run'))
    depends_on('py-scikit-learn', when='@1.2:', type=('build', 'run'))
    depends_on('py-scipy', when='@1.2:', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/py-gpytorch/package.py | Python | lgpl-2.1 | 1,184 | [
"Gaussian"
] | 2ac3b38b04a42b0b8d03d73f0ad884d884716d9b5eeec68fc84bf902b4ae3497 |
# -*- coding: utf-8 -*-
"""
Regression tests for the Test Client, especially the customized assertions.
"""
from __future__ import unicode_literals
import os
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.core.urlresolvers import reverse
from django.template import (TemplateDoesNotExist, TemplateSyntaxError,
Context, Template, loader)
import django.template.context
from django.test import Client, TestCase
from django.test.client import encode_file, RequestFactory
from django.test.utils import ContextList, override_settings, str_prefix
from django.template.response import SimpleTemplateResponse
from django.utils._os import upath
from django.utils.translation import ugettext_lazy
from django.http import HttpResponse
@override_settings(
    TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'templates'),)
)
class AssertContainsTests(TestCase):
    """Exercise assertContains/assertNotContains and their failure messages.

    NOTE(review): the ``try/except AssertionError`` blocks below check the
    failure message but do not fail if no AssertionError is raised at all;
    ``assertRaises`` (or an ``else: self.fail(...)``) would catch a
    silently-passing assertion.
    """

    def test_contains(self):
        "Responses can be inspected for content, including counting repeated substrings"
        response = self.client.get('/test_client_regress/no_template_view/')
        self.assertNotContains(response, 'never')
        self.assertContains(response, 'never', 0)
        self.assertContains(response, 'once')
        self.assertContains(response, 'once', 1)
        self.assertContains(response, 'twice')
        self.assertContains(response, 'twice', 2)

        try:
            self.assertContains(response, 'text', status_code=999)
        except AssertionError as e:
            self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
        try:
            self.assertContains(response, 'text', status_code=999, msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))

        try:
            self.assertNotContains(response, 'text', status_code=999)
        except AssertionError as e:
            self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
        try:
            self.assertNotContains(response, 'text', status_code=999, msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))

        try:
            self.assertNotContains(response, 'once')
        except AssertionError as e:
            self.assertIn("Response should not contain 'once'", str(e))
        try:
            self.assertNotContains(response, 'once', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Response should not contain 'once'", str(e))

        try:
            self.assertContains(response, 'never', 1)
        except AssertionError as e:
            self.assertIn("Found 0 instances of 'never' in response (expected 1)", str(e))
        try:
            self.assertContains(response, 'never', 1, msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Found 0 instances of 'never' in response (expected 1)", str(e))

        try:
            self.assertContains(response, 'once', 0)
        except AssertionError as e:
            self.assertIn("Found 1 instances of 'once' in response (expected 0)", str(e))
        try:
            self.assertContains(response, 'once', 0, msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Found 1 instances of 'once' in response (expected 0)", str(e))

        try:
            self.assertContains(response, 'once', 2)
        except AssertionError as e:
            self.assertIn("Found 1 instances of 'once' in response (expected 2)", str(e))
        try:
            self.assertContains(response, 'once', 2, msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Found 1 instances of 'once' in response (expected 2)", str(e))

        try:
            self.assertContains(response, 'twice', 1)
        except AssertionError as e:
            self.assertIn("Found 2 instances of 'twice' in response (expected 1)", str(e))
        try:
            self.assertContains(response, 'twice', 1, msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Found 2 instances of 'twice' in response (expected 1)", str(e))

        try:
            self.assertContains(response, 'thrice')
        except AssertionError as e:
            self.assertIn("Couldn't find 'thrice' in response", str(e))
        try:
            self.assertContains(response, 'thrice', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Couldn't find 'thrice' in response", str(e))

        try:
            self.assertContains(response, 'thrice', 3)
        except AssertionError as e:
            self.assertIn("Found 0 instances of 'thrice' in response (expected 3)", str(e))
        try:
            self.assertContains(response, 'thrice', 3, msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Found 0 instances of 'thrice' in response (expected 3)", str(e))

    def test_unicode_contains(self):
        "Unicode characters can be found in template context"
        #Regression test for #10183
        r = self.client.get('/test_client_regress/check_unicode/')
        self.assertContains(r, 'さかき')
        self.assertContains(r, b'\xe5\xb3\xa0'.decode('utf-8'))

    def test_unicode_not_contains(self):
        "Unicode characters can be searched for, and not found in template context"
        #Regression test for #10183
        r = self.client.get('/test_client_regress/check_unicode/')
        self.assertNotContains(r, 'はたけ')
        self.assertNotContains(r, b'\xe3\x81\xaf\xe3\x81\x9f\xe3\x81\x91'.decode('utf-8'))

    def test_nontext_contains(self):
        # Lazy translation objects are accepted as the needle.
        r = self.client.get('/test_client_regress/no_template_view/')
        self.assertContains(r, ugettext_lazy('once'))

    def test_nontext_not_contains(self):
        r = self.client.get('/test_client_regress/no_template_view/')
        self.assertNotContains(r, ugettext_lazy('never'))

    def test_assert_contains_renders_template_response(self):
        """ Test that we can pass in an unrendered SimpleTemplateReponse
        without throwing an error.
        Refs #15826.
        """
        response = SimpleTemplateResponse(Template('Hello'), status=200)
        self.assertContains(response, 'Hello')

    def test_assert_contains_using_non_template_response(self):
        """ Test that auto-rendering does not affect responses that aren't
        instances (or subclasses) of SimpleTemplateResponse.
        Refs #15826.
        """
        response = HttpResponse('Hello')
        self.assertContains(response, 'Hello')

    def test_assert_not_contains_renders_template_response(self):
        """ Test that we can pass in an unrendered SimpleTemplateReponse
        without throwing an error.
        Refs #15826.
        """
        response = SimpleTemplateResponse(Template('Hello'), status=200)
        self.assertNotContains(response, 'Bye')

    def test_assert_not_contains_using_non_template_response(self):
        """ Test that auto-rendering does not affect responses that aren't
        instances (or subclasses) of SimpleTemplateResponse.
        Refs #15826.
        """
        response = HttpResponse('Hello')
        self.assertNotContains(response, 'Bye')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AssertTemplateUsedTests(TestCase):
    """Exercise assertTemplateUsed/assertTemplateNotUsed failure messages.

    NOTE(review): the ``try/except AssertionError`` blocks check the failure
    message but pass silently when no AssertionError is raised; consider
    ``assertRaises``.
    """
    fixtures = ['testdata.json']

    def test_no_context(self):
        "Template usage assertions work then templates aren't in use"
        response = self.client.get('/test_client_regress/no_template_view/')

        # Check that the no template case doesn't mess with the template assertions
        self.assertTemplateNotUsed(response, 'GET Template')

        try:
            self.assertTemplateUsed(response, 'GET Template')
        except AssertionError as e:
            self.assertIn("No templates used to render the response", str(e))
        try:
            self.assertTemplateUsed(response, 'GET Template', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: No templates used to render the response", str(e))

    def test_single_context(self):
        "Template assertions work when there is a single context"
        response = self.client.get('/test_client/post_view/', {})

        try:
            self.assertTemplateNotUsed(response, 'Empty GET Template')
        except AssertionError as e:
            self.assertIn("Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
        try:
            self.assertTemplateNotUsed(response, 'Empty GET Template', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
        try:
            self.assertTemplateUsed(response, 'Empty POST Template')
        except AssertionError as e:
            self.assertIn("Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e))
        try:
            self.assertTemplateUsed(response, 'Empty POST Template', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e))

    def test_multiple_context(self):
        "Template assertions work when there are multiple contexts"
        post_data = {
            'text': 'Hello World',
            'email': 'foo@example.com',
            'value': 37,
            'single': 'b',
            'multi': ('b','c','e')
        }
        response = self.client.post('/test_client/form_view_with_template/', post_data)
        self.assertContains(response, 'POST data OK')
        try:
            self.assertTemplateNotUsed(response, "form_view.html")
        except AssertionError as e:
            self.assertIn("Template 'form_view.html' was used unexpectedly in rendering the response", str(e))
        try:
            self.assertTemplateNotUsed(response, 'base.html')
        except AssertionError as e:
            self.assertIn("Template 'base.html' was used unexpectedly in rendering the response", str(e))
        try:
            self.assertTemplateUsed(response, "Valid POST Template")
        except AssertionError as e:
            self.assertIn("Template 'Valid POST Template' was not a template used to render the response. Actual template(s) used: form_view.html, base.html", str(e))
class AssertRedirectsTests(TestCase):
    """Exercise assertRedirects, including redirect chains with follow=True.

    NOTE(review): as elsewhere in this module, the ``try/except
    AssertionError`` blocks verify failure messages but do not fail when no
    AssertionError is raised.
    """

    def test_redirect_page(self):
        "An assertion is raised if the original page couldn't be retrieved as expected"
        # This page will redirect with code 301, not 302
        response = self.client.get('/test_client/permanent_redirect_view/')
        try:
            self.assertRedirects(response, '/test_client/get_view/')
        except AssertionError as e:
            self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
        try:
            self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))

    def test_lost_query(self):
        "An assertion is raised if the redirect location doesn't preserve GET parameters"
        response = self.client.get('/test_client/redirect_view/', {'var': 'value'})
        try:
            self.assertRedirects(response, '/test_client/get_view/')
        except AssertionError as e:
            self.assertIn("Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'", str(e))
        try:
            self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'", str(e))

    def test_incorrect_target(self):
        "An assertion is raised if the response redirects to another target"
        response = self.client.get('/test_client/permanent_redirect_view/')
        try:
            # Should redirect to get_view
            self.assertRedirects(response, '/test_client/some_view/')
        except AssertionError as e:
            self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))

    def test_target_page(self):
        "An assertion is raised if the response redirect target cannot be retrieved as expected"
        response = self.client.get('/test_client/double_redirect_view/')
        try:
            # The redirect target responds with a 301 code, not 200
            self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/')
        except AssertionError as e:
            self.assertIn("Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)", str(e))
        try:
            # The redirect target responds with a 301 code, not 200
            self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)", str(e))

    def test_redirect_chain(self):
        "You can follow a redirect chain of multiple redirects"
        response = self.client.get('/test_client_regress/redirects/further/more/', {}, follow=True)
        self.assertRedirects(response, '/test_client_regress/no_template_view/',
            status_code=301, target_status_code=200)

        self.assertEqual(len(response.redirect_chain), 1)
        self.assertEqual(response.redirect_chain[0], ('http://testserver/test_client_regress/no_template_view/', 301))

    def test_multiple_redirect_chain(self):
        "You can follow a redirect chain of multiple redirects"
        response = self.client.get('/test_client_regress/redirects/', {}, follow=True)
        self.assertRedirects(response, '/test_client_regress/no_template_view/',
            status_code=301, target_status_code=200)

        self.assertEqual(len(response.redirect_chain), 3)
        self.assertEqual(response.redirect_chain[0], ('http://testserver/test_client_regress/redirects/further/', 301))
        self.assertEqual(response.redirect_chain[1], ('http://testserver/test_client_regress/redirects/further/more/', 301))
        self.assertEqual(response.redirect_chain[2], ('http://testserver/test_client_regress/no_template_view/', 301))

    def test_redirect_chain_to_non_existent(self):
        "You can follow a chain to a non-existent view"
        response = self.client.get('/test_client_regress/redirect_to_non_existent_view2/', {}, follow=True)
        self.assertRedirects(response, '/test_client_regress/non_existent_view/',
            status_code=301, target_status_code=404)

    def test_redirect_chain_to_self(self):
        "Redirections to self are caught and escaped"
        response = self.client.get('/test_client_regress/redirect_to_self/', {}, follow=True)
        # The chain of redirects stops once the cycle is detected.
        self.assertRedirects(response, '/test_client_regress/redirect_to_self/',
            status_code=301, target_status_code=301)
        self.assertEqual(len(response.redirect_chain), 2)

    def test_circular_redirect(self):
        "Circular redirect chains are caught and escaped"
        response = self.client.get('/test_client_regress/circular_redirect_1/', {}, follow=True)
        # The chain of redirects will get back to the starting point, but stop there.
        self.assertRedirects(response, '/test_client_regress/circular_redirect_2/',
            status_code=301, target_status_code=301)
        self.assertEqual(len(response.redirect_chain), 4)

    def test_redirect_chain_post(self):
        "A redirect chain will be followed from an initial POST post"
        response = self.client.post('/test_client_regress/redirects/',
            {'nothing': 'to_send'}, follow=True)
        self.assertRedirects(response,
            '/test_client_regress/no_template_view/', 301, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_head(self):
        "A redirect chain will be followed from an initial HEAD request"
        response = self.client.head('/test_client_regress/redirects/',
            {'nothing': 'to_send'}, follow=True)
        self.assertRedirects(response,
            '/test_client_regress/no_template_view/', 301, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_options(self):
        "A redirect chain will be followed from an initial OPTIONS request"
        response = self.client.options('/test_client_regress/redirects/',
            follow=True)
        self.assertRedirects(response,
            '/test_client_regress/no_template_view/', 301, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_put(self):
        "A redirect chain will be followed from an initial PUT request"
        response = self.client.put('/test_client_regress/redirects/',
            follow=True)
        self.assertRedirects(response,
            '/test_client_regress/no_template_view/', 301, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_delete(self):
        "A redirect chain will be followed from an initial DELETE request"
        response = self.client.delete('/test_client_regress/redirects/',
            follow=True)
        self.assertRedirects(response,
            '/test_client_regress/no_template_view/', 301, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_to_different_host(self):
        "The test client will preserve scheme, host and port changes"
        response = self.client.get('/test_client_regress/redirect_other_host/', follow=True)
        self.assertRedirects(response,
            'https://otherserver:8443/test_client_regress/no_template_view/',
            status_code=301, target_status_code=200)
        # We can't use is_secure() or get_host()
        # because response.request is a dictionary, not an HttpRequest
        self.assertEqual(response.request.get('wsgi.url_scheme'), 'https')
        self.assertEqual(response.request.get('SERVER_NAME'), 'otherserver')
        self.assertEqual(response.request.get('SERVER_PORT'), '8443')

    def test_redirect_chain_on_non_redirect_page(self):
        "An assertion is raised if the original page couldn't be retrieved as expected"
        # This page will redirect with code 301, not 302
        response = self.client.get('/test_client/get_view/', follow=True)
        try:
            self.assertRedirects(response, '/test_client/get_view/')
        except AssertionError as e:
            self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
        try:
            self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))

    def test_redirect_on_non_redirect_page(self):
        "An assertion is raised if the original page couldn't be retrieved as expected"
        # This page will redirect with code 301, not 302
        response = self.client.get('/test_client/get_view/')
        try:
            self.assertRedirects(response, '/test_client/get_view/')
        except AssertionError as e:
            self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
        try:
            self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
class AssertFormErrorTests(TestCase):
    """Failure messages produced by assertFormError."""

    # Every test POSTs the same invalid payload so the "Invalid POST
    # Template" is rendered with a bound form whose 'email' field carries
    # a validation error.
    post_data = {
        'text': 'Hello World',
        'email': 'not an email address',
        'value': 37,
        'single': 'b',
        'multi': ('b', 'c', 'e'),
    }

    def _post_invalid_form(self):
        """POST the canned invalid data and sanity-check the response."""
        response = self.client.post('/test_client/form_view/', self.post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        return response

    def _assert_form_error_fails(self, expected_message, *args, **kwargs):
        """
        Check that assertFormError(response, *args, **kwargs) raises an
        AssertionError whose message contains *expected_message*.

        Unlike the bare try/except idiom, this also fails the test when
        assertFormError unexpectedly succeeds (previously such a test
        would silently pass).
        """
        response = self._post_invalid_form()
        try:
            self.assertFormError(response, *args, **kwargs)
        except AssertionError as e:
            self.assertIn(expected_message, str(e))
        else:
            self.fail('assertFormError should have raised AssertionError')

    def test_unknown_form(self):
        "An assertion is raised if the form name is unknown"
        self._assert_form_error_fails(
            "The form 'wrong_form' was not used to render the response",
            'wrong_form', 'some_field', 'Some error.')
        self._assert_form_error_fails(
            "abc: The form 'wrong_form' was not used to render the response",
            'wrong_form', 'some_field', 'Some error.', msg_prefix='abc')

    def test_unknown_field(self):
        "An assertion is raised if the field name is unknown"
        self._assert_form_error_fails(
            "The form 'form' in context 0 does not contain the field "
            "'some_field'",
            'form', 'some_field', 'Some error.')
        self._assert_form_error_fails(
            "abc: The form 'form' in context 0 does not contain the field "
            "'some_field'",
            'form', 'some_field', 'Some error.', msg_prefix='abc')

    def test_noerror_field(self):
        "An assertion is raised if the field doesn't have any errors"
        self._assert_form_error_fails(
            "The field 'value' on form 'form' in context 0 contains no "
            "errors",
            'form', 'value', 'Some error.')
        self._assert_form_error_fails(
            "abc: The field 'value' on form 'form' in context 0 contains no "
            "errors",
            'form', 'value', 'Some error.', msg_prefix='abc')

    def test_unknown_error(self):
        "An assertion is raised if the field doesn't contain the provided error"
        self._assert_form_error_fails(
            str_prefix(
                "The field 'email' on form 'form' in context 0 does not "
                "contain the error 'Some error.' (actual errors: "
                "[%(_)s'Enter a valid email address.'])"),
            'form', 'email', 'Some error.')
        self._assert_form_error_fails(
            str_prefix(
                "abc: The field 'email' on form 'form' in context 0 does "
                "not contain the error 'Some error.' (actual errors: "
                "[%(_)s'Enter a valid email address.'])"),
            'form', 'email', 'Some error.', msg_prefix='abc')

    def test_unknown_nonfield_error(self):
        """
        Checks that an assertion is raised if the form's non field errors
        doesn't contain the provided error.
        """
        self._assert_form_error_fails(
            "The form 'form' in context 0 does not contain the non-field "
            "error 'Some error.' (actual errors: )",
            'form', None, 'Some error.')
        self._assert_form_error_fails(
            "abc: The form 'form' in context 0 does not contain the "
            "non-field error 'Some error.' (actual errors: )",
            'form', None, 'Some error.', msg_prefix='abc')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class LoginTests(TestCase):
    fixtures = ['testdata']

    def test_login_different_client(self):
        "Check that using a different test client doesn't violate authentication"
        # Log in with a brand-new client instance...
        other_client = Client()
        self.assertTrue(
            other_client.login(username='testclient', password='password'),
            'Could not log in')
        # ...and fetch a login-protected redirecting view with it.
        response = other_client.get(
            "/test_client_regress/login_protected_redirect_view/")
        # self.client is still anonymous at this point, so a passing
        # assertRedirects proves the assertion follows the redirect with the
        # client that issued the original request, not the default one.
        self.assertRedirects(
            response, "http://testserver/test_client_regress/get_view/")
@override_settings(
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
    SESSION_ENGINE='test_client_regress.session'
)
class SessionEngineTests(TestCase):
    fixtures = ['testdata']

    def test_login(self):
        "A session engine that modifies the session key can be used to log in"
        self.assertTrue(
            self.client.login(username='testclient', password='password'),
            'Could not log in')
        # The login must survive into a subsequent protected request.
        response = self.client.get("/test_client/login_protected_view/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
class URLEscapingTests(TestCase):
    """reverse()d URLs with plain and space-containing arguments resolve."""

    def _request(self, method_name, arg):
        """Issue GET or POST against arg_view with *arg*; assert 200 and
        return the response."""
        handler = getattr(self.client, method_name)
        response = handler(reverse('arg_view', args=[arg]))
        self.assertEqual(response.status_code, 200)
        return response

    def test_simple_argument_get(self):
        "Get a view that has a simple string argument"
        self.assertEqual(self._request('get', 'Slartibartfast').content,
                         b'Howdy, Slartibartfast')

    def test_argument_with_space_get(self):
        "Get a view that has a string argument that requires escaping"
        self.assertEqual(self._request('get', 'Arthur Dent').content,
                         b'Hi, Arthur')

    def test_simple_argument_post(self):
        "Post for a view that has a simple string argument"
        self.assertEqual(self._request('post', 'Slartibartfast').content,
                         b'Howdy, Slartibartfast')

    def test_argument_with_space_post(self):
        "Post for a view that has a string argument that requires escaping"
        self.assertEqual(self._request('post', 'Arthur Dent').content,
                         b'Hi, Arthur')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class ExceptionTests(TestCase):
    fixtures = ['testdata.json']

    def test_exception_cleared(self):
        "#5836 - A stale user exception isn't re-raised by the test client."
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # General users may not visit the staff-only page; assertRaises
        # replaces the older try/self.fail/except idiom.
        with self.assertRaises(SuspiciousOperation):
            self.client.get("/test_client_regress/staff_only/")
        # The exception raised above must have been cleared by the client.
        # This next operation should succeed; if it doesn't we have a problem.
        login = self.client.login(username='staff', password='password')
        self.assertTrue(login, 'Could not log in')
        try:
            self.client.get("/test_client_regress/staff_only/")
        except SuspiciousOperation:
            self.fail("Staff should be able to visit this page")
class TemplateExceptionTests(TestCase):
    def setUp(self):
        # Reset the loaders so they don't try to render cached templates.
        if loader.template_source_loaders is not None:
            for template_loader in loader.template_source_loaders:
                if hasattr(template_loader, 'reset'):
                    template_loader.reset()

    @override_settings(
        TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)),
                                    'bad_templates'),)
    )
    def test_bad_404_template(self):
        "Errors found when rendering 404 error templates are re-raised"
        # The bad_templates directory ships a 404 template with a syntax
        # error; rendering it for the missing view must surface that error.
        # (assertRaises also drops the unused `response` local of the old
        # try/self.fail/except version.)
        with self.assertRaises(TemplateSyntaxError):
            self.client.get("/no_such_view/")
# URLconf substitution needs two tests: one (with self.urls) proving the
# swap happened, and a second (without self.urls) proving it was reverted
# on teardown. The pair relies on alphabetical test-execution ordering.
class UrlconfSubstitutionTests(TestCase):
    urls = 'test_client_regress.urls'

    def test_urlconf_was_changed(self):
        "TestCase can enforce a custom URLconf on a per-test basis"
        # Under the substituted URLconf the view is mounted at the root.
        self.assertEqual(reverse('arg_view', args=['somename']),
                         '/arg_view/somename/')
# Must run *after* UrlconfSubstitutionTests; the zz prefix guarantees this
# under alphabetical ordering.
class zzUrlconfSubstitutionTests(TestCase):
    def test_urlconf_was_reverted(self):
        "URLconf is reverted to original value after modification in a TestCase"
        # Without self.urls the view is back under its normal prefix.
        self.assertEqual(reverse('arg_view', args=['somename']),
                         '/test_client_regress/arg_view/somename/')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class ContextTests(TestCase):
    fixtures = ['testdata']

    def test_single_context(self):
        "Context variables can be retrieved from a single context"
        response = self.client.get("/test_client_regress/request_data/",
                                   data={'foo': 'whiz'})
        self.assertEqual(response.context.__class__, Context)
        self.assertIn('get-foo', response.context)
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertEqual(response.context['request-foo'], 'whiz')
        self.assertEqual(response.context['data'], 'sausage')
        # Missing keys must raise KeyError carrying the key as argument.
        with self.assertRaises(KeyError) as cm:
            response.context['does-not-exist']
        self.assertEqual(cm.exception.args[0], 'does-not-exist')

    def test_inherited_context(self):
        "Context variables can be retrieved from a list of contexts"
        response = self.client.get(
            "/test_client_regress/request_data_extended/",
            data={'foo': 'whiz'})
        self.assertEqual(response.context.__class__, ContextList)
        self.assertEqual(len(response.context), 2)
        self.assertIn('get-foo', response.context)
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertEqual(response.context['request-foo'], 'whiz')
        self.assertEqual(response.context['data'], 'bacon')
        with self.assertRaises(KeyError) as cm:
            response.context['does-not-exist']
        self.assertEqual(cm.exception.args[0], 'does-not-exist')

    def test_15368(self):
        # Need to insert a context processor that assumes certain things about
        # the request instance. This triggers a bug caused by some ways of
        # copying RequestContext.
        try:
            django.template.context._standard_context_processors = (
                lambda request: {'path': request.special_path},)
            response = self.client.get(
                "/test_client_regress/request_context_view/")
            self.assertContains(
                response, 'Path: /test_client_regress/request_context_view/')
        finally:
            # Always restore the global processor list (test isolation).
            django.template.context._standard_context_processors = None
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SessionTests(TestCase):
    fixtures = ['testdata.json']

    def _assert_session_flag(self, expected):
        """GET the check_session view and compare its body to *expected*."""
        response = self.client.get('/test_client_regress/check_session/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, expected)

    def test_session(self):
        "The session isn't lost if a user logs in"
        # No session data exists to start with.
        self._assert_session_flag(b'NO')
        # Set a session variable...
        response = self.client.get('/test_client_regress/set_session/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'set_session')
        # ...and confirm the session was modified.
        self._assert_session_flag(b'YES')
        # Logging in must not wipe the existing session contents.
        self.assertTrue(
            self.client.login(username='testclient', password='password'),
            'Could not log in')
        self._assert_session_flag(b'YES')

    def test_logout(self):
        """Logout should work whether the user is logged in or not (#9978)."""
        # Logging out while anonymous must not raise...
        self.client.logout()
        self.assertTrue(
            self.client.login(username='testclient', password='password'),
            'Could not log in')
        # ...and logging out twice in a row must also be harmless.
        self.client.logout()
        self.client.logout()
class RequestMethodTests(TestCase):
    """Every HTTP verb supported by the test client reaches the echo view."""

    def _request(self, method_name):
        """Issue *method_name* against the echo view; assert 200 and return
        the response."""
        method = getattr(self.client, method_name)
        response = method('/test_client_regress/request_methods/')
        self.assertEqual(response.status_code, 200)
        return response

    def test_get(self):
        "Request a view via request method GET"
        self.assertEqual(self._request('get').content,
                         b'request method: GET')

    def test_post(self):
        "Request a view via request method POST"
        self.assertEqual(self._request('post').content,
                         b'request method: POST')

    def test_head(self):
        "Request a view via request method HEAD"
        response = self._request('head')
        # A HEAD request doesn't return any content.
        self.assertNotEqual(response.content, b'request method: HEAD')
        self.assertEqual(response.content, b'')

    def test_options(self):
        "Request a view via request method OPTIONS"
        self.assertEqual(self._request('options').content,
                         b'request method: OPTIONS')

    def test_put(self):
        "Request a view via request method PUT"
        self.assertEqual(self._request('put').content,
                         b'request method: PUT')

    def test_delete(self):
        "Request a view via request method DELETE"
        self.assertEqual(self._request('delete').content,
                         b'request method: DELETE')

    def test_patch(self):
        "Request a view via request method PATCH"
        self.assertEqual(self._request('patch').content,
                         b'request method: PATCH')
class RequestMethodStringDataTests(TestCase):
    """String payloads can be sent via POST, PUT and PATCH.

    Regression tests for #11371 and #17797.
    """
    payload = '{"test": "json"}'

    def _send(self, method_name):
        """Send the canned JSON payload with *method_name*; return the
        response."""
        method = getattr(self.client, method_name)
        return method('/test_client_regress/request_methods/',
                      data=self.payload, content_type='application/json')

    def test_post(self):
        "Request a view with string data via request method POST"
        response = self._send('post')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'request method: POST')

    def test_put(self):
        "Request a view with string data via request method PUT"
        response = self._send('put')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'request method: PUT')

    def test_patch(self):
        "Request a view with string data via request method PATCH"
        response = self._send('patch')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'request method: PATCH')
class QueryStringTests(TestCase):
    """Query strings can be passed as data or in the URL (refs #10571)."""

    def test_get_like_requests(self):
        # See: https://code.djangoproject.com/ticket/10571.
        for method_name in ('get', 'head'):
            # A GET-like request can pass a query string as data
            method = getattr(self.client, method_name)
            response = method("/test_client_regress/request_data/",
                              data={'foo': 'whiz'})
            self.assertEqual(response.context['get-foo'], 'whiz')
            self.assertEqual(response.context['request-foo'], 'whiz')
            # A GET-like request can pass a query string as part of the URL
            response = method("/test_client_regress/request_data/?foo=whiz")
            self.assertEqual(response.context['get-foo'], 'whiz')
            self.assertEqual(response.context['request-foo'], 'whiz')
            # Data provided in the URL to a GET-like request is overridden
            # by actual form data
            response = method("/test_client_regress/request_data/?foo=whiz",
                              data={'foo': 'bang'})
            self.assertEqual(response.context['get-foo'], 'bang')
            self.assertEqual(response.context['request-foo'], 'bang')
            response = method("/test_client_regress/request_data/?foo=whiz",
                              data={'bar': 'bang'})
            # assertIsNone replaces assertEqual(x, None) (identity check).
            self.assertIsNone(response.context['get-foo'])
            self.assertEqual(response.context['get-bar'], 'bang')
            self.assertIsNone(response.context['request-foo'])
            self.assertEqual(response.context['request-bar'], 'bang')

    def test_post_like_requests(self):
        # A POST-like request can pass a query string as data
        response = self.client.post("/test_client_regress/request_data/",
                                    data={'foo': 'whiz'})
        self.assertIsNone(response.context['get-foo'])
        self.assertEqual(response.context['post-foo'], 'whiz')
        # A POST-like request can pass a query string as part of the URL
        response = self.client.post(
            "/test_client_regress/request_data/?foo=whiz")
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertIsNone(response.context['post-foo'])
        self.assertEqual(response.context['request-foo'], 'whiz')
        # POST data provided in the URL augments actual form data
        response = self.client.post(
            "/test_client_regress/request_data/?foo=whiz",
            data={'foo': 'bang'})
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertEqual(response.context['post-foo'], 'bang')
        self.assertEqual(response.context['request-foo'], 'bang')
        response = self.client.post(
            "/test_client_regress/request_data/?foo=whiz",
            data={'bar': 'bang'})
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertIsNone(response.context['get-bar'])
        self.assertIsNone(response.context['post-foo'])
        self.assertEqual(response.context['post-bar'], 'bang')
        self.assertEqual(response.context['request-foo'], 'whiz')
        self.assertEqual(response.context['request-bar'], 'bang')
class UnicodePayloadTests(TestCase):
    """Unicode JSON bodies round-trip through the test client (refs #10571).

    The local variable is named ``payload`` rather than ``json`` so it no
    longer shadows the stdlib module name.
    """

    def _post_json(self, payload, content_type):
        """POST *payload* to the JSON echo view with *content_type*."""
        return self.client.post("/test_client_regress/parse_unicode_json/",
                                payload, content_type=content_type)

    def test_simple_unicode_payload(self):
        "A simple ASCII-only unicode JSON document can be POSTed"
        payload = '{"english": "mountain pass"}'
        response = self._post_json(payload, "application/json")
        self.assertEqual(response.content, payload.encode())

    def test_unicode_payload_utf8(self):
        "A non-ASCII unicode data encoded as UTF-8 can be POSTed"
        payload = '{"dog": "собака"}'
        response = self._post_json(payload,
                                   "application/json; charset=utf-8")
        self.assertEqual(response.content, payload.encode('utf-8'))

    def test_unicode_payload_utf16(self):
        "A non-ASCII unicode data encoded as UTF-16 can be POSTed"
        payload = '{"dog": "собака"}'
        response = self._post_json(payload,
                                   "application/json; charset=utf-16")
        self.assertEqual(response.content, payload.encode('utf-16'))

    def test_unicode_payload_non_utf(self):
        "A non-ASCII unicode data as a non-UTF based encoding can be POSTed"
        payload = '{"dog": "собака"}'
        response = self._post_json(payload,
                                   "application/json; charset=koi8-r")
        self.assertEqual(response.content, payload.encode('koi8-r'))
class DummyFile(object):
    """Minimal file-like stub: a name plus fixed binary content.

    Used by the upload-encoding tests so no real file is needed.
    """

    def __init__(self, filename):
        # Only the name matters for content-type guessing.
        self.name = filename

    def read(self):
        # Fixed payload so tests can assert on the encoded output.
        return b'TEST_FILE_CONTENT'
class UploadedFileEncodingTest(TestCase):
    """multipart/form-data encoding of uploaded files."""

    def test_file_encoding(self):
        lines = encode_file('TEST_BOUNDARY', 'TEST_KEY',
                            DummyFile('test_name.bin'))
        self.assertEqual(lines[0], b'--TEST_BOUNDARY')
        self.assertEqual(
            lines[1],
            b'Content-Disposition: form-data; name="TEST_KEY"; '
            b'filename="test_name.bin"')
        self.assertEqual(lines[-1], b'TEST_FILE_CONTENT')

    def test_guesses_content_type_on_file_encoding(self):
        def content_type(filename):
            # The third line of the encoded upload is its Content-Type.
            return encode_file('IGNORE', 'IGNORE', DummyFile(filename))[2]

        self.assertEqual(content_type("file.bin"),
                         b'Content-Type: application/octet-stream')
        self.assertEqual(content_type("file.txt"),
                         b'Content-Type: text/plain')
        # The zip MIME type varies across platforms' mime databases.
        self.assertIn(content_type("file.zip"), (
            b'Content-Type: application/x-compress',
            b'Content-Type: application/x-zip',
            b'Content-Type: application/x-zip-compressed',
            b'Content-Type: application/zip',))
        self.assertEqual(content_type("file.unknown"),
                         b'Content-Type: application/octet-stream')
class RequestHeadersTest(TestCase):
    """Custom HTTP headers supplied to the test client."""

    def test_client_headers(self):
        "A test client can receive custom headers"
        response = self.client.get("/test_client_regress/check_headers/",
                                   HTTP_X_ARG_CHECK='Testing 123')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123")

    def test_client_headers_redirect(self):
        "Test client headers are preserved through redirects"
        response = self.client.get(
            "/test_client_regress/check_headers_redirect/",
            follow=True, HTTP_X_ARG_CHECK='Testing 123')
        # The header must survive the 301 hop to the final view.
        self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123")
        self.assertRedirects(response, '/test_client_regress/check_headers/',
                             status_code=301, target_status_code=200)
class ReadLimitedStreamTest(TestCase):
    """
    Tests that ensure that HttpRequest.body, HttpRequest.read() and
    HttpRequest.read(BUFFER) have proper LimitedStream behavior.

    Refs #14753, #15785
    """

    def _get_content(self, path):
        """GET *path* and return the rendered body bytes."""
        return self.client.get(path).content

    def _put_content(self, path, payload):
        """PUT *payload* as text/plain to *path*; return the body bytes."""
        return self.client.put(path, data=payload,
                               content_type='text/plain').content

    def test_body_from_empty_request(self):
        """HttpRequest.body on a test client GET request should return
        the empty string."""
        self.assertEqual(self._get_content("/test_client_regress/body/"),
                         b'')

    def test_read_from_empty_request(self):
        """HttpRequest.read() on a test client GET request should return the
        empty string."""
        self.assertEqual(self._get_content("/test_client_regress/read_all/"),
                         b'')

    def test_read_numbytes_from_empty_request(self):
        """HttpRequest.read(LARGE_BUFFER) on a test client GET request should
        return the empty string."""
        self.assertEqual(
            self._get_content("/test_client_regress/read_buffer/"), b'')

    def test_read_from_nonempty_request(self):
        """HttpRequest.read() on a test client PUT request with some payload
        should return that payload."""
        payload = b'foobar'
        self.assertEqual(
            self._put_content("/test_client_regress/read_all/", payload),
            payload)

    def test_read_numbytes_from_nonempty_request(self):
        """HttpRequest.read(LARGE_BUFFER) on a test client PUT request with
        some payload should return that payload."""
        payload = b'foobar'
        self.assertEqual(
            self._put_content("/test_client_regress/read_buffer/", payload),
            payload)
class RequestFactoryStateTest(TestCase):
    """Regression tests for #15929."""
    # These tests verify that certain middleware leave global state alone —
    # i.e. they assert test-isolation behavior. Unusually, running them
    # individually (or alongside unrelated tests) is meaningless: the three
    # only make sense executed together, in order.

    def common_test_that_should_always_pass(self):
        request = RequestFactory().get('/')
        request.session = {}
        # Middleware must not have attached a user to a factory request.
        self.assertFalse(hasattr(request, 'user'))

    def test_request(self):
        self.common_test_that_should_always_pass()

    def test_request_after_client(self):
        # Identical to test_request apart from the client request below.
        self.client.get('/')
        self.common_test_that_should_always_pass()

    def test_request_after_client_2(self):
        # Runs after the previous test; verifies state was restored.
        self.common_test_that_should_always_pass()
class RequestFactoryEnvironmentTests(TestCase):
    """
    Regression tests for #8551 and #17067: ensure that environment variables
    are set correctly in RequestFactory.
    """

    def test_should_set_correct_env_variables(self):
        meta = RequestFactory().get('/path/').META
        # Table-driven check of the fixed WSGI environment values.
        expected = {
            'REMOTE_ADDR': '127.0.0.1',
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': '80',
            'SERVER_PROTOCOL': 'HTTP/1.1',
        }
        for key, value in expected.items():
            self.assertEqual(meta.get(key), value)
        # Script name plus path info reconstructs the requested path.
        self.assertEqual(meta.get('SCRIPT_NAME') + meta.get('PATH_INFO'),
                         '/path/')
| hellhovnd/django | tests/test_client_regress/tests.py | Python | bsd-3-clause | 48,998 | [
"VisIt"
] | e4f8c7ff6b3f3a63e28ad81d991de74f387ea88b1f42eeed3a27d11a2576d282 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Driver for Microsoft Azure Virtual Machines service.
http://azure.microsoft.com/en-us/services/virtual-machines/
"""
import re
import time
import collections
import random
import sys
import copy
import base64
from datetime import datetime
from xml.dom import minidom
from xml.sax.saxutils import escape as xml_escape
from libcloud.utils.py3 import ET
from libcloud.common.azure import AzureServiceManagementConnection
from libcloud.common.azure import AzureRedirectException
from libcloud.compute.providers import Provider
from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize
from libcloud.compute.base import NodeImage, StorageVolume
from libcloud.compute.types import NodeState
from libcloud.common.types import LibcloudError
from libcloud.utils.py3 import _real_unicode
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import ensure_string
from libcloud.utils.py3 import urlquote as url_quote
from libcloud.utils.misc import ReprMixin
# Convenience alias kept for backwards compatibility with callers that
# imported HTTPSConnection from this module.
HTTPSConnection = httplib.HTTPSConnection
# Python 2/3 compatibility shim: _str() coerces text to the native str
# type (UTF-8-encoding unicode on py2), and _unicode_type names the text
# type for isinstance() checks elsewhere in the driver.
if sys.version_info < (3,):
    _unicode_type = unicode
    def _str(value):
        # On py2, unicode must be encoded before being used in URLs/headers.
        if isinstance(value, unicode):
            return value.encode('utf-8')
        return str(value)
else:
    # On py3, str is already unicode; no conversion needed.
    _str = str
    _unicode_type = str
# Endpoint host and API version for the classic Service Management API.
AZURE_SERVICE_MANAGEMENT_HOST = 'management.core.windows.net'
X_MS_VERSION = '2013-08-01'
# Heuristic: image labels matching this pattern are treated as Windows.
WINDOWS_SERVER_REGEX = re.compile(
    r'Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk'
)
"""
Sizes must be hardcoded because Microsoft doesn't provide an API to fetch them
From http://msdn.microsoft.com/en-us/library/windowsazure/dn197896.aspx
Prices are for Linux instances in East US data center. To see what pricing will
actually be, visit:
http://azure.microsoft.com/en-gb/pricing/details/virtual-machines/
"""
# Static size table keyed by Azure size code; 'id' is the value the API
# expects, 'price' is USD/hour for Linux in East US (see module note above).
AZURE_COMPUTE_INSTANCE_TYPES = {
    # Basic/Standard A-series (general purpose).
    'A0': {
        'id': 'ExtraSmall',
        'name': 'Extra Small Instance',
        'ram': 768,
        'disk': 127,
        'bandwidth': None,
        'price': '0.0211',
        'max_data_disks': 1,
        'cores': 'Shared'  # A0 shares a physical core with other tenants.
    },
    'A1': {
        'id': 'Small',
        'name': 'Small Instance',
        'ram': 1792,
        'disk': 127,
        'bandwidth': None,
        'price': '0.0633',
        'max_data_disks': 2,
        'cores': 1
    },
    'A2': {
        'id': 'Medium',
        'name': 'Medium Instance',
        'ram': 3584,
        'disk': 127,
        'bandwidth': None,
        'price': '0.1266',
        'max_data_disks': 4,
        'cores': 2
    },
    'A3': {
        'id': 'Large',
        'name': 'Large Instance',
        'ram': 7168,
        'disk': 127,
        'bandwidth': None,
        'price': '0.2531',
        'max_data_disks': 8,
        'cores': 4
    },
    'A4': {
        'id': 'ExtraLarge',
        'name': 'Extra Large Instance',
        'ram': 14336,
        'disk': 127,
        'bandwidth': None,
        'price': '0.5062',
        'max_data_disks': 16,
        'cores': 8
    },
    # Memory-intensive / compute-intensive A-series.
    'A5': {
        'id': 'A5',
        'name': 'Memory Intensive Instance',
        'ram': 14336,
        'disk': 127,
        'bandwidth': None,
        'price': '0.2637',
        'max_data_disks': 4,
        'cores': 2
    },
    'A6': {
        'id': 'A6',
        'name': 'A6 Instance',
        'ram': 28672,
        'disk': 127,
        'bandwidth': None,
        'price': '0.5273',
        'max_data_disks': 8,
        'cores': 4
    },
    'A7': {
        'id': 'A7',
        'name': 'A7 Instance',
        'ram': 57344,
        'disk': 127,
        'bandwidth': None,
        'price': '1.0545',
        'max_data_disks': 16,
        'cores': 8
    },
    'A8': {
        'id': 'A8',
        'name': 'A8 Instance',
        'ram': 57344,
        'disk': 127,
        'bandwidth': None,
        'price': '2.0774',
        'max_data_disks': 16,
        'cores': 8
    },
    'A9': {
        'id': 'A9',
        'name': 'A9 Instance',
        'ram': 114688,
        'disk': 127,
        'bandwidth': None,
        'price': '4.7137',
        'max_data_disks': 16,
        'cores': 16
    },
    'A10': {
        'id': 'A10',
        'name': 'A10 Instance',
        'ram': 57344,
        'disk': 127,
        'bandwidth': None,
        'price': '1.2233',
        'max_data_disks': 16,
        'cores': 8
    },
    'A11': {
        'id': 'A11',
        'name': 'A11 Instance',
        'ram': 114688,
        'disk': 127,
        'bandwidth': None,
        'price': '2.1934',
        'max_data_disks': 16,
        'cores': 16
    },
    # D-series ("faster compute": SSD temp disk, faster CPUs).
    'D1': {
        'id': 'Standard_D1',
        'name': 'D1 Faster Compute Instance',
        'ram': 3584,
        'disk': 127,
        'bandwidth': None,
        'price': '0.0992',
        'max_data_disks': 2,
        'cores': 1
    },
    'D2': {
        'id': 'Standard_D2',
        'name': 'D2 Faster Compute Instance',
        'ram': 7168,
        'disk': 127,
        'bandwidth': None,
        'price': '0.1983',
        'max_data_disks': 4,
        'cores': 2
    },
    'D3': {
        'id': 'Standard_D3',
        'name': 'D3 Faster Compute Instance',
        'ram': 14336,
        'disk': 127,
        'bandwidth': None,
        'price': '0.3965',
        'max_data_disks': 8,
        'cores': 4
    },
    'D4': {
        'id': 'Standard_D4',
        'name': 'D4 Faster Compute Instance',
        'ram': 28672,
        'disk': 127,
        'bandwidth': None,
        'price': '0.793',
        'max_data_disks': 16,
        'cores': 8
    },
    'D11': {
        'id': 'Standard_D11',
        'name': 'D11 Faster Compute Instance',
        'ram': 14336,
        'disk': 127,
        'bandwidth': None,
        'price': '0.251',
        'max_data_disks': 4,
        'cores': 2
    },
    'D12': {
        'id': 'Standard_D12',
        'name': 'D12 Faster Compute Instance',
        'ram': 28672,
        'disk': 127,
        'bandwidth': None,
        'price': '0.502',
        'max_data_disks': 8,
        'cores': 4
    },
    'D13': {
        'id': 'Standard_D13',
        'name': 'D13 Faster Compute Instance',
        'ram': 57344,
        'disk': 127,
        'bandwidth': None,
        'price': '0.9038',
        'max_data_disks': 16,
        'cores': 8
    },
    'D14': {
        'id': 'Standard_D14',
        'name': 'D14 Faster Compute Instance',
        'ram': 114688,
        'disk': 127,
        'bandwidth': None,
        'price': '1.6261',
        'max_data_disks': 32,
        'cores': 16
    }
}
# Irregular snake_case -> Azure XML element-name mappings used by the
# (de)serialization helpers; names not listed here are converted by the
# generic CamelCase rule.
_KNOWN_SERIALIZATION_XFORMS = {
    'include_apis': 'IncludeAPIs',
    'message_id': 'MessageId',
    'content_md5': 'Content-MD5',
    'last_modified': 'Last-Modified',
    'cache_control': 'Cache-Control',
    'account_admin_live_email_id': 'AccountAdminLiveEmailId',
    'service_admin_live_email_id': 'ServiceAdminLiveEmailId',
    'subscription_id': 'SubscriptionID',
    'fqdn': 'FQDN',
    'private_id': 'PrivateID',
    'os_virtual_hard_disk': 'OSVirtualHardDisk',
    'logical_disk_size_in_gb': 'LogicalDiskSizeInGB',
    'logical_size_in_gb': 'LogicalSizeInGB',
    'os': 'OS',
    'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo',
    'copy_id': 'CopyId',
    'os_disk_configuration': 'OSDiskConfiguration',
    'is_dns_programmed': 'IsDnsProgrammed'
}
class AzureNodeDriver(NodeDriver):
    """libcloud compute driver for the classic Azure Service Management
    (ASM) virtual-machine API."""
    connectionCls = AzureServiceManagementConnection
    name = 'Azure Virtual machines'
    website = 'http://azure.microsoft.com/en-us/services/virtual-machines/'
    type = Provider.AZURE
    # Hard-coded size table; ASM exposes no API to list sizes.
    _instance_types = AZURE_COMPUTE_INSTANCE_TYPES
    # Suffix appended to a storage-account name to build its blob endpoint.
    _blob_url = ".blob.core.windows.net"
    features = {'create_node': ['password']}
    # (is_affinity_group, service_location) pair returned by internal
    # location lookups.
    service_location = collections.namedtuple(
        'service_location',
        ['is_affinity_group', 'service_location']
    )
    # Maps Azure role-instance power states to libcloud NodeState values.
    NODE_STATE_MAP = {
        'RoleStateUnknown': NodeState.UNKNOWN,
        'CreatingVM': NodeState.PENDING,
        'StartingVM': NodeState.PENDING,
        'Provisioning': NodeState.PENDING,
        'CreatingRole': NodeState.PENDING,
        'StartingRole': NodeState.PENDING,
        'ReadyRole': NodeState.RUNNING,
        'BusyRole': NodeState.PENDING,
        'StoppingRole': NodeState.PENDING,
        'StoppingVM': NodeState.PENDING,
        'DeletingVM': NodeState.PENDING,
        'StoppedVM': NodeState.STOPPED,
        'RestartingRole': NodeState.REBOOTING,
        'CyclingRole': NodeState.TERMINATED,
        'FailedStartingRole': NodeState.TERMINATED,
        'FailedStartingVM': NodeState.TERMINATED,
        'UnresponsiveRole': NodeState.TERMINATED,
        # Deallocated VMs do not retain a running instance.
        'StoppedDeallocated': NodeState.TERMINATED,
    }
def __init__(self, subscription_id=None, key_file=None, **kwargs):
"""
subscription_id contains the Azure subscription id in the form of GUID
key_file contains the Azure X509 certificate in .pem form
"""
self.subscription_id = subscription_id
self.key_file = key_file
self.follow_redirects = kwargs.get('follow_redirects', True)
super(AzureNodeDriver, self).__init__(
self.subscription_id,
self.key_file,
secure=True,
**kwargs
)
def list_sizes(self):
"""
Lists all sizes
:rtype: ``list`` of :class:`NodeSize`
"""
sizes = []
for _, values in self._instance_types.items():
node_size = self._to_node_size(copy.deepcopy(values))
sizes.append(node_size)
return sizes
def list_images(self, location=None):
"""
Lists all images
:rtype: ``list`` of :class:`NodeImage`
"""
data = self._perform_get(self._get_image_path(), Images)
custom_image_data = self._perform_get(
self._get_vmimage_path(),
VMImages
)
images = [self._to_image(i) for i in data]
images.extend(self._vm_to_image(j) for j in custom_image_data)
if location is not None:
images = [
image
for image in images
if location in image.extra["location"]
]
return images
def list_locations(self):
"""
Lists all locations
:rtype: ``list`` of :class:`NodeLocation`
"""
data = self._perform_get(
'/' + self.subscription_id + '/locations',
Locations
)
return [self._to_location(l) for l in data]
def list_nodes(self, ex_cloud_service_name):
"""
List all nodes
ex_cloud_service_name parameter is used to scope the request
to a specific Cloud Service. This is a required parameter as
nodes cannot exist outside of a Cloud Service nor be shared
between a Cloud Service within Azure.
:param ex_cloud_service_name: Cloud Service name
:type ex_cloud_service_name: ``str``
:rtype: ``list`` of :class:`Node`
"""
response = self._perform_get(
self._get_hosted_service_path(ex_cloud_service_name) +
'?embed-detail=True',
None
)
self.raise_for_response(response, 200)
data = self._parse_response(response, HostedService)
vips = None
if (len(data.deployments) > 0 and
data.deployments[0].virtual_ips is not None):
vips = [vip.address for vip in data.deployments[0].virtual_ips]
try:
return [
self._to_node(n, ex_cloud_service_name, vips)
for n in data.deployments[0].role_instance_list
]
except IndexError:
return []
def reboot_node(self, node, ex_cloud_service_name=None,
ex_deployment_slot=None):
"""
Reboots a node.
ex_cloud_service_name parameter is used to scope the request
to a specific Cloud Service. This is a required parameter as
nodes cannot exist outside of a Cloud Service nor be shared
between a Cloud Service within Azure.
:param ex_cloud_service_name: Cloud Service name
:type ex_cloud_service_name: ``str``
:param ex_deployment_slot: Options are "production" (default)
or "Staging". (Optional)
:type ex_deployment_slot: ``str``
:rtype: ``bool``
"""
if ex_cloud_service_name is None:
if node.extra is not None:
ex_cloud_service_name = node.extra.get(
'ex_cloud_service_name'
)
if not ex_cloud_service_name:
raise ValueError("ex_cloud_service_name is required.")
if not ex_deployment_slot:
ex_deployment_slot = "Production"
_deployment_name = self._get_deployment(
service_name=ex_cloud_service_name,
deployment_slot=ex_deployment_slot
).name
try:
response = self._perform_post(
self._get_deployment_path_using_name(
ex_cloud_service_name,
_deployment_name
) + '/roleinstances/' + _str(node.id) + '?comp=reboot',
''
)
self.raise_for_response(response, 202)
if self._parse_response_for_async_op(response):
return True
else:
return False
except Exception:
return False
def list_volumes(self, node=None):
"""
Lists volumes of the disks in the image repository that are
associated with the specified subscription.
Pass Node object to scope the list of volumes to a single
instance.
:rtype: ``list`` of :class:`StorageVolume`
"""
data = self._perform_get(self._get_disk_path(), Disks)
volumes = [self._to_volume(volume=v, node=node) for v in data]
return volumes
def create_node(self, name, size, image, ex_cloud_service_name,
ex_storage_service_name=None, ex_new_deployment=False,
ex_deployment_slot="Production", ex_deployment_name=None,
ex_admin_user_id="azureuser", ex_custom_data=None,
ex_virtual_network_name=None, ex_network_config=None,
auth=None, **kwargs):
"""
Create Azure Virtual Machine
Reference: http://bit.ly/1fIsCb7
[www.windowsazure.com/en-us/documentation/]
We default to:
+ 3389/TCP - RDP - 1st Microsoft instance.
+ RANDOM/TCP - RDP - All succeeding Microsoft instances.
+ 22/TCP - SSH - 1st Linux instance
+ RANDOM/TCP - SSH - All succeeding Linux instances.
The above replicates the standard behavior of the Azure UI.
You can retrieve the assigned ports to each instance by
using the following private function:
_get_endpoint_ports(service_name)
Returns public,private port key pair.
@inherits: :class:`NodeDriver.create_node`
:keyword image: The image to use when creating this node
:type image: `NodeImage`
:keyword size: The size of the instance to create
:type size: `NodeSize`
:keyword ex_cloud_service_name: Required.
Name of the Azure Cloud Service.
:type ex_cloud_service_name: ``str``
:keyword ex_storage_service_name: Optional:
Name of the Azure Storage Service.
:type ex_storage_service_name: ``str``
:keyword ex_new_deployment: Optional. Tells azure to create a
new deployment rather than add to an
existing one.
:type ex_new_deployment: ``boolean``
:keyword ex_deployment_slot: Optional: Valid values: production|
staging.
Defaults to production.
:type ex_deployment_slot: ``str``
:keyword ex_deployment_name: Optional. The name of the
deployment.
If this is not passed in we default
to using the Cloud Service name.
:type ex_deployment_name: ``str``
:type ex_custom_data: ``str``
:keyword ex_custom_data: Optional script or other data which is
injected into the VM when it's beginning
provisioned.
:keyword ex_admin_user_id: Optional. Defaults to 'azureuser'.
:type ex_admin_user_id: ``str``
:keyword ex_virtual_network_name: Optional. If this is not passed
in no virtual network is used.
:type ex_virtual_network_name: ``str``
:keyword ex_network_config: Optional. The ConfigurationSet to use
for network configuration
:type ex_network_config: `ConfigurationSet`
"""
# TODO: Refactor this method to make it more readable, split it into
# multiple smaller methods
auth = self._get_and_check_auth(auth)
password = auth.password
if not isinstance(size, NodeSize):
raise ValueError('Size must be an instance of NodeSize')
if not isinstance(image, NodeImage):
raise ValueError(
"Image must be an instance of NodeImage, "
"produced by list_images()"
)
# Retrieve a list of currently available nodes for the provided cloud
# service
node_list = self.list_nodes(
ex_cloud_service_name=ex_cloud_service_name
)
if ex_network_config is None:
network_config = ConfigurationSet()
else:
network_config = ex_network_config
network_config.configuration_set_type = 'NetworkConfiguration'
# Base64 encode custom data if provided
if ex_custom_data:
ex_custom_data = self._encode_base64(data=ex_custom_data)
# We do this because we need to pass a Configuration to the
# method. This will be either Linux or Windows.
if WINDOWS_SERVER_REGEX.search(image.id, re.I):
machine_config = WindowsConfigurationSet(
computer_name=name,
admin_password=password,
admin_user_name=ex_admin_user_id
)
machine_config.domain_join = None
if not node_list or ex_new_deployment:
port = "3389"
else:
port = random.randint(41952, 65535)
endpoints = self._get_deployment(
service_name=ex_cloud_service_name,
deployment_slot=ex_deployment_slot
)
for instances in endpoints.role_instance_list:
ports = [ep.public_port for ep in
instances.instance_endpoints]
while port in ports:
port = random.randint(41952, 65535)
endpoint = ConfigurationSetInputEndpoint(
name='Remote Desktop',
protocol='tcp',
port=port,
local_port='3389',
load_balanced_endpoint_set_name=None,
enable_direct_server_return=False
)
else:
if not node_list or ex_new_deployment:
port = "22"
else:
port = random.randint(41952, 65535)
endpoints = self._get_deployment(
service_name=ex_cloud_service_name,
deployment_slot=ex_deployment_slot
)
for instances in endpoints.role_instance_list:
ports = []
if instances.instance_endpoints is not None:
for ep in instances.instance_endpoints:
ports += [ep.public_port]
while port in ports:
port = random.randint(41952, 65535)
endpoint = ConfigurationSetInputEndpoint(
name='SSH',
protocol='tcp',
port=port,
local_port='22',
load_balanced_endpoint_set_name=None,
enable_direct_server_return=False
)
machine_config = LinuxConfigurationSet(
name,
ex_admin_user_id,
password,
False,
ex_custom_data
)
network_config.input_endpoints.items.append(endpoint)
_storage_location = self._get_cloud_service_location(
service_name=ex_cloud_service_name
)
if ex_storage_service_name is None:
ex_storage_service_name = ex_cloud_service_name
ex_storage_service_name = re.sub(
r'[\W_-]+',
'',
ex_storage_service_name.lower(),
flags=re.UNICODE
)
if self._is_storage_service_unique(
service_name=ex_storage_service_name):
self._create_storage_account(
service_name=ex_storage_service_name,
location=_storage_location.service_location,
is_affinity_group=_storage_location.is_affinity_group
)
# OK, bit annoying here. You must create a deployment before
# you can create an instance; however, the deployment function
# creates the first instance, but all subsequent instances
# must be created using the add_role function.
#
# So, yeah, annoying.
if not node_list or ex_new_deployment:
# This is the first node in this cloud service.
if not ex_deployment_name:
ex_deployment_name = ex_cloud_service_name
vm_image_id = None
disk_config = None
if image.extra.get('vm_image', False):
vm_image_id = image.id
# network_config = None
else:
blob_url = "http://%s.blob.core.windows.net" % (
ex_storage_service_name)
# Azure's pattern in the UI.
disk_name = "%s-%s-%s.vhd" % (
ex_cloud_service_name,
name,
time.strftime("%Y-%m-%d")
)
media_link = "%s/vhds/%s" % (blob_url, disk_name)
disk_config = OSVirtualHardDisk(image.id, media_link)
response = self._perform_post(
self._get_deployment_path_using_name(ex_cloud_service_name),
AzureXmlSerializer.virtual_machine_deployment_to_xml(
ex_deployment_name,
ex_deployment_slot,
name,
name,
machine_config,
disk_config,
'PersistentVMRole',
network_config,
None,
None,
size.id,
ex_virtual_network_name,
vm_image_id
)
)
self.raise_for_response(response, 202)
self._ex_complete_async_azure_operation(response)
else:
_deployment_name = self._get_deployment(
service_name=ex_cloud_service_name,
deployment_slot=ex_deployment_slot
).name
vm_image_id = None
disk_config = None
if image.extra.get('vm_image', False):
vm_image_id = image.id
# network_config = None
else:
blob_url = "http://%s.blob.core.windows.net" % (
ex_storage_service_name
)
disk_name = "%s-%s-%s.vhd" % (
ex_cloud_service_name,
name,
time.strftime("%Y-%m-%d")
)
media_link = "%s/vhds/%s" % (blob_url, disk_name)
disk_config = OSVirtualHardDisk(image.id, media_link)
path = self._get_role_path(ex_cloud_service_name, _deployment_name)
body = AzureXmlSerializer.add_role_to_xml(
name, # role_name
machine_config, # system_config
disk_config, # os_virtual_hard_disk
'PersistentVMRole', # role_type
network_config, # network_config
None, # availability_set_name
None, # data_virtual_hard_disks
vm_image_id, # vm_image
size.id # role_size
)
response = self._perform_post(path, body)
self.raise_for_response(response, 202)
self._ex_complete_async_azure_operation(response)
return Node(
id=name,
name=name,
state=NodeState.PENDING,
public_ips=[],
private_ips=[],
driver=self.connection.driver,
extra={
'ex_cloud_service_name': ex_cloud_service_name
}
)
def destroy_node(self, node, ex_cloud_service_name=None,
ex_deployment_slot="Production"):
"""
Remove Azure Virtual Machine
This removes the instance, but does not
remove the disk. You will need to use destroy_volume.
Azure sometimes has an issue where it will hold onto
a blob lease for an extended amount of time.
:keyword ex_cloud_service_name: Required.
Name of the Azure Cloud Service.
:type ex_cloud_service_name: ``str``
:keyword ex_deployment_slot: Optional: The name of the deployment
slot. If this is not passed in we
default to production.
:type ex_deployment_slot: ``str``
"""
if not isinstance(node, Node):
raise ValueError("A libcloud Node object is required.")
if ex_cloud_service_name is None and node.extra is not None:
ex_cloud_service_name = node.extra.get('ex_cloud_service_name')
if not ex_cloud_service_name:
raise ValueError("Unable to get ex_cloud_service_name from Node.")
_deployment = self._get_deployment(
service_name=ex_cloud_service_name,
deployment_slot=ex_deployment_slot
)
_deployment_name = _deployment.name
_server_deployment_count = len(_deployment.role_instance_list)
if _server_deployment_count > 1:
path = self._get_role_path(
ex_cloud_service_name,
_deployment_name,
node.id
)
else:
path = self._get_deployment_path_using_name(
ex_cloud_service_name,
_deployment_name
)
path += '?comp=media'
self._perform_delete(path)
return True
def ex_list_cloud_services(self):
return self._perform_get(
self._get_hosted_service_path(),
HostedServices
)
def ex_create_cloud_service(self, name, location, description=None,
extended_properties=None):
"""
Create an azure cloud service.
:param name: Name of the service to create
:type name: ``str``
:param location: Standard azure location string
:type location: ``str``
:param description: Optional description
:type description: ``str``
:param extended_properties: Optional extended_properties
:type extended_properties: ``dict``
:rtype: ``bool``
"""
response = self._perform_cloud_service_create(
self._get_hosted_service_path(),
AzureXmlSerializer.create_hosted_service_to_xml(
name,
self._encode_base64(name),
description,
location,
None,
extended_properties
)
)
self.raise_for_response(response, 201)
return True
def ex_destroy_cloud_service(self, name):
"""
Delete an azure cloud service.
:param name: Name of the cloud service to destroy.
:type name: ``str``
:rtype: ``bool``
"""
response = self._perform_cloud_service_delete(
self._get_hosted_service_path(name)
)
self.raise_for_response(response, 200)
return True
def ex_add_instance_endpoints(self, node, endpoints,
ex_deployment_slot="Production"):
all_endpoints = [
{
"name": endpoint.name,
"protocol": endpoint.protocol,
"port": endpoint.public_port,
"local_port": endpoint.local_port,
}
for endpoint in node.extra['instance_endpoints']
]
all_endpoints.extend(endpoints)
result = self.ex_set_instance_endpoints(node, all_endpoints,
ex_deployment_slot)
return result
def ex_set_instance_endpoints(self, node, endpoints,
ex_deployment_slot="Production"):
"""
For example::
endpoint = ConfigurationSetInputEndpoint(
name='SSH',
protocol='tcp',
port=port,
local_port='22',
load_balanced_endpoint_set_name=None,
enable_direct_server_return=False
)
{
'name': 'SSH',
'protocol': 'tcp',
'port': port,
'local_port': '22'
}
"""
ex_cloud_service_name = node.extra['ex_cloud_service_name']
vm_role_name = node.name
network_config = ConfigurationSet()
network_config.configuration_set_type = 'NetworkConfiguration'
for endpoint in endpoints:
new_endpoint = ConfigurationSetInputEndpoint(**endpoint)
network_config.input_endpoints.items.append(new_endpoint)
_deployment_name = self._get_deployment(
service_name=ex_cloud_service_name,
deployment_slot=ex_deployment_slot
).name
response = self._perform_put(
self._get_role_path(
ex_cloud_service_name,
_deployment_name,
vm_role_name
),
AzureXmlSerializer.add_role_to_xml(
None, # role_name
None, # system_config
None, # os_virtual_hard_disk
'PersistentVMRole', # role_type
network_config, # network_config
None, # availability_set_name
None, # data_virtual_hard_disks
None, # vm_image
None # role_size
)
)
self.raise_for_response(response, 202)
def ex_create_storage_service(self, name, location,
description=None, affinity_group=None,
extended_properties=None):
"""
Create an azure storage service.
:param name: Name of the service to create
:type name: ``str``
:param location: Standard azure location string
:type location: ``str``
:param description: (Optional) Description of storage service.
:type description: ``str``
:param affinity_group: (Optional) Azure affinity group.
:type affinity_group: ``str``
:param extended_properties: (Optional) Additional configuration
options support by Azure.
:type extended_properties: ``dict``
:rtype: ``bool``
"""
response = self._perform_storage_service_create(
self._get_storage_service_path(),
AzureXmlSerializer.create_storage_service_to_xml(
service_name=name,
label=self._encode_base64(name),
description=description,
location=location,
affinity_group=affinity_group,
extended_properties=extended_properties
)
)
self.raise_for_response(response, 202)
return True
def ex_destroy_storage_service(self, name):
"""
Destroy storage service. Storage service must not have any active
blobs. Sometimes Azure likes to hold onto volumes after they are
deleted for an inordinate amount of time, so sleep before calling
this method after volume deletion.
:param name: Name of storage service.
:type name: ``str``
:rtype: ``bool``
"""
response = self._perform_storage_service_delete(
self._get_storage_service_path(name)
)
self.raise_for_response(response, 200)
return True
"""
Functions not implemented
"""
def create_volume_snapshot(self):
raise NotImplementedError(
'You cannot create snapshots of '
'Azure VMs at this time.'
)
def attach_volume(self):
raise NotImplementedError(
'attach_volume is not supported '
'at this time.'
)
def create_volume(self):
raise NotImplementedError(
'create_volume is not supported '
'at this time.'
)
def detach_volume(self):
raise NotImplementedError(
'detach_volume is not supported '
'at this time.'
)
def destroy_volume(self):
raise NotImplementedError(
'destroy_volume is not supported '
'at this time.'
)
"""
Private Functions
"""
def _perform_cloud_service_create(self, path, data):
request = AzureHTTPRequest()
request.method = 'POST'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.body = data
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
return response
def _perform_cloud_service_delete(self, path):
request = AzureHTTPRequest()
request.method = 'DELETE'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
return response
def _perform_storage_service_create(self, path, data):
request = AzureHTTPRequest()
request.method = 'POST'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.body = data
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
return response
def _perform_storage_service_delete(self, path):
request = AzureHTTPRequest()
request.method = 'DELETE'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
return response
def _to_node(self, data, ex_cloud_service_name=None, virtual_ips=None):
"""
Convert the data from a Azure response object into a Node
"""
remote_desktop_port = ''
ssh_port = ''
public_ips = virtual_ips or []
if data.instance_endpoints is not None:
if len(data.instance_endpoints) >= 1:
public_ips = [data.instance_endpoints[0].vip]
for port in data.instance_endpoints:
if port.name == 'Remote Desktop':
remote_desktop_port = port.public_port
if port.name == "SSH":
ssh_port = port.public_port
return Node(
id=data.role_name,
name=data.role_name,
state=self.NODE_STATE_MAP.get(
data.instance_status,
NodeState.UNKNOWN
),
public_ips=public_ips,
private_ips=[data.ip_address],
driver=self.connection.driver,
extra={
'instance_endpoints': data.instance_endpoints,
'remote_desktop_port': remote_desktop_port,
'ssh_port': ssh_port,
'power_state': data.power_state,
'instance_size': data.instance_size,
'ex_cloud_service_name': ex_cloud_service_name
}
)
def _to_location(self, data):
"""
Convert the data from a Azure response object into a location
"""
country = data.display_name
if "Asia" in data.display_name:
country = "Asia"
if "Europe" in data.display_name:
country = "Europe"
if "US" in data.display_name:
country = "US"
if "Japan" in data.display_name:
country = "Japan"
if "Brazil" in data.display_name:
country = "Brazil"
vm_role_sizes = data.compute_capabilities.virtual_machines_role_sizes
return AzureNodeLocation(
id=data.name,
name=data.display_name,
country=country,
driver=self.connection.driver,
available_services=data.available_services,
virtual_machine_role_sizes=vm_role_sizes
)
def _to_node_size(self, data):
"""
Convert the AZURE_COMPUTE_INSTANCE_TYPES into NodeSize
"""
return NodeSize(
id=data["id"],
name=data["name"],
ram=data["ram"],
disk=data["disk"],
bandwidth=data["bandwidth"],
price=data["price"],
driver=self.connection.driver,
extra={
'max_data_disks': data["max_data_disks"],
'cores': data["cores"]
}
)
def _to_image(self, data):
return NodeImage(
id=data.name,
name=data.label,
driver=self.connection.driver,
extra={
'os': data.os,
'category': data.category,
'description': data.description,
'location': data.location,
'affinity_group': data.affinity_group,
'media_link': data.media_link,
'vm_image': False
}
)
def _vm_to_image(self, data):
return NodeImage(
id=data.name,
name=data.label,
driver=self.connection.driver,
extra={
'os': data.os_disk_configuration.os,
'category': data.category,
'location': data.location,
'media_link': data.os_disk_configuration.media_link,
'affinity_group': data.affinity_group,
'deployment_name': data.deployment_name,
'vm_image': True
}
)
def _to_volume(self, volume, node):
extra = {
'affinity_group': volume.affinity_group,
'os': volume.os,
'location': volume.location,
'media_link': volume.media_link,
'source_image_name': volume.source_image_name
}
role_name = getattr(volume.attached_to, 'role_name', None)
hosted_service_name = getattr(
volume.attached_to,
'hosted_service_name',
None
)
deployment_name = getattr(
volume.attached_to,
'deployment_name',
None
)
if role_name is not None:
extra['role_name'] = role_name
if hosted_service_name is not None:
extra['hosted_service_name'] = hosted_service_name
if deployment_name is not None:
extra['deployment_name'] = deployment_name
if node:
if role_name is not None and role_name == node.id:
return StorageVolume(
id=volume.name,
name=volume.name,
size=int(volume.logical_disk_size_in_gb),
driver=self.connection.driver,
extra=extra
)
else:
return StorageVolume(
id=volume.name,
name=volume.name,
size=int(volume.logical_disk_size_in_gb),
driver=self.connection.driver,
extra=extra
)
def _get_deployment(self, **kwargs):
_service_name = kwargs['service_name']
_deployment_slot = kwargs['deployment_slot']
response = self._perform_get(
self._get_deployment_path_using_slot(
_service_name,
_deployment_slot
),
None
)
self.raise_for_response(response, 200)
return self._parse_response(response, Deployment)
def _get_cloud_service_location(self, service_name=None):
if not service_name:
raise ValueError("service_name is required.")
res = self._perform_get(
'%s?embed-detail=False' % (
self._get_hosted_service_path(service_name)
),
HostedService
)
_affinity_group = res.hosted_service_properties.affinity_group
_cloud_service_location = res.hosted_service_properties.location
if _affinity_group is not None and _affinity_group is not '':
return self.service_location(True, _affinity_group)
elif _cloud_service_location is not None:
return self.service_location(False, _cloud_service_location)
else:
return None
def _is_storage_service_unique(self, service_name=None):
if not service_name:
raise ValueError("service_name is required.")
_check_availability = self._perform_get(
'%s/operations/isavailable/%s%s' % (
self._get_storage_service_path(),
_str(service_name),
''
),
AvailabilityResponse
)
self.raise_for_response(_check_availability, 200)
return _check_availability.result
def _create_storage_account(self, **kwargs):
if kwargs['is_affinity_group'] is True:
response = self._perform_post(
self._get_storage_service_path(),
AzureXmlSerializer.create_storage_service_input_to_xml(
kwargs['service_name'],
kwargs['service_name'],
self._encode_base64(kwargs['service_name']),
kwargs['location'],
None, # Location
True, # geo_replication_enabled
None # extended_properties
)
)
self.raise_for_response(response, 202)
else:
response = self._perform_post(
self._get_storage_service_path(),
AzureXmlSerializer.create_storage_service_input_to_xml(
kwargs['service_name'],
kwargs['service_name'],
self._encode_base64(kwargs['service_name']),
None, # Affinity Group
kwargs['location'], # Location
True, # geo_replication_enabled
None # extended_properties
)
)
self.raise_for_response(response, 202)
# We need to wait for this to be created before we can
# create the storage container and the instance.
self._ex_complete_async_azure_operation(
response,
"create_storage_account"
)
def _get_operation_status(self, request_id):
return self._perform_get(
'/' + self.subscription_id + '/operations/' + _str(request_id),
Operation
)
def _perform_get(self, path, response_type):
request = AzureHTTPRequest()
request.method = 'GET'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
if response_type is not None:
return self._parse_response(response, response_type)
return response
def _perform_post(self, path, body, response_type=None, async=False):
request = AzureHTTPRequest()
request.method = 'POST'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.body = ensure_string(self._get_request_body(body))
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
return response
def _perform_put(self, path, body, response_type=None, async=False):
request = AzureHTTPRequest()
request.method = 'PUT'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.body = ensure_string(self._get_request_body(body))
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
return response
def _perform_delete(self, path, async=False):
request = AzureHTTPRequest()
request.method = 'DELETE'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
self.raise_for_response(response, 202)
if async:
return self._parse_response_for_async_op(response)
def _perform_request(self, request):
try:
return self.connection.request(
action=request.path,
data=request.body,
headers=request.headers,
method=request.method
)
except AzureRedirectException:
e = sys.exc_info()[1]
parsed_url = urlparse.urlparse(e.location)
request.host = parsed_url.netloc
return self._perform_request(request)
except Exception as e:
raise e
def _update_request_uri_query(self, request):
"""
pulls the query string out of the URI and moves it into
the query portion of the request object. If there are already
query parameters on the request the parameters in the URI will
appear after the existing parameters
"""
if '?' in request.path:
request.path, _, query_string = request.path.partition('?')
if query_string:
query_params = query_string.split('&')
for query in query_params:
if '=' in query:
name, _, value = query.partition('=')
request.query.append((name, value))
request.path = url_quote(request.path, '/()$=\',')
# add encoded queries to request.path.
if request.query:
request.path += '?'
for name, value in request.query:
if value is not None:
request.path += '%s=%s%s' % (
name,
url_quote(value, '/()$=\','),
'&'
)
request.path = request.path[:-1]
return request.path, request.query
def _update_management_header(self, request):
"""
Add additional headers for management.
"""
if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
request.headers['Content-Length'] = str(len(request.body))
# append additional headers base on the service
# request.headers.append(('x-ms-version', X_MS_VERSION))
# if it is not GET or HEAD request, must set content-type.
if request.method not in ['GET', 'HEAD']:
for key in request.headers:
if 'content-type' == key.lower():
break
else:
request.headers['Content-Type'] = 'application/xml'
return request.headers
def _parse_response(self, response, return_type):
"""
Parse the HTTPResponse's body and fill all the data into a class of
return_type.
"""
return self._parse_response_body_from_xml_text(
response=response,
return_type=return_type
)
def _parse_response_body_from_xml_text(self, response, return_type):
"""
parse the xml and fill all the data into a class of return_type
"""
respbody = response.body
doc = minidom.parseString(respbody)
return_obj = return_type()
for node in self._get_child_nodes(doc, return_type.__name__):
self._fill_data_to_return_object(node, return_obj)
# Note: We always explicitly assign status code to the custom return
# type object
return_obj.status = response.status
return return_obj
def _get_child_nodes(self, node, tag_name):
return [childNode for childNode in node.getElementsByTagName(tag_name)
if childNode.parentNode == node]
    def _fill_data_to_return_object(self, node, return_obj):
        """
        Populate *return_obj* from DOM element *node*, dispatching on the
        declared (placeholder) type of each attribute currently set on
        the object. The isinstance chain is order-sensitive: the
        specialised marker types must be checked before plain ``dict``.
        """
        members = dict(vars(return_obj))
        for name, value in members.items():
            # Homogeneous list of complex elements.
            if isinstance(value, _ListOf):
                setattr(
                    return_obj,
                    name,
                    self._fill_list_of(
                        node,
                        value.list_type,
                        value.xml_element_name
                    )
                )
            # List of scalar values nested under a parent element.
            elif isinstance(value, ScalarListOf):
                setattr(
                    return_obj,
                    name,
                    self._fill_scalar_list_of(
                        node,
                        value.list_type,
                        self._get_serialization_name(name),
                        value.xml_element_name
                    )
                )
            # Key/value pairs nested under a parent element.
            elif isinstance(value, _DictOf):
                setattr(
                    return_obj,
                    name,
                    self._fill_dict_of(
                        node,
                        self._get_serialization_name(name),
                        value.pair_xml_element_name,
                        value.key_xml_element_name,
                        value.value_xml_element_name
                    )
                )
            # Nested complex object of the same placeholder class.
            elif isinstance(value, WindowsAzureData):
                setattr(
                    return_obj,
                    name,
                    self._fill_instance_child(node, name, value.__class__)
                )
            # Plain dict of child nodeName -> text.
            elif isinstance(value, dict):
                setattr(
                    return_obj,
                    name,
                    self._fill_dict(
                        node,
                        self._get_serialization_name(name)
                    )
                )
            elif isinstance(value, _Base64String):
                value = self._fill_data_minidom(node, name, '')
                if value is not None:
                    value = self._decode_base64_to_text(value)
                # always set the attribute,
                # so we don't end up returning an object
                # with type _Base64String
                setattr(return_obj, name, value)
            else:
                # Scalar: coerce the element text to the placeholder's
                # type; missing elements leave the attribute untouched.
                value = self._fill_data_minidom(node, name, value)
                if value is not None:
                    setattr(return_obj, name, value)
def _fill_list_of(self, xmldoc, element_type, xml_element_name):
xmlelements = self._get_child_nodes(xmldoc, xml_element_name)
return [
self._parse_response_body_from_xml_node(xmlelement, element_type)
for xmlelement in xmlelements
]
def _parse_response_body_from_xml_node(self, node, return_type):
"""
parse the xml and fill all the data into a class of return_type
"""
return_obj = return_type()
self._fill_data_to_return_object(node, return_obj)
return return_obj
def _fill_scalar_list_of(self,
xmldoc,
element_type,
parent_xml_element_name,
xml_element_name):
xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name)
if xmlelements:
xmlelements = self._get_child_nodes(
xmlelements[0],
xml_element_name
)
return [
self._get_node_value(xmlelement, element_type)
for xmlelement in xmlelements
]
def _get_node_value(self, xmlelement, data_type):
value = xmlelement.firstChild.nodeValue
if data_type is datetime:
return self._to_datetime(value)
elif data_type is bool:
return value.lower() != 'false'
else:
return data_type(value)
def _get_serialization_name(self, element_name):
"""
Converts a Python name into a serializable name.
"""
known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)
if known is not None:
return known
if element_name.startswith('x_ms_'):
return element_name.replace('_', '-')
if element_name.endswith('_id'):
element_name = element_name.replace('_id', 'ID')
for name in ['content_', 'last_modified', 'if_', 'cache_control']:
if element_name.startswith(name):
element_name = element_name.replace('_', '-_')
return ''.join(name.capitalize() for name in element_name.split('_'))
def _fill_dict_of(self, xmldoc, parent_xml_element_name,
pair_xml_element_name, key_xml_element_name,
value_xml_element_name):
return_obj = {}
xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name)
if xmlelements:
xmlelements = self._get_child_nodes(
xmlelements[0],
pair_xml_element_name
)
for pair in xmlelements:
keys = self._get_child_nodes(pair, key_xml_element_name)
values = self._get_child_nodes(pair, value_xml_element_name)
if keys and values:
key = keys[0].firstChild.nodeValue
value = values[0].firstChild.nodeValue
return_obj[key] = value
return return_obj
def _fill_instance_child(self, xmldoc, element_name, return_type):
"""
Converts a child of the current dom element to the specified type.
"""
xmlelements = self._get_child_nodes(
xmldoc,
self._get_serialization_name(element_name)
)
if not xmlelements:
return None
return_obj = return_type()
self._fill_data_to_return_object(xmlelements[0], return_obj)
return return_obj
def _fill_dict(self, xmldoc, element_name):
xmlelements = self._get_child_nodes(xmldoc, element_name)
if xmlelements:
return_obj = {}
for child in xmlelements[0].childNodes:
if child.firstChild:
return_obj[child.nodeName] = child.firstChild.nodeValue
return return_obj
def _encode_base64(self, data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
encoded = base64.b64encode(data)
return encoded.decode('utf-8')
def _decode_base64_to_bytes(self, data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
return base64.b64decode(data)
def _decode_base64_to_text(self, data):
decoded_bytes = self._decode_base64_to_bytes(data)
return decoded_bytes.decode('utf-8')
def _fill_data_minidom(self, xmldoc, element_name, data_member):
xmlelements = self._get_child_nodes(
xmldoc,
self._get_serialization_name(element_name)
)
if not xmlelements or not xmlelements[0].childNodes:
return None
value = xmlelements[0].firstChild.nodeValue
if data_member is None:
return value
elif isinstance(data_member, datetime):
return self._to_datetime(value)
elif type(data_member) is bool:
return value.lower() != 'false'
elif type(data_member) is str:
return _real_unicode(value)
else:
return type(data_member)(value)
def _to_datetime(self, strtime):
return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f")
def _get_request_body(self, request_body):
if request_body is None:
return b''
if isinstance(request_body, WindowsAzureData):
request_body = self._convert_class_to_xml(request_body)
if isinstance(request_body, bytes):
return request_body
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
request_body = str(request_body)
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
return request_body
    def _convert_class_to_xml(self, source, xml_prefix=True):
        # Serialize a WindowsAzureData instance into an XML string.
        # NOTE(review): ET.Element() is called without a tag argument, which
        # raises TypeError under xml.etree.ElementTree - this path looks dead
        # or broken; confirm against callers before relying on it.
        # NOTE(review): xml_prefix is accepted but never used.
        root = ET.Element()
        doc = self._construct_element_tree(source, root)
        result = ensure_string(ET.tostring(doc, encoding='utf-8',
                                           method='xml'))
        return result
    def _construct_element_tree(self, source, etree):
        # Recursively append elements representing *source* onto *etree*.
        # NOTE(review): children are appended flat onto the same parent
        # (the recursive calls pass and return *etree* itself), and the
        # None branch returns ET.Element() without a tag, which raises
        # TypeError under xml.etree - verify this helper is actually used
        # before trusting its output shape.
        if source is None:
            return ET.Element()
        if isinstance(source, list):
            for value in source:
                etree.append(self._construct_element_tree(value, etree))
        elif isinstance(source, WindowsAzureData):
            class_name = source.__class__.__name__
            # Opening marker element named after the data class
            etree.append(ET.Element(class_name))
            for name, value in vars(source).items():
                if value is not None:
                    if (isinstance(value, list) or
                            isinstance(value, WindowsAzureData)):
                        etree.append(
                            self._construct_element_tree(value, etree)
                        )
                    else:
                        ele = ET.Element(self._get_serialization_name(name))
                        ele.text = xml_escape(str(value))
                        etree.append(ele)
            # Closing marker element (same name appended again)
            etree.append(ET.Element(class_name))
        return etree
def _parse_response_for_async_op(self, response):
if response is None:
return None
result = AsynchronousOperationResult()
if response.headers:
for name, value in response.headers.items():
if name.lower() == 'x-ms-request-id':
result.request_id = value
return result
def _get_deployment_path_using_name(self, service_name,
deployment_name=None):
components = [
'services/hostedservices/',
_str(service_name),
'/deployments'
]
resource = ''.join(components)
return self._get_path(resource, deployment_name)
def _get_path(self, resource, name):
path = '/' + self.subscription_id + '/' + resource
if name is not None:
path += '/' + _str(name)
return path
def _get_image_path(self, image_name=None):
return self._get_path('services/images', image_name)
def _get_vmimage_path(self, image_name=None):
return self._get_path('services/vmimages', image_name)
def _get_hosted_service_path(self, service_name=None):
return self._get_path('services/hostedservices', service_name)
def _get_deployment_path_using_slot(self, service_name, slot=None):
return self._get_path(
'services/hostedservices/%s/deploymentslots' % (
_str(service_name)
),
slot
)
def _get_disk_path(self, disk_name=None):
return self._get_path('services/disks', disk_name)
def _get_role_path(self, service_name, deployment_name, role_name=None):
components = [
'services/hostedservices/',
_str(service_name),
'/deployments/',
deployment_name,
'/roles'
]
resource = ''.join(components)
return self._get_path(resource, role_name)
def _get_storage_service_path(self, service_name=None):
return self._get_path('services/storageservices', service_name)
def _ex_complete_async_azure_operation(self, response=None,
operation_type='create_node'):
request_id = self._parse_response_for_async_op(response)
operation_status = self._get_operation_status(request_id.request_id)
timeout = 60 * 5
waittime = 0
interval = 5
while operation_status.status == "InProgress" and waittime < timeout:
operation_status = self._get_operation_status(request_id)
if operation_status.status == "Succeeded":
break
waittime += interval
time.sleep(interval)
if operation_status.status == 'Failed':
raise LibcloudError(
'Message: Async request for operation %s has failed' %
operation_type,
driver=self.connection.driver
)
def raise_for_response(self, response, valid_response):
if response.status != valid_response:
values = (response.error, response.body, response.status)
message = 'Message: %s, Body: %s, Status code: %s' % (values)
raise LibcloudError(message, driver=self)
"""
XML Serializer
Borrowed from the Azure SDK for Python which is licensed under Apache 2.0.
https://github.com/Azure/azure-sdk-for-python
"""
def _lower(text):
return text.lower()
class AzureXmlSerializer(object):
    """
    Builds the XML request bodies for the Azure Service Management API.

    Each ``*_to_xml`` static method returns either an ElementTree element
    (builders that take an ``xml`` parent) or a serialized XML string
    (top-level documents).
    """
    @staticmethod
    def create_storage_service_input_to_xml(service_name,
                                            description,
                                            label,
                                            affinity_group,
                                            location,
                                            geo_replication_enabled,
                                            extended_properties):
        return AzureXmlSerializer.doc_from_data(
            'CreateStorageServiceInput',
            [
                ('ServiceName', service_name),
                ('Description', description),
                ('Label', label),
                ('AffinityGroup', affinity_group),
                ('Location', location),
                ('GeoReplicationEnabled', geo_replication_enabled, _lower)
            ],
            extended_properties
        )
    @staticmethod
    def update_storage_service_input_to_xml(description,
                                            label,
                                            geo_replication_enabled,
                                            extended_properties):
        return AzureXmlSerializer.doc_from_data(
            'UpdateStorageServiceInput',
            [
                ('Description', description),
                ('Label', label, AzureNodeDriver._encode_base64),
                ('GeoReplicationEnabled', geo_replication_enabled, _lower)
            ],
            extended_properties
        )
    @staticmethod
    def regenerate_keys_to_xml(key_type):
        return AzureXmlSerializer.doc_from_data(
            'RegenerateKeys',
            [('KeyType', key_type)]
        )
    @staticmethod
    def update_hosted_service_to_xml(label, description, extended_properties):
        return AzureXmlSerializer.doc_from_data(
            'UpdateHostedService',
            [
                ('Label', label, AzureNodeDriver._encode_base64),
                ('Description', description)
            ],
            extended_properties
        )
    @staticmethod
    def create_hosted_service_to_xml(service_name,
                                     label,
                                     description,
                                     location,
                                     affinity_group=None,
                                     extended_properties=None):
        # An affinity group and an explicit location are mutually exclusive
        if affinity_group:
            return AzureXmlSerializer.doc_from_data(
                'CreateHostedService',
                [
                    ('ServiceName', service_name),
                    ('Label', label),
                    ('Description', description),
                    ('AffinityGroup', affinity_group),
                ],
                extended_properties
            )
        return AzureXmlSerializer.doc_from_data(
            'CreateHostedService',
            [
                ('ServiceName', service_name),
                ('Label', label),
                ('Description', description),
                ('Location', location),
            ],
            extended_properties
        )
    @staticmethod
    def create_storage_service_to_xml(service_name,
                                      label,
                                      description,
                                      location,
                                      affinity_group,
                                      extended_properties=None):
        return AzureXmlSerializer.doc_from_data(
            'CreateStorageServiceInput',
            [
                ('ServiceName', service_name),
                ('Label', label),
                ('Description', description),
                ('Location', location),
                ('AffinityGroup', affinity_group)
            ],
            extended_properties
        )
    @staticmethod
    def create_deployment_to_xml(name,
                                 package_url,
                                 label,
                                 configuration,
                                 start_deployment,
                                 treat_warnings_as_error,
                                 extended_properties):
        return AzureXmlSerializer.doc_from_data(
            'CreateDeployment',
            [
                ('Name', name),
                ('PackageUrl', package_url),
                ('Label', label, AzureNodeDriver._encode_base64),
                ('Configuration', configuration),
                ('StartDeployment', start_deployment, _lower),
                ('TreatWarningsAsError', treat_warnings_as_error, _lower)
            ],
            extended_properties
        )
    @staticmethod
    def swap_deployment_to_xml(production, source_deployment):
        return AzureXmlSerializer.doc_from_data(
            'Swap',
            [
                ('Production', production),
                ('SourceDeployment', source_deployment)
            ]
        )
    @staticmethod
    def update_deployment_status_to_xml(status):
        return AzureXmlSerializer.doc_from_data(
            'UpdateDeploymentStatus',
            [('Status', status)]
        )
    @staticmethod
    def change_deployment_to_xml(configuration,
                                 treat_warnings_as_error,
                                 mode,
                                 extended_properties):
        return AzureXmlSerializer.doc_from_data(
            'ChangeConfiguration',
            [
                ('Configuration', configuration),
                ('TreatWarningsAsError', treat_warnings_as_error, _lower),
                ('Mode', mode)
            ],
            extended_properties
        )
    @staticmethod
    def upgrade_deployment_to_xml(mode,
                                  package_url,
                                  configuration,
                                  label,
                                  role_to_upgrade,
                                  force,
                                  extended_properties):
        return AzureXmlSerializer.doc_from_data(
            'UpgradeDeployment',
            [
                ('Mode', mode),
                ('PackageUrl', package_url),
                ('Configuration', configuration),
                ('Label', label, AzureNodeDriver._encode_base64),
                ('RoleToUpgrade', role_to_upgrade),
                ('Force', force, _lower)
            ],
            extended_properties
        )
    @staticmethod
    def rollback_upgrade_to_xml(mode, force):
        return AzureXmlSerializer.doc_from_data(
            'RollbackUpdateOrUpgrade',
            [
                ('Mode', mode),
                ('Force', force, _lower)
            ]
        )
    @staticmethod
    def walk_upgrade_domain_to_xml(upgrade_domain):
        return AzureXmlSerializer.doc_from_data(
            'WalkUpgradeDomain',
            [('UpgradeDomain', upgrade_domain)]
        )
    @staticmethod
    def certificate_file_to_xml(data, certificate_format, password):
        return AzureXmlSerializer.doc_from_data(
            'CertificateFile',
            [
                ('Data', data),
                ('CertificateFormat', certificate_format),
                ('Password', password)
            ]
        )
    @staticmethod
    def create_affinity_group_to_xml(name, label, description, location):
        return AzureXmlSerializer.doc_from_data(
            'CreateAffinityGroup',
            [
                ('Name', name),
                ('Label', label, AzureNodeDriver._encode_base64),
                ('Description', description),
                ('Location', location)
            ]
        )
    @staticmethod
    def update_affinity_group_to_xml(label, description):
        return AzureXmlSerializer.doc_from_data(
            'UpdateAffinityGroup',
            [
                ('Label', label, AzureNodeDriver._encode_base64),
                ('Description', description)
            ]
        )
    @staticmethod
    def subscription_certificate_to_xml(public_key, thumbprint, data):
        return AzureXmlSerializer.doc_from_data(
            'SubscriptionCertificate',
            [
                ('SubscriptionCertificatePublicKey', public_key),
                ('SubscriptionCertificateThumbprint', thumbprint),
                ('SubscriptionCertificateData', data)
            ]
        )
    @staticmethod
    def os_image_to_xml(label, media_link, name, os):
        return AzureXmlSerializer.doc_from_data(
            'OSImage',
            [
                ('Label', label),
                ('MediaLink', media_link),
                ('Name', name),
                ('OS', os)
            ]
        )
    @staticmethod
    def data_virtual_hard_disk_to_xml(host_caching,
                                      disk_label,
                                      disk_name,
                                      lun,
                                      logical_disk_size_in_gb,
                                      media_link,
                                      source_media_link):
        return AzureXmlSerializer.doc_from_data(
            'DataVirtualHardDisk',
            [
                ('HostCaching', host_caching),
                ('DiskLabel', disk_label),
                ('DiskName', disk_name),
                ('Lun', lun),
                ('LogicalDiskSizeInGB', logical_disk_size_in_gb),
                ('MediaLink', media_link),
                ('SourceMediaLink', source_media_link)
            ]
        )
    @staticmethod
    def disk_to_xml(has_operating_system, label, media_link, name, os):
        return AzureXmlSerializer.doc_from_data(
            'Disk',
            [
                ('HasOperatingSystem', has_operating_system, _lower),
                ('Label', label),
                ('MediaLink', media_link),
                ('Name', name),
                ('OS', os)
            ]
        )
    # The three role-operation documents below differ only in the operation
    # name embedded in the OperationType element.
    @staticmethod
    def restart_role_operation_to_xml():
        xml = ET.Element("OperationType")
        xml.text = "RestartRoleOperation"
        doc = AzureXmlSerializer.doc_from_xml(
            'RestartRoleOperation',
            xml
        )
        result = ensure_string(ET.tostring(doc, encoding='utf-8'))
        return result
    @staticmethod
    def shutdown_role_operation_to_xml():
        xml = ET.Element("OperationType")
        xml.text = "ShutdownRoleOperation"
        doc = AzureXmlSerializer.doc_from_xml(
            'ShutdownRoleOperation',
            xml
        )
        result = ensure_string(ET.tostring(doc, encoding='utf-8'))
        return result
    @staticmethod
    def start_role_operation_to_xml():
        xml = ET.Element("OperationType")
        xml.text = "StartRoleOperation"
        doc = AzureXmlSerializer.doc_from_xml(
            'StartRoleOperation',
            xml
        )
        result = ensure_string(ET.tostring(doc, encoding='utf-8'))
        return result
    @staticmethod
    def windows_configuration_to_xml(configuration, xml):
        """Append a WindowsProvisioningConfiguration set onto *xml*."""
        AzureXmlSerializer.data_to_xml(
            [('ConfigurationSetType', configuration.configuration_set_type)],
            xml
        )
        AzureXmlSerializer.data_to_xml(
            [('ComputerName', configuration.computer_name)],
            xml
        )
        AzureXmlSerializer.data_to_xml(
            [('AdminPassword', configuration.admin_password)],
            xml
        )
        AzureXmlSerializer.data_to_xml(
            [
                (
                    'ResetPasswordOnFirstLogon',
                    configuration.reset_password_on_first_logon,
                    _lower
                )
            ],
            xml
        )
        AzureXmlSerializer.data_to_xml(
            [
                (
                    'EnableAutomaticUpdates',
                    configuration.enable_automatic_updates,
                    _lower
                )
            ],
            xml
        )
        AzureXmlSerializer.data_to_xml(
            [('TimeZone', configuration.time_zone)],
            xml
        )
        if configuration.domain_join is not None:
            # Bug fix: ET.xml(...) is not a function and .appemnd was a
            # typo - both crashed this branch with AttributeError/TypeError.
            domain = ET.Element("DomainJoin")
            creds = ET.Element("Credentials")
            domain.append(creds)
            xml.append(domain)
            AzureXmlSerializer.data_to_xml(
                [('Domain', configuration.domain_join.credentials.domain)],
                creds
            )
            AzureXmlSerializer.data_to_xml(
                [
                    (
                        'Username',
                        configuration.domain_join.credentials.username
                    )
                ],
                creds
            )
            AzureXmlSerializer.data_to_xml(
                [
                    (
                        'Password',
                        configuration.domain_join.credentials.password
                    )
                ],
                creds
            )
            AzureXmlSerializer.data_to_xml(
                [('JoinDomain', configuration.domain_join.join_domain)],
                domain
            )
            AzureXmlSerializer.data_to_xml(
                [
                    (
                        'MachineObjectOU',
                        configuration.domain_join.machine_object_ou
                    )
                ],
                domain
            )
        if configuration.stored_certificate_settings is not None:
            cert_settings = ET.Element("StoredCertificateSettings")
            xml.append(cert_settings)
            for cert in configuration.stored_certificate_settings:
                cert_setting = ET.Element("CertificateSetting")
                cert_settings.append(cert_setting)
                # Consistency: append via data_to_xml's xml argument like
                # the StoreName/Thumbprint calls below (same resulting tree)
                AzureXmlSerializer.data_to_xml(
                    [('StoreLocation', cert.store_location)],
                    cert_setting
                )
                AzureXmlSerializer.data_to_xml(
                    [('StoreName', cert.store_name)],
                    cert_setting
                )
                AzureXmlSerializer.data_to_xml(
                    [('Thumbprint', cert.thumbprint)],
                    cert_setting
                )
        AzureXmlSerializer.data_to_xml(
            [('AdminUsername', configuration.admin_user_name)],
            xml
        )
        return xml
    @staticmethod
    def linux_configuration_to_xml(configuration, xml):
        """Append a LinuxProvisioningConfiguration set onto *xml*."""
        AzureXmlSerializer.data_to_xml(
            [('ConfigurationSetType', configuration.configuration_set_type)],
            xml
        )
        AzureXmlSerializer.data_to_xml(
            [('HostName', configuration.host_name)],
            xml
        )
        AzureXmlSerializer.data_to_xml(
            [('UserName', configuration.user_name)],
            xml
        )
        AzureXmlSerializer.data_to_xml(
            [('UserPassword', configuration.user_password)],
            xml
        )
        AzureXmlSerializer.data_to_xml(
            [
                (
                    'DisableSshPasswordAuthentication',
                    configuration.disable_ssh_password_authentication,
                    _lower
                )
            ],
            xml
        )
        if configuration.ssh is not None:
            ssh = ET.Element("SSH")
            pkeys = ET.Element("PublicKeys")
            kpairs = ET.Element("KeyPairs")
            ssh.append(pkeys)
            ssh.append(kpairs)
            xml.append(ssh)
            for key in configuration.ssh.public_keys:
                pkey = ET.Element("PublicKey")
                pkeys.append(pkey)
                AzureXmlSerializer.data_to_xml(
                    [('Fingerprint', key.fingerprint)],
                    pkey
                )
                AzureXmlSerializer.data_to_xml([('Path', key.path)], pkey)
            for key in configuration.ssh.key_pairs:
                kpair = ET.Element("KeyPair")
                kpairs.append(kpair)
                AzureXmlSerializer.data_to_xml(
                    [('Fingerprint', key.fingerprint)],
                    kpair
                )
                AzureXmlSerializer.data_to_xml([('Path', key.path)], kpair)
        if configuration.custom_data is not None:
            AzureXmlSerializer.data_to_xml(
                [('CustomData', configuration.custom_data)],
                xml
            )
        return xml
    @staticmethod
    def network_configuration_to_xml(configuration, xml):
        """Append a NetworkConfiguration set (endpoints, subnets) onto
        *xml*."""
        AzureXmlSerializer.data_to_xml(
            [('ConfigurationSetType', configuration.configuration_set_type)],
            xml
        )
        input_endpoints = ET.Element("InputEndpoints")
        xml.append(input_endpoints)
        for endpoint in configuration.input_endpoints:
            input_endpoint = ET.Element("InputEndpoint")
            input_endpoints.append(input_endpoint)
            AzureXmlSerializer.data_to_xml(
                [
                    (
                        'LoadBalancedEndpointSetName',
                        endpoint.load_balanced_endpoint_set_name
                    )
                ],
                input_endpoint
            )
            AzureXmlSerializer.data_to_xml(
                [('LocalPort', endpoint.local_port)],
                input_endpoint
            )
            AzureXmlSerializer.data_to_xml(
                [('Name', endpoint.name)],
                input_endpoint
            )
            AzureXmlSerializer.data_to_xml(
                [('Port', endpoint.port)],
                input_endpoint
            )
            # Only emit a probe element when any probe field is populated
            if (endpoint.load_balancer_probe.path or
                    endpoint.load_balancer_probe.port or
                    endpoint.load_balancer_probe.protocol):
                load_balancer_probe = ET.Element("LoadBalancerProbe")
                input_endpoint.append(load_balancer_probe)
                AzureXmlSerializer.data_to_xml(
                    [('Path', endpoint.load_balancer_probe.path)],
                    load_balancer_probe
                )
                AzureXmlSerializer.data_to_xml(
                    [('Port', endpoint.load_balancer_probe.port)],
                    load_balancer_probe
                )
                AzureXmlSerializer.data_to_xml(
                    [('Protocol', endpoint.load_balancer_probe.protocol)],
                    load_balancer_probe
                )
            AzureXmlSerializer.data_to_xml(
                [('Protocol', endpoint.protocol)],
                input_endpoint
            )
            AzureXmlSerializer.data_to_xml(
                [
                    (
                        'EnableDirectServerReturn',
                        endpoint.enable_direct_server_return,
                        _lower
                    )
                ],
                input_endpoint
            )
        subnet_names = ET.Element("SubnetNames")
        xml.append(subnet_names)
        for name in configuration.subnet_names:
            AzureXmlSerializer.data_to_xml(
                [('SubnetName', name)],
                subnet_names
            )
        return xml
    @staticmethod
    def role_to_xml(availability_set_name,
                    data_virtual_hard_disks,
                    network_configuration_set,
                    os_virtual_hard_disk,
                    vm_image_name,
                    role_name,
                    role_size,
                    role_type,
                    system_configuration_set,
                    xml):
        """Append the full description of a role (VM) onto *xml*."""
        AzureXmlSerializer.data_to_xml([('RoleName', role_name)], xml)
        AzureXmlSerializer.data_to_xml([('RoleType', role_type)], xml)
        config_sets = ET.Element("ConfigurationSets")
        xml.append(config_sets)
        if system_configuration_set is not None:
            config_set = ET.Element("ConfigurationSet")
            config_sets.append(config_set)
            if isinstance(system_configuration_set, WindowsConfigurationSet):
                AzureXmlSerializer.windows_configuration_to_xml(
                    system_configuration_set,
                    config_set
                )
            elif isinstance(system_configuration_set, LinuxConfigurationSet):
                AzureXmlSerializer.linux_configuration_to_xml(
                    system_configuration_set,
                    config_set
                )
        if network_configuration_set is not None:
            config_set = ET.Element("ConfigurationSet")
            config_sets.append(config_set)
            AzureXmlSerializer.network_configuration_to_xml(
                network_configuration_set,
                config_set
            )
        if availability_set_name is not None:
            AzureXmlSerializer.data_to_xml(
                [('AvailabilitySetName', availability_set_name)],
                xml
            )
        if data_virtual_hard_disks is not None:
            vhds = ET.Element("DataVirtualHardDisks")
            xml.append(vhds)
            for hd in data_virtual_hard_disks:
                vhd = ET.Element("DataVirtualHardDisk")
                vhds.append(vhd)
                AzureXmlSerializer.data_to_xml(
                    [('HostCaching', hd.host_caching)],
                    vhd
                )
                AzureXmlSerializer.data_to_xml(
                    [('DiskLabel', hd.disk_label)],
                    vhd
                )
                AzureXmlSerializer.data_to_xml(
                    [('DiskName', hd.disk_name)],
                    vhd
                )
                AzureXmlSerializer.data_to_xml(
                    [('Lun', hd.lun)],
                    vhd
                )
                AzureXmlSerializer.data_to_xml(
                    [('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb)],
                    vhd
                )
                AzureXmlSerializer.data_to_xml(
                    [('MediaLink', hd.media_link)],
                    vhd
                )
        if os_virtual_hard_disk is not None:
            hd = ET.Element("OSVirtualHardDisk")
            xml.append(hd)
            AzureXmlSerializer.data_to_xml(
                [('HostCaching', os_virtual_hard_disk.host_caching)],
                hd
            )
            AzureXmlSerializer.data_to_xml(
                [('DiskLabel', os_virtual_hard_disk.disk_label)],
                hd
            )
            AzureXmlSerializer.data_to_xml(
                [('DiskName', os_virtual_hard_disk.disk_name)],
                hd
            )
            AzureXmlSerializer.data_to_xml(
                [('MediaLink', os_virtual_hard_disk.media_link)],
                hd
            )
            AzureXmlSerializer.data_to_xml(
                [('SourceImageName', os_virtual_hard_disk.source_image_name)],
                hd
            )
        if vm_image_name is not None:
            AzureXmlSerializer.data_to_xml(
                [('VMImageName', vm_image_name)],
                xml
            )
        if role_size is not None:
            AzureXmlSerializer.data_to_xml([('RoleSize', role_size)], xml)
        return xml
    @staticmethod
    def add_role_to_xml(role_name,
                        system_configuration_set,
                        os_virtual_hard_disk,
                        role_type,
                        network_configuration_set,
                        availability_set_name,
                        data_virtual_hard_disks,
                        vm_image_name,
                        role_size):
        doc = AzureXmlSerializer.doc_from_xml('PersistentVMRole')
        xml = AzureXmlSerializer.role_to_xml(
            availability_set_name,
            data_virtual_hard_disks,
            network_configuration_set,
            os_virtual_hard_disk,
            vm_image_name,
            role_name,
            role_size,
            role_type,
            system_configuration_set,
            doc
        )
        result = ensure_string(ET.tostring(xml, encoding='utf-8'))
        return result
    @staticmethod
    def update_role_to_xml(role_name,
                           os_virtual_hard_disk,
                           role_type,
                           network_configuration_set,
                           availability_set_name,
                           data_virtual_hard_disks,
                           vm_image_name,
                           role_size):
        doc = AzureXmlSerializer.doc_from_xml('PersistentVMRole')
        AzureXmlSerializer.role_to_xml(
            availability_set_name,
            data_virtual_hard_disks,
            network_configuration_set,
            os_virtual_hard_disk,
            vm_image_name,
            role_name,
            role_size,
            role_type,
            None,
            doc
        )
        result = ensure_string(ET.tostring(doc, encoding='utf-8'))
        return result
    @staticmethod
    def capture_role_to_xml(post_capture_action,
                            target_image_name,
                            target_image_label,
                            provisioning_configuration):
        xml = AzureXmlSerializer.data_to_xml(
            [('OperationType', 'CaptureRoleOperation')]
        )
        AzureXmlSerializer.data_to_xml(
            [('PostCaptureAction', post_capture_action)],
            xml
        )
        if provisioning_configuration is not None:
            provisioning_config = ET.Element("ProvisioningConfiguration")
            xml.append(provisioning_config)
            if isinstance(provisioning_configuration, WindowsConfigurationSet):
                AzureXmlSerializer.windows_configuration_to_xml(
                    provisioning_configuration,
                    provisioning_config
                )
            elif isinstance(provisioning_configuration, LinuxConfigurationSet):
                AzureXmlSerializer.linux_configuration_to_xml(
                    provisioning_configuration,
                    provisioning_config
                )
        AzureXmlSerializer.data_to_xml(
            [('TargetImageLabel', target_image_label)],
            xml
        )
        AzureXmlSerializer.data_to_xml(
            [('TargetImageName', target_image_name)],
            xml
        )
        doc = AzureXmlSerializer.doc_from_xml('CaptureRoleOperation', xml)
        result = ensure_string(ET.tostring(doc, encoding='utf-8'))
        return result
    @staticmethod
    def virtual_machine_deployment_to_xml(deployment_name,
                                          deployment_slot,
                                          label,
                                          role_name,
                                          system_configuration_set,
                                          os_virtual_hard_disk,
                                          role_type,
                                          network_configuration_set,
                                          availability_set_name,
                                          data_virtual_hard_disks,
                                          role_size,
                                          virtual_network_name,
                                          vm_image_name):
        doc = AzureXmlSerializer.doc_from_xml('Deployment')
        AzureXmlSerializer.data_to_xml([('Name', deployment_name)], doc)
        AzureXmlSerializer.data_to_xml(
            [('DeploymentSlot', deployment_slot)],
            doc
        )
        AzureXmlSerializer.data_to_xml([('Label', label)], doc)
        role_list = ET.Element("RoleList")
        role = ET.Element("Role")
        role_list.append(role)
        doc.append(role_list)
        AzureXmlSerializer.role_to_xml(
            availability_set_name,
            data_virtual_hard_disks,
            network_configuration_set,
            os_virtual_hard_disk,
            vm_image_name,
            role_name,
            role_size,
            role_type,
            system_configuration_set,
            role
        )
        if virtual_network_name is not None:
            doc.append(
                AzureXmlSerializer.data_to_xml(
                    [('VirtualNetworkName', virtual_network_name)]
                )
            )
        result = ensure_string(ET.tostring(doc, encoding='utf-8'))
        return result
    @staticmethod
    def data_to_xml(data, xml=None):
        """
        Creates an xml fragment from the specified data.
        data: Array of tuples, where first: xml element name
              second: xml element text
              third: optional conversion function applied to the text
        When *xml* is given, elements are appended to it and *xml* is
        returned; otherwise the first created element is returned directly.
        """
        for element in data:
            name = element[0]
            val = element[1]
            converter = element[2] if len(element) > 2 else None
            if val is not None:
                if converter is not None:
                    text = _str(converter(_str(val)))
                else:
                    text = _str(val)
                entry = ET.Element(name)
                entry.text = text
                if xml is not None:
                    xml.append(entry)
                else:
                    return entry
        return xml
    @staticmethod
    def doc_from_xml(document_element_name, inner_xml=None):
        """
        Wraps the specified xml in an xml root element with default azure
        namespaces
        """
        # Note: Namespaces don't work consistently in Python 2 and 3, so the
        # xmlns attribute is set directly instead of using an nsmap.
        xml = ET.Element(document_element_name)
        xml.set("xmlns", "http://schemas.microsoft.com/windowsazure")
        if inner_xml is not None:
            xml.append(inner_xml)
        return xml
    @staticmethod
    def doc_from_data(document_element_name, data, extended_properties=None):
        """Build a complete XML document string from (name, value[, conv])
        tuples plus optional extended properties."""
        doc = AzureXmlSerializer.doc_from_xml(document_element_name)
        AzureXmlSerializer.data_to_xml(data, doc)
        # Robustness fix: the fragment helper returns None for an empty
        # dict, and appending None raises TypeError - use truthiness
        if extended_properties:
            doc.append(
                AzureXmlSerializer.extended_properties_dict_to_xml_fragment(
                    extended_properties
                )
            )
        result = ensure_string(ET.tostring(doc, encoding='utf-8'))
        return result
    @staticmethod
    def extended_properties_dict_to_xml_fragment(extended_properties):
        """Return an <ExtendedProperties> element for *extended_properties*,
        or None when the dict is empty or None."""
        if extended_properties is not None and len(extended_properties) > 0:
            xml = ET.Element("ExtendedProperties")
            for key, val in extended_properties.items():
                extended_property = ET.Element("ExtendedProperty")
                name = ET.Element("Name")
                name.text = _str(key)
                value = ET.Element("Value")
                value.text = _str(val)
                extended_property.append(name)
                extended_property.append(value)
                xml.append(extended_property)
            return xml
"""
Data Classes
Borrowed from the Azure SDK for Python.
"""
class WindowsAzureData(object):
    """
    Base marker class for the Azure data-model classes below.
    It is only used for isinstance() checks during (de)serialization.
    """
    pass
class WindowsAzureDataTypedList(WindowsAzureData):
    """List-like base: elements are parsed into *list_type* instances,
    optionally from XML elements named *xml_element_name*."""
    # Subclasses set these two class attributes
    list_type = None
    xml_element_name = None
    def __init__(self):
        self.items = _ListOf(self.list_type, self.xml_element_name)
    def __iter__(self):
        return iter(self.items)
    def __len__(self):
        return len(self.items)
    def __getitem__(self, index):
        return self.items[index]
class OSVirtualHardDisk(WindowsAzureData):
    """Description of the operating-system disk attached to a role."""
    def __init__(self, source_image_name=None, media_link=None,
                 host_caching=None, disk_label=None, disk_name=None):
        self.source_image_name = source_image_name
        self.media_link = media_link
        self.host_caching = host_caching
        self.disk_label = disk_label
        self.disk_name = disk_name
        self.os = ''  # undocumented, not used when adding a role
class LinuxConfigurationSet(WindowsAzureData):
    """Provisioning settings for a Linux role (user, password, SSH keys)."""
    def __init__(self,
                 host_name=None,
                 user_name=None,
                 user_password=None,
                 disable_ssh_password_authentication=None,
                 custom_data=None):
        # Fixed type discriminator consumed by the XML serializer
        self.configuration_set_type = 'LinuxProvisioningConfiguration'
        self.host_name = host_name
        self.user_name = user_name
        self.user_password = user_password
        self.disable_ssh_password_authentication = \
            disable_ssh_password_authentication
        self.ssh = SSH()
        self.custom_data = custom_data
class WindowsConfigurationSet(WindowsAzureData):
    """Provisioning settings for a Windows role (admin account, domain
    join, stored certificates)."""
    def __init__(self,
                 computer_name=None,
                 admin_password=None,
                 reset_password_on_first_logon=None,
                 enable_automatic_updates=None,
                 time_zone=None,
                 admin_user_name=None):
        # Fixed type discriminator consumed by the XML serializer
        self.configuration_set_type = 'WindowsProvisioningConfiguration'
        self.computer_name = computer_name
        self.admin_password = admin_password
        self.reset_password_on_first_logon = reset_password_on_first_logon
        self.enable_automatic_updates = enable_automatic_updates
        self.time_zone = time_zone
        self.admin_user_name = admin_user_name
        self.domain_join = DomainJoin()
        self.stored_certificate_settings = StoredCertificateSettings()
class DomainJoin(WindowsAzureData):
    """Active Directory domain-join settings for a Windows role."""
    def __init__(self):
        self.credentials = Credentials()
        self.join_domain = ''
        self.machine_object_ou = ''
class Credentials(WindowsAzureData):
    """Domain credentials used for joining a Windows role to a domain."""
    def __init__(self):
        self.domain = ''
        self.username = ''
        self.password = ''
class CertificateSetting(WindowsAzureData):
    """
    Initializes a certificate setting.
    thumbprint:
        Specifies the thumbprint of the certificate to be provisioned. The
        thumbprint must specify an existing service certificate.
    store_name:
        Specifies the name of the certificate store from which to retrieve
        the certificate.
    store_location:
        Specifies the target certificate store location on the virtual
        machine. The only supported value is LocalMachine.
    """
    def __init__(self, thumbprint='', store_name='', store_location=''):
        self.thumbprint = thumbprint
        self.store_name = store_name
        self.store_location = store_location
class StoredCertificateSettings(WindowsAzureDataTypedList):
    """Typed list of CertificateSetting entries."""
    list_type = CertificateSetting
    _repr_attributes = [
        'items'
    ]
class SSH(WindowsAzureData):
    """SSH settings for a Linux role: public keys and key pairs."""
    def __init__(self):
        self.public_keys = PublicKeys()
        self.key_pairs = KeyPairs()
class PublicKey(WindowsAzureData):
    """An SSH public key identified by fingerprint and target path."""
    def __init__(self, fingerprint='', path=''):
        self.fingerprint = fingerprint
        self.path = path
class PublicKeys(WindowsAzureDataTypedList):
    """Typed list of PublicKey entries."""
    list_type = PublicKey
    _repr_attributes = [
        'items'
    ]
class AzureKeyPair(WindowsAzureData):
    """An SSH key pair identified by fingerprint and target path."""
    def __init__(self, fingerprint='', path=''):
        self.fingerprint = fingerprint
        self.path = path
class KeyPairs(WindowsAzureDataTypedList):
    """Typed list of AzureKeyPair entries."""
    list_type = AzureKeyPair
    _repr_attributes = [
        'items'
    ]
class LoadBalancerProbe(WindowsAzureData):
    """Health-probe settings for a load-balanced input endpoint."""
    def __init__(self):
        self.path = ''
        self.port = ''
        self.protocol = ''
class ConfigurationSet(WindowsAzureData):
    """Generic configuration set (network endpoints, subnets) of a role."""
    def __init__(self):
        self.configuration_set_type = ''
        self.role_type = ''
        self.input_endpoints = ConfigurationSetInputEndpoints()
        self.subnet_names = ScalarListOf(str, 'SubnetName')
class ConfigurationSets(WindowsAzureDataTypedList):
    """Typed list of ConfigurationSet entries."""
    list_type = ConfigurationSet
    _repr_attributes = [
        'items'
    ]
class ConfigurationSetInputEndpoint(WindowsAzureData):
    """A single (possibly load-balanced) input endpoint of a role."""
    def __init__(self,
                 name='',
                 protocol='',
                 port='',
                 local_port='',
                 load_balanced_endpoint_set_name='',
                 enable_direct_server_return=False):
        self.enable_direct_server_return = enable_direct_server_return
        self.load_balanced_endpoint_set_name = load_balanced_endpoint_set_name
        self.local_port = local_port
        self.name = name
        self.port = port
        self.load_balancer_probe = LoadBalancerProbe()
        self.protocol = protocol
class ConfigurationSetInputEndpoints(WindowsAzureDataTypedList):
    """Typed list of ConfigurationSetInputEndpoint entries, parsed from
    <InputEndpoint> elements."""
    list_type = ConfigurationSetInputEndpoint
    xml_element_name = 'InputEndpoint'
    _repr_attributes = [
        'items'
    ]
class Location(WindowsAzureData):
    """An Azure datacenter location and its advertised capabilities."""
    def __init__(self):
        self.name = ''
        self.display_name = ''
        self.available_services = ScalarListOf(str, 'AvailableService')
        self.compute_capabilities = ComputeCapability()
class Locations(WindowsAzureDataTypedList):
    """Typed list of Location entries."""
    list_type = Location
    _repr_attributes = [
        'items'
    ]
class ComputeCapability(WindowsAzureData):
    """Role sizes supported for virtual machines at a location."""
    def __init__(self):
        self.virtual_machines_role_sizes = ScalarListOf(str, 'RoleSize')
class VirtualMachinesRoleSizes(WindowsAzureData):
    """Wrapper around a scalar list of <RoleSize> values."""
    def __init__(self):
        self.role_size = ScalarListOf(str, 'RoleSize')
class OSImage(WindowsAzureData):
    """Metadata for an operating-system image in the image repository."""
    def __init__(self):
        self.affinity_group = ''
        self.category = ''
        self.location = ''
        self.logical_size_in_gb = 0
        self.label = ''
        self.media_link = ''
        self.name = ''
        self.os = ''
        self.eula = ''
        self.description = ''
class Images(WindowsAzureDataTypedList):
    """Typed list of OSImage entries."""
    list_type = OSImage
    _repr_attributes = [
        'items'
    ]
class VMImage(WindowsAzureData):
    """Metadata for a captured virtual-machine image."""
    def __init__(self):
        self.name = ''
        self.label = ''
        self.category = ''
        self.os_disk_configuration = OSDiskConfiguration()
        self.service_name = ''
        self.deployment_name = ''
        self.role_name = ''
        self.location = ''
        self.affinity_group = ''
class VMImages(WindowsAzureDataTypedList):
    """Typed list of VMImage entries."""
    list_type = VMImage
    _repr_attributes = [
        'items'
    ]
class VirtualIP(WindowsAzureData):
    """A virtual IP address assigned to a deployment."""
    def __init__(self):
        self.address = ''
        self.is_dns_programmed = ''
        self.name = ''
class VirtualIPs(WindowsAzureDataTypedList):
    """Typed list of VirtualIP entries."""
    list_type = VirtualIP
    _repr_attributes = [
        'items'
    ]
class HostedService(WindowsAzureData, ReprMixin):
    """A hosted (cloud) service with its properties and deployments."""
    _repr_attributes = [
        'service_name',
        'url'
    ]
    def __init__(self):
        self.url = ''
        self.service_name = ''
        self.hosted_service_properties = HostedServiceProperties()
        self.deployments = Deployments()
class HostedServices(WindowsAzureDataTypedList, ReprMixin):
    """Typed list of HostedService entries."""
    list_type = HostedService
    _repr_attributes = [
        'items'
    ]
class HostedServiceProperties(WindowsAzureData):
    """Descriptive properties of a hosted service."""
    def __init__(self):
        self.description = ''
        self.location = ''
        self.affinity_group = ''
        # Azure transmits the label base64-encoded
        self.label = _Base64String()
        self.status = ''
        self.date_created = ''
        self.date_last_modified = ''
        self.extended_properties = _DictOf(
            'ExtendedProperty',
            'Name',
            'Value'
        )
class Deployment(WindowsAzureData):
    """A deployment within a hosted service, including its roles,
    endpoints and upgrade state."""
    def __init__(self):
        self.name = ''
        self.deployment_slot = ''
        self.private_id = ''
        self.status = ''
        # Azure transmits label and configuration base64-encoded
        self.label = _Base64String()
        self.url = ''
        self.configuration = _Base64String()
        self.role_instance_list = RoleInstanceList()
        self.upgrade_status = UpgradeStatus()
        self.upgrade_domain_count = ''
        self.role_list = RoleList()
        self.sdk_version = ''
        self.input_endpoint_list = InputEndpoints()
        self.locked = False
        self.rollback_allowed = False
        self.persistent_vm_downtime_info = PersistentVMDowntimeInfo()
        self.created_time = ''
        self.last_modified_time = ''
        self.extended_properties = _DictOf(
            'ExtendedProperty',
            'Name',
            'Value'
        )
        self.virtual_ips = VirtualIPs()
class Deployments(WindowsAzureDataTypedList):
    """Typed collection of ``Deployment`` items (element type in ``list_type``)."""
    list_type = Deployment
    _repr_attributes = [
        'items'
    ]
class UpgradeStatus(WindowsAzureData):
    """Data holder for a deployment's in-place upgrade status."""
    def __init__(self):
        self.upgrade_type = ''
        self.current_upgrade_domain_state = ''
        self.current_upgrade_domain = ''
class RoleInstance(WindowsAzureData):
    """Data holder for a single role instance (VM) in a deployment."""
    def __init__(self):
        self.role_name = ''
        self.instance_name = ''
        self.instance_status = ''
        self.instance_upgrade_domain = 0
        self.instance_fault_domain = 0
        self.instance_size = ''
        self.instance_state_details = ''
        self.instance_error_code = ''
        self.ip_address = ''
        self.instance_endpoints = InstanceEndpoints()
        self.power_state = ''
        self.fqdn = ''
        self.host_name = ''
class RoleInstanceList(WindowsAzureDataTypedList):
    """Typed collection of ``RoleInstance`` items (element type in ``list_type``)."""
    list_type = RoleInstance
    _repr_attributes = [
        'items'
    ]
class InstanceEndpoint(WindowsAzureData):
    """Data holder for a network endpoint exposed by a role instance."""
    def __init__(self):
        self.name = ''
        self.vip = ''
        self.public_port = ''
        self.local_port = ''
        self.protocol = ''
class InstanceEndpoints(WindowsAzureDataTypedList):
    """Typed collection of ``InstanceEndpoint`` items (element type in ``list_type``)."""
    list_type = InstanceEndpoint
    _repr_attributes = [
        'items'
    ]
class InputEndpoint(WindowsAzureData):
    """Data holder for a deployment-level input endpoint (role, VIP, port)."""
    def __init__(self):
        self.role_name = ''
        self.vip = ''
        self.port = ''
class InputEndpoints(WindowsAzureDataTypedList):
    """Typed collection of ``InputEndpoint`` items (element type in ``list_type``)."""
    list_type = InputEndpoint
    _repr_attributes = [
        'items'
    ]
class Role(WindowsAzureData):
    """Data holder for a role entry (name and OS version)."""
    def __init__(self):
        self.role_name = ''
        self.os_version = ''
class RoleList(WindowsAzureDataTypedList):
    """Typed collection of ``Role`` items (element type in ``list_type``)."""
    list_type = Role
    _repr_attributes = [
        'items'
    ]
class PersistentVMDowntimeInfo(WindowsAzureData):
    """Data holder for a persistent VM's downtime window and status."""
    def __init__(self):
        self.start_time = ''
        self.end_time = ''
        self.status = ''
class AsynchronousOperationResult(WindowsAzureData):
    """Wraps the request id returned by an asynchronous management operation."""
    def __init__(self, request_id=None):
        self.request_id = request_id
class Disk(WindowsAzureData):
    """Data holder for a managed disk record; all fields default to empty values."""
    def __init__(self):
        self.affinity_group = ''
        # What (service/deployment/role) the disk is currently attached to
        self.attached_to = AttachedTo()
        self.has_operating_system = ''
        self.is_corrupted = ''
        self.location = ''
        self.logical_disk_size_in_gb = 0
        self.label = ''
        self.media_link = ''
        self.name = ''
        self.os = ''
        self.source_image_name = ''
class Disks(WindowsAzureDataTypedList):
    """Typed collection of ``Disk`` items (element type in ``list_type``)."""
    list_type = Disk
    _repr_attributes = [
        'items'
    ]
class AttachedTo(WindowsAzureData):
    """Identifies the service/deployment/role a disk is attached to."""
    def __init__(self):
        self.hosted_service_name = ''
        self.deployment_name = ''
        self.role_name = ''
class OperationError(WindowsAzureData):
    """Error code/message pair attached to a failed ``Operation``."""
    def __init__(self):
        self.code = ''
        self.message = ''
class Operation(WindowsAzureData):
    """Data holder for the status of a management operation."""
    def __init__(self):
        self.id = ''
        self.status = ''
        self.http_status_code = ''
        self.error = OperationError()
class OperatingSystem(WindowsAzureData):
    """Data holder for a guest operating system version entry."""
    def __init__(self):
        self.version = ''
        self.label = _Base64String()
        self.is_default = True
        self.is_active = True
        self.family = 0
        self.family_label = _Base64String()
class OSDiskConfiguration(WindowsAzureData):
    """OS disk settings embedded in a ``VMImage`` record."""
    def __init__(self):
        self.name = ''
        self.host_caching = ''
        self.os_state = ''
        self.os = ''
        self.media_link = ''
        self.logical_disk_size_in_gb = 0
class OperatingSystems(WindowsAzureDataTypedList):
    """Typed collection of ``OperatingSystem`` items (element type in ``list_type``)."""
    list_type = OperatingSystem
    _repr_attributes = [
        'items'
    ]
class OperatingSystemFamily(WindowsAzureData):
    """An OS family plus the ``OperatingSystems`` it contains."""
    def __init__(self):
        self.name = ''
        self.label = _Base64String()
        self.operating_systems = OperatingSystems()
class OperatingSystemFamilies(WindowsAzureDataTypedList):
    """Typed collection of ``OperatingSystemFamily`` items (element type in ``list_type``)."""
    list_type = OperatingSystemFamily
    _repr_attributes = [
        'items'
    ]
class Subscription(WindowsAzureData):
    """Data holder for subscription details, including quota limits
    (``max_*``) and current usage (``current_*``)."""
    def __init__(self):
        self.subscription_id = ''
        self.subscription_name = ''
        self.subscription_status = ''
        self.account_admin_live_email_id = ''
        self.service_admin_live_email_id = ''
        self.max_core_count = 0
        self.max_storage_accounts = 0
        self.max_hosted_services = 0
        self.current_core_count = 0
        self.current_hosted_services = 0
        self.current_storage_accounts = 0
        self.max_virtual_network_sites = 0
        self.max_local_network_sites = 0
        self.max_dns_servers = 0
class AvailabilityResponse(WindowsAzureData):
    """Boolean result of a name-availability check."""
    def __init__(self):
        self.result = False
class SubscriptionCertificate(WindowsAzureData):
    """Data holder for a management certificate attached to a subscription."""
    def __init__(self):
        self.subscription_certificate_public_key = ''
        self.subscription_certificate_thumbprint = ''
        self.subscription_certificate_data = ''
        self.created = ''
class SubscriptionCertificates(WindowsAzureDataTypedList):
    """Typed collection of ``SubscriptionCertificate`` items (element type in ``list_type``)."""
    list_type = SubscriptionCertificate
    _repr_attributes = [
        'items'
    ]
class AzureHTTPRequest(object):
    """Mutable description of an outgoing HTTP request to the Azure API."""

    def __init__(self):
        self.host = ''
        self.method = ''
        self.path = ''
        self.query = []             # (name, value) pairs
        self.headers = {}           # header name -> header value
        self.body = ''
        # When set, overrides the connection protocol (e.g. force HTTPS)
        self.protocol_override = None
class AzureHTTPResponse(object):
    """Record of an HTTP response: status code, reason, headers and body."""

    def __init__(self, status, message, headers, body):
        self.body = body
        self.headers = headers
        self.message = message
        self.status = status
"""
Helper classes and functions.
"""
class _Base64String(str):
pass
class _ListOf(list):
"""
A list which carries with it the type that's expected to go in it.
Used for deserializaion and construction of the lists
"""
def __init__(self, list_type, xml_element_name=None):
self.list_type = list_type
if xml_element_name is None:
self.xml_element_name = list_type.__name__
else:
self.xml_element_name = xml_element_name
super(_ListOf, self).__init__()
class ScalarListOf(list):
    """
    A ``list`` of scalar values that carries both the scalar type and the
    XML element name to use during (de)serialization.
    """

    def __init__(self, list_type, xml_element_name):
        super(ScalarListOf, self).__init__()
        self.list_type = list_type
        self.xml_element_name = xml_element_name
class _DictOf(dict):
"""
A dict which carries with it the xml element names for key,val.
Used for deserializaion and construction of the lists
"""
def __init__(self,
pair_xml_element_name,
key_xml_element_name,
value_xml_element_name):
self.pair_xml_element_name = pair_xml_element_name
self.key_xml_element_name = key_xml_element_name
self.value_xml_element_name = value_xml_element_name
super(_DictOf, self).__init__()
class AzureNodeLocation(NodeLocation):
    """NodeLocation extended with Azure-specific metadata: the services
    available in the region and the VM role sizes it supports."""

    def __init__(self, id, name, country, driver, available_services,
                 virtual_machine_role_sizes):
        super(AzureNodeLocation, self).__init__(id, name, country, driver)
        self.available_services = available_services
        self.virtual_machine_role_sizes = virtual_machine_role_sizes

    def __repr__(self):
        return (
            '<AzureNodeLocation: id=%s, name=%s, country=%s, '
            'driver=%s services=%s virtualMachineRoleSizes=%s >'
            % (self.id, self.name, self.country, self.driver.name,
               ','.join(self.available_services),
               ','.join(self.virtual_machine_role_sizes))
        )
| StackPointCloud/libcloud | libcloud/compute/drivers/azure.py | Python | apache-2.0 | 113,180 | [
"VisIt"
] | 7e22d193ab3bfe58d218c45eafb33e4b0fd4ddddcd7f645f72b145b8b64dbf82 |
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
import warnings
import logging
import re
from io import StringIO
from veidt.kernel import get_kernel
import pandas as pd
from collections import OrderedDict, defaultdict
from scipy.spatial.distance import pdist, squareform
import numpy as np
from monty.io import zopen
from pymatgen import Structure, Lattice
from veidt.potential.processing import convert_docs, pool_from, MonteCarloSampler
from veidt.potential.abstract import Potential, PotentialVeidt
from veidt.describer.atomic_describer import AGNIFingerprints
from veidt.potential.lammps.calcs import EnergyForceStress
from sklearn.model_selection import GridSearchCV
from sklearn.kernel_ridge import KernelRidge
# Module-level logger for this module; default verbosity is INFO.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
class AGNIPotential(Potential):
    """
    This class implements Adaptive generalizable neighborhood
    informed potential.
    """
    # LAMMPS pair_style / pair_coeff templates used by write_param().
    pair_style = 'pair_style agni'
    pair_coeff = 'pair_coeff * * {} {}'
    def __init__(self, name=None):
        """
        Args:
            name (str): Name of force field; defaults to 'AGNIPotential'.
        """
        self.name = name if name else 'AGNIPotential'
        # Hyper-parameters serialized (in this order) by write_param().
        self.param = OrderedDict(element=None, interaction=None,
                                 Rc=None, eta=None, sigma=None)
        self.xU = None       # reference feature vectors
        self.yU = None       # reference force targets
        self.alphas = None   # KRR dual coefficients, one per reference point
        self.specie = None   # element symbol used in pair_coeff
    def from_file(self, filename):
        """
        Get the AGNIPotential parameters from file.
        Args:
            filename (str): Filename to be read.
        Returns:
            None. Parsed parameters are stored on ``self.param``,
            ``self.xU``, ``self.yU`` and ``self.alphas``.
        """
        with zopen(filename) as f:
            lines = f.read()
        # NOTE(review): 'generation', 'num_elements', 'interaction', 'etas'
        # and 'indices' below are parsed but never used.
        generation = int(re.search('generation (.*)', lines).group(1))
        num_elements = int(re.search('n_elements (.*)', lines).group(1))
        element_type = re.search('\nelement(.*)\n', lines).group(1)
        interaction = re.search('interaction (.*)\n', lines).group(1)
        cutoff = float(re.search('Rc (.*)\n', lines).group(1))
        eta_str = re.search('eta (.*)\n', lines).group(1)
        etas = 1 / np.sqrt(list(map(lambda s: float(s), eta_str.split())))
        sigma = float(re.search('sigma(.*)\n', lines).group(1))
        lambd = float(re.search('lambda(.*)\n', lines).group(1))
        # b = float(re.search('\nb(.*)\n', lines).group(1))
        num_references = int(re.search('n_train(.*)\n', lines).group(1))
        self.param['element'] = element_type
        self.param['interaction'] = element_type
        self.param['Rc'] = cutoff
        self.param['eta'] = list(map(lambda s: float(s), eta_str.split()))
        self.param['sigma'] = sigma
        self.param['n_train'] = num_references
        self.param['lambda'] = lambd
        # Everything between 'endVar' and EOF is the reference data table:
        # one row per reference point: index, features..., target, alpha.
        pattern = re.compile('endVar\s*(.*?)(?=\n$|$)', re.S)
        def map_format(string):
            return [float(s) for s in string.split()]
        references_params = np.array(list(map(map_format,
                                              pattern.findall(lines)[0].split('\n'))))
        assert len(references_params) == num_references
        indices = references_params[:, 0]
        xU = references_params[:, 1:-2]
        yU = references_params[:, -2]
        alphas = references_params[:, -1]
        self.xU = xU
        self.yU = yU
        self.alphas = alphas
    def sample(self, datapool=None, r_cut=8, eta_size=8, num_samples=3000,
               num_attempts=10000, t_init=1000, t_final=100):
        """
        Use metropolis sampling method to select uniform and diverse enough
        data subsets to represent the whole structure-derived features.
        Args:
            datapool (list): Data pool to be sampled.
            r_cut (float): Cutoff radius to generate high-dimensional
                agni features.
            eta_size (int): The size of dimensions of features, range
                in logarithmic grid between 0.8A - 8A.
            num_samples (int): Number of datasets sampled from datapool.
            num_attempts (int): Number of sampling attempts.
            t_init (int): Initial temperature.
            t_final (int): Final temperature.
        Returns:
            (features, targets, features_selected, targets_selected)
        """
        def cost_function(x):
            # Penalize clustered points: sum of inverse squared pair distances.
            dist_matrix = squareform(pdist(x))
            np.fill_diagonal(dist_matrix, np.Infinity)
            return np.sum(1. / dist_matrix ** 2)
        # Logarithmic eta grid spanning 8 A down to 0.8 A.
        etas = np.exp(np.linspace(np.log(8), np.log(0.8), eta_size, dtype=np.float16))
        fingerprint = AGNIFingerprints(r_cut=r_cut, etas=etas)
        self.describer = fingerprint
        structures = []
        concat_forces = []
        for dataset in datapool:
            if isinstance(dataset['structure'], dict):
                structure = Structure.from_dict(dataset['structure'])
            else:
                structure = dataset['structure']
            structures.append(structure)
            concat_forces.append(np.array(dataset['outputs']['forces']).ravel())
        # NOTE(review): element/interaction come from the *last* structure;
        # assumes a single-element, homogeneous data pool.
        self.param['Rc'] = r_cut
        self.param['element'] = structure.symbol_set[0]
        self.param['interaction'] = structure.symbol_set[0]
        self.param['eta'] = list(1 / (etas ** 2))
        self.specie = structure.symbol_set[0]
        features = fingerprint.describe_all(structures).values
        targets = np.concatenate(concat_forces)
        assert features.shape[0] == len(targets)
        mcsampler = MonteCarloSampler(datasets=features, num_samples=num_samples,
                                      cost_function=cost_function)
        mcsampler.sample(num_attempts=num_attempts, t_init=t_init, t_final=t_final)
        features_selected = features[mcsampler.index_selected]
        targets_selected = targets[mcsampler.index_selected]
        return features, targets, features_selected, targets_selected
    def fit(self, features, targets, cv=5, alpha=1e-8,
            scoring_criteria='neg_mean_absolute_error', threshold=1e-3):
        """
        Fit the dataset with kernel ridge regression.
        Args:
            features (np.array): features X.
            targets (np.array): targets y.
            cv (int): The number of folds in cross validation.
                Default to 5.
            alpha (float): Small positive number.
                Regularization parameter in KRR.
            scoring_criteria (str): The scoring strategy to evaluate the
                prediction on test sets. The same as the scoring
                parameter in sklearn.model_selection.GridSearchCV.
                Default to 'neg_mean_absolute_error', i.e. MAE.
            threshold (float): The converged threshold of final
                optimal sigma.
        Returns:
            (float) The optimized gamma.
        """
        # Iteratively refine gamma: grid-search, then re-grid between the
        # two best candidates until they agree to within `threshold`.
        st_gamma = -np.inf
        nd_gamma = np.inf
        gamma_trials = np.logspace(-6, 4, 11)
        while (abs(st_gamma - nd_gamma) > threshold):
            kr = GridSearchCV(KernelRidge(kernel='rbf', alpha=alpha,
                              gamma=0.1), cv=cv, param_grid={"gamma": gamma_trials},
                              return_train_score=True)
            kr.fit(features, targets)
            cv_results = pd.DataFrame(kr.cv_results_)
            st_gamma = cv_results['param_gamma'][cv_results['rank_test_score']
                                                 == 1].iloc[0]
            nd_gamma = cv_results['param_gamma'][cv_results['rank_test_score']
                                                 == 2].iloc[0]
            gamma_trials = np.linspace(min(st_gamma, nd_gamma), max(st_gamma, nd_gamma), 10)
        gamma = st_gamma
        # Dual coefficients computed explicitly for write_param(); the
        # sklearn model below is kept for in-Python prediction.
        K = np.exp(-gamma * squareform(pdist(features)) ** 2)
        alphas = np.dot(np.linalg.inv(K + alpha * np.eye(len(features))), targets)
        kkr = KernelRidge(alpha=alpha, gamma=gamma, kernel='rbf')
        kkr.fit(features, targets)
        self.param['n_train'] = len(features)
        self.param['lambda'] = alpha
        self.param['sigma'] = 1 / np.sqrt(2 * gamma)
        self.xU = features
        self.yU = targets
        self.predictor = kkr
        self.alphas = alphas
        return gamma
    def train(self, train_structures, energies=None, forces=None,
              stresses=None, **kwargs):
        """
        Training data with agni method.
        Args:
            train_structures ([Structure]): The list of Pymatgen Structure object.
            energies ([float]): List of total energies of each structure in
                structures list.
            forces ([np.array]): List of (m, 3) forces array of each structure
                with m atoms in structures list. m can be varied with each
                single structure case.
            stresses (list): List of (6, ) virial stresses of each
                structure in structures list.
        Returns:
            int: always 0.
        """
        train_pool = pool_from(train_structures, energies, forces, stresses)
        # Only the Monte-Carlo-selected subset is used for fitting.
        _, _, features, targets = self.sample(train_pool, **kwargs)
        # NOTE(review): the fitted gamma is discarded here; results live
        # on self (param, xU, yU, alphas, predictor).
        gamma = self.fit(features, targets)
        return 0
    def write_param(self):
        """
        Write fitted agni parameter file to perform lammps calculation.
        Returns:
            list: [pair_style, pair_coeff] LAMMPS settings. The parameter
            file 'ref.agni' is written to the current working directory.
        """
        filename = 'ref.agni'
        if not self.param or self.xU is None or self.yU is None \
                or self.alphas is None:
            raise RuntimeError("The parameters should be provided.")
        assert len(self.alphas) == len(self.yU)
        lines = [' '.join([key] + [str(f) for f in value])
                 if isinstance(value, list)
                 else ' '.join([key, str(value)])
                 for key, value in self.param.items()]
        lines.insert(0, 'generation 1')
        lines.insert(1, 'n_elements 1')
        lines.append('endVar\n')
        # One row per reference point: index, features..., target, alpha.
        for index, (x, y, alpha) in enumerate(zip(self.xU, self.yU, self.alphas)):
            index_str = str(index)
            x_str = ' '.join([str(f) for f in x])
            y_str = str(y)
            alpha_str = str(alpha)
            line = '{} {} {} {}'.format(index_str, x_str, y_str, alpha_str)
            lines.append(line)
        with open(filename, 'w') as f:
            f.write('\n'.join(lines))
        pair_coeff = self.pair_coeff.format(filename, self.specie)
        ff_settings = [self.pair_style, pair_coeff]
        return ff_settings
    def evaluate2(self, test_structures, ref_energies=None,
                  ref_forces=None, ref_stresses=None):
        """
        Evaluate energies, forces and stresses of structures with trained
        interatomic potential, by running LAMMPS via EnergyForceStress.
        Args:
            test_structures ([Structure]): List of Pymatgen Structure Objects.
            ref_energies ([float]): List of DFT-calculated total energies of
                each structure in structures list.
            ref_forces ([np.array]): List of DFT-calculated (m, 3) forces of
                each structure with m atoms in structures list. m can be varied
                with each single structure case.
            ref_stresses (list): List of DFT-calculated (6, ) viriral stresses
                of each structure in structures list.
        Returns:
            (df_orig, df_pred) reference and predicted dataframes.
        """
        predict_pool = pool_from(test_structures, ref_energies,
                                 ref_forces, ref_stresses)
        _, df_orig = convert_docs(predict_pool)
        efs_calculator = EnergyForceStress(ff_settings=self)
        efs_results = efs_calculator.calculate(test_structures)
        assert len(test_structures) == len(efs_results)
        data_pool = []
        for struct, (energy, forces, stresses) in zip(test_structures, efs_results):
            d = {'outputs': {}}
            d['structure'] = struct.as_dict()
            d['num_atoms'] = len(struct)
            d['outputs']['energy'] = energy
            d['outputs']['forces'] = forces
            d['outputs']['virial_stress'] = stresses
            data_pool.append(d)
        _, df_pred = convert_docs(data_pool)
        return df_orig, df_pred
    def evaluate(self, test_structures, ref_energies, ref_forces, ref_stresses):
        """
        Evaluate forces of structures with the in-Python trained model
        (self.describer + self.predictor); energies and stresses are
        reported as zeros.
        Args:
            test_structures ([Structure]): List of Pymatgen Structure Objects.
            ref_energies ([float]): List of DFT-calculated total energies of
                each structure in structures list.
            ref_forces ([np.array]): List of DFT-calculated (m, 3) forces of
                each structure with m atoms in structures list. m can be varied
                with each single structure case.
            ref_stresses (list): List of DFT-calculated (6, ) viriral stresses
                of each structure in structures list.
        Returns:
            (df_orig, df_pred) reference and predicted dataframes.
        """
        predict_pool = pool_from(test_structures, ref_energies,
                                 ref_forces, ref_stresses)
        _, df_orig = convert_docs(predict_pool)
        data_pool = []
        for struct in test_structures:
            d = {'outputs': {}}
            d['structure'] = struct.as_dict()
            d['num_atoms'] = len(struct)
            features = self.describer.describe(struct)
            targets = self.predictor.predict(features.values)
            # Only forces are modelled; energy/stress placeholders are zero.
            d['outputs']['energy'] = 0
            d['outputs']['forces'] = targets.reshape((-1, 3))
            d['outputs']['virial_stress'] = [0., 0., 0., 0., 0., 0.]
            data_pool.append(d)
        _, df_pred = convert_docs(data_pool)
        return df_orig, df_pred
class AGNIPotentialVeidt(PotentialVeidt):
    """
    This class implements Adaptive generalizable neighborhood
    informed potential.
    """
    def __init__(self,
                 name=None,
                 n_elements=1,
                 element="Li",
                 rc=8.0,
                 rs=0.0,
                 eta=None,
                 sigma=1,
                 lambda_=1e-8,
                 xu=None,
                 yu=None,
                 alphas=None,
                 kernel='rbf',
                 **kwargs):
        """
        Args:
            name (str): Name of force field.
            n_elements (int): Number of chemical elements.
            element (str): Element symbol the potential is fitted for.
            rc (float): Cutoff radius of the AGNI fingerprint.
            rs (float): Rs parameter written to the LAMMPS file.
            eta (list): Gaussian width parameters of the fingerprint.
                Defaults to the standard 8-value grid when None.
            sigma (float): Kernel width.
            lambda_ (float): Ridge regularization strength.
            xu (np.array): Pretrained reference features, optional.
            yu (np.array): Pretrained reference targets, optional.
            alphas (np.array): Pretrained dual coefficients, optional.
            kernel (str): Kernel name resolved via ``get_kernel``.
            **kwargs: Extra settings (e.g. ``neighbors``, ``b``) used when
                writing the LAMMPS parameter file.
        """
        if eta is None:
            # BUG FIX: previously a mutable list default argument, shared
            # across all instances constructed with the default.
            eta = [0.0036, 0.0357, 0.0715, 0.1251, 0.2144, 0.3573,
                   0.7147, 1.4294]
        self.name = name if name else 'AGNIPotential'
        self.n_elements = n_elements
        self.element = element
        self.rs = rs
        self.eta = eta
        self.rc = rc
        self.describer = AGNIFingerprints(r_cut=rc, etas=eta)
        self.sigma = sigma
        self.xu = xu
        self.yu = yu
        self.alphas = alphas
        if self.xu is not None:
            self.n_train = len(self.xu)
        if alphas is not None:
            warnings.warn('Pretrained model loaded')
        self.lambda_ = lambda_
        self.kernel = get_kernel(kernel)
        self.kwargs = kwargs
    def fit(self, features, targets):
        """
        Fit the dataset with kernel ridge regression.
        Args:
            features (np.array): features X.
            targets (np.array): targets y.
        """
        self.xu = features
        self.yu = targets
        self.K = self.kernel(self.xu, self.xu, self.sigma)
        # Dual coefficients: (K + lambda*I)^-1 y
        self.inv = np.linalg.inv(self.K + self.lambda_ * np.eye(len(self.yu)))
        alphas = np.dot(self.inv, self.yu)
        self.alphas = alphas
    def predict(self, features):
        """Predict targets for ``features`` via the kernel expansion."""
        return self.kernel(features, self.xu, self.sigma).dot(self.alphas)
    def train(self, train_structures, force_targets):
        """
        Training data with agni method.
        Args:
            train_structures ([Structure]): The list of Pymatgen Structure object.
            force_targets ([np.array]): List of (m, 3) forces array of each structure
                with m atoms in structures list. m can be varied with each
                single structure case.
        """
        features = self.describer.transform(train_structures)
        self.fit(features, force_targets)
    def write_lammps_file(self, filename="param.agni", generation=1):
        """
        Write fitted agni parameter file to perform lammps calculation.
        Args:
            filename (str): Output file name.
            generation (int): Generation tag written to the header.
        """
        if self.xu is None or self.yu is None \
                or self.alphas is None:
            raise RuntimeError("The parameters should be provided.")
        assert len(self.alphas) == len(self.yu)
        line_header = """generation {generation}
n_elements {n_elements}
element {element}
interaction {interaction}
Rc {rc}
Rs {rs}
neighbors {neighbors}
eta {eta}
sigma {sigma}
lambda {lamb}
b {b}
n_train {n_train}
endVar
"""
        lines = []
        n_elements = self.n_elements
        # BUG FIX: ``element`` was never bound locally, so the
        # line_header.format(**locals()) call below raised KeyError.
        element = self.element
        interaction = self.element
        rc = self.rc
        rs = self.rs
        neighbors = self.kwargs.get('neighbors', 500)
        eta = " ".join(["%s" % i for i in self.eta])
        sigma = self.sigma
        lamb = self.lambda_
        b = self.kwargs.get('b', 100)
        n_train = len(self.xu)
        lines.append(line_header.format(**locals()))
        # One row per reference point: index, features..., target, alpha.
        for index, (x, y, alpha) in enumerate(zip(self.xu, self.yu, self.alphas)):
            index_str = str(index)
            x_str = ' '.join([str(f) for f in x])
            y_str = str(y)
            alpha_str = str(alpha)
            line = '{} {} {} {}'.format(index_str, x_str, y_str, alpha_str)
            lines.append(line)
        with open(filename, 'w') as f:
            f.write('\n'.join(lines))
    def predict_from_lammps(self, structures):
        """Write the parameter file and compute energy/forces/stress for
        ``structures`` by running LAMMPS through EnergyForceStress."""
        filename = "%s.agni" % self.element
        self.write_lammps_file(filename=filename, generation=1)
        pair_style = 'pair_style agni'
        pair_coeff = 'pair_coeff * * {} {}'
        pair_coeff = pair_coeff.format(filename, self.element)
        ff_settings = [pair_style, pair_coeff]
        calculator = EnergyForceStress(ff_settings)
        return calculator.calculate(structures)
    @staticmethod
    def from_lammps_file(filename):
        """
        Get the AGNIPotentialVeidt parameters from a ``.agni`` file (the
        format produced by ``write_lammps_file``).
        Args:
            filename (str): Filename to be read.
        Returns:
            AGNIPotentialVeidt
        """
        def read_line(line):
            # Parse "key value value ...". Numeric tokens become floats;
            # non-numeric ones (e.g. the element symbol) stay strings.
            line_splits = line.split()
            param = line_splits[0]
            value = []
            for token in line_splits[1:]:
                try:
                    value.append(float(token))
                except ValueError:
                    value.append(token)
            if len(value) == 1:
                value = value[0]
            return param.lower(), value
        param = {}
        with zopen(filename) as f:
            lines = f.readlines()
        for line_index, line in enumerate(lines):
            if line.startswith('endVar'):
                end_index = line_index
                break
        for i in range(end_index):
            # BUG FIX: this loop previously stripped the stale ``line``
            # variable left over from the enumerate() scan above, so every
            # iteration re-parsed the 'endVar' line instead of lines[i].
            line_strip = lines[i].strip()
            if line_strip and not line_strip.startswith('#'):
                parameter, value = read_line(line_strip)
                param[parameter] = value
        # Reference table: one row per point: index, features..., target, alpha.
        reference_params = np.genfromtxt(StringIO("\n".join(lines[end_index + 1:])))
        param['xu'] = reference_params[:, 1:-2]
        param['yu'] = reference_params[:, -2]
        param['alphas'] = reference_params[:, -1]
        if 'lambda' in param:
            # The constructor argument is spelled ``lambda_``.
            param['lambda_'] = param.pop('lambda')
        # BUG FIX: previously returned AGNIPotential(**param); that class's
        # __init__ accepts only ``name`` and would raise TypeError.
        return AGNIPotentialVeidt(**param)
| materialsvirtuallab/veidt | veidt/potential/agni.py | Python | bsd-3-clause | 19,583 | [
"LAMMPS",
"pymatgen"
] | 304eaf47a52451c6e5073c504b7492abcc0b816327ea0b7533d1aae5ea4f947b |
#!/usr/bin/env python
'''
compliance_checker/protocols/netcdf.py
Functions to assist in determining if the URL points to a netCDF file
'''
def is_netcdf(url):
    '''
    Returns True if the URL points to a valid local netCDF file

    :param str url: Location of file on the file system
    '''
    # Remote resources are never treated as local netCDF files
    if url.startswith('http'):
        return False
    # Trust the extension when present. NOTE: this matches any name ending
    # in "nc" (not just ".nc"); preserved historical behaviour.
    if url.endswith('nc'):
        return True
    # Otherwise sniff the 4-byte magic number at the start of the file
    with open(url, 'rb') as stream:
        magic_number = stream.read(4)
    if len(magic_number) < 4:
        return False
    return is_classic_netcdf(magic_number) or is_hdf5(magic_number)
def is_classic_netcdf(file_buffer):
    '''
    Returns True if the contents of the byte array matches the magic number in
    netCDF files

    :param str file_buffer: Byte-array of the first 4 bytes of a file
    '''
    # Classic netCDF begins with "CDF" followed by the version byte 0x01
    return file_buffer == b'\x43\x44\x46\x01'
def is_hdf5(file_buffer):
    '''
    Returns True if the contents of the byte array matches the magic number in
    HDF5 files

    :param str file_buffer: Byte-array of the first 4 bytes of a file
    '''
    # HDF5 begins with 0x89 followed by "HDF"
    return file_buffer == b'\x89\x48\x44\x46'
| petejan/compliance-checker | compliance_checker/protocols/netcdf.py | Python | apache-2.0 | 1,402 | [
"NetCDF"
] | 66933929c16753e72e3b53e8dfd66269fd648fb63c54a2f089dd3d4bca29692c |
# -*- coding: utf-8 -*-
#
# rate_neuron_dm.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Rate neuron decision making
------------------------------------
A binary decision is implemented in the form of two rate neurons
engaging in mutual inhibition.
Evidence for each decision is reflected by the mean input
experienced by the respective neuron.
The activity of each neuron is recorded using multimeter devices.
It can be observed how noise as well as the difference in evidence
affects which neuron exhibits larger activity and hence which
decision will be made.
"""
import nest
import matplotlib.pyplot as plt
import numpy
##########################################################################
# First,the function ``build_network`` is defined to build the network
# and return the handles of two decision units and the ``multimeter``
def build_network(sigma, dt):
    """Build two mutually inhibiting rate neurons and attach a multimeter.

    Args:
        sigma: noise amplitude of the linear rate neurons.
        dt: simulation resolution in ms (also used as recording interval
            and connection delay).

    Returns:
        Tuple ``(D1, D2, mm)``: the two decision units and the multimeter.
    """
    nest.ResetKernel()
    nest.SetKernelStatus({'resolution': dt, 'use_wfr': False})

    neuron_params = {'lambda': 0.1, 'sigma': sigma, 'tau': 1.,
                     'rectify_output': True}
    unit_one = nest.Create('lin_rate_ipn', params=neuron_params)
    unit_two = nest.Create('lin_rate_ipn', params=neuron_params)

    # Mutual inhibition between the two decision units
    nest.Connect(unit_one, unit_two, 'all_to_all', {
        'synapse_model': 'rate_connection_instantaneous', 'weight': -0.2})
    nest.Connect(unit_two, unit_one, 'all_to_all', {
        'synapse_model': 'rate_connection_instantaneous', 'weight': -0.2})

    # Record the rate of both units at every resolution step
    recorder = nest.Create('multimeter')
    recorder.set(interval=dt, record_from=['rate'])
    nest.Connect(recorder, unit_one, syn_spec={'delay': dt})
    nest.Connect(recorder, unit_two, syn_spec={'delay': dt})
    return unit_one, unit_two, recorder
###########################################################################
# The function ``build_network`` takes the noise parameter sigma
# and the time resolution as arguments.
# First, the Kernel is reset and the ``use_wfr`` (waveform-relaxation)
# is set to false while the resolution is set to the specified value
# `dt`. Two rate neurons with linear activation functions are created
# and the handle is stored in the variables `D1` and `D2`. The output
# of both decision units is rectified at zero. The two decisions
# units are coupled via mutual inhibition. Next the multimeter is
# created and the handle stored in mm and the option ``record_from``
# is set. The multimeter is then connected to the two units in order
# to 'observe' them. The ``Connect`` function takes the handles as
# input.
###########################################################################
# The decision making process is simulated for three different levels
# of noise and three differences in evidence for a given decision. The
# activity of both decision units is plotted for each scenario.
fig_size = [14, 8]
fig_rows = 3
fig_cols = 3
fig_plots = fig_rows * fig_cols
face = 'white'
edge = 'white'
ax = [None] * fig_plots
fig = plt.figure(facecolor=face, edgecolor=edge, figsize=fig_size)
dt = 1e-3
sigma = [0.0, 0.1, 0.2]       # noise level, one per figure row
dE = [0.0, 0.004, 0.008]      # evidence difference, one per figure column
# BUG FIX: numpy.linspace requires an integer ``num``; the previous float
# expression (200 / dt - 1) raises TypeError on modern NumPy. Truncation
# reproduces the value the legacy float-accepting behaviour produced.
T = numpy.linspace(0, 200, int(200 / dt - 1))
for i in range(9):
    c = i % 3
    r = int(i / 3)
    D1, D2, mm = build_network(sigma[r], dt)
    ###########################################################################
    # First using build_network the network is build and the handles of
    # the decision units and the multimeter are stored in `D1`, `D2` and `mm`
    nest.Simulate(100.0)
    D1.mu = 1. + dE[c]
    D2.mu = 1. - dE[c]
    nest.Simulate(100.0)
    ########################################################################
    # The network is simulated using ``Simulate``, which takes the desired
    # simulation time in milliseconds and advances the network state by
    # this amount of time. After an initial period in the absence of evidence
    # for either decision, evidence is given by changing the state of each
    senders = mm.get('events', 'senders')
    voltages = mm.get('events', 'rate')
    ########################################################################
    # The activity values ('voltages') are read out by the multimeter
    ax[i] = fig.add_subplot(fig_rows, fig_cols, i + 1)
    ax[i].plot(T, voltages[numpy.where(senders == D1.global_id)],
               'b', linewidth=2, label="D1")
    ax[i].plot(T, voltages[numpy.where(senders == D2.global_id)],
               'r', linewidth=2, label="D2")
    ax[i].set_ylim([-.5, 12.])
    ax[i].get_xaxis().set_ticks([])
    ax[i].get_yaxis().set_ticks([])
    if c == 0:
        ax[i].set_ylabel(r"activity ($\sigma=%.1f$) " % (sigma[r]))
        ax[i].get_yaxis().set_ticks([0, 3, 6, 9, 12])
    if r == 0:
        ax[i].set_title(r"$\Delta E=%.3f$ " % (dE[c]))
    if c == 2:
        plt.legend(loc=0)
    if r == 2:
        ax[i].get_xaxis().set_ticks([0, 50, 100, 150, 200])
        ax[i].set_xlabel('time (ms)')
########################################################################
# The activity of the two units is plotted in each scenario.
#
# In the absence of noise, the network will not make a decision if evidence
# for both choices is equal. With noise, this symmetry can be broken and a
# decision wil be taken despite identical evidence.
#
# As evidence for `D1` relative to `D2` increases, it becomes more likely that
# the corresponding decision will be taken. For small differences in the
# evidence for the two decisions, noise can lead to the 'wrong' decision.
plt.show()
| weidel-p/nest-simulator | pynest/examples/rate_neuron_dm.py | Python | gpl-2.0 | 5,952 | [
"NEURON"
] | b6c97726cebc22969f8fdeb43e242327acfabb46a5ef9e081b3ab67cea71470f |
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
## PYTHON3 - These types are no longer present in the builtin "types" module
# Compatibility aliases so code written against the Python 2 ``types``
# module keeps working under Python 3.
StringType = str
UnicodeType = str       # Python 3 has no separate unicode type
ListType = list
DictType = dict
IntType = int
LongType = int          # Python 3 ints are unbounded; no separate long
FloatType = float
BooleanType = bool
TupleType = tuple
InstanceType = object   # old-style instance type; ``object`` approximates it
ident = '$Id: Types.py 1496 2010-03-04 23:46:17Z pooryorick $'
from .version import __version__
import collections
import base64
import cgi
import urllib.request, urllib.parse, urllib.error
import copy
import re
import time
from SOAPpy.Types import *
# SOAPpy-py3 modules
from .Errors import *
from .NS import NS
from .Utilities import encodeHexString, cleanDate
from .Config import Config
NaN = float('NaN')
PosInf = float('Inf')
NegInf = -PosInf
###############################################################################
# Utility functions
###############################################################################
def isPrivate(name):
    """Return True if *name* is private by SOAP naming convention (leading
    underscore). Robust to the empty string, which previously raised
    IndexError via ``name[0]``."""
    return name.startswith('_')
def isPublic(name):
    """Return True if *name* is public (no leading underscore).

    Uses str.startswith so an empty name is handled (the original
    ``name[0]`` raised IndexError on "").
    """
    return not name.startswith('_')
###############################################################################
# Types and Wrappers
###############################################################################
class anyType:
    """Base class for all SOAP/XML-Schema wrapper types.

    Subclasses override _checkValueSpace() to validate/normalise the raw
    value and _marshalData() to render it as text; this class handles the
    common name/namespace/attribute bookkeeping and caching.
    """

    _validURIs = (NS.XSD, NS.XSD2, NS.XSD3, NS.ENC)

    def __init__(self, data = None, name = None, typed = 1, attrs = None):
        if self.__class__ == anyType:
            raise Error("anyType can't be instantiated directly")

        # name may be a (namespace, localname) pair or a bare local name.
        if type(name) in (list, tuple):
            self._ns, self._name = name
        else:
            self._ns = self._validURIs[0]
            self._name = name

        self._typed = typed
        self._attrs = {}
        self._cache = None          # memoised marshalled form

        self._type = self._typeName()

        self._data = self._checkValueSpace(data)

        if attrs is not None:
            self._setAttrs(attrs)

    def __str__(self):
        if hasattr(self, '_name') and self._name:
            return "<%s %s at %d>" % (self.__class__, self._name, id(self))
        return "<%s at %d>" % (self.__class__, id(self))

    __repr__ = __str__

    def _checkValueSpace(self, data):
        # Default: accept anything; subclasses validate and normalise.
        return data

    def _marshalData(self):
        return str(self._data)

    def _marshalAttrs(self, ns_map, builder):
        # Render the stored attributes as an XML attribute string.
        a = ''
        for attr, value in list(self._attrs.items()):
            ns, n = builder.genns(ns_map, attr[0])
            # html.escape replaces cgi.escape (deprecated 3.2, removed in
            # 3.8); quote=True escapes '"' as cgi.escape(s, 1) did (it
            # also escapes "'", which is harmless in XML attributes).
            a += n + ' %s%s="%s"' % \
                (ns, attr[1], html.escape(str(value), quote=True))
        return a

    def _fixAttr(self, attr):
        # Normalise an attribute key to a (namespaceURI-or-None, name) tuple.
        if type(attr) in (StringType, UnicodeType):
            attr = (None, attr)
        elif type(attr) == list:
            attr = tuple(attr)
        elif type(attr) != tuple:
            raise AttributeError("invalid attribute type")
        if len(attr) != 2:
            raise AttributeError("invalid attribute length")
        if type(attr[0]) not in (type(None), str):
            raise AttributeError("invalid attribute namespace URI type")
        return attr

    def _getAttr(self, attr):
        attr = self._fixAttr(attr)
        try:
            return self._attrs[attr]
        except (KeyError, TypeError):
            # Absent (or unhashable) attributes read as None.
            return None

    def _setAttr(self, attr, value):
        attr = self._fixAttr(attr)
        if type(value) is StringType:
            value = str(value)   # no-op on Python 3, kept for compatibility
        self._attrs[attr] = value

    def _setAttrs(self, attrs):
        # Accepts a flat [key, value, ...] sequence, a dict, or another
        # anyType instance whose attributes are copied.
        if type(attrs) in (list, tuple):
            for i in range(0, len(attrs), 2):
                self._setAttr(attrs[i], attrs[i + 1])
            return
        if type(attrs) == dict:
            d = attrs
        elif isinstance(attrs, anyType):
            d = attrs._attrs
        else:
            raise AttributeError("invalid attribute type")
        for attr, value in list(d.items()):
            self._setAttr(attr, value)

    def _setMustUnderstand(self, val):
        self._setAttr((NS.ENV, "mustUnderstand"), val)

    def _getMustUnderstand(self):
        return self._getAttr((NS.ENV, "mustUnderstand"))

    def _setActor(self, val):
        self._setAttr((NS.ENV, "actor"), val)

    def _getActor(self):
        return self._getAttr((NS.ENV, "actor"))

    def _typeName(self):
        # "fooType" -> schema type name "foo".
        return self.__class__.__name__[:-4]

    def _validNamespaceURI(self, URI, strict):
        if not hasattr(self, '_typed') or not self._typed:
            return None
        if URI in self._validURIs:
            return URI
        if not strict:
            return self._ns
        raise AttributeError("not a valid namespace for type %s" % self._type)
class voidType(anyType):
    """Represents an absent (void) value; imposes no value-space checks."""
    pass
class stringType(anyType):
    """xsd:string — wraps a Python string and marshals it verbatim."""

    def _checkValueSpace(self, data):
        # A value is mandatory and must already be a string.
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError("invalid %s type:" % self._type)
        return data

    def _marshalData(self):
        # Strings need no conversion.
        return self._data
class untypedType(stringType):
    """A string serialised without any xsi:type annotation."""
    def __init__(self, data = None, name = None, attrs = None):
        # typed=0 suppresses type information in the marshalled output.
        stringType.__init__(self, data, name, 0, attrs)
# Thin xsd string-derived types: they share stringType's behaviour and
# differ only in the schema type name derived from the class name.
class IDType(stringType): pass
class NCNameType(stringType): pass
class NameType(stringType): pass
class ENTITYType(stringType): pass
class IDREFType(stringType): pass
class languageType(stringType): pass
class NMTOKENType(stringType): pass
class QNameType(stringType): pass
class tokenType(anyType):
    """xsd:token — a string with no tab/newline, no leading or trailing
    space, and no internal double spaces."""
    _validURIs = (NS.XSD2, NS.XSD3)
    __invalidre = '[\n\t]|^ | $| '
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError("invalid %s type" % self._type)
        # Lazily compile the pattern on first use.  NOTE(review): the
        # compiled form is stored on the instance (name-mangled), so every
        # instance recompiles once — harmless but wasteful.
        if type(self.__invalidre) == StringType:
            self.__invalidre = re.compile(self.__invalidre)
        if self.__invalidre.search(data):
            raise ValueError("invalid %s value" % self._type)
        return data
class normalizedStringType(anyType):
    """xsd:normalizedString — a string containing no newline, carriage
    return or tab characters."""

    _validURIs = (NS.XSD3,)

    # Compiled once at class-definition time.  The original compiled it
    # lazily into an instance attribute, so every instance paid for its
    # own re.compile().
    __invalidre = re.compile('[\n\r\t]')

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError("invalid %s type" % self._type)
        if self.__invalidre.search(data):
            raise ValueError("invalid %s value" % self._type)
        return data
class CDATAType(normalizedStringType):
    """Pre-XSD3 name for normalizedString."""
    _validURIs = (NS.XSD2,)
class booleanType(anyType):
    """xsd:boolean — stored internally as the int 0 or 1, marshalled as
    'false'/'true'."""

    def __int__(self):
        return self._data

    # Python 2 truth-protocol name, kept for backward compatibility.
    __nonzero__ = __int__

    def __bool__(self):
        # Bug fix: in Python 2 `__nonzero__ = __int__` made
        # booleanType(0) falsy; the Python 3 port lost that because the
        # truth protocol is now __bool__.  Restore the original semantics
        # (and __bool__ must return a bool, not an int).
        return bool(self._data)

    def _marshalData(self):
        return ['false', 'true'][self._data]

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        # Accept the common textual and numeric spellings.
        if data in (0, '0', 'false', ''):
            return 0
        if data in (1, '1', 'true'):
            return 1
        raise ValueError("invalid %s value" % self._type)
class decimalType(anyType):
    """xsd:decimal — accepts any Python numeric value."""

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        # NB: raises the SOAP Error type here, unlike sibling classes
        # which raise ValueError.
        if type(data) not in (int, LongType, FloatType):
            raise Error("invalid %s value" % self._type)
        return data
class floatType(anyType):
    """xsd:float — a number within IEEE-754 single-precision range."""

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        acceptable = (type(data) in (int, LongType, FloatType) and
                      -3.4028234663852886E+38 <= data <= 3.4028234663852886E+38)
        if not acceptable:
            raise ValueError("invalid %s value: %s" % (self._type, repr(data)))
        return data

    def _marshalData(self):
        # 18 significant digits guarantee an exact round-trip.
        return "%.18g" % self._data
class doubleType(anyType):
    """xsd:double — a number within IEEE-754 double-precision range."""

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        acceptable = (type(data) in (int, LongType, FloatType) and
                      -1.7976931348623158E+308 <= data <= 1.7976931348623157E+308)
        if not acceptable:
            raise ValueError("invalid %s value: %s" % (self._type, repr(data)))
        return data

    def _marshalData(self):
        # 18 significant digits guarantee an exact round-trip.
        return "%.18g" % self._data
class durationType(anyType):
    """xsd:duration — held internally as a 6-tuple
    (years, months, days, hours, minutes, seconds); marshalled in
    ISO-8601 form ([-]PnYnMnDTnHnMnS)."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        try:
            # A tuple or a scalar is OK, but make them into a list
            if type(data) == tuple:
                data = list(data)
            elif type(data) != list:
                data = [data]
            if len(data) > 6:
                raise Exception("too many values")
            # Now check the types of all the components, and find
            # the first nonzero element along the way.
            f = -1
            for i in range(len(data)):
                if data[i] == None:
                    data[i] = 0
                    continue
                if type(data[i]) not in \
                    (int, LongType, FloatType):
                    raise Exception("element %d a bad type" % i)
                if data[i] and f == -1:
                    f = i
            # If they're all 0, just use zero seconds.
            if f == -1:
                self._cache = 'PT0S'
                return (0,) * 6
            # Make sure only the last nonzero element has a decimal fraction
            # and only the first element is negative.
            d = -1
            for i in range(f, len(data)):
                if data[i]:
                    if d != -1:
                        raise Exception("all except the last nonzero element must be " \
                            "integers")
                    if data[i] < 0 and i > f:
                        raise Exception("only the first nonzero element can be negative")
                    elif data[i] != int(data[i]):
                        d = i
            # Pad the list on the left if necessary.
            if len(data) < 6:
                n = 6 - len(data)
                f += n
                d += n
                data = [0] * n + data
            # Save index of the first nonzero element and the decimal
            # element for _marshalData.
            self.__firstnonzero = f
            self.__decimal = d
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return tuple(data)
    def _marshalData(self):
        # Render lazily and memoise; 't' records whether the 'T' time
        # separator has been emitted yet.
        if self._cache == None:
            d = self._data
            t = 0
            if d[self.__firstnonzero] < 0:
                s = '-P'
            else:
                s = 'P'
            t = 0
            for i in range(self.__firstnonzero, len(d)):
                if d[i]:
                    if i > 2 and not t:
                        s += 'T'
                        t = 1
                    if self.__decimal == i:
                        s += "%g" % abs(d[i])
                    else:
                        s += "%d" % int(abs(d[i]))
                    s += ['Y', 'M', 'D', 'H', 'M', 'S'][i]
            self._cache = s
        return self._cache
class timeDurationType(durationType):
    """Older schema/SOAP-ENC name for duration."""
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class dateTimeType(anyType):
    """xsd:dateTime — held as a UTC 6-tuple (Y, M, D, h, m, s); the
    seconds field may carry a fractional part."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            # None means "now"; ints/floats are POSIX timestamps;
            # sequences are (partial) struct_time-style tuples.
            if data == None:
                data = time.time()
            if (type(data) in (int, LongType)):
                data = list(time.gmtime(data)[:6])
            elif (type(data) == FloatType):
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (list, tuple):
                if len(data) < 6:
                    raise Exception("not enough values")
                if len(data) > 9:
                    raise Exception("too many values")
                data = list(data[:6])
                cleanDate(data)
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dT%02d:%02d:%02d" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            # Append any fractional seconds, then the UTC designator.
            f = d[5] - int(d[5])
            if f != 0:
                s += ("%g" % f)[1:]
            s += 'Z'
            self._cache = s
        return self._cache
class recurringInstantType(anyType):
    """xsd:recurringInstant — like dateTime, but leading fields may be
    None to express recurrence; omitted fields marshal as dashes."""
    _validURIs = (NS.XSD,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = list(time.gmtime(time.time())[:6])
            if (type(data) in (int, LongType)):
                data = list(time.gmtime(data)[:6])
            elif (type(data) == FloatType):
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (list, tuple):
                if len(data) < 1:
                    raise Exception("not enough values")
                if len(data) > 9:
                    raise Exception("too many values")
                data = list(data[:6])
                if len(data) < 6:
                    data += [0] * (6 - len(data))
                # Find the first non-None field; only leading fields may
                # be omitted.
                f = len(data)
                for i in range(f):
                    if data[i] == None:
                        if f < i:
                            raise Exception("only leftmost elements can be none")
                    else:
                        f = i
                        break
                cleanDate(data, f)
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            e = list(d)
            neg = ''
            # Year: '--' when omitted; zero-padded (2 or 4 digits) otherwise.
            if not e[0]:
                e[0] = '--'
            else:
                if e[0] < 0:
                    neg = '-'
                    e[0] = abs(e[0])
                if e[0] < 100:
                    e[0] = '-' + "%02d" % e[0]
                else:
                    e[0] = "%04d" % e[0]
            for i in range(1, len(e)):
                if e[i] == None or (i < 3 and e[i] == 0):
                    e[i] = '-'
                else:
                    if e[i] < 0:
                        neg = '-'
                        e[i] = abs(e[i])
                    e[i] = "%02d" % e[i]
            # Append any fractional seconds.
            if d[5]:
                f = abs(d[5] - int(d[5]))
                if f:
                    e[5] += ("%g" % f)[1:]
            s = "%s%s-%s-%sT%s:%s:%sZ" % ((neg,) + tuple(e))
            self._cache = s
        return self._cache
class timeInstantType(dateTimeType):
    """Pre-XSD3 name for dateTime."""
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class timePeriodType(dateTimeType):
    """XSD2/SOAP-ENC period type; same value space as dateTime."""
    _validURIs = (NS.XSD2, NS.ENC)
class timeType(anyType):
    """xsd:time — a UTC (hour, minute, second) triple; the seconds field
    may be fractional."""
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[3:6]
            elif (type(data) == FloatType):
                f = data - int(data)
                data = list(time.gmtime(int(data))[3:6])
                data[2] += f
            elif type(data) in (int, LongType):
                data = time.gmtime(data)[3:6]
            elif type(data) in (list, tuple):
                if len(data) == 9:
                    data = data[3:6]
                elif len(data) > 3:
                    raise Exception("too many values")
                # Normalise via cleanDate() with dummy date fields prepended.
                data = [None, None, None] + list(data)
                if len(data) < 6:
                    data += [0] * (6 - len(data))
                cleanDate(data, 3)
                data = data[3:]
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            #s = ''
            #
            #s = time.strftime("%H:%M:%S", (0, 0, 0) + d + (0, 0, -1))
            s = "%02d:%02d:%02d" % d
            # Append any fractional seconds, then the UTC designator.
            f = d[2] - int(d[2])
            if f != 0:
                s += ("%g" % f)[1:]
            s += 'Z'
            self._cache = s
        return self._cache
class dateType(anyType):
    """xsd:date — a (year, month, day) triple."""
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:3]
            elif type(data) in (int, LongType, FloatType):
                data = time.gmtime(data)[0:3]
            elif type(data) in (list, tuple):
                if len(data) == 9:
                    data = data[0:3]
                elif len(data) > 3:
                    raise Exception("too many values")
                data = list(data)
                # Missing month/day fields default to 1.
                if len(data) < 3:
                    data += [1, 1, 1][len(data):]
                # Append zero time fields so cleanDate() can validate.
                data += [0, 0, 0]
                cleanDate(data)
                data = data[:3]
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class gYearMonthType(anyType):
    """xsd:gYearMonth — a (year, month) pair."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:2]
            elif type(data) in (int, LongType, FloatType):
                data = time.gmtime(data)[0:2]
            elif type(data) in (list, tuple):
                if len(data) == 9:
                    data = data[0:2]
                elif len(data) > 2:
                    raise Exception("too many values")
                data = list(data)
                if len(data) < 2:
                    data += [1, 1][len(data):]
                # Append day/time fields so cleanDate() can validate.
                data += [1, 0, 0, 0]
                cleanDate(data)
                data = data[:2]
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class gYearType(anyType):
    """xsd:gYear — a single (possibly negative) year number."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:1]
            elif type(data) in (int, LongType, FloatType):
                data = [data]
            if type(data) in (list, tuple):
                if len(data) == 9:
                    data = data[0:1]
                elif len(data) < 1:
                    raise Exception("too few values")
                elif len(data) > 1:
                    raise Exception("too many values")
                if type(data[0]) == FloatType:
                    # Python 2 relic (int vs long): both branches are
                    # identical under Python 3.
                    try: s = int(data[0])
                    except: s = int(data[0])
                    if s != data[0]:
                        raise Exception("not integral")
                    data = [s]
                elif type(data[0]) not in (int, LongType):
                    raise Exception("bad type")
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return data[0]
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04dZ" % abs(d)
            if d < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class centuryType(anyType):
    """Pre-XSD3 'century' type — the year number divided by 100."""

    _validURIs = (NS.XSD2, NS.ENC)

    def _checkValueSpace(self, data):
        try:
            if data is None:
                # Default to the current century.  Bug fix: the original
                # divided the 1-element *tuple* by 100 (TypeError).
                data = [time.gmtime(time.time())[0] // 100]
            elif type(data) in (int, LongType, FloatType):
                data = [data]

            if type(data) in (list, tuple):
                if len(data) == 9:
                    # A full struct_time: derive the century from the year.
                    # Bug fix: the original divided a slice (tuple) by 100.
                    data = [data[0] // 100]
                elif len(data) < 1:
                    raise Exception("too few values")
                elif len(data) > 1:
                    raise Exception("too many values")

                if type(data[0]) == FloatType:
                    s = int(data[0])
                    if s != data[0]:
                        raise Exception("not integral")
                    data = [s]
                elif type(data[0]) not in (int, LongType):
                    raise Exception("bad type")
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))

        return data[0]

    def _marshalData(self):
        if self._cache is None:
            d = self._data
            s = "%02dZ" % abs(d)
            if d < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class yearType(gYearType):
    """Pre-XSD3 name for gYear."""
    _validURIs = (NS.XSD2, NS.ENC)
class gMonthDayType(anyType):
    """xsd:gMonthDay — a recurring (month, day) pair."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[1:3]
            elif type(data) in (int, LongType, FloatType):
                data = time.gmtime(data)[1:3]
            elif type(data) in (list, tuple):
                # NOTE(review): the 9-tuple branch takes fields [0:2]
                # (year, month) while the scalar branches use [1:3]
                # (month, day) — looks inconsistent; confirm upstream.
                if len(data) == 9:
                    data = data[0:2]
                elif len(data) > 2:
                    raise Exception("too many values")
                data = list(data)
                if len(data) < 2:
                    data += [1, 1][len(data):]
                # Prepend a dummy year and append zero time fields so
                # cleanDate() can validate the month/day.
                data = [0] + data + [0, 0, 0]
                cleanDate(data, 1)
                data = data[1:3]
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            self._cache = "--%02d-%02dZ" % self._data
        return self._cache
class recurringDateType(gMonthDayType):
    """Pre-XSD3 name for gMonthDay."""
    _validURIs = (NS.XSD2, NS.ENC)
class gMonthType(anyType):
    """xsd:gMonth — a recurring month number (1-12)."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[1:2]
            elif type(data) in (int, LongType, FloatType):
                data = [data]
            if type(data) in (list, tuple):
                if len(data) == 9:
                    data = data[1:2]
                elif len(data) < 1:
                    raise Exception("too few values")
                elif len(data) > 1:
                    raise Exception("too many values")
                if type(data[0]) == FloatType:
                    # Python 2 relic: both branches are identical now.
                    try: s = int(data[0])
                    except: s = int(data[0])
                    if s != data[0]:
                        raise Exception("not integral")
                    data = [s]
                elif type(data[0]) not in (int, LongType):
                    raise Exception("bad type")
                if data[0] < 1 or data[0] > 12:
                    raise Exception("bad value")
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return data[0]
    def _marshalData(self):
        if self._cache == None:
            self._cache = "--%02d--Z" % self._data
        return self._cache
class monthType(gMonthType):
    """Pre-XSD3 name for gMonth."""
    _validURIs = (NS.XSD2, NS.ENC)
class gDayType(anyType):
    """xsd:gDay — a recurring day-of-month number (1-31)."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[2:3]
            elif type(data) in (int, LongType, FloatType):
                data = [data]
            if type(data) in (list, tuple):
                if len(data) == 9:
                    data = data[2:3]
                elif len(data) < 1:
                    raise Exception("too few values")
                elif len(data) > 1:
                    raise Exception("too many values")
                if type(data[0]) == FloatType:
                    # Python 2 relic: both branches are identical now.
                    try: s = int(data[0])
                    except: s = int(data[0])
                    if s != data[0]:
                        raise Exception("not integral")
                    data = [s]
                elif type(data[0]) not in (int, LongType):
                    raise Exception("bad type")
                if data[0] < 1 or data[0] > 31:
                    raise Exception("bad value")
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return data[0]
    def _marshalData(self):
        if self._cache == None:
            self._cache = "---%02dZ" % self._data
        return self._cache
class recurringDayType(gDayType):
    """Pre-XSD3 name for gDay."""
    _validURIs = (NS.XSD2, NS.ENC)
class hexBinaryType(anyType):
    """xsd:hexBinary — string payload marshalled as hex digits."""

    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        # Value is mandatory and must be a string.
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError("invalid %s type" % self._type)
        return data

    def _marshalData(self):
        # Hex encoding is computed once and memoised in _cache.
        if self._cache is None:
            self._cache = encodeHexString(self._data)
        return self._cache
class base64BinaryType(anyType):
    """xsd:base64Binary — payload marshalled as base64 text."""

    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        # Generalised to also accept bytes, the natural payload type.
        if type(data) not in (StringType, UnicodeType, bytes):
            raise AttributeError("invalid %s type" % self._type)
        return data

    def _marshalData(self):
        if self._cache is None:
            # Bug fix: base64.encodestring() was removed in Python 3.9;
            # encodebytes() is the supported replacement and requires a
            # bytes argument, so str payloads are encoded first.
            raw = self._data if isinstance(self._data, bytes) \
                else self._data.encode('utf-8')
            self._cache = base64.encodebytes(raw).decode('ascii')
        return self._cache
class base64Type(base64BinaryType):
    """SOAP-ENC base64 alias of base64Binary."""
    _validURIs = (NS.ENC,)
class binaryType(anyType):
    """SOAP binary type: payload with a selectable 'encoding' attribute of
    either 'base64' or 'hex'."""

    _validURIs = (NS.XSD, NS.ENC)

    def __init__(self, data, name = None, typed = 1, encoding = 'base64',
                 attrs = None):
        anyType.__init__(self, data, name, typed, attrs)
        self._setAttr('encoding', encoding)

    def _marshalData(self):
        if self._cache is None:
            if self._getAttr((None, 'encoding')) == 'base64':
                # Bug fix: base64.encodestring() was removed in Python
                # 3.9; encodebytes() is the supported replacement and
                # requires bytes, so str payloads are encoded first.
                raw = self._data if isinstance(self._data, bytes) \
                    else self._data.encode('utf-8')
                self._cache = base64.encodebytes(raw).decode('ascii')
            else:
                self._cache = encodeHexString(self._data)
        return self._cache

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        # Generalised to also accept bytes, the natural payload type.
        if type(data) not in (StringType, UnicodeType, bytes):
            raise AttributeError("invalid %s type" % self._type)
        return data

    def _setAttr(self, attr, value):
        attr = self._fixAttr(attr)
        if attr[1] == 'encoding':
            if attr[0] != None or value not in ('base64', 'hex'):
                raise AttributeError("invalid encoding")
            # Changing the encoding invalidates the memoised marshal form.
            self._cache = None
        anyType._setAttr(self, attr, value)
class anyURIType(anyType):
    """xsd:anyURI — a string marshalled percent-encoded."""

    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        # Value is mandatory and must be a string.
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError("invalid %s type" % self._type)
        return data

    def _marshalData(self):
        if self._cache is None:
            # Percent-encode unsafe characters for transport.
            self._cache = urllib.parse.quote(self._data)
        return self._cache
class uriType(anyURIType):
    """Pre-XSD3 name for anyURI."""
    _validURIs = (NS.XSD,)
class uriReferenceType(anyURIType):
    """XSD2 name for anyURI."""
    _validURIs = (NS.XSD2,)
class NOTATIONType(anyType):
    """xsd:NOTATION — abstract; must be subclassed before use."""
    def __init__(self, data, name = None, typed = 1, attrs = None):
        if self.__class__ == NOTATIONType:
            raise Error("a NOTATION can't be instantiated directly")
        anyType.__init__(self, data, name, typed, attrs)
class ENTITIESType(anyType):
    """xsd:ENTITIES — a whitespace-separated list of entity names."""

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        # A lone string becomes a one-element tuple.
        if type(data) in (StringType, UnicodeType):
            return (data,)
        is_sequence = type(data) in (list, tuple)
        if not is_sequence or \
           any(type(x) not in (StringType, UnicodeType) for x in data):
            raise AttributeError("invalid %s type" % self._type)
        return data

    def _marshalData(self):
        # Names are joined with single spaces.
        return ' '.join(self._data)
# Whitespace-separated list types; behaviour inherited from ENTITIESType.
class IDREFSType(ENTITIESType): pass
class NMTOKENSType(ENTITIESType): pass
class integerType(anyType):
    """xsd:integer — an arbitrary-precision Python int."""

    def _checkValueSpace(self, data):
        # A value is mandatory.
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        # Exact-type check (deliberately rejects bool, a subclass of int).
        if type(data) not in (int, LongType):
            raise ValueError("invalid %s value" % self._type)
        return data
# Bounded integer types: each _checkValueSpace() range-checks a plain
# Python int.  The *_IntegerType variants carry the hyphenated pre-XSD2
# type names via _typeName().
class nonPositiveIntegerType(anyType):
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (int, LongType) or data > 0:
            raise ValueError("invalid %s value" % self._type)
        return data
class non_Positive_IntegerType(nonPositiveIntegerType):
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return 'non-positive-integer'
class negativeIntegerType(anyType):
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (int, LongType) or data >= 0:
            raise ValueError("invalid %s value" % self._type)
        return data
class negative_IntegerType(negativeIntegerType):
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return 'negative-integer'
class longType(anyType):
    # 64-bit signed range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (int, LongType) or \
            data < -9223372036854775808 or \
            data > 9223372036854775807:
            raise ValueError("invalid %s value" % self._type)
        return data
class intType(anyType):
    # 32-bit signed range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (int, LongType) or \
            data < -2147483648 or \
            data > 2147483647:
            raise ValueError("invalid %s value" % self._type)
        return data
class shortType(anyType):
    # 16-bit signed range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (int, LongType) or \
            data < -32768 or \
            data > 32767:
            raise ValueError("invalid %s value" % self._type)
        return data
class byteType(anyType):
    # 8-bit signed range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (int, LongType) or \
            data < -128 or \
            data > 127:
            raise ValueError("invalid %s value" % self._type)
        return data
class nonNegativeIntegerType(anyType):
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (int, LongType) or data < 0:
            raise ValueError("invalid %s value" % self._type)
        return data
class non_Negative_IntegerType(nonNegativeIntegerType):
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return 'non-negative-integer'
class unsignedLongType(anyType):
    # 64-bit unsigned range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (int, LongType) or \
            data < 0 or \
            data > 18446744073709551615:
            raise ValueError("invalid %s value" % self._type)
        return data
# NOTE(review): this class name breaks the "*Type" convention, so the
# inherited _typeName() (which strips the last four characters) yields
# "unsigne" as the marshalled type name — confirm against upstream before
# changing, since callers may reference `unsignedint` directly.
class unsignedint(anyType):
    # 32-bit unsigned range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (int, LongType) or \
            data < 0 or \
            data > 4294967295:
            raise ValueError("invalid %s value" % self._type)
        return data
class unsignedShortType(anyType):
    # 16-bit unsigned range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (int, LongType) or \
            data < 0 or \
            data > 65535:
            raise ValueError("invalid %s value" % self._type)
        return data
class unsignedByteType(anyType):
    # 8-bit unsigned range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (int, LongType) or \
            data < 0 or \
            data > 255:
            raise ValueError("invalid %s value" % self._type)
        return data
class positiveIntegerType(anyType):
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (int, LongType) or data <= 0:
            raise ValueError("invalid %s value" % self._type)
        return data
class positive_IntegerType(positiveIntegerType):
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return 'positive-integer'
# Now compound types

class compoundType(anyType):
    """Base for SOAP types with named sub-elements (structs, arrays).

    Member values are stored directly in __dict__; _keyord remembers the
    order in which members were added so they can be re-serialised in
    sequence.
    """

    def __init__(self, data = None, name = None, typed = 1, attrs = None):
        if self.__class__ == compoundType:
            raise Error("a compound can't be instantiated directly")

        anyType.__init__(self, data, name, typed, attrs)
        self._keyord = []

        if type(data) == dict:
            self.__dict__.update(data)

    def _aslist(self, item=None):
        # Member values in insertion order (or a single one by position).
        if item is not None:
            return self.__dict__[self._keyord[item]]
        else:
            return [self.__dict__[x] for x in self._keyord]

    def _asdict(self, item=None, encoding=Config.dict_encoding):
        # Returns {encoded-name: value}; keys are byte strings.
        if item is not None:
            if type(item) in (UnicodeType, StringType):
                item = item.encode(encoding)
            return self.__dict__[item]
        else:
            retval = {}
            def fun(x): retval[x.encode(encoding)] = self.__dict__[x]

            if hasattr(self, '_keyord'):
                list(map(fun, self._keyord))
            else:
                for name in dir(self):
                    if isPublic(name):
                        retval[name] = getattr(self, name)
            return retval

    def __getitem__(self, item):
        # Integer indexing goes through _keyord; strings are attributes.
        if not isinstance(item, str):
            return self.__dict__[self._keyord[item]]
        else:
            return getattr(self, item)

    def __len__(self):
        return len(self._keyord)

    def __bool__(self):
        # Bug fix: __bool__ must return a bool in Python 3; returning the
        # int 1 raised TypeError on every truth test.
        return True

    def _keys(self):
        return [x for x in list(self.__dict__.keys()) if x[0] != '_']

    def _addItem(self, name, value, attrs = None):
        # Repeated names accumulate into a list.
        if name in self._keyord:
            if type(self.__dict__[name]) != list:
                self.__dict__[name] = [self.__dict__[name]]
            self.__dict__[name].append(value)
        else:
            self.__dict__[name] = value
            self._keyord.append(name)

    def _placeItem(self, name, value, pos, subpos = 0, attrs = None):
        if subpos == 0 and type(self.__dict__[name]) != list:
            self.__dict__[name] = value
        else:
            self.__dict__[name][subpos] = value

        # only add to key order list if it does not already
        # exist in list
        if not (name in self._keyord):
            # Bug fix: the original tested `pos < len(x)` with an
            # undefined name `x` (NameError); the key-order list was
            # clearly intended.
            if pos < len(self._keyord):
                self._keyord[pos] = name
            else:
                self._keyord.append(name)

    def _getItemAsList(self, name, default = []):
        # NOTE: the shared default list is returned as-is (historical
        # API); callers must not mutate it.
        try:
            d = self.__dict__[name]
        except KeyError:
            return default

        if type(d) == list:
            return d
        return [d]

    def __str__(self):
        return anyType.__str__(self) + ": " + str(self._asdict())

    def __repr__(self):
        return self.__str__()
class structType(compoundType):
    """A SOAP struct: an ordered collection of named members."""
    pass
class headerType(structType):
    """The SOAP-ENV:Header element."""
    _validURIs = (NS.ENV,)
    def __init__(self, data = None, typed = 1, attrs = None):
        structType.__init__(self, data, "Header", typed, attrs)
class bodyType(structType):
    """The SOAP-ENV:Body element."""
    _validURIs = (NS.ENV,)
    def __init__(self, data = None, typed = 1, attrs = None):
        structType.__init__(self, data, "Body", typed, attrs)
class arrayType(collections.UserList, compoundType):
    """SOAP-ENC:Array — a (possibly multi-dimensional, possibly sparse)
    array of values, stored in self.data as nested lists.

    _dims holds the declared dimensions (reversed, innermost first) and
    _poss the next insertion position for non-sparse filling.
    """

    def __init__(self, data = None, name = None, attrs = None,
                 offset = 0, rank = None, asize = 0, elemsname = None):
        if data:
            if type(data) not in (list, tuple):
                raise Error("Data must be a sequence")

        collections.UserList.__init__(self, data)
        compoundType.__init__(self, data, name, 0, attrs)

        self._elemsname = elemsname or "item"

        if data is None:
            self._rank = rank

            # According to 5.4.2.2 in the SOAP spec, each element in a
            # sparse array must have a position. _posstate keeps track of
            # whether we've seen a position or not. It's possible values
            # are:
            # -1 No elements have been added, so the state is indeterminate
            # 0 An element without a position has been added, so no
            # elements can have positions
            # 1 An element with a position has been added, so all elements
            # must have positions
            self._posstate = -1

            self._full = 0

            if asize in ('', None):
                asize = '0'

            self._dims = [int(x) for x in str(asize).split(',')]
            self._dims.reverse()   # It's easier to work with this way
            self._poss = [0] * len(self._dims)   # This will end up
                                                 # reversed too

            for i in range(len(self._dims)):
                if self._dims[i] < 0 or \
                   self._dims[i] == 0 and len(self._dims) > 1:
                    raise TypeError("invalid Array dimensions")

                if offset > 0:
                    self._poss[i] = offset % self._dims[i]
                    offset = int(offset / self._dims[i])

                # Don't break out of the loop if offset is 0 so we test all
                # the dimensions for > 0.
            if offset:
                raise AttributeError("invalid Array offset")

            # Build the nested-list storage, innermost dimension first.
            a = [None] * self._dims[0]

            for i in range(1, len(self._dims)):
                b = []
                for j in range(self._dims[i]):
                    b.append(copy.deepcopy(a))
                a = b

            self.data = a

    def _aslist(self, item=None):
        if item is not None:
            return self.data[int(item)]
        else:
            return self.data

    def _asdict(self, item=None, encoding=Config.dict_encoding):
        # Keys are the (byte-encoded) element indices.
        if item is not None:
            if type(item) in (UnicodeType, StringType):
                item = item.encode(encoding)
            return self.data[int(item)]
        else:
            retval = {}
            def fun(x): retval[str(x).encode(encoding)] = self.data[x]
            list(map(fun, list(range(len(self.data)))))
            return retval

    def __getitem__(self, item):
        # Numeric (or numeric-string) items index the data; other strings
        # fall back to attribute access.
        try:
            return self.data[int(item)]
        except ValueError:
            return getattr(self, item)

    def __len__(self):
        return len(self.data)

    def __bool__(self):
        # Bug fix: __bool__ must return a bool in Python 3; returning the
        # int 1 raised TypeError on every truth test.
        return True

    def __str__(self):
        return anyType.__str__(self) + ": " + str(self._aslist())

    def _keys(self):
        return [x for x in list(self.__dict__.keys()) if x[0] != '_']

    def _addItem(self, name, value, attrs):
        if self._full:
            raise ValueError("Array is full")

        pos = attrs.get((NS.ENC, 'position'))

        if pos != None:
            if self._posstate == 0:
                raise AttributeError("all elements in a sparse Array must have a " \
                    "position attribute")

            self._posstate = 1

            # Decode "[i]" or "[i,j,...]" into per-dimension coordinates.
            try:
                if pos[0] == '[' and pos[-1] == ']':
                    pos = [int(x) for x in pos[1:-1].split(',')]
                    pos.reverse()

                    if len(pos) == 1:
                        pos = pos[0]

                        curpos = [0] * len(self._dims)

                        for i in range(len(self._dims)):
                            curpos[i] = pos % self._dims[i]
                            pos = int(pos / self._dims[i])

                            if pos == 0:
                                break

                        if pos:
                            raise Exception
                    elif len(pos) != len(self._dims):
                        raise Exception
                    else:
                        for i in range(len(self._dims)):
                            if pos[i] >= self._dims[i]:
                                raise Exception

                        curpos = pos
                else:
                    raise Exception
            except Exception:
                raise AttributeError("invalid Array element position %s" % str(pos))
        else:
            if self._posstate == 1:
                raise AttributeError("only elements in a sparse Array may have a " \
                    "position attribute")

            self._posstate = 0

            curpos = self._poss

        # Walk down to the innermost row for this position.
        a = self.data

        for i in range(len(self._dims) - 1, 0, -1):
            a = a[curpos[i]]

        if curpos[0] >= len(a):
            # Bug fix: grow the row up to the target index.  The original
            # computed `len(a) - curpos[0] + 1`, which is non-positive
            # whenever more than one slot is missing, leaving the row too
            # short and raising IndexError on the assignment below.
            a += [None] * (curpos[0] - len(a) + 1)

        a[curpos[0]] = value

        if pos == None:
            # Advance the sequential insertion cursor, with carry.
            self._poss[0] += 1

            for i in range(len(self._dims) - 1):
                if self._poss[i] < self._dims[i]:
                    break

                self._poss[i] = 0
                self._poss[i + 1] += 1

        if self._dims[-1] and self._poss[-1] >= self._dims[-1]:
            #self._full = 1
            #FIXME: why is this occuring?
            pass

    def _placeItem(self, name, value, pos, subpos, attrs = None):
        # Convert the flat position into per-dimension coordinates.
        curpos = [0] * len(self._dims)

        for i in range(len(self._dims)):
            if self._dims[i] == 0:
                curpos[0] = pos
                break

            curpos[i] = pos % self._dims[i]
            pos = int(pos / self._dims[i])

            if pos == 0:
                break

        if self._dims[i] != 0 and pos:
            raise Error("array index out of range")

        a = self.data

        for i in range(len(self._dims) - 1, 0, -1):
            a = a[curpos[i]]

        if curpos[0] >= len(a):
            # Bug fix: see _addItem — grow up to the target index.
            a += [None] * (curpos[0] - len(a) + 1)

        a[curpos[0]] = value
class mapType(arrayType):
    """Apache SOAP Map type: an array of {key, value} pairs."""
    _validURIs = ('http://xml.apache.org/xml-soap',)
    def __init__(self, data = None, name = None, attrs = None,
        offset = 0, rank = None, asize = 0, elemsname = None):
        arrayType.__init__(self, data, name, attrs, offset, rank, asize,
            elemsname)
        # Fixed member order expected by the Apache Map serialisation.
        self._keyord=['key','value']
class typedArrayType(arrayType):
    """An arrayType whose elements all share a single declared type."""
    def __init__(self, data = None, name = None, typed = None, attrs = None,
        offset = 0, rank = None, asize = 0, elemsname = None, complexType = 0):
        arrayType.__init__(self, data, name, attrs, offset, rank, asize,
            elemsname)
        # The element type overrides the class-derived type name.
        self._typed = 1
        self._type = typed
        self._complexType = complexType
class faultType(structType, Error):
    """Represents a SOAP Fault element (code, string, optional detail).

    The ``detail`` attribute only exists when a detail was supplied;
    consumers probe for it with ``getattr``/``hasattr``.
    """

    def __init__(self, faultcode = "", faultstring = "", detail = None):
        self.faultcode = faultcode
        self.faultstring = faultstring
        # Bug fix: compare with ``is not None`` rather than ``!= None``
        # (PEP 8 E711) -- equality can misfire for detail objects that
        # override __eq__.
        if detail is not None:
            self.detail = detail
        structType.__init__(self, None, 0)

    def _setDetail(self, detail = None):
        """Set the fault detail, or remove it when called without argument."""
        if detail is not None:
            self.detail = detail
        else:
            try:
                del self.detail
            except AttributeError:
                pass

    def __repr__(self):
        if getattr(self, 'detail', None) is not None:
            return "<Fault %s: %s: %s>" % (self.faultcode,
                                           self.faultstring,
                                           self.detail)
        else:
            return "<Fault %s: %s>" % (self.faultcode, self.faultstring)

    __str__ = __repr__

    def __call__(self):
        # NOTE(review): raises AttributeError when no detail was ever set.
        return (self.faultcode, self.faultstring, self.detail)
class SOAPException(Exception):
    """Generic SOAP error carrying the fault code, string and detail."""

    def __init__(self, code="", string="", detail=None):
        # Keep the individual fields as attributes and a summary tuple
        # (used by __str__) that always starts with the library banner.
        self.code = code
        self.string = string
        self.detail = detail
        self.value = ("SOAPpy-py3 SOAP Exception", code, string, detail)

    def __str__(self):
        return repr(self.value)
class RequiredHeaderMismatch(Exception):
    """Raised when a mustUnderstand SOAP header was not understood."""

    def __init__(self, value):
        # ``value`` is the fault detail reported by the server.
        self.value = value

    def __str__(self):
        return repr(self.value)
class MethodNotFound(Exception):
    """Raised when the server reports that the called method is unknown.

    The server supplies a ``"code:detail"`` style string which is split
    into ``value`` (the code) and ``detail``.
    """

    def __init__(self, value):
        # Bug fix: split on the first ':' only -- the original unbounded
        # split raised ValueError whenever the detail itself contained a
        # colon (e.g. a namespaced method name).
        (val, detail) = value.split(":", 1)
        self.value = val
        self.detail = detail

    def __str__(self):
        # Bug fix: repr() takes a single argument; the original
        # ``repr(self.value, self.detail)`` raised TypeError.
        return repr((self.value, self.detail))
class AuthorizationFailed(Exception):
    """Raised when the SOAP server rejects the caller's credentials."""

    def __init__(self, value):
        # ``value`` is the fault detail reported by the server.
        self.value = value

    def __str__(self):
        return repr(self.value)
class MethodFailed(Exception):
    """Raised when the remote method executed but reported a failure."""

    def __init__(self, value):
        # ``value`` is the fault detail reported by the server.
        self.value = value

    def __str__(self):
        return repr(self.value)
#######
# Convert complex SOAPpy-py3 objects to native python equivalents
#######
def simplify(object, level=0):
    """
    Convert the SOAPpy-py3 objects and their contents to simple python types.

    This function recursively converts the passed 'container' object,
    and all public subobjects. (Private subobjects have names that
    start with '_'.)

    Conversions:
    - faultType    --> raise python exception
    - arrayType    --> list
    - compoundType --> dictionary

    Recursion is capped at 10 levels; anything deeper is returned as-is.
    """
    if level > 10:
        return object

    if isinstance(object, faultType):
        # Map well-known fault strings onto dedicated exception classes.
        if object.faultstring == "Required Header Misunderstood":
            raise RequiredHeaderMismatch(object.detail)
        elif object.faultstring == "Method Not Found":
            raise MethodNotFound(object.detail)
        elif object.faultstring == "Authorization Failed":
            raise AuthorizationFailed(object.detail)
        elif object.faultstring == "Method Failed":
            raise MethodFailed(object.detail)
        else:
            se = SOAPException(object.faultcode, object.faultstring,
                               object.detail)
            raise se
    elif isinstance(object, arrayType):
        data = object._aslist()
        for k in range(len(data)):
            data[k] = simplify(data[k], level=level + 1)
        return data
    elif isinstance(object, compoundType) or isinstance(object, structType):
        data = object._asdict()
        for k in list(data.keys()):
            if isPublic(k):
                data[k] = simplify(data[k], level=level + 1)
        return data
    elif type(object) == dict:
        for k in list(object.keys()):
            if isPublic(k):
                # Bug fix: propagate the recursion depth (was reset to 0),
                # so the 10-level cap also applies below plain dicts.
                object[k] = simplify(object[k], level=level + 1)
        return object
    elif type(object) == list:
        for k in range(len(object)):
            # Bug fix: propagate the recursion depth (was reset to 0).
            object[k] = simplify(object[k], level=level + 1)
        return object
    else:
        return object
def simplify_contents(object, level=0):
    """
    Convert the contents of SOAPpy-py3 objects to simple python types.

    This function recursively converts the sub-objects contained in a
    'container' object to simple python types.

    Conversions:
    - faultType    --> raise python exception
    - arrayType    --> array
    - compoundType --> dictionary

    Unlike :func:`simplify`, the container itself is returned (mutated in
    place where possible). Recursion is capped at 10 levels.
    """
    if level > 10:
        return object

    if isinstance(object, faultType):
        # Simplify the fault's public attributes, then raise it.
        for k in object._keys():
            if isPublic(k):
                setattr(object, k, simplify(object[k], level=level + 1))
        raise object
    elif isinstance(object, arrayType):
        data = object._aslist()
        for k in range(len(data)):
            object[k] = simplify(data[k], level=level + 1)
    elif isinstance(object, structType):
        data = object._asdict()
        for k in list(data.keys()):
            if isPublic(k):
                setattr(object, k, simplify(data[k], level=level + 1))
    elif isinstance(object, compoundType):
        data = object._asdict()
        for k in list(data.keys()):
            if isPublic(k):
                object[k] = simplify(data[k], level=level + 1)
    elif type(object) == dict:
        for k in list(object.keys()):
            if isPublic(k):
                # Bug fix: propagate the recursion depth (was reset to 0),
                # so the 10-level cap also applies below plain dicts.
                object[k] = simplify(object[k], level=level + 1)
    elif type(object) == list:
        for k in range(len(object)):
            # Bug fix: propagate the recursion depth (was reset to 0).
            object[k] = simplify(object[k], level=level + 1)
    return object
| cmsdaq/hltd | lib/SOAPpy-py3-0.52.24/src/SOAPpy/Types.py | Python | lgpl-3.0 | 52,492 | [
"Brian"
] | 0b60c28a7db93f49f3cc4ad60405f68d04ede7566c0ba93ddd9b4050334ff6f8 |
import json
import os
from django import template
from django.conf import settings
from django.templatetags.static import static
register = template.Library()
@register.simple_tag
def bridge(filename):
    """ Add hash to filename for cache invalidation.

    Uses gulp-buster for cache invalidation: reads the busters manifest
    (``BUSTERS_FILE`` setting, defaulting to ``static/busters.json`` under
    ``BASE_DIR``) and appends the file's current hash as a URL query arg so
    browsers re-fetch changed assets.
    """
    if not hasattr(settings, 'BASE_DIR'):
        raise Exception("You must provide BASE_DIR in settings for bridge")
    file_path = getattr(settings, 'BUSTERS_FILE',
                        os.path.join('static', 'busters.json'))
    buster_file = os.path.join(settings.BASE_DIR, file_path)
    # Bug fix: the Python 2 builtin ``file()`` does not exist on Python 3;
    # open() as a context manager also guarantees the handle is closed
    # even if parsing fails.
    # TODO: may be store it somewhere to not load file every time
    with open(buster_file, 'r') as fp:
        busters_json = json.load(fp)
    file_hash = busters_json.get("static/%s" % filename)
    path = static(filename)
    return "%s?%s" % (path, file_hash) if file_hash is not None else path
| romantolkachyov/django-bridge | django_bridge/templatetags/bridge.py | Python | mit | 992 | [
"GULP"
] | 2cd405288960eb1bc21295a71135cbabd14dca21d490380fd03acbbc747de976 |
#!/usr/bin/env python
########################################################################
# File : dirac-proxy-init.py
# Author : Adrian Casajus
########################################################################
import os
import sys
import glob
import time
import datetime
import DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Base import Script
from DIRAC.FrameworkSystem.Client import ProxyGeneration, ProxyUpload
from DIRAC.Core.Security import X509Chain, ProxyInfo, Properties, VOMS
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.FrameworkSystem.Client.BundleDeliveryClient import BundleDeliveryClient
__RCSID__ = "$Id$"
class Params(ProxyGeneration.CLIParams):
    """Command-line parameters for dirac-proxy-init.

    Extends the basic proxy-generation parameters with switches to upload
    the proxy (-U), upload a pilot proxy (-P) and add a VOMS extension (-M).
    """

    # Defaults for the extra switches; flipped by the setter callbacks below.
    uploadProxy = False
    uploadPilot = False
    addVOMSExt = False

    def setUploadProxy(self, _arg):
        # Callback for -U/--upload (the switch argument is unused).
        self.uploadProxy = True
        return S_OK()

    def setUploadPilotProxy(self, _arg):
        # Callback for -P/--uploadPilot.
        self.uploadPilot = True
        return S_OK()

    def setVOMSExt(self, _arg):
        # Callback for -M/--VOMS.
        self.addVOMSExt = True
        return S_OK()

    def registerCLISwitches(self):
        """Register the base switches plus the three extra ones above."""
        ProxyGeneration.CLIParams.registerCLISwitches(self)
        Script.registerSwitch("U", "upload", "Upload a long lived proxy to the ProxyManager", self.setUploadProxy)
        Script.registerSwitch("P", "uploadPilot", "Upload a long lived pilot proxy to the ProxyManager",
                              self.setUploadPilotProxy)
        Script.registerSwitch("M", "VOMS", "Add voms extension", self.setVOMSExt)
class ProxyInit(object):
    """Orchestrates the full dirac-proxy-init sequence.

    Generates a proxy from the user certificate, optionally decorates it
    with a VOMS extension, and uploads long-lived proxies to the DIRAC
    ProxyManager for the relevant groups.
    """

    def __init__(self, piParams):
        # piParams: a Params instance holding the parsed CLI switches.
        self.__piParams = piParams
        self.__issuerCert = False        # lazily-loaded issuer certificate
        self.__proxyGenerated = False    # path of the generated proxy file
        self.__uploadedInfo = {}         # {userDN: {group: expiry datetime}}

    def getIssuerCert(self):
        """Return the issuer certificate of the local proxy chain (cached).

        Exits the process if the chain or the issuer cannot be loaded.
        """
        if self.__issuerCert:
            return self.__issuerCert
        proxyChain = X509Chain.X509Chain()
        resultProxyChainFromFile = proxyChain.loadChainFromFile(self.__piParams.certLoc)
        if not resultProxyChainFromFile['OK']:
            gLogger.error("Could not load the proxy: %s" % resultProxyChainFromFile['Message'])
            sys.exit(1)
        resultIssuerCert = proxyChain.getIssuerCert()
        if not resultIssuerCert['OK']:
            gLogger.error("Could not load the proxy: %s" % resultIssuerCert['Message'])
            sys.exit(1)
        self.__issuerCert = resultIssuerCert['Value']
        return self.__issuerCert

    def certLifeTimeCheck(self):
        """Warn the user when the certificate expires within the safe window."""
        # Default safe lifetime: 2592000 s = 30 days.
        minLife = Registry.getGroupOption(self.__piParams.diracGroup, "SafeCertificateLifeTime", 2592000)
        resultIssuerCert = self.getIssuerCert()
        resultRemainingSecs = resultIssuerCert.getRemainingSecs()  # pylint: disable=no-member
        if not resultRemainingSecs['OK']:
            gLogger.error("Could not retrieve certificate expiration time", resultRemainingSecs['Message'])
            return
        lifeLeft = resultRemainingSecs['Value']
        if minLife > lifeLeft:
            daysLeft = int(lifeLeft / 86400)
            msg = "Your certificate will expire in less than %d days. Please renew it!" % daysLeft
            sep = "=" * (len(msg) + 4)
            msg = "%s\n %s \n%s" % (sep, msg, sep)
            gLogger.notice(msg)

    def getGroupsToUpload(self):
        """Return the list of DIRAC groups whose proxies should be uploaded.

        Includes the user's own group when requested/configured, plus all
        pilot-capable groups when pilot upload is requested/configured.
        """
        uploadGroups = []
        if self.__piParams.uploadProxy or Registry.getGroupOption(self.__piParams.diracGroup, "AutoUploadProxy", False):
            uploadGroups.append(self.__piParams.diracGroup)
        if not self.__piParams.uploadPilot:
            if not Registry.getGroupOption(self.__piParams.diracGroup, "AutoUploadPilotProxy", False):
                return uploadGroups
        # Pilot upload requested: add every pilot-capable group for this DN.
        issuerCert = self.getIssuerCert()
        resultUserDN = issuerCert.getSubjectDN()  # pylint: disable=no-member
        if not resultUserDN['OK']:
            # NOTE(review): returns the error dict here while every other
            # path returns a list -- callers iterate the result; confirm.
            return resultUserDN
        userDN = resultUserDN['Value']
        resultGroups = Registry.getGroupsForDN(userDN)
        if not resultGroups['OK']:
            gLogger.error("No groups defined for DN %s" % userDN)
            return []
        availableGroups = resultGroups['Value']
        for group in availableGroups:
            groupProps = Registry.getPropertiesForGroup(group)
            if Properties.PILOT in groupProps or Properties.GENERIC_PILOT in groupProps:
                uploadGroups.append(group)
        return uploadGroups

    def addVOMSExtIfNeeded(self):
        """Add a VOMS extension to the generated proxy when requested or configured."""
        addVOMS = self.__piParams.addVOMSExt or Registry.getGroupOption(self.__piParams.diracGroup, "AutoAddVOMS", False)
        if not addVOMS:
            return S_OK()
        vomsAttr = Registry.getVOMSAttributeForGroup(self.__piParams.diracGroup)
        if not vomsAttr:
            return S_ERROR(
                "Requested adding a VOMS extension but no VOMS attribute defined for group %s" %
                self.__piParams.diracGroup)
        resultVomsAttributes = VOMS.VOMS().setVOMSAttributes(self.__proxyGenerated, attribute=vomsAttr,
                                                             vo=Registry.getVOMSVOForGroup(self.__piParams.diracGroup))
        if not resultVomsAttributes['OK']:
            return S_ERROR(
                "Could not add VOMS extensions to the proxy\nFailed adding VOMS attribute: %s" %
                resultVomsAttributes['Message'])
        gLogger.notice("Added VOMS attribute %s" % vomsAttr)
        chain = resultVomsAttributes['Value']
        # Overwrite the proxy file with the VOMS-decorated chain.
        retDump = chain.dumpAllToFile(self.__proxyGenerated)
        if not retDump['OK']:
            return retDump
        return S_OK()

    def createProxy(self):
        """ Creates the proxy on disk
        """
        gLogger.notice("Generating proxy...")
        # Bug fix: use the instance's parameters instead of the module-global
        # ``piParams`` created in __main__ (the class was unusable elsewhere).
        resultProxyGenerated = ProxyGeneration.generateProxy(self.__piParams)
        if not resultProxyGenerated['OK']:
            gLogger.error(resultProxyGenerated['Message'])
            sys.exit(1)
        self.__proxyGenerated = resultProxyGenerated['Value']
        return resultProxyGenerated

    def uploadProxy(self, userGroup=False):
        """ Upload the proxy to the proxyManager service
        """
        issuerCert = self.getIssuerCert()
        resultUserDN = issuerCert.getSubjectDN()  # pylint: disable=no-member
        if not resultUserDN['OK']:
            return resultUserDN
        userDN = resultUserDN['Value']
        if not userGroup:
            userGroup = self.__piParams.diracGroup
        gLogger.notice("Uploading proxy for %s..." % userGroup)
        if userDN in self.__uploadedInfo:
            # Skip if a proxy valid (almost) as long as the cert is already up.
            expiry = self.__uploadedInfo[userDN].get(userGroup)
            if expiry:
                if issuerCert.getNotAfterDate()['Value'] - datetime.timedelta(minutes=10) < expiry:  # pylint: disable=no-member
                    gLogger.info("SKipping upload for group %s. Already uploaded" % userGroup)
                    return S_OK()
        gLogger.info("Uploading %s proxy to ProxyManager..." % self.__piParams.diracGroup)
        upParams = ProxyUpload.CLIParams()
        upParams.onTheFly = True
        # Leave a 5-minute safety margin w.r.t. the certificate lifetime.
        upParams.proxyLifeTime = issuerCert.getRemainingSecs()['Value'] - 300  # pylint: disable=no-member
        upParams.rfcIfPossible = self.__piParams.rfc
        upParams.diracGroup = userGroup
        # Reuse the credentials given on the command line.
        for k in ('certLoc', 'keyLoc', 'userPasswd'):
            setattr(upParams, k, getattr(self.__piParams, k))
        resultProxyUpload = ProxyUpload.uploadProxy(upParams)
        if not resultProxyUpload['OK']:
            gLogger.error(resultProxyUpload['Message'])
            sys.exit(1)
        self.__uploadedInfo = resultProxyUpload['Value']
        gLogger.info("Proxy uploaded")
        return S_OK()

    def printInfo(self):
        """ Printing utilities
        """
        resultProxyInfoAsAString = ProxyInfo.getProxyInfoAsString(self.__proxyGenerated)
        if not resultProxyInfoAsAString['OK']:
            gLogger.error('Failed to get the new proxy info: %s' % resultProxyInfoAsAString['Message'])
        else:
            gLogger.notice("Proxy generated:")
            gLogger.notice(resultProxyInfoAsAString['Value'])
        if self.__uploadedInfo:
            # Pretty-print the uploaded proxies as a DN / Group / expiry table.
            gLogger.notice("\nProxies uploaded:")
            maxDNLen = 0
            maxGroupLen = 0
            for userDN in self.__uploadedInfo:
                maxDNLen = max(maxDNLen, len(userDN))
                for group in self.__uploadedInfo[userDN]:
                    maxGroupLen = max(maxGroupLen, len(group))
            gLogger.notice(" %s | %s | Until (GMT)" % ("DN".ljust(maxDNLen), "Group".ljust(maxGroupLen)))
            for userDN in self.__uploadedInfo:
                for group in self.__uploadedInfo[userDN]:
                    gLogger.notice(" %s | %s | %s" % (userDN.ljust(maxDNLen),
                                                      group.ljust(maxGroupLen),
                                                      self.__uploadedInfo[userDN][group].strftime("%Y/%m/%d %H:%M")))

    def checkCAs(self):
        """Check the local CRLs; try to sync CAs/CRLs when they look outdated."""
        if "X509_CERT_DIR" not in os.environ:
            gLogger.warn("X509_CERT_DIR is unset. Abort check of CAs")
            return
        caDir = os.environ["X509_CERT_DIR"]
        # In globus standards .r0 files are CRLs. They have the same names of the CAs but diffent file extension
        searchExp = os.path.join(caDir, "*.r0")
        crlList = glob.glob(searchExp)
        if not crlList:
            gLogger.warn("No CRL files found for %s. Abort check of CAs" % searchExp)
            return
        newestFPath = max(crlList, key=os.path.getmtime)
        newestFTime = os.path.getmtime(newestFPath)
        if newestFTime > (time.time() - (2 * 24 * 3600)):
            # At least one of the files has been updated in the last 2 days
            # (the original comment said 28 days -- the code checks 2; confirm
            # which window is intended).
            return S_OK()
        if not os.access(caDir, os.W_OK):
            gLogger.error("Your CRLs appear to be outdated, but you have no access to update them.")
            # Try to continue anyway...
            return S_OK()
        # Update the CAs & CRLs
        gLogger.notice("Your CRLs appear to be outdated; attempting to update them...")
        bdc = BundleDeliveryClient()
        res = bdc.syncCAs()
        if not res['OK']:
            gLogger.error("Failed to update CAs", res['Message'])
        res = bdc.syncCRLs()
        if not res['OK']:
            gLogger.error("Failed to update CRLs", res['Message'])
        # Continue even if the update failed...
        return S_OK()

    def doTheMagic(self):
        """Run the whole sequence: create, check, VOMS-decorate, upload."""
        proxy = self.createProxy()
        if not proxy['OK']:
            return proxy
        self.checkCAs()
        # Bug fix: the original called these through the module-global ``pI``
        # instead of ``self``, so the class only worked for the single
        # instance created in __main__.
        self.certLifeTimeCheck()
        resultProxyWithVOMS = self.addVOMSExtIfNeeded()
        if not resultProxyWithVOMS['OK']:
            if "returning a valid AC for the user" in resultProxyWithVOMS['Message']:
                gLogger.error(resultProxyWithVOMS['Message'])
                gLogger.error("\n Are you sure you are properly registered in the VO?")
            elif "Missing voms-proxy" in resultProxyWithVOMS['Message']:
                gLogger.notice("Failed to add VOMS extension: no standard grid interface available")
            else:
                gLogger.error(resultProxyWithVOMS['Message'])
            if self.__piParams.strict:
                return resultProxyWithVOMS
        for pilotGroup in self.getGroupsToUpload():
            resultProxyUpload = self.uploadProxy(userGroup=pilotGroup)
            if not resultProxyUpload['OK']:
                if self.__piParams.strict:
                    return resultProxyUpload
        return S_OK()
if __name__ == "__main__":
    # Parse the command line (without contacting the Configuration Service).
    piParams = Params()
    piParams.registerCLISwitches()
    Script.disableCS()
    Script.parseCommandLine(ignoreErrors=True)
    # Force use of the user credentials rather than the server certificate.
    DIRAC.gConfig.setOptionValue("/DIRAC/Security/UseServerCertificate", "False")
    # Run the full proxy-init sequence and report the outcome.
    pI = ProxyInit(piParams)
    resultDoTheMagic = pI.doTheMagic()
    if not resultDoTheMagic['OK']:
        gLogger.fatal(resultDoTheMagic['Message'])
        sys.exit(1)
    pI.printInfo()
    sys.exit(0)
| fstagni/DIRAC | FrameworkSystem/scripts/dirac-proxy-init.py | Python | gpl-3.0 | 10,864 | [
"DIRAC"
] | 8e80ea5d6c3f10f3d11607b2cc52e7ee1d02b35db2f30702437f270cf051d534 |
# Copyright 2011 Rackspace
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from mox3 import mox
import netaddr
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import importutils
from oslo_utils import netutils
import six
import testtools
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import ipv6
from nova.network import floating_ips
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import network as network_obj
from nova.objects import virtual_interface as vif_obj
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_ldap
from nova.tests.unit import fake_network
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_fixed_ip
from nova.tests.unit.objects import test_floating_ip
from nova.tests.unit.objects import test_network
from nova.tests.unit.objects import test_service
from nova.tests.unit import utils as test_utils
from nova.tests import uuidsentinel as uuids
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)

# Host name and instance UUID shared by the DB fixtures below.
HOST = "testhost"
FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"

# Shorthand for building fake DB instance dicts in the tests.
fake_inst = fake_instance.fake_db_instance

# Two fake network DB rows (192.168.0.0/24 and 192.168.1.0/24), both
# hosted on HOST and owned by the fake project.
networks = [{'id': 0,
             'uuid': FAKEUUID,
             'label': 'test0',
             'injected': False,
             'multi_host': False,
             'cidr': '192.168.0.0/24',
             'cidr_v6': '2001:db8::/64',
             'gateway_v6': '2001:db8::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': 'fa0',
             'bridge_interface': 'fake_fa0',
             'gateway': '192.168.0.1',
             'dhcp_server': '192.168.0.1',
             'broadcast': '192.168.0.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'vlan': None,
             'host': HOST,
             'project_id': fakes.FAKE_PROJECT_ID,
             'vpn_public_address': '192.168.0.2',
             'vpn_public_port': '22',
             'vpn_private_address': '10.0.0.2'},
            {'id': 1,
             'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
             'label': 'test1',
             'injected': False,
             'multi_host': False,
             'cidr': '192.168.1.0/24',
             'cidr_v6': '2001:db9::/64',
             'gateway_v6': '2001:db9::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': 'fa1',
             'bridge_interface': 'fake_fa1',
             'gateway': '192.168.1.1',
             'dhcp_server': '192.168.1.1',
             'broadcast': '192.168.1.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'vlan': None,
             'host': HOST,
             'project_id': fakes.FAKE_PROJECT_ID,
             'vpn_public_address': '192.168.1.2',
             'vpn_public_port': '22',
             'vpn_private_address': '10.0.0.2'}]

# Fixed IP rows: one IPv4 address per network above, plus one IPv6
# address on network 1.
fixed_ips = [{'id': 0,
              'network_id': 0,
              'address': '192.168.0.100',
              'instance_uuid': 0,
              'allocated': False,
              'virtual_interface_id': 0,
              'floating_ips': []},
             {'id': 0,
              'network_id': 1,
              'address': '192.168.1.100',
              'instance_uuid': 0,
              'allocated': False,
              'virtual_interface_id': 0,
              'floating_ips': []},
             {'id': 0,
              'network_id': 1,
              'address': '2001:db9:0:1::10',
              'instance_uuid': 0,
              'allocated': False,
              'virtual_interface_id': 0,
              'floating_ips': []}]

# Minimal flavor: only rxtx_cap is consulted by the network tests.
flavor = {'id': 0,
          'rxtx_cap': 3}

# Template for a floating IP DB row (unassociated, 'nova' pool).
floating_ip_fields = {'id': 0,
                      'address': '192.168.10.100',
                      'pool': 'nova',
                      'interface': 'eth0',
                      'fixed_ip_id': 0,
                      'project_id': None,
                      'auto_assigned': False}

# Three virtual interfaces, all attached to the same fake instance,
# one per network id 0..2.
vifs = [{'id': 0,
         'created_at': None,
         'updated_at': None,
         'deleted_at': None,
         'deleted': 0,
         'address': 'DE:AD:BE:EF:00:00',
         'uuid': uuids.vif1_uuid,
         'network_id': 0,
         'instance_uuid': uuids.instance,
         'tag': 'fake-tag1'},
        {'id': 1,
         'created_at': None,
         'updated_at': None,
         'deleted_at': None,
         'deleted': 0,
         'address': 'DE:AD:BE:EF:00:01',
         'uuid': '00000000-0000-0000-0000-0000000000000001',
         'network_id': 1,
         'instance_uuid': uuids.instance,
         'tag': 'fake-tag2'},
        {'id': 2,
         'created_at': None,
         'updated_at': None,
         'deleted_at': None,
         'deleted': 0,
         'address': 'DE:AD:BE:EF:00:02',
         'uuid': '00000000-0000-0000-0000-0000000000000002',
         'network_id': 2,
         'instance_uuid': uuids.instance,
         'tag': 'fake-tag3'}]
class FlatNetworkTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(FlatNetworkTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.network = network_manager.FlatManager(host=HOST)
self.network.instance_dns_domain = ''
self.network.db = db
self.context = context.RequestContext('testuser',
fakes.FAKE_PROJECT_ID,
is_admin=False)
@testtools.skipIf(test_utils.is_osx(),
'IPv6 pretty-printing broken on OSX, see bug 1409135')
def test_get_instance_nw_info_fake(self):
fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
nw_info = fake_get_instance_nw_info(self, 0, 2)
self.assertFalse(nw_info)
nw_info = fake_get_instance_nw_info(self, 1, 2)
for i, vif in enumerate(nw_info):
nid = i + 1
check = {'bridge': 'fake_br%d' % nid,
'cidr': '192.168.%s.0/24' % nid,
'cidr_v6': '2001:db8:0:%x::/64' % nid,
'id': getattr(uuids, 'vif%i' % nid),
'multi_host': False,
'injected': False,
'bridge_interface': None,
'vlan': None,
'broadcast': '192.168.%d.255' % nid,
'dhcp_server': '192.168.1.1',
'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
'gateway': '192.168.%d.1' % nid,
'gateway_v6': '2001:db8:0:1::1',
'label': 'test%d' % nid,
'mac': 'DE:AD:BE:EF:00:%02x' % nid,
'rxtx_cap': 30,
'vif_type': net_model.VIF_TYPE_BRIDGE,
'vif_devname': None,
'vif_uuid': getattr(uuids, 'vif%i' % nid),
'ovs_interfaceid': None,
'qbh_params': None,
'qbg_params': None,
'should_create_vlan': False,
'should_create_bridge': False,
'ip': '192.168.%d.%03d' % (nid, nid + 99),
'ip_v6': '2001:db8:0:1:dcad:beff:feef:%x' % nid,
'netmask': '255.255.255.0',
'netmask_v6': 64,
'physical_network': None,
}
network = vif['network']
net_v4 = vif['network']['subnets'][0]
net_v6 = vif['network']['subnets'][1]
vif_dict = dict(bridge=network['bridge'],
cidr=net_v4['cidr'],
cidr_v6=net_v6['cidr'],
id=vif['id'],
multi_host=network.get_meta('multi_host', False),
injected=network.get_meta('injected', False),
bridge_interface=
network.get_meta('bridge_interface'),
vlan=network.get_meta('vlan'),
broadcast=str(net_v4.as_netaddr().broadcast),
dhcp_server=network.get_meta('dhcp_server',
net_v4['gateway']['address']),
dns=[ip['address'] for ip in net_v4['dns']],
gateway=net_v4['gateway']['address'],
gateway_v6=net_v6['gateway']['address'],
label=network['label'],
mac=vif['address'],
rxtx_cap=vif.get_meta('rxtx_cap'),
vif_type=vif['type'],
vif_devname=vif.get('devname'),
vif_uuid=vif['id'],
ovs_interfaceid=vif.get('ovs_interfaceid'),
qbh_params=vif.get('qbh_params'),
qbg_params=vif.get('qbg_params'),
should_create_vlan=
network.get_meta('should_create_vlan', False),
should_create_bridge=
network.get_meta('should_create_bridge',
False),
ip=net_v4['ips'][i]['address'],
ip_v6=net_v6['ips'][i]['address'],
netmask=str(net_v4.as_netaddr().netmask),
netmask_v6=net_v6.as_netaddr()._prefixlen,
physical_network=
network.get_meta('physical_network', None))
self.assertThat(vif_dict, matchers.DictMatches(check))
def test_validate_networks(self):
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100')]
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[1])
ip['network'] = dict(test_network.fake_network,
**networks[1])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[0])
ip['network'] = dict(test_network.fake_network,
**networks[0])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_valid_fixed_ipv6(self):
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'2001:db9:0:1::10')]
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[2])
ip['network'] = dict(test_network.fake_network,
**networks[1])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_reserved(self):
context_admin = context.RequestContext('testuser',
fakes.FAKE_PROJECT_ID,
is_admin=True)
nets = self.network.create_networks(context_admin, 'fake',
'192.168.0.0/24', False, 1,
256, None, None, None, None, None)
self.assertEqual(1, len(nets))
network = nets[0]
self.assertEqual(4, db.network_count_reserved_ips(context_admin,
network['id']))
def test_validate_reserved_start_end(self):
context_admin = context.RequestContext('testuser',
fakes.FAKE_PROJECT_ID,
is_admin=True)
nets = self.network.create_networks(context_admin, 'fake',
'192.168.0.0/24', False, 1,
256, dhcp_server='192.168.0.11',
allowed_start='192.168.0.10',
allowed_end='192.168.0.245')
self.assertEqual(1, len(nets))
network = nets[0]
# gateway defaults to beginning of allowed_start
self.assertEqual('192.168.0.10', network['gateway'])
# vpn_server doesn't conflict with dhcp_start
self.assertEqual('192.168.0.12', network['vpn_private_address'])
# dhcp_start doesn't conflict with dhcp_server
self.assertEqual('192.168.0.13', network['dhcp_start'])
# NOTE(vish): 10 from the beginning, 10 from the end, and
# 1 for the gateway, 1 for the dhcp server,
# 1 for the vpn server
self.assertEqual(23, db.network_count_reserved_ips(context_admin,
network['id']))
def test_validate_reserved_start_out_of_range(self):
context_admin = context.RequestContext('testuser',
fakes.FAKE_PROJECT_ID,
is_admin=True)
self.assertRaises(exception.AddressOutOfRange,
self.network.create_networks,
context_admin, 'fake', '192.168.0.0/24', False,
1, 256, allowed_start='192.168.1.10')
def test_validate_reserved_end_invalid(self):
context_admin = context.RequestContext('testuser',
fakes.FAKE_PROJECT_ID,
is_admin=True)
self.assertRaises(exception.InvalidAddress,
self.network.create_networks,
context_admin, 'fake', '192.168.0.0/24', False,
1, 256, allowed_end='invalid')
def test_validate_cidr_invalid(self):
context_admin = context.RequestContext('testuser',
fakes.FAKE_PROJECT_ID,
is_admin=True)
self.assertRaises(exception.InvalidCidr,
self.network.create_networks,
context_admin, 'fake', 'invalid', False,
1, 256)
def test_validate_non_int_size(self):
context_admin = context.RequestContext('testuser',
fakes.FAKE_PROJECT_ID,
is_admin=True)
self.assertRaises(exception.InvalidIntValue,
self.network.create_networks,
context_admin, 'fake', '192.168.0.0/24', False,
1, 'invalid')
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100.1'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100.1')]
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
''),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'')]
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
None),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
None)]
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
@mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
def test_get_instance_nw_info(self, get):
def make_ip(index):
vif = objects.VirtualInterface(uuid=uuids.vif1_uuid, address=index)
network = objects.Network(uuid=uuids.network_1,
bridge=index,
label=index,
project_id=fakes.FAKE_PROJECT_ID,
injected=False,
netmask='255.255.255.0',
dns1=None,
dns2=None,
cidr_v6=None,
gateway_v6=None,
broadcast_v6=None,
netmask_v6=None,
rxtx_base=None,
gateway='192.168.%s.1' % index,
dhcp_server='192.168.%s.1' % index,
broadcast='192.168.%s.255' % index,
cidr='192.168.%s.0/24' % index)
return objects.FixedIP(virtual_interface=vif,
network=network,
floating_ips=objects.FloatingIPList(),
address='192.168.%s.2' % index)
objs = [make_ip(index) for index in ('3', '1', '2')]
get.return_value = objects.FixedIPList(objects=objs)
nw_info = self.network.get_instance_nw_info(self.context, None,
None, None)
for i, vif in enumerate(nw_info):
self.assertEqual(objs[i].network.bridge, vif['network']['bridge'])
@mock.patch.object(objects.Network, 'get_by_id')
def test_add_fixed_ip_instance_using_id_without_vpn(self, get_by_id):
# Allocate a fixed ip from a network and assign it to an instance.
# Network is given by network id.
network_id = networks[0]['id']
with mock.patch.object(self.network,
'allocate_fixed_ip') as allocate_fixed_ip:
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
network_id)
# Assert that we fetched the network by id, not uuid
get_by_id.assert_called_once_with(self.context,
network_id, project_only='allow_none')
# Assert that we called allocate_fixed_ip for the given network and
# instance. We should not have requested a specific address from the
# network.
allocate_fixed_ip.assert_called_once_with(self.context, FAKEUUID,
get_by_id.return_value,
address=None)
@mock.patch.object(objects.Network, 'get_by_uuid')
def test_add_fixed_ip_instance_using_uuid_without_vpn(self, get_by_uuid):
# Allocate a fixed ip from a network and assign it to an instance.
# Network is given by network uuid.
network_uuid = networks[0]['uuid']
with mock.patch.object(self.network,
'allocate_fixed_ip') as allocate_fixed_ip,\
mock.patch.object(self.context, 'elevated',
return_value=mock.sentinel.elevated):
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
network_uuid)
# Assert that we fetched the network by uuid, not id, and with elevated
# context
get_by_uuid.assert_called_once_with(mock.sentinel.elevated,
network_uuid)
# Assert that we called allocate_fixed_ip for the given network and
# instance. We should not have requested a specific address from the
# network.
allocate_fixed_ip.assert_called_once_with(self.context,
FAKEUUID,
get_by_uuid.return_value,
address=None)
def test_mini_dns_driver(self):
zone1 = "example.org"
zone2 = "example.com"
driver = self.network.instance_dns_manager
driver.create_entry("hostone", "10.0.0.1", "A", zone1)
driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
driver.delete_entry("hostone", zone1)
driver.modify_address("hostfour", "10.0.0.1", zone1)
driver.modify_address("hostthree", "10.0.0.1", zone1)
names = driver.get_entries_by_address("10.0.0.1", zone1)
self.assertEqual(2, len(names))
self.assertIn('hostthree', names)
self.assertIn('hostfour', names)
names = driver.get_entries_by_address("10.0.0.5", zone2)
self.assertEqual(1, len(names))
self.assertIn('hostfive', names)
addresses = driver.get_entries_by_name("hosttwo", zone1)
self.assertEqual(1, len(addresses))
self.assertIn('10.0.0.2', addresses)
self.assertRaises(exception.InvalidInput,
driver.create_entry,
"hostname",
"10.10.10.10",
"invalidtype",
zone1)
def test_mini_dns_driver_with_mixed_case(self):
zone1 = "example.org"
driver = self.network.instance_dns_manager
driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(1, len(addresses))
for n in addresses:
driver.delete_entry(n, zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(0, len(addresses))
    def test_allocate_fixed_ip_instance_dns(self):
        """DNS entries are created for both name and uuid on allocation."""
        # Test DNS entries are created when allocating a fixed IP.
        # Allocate a fixed IP to an instance. Ensure that dns entries have been
        # created for the instance's name and uuid.
        network = network_obj.Network._from_db_object(
            self.context, network_obj.Network(), test_network.fake_network)
        network.save = mock.MagicMock()

        # Create a minimal instance object
        instance_params = {
            'display_name': HOST,
            'security_groups': []
        }
        instance = fake_instance.fake_instance_obj(
            context.RequestContext('ignore', 'ignore'),
            expected_attrs=instance_params.keys(), **instance_params)
        instance.save = mock.MagicMock()

        # We don't specify a specific address, so we should get a FixedIP
        # automatically allocated from the pool. Fix its value here.
        fip = objects.FixedIP(address='192.168.0.101')
        fip.save = mock.MagicMock()

        with mock.patch.object(objects.Instance, 'get_by_uuid',
                               return_value=instance),\
                mock.patch.object(objects.FixedIP, 'associate_pool',
                                  return_value=fip):
            self.network.allocate_fixed_ip(self.context, FAKEUUID, network)

        instance_manager = self.network.instance_dns_manager
        expected_addresses = ['192.168.0.101']

        # Assert that we have a correct entry by instance display name
        addresses = instance_manager.get_entries_by_name(
            HOST, self.network.instance_dns_domain)
        self.assertEqual(expected_addresses, addresses)

        # Assert that we have a correct entry by instance uuid
        addresses = instance_manager.get_entries_by_name(
            FAKEUUID, self.network.instance_dns_domain)
        self.assertEqual(expected_addresses, addresses)
def test_allocate_floating_ip(self):
self.assertIsNone(self.network.allocate_floating_ip(self.context,
1, None))
def test_deallocate_floating_ip(self):
self.assertIsNone(self.network.deallocate_floating_ip(self.context,
1, None))
def test_associate_floating_ip(self):
self.assertIsNone(self.network.associate_floating_ip(self.context,
None, None))
def test_disassociate_floating_ip(self):
self.assertIsNone(self.network.disassociate_floating_ip(self.context,
None, None))
    def test_get_networks_by_uuids_ordering(self):
        """Networks come back in the order the uuids were requested."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')

        requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                              'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
                                        [dict(test_network.fake_network, **net)
                                         for net in networks])

        self.mox.ReplayAll()
        res = self.network._get_networks_by_uuids(self.context,
                                                  requested_networks)

        # 'b...' was requested first, so the network with id 1 leads.
        self.assertEqual(1, res[0]['id'])
        self.assertEqual(0, res[1]['id'])
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.quotas.Quotas.reserve')
    @mock.patch('nova.objects.quotas.ids_from_instance')
    def test_allocate_calculates_quota_auth(self, util_method, reserve,
                                            get_by_uuid):
        """An over-quota reserve() surfaces as FixedIpLimitExceeded."""
        inst = objects.Instance()
        inst['uuid'] = uuids.instance
        get_by_uuid.return_value = inst
        usages = {'fixed_ips': {'in_use': 10, 'reserved': 1}}
        # Simulate the quota engine rejecting the reservation.
        reserve.side_effect = exception.OverQuota(overs='testing',
                                                  quotas={'fixed_ips': 10},
                                                  usages=usages)
        util_method.return_value = ('foo', 'bar')
        self.assertRaises(exception.FixedIpLimitExceeded,
                          self.network.allocate_fixed_ip,
                          self.context, 123, {'uuid': uuids.instance})
        # The quota project/user ids must be derived from the instance.
        util_method.assert_called_once_with(self.context, inst)
    @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
    @mock.patch('nova.objects.quotas.Quotas.reserve')
    @mock.patch('nova.objects.quotas.ids_from_instance')
    def test_deallocate_calculates_quota_auth(self, util_method, reserve,
                                              get_by_address):
        """Quota ids are derived from the instance on deallocation too."""
        inst = objects.Instance(uuid=uuids.instance)
        fip = objects.FixedIP(instance_uuid=uuids.instance,
                              virtual_interface_id=1)
        get_by_address.return_value = fip
        util_method.return_value = ('foo', 'bar')
        # This will fail right after the reserve call when it tries
        # to look up the fake instance we created above
        self.assertRaises(exception.InstanceNotFound,
                          self.network.deallocate_fixed_ip,
                          self.context, '1.2.3.4', instance=inst)
        util_method.assert_called_once_with(self.context, inst)
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
                                                     mock_get):
        """A netaddr.IPAddress is stringified before FixedIP.associate."""
        mock_associate.side_effect = test.TestingException
        instance = objects.Instance(context=self.context)
        instance.create()
        mock_get.return_value = instance
        self.assertRaises(test.TestingException,
                          self.network.allocate_fixed_ip,
                          self.context, instance.uuid,
                          {'cidr': '24', 'id': 1, 'uuid': uuids.instance},
                          address=netaddr.IPAddress('1.2.3.4'))
        # associate must receive the plain string form of the address
        mock_associate.assert_called_once_with(self.context,
                                               '1.2.3.4',
                                               instance.uuid,
                                               1,
                                               vif_id=1)
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.virtual_interface.VirtualInterface'
                '.get_by_instance_and_network')
    @mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    @mock.patch('nova.objects.fixed_ip.FixedIP.save')
    def test_allocate_fixed_ip_cleanup(self,
                                       mock_fixedip_save,
                                       mock_fixedip_associate,
                                       mock_fixedip_disassociate,
                                       mock_vif_get,
                                       mock_instance_get):
        """A failure during network setup rolls back DNS entries and the
        fixed IP association.
        """
        address = netaddr.IPAddress('1.2.3.4')

        fip = objects.FixedIP(instance_uuid=uuids.instance,
                              address=address,
                              virtual_interface_id=1)
        mock_fixedip_associate.return_value = fip

        instance = objects.Instance(context=self.context)
        instance.create()
        mock_instance_get.return_value = instance

        mock_vif_get.return_value = vif_obj.VirtualInterface(
            instance_uuid=uuids.instance, id=1)

        with test.nested(
            mock.patch.object(self.network, '_setup_network_on_host'),
            mock.patch.object(self.network, 'instance_dns_manager'),
            mock.patch.object(self.network,
                '_do_trigger_security_group_members_refresh_for_instance')
        ) as (mock_setup_network, mock_dns_manager, mock_ignored):
            # Force the failure after the IP has already been associated.
            mock_setup_network.side_effect = test.TestingException
            self.assertRaises(test.TestingException,
                              self.network.allocate_fixed_ip,
                              self.context, instance.uuid,
                              {'cidr': '24', 'id': 1,
                               'uuid': uuids.instance},
                              address=address)

            # Both DNS entries (display name and uuid) must be removed.
            mock_dns_manager.delete_entry.assert_has_calls([
                mock.call(instance.display_name, ''),
                mock.call(instance.uuid, '')
            ])

            # ...and the fixed IP association must be undone.
            mock_fixedip_disassociate.assert_called_once_with(self.context)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.virtual_interface.VirtualInterface'
'.get_by_instance_and_network')
@mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate_pool')
@mock.patch('nova.network.manager.NetworkManager._add_virtual_interface')
def test_allocate_fixed_ip_create_new_vifs(self,
mock_add,
mock_fixedip_associate,
mock_fixedip_disassociate,
mock_vif_get,
mock_instance_get):
address = netaddr.IPAddress('1.2.3.4')
fip = objects.FixedIP(instance_uuid=uuids.instance,
address=address,
virtual_interface_id=1000)
net = {'cidr': '24', 'id': 1, 'uuid': uuids.instance}
instance = objects.Instance(context=self.context)
instance.create()
vif = objects.VirtualInterface(context,
id=1000,
address='00:00:00:00:00:00',
instance_uuid=instance.uuid,
network_id=net['id'],
uuid=uuids.instance)
mock_fixedip_associate.return_value = fip
mock_add.return_value = vif
mock_instance_get.return_value = instance
mock_vif_get.return_value = None
with test.nested(
mock.patch.object(self.network, '_setup_network_on_host'),
mock.patch.object(self.network, 'instance_dns_manager'),
mock.patch.object(self.network,
'_do_trigger_security_group_members_refresh_for_instance')
) as (mock_setup_network, mock_dns_manager, mock_ignored):
self.network.allocate_fixed_ip(self.context, instance['uuid'],
net)
mock_add.assert_called_once_with(self.context, instance['uuid'],
net['id'])
self.assertEqual(fip.virtual_interface_id, vif.id)
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch.object(db, 'virtual_interface_get_by_instance_and_network',
                       return_value=None)
    @mock.patch('nova.objects.fixed_ip.FixedIP')
    def test_allocate_fixed_ip_add_vif_fails(self, mock_fixedip,
                                             mock_get_vif, mock_instance_get):
        """No FixedIP calls happen when creating the VIF fails."""
        # Tests that we don't try to do anything with fixed IPs if
        # _add_virtual_interface fails.
        instance = fake_instance.fake_instance_obj(self.context)
        mock_instance_get.return_value = instance
        network = {'cidr': '24', 'id': 1,
                   'uuid': '398399b3-f696-4859-8695-a6560e14cb02'}
        vif_error = exception.VirtualInterfaceMacAddressException()
        # mock out quotas because we don't care in this test
        with mock.patch.object(self.network, 'quotas_cls', objects.QuotasNoOp):
            with mock.patch.object(self.network, '_add_virtual_interface',
                                   side_effect=vif_error):
                self.assertRaises(
                    exception.VirtualInterfaceMacAddressException,
                    self.network.allocate_fixed_ip, self.context,
                    '9d2ee1e3-ffad-4e5f-81ff-c96dd97b0ee0', network)
        self.assertFalse(mock_fixedip.called, str(mock_fixedip.mock_calls))
class FlatDHCPNetworkTestCase(test.TestCase):
    """Tests for the FlatDHCP network manager."""

    REQUIRES_LOCKING = True

    def setUp(self):
        """Build a FlatDHCPManager plus admin and non-admin contexts."""
        super(FlatDHCPNetworkTestCase, self).setUp()
        self.useFixture(test.SampleNetworks())
        self.flags(use_local=True, group='conductor')
        self.network = network_manager.FlatDHCPManager(host=HOST)
        self.network.db = db
        self.context = context.RequestContext('testuser',
                                              fakes.FAKE_PROJECT_ID,
                                              is_admin=False)
        self.context_admin = context.RequestContext('testuser',
                                                    fakes.FAKE_PROJECT_ID,
                                                    is_admin=True)

    @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
    @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
    @mock.patch('nova.network.linux_net.iptables_manager._apply')
    def test_init_host_iptables_defer_apply(self, iptable_apply,
                                            floating_get_by_host,
                                            fixed_get_by_id):
        """init_host applies the iptables rules exactly once (deferred)."""
        def get_by_id(context, fixed_ip_id, **kwargs):
            # Map the two fake floating IPs to fixed IPs on a test bridge.
            net = objects.Network(bridge='testbridge',
                                  cidr='192.168.1.0/24')
            if fixed_ip_id == 1:
                return objects.FixedIP(address='192.168.1.4',
                                       network=net)
            elif fixed_ip_id == 2:
                return objects.FixedIP(address='192.168.1.5',
                                       network=net)

        def fake_apply():
            # Count how many times iptables rules are actually applied.
            fake_apply.count += 1

        fake_apply.count = 0
        ctxt = context.RequestContext('testuser', fakes.FAKE_PROJECT_ID,
                                      is_admin=True)
        float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
        float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
        float1._context = ctxt
        float2._context = ctxt

        iptable_apply.side_effect = fake_apply
        floating_get_by_host.return_value = [float1, float2]
        fixed_get_by_id.side_effect = get_by_id

        self.network.init_host()
        # Despite two floating IPs, the apply must be deferred to one call.
        self.assertEqual(1, fake_apply.count)
class VlanNetworkTestCase(test.TestCase):
REQUIRES_LOCKING = True
    def setUp(self):
        """Create a VlanManager plus admin and non-admin request contexts."""
        super(VlanNetworkTestCase, self).setUp()
        self.useFixture(test.SampleNetworks())
        self.network = network_manager.VlanManager(host=HOST)
        self.network.db = db
        self.context = context.RequestContext('testuser',
                                              fakes.FAKE_PROJECT_ID,
                                              is_admin=False)
        self.context_admin = context.RequestContext('testuser',
                                                    fakes.FAKE_PROJECT_ID,
                                                    is_admin=True)
def test_quota_driver_type(self):
self.assertEqual(objects.QuotasNoOp,
self.network.quotas_cls)
    def test_vpn_allocate_fixed_ip(self):
        """vpn=True allocates the VPN address as a reserved fixed IP."""
        self.mox.StubOutWithMock(db, 'fixed_ip_associate')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db,
                'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')

        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.1')
        # vpn=True must translate into a reserved=True association.
        db.fixed_ip_associate(mox.IgnoreArg(),
                              mox.IgnoreArg(),
                              mox.IgnoreArg(),
                              network_id=mox.IgnoreArg(),
                              reserved=True,
                              virtual_interface_id=vifs[0]['id']
                              ).AndReturn(fixed)
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.instance_get_by_uuid(mox.IgnoreArg(),
                                mox.IgnoreArg(),
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst(display_name=HOST,
                                                      uuid=FAKEUUID))
        self.mox.ReplayAll()
        network = objects.Network._from_db_object(
            self.context, objects.Network(),
            dict(test_network.fake_network, **networks[0]))
        network.vpn_private_address = '192.168.0.2'
        self.network.allocate_fixed_ip(self.context, FAKEUUID, network,
                                       vpn=True)
    def test_allocate_fixed_ip(self):
        """A plain allocation draws the address from the pool."""
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')

        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.1')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None,
                                   virtual_interface_id=vifs[0]['id']
                                   ).AndReturn(fixed)
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.instance_get_by_uuid(mox.IgnoreArg(),
                                mox.IgnoreArg(),
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst(display_name=HOST,
                                                      uuid=FAKEUUID))
        self.mox.ReplayAll()
        network = objects.Network._from_db_object(
            self.context, objects.Network(),
            dict(test_network.fake_network, **networks[0]))
        network.vpn_private_address = '192.168.0.2'
        self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
    @mock.patch('nova.network.manager.VlanManager._setup_network_on_host')
    @mock.patch('nova.network.manager.VlanManager.'
                '_validate_instance_zone_for_dns_domain')
    @mock.patch('nova.network.manager.VlanManager.'
                '_do_trigger_security_group_members_refresh_for_instance')
    @mock.patch('nova.network.manager.VlanManager._add_virtual_interface')
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    @mock.patch('nova.objects.VirtualInterface.get_by_instance_and_network')
    def test_allocate_fixed_ip_return_none(self, mock_get,
            mock_associate, mock_get_uuid, mock_add, mock_trigger,
            mock_validate, mock_setup):
        """A missing VIF (lookup returns None) triggers creation of one."""
        net = {'cidr': '24', 'id': 1, 'uuid': uuids.instance}
        fip = objects.FixedIP(instance_uuid=uuids.instance,
                              address=netaddr.IPAddress('1.2.3.4'),
                              virtual_interface_id=1)
        instance = objects.Instance(context=self.context)
        instance.create()
        vif = objects.VirtualInterface(self.context,
                                       id=1000,
                                       address='00:00:00:00:00:00',
                                       instance_uuid=instance.uuid,
                                       network_id=net['id'],
                                       uuid=uuids.instance)
        mock_associate.return_value = fip
        mock_add.return_value = vif
        # get_by_instance_and_network returning None means "no VIF yet".
        mock_get.return_value = None
        mock_get_uuid.return_value = instance
        mock_validate.return_value = False

        self.network.allocate_fixed_ip(self.context_admin, instance.uuid, net)

        mock_add.assert_called_once_with(self.context_admin, instance.uuid,
                                         net['id'])
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
                                                     mock_get):
        """A netaddr.IPAddress is stringified before FixedIP.associate."""
        mock_associate.side_effect = test.TestingException
        instance = objects.Instance(context=self.context)
        instance.create()
        mock_get.return_value = instance
        self.assertRaises(test.TestingException,
                          self.network.allocate_fixed_ip,
                          self.context, instance.uuid,
                          {'cidr': '24', 'id': 1, 'uuid': uuids.instance},
                          address=netaddr.IPAddress('1.2.3.4'))
        # associate must receive the plain string form of the address
        mock_associate.assert_called_once_with(self.context,
                                               '1.2.3.4',
                                               instance.uuid,
                                               1,
                                               vif_id=1)
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    def test_allocate_fixed_ip_passes_string_address_vpn(self, mock_associate,
                                                         mock_get):
        """The vpn_private_address is stringified and reserved on associate."""
        mock_associate.side_effect = test.TestingException
        instance = objects.Instance(context=self.context)
        instance.create()
        mock_get.return_value = instance
        self.assertRaises(test.TestingException,
                          self.network.allocate_fixed_ip,
                          self.context, instance.uuid,
                          {'cidr': '24', 'id': 1, 'uuid': uuids.instance,
                           'vpn_private_address': netaddr.IPAddress('1.2.3.4')
                           }, vpn=1)
        # VPN allocation must reserve the address and pass it as a string.
        mock_associate.assert_called_once_with(self.context,
                                               '1.2.3.4',
                                               instance.uuid,
                                               1, reserved=True,
                                               vif_id=1)
    @mock.patch.object(db, 'virtual_interface_get_by_instance_and_network',
                       return_value=None)
    @mock.patch('nova.objects.fixed_ip.FixedIP')
    def test_allocate_fixed_ip_add_vif_fails(self, mock_fixedip,
                                             mock_get_vif):
        """No FixedIP calls happen when creating the VIF fails."""
        # Tests that we don't try to do anything with fixed IPs if
        # _add_virtual_interface fails.
        vif_error = exception.VirtualInterfaceMacAddressException()
        with mock.patch.object(self.network, '_add_virtual_interface',
                               side_effect=vif_error):
            self.assertRaises(exception.VirtualInterfaceMacAddressException,
                              self.network.allocate_fixed_ip, self.context,
                              '9d2ee1e3-ffad-4e5f-81ff-c96dd97b0ee0',
                              networks[0])
        self.assertFalse(mock_fixedip.called, str(mock_fixedip.mock_calls))
def test_create_networks_too_big(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=4094, vlan_start=1)
def test_create_networks_too_many(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=100, vlan_start=1,
cidr='192.168.0.1/24', network_size=100)
def test_duplicate_vlan_raises(self):
# VLAN 100 is already used and we force the network to be created
# in that vlan (vlan=100).
self.assertRaises(exception.DuplicateVlan,
self.network.create_networks,
self.context_admin, label="fake", num_networks=1,
vlan=100, cidr='192.168.0.1/24', network_size=100)
def test_vlan_start(self):
# VLAN 100 and 101 are used, so this network shoud be created in 102
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(102, networks[0]["vlan"])
def test_vlan_start_multiple(self):
# VLAN 100 and 101 are used, so these networks shoud be created in 102
# and 103
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=2,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(102, networks[0]["vlan"])
self.assertEqual(103, networks[1]["vlan"])
def test_vlan_start_used(self):
# VLAN 100 and 101 are used, but vlan_start=99.
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=99, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(102, networks[0]["vlan"])
def test_vlan_parameter(self):
# vlan parameter could not be greater than 4094
exc = self.assertRaises(ValueError,
self.network.create_networks,
self.context_admin, label="fake",
num_networks=1,
vlan=4095, cidr='192.168.0.1/24')
error_msg = 'The vlan number cannot be greater than 4094'
self.assertIn(error_msg, six.text_type(exc))
# vlan parameter could not be less than 1
exc = self.assertRaises(ValueError,
self.network.create_networks,
self.context_admin, label="fake",
num_networks=1,
vlan=0, cidr='192.168.0.1/24')
error_msg = 'The vlan number cannot be less than 1'
self.assertIn(error_msg, six.text_type(exc))
def test_vlan_be_integer(self):
# vlan must be an integer
exc = self.assertRaises(ValueError,
self.network.create_networks,
self.context_admin, label="fake",
num_networks=1,
vlan='fake', cidr='192.168.0.1/24')
error_msg = 'vlan must be an integer'
self.assertIn(error_msg, six.text_type(exc))
def test_vlan_multiple_without_dhcp_server(self):
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=2,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual("192.168.3.1", networks[0]["dhcp_server"])
self.assertEqual("192.168.3.129", networks[1]["dhcp_server"])
def test_vlan_multiple_with_dhcp_server(self):
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=2,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100, dhcp_server='192.168.3.1')
self.assertEqual("192.168.3.1", networks[0]["dhcp_server"])
self.assertEqual("192.168.3.1", networks[1]["dhcp_server"])
    def test_validate_networks(self):
        """Valid (uuid, address) pairs pass validation."""
        self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")

        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100')]

        # Both addresses resolve to unassigned (instance_uuid=None)
        # fixed IPs on their respective networks.
        db_fixed1 = dict(test_fixed_ip.fake_fixed_ip,
                         network_id=networks[1]['id'],
                         network=dict(test_network.fake_network,
                                      **networks[1]),
                         instance_uuid=None)
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(db_fixed1)
        db_fixed2 = dict(test_fixed_ip.fake_fixed_ip,
                         network_id=networks[0]['id'],
                         network=dict(test_network.fake_network,
                                      **networks[0]),
                         instance_uuid=None)
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(db_fixed2)

        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    def test_validate_networks_none_requested_networks(self):
        """validate_networks accepts None as the requested-networks arg."""
        self.network.validate_networks(self.context, None)
    def test_validate_networks_empty_requested_networks(self):
        """An empty requested-networks list validates cleanly."""
        requested_networks = []
        self.mox.ReplayAll()

        self.network.validate_networks(self.context, requested_networks)
    def test_validate_networks_invalid_fixed_ip(self):
        """Malformed fixed IP addresses raise FixedIpInvalid."""
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100.1'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100.1')]
        self.mox.ReplayAll()

        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks, self.context,
                          requested_networks)
    def test_validate_networks_empty_fixed_ip(self):
        """Empty-string fixed IP addresses raise FixedIpInvalid."""
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')]
        self.mox.ReplayAll()

        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks,
                          self.context, requested_networks)
    def test_validate_networks_none_fixed_ip(self):
        """A None fixed IP means no specific address and is accepted."""
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]
        self.mox.ReplayAll()

        self.network.validate_networks(self.context, requested_networks)
def test_floating_ip_owned_by_project(self):
ctxt = context.RequestContext('testuser', fakes.FAKE_PROJECT_ID,
is_admin=False)
# raises because floating_ip project_id is None
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=None)
self.assertRaises(exception.Forbidden,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# raises because floating_ip project_id is not equal to ctxt project_id
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=uuids.non_existent_uuid)
self.assertRaises(exception.Forbidden,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# does not raise (floating ip is owned by ctxt project)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=ctxt.project_id)
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
ctxt = context.RequestContext(None, None,
is_admin=True)
# does not raise (ctxt is admin)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=None)
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
# does not raise (ctxt is admin)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=fakes.FAKE_PROJECT_ID)
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
    def test_allocate_floating_ip(self):
        """Allocation succeeds when the requested pool exists."""
        ctxt = context.RequestContext('testuser', fakes.FAKE_PROJECT_ID,
                                      is_admin=False)

        self.stubs.Set(self.network, '_floating_ip_pool_exists',
                       lambda _x, _y: True)

        def fake_allocate_address(*args, **kwargs):
            # Pretend the DB handed out an address owned by the caller.
            return {'address': '10.0.0.1', 'project_id': ctxt.project_id}

        self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
                       fake_allocate_address)

        self.network.allocate_floating_ip(ctxt, ctxt.project_id)
    @mock.patch('nova.quota.QUOTAS.reserve')
    @mock.patch('nova.quota.QUOTAS.commit')
    def test_deallocate_floating_ip(self, mock_commit, mock_reserve):
        """Deallocation fails while associated, then succeeds and commits
        the quota reservation.
        """
        ctxt = context.RequestContext('testuser', fakes.FAKE_PROJECT_ID,
                                      is_admin=False)

        def fake1(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip)

        # floating IP still attached to a fixed IP
        def fake2(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1', fixed_ip_id=1)

        # floating IP free to deallocate, owned by the caller
        def fake3(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1', fixed_ip_id=None,
                        project_id=ctxt.project_id)

        self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
        self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)

        # this time should raise because floating ip is associated to fixed_ip
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
        self.assertRaises(exception.FloatingIpAssociated,
                          self.network.deallocate_floating_ip,
                          ctxt,
                          mox.IgnoreArg())

        mock_reserve.return_value = 'reserve'
        # this time should not raise
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
        self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
        mock_commit.assert_called_once_with(ctxt, 'reserve',
                                            project_id=fakes.FAKE_PROJECT_ID)
    @mock.patch('nova.db.fixed_ip_get')
    def test_associate_floating_ip(self, fixed_get):
        """Cover associate_floating_ip: missing interface, already
        associated, remote-host dispatch and local-host dispatch.
        """
        ctxt = context.RequestContext('testuser', fakes.FAKE_PROJECT_ID,
                                      is_admin=False)

        def fake1(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        network=test_network.fake_network)

        # floating ip that's already associated
        def fake2(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=1)

        # floating ip that isn't associated
        def fake3(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=None)

        # fixed ip with remote host
        def fake4(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=123)

        def fake4_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False, host='jibberjabber')

        # fixed ip with local host
        def fake5(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=1234)

        def fake5_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False, host='testhost')

        def fake6(ctxt, method, **kwargs):
            # stands in for the RPC client's call(): records remote dispatch
            self.local = False

        def fake7(*args, **kwargs):
            # stands in for _associate_floating_ip: records local dispatch
            self.local = True

        def fake8(*args, **kwargs):
            raise processutils.ProcessExecutionError('',
                    'Cannot find device "em0"\n')

        def fake9(*args, **kwargs):
            raise test.TestingException()

        # raises because interface doesn't exist
        self.stubs.Set(self.network.db,
                       'floating_ip_fixed_ip_associate',
                       fake1)
        self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
        self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8)
        self.assertRaises(exception.NoFloatingIpInterface,
                          self.network._associate_floating_ip,
                          ctxt,
                          '1.2.3.4',
                          '1.2.3.5',
                          mox.IgnoreArg(),
                          mox.IgnoreArg())

        self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)

        # raises because floating_ip is already associated to a fixed_ip
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
        self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)

        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address='1.2.3.4',
                                      instance_uuid=uuids.instance,
                                      network=test_network.fake_network)

        # doesn't raise because we exit early if the address is the same
        self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), '1.2.3.4')

        # raises because we call disassociate which is mocked
        self.assertRaises(test.TestingException,
                          self.network.associate_floating_ip,
                          ctxt,
                          mox.IgnoreArg(),
                          'new')

        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)

        # does not raise and makes call remotely
        self.local = True
        self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
        self.stubs.Set(self.network.db, 'network_get', fake4_network)
        self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
                       lambda **kw: self.network.network_rpcapi.client)
        self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
        self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
                                           mox.IgnoreArg())
        self.assertFalse(self.local)

        # does not raise and makes call locally
        self.local = False
        self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
        self.stubs.Set(self.network.db, 'network_get', fake5_network)
        self.stubs.Set(self.network, '_associate_floating_ip', fake7)
        self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
                                           mox.IgnoreArg())
        self.assertTrue(self.local)
    def test_add_floating_ip_nat_before_bind(self):
        """The NAT forward rule must be ensured before the IP is bound."""
        # Tried to verify order with documented mox record/verify
        # functionality, but it doesn't seem to work since I can't make it
        # fail. I'm using stubs and a flag for now, but if this mox feature
        # can be made to work, it would be a better way to test this.
        #
        # self.mox.StubOutWithMock(self.network.driver,
        #                          'ensure_floating_forward')
        # self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip')
        #
        # self.network.driver.ensure_floating_forward(mox.IgnoreArg(),
        #                                             mox.IgnoreArg(),
        #                                             mox.IgnoreArg(),
        #                                             mox.IgnoreArg())
        # self.network.driver.bind_floating_ip(mox.IgnoreArg(),
        #                                      mox.IgnoreArg())
        # self.mox.ReplayAll()

        nat_called = [False]

        def fake_nat(*args, **kwargs):
            nat_called[0] = True

        def fake_bind(*args, **kwargs):
            # bind must observe that NAT was already set up
            self.assertTrue(nat_called[0])

        self.stubs.Set(self.network.driver,
                       'ensure_floating_forward',
                       fake_nat)
        self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind)
        self.network.l3driver.add_floating_ip('fakefloat',
                                              'fakefixed',
                                              'fakeiface',
                                              'fakenet')
    @mock.patch('nova.db.floating_ip_get_all_by_host')
    @mock.patch('nova.db.fixed_ip_get')
    def _test_floating_ip_init_host(self, fixed_get, floating_get,
                                    public_interface, expected_arg):
        """Helper: init_host_floating_ips re-plumbs only the floating IP
        whose fixed IP still resolves, using *expected_arg* as interface.
        """
        floating_get.return_value = [
            dict(test_floating_ip.fake_floating_ip,
                 interface='foo',
                 address='1.2.3.4'),
            dict(test_floating_ip.fake_floating_ip,
                 interface='fakeiface',
                 address='1.2.3.5',
                 fixed_ip_id=1),
            dict(test_floating_ip.fake_floating_ip,
                 interface='bar',
                 address='1.2.3.6',
                 fixed_ip_id=2),
        ]

        def fixed_ip_get(_context, fixed_ip_id, get_network):
            # Only fixed IP id 1 resolves; id 2 is gone.
            if fixed_ip_id == 1:
                return dict(test_fixed_ip.fake_fixed_ip,
                            address='1.2.3.4',
                            network=test_network.fake_network)
            raise exception.FixedIpNotFound(id=fixed_ip_id)
        fixed_get.side_effect = fixed_ip_get

        self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
        self.flags(public_interface=public_interface)
        # Exactly one add_floating_ip call is expected, for 1.2.3.5.
        self.network.l3driver.add_floating_ip(netaddr.IPAddress('1.2.3.5'),
                                              netaddr.IPAddress('1.2.3.4'),
                                              expected_arg,
                                              mox.IsA(objects.Network))
        self.mox.ReplayAll()
        self.network.init_host_floating_ips()
        self.mox.UnsetStubs()
        self.mox.VerifyAll()
    def test_floating_ip_init_host_without_public_interface(self):
        """No public_interface flag: the per-IP interface is used."""
        self._test_floating_ip_init_host(public_interface='',
                                         expected_arg='fakeiface')
    def test_floating_ip_init_host_with_public_interface(self):
        """A configured public_interface overrides the per-IP interface."""
        self._test_floating_ip_init_host(public_interface='fooiface',
                                         expected_arg='fooiface')
    def test_disassociate_floating_ip(self):
        """Cover disassociate_floating_ip: not-associated error, remote and
        local dispatch, and the auto-assigned guard.
        """
        ctxt = context.RequestContext('testuser', fakes.FAKE_PROJECT_ID,
                                      is_admin=False)

        def fake1(*args, **kwargs):
            pass

        # floating ip that isn't associated
        def fake2(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=None)

        # floating ip that is associated
        def fake3(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=1,
                        project_id=ctxt.project_id)

        # fixed ip with remote host
        def fake4(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=123)

        def fake4_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False,
                        host='jibberjabber')

        # fixed ip with local host
        def fake5(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=1234)

        def fake5_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False, host='testhost')

        def fake6(ctxt, method, **kwargs):
            # stands in for the RPC client's call(): records remote dispatch
            self.local = False

        def fake7(*args, **kwargs):
            # stands in for _disassociate_floating_ip: records local dispatch
            self.local = True

        # associated AND auto-assigned floating ip
        def fake8(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=1,
                        auto_assigned=True,
                        project_id=ctxt.project_id)

        self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)

        # raises because floating_ip is not associated to a fixed_ip
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
        self.assertRaises(exception.FloatingIpNotAssociated,
                          self.network.disassociate_floating_ip,
                          ctxt,
                          mox.IgnoreArg())

        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)

        # does not raise and makes call remotely
        self.local = True
        self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
        self.stubs.Set(self.network.db, 'network_get', fake4_network)
        self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
                       lambda **kw: self.network.network_rpcapi.client)
        self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
        self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
        self.assertFalse(self.local)

        # does not raise and makes call locally
        self.local = False
        self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
        self.stubs.Set(self.network.db, 'network_get', fake5_network)
        self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
        self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
        self.assertTrue(self.local)

        # raises because auto_assigned floating IP cannot be disassociated
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8)
        self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
                          self.network.disassociate_floating_ip,
                          ctxt,
                          mox.IgnoreArg())
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
    """add_fixed_ip_to_instance allocates a pool address on the vif and
    refreshes the instance's network info (no VPN / requested networks).
    """
    # security-group refresh is out of scope here; make it a no-op
    self.stubs.Set(self.network,
        '_do_trigger_security_group_members_refresh_for_instance',
        lambda *a, **kw: None)
    self.mox.StubOutWithMock(db, 'network_get')
    self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
    self.mox.StubOutWithMock(db,
                             'virtual_interface_get_by_instance_and_network')
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')

    # recorded mox expectations, in call order:
    db.virtual_interface_get_by_instance_and_network(
        mox.IgnoreArg(),
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
    fixed = dict(test_fixed_ip.fake_fixed_ip,
                 address='192.168.0.101')
    db.fixed_ip_associate_pool(mox.IgnoreArg(),
                               mox.IgnoreArg(),
                               instance_uuid=mox.IgnoreArg(),
                               host=None,
                               virtual_interface_id=vifs[0]['id']
                               ).AndReturn(fixed)
    db.network_get(mox.IgnoreArg(),
                   mox.IgnoreArg(),
                   project_only=mox.IgnoreArg()
                   ).AndReturn(dict(test_network.fake_network,
                                    **networks[0]))
    db.instance_get_by_uuid(mox.IgnoreArg(),
                            mox.IgnoreArg(),
                            columns_to_join=['info_cache',
                                             'security_groups']
                            ).AndReturn(fake_inst(display_name=HOST,
                                                  uuid=FAKEUUID))
    self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                      mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                          networks[0]['id'])
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
def test_ip_association_and_allocation_of_other_project(self, net_get,
                                                        fixed_get):
    """Makes sure that we cannot deallocate or disassociate
    a public IP of another project.
    """
    net_get.return_value = dict(test_network.fake_network,
                                **networks[1])
    context1 = context.RequestContext('user', fakes.FAKE_PROJECT_ID)
    context2 = context.RequestContext('user', 'project2')
    float_ip = db.floating_ip_create(context1.elevated(),
                                     {'address': '1.2.3.4',
                                      'project_id': context1.project_id})
    float_addr = float_ip['address']
    instance = db.instance_create(context1,
                                  {'project_id': fakes.FAKE_PROJECT_ID})
    fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
                                          1, instance['uuid']).address
    fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                  address=fix_addr,
                                  instance_uuid=instance.uuid,
                                  network=dict(test_network.fake_network,
                                               **networks[1]))

    # Associate the IP with non-admin user context
    self.assertRaises(exception.Forbidden,
                      self.network.associate_floating_ip,
                      context2,
                      float_addr,
                      fix_addr)

    # Deallocate address from other project
    self.assertRaises(exception.Forbidden,
                      self.network.deallocate_floating_ip,
                      context2,
                      float_addr)

    # Now Associates the address to the actual project
    self.network.associate_floating_ip(context1, float_addr, fix_addr)

    # Now try dis-associating from other project
    self.assertRaises(exception.Forbidden,
                      self.network.disassociate_floating_ip,
                      context2,
                      float_addr)

    # Clean up the ip addresses
    self.network.disassociate_floating_ip(context1, float_addr)
    self.network.deallocate_floating_ip(context1, float_addr)
    self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
    db.floating_ip_destroy(context1.elevated(), float_addr)
    db.fixed_ip_disassociate(context1.elevated(), fix_addr)
@mock.patch('nova.network.rpcapi.NetworkAPI.release_dhcp')
@mock.patch('nova.db.virtual_interface_get')
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_deallocate_fixed(self, fixed_update, net_get, fixed_get,
                          vif_get, release_dhcp):
    """Verify that release is called properly.

    Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't regress.
    """
    net_get.return_value = dict(test_network.fake_network,
                                **networks[1])
    vif_get.return_value = vifs[0]
    context1 = context.RequestContext('user', fakes.FAKE_PROJECT_ID)
    instance = db.instance_create(context1,
                                  {'project_id': fakes.FAKE_PROJECT_ID})
    elevated = context1.elevated()
    fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
    fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                  address=fix_addr.address,
                                  instance_uuid=instance.uuid,
                                  allocated=True,
                                  virtual_interface_id=3,
                                  network=dict(test_network.fake_network,
                                               **networks[1]))
    # force_dhcp_release=True is what triggers the release_dhcp RPC
    self.flags(force_dhcp_release=True)
    self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
    fixed_update.assert_called_once_with(context1, fix_addr.address,
                                         {'allocated': False})
    release_dhcp.assert_called_once_with(context1, None,
                                         networks[1]['bridge'],
                                         fix_addr.address,
                                         'DE:AD:BE:EF:00:00')
@mock.patch.object(linux_net, 'release_dhcp')
@mock.patch('nova.network.rpcapi.NetworkAPI.release_dhcp')
@mock.patch('nova.db.virtual_interface_get')
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_deallocate_fixed_rpc_pinned(self, fixed_update, net_get,
                                     fixed_get, vif_get,
                                     release_dhcp,
                                     net_release_dhcp):
    """Ensure that if the RPC call to release_dhcp raises a
    RPCPinnedToOldVersion, we fall back to the previous behaviour of
    calling release_dhcp in the local linux_net driver. In the previous
    test, release_dhcp was mocked to call the driver, since this is what
    happens on a successful RPC call. In this test, we mock it to raise,
    but the expected behaviour is exactly the same - namely that
    release_dhcp is called in the linux_net driver, which is why the two
    tests are otherwise identical.
    """
    net_get.return_value = dict(test_network.fake_network,
                                **networks[1])
    vif_get.return_value = vifs[0]
    # simulate an RPC API pinned below the version adding release_dhcp
    release_dhcp.side_effect = exception.RPCPinnedToOldVersion()
    context1 = context.RequestContext('user', fakes.FAKE_PROJECT_ID)
    instance = db.instance_create(context1,
                                  {'project_id': fakes.FAKE_PROJECT_ID})
    elevated = context1.elevated()
    fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
    fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                  address=fix_addr.address,
                                  instance_uuid=instance.uuid,
                                  allocated=True,
                                  virtual_interface_id=3,
                                  network=dict(test_network.fake_network,
                                               **networks[1]))
    self.flags(force_dhcp_release=True)
    self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
    # fallback path: the local linux_net driver performed the release
    net_release_dhcp.assert_called_once_with(networks[1]['bridge'],
                                             fix_addr.address,
                                             'DE:AD:BE:EF:00:00')
    fixed_update.assert_called_once_with(context1, fix_addr.address,
                                         {'allocated': False})
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def _deallocate_fixed_with_dhcp(self, mock_dev_exists, fixed_update,
                                net_get, fixed_get):
    """Common body for the dhcp-release tests below.

    mock_dev_exists is the (already patched) linux_net.device_exists
    mock passed in by the caller; its return_value decides whether the
    dhcp_release command is expected to run.
    """
    net_get.return_value = dict(test_network.fake_network,
                                **networks[1])

    def vif_get(_context, _vif_id):
        return vifs[0]

    # RPC release_dhcp delegates to the local linux_net driver, as on a
    # successful RPC call
    def release_dhcp(self, context, instance, dev, address, vif_address):
        linux_net.release_dhcp(dev, address, vif_address)

    with test.nested(
        mock.patch.object(network_rpcapi.NetworkAPI, 'release_dhcp',
                          release_dhcp),
        mock.patch.object(db, 'virtual_interface_get', vif_get),
        mock.patch.object(
            utils, 'execute',
            side_effect=processutils.ProcessExecutionError()),
    ) as (release_dhcp, _vif_get, _execute):
        context1 = context.RequestContext('user', fakes.FAKE_PROJECT_ID)
        instance = db.instance_create(context1,
                                      {'project_id': fakes.FAKE_PROJECT_ID})
        elevated = context1.elevated()
        fix_addr = db.fixed_ip_associate_pool(elevated, 1,
                                              instance['uuid'])
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address=fix_addr.address,
                                      instance_uuid=instance.uuid,
                                      allocated=True,
                                      virtual_interface_id=3,
                                      network=dict(
                                          test_network.fake_network,
                                          **networks[1]))
        self.flags(force_dhcp_release=True)
        self.network.deallocate_fixed_ip(context1, fix_addr.address,
                                         'fake')
        fixed_update.assert_called_once_with(context1, fix_addr.address,
                                             {'allocated': False})
        mock_dev_exists.assert_called_once_with(networks[1]['bridge'])
        # dhcp_release only runs when the bridge device exists
        if mock_dev_exists.return_value:
            _execute.assert_called_once_with('dhcp_release',
                                             networks[1]['bridge'],
                                             fix_addr.address,
                                             'DE:AD:BE:EF:00:00',
                                             run_as_root=True)
@mock.patch('nova.network.linux_net.device_exists', return_value=True)
def test_deallocate_fixed_with_dhcp(self, mock_dev_exists):
    """Bridge device present: dhcp_release must be invoked."""
    run_scenario = self._deallocate_fixed_with_dhcp
    run_scenario(mock_dev_exists)
@mock.patch('nova.network.linux_net.device_exists', return_value=False)
def test_deallocate_fixed_without_dhcp(self, mock_dev_exists):
    """Bridge device absent: dhcp_release must be skipped."""
    run_scenario = self._deallocate_fixed_with_dhcp
    run_scenario(mock_dev_exists)
def test_deallocate_fixed_deleted(self):
    """Verify doesn't deallocate deleted fixed_ip from deleted network.

    A fixed IP row is soft-deleted and then re-created against a newly
    created network; the stubbed teardown raises when it sees network
    id 0 so the test can observe which network deallocation targeted.
    """
    def teardown_network_on_host(_context, network):
        if network['id'] == 0:
            raise test.TestingException()

    self.stubs.Set(self.network, '_teardown_network_on_host',
                   teardown_network_on_host)
    context1 = context.RequestContext('user', fakes.FAKE_PROJECT_ID)
    elevated = context1.elevated()
    instance = db.instance_create(context1,
                                  {'project_id': fakes.FAKE_PROJECT_ID})
    network = db.network_create_safe(elevated, networks[0])
    _fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
    fix_addr = _fix_addr.address
    # soft-delete the fixed ip, then re-create it on the new network
    db.fixed_ip_update(elevated, fix_addr, {'deleted': 1})
    elevated.read_deleted = 'yes'
    delfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
    values = {'address': fix_addr,
              'network_id': network.id,
              'instance_uuid': delfixed['instance_uuid']}
    db.fixed_ip_create(elevated, values)
    # Fix: removed a dead store — the original set read_deleted to 'no'
    # and immediately back to 'yes' with nothing in between.
    elevated.read_deleted = 'yes'
    deallocate = self.network.deallocate_fixed_ip
    self.assertRaises(test.TestingException, deallocate, context1,
                      fix_addr, 'fake')
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get):
    """Verify that deallocate doesn't raise when no vif is returned.

    Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't regress.
    """
    net_get.return_value = dict(test_network.fake_network,
                                **networks[1])

    # the db has no virtual interface for this fixed IP
    def vif_get(_context, _vif_id):
        return None

    self.stub_out('nova.db.virtual_interface_get', vif_get)
    context1 = context.RequestContext('user', fakes.FAKE_PROJECT_ID)
    instance = db.instance_create(context1,
                                  {'project_id': fakes.FAKE_PROJECT_ID})
    elevated = context1.elevated()
    fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
    fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                  address=fix_addr.address,
                                  allocated=True,
                                  virtual_interface_id=3,
                                  instance_uuid=instance.uuid,
                                  network=dict(test_network.fake_network,
                                               **networks[1]))
    self.flags(force_dhcp_release=True)
    fixed_update.return_value = fixed_get.return_value
    self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
    fixed_update.assert_called_once_with(context1, fix_addr.address,
                                         {'allocated': False})
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_fixed_ip_cleanup_fail(self, fixed_update, net_get, fixed_get):
    """Verify IP is not deallocated if the security group refresh fails."""
    net_get.return_value = dict(test_network.fake_network,
                                **networks[1])
    context1 = context.RequestContext('user', fakes.FAKE_PROJECT_ID)
    instance = db.instance_create(context1,
                                  {'project_id': fakes.FAKE_PROJECT_ID})
    elevated = context1.elevated()
    fix_addr = objects.FixedIP.associate_pool(elevated, 1,
                                              instance['uuid'])

    # make the security-group refresh blow up
    def fake_refresh(instance_uuid):
        raise test.TestingException()

    self.stubs.Set(self.network,
        '_do_trigger_security_group_members_refresh_for_instance',
        fake_refresh)
    fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                  address=fix_addr.address,
                                  allocated=True,
                                  virtual_interface_id=3,
                                  instance_uuid=instance.uuid,
                                  network=dict(test_network.fake_network,
                                               **networks[1]))
    self.assertRaises(test.TestingException,
                      self.network.deallocate_fixed_ip,
                      context1, str(fix_addr.address), 'fake')
    # the failure must abort before the db update happens
    self.assertFalse(fixed_update.called)
def test_get_networks_by_uuids_ordering(self):
    """Networks come back in requested-uuid order, not db order."""
    self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
    requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                          'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
    db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg()).AndReturn(
        [dict(test_network.fake_network, **net)
         for net in networks])
    self.mox.ReplayAll()
    res = self.network._get_networks_by_uuids(self.context,
                                              requested_networks)
    # 'bbbb...' was requested first, so its network must be first
    self.assertEqual(1, res[0]['id'])
    self.assertEqual(0, res[1]['id'])
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
@mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
@mock.patch('nova.network.linux_net.iptables_manager._apply')
def test_init_host_iptables_defer_apply(self, iptable_apply,
                                        floating_get_by_host,
                                        fixed_get_by_id):
    """init_host batches iptables work: _apply runs exactly once even
    though two floating IPs are set up."""
    def get_by_id(context, fixed_ip_id, **kwargs):
        net = objects.Network(bridge='testbridge',
                              cidr='192.168.1.0/24')
        if fixed_ip_id == 1:
            return objects.FixedIP(address='192.168.1.4',
                                   network=net)
        elif fixed_ip_id == 2:
            return objects.FixedIP(address='192.168.1.5',
                                   network=net)

    # count calls on the function object itself
    def fake_apply():
        fake_apply.count += 1
    fake_apply.count = 0

    ctxt = context.RequestContext('testuser',
                                  fakes.FAKE_PROJECT_ID,
                                  is_admin=True)
    float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
    float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
    float1._context = ctxt
    float2._context = ctxt

    iptable_apply.side_effect = fake_apply
    floating_get_by_host.return_value = [float1, float2]
    fixed_get_by_id.side_effect = get_by_id

    self.network.init_host()
    self.assertEqual(1, fake_apply.count)
class _TestDomainObject(object):
def __init__(self, **kwargs):
for k, v in six.iteritems(kwargs):
self.__setattr__(k, v)
class CommonNetworkTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
    """Per-test setup: fresh request context and rfc2462 ipv6 backend."""
    super(CommonNetworkTestCase, self).setUp()
    self.context = context.RequestContext('fake', 'fake')
    self.flags(ipv6_backend='rfc2462')
    ipv6.reset_backend()
def test_validate_instance_zone_for_dns_domain(self):
    """An instance whose AZ matches the dns domain's AZ validates True."""
    domain = 'example.com'
    az = 'test_az'
    domains = {
        domain: _TestDomainObject(
            domain=domain,
            availability_zone=az)}

    def dnsdomain_get(context, instance_domain):
        return domains.get(instance_domain)

    self.stub_out('nova.db.dnsdomain_get', dnsdomain_get)
    fake_instance = {'uuid': FAKEUUID,
                     'availability_zone': az}
    manager = network_manager.NetworkManager()
    res = manager._validate_instance_zone_for_dns_domain(self.context,
                                                         fake_instance)
    self.assertTrue(res)
def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None,
                          extra_reserved=None, bottom_reserved=0,
                          top_reserved=0):
    """No-op stand-in for NetworkManager._create_fixed_ips."""
    return None
def test_get_instance_nw_info_client_exceptions(self):
    """InstanceNotFound from the db surfaces as ExpectedException."""
    manager = network_manager.NetworkManager()
    self.mox.StubOutWithMock(manager.db,
                             'fixed_ip_get_by_instance')
    manager.db.fixed_ip_get_by_instance(
        self.context, FAKEUUID).AndRaise(exception.InstanceNotFound(
            instance_id=FAKEUUID))
    self.mox.ReplayAll()
    self.assertRaises(messaging.ExpectedException,
                      manager.get_instance_nw_info,
                      self.context, FAKEUUID, 'fake_rxtx_factor', HOST)
@mock.patch('nova.db.instance_get')
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_deallocate_for_instance_passes_host_info(self, fixed_get,
                                                  instance_get):
    """deallocate_for_instance forwards (ctx, address, host) to
    deallocate_fixed_ip for each fixed IP of the instance."""
    manager = fake_network.FakeNetworkManager()
    db = manager.db
    instance_get.return_value = fake_inst(uuid=uuids.non_existent_uuid)
    db.virtual_interface_delete_by_instance = lambda _x, _y: None
    ctx = context.RequestContext('igonre', 'igonre')
    fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
                                   address='1.2.3.4',
                                   network_id=123)]
    manager.deallocate_for_instance(
        ctx, instance=objects.Instance._from_db_object(self.context,
                                                       objects.Instance(),
                                                       instance_get.return_value))
    self.assertEqual([
        (ctx, '1.2.3.4', 'fake-host')
    ], manager.deallocate_fixed_ip_calls)
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_deallocate_for_instance_passes_host_info_with_update_dns_entries(
        self, fixed_get):
    """With update_dns_entries=True an update_dns RPC is also sent for
    the affected network ids."""
    self.flags(update_dns_entries=True)
    manager = fake_network.FakeNetworkManager()
    db = manager.db
    db.virtual_interface_delete_by_instance = lambda _x, _y: None
    ctx = context.RequestContext('igonre', 'igonre')
    fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
                                   address='1.2.3.4',
                                   network_id=123)]
    with mock.patch.object(manager.network_rpcapi,
                           'update_dns') as mock_update_dns:
        manager.deallocate_for_instance(
            ctx, instance=fake_instance.fake_instance_obj(ctx))
        mock_update_dns.assert_called_once_with(ctx, ['123'])
    self.assertEqual([
        (ctx, '1.2.3.4', 'fake-host')
    ], manager.deallocate_fixed_ip_calls)
def test_deallocate_for_instance_with_requested_networks(self):
    """Only fixed IPs named in requested_networks are deallocated; the
    entry with a None address produces no deallocation call."""
    manager = fake_network.FakeNetworkManager()
    db = manager.db
    db.virtual_interface_delete_by_instance = mock.Mock()
    ctx = context.RequestContext('igonre', 'igonre')
    requested_networks = objects.NetworkRequestList.from_tuples(
        [('123', '1.2.3.4'), ('123', '4.3.2.1'), ('123', None)])
    manager.deallocate_for_instance(
        ctx,
        instance=fake_instance.fake_instance_obj(ctx),
        requested_networks=requested_networks)
    self.assertEqual([
        (ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host')
    ], manager.deallocate_fixed_ip_calls)
def test_deallocate_for_instance_with_update_dns_entries(self):
    """requested_networks plus update_dns_entries=True: both IPs are
    deallocated and one update_dns RPC is sent for the network."""
    self.flags(update_dns_entries=True)
    manager = fake_network.FakeNetworkManager()
    db = manager.db
    db.virtual_interface_delete_by_instance = mock.Mock()
    ctx = context.RequestContext('igonre', 'igonre')
    requested_networks = objects.NetworkRequestList.from_tuples(
        [('123', '1.2.3.4'), ('123', '4.3.2.1')])
    with mock.patch.object(manager.network_rpcapi,
                           'update_dns') as mock_update_dns:
        manager.deallocate_for_instance(
            ctx,
            instance=fake_instance.fake_instance_obj(ctx),
            requested_networks=requested_networks)
        mock_update_dns.assert_called_once_with(ctx, ['123'])
    self.assertEqual([
        (ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host')
    ], manager.deallocate_fixed_ip_calls)
@mock.patch('nova.db.fixed_ip_get_by_instance')
@mock.patch('nova.db.fixed_ip_disassociate')
def test_remove_fixed_ip_from_instance(self, disassociate, get):
    """Removing a fixed IP deallocates it and disassociates it in the db."""
    manager = fake_network.FakeNetworkManager()
    get.return_value = [
        dict(test_fixed_ip.fake_fixed_ip, **x)
        for x in manager.db.fixed_ip_get_by_instance(None,
                                                     FAKEUUID)]
    manager.remove_fixed_ip_from_instance(self.context, FAKEUUID,
                                          HOST,
                                          '10.0.0.1')
    self.assertEqual('10.0.0.1', manager.deallocate_called)
    disassociate.assert_called_once_with(self.context, '10.0.0.1')
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_remove_fixed_ip_from_instance_bad_input(self, get):
    """No fixed IPs on the instance: removal raises not-found."""
    get.return_value = []
    mgr = fake_network.FakeNetworkManager()
    self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
                      mgr.remove_fixed_ip_from_instance,
                      self.context, 99, HOST, 'bad input')
def test_validate_cidrs(self):
    """A single /24 request yields exactly that network."""
    mgr = fake_network.FakeNetworkManager()
    created = mgr.create_networks(self.context.elevated(), 'fake',
                                  '192.168.0.0/24',
                                  False, 1, 256, None, None, None,
                                  None, None)
    self.assertEqual(1, len(created))
    self.assertIn('192.168.0.0/24',
                  [str(net['cidr']) for net in created])
def test_validate_cidrs_split_exact_in_half(self):
    """A /24 split into two networks yields the two /25 halves."""
    mgr = fake_network.FakeNetworkManager()
    created = mgr.create_networks(self.context.elevated(), 'fake',
                                  '192.168.0.0/24',
                                  False, 2, 128, None, None, None,
                                  None, None)
    self.assertEqual(2, len(created))
    halves = [str(net['cidr']) for net in created]
    for expected in ('192.168.0.0/25', '192.168.0.128/25'):
        self.assertIn(expected, halves)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_cidr_in_use_middle_of_range(self, get_all):
    """Splitting a /16 skips the /24 already in use mid-range."""
    manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network,
                                 id=1, cidr='192.168.2.0/24')]
    nets = manager.create_networks(self.context.elevated(), 'fake',
                                   '192.168.0.0/16',
                                   False, 4, 256, None, None, None,
                                   None, None)
    self.assertEqual(4, len(nets))
    cidrs = [str(net['cidr']) for net in nets]
    exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
                 '192.168.4.0/24']
    for exp_cidr in exp_cidrs:
        self.assertIn(exp_cidr, cidrs)
    self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_smaller_subnet_in_use(self, get_all):
    """A requested /24 that contains an in-use /25 raises CidrConflict."""
    manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network,
                                 id=1, cidr='192.168.2.9/25')]
    # CidrConflict: requested cidr (192.168.2.0/24) conflicts with
    # existing smaller cidr
    args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
            1, 256, None, None, None, None, None)
    self.assertRaises(exception.CidrConflict,
                      manager.create_networks, *args)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use(self, get_all):
    """Splitting a /16 skips the /24 that contains an in-use /25."""
    manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network,
                                 id=1, cidr='192.168.2.0/25')]
    nets = manager.create_networks(self.context.elevated(), 'fake',
                                   '192.168.0.0/16',
                                   False, 4, 256, None, None, None, None,
                                   None)
    self.assertEqual(4, len(nets))
    cidrs = [str(net['cidr']) for net in nets]
    exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
                 '192.168.4.0/24']
    for exp_cidr in exp_cidrs:
        self.assertIn(exp_cidr, cidrs)
    self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use2(self, get_all):
    """Splitting a /24 skips the /27 that overlaps an in-use /29.

    Fix: dropped a stray self.mox.StubOutWithMock(manager.db,
    'network_get_all') left over from the mox->mock conversion; the
    @mock.patch decorator above already replaces the db call and the
    mox stub recorded no expectations.
    """
    manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network, id=1,
                                 cidr='192.168.2.9/29')]
    nets = manager.create_networks(self.context.elevated(), 'fake',
                                   '192.168.2.0/24',
                                   False, 3, 32, None, None, None, None,
                                   None)
    self.assertEqual(3, len(nets))
    cidrs = [str(net['cidr']) for net in nets]
    exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
    for exp_cidr in exp_cidrs:
        self.assertIn(exp_cidr, cidrs)
    self.assertNotIn('192.168.2.0/27', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_all_in_use(self, get_all):
    """Too few free subnets left in the range raises CidrConflict."""
    manager = fake_network.FakeNetworkManager()
    in_use = [dict(test_network.fake_network, **values) for values in
              [{'id': 1, 'cidr': '192.168.2.9/29'},
               {'id': 2, 'cidr': '192.168.2.64/26'},
               {'id': 3, 'cidr': '192.168.2.128/26'}]]
    get_all.return_value = in_use
    args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
            3, 64, None, None, None, None, None)
    # CidrConflict: Not enough subnets avail to satisfy requested num_
    #               networks - some subnets in requested range already
    #               in use
    self.assertRaises(exception.CidrConflict,
                      manager.create_networks, *args)
def test_validate_cidrs_one_in_use(self):
    """network_size * num_networks exceeding the cidr raises ValueError."""
    mgr = fake_network.FakeNetworkManager()
    # two networks of 256 addresses cannot be carved from a single /24
    self.assertRaises(ValueError, mgr.create_networks,
                      None, 'fake', '192.168.0.0/24', False, 2, 256,
                      None, None, None, None, None)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_already_used(self, get_all):
    """Requesting a cidr that is already in use raises CidrConflict."""
    manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network,
                                 cidr='192.168.0.0/24')]
    # CidrConflict: cidr already in use
    args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
            1, 256, None, None, None, None, None)
    self.assertRaises(exception.CidrConflict,
                      manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
    """More subnets requested than the cidr can hold raises ValueError."""
    mgr = fake_network.FakeNetworkManager()
    # 200 networks of 256 addresses will never fit into one /24
    self.assertRaises(ValueError, mgr.create_networks,
                      None, 'fake', '192.168.0.0/24', False, 200, 256,
                      None, None, None, None, None)
def test_validate_cidrs_split_partial(self):
    """Two /24s requested from a /16 yields the first two subnets."""
    mgr = fake_network.FakeNetworkManager()
    created = mgr.create_networks(self.context.elevated(), 'fake',
                                  '192.168.0.0/16',
                                  False, 2, 256, None, None, None, None,
                                  None)
    got = [str(net['cidr']) for net in created]
    for expected in ('192.168.0.0/24', '192.168.1.0/24'):
        self.assertIn(expected, got)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_conflict_existing_supernet(self, get_all):
    """A requested cidr inside an in-use supernet raises CidrConflict."""
    manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network,
                                 id=1, cidr='192.168.0.0/8')]
    args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
            1, 256, None, None, None, None, None)
    # CidrConflict: requested cidr (192.168.0.0/24) conflicts
    #               with existing supernet
    self.assertRaises(exception.CidrConflict,
                      manager.create_networks, *args)
def test_create_networks(self):
    """create_networks succeeds for one /24 with an IPv6 range."""
    mgr = fake_network.FakeNetworkManager()
    self.stubs.Set(mgr, '_create_fixed_ips', self.fake_create_fixed_ips)
    self.assertTrue(mgr.create_networks(
        self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256,
        'fd00::/48', None, None, None, None, None))
def test_create_networks_with_uuid(self):
    """A caller-supplied uuid is preserved on the created network."""
    cidr = '192.168.0.0/24'
    uuid = FAKEUUID
    manager = fake_network.FakeNetworkManager()
    self.stubs.Set(manager, '_create_fixed_ips',
                   self.fake_create_fixed_ips)
    args = [self.context.elevated(), 'foo', cidr, None, 1, 256,
            'fd00::/48', None, None, None, None, None]
    kwargs = {'uuid': uuid}
    nets = manager.create_networks(*args, **kwargs)
    self.assertEqual(1, len(nets))
    net = nets[0]
    self.assertEqual(uuid, net['uuid'])
@mock.patch('nova.db.network_get_all')
def test_create_networks_cidr_already_used(self, get_all):
    """Creating a network over an in-use cidr raises CidrConflict."""
    manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network,
                                 id=1, cidr='192.168.0.0/24')]
    args = [self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256,
            'fd00::/48', None, None, None, None, None]
    self.assertRaises(exception.CidrConflict,
                      manager.create_networks, *args)
def test_create_networks_many(self):
    """Ten /24s can be created from a /16 in one call."""
    mgr = fake_network.FakeNetworkManager()
    self.stubs.Set(mgr, '_create_fixed_ips', self.fake_create_fixed_ips)
    self.assertTrue(mgr.create_networks(
        self.context.elevated(), 'foo', '192.168.0.0/16', None, 10, 256,
        'fd00::/48', None, None, None, None, None))
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_instance_uuids_by_ip_regex(self, fixed_get, network_get):
    """The 'ip' filter matches fixed IPv4 addresses as a regex."""
    manager = fake_network.FakeNetworkManager(self.stubs)
    fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
    _vifs = manager.db.virtual_interface_get_all(None)
    fake_context = context.RequestContext('user', 'project')
    network_get.return_value = dict(test_network.fake_network,
                                    **manager.db.network_get(None, 1))

    # Greedy: get everything
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '.*'})
    self.assertEqual(len(_vifs), len(res))

    # Doesn't exist
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '10.0.0.1'})
    self.assertFalse(res)

    # Get instance 1
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '172.16.0.2'})
    self.assertTrue(res)
    self.assertEqual(1, len(res))
    self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid'])

    # Get instance 2
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '173.16.0.2'})
    self.assertTrue(res)
    self.assertEqual(1, len(res))
    self.assertEqual(_vifs[2]['instance_uuid'], res[0]['instance_uuid'])

    # Get instance 0 and 1
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '172.16.0.*'})
    self.assertTrue(res)
    self.assertEqual(2, len(res))
    self.assertEqual(_vifs[0]['instance_uuid'], res[0]['instance_uuid'])
    self.assertEqual(_vifs[1]['instance_uuid'], res[1]['instance_uuid'])

    # Get instance 1 and 2
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '17..16.0.2'})
    self.assertTrue(res)
    self.assertEqual(2, len(res))
    self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid'])
    self.assertEqual(_vifs[2]['instance_uuid'], res[1]['instance_uuid'])
@mock.patch('nova.db.network_get')
def test_get_instance_uuids_by_ipv6_regex(self, network_get):
    """The 'ip6' filter matches fixed IPv6 addresses as a regex."""
    manager = fake_network.FakeNetworkManager(self.stubs)
    _vifs = manager.db.virtual_interface_get_all(None)
    fake_context = context.RequestContext('user', 'project')

    def _network_get(context, network_id, **args):
        return dict(test_network.fake_network,
                    **manager.db.network_get(context, network_id))
    network_get.side_effect = _network_get

    # Greedy: get everything
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': '.*'})
    self.assertEqual(len(_vifs), len(res))

    # Doesn't exist
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': '.*1034.*'})
    self.assertFalse(res)

    # Get instance 1
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': '2001:.*2'})
    self.assertTrue(res)
    self.assertEqual(1, len(res))
    self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid'])

    # Get instance 2
    ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': ip6})
    self.assertTrue(res)
    self.assertEqual(1, len(res))
    self.assertEqual(_vifs[2]['instance_uuid'], res[0]['instance_uuid'])

    # Get instance 0 and 1
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': '.*ef0[1,2]'})
    self.assertTrue(res)
    self.assertEqual(2, len(res))
    self.assertEqual(_vifs[0]['instance_uuid'], res[0]['instance_uuid'])
    self.assertEqual(_vifs[1]['instance_uuid'], res[1]['instance_uuid'])

    # Get instance 1 and 2
    ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': ip6})
    self.assertTrue(res)
    self.assertEqual(2, len(res))
    self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid'])
    self.assertEqual(_vifs[2]['instance_uuid'], res[1]['instance_uuid'])
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_instance_uuids_by_ip(self, fixed_get, network_get):
    """The 'fixed_ip' filter is an exact match — no regex expansion."""
    manager = fake_network.FakeNetworkManager(self.stubs)
    fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
    _vifs = manager.db.virtual_interface_get_all(None)
    fake_context = context.RequestContext('user', 'project')
    network_get.return_value = dict(test_network.fake_network,
                                    **manager.db.network_get(None, 1))

    # No regex for you!
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'fixed_ip': '.*'})
    self.assertFalse(res)

    # Doesn't exist
    ip = '10.0.0.1'
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'fixed_ip': ip})
    self.assertFalse(res)

    # Get instance 1
    ip = '172.16.0.2'
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'fixed_ip': ip})
    self.assertTrue(res)
    self.assertEqual(1, len(res))
    self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid'])

    # Get instance 2
    ip = '173.16.0.2'
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'fixed_ip': ip})
    self.assertTrue(res)
    self.assertEqual(1, len(res))
    self.assertEqual(_vifs[2]['instance_uuid'], res[0]['instance_uuid'])
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network(self, get):
    """get_network returns the network matching the requested uuid."""
    manager = fake_network.FakeNetworkManager()
    fake_context = context.RequestContext('user', 'project')
    get.return_value = dict(test_network.fake_network, **networks[0])
    uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
    network = manager.get_network(fake_context, uuid)
    self.assertEqual(uuid, network['uuid'])
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='foo')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.get_network, fake_context, uuid)
@mock.patch('nova.db.network_get_all')
def test_get_all_networks(self, get_all):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get_all.return_value = [dict(test_network.fake_network, **net)
for net in networks]
output = manager.get_all_networks(fake_context)
self.assertEqual(2, len(networks))
self.assertEqual('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
output[0]['uuid'])
self.assertEqual('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
output[1]['uuid'])
@mock.patch('nova.db.network_get_by_uuid')
@mock.patch('nova.db.network_disassociate')
def test_disassociate_network(self, disassociate, get):
manager = fake_network.FakeNetworkManager()
disassociate.return_value = True
fake_context = context.RequestContext('user', 'project')
get.return_value = dict(test_network.fake_network,
**networks[0])
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
manager.disassociate_network(fake_context, uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_disassociate_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='fake')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.disassociate_network, fake_context, uuid)
    def _test_init_host_dynamic_fixed_range(self, net_manager):
        """Verify NAT iptables rules are derived from networks in the db.

        With CONF.fixed_range left unset, init_host() must create
        SNAT/POSTROUTING rules for every network returned by
        network_get_all_by_host, and _setup_network_on_host() must extend
        those rules when an additional network is added.

        :param net_manager: the network manager instance under test
                            (FlatDHCPManager or VlanManager).
        """
        self.flags(fake_network=True,
                   routing_source_ip='172.16.0.1',
                   metadata_host='172.16.0.1',
                   public_interface='eth1',
                   dmz_cidr=['10.0.3.0/24'])
        binary_name = linux_net.get_binary_name()
        # Stub out calls we don't want to really run, mock the db
        self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
        self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
                       lambda *args: None)
        self.stubs.Set(net_manager.l3driver, 'initialize_gateway',
                       lambda *args: None)
        self.mox.StubOutWithMock(db, 'network_get_all_by_host')
        fake_networks = [dict(test_network.fake_network, **n)
                         for n in networks]
        db.network_get_all_by_host(mox.IgnoreArg(),
                                   mox.IgnoreArg()
                                   ).MultipleTimes().AndReturn(fake_networks)
        self.mox.ReplayAll()
        net_manager.init_host()
        # Get the iptables rules that got created
        current_lines = []
        new_lines = linux_net.iptables_manager._modify_rules(current_lines,
                                       linux_net.iptables_manager.ipv4['nat'],
                                       table_name='nat')
        # Four rules are expected per network: SNAT, plus ACCEPTs for the
        # metadata host, the DMZ range, and already-DNATed traffic.
        expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
                          '-j SNAT --to-source %s -o %s'
                          % (binary_name, networks[0]['cidr'],
                             CONF.routing_source_ip,
                             CONF.public_interface),
                          '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
                          % (binary_name, networks[0]['cidr'],
                             CONF.metadata_host),
                          '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
                          % (binary_name, networks[0]['cidr'],
                             CONF.dmz_cidr[0]),
                          '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
                          '--ctstate DNAT -j ACCEPT' % (binary_name,
                                                        networks[0]['cidr'],
                                                        networks[0]['cidr']),
                          '[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
                          '-j SNAT --to-source %s -o %s'
                          % (binary_name, networks[1]['cidr'],
                             CONF.routing_source_ip,
                             CONF.public_interface),
                          '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
                          % (binary_name, networks[1]['cidr'],
                             CONF.metadata_host),
                          '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
                          % (binary_name, networks[1]['cidr'],
                             CONF.dmz_cidr[0]),
                          '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
                          '--ctstate DNAT -j ACCEPT' % (binary_name,
                                                        networks[1]['cidr'],
                                                        networks[1]['cidr'])]
        # Compare the expected rules against the actual ones
        for line in expected_lines:
            self.assertIn(line, new_lines)
        # Add an additional network and ensure the rules get configured
        new_network = {'id': 2,
                       'uuid': uuids.network_1,
                       'label': 'test2',
                       'injected': False,
                       'multi_host': False,
                       'cidr': '192.168.2.0/24',
                       'cidr_v6': '2001:dba::/64',
                       'gateway_v6': '2001:dba::1',
                       'netmask_v6': '64',
                       'netmask': '255.255.255.0',
                       'bridge': 'fa1',
                       'bridge_interface': 'fake_fa1',
                       'gateway': '192.168.2.1',
                       'dhcp_server': '192.168.2.1',
                       'broadcast': '192.168.2.255',
                       'dns1': '192.168.2.1',
                       'dns2': '192.168.2.2',
                       'vlan': None,
                       'host': HOST,
                       'project_id': fakes.FAKE_PROJECT_ID,
                       'vpn_public_address': '192.168.2.2',
                       'vpn_public_port': '22',
                       'vpn_private_address': '10.0.0.2'}
        new_network_obj = objects.Network._from_db_object(
            self.context, objects.Network(),
            dict(test_network.fake_network, **new_network))
        ctxt = context.get_admin_context()
        net_manager._setup_network_on_host(ctxt, new_network_obj)
        # Get the new iptables rules that got created from adding a new network
        current_lines = []
        new_lines = linux_net.iptables_manager._modify_rules(current_lines,
                                       linux_net.iptables_manager.ipv4['nat'],
                                       table_name='nat')
        # Add the new expected rules to the old ones
        expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
                           '-j SNAT --to-source %s -o %s'
                           % (binary_name, new_network['cidr'],
                              CONF.routing_source_ip,
                              CONF.public_interface),
                           '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
                           % (binary_name, new_network['cidr'],
                              CONF.metadata_host),
                           '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
                           % (binary_name, new_network['cidr'],
                              CONF.dmz_cidr[0]),
                           '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack '
                           '! --ctstate DNAT -j ACCEPT' % (binary_name,
                                                           new_network['cidr'],
                                                           new_network['cidr'])]
        # Compare the expected rules (with new network) against the actual ones
        for line in expected_lines:
            self.assertIn(line, new_lines)
def test_flatdhcpmanager_dynamic_fixed_range(self):
"""Test FlatDHCPManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.FlatDHCPManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
def test_vlanmanager_dynamic_fixed_range(self):
"""Test VlanManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
@mock.patch('nova.objects.quotas.Quotas.rollback')
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
@mock.patch('nova.network.manager.NetworkManager.'
'_do_trigger_security_group_members_refresh_for_instance')
def test_fixed_ip_cleanup_rollback(self, fake_trig,
fixed_get, rollback):
manager = network_manager.NetworkManager()
fake_trig.side_effect = test.TestingException
self.assertRaises(test.TestingException,
manager.deallocate_fixed_ip,
self.context, 'fake', 'fake',
instance=fake_inst(uuid=uuids.non_existent_uuid))
rollback.assert_called_once_with()
def test_fixed_cidr_out_of_range(self):
manager = network_manager.NetworkManager()
ctxt = context.get_admin_context()
self.assertRaises(exception.AddressOutOfRange,
manager.create_networks, ctxt, label="fake",
cidr='10.1.0.0/24', fixed_cidr='10.1.1.0/25')
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
                          network_manager.NetworkManager):
    """Dummy manager that implements RPCAllocateFixedIP."""
    # Mixes the RPCAllocateFixedIP mixin into the concrete NetworkManager so
    # the mixin's RPC allocation path can be exercised by RPCAllocateTestCase.
class RPCAllocateTestCase(test.NoDBTestCase):
    """Tests nova.network.manager.RPCAllocateFixedIP."""

    def setUp(self):
        super(RPCAllocateTestCase, self).setUp()
        # Manager under test: NetworkManager with the RPC fixed-IP mixin.
        self.rpc_fixed = TestRPCFixedManager()
        self.context = context.RequestContext('fake', 'fake')

    def test_rpc_allocate(self):
        """Test to verify bug 855030 doesn't resurface.

        Makes sure _rpc_allocate_fixed_ip returns a value so the call
        returns properly and the greenpool completes.
        """
        address = '10.10.10.10'

        def fake_allocate(*args, **kwargs):
            return address

        def fake_network_get(*args, **kwargs):
            return test_network.fake_network

        self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
        self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
        rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
                                                     'fake_instance',
                                                     'fake_network')
        self.assertEqual(address, rval)
class TestFloatingIPManager(floating_ips.FloatingIP,
                            network_manager.NetworkManager):
    """Dummy manager that implements FloatingIP."""
    # Mixes the FloatingIP mixin into the concrete NetworkManager so the
    # floating-IP code paths can be exercised by the test cases below.
class AllocateTestCase(test.TestCase):
    """Exercises allocate/deallocate_for_instance against sample networks.

    These tests run the real VlanManager code paths on top of the
    test.SampleNetworks fixture (a populated test database).
    """

    REQUIRES_LOCKING = True

    def setUp(self):
        super(AllocateTestCase, self).setUp()
        # Use the no-op DNS driver so allocation doesn't touch real DNS.
        dns = 'nova.network.noop_dns_driver.NoopDNSDriver'
        self.flags(instance_dns_manager=dns)
        self.useFixture(test.SampleNetworks())
        self.network = network_manager.VlanManager(host=HOST)
        self.user_id = fakes.FAKE_USER_ID
        self.project_id = fakes.FAKE_PROJECT_ID
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        self.user_context = context.RequestContext('testuser',
                                                   fakes.FAKE_PROJECT_ID)

    def test_allocate_for_instance(self):
        address = "10.10.10.10"
        self.flags(auto_assign_floating_ip=True)
        db.floating_ip_create(self.context,
                              {'address': address,
                               'pool': 'nova'})
        inst = objects.Instance(context=self.context)
        inst.host = HOST
        inst.display_name = HOST
        inst.instance_type_id = 1
        inst.uuid = FAKEUUID
        inst.create()
        networks = db.network_get_all(self.context)
        reqnets = objects.NetworkRequestList(objects=[])
        index = 0
        project_id = self.user_context.project_id
        # Claim every network for this host/project, but only request the
        # first one (tagged 'mynic') for the instance.
        for network in networks:
            db.network_update(self.context, network['id'],
                              {'host': HOST,
                               'project_id': project_id})
            if index == 0:
                reqnets.objects.append(objects.NetworkRequest(
                    network_id=network['uuid'],
                    tag='mynic'))
            index += 1
        nw_info = self.network.allocate_for_instance(self.user_context,
            instance_id=inst['id'], instance_uuid=inst['uuid'],
            host=inst['host'], vpn=None, rxtx_factor=3,
            project_id=project_id, macs=None, requested_networks=reqnets)
        self.assertEqual(1, len(nw_info))
        # The requested tag must have been persisted on the created VIF.
        vifs = objects.VirtualInterfaceList.get_all(self.context)
        self.assertEqual(['mynic'], [vif.tag for vif in vifs])
        fixed_ip = nw_info.fixed_ips()[0]['address']
        self.assertTrue(netutils.is_valid_ipv4(fixed_ip))
        self.network.deallocate_for_instance(self.context,
                instance=inst)

    def test_allocate_for_instance_illegal_network(self):
        networks = db.network_get_all(self.context)
        requested_networks = []
        for network in networks:
            # set all networks to other projects
            db.network_update(self.context, network['id'],
                              {'host': HOST,
                               'project_id': 'otherid'})
            requested_networks.append((network['uuid'], None))
        # set the first network to our project
        db.network_update(self.context, networks[0]['id'],
                          {'project_id': self.user_context.project_id})
        inst = objects.Instance(context=self.context)
        inst.host = HOST
        inst.display_name = HOST
        inst.instance_type_id = 1
        inst.uuid = FAKEUUID
        inst.create()
        # Requesting networks owned by another project must fail.
        self.assertRaises(exception.NetworkNotFoundForProject,
            self.network.allocate_for_instance, self.user_context,
            instance_id=inst['id'], instance_uuid=inst['uuid'],
            host=inst['host'], vpn=None, rxtx_factor=3,
            project_id=self.context.project_id, macs=None,
            requested_networks=requested_networks)

    def test_allocate_for_instance_with_mac(self):
        # When a set of allowed MACs is supplied, the created VIF must use
        # one of them.
        available_macs = set(['ca:fe:de:ad:be:ef'])
        inst = db.instance_create(self.context, {'host': HOST,
                                                 'display_name': HOST,
                                                 'instance_type_id': 1})
        networks = db.network_get_all(self.context)
        for network in networks:
            db.network_update(self.context, network['id'],
                              {'host': HOST})
        project_id = self.context.project_id
        nw_info = self.network.allocate_for_instance(self.user_context,
            instance_id=inst['id'], instance_uuid=inst['uuid'],
            host=inst['host'], vpn=None, rxtx_factor=3,
            project_id=project_id, macs=available_macs)
        assigned_macs = [vif['address'] for vif in nw_info]
        self.assertEqual(1, len(assigned_macs))
        self.assertEqual(available_macs.pop(), assigned_macs[0])
        self.network.deallocate_for_instance(self.context,
                                             instance_id=inst['id'],
                                             host=self.network.host,
                                             project_id=project_id)

    def test_allocate_for_instance_not_enough_macs(self):
        # An empty MAC pool cannot satisfy any VIF creation.
        available_macs = set()
        inst = db.instance_create(self.context, {'host': HOST,
                                                 'display_name': HOST,
                                                 'instance_type_id': 1})
        networks = db.network_get_all(self.context)
        for network in networks:
            db.network_update(self.context, network['id'],
                              {'host': self.network.host})
        project_id = self.context.project_id
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self.network.allocate_for_instance,
                          self.user_context,
                          instance_id=inst['id'], instance_uuid=inst['uuid'],
                          host=inst['host'], vpn=None, rxtx_factor=3,
                          project_id=project_id, macs=available_macs)
class FloatingIPTestCase(test.TestCase):
    """Tests nova.network.manager.FloatingIP."""

    REQUIRES_LOCKING = True

    def setUp(self):
        super(FloatingIPTestCase, self).setUp()
        self.tempdir = self.useFixture(fixtures.TempDir()).path
        self.flags(log_dir=self.tempdir)
        self.network = TestFloatingIPManager()
        self.network.db = db
        self.project_id = fakes.FAKE_PROJECT_ID
        self.context = context.RequestContext('testuser', self.project_id,
                                              is_admin=False)

    @mock.patch('nova.db.fixed_ip_get')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.instance_get_by_uuid')
    @mock.patch('nova.db.service_get_by_host_and_binary')
    @mock.patch('nova.db.floating_ip_get_by_address')
    def test_disassociate_floating_ip_multi_host_calls(self, floating_get,
                                                       service_get,
                                                       inst_get, net_get,
                                                       fixed_get):
        # On a multi_host network, with the instance living on another host
        # whose service is up, disassociation must be delegated over RPC to
        # that host (the mox expectation below pins the exact call).
        floating_ip = dict(test_floating_ip.fake_floating_ip,
                           fixed_ip_id=12)
        fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
                        network_id=None,
                        instance_uuid=uuids.instance)
        network = dict(test_network.fake_network,
                       multi_host=True)
        instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
        ctxt = context.RequestContext('testuser', fakes.FAKE_PROJECT_ID,
                                      is_admin=False)
        self.stubs.Set(self.network,
                       '_floating_ip_owned_by_project',
                       lambda _x, _y: True)
        floating_get.return_value = floating_ip
        fixed_get.return_value = fixed_ip
        net_get.return_value = network
        inst_get.return_value = instance
        service_get.return_value = test_service.fake_service
        self.stubs.Set(self.network.servicegroup_api,
                       'service_is_up',
                       lambda _x: True)
        self.mox.StubOutWithMock(
            self.network.network_rpcapi, '_disassociate_floating_ip')
        self.network.network_rpcapi._disassociate_floating_ip(
            ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host',
            uuids.instance)
        self.mox.ReplayAll()
        self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)

    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.instance_get_by_uuid')
    @mock.patch('nova.db.floating_ip_get_by_address')
    def test_associate_floating_ip_multi_host_calls(self, floating_get,
                                                    inst_get, net_get,
                                                    fixed_get):
        # Mirror of the test above for association: on a multi_host network
        # the association must be delegated over RPC to the instance's host.
        floating_ip = dict(test_floating_ip.fake_floating_ip,
                           fixed_ip_id=None)
        fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
                        network_id=None,
                        instance_uuid=uuids.instance)
        network = dict(test_network.fake_network,
                       multi_host=True)
        instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
        ctxt = context.RequestContext('testuser', fakes.FAKE_PROJECT_ID,
                                      is_admin=False)
        self.stubs.Set(self.network,
                       '_floating_ip_owned_by_project',
                       lambda _x, _y: True)
        floating_get.return_value = floating_ip
        fixed_get.return_value = fixed_ip
        net_get.return_value = network
        inst_get.return_value = instance
        self.mox.StubOutWithMock(
            self.network.network_rpcapi, '_associate_floating_ip')
        self.network.network_rpcapi._associate_floating_ip(
            ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
            uuids.instance)
        self.mox.ReplayAll()
        self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)

    def test_double_deallocation(self):
        instance_ref = db.instance_create(self.context,
                                          {"project_id": self.project_id})
        # Run it twice to make it fault if it does not handle
        # instances without fixed networks
        # If this fails in either, it does not handle having no addresses
        self.network.deallocate_for_instance(self.context,
                                             instance_id=instance_ref['id'])
        self.network.deallocate_for_instance(self.context,
                                             instance_id=instance_ref['id'])

    def test_deallocate_floating_ip_quota_rollback(self):
        ctxt = context.RequestContext('testuser', fakes.FAKE_PROJECT_ID,
                                      is_admin=False)

        def fake(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1', fixed_ip_id=None,
                        project_id=ctxt.project_id)

        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake)
        self.mox.StubOutWithMock(db, 'floating_ip_deallocate')
        self.mox.StubOutWithMock(self.network,
                                 '_floating_ip_owned_by_project')
        self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
        self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
        quota.QUOTAS.reserve(self.context,
                             floating_ips=-1,
                             project_id=fakes.FAKE_PROJECT_ID
                             ).AndReturn('fake-rsv')
        self.network._floating_ip_owned_by_project(self.context,
                                                   mox.IgnoreArg())
        # floating_ip_deallocate returns None here; the expectations pin
        # that the manager then rolls back the quota reservation instead
        # of committing it.
        db.floating_ip_deallocate(mox.IgnoreArg(),
                                  mox.IgnoreArg()).AndReturn(None)
        quota.QUOTAS.rollback(self.context, 'fake-rsv',
                              project_id=fakes.FAKE_PROJECT_ID)
        self.mox.ReplayAll()
        self.network.deallocate_floating_ip(self.context, '10.0.0.1')

    def test_deallocation_deleted_instance(self):
        # Deallocation must tolerate an instance already marked deleted.
        self.stubs.Set(self.network, '_teardown_network_on_host',
                       lambda *args, **kwargs: None)
        instance = objects.Instance(context=self.context)
        instance.project_id = self.project_id
        instance.deleted = True
        instance.create()
        network = db.network_create_safe(self.context.elevated(), {
            'project_id': self.project_id,
            'host': CONF.host,
            'label': 'foo'})
        fixed = db.fixed_ip_create(self.context, {'allocated': True,
            'instance_uuid': instance.uuid, 'address': '10.1.1.1',
            'network_id': network['id']})
        db.floating_ip_create(self.context, {
            'address': '10.10.10.10', 'instance_uuid': instance.uuid,
            'fixed_ip_id': fixed['id'],
            'project_id': self.project_id})
        self.network.deallocate_for_instance(self.context, instance=instance)

    def test_deallocation_duplicate_floating_ip(self):
        # A soft-deleted floating IP with the same address must not break
        # deallocation of the live one.
        self.stubs.Set(self.network, '_teardown_network_on_host',
                       lambda *args, **kwargs: None)
        instance = objects.Instance(context=self.context)
        instance.project_id = self.project_id
        instance.create()
        network = db.network_create_safe(self.context.elevated(), {
            'project_id': self.project_id,
            'host': CONF.host,
            'label': 'foo'})
        fixed = db.fixed_ip_create(self.context, {'allocated': True,
            'instance_uuid': instance.uuid, 'address': '10.1.1.1',
            'network_id': network['id']})
        db.floating_ip_create(self.context, {
            'address': '10.10.10.10',
            'deleted': True})
        db.floating_ip_create(self.context, {
            'address': '10.10.10.10', 'instance_uuid': instance.uuid,
            'fixed_ip_id': fixed['id'],
            'project_id': self.project_id})
        self.network.deallocate_for_instance(self.context, instance=instance)

    @mock.patch('nova.db.fixed_ip_get')
    @mock.patch('nova.db.floating_ip_get_by_address')
    @mock.patch('nova.db.floating_ip_update')
    def test_migrate_instance_start(self, floating_update, floating_get,
                                    fixed_get):
        called = {'count': 0}

        def fake_floating_ip_get_by_address(context, address):
            return dict(test_floating_ip.fake_floating_ip,
                        address=address,
                        fixed_ip_id=0)

        def fake_is_stale_floating_ip_address(context, floating_ip):
            # Only 172.24.4.23 is reported as "stale".
            return str(floating_ip.address) == '172.24.4.23'

        floating_get.side_effect = fake_floating_ip_get_by_address
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      instance_uuid=uuids.instance,
                                      address='10.0.0.2',
                                      network=test_network.fake_network)
        floating_update.return_value = fake_floating_ip_get_by_address(
            None, '1.2.3.4')

        def fake_remove_floating_ip(floating_addr, fixed_addr, interface,
                                    network):
            called['count'] += 1

        def fake_clean_conntrack(fixed_ip):
            if not str(fixed_ip) == "10.0.0.2":
                raise exception.FixedIpInvalid(address=fixed_ip)

        self.stubs.Set(self.network, '_is_stale_floating_ip_address',
                       fake_is_stale_floating_ip_address)
        self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
                       fake_remove_floating_ip)
        self.stubs.Set(self.network.driver, 'clean_conntrack',
                       fake_clean_conntrack)
        self.mox.ReplayAll()
        addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
        self.network.migrate_instance_start(self.context,
                                            instance_uuid=FAKEUUID,
                                            floating_addresses=addresses,
                                            rxtx_factor=3,
                                            project_id=self.project_id,
                                            source='fake_source',
                                            dest='fake_dest')
        # Exactly two of the three addresses trigger remove_floating_ip;
        # presumably the stale one is skipped — confirm against the manager.
        self.assertEqual(2, called['count'])

    @mock.patch('nova.db.fixed_ip_get')
    @mock.patch('nova.db.floating_ip_update')
    def test_migrate_instance_finish(self, floating_update, fixed_get):
        called = {'count': 0}

        def fake_floating_ip_get_by_address(context, address):
            return dict(test_floating_ip.fake_floating_ip,
                        address=address,
                        fixed_ip_id=0)

        def fake_is_stale_floating_ip_address(context, floating_ip):
            # Only 172.24.4.23 is reported as "stale".
            return str(floating_ip.address) == '172.24.4.23'

        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      instance_uuid=uuids.instance,
                                      address='10.0.0.2',
                                      network=test_network.fake_network)
        floating_update.return_value = fake_floating_ip_get_by_address(
            None, '1.2.3.4')

        def fake_add_floating_ip(floating_addr, fixed_addr, interface,
                                 network):
            called['count'] += 1

        self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
                       fake_floating_ip_get_by_address)
        self.stubs.Set(self.network, '_is_stale_floating_ip_address',
                       fake_is_stale_floating_ip_address)
        self.stubs.Set(self.network.l3driver, 'add_floating_ip',
                       fake_add_floating_ip)
        self.mox.ReplayAll()
        addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
        self.network.migrate_instance_finish(self.context,
                                             instance_uuid=FAKEUUID,
                                             floating_addresses=addresses,
                                             host='fake_dest',
                                             rxtx_factor=3,
                                             project_id=self.project_id,
                                             source='fake_source')
        # Exactly two of the three addresses trigger add_floating_ip.
        self.assertEqual(2, called['count'])

    def test_floating_dns_create_conflict(self):
        # Creating the same DNS entry twice raises FloatingIpDNSExists.
        zone = "example.org"
        address1 = "10.10.10.11"
        name1 = "foo"
        self.network.add_dns_entry(self.context, address1, name1, "A", zone)
        self.assertRaises(exception.FloatingIpDNSExists,
                          self.network.add_dns_entry, self.context,
                          address1, name1, "A", zone)

    def test_floating_create_and_get(self):
        # Entries are retrievable both by address and by name.
        zone = "example.org"
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        entries = self.network.get_dns_entries_by_address(self.context,
                                                          address1, zone)
        self.assertFalse(entries)
        self.network.add_dns_entry(self.context, address1, name1, "A", zone)
        self.network.add_dns_entry(self.context, address1, name2, "A", zone)
        entries = self.network.get_dns_entries_by_address(self.context,
                                                          address1, zone)
        self.assertEqual(2, len(entries))
        self.assertEqual(name1, entries[0])
        self.assertEqual(name2, entries[1])
        entries = self.network.get_dns_entries_by_name(self.context,
                                                       name1, zone)
        self.assertEqual(1, len(entries))
        self.assertEqual(address1, entries[0])

    def test_floating_dns_delete(self):
        # Deleting one of two names leaves the other; deleting again raises.
        zone = "example.org"
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        self.network.add_dns_entry(self.context, address1, name1, "A", zone)
        self.network.add_dns_entry(self.context, address1, name2, "A", zone)
        self.network.delete_dns_entry(self.context, name1, zone)
        entries = self.network.get_dns_entries_by_address(self.context,
                                                          address1, zone)
        self.assertEqual(1, len(entries))
        self.assertEqual(name2, entries[0])
        self.assertRaises(exception.NotFound,
                          self.network.delete_dns_entry, self.context,
                          name1, zone)

    def test_floating_dns_domains_public(self):
        domain1 = "example.org"
        domain2 = "example.com"
        address1 = '10.10.10.10'
        entryname = 'testentry'
        self.network.create_public_dns_domain(self.context, domain1,
                                              fakes.FAKE_PROJECT_ID)
        self.network.create_public_dns_domain(self.context, domain2,
                                              'fakeproject')
        domains = self.network.get_dns_domains(self.context)
        self.assertEqual(2, len(domains))
        self.assertEqual(domain1, domains[0]['domain'])
        self.assertEqual(domain2, domains[1]['domain'])
        self.assertEqual(fakes.FAKE_PROJECT_ID, domains[0]['project'])
        self.assertEqual('fakeproject', domains[1]['project'])
        self.network.add_dns_entry(self.context, address1, entryname,
                                   'A', domain1)
        entries = self.network.get_dns_entries_by_name(self.context,
                                                       entryname, domain1)
        self.assertEqual(1, len(entries))
        self.assertEqual(address1, entries[0])
        self.network.delete_dns_domain(self.context, domain1)
        self.network.delete_dns_domain(self.context, domain2)
        # Verify that deleting the domain deleted the associated entry
        entries = self.network.get_dns_entries_by_name(self.context,
                                                       entryname, domain1)
        self.assertFalse(entries)

    def test_delete_all_by_ip(self):
        domain1 = "example.org"
        domain2 = "example.com"
        address = "10.10.10.10"
        name1 = "foo"
        name2 = "bar"

        def fake_domains(context):
            return [{'domain': 'example.org', 'scope': 'public'},
                    {'domain': 'example.com', 'scope': 'public'},
                    {'domain': 'test.example.org', 'scope': 'public'}]

        self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
        context_admin = context.RequestContext('testuser',
                                               fakes.FAKE_PROJECT_ID,
                                               is_admin=True)
        self.network.create_public_dns_domain(context_admin, domain1,
                                              fakes.FAKE_PROJECT_ID)
        self.network.create_public_dns_domain(context_admin, domain2,
                                              'fakeproject')
        domains = self.network.get_dns_domains(self.context)
        for domain in domains:
            self.network.add_dns_entry(self.context, address,
                                       name1, "A", domain['domain'])
            self.network.add_dns_entry(self.context, address,
                                       name2, "A", domain['domain'])
            entries = self.network.get_dns_entries_by_address(self.context,
                                                              address,
                                                              domain['domain'])
            self.assertEqual(2, len(entries))
        # One call should purge the address from every domain.
        self.network._delete_all_entries_for_ip(self.context, address)
        for domain in domains:
            entries = self.network.get_dns_entries_by_address(self.context,
                                                              address,
                                                              domain['domain'])
            self.assertFalse(entries)
        self.network.delete_dns_domain(context_admin, domain1)
        self.network.delete_dns_domain(context_admin, domain2)

    def test_mac_conflicts(self):
        # Make sure MAC collisions are retried.
        self.flags(create_unique_mac_address_attempts=3)
        ctxt = context.RequestContext('testuser', fakes.FAKE_PROJECT_ID,
                                      is_admin=True)
        macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
        # Create a VIF with aa:aa:aa:aa:aa:aa
        crash_test_dummy_vif = {
            'address': macs[1],
            'instance_uuid': uuids.instance,
            'network_id': 123,
            'uuid': 'fake_uuid',
            }
        self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)

        # Hand out a collision first, then a legit MAC
        def fake_gen_mac():
            return macs.pop()

        self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)

        # SQLite doesn't seem to honor the uniqueness constraint on the
        # address column, so fake the collision-avoidance here
        def fake_vif_save(vif, session=None):
            if vif.address == crash_test_dummy_vif['address']:
                raise db_exc.DBError("If you're smart, you'll retry!")
            # NOTE(russellb) The VirtualInterface object requires an ID to be
            # set, and we expect it to get set automatically when we do the
            # save.
            vif.id = 1

        self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
        # Attempt to add another and make sure that both MACs are consumed
        # by the retry loop
        self.network._add_virtual_interface(ctxt, uuids.instance, 123)
        self.assertEqual([], macs)

    def test_deallocate_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
            exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.deallocate_floating_ip,
                          self.context, '1.2.3.4')

    def test_associate_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
            exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.associate_floating_ip,
                          self.context, '1.2.3.4', '10.0.0.1')

    def test_disassociate_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
            exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.disassociate_floating_ip,
                          self.context, '1.2.3.4')

    def test_get_floating_ip_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
        self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
            exception.FloatingIpNotFound(id='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.get_floating_ip,
                          self.context, 'fake-id')

    def _test_associate_floating_ip_failure(self, stdout, expected_exception):
        # Helper: force l3driver.add_floating_ip to fail with the given
        # stdout and verify the exception type the manager raises.
        def _fake_catchall(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        network=test_network.fake_network)

        def _fake_add_floating_ip(*args, **kwargs):
            raise processutils.ProcessExecutionError(stdout)

        self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate',
                       _fake_catchall)
        self.stubs.Set(self.network.db, 'floating_ip_disassociate',
                       _fake_catchall)
        self.stubs.Set(self.network.l3driver, 'add_floating_ip',
                       _fake_add_floating_ip)
        self.assertRaises(expected_exception,
                          self.network._associate_floating_ip, self.context,
                          '1.2.3.4', '1.2.3.5', '', '')

    def test_associate_floating_ip_failure(self):
        self._test_associate_floating_ip_failure(None,
            processutils.ProcessExecutionError)

    def test_associate_floating_ip_failure_interface_not_found(self):
        self._test_associate_floating_ip_failure('Cannot find device',
            exception.NoFloatingIpInterface)

    @mock.patch('nova.objects.FloatingIP.get_by_address')
    def test_get_floating_ip_by_address(self, mock_get):
        mock_get.return_value = mock.sentinel.floating
        self.assertEqual(mock.sentinel.floating,
                         self.network.get_floating_ip_by_address(
                             self.context,
                             mock.sentinel.address))
        mock_get.assert_called_once_with(self.context, mock.sentinel.address)

    @mock.patch('nova.objects.FloatingIPList.get_by_project')
    def test_get_floating_ips_by_project(self, mock_get):
        mock_get.return_value = mock.sentinel.floatings
        self.assertEqual(mock.sentinel.floatings,
                         self.network.get_floating_ips_by_project(
                             self.context))
        mock_get.assert_called_once_with(self.context, self.context.project_id)

    @mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
    def test_get_floating_ips_by_fixed_address(self, mock_get):
        mock_get.return_value = [objects.FloatingIP(address='1.2.3.4'),
                                 objects.FloatingIP(address='5.6.7.8')]
        self.assertEqual(['1.2.3.4', '5.6.7.8'],
                         self.network.get_floating_ips_by_fixed_address(
                             self.context, mock.sentinel.address))
        mock_get.assert_called_once_with(self.context, mock.sentinel.address)

    @mock.patch('nova.db.floating_ip_get_pools')
    def test_floating_ip_pool_exists(self, floating_ip_get_pools):
        floating_ip_get_pools.return_value = [{'name': 'public'}]
        self.assertTrue(self.network._floating_ip_pool_exists(self.context,
                                                              'public'))

    @mock.patch('nova.db.floating_ip_get_pools')
    def test_floating_ip_pool_does_not_exist(self, floating_ip_get_pools):
        floating_ip_get_pools.return_value = []
        self.assertFalse(self.network._floating_ip_pool_exists(self.context,
                                                               'public'))
class InstanceDNSTestCase(test.TestCase):
    """Tests nova.network.manager instance DNS."""

    def setUp(self):
        super(InstanceDNSTestCase, self).setUp()
        self.tempdir = self.useFixture(fixtures.TempDir()).path
        self.flags(log_dir=self.tempdir)
        self.network = TestFloatingIPManager()
        self.network.db = db
        self.project_id = fakes.FAKE_PROJECT_ID
        self.context = context.RequestContext('testuser', self.project_id,
                                              is_admin=False)

    def test_dns_domains_private(self):
        # A private domain records its availability zone and can be removed.
        zone1 = 'testzone'
        domain1 = 'example.org'
        self.network.create_private_dns_domain(self.context, domain1, zone1)
        domains = self.network.get_dns_domains(self.context)
        self.assertEqual(1, len(domains))
        self.assertEqual(domain1, domains[0]['domain'])
        self.assertEqual(zone1, domains[0]['availability_zone'])
        self.network.delete_dns_domain(self.context, domain1)
# Module-level DNS domains used by LdapDNSTestCase below (created in its
# setUp and removed in its tearDown).
domain1 = "example.org"
domain2 = "example.com"
class LdapDNSTestCase(test.NoDBTestCase):
    """Tests nova.network.ldapdns.LdapDNS."""

    def setUp(self):
        super(LdapDNSTestCase, self).setUp()
        # Replace the real ldap module with the in-memory fake before the
        # driver is instantiated.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.network.ldapdns.ldap',
            fake_ldap))
        dns_class = 'nova.network.ldapdns.LdapDNS'
        self.driver = importutils.import_object(dns_class)
        attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
                                 'domain', 'dcobject', 'top'],
                 'associateddomain': ['root'],
                 'dc': ['root']}
        self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())
        # Every test runs with the two module-level domains pre-created.
        self.driver.create_domain(domain1)
        self.driver.create_domain(domain2)

    def tearDown(self):
        self.driver.delete_domain(domain1)
        self.driver.delete_domain(domain2)
        super(LdapDNSTestCase, self).tearDown()

    def test_ldap_dns_domains(self):
        domains = self.driver.get_domains()
        self.assertEqual(2, len(domains))
        self.assertIn(domain1, domains)
        self.assertIn(domain2, domains)

    def test_ldap_dns_create_conflict(self):
        # Creating the same entry twice raises FloatingIpDNSExists.
        address1 = "10.10.10.11"
        name1 = "foo"
        self.driver.create_entry(name1, address1, "A", domain1)
        self.assertRaises(exception.FloatingIpDNSExists,
                          self.driver.create_entry,
                          name1, address1, "A", domain1)

    def test_ldap_dns_create_and_get(self):
        # Entries are retrievable both by address and by name.
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertFalse(entries)
        self.driver.create_entry(name1, address1, "A", domain1)
        self.driver.create_entry(name2, address1, "A", domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertEqual(2, len(entries))
        self.assertEqual(name1, entries[0])
        self.assertEqual(name2, entries[1])
        entries = self.driver.get_entries_by_name(name1, domain1)
        self.assertEqual(1, len(entries))
        self.assertEqual(address1, entries[0])

    def test_ldap_dns_delete(self):
        # Deleting one of two names leaves the other; deleting again raises.
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        self.driver.create_entry(name1, address1, "A", domain1)
        self.driver.create_entry(name2, address1, "A", domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertEqual(2, len(entries))
        self.driver.delete_entry(name1, domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        # Use lazy %-style logger arguments instead of eager interpolation.
        LOG.debug("entries: %s", entries)
        self.assertEqual(1, len(entries))
        self.assertEqual(name2, entries[0])
        self.assertRaises(exception.NotFound,
                          self.driver.delete_entry,
                          name1, domain1)
class NetworkManagerNoDBTestCase(test.NoDBTestCase):
    """Tests nova.network.manager.NetworkManager without a database."""
    def setUp(self):
        super(NetworkManagerNoDBTestCase, self).setUp()
        self.context = context.RequestContext('fake-user', 'fake-project')
        self.manager = network_manager.NetworkManager()
    @mock.patch.object(objects.FixedIP, 'get_by_address')
    def test_release_fixed_ip_not_associated(self, mock_fip_get_by_addr):
        # Tests that the method is a no-op when the fixed IP is not associated
        # to an instance.
        # NOTE(review): mock_fip_get_by_addr.return_value is never set to
        # fip here (unlike the later tests) -- presumably the manager is
        # expected to receive this fip; confirm the wiring is intentional.
        fip = objects.FixedIP._from_db_object(
            self.context, objects.FixedIP(), fake_network.next_fixed_ip(1))
        fip.instance_uuid = None
        with mock.patch.object(fip, 'disassociate') as mock_disassociate:
            self.manager.release_fixed_ip(self.context, fip.address)
            self.assertFalse(mock_disassociate.called,
                             str(mock_disassociate.mock_calls))
    @mock.patch.object(objects.FixedIP, 'get_by_address')
    def test_release_fixed_ip_allocated(self, mock_fip_get_by_addr):
        # Tests that the fixed IP is not disassociated if it's allocated.
        fip = objects.FixedIP._from_db_object(
            self.context, objects.FixedIP(), fake_network.next_fixed_ip(1))
        fip.leased = False
        fip.allocated = True
        with mock.patch.object(fip, 'disassociate') as mock_disassociate:
            self.manager.release_fixed_ip(self.context, fip.address)
            self.assertFalse(mock_disassociate.called,
                             str(mock_disassociate.mock_calls))
    @mock.patch.object(objects.FixedIP, 'get_by_address')
    @mock.patch.object(objects.VirtualInterface, 'get_by_address')
    def test_release_fixed_ip_mac_matches_associated_instance(self,
                                                       mock_vif_get_by_addr,
                                                       mock_fip_get_by_addr):
        # Tests that the fixed IP is disassociated when the mac passed to
        # release_fixed_ip matches the VIF which has the same instance_uuid
        # as the instance associated to the FixedIP object. Also tests
        # that the fixed IP is marked as not leased in the database if it was
        # currently leased.
        instance = fake_instance.fake_instance_obj(self.context)
        fip = fake_network.next_fixed_ip(1)
        fip['instance_uuid'] = instance.uuid
        fip['leased'] = True
        vif = fip['virtual_interface']
        vif['instance_uuid'] = instance.uuid
        vif = objects.VirtualInterface._from_db_object(
            self.context, objects.VirtualInterface(), vif)
        fip = objects.FixedIP._from_db_object(
            self.context, objects.FixedIP(), fip)
        mock_fip_get_by_addr.return_value = fip
        mock_vif_get_by_addr.return_value = vif
        with mock.patch.object(fip, 'save') as mock_fip_save:
            with mock.patch.object(fip, 'disassociate') as mock_disassociate:
                self.manager.release_fixed_ip(
                    self.context, fip.address, vif.address)
        # The leased flag must have been cleared and persisted.
        mock_fip_save.assert_called_once_with()
        self.assertFalse(fip.leased)
        mock_vif_get_by_addr.assert_called_once_with(self.context, vif.address)
        mock_disassociate.assert_called_once_with()
    @mock.patch.object(objects.FixedIP, 'get_by_address')
    @mock.patch.object(objects.VirtualInterface, 'get_by_address',
                       return_value=None)
    def test_release_fixed_ip_vif_not_found_for_mac(self, mock_vif_get_by_addr,
                                                    mock_fip_get_by_addr):
        # Tests that the fixed IP is disassociated when the fixed IP is marked
        # as deallocated and there is no VIF found in the database for the mac
        # passed in.
        fip = fake_network.next_fixed_ip(1)
        fip['leased'] = False
        mac = fip['virtual_interface']['address']
        fip = objects.FixedIP._from_db_object(
            self.context, objects.FixedIP(), fip)
        mock_fip_get_by_addr.return_value = fip
        with mock.patch.object(fip, 'disassociate') as mock_disassociate:
            self.manager.release_fixed_ip(self.context, fip.address, mac)
        mock_vif_get_by_addr.assert_called_once_with(self.context, mac)
        mock_disassociate.assert_called_once_with()
    @mock.patch.object(objects.FixedIP, 'get_by_address')
    def test_release_fixed_ip_no_mac(self, mock_fip_get_by_addr):
        # Tests that the fixed IP is disassociated when the fixed IP is
        # deallocated and there is no mac address passed in (like before
        # the network rpc api version bump to pass it in).
        fip = fake_network.next_fixed_ip(1)
        fip['leased'] = False
        fip = objects.FixedIP._from_db_object(
            self.context, objects.FixedIP(), fip)
        mock_fip_get_by_addr.return_value = fip
        with mock.patch.object(fip, 'disassociate') as mock_disassociate:
            self.manager.release_fixed_ip(self.context, fip.address)
        mock_disassociate.assert_called_once_with()
    @mock.patch.object(objects.FixedIP, 'get_by_address')
    @mock.patch.object(objects.VirtualInterface, 'get_by_address')
    def test_release_fixed_ip_mac_mismatch_associated_instance(self,
                                                        mock_vif_get_by_addr,
                                                        mock_fip_get_by_addr):
        # Tests that the fixed IP is not disassociated when the VIF for the mac
        # passed to release_fixed_ip does not have an instance_uuid that
        # matches fixed_ip.instance_uuid.
        old_instance = fake_instance.fake_instance_obj(self.context)
        new_instance = fake_instance.fake_instance_obj(self.context)
        fip = fake_network.next_fixed_ip(1)
        fip['instance_uuid'] = new_instance.uuid
        fip['leased'] = False
        vif = fip['virtual_interface']
        vif['instance_uuid'] = old_instance.uuid
        vif = objects.VirtualInterface._from_db_object(
            self.context, objects.VirtualInterface(), vif)
        fip = objects.FixedIP._from_db_object(
            self.context, objects.FixedIP(), fip)
        mock_fip_get_by_addr.return_value = fip
        mock_vif_get_by_addr.return_value = vif
        with mock.patch.object(fip, 'disassociate') as mock_disassociate:
            self.manager.release_fixed_ip(
                self.context, fip.address, vif.address)
        mock_vif_get_by_addr.assert_called_once_with(self.context, vif.address)
        self.assertFalse(mock_disassociate.called,
                         str(mock_disassociate.mock_calls))
    @mock.patch.object(network_rpcapi.NetworkAPI, 'release_dhcp')
    @mock.patch.object(objects.FixedIP, 'get_by_address')
    @mock.patch.object(objects.VirtualInterface, 'get_by_id')
    @mock.patch.object(objects.Quotas, 'reserve')
    def test_deallocate_fixed_ip_explicit_disassociate(self,
                                                       mock_quota_reserve,
                                                       mock_vif_get_by_id,
                                                       mock_fip_get_by_addr,
                                                       mock_release_dhcp):
        # Tests that we explicitly call FixedIP.disassociate when the fixed IP
        # is not leased and has an associated instance (race with dnsmasq).
        self.flags(force_dhcp_release=True)
        fake_inst = fake_instance.fake_instance_obj(self.context)
        fip = fake_network.next_fixed_ip(1)
        fip['instance_uuid'] = fake_inst.uuid
        fip['leased'] = False
        vif = fip['virtual_interface']
        vif['instance_uuid'] = fake_inst.uuid
        vif = objects.VirtualInterface._from_db_object(
            self.context, objects.VirtualInterface(), vif)
        fip = objects.FixedIP._from_db_object(
            self.context, objects.FixedIP(), fip)
        fip.network = fake_network.fake_network_obj(self.context,
                                                    fip.network_id)
        mock_fip_get_by_addr.return_value = fip
        mock_vif_get_by_id.return_value = vif
        # The remaining manager collaborators are patched on a nested
        # function so the patches can target this specific fip instance.
        @mock.patch.object(self.manager,
            '_do_trigger_security_group_members_refresh_for_instance')
        @mock.patch.object(self.manager,
                           '_validate_instance_zone_for_dns_domain',
                           return_value=False)
        @mock.patch.object(self.manager, '_teardown_network_on_host')
        @mock.patch.object(fip, 'save')
        @mock.patch.object(fip, 'disassociate')
        def do_test(mock_disassociate, mock_fip_save,
                    mock_teardown_network_on_host, mock_validate_zone,
                    mock_trigger_secgroup_refresh):
            self.assertEqual(fake_inst.uuid, fip.instance_uuid)
            self.assertFalse(fip.leased)
            self.manager.deallocate_fixed_ip(
                self.context, fip['address'], instance=fake_inst)
            mock_trigger_secgroup_refresh.assert_called_once_with(
                fake_inst.uuid)
            mock_teardown_network_on_host.assert_called_once_with(self.context,
                                                                  fip.network)
            mock_disassociate.assert_called_once_with()
        do_test()
| cloudbase/nova | nova/tests/unit/network/test_manager.py | Python | apache-2.0 | 172,055 | [
"FEFF"
] | fa5ba6d426f3c68f4aaacbf2fcf724d683d88d2f1b92258758457e0dce4cf4ec |
#!/usr/bin/python
# -*- coding: utf-8 -*
###############################################################################
# #
# This file is part of 3d Brain Atlas Reconstructor #
# #
# Copyright (C) 2010-2012 Piotr Majka, Jakub M. Kowalski #
# #
# 3d Brain Atlas Reconstructor is free software: you can redistribute #
# it and/or modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation, either version 3 of #
# the License, or (at your option) any later version. #
# #
# 3d Brain Atlas Reconstructor is distributed in the hope that it #
# will be useful, but WITHOUT ANY WARRANTY; without even the implied #
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. #
# See the GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with 3d Brain Atlas Reconstructor. If not, see #
# http://www.gnu.org/licenses/. #
# #
###############################################################################
"""
The module provides basic information about available export formats.

G{importgraph}

@var SCENE_EXPORT_FORMAT_MASK: the mask of formats capable of exporting a scene
    (capable of, though not necessarily dedicated to, scene export)
@type SCENE_EXPORT_FORMAT_MASK: frozenset([str, ...])
"""
# Default output filename templates; '%s' is substituted with the name of
# the exported structure.
BAR_SCENE_TEMPLATE = 'scene_%s'
BAR_VOLUME_TEMPLATE = 'volume_%s'
BAR_MESH_TEMPLATE = 'model_%s'

# Registry of every export format supported by the reconstructor. Each
# entry is a tuple of:
#   (exporting method name, human-readable description,
#    filename extension, default output filename template)
BAR_EXPORT_FORMATS = [('exportToVRML', 'VRML files', '.wrl', BAR_SCENE_TEMPLATE),
                      ('exportToX3d', 'X3D files', '.x3d', BAR_SCENE_TEMPLATE),
                      ('exportToPOVRay', 'POV-Ray files', '.pov', BAR_SCENE_TEMPLATE),
                      ('exportToNiftii', 'NIfTI I files', '.nii.gz', BAR_VOLUME_TEMPLATE),
                      ('exportToVTKPolydata', 'vtk polydata files', '.vtk', BAR_MESH_TEMPLATE),
                      ('exportToSTL', 'STL (STereoLithography) files', '.stl', BAR_MESH_TEMPLATE),
                      ('exportToNumpy', 'NumPy array files', '.npy', BAR_VOLUME_TEMPLATE),
                      ('exportToVolume', 'vtk structured grid files', '.vtk', BAR_VOLUME_TEMPLATE),
                      ('exportScreenshot', 'PNG images', '.png', 'screenshot_%s'),
                      ('exportThumbnail', 'PNG thumbnails', '.png', 'thumbnail_%s'),
                      ('exportToWindow', None, '', '%s'),
                      ('exportPipeline', 'pipeline files', '.xml', 'pipeline_%s')]

def getFormatInfo(formats=None):
    """
    Return descriptions of requested formats.

    Description is a dictionary containing keys:
      - C{'desc'} for text describing the format,
      - C{'ext'} for the format filename suffix,
      - C{'template'} for default output filename template.

    @param formats: requested formats; if not given - all formats are requested
    @type formats: set([str, ...])

    @return: format name to its description mapping
    @rtype: {str: {str: str}, ...}
    """
    # 'is None' (identity) rather than '== None' (equality) -- an empty
    # set of requested formats must still yield an empty result.
    if formats is None:
        formats = set(row[0] for row in BAR_EXPORT_FORMATS)
    formatInfo = {}
    for (method, description, extension, template) in BAR_EXPORT_FORMATS:
        if method in formats:
            formatInfo[method] = {'desc': description,
                                  'ext': extension,
                                  'template': template + extension}
    return formatInfo
# Pre-computed description mappings (see getFormatInfo) for the commonly
# used groups of formats.
BAR_EXPORT_FORMAT_INFO = getFormatInfo()
# Formats producing volumetric datasets.
BAR_EXPORT_VOLUME_FORMATS = getFormatInfo(frozenset(['exportToNiftii',
                                                     'exportToNumpy',
                                                     'exportToVolume']))
# Formats producing full 3D scenes.
BAR_EXPORT_SCENE_FORMATS = getFormatInfo(frozenset(['exportToVRML',
                                                    'exportToX3d',
                                                    'exportToPOVRay']))
# Formats able to export a scene (not necessarily dedicated to it).
SCENE_EXPORT_FORMAT_MASK = frozenset(['exportToVRML',
                                      'exportToX3d',
                                      'exportToPOVRay',
                                      'exportThumbnail',
                                      'exportScreenshot'])
# Formats producing raster images.
IMAGE_EXPORT_FORMAT_MASK = frozenset(['exportThumbnail',
                                      'exportScreenshot'])
# mapping of export format to default pattern of output filename
BAR_TEMPLATE = dict((method, template + extension)\
                    for (method, description, extension, template)\
                    in BAR_EXPORT_FORMATS)
# filename mask for cached models
BAR_CACHED_MODEL_MASK = BAR_TEMPLATE['exportToVTKPolydata'] % '*'
| pmajka/3dbar | lib/pymodules/python2.6/bar/rec/formats.py | Python | gpl-3.0 | 5,278 | [
"VTK"
] | a7431a677a322a8de647bbeb5dedd2659dcc57864c852de6d8d5576b233407fa |
#!/usr/bin/env python
# File created on 23 Nov 2011
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011-2013, The PICRUSt Project"
__credits__ = ["Greg Caporaso","Morgan Langille"]
__license__ = "GPL"
__version__ = "1.0.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
__status__ = "Development"
from os.path import abspath, dirname, isdir
from os import mkdir,makedirs
from cogent.core.tree import PhyloNode, TreeError
from numpy import array,asarray
from biom.table import SparseOTUTable, DenseOTUTable, SparsePathwayTable, \
DensePathwayTable, SparseFunctionTable, DenseFunctionTable, \
SparseOrthologTable, DenseOrthologTable, SparseGeneTable, \
DenseGeneTable, SparseMetaboliteTable, DenseMetaboliteTable,\
SparseTaxonTable, DenseTaxonTable, table_factory
from biom.parse import parse_biom_table,parse_biom_table_str, convert_biom_to_table, \
convert_table_to_biom
from subprocess import Popen, PIPE, STDOUT
import StringIO
def make_sample_transformer(scaling_factors):
    """Build a per-sample transform callable for BIOM Table.transformSamples.

    scaling_factors -- mapping of sample id to a multiplicative factor.

    The returned function multiplies each value of a sample by the factor
    registered for that sample's id.
    """
    def transform_sample(sample_value, sample_id, sample_metadata):
        return sample_value * scaling_factors[sample_id]
    return transform_sample
def scale_metagenomes(metagenome_table, scaling_factors):
    """Return a copy of metagenome_table with each sample rescaled.

    metagenome_table -- a BIOM table of metagenome predictions
    scaling_factors -- mapping of sample id to a multiplicative factor
    """
    scaler = make_sample_transformer(scaling_factors)
    return metagenome_table.transformSamples(scaler)
def convert_precalc_to_biom(precalc_in, ids_to_load=None,transpose=True,md_prefix='metadata_'):
    """Loads PICRUSTs tab-delimited version of the precalc file and outputs a BIOM object

    precalc_in -- an open file(-like) object, or a string holding the whole
        tab-delimited table
    ids_to_load -- optional collection of OTU ids; when given, only those
        rows are loaded and all of them must be present in the file
    transpose -- if True (default) the matrix is transposed so OTUs become
        BIOM observations
    md_prefix -- prefix marking metadata rows/columns in the table
    """
    #if given a string convert to a filehandle
    if type(precalc_in) ==str or type(precalc_in) == unicode:
        fh = StringIO.StringIO(precalc_in)
    else:
        fh=precalc_in
    #first line has to be header
    header_ids=fh.readline().strip().split('\t')
    # locate metadata columns (header entries starting with md_prefix);
    # they are kept apart from the numeric data
    col_meta_locs={}
    for idx,col_id in enumerate(header_ids):
        if col_id.startswith(md_prefix):
            col_meta_locs[col_id[len(md_prefix):]]=idx
    end_of_data=len(header_ids)-len(col_meta_locs)
    trait_ids = header_ids[1:end_of_data]
    col_meta=[]
    row_meta=[{} for i in trait_ids]
    if ids_to_load:
        ids_to_load=set(ids_to_load)
        load_all_ids=False
    else:
        load_all_ids=True
    matching=[]
    otu_ids=[]
    for line in fh:
        fields = line.strip().split('\t')
        row_id=fields[0]
        if(row_id.startswith(md_prefix)):
            #handle metadata
            #determine type of metadata (this may not be perfect)
            metadata_type=determine_metadata_type(line)
            for idx,trait_name in enumerate(trait_ids):
                row_meta[idx][row_id[len(md_prefix):]]=parse_metadata_field(fields[idx+1],metadata_type)
        elif load_all_ids or (row_id in set(ids_to_load)):
            otu_ids.append(row_id)
            matching.append(map(float,fields[1:end_of_data]))
            #add metadata
            col_meta_dict={}
            for meta_name in col_meta_locs:
                col_meta_dict[meta_name]=fields[col_meta_locs[meta_name]]
            col_meta.append(col_meta_dict)
            # ids_to_load is drained as rows are found so that leftovers
            # can be reported as missing below
            if not load_all_ids:
                ids_to_load.remove(row_id)
    if not otu_ids:
        raise ValueError,"No OTUs match identifiers in precalculated file. PICRUSt requires an OTU table reference/closed picked against GreenGenes.\nExample of the first 5 OTU ids from your table: {0}".format(', '.join(list(ids_to_load)[:5]))
    if ids_to_load:
        raise ValueError,"One or more OTU ids were not found in the precalculated file!\nAre you using the correct --gg_version?\nExample of (the {0}) unknown OTU ids: {1}".format(len(ids_to_load),', '.join(list(ids_to_load)[:5]))
    #note that we transpose the data before making biom obj
    if transpose:
        return table_factory(asarray(matching).T,otu_ids,trait_ids,col_meta,row_meta,constructor=DenseGeneTable)
    else:
        return table_factory(asarray(matching),trait_ids,otu_ids,row_meta,col_meta,constructor=DenseGeneTable)
def convert_biom_to_precalc(biom_in):
    """Converts a biom file into a PICRUSt precalculated tab-delimited file

    biom_in -- a BIOM-format string or an open file(-like) object.
    Returns the whole table as a single tab-delimited string.
    """
    if type(biom_in) == str or type(biom_in) == unicode:
        biom_table=parse_biom_table_str(biom_in)
    else:
        biom_table=parse_biom_table(biom_in)
    col_ids=biom_table.ObservationIds
    row_ids=biom_table.SampleIds
    lines=[]
    header = ['#OTU_IDs']+list(col_ids)
    col_metadata_names=[]
    #peak at metadata for Samples (e.g. NSTI) so we can set the header
    if biom_table.SampleMetadata:
        col_metadata_names=biom_table.SampleMetadata[0].keys()
    #add the metadata names to the header
    for col_metadata_name in col_metadata_names:
        header.append('metadata_'+col_metadata_name)
    lines.append(map(str,header))
    row_metadata_names=[]
    #peak at metadata for observations (e.g. KEGG_Pathways)
    if biom_table.ObservationMetadata:
        row_metadata_names=biom_table.ObservationMetadata[0].keys()
    # observation metadata becomes 'metadata_'-prefixed rows, one per
    # metadata field, serialised with biom_meta_to_string
    for metadata_name in row_metadata_names:
        metadata_line=['metadata_'+metadata_name]
        #do the observation metadata now
        for col_id in col_ids:
            metadata = biom_table.ObservationMetadata[biom_table.getObservationIndex(col_id)]
            metadata_line.append(biom_meta_to_string(metadata[metadata_name]))
        lines.append(map(str,metadata_line))
    #transpose the actual count data
    transposed_table=biom_table._data.T
    for idx,count in enumerate(transposed_table):
        line=[row_ids[idx]]+map(str,count)
        #add the metadata values to the end of the row now
        for meta_name in col_metadata_names:
            line.append(biom_table.SampleMetadata[idx][meta_name])
        lines.append(line)
    return "\n".join("\t".join(map(str,x)) for x in lines)
def determine_metadata_type(line):
    """Guess the format of a metadata line in a precalc table.

    Returns 'list_of_lists' when both ';' and '|' separators appear,
    'list' when only ';' appears, and 'string' otherwise.
    """
    if ';' not in line:
        return 'string'
    return 'list_of_lists' if '|' in line else 'list'
def parse_metadata_field(metadata_str, metadata_format='string'):
    """Parse one metadata cell according to metadata_format.

    metadata_format -- 'string' (returned unchanged), 'list'
    (';'-separated items) or 'list_of_lists' ('|'-separated groups of
    ';'-separated items). Any other value yields None.
    """
    if metadata_format == 'string':
        return metadata_str
    if metadata_format == 'list':
        return [piece.strip() for piece in metadata_str.split(';')]
    if metadata_format == 'list_of_lists':
        return [[piece.strip() for piece in group.split(';')]
                for group in metadata_str.split('|')]
def biom_meta_to_string(metadata):
    """Determine which format the metadata is and then convert to a string.

    Since ';' and '|' act as field separators in the precalc format, any
    occurrence of them inside the metadata itself is replaced by ':'.
    """
    # str check first so the py2-only 'unicode' name is only consulted for
    # non-str inputs (short-circuit, as in the original).
    if type(metadata) == str or type(metadata) == unicode:
        return metadata.replace(';', ':')
    elif type(metadata) == list:
        if type(metadata[0]) == list:
            groups = (";".join(item.replace(';', ':').replace('|', ':')
                               for item in inner)
                      for inner in metadata)
            return "|".join(groups)
        return ";".join(item.replace(';', ':') for item in metadata)
def system_call(cmd, shell=True):
    """Call cmd and return (stdout, stderr, return_value).

    cmd can be either a string containing the command to be run, or a
    sequence of strings that are the tokens of the command. See Python's
    subprocess.Popen for how cmd is interpreted depending on shell.

    This code was copied from QIIME's qiime_system_call() (util.py)
    function on June 3rd, 2013.
    """
    proc = Popen(cmd, shell=shell, universal_newlines=True,
                 stdout=PIPE, stderr=PIPE)
    # communicate() drains both pipes completely, which avoids deadlocks
    # when a child fills its stdout/stderr buffer -- don't remove it!
    out, err = proc.communicate()
    return out, err, proc.returncode
def file_contains_nulls(file):
    """Check the given file for null characters.

    These are sometimes created on SGE clusters when system IO is
    overloaded. Returns True if at least one NUL byte is present.
    """
    # Use a context manager so the handle is always closed (the original
    # left it open until garbage collection), and a bytes literal so the
    # membership test works on the 'rb' payload under both py2 and py3.
    with open(file, 'rb') as fh:
        return b'\x00' in fh.read()
def parse_table_to_biom(table_lines, table_format="tab-delimited",\
    biom_format = 'otu table'):
    """Read the lines of an open trait table file, and output a .biom table object

    The trait table must be either a biom file, or a picrust tab-delimited file
    table_format -- must be either 'tab-delimited' or 'biom'
    biom_format -- one of the BIOM_TYPES keys below; selects the table
        constructor used for the conversion
    """
    if table_format == "biom":
        return parse_biom_table(table_lines)
    elif table_format == "tab-delimited":
        idx = 0 # a sparse BIOM table
        # maps biom_format name -> [sparse constructor, dense constructor];
        # idx above picks the sparse variant
        BIOM_TYPES = {'otu table':[SparseOTUTable, DenseOTUTable],
                      'pathway table':[SparsePathwayTable, DensePathwayTable],
                      'function table':[SparseFunctionTable, DenseFunctionTable],
                      'ortholog table':[SparseOrthologTable, DenseOrthologTable],
                      'gene table':[SparseGeneTable, DenseGeneTable],
                      'metabolite table':[SparseMetaboliteTable, DenseMetaboliteTable],
                      'taxon table':[SparseTaxonTable, DenseTaxonTable]}
        constructor = BIOM_TYPES[biom_format][idx]
        # no extra mappings or per-value processing are applied here
        sample_mapping = None
        obs_mapping = None
        process_func = None
        try:
            converted_table = (convert_table_to_biom(table_lines,\
              sample_mapping,obs_mapping,process_func, constructor))
            biom_table = parse_biom_table(converted_table)
            #print biom_table
        except ValueError:
            raise ValueError("Input does not look like a classic table.")
        #headers, fields = parse_trait_table(table_lines)
        #now convert to biom
        return biom_table
#def map_metadata_by_key(biom_table,metadata,keys_are="sample_ids"):
# """Map a dict of metadata to a biom table
# biom_table -- a BIOM format table
# metadata -- a dictionary of metadata values,
# keyed by either sample or observation id.
# keys_are -- choose 'sample_ids' or 'observation_ids'
#
# NOTE: if your metadata dict is already keyed by metadata label,
# and is a list, use the biom objects built in functions instead
# of this.
#
# """
# metadata_keys = metadata_dict.keys()
# result_list = []*len(metadata_keys)
# for k in metadata_keys:
# if keys_are == 'sample_ids':
# curr_idx = biom_table.getSampleIndex(k)
# result_list[k] = metadata
# biom_table.getSampleIndex(k)
# elif keys_are == 'observation_ids':
# result_list[k] = metadata
# biom_table.getObservationIndex(k)
# else:
# raise ValueError('keys_are must be "sample_ids", or "observation_ids", not "%s"' %(keys_are))
#
def get_picrust_project_dir():
    """Return the top-level PICRUST directory.

    util.py lives in <project>/picrust/, so the project root is two
    directory levels above this file.
    """
    return dirname(dirname(abspath(__file__)))
def transpose_trait_table_fields(data_fields, header, id_row_idx=0,
                                 input_header_delimiter="\t",
                                 output_delimiter="\t"):
    """Transpose the fields of a trait table, returning new data_fields, header

    data_fields: list of lists for data fields
    header: a string describing the header_line
    id_row_idx: index of row labels (almost always 0, kept for completeness)
    input_header_delimiter: delimiter for fields in the header string
    output_delimiter: use this delimiter to join header fields

    NOTE: typically the header and data fields are generated
    by parse_trait_table in picrust.parse
    """
    # strip trailing newlines from every header field
    old_header_fields = [h.strip()
                         for h in header.split(input_header_delimiter)]
    # new header: old corner label followed by the old row labels
    new_header_fields = [old_header_fields[0]] + \
        [row[id_row_idx].strip() for row in data_fields]
    # drop the label column, then transpose the remaining value matrix
    value_rows = [[cell for pos, cell in enumerate(row) if pos != id_row_idx]
                  for row in data_fields]
    transposed = array(value_rows).T
    new_rows = []
    for offset, values in enumerate(transposed):
        # offset + 1 skips the meaningless upper-left corner entry
        label = old_header_fields[offset + 1]
        new_rows.append([label] + list(values))
    new_header = output_delimiter.join(new_header_fields)
    return new_header + "\n", new_rows
def make_output_dir_for_file(filepath):
    """Create any missing parent directories for a new file."""
    parent = dirname(filepath)
    # a bare filename has no parent ('' from dirname) -- nothing to create
    if parent and not isdir(parent):
        makedirs(parent)
def format_biom_table(biom_table):
    """Given a biom-format Table object, returns that Table as a BIOM string."""
    # Stamp the producing tool and version into the 'generated_by' field.
    return biom_table.getBiomFormatJsonString("PICRUSt " + __version__)
def make_output_dir(dirpath, strict=False):
    """Make an output directory if it doesn't exist

    Returns the absolute path to the directory.

    dirpath -- a string describing the path to the directory
    strict -- if True, raise an IOError if the directory already exists
    """
    dirpath = abspath(dirpath)
    # Check if directory already exists
    if isdir(dirpath):
        if strict:
            raise IOError("Directory '%s' already exists" % dirpath)
        return dirpath
    try:
        makedirs(dirpath)
    except (IOError, OSError) as e:
        # makedirs raises OSError (not IOError), so the original handler
        # never fired; it also crashed while building its message (two %s
        # placeholders but only 'e' supplied). Both fixed here; we still
        # surface IOError to callers, as before.
        raise IOError("Could not create directory '%s'. Are permissions "
                      "set correctly? Got error: '%s'" % (dirpath, e))
    return dirpath
class PicrustNode(PhyloNode):
    """PhyloNode subclass bundling PICRUSt's tree-manipulation helpers."""

    def multifurcating(self, num, eps=None, constructor=None):
        """Return a new tree with every node having num or few children

        num : the number of children a node can have max
        eps : default branch length to set if self or constructor is of
            PhyloNode type
        constructor : a TreeNode or subclass constructor. If None, uses self
        """
        if num < 2:
            # call-form raise: valid on py2.6+ and py3 (was 'raise X, msg')
            raise TreeError("Minimum number of children must be >= 2")
        if eps is None:
            eps = 0.0
        if constructor is None:
            constructor = self.__class__
        if hasattr(constructor, 'Length'):
            set_branchlength = True
        else:
            set_branchlength = False
        new_tree = self.copy()
        for n in new_tree.preorder(include_self=True):
            while len(n.Children) > num:
                new_node = constructor(Children=n.Children[-num:])
                if set_branchlength:
                    new_node.Length = eps
                n.append(new_node)
        return new_tree

    def bifurcating(self, eps=None, constructor=None):
        """Wrap multifurcating with a num of 2"""
        return self.multifurcating(2, eps, constructor)

    def nameUnnamedNodes(self):
        """sets the Data property of unnamed nodes to an arbitrary value

        Internal nodes are often unnamed and so this function assigns a
        value for referencing.
        Note*: This method is faster then pycogent nameUnnamedNodes()
        because it uses a dict instead of an array. Also, we traverse
        only over internal nodes (and not including tips)
        """
        #make a list of the names that are already in the tree
        names_in_use = {}
        for node in self.iterNontips(include_self=True):
            if node.Name:
                names_in_use[node.Name] = 1
        #assign unique names to the Data property of nodes where Data = None
        name_index = 1
        for node in self.iterNontips(include_self=True):
            #if (not node.Name) or re.match('edge',node.Name):
            if not node.Name:
                new_name = 'node' + str(name_index)
                #choose a new name if name is already in tree
                while new_name in names_in_use:
                    name_index += 1
                    new_name = 'node' + str(name_index)
                node.Name = new_name
                names_in_use[node.Name] = 1
                name_index += 1

    def getSubTree(self, names):
        """return a new subtree with just the tips in names

        assumes names is a set
        assumes all names in names are present as tips in tree
        """
        tcopy = self.deepcopy()
        # unset internal names
        #for n in tcopy.nontips():
        #    n.Name = None
        # loop until our tree is the correct size
        # may want to revisit conditional if goes into picrust. unclear if an infinite loop is possible
        while len(tcopy.tips()) != len(names):
            # for each tip, remove it if we do not want to keep it
            for n in tcopy.tips():
                if n.Name not in names:
                    n.Parent.removeNode(n)
            # reduce single-child nodes
            tcopy.prune()
        return tcopy

    def getSubTree_old(self, name_list):
        """A new instance of a sub tree that contains all the otus that are
        listed in name_list.

        just a small change from that in cogent.core.tree.py so that the root
        node keeps its name

        Credit: Julia Goodrich
        """
        edge_names = self.getNodeNames(includeself=1, tipsonly=False)
        for name in name_list:
            if name not in edge_names:
                raise ValueError("edge %s not found in tree" % name)
        new_tree = self._getSubTree(name_list)
        if new_tree is None:
            # call-form raise: valid on py2.6+ and py3 (was 'raise X, msg')
            raise TreeError("no tree created in make sub tree")
        elif new_tree.istip():
            # don't keep name
            new_tree.params = self.params
            new_tree.Length = self.Length
            return new_tree
        else:
            new_tree.Name = self.Name
            new_tree.NameLoaded = self.NameLoaded
            new_tree.params = self.params
            new_tree.Length = self.Length
            # keep unrooted
            if len(self.Children) > 2:
                new_tree = new_tree.unrooted()
            return new_tree

    def _getSubTree(self, included_names, constructor=None):
        """An equivalent node with possibly fewer children, or None

        this is an iterative version of that in cogent.core.tree.py

        Credit: Julia Goodrich
        """
        nodes_stack = [[self, len(self.Children)]]
        result = [[]]
        # Renumber autonamed edges
        if constructor is None:
            constructor = self._default_tree_constructor()
        while nodes_stack:
            top = nodes_stack[-1]
            top_node, num_unvisited_children = top
            if top_node.Name in included_names:
                result[-1].append(top_node.deepcopy(constructor=constructor))
                nodes_stack.pop()
            else:
                #check the top node, any children left unvisited?
                if num_unvisited_children: #has any child unvisited
                    top[1] -= 1 #decrease the #of children unvisited
                    next_child = top_node.Children[-num_unvisited_children]
                    # - for order
                    #pre-visit
                    nodes_stack.append([next_child, len(next_child.Children)])
                    if len(next_child.Children) > 0:
                        result.append([])
                else:
                    node = nodes_stack.pop()[0]
                    children = result[-1]
                    children = [child for child in children if child is not None]
                    if len(top_node.Children) == 0:
                        new_node = None
                    elif len(children) == 0:
                        result.pop()
                        new_node = None
                    elif len(children) == 1:
                        result.pop()
                        # Merge parameter dictionaries by adding lengths and
                        # making weighted averages of other parameters. This
                        # should probably be moved out of here into a
                        # ParameterSet class (Model?) or tree subclass.
                        params = {}
                        child = children[0]
                        if node.Length is not None and child.Length is not None:
                            # 'n != "length"': value comparison -- the original
                            # used identity ('is not') against a string literal,
                            # which only worked by interning accident.
                            shared_params = [n for (n, v) in node.params.items()
                                             if v is not None
                                             and child.params.get(n) is not None
                                             and n != "length"]
                            length = node.Length + child.Length
                            if length:
                                params = dict([(n,
                                                (node.params[n]*node.Length +
                                                 child.params[n]*child.Length) / length)
                                               for n in shared_params])
                                params['length'] = length
                        new_node = child
                        new_node.params = params
                    else:
                        result.pop()
                        new_node = constructor(node, tuple(children))
                    if len(result) > 0:
                        result[-1].append(new_node)
                    else:
                        return new_node
| wasade/picrust | picrust/util.py | Python | gpl-3.0 | 21,831 | [
"VisIt"
] | cf1d2e9f08adc026acbb0aff2b9c29ef542b060875952772b11ae550c67548c7 |
from __future__ import print_function
import sys
import os
import copy
import random
sys.path.insert(1, os.path.join("..",".."))
import h2o
from tests import pyunit_utils
from h2o.grid.grid_search import H2OGridSearch
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
def deeplearning_grid_cars():
    """Grid-search H2ODeepLearningEstimator over the cars dataset.

    Randomly picks a validation scheme (none / CV / validation frame),
    builds a random hyper-parameter grid, then checks grid cardinality,
    that duplicate hyper-parameter entries are ignored, and that the
    hyper-parameters were actually applied to the built models.
    """
    cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
    r = cars[0].runif(seed=42)
    train = cars[r > .2]

    validation_scheme = random.randint(1, 3)  # 1:none, 2:cross-validation, 3:validation set
    print("Validation scheme: {0}".format(validation_scheme))
    if validation_scheme == 2:
        nfolds = 2
        print("Nfolds: 2")
    if validation_scheme == 3:
        valid = cars[r <= .2]

    grid_space = pyunit_utils.make_random_grid_space(algo="dl")
    print("Grid space: {0}".format(grid_space))

    predictors = ["displacement","power","weight","acceleration","year"]
    # Response column depends on the randomly chosen distribution family.
    if grid_space['distribution'][0] == 'bernoulli':
        response_col = "economy_20mpg"
    elif grid_space['distribution'][0] == 'gaussian':
        response_col = "economy"
    else:
        response_col = "cylinders"
    print("Predictors: {0}".format(predictors))
    print("Response: {0}".format(response_col))

    if grid_space['distribution'][0] in ['bernoulli', 'multinomial']:
        print("Converting the response column to a factor...")
        train[response_col] = train[response_col].asfactor()
        if validation_scheme == 3:
            valid[response_col] = valid[response_col].asfactor()

    # BUG FIX: the original messages said "gbm models" -- copy-pasted from
    # the GBM grid test; this test builds deep learning models.
    print("Constructing the grid of deeplearning models...")
    cars_dl_grid = H2OGridSearch(H2ODeepLearningEstimator, hyper_params=grid_space)
    if validation_scheme == 1:
        cars_dl_grid.train(x=predictors, y=response_col, training_frame=train)
    elif validation_scheme == 2:
        cars_dl_grid.train(x=predictors, y=response_col, training_frame=train, nfolds=nfolds)
    else:
        cars_dl_grid.train(x=predictors, y=response_col, training_frame=train, validation_frame=valid)

    for model in cars_dl_grid:
        assert isinstance(model, H2ODeepLearningEstimator)

    print("Performing various checks of the constructed grid...")

    print("Check cardinality of grid, that is, the correct number of models have been created...")
    size_of_grid_space = 1
    for v in list(grid_space.values()):
        size_of_grid_space = size_of_grid_space * len(v)
    actual_size = len(cars_dl_grid)
    assert size_of_grid_space == actual_size, "Expected size of grid to be {0}, but got {1}" \
                                              "".format(size_of_grid_space, actual_size)

    print("Duplicate-entries-in-grid-space check")
    new_grid_space = copy.deepcopy(grid_space)
    # Double every list except the distribution family; grid search must
    # de-duplicate and build the same number of models.
    for name in list(grid_space.keys()):
        if not name == "distribution":
            new_grid_space[name] = grid_space[name] + grid_space[name]
    print("The new search space: {0}".format(new_grid_space))
    print("Constructing the new grid of deeplearning models...")
    cars_dl_grid2 = H2OGridSearch(H2ODeepLearningEstimator, hyper_params=new_grid_space)
    if validation_scheme == 1:
        cars_dl_grid2.train(x=predictors, y=response_col, training_frame=train)
    elif validation_scheme == 2:
        cars_dl_grid2.train(x=predictors, y=response_col, training_frame=train, nfolds=nfolds)
    else:
        cars_dl_grid2.train(x=predictors, y=response_col, training_frame=train, validation_frame=valid)
    actual_size2 = len(cars_dl_grid2)
    assert actual_size == actual_size2, "Expected duplicates to be ignored. Without dups grid size: {0}. With dups " \
                                        "size: {1}".format(actual_size, actual_size2)

    print("Check that the hyper_params that were passed to grid, were used to construct the models...")
    for name in list(grid_space.keys()):
        pyunit_utils.expect_model_param(cars_dl_grid, name, grid_space[name])

    for model in cars_dl_grid2:
        assert isinstance(model, H2ODeepLearningEstimator)
if __name__ == "__main__":
    # Run under the pyunit harness when invoked directly.
    pyunit_utils.standalone_test(deeplearning_grid_cars)
else:
    # NOTE(review): the test also executes on plain import of this module;
    # this mirrors the h2o pyunit convention, but is surprising elsewhere.
    deeplearning_grid_cars()
"Gaussian"
] | 163367de4cbd0c323c9d199757f9941a2f65f83d0089709a9b3afd05f40d2209 |
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
import sys
from karaage.conf.defaults import * # NOQA
from karaage.conf.process import post_process
from karaage.tests.defaults import * # NOQA
# Enable the software-management plugin and its applications sub-plugin
# for this test-settings module.
PLUGINS = [
    'karaage.plugins.kgsoftware.plugin',
    'karaage.plugins.kgsoftware.applications.plugin',
]

DEBUG = False

ALLOWED_HOSTS = ["localhost"]

# Let Karaage derive any remaining settings from the values defined above
# (including those pulled in by the star imports).
post_process(sys.modules[__name__])
| brianmay/karaage | karaage/plugins/kgsoftware/applications/tests/settings.py | Python | gpl-3.0 | 1,078 | [
"Brian"
] | 5f0076e8c10fec7f327b361634b2e41484091deb4a45ac6118899efbf0cc2f59 |
from SymbolTable import SymbolTable, VariableSymbol, FunctionSymbol
import AST
# ---------------------------------------------------------------------------
# Operator/type compatibility table.
#
# ttype[op][left][right] yields the result type of `left op right`; a
# missing entry means the combination is a type error.  The special entry
# ('int', 'warn') marks an allowed-but-lossy float->int assignment.
# ---------------------------------------------------------------------------
arithmetic_operators = ['+', '-', '*', '/', '%']
bitwise_operators = ['|', '&', '^', '<<', '>>']
logical_operators = ['&&', '||']
comparison_operators = ['==', '!=', '>', '<', '<=', '>=']
assignment_operators = ['=']


def all_operators():
    """Return the full list of operators known to the type system."""
    return (arithmetic_operators + bitwise_operators + logical_operators
            + assignment_operators + comparison_operators)


# Start every operator with an empty row per operand type.
ttype = {}
for _op in all_operators():
    ttype[_op] = dict((_t, {}) for _t in ('int', 'float', 'string'))

# Arithmetic: int op int -> int, anything involving a float -> float.
for _op in arithmetic_operators:
    ttype[_op]['int'].update({'int': 'int', 'float': 'float'})
    ttype[_op]['float'].update({'int': 'float', 'float': 'float'})

# String concatenation and repetition.
ttype['+']['string']['string'] = 'string'
ttype['*']['string']['int'] = 'string'

# Assignment compatibility; float -> int is permitted but flagged.
ttype['=']['float'].update({'int': 'float', 'float': 'float'})
ttype['=']['int'].update({'int': 'int', 'float': ('int', 'warn')})
ttype['=']['string']['string'] = 'string'

# Bitwise and logical operators are defined on ints only.
for _op in bitwise_operators + logical_operators:
    ttype[_op]['int']['int'] = 'int'

# Comparing numbers (any mix) or two strings yields an int (boolean).
for _op in comparison_operators:
    ttype[_op]['int'].update({'int': 'int', 'float': 'int'})
    ttype[_op]['float'].update({'int': 'int', 'float': 'int'})
    ttype[_op]['string']['string'] = 'int'
class NodeVisitor(object):
    """Base visitor that dispatches on the node's class name.

    visit() looks up a ``visit_<ClassName>`` method on the concrete
    visitor; when none exists, generic_visit() walks the node's children.
    """

    def visit(self, node, *args):
        handler = getattr(self, 'visit_' + node.__class__.__name__,
                          self.generic_visit)
        return handler(node, *args)

    def generic_visit(self, node, *args):
        """Fallback: recurse into children when no visit_<Class> exists.

        Accepts a list of nodes, None (ignored), or a node exposing a
        ``children`` attribute whose entries may themselves be lists.
        """
        if isinstance(node, list):
            for element in node:
                self.visit(element, *args)
        elif node is not None:
            for child in node.children:
                if isinstance(child, list):
                    for entry in child:
                        if isinstance(entry, AST.Node):
                            self.visit(entry, *args)
                elif isinstance(child, AST.Node):
                    self.visit(child, *args)
class TypeChecker(NodeVisitor):
    """Semantic analyser: walks the AST and reports type errors.

    Python 2 module (uses print statements).  Expression visitors return
    the expression's type name ('int', 'float' or 'string'), or None when
    the expression itself is in error; statement visitors return nothing.
    Every reported problem clears self.noErrors.
    """

    def findVariable(self, tab, variable):
        # Resolve a name in this scope, in the scope's own symbol, or in
        # any enclosing scope.  Implicitly returns None when unknown.
        if variable in tab.symbols:
            return tab.get(variable)
        elif tab.symbol.name == variable:
            return tab.symbol
        elif tab.getParentScope() is not None:
            return self.findVariable(tab.getParentScope(), variable)

    def visit_Program(self, node):
        # Root of the traversal: create the global scope and checker state.
        tab = SymbolTable(None, "program", None)
        self.scope = tab
        self.actFunc = None    # FunctionDefinition currently being checked
        self.actComp = None    # innermost Compound being checked
        self.actLoops = []     # stack of enclosing loop nodes
        self.noErrors = True   # cleared by the first reported error
        self.visit(node.blocks, tab)

    def visit_Blocks(self, node, tab):
        for block in node.blocks:
            self.visit(block, tab)

    def visit_Block(self, node, tab):
        pass

    def visit_Declaration(self, node, tab):
        # Pass the declared type down to every initialiser.
        self.visit(node.inits, tab, node.type)

    def visit_Inits(self, node, tab, type):
        # NOTE(review): parameter `type` shadows the builtin throughout.
        for init in node.inits:
            self.visit(init, tab, type)

    def visit_Init(self, node, tab, type):
        # Declare-and-initialise: reject redeclaration in the same scope,
        # then check assignability of the initialiser's type.
        if node.id in tab.symbols:
            print "Error: Duplicated usage of symbol {0}, line {1}".format(node.id, node.line)
            self.noErrors = False
        value_type = self.visit(node.expression, tab)
        if not value_type in ttype['='][type]:
            print "Error: Value of type {0} cannot be assigned to symbol {1} of type {2}, line {3}" \
                .format(value_type, node.id, type, node.line)
            self.noErrors = False
        else:
            # A ('int', 'warn') table entry marks a lossy float->int case.
            if "warn" in ttype['='][type][value_type]:
                print "Warning: Value of type {0} assigned to symbol {1} of type {2}, line {3}" \
                    .format(value_type, node.id, type, node.line)
            # The symbol is only registered when the assignment is legal.
            tab.put(node.id, VariableSymbol(node.id, type, node.expression))

    def visit_Instructions(self, node, tab):
        for instruction in node.instructions:
            self.visit(instruction, tab)

    def visit_Instruction(self, node, tab):
        pass

    def visit_Print(self, node, tab):
        self.visit(node.expression, tab)

    def visit_Labeled(self, node, tab):
        self.visit(node.instruction, tab)

    def visit_Assignment(self, node, tab):
        # Assignment to an already-declared symbol; returns the resulting
        # type so assignments can be used as sub-expressions.
        # NOTE(review): reported line numbers use ad-hoc offsets
        # (node.line-1 etc.) -- presumably compensating for the parser's
        # line counting; verify against the lexer.
        variable = self.findVariable(tab, node.id)
        if variable is None:
            print "Error: Symbol {0} not defined before, line {1}".format(node.id, node.line-1)
            self.noErrors = False
        else:
            value_type = self.visit(node.expression, tab)
            if not value_type in ttype["="][variable.type]:
                print "Error: Value of type {0} cannot be assigned to symbol {1} of type {2}, line {3}" \
                    .format(value_type, node.id, variable.type, node.expression.line)
                self.noErrors = False
            else:
                if "warn" in ttype["="][variable.type][value_type]:
                    print "Warning: Value of type {0} assigned to symbol {1} of type {2}, line {3}" \
                        .format(value_type, node.id, variable.type, node.line)
                return ttype["="][variable.type][value_type]

    def visit_Choice(self, node, tab):
        # if/else pair.
        self.visit(node._if, tab)
        self.visit(node._else, tab)

    def visit_If(self, node, tab):
        self.visit(node.cond, tab)
        self.visit(node.statement, tab)

    def visit_Else(self, node, tab):
        self.visit(node.statement, tab)

    def visit_While(self, node, tab):
        # Push the loop while checking its body so break/continue can
        # verify they are inside a loop.
        self.visit(node.cond, tab)
        self.actLoops.append(node)
        self.visit(node.statement, tab)
        self.actLoops.pop()

    def visit_RepeatUntil(self, node, tab):
        self.visit(node.cond, tab)
        self.actLoops.append(node)
        self.visit(node.statement, tab)
        self.actLoops.pop()

    def visit_Return(self, node, tab):
        # Valid only inside a function; the returned type must match the
        # function's declared type exactly (no implicit conversion).
        if not type(self.actFunc)==AST.FunctionDefinition:
            print "Error: Return placed outside of a function, line {0}".format(node.line-2)
            self.noErrors = False
        else:
            rettype = self.visit(node.expression, tab)
            if rettype != self.actFunc.type:
                print "Error: Invalid return type of {0}, expected {2}, line {1}".format(rettype, node.line-1, self.actFunc.type)
                self.noErrors = False
        self.hasReturn = True

    def visit_Continue(self, node, tab):
        # NOTE(review): self.actLoops[-1] raises IndexError when actLoops
        # is empty, i.e. exactly in the "outside of a loop" case this
        # check is meant to report -- confirm and guard with a len() test.
        if not type(self.actLoops[-1])==AST.While and not type(self.actLoops[-1])==AST.RepeatUntil:
            print "Error: continue placed outside of a loop, line {0}".format(node.line-1)
            self.noErrors = False

    def visit_Break(self, node, tab):
        # NOTE(review): same IndexError risk as visit_Continue above.
        if not type(self.actLoops[-1])==AST.While and not type(self.actLoops[-1])==AST.RepeatUntil:
            print "Error: break placed outside of a loop, line {0}".format(node.line-1)
            self.noErrors = False

    def visit_Compound(self, node, tab, *args):
        # args[0] is True when the caller (a function definition) wants the
        # body checked in the caller's scope instead of a fresh one, so the
        # function's arguments stay visible.
        if len(args) > 0 and args[0] is True:
            self.visit(node.blocks, tab)
        else:
            tab = tab.pushScope(node)
            self.actComp = node
            self.visit(node.blocks, tab)
            self.actComp = None
            tab = tab.popScope()

    def visit_Condition(self, node, tab):
        pass

    def visit_Expression(self, node, tab):
        pass

    def visit_Const(self, node, tab):
        # Classify a literal by inspecting its textual value: quoted ->
        # string, else try int, then float.
        value = node.value
        if (value[0] in ('"', "'")) and (value[len(value) - 1] in ('"', "'")):
            return 'string'
        try:
            int(value)
            return 'int'
        except ValueError:
            try:
                float(value)
                return 'float'
            except ValueError:
                print "Error: Value's {0} type is not recognized".format(value)
                self.noErrors = False

    def visit_Id(self, node, tab):
        # Identifier use: type comes from the declaring symbol.
        variable = self.findVariable(tab, node.id)
        if variable is None:
            print "Error: Symbol {0} not declared before, line {1}".format(node.id, node.line)
            self.noErrors = False
        else:
            return variable.type

    def visit_BinExpr(self, node, tab):
        # Result type is looked up in the ttype table; a missing entry
        # means the operand types are incompatible for this operator.
        type1 = self.visit(node.expr1, tab)
        type2 = self.visit(node.expr2, tab)
        op = node.operator;
        if type1 is None or not type2 in ttype[op][type1]:
            print "Error: Incompatible types, line", node.line
            self.noErrors = False
        else:
            return ttype[op][type1][type2]

    def visit_ExpressionInPar(self, node, tab):
        expression = node.expression
        return self.visit(expression, tab)

    def visit_IdWithPar(self, node, tab):
        # Function call: check arity, then assignability of each actual
        # argument to the corresponding formal parameter.
        variable = None
        function = self.findVariable(tab, node.id)
        if function is None:
            print "Error: Function {0} not declared before, line {1}".format(node.id, node.line)
            self.noErrors = False
        else:
            if len(function.arguments.arg_list) != len(node.expression_list.expressions):
                print "Error: Wrong number of arguments in {0} call, line {1}".format(node.id, node.line)
                self.noErrors = False
            else:
                for i in range(len(function.arguments.arg_list)):
                    arg_type = function.arguments.arg_list[i].type
                    given_type = self.visit(node.expression_list.expressions[i], tab)
                    if not given_type in ttype['='][arg_type]:
                        print "Error: Incompatible types of arguments in {0} call, line {1}".format(node.id, node.line)
                        self.noErrors = False
                        break
            # NOTE(review): the expression list is visited again here, so
            # each argument is type-checked (and any errors printed) twice.
            self.visit(node.expression_list, tab)
            return function.type

    def visit_ExpressionList(self, node, tab):
        for expression in node.expressions:
            self.visit(expression, tab)

    def visit_FunctionDefinitions(self, node, tab):
        for fundef in node.fundefs:
            self.visit(fundef, tab)

    def visit_FunctionDefinition(self, node, tab):
        # The function symbol is registered both in the enclosing scope
        # (for callers) and in its own scope (so recursion resolves).
        fun_name = self.findVariable(tab, node.id)
        if not fun_name is None:
            print "Error: Symbol {0} declared before, line {1}".format(node.id, node.arglist.line)
            self.noErrors = False
        else:
            tab.put(node.id, FunctionSymbol(node.id, node.type, node.arglist))
            tab = tab.pushScope(node.id)
            tab.put(node.id, FunctionSymbol(node.id, node.type, node.arglist))
            self.actFunc = node
            self.hasReturn = False   # set by visit_Return inside the body
            self.visit(node.arglist, tab)
            # True -> check the body in this scope so arguments are visible.
            self.visit(node.compound_instr, tab, True)
            if self.hasReturn == False:
                print "Error: Missing return instruction for {0} function, line {1}".format(node.id, node.arglist.line)
                self.noErrors = False
            self.hasReturn = False
            self.actFunc = None
            tab = tab.popScope()

    def visit_ArgumentList(self, node, tab):
        for arg in node.arg_list:
            self.visit(arg, tab)

    def visit_Argument(self, node, tab):
        # Formal parameter: declared in the function's own scope.
        if node.id in tab.symbols:
            print "Error: Duplicated usage of symbol {0}, line {1}".format(node.id, node.line)
            self.noErrors = False
        else:
            tab.put(node.id, VariableSymbol(node.id, node.type, None))
        return node.type
"VisIt"
] | 9be0f7c2dbcc791334b62c1a9c5b647cd1e3b97fec6fc405a92c271357a26e61 |
import os
import sys
import argparse
import pysam
__author__ = 'Rob Edwards'
"""
Count the reads associated with one or more contigs.
"""
parser = argparse.ArgumentParser(description='Count reads to one or more contigs in a bam file')
if __name__ == '__main__':
    # Command line: exactly one of -b (BAM) / -s (SAM) plus an optional
    # substring filter on contig names.
    parser = argparse.ArgumentParser(description='calculate the coverage of each read in a bam file')
    parser.add_argument('-b', help='bam file to parse')
    parser.add_argument('-s', help='sam file to parse')
    parser.add_argument('-c', help='partial contig name. If provided will be matched using *is in*')
    args = parser.parse_args()

    if args.b:
        samfile = pysam.AlignmentFile(args.b, "rb")
    elif args.s:
        samfile = pysam.AlignmentFile(args.s, "r")
    else:
        sys.exit("Either -s or -b must be specified")

    # Tally aligned reads per reference contig, honouring the -c filter.
    # NOTE(review): assumes every fetched read has a reference_name;
    # unmapped reads would make the `in` test raise -- confirm inputs.
    counts = {}
    for aln in samfile.fetch():
        ref = aln.reference_name
        if args.c and args.c not in ref:
            continue
        counts[ref] = counts.get(ref, 0) + 1

    for ref in counts:
        print("{}\t{}".format(ref, counts[ref]))
| linsalrob/EdwardsLab | JohnMolkili/count_reads_to_contigs.py | Python | mit | 1,205 | [
"pysam"
] | 6d52b12827d2856b4910ca733b7c5865d1a5ce91105700f8c80e5611e84a6366 |
#-----------------------------------------------------------------------------#
# Quarter Bot - A Twitter bot that flips a coin. #
# emoji.py #
# #
# Copyright (c) 2014, Seventy Four, Inc. #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
#-----------------------------------------------------------------------------#
# See: http://www.unicode.org/charts/PDF/Unicode-7.0/U70-1F300.pdf
EMOJI = {
# Weather, landscape, and sky symbols:
'CYCLONE': u'\U0001F300',
'FOGGY': u'\U0001F301',
'CLOSED UMBRELLA': u'\U0001F302',
'NIGHT WITH STARS': u'\U0001F303',
'SUNRISE OVER MOUNTAINS': u'\U0001F304',
'SUNRISE': u'\U0001F305',
'CITYSCAPE AT DUSK': u'\U0001F306',
'SUNSET OVER BUILDINGS': u'\U0001F307',
'RAINBOW': u'\U0001F308',
'BRIDGE AT NIGHT': u'\U0001F309',
'WATER WAVE': u'\U0001F30A',
'VOLCANO': u'\U0001F30B',
'MILKY WAY': u'\U0001F30C',
# Globe symbols:
'EARTH GLOBE EUROPE-AFRICA': u'\U0001F30D',
'EARTH GLOBE AMERICAS': u'\U0001F30E',
'EARTH GLOBE ASIA-AUSTRALIA': u'\U0001F30F',
'GLOBE WITH MERIDIANS': u'\U0001F310',
# Moon, sun, and star symbols:
'NEW MOON SYMBOL': u'\U0001F311',
'WAXING CRESCENT MOON SYMBOL': u'\U0001F312',
'FIRST QUARTER MOON SYMBOL': u'\U0001F313',
'WAXING GIBBOUS MOON SYMBOL': u'\U0001F314',
'FULL MOON SYMBOL': u'\U0001F315',
'WANING GIBBOUS MOON SYMBOL': u'\U0001F316',
'LAST QUARTER MOON SYMBOL': u'\U0001F317',
'WANING CRESCENT MOON SYMBOL': u'\U0001F318',
'CRESCENT MOON': u'\U0001F319',
'NEW MOON WITH FACE': u'\U0001F31A',
'FIRST QUARTER MOON WITH FACE': u'\U0001F31B',
'LAST QUARTER MOON WITH FACE': u'\U0001F31C',
'FULL MOON WITH FACE': u'\U0001F31D',
'SUN WITH FACE': u'\U0001F31E',
'GLOWING STAR': u'\U0001F31F',
'SHOOTING STAR': u'\U0001F320',
# Weather symbols:
# 'THERMOMETER': u'\U0001F321',
# 'BLACK DROPLET': u'\U0001F322',
# 'WHITE SUN': u'\U0001F323',
# 'WHITE SUN WITH SMALL CLOUD': u'\U0001F324',
# 'WHITE SUN BEHIND CLOUD': u'\U0001F325',
# 'WHITE SUN BEHIND CLOUD WITH RAIN': u'\U0001F326',
# 'CLOUD WITH RAIN': u'\U0001F327',
# 'CLOUD WITH SNOW': u'\U0001F328',
# 'CLOUD WITH LIGHTNING': u'\U0001F329',
# 'CLOUD WITH TORNADO': u'\U0001F32A',
# 'FOG': u'\U0001F32B',
# 'WIND BLOWING FACE': u'\U0001F32C',
# Plant symbols:
'CHESTNUT': u'\U0001F330',
'SEEDLING': u'\U0001F331',
'EVERGREEN TREE': u'\U0001F332',
'DECIDUOUS TREE': u'\U0001F333',
'PALM TREE': u'\U0001F334',
'CACTUS': u'\U0001F335',
# 'HOT PEPPER ': u'\U0001F336',
'TULIP': u'\U0001F337',
'CHERRY BLOSSOM': u'\U0001F338',
'ROSE': u'\U0001F339',
'HIBISCUS': u'\U0001F33A',
'SUNFLOWER': u'\U0001F33B',
'BLOSSOM': u'\U0001F33C',
'EAR OF MAIZE': u'\U0001F33D',
'EAR OF RICE': u'\U0001F33E',
'HERB': u'\U0001F33F',
'FOUR LEAF CLOVER': u'\U0001F340',
'MAPLE LEAF': u'\U0001F341',
'FALLEN LEAF': u'\U0001F342',
'LEAF FLUTTERING IN WIND': u'\U0001F343',
'MUSHROOM': u'\U0001F344',
# Fruit and vegetable symbols:
'TOMATO': u'\U0001F345',
'AUBERGINE': u'\U0001F346',
'GRAPES': u'\U0001F347',
'MELON': u'\U0001F348',
'WATERMELON': u'\U0001F349',
'TANGERINE': u'\U0001F34A',
'LEMON': u'\U0001F34B',
'BANANA': u'\U0001F34C',
'PINEAPPLE': u'\U0001F34D',
'RED APPLE': u'\U0001F34E',
'GREEN APPLE': u'\U0001F34F',
'PEAR': u'\U0001F350',
'PEACH': u'\U0001F351',
'CHERRIES': u'\U0001F352',
'STRAWBERRY': u'\U0001F353',
# Food symbols:
'HAMBURGER': u'\U0001F354',
'SLICE OF PIZZA': u'\U0001F355',
'MEAT ON BONE': u'\U0001F356',
'POULTRY LEG': u'\U0001F357',
'RICE CRACKER': u'\U0001F358',
'RICE BALL': u'\U0001F359',
'COOKED RICE': u'\U0001F35A',
'CURRY AND RICE': u'\U0001F35B',
'STEAMING BOWL': u'\U0001F35C',
'SPAGHETTI': u'\U0001F35D',
'BREAD': u'\U0001F35E',
'FRENCH FRIES': u'\U0001F35F',
'ROASTED SWEET POTATO': u'\U0001F360',
'DANGO': u'\U0001F361',
'ODEN': u'\U0001F362',
'SUSHI': u'\U0001F363',
'FRIED SHRIMP': u'\U0001F364',
'FISH CAKE WITH SWIRL DESIGN': u'\U0001F365',
'SOFT ICE CREAM': u'\U0001F366',
'SHAVED ICE': u'\U0001F367',
'ICE CREAM': u'\U0001F368',
'DOUGHNUT': u'\U0001F369',
'COOKIE': u'\U0001F36A',
'CHOCOLATE BAR': u'\U0001F36B',
'CANDY': u'\U0001F36C',
'LOLLIPOP': u'\U0001F36D',
'CUSTARD': u'\U0001F36E',
'HONEY POT': u'\U0001F36F',
'SHORTCAKE': u'\U0001F370',
'BENTO BOX': u'\U0001F371',
'POT OF FOOD': u'\U0001F372',
'COOKING': u'\U0001F373',
'FORK AND KNIFE': u'\U0001F374',
# Beverage symbols:
'TEACUP WITHOUT HANDLE': u'\U0001F375',
'SAKE BOTTLE AND CUP': u'\U0001F376',
'WINE GLASS': u'\U0001F377',
'COCKTAIL GLASS': u'\U0001F378',
'TROPICAL DRINK': u'\U0001F379',
'BEER MUG': u'\U0001F37A',
'CLINKING BEER MUGS': u'\U0001F37B',
'BABY BOTTLE': u'\U0001F37C',
# Accommodation symbol:
# 'FORK AND KNIFE WITH PLATE': u'\U0001F37D',
# Celebration symbols:
'RIBBON': u'\U0001F380',
'WRAPPED PRESENT': u'\U0001F381',
'BIRTHDAY CAKE': u'\U0001F382',
'JACK-O-LANTERN': u'\U0001F383',
'CHRISTMAS TREE': u'\U0001F384',
'FATHER CHRISTMAS': u'\U0001F385',
'FIREWORKS': u'\U0001F386',
'FIREWORK SPARKLER': u'\U0001F387',
'BALLOON': u'\U0001F388',
'PARTY POPPER': u'\U0001F389',
'CONFETTI BALL': u'\U0001F38A',
'TANABATA TREE': u'\U0001F38B',
'CROSSED FLAGS': u'\U0001F38C',
'PINE DECORATION': u'\U0001F38D',
'JAPANESE DOLLS': u'\U0001F38E',
'CARP STREAMER': u'\U0001F38F',
'WIND CHIME': u'\U0001F390',
'MOON VIEWING CEREMONY': u'\U0001F391',
'SCHOOL SATCHEL': u'\U0001F392',
'GRADUATION CAP': u'\U0001F393',
# 'HEART WITH TIP ON THE LEFT': u'\U0001F394',
# 'BOUQUET OF FLOWERS': u'\U0001F395',
# 'MILITARY MEDAL': u'\U0001F396',
# 'REMINDER RIBBON': u'\U0001F397',
# Musical symbols:
# 'MUSICAL KEYBOARD WITH JACKS': u'\U0001F398',
# 'STUDIO MICROPHONE': u'\U0001F399',
# 'LEVEL SLIDER': u'\U0001F39A',
# 'CONTROL KNOBS': u'\U0001F39B',
# 'BEAMED ASCENDING MUSICAL NOTES': u'\U0001F39C',
# 'BEAMED DESCENDING MUSICAL NOTES': u'\U0001F39D',
# Entertainment symbols:
# 'FILM FRAMES': u'\U0001F39E',
# 'ADMISSION TICKETS': u'\U0001F39F',
'CAROUSEL HORSE': u'\U0001F3A0',
'FERRIS WHEEL': u'\U0001F3A1',
'ROLLER COASTER': u'\U0001F3A2',
'FISHING POLE AND FISH': u'\U0001F3A3',
'MICROPHONE': u'\U0001F3A4',
'MOVIE CAMERA': u'\U0001F3A5',
'CINEMA': u'\U0001F3A6',
'HEADPHONE': u'\U0001F3A7',
'ARTIST PALETTE': u'\U0001F3A8',
'TOP HAT': u'\U0001F3A9',
'CIRCUS TENT': u'\U0001F3AA',
'TICKET': u'\U0001F3AB',
'CLAPPER BOARD': u'\U0001F3AC',
'PERFORMING ARTS': u'\U0001F3AD',
# Game symbols:
'VIDEO GAME': u'\U0001F3AE',
'DIRECT HIT': u'\U0001F3AF',
'SLOT MACHINE': u'\U0001F3B0',
'BILLIARDS': u'\U0001F3B1',
'GAME DIE': u'\U0001F3B2',
'BOWLING': u'\U0001F3B3',
'FLOWER PLAYING CARDS': u'\U0001F3B4',
# Musical symbols:
'MUSICAL NOTE': u'\U0001F3B5',
'MULTIPLE MUSICAL NOTES': u'\U0001F3B6',
'SAXOPHONE': u'\U0001F3B7',
'GUITAR': u'\U0001F3B8',
'MUSICAL KEYBOARD': u'\U0001F3B9',
'TRUMPET': u'\U0001F3BA',
'VIOLIN': u'\U0001F3BB',
'MUSICAL SCORE': u'\U0001F3BC',
# Sport symbols:
'RUNNING SHIRT WITH SASH': u'\U0001F3BD',
'TENNIS RACQUET AND BALL': u'\U0001F3BE',
'SKI AND SKI BOOT': u'\U0001F3BF',
'BASKETBALL AND HOOP': u'\U0001F3C0',
'CHEQUERED FLAG': u'\U0001F3C1',
'SNOWBOARDER': u'\U0001F3C2',
'RUNNER': u'\U0001F3C3',
'SURFER': u'\U0001F3C4',
# 'SPORTS MEDAL': u'\U0001F3C5',
'TROPHY': u'\U0001F3C6',
'HORSE RACING': u'\U0001F3C7',
'AMERICAN FOOTBALL': u'\U0001F3C8',
'RUGBY FOOTBALL': u'\U0001F3C9',
'SWIMMER': u'\U0001F3CA',
# 'WEIGHT LIFTER': u'\U0001F3CB',
# 'GOLFER': u'\U0001F3CC',
# 'RACING MOTORCYCLE': u'\U0001F3CD',
# 'RACING CAR': u'\U0001F3CE',
# Buiding and map symbols:
# 'SNOW CAPPED MOUNTAIN': u'\U0001F3D4',
# 'CAMPING': u'\U0001F3D5',
# 'BEACH WITH UMBRELLA': u'\U0001F3D6',
# 'BUILDING CONSTRUCTION': u'\U0001F3D7',
# 'HOUSE BUILDINGS': u'\U0001F3D8',
# 'CITYSCAPE': u'\U0001F3D9',
# 'DERELICT HOUSE BUILDING': u'\U0001F3DA',
# 'CLASSICAL BUILDING': u'\U0001F3DB',
# 'DESERT': u'\U0001F3DC',
# 'DESERT ISLAND': u'\U0001F3DD',
# 'NATIONAL PARK': u'\U0001F3DE',
# 'STADIUM': u'\U0001F3DF',
'HOUSE BUILDING': u'\U0001F3E0',
'HOUSE WITH GARDEN': u'\U0001F3E1',
'OFFICE BUILDING': u'\U0001F3E2',
'JAPANESE POST OFFICE': u'\U0001F3E3',
'EUROPEAN POST OFFICE': u'\U0001F3E4',
'HOSPITAL': u'\U0001F3E5',
'BANK': u'\U0001F3E6',
'AUTOMATED TELLER MACHINE': u'\U0001F3E7',
'HOTEL': u'\U0001F3E8',
'LOVE HOTEL': u'\U0001F3E9',
'CONVENIENCE STORE': u'\U0001F3EA',
'SCHOOL': u'\U0001F3EB',
'DEPARTMENT STORE': u'\U0001F3EC',
'FACTORY': u'\U0001F3ED',
'IZAKAYA LANTERN': u'\U0001F3EE',
'JAPANESE CASTLE': u'\U0001F3EF',
'EUROPEAN CASTLE': u'\U0001F3F0',
# Flag symbols:
# 'WHITE PENNANT': u'\U0001F3F1',
# 'BLACK PENNANT': u'\U0001F3F2',
# 'WAVING WHITE FLAG': u'\U0001F3F3',
# 'WAVING BLACK FLAG': u'\U0001F3F4',
# Rosettes:
# 'ROSETTE': u'\U0001F3F5',
# 'BLACK ROSETTE': u'\U0001F3F6',
# Miscellaneous symbol:
# 'LABEL': u'\U0001F3F7',
# Animal symbols:
'RAT': u'\U0001F400',
'MOUSE': u'\U0001F401',
'OX': u'\U0001F402',
'WATER BUFFALO': u'\U0001F403',
'COW': u'\U0001F404',
'TIGER': u'\U0001F405',
'LEOPARD': u'\U0001F406',
'RABBIT': u'\U0001F407',
'CAT': u'\U0001F408',
'DRAGON': u'\U0001F409',
'CROCODILE': u'\U0001F40A',
'WHALE': u'\U0001F40B',
'SNAIL': u'\U0001F40C',
'SNAKE': u'\U0001F40D',
'HORSE': u'\U0001F40E',
'RAM': u'\U0001F40F',
'GOAT': u'\U0001F410',
'SHEEP': u'\U0001F411',
'MONKEY': u'\U0001F412',
'ROOSTER': u'\U0001F413',
'CHICKEN': u'\U0001F414',
'DOG': u'\U0001F415',
'PIG': u'\U0001F416',
'BOAR': u'\U0001F417',
'ELEPHANT': u'\U0001F418',
'OCTOPUS': u'\U0001F419',
'SPIRAL SHELL': u'\U0001F41A',
'BUG': u'\U0001F41B',
'ANT': u'\U0001F41C',
'HONEYBEE': u'\U0001F41D',
'LADY BEETLE': u'\U0001F41E',
'FISH': u'\U0001F41F',
'TROPICAL FISH': u'\U0001F420',
'BLOWFISH': u'\U0001F421',
'TURTLE': u'\U0001F422',
'HATCHING CHICK': u'\U0001F423',
'BABY CHICK': u'\U0001F424',
'FRONT-FACING BABY CHICK': u'\U0001F425',
'BIRD': u'\U0001F426',
'PENGUIN': u'\U0001F427',
'KOALA': u'\U0001F428',
'POODLE': u'\U0001F429',
'DROMEDARY CAMEL': u'\U0001F42A',
'BACTRIAN CAMEL': u'\U0001F42B',
'DOLPHIN': u'\U0001F42C',
# Animal faces:
'MOUSE FACE': u'\U0001F42D',
'COW FACE': u'\U0001F42E',
'TIGER FACE': u'\U0001F42F',
'RABBIT FACE': u'\U0001F430',
'CAT FACE': u'\U0001F431',
'DRAGON FACE': u'\U0001F432',
'SPOUTING WHALE': u'\U0001F433',
'HORSE FACE': u'\U0001F434',
'MONKEY FACE': u'\U0001F435',
'DOG FACE': u'\U0001F436',
'PIG FACE': u'\U0001F437',
'FROG FACE': u'\U0001F438',
'HAMSTER FACE': u'\U0001F439',
'WOLF FACE': u'\U0001F43A',
'BEAR FACE': u'\U0001F43B',
'PANDA FACE': u'\U0001F43C',
'PIG NOSE': u'\U0001F43D',
# Animal symbols:
'PAW PRINTS': u'\U0001F43E',
# 'CHIPMUNK': u'\U0001F43F',
# Facial parts symbols:
'EYES': u'\U0001F440',
# 'EYE': u'\U0001F441',
'EAR': u'\U0001F442',
'NOSE': u'\U0001F443',
'MOUTH': u'\U0001F444',
'TONGUE': u'\U0001F445',
# Hand symbols:
'WHITE UP POINTING BACKHAND INDEX': u'\U0001F446',
'WHITE DOWN POINTING BACKHAND INDEX': u'\U0001F447',
'WHITE LEFT POINTING BACKHAND INDEX': u'\U0001F448',
'WHITE RIGHT POINTING BACKHAND INDEX': u'\U0001F449',
'FISTED HAND SIGN': u'\U0001F44A',
'WAVING HAND SIGN': u'\U0001F44B',
'OK HAND SIGN': u'\U0001F44C',
'THUMBS UP SIGN': u'\U0001F44D',
'THUMBS DOWN SIGN': u'\U0001F44E',
'CLAPPING HANDS SIGN': u'\U0001F44F',
'OPEN HANDS SIGN': u'\U0001F450',
# Clothing and accessories:
'CROWN': u'\U0001F451',
'WOMANS HAT': u'\U0001F452',
'EYEGLASSES': u'\U0001F453',
'NECKTIE': u'\U0001F454',
'T-SHIRT': u'\U0001F455',
'JEANS': u'\U0001F456',
'DRESS': u'\U0001F457',
'KIMONO': u'\U0001F458',
'BIKINI': u'\U0001F459',
'WOMANS CLOTHES': u'\U0001F45A',
'PURSE': u'\U0001F45B',
'HANDBAG': u'\U0001F45C',
'POUCH': u'\U0001F45D',
'MANS SHOE': u'\U0001F45E',
'ATHLETIC SHOE': u'\U0001F45F',
'HIGH-HEELED SHOE': u'\U0001F460',
'WOMANS SANDAL': u'\U0001F461',
'WOMANS BOOTS': u'\U0001F462',
'FOOTPRINTS': u'\U0001F463',
# Portrait and role symbols:
'BUST IN SILHOUETTE': u'\U0001F464',
'BUSTS IN SILHOUETTE': u'\U0001F465',
'BOY': u'\U0001F466',
'GIRL': u'\U0001F467',
'MAN': u'\U0001F468',
'WOMAN': u'\U0001F469',
'FAMILY': u'\U0001F46A',
'MAN AND WOMAN HOLDING HANDS': u'\U0001F46B',
'TWO MEN HOLDING HANDS': u'\U0001F46C',
'TWO WOMEN HOLDING HANDS': u'\U0001F46D',
'POLICE OFFICER': u'\U0001F46E',
'WOMAN WITH BUNNY EARS': u'\U0001F46F',
'BRIDE WITH VEIL': u'\U0001F470',
'PERSON WITH BLOND HAIR': u'\U0001F471',
'MAN WITH GUA PI MAO': u'\U0001F472',
'MAN WITH TURBAN': u'\U0001F473',
'OLDER MAN': u'\U0001F474',
'OLDER WOMAN': u'\U0001F475',
'BABY': u'\U0001F476',
'CONSTRUCTION WORKER': u'\U0001F477',
# Fairy tale symbols:
'PRINCESS': u'\U0001F478',
'JAPANESE OGRE': u'\U0001F479',
'JAPANESE GOBLIN': u'\U0001F47A',
'GHOST': u'\U0001F47B',
'BABY ANGEL': u'\U0001F47C',
'EXTRATERRESTRIAL ALIEN': u'\U0001F47D',
'ALIEN MONSTER': u'\U0001F47E',
'IMP': u'\U0001F47F',
'SKULL': u'\U0001F480',
# Role symbols:
'INFORMATION DESK PERSON': u'\U0001F481',
'GUARDSMAN': u'\U0001F482',
'DANCER': u'\U0001F483',
# Personal care symbols:
'LIPSTICK': u'\U0001F484',
'NAIL POLISH': u'\U0001F485',
'FACE MASSAGE': u'\U0001F486',
'HAIRCUT': u'\U0001F487',
'BARBER POLE': u'\U0001F488',
# Medical symbols:
'SYRINGE': u'\U0001F489',
'PILL': u'\U0001F48A',
# Romance symbols:
'KISS MARK': u'\U0001F48B',
'LOVE LETTER': u'\U0001F48C',
'RING': u'\U0001F48D',
'GEM STONE': u'\U0001F48E',
'KISS': u'\U0001F48F',
'BOUQUET': u'\U0001F490',
'COUPLE WITH HEART': u'\U0001F491',
'WEDDING': u'\U0001F492',
# Heart symbols:
'BEATING HEART': u'\U0001F493',
'BROKEN HEART': u'\U0001F494',
'TWO HEARTS': u'\U0001F495',
'SPARKLING HEART': u'\U0001F496',
'GROWING HEART': u'\U0001F497',
'HEART WITH ARROW': u'\U0001F498',
'BLUE HEART': u'\U0001F499',
'GREEN HEART': u'\U0001F49A',
'YELLOW HEART': u'\U0001F49B',
'PURPLE HEART': u'\U0001F49C',
'HEART WITH RIBBON': u'\U0001F49D',
'REVOLVING HEARTS': u'\U0001F49E',
'HEART DECORATION': u'\U0001F49F',
# Comic style symbols:
'DIAMOND SHAPE WITH A DOT INSIDE': u'\U0001F4A0',
'ELECTRIC LIGHT BULB': u'\U0001F4A1',
'ANGER SYMBOL': u'\U0001F4A2',
'BOMB': u'\U0001F4A3',
'SLEEPING SYMBOL': u'\U0001F4A4',
'COLLISION SYMBOL': u'\U0001F4A5',
'SPLASHING SWEAT SYMBOL': u'\U0001F4A6',
'DROPLET': u'\U0001F4A7',
'DASH SYMBOL': u'\U0001F4A8',
'PILE OF POO': u'\U0001F4A9',
'FLEXED BICEPS': u'\U0001F4AA',
'DIZZY SYMBOL': u'\U0001F4AB',
'SPEECH BALLOON': u'\U0001F4AC',
'THOUGHT BALLOON': u'\U0001F4AD',
# Japanese school grade symbols:
'WHITE FLOWER': u'\U0001F4AE',
'HUNDRED POINTS SYMBOL': u'\U0001F4AF',
# Money symbols:
'MONEY BAG': u'\U0001F4B0',
'CURRENCY EXCHANGE': u'\U0001F4B1',
'HEAVY DOLLAR SIGN': u'\U0001F4B2',
'CREDIT CARD': u'\U0001F4B3',
'BANKNOTE WITH YEN SIGN': u'\U0001F4B4',
'BANKNOTE WITH DOLLAR SIGN': u'\U0001F4B5',
'BANKNOTE WITH EURO SIGN': u'\U0001F4B6',
'BANKNOTE WITH POUND SIGN': u'\U0001F4B7',
'MONEY WITH WINGS': u'\U0001F4B8',
'CHART WITH UPWARDS TREND AND YEN SIGN': u'\U0001F4B9',
# Office symbols:
'SEAT': u'\U0001F4BA',
'PERSONAL COMPUTER': u'\U0001F4BB',
'BRIEFCASE': u'\U0001F4BC',
'MINIDISC': u'\U0001F4BD',
'FLOPPY DISK': u'\U0001F4BE',
'OPTICAL DISC': u'\U0001F4BF',
'DVD': u'\U0001F4C0',
'FILE FOLDER': u'\U0001F4C1',
'OPEN FILE FOLDER': u'\U0001F4C2',
'PAGE WITH CURL': u'\U0001F4C3',
'PAGE FACING UP': u'\U0001F4C4',
'CALENDAR': u'\U0001F4C5',
'TEAR-OFF CALENDAR': u'\U0001F4C6',
'CARD INDEX': u'\U0001F4C7',
'CHART WITH UPWARDS TREND': u'\U0001F4C8',
'CHART WITH DOWNWARDS TREND': u'\U0001F4C9',
'BAR CHART': u'\U0001F4CA',
'CLIPBOARD': u'\U0001F4CB',
'PUSHPIN': u'\U0001F4CC',
'ROUND PUSHPIN': u'\U0001F4CD',
'PAPERCLIP': u'\U0001F4CE',
'STRAIGHT RULER': u'\U0001F4CF',
'TRIANGULAR RULER': u'\U0001F4D0',
'BOOKMARK TABS': u'\U0001F4D1',
'LEDGER': u'\U0001F4D2',
'NOTEBOOK': u'\U0001F4D3',
'NOTEBOOK WITH DECORATIVE COVER': u'\U0001F4D4',
'CLOSED BOOK': u'\U0001F4D5',
'OPEN BOOK': u'\U0001F4D6',
'GREEN BOOK': u'\U0001F4D7',
'BLUE BOOK': u'\U0001F4D8',
'ORANGE BOOK': u'\U0001F4D9',
'BOOKS': u'\U0001F4DA',
'NAME BADGE': u'\U0001F4DB',
'SCROLL': u'\U0001F4DC',
# Communication symbols:
'MEMO': u'\U0001F4DD',
'TELEPHONE RECEIVER': u'\U0001F4DE',
'PAGER': u'\U0001F4DF',
'FAX MACHINE': u'\U0001F4E0',
'SATELLITE ANTENNA': u'\U0001F4E1',
'PUBLIC ADDRESS LOUDSPEAKER': u'\U0001F4E2',
'CHEERING MEGAPHONE': u'\U0001F4E3',
'OUTBOX TRAY': u'\U0001F4E4',
'INBOX TRAY': u'\U0001F4E5',
'PACKAGE': u'\U0001F4E6',
'E-MAIL SYMBOL': u'\U0001F4E7',
'INCOMING ENVELOPE': u'\U0001F4E8',
'ENVELOPE WITH DOWNWARDS ARROW ABOVE': u'\U0001F4E9',
'CLOSED MAILBOX WITH LOWERED FLAG': u'\U0001F4EA',
'CLOSED MAILBOX WITH RAISED FLAG': u'\U0001F4EB',
'OPEN MAILBOX WITH RAISED FLAG': u'\U0001F4EC',
'OPEN MAILBOX WITH LOWERED FLAG': u'\U0001F4ED',
'POSTBOX': u'\U0001F4EE',
'POSTAL HORN': u'\U0001F4EF',
'NEWSPAPER': u'\U0001F4F0',
'MOBILE PHONE': u'\U0001F4F1',
'MOBILE PHONE WITH RIGHTWARDS ARROW AT LEFT': u'\U0001F4F2',
'VIBRATION MODE': u'\U0001F4F3',
'MOBILE PHONE OFF': u'\U0001F4F4',
'NO MOBILE PHONES': u'\U0001F4F5',
'ANTENNA WITH BARS': u'\U0001F4F6',
# Audio and video symbols:
'CAMERA': u'\U0001F4F7',
# 'CAMERA WITH FLASH': u'\U0001F4F8',
'VIDEO CAMERA': u'\U0001F4F9',
'TELEVISION': u'\U0001F4FA',
'RADIO': u'\U0001F4FB',
'VIDEOCASSETTE': u'\U0001F4FC',
# 'FILM PROJECTOR': u'\U0001F4FD',
# 'PORTABLE STEREO': u'\U0001F4FE',
# User interface symbols:
'TWISTED RIGHTWARDS ARROWS': u'\U0001F500',
'CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS': u'\U0001F501',
'CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS WITH CIRCLED ONE OVERLAY': u'\U0001F502',
'CLOCKWISE DOWNWARDS AND UPWARDS OPEN CIRCLE ARROWS': u'\U0001F503',
'ANTICLOCKWISE DOWNWARDS AND UPWARDS OPEN CIRCLE ARROWS': u'\U0001F504',
'LOW BRIGHTNESS SYMBOL': u'\U0001F505',
'HIGH BRIGHTNESS SYMBOL': u'\U0001F506',
'SPEAKER WITH CANCELLATION STROKE': u'\U0001F507',
'SPEAKER': u'\U0001F508',
'SPEAKER WITH ONE SOUND WAVE': u'\U0001F509',
'SPEAKER WITH THREE SOUND WAVES': u'\U0001F50A',
'BATTERY': u'\U0001F50A',
'ELECTRIC PLUG': u'\U0001F50C',
'LEFT-POINTING MAGNIFYING GLASS': u'\U0001F50D',
'RIGHT-POINTING MAGNIFYING GLASS': u'\U0001F50E',
'LOCK WITH INK PEN': u'\U0001F50F',
'CLOSED LOCK WITH KEY': u'\U0001F510',
'KEY': u'\U0001F511',
'LOCK': u'\U0001F512',
'OPEN LOCK': u'\U0001F513',
'BELL': u'\U0001F514',
'BELL WITH CANCELLATION STROKE': u'\U0001F515',
'BOOKMARK': u'\U0001F516',
'LINK SYMBOL': u'\U0001F517',
'RADIO BUTTON': u'\U0001F518',
# Words with arrows:
'BACK WITH LEFTWARDS ARROW ABOVE': u'\U0001F519',
'END WITH LEFTWARDS ARROW ABOVE': u'\U0001F51A',
'ON WITH EXCLAMATION MARK WITH LEFT RIGHT ARROW ABOVE': u'\U0001F51B',
'SOON WITH RIGHTWARDS ARROW ABOVE': u'\U0001F51C',
'TOP WITH UPWARDS ARROW ABOVE': u'\U0001F51D',
# Enclosed alphanumeric symbols:
'NO ONE UNDER EIGHTEEN SYMBOL': u'\U0001F51E',
'KEYCAP TEN': u'\U0001F51F',
# User interface input status symbols:
'INPUT SYMBOL FOR LATIN CAPITAL LETTERS': u'\U0001F520',
'INPUT SYMBOL FOR LATIN SMALL LETTERS': u'\U0001F521',
'INPUT SYMBOL FOR NUMBERS': u'\U0001F522',
'INPUT SYMBOL FOR SYMBOLS': u'\U0001F523',
'INPUT SYMBOL FOR LATIN LETTERS': u'\U0001F524',
# Tool symbols:
'FIRE': u'\U0001F525',
'ELECTRIC TORCH': u'\U0001F526',
'WRENCH': u'\U0001F527',
'HAMMER': u'\U0001F528',
'NUT AND BOLT': u'\U0001F529',
'HOCHO': u'\U0001F52A',
'PISTOL': u'\U0001F52B',
'MICROSCOPE': u'\U0001F52C',
'TELESCOPE': u'\U0001F52D',
'CRYSTAL BALL': u'\U0001F52E',
'SIX POINTED STAR WITH MIDDLE DOT': u'\U0001F52F',
'JAPANESE SYMBOL FOR BEGINNER': u'\U0001F530',
'TRIDENT EMBLEM': u'\U0001F531',
# Geometric shapes:
'BLACK SQUARE BUTTON': u'\U0001F532',
'WHITE SQUARE BUTTON': u'\U0001F533',
'LARGE RED CIRCLE': u'\U0001F534',
'LARGE BLUE CIRCLE': u'\U0001F535',
'LARGE ORANGE DIAMOND': u'\U0001F536',
'LARGE BLUE DIAMOND': u'\U0001F537',
'SMALL ORANGE DIAMOND': u'\U0001F538',
'SMALL BLUE DIAMOND': u'\U0001F539',
# User interface symbols:
'UP-POINTING RED TRIANGLE': u'\U0001F53A',
'DOWN-POINTING RED TRIANGLE': u'\U0001F53B',
'UP-POINTING SMALL RED TRIANGLE': u'\U0001F53C',
'DOWN-POINTING SMALL RED TRIANGLE': u'\U0001F53D',
# Shadowed geometric shapes:
# 'LOWER RIGHT SHADOWED WHITE CIRCLE': u'\U0001F53E',
# 'UPPER RIGHT SHADOWED WHITE CIRCLE': u'\U0001F53F',
# Religious symbols:
# 'CIRCLED CROSS POMMEE': u'\U0001F540',
# 'CROSS POMMEE WITH HALF-CIRCLE BELOW': u'\U0001F541',
# 'CROSS POMMEE': u'\U0001F542',
# 'NOTCHED LEFT SEMICIRCLE WITH THREE DOTS': u'\U0001F543',
# 'NOTCHED RIGHT SEMICIRCLE WITH THREE DOTS': u'\U0001F544',
# 'SYMBOL FOR MARKS CHAPTER': u'\U0001F545',
# 'WHITE LATIN CROSS': u'\U0001F546',
# 'HEAVY LATIN CROSS': u'\U0001F547',
# 'CELTIC CROSS': u'\U0001F548',
# 'OM SYMBOL': u'\U0001F549',
# 'DOVE OF PEACE': u'\U0001F54A',
# Clock face symbols:
'CLOCK FACE ONE OCLOCK': u'\U0001F550',
'CLOCK FACE TWO OCLOCK': u'\U0001F551',
'CLOCK FACE THREE OCLOCK': u'\U0001F552',
'CLOCK FACE FOUR OCLOCK': u'\U0001F553',
'CLOCK FACE FIVE OCLOCK': u'\U0001F554',
'CLOCK FACE SIX OCLOCK': u'\U0001F555',
'CLOCK FACE SEVEN OCLOCK': u'\U0001F556',
'CLOCK FACE EIGHT OCLOCK': u'\U0001F557',
'CLOCK FACE NINE OCLOCK': u'\U0001F558',
'CLOCK FACE TEN OCLOCK': u'\U0001F559',
'CLOCK FACE ELEVEN OCLOCK': u'\U0001F55A',
'CLOCK FACE TWELVE OCLOCK': u'\U0001F55B',
'CLOCK FACE ONE-THIRTY': u'\U0001F55C',
'CLOCK FACE TWO-THIRTY': u'\U0001F55D',
'CLOCK FACE THREE-THIRTY': u'\U0001F55E',
'CLOCK FACE FOUR-THIRTY': u'\U0001F55F',
'CLOCK FACE FIVE-THIRTY': u'\U0001F560',
'CLOCK FACE SIX-THIRTY': u'\U0001F561',
'CLOCK FACE SEVEN-THIRTY': u'\U0001F562',
'CLOCK FACE EIGHT-THIRTY': u'\U0001F563',
'CLOCK FACE NINE-THIRTY': u'\U0001F564',
'CLOCK FACE TEN-THIRTY': u'\U0001F565',
'CLOCK FACE ELEVEN-THIRTY': u'\U0001F566',
'CLOCK FACE TWELVE-THIRTY': u'\U0001F567',
# Communication symbols:
# 'RIGHT SPEAKER': u'\U0001F568',
# 'RIGHT SPEAKER WITH ONE SOUND WAVE': u'\U0001F569',
# 'RIGHT SPEAKER WITH THREE SOUND WAVES': u'\U0001F56A',
# 'BULLHORN': u'\U0001F56B',
# 'BULLHORN WITH SOUND WAVES': u'\U0001F56C',
# Miscellaneous symbols:
# 'RINGING BELL': u'\U0001F56D',
# 'BOOK': u'\U0001F56E',
# 'CANDLE': u'\U0001F56F',
# 'MANTELPIECE CLOCK': u'\U0001F570',
# 'BLACK SKULL AND CROSSBONES': u'\U0001F571',
# 'NO PIRACY': u'\U0001F572',
# 'HOLE': u'\U0001F573',
# 'MAN IN BUSINESS SUIT LEVITATING': u'\U0001F574',
# 'SLEUTH OR SPY': u'\U0001F575',
# 'DARK SUNGLASSES': u'\U0001F576',
# Animal symbols:
# 'SPIDER': u'\U0001F577',
# 'SPIDER WEB': u'\U0001F578',
# Game symbol:
# 'JOYSTICK': u'\U0001F579',
# Communication symbols:
# 'LEFT HAND TELEPHONE RECEIVER': u'\U0001F57B',
# 'TELEPHONE RECEIVER WITH PAGE': u'\U0001F57C',
# 'RIGHT HAND TELEPHONE RECEIVER': u'\U0001F57D',
# 'WHITE TOUCHTONE TELEPHONE': u'\U0001F57E',
# 'BLACK TOUCHTONE TELEPHONE': u'\U0001F57F',
# 'TELEPHONE ON TOP OF MODEM': u'\U0001F580',
# 'CLAMSHELL MOBILE PHONE': u'\U0001F581',
# 'BACK OF ENVELOPE': u'\U0001F582',
# 'STAMPED ENVELOPE': u'\U0001F583',
# 'ENVELOPE WITH LIGHTNING': u'\U0001F584',
# 'FLYING ENVELOPE': u'\U0001F585',
# 'PEN OVER STAMPED ENVELOPE': u'\U0001F586',
# 'LINKED PAPERCLIPS': u'\U0001F587',
# 'BLACK PUSHPIN': u'\U0001F588',
# 'LOWER LEFT PENCIL': u'\U0001F589',
# 'LOWER LEFT BALLPOINT PEN': u'\U0001F58A',
# 'LOWER LEFT FOUNTAIN PEN': u'\U0001F58B',
# 'LOWER LEFT PAINTBRUSH': u'\U0001F58C',
# 'LOWER LEFT CRAYON': u'\U0001F58D',
# Hand symbols:
# 'LEFT WRITING HAND': u'\U0001F58E',
# 'TURNED OK HAND SIGN': u'\U0001F58F',
# 'RAISED HAND WITH FINGERS SPLAYED': u'\U0001F590',
# 'REVERSED RAISED HAND WITH FINGERS SPLAYED': u'\U0001F591',
# 'REVERSED THUMBS UP SIGN': u'\U0001F592',
# 'REVERSED THUMBS DOWN SIGN': u'\U0001F593',
# 'REVERSED VICTORY HAND': u'\U0001F594',
# 'REVERSED HAND WITH MIDDLE FINGER EXTENDED': u'\U0001F595',
# 'RAISED HAND WITH PART BETWEEN MIDDLE AND RING FINGERS': u'\U0001F596',
# 'WHITE DOWN POINTING LEFT HAND INDEX': u'\U0001F597',
# 'SIDEWAYS WHITE LEFT POINTING INDEX': u'\U0001F598',
# 'SIDEWAYS WHITE RIGHT POINTING INDEX': u'\U0001F599',
# 'SIDEWAYS BLACK LEFT POINTING INDEX': u'\U0001F59A',
# 'SIDEWAYS BLACK RIGHT POINTING INDEX': u'\U0001F59B',
# 'BLACK LEFT POINTING BACKHAND INDEX': u'\U0001F59C',
# 'BLACK RIGHT POINTING BACKHAND INDEX': u'\U0001F59D',
# 'SIDEWAYS WHITE UP POINTING INDEX': u'\U0001F59E',
# 'SIDEWAYS WHITE DOWN POINTING INDEX': u'\U0001F59E',
# 'SIDEWAYS BLACK UP POINTING INDEX': u'\U0001F5A0',
# 'SIDEWAYS BLACK DOWN POINTING INDEX': u'\U0001F5A1',
# 'BLACK UP POINTING BACKHAND INDEX': u'\U0001F5A2',
# 'BLACK DOWN POINTING BACKHAND INDEX': u'\U0001F5A3',
# Computer symbols:
# 'DESKTOP COMPUTER': u'\U0001F5A5',
# 'KEYBOARD AND MOUSE': u'\U0001F5A6',
# 'THREE NETWORKED COMPUTERS': u'\U0001F5A7',
# 'PRINTER': u'\U0001F5A8',
# 'POCKET CALCULATOR': u'\U0001F5A9',
# 'BLACK HARD SHELL FLOPPY DISK': u'\U0001F5AA',
# 'WHITE HARD SHELL FLOPPY DISK': u'\U0001F5AB',
# 'SOFT SHELL FLOPPY DISK': u'\U0001F5AC',
# 'TAPE CARTRIDGE': u'\U0001F5AD',
# 'WIRED KEYBOARD': u'\U0001F5AE',
# 'ONE BUTTON MOUSE': u'\U0001F5AF',
# 'TWO BUTTON MOUSE': u'\U0001F5B0',
# 'THREE BUTTON MOUSE': u'\U0001F5B1',
# 'TRACKBALL': u'\U0001F5B2',
# 'OLD PERSONAL COMPUTER': u'\U0001F5B3',
# 'HARD DISK': u'\U0001F5B4',
# 'SCREEN': u'\U0001F5B5',
# 'PRINTER ICON': u'\U0001F5B6',
# 'FAX ICON': u'\U0001F5B7',
# 'OPTICAL DISC ICON': u'\U0001F5B8',
# Office symbols:
# 'DOCUMENT WITH TEXT': u'\U0001F5B9',
# 'DOCUMENT WITH TEXT AND PICTURE': u'\U0001F5BA',
# 'DOCUMENT WITH PICTURE': u'\U0001F5BB',
# 'FRAME WITH PICTURE': u'\U0001F5BC',
# 'FRAME WITH TILES': u'\U0001F5BD',
# 'FRAME WITH AN X': u'\U0001F5BE',
# User interface symbols:
# 'BLACK FOLDER': u'\U0001F5BF',
# 'FOLDER': u'\U0001F5C0',
# 'OPEN FOLDER': u'\U0001F5C1',
# 'CARD INDEX DIVIDERS': u'\U0001F5C2',
# 'CARD FILE BOX': u'\U0001F5C3',
# 'FILE CABINET': u'\U0001F5C4',
# 'EMPTY NOTE': u'\U0001F5C5',
# 'EMPTY NOTE PAGE': u'\U0001F5C6',
# 'EMPTY NOTE PAD': u'\U0001F5C7',
# 'NOTE': u'\U0001F5C8',
# 'NOTE PAGE': u'\U0001F5C9',
# 'NOTE PAD': u'\U0001F5CA',
# 'EMPTY DOCUMENT': u'\U0001F5CB',
# 'EMPTY PAGE': u'\U0001F5CC',
# 'EMPTY PAGES': u'\U0001F5CD',
# 'DOCUMENT': u'\U0001F5CE',
# 'PAGE': u'\U0001F5CF',
# 'PAGES': u'\U0001F5D0',
# 'WASTEBASKET': u'\U0001F5D1',
# 'SPIRAL NOTE PAD': u'\U0001F5D2',
# 'SPIRAL CALENDAR PAD': u'\U0001F5D3',
# 'DESKTOP WINDOW': u'\U0001F5D4',
# 'MINIMIZE': u'\U0001F5D5',
# 'MAXIMIZE': u'\U0001F5D6',
# 'OVERLAP': u'\U0001F5D7',
# 'CLOCKWISE RIGHT AND LEFT SEMICIRCLE ARROWS': u'\U0001F5D8',
# 'CANCELLATION X': u'\U0001F5D9',
# 'INCREASE FONT SIZE SYMBOL': u'\U0001F5DA',
# 'DECREASE FONT SIZE SYMBOL': u'\U0001F5DB',
# 'COMPRESSION': u'\U0001F5DC',
# 'OLD KEY': u'\U0001F5DD',
# Miscellaneous symbols:
# 'ROLLED-UP NEWSPAPER': u'\U0001F5DE',
# 'PAGE WITH CIRCLED TEXT': u'\U0001F5DF',
# 'STOCK CHART': u'\U0001F5E0',
# Rating symbols:
# 'DAGGER KNIFE': u'\U0001F5E1',
# 'LIPS': u'\U0001F5E2',
# 'SPEAKING HEAD IN SILHOUETTE': u'\U0001F5E3',
# Sound symbols:
# 'THREE RAYS ABOVE': u'\U0001F5E4',
# 'THREE RAYS BELOW': u'\U0001F5E5',
# 'THREE RAYS LEFT': u'\U0001F5E6',
# 'THREE RAYS RIGHT': u'\U0001F5E7',
# Bubble symbols:
# 'LEFT SPEECH BUBBLE': u'\U0001F5E8',
# 'RIGHT SPEECH BUBBLE': u'\U0001F5E9',
# 'TWO SPEECH BUBBLES': u'\U0001F5EA',
# 'THREE SPEECH BUBBLES': u'\U0001F5EB',
# 'LEFT THOUGHT BUBBLE': u'\U0001F5EC',
# 'RIGHT THOUGHT BUBBLE': u'\U0001F5ED',
# 'LEFT ANGER BUBBLE': u'\U0001F5EE',
# 'RIGHT ANGER BUBBLE': u'\U0001F5EF',
# 'MOOD BUBBLE': u'\U0001F5F0',
# 'LIGHTNING MOOD BUBBLE': u'\U0001F5F1',
# 'LIGHTNING MOOD': u'\U0001F5F2',
# Ballot symbols:
# 'BALLOT BOX WITH BALLOT': u'\U0001F5F3',
# 'BALLOT SCRIPT X': u'\U0001F5F4',
# 'BALLOT BOX WITH SCRIPT X': u'\U0001F5F5',
# 'BALLOT BOLD SCRIPT X': u'\U0001F5F6',
# 'BALLOT BOX WITH BOLD SCRIPT X': u'\U0001F5F7',
# 'LIGHT CHECK MARK': u'\U0001F5F8',
# 'BALLOT BOX WITH BOLD CHECK': u'\U0001F5F9',
# Map symbol:
# 'WORLD MAP': u'\U0001F5FA',
# Cultural symbols:
'MOUNT FUJI': u'\U0001F5FB',
'TOKYO TOWER': u'\U0001F5FC',
'STATUE OF LIBERTY': u'\U0001F5FD',
'SILHOUETTE OF JAPAN': u'\U0001F5FE',
'MOYAI': u'\U0001F5FF',
}
def main():
    """Dump the EMOJI table, one entry per line: glyph first, then its name."""
    for name, glyph in EMOJI.items():
        line = u'{0} - {1}'.format(glyph, name)
        print(line)


if __name__ == '__main__':
    main()
| brainix/quarter-bot | emoji.py | Python | gpl-3.0 | 62,546 | [
"CRYSTAL",
"Octopus"
] | 3d3a30cd61ae373b04263b162990881ab509688d92520a270918511916f00ab3 |
# coding: utf-8
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
import os
from textwrap import dedent
from pypln.backend.workers import Extractor
from utils import TaskTest
# Directory containing the fixture documents read by the tests below.
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))


class TestExtractorWorker(TaskTest):
    """Integration tests for the ``Extractor`` worker.

    Each test stores a raw document (``filename`` + ``contents``) in
    ``self.document``, runs ``Extractor().delay(self.fake_id)`` and then
    checks the extracted ``text``, detected ``mimetype``/``language`` and
    ``file_metadata`` written back into the document.
    """

    def test_extraction_from_text_file(self):
        """Plain-text input: text extracted verbatim, no file metadata."""
        expected = "This is a test file.\nI'm testing PyPLN extractor worker!"
        filename = os.path.join(DATA_DIR, 'test.txt')
        self.document.update({'filename': filename,
                              'contents': open(filename).read()})
        Extractor().delay(self.fake_id)
        self.assertEqual(self.document['text'], expected)
        self.assertEqual(self.document['file_metadata'], {})
        self.assertEqual(self.document['mimetype'], 'text/plain')

    def test_extraction_from_html_file(self):
        """HTML file on disk: markup stripped, mimetype detected as text/html."""
        expected = "This is a test file. I'm testing PyPLN extractor worker!"
        filename = os.path.join(DATA_DIR, 'test.html')
        data = {'filename': filename, 'contents': open(filename).read()}
        self.document.update(data)
        Extractor().delay(self.fake_id)
        self.assertEqual(self.document['text'], expected)
        self.assertEqual(self.document['file_metadata'], {})
        self.assertEqual(self.document['mimetype'], 'text/html')

    def test_extraction_from_pdf_file(self):
        """PDF input: text extracted and PDF metadata captured."""
        expected = "This is a test file.\nI'm testing PyPLN extractor worker!"
        filename = os.path.join(DATA_DIR, 'test.pdf')
        data = {'filename': filename, 'contents': open(filename).read()}
        self.document.update(data)
        Extractor().delay(self.fake_id)
        self.assertEqual(self.document['text'], expected)
        # Check that the expected metadata is a subset of what
        # our Extractor found (it may have found more details
        # depending on the toolset used to extract metadata)
        metadata_expected = {
            'Author': 'Álvaro Justen',
            'Creator': 'Writer',
            'Producer': 'LibreOffice 3.5',
            'CreationDate': 'Fri Jun 1 17:07:57 2012',
            'Tagged': 'no',
            'Pages': '1',
            'Encrypted': 'no',
            'Page size': '612 x 792 pts (letter)',
            'Optimized': 'no',
            'PDF version': '1.4',
        }
        metadata_expected_set = set(metadata_expected.iteritems())
        metadata = self.document['file_metadata']
        metadata_set = set(metadata.iteritems())
        diff_set = metadata_expected_set - metadata_set
        self.assertTrue(metadata_expected_set.issubset(metadata_set),
            ("Extracted metadata is not a subset of the expected metadata. "
             "Items missing or with different values: {}").format(
                u", ".join(unicode(item) for item in diff_set)))
        self.assertEqual(self.document['mimetype'], 'application/pdf')

    def test_extraction_from_html(self):
        """In-memory HTML: <script>/<style> content dropped, text kept."""
        # NOTE(review): dedent strips the common leading whitespace of the
        # triple-quoted literals below, so their indentation is cosmetic.
        contents = dedent('''
            <html>
            <head>
            <title>Testing</title>
            <script type="text/javascript">this must not appear</script>
            <style type="text/css">this must not appear [2]</style>
            </head>
            <body>
            python test1
            <br>
            test2
            <table>
            <tr><td>spam</td></tr>
            <tr><td>eggs</td></tr>
            <tr><td>ham</td></tr>
            </table>
            test3
            <div>test4</div>test5
            <span>test6</span>test7
            <h1>bla1</h1> bla2
            </body>
            </html>
            ''')
        data = {'filename': 'test.html', 'contents': contents}
        self.document.update(data)
        result = Extractor().delay(self.fake_id)
        expected = dedent('''
            Testing
            python test1
            test2
            spam
            eggs
            ham
            test3
            test4
            test5 test6 test7
            bla1
            bla2''').strip()
        self.assertEqual(self.document['text'], expected)
        self.assertEqual(self.document['mimetype'], 'text/html')

    def test_language_detection_pt(self):
        """Portuguese text is tagged 'pt'."""
        text_pt = 'Esse texto foi escrito por Álvaro em Português.'
        data_pt = {'filename': 'text-pt.txt', 'contents': text_pt}
        self.document.update(data_pt)
        Extractor().delay(self.fake_id).get()
        self.assertEqual(self.document['language'], 'pt')

    def test_language_detection_es(self):
        """Spanish text is tagged 'es'."""
        text_es = 'Este texto ha sido escrito en Español por Álvaro.'
        data_es = {'filename': 'text-es.txt', 'contents': text_es}
        self.document.update(data_es)
        Extractor().delay(self.fake_id)
        self.assertEqual(self.document['language'], 'es')

    def test_language_detection_en(self):
        """English text is tagged 'en'."""
        text_en = 'This text was written by Álvaro in English.'
        data_en = {'filename': 'text-en.txt', 'contents': text_en}
        self.document.update(data_en)
        Extractor().delay(self.fake_id)
        self.assertEqual(self.document['language'], 'en')

    def test_unescape_html_entities(self):
        """HTML entities inside plain text are unescaped in the output."""
        expected = (u"This text has html <entities>. Álvaro asked me to make"
                    " sure it also has non ascii chars.")
        filename = os.path.join(DATA_DIR, 'test_html_entities.txt')
        data = {'filename': filename, 'contents': open(filename).read()}
        self.document.update(data)
        Extractor().delay(self.fake_id)
        self.assertEqual(self.document['text'], expected)

    def test_should_detect_encoding_and_return_a_unicode_object(self):
        """ISO-8859-1 input is decoded; extracted text is a unicode object."""
        expected = u"Flávio"
        filename = os.path.join(DATA_DIR, 'test_iso-8859-1.txt')
        data = {'filename': filename, 'contents': open(filename).read()}
        self.document.update(data)
        Extractor().delay(self.fake_id)
        self.assertEqual(self.document['text'], expected)
        self.assertEqual(type(self.document['text']), unicode)

    def test_should_guess_mimetype_for_file_without_extension(self):
        """Mimetype is sniffed from content when the filename lacks an extension."""
        contents = "This is a test file. I'm testing PyPLN extractor worker!"
        filename = os.path.join(DATA_DIR, 'text_file')
        data = {'filename': filename, 'contents': contents}
        self.document.update(data)
        Extractor().delay(self.fake_id)
        self.assertEqual(self.document['mimetype'], 'text/plain')

    def test_unknown_mimetype_should_be_flagged(self):
        """Unrecognized content yields 'unknown' mimetype and empty results."""
        filename = os.path.join(DATA_DIR, 'random_file')
        # we can't put the expected text content here, so we'll just make sure
        # it's equal to the input content, since
        contents = open(filename).read()
        data = {'filename': filename, 'contents': contents}
        self.document.update(data)
        Extractor().delay(self.fake_id)
        self.assertEqual(self.document['mimetype'], 'unknown')
        self.assertEqual(self.document['text'], "")
        self.assertEqual(self.document['language'], "")
        self.assertEqual(self.document['file_metadata'], {})

    def test_unknown_encoding_should_be_ignored(self):
        """Bytes libmagic cannot classify do not crash extraction."""
        filename = os.path.join(DATA_DIR, 'encoding_unknown_to_libmagic.txt')
        expected = u"This file has a weird byte (\x96) that makes it impossible for libmagic to recognize it's encoding."
        data = {'filename': filename, 'contents': open(filename).read()}
        self.document.update(data)
        Extractor().delay(self.fake_id)
        self.assertEqual(self.document['text'], expected)
        self.assertEqual(self.document['file_metadata'], {})
        self.assertEqual(self.document['language'], 'en')
| fccoelho/pypln.backend | tests/test_worker_extractor.py | Python | gpl-3.0 | 8,375 | [
"NAMD"
] | b4401551e3c888650eca0c534ff960ec4924e6acdc33625dbc4a182a14d3dccc |
import os, sys
from collections import defaultdict
import meta
# Fallback values returned when a taxid has no entry in the name/rank table.
NO_ENTRY_NAME = 'no_db_entry_name'
NO_ENTRY_RANK = 'no_db_entry_rank'

# NCBI taxonomy ranks, ordered from most general (0) to most specific (27);
# 'no rank' (28) is used for unranked nodes.
ranks = { 'superkingdom' : 0,
          'kingdom' : 1,
          'subkingdom' : 2,
          'superphylum' : 3,
          'phylum' : 4,
          'subphylum' : 5,
          'superclass' : 6,
          'class' : 7,
          'subclass' : 8,
          'infraclass' : 9,
          'superorder' : 10,
          'order' : 11,
          'suborder' : 12,
          'infraorder' : 13,
          'parvorder' : 14,
          'superfamily' : 15,
          'family' : 16,
          'subfamily' : 17,
          'tribe' : 18,
          'subtribe' : 19,
          'genus' : 20,
          'subgenus' : 21,
          'species group' : 22,
          'species subgroup' : 23,
          'species' : 24,
          'subspecies' : 25,
          'varietas' : 26,
          'forma' : 27,
          'no rank' : 28 }

# NCBI taxonomy IDs for clades referenced by TaxTree below.
# These seven are collected into TaxTree.potential_hosts:
human = 9606
mouse = 10090
rats = 10114
rodents = 9989
primates = 9443
animalia = 33208
green_plants = 33090
eukaryota = 2759
# Microbial / viral clades (most are collected into TaxTree.microbes):
archea = 2157
bacteria = 2
viruses = 10239
fungi = 4751
euglenozoa = 33682
alveolata = 33630
amoebozoa = 554915
fornicata = 207245
parabasalia = 5719
heterolobosea = 5752
viroids = 12884
stramenopiles = 33634
# NOTE(review): the '(ne)'/'(da)' tags below appear to mark whether the
# clade is included in TaxTree.microbes ('da'-tagged ids are listed there,
# 'ne'-tagged ids are not) -- confirm with the original author.
blastocladiomycota = 451459 #(ne)
chytridiomycota = 4761 #(ne)
cryptomycota = 1031332 #(da)
dikarya = 451864 #(ne)
entomophthoromycota = 1264859 #(da)
glomeromycota = 214504 #(ne)
microsporidia = 6029 #(DA)
neocallimastigomycota = 451455 #(da)
# Catch-all NCBI categories:
other = 28384
unclassified = 12908
artificial = 81077
class TaxTree ():
    ''' Loads the NCBI taxonomy tree, creates both
        parent-child and child-parent relations,
        enables parent-child relationship testing and
        finding the least common ancestor.
    '''

    def __init__(self, parent2child_fname=None, tax_nodes_fname=None):
        ''' Locates the ncbi taxonomy files and sets the important
            taxonomy assignments (such as animalia, bacteria etc.)

            :param parent2child_fname: location of the ncbi taxonomy tree file
            :param tax_nodes_fname: location of the file containing taxid,
                organism name and organism rank for each taxid in the tree.
        '''
        if not parent2child_fname:
            parent2child_fname = os.path.join(meta.__path__[0], 'data', 'ncbi_tax_tree')
        self.load(parent2child_fname)
        if not tax_nodes_fname:
            tax_nodes_fname = os.path.join(meta.__path__[0], 'data', 'taxid2namerank')
        self.load_taxonomy_data(tax_nodes_fname)
        # -------- relevant taxonomy assignments -------- #
        self._h_set_relevant_taxonomy_assignments()
        self._h_map_taxids_to_relevant_tax_nodes()

    def load(self, parent2child_fname):
        '''Builds both the child->parent and parent->children mappings.'''
        self.parent_nodes = self._h_get_tax_nodes(parent2child_fname)
        self.child_nodes = self._h_populate_child_nodes()

    def load_taxonomy_data(self, tax_nodes_fname):
        '''
        Loads organism name and rank for each tax ID from a
        pipe-delimited file (one "taxid|name|rank" record per line).
        After invoking this method the ``nodes`` attribute maps
        int tax_id -> TaxNode.
        '''
        self.nodes = {}
        with open(tax_nodes_fname, 'r') as tax_nodes_file:
            for line in tax_nodes_file:
                (taxid, org_name, rank) = line.strip().split('|')
                self.nodes[int(taxid)] = TaxNode(org_name, rank)

    def get_org_name(self, taxid):
        '''Scientific name for *taxid*, or NO_ENTRY_NAME if unknown.'''
        if taxid not in self.nodes:
            return NO_ENTRY_NAME
        return self.nodes[taxid].organism_name

    def get_org_rank(self, taxid):
        '''Taxonomy rank for *taxid*, or NO_ENTRY_RANK if unknown.'''
        if taxid not in self.nodes:
            return NO_ENTRY_RANK
        return self.nodes[taxid].rank

    def is_child(self, child_taxid, parent_taxid):
        ''' Tests whether child_taxid lies in the subtree rooted at
            parent_taxid.  A node is not considered a child of itself.
        '''
        # boundary conditions
        if child_taxid == parent_taxid:
            return False
        if parent_taxid == self.root:
            return True
        tmp_parent_taxid = child_taxid
        while True:
            if tmp_parent_taxid not in self.parent_nodes:
                return False
            tmp_parent_taxid = self.parent_nodes[tmp_parent_taxid]
            if tmp_parent_taxid == self.root:
                return False
            if tmp_parent_taxid == parent_taxid:
                return True

    def find_lca(self, taxid_list):
        ''' Finds the lowest common ancestor of a list of nodes.

            Taxids without an entry in the tree are reported on stderr and
            ignored; if none remain, the root is returned.

            Args:
                taxid_list ([int]): List of tax_ids
            Returns:
                (int): tax_id of the LCA
        '''
        # Report invalid taxids (no parent entry) instead of silently
        # swallowing them.  (The original code raised an exception here
        # only to immediately 'pass' it -- dead code.)
        for taxid in taxid_list:
            if taxid != self.root and taxid not in self.parent_nodes:
                sys.stderr.write("Key error, no element with id %s\n" % taxid)
        # Filter out invalid tax_ids - those without parents.
        # (A list comprehension instead of filter() so len() below works
        # on Python 3 as well.)
        taxid_list = [taxid for taxid in taxid_list
                      if taxid == self.root or taxid in self.parent_nodes]
        if not taxid_list:
            # BUGFIX: the previous code referenced an unbound name 'e' here,
            # so this branch raised NameError whenever it was taken.
            sys.stderr.write("taxid_list is empty, cannot find LCA!\n")
            return self.root    # assign to the root of the tree
        # Each visited node counts how many of the query nodes traversed
        # it; the first node reached by all of them (duplicates included)
        # is the lowest common ancestor.
        self.num_visited = defaultdict(int)
        current_taxids = taxid_list
        num_of_nodes = len(current_taxids)
        while True:
            parent_nodes = []
            for taxid in current_taxids:
                # the root must not enqueue a parent for itself
                parent_taxid = self.parent_nodes[taxid] if taxid != self.root else None
                # duplicates are kept on purpose so every traversal counts
                if parent_taxid:
                    parent_nodes.append(parent_taxid)
                self.num_visited[taxid] += 1
                if self.num_visited[taxid] == num_of_nodes:
                    self.lca_root = taxid
                    return taxid
            current_taxids = parent_nodes

    def get_relevant_taxid(self, tax_id):
        '''Maps *tax_id* to its precomputed host/microbe clade (-1 if none).'''
        return self.tax2relevantTax.get(tax_id, -1)

    def get_lineage(self, tax_id):
        '''Returns the lineage of *tax_id* ordered from just below the root
        down to the node itself (the root is excluded).'''
        lineage = []
        while tax_id != self.root:
            lineage.append(tax_id)
            tax_id = self.parent_nodes[tax_id]
        return reversed(lineage)

    def get_parent_with_rank(self, tax_id, rank):
        '''First ancestor (or the node itself) whose rank equals *rank*.
        Returns -1 for taxids with no node entry, 0 if the root is reached
        before a node with the requested rank.'''
        if tax_id not in self.nodes:
            return -1
        while True:
            if tax_id == self.root:
                return 0
            if self.nodes[tax_id].rank == rank:
                return tax_id
            tax_id = self.parent_nodes[tax_id]

    def _h_get_tax_nodes(self, parent2child_fname):
        '''Loads the taxonomy nodes into a dictionary
        mapping each child to its parent node.
        '''
        # file format per line: child_taxid parent_taxid
        with open(parent2child_fname) as fd:
            return dict(self._h_from_parent_child_str(line) for line in fd)

    def _h_from_parent_child_str(self, line):
        '''Parses one "child parent" line into an (int, int) pair.
        A self-referencing line marks the root of the tree.'''
        key, sep, value = line.strip().partition(" ")
        if key == value:
            self.root = int(key)
        return int(key), int(value)

    def _h_populate_child_nodes(self):
        '''Inverts the child->parent map into parent -> [children].'''
        child_nodes = defaultdict(list)
        for (child, parent) in self.parent_nodes.items():
            child_nodes[parent].append(child)
        return child_nodes

    def _h_set_relevant_taxonomy_assignments(self):
        ''' Sets some of the more important taxonomy
            assignments which can help in checking which kingdom
            an organism belongs to.
        '''
        self.potential_hosts = [human,
                                mouse,
                                rats,
                                rodents,
                                primates,
                                animalia,
                                green_plants]
        self.microbes = [archea,
                         bacteria,
                         viruses,
                         fungi,
                         euglenozoa,
                         alveolata,
                         amoebozoa,
                         fornicata,
                         parabasalia,
                         heterolobosea,
                         viroids,
                         stramenopiles,
                         cryptomycota,
                         entomophthoromycota,
                         microsporidia,
                         neocallimastigomycota]

    def _h_map_taxids_to_relevant_tax_nodes(self):
        '''Precomputes, for every node of the tree, which host/microbe
        clade it descends from (-1 when it descends from neither).
        Host assignments are applied last, so they win on overlap.'''
        self.tax2relevantTax = {}
        for microbe_node in self.microbes:
            for child in self.get_all_children(microbe_node):
                self.tax2relevantTax[child] = microbe_node
        for host_node in self.potential_hosts:
            for child in self.get_all_children(host_node):
                self.tax2relevantTax[child] = host_node
        # every node not below a relevant clade maps to -1
        untagged_nodes = set(self.parent_nodes.keys()).difference(self.tax2relevantTax.keys())
        for node in untagged_nodes:
            self.tax2relevantTax[node] = -1

    def get_all_children(self, tax_id):
        '''Returns a list of all descendants of *tax_id*, level by level
        (breadth-first); empty list if the node has no children.'''
        if tax_id not in self.child_nodes:
            return []
        one_step_children = self.child_nodes[tax_id]
        all_children = []
        while one_step_children:
            all_children.extend(one_step_children)
            next_level = []
            for child in one_step_children:
                if child in self.child_nodes:
                    next_level.extend(self.child_nodes[child])
            one_step_children = next_level
        return all_children
class TaxNode (object):
    '''
    A single taxonomy entry bundling the data this module tracks
    for one tax ID:
      * organism_name -- scientific name
      * rank          -- taxonomy rank string
      * score         -- arbitrary numeric score (defaults to 0.)
    '''

    def __init__(self, organism_name, rank, score=0.):
        self.score = score
        self.rank = rank
        self.organism_name = organism_name
def main():
    """Smoke test: build the full tree and print the name stored for
    tax id 9606 (the human entry)."""
    tree = TaxTree()
    print(tree.nodes[9606].organism_name)


if __name__ == '__main__':
    main()
"BLAST"
] | eb31435ebeaf290264aff519138f919939b2635b0733682321f141c9b55bb384 |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
class HomogeneousMagneticFieldTest(ut.TestCase):
    """Tests for the HomogeneousMagneticField constraint: parameter
    getter/setter, the default field value and -- when the DIPOLES
    feature is compiled in -- the dipolar energy and torques it
    induces on particles."""

    # One system shared by all tests; geometry is reset in setUp().
    S = espressomd.System(box_l=[1.0, 1.0, 1.0])
    S.seed = S.cell_system.get_state()['n_nodes'] * [1234]
    np.random.seed(S.seed)

    def setUp(self):
        """Reset box size and integration parameters before each test."""
        self.S.box_l = [3.0, 3.0, 3.0]
        self.S.time_step = 0.01
        self.S.cell_system.skin = 0.4

    def tearDown(self):
        """Remove all constraints so tests stay independent."""
        self.S.constraints.clear()

    def test_setter_and_getter(self):
        """H can be set at construction time and reassigned afterwards."""
        H_field1 = np.array([0.0, 1.0, 0.0])
        H_field2 = np.array([3.533, 5.842, 0.127])
        H_constraint = espressomd.constraints.HomogeneousMagneticField(
            H=H_field1)
        np.testing.assert_almost_equal(np.copy(H_constraint.H), H_field1)
        H_constraint.H = H_field2
        np.testing.assert_almost_equal(np.copy(H_constraint.H), H_field2)

    def test_default_value(self):
        """Default field is the unit vector along x."""
        H_field_default = np.array([1.0, 0.0, 0.0])
        H_constraint = espressomd.constraints.HomogeneousMagneticField()
        np.testing.assert_almost_equal(
            np.copy(H_constraint.H),
            H_field_default)

    @utx.skipIfMissingFeatures(["DIPOLES"])
    def test_add_energy_and_forces(self):
        """Dipolar energy equals -sum(H . mu); torque on each dipole is mu x H."""
        H_field = [5.0, 3.0, 2.0]
        dip_mom0 = [2.0, 6.0, 1.]
        dip_mom1 = [-1.0, 0.5, -0.2]
        # check that the dipolar energy is zero initially, ...
        self.assertEqual(self.S.analysis.energy()["dipolar"], 0.0)
        H_constraint = espressomd.constraints.HomogeneousMagneticField(
            H=H_field)
        self.S.constraints.add(H_constraint)
        # ... and also after adding the constraint
        self.assertEqual(self.S.analysis.energy()["dipolar"], 0.0)
        # check dipolar energy when adding dipole moments
        self.S.part.add(id=0, pos=[0, 0, 0], dip=dip_mom0, rotation=(1, 1, 1))
        self.assertEqual(self.S.analysis.energy()["dipolar"],
                         -1.0 * np.dot(H_field, dip_mom0))
        self.S.part.add(id=1, pos=[1, 1, 1], dip=dip_mom1, rotation=(1, 1, 1))
        self.assertEqual(self.S.analysis.energy()["dipolar"],
                         (-1.0 * np.dot(H_field, dip_mom0)
                          - 1.0 * np.dot(H_field, dip_mom1)))
        if espressomd.has_features(["ROTATION"]):
            # check that running the integrator leads to expected torques
            self.S.integrator.run(0)
            torque_expected0 = np.cross(dip_mom0, H_field)
            torque_expected1 = np.cross(dip_mom1, H_field)
            for i in range(3):
                self.assertAlmostEqual(
                    self.S.part[0].torque_lab[i],
                    torque_expected0[i],
                    places=10)
                self.assertAlmostEqual(
                    self.S.part[1].torque_lab[i],
                    torque_expected1[i],
                    places=10)


if __name__ == "__main__":
    ut.main()
| psci2195/espresso-ffans | testsuite/python/constraint_homogeneous_magnetic_field.py | Python | gpl-3.0 | 3,646 | [
"ESPResSo"
] | 8d8e30fca66c073b9bcc014aa8a769e7687a431d71bba76f3df2518868f02d6b |
"""
@name: PyHouse_Install/src/Install/test/win_pwd.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2015 by D. Brian Kimmel
@license: MIT License
@note: Created on Oct 27, 2015
@Summary:
"""
class pwd(object):
    """Minimal Windows stand-in for the POSIX-only ``pwd`` module.

    Only ``getpwnam`` is provided; it ignores its argument and returns a
    dummy value so callers that merely probe for a user's existence keep
    working on platforms where the real module is unavailable.
    """

    @staticmethod
    def getpwnam(p_name):
        """Return a dummy password-database entry (always 1) for *p_name*.

        Declared ``@staticmethod`` so the class can be used exactly like
        the real module -- ``pwd.getpwnam(name)`` -- which with the old
        plain-method definition failed under Python 2 (unbound method).
        """
        return 1
# ## END DBK
| DBrianKimmel/PyHouse_Install | src/Install/test/win_pwd.py | Python | mit | 349 | [
"Brian"
] | 6242c8c6dfe984ee591c047d98862b32bcf559bdfa81a8c2847aa2bc8b090f3b |
"""Dirty magic. Should never be used. Ever."""
# Answers SO questions # 54476645, 28244921
import ast
import inspect
from dapper.tools.colors import coloring
def get_call():
    """Get calling statement (even if it is multi-lined), 2 frames back.

    NB: returns full lines (may include junk before/after calls)
    coz real parsing (brackets, commas, backslash, etc) is complicated.
    Also returns caller args' names, and caller's namespace.

    Old method was based on looping backwards until func_name is found.
    But that doesn't work since f_lineno now (since python 3.8) yields
    the 1st line of a multi-line call (https://bugs.python.org/issue38283).
    New method (using AST) is actually a good deal more robust, because
    we get actual argnames and dont need to resort to regex.

    Returns
    -------
    tuple
        ``(call_text, argnames_or_None, caller_locals)``.
    """
    # Introspection
    f0 = inspect.currentframe()  # this frame
    f1 = f0.f_back  # frame of the function whose call we want to get
    f2 = f1.f_back  # frame of the calling
    code, shift = inspect.getsourcelines(f2)
    func_name = f1.f_code.co_name
    # Using stack instead
    # iFrame = 2
    # frame,filename,n1,fname,lines,index = inspect.stack()[iFrame]
    # Note: it may be that using one of
    # code = inspect.getsource(f2)
    # code = open(f2.f_code.co_filename).read().splitlines(True)
    # is less problematic with regards to
    # - indentation, which gets parsed by ast.NodeVisitor
    # - fix01 and fix10
    # because it reads the entire module or something.
    # On the other hand, it might be less efficient,
    # and less general (does it work when the call is in the interpreter?)
    # Need a large battery of tests to really decide what's best.
    # Get call's line number, made relative to the start of f2's source
    n1 = f2.f_lineno
    n1 -= shift
    # I really don't know why these become necessary
    # (off-by-one corrections depending on whether the source started
    # at the top of the file, i.e. shift == 0).
    fix01 = 0 if shift else 1
    fix10 = 1 if shift else 0
    # print("n1:",n1)
    # print("code[n1-fix01]:\n",code[n1])

    # Walk syntax tree
    class Visitor(ast.NodeVisitor):
        """Get info on call if name and lineno match."""

        # Inspiration for relevant parts of AST:
        # https://docs.python.org/3/library/ast.html#abstract-grammar
        # https://docs.python.org/3/library/ast.html#ast.Call
        # http://alexleone.blogspot.com/2010/01/python-ast-pretty-printer.html
        def visit_Call(self, node):
            node_id = getattr(node.func, "id", None)
            if node_id == func_name:
                if node.lineno == (n1 + fix10):
                    # There must be exactly one matching call on this line.
                    assert info == {}, "Matched with multiple calls."
                    # Record the call's start/end positions in the source.
                    info["n1"] = node.lineno
                    info["c1"] = node.col_offset
                    info["n2"] = node.end_lineno
                    info["c2"] = node.end_col_offset
                    try:
                        # Only works when every arg is a bare Name node.
                        info["argnames"] = [arg.id for arg in node.args]
                    except AttributeError:
                        pass  # argnames will not be present
            self.generic_visit(node)

    info = {}
    Visitor().visit(ast.parse("".join(code)))
    assert "n2" in info, "Failed to find caller in its file."
    call_text = "".join(code[n1-fix01: info["n2"]])
    call_text = call_text.rstrip()  # rm trailing newline
    return call_text, info.get("argnames", None), f2.f_locals
# TODO 4: fails on python 3.7 and older.
# I believe there is a version in the git history that works with py <= 3.7.
# But maybe we should not use magic any more?
def magic_naming(*args, **kwargs):
    """Convert args (by their names in the call) to kwargs.

    Example:
    >>> a, b = 1, 2
    >>> magic_naming(a, b, c=3)
    {'a': 1, 'b': 2, 'c': 3}
    """
    # Introspect the calling statement to recover the argument names.
    _, argnames, _ = get_call()
    assert len(args) == len(argnames), "Something's gone wrong."
    # Pair each positional value with the name it had at the call site,
    # then let explicit keyword arguments take precedence.
    named = dict(zip(argnames, args))
    named.update(kwargs)
    return named
def spell_out(*args):
    """Print (args) including variable names.

    Example
    -------
    >>> spell_out(3*2)
    3*2:
    6
    """
    import sys
    call_text, _, _ = get_call()
    # Everything between the outermost parentheses is the argument list.
    open_ix = call_text.find("(")
    close_ix = call_text.rfind(")")
    header = call_text[open_ix + 1:close_ix] + ":"
    # Suppress ANSI colors under pytest (its capture garbles escape codes).
    color = None if "pytest" in sys.modules else "blue"
    with coloring(color):
        print(header)
    # Print (normal)
    print(*args)
if __name__ == "__main__":
lst = [chr(97+i) for i in range(7)]
dct2 = {c: c for c in lst}
a, b, c, d, e, f, g = lst
print(magic_naming(a, b,
c, d, # fawef
e, f, g))
spell_out(a, b*2, 3*4)
###########
# tests #
###########
# pytest reports failure on the following assertions.
# But intorspection is brittle, so that's not surprising.
d2 = magic_naming(a, b, c, d, e, f, g)
assert d2 == dct2
# Ugly call
assert \
{"b": "b", "a": 3} == \
magic_naming(b, a=3,
)
| nansencenter/DAPPER | dapper/tools/magic.py | Python | mit | 5,622 | [
"VisIt"
] | 24318a0d119c5e67adbf4226df3e7d7d10c752c951db92d03f94982b1546569f |
from __future__ import print_function
import time
import numpy as np
import sympy as sy
from bokeh.objects import Plot, DataRange1d, LinearAxis, ColumnDataSource, Glyph, Grid, Legend
from bokeh.widgetobjects import Slider, TextInput, HBox, VBox, Dialog
from bokeh.glyphs import Patch, Line, Text
from bokeh.document import Document
from bokeh.session import Session
from requests.exceptions import ConnectionError
document = Document()
session = Session()
session.use_doc('taylor_server')
session.load_document(document)
xs = sy.Symbol('x')
expr = sy.exp(-xs)*sy.sin(xs)
order = 1
def taylor(fx, xs, order, x_range=(0, 1), n=200):
    """Sample ``fx`` and its truncated Taylor series in symbol ``xs``.

    Returns ``(x, fy, ty)``: the sample grid, the exact function values,
    and the values of the order-``order`` Taylor polynomial on that grid.
    """
    lo, hi = x_range
    x = np.linspace(float(lo), float(hi), n)

    def evaluate(expr):
        # Compile the sympy expression to a numpy ufunc and apply it.
        return sy.lambdify(xs, expr, modules=['numpy'])(x)

    fy = evaluate(fx)
    truncated = fx.series(xs, n=order).removeO()
    if truncated.is_Number:
        # A constant series collapses to a scalar; broadcast it manually.
        ty = np.full_like(x, float(truncated))
    else:
        ty = evaluate(truncated)
    return x, fy, ty
def update_data():
    """Resample the module-level ``expr``/``order`` and push the new
    data, title and legend to the bokeh session."""
    # Sample the expression and its Taylor expansion over two periods.
    x, fy, ty = taylor(expr, xs, order, (-2*sy.pi, 2*sy.pi), 200)
    plot.title = "%s vs. taylor(%s, n=%d)" % (expr, expr, order)
    legend.legends = {
        "%s" % expr: [line_f_glyph],
        "taylor(%s)" % expr: [line_t_glyph],
    }
    source.data = dict(x=x, fy=fy, ty=ty)
    # Keep the slider in sync (order may have been set via the text box).
    slider.value = order
    # Persist the updated document on the bokeh server.
    session.store_document(document)
source = ColumnDataSource(data=dict(
x = [],
fy = [],
ty = [],
))
xdr = DataRange1d(sources=[source.columns("x")])
ydr = DataRange1d(sources=[source.columns("fy")])
plot = Plot(data_sources=[source], x_range=xdr, y_range=ydr, plot_width=800, plot_height=400)
line_f = Line(x="x", y="fy", line_color="blue", line_width=2)
line_f_glyph = Glyph(data_source=source, xdata_range=xdr, ydata_range=ydr, glyph=line_f)
plot.renderers.append(line_f_glyph)
line_t = Line(x="x", y="ty", line_color="red", line_width=2)
line_t_glyph = Glyph(data_source=source, xdata_range=xdr, ydata_range=ydr, glyph=line_t)
plot.renderers.append(line_t_glyph)
xaxis = LinearAxis(plot=plot, location="bottom")
plot.below.append(xaxis)
yaxis = LinearAxis(plot=plot, location="left")
plot.left.append(yaxis)
xgrid = Grid(plot=plot, dimension=0, axis=xaxis)
ygrid = Grid(plot=plot, dimension=1, axis=yaxis)
legend = Legend(plot=plot, orientation="bottom_left")
plot.renderers.append(legend)
def on_slider_value_change(obj, attr, old, new):
global order
order = int(new)
update_data()
def on_text_value_change(obj, attr, old, new):
    """Text-input callback: parse the new expression and redraw.

    Invalid input raises during ``sympify``; we show the error in the
    dialog instead of updating the plot.
    """
    try:
        global expr
        # Parse with `x` bound to our plotting symbol.
        expr = sy.sympify(new, dict(x=xs))
    except (sy.SympifyError, TypeError, ValueError) as exception:
        dialog.content = str(exception)
        dialog.visible = True
        session.store_objects(dialog)
    else:
        update_data()
dialog = Dialog(title="Invalid expression", buttons=["Close"])
slider = Slider(start=1, end=20, value=order, step=1, title="Order:")
slider.on_change('value', on_slider_value_change)
text = TextInput(value=str(expr), title="Expression:")
text.on_change('value', on_text_value_change)
inputs = HBox(children=[slider, text])
layout = VBox(children=[inputs, plot, dialog])
document.add(layout)
update_data()
if __name__ == "__main__":
link = session.object_link(document._plotcontext)
print("Please visit %s to see the plots" % link)
try:
while True:
session.load_document(document)
time.sleep(0.5)
except KeyboardInterrupt:
print()
except ConnectionError:
print("Connection to bokeh-server was terminated")
| the13fools/Bokeh_Examples | glyphs/taylor_server.py | Python | bsd-3-clause | 3,532 | [
"VisIt"
] | 02589e819aba9473bca8464b38bcae820e97a0a98da198dd1e7dd2825014bdc8 |
#!/usr/bin/env python
# Packaging script for the single-module `icolor` distribution
# (distutils-based, as was conventional at the time of writing).
from distutils.core import setup

setup(name='icolor',
      version='1.2',
      description='Interpolate ANSI colors in strings',
      author='Brian M Hunt',
      author_email='brianmhunt@gmail.com',
      url='https://github.com/brianmhunt/icolor',
      py_modules=['icolor'],
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Topic :: Terminals'
      ],
      )
| brianmhunt/icolor | setup.py | Python | mit | 660 | [
"Brian"
] | a4adba7b73a1373bceaae614023c04150a2ad9e923f61b890f7321e69e51d3ae |
########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
import datetime
import os
import tarfile
import tempfile
import yaml
from distutils.version import LooseVersion
from shutil import rmtree
from ansible.errors import AnsibleError
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.galaxy.api import GalaxyAPI
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyRole(object):
    """A role hosted on Galaxy (or an SCM URL / local tarball) that can
    be fetched, installed into a roles path, inspected and removed.
    """

    SUPPORTED_SCMS = set(['git', 'hg'])
    META_MAIN = os.path.join('meta', 'main.yml')
    META_INSTALL = os.path.join('meta', '.galaxy_install_info')
    ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars')

    def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None):
        """
        :param galaxy: the Galaxy context object (supplies options and roles_paths)
        :param name: role name; also used as the default src
        :param src: Galaxy role name, URL, or local tarball path
        :param version: version/tag to install (default: latest, else 'master')
        :param scm: 'git' or 'hg' when installing straight from an SCM URL
        :param path: explicit install location; otherwise the first roles
            path that already contains the role (or the first configured
            path) is used
        """
        self._metadata = None
        self._install_info = None

        self.options = galaxy.options
        self.galaxy = galaxy

        self.name = name
        self.version = version
        self.src = src or name
        self.scm = scm

        if path is not None:
            if self.name not in path:
                path = os.path.join(path, self.name)
            self.path = path
        else:
            # Prefer a roles path that already holds this role.
            for path in galaxy.roles_paths:
                role_path = os.path.join(path, self.name)
                if os.path.exists(role_path):
                    self.path = role_path
                    break
            else:
                # use the first path by default
                self.path = os.path.join(galaxy.roles_paths[0], self.name)

    def __eq__(self, other):
        # NOTE(review): compares by name only and assumes `other` is a
        # GalaxyRole (no isinstance check); no __hash__ is defined.
        return self.name == other.name

    @property
    def metadata(self):
        """
        Returns role metadata
        """
        if self._metadata is None:
            meta_path = os.path.join(self.path, self.META_MAIN)
            if os.path.isfile(meta_path):
                # `with` ensures the handle is closed and avoids the
                # original's UnboundLocalError when open() itself failed.
                try:
                    with open(meta_path, 'r') as f:
                        self._metadata = yaml.safe_load(f)
                except Exception:
                    display.vvvvv("Unable to load metadata for %s" % self.name)
                    return False
        return self._metadata

    @property
    def install_info(self):
        """
        Returns role install info
        """
        if self._install_info is None:
            info_path = os.path.join(self.path, self.META_INSTALL)
            if os.path.isfile(info_path):
                try:
                    with open(info_path, 'r') as f:
                        self._install_info = yaml.safe_load(f)
                except Exception:
                    display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
                    return False
        return self._install_info

    def _write_galaxy_install_info(self):
        """
        Writes a YAML-formatted file to the role's meta/ directory
        (named .galaxy_install_info) which contains some information
        we can use later for commands like 'list' and 'info'.
        """
        info = dict(
            version=self.version,
            install_date=datetime.datetime.utcnow().strftime("%c"),
        )
        info_path = os.path.join(self.path, self.META_INSTALL)
        try:
            with open(info_path, 'w+') as f:
                self._install_info = yaml.safe_dump(info, f)
        except Exception:
            return False
        return True

    def remove(self):
        """
        Removes the specified role from the roles path.
        There is a sanity check to make sure there's a meta/main.yml file at this
        path so the user doesn't blow away random directories.
        """
        if self.metadata:
            try:
                rmtree(self.path)
                return True
            except Exception:
                # Best-effort removal: callers interpret False as failure.
                pass
        return False

    def fetch(self, role_data):
        """
        Downloads the archived role from github to a temp location
        """
        if role_data:
            # first grab the file and save it to a temp location
            # NOTE: role_data may also be a plain URL string here (the
            # `in` checks then do substring tests, which cannot match).
            if "github_user" in role_data and "github_repo" in role_data:
                archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
            else:
                archive_url = self.src
            display.display("- downloading role from %s" % archive_url)

            try:
                url_file = open_url(archive_url)
                temp_file = tempfile.NamedTemporaryFile(delete=False)
                data = url_file.read()
                while data:
                    temp_file.write(data)
                    data = url_file.read()
                temp_file.close()
                return temp_file.name
            except Exception as e:
                display.error("failed to download the file: %s" % str(e))

        return False

    def install(self):
        """
        Installs the role into self.path, sourcing it (in order of
        precedence) from an SCM checkout, a local tarball, a URL, or
        the Galaxy API.  Returns True on success, False when nothing
        was fetched; raises AnsibleError on fatal problems.
        """
        # the file is a tar, so open it that way and extract it
        # to the specified (or default) roles directory
        if self.scm:
            # create tar file from scm url
            tmp_file = RoleRequirement.scm_archive_role(**self.spec)
        elif self.src:
            if os.path.isfile(self.src):
                # installing a local tar.gz
                tmp_file = self.src
            elif '://' in self.src:
                role_data = self.src
                tmp_file = self.fetch(role_data)
            else:
                api = GalaxyAPI(self.galaxy, self.options.api_server)
                role_data = api.lookup_role_by_name(self.src)
                if not role_data:
                    raise AnsibleError("- sorry, %s was not found on %s." % (self.src, self.options.api_server))
                role_versions = api.fetch_role_related('versions', role_data['id'])
                if not self.version:
                    # convert the version names to LooseVersion objects
                    # and sort them to get the latest version. If there
                    # are no versions in the list, we'll grab the head
                    # of the master branch
                    if len(role_versions) > 0:
                        loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
                        loose_versions.sort()
                        self.version = str(loose_versions[-1])
                    else:
                        self.version = 'master'
                elif self.version != 'master':
                    if role_versions and self.version not in [a.get('name', None) for a in role_versions]:
                        raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version, self.name, role_versions))
                tmp_file = self.fetch(role_data)
        else:
            raise AnsibleError("No valid role data found")

        if tmp_file:
            display.debug("installing from %s" % tmp_file)
            if not tarfile.is_tarfile(tmp_file):
                raise AnsibleError("the file downloaded was not a tar.gz")
            else:
                if tmp_file.endswith('.gz'):
                    role_tar_file = tarfile.open(tmp_file, "r:gz")
                else:
                    role_tar_file = tarfile.open(tmp_file, "r")
                # verify the role's meta file
                meta_file = None
                members = role_tar_file.getmembers()
                # next find the metadata file
                for member in members:
                    if self.META_MAIN in member.name:
                        meta_file = member
                        break
                if not meta_file:
                    raise AnsibleError("this role does not appear to have a meta/main.yml file.")
                else:
                    try:
                        self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
                    except Exception:
                        raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")
                # we strip off the top-level directory for all of the files contained within
                # the tar file here, since the default is 'github_repo-target', and change it
                # to the specified role's name
                display.display("- extracting %s to %s" % (self.name, self.path))
                try:
                    if os.path.exists(self.path):
                        if not os.path.isdir(self.path):
                            raise AnsibleError("the specified roles path exists and is not a directory.")
                        elif not getattr(self.options, "force", False):
                            raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
                        else:
                            # using --force, remove the old path
                            if not self.remove():
                                raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really want to put the role here." % self.path)
                    else:
                        os.makedirs(self.path)
                    # now we do the actual extraction to the path
                    for member in members:
                        # we only extract files, and remove any relative path
                        # bits that might be in the file for security purposes
                        # and drop the leading directory, as mentioned above
                        if member.isreg() or member.issym():
                            parts = member.name.split(os.sep)[1:]
                            final_parts = []
                            for part in parts:
                                if part != '..' and '~' not in part and '$' not in part:
                                    final_parts.append(part)
                            member.name = os.path.join(*final_parts)
                            role_tar_file.extract(member, self.path)
                    # write out the install info file for later use
                    self._write_galaxy_install_info()
                except OSError as e:
                    raise AnsibleError("Could not update files in %s: %s" % (self.path, str(e)))
                # return the parsed yaml metadata
                display.display("- %s was installed successfully" % self.name)
                try:
                    os.unlink(tmp_file)
                except (OSError,IOError) as e:
                    display.warning("Unable to remove tmp file (%s): %s" % (tmp_file, str(e)))
                return True
        return False

    @property
    def spec(self):
        """
        Returns role spec info
        {
            'scm': 'git',
            'src': 'http://git.example.com/repos/repo.git',
            'version': 'v1.0',
            'name': 'repo'
        }
        """
        return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
| shawnsi/ansible | lib/ansible/galaxy/role.py | Python | gpl-3.0 | 11,993 | [
"Brian",
"Galaxy"
] | 928d4d704f7618244b7f3de25946d8d14e39bb41803aab708edf0b978756f622 |
# pylint: disable=C0111
# pylint: disable=W0621
import time
import os
from lettuce import world, step
from nose.tools import assert_true, assert_in # pylint: disable=no-name-in-module
from django.conf import settings
from student.roles import CourseRole, CourseStaffRole, CourseInstructorRole
from student.models import get_user
from selenium.webdriver.common.keys import Keys
from logging import getLogger
from student.tests.factories import AdminFactory
from student import auth
logger = getLogger(__name__)
from terrain.browser import reset_data
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
@step('I (?:visit|access|open) the Studio homepage$')
def i_visit_the_studio_homepage(_step):
# To make this go to port 8001, put
# LETTUCE_SERVER_PORT = 8001
# in your settings.py file.
world.visit('/')
signin_css = 'a.action-signin'
assert world.is_css_present(signin_css)
@step('I am logged into Studio$')
def i_am_logged_into_studio(_step):
log_into_studio()
@step('I confirm the alert$')
def i_confirm_with_ok(_step):
world.browser.get_alert().accept()
@step(u'I press the "([^"]*)" delete icon$')
def i_press_the_category_delete_icon(_step, category):
if category == 'section':
css = 'a.action.delete-section-button'
elif category == 'subsection':
css = 'a.action.delete-subsection-button'
else:
assert False, 'Invalid category: %s' % category
world.css_click(css)
@step('I have opened a new course in Studio$')
def i_have_opened_a_new_course(_step):
open_new_course()
@step('(I select|s?he selects) the new course')
def select_new_course(_step, whom):
course_link_css = 'a.course-link'
world.css_click(course_link_css)
@step(u'I press the "([^"]*)" notification button$')
def press_the_notification_button(_step, name):
# Because the notification uses a CSS transition,
# Selenium will always report it as being visible.
# This makes it very difficult to successfully click
# the "Save" button at the UI level.
# Instead, we use JavaScript to reliably click
# the button.
btn_css = 'div#page-notification a.action-%s' % name.lower()
world.trigger_event(btn_css, event='focus')
world.browser.execute_script("$('{}').click()".format(btn_css))
world.wait_for_ajax_complete()
@step('I change the "(.*)" field to "(.*)"$')
def i_change_field_to_value(_step, field, value):
field_css = '#%s' % '-'.join([s.lower() for s in field.split()])
ele = world.css_find(field_css).first
ele.fill(value)
ele._element.send_keys(Keys.ENTER)
@step('I reset the database')
def reset_the_db(_step):
"""
When running Lettuce tests using examples (i.e. "Confirmation is
shown on save" in course-settings.feature), the normal hooks
aren't called between examples. reset_data should run before each
scenario to flush the test database. When this doesn't happen we
get errors due to trying to insert a non-unique entry. So instead,
we delete the database manually. This has the effect of removing
any users and courses that have been created during the test run.
"""
reset_data(None)
@step('I see a confirmation that my changes have been saved')
def i_see_a_confirmation(step):
confirmation_css = '#alert-confirmation'
assert world.is_css_present(confirmation_css)
def open_new_course():
world.clear_courses()
create_studio_user()
log_into_studio()
create_a_course()
def create_studio_user(
uname='robot',
email='robot+studio@edx.org',
password='test',
is_staff=False):
studio_user = world.UserFactory(
username=uname,
email=email,
password=password,
is_staff=is_staff)
registration = world.RegistrationFactory(user=studio_user)
registration.register(studio_user)
registration.activate()
return studio_user
def fill_in_course_info(
name='Robot Super Course',
org='MITx',
num='101',
run='2013_Spring'):
world.css_fill('.new-course-name', name)
world.css_fill('.new-course-org', org)
world.css_fill('.new-course-number', num)
world.css_fill('.new-course-run', run)
def log_into_studio(
uname='robot',
email='robot+studio@edx.org',
password='test',
name='Robot Studio'):
world.log_in(username=uname, password=password, email=email, name=name)
# Navigate to the studio dashboard
world.visit('/')
assert_in(uname, world.css_text('h2.title', timeout=10))
def add_course_author(user, course):
"""
Add the user to the instructor group of the course
so they will have the permissions to see it in studio
"""
global_admin = AdminFactory()
for role in (CourseStaffRole, CourseInstructorRole):
auth.add_users(global_admin, role(course.location), user)
def create_a_course():
course = world.CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course')
world.scenario_dict['COURSE'] = course
user = world.scenario_dict.get("USER")
if not user:
user = get_user('robot+studio@edx.org')
add_course_author(user, course)
# Navigate to the studio dashboard
world.visit('/')
course_link_css = 'a.course-link'
world.css_click(course_link_css)
course_title_css = 'span.course-title'
assert_true(world.is_css_present(course_title_css))
def add_section(name='My Section'):
link_css = 'a.new-courseware-section-button'
world.css_click(link_css)
name_css = 'input.new-section-name'
save_css = 'input.new-section-name-save'
world.css_fill(name_css, name)
world.css_click(save_css)
span_css = 'span.section-name-span'
assert_true(world.is_css_present(span_css))
def add_subsection(name='Subsection One'):
css = 'a.new-subsection-item'
world.css_click(css)
name_css = 'input.new-subsection-name-input'
save_css = 'input.new-subsection-name-save'
world.css_fill(name_css, name)
world.css_click(save_css)
def set_date_and_time(date_css, desired_date, time_css, desired_time):
    """Fill a paired date/time widget, TABbing out of each field so the
    page-side handlers fire."""
    world.css_fill(date_css, desired_date)
    # hit TAB to get to the time field
    e = world.css_find(date_css).first
    # pylint: disable=W0212
    e._element.send_keys(Keys.TAB)
    world.css_fill(time_css, desired_time)
    e = world.css_find(time_css).first
    e._element.send_keys(Keys.TAB)
    # NOTE(review): fixed 1s pause -- presumably lets the picker JS
    # settle before the test proceeds; confirm before removing.
    time.sleep(float(1))
@step('I have enabled the (.*) advanced module$')
def i_enabled_the_advanced_module(step, module):
step.given('I have opened a new course section in Studio')
world.css_click('.nav-course-settings')
world.css_click('.nav-course-settings-advanced a')
type_in_codemirror(0, '["%s"]' % module)
press_the_notification_button(step, 'Save')
@world.absorb
def create_course_with_unit():
"""
Prepare for tests by creating a course with a section, subsection, and unit.
Performs the following:
Clear out all courseware
Create a course with a section, subsection, and unit
Create a user and make that user a course author
Log the user into studio
Open the course from the dashboard
Expand the section and click on the New Unit link
The end result is the page where the user is editing the new unit
"""
world.clear_courses()
course = world.CourseFactory.create()
world.scenario_dict['COURSE'] = course
section = world.ItemFactory.create(parent_location=course.location)
world.ItemFactory.create(
parent_location=section.location,
category='sequential',
display_name='Subsection One',
)
user = create_studio_user(is_staff=False)
add_course_author(user, course)
log_into_studio()
world.css_click('a.course-link')
world.wait_for_js_to_load()
css_selectors = [
'div.section-item a.expand-collapse', 'a.new-unit-item'
]
for selector in css_selectors:
world.css_click(selector)
world.wait_for_mathjax()
world.wait_for_xmodule()
assert world.is_css_present('ul.new-component-type')
@step('I have clicked the new unit button$')
@step(u'I am in Studio editing a new unit$')
def edit_new_unit(step):
create_course_with_unit()
@step('the save notification button is disabled')
def save_button_disabled(step):
button_css = '.action-save'
disabled = 'is-disabled'
assert world.css_has_class(button_css, disabled)
@step('the "([^"]*)" button is disabled')
def button_disabled(step, value):
button_css = 'input[value="%s"]' % value
assert world.css_has_class(button_css, 'is-disabled')
def _do_studio_prompt_action(intent, action):
"""
Wait for a studio prompt to appear and press the specified action button
See cms/static/js/views/feedback_prompt.js for implementation
"""
assert intent in ['warning', 'error', 'confirmation', 'announcement',
'step-required', 'help', 'mini']
assert action in ['primary', 'secondary']
world.wait_for_present('div.wrapper-prompt.is-shown#prompt-{}'.format(intent))
action_css = 'li.nav-item > a.action-{}'.format(action)
world.trigger_event(action_css, event='focus')
world.browser.execute_script("$('{}').click()".format(action_css))
world.wait_for_ajax_complete()
world.wait_for_present('div.wrapper-prompt.is-hiding#prompt-{}'.format(intent))
@world.absorb
def confirm_studio_prompt():
_do_studio_prompt_action('warning', 'primary')
@step('I confirm the prompt')
def confirm_the_prompt(step):
confirm_studio_prompt()
@step(u'I am shown a prompt$')
def i_am_shown_a_notification(step):
assert world.is_css_present('.wrapper-prompt')
def type_in_codemirror(index, text):
    """Replace the contents of the index-th CodeMirror editor on the
    page with `text` by simulating keyboard input."""
    world.wait(1)  # For now, slow this down so that it works. TODO: fix it.
    world.css_click("div.CodeMirror-lines", index=index)
    world.browser.execute_script("$('div.CodeMirror.CodeMirror-focused > div').css('overflow', '')")
    g = world.css_find("div.CodeMirror.CodeMirror-focused > div > textarea")
    # Select-all uses the platform-appropriate modifier key.
    if world.is_mac():
        g._element.send_keys(Keys.COMMAND + 'a')
    else:
        g._element.send_keys(Keys.CONTROL + 'a')
    g._element.send_keys(Keys.DELETE)
    g._element.send_keys(text)
    # NOTE(review): Firefox gets an explicit blur event -- presumably so
    # CodeMirror commits the typed text; confirm before changing.
    if world.is_firefox():
        world.trigger_event('div.CodeMirror', index=index, event='blur')
    world.wait_for_ajax_complete()
def upload_file(filename):
path = os.path.join(TEST_ROOT, filename)
world.browser.execute_script("$('input.file-input').css('display', 'block')")
world.browser.attach_file('file', os.path.abspath(path))
button_css = '.upload-dialog .action-upload'
world.css_click(button_css)
@step(u'"([^"]*)" logs in$')
def other_user_login(step, name):
step.given('I log out')
world.visit('/')
signin_css = 'a.action-signin'
world.is_css_present(signin_css)
world.css_click(signin_css)
def fill_login_form():
login_form = world.browser.find_by_css('form#login_form')
login_form.find_by_name('email').fill(name + '@edx.org')
login_form.find_by_name('password').fill("test")
login_form.find_by_name('submit').click()
world.retry_on_exception(fill_login_form)
assert_true(world.is_css_present('.new-course-button'))
world.scenario_dict['USER'] = get_user(name + '@edx.org')
@step(u'the user "([^"]*)" exists( as a course (admin|staff member|is_staff))?$')
def create_other_user(_step, name, has_extra_perms, role_name):
email = name + '@edx.org'
user = create_studio_user(uname=name, password="test", email=email)
if has_extra_perms:
if role_name == "is_staff":
user.is_staff = True
user.save()
else:
if role_name == "admin":
# admins get staff privileges, as well
roles = (CourseStaffRole, CourseInstructorRole)
else:
roles = (CourseStaffRole,)
location = world.scenario_dict["COURSE"].location
global_admin = AdminFactory()
for role in roles:
auth.add_users(global_admin, role(location), user)
@step('I log out')
def log_out(_step):
world.visit('logout')
@step(u'I click on "edit a draft"$')
def i_edit_a_draft(_step):
world.css_click("a.create-draft")
@step(u'I click on "replace with draft"$')
def i_replace_w_draft(_step):
world.css_click("a.publish-draft")
@step(u'I publish the unit$')
def publish_unit(_step):
world.select_option('visibility-select', 'public')
| pku9104038/edx-platform | cms/djangoapps/contentstore/features/common.py | Python | agpl-3.0 | 12,528 | [
"VisIt"
] | bc4a17bf5311b3c9801776d42bc3a878236469824fe85fd4dc0f2d84c78bb64e |
symbols = [
["Lower-case Greek",
5,
r"""\alpha \beta \gamma \chi \delta \epsilon \eta \iota \kappa
\lambda \mu \nu \omega \phi \pi \psi \rho \sigma \tau \theta
\upsilon \xi \zeta \digamma \varepsilon \varkappa \varphi
\varpi \varrho \varsigma \vartheta"""],
["Upper-case Greek",
6,
r"""\Delta \Gamma \Lambda \Omega \Phi \Pi \Psi \Sigma \Theta
\Upsilon \Xi \mho \nabla"""],
["Hebrew",
4,
r"""\aleph \beth \daleth \gimel"""],
["Delimiters",
6,
r"""| \{ \lfloor / \Uparrow \llcorner \vert \} \rfloor \backslash
\uparrow \lrcorner \| \langle \lceil [ \Downarrow \ulcorner
\Vert \rangle \rceil ] \downarrow \urcorner"""],
["Big symbols",
5,
r"""\bigcap \bigcup \bigodot \bigoplus \bigotimes \biguplus
\bigvee \bigwedge \coprod \oint \prod \sum \int"""],
["Standard function names",
4,
r"""\arccos \csc \ker \min \arcsin \deg \lg \Pr \arctan \det \lim
\gcd \ln \sup \cot \hom \log \tan \coth \inf \max \tanh
\sec \arg \dim \liminf \sin \cos \exp \limsup \sinh \cosh"""],
["Binary operation and relation symbols",
3,
r"""\ast \pm \slash \cap \star \mp \cup \cdot \uplus
\triangleleft \circ \odot \sqcap \triangleright \bullet \ominus
\sqcup \bigcirc \oplus \wedge \diamond \oslash \vee
\bigtriangledown \times \otimes \dag \bigtriangleup \div \wr
\ddag \barwedge \veebar \boxplus \curlywedge \curlyvee \boxminus
\Cap \Cup \boxtimes \bot \top \dotplus \boxdot \intercal
\rightthreetimes \divideontimes \leftthreetimes \equiv \leq \geq
\perp \cong \prec \succ \mid \neq \preceq \succeq \parallel \sim
\ll \gg \bowtie \simeq \subset \supset \Join \approx \subseteq
\supseteq \ltimes \asymp \sqsubset \sqsupset \rtimes \doteq
\sqsubseteq \sqsupseteq \smile \propto \dashv \vdash \frown
\models \in \ni \notin \approxeq \leqq \geqq \lessgtr \leqslant
\geqslant \lesseqgtr \backsim \lessapprox \gtrapprox \lesseqqgtr
\backsimeq \lll \ggg \gtreqqless \triangleq \lessdot \gtrdot
\gtreqless \circeq \lesssim \gtrsim \gtrless \bumpeq \eqslantless
\eqslantgtr \backepsilon \Bumpeq \precsim \succsim \between
\doteqdot \precapprox \succapprox \pitchfork \Subset \Supset
\fallingdotseq \subseteqq \supseteqq \risingdotseq \sqsubset
\sqsupset \varpropto \preccurlyeq \succcurlyeq \Vdash \therefore
\curlyeqprec \curlyeqsucc \vDash \because \blacktriangleleft
\blacktriangleright \Vvdash \eqcirc \trianglelefteq
\trianglerighteq \neq \vartriangleleft \vartriangleright \ncong
\nleq \ngeq \nsubseteq \nmid \nsupseteq \nparallel \nless \ngtr
\nprec \nsucc \subsetneq \nsim \supsetneq \nVDash \precnapprox
\succnapprox \subsetneqq \nvDash \precnsim \succnsim \supsetneqq
\nvdash \lnapprox \gnapprox \ntriangleleft \ntrianglelefteq
\lneqq \gneqq \ntriangleright \lnsim \gnsim \ntrianglerighteq
\coloneq \eqsim \nequiv \napprox \nsupset \doublebarwedge \nVdash
\Doteq \nsubset \eqcolon \ne
"""],
["Arrow symbols",
2,
r"""\leftarrow \longleftarrow \uparrow \Leftarrow \Longleftarrow
\Uparrow \rightarrow \longrightarrow \downarrow \Rightarrow
\Longrightarrow \Downarrow \leftrightarrow \updownarrow
\longleftrightarrow \updownarrow \Leftrightarrow
\Longleftrightarrow \Updownarrow \mapsto \longmapsto \nearrow
\hookleftarrow \hookrightarrow \searrow \leftharpoonup
\rightharpoonup \swarrow \leftharpoondown \rightharpoondown
\nwarrow \rightleftharpoons \leadsto \dashrightarrow
\dashleftarrow \leftleftarrows \leftrightarrows \Lleftarrow
\Rrightarrow \twoheadleftarrow \leftarrowtail \looparrowleft
\leftrightharpoons \curvearrowleft \circlearrowleft \Lsh
\upuparrows \upharpoonleft \downharpoonleft \multimap
\leftrightsquigarrow \rightrightarrows \rightleftarrows
\rightrightarrows \rightleftarrows \twoheadrightarrow
\rightarrowtail \looparrowright \rightleftharpoons
\curvearrowright \circlearrowright \Rsh \downdownarrows
\upharpoonright \downharpoonright \rightsquigarrow \nleftarrow
\nrightarrow \nLeftarrow \nRightarrow \nleftrightarrow
\nLeftrightarrow \to \Swarrow \Searrow \Nwarrow \Nearrow
\leftsquigarrow
"""],
["Miscellaneous symbols",
3,
r"""\neg \infty \forall \wp \exists \bigstar \angle \partial
\nexists \measuredangle \eth \emptyset \sphericalangle \clubsuit
\varnothing \complement \diamondsuit \imath \Finv \triangledown
\heartsuit \jmath \Game \spadesuit \ell \hbar \vartriangle \cdots
\hslash \vdots \blacksquare \ldots \blacktriangle \ddots \sharp
\prime \blacktriangledown \Im \flat \backprime \Re \natural
\circledS \P \copyright \ss \circledR \S \yen \AA \checkmark \$
\iiint \iint \iint \oiiint"""]
]
def run(state_machine):
    # Build the reST symbol tables from the module-level ``symbols`` data
    # and feed them into the docutils state machine.
    def grouped(n, seq):
        # Yield lists of n consecutive items from seq; the trailing
        # (possibly partial or empty) group is always yielded as well.
        bucket = []
        for item in seq:
            bucket.append(item)
            if len(bucket) == n:
                yield bucket
                bucket = []
        yield bucket
    lines = []
    for category, columns, syms in symbols:
        names = sorted(syms.split())
        lines.append("**%s**" % category)
        lines.append('')
        # cell width: twice the longest symbol name plus padding
        max_width = max([0] + [len(s) for s in names]) * 2 + 16
        header = " " + (('=' * max_width) + ' ') * columns
        cell_format = '%%%ds' % max_width
        # 20 rows of `columns` symbols per table section
        for chunk in grouped(20, grouped(columns, names)):
            lines.append(header)
            for row in chunk:
                cells = [cell_format % (":math:`%s` ``%s``" % (s, s))
                         for s in row]
                lines.append(" " + " ".join(cells))
            lines.append(header)
            lines.append('')
    state_machine.insert_input(lines, "Symbol table")
    return []
def math_symbol_table_directive(name, arguments, options, content, lineno,
                                content_offset, block_text, state, state_machine):
    # Docutils directive entry point.  Every argument except state_machine
    # is ignored -- the table is generated purely from module-level data.
    return run(state_machine)
def setup(app):
    # Sphinx extension hook.
    # NOTE(review): legacy docutils-style registration -- presumably
    # (name, function, has_content, (required, optional, final_whitespace));
    # confirm against the docutils/Sphinx version in use.
    app.add_directive(
        'math_symbol_table', math_symbol_table_directive,
        False, (0, 1, 0))
if __name__ == "__main__":
    # Do some verification of the tables against matplotlib's master
    # TeX-name -> unicode mapping.
    from matplotlib import _mathtext_data
    # parenthesized print() works on both Python 2 and Python 3
    print("SYMBOLS NOT IN STIX:")
    all_symbols = {}
    for category, columns, syms in symbols:
        if category == "Standard Function Names":
            continue
        syms = syms.split()
        for sym in syms:
            if len(sym) > 1:
                # strip the leading backslash before looking up the name
                all_symbols[sym[1:]] = None
                if sym[1:] not in _mathtext_data.tex2uni:
                    print(sym)
    print("SYMBOLS NOT IN TABLE:")
    for sym in _mathtext_data.tex2uni:
        if sym not in all_symbols:
            print(sym)
| bjornaa/gridmap | doc/sphinxext/math_symbol_table.py | Python | mit | 6,887 | [
"Bowtie"
] | 54854650df9a77a29625c935619ce8a6bb77d909f076d0d74d07ae651c631656 |
"""
Vector Autoregression (VAR) processes
References
----------
Lutkepohl (2005) New Introduction to Multiple Time Series Analysis
"""
from __future__ import division
from collections import defaultdict
from cStringIO import StringIO
import numpy as np
import numpy.linalg as npl
from numpy.linalg import cholesky as chol, solve
import scipy.stats as stats
import scipy.linalg as L
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.tools import chain_dot
from statsmodels.tsa.tsatools import vec, unvec
from statsmodels.tsa.vector_ar.irf import IRAnalysis
from statsmodels.tsa.vector_ar.output import VARSummary
import statsmodels.tsa.tsatools as tsa
import statsmodels.tsa.vector_ar.output as output
import statsmodels.tsa.vector_ar.plotting as plotting
import statsmodels.tsa.vector_ar.util as util
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.wrapper as wrap
mat = np.array
#-------------------------------------------------------------------------------
# VAR process routines
def ma_rep(coefs, maxn=10):
    r"""
    MA(\infty) representation of VAR(p) process
    Parameters
    ----------
    coefs : ndarray (p x k x k)
        VAR coefficient matrices A_1, ..., A_p
    maxn : int
        Number of MA matrices to compute
    Notes
    -----
    VAR(p) process as
    .. math:: y_t = A_1 y_{t-1} + \ldots + A_p y_{t-p} + u_t
    can be equivalently represented as
    .. math:: y_t = \mu + \sum_{i=0}^\infty \Phi_i u_{t-i}
    e.g. can recursively compute the \Phi_i matrices with \Phi_0 = I_k
    Returns
    -------
    phis : ndarray (maxn + 1 x k x k)
    """
    # avoid binding the same name twice (was `p, k, k = coefs.shape`)
    p, k, _ = coefs.shape
    phis = np.zeros((maxn + 1, k, k))
    phis[0] = np.eye(k)
    # recursively compute Phi matrices: Phi_i = sum_{j<=min(i,p)} Phi_{i-j} A_j
    # range() instead of Python-2-only xrange()
    for i in range(1, maxn + 1):
        for j in range(1, i + 1):
            if j > p:
                break
            phis[i] += np.dot(phis[i - j], coefs[j - 1])
    return phis
def is_stable(coefs, verbose=False):
    """
    Determine stability of VAR(p) system by examining the eigenvalues of the
    VAR(1) representation
    Parameters
    ----------
    coefs : ndarray (p x k x k)
    verbose : bool
        If True, print the moduli of the companion-matrix eigenvalues
    Returns
    -------
    is_stable : bool
        True when all eigenvalue moduli are <= 1
    """
    A_var1 = util.comp_matrix(coefs)
    eigs = np.linalg.eigvals(A_var1)
    if verbose:
        # print() call form works on both Python 2 and Python 3
        # (the original used Python-2-only print statements)
        print('Eigenvalues of VAR(1) rep')
        for val in np.abs(eigs):
            print(val)
    return (np.abs(eigs) <= 1).all()
def var_acf(coefs, sig_u, nlags=None):
    """
    Compute autocovariance function ACF_y(h) up to nlags of stable VAR(p)
    process
    Parameters
    ----------
    coefs : ndarray (p x k x k)
        Coefficient matrices A_i
    sig_u : ndarray (k x k)
        Covariance of white noise process u_t
    nlags : int, optional
        Defaults to order p of system
    Notes
    -----
    Ref: Lutkepohl p.28-29
    Returns
    -------
    acf : ndarray, (nlags + 1, k, k)
        Autocovariances for lags 0 through nlags
    """
    p, k, _ = coefs.shape
    if nlags is None:
        nlags = p
    # lags 0, ..., p-1 come from solving the implied Lyapunov equation
    result = np.zeros((nlags + 1, k, k))
    result[:p] = _var_acf(coefs, sig_u)
    # remaining lags via the Yule-Walker recursion
    # (range() instead of Python-2-only xrange())
    for h in range(p, nlags + 1):
        # G(h) = A_1 G(h-1) + ... + A_p G(h-p)
        for j in range(p):
            result[h] += np.dot(coefs[j], result[h - j - 1])
    return result
def _var_acf(coefs, sig_u):
    """
    Compute autocovariance function ACF_y(h) for h=1,...,p using the
    VAR(1) companion-form representation.
    Notes
    -----
    Lutkepohl (2005) p.29
    """
    p, k, k2 = coefs.shape
    assert(k == k2)
    comp = util.comp_matrix(coefs)
    dim = k * p
    # companion-form noise covariance: only the top-left k x k block is nonzero
    sig_comp = np.zeros((dim, dim))
    sig_comp[:k, :k] = sig_u
    # vec(ACF) = (I_(kp)^2 - kron(A, A))^-1 vec(Sigma_U)
    vec_acf = L.solve(np.eye(dim ** 2) - np.kron(comp, comp), vec(sig_comp))
    stacked = unvec(vec_acf)
    return stacked[:k].T.reshape((p, k, k))
def forecast(y, coefs, intercept, steps):
    """
    Produce linear MSE forecast
    Parameters
    ----------
    y : ndarray (p x k)
        Most recent p observations; y[-1] is the latest.
    coefs : ndarray (p x k x k)
        VAR coefficient matrices A_1, ..., A_p
    intercept : ndarray (length k) or scalar
        Deterministic term added at every forecast step
    steps : int
        Number of steps ahead to forecast
    Returns
    -------
    forecasts : ndarray (steps x neqs)
    Notes
    -----
    Lutkepohl p. 37
    Also used by DynamicVAR class
    """
    p = len(coefs)
    k = len(coefs[0])
    # every forecast row starts at the intercept; lag terms accumulate below
    forcs = np.zeros((steps, k)) + intercept
    # h is the 1-based forecast horizon; forcs[h-1] holds y_T(h)
    # (range() instead of Python-2-only xrange())
    for h in range(1, steps + 1):
        # y_t(h) = intercept + sum_1^p A_i y_t_(h-i)
        f = forcs[h - 1]
        for i in range(1, p + 1):
            # when h - i <= 0 the lagged value is an observation
            # (e.g. h=1, i=1 uses y[-1], the latest observation);
            # otherwise it is an earlier forecast (e.g. h=2 uses forcs[0])
            if h - i <= 0:
                prior_y = y[h - i - 1]
            else:
                prior_y = forcs[h - i - 1]
            # i=1 is coefs[0]
            f = f + np.dot(coefs[i - 1], prior_y)
        forcs[h - 1] = f
    return forcs
def forecast_cov(ma_coefs, sig_u, steps):
    """
    Compute theoretical forecast error variance matrices
    Parameters
    ----------
    ma_coefs : ndarray (>= steps x k x k)
        MA coefficient matrices (e.g. from ma_rep)
    sig_u : ndarray (k x k)
        Innovation covariance
    steps : int
        Number of forecast horizons
    Returns
    -------
    forc_covs : ndarray (steps x neqs x neqs)
    """
    k = len(sig_u)
    forc_covs = np.zeros((steps, k, k))
    prior = np.zeros((k, k))
    # range() instead of Python-2-only xrange()
    for h in range(steps):
        # Sigma(h) = Sigma(h-1) + Phi Sig_u Phi'
        phi = ma_coefs[h]
        var = chain_dot(phi, sig_u, phi.T)
        forc_covs[h] = prior = prior + var
    return forc_covs
def var_loglike(resid, omega, nobs):
    r"""
    Returns the value of the VAR(p) log-likelihood.
    Parameters
    ----------
    resid : ndarray (T x K)
    omega : ndarray
        Sigma hat matrix. Each element i,j is the average product of the
        OLS residual for variable i and the OLS residual for variable j or
        np.dot(resid.T,resid)/nobs. There should be no correction for the
        degrees of freedom.
    nobs : int
    Returns
    -------
    llf : float
        The value of the loglikelihood function for a VAR(p) model
    Notes
    -----
    The loglikelihood function for the VAR(p) is
    .. math::
        -\left(\frac{T}{2}\right)
        \left(\ln\left|\Omega\right|-K\ln\left(2\pi\right)-K\right)
    """
    neqs = len(omega)
    logdet = util.get_logdet(np.asarray(omega))
    # constant piece: -(T*K/2) * ln(2*pi)
    const_part = - (nobs * neqs / 2) * np.log(2 * np.pi)
    # data-dependent piece: -(T/2) * (ln|Omega| + K)
    det_part = - (nobs / 2) * (logdet + neqs)
    return const_part + det_part
def _reordered(self, order):
    """Return a new VARResults with variables permuted into `order`.

    Helper for ``VARResults.reorder``: copies the fitted arrays from
    ``self`` (a VARResults instance) into freshly permuted copies and
    re-wraps them in a VARResults.
    """
    #Create new arrays to hold rearranged results from .fit()
    endog = self.endog
    endog_lagged = self.endog_lagged
    params = self.params
    sigma_u = self.sigma_u
    names = self.names
    k_ar = self.k_ar
    endog_new = np.zeros([np.size(endog,0),np.size(endog,1)])
    endog_lagged_new = np.zeros([np.size(endog_lagged,0), np.size(endog_lagged,1)])
    params_new_inc, params_new = [np.zeros([np.size(params,0), np.size(params,1)])
                                  for i in range(2)]
    sigma_u_new_inc, sigma_u_new = [np.zeros([np.size(sigma_u,0), np.size(sigma_u,1)])
                                    for i in range(2)]
    num_end = len(self.params[0])
    names_new = []
    #Rearrange elements and fill in new arrays
    k = self.k_trend
    for i, c in enumerate(order):
        endog_new[:,i] = self.endog[:,c]
        if k > 0:
            # trend/intercept row (row 0) is copied, not permuted
            params_new_inc[0,i] = params[0,i]
            endog_lagged_new[:,0] = endog_lagged[:,0]
        for j in range(k_ar):
            # move the row/column belonging to variable c at each lag j
            params_new_inc[i+j*num_end+k,:] = self.params[c+j*num_end+k,:]
            endog_lagged_new[:,i+j*num_end+k] = endog_lagged[:,c+j*num_end+k]
        sigma_u_new_inc[i,:] = sigma_u[c,:]
        names_new.append(names[c])
    # second pass permutes the columns of the intermediate arrays
    for i, c in enumerate(order):
        params_new[:,i] = params_new_inc[:,c]
        sigma_u_new[:,i] = sigma_u_new_inc[:,c]
    return VARResults(endog=endog_new, endog_lagged=endog_lagged_new,
                      params=params_new, sigma_u=sigma_u_new,
                      lag_order=self.k_ar, model=self.model,
                      trend='c', names=names_new, dates=self.dates)
#-------------------------------------------------------------------------------
# VARProcess class: for known or unknown VAR process
class VAR(tsbase.TimeSeriesModel):
    r"""
    Fit VAR(p) process and do lag order selection
    .. math:: y_t = A_1 y_{t-1} + \ldots + A_p y_{t-p} + u_t
    Parameters
    ----------
    endog : array-like
        2-d endogenous response variable. The independent variable.
    names : array-like
        must match number of columns of endog (deprecated)
    dates : array-like
        must match number of rows of endog
    References
    ----------
    Lutkepohl (2005) New Introduction to Multiple Time Series Analysis
    """
    def __init__(self, endog, dates=None, names=None, freq=None,
                 missing='none'):
        super(VAR, self).__init__(endog, None, dates, freq, missing=missing)
        if self.endog.ndim == 1:
            raise ValueError("Only gave one variable to VAR")
        if names is not None:
            import warnings
            warnings.warn("The names argument is deprecated and will be "
                          "removed in the next release.", FutureWarning)
            self.names = names
        else:
            self.names = self.endog_names
        self.y = self.endog  # keep alias for now
        self.neqs = self.endog.shape[1]

    def _get_predict_start(self, start, k_ar):
        # in-sample predictions cannot begin before k_ar observations exist
        if start is None:
            start = k_ar
        return super(VAR, self)._get_predict_start(start)

    def predict(self, params, start=None, end=None, lags=1, trend='c'):
        """
        Returns in-sample predictions or forecasts
        """
        start = self._get_predict_start(start, lags)
        end, out_of_sample = self._get_predict_end(end)
        if end < start:
            raise ValueError("end is before start")
        if end == start + out_of_sample:
            return np.array([])
        k_trend = util.get_trendorder(trend)
        k = self.neqs
        k_ar = lags
        predictedvalues = np.zeros((end + 1 - start + out_of_sample, k))
        if k_trend != 0:
            intercept = params[:k_trend]
            predictedvalues += intercept
        y = self.y
        X = util.get_var_endog(y, lags, trend=trend)
        fittedvalues = np.dot(X, params)
        # splice the in-sample fitted values into the output window
        fv_start = start - k_ar
        pv_end = min(len(predictedvalues), len(fittedvalues) - fv_start)
        fv_end = min(len(fittedvalues), end - k_ar + 1)
        predictedvalues[:pv_end] = fittedvalues[fv_start:fv_end]
        if not out_of_sample:
            return predictedvalues
        # fit out of sample: forecast from the last k_ar observations
        y = y[-k_ar:]
        coefs = params[k_trend:].reshape((k_ar, k, k)).swapaxes(1, 2)
        predictedvalues[pv_end:] = forecast(y, coefs, intercept, out_of_sample)
        return predictedvalues

    def fit(self, maxlags=None, method='ols', ic=None, trend='c',
            verbose=False):
        """
        Fit the VAR model
        Parameters
        ----------
        maxlags : int
            Maximum number of lags to check for order selection, defaults to
            12 * (nobs/100.)**(1./4), see select_order function
        method : {'ols'}
            Estimation method to use
        ic : {'aic', 'fpe', 'hqic', 'bic', None}
            Information criterion to use for VAR order selection.
            aic : Akaike
            fpe : Final prediction error
            hqic : Hannan-Quinn
            bic : Bayesian a.k.a. Schwarz
        verbose : bool, default False
            Print order selection output to the screen
        trend : str {"c", "ct", "ctt", "nc"}
            "c" - add constant
            "ct" - constant and trend
            "ctt" - constant, linear and quadratic trend
            "nc" - no constant, no trend
            Note that these are prepended to the columns of the dataset.
        Notes
        -----
        Lutkepohl pp. 146-153
        Returns
        -------
        est : VARResults
        """
        lags = maxlags
        if ic is not None:
            selections = self.select_order(maxlags=maxlags, verbose=verbose)
            if ic not in selections:
                raise Exception("%s not recognized, must be among %s"
                                % (ic, sorted(selections)))
            lags = selections[ic]
            if verbose:
                # print() call form works on both Python 2 and 3
                # (was a Python-2-only print statement)
                print('Using %d based on %s criterion' % (lags, ic))
        else:
            if lags is None:
                lags = 1
        k_trend = util.get_trendorder(trend)
        self.exog_names = util.make_lag_names(self.endog_names, lags, k_trend)
        self.nobs = len(self.endog) - lags
        return self._estimate_var(lags, trend=trend)

    def _estimate_var(self, lags, offset=0, trend='c'):
        """
        lags : int
        offset : int
            Periods to drop from beginning-- for order selection so it's an
            apples-to-apples comparison
        trend : string or None
            As per above
        """
        # have to do this again because select_order doesn't call fit
        self.k_trend = k_trend = util.get_trendorder(trend)
        if offset < 0:  # pragma: no cover
            raise ValueError('offset must be >= 0')
        y = self.y[offset:]
        z = util.get_var_endog(y, lags, trend=trend)
        y_sample = y[lags:]
        # Lutkepohl p75, about 5x faster than stated formula
        params = np.linalg.lstsq(z, y_sample)[0]
        resid = y_sample - np.dot(z, params)
        # Unbiased estimate of covariance matrix $\Sigma_u$ of the white noise
        # process $u$
        # equivalent definition
        # .. math:: \frac{1}{T - Kp - 1} Y^\prime (I_T - Z (Z^\prime Z)^{-1}
        # Z^\prime) Y
        # Ref: Lutkepohl p.75
        # df_resid right now is T - Kp - 1, which is a suggested correction
        avobs = len(y_sample)
        df_resid = avobs - (self.neqs * lags + k_trend)
        sse = np.dot(resid.T, resid)
        omega = sse / df_resid
        varfit = VARResults(y, z, params, omega, lags, names=self.endog_names,
                            trend=trend, dates=self.data.dates, model=self)
        return VARResultsWrapper(varfit)

    def select_order(self, maxlags=None, verbose=True):
        """
        Compute lag order selections based on each of the available information
        criteria
        Parameters
        ----------
        maxlags : int
            if None, defaults to 12 * (nobs/100.)**(1./4)
        verbose : bool, default True
            If True, print table of info criteria and selected orders
        Returns
        -------
        selections : dict {info_crit -> selected_order}
        """
        if maxlags is None:
            maxlags = int(round(12 * (len(self.endog) / 100.) ** (1 / 4.)))
        ics = defaultdict(list)
        for p in range(maxlags + 1):
            # exclude some periods to same amount of data used for each lag
            # order
            result = self._estimate_var(p, offset=maxlags - p)
            # .items() works on Python 2 and 3 (was Python-2-only .iteritems())
            for k, v in result.info_criteria.items():
                ics[k].append(v)
        selected_orders = dict((k, mat(v).argmin())
                               for k, v in ics.items())
        if verbose:
            output.print_ic_table(ics, selected_orders)
        return selected_orders
class VARProcess(object):
    """
    Class represents a known VAR(p) process
    Parameters
    ----------
    coefs : ndarray (p x k x k)
        Coefficient matrices A_1, ..., A_p
    intercept : ndarray (length k)
    sigma_u : ndarray (k x k)
        Innovation (white noise) covariance
    names : sequence (length k)
    Returns
    -------
    **Attributes**:
    """
    def __init__(self, coefs, intercept, sigma_u, names=None):
        self.k_ar = len(coefs)
        self.neqs = coefs.shape[1]
        self.coefs = coefs
        self.intercept = intercept
        self.sigma_u = sigma_u
        self.names = names

    def get_eq_index(self, name):
        "Return integer position of requested equation name"
        return util.get_index(self.names, name)

    def __str__(self):
        output = ('VAR(%d) process for %d-dimensional response y_t'
                  % (self.k_ar, self.neqs))
        output += '\nstable: %s' % self.is_stable()
        output += '\nmean: %s' % self.mean()
        return output

    def is_stable(self, verbose=False):
        """Determine stability based on model coefficients
        Parameters
        ----------
        verbose : bool
            Print eigenvalues of the VAR(1) companion
        Notes
        -----
        Checks if det(I - Az) = 0 for any mod(z) <= 1, so all the eigenvalues of
        the companion matrix must lie outside the unit circle
        """
        return is_stable(self.coefs, verbose=verbose)

    def plotsim(self, steps=1000):
        """
        Plot a simulation from the VAR(p) process for the desired number of
        steps
        """
        Y = util.varsim(self.coefs, self.intercept, self.sigma_u, steps=steps)
        plotting.plot_mts(Y)

    def mean(self):
        r"""Mean of stable process
        Lutkepohl eq. 2.1.23
        .. math:: \mu = (I - A_1 - \dots - A_p)^{-1} \alpha
        """
        return solve(self._char_mat, self.intercept)

    def ma_rep(self, maxn=10):
        r"""Compute MA(:math:`\infty`) coefficient matrices
        Parameters
        ----------
        maxn : int
            Number of coefficient matrices to compute
        Returns
        -------
        coefs : ndarray (maxn x k x k)
        """
        return ma_rep(self.coefs, maxn=maxn)

    def orth_ma_rep(self, maxn=10, P=None):
        r"""Compute Orthogonalized MA coefficient matrices using P matrix such
        that :math:`\Sigma_u = PP^\prime`. P defaults to the Cholesky
        decomposition of :math:`\Sigma_u`
        Parameters
        ----------
        maxn : int
            Number of coefficient matrices to compute
        P : ndarray (k x k), optional
            Matrix such that Sigma_u = PP', defaults to Cholesky descomp
        Returns
        -------
        coefs : ndarray (maxn x k x k)
        """
        if P is None:
            P = self._chol_sigma_u
        ma_mats = self.ma_rep(maxn=maxn)
        return mat([np.dot(coefs, P) for coefs in ma_mats])

    def long_run_effects(self):
        r"""Compute long-run effect of unit impulse
        .. math::
            \Psi_\infty = \sum_{i=0}^\infty \Phi_i
        """
        # equals (I - A_1 - ... - A_p)^-1
        return L.inv(self._char_mat)

    @cache_readonly
    def _chol_sigma_u(self):
        # Cholesky factor of the innovation covariance
        return chol(self.sigma_u)

    @cache_readonly
    def _char_mat(self):
        # characteristic matrix I_K - A_1 - ... - A_p
        return np.eye(self.neqs) - self.coefs.sum(0)

    def acf(self, nlags=None):
        """Compute theoretical autocovariance function
        Returns
        -------
        acf : ndarray (nlags + 1 x k x k)
        """
        return var_acf(self.coefs, self.sigma_u, nlags=nlags)

    def acorr(self, nlags=None):
        """Compute theoretical autocorrelation function
        Returns
        -------
        acorr : ndarray (nlags + 1 x k x k)
        """
        return util.acf_to_acorr(self.acf(nlags=nlags))

    def plot_acorr(self, nlags=10, linewidth=8):
        "Plot theoretical autocorrelation function"
        plotting.plot_full_acorr(self.acorr(nlags=nlags), linewidth=linewidth)

    def forecast(self, y, steps):
        """Produce linear minimum MSE forecasts for desired number of steps
        ahead, using prior values y
        Parameters
        ----------
        y : ndarray (p x k)
        steps : int
        Returns
        -------
        forecasts : ndarray (steps x neqs)
        Notes
        -----
        Lutkepohl pp 37-38
        """
        return forecast(y, self.coefs, self.intercept, steps)

    def mse(self, steps):
        r"""
        Compute theoretical forecast error variance matrices
        Parameters
        ----------
        steps : int
            Number of steps ahead
        Notes
        -----
        .. math:: \mathrm{MSE}(h) = \sum_{i=0}^{h-1} \Phi \Sigma_u \Phi^T
        Returns
        -------
        forc_covs : ndarray (steps x neqs x neqs)
        """
        ma_coefs = self.ma_rep(steps)
        k = len(self.sigma_u)
        forc_covs = np.zeros((steps, k, k))
        prior = np.zeros((k, k))
        # range() instead of Python-2-only xrange()
        for h in range(steps):
            # Sigma(h) = Sigma(h-1) + Phi Sig_u Phi'
            phi = ma_coefs[h]
            var = chain_dot(phi, self.sigma_u, phi.T)
            forc_covs[h] = prior = prior + var
        return forc_covs

    forecast_cov = mse

    def _forecast_vars(self, steps):
        covs = self.forecast_cov(steps)
        # Take diagonal for each cov
        inds = np.arange(self.neqs)
        return covs[:, inds, inds]

    def forecast_interval(self, y, steps, alpha=0.05):
        """Construct forecast interval estimates assuming the y are Gaussian
        Parameters
        ----------
        y : ndarray (p x k)
            Prior values to condition the forecast on
        steps : int
        alpha : float, default 0.05
            1 minus the confidence level
        Notes
        -----
        Lutkepohl pp. 39-40
        Returns
        -------
        (point, lower, upper) : (ndarray, ndarray, ndarray)
        """
        assert(0 < alpha < 1)
        q = util.norm_signif_level(alpha)
        point_forecast = self.forecast(y, steps)
        sigma = np.sqrt(self._forecast_vars(steps))
        forc_lower = point_forecast - q * sigma
        forc_upper = point_forecast + q * sigma
        return point_forecast, forc_lower, forc_upper
#-------------------------------------------------------------------------------
# VARResults class
class VARResults(VARProcess):
"""Estimate VAR(p) process with fixed number of lags
Parameters
----------
endog : array
endog_lagged : array
params : array
sigma_u : array
lag_order : int
model : VAR model instance
trend : str {'nc', 'c', 'ct'}
names : array-like
List of names of the endogenous variables in order of appearance in `endog`.
dates
Returns
-------
**Attributes**
aic
bic
bse
coefs : ndarray (p x K x K)
Estimated A_i matrices, A_i = coefs[i-1]
cov_params
dates
detomega
df_model : int
df_resid : int
endog
endog_lagged
fittedvalues
fpe
intercept
info_criteria
k_ar : int
k_trend : int
llf
model
names
neqs : int
Number of variables (equations)
nobs : int
n_totobs : int
params
k_ar : int
Order of VAR process
params : ndarray (Kp + 1) x K
A_i matrices and intercept in stacked form [int A_1 ... A_p]
pvalues
names : list
variables names
resid
roots : array
The roots of the VAR process are the solution to
(I - coefs[0]*z - coefs[1]*z**2 ... - coefs[p-1]*z**k_ar) = 0.
Note that the inverse roots are returned, and stability requires that
the roots lie outside the unit circle.
sigma_u : ndarray (K x K)
Estimate of white noise process variance Var[u_t]
sigma_u_mle
stderr
    trendorder
tvalues
y :
ys_lagged
"""
_model_type = 'VAR'
    def __init__(self, endog, endog_lagged, params, sigma_u, lag_order,
                 model=None, trend='c', names=None, dates=None):
        # Store estimation inputs and legacy aliases used across the API
        self.model = model
        self.y = self.endog = endog #keep alias for now
        self.ys_lagged = self.endog_lagged = endog_lagged #keep alias for now
        self.dates = dates
        self.n_totobs, neqs = self.y.shape
        # observations actually used in estimation (presample dropped)
        self.nobs = self.n_totobs - lag_order
        k_trend = util.get_trendorder(trend)
        if k_trend > 0: # make this the polynomial trend order
            trendorder = k_trend - 1
        else:
            trendorder = None
        self.k_trend = k_trend
        self.trendorder = trendorder
        self.exog_names = util.make_lag_names(names, lag_order, k_trend)
        self.params = params
        # Initialize VARProcess parent class
        # construct coefficient matrices
        # Each matrix needs to be transposed
        reshaped = self.params[self.k_trend:]
        reshaped = reshaped.reshape((lag_order, neqs, neqs))
        # Need to transpose each coefficient matrix
        intercept = self.params[0]
        coefs = reshaped.swapaxes(1, 2).copy()
        super(VARResults, self).__init__(coefs, intercept, sigma_u, names=names)
    @cache_readonly
    def coef_names(self):
        """Coefficient names (deprecated)
        Deprecated alias for ``exog_names``; emits a FutureWarning.
        """
        from warnings import warn
        warn("coef_names is deprecated and will be removed in 0.6.0."
             "Use exog_names", FutureWarning)
        return self.exog_names
    def plot(self):
        """Plot input time series
        Plots each endogenous series (with its name) against the stored
        date index.
        """
        plotting.plot_mts(self.y, names=self.names, index=self.dates)
    @property
    def df_model(self):
        """Number of estimated parameters, including the intercept / trends
        Notes
        -----
        Regressor count per equation: neqs * k_ar lag coefficients plus
        k_trend deterministic terms.
        """
        return self.neqs * self.k_ar + self.k_trend
    @property
    def df_resid(self):
        "Number of observations minus number of estimated parameters"
        # per-equation residual degrees of freedom
        return self.nobs - self.df_model
    @cache_readonly
    def fittedvalues(self):
        """The predicted insample values of the response variables of the model.
        """
        # Z * B: lagged regressor matrix times stacked coefficient matrix
        return np.dot(self.ys_lagged, self.params)
    @cache_readonly
    def resid(self):
        """Residuals of response variable resulting from estimated coefficients
        """
        # the first k_ar observations are presample and have no fitted value
        return self.y[self.k_ar:] - self.fittedvalues
    def sample_acov(self, nlags=1):
        # Sample autocovariances (lags 0..nlags) of the observed series,
        # presample excluded; delegates to module-level _compute_acov.
        return _compute_acov(self.y[self.k_ar:], nlags=nlags)
    def sample_acorr(self, nlags=1):
        # Sample autocorrelations derived from sample_acov.
        acovs = self.sample_acov(nlags=nlags)
        return _acovs_to_acorrs(acovs)
    def plot_sample_acorr(self, nlags=10, linewidth=8):
        # (docstring previously said "theoretical"; this plots the SAMPLE acf)
        "Plot the sample autocorrelation function of the input series"
        plotting.plot_full_acorr(self.sample_acorr(nlags=nlags),
                                 linewidth=linewidth)
    def resid_acov(self, nlags=1):
        """
        Compute centered sample autocovariance of the residuals
        (including lag 0)
        Parameters
        ----------
        nlags : int
            Highest lag to include
        Returns
        -------
        acovs : ndarray
            Residual autocovariances for lags 0 through nlags
        """
        return _compute_acov(self.resid, nlags=nlags)
    def resid_acorr(self, nlags=1):
        """
        Compute sample autocorrelation of the residuals (including lag 0)
        Parameters
        ----------
        nlags : int
            Highest lag to include
        Returns
        -------
        acorrs : ndarray
            Residual autocorrelations for lags 0 through nlags
        """
        acovs = self.resid_acov(nlags=nlags)
        return _acovs_to_acorrs(acovs)
    @cache_readonly
    def resid_corr(self):
        "Centered residual correlation matrix"
        # lag-0 entry of the residual autocorrelation sequence
        return self.resid_acorr(0)[0]
    @cache_readonly
    def sigma_u_mle(self):
        """(Biased) maximum likelihood estimate of noise process covariance
        """
        # rescales the unbiased estimate: sigma_u * df_resid / T
        return self.sigma_u * self.df_resid / self.nobs
    @cache_readonly
    def cov_params(self):
        """Estimated variance-covariance of model coefficients
        Notes
        -----
        Covariance of vec(B), where B is the matrix
        [intercept, A_1, ..., A_p] (K x (Kp + 1))
        Adjusted to be an unbiased estimator
        Ref: Lutkepohl p.74-75
        """
        z = self.ys_lagged
        # kron((Z'Z)^-1, Sigma_u)
        return np.kron(L.inv(np.dot(z.T, z)), self.sigma_u)
    def cov_ybar(self):
        r"""Asymptotically consistent estimate of covariance of the sample mean
        .. math::
            \sqrt(T) (\bar{y} - \mu) \rightarrow {\cal N}(0, \Sigma_{\bar{y}})\\
            \Sigma_{\bar{y}} = B \Sigma_u B^\prime, \text{where } B = (I_K - A_1
            - \cdots - A_p)^{-1}
        Notes
        -----
        Lutkepohl Proposition 3.3
        """
        # B = (I - A_1 - ... - A_p)^-1
        Ainv = L.inv(np.eye(self.neqs) - self.coefs.sum(0))
        return chain_dot(Ainv, self.sigma_u, Ainv.T)
#------------------------------------------------------------
# Estimation-related things
    @cache_readonly
    def _zz(self):
        # Z'Z
        # cross-product of the lagged-regressor matrix, cached for reuse
        return np.dot(self.ys_lagged.T, self.ys_lagged)
    @property
    def _cov_alpha(self):
        """
        Estimated covariance matrix of model coefficients ex intercept
        """
        # drop intercept: the first neqs rows/columns of cov_params
        # correspond to the intercept terms
        return self.cov_params[self.neqs:, self.neqs:]
    @cache_readonly
    def _cov_sigma(self):
        """
        Estimated covariance matrix of vech(sigma_u)
        """
        # D_K is the duplication matrix (vech -> vec for symmetric K x K)
        D_K = tsa.duplication_matrix(self.neqs)
        D_Kinv = npl.pinv(D_K)
        sigxsig = np.kron(self.sigma_u, self.sigma_u)
        # 2 * D_K^+ (Sigma (x) Sigma) (D_K^+)'
        return 2 * chain_dot(D_Kinv, sigxsig, D_Kinv.T)
    @cache_readonly
    def llf(self):
        "Compute VAR(p) loglikelihood"
        # uses the biased (MLE) covariance estimate, as var_loglike expects
        return var_loglike(self.resid, self.sigma_u_mle, self.nobs)
@cache_readonly
def stderr(self):
"""Standard errors of coefficients, reshaped to match in size
"""
stderr = np.sqrt(np.diag(self.cov_params))
return stderr.reshape((self.df_model, self.neqs), order='C')
bse = stderr # statsmodels interface?
    @cache_readonly
    def tvalues(self):
        """Compute t-statistics. Use Student-t(T - Kp - 1) = t(df_resid) to test
        significance.
        """
        # elementwise ratio; same (df_model x neqs) layout as params
        return self.params / self.stderr
    @cache_readonly
    def pvalues(self):
        """Two-sided p-values for model coefficients from Student t-distribution
        """
        # 2 * P(T > |t|) with df_resid degrees of freedom
        return stats.t.sf(np.abs(self.tvalues), self.df_resid)*2
    def plot_forecast(self, steps, alpha=0.05, plot_stderr=True):
        """
        Plot forecast
        Parameters
        ----------
        steps : int
            Number of steps ahead to forecast
        alpha : float, default 0.05
            Significance level for the forecast interval
        plot_stderr : bool, default True
            Passed through to plotting.plot_var_forc
        """
        # forecast from the last k_ar observations
        mid, lower, upper = self.forecast_interval(self.y[-self.k_ar:], steps,
                                                   alpha=alpha)
        plotting.plot_var_forc(self.y, mid, lower, upper, names=self.names,
                               plot_stderr=plot_stderr)
# Forecast error covariance functions
    def forecast_cov(self, steps=1):
        r"""Compute forecast covariance matrices for desired number of steps
        Parameters
        ----------
        steps : int
        Notes
        -----
        .. math:: \Sigma_{\hat y}(h) = \Sigma_y(h) + \Omega(h) / T
        Ref: Lutkepohl pp. 96-97
        Returns
        -------
        covs : ndarray (steps x k x k)
        """
        # theoretical MSE plus the estimation-error correction Omega(h)/T
        mse = self.mse(steps)
        omegas = self._omega_forc_cov(steps)
        return mse + omegas / self.nobs
#Monte Carlo irf standard errors
def irf_errband_mc(self, orth=False, repl=1000, T=10,
signif=0.05, seed=None, burn=100, cum=False):
"""
Compute Monte Carlo integrated error bands assuming normally
distributed for impulse response functions
Parameters
----------
orth: bool, default False
Compute orthoganalized impulse response error bands
repl: int
number of Monte Carlo replications to perform
T: int, default 10
number of impulse response periods
signif: float (0 < signif <1)
Significance level for error bars, defaults to 95% CI
seed: int
np.random.seed for replications
burn: int
number of initial observations to discard for simulation
cum: bool, default False
produce cumulative irf error bands
Notes
-----
Lutkepohl (2005) Appendix D
Returns
-------
Tuple of lower and upper arrays of ma_rep monte carlo standard errors
"""
neqs = self.neqs
mean = self.mean()
k_ar = self.k_ar
coefs = self.coefs
sigma_u = self.sigma_u
intercept = self.intercept
df_model = self.df_model
nobs = self.nobs
ma_coll = np.zeros((repl, T+1, neqs, neqs))
if (orth == True and cum == True):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
orth_ma_rep(maxn=T).cumsum(axis=0)
elif (orth == True and cum == False):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
orth_ma_rep(maxn=T)
elif (orth == False and cum == True):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
ma_rep(maxn=T).cumsum(axis=0)
elif (orth == False and cum == False):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
ma_rep(maxn=T)
for i in range(repl):
#discard first hundred to eliminate correct for starting bias
sim = util.varsim(coefs, intercept, sigma_u, steps=nobs+burn)
sim = sim[burn:]
ma_coll[i,:,:,:] = fill_coll(sim)
ma_sort = np.sort(ma_coll, axis=0) #sort to get quantiles
index = round(signif/2*repl)-1,round((1-signif/2)*repl)-1
lower = ma_sort[index[0],:, :, :]
upper = ma_sort[index[1],:, :, :]
return lower, upper
    def irf_resim(self, orth=False, repl=1000, T=10,
                  seed=None, burn=100, cum=False):
        """
        Simulates impulse response function, returning an array of simulations.
        Used for Sims-Zha error band calculation.
        Parameters
        ----------
        orth: bool, default False
            Compute orthoganalized impulse response error bands
        repl: int
            number of Monte Carlo replications to perform
        T: int, default 10
            number of impulse response periods
        seed: int
            np.random.seed for replications
        burn: int
            number of initial observations to discard for simulation
        cum: bool, default False
            produce cumulative irf error bands
        Notes
        -----
        Sims, Christoper A., and Tao Zha. 1999. "Error Bands for Impulse Response." Econometrica 67: 1113-1155.
        Returns
        -------
        Array of simulated impulse response functions
        """
        neqs = self.neqs
        # NOTE(review): `mean` and `df_model` are computed but never used below
        mean = self.mean()
        k_ar = self.k_ar
        coefs = self.coefs
        sigma_u = self.sigma_u
        intercept = self.intercept
        df_model = self.df_model
        nobs = self.nobs
        if seed is not None:
            np.random.seed(seed=seed)
        ma_coll = np.zeros((repl, T+1, neqs, neqs))
        # select the MA-representation builder once, based on orth/cum flags
        if (orth == True and cum == True):
            fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
                        orth_ma_rep(maxn=T).cumsum(axis=0)
        elif (orth == True and cum == False):
            fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
                        orth_ma_rep(maxn=T)
        elif (orth == False and cum == True):
            fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
                        ma_rep(maxn=T).cumsum(axis=0)
        elif (orth == False and cum == False):
            fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
                        ma_rep(maxn=T)
        for i in range(repl):
            #discard first hundred to eliminate correct for starting bias
            sim = util.varsim(coefs, intercept, sigma_u, steps=nobs+burn)
            sim = sim[burn:]
            ma_coll[i,:,:,:] = fill_coll(sim)
        return ma_coll
    def _omega_forc_cov(self, steps):
        # Approximate MSE matrix \Omega(h) as defined in Lut p97
        G = self._zz
        Ginv = L.inv(G)
        # memoize powers of B for speedup
        # TODO: see if can memoize better
        B = self._bmat_forc_cov()
        _B = {}
        def bpow(i):
            # cached integer matrix power B^i
            if i not in _B:
                _B[i] = np.linalg.matrix_power(B, i)
            return _B[i]
        phis = self.ma_rep(steps)
        sig_u = self.sigma_u
        omegas = np.zeros((steps, self.neqs, self.neqs))
        for h in range(1, steps + 1):
            # base case h=1 reduces to df_model * Sigma_u
            if h == 1:
                omegas[h-1] = self.df_model * self.sigma_u
                continue
            om = omegas[h-1]
            # double sum over MA terms weighted by tr(B'^i G^-1 B^j G)
            for i in range(h):
                for j in range(h):
                    Bi = bpow(h - 1 - i)
                    Bj = bpow(h - 1 - j)
                    mult = np.trace(chain_dot(Bi.T, Ginv, Bj, G))
                    om += mult * chain_dot(phis[i], sig_u, phis[j].T)
            omegas[h-1] = om
        return omegas
    def _bmat_forc_cov(self):
        # B as defined on p. 96 of Lut
        # first row selects the deterministic term
        upper = np.zeros((1, self.df_model))
        upper[0,0] = 1
        # identity block shifts the lagged values down one lag
        lower_dim = self.neqs * (self.k_ar - 1)
        I = np.eye(lower_dim)
        lower = np.column_stack((np.zeros((lower_dim, 1)), I,
                                 np.zeros((lower_dim, self.neqs))))
        return np.vstack((upper, self.params.T, lower))
    def summary(self):
        """Compute console output summary of estimates
        Returns
        -------
        summary : VARSummary
        """
        # VARSummary does all formatting from this results instance
        return VARSummary(self)
    def irf(self, periods=10, var_decomp=None, var_order=None):
        """Analyze impulse responses to shocks in system
        Parameters
        ----------
        periods : int
            Number of response periods to compute
        var_decomp : ndarray (k x k), lower triangular
            Must satisfy Omega = P P', where P is the passed matrix. Defaults to
            Cholesky decomposition of Omega
        var_order : sequence
            Alternate variable order for Cholesky decomposition
        Returns
        -------
        irf : IRAnalysis
        """
        # alternate ordering is not supported yet; fail loudly instead of
        # silently ignoring the argument
        if var_order is not None:
            raise NotImplementedError('alternate variable order not implemented'
                                      ' (yet)')
        return IRAnalysis(self, P=var_decomp, periods=periods)
    def fevd(self, periods=10, var_decomp=None):
        """
        Compute forecast error variance decomposition ("fevd")
        Parameters
        ----------
        periods : int
            Number of periods to include
        var_decomp : ndarray (k x k), optional
            Orthogonalization matrix P, passed through to FEVD
        Returns
        -------
        fevd : FEVD instance
        """
        return FEVD(self, P=var_decomp, periods=periods)
def reorder(self, order):
"""Reorder variables for structural specification
"""
if len(order) != len(self.params[0,:]):
raise ValueError("Reorder specification length should match number of endogenous variables")
#This convert order to list of integers if given as strings
if type(order[0]) is str:
order_new = []
for i, nam in enumerate(order):
order_new.append(self.names.index(order[i]))
order = order_new
return _reordered(self, order)
#-------------------------------------------------------------------------------
# VAR Diagnostics: Granger-causality, whiteness of residuals, normality, etc.
    def test_causality(self, equation, variables, kind='f', signif=0.05,
                       verbose=True):
        """Compute test statistic for null hypothesis of Granger-noncausality,
        general function to test joint Granger-causality of multiple variables
        Parameters
        ----------
        equation : string or int
            Equation to test for causality
        variables : sequence (of strings or ints)
            List, tuple, etc. of variables to test for Granger-causality
        kind : {'f', 'wald'}
            Perform F-test or Wald (chi-sq) test
        signif : float, default 5%
            Significance level for computing critical values for test,
            defaulting to standard 0.95 level
        verbose : bool, default True
            Print a formatted summary of the test result
        Notes
        -----
        Null hypothesis is that there is no Granger-causality for the indicated
        variables. The degrees of freedom in the F-test are based on the
        number of variables in the VAR system, that is, degrees of freedom
        are equal to the number of equations in the VAR times degree of freedom
        of a single equation.
        Returns
        -------
        results : dict
        """
        # Allow a single variable (name or index) without wrapping in a list.
        if isinstance(variables, (basestring, int, np.integer)):
            variables = [variables]
        k, p = self.neqs, self.k_ar
        # number of restrictions
        N = len(variables) * self.k_ar
        # Make restriction matrix: each row zeroes out one coefficient of the
        # tested variables in the target equation.
        C = np.zeros((N, k ** 2 * p + k), dtype=float)
        eq_index = self.get_eq_index(equation)
        vinds = mat([self.get_eq_index(v) for v in variables])
        # remember, vec is column order!
        offsets = np.concatenate([k + k ** 2 * j + k * vinds + eq_index
                                  for j in range(p)])
        C[np.arange(N), offsets] = 1
        # Lutkepohl 3.6.5
        Cb = np.dot(C, vec(self.params.T))
        middle = L.inv(chain_dot(C, self.cov_params, C.T))
        # wald statistic
        lam_wald = statistic = chain_dot(Cb, middle, Cb)
        if kind.lower() == 'wald':
            df = N
            dist = stats.chi2(df)
        elif kind.lower() == 'f':
            # F statistic is the Wald statistic scaled by the restriction count.
            statistic = lam_wald / N
            df = (N, k * self.df_resid)
            dist = stats.f(*df)
        else:
            raise Exception('kind %s not recognized' % kind)
        pvalue = dist.sf(statistic)
        crit_value = dist.ppf(1 - signif)
        conclusion = 'fail to reject' if statistic < crit_value else 'reject'
        results = {
            'statistic' : statistic,
            'crit_value' : crit_value,
            'pvalue' : pvalue,
            'df' : df,
            'conclusion' : conclusion,
            'signif' : signif
        }
        if verbose:
            summ = output.causality_summary(results, variables, equation, kind)
            print summ
        return results
    def test_whiteness(self, nlags=10, plot=True, linewidth=8):
        r"""
        Test white noise assumption. Sample (Y) autocorrelations are compared
        with the standard :math:`2 / \sqrt(T)` bounds.
        Parameters
        ----------
        nlags : int, default 10
            Number of autocorrelation lags to inspect
        plot : boolean, default True
            Plot autocorrelations with 2 / sqrt(T) bounds
        linewidth : int, default 8
            Line width passed through to the autocorrelation plot
        """
        acorrs = self.sample_acorr(nlags)
        # Approximate 95% bound for sample autocorrelations of white noise.
        bound = 2 / np.sqrt(self.nobs)
        # TODO: this probably needs some UI work
        if (np.abs(acorrs) > bound).any():
            print ('FAIL: Some autocorrelations exceed %.4f bound. '
                   'See plot' % bound)
        else:
            print 'PASS: No autocorrelations exceed %.4f bound' % bound
        if plot:
            # Lag 0 autocorrelation is identically 1, so skip it in the plot.
            fig = plotting.plot_full_acorr(acorrs[1:],
                                           xlabel=np.arange(1, nlags+1),
                                           err_bound=bound,
                                           linewidth=linewidth)
            fig.suptitle(r"ACF plots with $2 / \sqrt{T}$ bounds "
                         "for testing whiteness assumption")
    def test_normality(self, signif=0.05, verbose=True):
        """
        Test assumption of normal-distributed errors using Jarque-Bera-style
        omnibus Chi^2 test
        Parameters
        ----------
        signif : float
            Test significance threshold
        verbose : bool, default True
            Print a formatted summary of the test result
        Notes
        -----
        H0 (null) : data are generated by a Gaussian-distributed process
        Returns
        -------
        results : dict
        """
        # Standardize residuals with the inverse Cholesky factor of sigma_u.
        Pinv = npl.inv(self._chol_sigma_u)
        w = np.array([np.dot(Pinv, u) for u in self.resid])
        # Third moment -> skewness component of the statistic.
        b1 = (w ** 3).sum(0) / self.nobs
        lam_skew = self.nobs * np.dot(b1, b1) / 6
        # Fourth moment (excess over 3) -> kurtosis component.
        b2 = (w ** 4).sum(0) / self.nobs - 3
        lam_kurt = self.nobs * np.dot(b2, b2) / 24
        lam_omni = lam_skew + lam_kurt
        # Omnibus statistic is chi^2 with 2 degrees of freedom per equation.
        omni_dist = stats.chi2(self.neqs * 2)
        omni_pvalue = omni_dist.sf(lam_omni)
        crit_omni = omni_dist.ppf(1 - signif)
        conclusion = 'fail to reject' if lam_omni < crit_omni else 'reject'
        results = {
            'statistic' : lam_omni,
            'crit_value' : crit_omni,
            'pvalue' : omni_pvalue,
            'df' : self.neqs * 2,
            'conclusion' : conclusion,
            'signif' : signif
        }
        if verbose:
            summ = output.normality_summary(results)
            print summ
        return results
    @cache_readonly
    def detomega(self):
        r"""
        Return determinant of white noise covariance with degrees of freedom
        correction:
        .. math::
            \hat \Omega = \frac{T}{T - Kp - 1} \hat \Omega_{\mathrm{MLE}}
        """
        # NOTE(review): presumably self.sigma_u already carries the df
        # correction described above (vs. sigma_u_mle) -- confirm against the
        # attribute's definition elsewhere in this class.
        return L.det(self.sigma_u)
    @cache_readonly
    def info_criteria(self):
        "information criteria for lagorder selection"
        nobs = self.nobs
        neqs = self.neqs
        lag_order = self.k_ar
        # Number of freely estimated parameters: lag coefficient matrices
        # plus deterministic (trend) terms per equation.
        free_params = lag_order * neqs ** 2 + neqs * self.k_trend
        ld = util.get_logdet(self.sigma_u_mle)
        # See Lutkepohl pp. 146-150
        aic = ld + (2. / nobs) * free_params
        bic = ld + (np.log(nobs) / nobs) * free_params
        hqic = ld + (2. * np.log(np.log(nobs)) / nobs) * free_params
        # Final prediction error: scaled determinant of the MLE covariance.
        fpe = ((nobs + self.df_model) / self.df_resid) ** neqs * np.exp(ld)
        return {
            'aic' : aic,
            'bic' : bic,
            'hqic' : hqic,
            'fpe' : fpe
        }
    @property
    def aic(self):
        "Akaike information criterion"
        return self.info_criteria['aic']
    @property
    def fpe(self):
        """Final Prediction Error (FPE)
        Lutkepohl p. 147, see info_criteria
        """
        return self.info_criteria['fpe']
    @property
    def hqic(self):
        "Hannan-Quinn criterion"
        return self.info_criteria['hqic']
    @property
    def bic(self):
        "Bayesian a.k.a. Schwarz info criterion"
        return self.info_criteria['bic']
    @cache_readonly
    def roots(self):
        """
        Roots of the VAR characteristic polynomial: the inverse eigenvalues
        of the companion matrix, sorted by decreasing modulus.  The estimated
        process is stable when all roots lie outside the unit circle.
        """
        neqs = self.neqs
        k_ar = self.k_ar
        p = neqs * k_ar
        # Build the (neqs*k_ar) x (neqs*k_ar) companion matrix: coefficient
        # matrices stacked in the top block row, shifted identity below.
        arr = np.zeros((p,p))
        arr[:neqs,:] = np.column_stack(self.coefs)
        arr[neqs:,:-neqs] = np.eye(p-neqs)
        roots = np.linalg.eig(arr)[0]**-1
        idx = np.argsort(np.abs(roots))[::-1] # sort by reverse modulus
        return roots[idx]
class VARResultsWrapper(wrap.ResultsWrapper):
    """Results wrapper mapping VARResults attributes to their data formats
    (per-equation columns, covariance matrices, ...) so that wrapped output
    retains labels from the original data."""
    _attrs = {'bse' : 'columns_eq', 'cov_params' : 'cov',
              'params' : 'columns_eq', 'pvalues' : 'columns_eq',
              'tvalues' : 'columns_eq', 'sigma_u' : 'cov_eq',
              'sigma_u_mle' : 'cov_eq',
              'stderr' : 'columns_eq'}
    _wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
                                    _attrs)
    _methods = {}
    _wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
                                     _methods)
    _wrap_methods.pop('cov_params') # not yet a method in VARResults
wrap.populate_wrapper(VARResultsWrapper, VARResults)
class FEVD(object):
    """
    Compute and plot Forecast error variance decomposition and asymptotic
    standard errors
    """
    def __init__(self, model, P=None, periods=None):
        """Decompose forecast error variance of ``model`` over ``periods``
        steps, using P (Omega = P P', Cholesky by default) for
        orthogonalization."""
        self.periods = periods
        self.model = model
        self.neqs = model.neqs
        self.names = model.names
        self.irfobj = model.irf(var_decomp=P, periods=periods)
        self.orth_irfs = self.irfobj.orth_irfs
        # cumulative impulse responses
        irfs = (self.orth_irfs[:periods] ** 2).cumsum(axis=0)
        rng = range(self.neqs)
        # Diagonal of the forecast MSE matrices: own-variance per equation.
        mse = self.model.mse(periods)[:, rng, rng]
        # lag x equation x component
        fevd = np.empty_like(irfs)
        for i in range(periods):
            fevd[i] = (irfs[i].T / mse[i]).T
        # switch to equation x lag x component
        self.decomp = fevd.swapaxes(0, 1)
    def summary(self):
        """Print the decomposition table for each equation."""
        buf = StringIO()
        rng = range(self.periods)
        for i in range(self.neqs):
            ppm = output.pprint_matrix(self.decomp[i], rng, self.names)
            print >> buf, 'FEVD for %s' % self.names[i]
            print >> buf, ppm
        print buf.getvalue()
    def cov(self):
        """Compute asymptotic standard errors
        Returns
        -------
        """
        raise NotImplementedError
    def plot(self, periods=None, figsize=(10,10), **plot_kwds):
        """Plot graphical display of FEVD
        Parameters
        ----------
        periods : int, default None
            Defaults to number originally specified. Can be at most that number
        """
        import matplotlib.pyplot as plt
        k = self.neqs
        periods = periods or self.periods
        fig, axes = plt.subplots(nrows=k, figsize=figsize)
        fig.suptitle('Forecast error variance decomposition (FEVD)')
        # Grayscale palette: one shade per component.
        colors = [str(c) for c in np.arange(k, dtype=float) / k]
        ticks = np.arange(periods)
        # Cumulative shares along the component axis give stacked-bar limits.
        limits = self.decomp.cumsum(2)
        for i in range(k):
            ax = axes[i]
            this_limits = limits[i].T
            handles = []
            for j in range(k):
                lower = this_limits[j - 1] if j > 0 else 0
                upper = this_limits[j]
                handle = ax.bar(ticks, upper - lower, bottom=lower,
                                color=colors[j], label=self.names[j],
                                **plot_kwds)
                handles.append(handle)
            ax.set_title(self.names[i])
        # just use the last axis to get handles for plotting
        handles, labels = ax.get_legend_handles_labels()
        fig.legend(handles, labels, loc='upper right')
        plotting.adjust_subplots(right=0.85)
#-------------------------------------------------------------------------------
def _compute_acov(x, nlags=1):
x = x - x.mean(0)
result = []
for lag in xrange(nlags + 1):
if lag > 0:
r = np.dot(x[lag:].T, x[:-lag])
else:
r = np.dot(x.T, x)
result.append(r)
return np.array(result) / len(x)
def _acovs_to_acorrs(acovs):
sd = np.sqrt(np.diag(acovs[0]))
return acovs / np.outer(sd, sd)
if __name__ == '__main__':
    # Ad-hoc demonstration / smoke test using Lutkepohl's example dataset e1.
    import statsmodels.api as sm
    from statsmodels.tsa.vector_ar.util import parse_lutkepohl_data
    import statsmodels.tools.data as data_util
    np.set_printoptions(linewidth=140, precision=5)
    sdata, dates = parse_lutkepohl_data('data/%s.dat' % 'e1')
    names = sdata.dtype.names
    data = data_util.struct_to_ndarray(sdata)
    # Work with log-differences (growth rates) of the raw series.
    adj_data = np.diff(np.log(data), axis=0)
    # est = VAR(adj_data, p=2, dates=dates[1:], names=names)
    # Hold out the last 16 observations for out-of-sample checks.
    model = VAR(adj_data[:-16], dates=dates[1:-16], names=names)
    # model = VAR(adj_data[:-16], dates=dates[1:-16], names=names)
    est = model.fit(maxlags=2)
    irf = est.irf()
    y = est.y[-2:]
    """
    # irf.plot_irf()
    # i = 2; j = 1
    # cv = irf.cum_effect_cov(orth=True)
    # print np.sqrt(cv[:, j * 3 + i, j * 3 + i]) / 1e-2
    # data = np.genfromtxt('Canada.csv', delimiter=',', names=True)
    # data = data.view((float, 4))
    """
    '''
    mdata = sm.datasets.macrodata.load().data
    mdata2 = mdata[['realgdp','realcons','realinv']]
    names = mdata2.dtype.names
    data = mdata2.view((float,3))
    data = np.diff(np.log(data), axis=0)
    import pandas as pn
    df = pn.DataFrame.fromRecords(mdata)
    df = np.log(df.reindex(columns=names))
    df = (df - df.shift(1)).dropna()
    model = VAR(df)
    est = model.fit(maxlags=2)
    irf = est.irf()
    '''
| yarikoptic/pystatsmodels | statsmodels/tsa/vector_ar/var_model.py | Python | bsd-3-clause | 50,782 | [
"Gaussian"
] | f56534e61f6a7b7812fb27ac47b107425c9c51da1d946fdca2be8aa3010cbe0b |
# coding: utf-8
from __future__ import division, unicode_literals
"""
This module defines classes representing non-periodic and periodic sites.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 17, 2012"
import collections
import numpy as np
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element, Specie, DummySpecie,\
get_el_sp
from pymatgen.serializers.json_coders import PMGSONable
from pymatgen.util.coord_utils import pbc_diff
from pymatgen.core.composition import Composition
class Site(collections.Mapping, collections.Hashable, PMGSONable):
    """
    A generalized *non-periodic* site. This is essentially a composition
    at a point in space, with some optional properties associated with it. A
    Composition is used to represent the atoms and occupancy, which allows for
    disordered site representation. Coords are given in standard cartesian
    coordinates.
    """
    # Absolute tolerance used when comparing cartesian coordinates in __eq__.
    position_atol = 1e-5
    def __init__(self, atoms_n_occu, coords, properties=None):
        """
        Create a *non-periodic* site.
        Args:
            atoms_n_occu: Species on the site. Can be:
                i.  A sequence of element / specie specified either as string
                    symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
                    e.g., (3, 56, ...) or actual Element or Specie objects.
                ii. List of dict of elements/species and occupancies, e.g.,
                    [{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
                    disordered structures.
            coords: Cartesian coordinates of site.
            properties: Properties associated with the site as a dict, e.g.
                {"magmom": 5}. Defaults to None.
        """
        if isinstance(atoms_n_occu, collections.Mapping):
            self._species = Composition(atoms_n_occu)
            totaloccu = self._species.num_atoms
            if totaloccu > 1 + Composition.amount_tolerance:
                raise ValueError("Species occupancies sum to more than 1!")
            # Ordered means a single species with full occupancy.
            self._is_ordered = totaloccu == 1 and len(self._species) == 1
        else:
            # Single species given directly (string, atomic number, Element
            # or Specie); occupancy defaults to 1.
            self._species = Composition({get_el_sp(atoms_n_occu): 1})
            self._is_ordered = True
        self._coords = coords
        self._properties = properties if properties else {}
    @property
    def properties(self):
        """
        Returns a view of properties as a dict.
        """
        # Shallow copy so callers cannot mutate internal state.
        return {k: v for k, v in self._properties.items()}
    def __getattr__(self, a):
        #overriding getattr doesn't play nice with pickle, so we
        #can't use self._properties
        p = object.__getattribute__(self, '_properties')
        if a in p:
            return p[a]
        raise AttributeError(a)
    def distance(self, other):
        """
        Get distance between two sites.
        Args:
            other: Other site.
        Returns:
            Distance (float)
        """
        return np.linalg.norm(other.coords - self.coords)
    def distance_from_point(self, pt):
        """
        Returns distance between the site and a point in space.
        Args:
            pt: Cartesian coordinates of point.
        Returns:
            Distance (float)
        """
        return np.linalg.norm(np.array(pt) - self._coords)
    @property
    def species_string(self):
        """
        String representation of species on the site.
        """
        if self._is_ordered:
            return list(self._species.keys())[0].__str__()
        else:
            # Disordered site: list each species with its occupancy.
            sorted_species = sorted(self._species.keys())
            return ", ".join(["{}:{:.3f}".format(sp, self._species[sp])
                              for sp in sorted_species])
    @property
    def species_and_occu(self):
        """
        The species at the site, i.e., a Composition mapping type of
        element/species to occupancy.
        """
        return self._species
    @property
    def specie(self):
        """
        The Specie/Element at the site. Only works for ordered sites. Otherwise
        an AttributeError is raised. Use this property sparingly.  Robust
        design should make use of the property species_and_occu instead.
        Raises:
            AttributeError if Site is not ordered.
        """
        if not self._is_ordered:
            raise AttributeError("specie property only works for ordered "
                                 "sites!")
        return list(self._species.keys())[0]
    @property
    def coords(self):
        """
        A copy of the cartesian coordinates of the site as a numpy array.
        """
        return np.copy(self._coords)
    @property
    def is_ordered(self):
        """
        True if site is an ordered site, i.e., with a single species with
        occupancy 1.
        """
        return self._is_ordered
    @property
    def x(self):
        """
        Cartesian x coordinate
        """
        return self._coords[0]
    @property
    def y(self):
        """
        Cartesian y coordinate
        """
        return self._coords[1]
    @property
    def z(self):
        """
        Cartesian z coordinate
        """
        return self._coords[2]
    def __getitem__(self, el):
        """
        Get the occupancy for element
        """
        return self._species[el]
    def __eq__(self, other):
        """
        Site is equal to another site if the species and occupancies are the
        same, and the coordinates are the same to some tolerance. numpy
        function `allclose` is used to determine if coordinates are close.
        """
        if other is None:
            return False
        return self._species == other._species and \
            np.allclose(self._coords, other._coords,
                        atol=Site.position_atol) and \
            self._properties == other._properties
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        """
        Minimally effective hash function that just distinguishes between Sites
        with different elements.
        """
        # Collisions between same-element sites are acceptable; equality
        # still resolves them via __eq__.
        return sum([el.Z for el in self._species.keys()])
    def __contains__(self, el):
        return el in self._species
    def __len__(self):
        return len(self._species)
    def __iter__(self):
        return self._species.__iter__()
    def __repr__(self):
        return "Site: {} ({:.4f}, {:.4f}, {:.4f})".format(
            self.species_string, *self._coords)
    def __lt__(self, other):
        """
        Sets a default sort order for atomic species by electronegativity. Very
        useful for getting correct formulas.  For example, FeO4PLi is
        automatically sorted in LiFePO4.
        """
        if self._species.average_electroneg < other._species.average_electroneg:
            return True
        if self._species.average_electroneg > other._species.average_electroneg:
            return False
        # Tie-break on the species string for a deterministic ordering.
        if self.species_string < other.species_string:
            return True
        if self.species_string > other.species_string:
            return False
        return False
    def __str__(self):
        return "{} {}".format(self._coords, self.species_string)
    def as_dict(self):
        """
        Json-serializable dict representation for Site.
        """
        species_list = []
        for spec, occu in self._species.items():
            d = spec.as_dict()
            # Drop serializer bookkeeping from nested species dicts.
            del d["@module"]
            del d["@class"]
            d["occu"] = occu
            species_list.append(d)
        return {"name": self.species_string, "species": species_list,
                "xyz": [float(c) for c in self._coords],
                "properties": self._properties,
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}
    @classmethod
    def from_dict(cls, d):
        """
        Create Site from dict representation
        """
        atoms_n_occu = {}
        for sp_occu in d["species"]:
            # Distinguish Specie (real element + oxidation state), DummySpecie
            # (oxidation state on a non-element symbol) and plain Element.
            if "oxidation_state" in sp_occu and Element.is_valid_symbol(
                    sp_occu["element"]):
                sp = Specie.from_dict(sp_occu)
            elif "oxidation_state" in sp_occu:
                sp = DummySpecie.from_dict(sp_occu)
            else:
                sp = Element(sp_occu["element"])
            atoms_n_occu[sp] = sp_occu["occu"]
        props = d.get("properties", None)
        return cls(atoms_n_occu, d["xyz"], properties=props)
class PeriodicSite(Site, PMGSONable):
    """
    Extension of generic Site object to periodic systems.
    PeriodicSite includes a lattice system.
    """
    def __init__(self, atoms_n_occu, coords, lattice, to_unit_cell=False,
                 coords_are_cartesian=False, properties=None):
        """
        Create a periodic site.
        Args:
            atoms_n_occu: Species on the site. Can be:
                i.  A sequence of element / specie specified either as string
                    symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
                    e.g., (3, 56, ...) or actual Element or Specie objects.
                ii. List of dict of elements/species and occupancies, e.g.,
                    [{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
                    disordered structures.
            coords (3x1 array or sequence): Coordinates of site as fractional
                or cartesian coordinates.
            lattice: Lattice associated with the site
            to_unit_cell (bool): Translates fractional coordinate to the
                basic unit cell, i.e. all fractional coordinates satisfy 0
                <= a < 1. Defaults to False.
            coords_are_cartesian (bool): Set to True if you are providing
                cartesian coordinates. Defaults to False.
            properties (dict): Properties associated with the PeriodicSite,
                e.g., {"magmom":5}. Defaults to None.
        """
        self._lattice = lattice
        if coords_are_cartesian:
            self._fcoords = self._lattice.get_fractional_coords(coords)
            c_coords = coords
        else:
            self._fcoords = coords
            c_coords = lattice.get_cartesian_coords(coords)
        if to_unit_cell:
            # np.mod maps each fractional coordinate into [0, 1).
            self._fcoords = np.mod(self._fcoords, 1)
            c_coords = lattice.get_cartesian_coords(self._fcoords)
        super(PeriodicSite, self).__init__(atoms_n_occu, c_coords, properties)
    def __hash__(self):
        """
        Minimally effective hash function that just distinguishes between Sites
        with different elements.
        """
        return sum([el.Z for el in self._species.keys()])
    @property
    def lattice(self):
        """
        The lattice associated with the site.
        """
        return self._lattice
    @property
    def frac_coords(self):
        """
        A copy of the fractional coordinates of the site.
        """
        return np.copy(self._fcoords)
    @property
    def a(self):
        """
        Fractional a coordinate
        """
        return self._fcoords[0]
    @property
    def b(self):
        """
        Fractional b coordinate
        """
        return self._fcoords[1]
    @property
    def c(self):
        """
        Fractional c coordinate
        """
        return self._fcoords[2]
    @property
    def to_unit_cell(self):
        """
        Copy of PeriodicSite translated to the unit cell.
        """
        # NOTE(review): this property shares its name with the __init__
        # keyword argument of the same class; they are distinct things.
        return PeriodicSite(self._species, np.mod(self._fcoords, 1),
                            self._lattice, properties=self._properties)
    def is_periodic_image(self, other, tolerance=1e-8, check_lattice=True):
        """
        Returns True if sites are periodic images of each other.
        Args:
            other (PeriodicSite): Other site
            tolerance (float): Tolerance to compare fractional coordinates
            check_lattice (bool): Whether to check if the two sites have the
                same lattice.
        Returns:
            bool: True if sites are periodic images of each other.
        """
        if check_lattice and self._lattice != other._lattice:
            return False
        if self._species != other._species:
            return False
        # pbc_diff returns the minimum-image fractional difference.
        frac_diff = pbc_diff(self._fcoords, other._fcoords)
        return np.allclose(frac_diff, [0, 0, 0], atol=tolerance)
    def __eq__(self, other):
        # NOTE(review): unlike Site.__eq__, there is no `other is None`
        # guard here, so comparing against None raises -- confirm intended.
        return self._species == other._species and \
            self._lattice == other._lattice and \
            np.allclose(self._coords, other._coords,
                        atol=Site.position_atol) and \
            self._properties == other._properties
    def __ne__(self, other):
        return not self.__eq__(other)
    def distance_and_image_from_frac_coords(self, fcoords, jimage=None):
        """
        Gets distance between site and a fractional coordinate assuming
        periodic boundary conditions. If the index jimage of two sites atom j
        is not specified it selects the j image nearest to the i atom and
        returns the distance and jimage indices in terms of lattice vector
        translations. If the index jimage of atom j is specified it returns the
        distance between the i atom and the specified jimage atom, the given
        jimage is also returned.
        Args:
            fcoords (3x1 array): fcoords to get distance from.
            jimage (3x1 array): Specific periodic image in terms of
                lattice translations, e.g., [1,0,0] implies to take periodic
                image that is one a-lattice vector away. If jimage == None,
                the image that is nearest to the site is found.
        Returns:
            (distance, jimage): distance and periodic lattice translations
            of the other site for which the distance applies.
        """
        return self._lattice.get_distance_and_image(self._fcoords, fcoords,
                                                    jimage=jimage)
    def distance_and_image(self, other, jimage=None):
        """
        Gets distance and instance between two sites assuming periodic boundary
        conditions. If the index jimage of two sites atom j is not specified it
        selects the j image nearest to the i atom and returns the distance and
        jimage indices in terms of lattice vector translations. If the index
        jimage of atom j is specified it returns the distance between the ith
        atom and the specified jimage atom, the given jimage is also returned.
        Args:
            other (PeriodicSite): Other site to get distance from.
            jimage (3x1 array): Specific periodic image in terms of lattice
                translations, e.g., [1,0,0] implies to take periodic image
                that is one a-lattice vector away. If jimage == None,
                the image that is nearest to the site is found.
        Returns:
            (distance, jimage): distance and periodic lattice translations
            of the other site for which the distance applies.
        """
        return self.distance_and_image_from_frac_coords(other._fcoords, jimage)
    def distance(self, other, jimage=None):
        """
        Get distance between two sites assuming periodic boundary conditions.
        Args:
            other (PeriodicSite): Other site to get distance from.
            jimage (3x1 array): Specific periodic image in terms of lattice
                translations, e.g., [1,0,0] implies to take periodic image
                that is one a-lattice vector away. If jimage == None,
                the image that is nearest to the site is found.
        Returns:
            distance (float): Distance between the two sites
        """
        return self.distance_and_image(other, jimage)[0]
    def __repr__(self):
        return "PeriodicSite: {} ({:.4f}, {:.4f}, {:.4f}) [{:.4f}, {:.4f}, " \
               "{:.4f}]".format(self.species_string, self._coords[0],
                                self._coords[1], self._coords[2],
                                self._fcoords[0], self._fcoords[1],
                                self._fcoords[2])
    def as_dict(self):
        """
        Json-serializable dict representation of PeriodicSite.
        """
        species_list = []
        for spec, occu in self._species.items():
            d = spec.as_dict()
            # Drop serializer bookkeeping from nested species dicts.
            del d["@module"]
            del d["@class"]
            d["occu"] = occu
            species_list.append(d)
        return {"label": self.species_string, "species": species_list,
                "xyz": [float(c) for c in self._coords],
                "abc": [float(c) for c in self._fcoords],
                "lattice": self._lattice.as_dict(),
                "properties": self._properties,
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}
    @classmethod
    def from_dict(cls, d, lattice=None):
        """
        Create PeriodicSite from dict representation.
        Args:
            d (dict): dict representation of PeriodicSite
            lattice: Optional lattice to override lattice specified in d.
                Useful for ensuring all sites in a structure share the same
                lattice.
        Returns:
            PeriodicSite
        """
        atoms_n_occu = {}
        for sp_occu in d["species"]:
            # Same species-resolution logic as Site.from_dict.
            if "oxidation_state" in sp_occu and Element.is_valid_symbol(
                    sp_occu["element"]):
                sp = Specie.from_dict(sp_occu)
            elif "oxidation_state" in sp_occu:
                sp = DummySpecie.from_dict(sp_occu)
            else:
                sp = Element(sp_occu["element"])
            atoms_n_occu[sp] = sp_occu["occu"]
        props = d.get("properties", None)
        lattice = lattice if lattice else Lattice.from_dict(d["lattice"])
        return cls(atoms_n_occu, d["abc"], lattice, properties=props)
| rousseab/pymatgen | pymatgen/core/sites.py | Python | mit | 18,071 | [
"pymatgen"
] | f2cc40977599d4af003564713d9b891d391b007011c6a5bc6fa5148dcf8cc69b |
# coding: utf8
{
'': '',
' Quotas: %(quotas)s x%(quota_amount).2f': ' Quotas: %(quotas)s x%(quota_amount).2f',
' Transaction number: %s': ' Transaction number: %s',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': '%s rows deleted',
'%s rows updated': '%s rows updated',
'/absolute/folder/path': '/absolute/folder/path',
'About': 'About',
'Account': 'Cuenta',
'Accounting': 'Contabilidad',
'Accounts plan': 'Accounts plan',
'Actions': 'Actions',
'Activate period': 'Activate period',
'Active user: ': 'Usuario activo: ',
'Add article': 'Ingresar artículo',
'Add check': 'Ingresar cheque',
'Add item': 'Ingresar ítem',
'Add payment method': 'Ingresar método de pago',
'Add tax': 'Ingresar impuesto',
'Administrative interface': 'Interfaz administrativa',
'Administrative panel': 'Panel administrativo',
'Advanced': 'Avanzado',
'All tables modified': 'All tables modified',
'Allocate': 'Asignar',
'Allocate orders': 'Allocate orders',
'Allocated': 'Asignada/o',
'Amount': 'Importe',
'Appadmin': 'Appadmin',
'Apply payment': 'Apply payment',
'Archivo': 'Archivo',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Articles': 'Artículos',
'Articles list': 'Lista de artículos',
'Assign travel': 'Assign travel <translate>',
'Auto apply': 'Auto-aplicar',
'Available databases and tables': 'Available databases and tables',
'Ayuda': 'Ayuda',
'Back to list': 'Volver a la lista',
'Backup': 'Copia de seguridad',
'Bank': 'Bank',
'Banks': 'Banks',
'Batch': 'Batch',
'Bill': 'Bill',
'Bill checked': 'Bill checked',
'Billing': 'Facturación',
'Blank for price list values': 'En blanco para valores de la lista de precios',
'Branch': 'Sucursal',
'Branches': 'Sucursales',
'Browse': 'Explorar',
'By article': 'Por artículo',
'CA': 'CC',
'CRUD': 'ABM',
'CSV parameters file: /absolute/path/file_name.csv': 'CSV parameters file: /absolute/path/file_name.csv',
'CSV table files path: /absolute/path/tables_folder': 'CSV table files path: /absolute/path/tables_folder',
'Calculate movements difference....': 'Calcular diferencia de movimientos....',
'Calculated difference: %s': 'Calculated difference: %s',
'Cancel': 'Cancel',
'Cannot be empty': 'No puede ser vacío',
'Cash': 'Caja',
'Cash/transfer': 'Cash/transfer',
'Change': 'Cambiar',
'Change layout colors': 'Change layout colors',
'Change location': 'Cambiar ubicación',
'Change password': 'Cambiar la contraseña',
'Change stock': 'Cambiar existencias',
'Change update taxes value to %s': 'Cambiar/actualizar valor de impuesto a %s',
'Change user': 'Cambiar el usuario',
'Check to delete': 'Check to delete',
'Check to delete:': 'Check to delete:',
'Checks': 'Checks',
'Checks list': 'Checks list',
'Choose a concept': 'Seleccionar concepto',
'Choose a document type': 'Choose a document type',
'Choose a price list': 'Elija una lista de precios',
'Client IP': 'Cliente IP',
'Closing': 'Cierre',
'Code': 'Código',
'Collect': 'Collect',
'Color': 'Color',
'Compras': 'Compras',
'Concept': 'Concepto',
'Contabilidad': 'Contabilidad',
'Contact Group': 'Grupo de contactos',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Could not change': 'Could not change',
'Could not load the firm contact information': 'No se pudo cargar la información de contacto de empresa',
'Could not process the operation': 'Could not process the operation',
'Could not process the operation: it is not editable': 'Could not process the operation: it is not editable',
'Could not process the receipt': 'Could not process the receipt',
'Create': 'Crear',
'Create down payment': 'Create down payment <translate>',
'Create fee': 'Crear arancel',
'Create invoice': 'Crear factura',
'Create invoice batch': 'Create invoice batch',
'Create order': 'Crear pedido',
'Create payment': 'Create payment',
'Create/Edit orders': 'Crear/editar pedidos',
'Credit': 'Credit',
'Credit card': 'Tarjeta de crédito',
'Crm': 'Crm',
'Csv to db': 'Csv to db',
'Current account': 'Cuenta corriente',
'Current account calculated amount': 'Valor calculado de la cuenta corriente',
'Current account list/payments': 'Cuenta corriente: lista/pagos',
'Current account payment data': 'Información de pagos de cuenta corriente',
'Current account payment options': 'Current account payment options',
'Current account quotas': 'Cuotas de cuenta corriente',
'Current account report': 'Informe de cuenta corriente',
'Current account value: %s': 'Current account value: %s',
'Current accounts': 'Current accounts',
'Current accounts data': 'Current accounts data',
'Current accounts detail': 'Current accounts detail',
'Current accounts payment': 'Current accounts payment',
'Current accounts payments': 'Pagos de cuentas corrientes',
'Current accounts type': 'Current accounts type',
'Current accounts type: %(at)s': 'Current accounts type: %(at)s',
'Current language': 'Lenguaje actual',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'Customer': 'Deudor',
'Customer Panel': 'Panel de Clientes',
'Customer control panel': 'Panel de control de clientes',
'Customer control panel (requires registration and login)': 'Panel de control de clientes (requiere registro y autenticación)',
'Customer current account': 'Cuenta corriente de Deudor',
'Customer current account status': 'Customer current account status',
'Customer deletion date': 'Fecha de eliminación del deuddor',
'Customer firm name': 'Razón social del deudor',
'Customer panel': 'Customer panel',
'Customer starting date': 'Fecha de inicio del deudor',
'Customer/Supplier data': 'Customer/Supplier data',
'DB Model': 'DB Model',
'Database': 'Base de datos',
'Date': 'Date',
'Dates: ': 'Dates: ',
'Db to csv': 'Db to csv',
'Deactivate access levels': 'Desactivar niveles de acceso',
'Debit': 'Debit',
'Debt limit: %s': 'Debt limit: %s',
'Default': 'Default',
'Default salesperson': 'Vendedor por defecto',
'Delete value is %s': 'Delete value is %s',
'Delete:': 'Delete:',
'Description': 'Descripción',
'Design': 'Diseño',
'Desktop App': 'Aplicación de escritorio',
'Difference': 'Difference',
'Difference: %s': 'Diferencia: %s',
'Discount by customer': 'Descuento por deudor',
'Discount/Surcharges': 'Descuentos/Recargos',
'Discounts/Surcharges': 'Discounts/Surcharges',
'Document': 'Comprobante',
'Done': 'Done',
'Due date': 'Due date',
'E-mail': 'E-mail',
'Edit': 'Editar',
'Edit current record': 'Edit current record',
'Edit in movements': 'Edit in movements',
'Edit order number': 'Edit order number',
'Efectivo': 'Efectivo',
'Ending': 'Ending',
'Entries': 'Entries',
'Entries: %s': 'Ingresos: %s',
'Entry': 'Entry',
'Erasing record %s': 'Erasing record %s',
'Error trying to get the operation customer/supplier data from database': 'Error trying to get the operation customer/supplier data from database',
'Error: could not calculate the total debt.': 'Error: could not calculate the total debt.',
'Errors': 'Errors',
'Esta es la plantilla accounting/offset_account.html': 'Esta es la plantilla accounting/offset_account.html',
'Existencias': 'Existencias',
'Exits: %s': 'Salidas: %s',
'Facilitate collection': 'Facilitate collection <translate>',
'False if deferred payment (df), True if paid with cash, ch (check) or current account': 'Falso si es pago diferido (df), Verdadero si el pago es en efvo., ch (cheque) o cuenta corriente',
'Family': 'Family',
'Fax': 'Fax',
'Fee': 'Fee',
'Fees': 'Fees',
'Fees list': 'Fees list',
'File': 'Archivo',
'File CRUD': 'ABM Archivos',
'File name': 'File name',
'Financials': 'Financials',
'Finantial situation': 'Situación financiera',
'Firm': 'Razón social',
'First name': 'First name',
'Fiscal controller': 'Fiscal controller',
'For PostgreSQL databases. Use this option with care. A superuser database conection is required': 'For PostgreSQL databases. Use this option with care. A superuser database conection is required',
'For purchases: %(pt)s payment is recorded as concept id %s(c)': 'For purchases: %(pt)s payment is recorded as concept id %s(c)',
'For purchases: %s payment is recorded as concept id %s': 'Para compras: %s pago es registrado como concepto id %s',
'Form accepted': 'Form accepted',
'Form data: %(fd)s': 'Form data: %(fd)s',
'Form data: %s': 'Form data: %s',
'Forms': 'Formularios',
'Formulas': 'Formulas',
'Funds': 'Funds',
'Generate': 'Generar',
'GestionLibre': 'GestiónLibre',
'GestionLibre %(version)s': 'GestionLibre %(version)s',
'GestionLibre %s': 'GestionLibre %s',
'GestionLibre Prealpha v4': 'GestionLibre Prealpha v4',
'Group %(group_id)s created': 'Group %(group_id)s created',
'Group ID': 'ID de grupo',
'Group uniquely assigned to user %(id)s': 'Group uniquely assigned to user %(id)s',
'Header form': 'Header form',
'Help': 'Ayuda',
'ID': 'ID',
'Import': 'Importar',
'Import csv dir': 'Import csv dir',
'Import example db from CSV': 'Import example db from CSV',
'Import legacy tables': 'Import legacy tables',
'Import/Export': 'Import/Export',
'Increase/Decrease stock values': 'Increase/Decrease stock values',
'Increase/decrease stock values': 'Increase/decrease stock values',
'Index': 'Inicio',
'Initialize': 'Initialize',
'Insert movements element': 'Ingresar elemento de movimientos',
'Insert order element': 'Insert order element',
'Installment': 'Installment',
'Installment created': 'Installment created',
'Installments': 'Planes de pago',
'Insufficient source stock quantity': 'Insufficient source stock quantity',
'Insufficient stock value.': 'Insufficient stock value.',
'Internal State': 'Internal State',
'Invalid Query': 'Invalid Query',
'Invalid email': 'Invalid email',
'Invalid login': 'Invalid login',
'Invoice header type': 'Tipo de encabezado de factura',
'Item added': 'Item added',
'Item value input: %s': 'Item value input: %s',
'Journal Entries': 'Libros diarios',
'Journal Entry': 'Libro diario',
'Journal entries': 'Libros diarios',
'Journal entry': 'Journal entry',
'Journal entry total amount': 'Suma total del libro diario',
'Label': 'Etiqueta',
'Labels': 'Labels',
'Languages': 'Lenguajes',
'Last name': 'Last name',
'Layout': 'Layout',
'Layout colors': 'Colores de la interfaz',
'List fees': 'List fees',
'List installments': 'List installments',
'List of operation elements': 'Lista de elementos de la operación',
'List of operations': 'Lista de operaciones',
'List order allocation operations': 'Lista de operaciones de asignaciones de pedidos',
'List order allocations': 'Lista de asignaciones de pedidos',
'Lists': 'Lists',
'Logged in': 'Logged in',
'Logged out': 'Logged out',
'Login': 'Iniciar sesión',
'Login accepted': 'Login accepted',
'Logout': 'Terminar sesión',
'Lost password?': 'Lost password?',
'Map': 'Mapeo',
'Menu Model': 'Menu Model',
'Migration': 'Migration',
'Model': 'Modelo',
'Modify header': 'Modificar encabezado',
'Modify movements element': 'Modify movements element',
'Modify operation item': 'Modify operation item',
'Modify operation number': 'Modificar número de operación',
'Modify sales order element': 'Modify sales order element',
'Move stock items': 'Move stock items',
'Movement (offset): %(mo)s: %(a)s': 'Movement (offset): %(mo)s: %(a)s',
'Movements': 'Movimientos',
'Movements (Operations)': 'Movimientos (operaciones)',
'Movements add check': 'Movements add check',
'Movements add discount surcharge': 'Movements add discount surcharge',
'Movements add item': 'Movements add item',
'Movements add payment method': 'Movements add payment method',
'Movements add tax': 'Movements add tax',
'Movements articles': 'Movements articles',
'Movements current account concept': 'Movements current account concept',
'Movements current account data': 'Movements current account data',
'Movements current account quotas': 'Movements current account quotas',
'Movements detail': 'Detalle de operación',
'Movements element': 'Movements element',
'Movements header': 'Movements header',
'Movements list': 'Lista de movimientos',
'Movements modify check': 'Movements modify check',
'Movements modify element': 'Movements modify element',
'Movements modify header': 'Movements modify header',
'Movements modify item': 'Movements modify item',
'Movements option update stock': 'Movements option update stock',
'Movements option update taxes': 'Movements option update taxes',
'Movements panel': 'Panel de movimientos',
'Movements price list': 'Movements price list',
'Movements process': 'Movements process',
'Movements process. Operation: %s': 'Registrar movimientos. Operación: %s',
'Movements select': 'Movements select',
'Movements select warehouse': 'Movements select warehouse',
'Movements start': 'Movements start',
'Moving to new record': 'Moving to new record',
'Name': 'Nombre',
'New Record': 'New Record',
'New customer': 'New customer',
'New customer order element': 'New customer order element',
'New customer order modify element': 'New customer order modify element',
'New expenses invoice': 'New expenses invoice',
'New fee': 'New fee',
'New function': 'New function',
'New installment': 'Nuevo plan de pago',
'New invoice': 'New invoice',
'New operation': 'Nueva operación',
'New operation (movements form)': 'Nueva operación (formulario de movimientos)',
'New operation check': 'New operation check',
'New operation item': 'Nuevo ítem de operación',
'New operation tax': 'New operation tax',
'New option': 'Nueva opción',
'New option created.': 'New option created.',
'New order allocation': 'New order allocation',
'New packing slip from this allocation': 'Nuevo remito desde esta asignación de pedidos',
'New query': 'Nueva consulta',
'New subcustomer': 'New subcustomer',
'No databases in this application': 'No databases in this application',
'No document type specified': 'No document type specified',
'No tax id selected': 'No tax id selected',
'None selected': 'No se seleccionó un elemento',
'Number': 'Número',
'Object or table name': 'Nombre de tabla u objeto',
'Observations': 'Observaciones',
'Operation': 'Operación',
'Operation %(operation)s is not editable': 'La operación %(operation)s no se puede editar',
'Operation %s is not editable': 'La operación %s no es editable',
'Operation detail': 'Detalle de la operación',
'Operation details: %s': 'Operation details: %s',
'Operation discounts and surcharges': 'Descuentos y recargos de la operación',
'Operation header': 'Encabezado de la operación',
'Operation header incomplete. Please select a document type': 'Operation header incomplete. Please select a document type',
'Operation id(s): %s': 'Operation id(s): %s',
'Operation installment': 'Operation installment',
'Operation modified': 'Operación modificada',
'Operation number %(id)s': 'Operation number %(id)s',
'Operation number %s': 'Número de operación %s',
'Operation processed': 'Operation processed',
'Operation processing failed: debt limit reached': 'Operation processing failed: debt limit reached',
'Operation processing result': 'Resultado del registro de la operación',
'Operation successfully processed': 'La operación se registró correctamente',
'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s': 'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s',
'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s, Movement: %(m)s': 'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s, Movement: %(m)s',
'Operation: %s. Amount: %s. Value: %s. Concept: %s, Quantity: %s, Movement: %s': 'Operación: %s. Importe: %s. Valor: %s. Concepto: %s, Cantidad: %s, Movimiento: %s',
'Operations': 'Operaciones',
'Operations list': 'Lista de operaciones',
'Option': 'Option',
'Option modified.': 'Option modified.',
'Options': 'Opciones',
'Order allocation': 'Asignación de pedidos',
'Order allocation %s': 'Order allocation %s',
'Order allocation list': 'Lista de asignación de pedidos',
'Order list': 'Lista de pedidos',
'Order number': 'Order number',
'Ordered': 'Pedido/a',
'Origin': 'Origen',
'Other': 'Otros',
'Output': 'Output',
'Packing slip': 'Remito',
'Page setup': 'Configurar página',
'Parameters': 'Parámetros',
'Passages': 'Passages',
'Password': 'Password',
"Password fields don't match": "Password fields don't match",
'Password reset': 'Reiniciar contraseña',
'Pay': 'Pay',
'Per item printing': 'Impresión por ítem',
'Period': 'Ciclo/Período',
'Please choose different warehouses': 'Please choose different warehouses',
"Please insert your firm's tax id": 'Por favor ingrese la identificación tributaria de su empresa',
'Points to order / invoice / packingslips': 'Apunta a pedidos / facturas / remitos',
'Populate tables': 'Populate tables',
'Populate_with_legacy_db Insert Error: Table %(table)s, row %(n)s: %(e)s': 'Populate_with_legacy_db Insert Error: Table %(table)s, row %(n)s: %(e)s',
'Post register specify firm': 'Post register specify firm',
'Post registration form': 'Post registration form',
'Post-registration form': 'Formulario post-registro',
'Postal address': 'Dirección postal',
'Posted': 'Registrado',
'Predefine documents': 'Predefinir comprobantes',
'Price check': 'Price check',
'Price list': 'Lista de precios',
'Price lists': 'Price lists',
'Prices': 'Precios',
'Print this document': 'Imprimir este documento',
'Print...': 'Impresión...',
'Process': 'Registrar',
'Process jurisdictions': 'Procesar jurisdicciones',
'Process operation': 'Registrar operación',
'Processes': 'Processes',
'Product': 'Producto',
'Product billing': 'Product billing',
'Product code': 'Código de producto',
'Production': 'Production',
'Profile': 'Profile',
'Prototype app': 'Prototype app',
'Purchases': 'Compras',
'Quantity': 'Cantidad',
'Queries': 'Consultas',
'Query:': 'Query:',
'Quit': 'Salir',
'Quota': 'Quota',
'Quotas': 'Quotas',
'RIA Create/Edit operations': 'Modo RIA crear/editar operaciones',
'RIA Product billing': 'Modo RIA facturación de productos',
'RIA Receipt': 'Modo RIA recibos',
'RIA Stock': 'Modo RIA existencias',
'RIA Stock main menu': 'RIA Stock main menu',
'Read': 'Read',
'Receipt items list': 'Receipt items list',
'Receipt number': 'Receipt number',
'Receipt processed': 'Receipt processed',
'Receipts list': 'Receipts list',
'Receive': 'Recibir',
'Record %(id)s created': 'Record %(id)s created',
'Record %(id)s updated': 'Record %(id)s updated',
'Record Created': 'Record Created',
'Record ID': 'ID del registro',
'Record Updated': 'Record Updated',
'Record updated': 'Record updated',
'Redirecting from event': 'Redirecting from event',
'Referenced table': 'Tabla referenciada',
'Register': 'Registrarse',
'Registration': 'Registration',
'Registration identifier': 'Registration identifier',
'Registration key': 'Registration key',
'Registration successful': 'Registration successful',
'Remember me (for 30 days)': 'Remember me (for 30 days)',
'Replica': 'Replica',
'Reportes': 'Reportes',
'Reports': 'Reportes',
'Reset': 'Reiniciar',
'Reset Password key': 'Reset Password key',
'Reset operation': 'Reiniciar operación',
'Reset order': 'Reset order',
'Reset packing slip': 'Reset packing slip',
'Reset receipt': 'Reset receipt',
'Revert payment application': 'Revert payment application',
'Ria movements': 'Ria movements',
'Ria movements process': 'Ria movements process',
'Ria movements reset': 'Ria movements reset',
'Ria new customer order': 'Ria new customer order',
'Ria new customer order reset': 'Ria new customer order reset',
'Ria product billing': 'Ria product billing',
'Ria product billing start': 'Ria product billing start',
'Ria stock': 'Ria stock',
'Role': 'Rol',
'Rows in table': 'Rows in table',
'Rows selected': 'Rows selected',
'SCM': 'SCM',
'Sales': 'Ventas',
'Sales contact': 'Contacto de ventas',
'Scm': 'Scm',
'Se requiere un usuario autenticado': 'Se requiere un usuario autenticado',
'Securities': 'Securities',
'Security policies': 'Políticas de seguridad',
'Select': 'Select',
'Select an operation type': 'Seleccione una clase de operación',
'Select price list': 'Selecciones una lista de precios',
'Select warehouse': 'Seleccione un depósito',
'Selection action: %s': 'Selection action: %s',
'Send': 'Enviar',
'Session closed by user input': 'Sesión finalizada por acción del usuario',
'Session data: %s': 'Session data: %s',
'Set colors as default': 'Establecer como colores por defecto',
'Set default layout colors': 'Set default layout colors',
'Set language': 'Set language',
'Set options': 'Set options',
'Setting offset concept to %s': 'Setting offset concept to %s',
'Setup': 'Configuración',
'Specify firm': 'Especificar razón social',
'Starting': 'Starting',
'Stock': 'Existencias',
'Stock item update': 'Stock item update',
'Stock list': 'Listado de existencias',
'Stock movement': 'Movimiento de existencias',
'Stock query': 'Consulta de existencias',
'Stock updated': 'Stock updated',
'Stock value changed': 'Stock value changed',
'Storage folder': 'Storage folder',
'Structures': 'Structures',
'Stylesheet': 'Stylesheet',
'Subcustomer': 'Cliente',
'Subcustomer current account': 'Cuenta corriente cliente',
'Submit': 'Submit',
'Summary': 'Summary',
'Supplier': 'Proveedor',
'System tables': 'Tablas del sistema',
'TAX ID': 'Identificación impositiva',
'Tables': 'Tables',
'Tax _id': 'Tax _id',
'Tax id': 'Clave impositiva',
'Taxes are': 'Acción para impuestos',
'Telephone numbers': 'Números telefónicos',
'Terms of payment': 'Terms of payment <translate>',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The CSV data was stored at your web2py root folder': 'The CSV data was stored at your web2py root folder',
'The db load failed with these errors: ': 'The db load failed with these errors: ',
'The db records were uploaded correctly': 'The db records were uploaded correctly',
'The following operations were created': 'The following operations were created',
'The form has errors': 'The form has errors',
'The item specified was not found in the warehouse': 'The item specified was not found in the warehouse',
'The item will be removed without confirmation': 'Se eliminará el ítem sin confirmación',
'The links': 'Enlaces',
'The operation has current account movements: %s': 'The operation has current account movements: %s',
'The operation processing failed. Booking ok: %(rs)s. Stock ok: %(st)s': 'The operation processing failed. Booking ok: %(rs)s. Stock ok: %(st)s',
'The user entered does not exist': 'The user entered does not exist',
'This action requires authenticated users': 'Se requiere un usuario autenticado',
'This is the webapp index view of': 'Esta es la vista inicial de la interfaz web de',
'Timestamp': 'Fecha y hora',
'Total': 'Total',
'Total amount': 'Monto total',
'Total debt': 'Total adeudado',
'Transfers': 'Transferencias',
'Trying with': 'Trying with',
'Type of current account': 'Tipo de cuenta corriente',
'Update': 'Actualización',
'Update fee': 'Update fee',
'Update installment': 'Update installment',
'Update order allocation': 'Actualizar asignación de pedido',
'Update quota': 'Update quota',
'Update:': 'Update:',
'Updating stock id: %(st)s as %(vl)s': 'Updating stock id: %(st)s as %(vl)s',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User': 'User',
'User %(id)s Logged-in': 'User %(id)s Logged-in',
'User %(id)s Logged-out': 'User %(id)s Logged-out',
'User %(id)s Registered': 'User %(id)s Registered',
'User ID': 'ID de usuario',
'VAT sub-journal': 'Subdiario IVA',
"Valid firm tax id's": 'Identificación tributaria válida',
'Value': 'Valor',
'Values: %s': 'Values: %s',
'Various': 'Varios',
'Ventanas': 'Ventanas',
'Ventas': 'Ventas',
'Verify': 'Verificar',
'Verify Password': 'Verify Password',
'View': 'View',
'WARNING: JOURNAL ENTRY IS UNBALANCED': 'WARNING: JOURNAL ENTRY IS UNBALANCED',
'Warehouse': 'Depósito',
'Warning! Wrong document type.': 'Warning! Wrong document type.',
'Web interface': 'Interfaz web',
'Welcome': 'Welcome',
'Welcome to web2py and GestionLibre': 'Welcome to web2py and GestionLibre',
'Wiki': 'Wiki',
'Windows': 'Ventanas',
"You have not specified you firm's TAX ID. Please visit the": "You have not specified you firm's TAX ID. Please visit the",
'abbr': 'abrev',
'account': 'cuenta',
'accounting': 'accounting',
'accounting period': 'Ejercicio contable',
'accumulated': 'acumulada/o',
'addition': 'ingresado/a',
'additions': 'ingresos',
'address': 'direcciones',
'adherent': 'adherente',
'agreement': 'acuerdo',
'aliquot': 'alícuota',
'allowance': 'allowance <translate>',
'amount': 'importe',
'and try again': 'and try again',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin is disabled because insecure channel',
'args': 'args',
'authorization code': 'código de autorización',
'avoidance': 'avoidance <translate>',
'balance': 'balance',
'balanced': 'balanceado',
'bank': 'banco',
'bank check': 'cheque',
'bank checks': 'cheques',
'banks': 'bancos',
'bd': 'bd',
'birth': 'nacimiento',
'books': 'books <translate>',
'bouncer': 'rechazado',
'branch': 'sucursal',
'budget': 'budget <translate>',
'cache': 'cache',
'calculate': 'calcular',
'canceled': 'cancelada/o',
'cancellation': 'cancelación',
'capacity': 'capacidad',
'cash': 'Caja',
'cash box': 'caja',
'category': 'categoría',
'check limit': 'límite de cheques',
'checkbook': 'chequera',
'city': 'ciudad',
'closed': 'cerrada/o',
'code': 'código',
'coefficient': 'coeficiente',
'collected': 'cobrada/o',
'collection': 'colección',
'collections': 'colecciones',
'color': 'color',
'commission': 'comisión',
'compress': 'comprimir',
'concept': 'concepto',
'condition': 'condición',
'confirm printing': 'confirmar impresión',
'contact': 'contacto',
'continuous': 'continuo',
'contribution': 'contribución',
'contribution discount': 'descuento por contribución',
'copies': 'copias',
'cost center': 'centro de costo',
'countable': 'contable',
'country': 'país',
'coupons': 'cupones',
'credit': 'crédito',
'crm': 'crm',
'current account': 'cuenta corriente',
'current account limit': 'límite de cuenta corriente',
'customer': 'deudor',
'customer group': 'grupo deudores',
'customize me!': 'customize me!',
'data uploaded': 'data uploaded',
'database': 'database',
'database %s select': 'database %s select',
'datum': 'datum <translate>',
'days': 'días',
'db': 'db',
'debit': 'débito',
'debt limit': 'límite de deuda',
'default': 'default',
'deletion': 'eliminación',
'department': 'departamento',
'description': 'descripción',
'descriptions': 'descripciones',
'design': 'design',
'desired': 'deseada/o',
'detail': 'detalle',
'disabled': 'deshabilitada/o',
'discount': 'descuento',
'discounts': 'descuentos',
'discriminate': 'discriminar',
'discriminated': 'discriminada/o',
'document': 'comprobante',
'document purchases': 'comprobante de compras',
'document sales': 'comprobante de ventas',
'does not update stock': 'no actualizar las existencias',
'done!': 'done!',
'down payment': 'down payment <translate>',
'draft': 'borrador',
'due date': 'fecha de vencimiento',
'due_date': 'fecha de vencimiento',
'email': 'email',
'ending': 'finaliza',
'ending quota': 'última cuota',
'enter a number between %(min)g and %(max)g': 'ingrese un número entre %(min)g y %(max)g',
'enter an integer between %(min)g and %(max)g': 'ingrese un entero entre %(min)g y %(max)g',
'enter from %(min)g to %(max)g characters': 'ingrese de %(min)g a %(max)g caracteres',
'entry': 'ingreso',
'exchanged': 'intercambiada/o',
'exit': 'salida',
'expenditure': 'gasto',
'export as csv file': 'export as csv file',
'extra': 'extra',
'extra hours': 'horas extra',
'extras': 'extras',
'failure': 'inasistencias',
'family': 'familia',
'fax': 'fax',
'fee': 'arancel',
'fees': 'aranceles',
'file': 'archivo',
'filename.ext': 'filename.ext',
'financials': 'financials',
'first due': 'primer vencimiento',
'first name': 'nombre',
'fiscal': 'fiscal',
'fiscal controller': 'Controlador fiscal',
'fixed': 'fija/o',
'floor': 'piso',
'form': 'formulario',
'format': 'formato',
'formula': 'fórmula',
'from table': 'from table',
'fund': 'fondo',
'government increase': 'aumento del gobierno',
'gross receipts': 'ingresos brutos',
'half bonus': 'medio aguinaldo',
'healthcare': 'obra social',
'hour': 'hora',
'hourly': 'horaria/o',
'i.e. third party payment transaction number': 'i.e. third party payment transaction number',
'id': 'id',
'id 1': 'id 1',
'id number': 'número de id',
'identity card': 'tarjeta identificatoria',
'index value': 'valor de índice',
'insert new': 'insert new',
'insert new %s': 'insert new %s',
'installment': 'plan de pago',
'interests': 'intereses',
'internal': 'interna/o',
'invalid request': 'invalid request',
'invert': 'invertir',
'invoice': 'factura',
'invoices': 'facturas',
'issue': 'issue <translate>',
'journal entry': 'libro diario',
'journalized': 'journalized <translate>',
'jurisdiction': 'jurisdicción',
'kinship': 'parentezco',
'labor union': 'sindicato',
'language': 'lenguaje',
'large family': 'familia numerosa',
'last name': 'apellido',
'late payment': 'pago con retraso',
'legal name': 'razón social',
'lines': 'líneas',
'liquidated': 'liquidado',
'liquidation': 'liquidación',
'lot': 'lote',
'marital status': 'estado civil',
'measure': 'unidad de medida',
'migration': 'migration',
'module': 'módulo',
'month': 'mes',
'monthly amount': 'importe mensual',
'movement': 'movimiento',
'msg': 'msg',
'multiple pages': 'múltiples páginas',
'name': 'nombre',
'nationality': 'nacionalidad',
'nationality id': 'id de nacionalidad',
'net': 'neto',
'new record inserted': 'new record inserted',
'next': 'próxima/o',
'next 100 rows': 'next 100 rows',
'not logged in': 'no autenticado',
'not updated': 'no actualizadar',
'notes': 'notas',
'number': 'número',
'observations': 'observaciones',
'operation': 'operación',
'operation 1': 'operación 1',
'operation 2': 'operación 2',
'operations': 'operations',
'or import from csv file': 'or import from csv file',
'order number': 'número de orden',
'orderable': 'asignable a pedidos',
'orders': 'pedidos',
'other': 'otras/os',
'output': 'output',
'own': 'propia/o',
'packing slips': 'remitos',
'pages': 'páginas',
'paid': 'paga/o',
'paid quotas': 'cuotas pagas',
'paid vacation': 'vacaciones pagas',
'password': 'contraseña',
'patronal': 'patronal',
'payment': 'pago',
'payment method': 'payment method <translate>',
'payment terms': 'payment terms <translate>',
'payroll': 'payroll <translate>',
'pension': 'jubilación',
'per diem': 'per diem <translate>',
'percentage': 'porcentaje',
'place of delivery': 'lugar de entrega',
'plant': 'planta',
'please input your password again': 'please input your password again',
'point of sale': 'punto de venta',
'posted': 'hora/fecha de registro',
'preprinted': 'preimpreso',
'presentation': 'presentación',
'presenteesm': 'presentismo',
'presenteesm discount': 'descuento de presentismo',
'previous 100 rows': 'previous 100 rows',
'price': 'precio',
'price list': 'lista de precios',
'printed': 'impreso',
'printer': 'impresora',
'prints': 'imprime',
'priority': 'prioridad',
'processed': 'registrado',
'products': 'productos',
'profit percentage': 'porcentaje de ganancias',
'quantity': 'cantidad',
'quantity 1': 'cantidad 1',
'quantity 2': 'cantidad 2',
'queries': 'consultas',
'quota': 'cuota',
'quotas': 'cuotas',
'rate': 'rate <translate>',
'receipt': 'recibo',
'receipts': 'recibos',
'receives': 'recibe',
'record': 'record',
'record does not exist': 'record does not exist',
'record id': 'record id',
'registration': 'registration',
'registration key': 'clave de registro',
'rejection': 'rechazo',
'remunerative': 'remunerativa/o',
'repair': 'reparar',
'replica': 'replica',
'replicate': 'replicar',
'replicated': 'replicada/o',
'represent': 'represent',
'requires': 'requires',
'reserved': 'reservada/o',
'reset password key': 'clave para reconfigurar contraseña',
'retentions': 'retenciones',
'role': 'rol',
'salary': 'salario',
'salesperson': 'personal de ventas',
'schedule': 'agenda',
'schooling': 'escolaridad',
'scm': 'scm',
'scrap': 'scrap <translate>',
'second due': 'segundo vencimiento',
'selected': 'selected',
'seniority': 'antigüedad',
'seniority years': 'años de antigüedad',
'separate': 'separada/o',
'session.difference :%s': 'session.diferencia :%s',
'setup': 'setup',
'sex': 'sexo',
'sick days': 'inasistencia por enfermedad',
'situation': 'situación',
'size': 'tamaño',
'social services': 'social services <translate>',
'source': 'fuente',
'spouse': 'esposa',
'staff': 'personal',
'staff category': 'categoría de personal',
'starting': 'comienza',
'starting quota': 'cuota inicial',
'state': 'estado',
'statement': 'statement <translate>',
'stock': 'existencias',
'stock quantity': 'cantidad en existencia',
'street': 'calle',
'subcategory': 'subcategoría',
'subcustomer': 'cliente',
'subject': 'asunto',
'supplier': 'proveedor',
'surcharge': 'recargo',
'surcharges': 'recargos',
'suspended': 'suspendida/o',
'table': 'table',
'table number': 'número de tabla',
'tax': 'impuesto',
'tax identificar': 'identificar impuesto',
'tax identification': 'clave impositiva',
'taxed': 'gravada/o',
'telephone': 'teléfono',
'term': 'término',
'text': 'texto',
'ticket': 'ticket',
'times': 'times <translate>',
'transport': 'transporte',
'type': 'tipo',
'unable to parse csv file': 'unable to parse csv file',
'unitary': 'unitaria/o',
'units': 'unidades',
'updated': 'actualizar',
'updates stock': 'actualizar existencias',
'upper limit': 'límite superior',
'user': 'usuario',
'vacations': 'vacaciones',
'valuation': 'valuación',
'value': 'valor',
'value already in database or empty': 'valor en la base de datos o vacío',
'value not in database': 'value not in database',
'voided': 'anulado',
'voluntary': 'voluntaria/o',
'warehouse': 'depósito',
'with old record': 'with old record',
'year': 'año',
'zip code': 'código postal',
}
| reingart/gestionlibre | languages/es-es.py | Python | agpl-3.0 | 34,263 | [
"VisIt"
] | 400624179e0fb433f68a7b84804b93b1aeecd90a92cdb01ba402930a9cfbd4bf |
#!/usr/bin/python2
#NOTE: Run this plot script from the directory deft/papers/fuzzy-fmt with the command
# ./new-melting_anyplot_script.py [directory where data is stored] --f[fixed quantity] [value of fixed quantity] --x[x quantity] --y[y quantity]
#to create plots from plot.dat files already in the data directory.
#e.g. ENTER ./new-melting_anyplot_script.py pears --ftemp 2 --ydiff --xgw --ptname addsomethingtoplotname
#to plot diff_free_energy vs gw at fixed T=2
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
import sys
# Build the command-line interface.  Exactly one fixed quantity (--ftemp or
# --fdensity), exactly one x-axis quantity and exactly one y-axis quantity
# must be chosen; argparse mutually-exclusive groups enforce this.
parser = argparse.ArgumentParser(description='Creates a plot.', epilog="stuff...")
groupf = parser.add_mutually_exclusive_group(required=True)
groupx = parser.add_mutually_exclusive_group(required=True)
groupy = parser.add_mutually_exclusive_group(required=True)

parser.add_argument('directory', metavar='existing_data_directory', type=str,
                    help='existing directory for data files')
groupf.add_argument('--ftemp', action="store_true",
                    help='use plot.dat file with this fixed temperature')
groupf.add_argument('--fdensity', action="store_true",
                    help='use plot.dat file with this fixed density')
parser.add_argument('value', metavar='value_of_fixed_quantity', type=str,
                    help='use plot.dat file with this fixed value')

# Free-form labels/name fragments (only meaningful with --xcol/--ycol/--ptname).
parser.add_argument('--xlab', metavar='label for x-axis', type=str,
                    help='label for x-axis. use with --xcol.')
parser.add_argument('--ylab', metavar='label for y-axis', type=str,
                    help='label for y-axis. use with --ycol.')
parser.add_argument('--ptname', metavar='include in name of plot', type=str,
                    help='info added to plot name')

# x-axis choices (exactly one required)
groupx.add_argument('--xtemp', action="store_true",
                    help='temperature on x-axis')
groupx.add_argument('--xdensity', action="store_true",
                    help='density on x-axis')
groupx.add_argument('--xcfe', action="store_true",
                    help='crystal free energy/atom on x-axis')
groupx.add_argument('--xhfe', action="store_true",
                    help='homogeneous free energy/atom on x-axis')
groupx.add_argument('--xdiff', action="store_true",
                    help='diff in free energy on x-axis')
groupx.add_argument('--xfv', action="store_true",
                    help='fraction of vacancies (fv) on x-axis')
groupx.add_argument('--xgw', action="store_true",
                    help='Gaussian width on x-axis')
groupx.add_argument('--xcol', metavar='column for x-axis data', type=int,
                    help='column for x-axis data')

# y-axis choices (exactly one required)
groupy.add_argument('--ytemp', action="store_true",
                    help='temperature on y-axis')
groupy.add_argument('--ydensity', action="store_true",
                    help='density on y-axis')
groupy.add_argument('--ycfe', action="store_true",
                    help='crystal free energy/atom on y-axis')
groupy.add_argument('--yhfe', action="store_true",
                    help='homogeneous free energy/atom on y-axis')
groupy.add_argument('--ydiff', action="store_true",
                    help='diff in free energy on y-axis')
groupy.add_argument('--yfv', action="store_true",
                    help='fraction of vacancies (fv) on y-axis')
groupy.add_argument('--ygw', action="store_true",
                    help='Gaussian width on y-axis')
groupy.add_argument('--ycol', metavar='column for y-axis data', type=int,
                    help='column for y-axis data')
args = parser.parse_args()
data_directory = args.directory

# Record which quantity was held fixed when the data was generated; the tag
# is used below when constructing the plot title and output file name.
if args.ftemp:
    fixed_quantity = "kT"
elif args.fdensity:
    fixed_quantity = "n"
fixed_value = args.value

# Every run writes its results to a single plot.dat inside the data directory.
data_file = data_directory + "/plot.dat"
thisdata = np.loadtxt(data_file)

# Single-argument print() behaves identically under Python 2 and Python 3.
print("")
print("Using data from file:" + data_file)
print("")
# Select the x-axis data, its axis label, and the short tag used in the saved
# plot's file name.  plot.dat column layout (as reflected by the labels
# below): 0=kT, 1=n, 2=fv, 3=gwidth, 4=homogeneous FE, 5=crystal FE, 6=diff.
if args.xtemp:
    x_axis = thisdata[:, 0]
    x_label = "Temperature (kT)"
    x_plot = "kT"
elif args.xdensity:
    x_axis = thisdata[:, 1]
    x_label = "Reduced Density (n)"
    x_plot = "n"
elif args.xfv:
    x_axis = thisdata[:, 2]
    x_label = "Fraction of vacancies (fv)"
    x_plot = "fv"
elif args.xgw:
    x_axis = thisdata[:, 3]
    x_label = "Width of Gaussian (gwidth)"
    x_plot = "gw"
elif args.xhfe:
    x_axis = thisdata[:, 4]
    x_label = "Homogeneous Free Energy/atom"
    x_plot = "hFE"
elif args.xcfe:
    x_axis = thisdata[:, 5]
    x_label = "Crystal Free Energy/atom"
    x_plot = "cFE"
elif args.xdiff:
    x_axis = thisdata[:, 6]
    x_label = "Diff=(cFE-hFE)/atom"
    x_plot = "DiffFE"
elif args.xcol is not None:
    # Compare against None (not truthiness) so that "--xcol 0" selects
    # column 0 instead of silently leaving x_axis undefined.
    x_axis = thisdata[:, args.xcol]
    x_label = args.xlab
    x_plot = args.xlab
# Select the y-axis data, label, and file-name tag (mirrors the x-axis logic;
# plot.dat columns: 0=kT, 1=n, 2=fv, 3=gwidth, 4=hFE, 5=cFE, 6=diff).
if args.ytemp:
    y_axis = thisdata[:, 0]
    y_label = "Temperature (kT)"
    y_plot = "kT"
elif args.ydensity:
    y_axis = thisdata[:, 1]
    y_label = "Reduced Density (n)"
    y_plot = "n"
elif args.yfv:
    y_axis = thisdata[:, 2]
    y_label = "Fraction of vacancies (fv)"
    y_plot = "fv"
elif args.ygw:
    y_axis = thisdata[:, 3]
    y_label = "Width of Gaussian (gwidth)"
    y_plot = "gw"
elif args.yhfe:
    y_axis = thisdata[:, 4]
    y_label = "Homogeneous Free Energy/atom"
    y_plot = "hFE"
elif args.ycfe:
    y_axis = thisdata[:, 5]
    y_label = "Crystal Free Energy/atom"
    y_plot = "cFE"
elif args.ydiff:
    y_axis = thisdata[:, 6]
    y_label = "Diff=(cFE-hFE)/atom"
    y_plot = "DiffFE"
elif args.ycol is not None:
    # Compare against None (not truthiness) so that "--ycol 0" selects
    # column 0 instead of silently leaving y_axis undefined.
    y_axis = thisdata[:, args.ycol]
    y_label = args.ylab
    y_plot = args.ylab
# Build the output PNG name (optionally suffixed with --ptname) and the title.
plot_name=data_directory+"/plot_"+y_plot+"vs"+x_plot+"_"+fixed_quantity+fixed_value+".png"
if args.ptname:
    plot_name=data_directory+"/plot_"+y_plot+"vs"+x_plot+"_"+fixed_quantity+fixed_value+"_"+args.ptname+".png"
plot_title=y_label+" vs "+x_label+" at Fixed "+fixed_quantity+"="+fixed_value
a=0  # upper edge of the shaded band (y = 0)
# This first plot call sets the axes data limits so that get_ylim() below
# returns meaningful values.
plt.plot(x_axis, y_axis, color="purple")
b=plt.gca().get_ylim()[0]-10  # a bit below the lowest plotted y value
# Shade the region below y=0 in light blue.  If the data never extends below
# zero, still shade down to y=-10 so the band remains visible.
if b < 0 :
    plt.axhspan(a, b, color='b', alpha=0.15, lw=0)
else : plt.axhspan(a, -10, color='b', alpha=0.15, lw=0)
# NOTE(review): the data is plotted a second time, presumably so the curve is
# drawn on top of the shaded band -- confirm before removing.
plt.plot(x_axis, y_axis, color="purple")
plt.title(plot_title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.savefig(plot_name)
#plt.show()
| droundy/deft | papers/fuzzy-fmt/new-melting_anyplot_script.py | Python | gpl-2.0 | 6,245 | [
"CRYSTAL",
"Gaussian"
] | 80deb16525c0ab60b6d5ff06a2592ab2a2e29c1572627a764b2aa722cf559723 |
import ast
from .fixer import fix
UNSUPPORTED_ASSIGNMENTS = ast.Subscript, ast.Attribute
class AstProvider(object):
    """Parses module sources into ASTs, memoising one tree per module name."""

    def __init__(self):
        # Maps module.name -> parsed tree (or None when no source exists).
        self.cache = {}

    def get(self, module):
        """Return the AST for ``module``, caching it; None if it has no source."""
        name = module.name
        if name in self.cache:
            return self.cache[name]

        source = module.get_source()
        tree = fix(source)[0] if source else None

        self.cache[name] = tree
        return tree
class NodeProvider(object):
    """Base class mapping names to AST info tuples.

    Subclasses supply the AST node via get_node(); the extracted name table
    is built lazily on first lookup and cached as ``self.nodes``.
    """

    def get_node(self):
        # Subclasses must return the AST node whose names should be indexed.
        raise NotImplementedError()

    def __getitem__(self, name):
        # Fast path: the name table has already been extracted.
        try:
            return self.nodes.get(name, None)
        except AttributeError:
            pass

        # Fetch the node once and reuse it (avoids a redundant second
        # get_node() call on the cache-miss path).
        node = self.get_node()
        if not node:
            # Nothing to index; the (empty) result is deliberately not cached.
            return ('undefined', None)

        self.nodes = NameExtractor().process(node)
        return self.nodes.get(name, None)
class CtxNodeProvider(NodeProvider):
    """NodeProvider bound to a fixed, pre-resolved node within a context."""

    def __init__(self, ctx, node):
        self.ctx = ctx
        self.node = node

    def get_node(self):
        """Return the node this provider was constructed with."""
        return self.node
class NameExtractor(ast.NodeVisitor):
    """Collects the top-level names defined directly under an AST node.

    ``process`` returns a dict mapping each name to a tagged tuple:
    ``('func', node)``, ``('class', node)``,
    ``('imported', original_name, module_name)`` or
    ``('assign', position, value_node, target_node)``.
    """

    def process(self, node):
        """Return the name mapping for *node* (empty dict when node is falsy)."""
        if not node:
            return {}
        self.attrs = {}
        self.generic_visit(node)
        return self.attrs

    def visit_FunctionDef(self, node):
        self.attrs[node.name] = 'func', node

    def visit_ClassDef(self, node):
        self.attrs[node.name] = 'class', node

    def visit_ImportFrom(self, node):
        # Relative imports are encoded as leading dots on the module name.
        prefix = '.' * node.level
        for alias in node.names:
            module_name = prefix + (alias and node.module if node.module else '')
            if alias.name == '*':
                # TODO handle star names in imported objects
                pass
            else:
                local_name = alias.asname if alias.asname else alias.name
                self.attrs[local_name] = 'imported', alias.name, module_name

    def visit_Assign(self, node):
        # A tuple target (a, b = ...) defines one name per element.
        if isinstance(node.targets[0], ast.Tuple):
            targets = node.targets[0].elts
        else:
            targets = node.targets
        for position, target in enumerate(targets):
            if isinstance(target, UNSUPPORTED_ASSIGNMENTS):
                # Subscript/attribute targets do not introduce new names.
                continue
            self.attrs[target.id] = 'assign', position, node.value, target
class ReturnExtractor(ast.NodeVisitor):
    """Collects every value-bearing ``return`` statement under a node."""

    def process(self, node):
        """Return the list of ``Return`` nodes with a value found in *node*."""
        self.result = []
        self.generic_visit(node)
        return self.result

    def visit_Return(self, node):
        # Bare ``return`` (no value) is deliberately ignored.
        if node.value:
            self.result.append(node)
class TreeDumper(ast.NodeVisitor):
    """Renders an AST as an indented textual dump, for debugging."""

    def default(self, node):
        # One line per node, indented by depth, then recurse into children.
        self.result += ' ' * self.level + '%s %s\n' % (type(node), vars(node))
        self.level += 1
        self.generic_visit(node)
        self.level -= 1

    def __getattr__(self, name):
        # NodeVisitor looks up visit_<Type> methods by name; route every
        # unknown visitor to ``default``. Internal/dunder lookups must still
        # raise AttributeError so copy/pickle and hasattr() work normally.
        # BUG FIX: the original tested ``name in ('_attrs')`` -- a substring
        # test against the *string* '_attrs', not tuple membership -- and
        # then called the nonexistent ``object.__getattr__``.
        if name.startswith('__') or name == '_attrs':
            raise AttributeError(name)
        return self.default

    def process(self, node):
        """Return the textual dump of *node*."""
        self.level = 0
        self.result = ''
        self.visit(node)
        return self.result
def dump_tree(tree):
    """Return a human-readable, indented dump of *tree* (see TreeDumper)."""
    dumper = TreeDumper()
    return dumper.process(tree)
| baverman/supplement | supplement/tree.py | Python | mit | 3,043 | [
"VisIt"
] | 308b9280b1a51ea822274d4c6c8f1b392dc4ed6f2f996a8dec6376f7837ccc0d |
"""Imports and standardises data into crowdastro.
Matthew Alger
The Australian National University
2016
"""
import argparse
import csv
import hashlib
import logging
import os
from astropy.coordinates import SkyCoord
import astropy.io.fits
from astropy.io import ascii
import astropy.utils.exceptions
import astropy.wcs
import h5py
import numpy
import scipy.spatial.distance
import sklearn.neighbors
from .config import config
from .exceptions import CatalogueError
VERSION = '0.5.1' # Data version, not module version!
# max number of components * individual component signature size.
MAX_RADIO_SIGNATURE_LENGTH = 50
MAX_NAME_LENGTH = 50 # b
MAX_ZOONIVERSE_ID_LENGTH = 20 # b
PATCH_RADIUS = config['patch_radius'] # px
ARCMIN = 1 / 60 # deg
CANDIDATE_RADIUS = ARCMIN # deg
FITS_CONVENTION = 1
def hash_file(f):
    """Finds the MD5 hash of a file.

    File must be opened in bytes mode.
    """
    digest = hashlib.md5()
    while True:
        # Read in 64 KiB chunks to bound memory use on large files.
        block = f.read(65536)
        if block == b'':
            break
        digest.update(block)
    return digest.hexdigest()
def checksum_file(filename, h):
    """Checks files hash to expected hashes.

    filename: str.
    h: Hex hash string to compare against.
    -> True iff file matches hash.
    """
    with open(filename, 'rb') as f:
        actual = hash_file(f)
    return actual == h
def prep_h5(f_h5, ir_survey):
    """Creates hierarchy in HDF5 file."""
    # One group per (survey, field) pair: radio survey first, then IR.
    for survey in ('atlas', ir_survey):
        f_h5.create_group('/{}/cdfs'.format(survey))
        f_h5.create_group('/{}/elais'.format(survey))
    # Record the data version and which IR survey this file was built with.
    f_h5.attrs['version'] = VERSION
    f_h5.attrs['ir_survey'] = ir_survey
def import_atlas(f_h5, test=False, field='cdfs'):
    """Imports the ATLAS dataset into crowdastro, as well as associated SWIRE.

    f_h5: An HDF5 file.
    test: Flag to run on only 10 subjects. Default False.
    field: 'cdfs' or 'elais'. Default 'cdfs'.
    """
    from . import rgz_data as data

    # Fetch groups from HDF5.
    cdfs = f_h5['/atlas/{}'.format(field)]

    # First pass, I'll find coords, names, and Zooniverse IDs, as well as how
    # many data points there are.
    coords = []
    names = []
    zooniverse_ids = []

    if (field == 'cdfs'):
        # We need the ATLAS name, but we can only get it by going through the
        # ATLAS catalogue and finding the nearest component.
        # https://github.com/chengsoonong/crowdastro/issues/63
        # Fortunately, @jbanfield has already done this, so we can just load
        # that CSV and match the names.
        # TODO(MatthewJA): This matches the ATLAS component ID, but maybe we
        # should be using the name instead.
        rgz_to_atlas = {}
        with open(config['data_sources']['rgz_to_atlas']) as f:
            reader = csv.DictReader(f)
            for row in reader:
                rgz_to_atlas[row['ID_RGZ']] = row['ID']

        all_subjects = data.get_all_subjects(survey='atlas', field=field)
        if test:
            # Test mode: only pull a handful of subjects from MongoDB.
            all_subjects = all_subjects.limit(10)

        for subject in all_subjects:
            ra, dec = subject['coords']
            zooniverse_id = subject['zooniverse_id']
            rgz_source_id = subject['metadata']['source']
            if rgz_source_id not in rgz_to_atlas:
                # Subjects without a matching ATLAS component are dropped.
                logging.debug('Skipping %s; no matching ATLAS component.',
                              zooniverse_id)
                continue
            name = rgz_to_atlas[rgz_source_id]

            # Store the results.
            coords.append((ra, dec))
            names.append(name)
            zooniverse_ids.append(zooniverse_id)
    elif (field == 'elais'):
        # ELAIS subjects are not in RGZ; read them straight from the ATLAS
        # component catalogue, filtering to the ELAIS-S1 field.
        atlascatalogue = ascii.read(config['data_sources']['atlas_catalogue'])
        ras, decs = atlascatalogue['RA_deg'], atlascatalogue['Dec_deg']
        e_ids = atlascatalogue['ID']
        fields = atlascatalogue['field']
        # Store the results.
        for ra, dec, e_id, field_ in zip(ras, decs, e_ids, fields):
            if (field_ == 'ELAIS-S1'):
                coords.append((ra, dec))
                names.append(e_id)
                # ELAIS subjects have no Zooniverse ID; reuse the catalogue ID.
                zooniverse_ids.append(e_id)

    n_cdfs = len(names)

    # Sort the data by Zooniverse ID.
    coords_to_zooniverse_ids = dict(zip(coords, zooniverse_ids))
    names_to_zooniverse_ids = dict(zip(names, zooniverse_ids))
    coords.sort(key=coords_to_zooniverse_ids.get)
    names.sort(key=names_to_zooniverse_ids.get)
    zooniverse_ids.sort()

    # Begin to store the data. We will have two tables: one for numeric data,
    # and one for strings. We will have to preallocate the numeric table so that
    # we aren't storing huge amounts of image data in memory.
    # Strings.
    dtype = [('zooniverse_id', '<S{}'.format(MAX_ZOONIVERSE_ID_LENGTH)),
             ('name', '<S{}'.format(MAX_NAME_LENGTH))]
    string_data = numpy.array(list(zip(zooniverse_ids, names)), dtype=dtype)
    cdfs.create_dataset('string', data=string_data, dtype=dtype)

    # Numeric.
    image_size = (config['surveys']['atlas']['fits_width'] *
                  config['surveys']['atlas']['fits_height'])
    # RA, DEC, radio, (distance to SWIRE object added later)
    dim = (n_cdfs, 1 + 1 + image_size)
    # '_numeric' is a temporary dataset: import_swire/import_wise append the
    # IR distance columns and replace it with the final 'numeric' dataset.
    numeric = cdfs.create_dataset('_numeric', shape=dim, dtype='float32')

    # Load image patches and store numeric data.
    with astropy.io.fits.open(
            config['data_sources']['atlas_{}_image'.format(field)],
            ignore_blank=True) as atlas_image:
        # Drop the Stokes and frequency axes to get a 2D celestial WCS.
        wcs = astropy.wcs.WCS(atlas_image[0].header).dropaxis(3).dropaxis(2)
        pix_coords = wcs.all_world2pix(coords, FITS_CONVENTION)
        assert pix_coords.shape[1] == 2
        logging.debug('Fetching %d ATLAS images.', len(pix_coords))
        for index, (x, y) in enumerate(pix_coords):
            # Cut a fits_width x fits_height patch centred on the subject.
            radio = atlas_image[0].data[
                0, 0,  # stokes, freq
                int(y) - config['surveys']['atlas']['fits_height'] // 2:
                int(y) + config['surveys']['atlas']['fits_height'] // 2,
                int(x) - config['surveys']['atlas']['fits_width'] // 2:
                int(x) + config['surveys']['atlas']['fits_width'] // 2]
            numeric[index, 0] = coords[index][0]
            numeric[index, 1] = coords[index][1]
            numeric[index, 2:2 + image_size] = radio.reshape(-1)

    logging.debug('ATLAS imported.')
def remove_nulls(n):
    """Swaps nulls with zeros."""
    return 0 if n == 'null' else n
def import_swire(f_h5, field='cdfs'):
    """Imports the SWIRE dataset into crowdastro.

    f_h5: An HDF5 file.
    field: 'cdfs' or 'elais'.
    """
    names = []
    rows = []
    logging.debug('Reading SWIRE catalogue.')
    with open(
            config['data_sources']['swire_{}_catalogue'.format(field)]
    ) as f_tbl:
        # This isn't a valid ASCII table, so Astropy can't handle it. This means
        # we have to parse it manually.
        if field == 'cdfs':
            for _ in range(5):  # Skip the first five lines.
                next(f_tbl)
            # Get the column names.
            columns = [c.strip() for c in next(f_tbl).strip().split('|')][1:-1]
            assert len(columns) == 156
            for _ in range(3):  # Skip the next three lines.
                next(f_tbl)
            for row in f_tbl:
                row = row.strip().split()
                assert len(row) == 156
                row = dict(zip(columns, row))
                name = row['object']
                ra = float(row['ra'])
                dec = float(row['dec'])
                # Aperture fluxes at 3.6/4.5/5.8/8.0/24 um plus stellarity.
                flux_ap2_36 = float(remove_nulls(row['flux_ap2_36']))
                flux_ap2_45 = float(remove_nulls(row['flux_ap2_45']))
                flux_ap2_58 = float(remove_nulls(row['flux_ap2_58']))
                flux_ap2_80 = float(remove_nulls(row['flux_ap2_80']))
                flux_ap2_24 = float(remove_nulls(row['flux_ap2_24']))
                stell_36 = float(remove_nulls(row['stell_36']))
                # Extra -1 is so we can store nearest distance later.
                rows.append((ra, dec, flux_ap2_36, flux_ap2_45, flux_ap2_58,
                             flux_ap2_80, flux_ap2_24, stell_36, -1))
                names.append(name)
        elif field == 'elais':
            # Same layout as CDFS but with a longer preamble and 54 columns.
            for _ in range(121):  # Skip the first 121 lines.
                next(f_tbl)
            # Get the column names.
            columns = [c.strip() for c in next(f_tbl).strip().split('|')][1:-1]
            assert len(columns) == 54
            for _ in range(3):  # Skip the next three lines.
                next(f_tbl)
            for row in f_tbl:
                row = row.strip().split()
                assert len(row) == 54
                row = dict(zip(columns, row))
                name = row['object']
                ra = float(row['ra'])
                dec = float(row['dec'])
                flux_ap2_36 = float(remove_nulls(row['flux_ap2_36']))
                flux_ap2_45 = float(remove_nulls(row['flux_ap2_45']))
                flux_ap2_58 = float(remove_nulls(row['flux_ap2_58']))
                flux_ap2_80 = float(remove_nulls(row['flux_ap2_80']))
                flux_ap2_24 = float(remove_nulls(row['flux_ap2_24']))
                stell_36 = float(remove_nulls(row['stell_36']))
                # Extra -1 is so we can store nearest distance later.
                rows.append((ra, dec, flux_ap2_36, flux_ap2_45, flux_ap2_58,
                             flux_ap2_80, flux_ap2_24, stell_36, -1))
                names.append(name)

    logging.debug('Found %d SWIRE objects.', len(names))

    # Sort by name.
    rows_to_names = dict(zip(rows, names))
    rows.sort(key=rows_to_names.get)
    names.sort()
    names = numpy.array(names, dtype='<S{}'.format(MAX_NAME_LENGTH))
    rows = numpy.array(rows)

    # Filter on distance - only include image data for SWIRE objects within a
    # given radius of an ATLAS object. Otherwise, there's way too much data to
    # store.
    swire_positions = rows[:, :2]
    atlas_positions = f_h5['/atlas/{}/_numeric'.format(field)][:, :2]
    logging.debug('Computing SWIRE k-d tree.')
    swire_tree = sklearn.neighbors.KDTree(swire_positions, metric='euclidean')
    indices = numpy.concatenate(
        swire_tree.query_radius(atlas_positions, CANDIDATE_RADIUS))
    indices = numpy.unique(indices)
    logging.debug('Found %d SWIRE objects near ATLAS objects.', len(indices))
    names = names[indices]
    rows = rows[indices]
    swire_positions = swire_positions[indices]

    # Get distances.
    logging.debug('Finding ATLAS-SWIRE object distances.')
    # distances[i, j] = Euclidean distance (deg) from ATLAS i to SWIRE j.
    distances = scipy.spatial.distance.cdist(atlas_positions, swire_positions,
                                             'euclidean')
    assert distances.shape[0] == atlas_positions.shape[0]
    assert distances.shape[1] == swire_positions.shape[0]
    logging.debug('Done finding distances.')

    # Write numeric data to HDF5.
    # Column 8 is the distance to the nearest ATLAS object (replacing the -1).
    rows[:, 8] = distances.min(axis=0)
    # Replace the temporary ATLAS '_numeric' dataset with the final 'numeric'
    # one, with the per-SWIRE distance columns appended.
    atlas_numeric = f_h5['/atlas/{}/_numeric'.format(field)]
    f_h5['/atlas/{}'.format(field)].create_dataset(
        'numeric', dtype='float32',
        shape=(atlas_numeric.shape[0],
               atlas_numeric.shape[1] + len(indices)))
    f_h5['/atlas/{}/numeric'.format(field)][
        :, :atlas_numeric.shape[1]] = atlas_numeric
    f_h5['/atlas/{}/numeric'.format(field)][
        :, atlas_numeric.shape[1]:] = distances
    del f_h5['/atlas/{}/_numeric'.format(field)]

    # The SWIRE numeric table holds the catalogue features plus a flattened
    # radio patch of (2 * PATCH_RADIUS)^2 pixels per object.
    image_size = (PATCH_RADIUS * 2) ** 2
    dim = (rows.shape[0], rows.shape[1] + image_size)
    numeric = f_h5['/swire/{}'.format(field)].create_dataset(
        'numeric', shape=dim, dtype='float32')
    numeric[:, :rows.shape[1]] = rows
    f_h5['/swire/{}'.format(field)].create_dataset('string', data=names)

    # Load and store radio images.
    logging.debug('Importing radio patches.')
    with astropy.io.fits.open(
            config['data_sources']['atlas_{}_image'.format(field)],
            ignore_blank=True) as atlas_image:
        # Drop Stokes and frequency axes to get a 2D celestial WCS.
        wcs = astropy.wcs.WCS(atlas_image[0].header).dropaxis(3).dropaxis(2)
        pix_coords = wcs.all_world2pix(swire_positions, FITS_CONVENTION)
        assert pix_coords.shape[1] == 2
        assert pix_coords.shape[0] == len(indices)
        logging.debug('Fetching %d ATLAS patches.', len(indices))
        for index, (x, y) in enumerate(pix_coords):
            # Square radio patch centred on the SWIRE object.
            radio = atlas_image[0].data[
                0, 0,  # stokes, freq
                int(y) - PATCH_RADIUS:
                int(y) + PATCH_RADIUS,
                int(x) - PATCH_RADIUS:
                int(x) + PATCH_RADIUS]
            numeric[index, -image_size:] = radio.reshape(-1)
def import_wise(f_h5, field='cdfs'):
    """Imports the WISE dataset into crowdastro.

    f_h5: An HDF5 file.
    field: 'cdfs' or 'elais'.
    """
    names = []
    rows = []
    logging.debug('Reading WISE catalogue.')
    with open(
            config['data_sources']['wise_{}_catalogue'.format(field)]) as f_tbl:
        # This isn't a valid ASCII table, so Astropy can't handle it. This means
        # we have to parse it manually.
        for _ in range(105):  # Skip the first 105 lines.
            next(f_tbl)
        # Get the column names.
        columns = [c.strip() for c in next(f_tbl).strip().split('|')][1:-1]
        assert len(columns) == 45
        for _ in range(3):  # Skip the next three lines.
            next(f_tbl)
        for row in f_tbl:
            row = row.strip().split()
            assert len(row) == 45
            row = dict(zip(columns, row))
            name = row['designation']
            ra = float(row['ra'])
            dec = float(row['dec'])
            # Profile-fit magnitudes in the four WISE bands.
            w1mpro = float(remove_nulls(row['w1mpro']))
            w2mpro = float(remove_nulls(row['w2mpro']))
            w3mpro = float(remove_nulls(row['w3mpro']))
            w4mpro = float(remove_nulls(row['w4mpro']))
            # Extra -1 is so we can store nearest distance later.
            rows.append((ra, dec, w1mpro, w2mpro, w3mpro, w4mpro, -1))
            names.append(name)

    logging.debug('Found %d WISE objects.', len(names))

    # Sort by name.
    rows_to_names = dict(zip(rows, names))
    rows.sort(key=rows_to_names.get)
    names.sort()
    names = numpy.array(names, dtype='<S{}'.format(MAX_NAME_LENGTH))
    rows = numpy.array(rows)

    # Filter on distance - only include image data for WISE objects within a
    # given radius of an ATLAS object. Otherwise, there's way too much data to
    # store.
    wise_positions = rows[:, :2]
    atlas_positions = f_h5['/atlas/{}/_numeric'.format(field)][:, :2]
    logging.debug('Computing WISE k-d tree.')
    wise_tree = sklearn.neighbors.KDTree(wise_positions, metric='euclidean')
    indices = numpy.concatenate(
        wise_tree.query_radius(atlas_positions, CANDIDATE_RADIUS))
    indices = numpy.unique(indices)
    logging.debug('Found %d WISE objects near ATLAS objects.', len(indices))
    names = names[indices]
    rows = rows[indices]
    wise_positions = wise_positions[indices]

    # Get distances.
    logging.debug('Finding ATLAS-WISE object distances.')
    # distances[i, j] = Euclidean distance (deg) from ATLAS i to WISE j.
    distances = scipy.spatial.distance.cdist(atlas_positions, wise_positions,
                                             'euclidean')
    assert distances.shape[0] == atlas_positions.shape[0]
    assert distances.shape[1] == wise_positions.shape[0]
    logging.debug('Done finding distances.')

    # Write numeric data to HDF5.
    # Column 6 is the distance to the nearest ATLAS object (replacing the -1).
    rows[:, 6] = distances.min(axis=0)
    # Replace the temporary ATLAS '_numeric' dataset with the final 'numeric'
    # one, with the per-WISE distance columns appended.
    atlas_numeric = f_h5['/atlas/{}/_numeric'.format(field)]
    f_h5['/atlas/{}'.format(field)].create_dataset(
        'numeric', dtype='float32',
        shape=(atlas_numeric.shape[0],
               atlas_numeric.shape[1] + len(indices)))
    numeric_f = f_h5['/atlas/{}/numeric'.format(field)]
    numeric_f[:, :atlas_numeric.shape[1]] = atlas_numeric
    numeric_f[:, atlas_numeric.shape[1]:] = distances
    del f_h5['/atlas/{}/_numeric'.format(field)]

    # The WISE numeric table holds the catalogue features plus a flattened
    # radio patch of (2 * PATCH_RADIUS)^2 pixels per object.
    image_size = (PATCH_RADIUS * 2) ** 2
    dim = (rows.shape[0], rows.shape[1] + image_size)
    numeric = f_h5['/wise/{}'.format(field)].create_dataset(
        'numeric', shape=dim, dtype='float32')
    numeric[:, :rows.shape[1]] = rows
    f_h5['/wise/{}'.format(field)].create_dataset('string', data=names)

    # Load and store radio images.
    logging.debug('Importing radio patches.')
    with astropy.io.fits.open(
            config['data_sources']['atlas_{}_image'.format(field)],
            ignore_blank=True) as atlas_image:
        # Drop Stokes and frequency axes to get a 2D celestial WCS.
        wcs = astropy.wcs.WCS(atlas_image[0].header).dropaxis(3).dropaxis(2)
        pix_coords = wcs.all_world2pix(wise_positions, FITS_CONVENTION)
        assert pix_coords.shape[1] == 2
        assert pix_coords.shape[0] == len(indices)
        logging.debug('Fetching %d ATLAS patches.', len(indices))
        for index, (x, y) in enumerate(pix_coords):
            # Square radio patch centred on the WISE object.
            radio = atlas_image[0].data[
                0, 0,  # stokes, freq
                int(y) - PATCH_RADIUS:
                int(y) + PATCH_RADIUS,
                int(x) - PATCH_RADIUS:
                int(x) + PATCH_RADIUS]
            numeric[index, -image_size:] = radio.reshape(-1)
def import_norris(f_h5):
    """Imports the Norris et al. (2006) labels.

    Each Norris cross-identification is matched against the nearest imported
    IR object; IR objects within the survey's distance cutoff are labelled 1.

    f_h5: crowdastro HDF5 file with WISE or SWIRE already imported.
    """
    ir_survey = f_h5.attrs['ir_survey']
    ir_positions = f_h5['/{}/cdfs/numeric'.format(ir_survey)][:, :2]
    ir_tree = sklearn.neighbors.KDTree(ir_positions)
    norris_dat = astropy.io.ascii.read(config['data_sources']['norris_coords'])
    norris_swire = norris_dat['SWIRE']
    norris_coords = []
    for s in norris_swire:
        s = s.strip()
        if len(s) < 19:
            # Too short to be a full SWIRE designation; skip it.
            continue

        # Slice the designation, e.g. J032931.44-281722.0, into sexagesimal
        # RA/DEC components.
        ra_hr = s[1:3]
        ra_min = s[3:5]
        ra_sec = s[5:10]
        dec_sgn = s[10]
        dec_deg = s[11:13]
        dec_min = s[13:15]
        dec_sec = s[15:19]
        ra = '{} {} {}'.format(ra_hr, ra_min, ra_sec)
        dec = '{}{} {} {}'.format(dec_sgn, dec_deg, dec_min, dec_sec)
        logging.debug('Reading Norris coordinate: {}; {}'.format(ra, dec))
        # Consistency fix: pass units as an explicit (ra_unit, dec_unit)
        # tuple, as import_fan does. The original passed the single string
        # 'hourangle, deg', which astropy happens to split on commas, but the
        # tuple form is unambiguous.
        coord = SkyCoord(ra=ra, dec=dec,
                         unit=('hourangle', 'deg'))
        norris_coords.append(coord)

    norris_labels = numpy.zeros((ir_positions.shape[0],))
    for skycoord in norris_coords:
        # Find a neighbour.
        ra = skycoord.ra.degree
        dec = skycoord.dec.degree
        ((dist,),), ((ir,),) = ir_tree.query([(ra, dec)])
        if dist < config['surveys'][ir_survey]['distance_cutoff']:
            norris_labels[ir] = 1
    f_h5.create_dataset('/{}/cdfs/norris_labels'.format(ir_survey),
                        data=norris_labels)
def import_fan(f_h5):
    """Imports the Fan et al. (2015) labels.

    Each Fan cross-identification is matched against the nearest imported
    IR object; IR objects within the survey's distance cutoff are labelled 1.

    f_h5: crowdastro HDF5 file with WISE or SWIRE already imported.
    """
    ir_survey = f_h5.attrs['ir_survey']
    # NOTE: the original also read /{ir}/cdfs/string into an unused local
    # (ir_names); that dead read has been removed.
    ir_positions = f_h5['/{}/cdfs/numeric'.format(ir_survey)][:, :2]
    ir_tree = sklearn.neighbors.KDTree(ir_positions)
    fan_coords = []
    with open(config['data_sources']['fan_swire'], 'r') as fan_dat:
        for row in csv.DictReader(fan_dat):
            # Slice the SWIRE designation into sexagesimal RA/DEC components,
            # e.g. SWIRE3J032931.44-281722.0.
            ra_hr = row['swire'][8:10]
            ra_min = row['swire'][10:12]
            ra_sec = row['swire'][12:17]
            dec_sgn = row['swire'][17]
            dec_deg = row['swire'][18:20]
            dec_min = row['swire'][20:22]
            dec_sec = row['swire'][22:26]
            ra = '{} {} {}'.format(ra_hr, ra_min, ra_sec)
            dec = '{}{} {} {}'.format(dec_sgn, dec_deg, dec_min, dec_sec)
            fan_coords.append((ra, dec))

    fan_labels = numpy.zeros((ir_positions.shape[0],))
    for ra, dec in fan_coords:
        # Find a neighbour.
        skycoord = SkyCoord(ra=ra, dec=dec, unit=('hourangle', 'deg'))
        ra = skycoord.ra.degree
        dec = skycoord.dec.degree
        ((dist,),), ((ir,),) = ir_tree.query([(ra, dec)])
        if dist < config['surveys'][ir_survey]['distance_cutoff']:
            fan_labels[ir] = 1
    f_h5.create_dataset('/{}/cdfs/fan_labels'.format(ir_survey),
                        data=fan_labels)
def contains(bbox, point):
    """Checks if point is within bbox.

    bbox: [[x0, x1], [y0, y1]]
    point: [x, y]
    -> bool
    """
    (x0, x1), (y0, y1) = bbox
    px, py = point
    return x0 <= px <= x1 and y0 <= py <= y1
bbox_cache_ = {} # Should help speed up ATLAS membership checking.
def make_radio_combination_signature(radio_annotation, wcs, atlas_positions,
                                     subject, pix_offset):
    """Generates a unique signature for a radio annotation.

    radio_annotation: 'radio' dictionary from a classification.
    wcs: World coordinate system associated with the ATLAS image.
    atlas_positions: [[RA, DEC]] NumPy array.
    subject: RGZ subject dict.
    pix_offset: (x, y) pixel position of this radio subject on the ATLAS image.
    -> Something immutable

    Raises CatalogueError when none of the annotated radio components can be
    matched to a catalogued ATLAS position.
    """
    from . import rgz_data as data
    # TODO(MatthewJA): This only works on ATLAS. Generalise.
    # My choice of immutable object will be stringified crowdastro ATLAS
    # indices.
    zooniverse_id = subject['zooniverse_id']
    subject_fits = data.get_radio_fits(subject)
    subject_wcs = astropy.wcs.WCS(subject_fits.header)

    atlas_ids = []
    x_offset, y_offset = pix_offset
    for c in radio_annotation.values():
        # Note that the x scale is not the same as the IR scale, but the scale
        # factor is included in the annotation, so I have multiplied this out
        # here for consistency.
        scale_width = c.get('scale_width', '')
        scale_height = c.get('scale_height', '')
        if scale_width:
            scale_width = float(scale_width)
        else:
            # Sometimes, there's no scale, so I've included a default scale.
            scale_width = config['surveys']['atlas']['scale_width']

        if scale_height:
            scale_height = float(scale_height)
        else:
            scale_height = config['surveys']['atlas']['scale_height']

        # These numbers are in terms of the PNG images, so I need to multiply by
        # the click-to-fits ratio.
        scale_width *= config['surveys']['atlas']['click_to_fits_x']
        scale_height *= config['surveys']['atlas']['click_to_fits_y']

        # Bounding box in the subject's own FITS pixel frame.
        subject_bbox = [
            [
                float(c['xmin']) * scale_width,
                float(c['xmax']) * scale_width,
            ],
            [
                float(c['ymin']) * scale_height,
                float(c['ymax']) * scale_height,
            ],
        ]

        # ...and by the mosaic ratio. There's probably double-up here, but this
        # makes more sense.
        scale_width *= config['surveys']['atlas']['mosaic_scale_x']
        scale_height *= config['surveys']['atlas']['mosaic_scale_y']

        # Get the bounding box of the radio source in pixels.
        # Format: [xs, ys]
        bbox = [
            [
                float(c['xmin']) * scale_width,
                float(c['xmax']) * scale_width,
            ],
            [
                float(c['ymin']) * scale_height,
                float(c['ymax']) * scale_height,
            ],
        ]
        assert bbox[0][0] < bbox[0][1]
        assert bbox[1][0] < bbox[1][1]

        # Convert the bounding box into RA/DEC.
        bbox = wcs.all_pix2world(bbox[0] + x_offset, bbox[1] + y_offset,
                                 FITS_CONVENTION)
        subject_bbox = subject_wcs.all_pix2world(subject_bbox[0],
                                                 subject_bbox[1], FITS_CONVENTION)

        # TODO(MatthewJA): Remove (or disable) this sanity check.
        # The bbox is backwards along the x-axis for some reason.
        bbox[0] = bbox[0][::-1]
        assert bbox[0][0] < bbox[0][1]
        assert bbox[1][0] < bbox[1][1]

        bbox = numpy.array(bbox)

        # What is this radio source called? Check if we have an object in the
        # bounding box. We'll cache these results because there is a lot of
        # overlap.
        cache_key = tuple(tuple(b) for b in bbox)
        if cache_key in bbox_cache_:
            index = bbox_cache_[cache_key]
        else:
            # Catalogued ATLAS positions falling inside the RA/DEC bbox.
            x_gt_min = atlas_positions[:, 0] >= bbox[0, 0]
            x_lt_max = atlas_positions[:, 0] <= bbox[0, 1]
            y_gt_min = atlas_positions[:, 1] >= bbox[1, 0]
            y_lt_max = atlas_positions[:, 1] <= bbox[1, 1]
            within = numpy.all([x_gt_min, x_lt_max, y_gt_min, y_lt_max], axis=0)
            indices = numpy.where(within)[0]
            if len(indices) == 0:
                logging.debug('Skipping radio source not in catalogue for '
                              '%s', zooniverse_id)
                continue
            else:
                if len(indices) > 1:
                    logging.debug('Found multiple (%d) ATLAS matches '
                                  'for %s', len(indices), zooniverse_id)
                # Ambiguous matches resolve to the first candidate.
                index = indices[0]
                bbox_cache_[cache_key] = index

        atlas_ids.append(str(index))

    atlas_ids.sort()

    if not atlas_ids:
        raise CatalogueError('No catalogued radio sources.')

    # Signature: sorted, ';'-joined catalogue indices — order-independent.
    return ';'.join(atlas_ids)
def parse_classification(classification, subject, atlas_positions, wcs,
                         pix_offset):
    """Converts a raw RGZ classification into a classification dict.

    Scales all positions and flips y axis of clicks.

    classification: RGZ classification dict.
    subject: Associated RGZ subject dict.
    atlas_positions: [[RA, DEC]] NumPy array.
    wcs: World coordinate system of the ATLAS image.
    pix_offset: (x, y) pixel position of this radio subject on the ATLAS image.
    -> dict mapping radio signature to list of corresponding IR host pixel
        locations.
    """
    result = {}

    n_invalid = 0

    for annotation in classification['annotations']:
        if 'radio' not in annotation:
            # This is a metadata annotation and we can ignore it.
            continue

        if annotation['radio'] == 'No Contours':
            # I'm not sure how this occurs. I'm going to ignore it.
            continue

        try:
            radio_signature = make_radio_combination_signature(
                annotation['radio'], wcs, atlas_positions,
                subject, pix_offset)
        except CatalogueError:
            # Ignore invalid annotations.
            n_invalid += 1
            logging.debug('Ignoring invalid annotation for %s.',
                          subject['zooniverse_id'])
            continue

        ir_locations = []
        if annotation['ir'] != 'No Sources':
            # Each click is keyed by an index within the 'ir' sub-dict.
            for ir_click in annotation['ir']:
                ir_x = float(annotation['ir'][ir_click]['x'])
                ir_y = float(annotation['ir'][ir_click]['y'])

                # Rescale to a consistent size.
                ir_x *= config['surveys']['atlas']['click_to_fits_x']
                ir_y *= config['surveys']['atlas']['click_to_fits_y']

                # Ignore out-of-range data.
                if not 0 <= ir_x <= config['surveys']['atlas']['fits_width']:
                    n_invalid += 1
                    continue

                if not 0 <= ir_y <= config['surveys']['atlas']['fits_height']:
                    n_invalid += 1
                    continue

                # Flip the y axis to match other data conventions.
                ir_y = config['surveys']['atlas']['fits_height'] - ir_y

                # Rescale to match the mosaic WCS.
                ir_x *= config['surveys']['atlas']['mosaic_scale_x']
                ir_y *= config['surveys']['atlas']['mosaic_scale_y']

                # Move to the reference location of the radio subject.
                ir_x += pix_offset[0]
                ir_y += pix_offset[1]

                # Convert the location into RA/DEC.
                (ir_x,), (ir_y,) = wcs.wcs_pix2world([ir_x], [ir_y], 1)

                ir_location = (ir_x, ir_y)
                ir_locations.append(ir_location)

        # 'No Sources' annotations are recorded with an empty location list.
        result[radio_signature] = ir_locations

    if n_invalid:
        logging.debug('%d invalid annotations for %s.', n_invalid,
                      subject['zooniverse_id'])

    return result
def import_classifications(f_h5, test=False):
    """Imports Radio Galaxy Zoo classifications into crowdastro.

    f_h5: An HDF5 file.
    test: Flag to run on only 10 subjects. Default False.
    """
    # TODO(MatthewJA): This only works for ATLAS/CDFS. Generalise.
    from . import rgz_data as data
    atlas_positions = f_h5['/atlas/cdfs/numeric'][:, :2]
    atlas_ids = f_h5['/atlas/cdfs/string']['zooniverse_id']

    # Parallel lists, one entry per (click, radio-combination) pair.
    classification_positions = []
    classification_combinations = []
    classification_usernames = []

    with astropy.io.fits.open(
            # RGZ only has cdfs classifications
            config['data_sources']['atlas_cdfs_image'],
            ignore_blank=True) as atlas_image:
        # Drop Stokes and frequency axes to get a 2D celestial WCS.
        wcs = astropy.wcs.WCS(atlas_image[0].header).dropaxis(3).dropaxis(2)
        for obj_index, atlas_id in enumerate(atlas_ids):
            subject = data.get_subject(atlas_id.decode('ascii'))
            assert subject['zooniverse_id'] == atlas_ids[obj_index].decode('ascii')
            classifications = data.get_subject_classifications(subject)
            offset, = wcs.all_world2pix([subject['coords']], FITS_CONVENTION)
            # The coords are of the middle of the subject.
            offset[0] -= (config['surveys']['atlas']['fits_width'] *
                          config['surveys']['atlas']['mosaic_scale_x'] // 2)
            offset[1] -= (config['surveys']['atlas']['fits_height'] *
                          config['surveys']['atlas']['mosaic_scale_y'] // 2)

            for c_index, classification in enumerate(classifications):
                user_name = classification.get('user_name', '').encode(
                    'ascii', errors='ignore')
                # Usernames actually don't have an upper length limit on RGZ(?!) so
                # I'll cap everything at 50 characters for my own sanity.
                if len(user_name) > 50:
                    user_name = user_name[:50]
                classification = parse_classification(classification, subject,
                                                      atlas_positions, wcs, offset)
                full_radio = '|'.join(classification.keys())
                for radio, locations in classification.items():
                    if not locations:
                        # 'No Sources': record a single placeholder click.
                        locations = [(None, None)]

                    for click_index, location in enumerate(locations):
                        # Check whether the click index is 0 to maintain the
                        # assumption that we only need the first click.
                        pos_row = (obj_index, location[0], location[1],
                                   click_index == 0)
                        com_row = (obj_index, full_radio, radio)
                        # A little redundancy here with the index, but we can assert
                        # that they are the same later to check integrity.
                        classification_positions.append(pos_row)
                        classification_combinations.append(com_row)
                        classification_usernames.append(user_name)

    combinations_dtype = [('index', 'int'),
                          ('full_signature', '<S{}'.format(
                              MAX_RADIO_SIGNATURE_LENGTH)),
                          ('signature', '<S{}'.format(
                              MAX_RADIO_SIGNATURE_LENGTH))]
    classification_positions = numpy.array(classification_positions,
                                           dtype=float)
    classification_combinations = numpy.array(classification_combinations,
                                              dtype=combinations_dtype)

    f_h5['/atlas/cdfs/'].create_dataset('classification_positions',
                                        data=classification_positions,
                                        dtype=float)
    f_h5['/atlas/cdfs/'].create_dataset('classification_usernames',
                                        data=classification_usernames,
                                        dtype='<S50')
    f_h5['/atlas/cdfs/'].create_dataset('classification_combinations',
                                        data=classification_combinations,
                                        dtype=combinations_dtype)
def _populate_parser(parser):
parser.description = 'Imports and standardises data into crowdastro.'
parser.add_argument('--h5', default='data/crowdastro.h5',
help='HDF5 output file')
parser.add_argument('--test', action='store_true', default=False,
help='Run with a small number of subjects',)
parser.add_argument('--ir', choices={'swire', 'wise'},
default='swire', help='which infrared survey to use')
def check_raw_data():
    """Validates existence and correctness of raw data files."""
    checksums = config['data_checksums']
    for source, filename in config['data_sources'].items():
        # The MongoDB database name is not a file on disk.
        if source == 'radio_galaxy_zoo_db':
            continue

        if not os.path.exists(filename):
            logging.error(
                '{} expected at {} but not found'.format(source, filename))

        # Only sources with a recorded checksum are hash-verified.
        if source in checksums:
            if checksum_file(filename, checksums[source]):
                logging.debug('{} has correct hash'.format(filename))
            else:
                logging.error('{} has incorrect hash'.format(filename))
def _main(args):
    """Run the full import pipeline into the HDF5 file named by *args*."""
    check_raw_data()
    # Map the --ir choice to its importer.
    ir_importers = {'swire': import_swire, 'wise': import_wise}
    with h5py.File(args.h5, 'w') as f_h5:
        prep_h5(f_h5, args.ir)
        for field in ('cdfs', 'elais'):
            import_atlas(f_h5, test=args.test, field=field)
        import_ir = ir_importers.get(args.ir)
        if import_ir is not None:
            for field in ('cdfs', 'elais'):
                import_ir(f_h5, field=field)
        import_norris(f_h5)
        import_fan(f_h5)
        import_classifications(f_h5)
if __name__ == '__main__':
    # Script entry point: build the CLI, then run the import pipeline.
    parser = argparse.ArgumentParser()
    _populate_parser(parser)
    args = parser.parse_args()
    _main(args)
| chengsoonong/crowdastro | crowdastro/import_data.py | Python | mit | 34,208 | [
"Galaxy"
] | 3cf82b81d50d5fa2700f0d7ff999a8fedf0f68acb9d911cc6591d08aa43bec79 |
########################################################################
# File : SSHComputingElement.py
# Author : Dumitru Laurentiu, A.T.
########################################################################
""" SSH (Virtual) Computing Element: For a given IP/host it will send jobs directly through ssh
"""
__RCSID__ = "$Id$"
import os
import urllib
import json
import stat
from urlparse import urlparse
from DIRAC import S_OK, S_ERROR
from DIRAC import rootPath
from DIRAC import gLogger
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.Resources.Computing.PilotBundle import bundleProxy, writeScript
from DIRAC.Core.Utilities.List import uniqueElements
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.Core.Utilities.List import breakListIntoChunks
class SSH(object):
""" SSH class encapsulates passing commands and files through an SSH tunnel
to a remote host. It can use either ssh or gsissh access. The final host
where the commands will be executed and where the files will copied/retrieved
can be reached through an intermediate host if SSHTunnel parameters is defined.
SSH constructor parameters are defined in a SSH accessible Computing Element
in the Configuration System:
- SSHHost: SSH host name
- SSHUser: SSH user login
- SSHPassword: SSH password
- SSHPort: port number if not standard, e.g. for the gsissh access
- SSHKey: location of the ssh private key for no-password connection
- SSHOptions: any other SSH options to be used
- SSHTunnel: string defining the use of intermediate SSH host. Example:
'ssh -i /private/key/location -l final_user final_host'
- SSHType: ssh ( default ) or gsissh
The class public interface includes two methods:
sshCall( timeout, command_sequence )
scpCall( timeout, local_file, remote_file, upload = False/True )
"""
def __init__(self, host=None, parameters=None):
  """Build the SSH accessor from an explicit host and/or CE parameters.

  :param str host: SSH host name; when empty, taken from parameters['SSHHost']
  :param dict parameters: CE configuration (SSHUser, SSHPassword, SSHPort,
                          SSHKey, SSHOptions, SSHTunnel, SSHType)
  """
  params = parameters if parameters is not None else {}
  self.host = host if host else params.get('SSHHost', '')
  self.user = params.get('SSHUser', '')
  self.password = params.get('SSHPassword', '')
  self.port = params.get('SSHPort', '')
  self.key = params.get('SSHKey', '')
  self.options = params.get('SSHOptions', '')
  self.sshTunnel = params.get('SSHTunnel', '')
  self.sshType = params.get('SSHType', 'ssh')

  # Fold the port and identity file into the generic option string.
  extra = []
  if self.port:
    extra.append('-p %s' % self.port)
  if self.key:
    extra.append('-i %s' % self.key)
  if extra:
    self.options = self.options + ' ' + ' '.join(extra)
  self.options = self.options.strip()

  self.log = gLogger.getSubLogger('SSH')
def __ssh_call(self, command, timeout):
  """Execute *command* over SSH, driving password prompts if possible.

  Uses pexpect when available to answer host-key confirmations and
  password prompts interactively; otherwise falls back to a plain
  shellCall and relies on passwordless (key-based) login.

  :param str command: full ssh command line to run
  :param int timeout: seconds before giving up (0/None -> 999)
  :return: S_OK((status, stdout, stderr)) or S_ERROR
  """
  try:
    import pexpect
    expectFlag = True
  except BaseException as x:
    # pexpect unavailable: fall back to a non-interactive shell call.
    from DIRAC.Core.Utilities.Subprocess import shellCall
    expectFlag = False

  if not timeout:
    timeout = 999

  if expectFlag:
    ssh_newkey = 'Are you sure you want to continue connecting'
    try:
      child = pexpect.spawn(command, timeout=timeout)
      # Match on 'assword: ' to cover both 'Password:' and 'password:'.
      i = child.expect([pexpect.TIMEOUT, ssh_newkey, pexpect.EOF, 'assword: '])
      if i == 0:  # Timeout
        return S_OK((-1, child.before, 'SSH login failed'))
      elif i == 1:  # SSH does not have the public key. Just accept it.
        child.sendline('yes')
        child.expect('assword: ')
        i = child.expect([pexpect.TIMEOUT, 'assword: '])
        if i == 0:  # Timeout
          return S_OK((-1, str(child.before) + str(child.after), 'SSH login failed'))
        elif i == 1:
          child.sendline(self.password)
          child.expect(pexpect.EOF)
          return S_OK((0, child.before, ''))
      elif i == 2:
        # Passwordless login, get the output
        return S_OK((0, child.before, ''))

      # Prompted for a password on first contact: send it if we have one.
      if self.password:
        child.sendline(self.password)
        child.expect(pexpect.EOF)
        return S_OK((0, child.before, ''))

      # Password requested but none configured.
      return S_ERROR((-2, child.before, ''))
    except BaseException as x:
      res = (-1, 'Encountered exception %s: %s' % (Exception, str(x)))
      return S_ERROR(res)
  else:
    # Try passwordless login
    result = shellCall(timeout, command)
    # print ( "!!! SSH command: %s returned %s\n" % (command, result) )
    # ssh exits with 255 on connection failure.
    if result['Value'][0] == 255:
      return S_ERROR((-1, 'Cannot connect to host %s' % self.host, ''))
    return result
def sshCall(self, timeout, cmdSeq):
""" Execute remote command via a ssh remote call
:param int timeout: timeout of the command
:param cmdSeq: list of command components
:type cmdSeq: python:list
"""
command = cmdSeq
if isinstance(cmdSeq, list):
command = ' '.join(cmdSeq)
pattern = "__DIRAC__"
if self.sshTunnel:
command = command.replace("'", '\\\\\\\"')
command = command.replace('$', '\\\\\\$')
command = '/bin/sh -c \' %s -q %s -l %s %s "%s \\\"echo %s; %s\\\" " \' ' % (self.sshType, self.options,
self.user, self.host,
self.sshTunnel, pattern, command)
else:
#command = command.replace( '$', '\$' )
command = '%s -q %s -l %s %s "echo %s; %s"' % (self.sshType, self.options, self.user, self.host,
pattern, command)
self.log.debug("SSH command: %s" % command)
result = self.__ssh_call(command, timeout)
self.log.debug("SSH command result %s" % str(result))
if not result['OK']:
return result
# Take the output only after the predefined pattern
ind = result['Value'][1].find('__DIRAC__')
if ind == -1:
return result
status, output, error = result['Value']
output = output[ind + 9:]
if output.startswith('\r'):
output = output[1:]
if output.startswith('\n'):
output = output[1:]
result['Value'] = (status, output, error)
return result
def scpCall(self, timeout, localFile, remoteFile, postUploadCommand='', upload=True):
""" Perform file copy through an SSH magic.
:param int timeout: timeout of the command
:param str localFile: local file path, serves as source for uploading and destination for downloading.
Can take 'Memory' as value, in this case the downloaded contents is returned
as result['Value']
:param str remoteFile: remote file full path
:param str postUploadCommand: command executed on the remote side after file upload
:param bool upload: upload if True, download otherwise
"""
if upload:
if self.sshTunnel:
remoteFile = remoteFile.replace('$', r'\\\\\$')
postUploadCommand = postUploadCommand.replace('$', r'\\\\\$')
command = "/bin/sh -c 'cat %s | %s -q %s %s@%s \"%s \\\"cat > %s; %s\\\"\"' " % (localFile,
self.sshType,
self.options,
self.user,
self.host,
self.sshTunnel,
remoteFile,
postUploadCommand)
else:
command = "/bin/sh -c \"cat %s | %s -q %s %s@%s 'cat > %s; %s'\" " % (localFile,
self.sshType,
self.options,
self.user,
self.host,
remoteFile,
postUploadCommand)
else:
finalCat = '| cat > %s' % localFile
if localFile.lower() == 'memory':
finalCat = ''
if self.sshTunnel:
remoteFile = remoteFile.replace('$', '\\\\\\$')
command = "/bin/sh -c '%s -q %s -l %s %s \"%s \\\"cat %s\\\"\" %s'" % (self.sshType,
self.options,
self.user,
self.host,
self.sshTunnel,
remoteFile,
finalCat)
else:
remoteFile = remoteFile.replace('$', r'\$')
command = "/bin/sh -c '%s -q %s -l %s %s \"cat %s\" %s'" % (self.sshType,
self.options,
self.user,
self.host,
remoteFile,
finalCat)
self.log.debug("SSH copy command: %s" % command)
return self.__ssh_call(command, timeout)
class SSHComputingElement(ComputingElement):
    """Computing Element that drives a remote host (or a batch-system front-end
    reachable by ssh) by uploading the batch-system adaptor scripts into a
    shared area and invoking them remotely through the :class:`SSH` helper.

    NOTE(review): this module uses Python 2 constructs (``xrange``,
    ``basestring``, ``urllib.quote``) — confirm the target interpreter.
    """

    #############################################################################
    def __init__(self, ceUniqueID):
        """ Standard constructor.
        """
        ComputingElement.__init__(self, ceUniqueID)
        self.ceType = 'SSH'
        self.execution = "SSH"
        self.batchSystem = 'Host'   # default batch-system adaptor name
        self.submittedJobs = 0      # running count of jobs submitted via this CE
        self.outputTemplate = ''    # printf-style template for stdout file names
        self.errorTemplate = ''     # printf-style template for stderr file names

    #############################################################################
    def _addCEConfigDefaults(self):
        """Method to make sure all necessary Configuration Parameters are defined
        """
        # First assure that any global parameters are loaded
        ComputingElement._addCEConfigDefaults(self)
        # Now batch system specific ones
        if 'ExecQueue' not in self.ceParameters:
            self.ceParameters['ExecQueue'] = self.ceParameters.get('Queue', '')

        if 'SharedArea' not in self.ceParameters:
            # '.' isn't a good location, use $HOME on the remote host instead
            self.ceParameters['SharedArea'] = '$HOME'

        # The directory parameters below default to paths relative to the
        # shared area; _reset() turns them into absolute remote paths.
        if 'BatchOutput' not in self.ceParameters:
            self.ceParameters['BatchOutput'] = 'data'

        if 'BatchError' not in self.ceParameters:
            self.ceParameters['BatchError'] = 'data'

        if 'ExecutableArea' not in self.ceParameters:
            self.ceParameters['ExecutableArea'] = 'data'

        if 'InfoArea' not in self.ceParameters:
            self.ceParameters['InfoArea'] = 'info'

        if 'WorkArea' not in self.ceParameters:
            self.ceParameters['WorkArea'] = 'work'

    def _reset(self):
        """ Process CE parameters and make necessary adjustments
        """
        self.batchSystem = self.ceParameters.get('BatchSystem', 'Host')
        if 'BatchSystem' not in self.ceParameters:
            self.ceParameters['BatchSystem'] = self.batchSystem
        self.loadBatchSystem()

        self.user = self.ceParameters['SSHUser']
        self.queue = self.ceParameters['Queue']
        self.submitOptions = self.ceParameters.get('SubmitOptions', '')
        if 'ExecQueue' not in self.ceParameters or not self.ceParameters['ExecQueue']:
            self.ceParameters['ExecQueue'] = self.ceParameters.get('Queue', '')
        self.execQueue = self.ceParameters['ExecQueue']
        self.log.info("Using queue: ", self.queue)

        # Resolve directory parameters: relative names are rooted in the
        # remote shared area.
        self.sharedArea = self.ceParameters['SharedArea']
        self.batchOutput = self.ceParameters['BatchOutput']
        if not self.batchOutput.startswith('/'):
            self.batchOutput = os.path.join(self.sharedArea, self.batchOutput)
        self.batchError = self.ceParameters['BatchError']
        if not self.batchError.startswith('/'):
            self.batchError = os.path.join(self.sharedArea, self.batchError)
        self.infoArea = self.ceParameters['InfoArea']
        if not self.infoArea.startswith('/'):
            self.infoArea = os.path.join(self.sharedArea, self.infoArea)
        self.executableArea = self.ceParameters['ExecutableArea']
        if not self.executableArea.startswith('/'):
            self.executableArea = os.path.join(self.sharedArea, self.executableArea)
        self.workArea = self.ceParameters['WorkArea']
        if not self.workArea.startswith('/'):
            self.workArea = os.path.join(self.sharedArea, self.workArea)

        result = self._prepareRemoteHost()
        if not result['OK']:
            return result

        # NOTE(review): submitOptions was already assigned above from the same
        # parameter; this reset + re-read looks redundant — confirm intended.
        self.submitOptions = ''
        if 'SubmitOptions' in self.ceParameters:
            self.submitOptions = self.ceParameters['SubmitOptions']

        self.removeOutput = True
        if 'RemoveOutput' in self.ceParameters:
            if self.ceParameters['RemoveOutput'].lower() in ['no', 'false', '0']:
                self.removeOutput = False

        self.preamble = self.ceParameters.get('Preamble', '')

        return S_OK()

    def _prepareRemoteHost(self, host=None):
        """ Prepare remote directories and upload control script
        """
        ssh = SSH(host=host, parameters=self.ceParameters)

        # Make remote directories
        dirTuple = tuple(uniqueElements([self.sharedArea,
                                         self.executableArea,
                                         self.infoArea,
                                         self.batchOutput,
                                         self.batchError,
                                         self.workArea]))
        nDirs = len(dirTuple)
        # Build one 'mkdir -p a; mkdir -p b; ...' command for all directories.
        cmd = 'mkdir -p %s; ' * nDirs % dirTuple
        cmd = "bash -c '%s'" % cmd
        self.log.verbose('Creating working directories on %s' % self.ceParameters['SSHHost'])
        result = ssh.sshCall(30, cmd)
        if not result['OK']:
            self.log.warn('Failed creating working directories: %s' % result['Message'][1])
            return result
        status, output, _error = result['Value']
        if status == -1:
            self.log.warn('Timeout while creating directories')
            return S_ERROR('Timeout while creating directories')
        # mkdir reports failures as 'cannot create directory ...' on stdout/stderr
        if "cannot" in output:
            self.log.warn('Failed to create directories: %s' % output)
            return S_ERROR('Failed to create directories: %s' % output)

        # Upload the control script now
        batchSystemDir = os.path.join(rootPath, "DIRAC", "Resources", "Computing", "BatchSystems")
        batchSystemScript = os.path.join(batchSystemDir, '%s.py' % self.batchSystem)
        batchSystemExecutor = os.path.join(batchSystemDir, 'executeBatch.py')
        self.log.verbose('Uploading %s script to %s' % (self.batchSystem, self.ceParameters['SSHHost']))
        remoteScript = '%s/execute_batch' % self.sharedArea
        result = ssh.scpCall(30,
                             '%s %s' % (batchSystemScript, batchSystemExecutor),
                             remoteScript,
                             postUploadCommand='chmod +x %s' % remoteScript)
        if not result['OK']:
            self.log.warn('Failed uploading control script: %s' % result['Message'][1])
            return result
        status, output, _error = result['Value']
        if status != 0:
            if status == -1:
                self.log.warn('Timeout while uploading control script')
                return S_ERROR('Timeout while uploading control script')
            self.log.warn('Failed uploading control script: %s' % output)
            return S_ERROR('Failed uploading control script')

        # Chmod the control scripts
        #self.log.verbose( 'Chmod +x control script' )
        #result = ssh.sshCall( 10, "chmod +x %s/%s" % ( self.sharedArea, self.controlScript ) )
        # if not result['OK']:
        #  self.log.warn( 'Failed chmod control script: %s' % result['Message'][1] )
        #  return result
        #status, output, _error = result['Value']
        # if status != 0:
        #  if status == -1:
        #    self.log.warn( 'Timeout while chmod control script' )
        #    return S_ERROR( 'Timeout while chmod control script' )
        #  else:
        #    self.log.warn( 'Failed uploading chmod script: %s' % output )
        #    return S_ERROR( 'Failed uploading chmod script' )

        return S_OK()

    def __executeHostCommand(self, command, options, ssh=None, host=None):
        """Run a method of the remote 'execute_batch' control script.

        The options dictionary is augmented with the CE configuration,
        JSON-encoded and URL-quoted so it survives shell quoting; the remote
        script replies with a JSON structure after a start-of-output marker.
        """
        if not ssh:
            ssh = SSH(host=host, parameters=self.ceParameters)

        options['BatchSystem'] = self.batchSystem
        options['Method'] = command
        options['SharedDir'] = self.sharedArea
        options['OutputDir'] = self.batchOutput
        options['ErrorDir'] = self.batchError
        options['WorkDir'] = self.workArea
        options['InfoDir'] = self.infoArea
        options['ExecutionContext'] = self.execution
        options['User'] = self.user
        options['Queue'] = self.queue

        options = json.dumps(options)
        options = urllib.quote(options)

        cmd = "bash --login -c 'python %s/execute_batch %s'" % (self.sharedArea, options)

        self.log.verbose('CE submission command: %s' % cmd)

        result = ssh.sshCall(120, cmd)
        if not result['OK']:
            self.log.error('%s CE job submission failed' % self.ceType, result['Message'])
            return result

        sshStatus = result['Value'][0]
        sshStdout = result['Value'][1]
        sshStderr = result['Value'][2]

        # Examine results of the job submission
        if sshStatus == 0:
            output = sshStdout.strip().replace('\r', '').strip()
            try:
                # Discard login banners etc. before the marker line
                # (42 == len('============= Start output ===============')).
                index = output.index('============= Start output ===============')
                output = output[index + 42:]
            except BaseException:
                return S_ERROR("Invalid output from remote command: %s" % output)
            try:
                output = urllib.unquote(output)
                result = json.loads(output)
                if isinstance(result, basestring) and result.startswith('Exception:'):
                    return S_ERROR(result)
                return S_OK(result)
            except BaseException:
                return S_ERROR('Invalid return structure from job submission')
        else:
            return S_ERROR('\n'.join([sshStdout, sshStderr]))

    def submitJob(self, executableFile, proxy, numberOfJobs=1):
        """Submit numberOfJobs copies of the executable to the remote host.

        :param str executableFile: path of the local executable to submit
        :param proxy: proxy string to bundle with the executable, or None/empty
        :param int numberOfJobs: number of identical jobs to submit
        """
        # self.log.verbose( "Executable file path: %s" % executableFile )
        # 5 == R_OK | X_OK: make sure the file is readable and executable
        if not os.access(executableFile, 5):
            os.chmod(executableFile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)

        # if no proxy is supplied, the executable can be submitted directly
        # otherwise a wrapper script is needed to get the proxy to the execution node
        # The wrapper script makes debugging more complicated and thus it is
        # recommended to transfer a proxy inside the executable if possible.
        if proxy:
            self.log.verbose('Setting up proxy for payload')
            wrapperContent = bundleProxy(executableFile, proxy)
            name = writeScript(wrapperContent, os.getcwd())
            submitFile = name
        else:  # no proxy
            submitFile = executableFile

        result = self._submitJobToHost(submitFile, numberOfJobs)
        if proxy:
            # The wrapper is a temporary file; clean it up after submission
            os.remove(submitFile)

        return result

    def _submitJobToHost(self, executableFile, numberOfJobs, host=None):
        """ Submit prepared executable to the given host
        """
        ssh = SSH(host=host, parameters=self.ceParameters)
        # Copy the executable
        submitFile = '%s/%s' % (self.executableArea, os.path.basename(executableFile))
        result = ssh.scpCall(30, executableFile, submitFile, postUploadCommand='chmod +x %s' % submitFile)
        if not result['OK']:
            return result

        # One 8-character stamp per job; stamps identify jobs on the remote side
        jobStamps = []
        for _i in xrange(numberOfJobs):
            jobStamps.append(makeGuid()[:8])

        numberOfProcessors = self.ceParameters.get('NumberOfProcessors', 1)
        wholeNode = self.ceParameters.get('WholeNode', False)

        # Collect command options
        commandOptions = {'Executable': submitFile,
                          'NJobs': numberOfJobs,
                          'SubmitOptions': self.submitOptions,
                          'JobStamps': jobStamps,
                          'WholeNode': wholeNode,
                          'NumberOfProcessors': numberOfProcessors,
                          'Preamble': self.preamble}
        resultCommand = self.__executeHostCommand('submitJob', commandOptions, ssh=ssh, host=host)
        if not resultCommand['OK']:
            return resultCommand

        result = resultCommand['Value']
        if result['Status'] != 0:
            return S_ERROR('Failed job submission: %s' % result['Message'])
        else:
            batchIDs = result['Jobs']
            if batchIDs:
                ceHost = host
                if host is None:
                    ceHost = self.ceName
                # Job IDs are URLs of the form <cetype><batch>://<host>/<stamp>
                jobIDs = ['%s%s://%s/%s' % (self.ceType.lower(), self.batchSystem.lower(), ceHost, _id) for _id in batchIDs]
            else:
                return S_ERROR('No jobs IDs returned')

        result = S_OK(jobIDs)
        self.submittedJobs += len(batchIDs)

        return result

    def killJob(self, jobIDList):
        """ Kill a bunch of jobs
        """
        if isinstance(jobIDList, basestring):
            jobIDList = [jobIDList]
        return self._killJobOnHost(jobIDList)

    def _killJobOnHost(self, jobIDList, host=None):
        """ Kill the jobs for the given list of job IDs
        """
        # Map the remote job stamp (last URL path component) back to the
        # full DIRAC job ID.
        jobDict = {}
        for job in jobIDList:
            stamp = os.path.basename(urlparse(job).path)
            jobDict[stamp] = job
        stampList = jobDict.keys()

        commandOptions = {'JobIDList': stampList, 'User': self.user}
        resultCommand = self.__executeHostCommand('killJob', commandOptions, host=host)
        if not resultCommand['OK']:
            return resultCommand

        result = resultCommand['Value']
        if result['Status'] != 0:
            return S_ERROR('Failed job kill: %s' % result['Message'])

        if result['Failed']:
            return S_ERROR('%d jobs failed killing' % len(result['Failed']))

        return S_OK(len(result['Successful']))

    def _getHostStatus(self, host=None):
        """ Get jobs running at a given host
        """
        resultCommand = self.__executeHostCommand('getCEStatus', {}, host=host)
        if not resultCommand['OK']:
            return resultCommand

        result = resultCommand['Value']
        if result['Status'] != 0:
            return S_ERROR('Failed to get CE status: %s' % result['Message'])

        return S_OK(result)

    def getCEStatus(self, jobIDList=None):
        """ Method to return information on running and pending jobs.
        """
        # S_OK() returns a dict-like structure; extra keys are attached to it
        result = S_OK()
        result['SubmittedJobs'] = self.submittedJobs
        result['RunningJobs'] = 0
        result['WaitingJobs'] = 0

        resultHost = self._getHostStatus()
        if not resultHost['OK']:
            return resultHost
        result['RunningJobs'] = resultHost['Value'].get('Running', 0)
        result['WaitingJobs'] = resultHost['Value'].get('Waiting', 0)
        if "AvailableCores" in resultHost['Value']:
            result['AvailableCores'] = resultHost['Value']['AvailableCores']
        self.log.verbose('Waiting Jobs: ', result['WaitingJobs'])
        self.log.verbose('Running Jobs: ', result['RunningJobs'])

        return result

    def getJobStatus(self, jobIDList):
        """ Get the status information for the given list of jobs
        """
        return self._getJobStatusOnHost(jobIDList)

    def _getJobStatusOnHost(self, jobIDList, host=None):
        """ Get the status information for the given list of jobs
        """
        resultDict = {}
        # Map remote job stamps back to the full DIRAC job IDs
        jobDict = {}
        for job in jobIDList:
            stamp = os.path.basename(urlparse(job).path)
            jobDict[stamp] = job
        stampList = jobDict.keys()

        # Query in chunks to keep the remote command line bounded
        for jobList in breakListIntoChunks(stampList, 100):
            resultCommand = self.__executeHostCommand('getJobStatus', {'JobIDList': jobList}, host=host)
            if not resultCommand['OK']:
                return resultCommand

            result = resultCommand['Value']
            if result['Status'] != 0:
                return S_ERROR('Failed to get job status: %s' % result['Message'])

            for stamp in result['Jobs']:
                resultDict[jobDict[stamp]] = result['Jobs'][stamp]

        return S_OK(resultDict)

    def _getJobOutputFiles(self, jobID, host=None):
        """ Get output file names for the specific CE
        """
        jobStamp = os.path.basename(urlparse(jobID).path)
        host = urlparse(jobID).hostname

        if 'OutputTemplate' in self.ceParameters:
            self.outputTemplate = self.ceParameters['OutputTemplate']
            self.errorTemplate = self.ceParameters['ErrorTemplate']

        if self.outputTemplate:
            output = self.outputTemplate % jobStamp
            error = self.errorTemplate % jobStamp
        elif 'OutputTemplate' in self.ceParameters:
            # NOTE(review): unreachable — the same key was already handled by
            # the preceding 'if', which makes self.outputTemplate truthy.
            self.outputTemplate = self.ceParameters['OutputTemplate']
            self.errorTemplate = self.ceParameters['ErrorTemplate']
            output = self.outputTemplate % jobStamp
            error = self.errorTemplate % jobStamp
        elif hasattr(self.batch, 'getJobOutputFiles'):
            # Ask the batch adaptor on the remote host for the file names
            resultCommand = self.__executeHostCommand('getJobOutputFiles', {'JobIDList': [jobStamp]}, host=host)
            if not resultCommand['OK']:
                return resultCommand

            result = resultCommand['Value']
            if result['Status'] != 0:
                return S_ERROR('Failed to get job output files: %s' % result['Message'])

            if 'OutputTemplate' in result:
                self.outputTemplate = result['OutputTemplate']
                self.errorTemplate = result['ErrorTemplate']

            output = result['Jobs'][jobStamp]['Output']
            error = result['Jobs'][jobStamp]['Error']
        else:
            # Default naming convention: <stamp>.out / <stamp>.err
            output = '%s/%s.out' % (self.batchOutput, jobStamp)
            error = '%s/%s.err' % (self.batchError, jobStamp)

        return S_OK((jobStamp, host, output, error))

    def getJobOutput(self, jobID, localDir=None):
        """ Get the specified job standard output and error files. If the localDir is provided,
        the output is returned as file in this directory. Otherwise, the output is returned
        as strings.
        """
        result = self._getJobOutputFiles(jobID)
        if not result['OK']:
            return result

        jobStamp, _host, outputFile, errorFile = result['Value']

        self.log.verbose('Getting output for jobID %s' % jobID)

        if localDir:
            localOutputFile = '%s/%s.out' % (localDir, jobStamp)
            localErrorFile = '%s/%s.err' % (localDir, jobStamp)
        else:
            # 'Memory' makes scpCall return the file contents in the result
            localOutputFile = 'Memory'
            localErrorFile = 'Memory'

        host = urlparse(jobID).hostname
        ssh = SSH(parameters=self.ceParameters, host=host)
        result = ssh.scpCall(30, localOutputFile, outputFile, upload=False)
        if not result['OK']:
            return result

        output = result['Value'][1]
        if localDir:
            output = localOutputFile

        result = ssh.scpCall(30, localErrorFile, errorFile, upload=False)
        if not result['OK']:
            return result

        error = result['Value'][1]
        if localDir:
            error = localErrorFile

        return S_OK((output, error))
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| petricm/DIRAC | Resources/Computing/SSHComputingElement.py | Python | gpl-3.0 | 26,783 | [
"DIRAC"
] | 9494631d5be20c19fbb4096d9a15c7fc257f7f5e55fd4e449a4bf6d790c601cb |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from .ExodusPlugin import ExodusPlugin
class BackgroundPlugin(QtWidgets.QWidget, ExodusPlugin):
    """
    Plugin responsible for background colors.

    This plugin only contains menu items, see the addToMenu method.
    """

    #: Emitted when the window needs updated.
    windowRequiresUpdate = QtCore.pyqtSignal()

    #: pyqtSignal: Emitted when the chigger objects options are changed
    windowOptionsChanged = QtCore.pyqtSignal(dict)

    #: pyqtSignal: Emitted when the colorbar options are changed
    colorbarOptionsChanged = QtCore.pyqtSignal(dict)

    #: pyqtSignal: Emitted when the result options are changed
    resultOptionsChanged = QtCore.pyqtSignal(dict)

    def __init__(self, **kwargs):
        """Register background preferences and initialize menu-action state."""
        super(BackgroundPlugin, self).__init__(**kwargs)

        # Persistent user preferences for the gradient/solid background colors
        self._preferences.addBool("exodus/backgroundGradient",
                                  "Use background gradient",
                                  True,
                                  "Turn on/off the background gradient",
                                  )

        self._preferences.addColor("exodus/gradientTopColor",
                                   "Background top gradient color",
                                   QtGui.QColor(111, 111, 111),
                                   "Set the top gradient color",
                                   )

        self._preferences.addColor("exodus/gradientBottomColor",
                                   "Background bottom gradient color",
                                   QtGui.QColor(180, 180, 180),
                                   "Set the top gradient color",
                                   )

        self._preferences.addColor("exodus/solidBackgroundColor",
                                   "Solid Background color",
                                   QtGui.QColor(111, 111, 111),
                                   "Solid Background color",
                                   )

        # Background Toggle action (see addToMenu)
        self.hide()
        self.GradientToggle = None
        self.BlackPreset = None
        self.WhitePreset = None
        self.GradientTopColor = None
        self.GradientBottomColor = None
        self.SolidColor = None
        # NOTE(review): kwargs.pop happens after super().__init__(**kwargs)
        # already received the dict — confirm 'set_result_color' is tolerated
        # by the base classes.
        self._set_result_color = kwargs.pop('set_result_color', False)
        # Remembered toggle states, restored when a preset is un-checked
        self._gradient_state = True
        self._black_font_state = False

        # Default colors
        self._top = QtGui.QColor(self._preferences.value("exodus/gradientTopColor"))
        self._bottom = QtGui.QColor(self._preferences.value("exodus/gradientBottomColor"))
        self._solid = QtGui.QColor(self._preferences.value("exodus/solidBackgroundColor"))

        # Setup this widget
        self.setup()

    def _callbackGradientTopColor(self):
        """
        Callback of for selecting top color of gradient
        """
        dialog = QtWidgets.QColorDialog()
        c = dialog.getColor(initial=self._top, title='Select top gradient color')
        if c.isValid():
            self._top = c
            self.updateOptions()

    def _prefCallbackGradientTopColor(self, value):
        """
        Updates top color when preference saved.
        """
        self._top = QtGui.QColor(value)
        self.updateOptions()
        self.windowRequiresUpdate.emit()

    def _callbackGradientBottomColor(self):
        """
        Callback for selecting bottom color of gradient
        """
        dialog = QtWidgets.QColorDialog()
        c = dialog.getColor(initial=self._bottom, title='Select bottom gradient color')
        if c.isValid():
            self._bottom = c
            self.updateOptions()

    def _prefCallbackGradientBottomColor(self, value):
        """
        Updates bottom color when preference saved.
        """
        self._bottom = QtGui.QColor(value)
        self.updateOptions()
        self.windowRequiresUpdate.emit()

    def _callbackSolidColor(self):
        """
        Callback for selecting solid color.
        """
        dialog = QtWidgets.QColorDialog()
        c = dialog.getColor(initial=self._solid, title='Select solid color')
        if c.isValid():
            self._solid = c
            self.updateOptions()

    def _prefCallbackGradientSolidColor(self, value):
        """
        Updates solid color when preference saved.
        """
        self._solid = QtGui.QColor(value)
        self.updateOptions()
        self.windowRequiresUpdate.emit()

    def _prefCallbackBackgroundGradient(self, value):
        """
        Updates top color when preference saved.
        """
        self.GradientToggle.setChecked(value)
        self.updateOptions()
        self.windowRequiresUpdate.emit()

    def onSetupWindow(self, *args):
        """
        Update RenderWindow options.
        """
        self.updateWindowOptions()

    def onSetupResult(self, result):
        """
        Update ExodusResult options.
        """
        self.updateResultOptions()

    def onSetupColorbar(self, colorbar):
        """
        Update ExodusColorbar options.
        """
        # Only show the black-font toggle when the colorbar itself is visible
        self.ColorbarBlackFontToggle.setVisible(colorbar[0].getOption('visible'))
        self.updateOptions()

    def onSetEnableWidget(self, value):
        """
        Enable/disable the menu items.
        """
        super(BackgroundPlugin, self).onSetEnableWidget(value)
        self.GradientToggle.setEnabled(value)
        self.BlackPreset.setEnabled(value)
        self.WhitePreset.setEnabled(value)
        self.ColorbarBlackFontToggle.setEnabled(value)
        self.TopGradientColor.setEnabled(value)
        self.BottomGradientColor.setEnabled(value)
        self.SolidColor.setEnabled(value)

    def addToMenu(self, menu):
        """
        Create a toggle for the background color.
        """
        submenu = menu.addMenu('Background')

        toggle = self._preferences.value("exodus/backgroundGradient")
        self.GradientToggle = submenu.addAction("Gradient")
        self.GradientToggle.setCheckable(True)
        self.GradientToggle.setChecked(toggle)
        self.GradientToggle.toggled.connect(self._callbackGradientToggle)

        submenu.addSeparator()
        self.BlackPreset = submenu.addAction('Black (preset)')
        self.BlackPreset.setCheckable(True)
        self.BlackPreset.setChecked(False)
        self.BlackPreset.toggled.connect(self._callbackBlackPreset)

        self.WhitePreset = submenu.addAction('White (preset)')
        self.WhitePreset.setCheckable(True)
        self.WhitePreset.setChecked(False)
        self.WhitePreset.toggled.connect(self._callbackWhitePreset)

        submenu.addSeparator()
        self.ColorbarBlackFontToggle = submenu.addAction("Use Black Font/Mesh")
        self.ColorbarBlackFontToggle.setCheckable(True)
        self.ColorbarBlackFontToggle.setChecked(False)
        self.ColorbarBlackFontToggle.toggled.connect(self._callbackColorbarBlackFontToggle)

        submenu.addSeparator()
        self.TopGradientColor = submenu.addAction("Select Top Gradient Color")
        self.TopGradientColor.triggered.connect(self._callbackGradientTopColor)

        self.BottomGradientColor = submenu.addAction("Select Bottom Gradient Color")
        self.BottomGradientColor.triggered.connect(self._callbackGradientBottomColor)

        self.SolidColor = submenu.addAction("Select Solid Color")
        self.SolidColor.triggered.connect(self._callbackSolidColor)

        # Disabled until a result is loaded (see onSetEnableWidget)
        self.onSetEnableWidget(False)

    def updateResultOptions(self):
        """
        Apply ExodusResult options.
        """
        # Mesh color tracks the black-font toggle, only when requested via
        # the 'set_result_color' construction flag.
        if self.ColorbarBlackFontToggle.isChecked() and self._set_result_color:
            self.resultOptionsChanged.emit({'color':[0,0,0]})
        elif self._set_result_color:
            self.resultOptionsChanged.emit({'color':[1,1,1]})

    def updateColorbarOptions(self):
        """
        Apply the ExodusColorbar options.
        """
        if self.ColorbarBlackFontToggle.isChecked():
            self.colorbarOptionsChanged.emit({'primary':dict(font_color=[0,0,0])})
        else:
            self.colorbarOptionsChanged.emit({'primary':dict(font_color=[1,1,1])})

    def updateWindowOptions(self):
        """
        Apply the RenderWindow options.
        """
        # Pick background/background2 from whichever mode is active:
        # gradient, black preset, white preset, or custom solid color.
        if self.GradientToggle.isChecked():
            top = self._top.getRgb()
            bottom = self._bottom.getRgb()
            background = [bottom[0]/255., bottom[1]/255., bottom[2]/255.]
            background2 = [top[0]/255., top[1]/255., top[2]/255.]
        elif self.BlackPreset.isChecked():
            background = [0, 0, 0]
            background2 = None
        elif self.WhitePreset.isChecked():
            background = [1, 1, 1]
            background2 = None
        else:
            solid = self._solid.getRgb()
            background = [solid[0]/255., solid[1]/255., solid[2]/255.]
            background2 = None

        self.windowOptionsChanged.emit({'background':background,
                                        'background2':background2,
                                        'gradient_background':self.GradientToggle.isChecked()})

    def updateOptions(self):
        """
        Apply options to all chigger objects.
        """
        self.updateResultOptions()
        self.updateColorbarOptions()
        self.updateWindowOptions()

    def _setupBackgroundSelect(self, qobject):
        """
        Setup the background toggle options.
        """
        # NOTE(review): _callbackBackgroundSelect is not defined in this class
        # (visible portion) — confirm this setup method is actually used.
        qobject.addItem('Gradient')
        qobject.addItem('Black')
        qobject.addItem('White')
        qobject.addItem('Solid (custom)')
        qobject.currentIndexChanged.connect(self._callbackBackgroundSelect)

    def _callbackGradientToggle(self, value):
        """
        Called when the gradient toggle is checked/Unchecked.
        """
        self.GradientToggle.setChecked(value)
        self._gradient_state = value
        # Turning the gradient on clears both presets and the black font;
        # blockSignals prevents their callbacks from re-firing recursively.
        if value:
            self.BlackPreset.blockSignals(True)
            self.BlackPreset.setChecked(False)
            self.BlackPreset.blockSignals(False)

            self.WhitePreset.blockSignals(True)
            self.WhitePreset.setChecked(False)
            self.WhitePreset.blockSignals(False)

            self.ColorbarBlackFontToggle.blockSignals(True)
            self.ColorbarBlackFontToggle.setChecked(False)
            self.ColorbarBlackFontToggle.blockSignals(False)

        self.updateOptions()
        self.windowRequiresUpdate.emit()

    def _callbackBlackPreset(self, value):
        """
        Called when the black preset is toggled.
        """
        self.BlackPreset.setChecked(value)
        if value:
            # Black preset is exclusive: clear gradient, white preset and font
            self.GradientToggle.blockSignals(True)
            self.GradientToggle.setChecked(False)
            self.GradientToggle.blockSignals(False)

            self.WhitePreset.blockSignals(True)
            self.WhitePreset.setChecked(False)
            self.WhitePreset.blockSignals(False)

            self.ColorbarBlackFontToggle.blockSignals(True)
            self.ColorbarBlackFontToggle.setChecked(False)
            self.ColorbarBlackFontToggle.blockSignals(False)
        else:
            # Restore the remembered pre-preset state
            self.GradientToggle.blockSignals(True)
            self.GradientToggle.setChecked(self._gradient_state)
            self.GradientToggle.blockSignals(False)

            self.ColorbarBlackFontToggle.blockSignals(True)
            self.ColorbarBlackFontToggle.setChecked(self._black_font_state)
            self.ColorbarBlackFontToggle.blockSignals(False)

        self.updateOptions()
        self.windowRequiresUpdate.emit()

    def _callbackWhitePreset(self, value):
        """
        Called when the white preset is toggled.
        """
        self.WhitePreset.setChecked(value)
        if value:
            # White preset is exclusive and forces black font for contrast
            self.GradientToggle.blockSignals(True)
            self.GradientToggle.setChecked(False)
            self.GradientToggle.blockSignals(False)

            self.BlackPreset.blockSignals(True)
            self.BlackPreset.setChecked(False)
            self.BlackPreset.blockSignals(False)

            self.ColorbarBlackFontToggle.blockSignals(True)
            self.ColorbarBlackFontToggle.setChecked(True)
            self.ColorbarBlackFontToggle.blockSignals(False)
        else:
            # Restore the remembered pre-preset state
            self.GradientToggle.blockSignals(True)
            self.GradientToggle.setChecked(self._gradient_state)
            self.GradientToggle.blockSignals(False)

            self.ColorbarBlackFontToggle.blockSignals(True)
            self.ColorbarBlackFontToggle.setChecked(self._black_font_state)
            self.ColorbarBlackFontToggle.blockSignals(False)

        self.updateOptions()
        self.windowRequiresUpdate.emit()

    def _callbackColorbarBlackFontToggle(self, value):
        """
        Called when the drop down black font option is toggled.
        """
        self._black_font_state = value
        self.ColorbarBlackFontToggle.setChecked(value)
        self.updateOptions()
        self.windowRequiresUpdate.emit()
def main(size=None):
    """
    Run the BackgroundPlugin all by its lonesome.
    """
    from peacock.ExodusViewer.ExodusPluginManager import ExodusPluginManager
    from peacock.ExodusViewer.plugins.VTKWindowPlugin import VTKWindowPlugin
    from peacock.ExodusViewer.plugins.ColorbarPlugin import ColorbarPlugin

    # Assemble the plugin manager; the VTK window factory captures 'size'.
    plugin_factories = [lambda: VTKWindowPlugin(size=size),
                        BackgroundPlugin,
                        ColorbarPlugin]
    widget = ExodusPluginManager(plugins=plugin_factories)

    # Host the manager in a main window with a non-native menu bar.
    window = QtWidgets.QMainWindow()
    window.setCentralWidget(widget)
    menu_bar = window.menuBar()
    menu_bar.setNativeMenuBar(False)
    widget.addToMainMenu(menu_bar)
    window.show()

    return widget, widget.VTKWindowPlugin, window
if __name__ == '__main__':
    # Manual test entry point: open the plugin stack on a sample Exodus
    # result file and start the Qt event loop.
    from peacock.utils import Testing
    app = QtWidgets.QApplication(sys.argv)
    filename = Testing.get_chigger_input('mug_blocks_out.e')
    widget, window, main_window = main()
    window.onSetFilename(filename)
    window.onSetVariable('diffused')
    window.onWindowRequiresUpdate()
    sys.exit(app.exec_())
| nuclear-wizard/moose | python/peacock/ExodusViewer/plugins/BackgroundPlugin.py | Python | lgpl-2.1 | 14,075 | [
"MOOSE"
] | f1e7de7ff9f6c0296a880fd3a06529e3c6a31dce4b7787070dde0d281c2db85b |
# Natural Language Toolkit: Corpus Reader Utilities
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import os
import bisect
import re
import tempfile
from functools import reduce
try:
import cPickle as pickle
except ImportError:
import pickle
# Use the c version of ElementTree, which is faster, if possible:
try: from xml.etree import cElementTree as ElementTree
except ImportError: from xml.etree import ElementTree
from nltk.compat import string_types, text_type
from nltk.tokenize import wordpunct_tokenize
from nltk.internals import slice_bounds
from nltk.data import PathPointer, FileSystemPathPointer, ZipFilePathPointer
from nltk.data import SeekableUnicodeStreamReader
from nltk.util import AbstractLazySequence, LazySubsequence, LazyConcatenation, py25
######################################################################
#{ Corpus View
######################################################################
class StreamBackedCorpusView(AbstractLazySequence):
    """
    A 'view' of a corpus file, which acts like a sequence of tokens:
    it can be accessed by index, iterated over, etc. However, the
    tokens are only constructed as-needed -- the entire corpus is
    never stored in memory at once.

    The constructor to ``StreamBackedCorpusView`` takes two arguments:
    a corpus fileid (specified as a string or as a ``PathPointer``);
    and a block reader. A "block reader" is a function that reads
    zero or more tokens from a stream, and returns them as a list. A
    very simple example of a block reader is:

        >>> def simple_block_reader(stream):
        ...     return stream.readline().split()

    This simple block reader reads a single line at a time, and
    returns a single token (consisting of a string) for each
    whitespace-separated substring on the line.

    When deciding how to define the block reader for a given
    corpus, careful consideration should be given to the size of
    blocks handled by the block reader. Smaller block sizes will
    increase the memory requirements of the corpus view's internal
    data structures (by 2 integers per block). On the other hand,
    larger block sizes may decrease performance for random access to
    the corpus. (But note that larger block sizes will *not*
    decrease performance for iteration.)

    Internally, ``CorpusView`` maintains a partial mapping from token
    index to file position, with one entry per block. When a token
    with a given index *i* is requested, the ``CorpusView`` constructs
    it as follows:

    1. First, it searches the toknum/filepos mapping for the token
       index closest to (but less than or equal to) *i*.

    2. Then, starting at the file position corresponding to that
       index, it reads one block at a time using the block reader
       until it reaches the requested token.

    The toknum/filepos mapping is created lazily: it is initially
    empty, but every time a new block is read, the block's
    initial token is added to the mapping. (Thus, the toknum/filepos
    map has one entry per block.)

    In order to increase efficiency for random access patterns that
    have high degrees of locality, the corpus view may cache one or
    more blocks.

    :note: Each ``CorpusView`` object internally maintains an open file
        object for its underlying corpus file. This file should be
        automatically closed when the ``CorpusView`` is garbage collected,
        but if you wish to close it manually, use the ``close()``
        method. If you access a ``CorpusView``'s items after it has been
        closed, the file object will be automatically re-opened.

    :warning: If the contents of the file are modified during the
        lifetime of the ``CorpusView``, then the ``CorpusView``'s behavior
        is undefined.

    :warning: If a unicode encoding is specified when constructing a
        ``CorpusView``, then the block reader may only call
        ``stream.seek()`` with offsets that have been returned by
        ``stream.tell()``; in particular, calling ``stream.seek()`` with
        relative offsets, or with offsets based on string lengths, may
        lead to incorrect behavior.

    :ivar _block_reader: The function used to read
        a single block from the underlying file stream.
    :ivar _toknum: A list containing the token index of each block
        that has been processed. In particular, ``_toknum[i]`` is the
        token index of the first token in block ``i``. Together
        with ``_filepos``, this forms a partial mapping between token
        indices and file positions.
    :ivar _filepos: A list containing the file position of each block
        that has been processed. In particular, ``_toknum[i]`` is the
        file position of the first character in block ``i``. Together
        with ``_toknum``, this forms a partial mapping between token
        indices and file positions.
    :ivar _stream: The stream used to access the underlying corpus file.
    :ivar _len: The total number of tokens in the corpus, if known;
        or None, if the number of tokens is not yet known.
    :ivar _eofpos: The character position of the last character in the
        file. This is calculated when the corpus view is initialized,
        and is used to decide when the end of file has been reached.
    :ivar _cache: A cache of the most recently read block. It
        is encoded as a tuple (start_toknum, end_toknum, tokens), where
        start_toknum is the token index of the first token in the block;
        end_toknum is the token index of the first token not in the
        block; and tokens is a list of the tokens in the block.
    """
    def __init__(self, fileid, block_reader=None, startpos=0,
                 encoding='utf8'):
        """
        Create a new corpus view, based on the file ``fileid``, and
        read with ``block_reader``. See the class documentation
        for more information.

        :param fileid: The path to the file that is read by this
            corpus view. ``fileid`` can either be a string or a
            ``PathPointer``.

        :param startpos: The file position at which the view will
            start reading. This can be used to skip over preface
            sections.

        :param encoding: The unicode encoding that should be used to
            read the file's contents. If no encoding is specified,
            then the file's contents will be read as a non-unicode
            string (i.e., a str).
        """
        # If a block reader is supplied it overrides the (abstract)
        # read_block method on this instance.
        if block_reader:
            self.read_block = block_reader
        # Initialize our toknum/filepos mapping.
        self._toknum = [0]
        self._filepos = [startpos]
        self._encoding = encoding
        # We don't know our length (number of tokens) yet.
        self._len = None
        self._fileid = fileid
        self._stream = None
        self._current_toknum = None
        """This variable is set to the index of the next token that
           will be read, immediately before ``self.read_block()`` is
           called. This is provided for the benefit of the block
           reader, which under rare circumstances may need to know
           the current token number."""
        self._current_blocknum = None
        """This variable is set to the index of the next block that
           will be read, immediately before ``self.read_block()`` is
           called. This is provided for the benefit of the block
           reader, which under rare circumstances may need to know
           the current block number."""
        # Find the length of the file.  This is needed by iterate_from()
        # to detect end-of-file.
        try:
            if isinstance(self._fileid, PathPointer):
                self._eofpos = self._fileid.file_size()
            else:
                self._eofpos = os.stat(self._fileid).st_size
        except Exception as exc:
            raise ValueError('Unable to open or access %r -- %s' %
                             (fileid, exc))
        # Maintain a cache of the most recently read block, to
        # increase efficiency of random access.
        self._cache = (-1, -1, None)
    # Read-only access to the underlying fileid.
    fileid = property(lambda self: self._fileid, doc="""
        The fileid of the file that is accessed by this view.

        :type: str or PathPointer""")
    def read_block(self, stream):
        """
        Read a block from the input stream.

        :return: a block of tokens from the input stream
        :rtype: list(any)
        :param stream: an input stream
        :type stream: stream
        """
        raise NotImplementedError('Abstract Method')
    def _open(self):
        """
        Open the file stream associated with this corpus view. This
        will be performed if any value is read from the view
        while its file stream is closed.
        """
        if isinstance(self._fileid, PathPointer):
            self._stream = self._fileid.open(self._encoding)
        elif self._encoding:
            # Wrap the byte stream so seek/tell work with unicode decoding.
            self._stream = SeekableUnicodeStreamReader(
                open(self._fileid, 'rb'), self._encoding)
        else:
            self._stream = open(self._fileid, 'rb')
    def close(self):
        """
        Close the file stream associated with this corpus view. This
        can be useful if you are worried about running out of file
        handles (although the stream should automatically be closed
        upon garbage collection of the corpus view). If the corpus
        view is accessed after it is closed, it will be automatically
        re-opened.
        """
        if self._stream is not None:
            self._stream.close()
        self._stream = None
    def __len__(self):
        # Length is only known after the whole file has been read once.
        if self._len is None:
            # iterate_from() sets self._len when it reaches the end
            # of the file:
            for tok in self.iterate_from(self._toknum[-1]): pass
        return self._len
    def __getitem__(self, i):
        if isinstance(i, slice):
            start, stop = slice_bounds(self, i)
            # Check if it's in the cache.
            offset = self._cache[0]
            if offset <= start and stop <= self._cache[1]:
                return self._cache[2][start-offset:stop-offset]
            # Construct & return the result.
            return LazySubsequence(self, start, stop)
        else:
            # Handle negative indices
            if i < 0: i += len(self)
            if i < 0: raise IndexError('index out of range')
            # Check if it's in the cache.
            offset = self._cache[0]
            if offset <= i < self._cache[1]:
                return self._cache[2][i-offset]
            # Use iterate_from to extract it.
            try:
                return next(self.iterate_from(i))
            except StopIteration:
                raise IndexError('index out of range')
    # If we wanted to be thread-safe, then this method would need to
    # do some locking.
    def iterate_from(self, start_tok):
        # Generator yielding tokens from index start_tok onwards.
        # Start by feeding from the cache, if possible.
        if self._cache[0] <= start_tok < self._cache[1]:
            for tok in self._cache[2][start_tok-self._cache[0]:]:
                yield tok
                start_tok += 1
        # Decide where in the file we should start. If `start` is in
        # our mapping, then we can jump straight to the correct block;
        # otherwise, start at the last block we've processed.
        if start_tok < self._toknum[-1]:
            block_index = bisect.bisect_right(self._toknum, start_tok)-1
            toknum = self._toknum[block_index]
            filepos = self._filepos[block_index]
        else:
            block_index = len(self._toknum)-1
            toknum = self._toknum[-1]
            filepos = self._filepos[-1]
        # Open the stream, if it's not open already.
        if self._stream is None:
            self._open()
        # Each iteration through this loop, we read a single block
        # from the stream.
        while filepos < self._eofpos:
            # Read the next block.
            self._stream.seek(filepos)
            self._current_toknum = toknum
            self._current_blocknum = block_index
            tokens = self.read_block(self._stream)
            assert isinstance(tokens, (tuple, list, AbstractLazySequence)), (
                'block reader %s() should return list or tuple.' %
                self.read_block.__name__)
            num_toks = len(tokens)
            new_filepos = self._stream.tell()
            assert new_filepos > filepos, (
                'block reader %s() should consume at least 1 byte (filepos=%d)' %
                (self.read_block.__name__, filepos))
            # Update our cache.
            self._cache = (toknum, toknum+num_toks, list(tokens))
            # Update our mapping.
            assert toknum <= self._toknum[-1]
            if num_toks > 0:
                block_index += 1
                if toknum == self._toknum[-1]:
                    # First time we've seen this block: record it.
                    assert new_filepos > self._filepos[-1] # monotonic!
                    self._filepos.append(new_filepos)
                    self._toknum.append(toknum+num_toks)
                else:
                    # Check for consistency:
                    assert new_filepos == self._filepos[block_index], (
                        'inconsistent block reader (num chars read)')
                    assert toknum+num_toks == self._toknum[block_index], (
                        'inconsistent block reader (num tokens returned)')
            # If we reached the end of the file, then update self._len
            if new_filepos == self._eofpos:
                self._len = toknum + num_toks
            # Generate the tokens in this block (but skip any tokens
            # before start_tok). Note that between yields, our state
            # may be modified.
            for tok in tokens[max(0, start_tok-toknum):]:
                yield tok
            # If we're at the end of the file, then we're done.
            assert new_filepos <= self._eofpos
            if new_filepos == self._eofpos:
                break
            # Update our indices
            toknum += num_toks
            filepos = new_filepos
        # If we reach this point, then we should know our length.
        assert self._len is not None
        # Enforce closing of stream once we reached end of file
        # We should have reached EOF once we're out of the while loop.
        self.close()
    # Use concat for these, so we can use a ConcatenatedCorpusView
    # when possible.
    def __add__(self, other):
        return concat([self, other])
    def __radd__(self, other):
        return concat([other, self])
    def __mul__(self, count):
        return concat([self] * count)
    def __rmul__(self, count):
        return concat([self] * count)
class ConcatenatedCorpusView(AbstractLazySequence):
    """
    A 'view' of a corpus file that joins together one or more
    ``StreamBackedCorpusViews<StreamBackedCorpusView>``. At most
    one file handle is left open at any time.
    """
    def __init__(self, corpus_views):
        self._pieces = corpus_views
        """A list of the corpus subviews that make up this
        concatenation."""
        self._offsets = [0]
        """A list of offsets, indicating the index at which each
        subview begins. In particular::
            offsets[i] = sum([len(p) for p in pieces[:i]])"""
        self._open_piece = None
        """The most recently accessed corpus subview (or None).
        Before a new subview is accessed, this subview will be closed."""
    def __len__(self):
        # _offsets gains one entry per piece as iteration proceeds; if it
        # isn't complete yet, iterate to the end to fill it in.
        if len(self._offsets) <= len(self._pieces):
            # Iterate to the end of the corpus.
            for tok in self.iterate_from(self._offsets[-1]): pass
        return self._offsets[-1]
    def close(self):
        # Close every subview's underlying file handle.
        for piece in self._pieces:
            piece.close()
    def iterate_from(self, start_tok):
        # Locate the subview that contains token index start_tok.
        piecenum = bisect.bisect_right(self._offsets, start_tok)-1
        while piecenum < len(self._pieces):
            offset = self._offsets[piecenum]
            piece = self._pieces[piecenum]
            # If we've got another piece open, close it first.
            if self._open_piece is not piece:
                if self._open_piece is not None:
                    self._open_piece.close()
                self._open_piece = piece
            # Get everything we can from this piece.
            for tok in piece.iterate_from(max(0, start_tok-offset)):
                yield tok
            # Update the offset table.
            if piecenum+1 == len(self._offsets):
                self._offsets.append(self._offsets[-1] + len(piece))
            # Move on to the next piece.
            piecenum += 1
def concat(docs):
    """
    Concatenate together the contents of multiple documents from a
    single corpus, using an appropriate concatenation function. This
    utility function is used by corpus readers when the user requests
    more than one document at a time.
    """
    if len(docs) == 1:
        return docs[0]
    if len(docs) == 0:
        raise ValueError('concat() expects at least one object!')
    types = set(d.__class__ for d in docs)
    # Plain strings are joined directly.
    if all(isinstance(doc, string_types) for doc in docs):
        return ''.join(docs)
    # Corpus views are wrapped in a single ConcatenatedCorpusView.
    if all(issubclass(typ, (StreamBackedCorpusView, ConcatenatedCorpusView))
           for typ in types):
        return ConcatenatedCorpusView(docs)
    # Other lazy sequences get a lazy concatenation.
    if all(issubclass(typ, AbstractLazySequence) for typ in types):
        return LazyConcatenation(docs)
    # Otherwise, fall back on type-specific strategies (only safe when
    # every document has the same type):
    if len(types) == 1:
        typ = next(iter(types))
        if issubclass(typ, list):
            return reduce((lambda a, b: a + b), docs, [])
        if issubclass(typ, tuple):
            return reduce((lambda a, b: a + b), docs, ())
        if ElementTree.iselement(typ):
            xmltree = ElementTree.Element('documents')
            for doc in docs:
                xmltree.append(doc)
            return xmltree
    # No method found!
    raise ValueError("Don't know how to concatenate types: %r" % types)
######################################################################
#{ Corpus View for Pickled Sequences
######################################################################
class PickleCorpusView(StreamBackedCorpusView):
    """
    A stream backed corpus view for corpus files that consist of
    sequences of serialized Python objects (serialized using
    ``pickle.dump``). One use case for this class is to store the
    result of running feature detection on a corpus to disk. This can
    be useful when performing feature detection is expensive (so we
    don't want to repeat it); but the corpus is too large to store in
    memory. The following example illustrates this technique:

        >>> from nltk.corpus.reader.util import PickleCorpusView
        >>> from nltk.util import LazyMap
        >>> feature_corpus = LazyMap(detect_features, corpus) # doctest: +SKIP
        >>> PickleCorpusView.write(feature_corpus, some_fileid)  # doctest: +SKIP
        >>> pcv = PickleCorpusView(some_fileid) # doctest: +SKIP
    """
    # Number of pickled objects read per call to read_block().
    BLOCK_SIZE = 100
    # Pickle protocol used by write(); -1 selects the highest available.
    PROTOCOL = -1
    def __init__(self, fileid, delete_on_gc=False):
        """
        Create a new corpus view that reads the pickle corpus
        ``fileid``.

        :param delete_on_gc: If true, then ``fileid`` will be deleted
            whenever this object gets garbage-collected.
        """
        self._delete_on_gc = delete_on_gc
        StreamBackedCorpusView.__init__(self, fileid)
    def read_block(self, stream):
        """Read up to ``BLOCK_SIZE`` pickled objects from ``stream``."""
        result = []
        for i in range(self.BLOCK_SIZE):
            try: result.append(pickle.load(stream))
            except EOFError: break
        return result
    def __del__(self):
        """
        If ``delete_on_gc`` was set to true when this
        ``PickleCorpusView`` was created, then delete the corpus view's
        fileid. (This method is called whenever a
        ``PickledCorpusView`` is garbage-collected.)
        """
        # Use a default of False so __del__ can't raise AttributeError
        # when __init__ failed before _delete_on_gc was assigned.
        if getattr(self, '_delete_on_gc', False):
            if os.path.exists(self._fileid):
                try: os.remove(self._fileid)
                except (OSError, IOError): pass
        self.__dict__.clear() # make the garbage collector's job easier
    @classmethod
    def write(cls, sequence, output_file):
        """
        Pickle each item of ``sequence`` to ``output_file``, which may
        be either an open (binary) file object or a filename. If a
        filename is given, the file is opened here and closed before
        returning (the previous implementation leaked the handle).
        """
        if isinstance(output_file, string_types):
            output_file = open(output_file, 'wb')
            try:
                for item in sequence:
                    pickle.dump(item, output_file, cls.PROTOCOL)
            finally:
                output_file.close()
        else:
            # Caller owns the file object, so the caller closes it.
            for item in sequence:
                pickle.dump(item, output_file, cls.PROTOCOL)
    @classmethod
    def cache_to_tempfile(cls, sequence, delete_on_gc=True):
        """
        Write the given sequence to a temporary file as a pickle
        corpus; and then return a ``PickleCorpusView`` view for that
        temporary corpus file.

        :param delete_on_gc: If true, then the temporary file will be
            deleted whenever this object gets garbage-collected.
        """
        try:
            fd, output_file_name = tempfile.mkstemp('.pcv', 'nltk-')
            output_file = os.fdopen(fd, 'wb')
            cls.write(sequence, output_file)
            output_file.close()
            return PickleCorpusView(output_file_name, delete_on_gc)
        except (OSError, IOError) as e:
            raise ValueError('Error while creating temp file: %s' % e)
######################################################################
#{ Block Readers
######################################################################
def read_whitespace_block(stream):
    """Read up to 20 lines from ``stream`` and return their
    whitespace-separated tokens as a single flat list."""
    lines = (stream.readline() for _ in range(20))
    return [token for line in lines for token in line.split()]
def read_wordpunct_block(stream):
    """Read up to 20 lines from ``stream`` and return the concatenated
    ``wordpunct_tokenize`` tokens of those lines."""
    lines = (stream.readline() for _ in range(20))
    return [token for line in lines for token in wordpunct_tokenize(line)]
def read_line_block(stream):
    """Read up to 20 lines from ``stream``, stripping trailing
    newlines; stop early at end of file."""
    collected = []
    for _ in range(20):
        line = stream.readline()
        if not line:
            break
        collected.append(line.rstrip('\n'))
    return collected
def read_blankline_block(stream):
    """Read one blank-line-delimited paragraph from ``stream``.

    Leading blank lines are skipped; the trailing blank line is
    consumed but not included. Returns ``[paragraph]``, or ``[]`` at
    end of file with nothing accumulated.
    """
    paragraph = ''
    while True:
        line = stream.readline()
        if not line:
            # End of file: return whatever was collected, if anything.
            return [paragraph] if paragraph else []
        if not line.strip():
            # Blank line: emit the paragraph collected so far (a blank
            # line before any content is simply skipped).
            if paragraph:
                return [paragraph]
        else:
            paragraph += line
def read_alignedsent_block(stream):
    """Read one aligned-sentence block from ``stream``.

    Separator lines (starting with '=') and blank lines are skipped.
    Content lines are accumulated until an alignment line of the form
    ``\\d+-\\d+ ...`` terminates the block. Returns ``[block]``, or
    ``[]`` at end of file with nothing accumulated.

    Bug fix: the end-of-file test now runs *before* ``line[0]`` is
    inspected; the original indexed into the empty EOF string and
    raised IndexError instead of returning.
    """
    s = ''
    while True:
        line = stream.readline()
        # End of file (must be checked before line[0] is touched):
        if not line:
            return [s] if s else []
        # Separator or blank line: skip.
        if line[0] == '=' or line[0] == '\n' or line[:2] == '\r\n':
            continue
        # Content line: accumulate; an alignment line ends the block.
        s += line
        if re.match(r'^\d+-\d+', line) is not None:
            return [s]
def read_regexp_block(stream, start_re, end_re=None):
    """
    Read a sequence of tokens from a stream, where tokens begin with
    lines that match ``start_re``. If ``end_re`` is specified, then
    tokens end with lines that match ``end_re``; otherwise, tokens end
    whenever the next line matching ``start_re`` or EOF is found.
    """
    # Skip ahead until a line matches the start regexp; give up at EOF.
    while True:
        first = stream.readline()
        if not first:
            return []  # end of file.
        if re.match(start_re, first):
            break
    collected = [first]
    # Accumulate lines until the token ends (end_re match, a fresh
    # start_re match, or end of file).
    while True:
        saved_pos = stream.tell()
        line = stream.readline()
        # End of file:
        if not line:
            return [''.join(collected)]
        # End of token (the end_re line itself is consumed but excluded):
        if end_re is not None and re.match(end_re, line):
            return [''.join(collected)]
        # Start of the next token: rewind so a later call sees it.
        if end_re is None and re.match(start_re, line):
            stream.seek(saved_pos)
            return [''.join(collected)]
        # Anything else is part of the token.
        collected.append(line)
def read_sexpr_block(stream, block_size=16384, comment_char=None):
    """
    Read a sequence of s-expressions from the stream, and leave the
    stream's file position at the end the last complete s-expression
    read. This function will always return at least one s-expression,
    unless there are no more s-expressions in the file.

    If the file ends in in the middle of an s-expression, then that
    incomplete s-expression is returned when the end of the file is
    reached.

    :param block_size: The default block size for reading. If an
        s-expression is longer than one block, then more than one
        block will be read.
    :param comment_char: A character that marks comments. Any lines
        that begin with this character will be stripped out.
        (If spaces or tabs precede the comment character, then the
        line will not be stripped.)
    """
    # Remember where this block starts so character offsets can be
    # converted back into absolute stream positions below.
    start = stream.tell()
    block = stream.read(block_size)
    encoding = getattr(stream, 'encoding', None)
    assert encoding is not None or isinstance(block, text_type)
    if encoding not in (None, 'utf-8'):
        import warnings
        warnings.warn('Parsing may fail, depending on the properties '
                      'of the %s encoding!' % encoding)
        # (e.g., the utf-16 encoding does not work because it insists
        # on adding BOMs to the beginning of encoded strings.)
    if comment_char:
        COMMENT = re.compile('(?m)^%s.*$' % re.escape(comment_char))
    while True:
        try:
            # If we're stripping comments, then make sure our block ends
            # on a line boundary; and then replace any comments with
            # space characters. (We can't just strip them out -- that
            # would make our offset wrong.)
            if comment_char:
                block += stream.readline()
                block = re.sub(COMMENT, _sub_space, block)
            # Read the block.  _parse_sexpr_block raises
            # ValueError('Block too small') if the first s-expression
            # is incomplete, which is handled below by reading more.
            tokens, offset = _parse_sexpr_block(block)
            # Skip whitespace
            offset = re.compile(r'\s*').search(block, offset).end()
            # Move to the end position.  With an encoding, the offset is
            # in characters, so re-encode the prefix to count bytes.
            if encoding is None:
                stream.seek(start+offset)
            else:
                stream.seek(start+len(block[:offset].encode(encoding)))
            # Return the list of tokens we processed
            return tokens
        except ValueError as e:
            if e.args[0] == 'Block too small':
                next_block = stream.read(block_size)
                if next_block:
                    block += next_block
                    continue
                else:
                    # The file ended mid-sexpr -- return what we got.
                    return [block.strip()]
            else: raise
def _sub_space(m):
    """Helper function: given a regexp match, return a run of spaces
    exactly as long as the matched text."""
    return ' ' * len(m.group())
def _parse_sexpr_block(block):
    # Partition ``block`` into complete s-expression tokens.  Returns a
    # tuple (tokens, end) where ``end`` is the offset just past the last
    # complete token.  Raises ValueError('Block too small') if the block
    # ends before the *first* token is complete -- the caller
    # (read_sexpr_block) responds by reading more data and retrying.
    tokens = []
    start = end = 0
    while end < len(block):
        # Find the start of the next token (first non-whitespace char).
        m = re.compile(r'\S').search(block, end)
        if not m:
            return tokens, end
        start = m.start()
        # Case 1: sexpr is not parenthesized.
        if m.group() != '(':
            # Token runs until the next whitespace or open paren.
            m2 = re.compile(r'[\s(]').search(block, start)
            if m2:
                end = m2.start()
            else:
                # Token may continue past the block boundary.
                if tokens: return tokens, end
                raise ValueError('Block too small')
        # Case 2: parenthesized sexpr.
        else:
            # Scan for the matching close paren by tracking nesting depth.
            nesting = 0
            for m in re.compile(r'[()]').finditer(block, start):
                if m.group()=='(': nesting += 1
                else: nesting -= 1
                if nesting == 0:
                    end = m.end()
                    break
            else:
                # Ran out of input before the parens balanced.
                if tokens: return tokens, end
                raise ValueError('Block too small')
        tokens.append(block[start:end])
    return tokens, end
######################################################################
#{ Finding Corpus Items
######################################################################
def find_corpus_fileids(root, regexp):
    """Return a sorted list of the file identifiers under ``root`` (a
    ``PathPointer``) whose names fully match ``regexp``."""
    if not isinstance(root, PathPointer):
        raise TypeError('find_corpus_fileids: expected a PathPointer')
    # Anchor the pattern so the whole name must match.
    regexp += '$'
    if isinstance(root, ZipFilePathPointer):
        # Scan the zipfile's namelist, dropping directory entries
        # (which end in '/').
        entry_len = len(root.entry)
        candidates = (name[entry_len:] for name in root.zipfile.namelist()
                      if not name.endswith('/'))
        return sorted(name for name in candidates if re.match(regexp, name))
    elif isinstance(root, FileSystemPathPointer):
        matches = []
        # workaround for py25 which doesn't support followlinks
        walk_kwargs = {} if py25() else {'followlinks': True}
        for dirname, subdirs, fileids in os.walk(root.path, **walk_kwargs):
            prefix = ''.join('%s/' % p for p in _path_from(root.path, dirname))
            matches.extend(prefix + fileid for fileid in fileids
                           if re.match(regexp, prefix + fileid))
            # Don't visit svn directories:
            if '.svn' in subdirs:
                subdirs.remove('.svn')
        return sorted(matches)
    else:
        raise AssertionError("Don't know how to handle %r" % root)
def _path_from(parent, child):
    """Return the list of path components leading from directory
    ``parent`` down to ``child`` (``child`` must be under ``parent``)."""
    # Normalize away a trailing separator on the parent directory.
    if os.path.split(parent)[1] == '':
        parent = os.path.split(parent)[0]
    components = []
    while parent != child:
        child, tail = os.path.split(child)
        components.append(tail)
        # Guard against walking past the filesystem root forever.
        assert os.path.split(child)[0] != child
    components.reverse()
    return components
######################################################################
#{ Paragraph structure in Treebank files
######################################################################
def tagged_treebank_para_block_reader(stream):
    """Read the next paragraph from a tagged-treebank file, where
    paragraphs are separated by lines of six or more '=' characters."""
    para = ''
    while True:
        line = stream.readline()
        if re.match('======+\s*$', line):
            # Separator line: emit the paragraph if it has content;
            # otherwise keep scanning.
            if para.strip():
                return [para]
        elif not line:
            # End of file.
            return [para] if para.strip() else []
        else:
            # Content line:
            para += line
| JFriel/honours_project | venv/lib/python2.7/site-packages/nltk/corpus/reader/util.py | Python | gpl-3.0 | 30,870 | [
"VisIt"
] | b06721904f995197de52eb1eb35a6fb465cddd0380a4086cc80a4b13f123e861 |
#########################################################################
# Ryuretic: A Modular Framework for RYU #
# !/ryu/ryu/app/Ryuretic/Ryuretic_Intf.py #
# Authors: #
# Jacob Cox (jcox70@gatech.edu) #
# Sean Donovan (sdonovan@gatech.edu) #
# Ryuretic_Intf.py #
# date 28 April 2016 #
#########################################################################
# Copyright (C) 2016 Jacob Cox - All Rights Reserved #
# You may use, distribute and modify this code under the #
# terms of the Ryuretic license, provided this work is cited #
# in the work for which it is used. #
# For latest updates, please visit: #
# https://github.com/Ryuretic/RAP #
#########################################################################
"""How To Run This Program
1) Ensure you have Ryu installed.
2) Save the following files to /home/ubuntu/ryu/ryu/app/Ryuretic directory
a) Ryuretic_Intf.py
b) Ryuretic.py
c) Pkt_Parse13.py
d) switch_mod13.py
3) In your controller terminal type: cd ryu
4) Enter PYTHONPATH=. ./bin/ryu-manager ryu/app/Ryuretic/Ryuretic_Intf_v1.py
"""
#########################################################################
from Ryuretic import coupler
#################1 Import Needed Libraries 1###################
#[1] Import needed libraries here #
#########################################################################
import string, random
class Ryuretic_coupler(coupler):
    def __init__(self, *args, **kwargs):
        """Initialize the coupler and the state tables used by the
        reactive handlers defined below."""
        super(Ryuretic_coupler, self).__init__(*args, **kwargs)
        ############## 2 Add User Variables 2 ###################
        #[2] Add new global variables here.                             #
        #    Ex. ICMP_ECHO_REQUEST = 8, self.netView = {}               #
        #################################################################
        # Controller's own identity; its port is not known at startup.
        self.cntrl = {'mac':'ca:ca:ca:ad:ad:ad','ip':'192.168.0.40','port':None}
        # The NAT device considered legitimate on this network.
        self.validNAT = {'mac':'aa:aa:aa:aa:aa:aa','ip':'192.168.0.224'}
        # IP of the trusted agent (TA); its MAC/port are filled into
        # self.t_agent elsewhere (per the comment, by respond_to_ping).
        self.t_agentIP = '192.168.0.1'
        self.t_agent = {} #Records TA parameter from respond_to_ping
        self.dns_tbl = {} #Use to redirect DNS
        self.tcp_tbl = {} #Use to redirect TCP
        self.port_mac_map = {} #Used by multi-mac detector
        self.port_AV = {} #Tracks per port Time-2-ack average
        self.portTTA = {}
        self.tta = {} #Tracks TCP handshake per (src,srcip,srcport,dstip)
        self.tcpConnCount = 0 #Future var for tracking total TCP connections
        self.policyTbl = {} #Tracks policies applied to port/mac
        self.netView = {} #Maps switch connections by port,mac,ip
        # Per-port / per-MAC / per-IP flag tables ('test', 'deny', 'norm').
        self.portTbl, self.macTbl, self.ipTbl = {},{},{}
        # Source IP that test_TCP/test_DNS treat as a host under test.
        self.testIP = '0.0.0.0' #'192.168.0.22'
        #self.portTbl[9]='test'
        #self.macTbl['aa:aa:aa:aa:00:22'] = 'test'
        #self.ipTbl['192.168.0.22'] = 'test'
        #Assigns flag to MAC/Port
        # Next policy-table key; incremented by flagHost().
        self.keyID = 101
        # NOTE(review): these look like they were meant to be class-level
        # ICMP type constants; as written here they are unused local
        # names -- confirm intended scope.
        ICMP_ECHO_REPLY = 0
        ICMP_ECHO_REQUEST = 8
################ 3 Proactive Rule Sets 3 ###################
#[3] Insert proactive rules defined below. Follow format below #
# Options include drop or redirect, fwd is the default. #
#####################################################################
def get_proactive_rules(self, dp, parser, ofproto):
return None, None
#fields, ops = self.honeypot(dp, parser, ofproto)
#return fields, ops
################# 4 Reactive Rule Sets 4 #####################
#[4] use below handles to direct packets to reactive user modules #
# defined in location #[5]. If no rule is added, then #
# the default self.default_Fields_Ops(pkt) must be used #
#####################################################################
# Determine highest priority fields and ops pair, if needed #
# xfields = [fields0, fields1, fields2] #
# xops = [ops0, ops1, ops2] #
# fields,ops = self._build_FldOps(xfields,xops) #
#####################################################################
def handle_eth(self,pkt):
#print "Handle Ether: ", pkt['srcmac'],'->',pkt['dstmac']
fields, ops = self.default_Field_Ops(pkt)
self.install_field_ops(pkt,fields,ops)
#def handle_arp(self,pkt):
#print "-------------------------------------------------------------"
#print "Handle ARP: ",pkt['srcmac'],"->",pkt['dstmac']
#print "Handle ARP: ",pkt['srcip'],"->",pkt['dstip']
#fields, ops = self.respond_to_arp(pkt)
##Determin if mac or port has a status
##pkt_status = self.check_net_tbl(pkt['srcmac'],pkt['inport'])
##print pkt_status
#self.install_field_ops(pkt,fields,ops)
def handle_arp(self,pkt):
#print "-------------------------------------------------------------"
#print "Handle ARP: ",pkt['srcmac'],"->",pkt['dstmac']
#print "Handle ARP: ",pkt['srcip'],"->",pkt['dstip']
fields, ops = self.respond_to_arp(pkt)
self.install_field_ops(pkt,fields,ops)
def handle_ip(self,pkt):
#print "-------------------------------------------------------------"
#print "Handle IP"
#fields, ops = self.TTL_Check(pkt) #Lab 9
fields, ops = self.default_Field_Ops(pkt)
self.install_field_ops(pkt,fields,ops)
def handle_icmp(self,pkt):
#print "-------------------------------------------------------------"
#print "Handle ICMP: ",pkt['srcmac'],"->",pkt['dstmac']
#print "Handle ICMP: ",pkt['srcip'],"->",pkt['dstip']
fields,ops = self.respond_to_ping(pkt)
self.install_field_ops(pkt, fields, ops)
def handle_tcp(self,pkt):
#print "-------------------------------------------------------------"
#print "Handle TCP: ",pkt['srcmac'],"->",pkt['dstmac']
#print "Handle TCP: ",pkt['srcip'],"->",pkt['dstip']
#print "Handle TCP: ",pkt['srcport'],"->",pkt['dstport']
pkt_status = self.check_ip_tbl(pkt)
if pkt_status == 'test': #test src and dest
fields,ops = self.redirect_TCP(pkt)
elif pkt_status == 'deny':
fields,ops = self.redirect_TCP(pkt)
else:
#fields,ops = self.default_Field_Ops(pkt)
#fields,ops = self.test_TCP(pkt)
fields,ops = self.TTA_analysis(pkt)
self.install_field_ops(pkt, fields, ops)
    def test_TCP(self,pkt):
        """If the packet's source matches the configured test IP, flag
        the host as 'test' and redirect its TCP traffic to the trusted
        agent; otherwise return the default field/op set."""
        fields,ops = self.default_Field_Ops(pkt)
        if pkt['srcip'] == self.testIP:
            print "IP detected: ", pkt['srcip']
            # Record the 'test' flag in the port/mac/ip tables.
            self.flagHost(pkt,'test')
            fields,ops=self.redirect_TCP(pkt)
            return fields,ops
        return fields,ops
    def redirect_TCP(self,pkt):
        """Rewrite TCP flows of flagged hosts so they terminate at the
        trusted agent instead of their intended destination, and
        restore the original addresses on the return path.

        Uses self.ipTbl (flag lookup), self.tcp_tbl (remembers the real
        destination per (ip, port) so replies can be rewritten back)
        and self.t_agent (the trusted agent's mac/ip/port).
        """
        print "Redirect_TCP: "
        print "pkt info: ", pkt['srcmac'],' ',pkt['dstmac'],' ',pkt['srcip'],' ',pkt['dstip']
        print pkt['srcport'],' ',pkt['dstport']
        #Uses ipTbl, tcp_tbl, and t_agent
        fields,ops = self.default_Field_Ops(pkt)
        if self.ipTbl.has_key(pkt['srcip']):
            # Outbound leg: flagged source -> remember real destination,
            # then rewrite the destination to the trusted agent.
            if self.ipTbl[pkt['srcip']] in ['test','deny']:
                print "ipTbl Contents", self.ipTbl
                key = (pkt['srcip'],pkt['srcport'])
                print "Key is : ", key
                self.tcp_tbl[key] = {'dstip':pkt['dstip'],'dstmac':pkt['dstmac'],
                                     'dstport':pkt['dstport']}
                fields.update({'srcmac':pkt['srcmac'],'srcip':pkt['srcip']})
                fields.update({'dstmac':self.t_agent['mac'],'dstip':self.t_agent['ip']})
                #if pkt['dstport'] == 443:
                    #fields['dstport'] = 80
                # 'mod' op rewrites headers and outputs on the TA's port.
                ops = {'hard_t':None, 'idle_t':None, 'priority':100,\
                       'op':'mod', 'newport':self.t_agent['port']}
                print "TCP Table: ", self.tcp_tbl[key]
        elif self.ipTbl.has_key(pkt['dstip']):
            # Return leg: restore the original destination's mac/ip as
            # the source so the flagged host sees the expected peer.
            print "Returning to ", pkt['dstip']
            if self.ipTbl[pkt['dstip']] in ['test','deny']:
                print "ipTbl Contents", self.ipTbl
                key = (pkt['dstip'],pkt['dstport'])
                print "Key and table: ", key, ' ', self.tcp_tbl[key]
                fields.update({'srcmac':self.tcp_tbl[key]['dstmac'],
                               'srcip':self.tcp_tbl[key]['dstip']})
                #if self.tcp_tbl[key]['dstport'] == 443:
                    #fields.update({'srcport':443})
                fields.update({'dstmac':pkt['dstmac'], 'dstip':pkt['dstip']})
                ops = {'hard_t':None, 'idle_t':None, 'priority':100,\
                       'op':'mod', 'newport':None}
                #self.tcp_tbl.pop(key)
                #print "TCP Table: ", self.tcp_tbl
        return fields, ops
# Add flag to policyTbl, macTbl, portTbl
    # Add flag to policyTbl, macTbl, portTbl
    def flagHost(self,pkt,flag):
        """Record ``flag`` ('test', 'deny' or 'norm') for the packet's
        source in the mac/port/ip tables; for non-'norm' flags, also
        create a policy-table entry with a random passkey and notify
        the trusted agent."""
        print 'Flag Host: ', pkt['srcmac'],'->',flag
        self.macTbl[pkt['srcmac']]={'stat':flag,'port':pkt['inport'],
                                    'ip':pkt['srcip']}
        self.portTbl[pkt['inport']]=flag
        self.ipTbl[pkt['srcip']] = flag
        if flag != 'norm':
            keyID = self.keyID
            self.keyID += 1
            #create passkey
            # NOTE(review): random.choice is not cryptographically secure;
            # use the secrets module if this passkey matters for security.
            passkey =''.join(random.choice(string.ascii_letters) for x in range(8))
            #update policy table
            self.policyTbl[keyID]={'inport':pkt['inport'],'srcmac':pkt['srcmac'],
                                   'ip':pkt['srcip'],'passkey':passkey,'stat':flag}
            #Notify trusted agent of newly flagged client
            self.update_TA(pkt, keyID, 'l') #load message'
    def handle_udp(self,pkt):
        """Entry point for UDP (DNS) packets.

        Hosts already flagged 'test' or 'deny' get their DNS redirected to
        the trusted agent; unflagged hosts go through test_DNS first.
        """
        print "-------------------------------------------------------------"
        print "Handle UDP: ",pkt['srcmac'],"->",pkt['dstmac']
        print "Handle UDP: ",pkt['srcip'],'->',pkt['dstip']
        #Added to build MAC and port associations
        pkt_status = self.check_ip_tbl(pkt)
        if pkt_status == 'test': #test src and dest
            fields,ops = self.redirect_DNS(pkt)
        elif pkt_status == 'deny':
            # NOTE(review): identical action to the 'test' branch; the two
            # could be merged into a single membership test.
            fields,ops = self.redirect_DNS(pkt)
        else:
            fields,ops = self.test_DNS(pkt)
        self.install_field_ops(pkt, fields, ops)
    def test_DNS(self,pkt):
        """Flag the configured self.testIP as 'test' on first sight and
        redirect its DNS; all other sources get the default field/ops."""
        print "Testing DNS"
        fields,ops = self.default_Field_Ops(pkt)
        if pkt['srcip'] == self.testIP:
            print "IP detected: ", pkt['srcip']
            self.flagHost(pkt,'test')
            fields,ops=self.redirect_DNS(pkt)
            return fields,ops
        return fields,ops
    def redirect_DNS(self,pkt):
        """Redirect a 'test'-flagged host's DNS to the trusted agent, and
        restore the saved resolver identity on the return path.
        Uses self.ipTbl, self.dns_tbl and self.t_agent; returns (fields, ops).
        """
        print "Redirect_DNS: "
        #Uses macTbl, dns_tbl, and t_agent
        fields,ops = self.default_Field_Ops(pkt)
        if self.ipTbl.has_key(pkt['srcip']):
            if self.ipTbl[pkt['srcip']]== 'test':
                # Outbound: remember the real resolver, send to the agent.
                key = (pkt['srcip'],pkt['srcport'])
                print key
                self.dns_tbl[key] = {'dstip':pkt['dstip'],'dstmac':pkt['dstmac']}
                fields.update({'dstmac':self.t_agent['mac'],
                               'dstip':self.t_agent['ip']})
                fields.update({'srcmac':pkt['srcmac'],'srcip':pkt['srcip']})
                ops = {'hard_t':None, 'idle_t':None, 'priority':100,\
                       'op':'mod', 'newport':self.t_agent['port']}
        elif self.ipTbl.has_key(pkt['dstip']):
            if self.ipTbl[pkt['dstip']]== 'test':
                # Return path: restore the saved resolver identity.
                key = (pkt['dstip'],pkt['dstport'])
                print key
                fields.update({'srcmac':self.dns_tbl[key]['dstmac'],
                               'srcip':self.dns_tbl[key]['dstip']})
                fields.update({'dstmac':pkt['dstmac'], 'dstip':pkt['dstip']})
                ops = {'hard_t':None, 'idle_t':None, 'priority':100,\
                       'op':'mod', 'newport':None}
                #self.dns_tbl.pop(key)
                #print "DNS Table: ", self.dns_tbl
        return fields, ops
#Check status of port and mac.
    def check_ip_tbl(self,pkt):
        """Return the flag recorded in ipTbl for this packet's source IP,
        else its destination IP, else 'No_Flag'."""
        #print "Check_ip_tbl:"
        srcip,dstip = pkt['srcip'],pkt['dstip']
        if self.ipTbl.has_key(srcip):
            #print "Found: ", srcip,'->', self.ipTbl[srcip]
            return self.ipTbl[srcip]
        elif self.ipTbl.has_key(dstip):
            #print "Found: ", dstip,'->', self.ipTbl[dstip]
            return self.ipTbl[dstip]
        else:
            #print "Not Found: ", srcip, ', ', dstip
            return 'No_Flag'
# All packets not defined above are handled here.
    def handle_unk(self,pkt):
        """Default handler: install the default forwarding rule for packets
        of any type not handled elsewhere."""
        print "-------------------------------------------------------------"
        print "Handle Uknown"
        fields, ops = self.default_Field_Ops(pkt)
        self.install_field_ops(pkt, fields, ops)
######################################################################
# The following are from the old NFG file.
    def default_Field_Ops(self,pkt):
        """Return the baseline (fields, ops) pair for a packet: match on
        inport+srcmac, forward, priority 10, no timeouts."""
        def _loadFields(pkt):
            #keys specifies match fields for action. Default is
            #inport and srcmac. ptype used for craft icmp, udp, etc.
            fields = {'keys':['inport','srcmac'],'ptype':[], 'dp':pkt['dp'],
                      'ofproto':pkt['ofproto'], 'msg':pkt['msg'],
                      'inport':pkt['inport'], 'srcmac':pkt['srcmac'],
                      'ethtype':pkt['ethtype'], 'dstmac':None, 'srcip':None,
                      'proto':None, 'dstip':None, 'srcport':None, 'dstport':None,
                      'com':None, 'id':0}
            return fields
        def _loadOps():
            #print "Loading ops"
            #Specifies the timeouts, priority, operation and outport
            #options for op: 'fwd','drop', 'mir', 'redir', 'craft'
            ops = {'hard_t':None, 'idle_t':None, 'priority':10, \
                   'op':'fwd', 'newport':None}
            return ops
        #print "default Field_Ops called"
        fields = _loadFields(pkt)
        ops = _loadOps()
        return fields, ops
######################################################################
############ 5 Ryuretic Network Application Modules 5 ##############
#[5] Add user created methods below. Examples are provided to assist #
# the user with basic python, dictionary, list, and function calls #
######################################################################
# Confirm mac has been seen before and no issues are recorded
    def TTL_Check(self, pkt):
        """Detect hidden routers/NATs by decremented TTLs (63 or 127) and
        install a drop rule; the known-valid NAT MAC is exempt."""
        #initialize fields and ops with default settings
        fields, ops = self.default_Field_Ops(pkt)
        if pkt['srcmac'] != self.validNAT['mac']:
            # TTL one below the common initial values (64/128) suggests the
            # packet already crossed an unauthorised router hop.
            if pkt['ttl']==63 or pkt['ttl']==127:
                print 'TTL Decrement Detected on ',pkt['srcmac'],' TTL is :',pkt['ttl']
                fields, ops = self.add_drop_params(pkt,fields,ops)
            else:
                ops['idle_t'] = 5
                print "Packet TTL: ", pkt['ttl'], ' ', pkt['srcip'],' ', \
                      pkt['inport'],' ', pkt['srcmac']
        else:
            ops['idle_t'] = 20
            # NOTE(review): assigns a local that is never used; probably
            # intended ops['priority'] = 10 -- confirm before changing.
            priority = 10
        return fields, ops
    def Multi_MAC_Checker(self, pkt):
        """Drop traffic from any port that starts sourcing a second MAC
        address; otherwise forward briefly and record the port->MAC binding."""
        fields, ops = self.default_Field_Ops(pkt)
        print "*** Checking MAC ***"
        #self.port_mac_map = {}
        if self.port_mac_map.has_key(pkt['inport']):
            if pkt['srcmac'] != self.port_mac_map[pkt['inport']]:
                print " Multi-mac port detected "
                fields, ops = self.add_drop_params(pkt,fields,ops)
            else:
                fields, ops = self.fwd_persist(pkt,fields,ops)
        else:
            # First MAC seen on this port: remember the binding.
            self.port_mac_map[pkt['inport']] = pkt['srcmac']
        return fields, ops
#change name to monitor_TCP for RAP
    def TTA_analysis(self,pkt):
        """Measure per-connection TCP 'time to ACK' and maintain an
        exponentially weighted average per switch port in self.port_AV.

        Handshakes are tracked in self.tta, keyed (mac, ip, port, peer-ip).
        pkt['bits'] appears to be the TCP flags byte (2=SYN, 18=SYN/ACK,
        16=ACK, 24=PSH/ACK, 17=FIN/ACK, 20=RST/ACK) -- TODO confirm
        against the packet parser.
        """
        weight = 9     # EWMA weight given to the existing port average
        seed = 5.0     # starting average for a never-seen port
        fields, ops = self.default_Field_Ops(pkt)
        bits = pkt['bits']
        dst, dstip, dstport = pkt['dstmac'], pkt['dstip'], pkt['dstport']
        src, srcip, srcport = pkt['srcmac'], pkt['srcip'], pkt['srcport']
        inport = pkt['inport']
        send = (src,srcip,srcport,dstip)
        arrive = (dst,dstip,dstport,srcip)
        t_in = pkt['t_in']
        if bits == 20:
            # RST/ACK: reset the handshake stage for either direction.
            if self.tta.has_key(send):
                self.tta[send]['stage'] = 0
            elif self.tta.has_key(arrive):
                #print pkt
                self.tta[arrive]['stage'] = 0
            return fields, ops
        if bits == 2:
            # SYN: start (or restart) tracking this connection at stage 1.
            if self.tta.has_key(send):
                self.tta[send].update({'inport':inport,'stage':1})
            else:
                self.tta.update({send:{'inport':inport,'stage':1}})
            return fields, ops
        if bits == 18:
            # SYN/ACK: record its arrival time, advance to stage 2.
            if self.tta.has_key(arrive):
                if self.tta[arrive]['stage']==1:
                    self.tta[arrive].update({'syn':t_in,'stage':2})
                    #print 'time18: ', t_in
            return fields,ops
        if bits == 16:
            # ACK: handshake complete; tta = ACK time - SYN/ACK time.
            if self.tta.has_key(send):
                if self.tta[send]['stage']==2:
                    tta = t_in - self.tta[send]['syn']
                    #print 'time16: ', t_in
                    self.tta[send].update({'stage':3, 'ack':t_in, 'tta':tta})
                    # Fold the sample into the port's running average.
                    if self.port_AV.has_key(self.tta[send]['inport']):
                        portAV = ((self.port_AV[self.tta[send]['inport']] * \
                                   weight) + tta)/(weight+1)
                        self.port_AV[self.tta[send]['inport']] = portAV
                        #print 'Port Averages: ', self.port_AV
                    else:
                        portAV = ((seed*weight)+tta)/(weight+1)
                        self.port_AV.update({self.tta[send]['inport']:portAV})
                        #print 'Port Averages: ', self.port_AV
                    del self.tta[send]
                    return fields, ops
            # Not a tracked handshake ACK: keep the flow installed briefly.
            fields, ops = self.tcp_persist(pkt,fields,ops)
            return fields, ops
        if bits == 24:
            #print "HTTP Push"
            return fields, ops
        if bits == 17:
            # FIN/ACK: stop tracking the connection in either direction.
            if self.tta.has_key(send):
                del self.tta[send]
            elif self.tta.has_key(arrive):
                del self.tta[arrive]
            return fields, ops
        #print "Packet not addressed", bits, inport, src, dstip
        return fields, ops
# Call to temporarily install drop parameter for a packet to switch
def add_drop_params(self, pkt, fields, ops):
#may need to include priority
fields['keys'] = ['inport']
fields['inport'] = pkt['inport']
ops['priority'] = 100
ops['idle_t'] = 60
ops['op']='drop'
return fields, ops
# Call to temporarily install TCP flow connection on switch
def tcp_persist(self, pkt,fields,ops):
#print "TCP_Persist: ", pkt['srcmac'],'->', pkt['dstmac']
#print "TCP_Persist: ", pkt['srcip'],'->',pkt['dstip']
fields['keys'] = ['inport', 'srcmac', 'srcip', 'ethtype', 'srcport']
fields['srcport'] = pkt['srcport']
fields['srcip'] = pkt['srcip']
ops['idle_t'] = 5
ops['priority'] = 10
return fields, ops
def fwd_persist(self, pkt,fields,ops):
ops['idle_t'] = 3
ops['priority'] = 10
return fields, ops
def arp_persist(self, pkt):
fields, ops = self.default_Field_Ops(pkt)
fields['keys'] = ['inport','srcmac','ethtype']
ops['idle_t'] = 10
ops['priority'] = 2
return fields, ops
################################################################
"""
The following code is implemented to allow the trusted agent to comm
with the controller and vice versa.
"""
################################################################
#Receive and respond to arp
    def respond_to_arp(self,pkt):
        """Handle ARP: record unknown source MACs, and answer ARP requests
        addressed to the controller's IP by crafting a reply out the
        ingress port."""
        fields, ops = self.default_Field_Ops(pkt)
        #Added to build MAC and port associations
        if not self.macTbl.has_key(pkt['srcmac']):
            self.macTbl[pkt['srcmac']] = {'port':pkt['inport'], 'stat':'unk'}
        if pkt['dstip'] == self.cntrl['ip']:
            print "Message to Controller"
            # Craft an ARP (0x0806) reply from the controller to the sender.
            fields['keys']=['srcmac', 'srcip', 'ethtype', 'inport']
            fields['ptype'] = 'arp'
            fields['dstip'] = pkt['srcip']
            fields['srcip'] = self.cntrl['ip']
            fields['dstmac'] = pkt['srcmac']
            fields['srcmac'] = self.cntrl['mac']
            fields['ethtype'] = 0x0806
            ops['op'] = 'craft'
            ops['newport'] = pkt['inport']
            #print "INPORT: ", pkt['inport']
        return fields, ops
#Respond to ping. Forward or respond if to cntrl from trusted agent.
def respond_to_ping(self,pkt):
def get_fields(keyID):
srcmac = self.policyTbl[keyID]['srcmac']
inport = self.policyTbl[keyID]['inport']
srcip = self.policyTbl[keyID]['ip']
print inport, ', ', srcmac, ', ', srcip
return srcmac, inport, srcip
def remove_keyID(keyID):
print "Policy Table Contents: ", self.policyTbl
if self.policyTbl.has_key(keyID):
srcmac, inport, srcip = get_fields(keyID)
if self.macTbl.has_key(srcmac):
print "Removing MAC", srcmac
self.macTbl.pop(srcmac)
if self.portTbl.has_key(inport):
print "Removing Port", inport
self.portTbl.pop(inport)
if self.ipTbl.has_key(srcip):
print "Removing IP", srcip
self.ipTbl.pop(srcip)
self.policyTbl.pop(keyID)
#print "Respond to Ping: ", pkt['srcmac'],'->',pkt['dstmac']
fields, ops = self.default_Field_Ops(pkt)
if pkt['dstip'] == self.cntrl['ip'] and pkt['srcip'] == self.t_agentIP:
#print'respond to ping'
rcvData = pkt['data'].data
#Actions {a-acknowledge, i-init, d-delete, r-result, v-verify}
#action, keyID = rcvData.split(',')
#keyID = keyID.rstrip(' \t\r\n\0')
print rcvData
try:
action, keyID, result = rcvData.split(',')
result = result.rstrip(' \t\r\n\0')
print "Received Result"
except:
action, keyID = rcvData.split(',')
print "Received Revocation."
keyID = keyID.rstrip(' \t\r\n\0')
print "Key ID Length: ", len(keyID)
keyID = int(keyID)
print "KeyID is ", keyID, ', ', type(keyID)
print "Action is ", action, "\n\n\n*********"
######################################################
if action == 'i':
self.t_agent = {'ip':pkt['srcip'],'mac':pkt['srcmac'],
'port':pkt['inport'],'msg':pkt['msg'],
'ofproto':pkt['ofproto'], 'dp':pkt['dp']}
print "T_AGENT Loaded"
elif action == 'd':
#Deleting flagged host policy
print "Removing (",keyID,") from Policy Table"
print "Existing Keys: ", self.policyTbl.keys()
remove_keyID(keyID)
elif action == 'r':
print "Validating result"
print "Key present?", self.policyTbl.has_key(keyID)
if self.policyTbl.has_key(keyID):
print "Test Result is: ", result
if result == 'P':
print "Removing keyID"
remove_keyID(keyID)
elif result =='F':
print "Flagging Host: ", self.policyTbl[keyID]['ip']
self.policyTbl[keyID]['stat'] = 'deny'
srcmac, inport, srcip = get_fields(keyID)
self.macTbl[srcmac].update({'stat':'deny'})
self.portTbl[inport],self.ipTbl[srcip] ='deny','deny'
self.update_TA(pkt, keyID,'e') #send edit message
#Notify TA of update_TA(self,pkt, keyID)
else:
print "An Error Occured"
elif action is 'u':
#This is more complicated it requires data not being stored
#may need to add fields to policyTable. Maybe not.
pass
elif action is 'a':
#Acknowledge receipt
pass
else:
print "No match"
fields.update({'srcmac':self.cntrl['mac'], 'dstmac':pkt['srcmac']})
fields.update({'srcip':self.cntrl['ip'], 'dstip':pkt['srcip']})
fields.update({'ptype':'icmp','ethtype':0x0800, 'proto':1})
fields['com'] = 'a,'+rcvData
ops.update({'op':'craft', 'newport':pkt['inport']})
return fields, ops
#Crafts tailored ICMP message for trusted agent
    def update_TA(self,pkt, keyID, message):
        """Craft an ICMP control message to the trusted agent describing the
        policy entry *keyID*.  *message* is the action code ('l' load,
        'e' edit, ...); the comma-separated payload carries the entry's
        mac, port, passkey, status and keyID.
        """
        table = self.policyTbl[keyID]
        print 'Update Table: ', pkt['srcmac'],'->',keyID,'->',table['stat']
        print 'Update Table: ', table['srcmac'],'->',keyID,'->',table['stat']
        #print "Updating Trusted Agent"
        fields, ops = {},{}
        fields['keys'] = ['inport', 'srcip']
        fields.update({'dstip':self.t_agent['ip'], 'srcip':self.cntrl['ip']})
        fields.update({'dstmac':self.t_agent['mac'], 'srcmac':self.cntrl['mac']})
        fields.update({'dp':self.t_agent['dp'], 'msg':self.t_agent['msg']})
        fields.update({'inport':self.t_agent['port'],'ofproto':\
                       self.t_agent['ofproto']})
        fields.update({'ptype':'icmp', 'ethtype':0x0800, 'proto':1, 'id':0})
        # Comma-separated payload parsed by the trusted agent.
        fields['com'] = message+','+table['srcmac']+','+str(table['inport'])+\
                        ','+str(table['passkey'])+','+table['stat']+\
                        ','+str(keyID)
        ops = {'hard_t':None, 'idle_t':None, 'priority':0, \
               'op':'craft', 'newport':self.t_agent['port']}
        self.install_field_ops(pkt, fields, ops)
################################################################
"""
The following code controls the redirection of packets from their intended
destination to our trusted agent. This occurs when a port is flagged.
"""
################################################################
#Create a method to inject a redirect anytime the sta4 IP address is
#Check status of port and mac.
    def check_net_tbl(self,pkt):
        """Return the recorded flag for this packet's source MAC, ingress
        port, or source IP (checked in that order); 'new' if unknown."""
        mac, ip, port = pkt['srcmac'], pkt['srcip'], pkt['inport']
        print "(536) Check NetTbl: ", mac, ' & ', port,'->',self.macTbl.keys()
        if mac in self.macTbl.keys():
            #print "Found: ", mac,'->', self.macTbl[mac]['stat']
            return self.macTbl[mac]['stat']
        elif port in self.portTbl.keys():
            #print "Port ", port, " found in table."
            return self.portTbl[port]
        elif ip in self.ipTbl.keys():
            #print "IP ", ip, " found in table."
            return self.ipTbl[ip]
        else:
            #print "Not Found: ", mac
            return 'new'
#Redirect ICMP packets to trusted agent
    def Icmp_Redirect(self,pkt):
        """Redirect ICMP arriving on this port to the trusted agent with a
        high-priority rule that idles out after 180 s."""
        print "Redirecting ICMP", pkt['srcmac'],'->',pkt['dstmac'],'||',self.t_agent['mac']
        fields, ops = self.default_Field_Ops(pkt)
        fields['keys'] = ['inport', 'ethtype']
        fields['dstmac'] = self.t_agent['mac']
        fields['dstip'] = self.t_agent['ip']
        fields['ethtype'] = pkt['ethtype']
        ops['op'] = 'redir'
        ops['newport'] = self.t_agent['port']
        ops['priority'] = 100
        ops['idle_t'] = 180
        #ops['hard_t'] = 180
        return fields, ops
| Ryuretic/RAP | ryu/ryu/app/Ryuretic/Ryuretic_Intf_v7.py | Python | apache-2.0 | 24,856 | [
"VisIt"
] | eaee13e122cd7eeb3423c10cfcb5e39c0e40f01493b147b0ff3379ff48a0e826 |
import json
import logging
from edge.opensearch.isoresponse import IsoResponse
from datetime import date, datetime
class IsoResponseBySolr(IsoResponse):
    """Generate an ISO metadata response from Solr dataset/granule JSON.

    Solr documents are flattened into ``self.variables``, which the
    IsoResponse base class renders through its template.
    """
    def __init__(self):
        super(IsoResponseBySolr, self).__init__()

    def generate(self, solrDatasetResponse, solrGranuleResponse = None, pretty=False):
        """Populate template variables from the Solr JSON and render.

        solrDatasetResponse -- JSON string from a Solr dataset query (or None)
        solrGranuleResponse -- optional JSON string from a Solr granule query
        pretty              -- pretty-print the rendered output
        """
        self._populate(solrDatasetResponse, solrGranuleResponse)
        return super(IsoResponseBySolr, self).generate(pretty)

    def _populate(self, solrDatasetResponse, solrGranuleResponse = None):
        """Flatten the Solr docs into self.variables.

        Dataset variables are populated only when exactly one dataset doc
        was returned.  Granule epoch-millisecond time fields are rewritten
        as ISO 8601 strings; only the first granule is converted and used
        for the bounding box (the loop breaks after one iteration).
        """
        if solrDatasetResponse is not None:
            solrJson = json.loads(solrDatasetResponse)
            logging.debug('dataset count: '+str(len(solrJson['response']['docs'])))
            if len(solrJson['response']['docs']) == 1:
                doc = solrJson['response']['docs'][0]
                self.variables['doc'] = doc
                # Dates are optional: leave them unset if missing/malformed.
                # (Narrowed from a bare "except:" so SystemExit and
                # KeyboardInterrupt are no longer swallowed.)
                try:
                    self.variables['DatasetCitation_ReleaseDate'] = date.fromtimestamp(float(doc['DatasetCitation-ReleaseDateLong'][0]) / 1000).strftime('%Y%m%d')
                    self.variables['DatasetCoverage_StartTime'] = self._convertTimeLongToISO(doc['DatasetCoverage-StartTimeLong'][0])
                    self.variables['DatasetCoverage_StopTime'] = self._convertTimeLongToISO(doc['DatasetCoverage-StopTimeLong'][0])
                except Exception:
                    pass
                try:
                    # Index of the first occurrence of each unique sensor
                    self.variables['UniqueDatasetSensor'] = {}
                    for i, x in enumerate(doc['DatasetSource-Sensor-ShortName']):
                        self.variables['UniqueDatasetSensor'][x] = i
                    self.variables['UniqueDatasetSensor'] = self.variables['UniqueDatasetSensor'].values()
                    # Index of the first occurrence of each unique source
                    self.variables['UniqueDatasetSource'] = {}
                    for i, x in enumerate(doc['DatasetSource-Source-ShortName']):
                        self.variables['UniqueDatasetSource'][x] = i
                    self.variables['UniqueDatasetSource'] = self.variables['UniqueDatasetSource'].values()
                    # Replace 'none'/'None' placeholder values with ''
                    doc['DatasetParameter-VariableDetail'] = [self._filterString(variableDetail) for variableDetail in doc['DatasetParameter-VariableDetail']]
                    # Current date
                    self.variables['DateStamp'] = datetime.utcnow().strftime('%Y%m%d')
                    # Data format version
                    self.variables['DatasetPolicy_DataFormat_Version'] = self._getDataFormatVersion(doc['DatasetPolicy-DataFormat'][0])
                except Exception as e:
                    # Any failure invalidates the dataset: drop it entirely.
                    logging.debug("Problem generating ISO " + str(e))
                    del self.variables['doc']
        if solrGranuleResponse is not None:
            solrGranuleJson = json.loads(solrGranuleResponse)
            logging.debug('granule count: '+str(len(solrGranuleJson['response']['docs'])))
            for doc in solrGranuleJson['response']['docs']:
                self._populateItem(solrGranuleResponse, doc, None)
                doc['Granule-StartTimeLong'][0] = self._convertTimeLongToISO(doc['Granule-StartTimeLong'][0])
                doc['Granule-StopTimeLong'][0] = self._convertTimeLongToISO(doc['Granule-StopTimeLong'][0])
                doc['Granule-ArchiveTimeLong'][0] = self._convertTimeLongToISO(doc['Granule-ArchiveTimeLong'][0])
                doc['Granule-CreateTimeLong'][0] = self._convertTimeLongToISO(doc['Granule-CreateTimeLong'][0])
                if 'GranuleSpatial-NorthLat' in doc and 'GranuleSpatial-EastLon' in doc and 'GranuleSpatial-SouthLat' in doc and 'GranuleSpatial-WestLon' in doc:
                    self.variables['GranuleBoundingBox'] = dict([('southernmostLatitude', doc['GranuleSpatial-SouthLat'][0]),
                                                                 ('northernmostLatitude', doc['GranuleSpatial-NorthLat'][0]),
                                                                 ('westernmostLongitude', doc['GranuleSpatial-WestLon'][0]),
                                                                 ('easternmostLongitude', doc['GranuleSpatial-EastLon'][0])])
                # Deliberate: only the first granule is processed.
                break
            self.variables['granules'] = solrGranuleJson['response']['docs']

    def _populateChannel(self, solrResponse):
        """Hook for subclasses; unused here."""
        pass

    def _populateItem(self, solrResponse, doc, item):
        """Hook for subclasses; unused here."""
        pass

    def _convertTimeLongToISO(self, time):
        """Convert epoch milliseconds to an ISO 8601 UTC string ('' on error)."""
        isoTime = ''
        try:
            isoTime = datetime.utcfromtimestamp(float(time) / 1000).isoformat() + 'Z'
        except ValueError:
            pass
        return isoTime

    def _filterString(self, value):
        """Map the placeholder 'none'/'None' to ''; pass other values through."""
        if value.lower() == 'none':
            return ''
        return value

    def _getDataFormatVersion(self, dataFormat):
        """Map a data-format name to its version number.

        NETCDF -> 3, HDF -> 4; otherwise the trailing character of the
        name is used if it is a digit (e.g. 'HDF5' -> 5), else ''.
        """
        version = ''
        if dataFormat == 'NETCDF':
            version = 3
        elif dataFormat == 'HDF':
            version = 4
        else:
            # Narrowed from a bare "except:"; int() of a non-digit raises
            # ValueError and an empty name raises IndexError.
            try:
                version = int(dataFormat[-1])
            except (ValueError, IndexError):
                pass
        return version
| dataplumber/edge | src/main/python/libraries/edge/opensearch/isoresponsebysolr.py | Python | apache-2.0 | 6,181 | [
"NetCDF"
] | cb34ec6874db0f3a3f0fd88eebcfb85e6388b328f57fa79b20cdc9dd116028c8 |
#!/usr/bin/env python
__author__ = "Jae Stutzman"
__copyright__ = "Copyright 2012, Jae Stutzman"
__license__ = "GPLv3"
__version__ = "0.1.0"
__email__ = "jaebird@gmail.com"
__status__ = "Testing"
"""
Description:
This script reads an XBee serial stream (config settings at the bottom
of this script) being sent by remote XBees. The purpose is to interface
with the Victor Multi-Kill M260 to provide the user with an email whenever
a mouse is killed or the trap is in a trouble state.
License:
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import serial, time, datetime, sys, os, signal, logging
from xbee import xbee
import gmailit
import collections
KEEP_RUNNING = True
# Debug logging off by default pass "-d" parameter from cmd line
SERIALPORT = "/dev/ttyAMA0" # the com/serial port the XBee is connected to
BAUDRATE = 9600 # the baud rate we talk to the xbee
SERIALPORT_TIMEOUT = 10 # 10 sec timeout on serial port
EMAIL_LIST = ["user@somewhere.com", "anotheruser@somewhereelse.com"]
GRNLEDPIN = 0 # which XBee DIN has the Green LED for mouse kills
REDLEDPIN = 4 # which XBee DIN has the Red LED for mousetrap trouble or low battery
mouseTraps = dict() # dictionary of mousetraps being monitored
class MouseTrap:
    """State tracker for one remote mousetrap (identified by its XBee id).

    Each XBee sample carries the state of a green and a red LED; update()
    decodes these into kill / trouble / full events and logs transitions
    (which also triggers the email handlers).  check_update() marks the
    trap offline when no sample has arrived within MAXTIMEDELTA.
    """
    MAXTIMEDELTA = datetime.timedelta(seconds=3600) # 1 hr after the max delta trap is decidedly offline
    lastUpdateDateTime = datetime.datetime(2012,1,1)
    # consecutive all-clear samples required before a latched state clears
    samplesToClear = 3
    clearSamplesRecvd = 0
    trapNumber = 0 # each trap is unique based on the xbee MY id
    lastGreenLED = 0
    lastRedLED = 0
    lastMouseTrapFull = False
    mouseTrapFullSet = False
    lastMouseTrapTrouble = False
    mouseTrapTroubleSet = False
    lastMouseCaught = False
    mouseCaughtSet = False
    trapOnline = False
    # ctor, sets the trap number
    def __init__(self, trapNumber):
        self.trapNumber = trapNumber
    # called everytime an update for this mousetrap is receieved
    def update(self, currentGreenLED, currentRedLED):
        """Decode one LED sample and log any state transitions."""
        # set last update to now
        self.lastUpdateDateTime = datetime.datetime.now()
        if not self.trapOnline:
            # First sample after being offline: just mark the trap online.
            currentTrapOnline = self.trapOnline = True
            log_info(self.trapNumber, "Online: %s" % self.trapOnline)
            return
        # reset current values
        currentMouseTrapFull = False
        currentMouseTrapTrouble = False
        currentMouseCaught = False
        # the trap will trigger both green and red to make amber
        # to signify that the trap is full (10 mice)
        if currentGreenLED and currentRedLED:
            currentMouseTrapFull = True
        # trap trouble: problem or low battery
        elif currentRedLED:
            currentMouseTrapTrouble = True
        # trap has at least one mouse
        elif currentGreenLED:
            currentMouseCaught = True
        # Any lit LED resets the clear counter; otherwise count all-clear
        # samples towards releasing a latched state.
        if currentMouseTrapFull or currentRedLED or currentGreenLED:
            self.clearSamplesRecvd = 0
        elif self.mouseTrapFullSet or self.mouseTrapTroubleSet or self.mouseCaughtSet:
            self.clearSamplesRecvd = self.clearSamplesRecvd + 1
        # send the message of the current mousetrap state
        if currentMouseTrapFull != self.lastMouseTrapFull:
            self.lastMouseTrapFull = currentMouseTrapFull
            if self.lastMouseTrapFull and not self.mouseTrapFullSet:
                log_info(self.trapNumber, "Mousetrap FULL!")
                self.mouseTrapFullSet = True
        elif currentMouseTrapTrouble != self.lastMouseTrapTrouble:
            self.lastMouseTrapTrouble = currentMouseTrapTrouble
            if self.lastMouseTrapTrouble and not self.mouseTrapTroubleSet:
                log_info(self.trapNumber, "Low Battery or Trouble!")
                self.mouseTrapTroubleSet = True
        elif currentMouseCaught != self.lastMouseCaught: # value has toggled
            self.lastMouseCaught = currentMouseCaught
            if self.lastMouseCaught and not self.mouseCaughtSet:
                log_info(self.trapNumber, "Mouse Killed!")
                self.mouseCaughtSet = True
        # clear the sets
        elif self.clearSamplesRecvd > self.samplesToClear:
            if self.mouseTrapFullSet:
                self.mouseTrapFullSet = False
            elif self.mouseTrapTroubleSet:
                log_info(self.trapNumber, "Trouble Cleared")
                self.mouseTrapTroubleSet = False
            elif self.mouseCaughtSet:
                log_info(self.trapNumber, "Mouse Cleared.")
                self.mouseCaughtSet = False
        logger.debug("Trap: %s, Trap Full Set: %s, Mouse Caught Set: %s" \
                     % (self.trapNumber, self.mouseTrapFullSet, self.mouseCaughtSet))
    # check for whether the trap is no longer sending data
    def check_update(self):
        """Mark the trap offline if no sample arrived within MAXTIMEDELTA."""
        if self.trapOnline and (datetime.datetime.now() - self.lastUpdateDateTime > self.MAXTIMEDELTA):
            self.trapOnline = False
            # NOTE(review): these three assignments create new attributes
            # (mouseCaught/mouseTrapTrouble/mouseTrapFull) rather than
            # resetting the last*/Set flags used by update() -- confirm
            # whether they were meant to clear the latched state.
            self.mouseCaught = False
            self.mouseTrapTrouble = False
            self.mouseTrapFull = False
            log_info(self.trapNumber, "Mousetrap not sending data, OFFLINE!")
class EmailHandler(logging.Handler):
    """Logging handler that emails INFO-level records via gmailit."""

    def __init__(self, email_address):
        """Remember the destination address and initialise the base Handler."""
        super(EmailHandler, self).__init__()
        self.email_address = email_address

    def emit(self, record):
        """Mail the formatted record, but only for INFO-level messages."""
        if record.levelname != "INFO":
            return
        gmailit.mail(self.email_address, "Mouse Trap Status",
                     self.format(record), None)
class StreamToLogger(object):
    """File-like object that redirects writes to a logger.

    Used to capture sys.stderr so uncaught tracebacks end up in the
    application log.  Fix: removed the unused local ``var`` from write();
    added a no-op flush() so callers of sys.stderr.flush() don't fail.
    """
    def __init__(self, label, logger, log_level=logging.INFO):
        self.label = label          # prefix identifying the stream, e.g. 'STDERR'
        self.logger = logger        # destination logger
        self.log_level = log_level  # level at which lines are logged
        self.linebuf = ''
    def write(self, buf):
        """Log each non-empty line of *buf*, prefixed with the label."""
        for line in buf.rstrip().splitlines():
            log_text = "%s: %s" % (self.label, line.rstrip())
            self.logger.log(self.log_level, log_text)
    def flush(self):
        """No-op; present so this object satisfies the stream interface."""
        pass
def signal_handler(signal, frame):
    """SIGINT/SIGTERM handler: ask the main loop to exit cleanly."""
    # NOTE(review): the parameter shadows the `signal` module inside this
    # function; harmless here but worth renaming.
    global KEEP_RUNNING
    KEEP_RUNNING = False
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# logs the info to console and sends an email
def log_info(trapnumber, text):
    """Log a trap status message at INFO level.

    INFO records also reach the EmailHandler instances, so this both
    logs to console/file and emails the configured recipients.
    """
    logger.info("Mousetrap: %s - %s" % (trapnumber, text))
# the runs every time serial data is received or serial port timeout
def main_loop():
    """Read one XBee packet (or time out) and update mousetrap state.

    Runs once per received packet or serial-port timeout; sets the module
    flag KEEP_RUNNING False on KeyboardInterrupt.
    """
    global mousetraps
    global KEEP_RUNNING
    try:
        # grab one packet from the xbee, or timeout
        packet = xbee.find_packet(ser)
    except KeyboardInterrupt:
        KEEP_RUNNING = False
    except:
        # NOTE(review): bare except also hides SystemExit; consider narrowing.
        logger.debug("Error reading packet.")
        return
    if packet == True: # received data but not sync'd
        return
    elif packet == None: # Did not receive data, timed out.
        # Still give every trap the chance to go offline.
        for mouseTrap in mouseTraps.itervalues():
            mouseTrap.check_update()
        # for debugging
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("Serial port Timedout waiting for data.")
    else:
        try:
            xb = xbee(packet) # parse the packet
            # no op but by parsing to string it forces AttributeError if bad packet
            nop = str(xb)
            greenOn = False
            redOn = False
            greenCount = 0
            redCount = 0
            # check the samples to determine if either of the LEDs are "ON"
            for sample in xb.digital_samples:
                greenCount += sample[GRNLEDPIN]
                redCount += sample[REDLEDPIN]
            if greenCount > 0:
                greenOn = True
            if redCount > 0:
                redOn = True
            if logger.isEnabledFor(logging.DEBUG):
                green = ""
                red = ""
                for sample in xb.digital_samples:
                    green += str(sample[GRNLEDPIN])
                    red += str(sample[REDLEDPIN])
                logger.debug("Address: %s, RSSI: %s, Sample Count: %s Green LED: %s, Red LED: %s" \
                             % (xb.address_16, xb.rssi, len(xb.digital_samples), green, red))
            # if the trap has not yet been added to the dictionary
            if not mouseTraps.has_key(xb.address_16):
                mouseTrap = MouseTrap(xb.address_16)
                mouseTraps[xb.address_16] = mouseTrap
            # update the trap witht the LED values
            mouseTraps[xb.address_16].update(greenOn, redOn)
        except AttributeError as attrError:
            # malformed packet: xbee parsing raised while stringifying
            logger.debug(attrError)
        # check for traps last update and set offline if past deltatime
        for mouseTrap in mouseTraps.itervalues():
            mouseTrap.check_update()
#### MAIN ####
# Default log level; pass "-d" on the command line for DEBUG.
logLevel = logging.INFO
#logLevel = logging.DEBUG
script_dir = os.path.dirname(os.path.realpath(__file__))
if (sys.argv and len(sys.argv) > 1):
    if sys.argv[1] == "-d":
        logLevel = logging.DEBUG
# main application logger
logger = logging.getLogger(__name__)
logger.setLevel(logLevel)
# stderr logging: route uncaught tracebacks into the logger
sl = StreamToLogger('STDERR', logger, logging.ERROR)
sys.stderr = sl
fh = logging.FileHandler(script_dir + '/mousewatcher.log')
fh.setLevel(logging.DEBUG)
# create stdout handler to log to stdout
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logLevel)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
# one email handler per configured recipient (INFO records only)
for email_address in EMAIL_LIST:
    em = EmailHandler(email_address)
    em.setLevel(logging.INFO)
    em.setFormatter(formatter)
    logger.addHandler(em)
logger.info("Receiver Starting...")
# open up the serial port to get data transmitted to xbee
ser = None
try:
    ser = serial.Serial(SERIALPORT, BAUDRATE, timeout = SERIALPORT_TIMEOUT)
    ser.open()
    if ser.isOpen():
        ser.flushInput()
        ser.flushOutput()
except Exception as inst:
    log_text = "Error with serial port: %s - %s" % (type(inst), inst)
    logger.error(log_text)
    KEEP_RUNNING = False
# loop forever (until a signal or serial failure clears KEEP_RUNNING)
while KEEP_RUNNING:
    main_loop()
logger.info("Receiver Terminating!")
"""
XBee Configuration Settings:
XBEE (XB24, XBEE 802.15.4, Ver 10EC)
== Transmitter Setup ==
Network & Security:
PAN ID: 6687
MY: 1 (for the first one, increment by one per trap)
Sleep Modes
SM = 4 (Cyclic sleep)
ST = 1388 hex or 5000 ms
SP = 68B0 hex or 26800 x 10 ms = 268 sec or 4:28 min
I/O Settings
D4 = 3 (Digital In)
D0 = 3 (Digital In)
PR = EE (Pull-Up Resistor disabled for D0 and D4)
IC = 11 (Change detect for D0 and D4)
== Receiver Setup ==
PAN ID: 6687
Everything Else = Defaults
"""
| jaebird/mousewatcher | mousewatcher.py | Python | gpl-3.0 | 11,891 | [
"Amber"
] | 1a32af10cd58b78192f264c49f469a1f34fd1d6528d2eacd32ec04ad01a463f8 |
import itertools
import matplotlib
import numpy as np
from scipy.optimize import curve_fit
from scipy import interpolate
from astropy import units
from matplotlib import pyplot as plt
# Imports for fast_running_median
from collections import deque
from itertools import islice
from bisect import insort, bisect_left
from pypeit.core import pydl
from pypeit import msgs
def wavegrid(wave_min, wave_max, dwave, osamp=1.0):
    """
    Utility routine to generate a uniform grid of wavelengths

    Args:
        wave_min: float
            Minimum wavelength. Can be in linear or log.
        wave_max: float
            Maximum wavelength. Can be in linear or log.
        dwave: float
            Delta wavelength interval
        osamp: float
            Oversampling factor; the output step is ``dwave/osamp``

    Returns:
        wave_grid: float ndarray
            Uniform wavelength grid starting at ``wave_min``
    """
    # Number of coarse intervals needed to span the requested range.
    n_coarse = int(np.ceil((wave_max - wave_min) / dwave))
    # Oversample by `osamp`, stepping from wave_min in units of dwave/osamp.
    n_fine = int(np.ceil(osamp * n_coarse))
    return wave_min + (dwave / osamp) * np.arange(n_fine)
def rebin(a, newshape):
    '''Rebin an array to a new shape using slicing. This routine is taken from:
    https://scipy-cookbook.readthedocs.io/items/Rebinning.html. The image shapes need
    not be integer multiples of each other, but in this regime the transformation will
    not be reversible, i.e. if a_orig = rebin(rebin(a,newshape), a.shape) then
    a_orig will not be everywhere equal to a (but it will be equal in most places).

    Args:
        a: ndarray, any dtype
            Image of any dimensionality and data type
        newshape:
            Shape of the new image desired. Dimensionality must be the same as a.

    Returns:
        ndarray: same dtype as input Image with same values as a rebinning to shape newshape
    '''
    if len(a.shape) != len(newshape):
        msgs.error('Dimension of a image does not match dimension of new requested image shape')
    # One fractional-stride slice per axis maps new pixels onto old pixels.
    samplers = [slice(0, old_len, float(old_len) / new_len)
                for old_len, new_len in zip(a.shape, newshape)]
    # mgrid evaluates the fractional coordinates; integer truncation picks
    # the largest old index not exceeding each coordinate.
    coords = np.mgrid[samplers].astype('i')
    return a[tuple(coords)]
# TODO This function is only used by procimg.lacosmic. Can it be replaced by above?
def rebin_evlist(frame, newshape):
    """Block-average ``frame`` down to ``newshape``.

    Each output pixel is the mean of the corresponding
    ``factor[0] x factor[1] x ...`` block of input pixels, where
    ``factor = frame.shape / newshape`` (each axis must divide evenly).
    Replaces the original ``eval()``-based implementation (from
    https://scipy-cookbook.readthedocs.io/items/Rebinning.html) with an
    explicit, safer reshape/sum that computes the same result.

    Args:
        frame (ndarray): Input image of any dimensionality.
        newshape (tuple): Target shape; same number of dimensions as
            ``frame``, each axis an integer divisor of the input axis.

    Returns:
        ndarray: Block-averaged (float) image with shape ``newshape``.
    """
    factor = np.asarray(frame.shape) // np.asarray(newshape)
    # Interleave output lengths with block sizes: (n0, f0, n1, f1, ...)
    paired = np.column_stack((np.asarray(newshape), factor)).ravel()
    blocked = frame.reshape(tuple(int(p) for p in paired))
    # Sum over every block axis (the odd positions), then normalize by the
    # number of input pixels per block.
    block_axes = tuple(range(1, blocked.ndim, 2))
    return blocked.sum(axis=block_axes) / factor.prod()
def pyplot_rcparams():
    """
    params for pretty matplotlib plots

    Returns:

    """
    # Apply all tick, line-width, font and legend settings in one update.
    plt.rcParams.update({
        "xtick.top": True,
        "ytick.right": True,
        "xtick.minor.visible": True,
        "ytick.minor.visible": True,
        "ytick.direction": 'in',
        "xtick.direction": 'in',
        "xtick.major.size": 6,
        "ytick.major.size": 6,
        "xtick.minor.size": 3,
        "ytick.minor.size": 3,
        "xtick.major.width": 1,
        "ytick.major.width": 1,
        "xtick.minor.width": 1,
        "ytick.minor.width": 1,
        "axes.linewidth": 1,
        "lines.linewidth": 3,
        "lines.markeredgewidth": 2,
        "patch.linewidth": 3,
        "hatch.linewidth": 3,
        "font.size": 13,
        "legend.frameon": False,
        "legend.handletextpad": 1,
    })
def pyplot_rcparams_default():
    """
    Restore all matplotlib rcParams to their library defaults.

    Undoes any customization applied by pyplot_rcparams().

    Returns:

    """
    # rcParamsDefault holds matplotlib's pristine settings; update() restores them all
    matplotlib.rcParams.update(matplotlib.rcParamsDefault)
def smooth(x, window_len, window='flat'):
    """smooth the data using a window with requested size.

    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) in both ends so that edge effects are minimized at the beginning and end part of the signal.

    This code taken from this cookbook and slightly modified: https://scipy-cookbook.readthedocs.io/items/SignalSmooth.html

    input:
        x: the input signal
        window_len: the dimension of the smoothing window; should be an odd integer
        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
            flat window will produce a moving average smoothing., default is 'flat'

    output:
        the smoothed signal, same shape as x

    example:

    t=linspace(-2,2,50)
    x=sin(t)+randn(len(t))*0.1
    y=smooth(x)

    see also:

    numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
    scipy.signal.lfilter

    TODO: the window parameter could be the window itself if an array instead of a string
    NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        # Window too small to smooth anything; return the input unchanged
        return x
    if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
        # Fixed garbled message ("Window is on of ...") from the original
        raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")

    # Reflect-pad both ends so the convolution is well behaved at the edges
    s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
    if window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
    else:
        # Look the window function up on the numpy module instead of eval()
        w = getattr(np, window)(window_len)

    y = np.convolve(w / w.sum(), s, mode='same')
    # Trim the padding so the output has the same length as the input
    return y[(window_len-1):(y.size-(window_len-1))]
def fast_running_median(seq, window_size):
    """
    Compute the median of sequence of numbers with a running window. The boundary conditions are identical to the
    scipy 'reflect' boundary codition:

    'reflect' (`d c b a | a b c d | d c b a`)

    The input is extended by reflecting about the edge of the last pixel.

    This code has been confirmed to produce identical results to scipy.ndimage.filters.median_filter with the reflect
    boundary condition, but is ~ 100 times faster.

    Args:
        seq (list or 1-d numpy array of numbers):
        window_size (int): size of running window.

    Returns:
        ndarray: median filtered values

    Code contributed by Peter Otten, made to be consistent with scipy.ndimage.filters.median_filter by Joe Hennawi.

    See discussion at:
    http://groups.google.com/group/comp.lang.python/browse_thread/thread/d0e011c87174c2d0
    """
    # Cast to int up front: the padding slices below fail for a float window_size
    window_size = int(window_size)
    # pad the array for the reflection
    seq_pad = np.concatenate((seq[0:window_size][::-1], seq, seq[-1:(-1-window_size):-1]))
    seq_pad = iter(seq_pad)
    d = deque()
    s = []          # sorted copy of the current window contents
    result = []
    # Prime the window with the first window_size elements, keeping s sorted
    for item in islice(seq_pad, window_size):
        d.append(item)
        insort(s, item)
        result.append(s[len(d)//2])
    m = window_size // 2
    # Slide the window: drop the oldest element, insert the new one in order
    for item in seq_pad:
        old = d.popleft()
        d.append(item)
        del s[bisect_left(s, old)]
        insort(s, item)
        result.append(s[m])

    # This takes care of the offset produced by the original code deduced by trial and error comparison with
    # scipy.ndimage.filters.medfilt
    result = np.roll(result, -window_size//2 + 1)

    return result[window_size:-window_size]
# TODO JFH: This is the old bspline_fit which shoul be deprecated. I think some codes still use it though. We should transtion to pydl everywhere
def bspline_fit(x,y,order=3,knots=None,everyn=20,xmin=None,xmax=None,w=None,bkspace=None):
""" bspline fit to x,y
Should probably only be called from func_fit
Parameters:
---------
x: ndarray
y: ndarray
func: str
Name of the fitting function: polynomial, legendre, chebyshev, bspline
deg: int
deg of the spline. Default=3 (cubic)
xmin: float, optional
Minimum value in the array [both must be set to normalize]
xmax: float, optional
Maximum value in the array [both must be set to normalize]
w: ndarray, optional
weights to be used in the fitting (weights = 1/sigma)
knots: ndarray, optional
Internal knots only. External ones are added by scipy
everyn: int
Knot everyn good pixels, if used
bkspace: float
Spacing of breakpoints in units of x
Returns:
---------
tck : tuple
describes the bspline
"""
task = 0 # Default of splrep
if w is None:
ngd = x.size
gd = np.arange(ngd)
weights = None
else:
gd = np.where(w > 0.)[0]
weights = w[gd]
ngd = len(gd)
# Make the knots
if knots is None:
if bkspace is not None:
xrnge = (np.max(x[gd]) - np.min(x[gd]))
startx = np.min(x[gd])
nbkpts = max(int(xrnge/bkspace) + 1,2)
tempbkspace = xrnge/(nbkpts-1)
knots = np.arange(1, nbkpts-1)*tempbkspace + startx
# Remove cases where two knots have no data between them
keep_knots = np.array([True]*len(knots))
for ii in range(1,len(knots)): # Ugly for loop..
if not np.any((x[gd] > knots[ii-1]) & (x[gd] < knots[ii])):
keep_knots[ii] = False
knots = knots[keep_knots]
elif everyn is not None:
# A knot every good N pixels
idx_knots = np.arange(everyn//2, ngd-everyn//2, everyn)
knots = x[gd[idx_knots]]
else:
msgs.error("No method specified to generate knots")
else:
task = -1
# Generate spline
try:
tck = interpolate.splrep(x[gd], y[gd], w=weights, k=order, xb=xmin, xe=xmax, t=knots, task=task)
except ValueError:
# Knot problem (usually)
msgs.warn("Problem in the bspline knot")
raise ValueError("Crashing out of bspline fitting")
return tck
#ToDo I would prefer to remove the kwargs_bspline and
# and make them explicit
def bspline_profile(xdata, ydata, invvar, profile_basis, inmask = None, upper=5, lower=5,
                    maxiter=25, nord = 4, bkpt=None, fullbkpt=None,
                    relative=None, kwargs_bspline=None, kwargs_reject=None):
    """
    Create a B-spline in the least squares sense with rejection, using a model profile

    Parameters
    ----------
    xdata : :class:`numpy.ndarray`
        Independent variable.
    ydata : :class:`numpy.ndarray`
        Dependent variable.
    invvar : :class:`numpy.ndarray`
        Inverse variance of `ydata`.
    profile_basis : :class:`numpy.ndarray`
        model profiles
    inmask : :class:`numpy.ndarray`, optional
        Input good-pixel mask; pixels that are False are never used in the fit.
    upper : :class:`int` or :class:`float`, optional
        Upper rejection threshold in units of sigma, defaults to 5 sigma.
    lower : :class:`int` or :class:`float`, optional
        Lower rejection threshold in units of sigma, defaults to 5 sigma.
    maxiter : :class:`int`, optional
        Maximum number of rejection iterations, default 25. Set this to
        zero to disable rejection.
    nord : :class:`int`, optional
        Order of B-spline fit
    bkpt : :class:`numpy.ndarray`
        Array of breakpoints to be used for the b-spline
    fullbkpt : :class:`numpy.ndarray`
        Full array of breakpoints to be used for the b-spline, without letting the b-spline class append on any extra bkpts
    relative : class:`numpy.ndarray`
        Array of integer indices to be used for computing the reduced chi^2 of the fits, which then is used as a scale factor for
        the upper,lower rejection thresholds
    kwargs_bspline : dict, optional
        Passed to bspline; defaults to an empty dict
    kwargs_reject : dict, optional
        Passed to djs_reject; defaults to an empty dict

    Returns
    -------
    :func:`tuple`
        A tuple containing the (sset, outmask, yfit, reduced_chi, exit_status), where

            sset: object
                bspline object
            outmask: : :class:`numpy.ndarray`
                output mask which the same size as xdata, such that rejected points have outmask set to False
            yfit : :class:`numpy.ndarray`
                result of the bspline fit (same size as xdata)
            reduced_chi: float
                value of the reduced chi^2
            exit_status: int
                0 = clean exit, 1 = max iterations, 2 = all masked,
                3 = all breakpoints dropped, 4 = fewer good points than nord
    """
    # Avoid the mutable-default-argument pitfall: build fresh dicts per call
    if kwargs_bspline is None:
        kwargs_bspline = {}
    if kwargs_reject is None:
        kwargs_reject = {}
    # Checks
    nx = xdata.size
    if ydata.size != nx:
        msgs.error('Dimensions of xdata and ydata do not agree.')
    # ToDO at the moment invvar is a required variable input
    #    if invvar is not None:
    #        if invvar.size != nx:
    #            raise ValueError('Dimensions of xdata and invvar do not agree.')
    #        else:
    #            #
    #            # This correction to the variance makes it the same
    #            # as IDL's variance()
    #            #
    #            var = ydata.var()*(float(nx)/float(nx-1))
    #            if var == 0:
    #                var = 1.0
    #            invvar = np.ones(ydata.shape, dtype=ydata.dtype)/var

    npoly = int(profile_basis.size/nx)
    if profile_basis.size != nx*npoly:
        msgs.error('Profile basis is not a multiple of the number of data points.')

    # Init
    yfit = np.zeros(ydata.shape)
    reduced_chi = 0.

    if invvar.size == 1:
        outmask = True
    else:
        outmask = np.ones(invvar.shape, dtype='bool')
    if inmask is None:
        inmask = (invvar > 0)
    nin = np.sum(inmask)
    msgs.info("Fitting npoly =" + "{:3d}".format(npoly) + " profile basis functions, nin=" + "{:3d}".format(nin) + " good pixels")
    msgs.info("****************************  Iter  Chi^2  # rejected  Rel. fact   ****************************")
    msgs.info("                              ----  -----  ----------  ---------- ")
    maskwork = outmask & inmask & (invvar > 0)
    if not maskwork.any():
        msgs.error('No valid data points in bspline_profile!.')
    else:
        # Init bspline class
        sset = pydl.bspline(xdata[maskwork], nord=nord, npoly=npoly, bkpt=bkpt, fullbkpt=fullbkpt,
                            funcname='Bspline longslit special', **kwargs_bspline)
        if maskwork.sum() < sset.nord:
            msgs.warn('Number of good data points fewer than nord.')
            exit_status = 4
            return sset, outmask, yfit, reduced_chi, exit_status

    # This was checked in detail against IDL for identical inputs
    outer = (np.outer(np.ones(nord, dtype=float), profile_basis.flatten('F'))).T
    action_multiple = outer.reshape((nx, npoly * nord), order='F')
    #--------------------
    # Iterate spline fit
    iiter = 0
    error = -1
    qdone = False
    exit_status = 0
    relative_factor = 1.0
    tempin = np.copy(inmask)
    while (error != 0 or qdone is False) and iiter <= maxiter and (exit_status == 0):
        ngood = maskwork.sum()
        goodbk = sset.mask.nonzero()[0]
        if ngood <= 1 or not sset.mask.any():
            sset.coeff = 0
            exit_status = 2 # This will end iterations
            #iiter = maxiter + 1 # End iterations
        else:
            # Do the fit. Return values from workit for error are as follows:
            #    0 if fit is good
            #   -1 if some breakpoints are masked, so try the fit again
            #   -2 if everything is screwed

            # we'll do the fit right here..............
            if error != 0:
                bf1, laction, uaction = sset.action(xdata)
                if np.any(bf1 == -2) or (bf1.size !=nx*nord):
                    msgs.error("BSPLINE_ACTION failed!")
                action = np.copy(action_multiple)
                for ipoly in range(npoly):
                    action[:, np.arange(nord)*npoly + ipoly] *= bf1
                del bf1 # Clear the memory
            # BUG FIX: the original tested `np.isfinite(action) is False`, which
            # is an identity comparison that is always False, so non-finite
            # values were never caught.  Test the array contents instead.
            if not np.all(np.isfinite(action)):
                msgs.error("Infinities in action matrix, wavelengths may be very messed up!!!")
            error, yfit = sset.workit(xdata, ydata, invvar*maskwork,action, laction, uaction)
        iiter += 1
        if error == -2:
            msgs.warn(" All break points have been dropped!! Fit failed, I hope you know what you are doing")
            exit_status = 3
            return (sset, np.zeros(xdata.shape,dtype=bool), np.zeros(xdata.shape), reduced_chi, exit_status)
        elif error == 0:
            # Iterate the fit -- next rejection iteration
            chi_array = (ydata - yfit)*np.sqrt(invvar * maskwork)
            reduced_chi = np.sum(chi_array**2)/(ngood - npoly*(len(goodbk) + nord)-1)
            relative_factor = 1.0
            # JFH -- What is
            if relative is not None:
                nrel = len(relative)
                if nrel == 1:
                    relative_factor = np.sqrt(reduced_chi)
                else:
                    this_chi2 = np.sum(chi_array[relative]**2)/(nrel - (len(goodbk) + nord) - 1)
                    relative_factor = np.sqrt(this_chi2)
                relative_factor = max(relative_factor,1.0)
            # Rejection
            # ToDO JFH by setting inmask to be tempin which is maskwork, we are basically implicitly enforcing sticky rejection
            # here. See djs_reject.py. I'm leaving this as is for consistency with the IDL version, but this may require
            # further consideration. I think requiring sticky to be set is the more transparent behavior.
            maskwork, qdone = pydl.djs_reject(ydata, yfit, invvar=invvar,
                                              inmask=tempin, outmask=maskwork,
                                              upper=upper*relative_factor,
                                              lower=lower*relative_factor, **kwargs_reject)
            tempin = np.copy(maskwork)
            msgs.info("                             {:4d}".format(iiter) + "{:8.3f}".format(reduced_chi) +
                      "  {:7d}".format((maskwork == 0).sum()) + "  {:6.2f}".format(relative_factor))

        else:
            msgs.info("                             {:4d}".format(iiter) + "    ---    ---    ---    ---")

    if iiter == (maxiter + 1):
        exit_status = 1

    # Exit status:
    #    0 = fit exited cleanly
    #    1 = maximum iterations were reached
    #    2 = all points were masked
    #    3 = all break points were dropped
    #    4 = Number of good data points fewer than nord

    msgs.info("***************************************************************************************************")
    msgs.info(
        "Final fit after " + "{:2d}".format(iiter) + " iterations: reduced_chi = " + "{:8.3f}".format(reduced_chi) +
        ", rejected = " + "{:7d}".format((maskwork == 0).sum()) + ", relative_factor = {:6.2f}".format(relative_factor))
    # Finish
    outmask = np.copy(maskwork)
    # Return
    return sset, outmask, yfit, reduced_chi, exit_status
def calc_ivar(varframe):
    """ Calculate the inverse variance based on the input array

    Args:
        varframe (ndarray): Variance image

    Returns:
        ndarray: Inverse variance image; zero wherever the variance is not positive
    """
    positive = varframe > 0.
    # The (varframe == 0) term pads zero-variance pixels so the division is safe;
    # those pixels (and negative-variance ones) end up with ivar = 0
    return positive / (np.abs(varframe) + (varframe == 0))
def func_fit(x, y, func, deg, x2 = None, minx=None, maxx=None, minx2=None, maxx2=None, w=None, inmask = None, guesses=None,
             bspline_par=None, return_errors=False):
    """ General routine to fit a function to a given set of x,y points

    Parameters
    ----------
    x : ndarray
        independent variable values
    y : ndarray
        dependent variable values
    func : str
        polynomial, legendre, chebyshev, bspline, gaussian, moffat
        (or a '2d' suffixed polynomial type when x2 is provided)
    deg : int
        degree of the fit (number of parameters for gaussian/moffat)
    x2 : ndarray, optional
        second independent variable, triggers a 2d fit for '2d' functions
    minx, maxx : float, optional
        normalization limits for x (legendre/chebyshev)
    minx2, maxx2 : float, optional
        normalization limits for x2 (2d fits)
    w : ndarray, optional
        weights to be used in the fitting (weights = 1/sigma)
    inmask : ndarray, optional
        boolean mask; only True locations are used in the fit
    guesses : tuple, optional
        initial parameter guesses for gaussian/moffat fits
    bspline_par : dict, optional
        Passed to bspline_fit()
    return_errors : bool, optional
        if True, gaussian fits also return the covariance matrix

    Returns
    -------
    coeff : ndarray or tuple
        ndarray for standard function fits
        tuple for bspline
    """
    # If the user provided an inmask apply it. The logic below of evaluating the fit only at the non-masked
    # pixels is preferable to the other approach of simply setting the weights to zero. The reason for that is that
    # the fits use a least-square optimization approach using matrix algebra, and lots of zero weights are
    # 1) more costly, and 2) will not produce exactly the same result (due to roundoff error) as actually
    # removing the locations you want to mask.
    if inmask is not None:
        x_out = x[inmask]
        y_out = y[inmask]
        if x2 is not None:
            x2_out = x2[inmask]
        else:
            x2_out = None
        if w is not None:
            w_out = w[inmask]
        else:
            w_out = None
    else:
        x_out = x
        y_out = y
        if x2 is not None:
            x2_out = x2
        else:
            x2_out = None
        if w is not None:
            w_out = w
        else:
            w_out = None

    # For two-d fits x = x, y = x2, y = z
    if ('2d' in func) and (x2_out is not None):
        # Is this a 2d fit?
        return polyfit2d_general(x_out, x2_out, y_out, deg, w=w_out, function=func[:-2],minx=minx, maxx=maxx, miny=minx2, maxy=maxx2)
    elif func == "polynomial":
        return np.polynomial.polynomial.polyfit(x_out, y_out, deg, w=w_out)
    elif func == "legendre":
        # Rescale x to [-1, 1], the natural domain of the Legendre basis
        if minx is None or maxx is None:
            if np.size(x_out) == 1:
                xmin, xmax = -1.0, 1.0
            else:
                xmin, xmax = np.min(x_out), np.max(x_out)
        else:
            xmin, xmax = minx, maxx
        xv = 2.0 * (x_out-xmin)/(xmax-xmin) - 1.0
        return np.polynomial.legendre.legfit(xv, y_out, deg, w=w_out)
    elif func == "chebyshev":
        # Rescale x to [-1, 1], the natural domain of the Chebyshev basis
        if minx is None or maxx is None:
            if np.size(x_out) == 1:
                xmin, xmax = -1.0, 1.0
            else:
                xmin, xmax = np.min(x_out), np.max(x_out)
        else:
            xmin, xmax = minx, maxx
        xv = 2.0 * (x_out-xmin)/(xmax-xmin) - 1.0
        return np.polynomial.chebyshev.chebfit(xv, y_out, deg, w=w_out)
    elif func == "bspline":
        if bspline_par is None:
            bspline_par = {}
        # TODO -- Deal with this kwargs-like kludge
        return bspline_fit(x_out, y_out, order=deg, w=w_out, **bspline_par)
    elif func == "gaussian":
        # Guesses
        if guesses is None:
            ampl, cent, sigma = guess_gauss(x_out, y_out)
            # As first guess choose slope and intercept to be zero
            b = 0
            m = 0
        else:
            if deg == 2:
                ampl, sigma = guesses
            elif deg == 3:
                ampl, cent, sigma = guesses
            elif deg == 4:
                b, ampl, cent, sigma = guesses
            elif deg == 5:
                m, b, ampl, cent, sigma = guesses
        # Error
        if w_out is not None:
            sig_y = 1./w_out
        else:
            sig_y = None
        if deg == 2:  # 2 parameter fit
            popt, pcov = curve_fit(gauss_2deg, x_out, y_out, p0=[ampl, sigma], sigma=sig_y)
        elif deg == 3:  # Standard 3 parameters
            popt, pcov = curve_fit(gauss_3deg, x_out, y_out, p0=[ampl, cent, sigma],
                                   sigma=sig_y)
        elif deg == 4:  # 4 parameters
            popt, pcov = curve_fit(gauss_4deg, x_out, y_out, p0=[b, ampl, cent, sigma],sigma=sig_y)
        elif deg == 5:  # 5 parameters
            popt, pcov = curve_fit(gauss_5deg, x_out, y_out, p0=[m, b, ampl, cent, sigma],sigma=sig_y)
        else:
            msgs.error("Not prepared for deg={:d} for Gaussian fit".format(deg))
        # Return
        if return_errors:
            return popt, pcov
        else:
            return popt
    elif func == "moffat":
        # Guesses
        if guesses is None:
            ampl, cent, sigma = guess_gauss(x_out, y_out)
            p0 = ampl
            p2 = 3. # Standard guess
            p1 = (2.355*sigma)/(2*np.sqrt(2**(1./p2)-1))
        else:
            p0,p1,p2 = guesses
        # Error
        if w_out is not None:
            sig_y = 1./w_out
        else:
            sig_y = None
        if deg == 3:  # Standard 3 parameters
            popt, pcov = curve_fit(moffat, x_out, y_out, p0=[p0,p1,p2], sigma=sig_y)
        else:
            msgs.error("Not prepared for deg={:d} for Moffat fit".format(deg))
        # Return
        return popt
    else:
        # BUG FIX: .format(func) was never applied, so the message printed a
        # literal '{0:s}' instead of the offending function name
        msgs.error("Fitting function '{0:s}' is not implemented yet".format(func) + msgs.newline() +
                   "Please choose from 'polynomial', 'legendre', 'chebyshev','bspline'")
def func_val(c, x, func, x2 = None, minx=None, maxx=None, minx2=None, maxx2=None):
    """ Generic routine to return an evaluated function

    Functional forms include:
        polynomial, legendre, chebyshev, bspline, gaussian, moffat
        (and the '2d' polynomial variants when x2 is provided)

    Parameters
    ----------
    c : ndarray
        coefficients (a tck tuple for bspline)
    x : ndarray
        values at which to evaluate
    func : str
        name of the functional form
    x2 : ndarray, optional
        second independent variable for 2d fits
    minx, maxx : float, optional
        normalization limits for x (legendre/chebyshev)
    minx2, maxx2 : float, optional
        normalization limits for x2 (2d fits)

    Returns
    -------
    values : ndarray
    """
    # For two-d fits x = x, y = x2, y = z
    if ('2d' in func) and (x2 is not None):
        # Is this a 2d fit?
        if func[:-2] == "polynomial":
            return np.polynomial.polynomial.polyval2d(x, x2, c)
        elif func[:-2] in ["legendre", "chebyshev"]:
            # Scale x-direction
            xv = scale_minmax(x, minx=minx, maxx=maxx)
            # Scale x2-direction
            x2v = scale_minmax(x2, minx=minx2, maxx=maxx2)
            if func[:-2] == "legendre":
                return np.polynomial.legendre.legval2d(xv, x2v, c)
            elif func[:-2] == "chebyshev":
                return np.polynomial.chebyshev.chebval2d(xv, x2v, c)
        else:
            msgs.error("Function {0:s} has not yet been implemented for 2d fits".format(func))
        return None
    elif func == "polynomial":
        return np.polynomial.polynomial.polyval(x, c)
    elif func == "legendre":
        # Rescale x onto [-1, 1] before evaluating the Legendre series
        if minx is None or maxx is None:
            if np.size(x) == 1:
                xmin, xmax = -1.0, 1.0
            else:
                xmin, xmax = np.min(x), np.max(x)
        else:
            xmin, xmax = minx, maxx
        xv = 2.0 * (x-xmin)/(xmax-xmin) - 1.0
        return np.polynomial.legendre.legval(xv, c)
    elif func == "chebyshev":
        # Rescale x onto [-1, 1] before evaluating the Chebyshev series
        if minx is None or maxx is None:
            if np.size(x) == 1:
                xmin, xmax = -1.0, 1.0
            else:
                xmin, xmax = np.min(x), np.max(x)
        else:
            xmin, xmax = minx, maxx
        xv = 2.0 * (x-xmin)/(xmax-xmin) - 1.0
        return np.polynomial.chebyshev.chebval(xv, c)
    elif func == "bspline":
        # ext=1 returns 0 outside the knot range rather than extrapolating
        return interpolate.splev(x, c, ext=1)
    elif func == "gaussian":
        if len(c) == 2:
            return gauss_2deg(x, c[0], c[1])
        elif len(c) == 3:
            return gauss_3deg(x, c[0], c[1], c[2])
        else:
            msgs.error("Not ready for this type of gaussian")
    elif func == "moffat":
        if len(c) == 3:
            return moffat(x, c[0], c[1], c[2])
        else:
            msgs.error("Not ready for this type of Moffat")
    else:
        # BUG FIX: .format(func) was never applied, so the message printed a
        # literal '{0:s}' instead of the offending function name
        msgs.error("Fitting function '{0:s}' is not implemented yet".format(func) + msgs.newline() +
                   "Please choose from 'polynomial', 'legendre', 'chebyshev', 'bspline'")
def calc_fit_rms(xfit, yfit, fit, func, minx=None, maxx=None, weights=None):
    """ Simple weighted RMS calculation between data and an evaluated fit

    Args:
        xfit : ndarray
            x values at which the fit is evaluated
        yfit : ndarray
            data values
        fit : coefficients
            passed to func_val
        func : str
            functional form, passed to func_val
        minx : float, optional
        maxx : float, optional
        weights : ndarray, optional
            relative weights; normalised internally

    Returns:
        float: RMS
    """
    if weights is None:
        weights = np.ones(xfit.size)
    # Normalise on a NEW array -- the original code did `weights /= sum`,
    # silently mutating the caller's weights in place
    weights = weights / np.sum(weights)
    values = func_val(fit, xfit, func, minx=minx, maxx=maxx)
    # rms = np.std(yfit-values)
    rms = np.sqrt(np.sum(weights*(yfit-values)**2))
    # Return
    return rms
def robust_meanstd(array):
    """
    Determine a robust measure of the mean and dispersion of array

    Args:
        array (ndarray): an array of values

    Returns:
        float, float: median of the array and a robust estimate of the standand deviation (assuming a symmetric distribution)
    """
    center = np.median(array)
    abs_deviation = np.abs(array - center)
    # 1.4826 scales the MAD to the sigma of an equivalent Gaussian
    return center, 1.4826 * np.median(abs_deviation)
def polyfitter2d(data, mask=None, order=2):
    """
    Fit a 2D polynomial to an image, with several masking conventions.

    Args:
        data (ndarray): 2D image to fit
        mask: one of: None (no masking); a scalar -- that value is masked in
            data; a 1D array of row indices to mask; a 2D array the same shape
            as data with non-zero marking masked pixels
        order (int): order of the 2D polynomial

    Returns:
        tuple: (fit coefficients, best-fit model image shaped like data)
    """
    x, y = np.meshgrid(np.linspace(0.0, 1.0, data.shape[1]), np.linspace(0.0, 1.0, data.shape[0]))
    if isinstance(mask, (float, int)):
        # mask is the value that should be masked in data
        w = np.where(data != mask)
        xf = x[w].flatten()
        yf = y[w].flatten()
        m = polyfit2d(xf, yf, data[w].T.flatten(), order)
    elif mask is None or mask.size == 0:
        # There are no masks
        xf = x.flatten()
        yf = y.flatten()
        m = polyfit2d(xf, yf, data.T.flatten(), order)
    elif len(mask.shape) == 1:
        # mask is applied along one axis
        mskar = np.ones((data.shape[0], data.shape[1]))
        mskar[mask, :] = 0
        w = np.where(mskar == 1)
        xf = x[w].flatten()
        yf = y[w].flatten()
        m = polyfit2d(xf, yf, data[w].T.flatten(), order)
    elif mask.shape[0] == data.shape[0] and mask.shape[1] == data.shape[1]:
        # mask is an array that indicates the masked data
        w = np.where(mask == 0)
        xf = x[w].flatten()
        yf = y[w].flatten()
        m = polyfit2d(xf, yf, data[w].T.flatten(), order)
    else:
        # BUG FIX: previously an unmatched mask shape fell through with `m`
        # undefined, raising a confusing NameError at the return below
        msgs.error("Unsupported mask shape passed to polyfitter2d")
    # Return the best model
    return m, polyval2d(x, y, m).T
def polyfit2d(x, y, z, order=3):
    """
    Fit a 2D polynomial of the given order to sampled (x, y, z) data.

    Args:
        x (ndarray): x coordinates of the samples
        y (ndarray): y coordinates of the samples
        z (ndarray): data value at each (x, y)
        order (int): maximum power of x and of y

    Returns:
        ndarray: coefficient vector m, ordered as
            itertools.product(range(order+1), range(order+1)),
            i.e. m[k] multiplies x**i * y**j
    """
    ncols = (order + 1)**2
    G = np.zeros((x.size, ncols))
    ij = itertools.product(range(order+1), range(order+1))
    for k, (i, j) in enumerate(ij):
        G[:, k] = x**i * y**j
    # rcond=None selects the machine-precision-based cutoff and silences the
    # FutureWarning from the legacy default; also matches polyfit2d_general
    m = np.linalg.lstsq(G, z, rcond=None)[0]
    return m
def polyval2d(x, y, m):
    """
    Evaluate the 2D polynomial with coefficients m (as built by polyfit2d).

    Args:
        x: x coordinates
        y: y coordinates
        m: coefficient vector of length (order+1)**2

    Returns:
        ndarray: polynomial evaluated at each (x, y)
    """
    # The coefficient vector length fixes the polynomial order
    order = int(np.sqrt(len(m))) - 1
    exponents = itertools.product(range(order + 1), range(order + 1))
    z = np.zeros_like(x)
    for coeff, (i, j) in zip(m, exponents):
        z += coeff * x ** i * y ** j
    return z
def moffat(x,p0,p1,p2):
    """
    Moffat profile

    This 3 parameter formulation assumes the trace is known

    Args:
        x (float or ndarray): x values
        p0 (float): Amplitude
        p1 (float): Width scaling
        p2 (float): Moffat power-law index

    Returns:
        float or ndarray: Evaluated Moffat
    """
    denom = (1 + (x / p1) ** 2) ** p2
    return p0 / denom
def gauss_2deg(x,ampl,sigm):
    """
    Simple 2 parameter Gaussian (amplitude, sigma), centred at x = 0.

    Args:
        x: x values
        ampl: amplitude
        sigm: sigma

    Returns:
        float or ndarray: Evaluated Gausssian
    """
    exponent = -1. * x ** 2 / 2. / sigm ** 2
    return ampl * np.exp(exponent)
def gauss_3deg(x,ampl,cent,sigm):
    """ Simple 3 parameter Gaussian

    Args:
        x (float or ndarray): x-valus
        ampl (float): Amplitude
        cent (float): Centroid
        sigm (float): sigma

    Returns:
        float or ndarray: Evaluated Gausssian
    """
    exponent = -1. * (cent - x) ** 2 / 2 / sigm ** 2
    return ampl * np.exp(exponent)
def gauss_4deg(x,b, ampl,cent,sigm):
    """ 4 parameter Gaussian: a constant floor plus a Gaussian peak

    Args:
        x: x values
        b (float): Floor
        ampl (float): Amplitude
        cent (float): Centroid
        sigm (float): sigma

    Returns:
        float or ndarray: Evaluated Gausssian
    """
    peak = ampl * np.exp(-1. * (cent - x) ** 2 / 2 / sigm ** 2)
    return b + peak
def gauss_5deg(x,m, b, ampl,cent,sigm):
    """ 5 parameter Gaussian: a sloped linear floor plus a Gaussian peak

    Args:
        x: x values
        m (float): Slope of floor
        b (float): Floor
        ampl (float): Amplitude
        cent (float): Centroid
        sigm (float): sigma

    Returns:
        float or ndarray: Evaluated Gausssian
    """
    floor = b + m * x
    return floor + ampl * np.exp(-1. * (cent - x) ** 2 / 2 / sigm ** 2)
def guess_gauss(x,y):
    """
    Guesses Gaussian parameters with basic stats

    Args:
        x (ndarray): x-values
        y (ndarray): y-values

    Returns:
        float, float, float: Amplitude, centroid, sigma
    """
    # Remove the baseline so moments are computed on the peak only
    baseline_removed = y - y.min()
    total = np.sum(baseline_removed)
    # First moment gives the centroid, second moment the width (scipy doc)
    cent = np.sum(baseline_removed * x) / total
    sigma = np.sqrt(np.abs(np.sum((x - cent) ** 2 * baseline_removed) / total))
    # Estimate the amplitude from pixels within +/- sigma/2 of the centroid
    core = np.abs(x - cent) < sigma / 2
    ampl = np.median(y[core]) if np.any(core) else y.max()
    # Return
    return ampl, cent, sigma
def polyfit2d_general(x, y, z, deg, w=None, function='polynomial',
                      minx=None, maxx=None, miny=None, maxy=None):
    """
    General 2D polynomial least-squares fit.

    :param x: array of x values
    :param y: array of y values
    :param z: value of data at each (x,y) coordinate
    :param deg: degree of polynomial fit in the form [nx,ny]
    :param w: weights
    :param function: 'polynomial' or 'legendre'
    :param minx, maxx, miny, maxy: normalization limits for the legendre basis
    :return: coefficients with shape deg+1
    """
    # msgs.work("Generalize to different polynomial types")
    x = np.asarray(x)
    y = np.asarray(y)
    z = np.asarray(z)
    deg = np.asarray(deg)
    # Build the 2D Vandermonde matrix for the requested basis
    if function == 'polynomial':
        vander = np.polynomial.polynomial.polyvander2d(x, y, deg)
    elif function == 'legendre':
        vander = np.polynomial.legendre.legvander2d(
            scale_minmax(x, minx=minx, maxx=maxx),
            scale_minmax(y, minx=miny, maxx=maxy), deg)
    else:
        msgs.error("Not ready for this type of {:s}".format(function))
    # Fold in the weights, after sanity checks
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            msgs.bug("utils.polyfit2d - Expected 1D vector for weights")
        if len(x) != len(w) or len(y) != len(w) or len(x) != len(y):
            msgs.bug("utils.polyfit2d - Expected x, y and weights to have same length")
        z = z * w
        vander = vander * w[:,np.newaxis]
    # Flatten and solve the least-squares problem
    vander = vander.reshape((-1,vander.shape[-1]))
    z = z.reshape((vander.shape[0],))
    return np.linalg.lstsq(vander, z, rcond=None)[0].reshape(deg+1)
def scale_minmax(x, minx=None, maxx=None):
    """
    Linearly rescale the input array onto [-1, 1].

    Args:
        x (ndarray): x-values
        minx (float, optional): Minimum value for scaling
        maxx (float, optional): Maximum value for scaling

    Returns:
        ndarray: Scaled x values
    """
    if minx is not None and maxx is not None:
        lo, hi = minx, maxx
    elif np.size(x) == 1:
        # A single point cannot define a range; use the canonical domain
        lo, hi = -1.0, 1.0
    else:
        lo, hi = np.min(x), np.max(x)
    return 2.0 * (x - lo) / (hi - lo) - 1.0
def robust_polyfit(xarray, yarray, order, weights=None, maxone=True, sigma=3.0,
                   function="polynomial", initialmask=None, forceimask=False,
                   minx=None, maxx=None, guesses=None, bspline_par=None, verbose=True):
    """
    A robust (equally weighted) polynomial fit is performed to the xarray, yarray pairs
    mask[i] = 1 are masked values

    :param xarray: independent variable values
    :param yarray: dependent variable values
    :param order: the order of the polynomial to be used in the fitting
    :param weights: weights to be used in the fitting (weights = 1/sigma)
    :param maxone: If True, only the most deviant point in a given iteration will be removed
    :param sigma: confidence interval for rejection
    :param function: which function should be used in the fitting (valid inputs: 'polynomial', 'legendre', 'chebyshev', 'bspline')
    :param initialmask: a mask can be supplied as input, these values will be masked for the first iteration. 1 = value masked
    :param forceimask: if True, the initialmask will be forced for all iterations
    :param minx: minimum value in the array (or the left limit for a legendre/chebyshev polynomial)
    :param maxx: maximum value in the array (or the right limit for a legendre/chebyshev polynomial)
    :return: mask, ct -- mask is an array of the masked values, ct is the coefficients of the robust polyfit.
    """
    # Setup the initial mask
    if initialmask is None:
        # dtype=int (not the removed alias np.int) -- crashes on numpy >= 1.24
        mask = np.zeros(xarray.size, dtype=int)
        if forceimask:
            msgs.warn("Initial mask cannot be enforced -- no initital mask supplied")
            forceimask = False
    else:
        mask = initialmask.copy()
    mskcnt = np.sum(mask)
    # Iterate, and mask out new values on each iteration
    ct = guesses
    while True:
        w = np.where(mask == 0)
        xfit = xarray[w]
        yfit = yarray[w]
        if weights is not None:
            wfit = weights[w]
        else:
            wfit = None
        ct = func_fit(xfit, yfit, function, order, w=wfit,
                      guesses=ct, minx=minx, maxx=maxx, bspline_par=bspline_par)
        yrng = func_val(ct, xarray, function, minx=minx, maxx=maxx)
        # Robust sigma estimate from the MAD of the residuals
        sigmed = 1.4826*np.median(np.abs(yfit-yrng[w]))
        #if xarray.size-np.sum(mask) <= order+2: JFH fixed this bug
        if xarray.size - np.sum(mask) <= order + 1:
            if verbose:
                msgs.warn("More parameters than data points - fit might be undesirable")
            break  # More data was masked than allowed by order
        if maxone:  # Only remove the most deviant point
            tst = np.abs(yarray[w]-yrng[w])
            m = np.argmax(tst)
            if tst[m] > sigma*sigmed:
                mask[w[0][m]] = 1
        else:
            if forceimask:
                w = np.where((np.abs(yarray-yrng) > sigma*sigmed) | (initialmask==1))
            else:
                w = np.where(np.abs(yarray-yrng) > sigma*sigmed)
            mask[w] = 1
        if mskcnt == np.sum(mask):
            break  # No new values have been included in the mask
        mskcnt = np.sum(mask)
    # Final fit
    w = np.where(mask == 0)
    xfit = xarray[w]
    yfit = yarray[w]
    if weights is not None:
        wfit = weights[w]
    else:
        wfit = None
    ct = func_fit(xfit, yfit, function, order, w=wfit, minx=minx, maxx=maxx, bspline_par=bspline_par)
    return mask, ct
# TODO This should replace robust_polyfit. #ToDO This routine needs to return dicts with the minx and maxx set
def robust_polyfit_djs(xarray, yarray, order, x2 = None, function = 'polynomial', minx = None, maxx = None, minx2 = None, maxx2 = None,
                       bspline_par = None,
                       guesses = None, maxiter=10, inmask=None, sigma=None,invvar=None, lower=None, upper=None,
                       maxdev=None,maxrej=None, groupdim=None,groupsize=None, groupbadpix=False, grow=0,
                       sticky=True, use_mad=True):
    """
    A robust polynomial fit is performed to the xarray, yarray pairs
    mask[i] = 1 are good values

    xarray: independent variable values
    yarray: dependent variable values
    order: the order of the polynomial to be used in the fitting
    x2: ndarray, default = None
        Do a 2d fit?
    function: which function should be used in the fitting (valid inputs: 'polynomial', 'legendre', 'chebyshev', 'bspline')
    minx: minimum value in the array (or the left limit for a legendre/chebyshev polynomial)
    maxx: maximum value in the array (or the right limit for a legendre/chebyshev polynomial)
    guesses : tuple
    bspline_par : dict
        Passed to bspline_fit()
    maxiter : :class:`int`, optional
        Maximum number of rejection iterations, default 10. Set this to zero to disable rejection and simply do a fit.
    inmask : :class:`numpy.ndarray`, optional
        Input mask.  Bad points are marked with a value that evaluates to ``False``.
        Must have the same number of dimensions as `data`. Points masked as bad "False" in the inmask
        will also always evaluate to "False" in the outmask
    sigma : :class: float or `numpy.ndarray`, optional
        Standard deviation of the yarray, used to reject points based on the values
        of `upper` and `lower`. This can either be a single float for the entire yarray or a ndarray with the same
        shape as the yarray.
    invvar : :class: float or `numpy.ndarray`, optional
        Inverse variance of the data, used to reject points based on the values
        of `upper` and `lower`. This can either be a single float for the entire yarray or a ndarray with the same
        shape as the yarray. If both `sigma` and `invvar` are set the code will return an error.
    lower : :class:`int` or :class:`float`, optional
        If set, reject points with data < model - lower * sigma.
    upper : :class:`int` or :class:`float`, optional
        If set, reject points with data > model + upper * sigma.
    maxdev : :class:`int` or :class:`float`, optional
        If set, reject points with abs(data-model) > maxdev.  It is permitted to
        set all three of `lower`, `upper` and `maxdev`.
    maxrej: :class:`int` or :class:`numpy.ndarray`, optional
        Maximum number of points to reject in this iteration.  If `groupsize` or
        `groupdim` are set to arrays, this should be an array as well.
    groupdim: class: `int`
        Dimension along which to group the data; set to 1 to group along the 1st dimension, 2 for the 2nd dimension, etc.
        If data has shape [100,200], then setting GROUPDIM=2 is equivalent to grouping the data with groupsize=100.
        In either case, there are 200 groups, specified by [*,i]. NOT WELL TESTED IN PYTHON!
    groupsize: class: `int`
        If this and maxrej are set, then reject a maximum of maxrej points per group of groupsize points.  If groupdim is also
        set, then this specifies sub-groups within that. NOT WELL TESTED IN PYTHON!!
    groupbadpix : :class:`bool`, optional
        If set to ``True``, consecutive sets of bad pixels are considered groups,
        overriding the values of `groupsize`.
    grow : :class:`int`, optional, default = 0
        If set to a non-zero integer, N, the N nearest neighbors of rejected
        pixels will also be rejected.
    sticky : :class:`bool`, optional, default is True
        If set to ``True``, pixels rejected in one iteration remain rejected in
        subsequent iterations, even if the model changes. If
    use_mad : :class: `bool`, optional, defaul = False
        It set to ``True``, compute the median of the maximum absolute deviation between the data and use this for the rejection instead of
        the default which is to compute the standard deviation of the yarray - modelfit. Note that it is not possible to specify use_mad=True
        and also pass in values for sigma or invvar, and the code will return an error if this is done.

    Returns:
    --------
    :return: mask, ct -- mask is an array of the masked values, ct is the coefficients of the robust polyfit.
    """
    # Setup the initial mask
    if inmask is None:
        inmask = np.ones(xarray.size, dtype=bool)
    # sigma and invvar are mutually exclusive descriptions of the noise
    if sigma is not None and invvar is not None:
        msgs.error('You cannot specify both sigma and invvar')
    elif sigma is not None:
        weights = 1.0/sigma**2
    elif invvar is not None:
        weights = np.copy(invvar)
    else:
        # No noise model supplied: fall back to equal weighting
        weights = np.ones(xarray.size,dtype=float)
    # Iterate, and mask out new values on each iteration
    ct = guesses
    iIter = 0
    qdone = False
    thismask = np.copy(inmask)
    # Fit / reject loop: stops when djs_reject reports no new rejections
    # (qdone) or after maxiter iterations
    while (not qdone) and (iIter < maxiter):
        if np.sum(thismask) <= np.sum(order) + 1:
            msgs.warn("More parameters than data points - fit might be undesirable")
        if not np.any(thismask):
            msgs.warn("All points were masked. Returning current fit and masking all points. Fit is likely undesirable")
            if ct is None:
                ct = np.zeros(order + 1)
            return thismask, ct
        # Fit only the currently unmasked points, then evaluate everywhere
        ct = func_fit(xarray, yarray, function, order, x2 = x2, w=weights, inmask=thismask,guesses=ct,
                      minx=minx, maxx=maxx,minx2=minx2,maxx2=maxx2, bspline_par=bspline_par)
        ymodel = func_val(ct, xarray, function, x2 = x2, minx=minx, maxx=maxx,minx2=minx2,maxx2=maxx2)
        # TODO Add nrej and nrej_tot as in robust_optimize below?
        thismask, qdone = pydl.djs_reject(yarray, ymodel, outmask=thismask,inmask=inmask, sigma=sigma, invvar=invvar,
                                          lower=lower,upper=upper,maxdev=maxdev,maxrej=maxrej,
                                          groupdim=groupdim,groupsize=groupsize,groupbadpix=groupbadpix,grow=grow,
                                          use_mad=use_mad,sticky=sticky)
        iIter += 1
    if (iIter == maxiter) & (maxiter != 0):
        msgs.warn('Maximum number of iterations maxiter={:}'.format(maxiter) + ' reached in robust_polyfit_djs')
    outmask = np.copy(thismask)
    if np.sum(outmask) == 0:
        msgs.warn('All points were rejected!!! The fits will be zero everywhere.')
    # Do the final fit
    ct = func_fit(xarray, yarray, function, order, x2 = x2, w=weights, inmask = outmask, minx=minx, maxx=maxx, minx2 = minx2, maxx2=maxx2, bspline_par=bspline_par)
    return outmask, ct
def robust_optimize(ydata, fitfunc, arg_dict, maxiter=10, inmask=None, sigma=None,invvar=None, lower=None, upper=None,
                    maxdev=None,maxrej=None, groupdim=None, groupsize=None, groupbadpix=False, grow=0, sticky=True,
                    use_mad=True, **kwargs_optimizer):
    """Iteratively fit ``ydata`` with ``fitfunc`` while rejecting outliers.

    Parameters
    ----------
    ydata : ndarray
        Data to be fit.
    fitfunc : callable
        Called as ``fitfunc(ydata, mask, arg_dict, **kwargs_optimizer)``;
        must return the tuple ``(result, ymodel)``.
    arg_dict : dict
        Extra arguments passed through to ``fitfunc``.
    maxiter : int
        Maximum number of rejection iterations; 0 disables the final warning.
    inmask : ndarray of bool, optional
        Initial good-pixel mask; defaults to all good.
    sigma, invvar :
        Mutually exclusive error descriptions forwarded to ``pydl.djs_reject``.
    lower, upper, maxdev, maxrej, groupdim, groupsize, groupbadpix, grow,
    sticky, use_mad :
        Rejection parameters forwarded to ``pydl.djs_reject``.

    Returns
    -------
    result, ymodel, outmask
        The final ``fitfunc`` result, the model evaluated with the final mask,
        and the final good-pixel mask.
    """
    # Setup the initial mask
    if inmask is None:
        inmask = np.ones(ydata.size, dtype=bool)
    if sigma is not None and invvar is not None:
        msgs.error('You cannot specify both sigma and invvar')

    # Iterate, and mask out new values on each iteration
    niter = 0  # renamed from 'iter' to avoid shadowing the builtin
    qdone = False
    thismask = np.copy(inmask)
    while (not qdone) and (niter < maxiter):
        result, ymodel = fitfunc(ydata, thismask, arg_dict, **kwargs_optimizer)
        thismask_iter = thismask.copy()
        # Bug fix: 'sigma' was validated above but never forwarded to the
        # rejection routine (robust_polyfit_djs above passes both).
        thismask, qdone = pydl.djs_reject(ydata, ymodel, outmask=thismask, inmask=inmask, sigma=sigma, invvar=invvar,
                                          lower=lower, upper=upper, maxdev=maxdev, maxrej=maxrej,
                                          groupdim=groupdim, groupsize=groupsize, groupbadpix=groupbadpix, grow=grow,
                                          use_mad=use_mad, sticky=sticky)
        nrej = np.sum(thismask_iter & np.invert(thismask))
        nrej_tot = np.sum(inmask & np.invert(thismask))
        msgs.info(
            'Iteration #{:d}: nrej={:d} new rejections, nrej_tot={:d} total rejections'.format(niter, nrej, nrej_tot))
        niter += 1

    if (niter == maxiter) & (maxiter != 0):
        # Bug fix: the old message named the wrong routine ('sens_tell_fit').
        msgs.warn('Maximum number of iterations maxiter={:}'.format(maxiter) + ' reached in robust_optimize')
    outmask = np.copy(thismask)
    if np.sum(outmask) == 0:
        msgs.warn('All points were rejected!!! The fits will be zero everywhere.')
    # Perform a final fit using the final outmask
    result, ymodel = fitfunc(ydata, outmask, arg_dict, **kwargs_optimizer)
    return result, ymodel, outmask
def subsample(frame):
    """
    Used by LACosmic

    Upsample an image by a factor of two along the first two axes, i.e. every
    pixel is replicated into a 2x2 block.

    Args:
        frame (ndarray):
            Input image (at least 2D; trailing axes are preserved).

    Returns:
        ndarray: Image with the first two dimensions doubled.
    """
    # np.repeat produces exactly the same result as the previous
    # float-stepped np.mgrid + integer-cast fancy indexing, but without
    # building coordinate grids or relying on float rounding behaviour.
    return frame.repeat(2, axis=0).repeat(2, axis=1)
def yamlify(obj, debug=False):
    """Recursively process an object so it can be serialised for yaml.
    Based on jsonify in `linetools <https://pypi.python.org/pypi/linetools>`_.

    Note: All string-like keys in :class:`dict` s are converted to
    :class:`str`.

    Also found in desiutils

    Parameters
    ----------
    obj : :class:`object`
        Any object.
    debug : :class:`bool`, optional
        Print extra information if requested.

    Returns
    -------
    :class:`object`
        An object suitable for yaml serialization.  For example
        :class:`numpy.ndarray` is converted to :class:`list`,
        :class:`numpy.int64` is converted to :class:`int`, etc.
    """
    if isinstance(obj, (np.float64, np.float32)):
        obj = float(obj)
    elif isinstance(obj, (np.int32, np.int64, np.int16)):
        obj = int(obj)
    elif isinstance(obj, np.bool_):
        obj = bool(obj)
    # elif isinstance(obj, bytes):
    #     obj = obj.decode('utf-8')
    elif isinstance(obj, (np.str_, np.bytes_, str)):
        # Bug fix: np.string_ was removed in NumPy 2.0; np.str_ and np.bytes_
        # cover the same scalar types on both old and new NumPy releases.
        obj = str(obj)
    elif isinstance(obj, units.Quantity):
        try:
            obj = obj.value.tolist()
        except AttributeError:
            obj = obj.value
    elif isinstance(obj, np.ndarray):  # Must come after Quantity
        obj = obj.tolist()
    elif isinstance(obj, dict):
        # First convert keys
        nobj = {}
        for key, value in obj.items():
            if isinstance(key, str):
                nobj[str(key)] = value
            else:
                nobj[key] = value
        # Now recursive
        obj = nobj
        for key, value in obj.items():
            obj[key] = yamlify(value, debug=debug)
    elif isinstance(obj, list):
        # In-place conversion of the elements.
        for i, item in enumerate(obj):
            obj[i] = yamlify(item, debug=debug)
    elif isinstance(obj, tuple):
        obj = list(obj)
        for i, item in enumerate(obj):
            obj[i] = yamlify(item, debug=debug)
        obj = tuple(obj)
    # elif isinstance(obj, Unit):
    #     obj = obj.name
    # elif obj is units.dimensionless_unscaled:
    #     obj = 'dimensionless_unit'
    if debug:
        print(type(obj))
    return obj
| PYPIT/PYPIT | pypeit/utils.py | Python | gpl-3.0 | 49,387 | [
"Gaussian"
] | 90e1095512054271c013ae38f046488ba50d7f5e14dd4b003a6ec8bacf62779f |
# coding: utf-8
#
# Copyright (C) 2014 Savoir-faire Linux Inc. (<www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from getpass import getuser
import pwd
from fabric.api import lcd, cd, task, roles, env, local, run, runs_once, execute
from fabric.colors import red, green
from fabric.contrib.files import exists
from fabric.contrib.console import confirm
# Import socket to find the localhost IP address
import socket
# Import default variables
from default_vars import *
# Import local variables' overrides, if they exist
# Import local variables' overrides, if they exist
if path.exists(path.join(path.dirname(__file__), 'local_vars.py')):
    from local_vars import *
# Function to manage differents users, hosts, roles, and variables  #
#####################################################################
# Get info of the current user and host
user_name = getuser()
host_name = local("hostname", capture=True)
# Set the env dict with the roles and the hosts.
# NOTE(review): AEGIR_HOSTNAME (and the other upper-case names used below)
# come from the star imports of default_vars / local_vars above.
env.roledefs['local'] = ["{}@{}".format(user_name, host_name)]
env.roledefs['docker'] = ["root@{}".format(AEGIR_HOSTNAME)]
env.roledefs['dk_aegir'] = ["aegir@{}".format(AEGIR_HOSTNAME)]
env.roledefs['jenkins'] = ["jenkins@{}".format(host_name)]
def set_env(role):
    """
    Set the global WORKSPACE according to the given role.
    :param role: the role to use for define the host
    :return:
    """
    global WORKSPACE
    if role == 'local':
        WORKSPACE = LOCAL_WORKSPACE
    elif role in ('docker', 'dk_aegir'):
        WORKSPACE = AEGIR_DOCKER_WORKSPACE
    else:
        # Same failure mode as the previous dict lookup.
        raise KeyError(role)
def fab_run(role="local", cmd="", capture=False):
    """
    Execute a command locally or remotely depending on the role.
    :param role: the role to use for define the host
    :param cmd: the command to execute
    :param capture: if it should return or not the output of the command
        (only honoured for local runs; fabric's run() always returns output)
    :return: the result of fabric's local() or run()
    """
    return local(cmd, capture) if role == "local" else run(cmd)
def fab_cd(role, directory):
    """
    Return a directory-changing context manager, local or remote.
    :param role: the role to use for define the host
    :param directory: the directory of context
    :return: fabric's lcd() for the local role, cd() otherwise
    """
    return lcd(directory) if role == "local" else cd(directory)
def fab_exists(role, directory):
    """
    Check if a directory exists locally or remotely.
    :param role: the role to use for define the host.
    :param directory: the directory to check
    :return: whether the directory exists on the selected host
    """
    return path.exists(directory) if role == "local" else exists(directory)
def fab_add_to_hosts(ip, site_hostname):
    """
    Helper function to add the ip and hostname to /etc/hosts (asks first).
    :param ip: the container IP address
    :param site_hostname: the friendly hostname to map onto that IP
    :return:
    """
    if confirm(green('Do you want add to the /etc/hosts the line "{} {}"? '
                     'If you say yes you will be able to visit the site using a more frienldy url '
                     '"http://{}".'.format(ip, site_hostname, site_hostname))):
        # Add if not find the comment "# Docker auto-added host" to the file /etc/hosts
        local(
            'grep "# Docker auto-added host" /etc/hosts > /dev/null || '
            'sudo sed -i "$ a # Docker auto-added host" /etc/hosts')
        # Add the ip address and hostname after the comment "# Docker auto-added host"
        # (sed 'i' inserts the new line just before the marker line).
        local('sudo sed -i "/# Docker auto-added host/i\{} {}" /etc/hosts'.format(ip, site_hostname))
def fab_remove_from_hosts(site_hostname):
    """
    Helper function to remove the ip and the hostname to /etc/hosts
    :param site_hostname: the hostname whose lines should be deleted
    :return:
    """
    message = 'Enter your password to remove the {} from your /etc/hosts file'.format(site_hostname)
    print(green(message))
    # Delete every line matching the hostname.
    delete_cmd = 'sudo sed -i "/{}/d" /etc/hosts'.format(site_hostname)
    local(delete_cmd)
def fab_update_hosts(ip, site_hostname):
    """
    Helper function to update the file /etc/hosts: drop any stale entry for
    the hostname, then (optionally, after confirmation) add the new pair.
    :param ip: the container IP address
    :param site_hostname: the hostname to map onto that IP
    :return:
    """
    fab_remove_from_hosts(site_hostname)
    fab_add_to_hosts(ip, site_hostname)
# Helper functions to manage docker images and containers #
###########################################################
def docker_ps(running_only=False):
    # Return the names of docker containers; with running_only=False the
    # '-a' flag also lists stopped containers.
    args = '' if running_only else '-a'
    result = local('docker ps {}'.format(args), capture=True)
    lines = result.stdout.splitlines()
    # container name is supposed to be the last column
    assert lines[0].strip().endswith('NAMES')
    # NOTE(review): relies on the name being the last whitespace-separated
    # token of each row -- docker container names cannot contain spaces.
    return [line.strip().split(' ')[-1] for line in lines[1:]]
def docker_tryrun(imgname, containername=None, opts='', mounts=None, cmd='', restart=True):
    # mounts is a list of (from, to, canwrite) path tuples. ``from`` is relative to the project root.
    # Returns True if the container was effectively ran (false if it was restarted or aborted)
    if not mounts:
        mounts = []
    # Already running: nothing to do.
    if containername and containername in docker_ps(running_only=True):
        print green("{} already running".format(containername))
        return False
    # Exists but stopped: restart it (or abort, depending on 'restart').
    if containername and containername in docker_ps(running_only=False):
        if restart:
            print green("{} already exists and is stopped. Restarting!".format(containername))
            local('docker restart {}'.format(containername))
            return True
        else:
            print red("There's a dangling container {}! That's not supposed to happen. Aborting".format(containername))
            print "Run 'docker rm {}' to remove that container".format(containername)
            return False
    # Build the -v mount options; read-only mounts get the ':ro' suffix.
    for from_path, to_path, canwrite in mounts:
        # NOTE(review): 'from_path' is used as-is here, despite the comment
        # above saying it is relative to the project root -- confirm callers
        # pass absolute paths.
        abspath = from_path
        opt = ' -v {}:{}'.format(abspath, to_path)
        if not canwrite:
            opt += ':ro'
        opts += opt
    if containername:
        containername_opt = '-h {} --name {}'.format(containername, containername)
    else:
        containername_opt = ''
    local('docker run {} {} {} {}'.format(opts, containername_opt, imgname, cmd))
    return True
def docker_ensureruns(containername):
    """Make sure *containername* runs; restart it if it exists but is stopped.

    Returns True when the container ends up running, False when it does not
    exist at all (the caller is expected to report the error).
    """
    if containername in docker_ps(running_only=True):
        return True
    if containername in docker_ps(running_only=False):
        local('docker restart {}'.format(containername))
        return True
    return False
def docker_ensure_data_container(containername, volume_paths=None, base_image='busybox'):
    """Ensure a data-only container exists (created, never started).

    Data containers are *never* removed; their only purpose is to hold
    volume data.  Returns True when a container was created by this call.
    """
    if containername in docker_ps(running_only=False):
        return False
    volume_args = ' '.join('-v %s' % volpath for volpath in volume_paths) if volume_paths else ''
    local('docker create %s --name %s %s' % (volume_args, containername, base_image))
    return True
def docker_isrunning(containername):
    """Tell whether *containername* is among the currently running containers."""
    return containername in docker_ps(running_only=True)
def docker_images():
    # Return the repository names of all local docker images.
    result = local('docker images', capture=True)
    lines = result.stdout.splitlines()
    # image name is supposed to be the first column
    assert lines[0].strip().startswith('REPOSITORY')
    return [line.strip().split(' ')[0] for line in lines]
# Task to manage docker's images and containers generally in the localhost#
###########################################################################
@task
@roles('local')
def docker_create_aegir_image(role='local'):
    """
    Create docker aegir image (skipped when the image already exists).
    """
    set_env(role)
    with fab_cd(role, '{}/aegir'.format(WORKSPACE)):
        # Set the key avialable for the container
        manage_needed_files('local', 'aegir', True)
        if '{}/{}'.format(AEGIR_PROJECT_NAME, AEGIR_PROJECT_TYPE) in docker_images():
            print(red('Docker image {}/{} was found, you has already build this image'.format(AEGIR_PROJECT_NAME,
                                                                                              AEGIR_PROJECT_TYPE)))
        else:
            fab_run(role, 'docker build -t {}/{} .'.format(AEGIR_PROJECT_NAME, AEGIR_PROJECT_TYPE))
            print(green('Docker image {}/{} was build successful'.format(AEGIR_PROJECT_NAME, AEGIR_PROJECT_TYPE)))
        # Clean up the keys/scripts copied into the build context above.
        manage_needed_files('local', 'aegir', False)
@task
@roles('local')
def docker_run_aegir_container(role='local'):
    """
    Run docker aegir container (requires the image built by docker_create_aegir_image).
    """
    set_env(role)
    with fab_cd(role, '{}/aegir'.format(WORKSPACE)):
        #fab_run(role, 'sudo chmod -R 777 {}'.format(AEGIR_HOME_WORKSPACE))
        if '{}/{}'.format(AEGIR_PROJECT_NAME, AEGIR_PROJECT_TYPE) in docker_images():
            if docker_tryrun('{}/{}'.format(AEGIR_PROJECT_NAME, AEGIR_PROJECT_TYPE),
                             '{}_container'.format(AEGIR_PROJECT_NAME),
                             '-d -p {}'.format(AEGIR_DOCKER_PORT_TO_BIND) ):
                # ,mounts=[(AEGIR_HOME_WORKSPACE, AEGIR_DOCKER_WORKSPACE, True)]):
                # If container was successful build, get the IP address and show it to the user.
                # The quadruple braces survive .format() as the literal Go
                # template {{.NetworkSettings.IPAddress}}.
                ip = fab_run(role, 'docker inspect -f "{{{{.NetworkSettings.IPAddress}}}}" '
                                   '{}_container'.format(AEGIR_PROJECT_NAME), capture=True)
                fab_update_hosts(ip, AEGIR_HOSTNAME)
                print(green('Docker container {}_container was build successful. '
                            'To visit the Website open a web browser in http://{} or http://localhost:{}.'
                            ''.format(AEGIR_PROJECT_NAME, AEGIR_HOSTNAME, AEGIR_DOCKER_PORT_TO_BIND)))
        else:
            print(red('Docker image {}/{} not found and is a requirement to run the {}_container.'
                      'Please, run first "fab create" in order to build the {}/{} '
                      'image'.format(AEGIR_PROJECT_NAME, AEGIR_PROJECT_TYPE, AEGIR_PROJECT_NAME, AEGIR_PROJECT_NAME,
                                     AEGIR_PROJECT_TYPE)))
@task
@roles('local')
def docker_stop_aegir_container(role='local'):
    """
    Stop docker aegir container.
    """
    set_env(role)
    with fab_cd(role, '{}/aegir'.format(WORKSPACE)):
        if '{}_container'.format(AEGIR_PROJECT_NAME) in docker_ps():
            # Drop the /etc/hosts alias first so no stale entry is left behind.
            fab_remove_from_hosts(AEGIR_HOSTNAME)
            fab_run(role, 'docker stop {}_container'.format(AEGIR_PROJECT_NAME))
            print(green('Docker container {}_container was successful stopped'.format(AEGIR_PROJECT_NAME)))
        else:
            print(red('Docker container {}_container was not running or paused'.format(AEGIR_PROJECT_NAME)))
@task
@roles('local')
def docker_remove_aegir_container(role='local'):
    """
    Remove docker aegir container (force-removes it even if running).
    """
    set_env(role)
    with fab_cd(role, '{}/aegir'.format(WORKSPACE)):
        if '{}_container'.format(AEGIR_PROJECT_NAME) in docker_ps():
            # Drop the /etc/hosts alias first so no stale entry is left behind.
            fab_remove_from_hosts(AEGIR_HOSTNAME)
            fab_run(role, 'docker rm -f {}_container'.format(AEGIR_PROJECT_NAME))
            print(green('Docker container {}_container was successful removed'.format(AEGIR_PROJECT_NAME)))
        else:
            print(red('Docker container {}_container was already removed'.format(AEGIR_PROJECT_NAME)))
@task
@roles('local')
def docker_remove_aegir_image(role='local'):
    """
    Remove docker aegir image (warns when the container is still running).
    """
    set_env(role)
    with fab_cd(role, '{}/aegir'.format(WORKSPACE)):
        if docker_isrunning('{}_container'.format(AEGIR_PROJECT_NAME)):
            print(red('Docker container {}_container is running, '
                      'you should stopped it after remove the image {}/{}'.format(AEGIR_PROJECT_NAME, AEGIR_PROJECT_NAME,
                                                                                  AEGIR_PROJECT_TYPE)))
        if '{}/{}'.format(AEGIR_PROJECT_NAME, AEGIR_PROJECT_TYPE) in docker_images():
            fab_run(role, 'docker rmi -f {}/{}'.format(AEGIR_PROJECT_NAME, AEGIR_PROJECT_TYPE))
            # Remove dangling docker images to free space.
            if '<none>' in docker_images():
                fab_run(role, 'docker images --filter="dangling=true" -q | xargs docker rmi -f')
            print(green('Docker image {}/{} was successful removed'.format(AEGIR_PROJECT_NAME, AEGIR_PROJECT_TYPE)))
        else:
            print(red('Docker image {}/{} was not found'.format(AEGIR_PROJECT_NAME, AEGIR_PROJECT_TYPE)))
@task
@roles('local')
def docker_connect_aegir_container(role='local'):
    """
    Connect to docker aegir container using "docker -it exec <name> bash".
    """
    set_env(role)
    with fab_cd(role, '{}/aegir'.format(WORKSPACE)):
        if docker_isrunning('{}_container'.format(AEGIR_PROJECT_NAME)):
            fab_run(role, 'docker exec -it {}_container bash'.format(AEGIR_PROJECT_NAME))
        else:
            # Bug fix: the placeholder was never filled in, so a literal
            # '{}_container' was printed before.
            print(red('Docker container {}_container is not running, it should be running to be able to connect.'
                      ''.format(AEGIR_PROJECT_NAME)))
@task
@roles('local')
def docker_ssh_aegir_container(role='local', path_key='~/.ssh/id_rsa'):
    """
    Connect to docker aegir container through ssh protocol using you private key that should be in '~/.ssh/id_rsa'.
    """
    set_env(role)
    # The quadruple braces survive .format() as the literal Go template
    # {{.NetworkSettings.IPAddress}} understood by docker inspect.
    ip = fab_run(role, 'docker inspect -f "{{{{.NetworkSettings.IPAddress}}}}" {}_container'.format(AEGIR_PROJECT_NAME),
                 capture=True)
    if ip:
        fab_run(role, 'ssh -i {} root@{}'.format(path_key, ip))
@task
@roles('docker')
def docker_update_host_aegir_container():
    """
    Helper function to update the ip and hostname in docker container.
    """
    # Get the ip of the container, this
    ip = local('docker inspect -f "{{{{.NetworkSettings.IPAddress}}}}" {}_container'.format(AEGIR_PROJECT_NAME),
               capture=True)
    # NOTE(review): /etc/hosts is rewritten via a temp file instead of
    # 'sed -i' -- presumably because in-place editing fails on docker's
    # bind-mounted /etc/hosts; confirm before simplifying.
    run("sed '/{}/c\{} {} {}_container localhost localhost.domainlocal' "
        "/etc/hosts > /root/hosts.backup".format(ip, ip, AEGIR_HOSTNAME, AEGIR_PROJECT_NAME))
    run("cat /root/hosts.backup > /etc/hosts")
@task
@roles('local')
def docker_run_jenkins_container(role='local'):
    """
    Run docker jenkins container. The same that run: $ fab cjrun
    """
    set_env(role)
    # Change permision in jenkins_home dir and run the container using the official image.
    # NOTE(review): chmod 777 is a heavy hammer; presumably needed so the
    # container's jenkins uid can write the mounted home -- confirm.
    fab_run(role, 'sudo chmod -R 777 {}'.format(JENKINS_HOME_WORKSPACE))
    fab_run(role, 'docker run -d -p {} -v {}:{} -h {} --name {}_container '
                  'jenkins'.format(JENKINS_DOCKER_PORT_TO_BIND, JENKINS_HOME_WORKSPACE, JENKINS_DOCKER_WORKSPACE,
                                   JENKINS_HOSTNAME, JENKINS_PROJECT_NAME))
@task
@roles('local')
def docker_stop_jenkins_container(role='local'):
    """
    Stop docker jenkins container (no-op when it is not running).
    """
    set_env(role)
    if docker_isrunning('{}_container'.format(JENKINS_PROJECT_NAME)):
        # Consistency fix: derive the container name from JENKINS_PROJECT_NAME
        # instead of hard-coding 'jenkins_container', matching the check above.
        fab_run(role, 'docker stop {}_container'.format(JENKINS_PROJECT_NAME))
@task
@roles('local')
def docker_remove_jenkins_container(role='local'):
    """
    Remove docker jenkins container, always stop docker jenkins container first.
    """
    set_env(role)
    if docker_isrunning('{}_container'.format(JENKINS_PROJECT_NAME)):
        docker_stop_jenkins_container()
    # Remove the (now stopped) container.
    fab_run(role, 'docker rm {}_container'.format(JENKINS_PROJECT_NAME))
@task
@roles('local')
def docker_connect_jenkins_container(role='local'):
    """
    Connect to docker jenkins container using "docker -it exec <name> bash".
    """
    set_env(role)
    with fab_cd(role, '{}/aegir'.format(WORKSPACE)):
        if docker_isrunning('{}_container'.format(JENKINS_PROJECT_NAME)):
            fab_run(role, 'docker exec -it {}_container bash'.format(JENKINS_PROJECT_NAME))
        else:
            # Bug fix: the placeholder was never filled in, so a literal
            # '{}_container' was printed before.
            print(red('Docker container {}_container is not running, it should be running to be able to connect.'
                      ''.format(JENKINS_PROJECT_NAME)))
@task
@roles('local')
def copy_ssh_keys(role='local', ):
    """
    Copy your ssh keys to use it to clone git projects and to connect to containers using ssh protocol.
    Asks before overwriting an already-deployed public key.
    """
    set_env(role)
    fab_run(role, 'sudo chown {}:{} -R {}'.format(user_name, user_name, WORKSPACE))
    copy = True
    if fab_exists(role, '{}/deploy/id_rsa.pub'.format(WORKSPACE)):
        if confirm(red('There is a SSH public key in your deploy directory Say [Y] to keep this key, say [n] to '
                       'overwrite the key')):
            copy = False
    with fab_cd(role, WORKSPACE):
        if copy:
            # Copies both the private and the public key (~/.ssh/id_rsa*).
            fab_run(role, 'cp ~/.ssh/id_rsa* deploy/')
            print(green('SSH public key copied successful'))
        else:
            print(red('Keeping public the existing SSH key'))
@task
@roles('local')
def manage_needed_files(role='local', project='aegir', action=True):
    """
    Handle your ssh keys to use it in the docker aegir container to clone git projects and to connect to it using ssh.
    With action=True the key and helper scripts are copied into the project
    build context; with action=False they are removed again.
    """
    set_env(role)
    with fab_cd(role, WORKSPACE):
        if action:
            fab_run(role, 'cp deploy/id_rsa.pub {}'.format(project))
            fab_run(role, 'cp deploy/migrate-sites {}'.format(project))
            fab_run(role, 'cp deploy/remove-platforms {}'.format(project))
        else:
            fab_run(role, 'rm {}/id_rsa.pub'.format(project))
            fab_run(role, 'rm {}/migrate-sites'.format(project))
            fab_run(role, 'rm {}/remove-platforms'.format(project))
@task
@roles('docker')
def create_aegir_user(role='docker'):
    """
    Create the Aegir user dans docker aegir container.
    """
    set_env(role)
    # Create aegir dir and Setup ssh keys to use fabric
    fab_run(role, 'adduser --system --group --home /var/aegir --shell /bin/bash aegir')
    # Interactive: prompts for the new user's password.
    fab_run(role, 'passwd aegir')
    fab_run(role, 'adduser aegir www-data')
    if not fab_exists(role, '/var/aegir/.ssh'):
        fab_run(role, 'mkdir /var/aegir/.ssh')
    # Reuse root's keys/scripts (baked into the image) for the aegir user.
    fab_run(role, 'cp /root/.ssh/* /var/aegir/.ssh')
    fab_run(role, 'cp /root/migrate-sites /var/aegir/')
    fab_run(role, 'cp /root/remove-platforms /var/aegir/')
    fab_run(role, 'cat /var/aegir/.ssh/id_rsa.pub >> /var/aegir/.ssh/authorized_keys')
    fab_run(role, 'chown -R aegir:aegir /var/aegir')
    fab_run(role, 'a2enmod rewrite')
@task
@roles('docker')
def webserver_config(role='docker'):
    """
    Webserver configuration dans docker aegir container.
    """
    set_env(role)
    # Expose aegir's apache config via conf-available; it is enabled later
    # by enable_aegir_conf (a2enconf aegir).
    fab_run(role, 'ln -s /var/aegir/config/apache.conf /etc/apache2/conf-available/aegir.conf')
@task
@roles('docker')
def php_config(role='docker'):
    """
    PHP configuration dans docker aegir container.
    """
    set_env(role)
    # Raise the PHP memory limit from 128M to 512M for both CLI and apache.
    fab_run(role, 'sudo sed -i "s@memory_limit = 128M@memory_limit = 512M@g" /etc/php5/cli/php.ini')
    fab_run(role, 'sudo sed -i "s@memory_limit = 128M@memory_limit = 512M@g" /etc/php5/apache2/php.ini')
@task
@roles('docker')
def sudo_config(role='docker'):
    """
    Sudo configuration dans docker aegir container: allow the aegir user to
    run apache2ctl without a password or a tty.
    """
    set_env(role)
    fab_run(role, 'touch /etc/sudoers.d/aegir')
    fab_run(role, 'echo "Defaults:aegir !requiretty" > /etc/sudoers.d/aegir')
    # Bug fix: use '>>' so this line is appended; the previous '>' overwrote
    # the "Defaults" line written just above.
    fab_run(role, 'echo "aegir ALL=NOPASSWD: /usr/sbin/apache2ctl" >> /etc/sudoers.d/aegir')
@task
@roles('docker')
def database_config(role='docker'):
    """
    Database configuration dans docker aegir container.
    """
    set_env(role)
    # Grant the aegir DB user full remote access (aegir manages many vhost DBs).
    fab_run(role, 'mysql -uroot -e "GRANT ALL PRIVILEGES ON *.* TO \'{}\'@\'%\' '
                  'IDENTIFIED BY \'{}\'; FLUSH PRIVILEGES;"'.format(AEGIR_DB_USER, AEGIR_DB_PASS))
    # Interactive hardening wizard.
    fab_run(role, 'mysql_secure_installation')
@task
@roles('dk_aegir')
def install_aegir_components(role='dk_aegir'):
    """
    Install Aegir components dans docker aegir container (runs as the aegir user).
    """
    set_env(role)
    with fab_cd(role, AEGIR_DOCKER_WORKSPACE):
        fab_run(role, 'drush dl provision-7.x')
        fab_run(role, 'drush cc drush')
        # Interactive: hostmaster-install prompts for site details.
        fab_run(role, 'drush hostmaster-install')
@task
@roles('docker')
def enable_aegir_conf(role='docker'):
    """
    Enable Aegir configuration dans docker aegir container
    (the symlink was created earlier by webserver_config).
    """
    set_env(role)
    fab_run(role, 'a2enconf aegir')
    fab_run(role, 'service apache2 reload')
    fab_run(role, 'ln -s /var/aegir /opt/aegir')
@task
@roles('local')
def create_jenkins_user(role='local'):
    """
    Create the Jenkins user in you localhost to use it like an slave.
    Skipped (except for the ssh setup below) when the user already exists.
    """
    set_env(role)
    try:
        pwd.getpwnam('jenkins')
    except KeyError:
        fab_run(role, 'sudo adduser --system --group --shell /bin/bash jenkins')
        # Interactive: prompts for the new user's password.
        fab_run(role, 'sudo passwd jenkins')
        fab_run(role, 'sudo adduser jenkins www-data')
        fab_run(role, 'sudo adduser jenkins docker')
    # WARNING THIS MODIFY YOU PERSONNAL .ssh/ files.
    fab_run(role, 'cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys')
    fab_run(role, 'ssh-keyscan gitlab.savoirfairelinux.com >> ~/.ssh/known_hosts')
    fab_run(role, 'ssh-keyscan github.com >> ~/.ssh/known_hosts')
    fab_run(role, 'ssh-keyscan local.aegir.sfl >> ~/.ssh/known_hosts')
    fab_run(role, 'ssh-keyscan localhost >> ~/.ssh/known_hosts')
    # The files are reused by the Jenkins user.
    fab_run(role, 'sudo cp -r ~/.ssh /home/jenkins/')
    # Re-export the private key so it is readable by the jenkins account.
    fab_run(role, 'sudo openssl rsa -in ~/.ssh/id_rsa -out /home/jenkins/.ssh/id_rsa')
    fab_run(role, 'sudo mkdir /home/jenkins/workspace')
    fab_run(role, 'sudo chown -R jenkins:jenkins /home/jenkins')
@task
@roles('local')
def remove_jenkins_user(role='local'):
    """
    Remove the Jenkins user from you localhost to use it like an slave.
    Deletes the account and its home directory.
    """
    set_env(role)
    fab_run(role, 'sudo userdel jenkins')
    fab_run(role, 'sudo rm -rf /home/jenkins')
@task
@roles('local')
def add_jenkins_host(role='local'):
    """
    Add the IP and hostname of the Jenkins container to your /etc/hosts, so you can use the hostname to visit the site.
    """
    # Consistency fix: the container name is now built from
    # JENKINS_PROJECT_NAME; the old code hard-coded 'jenkins_container' while
    # passing an unused AEGIR_PROJECT_NAME to .format().  The .format() call
    # is also what collapses '{{{{...}}}}' into the literal Go template
    # {{.NetworkSettings.IPAddress}} expected by docker inspect.
    ip = fab_run(role, 'docker inspect -f "{{{{.NetworkSettings.IPAddress}}}}" {}_container'
                       ''.format(JENKINS_PROJECT_NAME), capture=True)
    fab_update_hosts(ip, JENKINS_HOSTNAME)
    print(green('Now you can visit your the site at http://{}:8080'.format(JENKINS_HOSTNAME)))
@task(alias='ds')
@runs_once
def docker_setup():
    """
    Complete docker setup process, used generally when building the docker image for install and configure Aegir.
    The same that run: $ fab ds
    """
    # General task
    execute(copy_ssh_keys)
    # Aegir tasks (order matters: image -> container -> in-container config)
    execute(docker_create_aegir_image)
    execute(docker_run_aegir_container)
    execute(docker_update_host_aegir_container)
    execute(create_aegir_user)
    execute(webserver_config)
    execute(php_config)
    execute(sudo_config)
    execute(database_config)
    execute(install_aegir_components)
    execute(enable_aegir_conf)
    # Jenkins task
    execute(create_jenkins_user)
    execute(docker_run_jenkins_container)
    execute(add_jenkins_host)
    print green('Docker setup finished with success!')
@task(alias='dcc')
@runs_once
def docker_clean_all():
    """
    Complete docker REMOVE process, used generally to remove all containers and images. The same that run: $ fab dcc
    """
    # Stop before removing; image removal last since it requires no container.
    execute(docker_stop_jenkins_container)
    execute(docker_stop_aegir_container)
    execute(docker_remove_jenkins_container)
    execute(docker_remove_aegir_container)
    execute(docker_remove_aegir_image)
    execute(remove_jenkins_user)
    print green('Docker clean all finished with success!')
@task(alias='dkstop')
@runs_once
def docker_stop_all():
"""
Complete docker STOP process, used generally to stop all containers. The same that run: $ fab dkstop
"""
execute(docker_stop_jenkins_container)
execute(docker_stop_aegir_container)
print green('Docker clean all finished with success!')
@task(alias='dkstart')
@runs_once
def docker_start_all():
"""
Complete docker STOP process, used generally to stop all containers. The same that run: $ fab dkstart
"""
execute(docker_run_aegir_container)
fab_run('local', 'docker start {}_container'.format(JENKINS_PROJECT_NAME))
execute(add_jenkins_host)
print green('Docker clean all finished with success!')
| sfl-drupal/docker-aegir-jenkins | deploy/fabfile.py | Python | gpl-2.0 | 25,241 | [
"VisIt"
] | d5a80e6dab3ed6951f2af81e22068904a1d87911e9e18b89b51dc461589dc5c5 |
#! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2010 (ita)
"""
This file is provided to enable compatibility with waf 1.5
It was enabled by default in waf 1.6, but it is not used in waf 1.7
"""
import sys
from waflib import ConfigSet, Logs, Options, Scripting, Task, Build, Configure, Node, Runner, TaskGen, Utils, Errors, Context
# The following re-registers the waf 1.6 modules under their waf 1.5 names so
# that old "import Configure"-style statements keep working.
sys.modules['Environment'] = ConfigSet
ConfigSet.Environment = ConfigSet.ConfigSet
sys.modules['Logs'] = Logs
sys.modules['Options'] = Options
sys.modules['Scripting'] = Scripting
sys.modules['Task'] = Task
sys.modules['Build'] = Build
sys.modules['Configure'] = Configure
sys.modules['Node'] = Node
sys.modules['Runner'] = Runner
sys.modules['TaskGen'] = TaskGen
sys.modules['Utils'] = Utils
from waflib.Tools import c_preproc
sys.modules['preproc'] = c_preproc
from waflib.Tools import c_config
sys.modules['config_c'] = c_config
# Method/attribute aliases: map removed 1.5 APIs onto their 1.6 equivalents.
ConfigSet.ConfigSet.copy = ConfigSet.ConfigSet.derive
ConfigSet.ConfigSet.set_variant = Utils.nada
Build.BuildContext.add_subdirs = Build.BuildContext.recurse
Build.BuildContext.new_task_gen = Build.BuildContext.__call__
Build.BuildContext.is_install = 0
Node.Node.relpath_gen = Node.Node.path_from
def name_to_obj(self, s, env=None):
	# Compat shim for Build.BuildContext (waf 1.5 API); 'env' is accepted but ignored.
	Logs.warn('compat: change "name_to_obj(name, env)" by "get_tgen_by_name(name)"')
	return self.get_tgen_by_name(s)
Build.BuildContext.name_to_obj = name_to_obj
def env_of_name(self, name):
	# Compat shim: return the ConfigSet registered under 'name', or None
	# (with an error logged) when no such environment exists.
	try:
		return self.all_envs[name]
	except KeyError:
		Logs.error('no such environment: '+name)
		return None
Build.BuildContext.env_of_name = env_of_name
def set_env_name(self, name, env):
	# Compat shim: register 'env' under 'name' on the configuration context
	# and return it (waf 1.5 API).
	self.all_envs[name] = env
	return env
Configure.ConfigurationContext.set_env_name = set_env_name
def retrieve(self, name, fromenv=None):
	# Compat shim: fetch the ConfigSet called 'name', creating and preparing
	# a fresh one on first use.  'fromenv' only triggers a warning when the
	# environment already existed (waf 1.5 semantics).
	try:
		env = self.all_envs[name]
	except KeyError:
		env = ConfigSet.ConfigSet()
		self.prepare_env(env)
		self.all_envs[name] = env
	else:
		if fromenv: Logs.warn("The environment %s may have been configured already" % name)
	return env
Configure.ConfigurationContext.retrieve = retrieve
# More 1.5 -> 1.6 renames, exposed under their old names.
Configure.ConfigurationContext.sub_config = Configure.ConfigurationContext.recurse
Configure.ConfigurationContext.check_tool = Configure.ConfigurationContext.load
Configure.conftest = Configure.conf
Configure.ConfigurationError = Errors.ConfigurationError
Options.OptionsContext.sub_options = Options.OptionsContext.recurse
Options.OptionsContext.tool_options = Context.Context.load
Options.Handler = Options.OptionsContext
Task.simple_task_type = Task.task_type_from_func = Task.task_factory
Task.TaskBase.classes = Task.classes
def setitem(self, key, value):
	"""Compat __setitem__ for ConfigSet: waf 1.5 'CCFLAGS*' keys are stored
	under their modern 'CFLAGS*' names (the leading 'C' is dropped)."""
	target = key[1:] if key.startswith('CCFLAGS') else key
	self.table[target] = value
ConfigSet.ConfigSet.__setitem__ = setitem  # route env[key] = v through the CCFLAGS shim above
@TaskGen.feature('d')
@TaskGen.before('apply_incpaths')
def old_importpaths(self):
	# Compat: map the old D-language 'importpaths' attribute onto 'includes'.
	if getattr(self, 'importpaths', []):
		self.includes = self.importpaths
from waflib import Context
eld = Context.load_tool
def load_tool(*k, **kw):
	# Wrap Context.load_tool: rename the waf 1.5 hooks ('set_options',
	# 'detect') found in old tool files to their 1.6 names, warning each time.
	ret = eld(*k, **kw)
	if 'set_options' in ret.__dict__:
		Logs.warn('compat: rename "set_options" to options')
		ret.options = ret.set_options
	if 'detect' in ret.__dict__:
		Logs.warn('compat: rename "detect" to "configure"')
		ret.configure = ret.detect
	return ret
Context.load_tool = load_tool
rev = Context.load_module
def load_module(path):
	# Wrap Context.load_module: rename waf 1.5 wscript globals
	# ('set_options', 'srcdir', 'blddir') to their 1.6 equivalents.
	ret = rev(path)
	if 'set_options' in ret.__dict__:
		Logs.warn('compat: rename "set_options" to "options" (%r)' % path)
		ret.options = ret.set_options
	if 'srcdir' in ret.__dict__:
		Logs.warn('compat: rename "srcdir" to "top" (%r)' % path)
		ret.top = ret.srcdir
	if 'blddir' in ret.__dict__:
		Logs.warn('compat: rename "blddir" to "out" (%r)' % path)
		ret.out = ret.blddir
	return ret
Context.load_module = load_module
old_post = TaskGen.task_gen.post
def post(self):
	# Wrap task_gen.post: translate removed waf 1.5 feature names and the
	# old 'ccflags' attribute before running the real post().
	self.features = self.to_list(self.features)
	if 'cc' in self.features:
		Logs.warn('compat: the feature cc does not exist anymore (use "c")')
		self.features.remove('cc')
		self.features.append('c')
	if 'cstaticlib' in self.features:
		Logs.warn('compat: the feature cstaticlib does not exist anymore (use "cstlib" or "cxxstlib")')
		self.features.remove('cstaticlib')
		self.features.append(('cxx' in self.features) and 'cxxstlib' or 'cstlib')
	if getattr(self, 'ccflags', None):
		Logs.warn('compat: "ccflags" was renamed to "cflags"')
		self.cflags = self.ccflags
	return old_post(self)
TaskGen.task_gen.post = post
def waf_version(*k, **kw):
	# Utils.waf_version was removed in waf 1.6; keep a stub that only warns.
	Logs.warn('wrong version (waf_version was removed in waf 1.6)')
Utils.waf_version = waf_version
import os
@TaskGen.feature('c', 'cxx', 'd')
@TaskGen.before('apply_incpaths', 'propagate_uselib_vars')
@TaskGen.after('apply_link', 'process_source')
def apply_uselib_local(self):
	"""
	process the uselib_local attribute (deprecated waf 1.5 inter-library
	dependency mechanism, replaced by 'use' in 1.6)
	execute after apply_link because of the execution order set on 'link_task'
	"""
	env = self.env
	from waflib.Tools.ccroot import stlink_task
	# 1. the case of the libs defined in the project (visit ancestors first)
	# the ancestors external libraries (uselib) will be prepended
	self.uselib = self.to_list(getattr(self, 'uselib', []))
	self.includes = self.to_list(getattr(self, 'includes', []))
	names = self.to_list(getattr(self, 'uselib_local', []))
	get = self.bld.get_tgen_by_name
	seen = set([])
	tmp = Utils.deque(names) # consume a copy of the list of names
	if tmp:
		Logs.warn('compat: "uselib_local" is deprecated, replace by "use"')
	while tmp:
		lib_name = tmp.popleft()
		# visit dependencies only once
		if lib_name in seen:
			continue
		y = get(lib_name)
		y.post()
		seen.add(lib_name)
		# object has ancestors to process (shared libraries): add them to the end of the list
		if getattr(y, 'uselib_local', None):
			for x in self.to_list(getattr(y, 'uselib_local', [])):
				obj = get(x)
				obj.post()
				if getattr(obj, 'link_task', None):
					if not isinstance(obj.link_task, stlink_task):
						tmp.append(x)
		# link task and flags
		if getattr(y, 'link_task', None):
			link_name = y.target[y.target.rfind(os.sep) + 1:]
			if isinstance(y.link_task, stlink_task):
				env.append_value('STLIB', [link_name])
			else:
				# some linkers can link against programs
				env.append_value('LIB', [link_name])
			# the order
			self.link_task.set_run_after(y.link_task)
			# for the recompilation
			self.link_task.dep_nodes += y.link_task.outputs
			# add the link path too
			tmp_path = y.link_task.outputs[0].parent.bldpath()
			if not tmp_path in env['LIBPATH']:
				env.prepend_value('LIBPATH', [tmp_path])
		# add ancestors uselib too - but only propagate those that have no staticlib defined
		for v in self.to_list(getattr(y, 'uselib', [])):
			if not env['STLIB_' + v]:
				if not v in self.uselib:
					self.uselib.insert(0, v)
		# if the library task generator provides 'export_includes', add to the include path
		# the export_includes must be a list of paths relative to the other library
		if getattr(y, 'export_includes', None):
			self.includes.extend(y.to_incnodes(y.export_includes))
@TaskGen.feature('cprogram', 'cxxprogram', 'cstlib', 'cxxstlib', 'cshlib', 'cxxshlib', 'dprogram', 'dstlib', 'dshlib')
@TaskGen.after('apply_link')
def apply_objdeps(self):
    "add the .o files produced by some other object files in the same manner as uselib_local"
    # Worklist traversal over the 'add_objects' dependency graph: ancestors are
    # pushed to the FRONT of the list so they are posted before their dependents.
    names = getattr(self, 'add_objects', [])
    if not names:
        return
    names = self.to_list(names)
    get = self.bld.get_tgen_by_name
    seen = []
    while names:
        x = names[0]
        # visit dependencies only once
        if x in seen:
            names = names[1:]
            continue
        # object does not exist ?  (get raises if the name is unknown)
        y = get(x)
        # object has ancestors to process first ? update the list of names
        if getattr(y, 'add_objects', None):
            added = 0
            lst = y.to_list(y.add_objects)
            # reversed so that prepending one-by-one preserves declaration order
            lst.reverse()
            for u in lst:
                if u in seen: continue
                added = 1
                names = [u]+names
            if added: continue # list of names modified, loop
        # safe to process the current object
        y.post()
        seen.append(x)
        # feed the dependency's object files straight into our link step
        for t in getattr(y, 'compiled_tasks', []):
            self.link_task.inputs.extend(t.outputs)
@TaskGen.after('apply_link')
def process_obj_files(self):
    """Append the nodes named in ``self.obj_files`` to the link task inputs."""
    if not hasattr(self, 'obj_files'):
        return
    find = self.path.find_resource
    for filename in self.obj_files:
        self.link_task.inputs.append(find(filename))
@TaskGen.taskgen_method
def add_obj_file(self, file):
    """Small example on how to link object files as if they were source
    obj = bld.create_obj('cc')
    obj.add_obj_file('foo.o')"""
    # lazily create the list and register the processing method once
    if not hasattr(self, 'obj_files'):
        self.obj_files = []
    if 'process_obj_files' not in self.meths:
        self.meths.append('process_obj_files')
    self.obj_files.append(file)
# keep a reference to the unwrapped implementation before overriding it
old_define = Configure.ConfigurationContext.__dict__['define']
@Configure.conf
def define(self, key, val, quote=True):
    """Call the original ``define``, then mirror HAVE_* keys into ``env``."""
    old_define(self, key, val, quote)
    if key.startswith('HAVE_'):
        self.env[key] = 1
# keep a reference to the unwrapped implementation before overriding it
old_undefine = Configure.ConfigurationContext.__dict__['undefine']
@Configure.conf
def undefine(self, key):
    """Call the original ``undefine``, then clear HAVE_* keys in ``env``."""
    old_undefine(self, key)
    if key.startswith('HAVE_'):
        self.env[key] = 0
# some people might want to use export_incdirs, but it was renamed
def set_incdirs(self, val):
    # warn once per assignment, then forward to the new attribute name
    Logs.warn('compat: change "export_incdirs" by "export_includes"')
    self.export_includes = val
# write-only property: assigning task_gen.export_incdirs redirects to export_includes
TaskGen.task_gen.export_incdirs = property(None, set_incdirs)
| tommo/gii | support/waf/waflib/extras/compat15.py | Python | mit | 9,285 | [
"VisIt"
] | 5d0ed5d264252d6c4c661c02b5e84a888b5ab250a04fc4ae5dbf039308066570 |
# MolMod is a collection of molecular modelling tools for python.
# Copyright (C) 2007 - 2008 Toon Verstraelen <Toon.Verstraelen@UGent.be>
#
# This file is part of MolMod.
#
# MolMod is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# MolMod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from molmod.data.periodic import periodic
from molmod.units import A
import os, numpy
__all__ = ["mkinput", "mkinput_multiopt"]
# Gaussian input file template filled by %-formatting in mkinput().
# "%%" renders as a literal "%" for the Link 0 commands (%chk, %nproc, %mem).
template="""%%chk=%(basename)s.chk
%%nproc=%(nproc)i
%%mem=%(mem)s
# %(lot)s %(route_args)s maxdisk=%(maxdisk)s NoSymm
Who cares about the title?
%(charge)i %(spin)i
%(atom_lines)s
%(post)s
"""
def mkinput(
    molecule, charge, spin, lot, route_args, post, nproc, mem, maxdisk, com_filename,
    center=True, overwrite=False, ghost_mask=None
):
    """Write a Gaussian input (.com) file, plus a companion geom.xyz.

    Parameters:
      molecule      molmod molecule object, or None to emit an "${atom_lines}"
                    placeholder instead of a geometry
      charge, spin  net charge and spin multiplicity
      lot           level-of-theory string for the route section
      route_args    extra route-section keywords
      post          text appended after the charge/geometry section
      nproc, mem, maxdisk   Link 0 resource directives
      com_filename  destination path; a ".com" suffix is normalized away
      center        if True, shift the geometry centroid to the origin
                    (a copy is centered; the caller's molecule is untouched)
      overwrite     if True, rewrite an existing .com file
      ghost_mask    optional boolean sequence; True entries become ghost (Bq) atoms
    """
    destdir = os.path.dirname(com_filename)
    basename = os.path.basename(com_filename)
    if basename.endswith(".com"):
        basename = basename[:-4]
    # Guard against an empty destdir (bare filename): os.makedirs("") raises.
    # This also matches the behavior of mkinput_multiopt.
    if len(destdir) > 0 and not os.path.isdir(destdir):
        os.makedirs(destdir)
    com_filename = os.path.join(destdir, "%s.com" % basename)
    if not os.path.isfile(com_filename) or overwrite:
        if molecule is None:
            atom_lines = "${atom_lines}"
        else:
            # work on a copy so centering does not mutate the caller's molecule
            coordinates = numpy.array(molecule.coordinates)
            if center:
                # move the coordinates to the origin
                coordinates -= molecule.coordinates.mean(0)
            symbols = [periodic[number].symbol for number in molecule.numbers]
            # Optionally set ghost atoms (Bq suffix):
            if ghost_mask is not None:
                # range instead of py2-only xrange
                for i in range(len(symbols)):
                    if ghost_mask[i]:
                        symbols[i] = "%s-Bq" % symbols[i]
            atom_lines = "\n".join("% 5s % 12.7f % 12.7f % 12.7f" % (
                symbol, cor[0], cor[1], cor[2]
            ) for symbol, cor in zip(symbols, coordinates/A))
            # Write an xyz file next to the input
            molecule.write_to_file(os.path.join(destdir, "geom.xyz"))
        # open() instead of the py2-only file() builtin; the context manager
        # also closes the handle if write() raises.
        with open(com_filename, "w") as f:
            f.write(template % {
                "basename": basename,
                "nproc": nproc,
                "mem": mem,
                "lot": lot,
                "route_args": route_args,
                "maxdisk": maxdisk,
                "charge": charge,
                "spin": spin,
                "atom_lines": atom_lines,
                "post": post,
            })
# Template for the FIRST job section of a multi-step optimization.
template_multiopt_top="""%%chk=%(basename)s.chk
%%nproc=%(nproc)i
%%mem=%(mem)s
# %(lot)s opt=ModRedundant maxdisk=%(maxdisk)s NoSymm
Who cares about the title?
%(charge)i %(spin)i
%(atom_lines)s
%(post)s
"""
# Template for each FOLLOW-UP section; Geom(AllCheck) restarts from the
# geometry stored in the shared checkpoint file of the previous step.
template_multiopt_link="""--Link1--
%%chk=%(basename)s.chk
%%mem=%(mem)s
%%nproc=%(nproc)s
#p %(lot)s opt Geom(AllCheck) maxdisk=%(maxdisk)s NoSymm
Who cares about the title?
"""
def mkinput_multiopt(
    molecule, charge, spin, lot_mem_pairs, post, nproc, maxdisk, com_filename,
    center=True, overwrite=False
):
    """Write a multi-step Gaussian optimization input file.

    The first (lot, mem) pair in lot_mem_pairs becomes the initial job
    section; every following pair becomes a --Link1-- section that restarts
    from the shared checkpoint geometry.

    NOTE(review): when center=True the coordinates are shifted IN PLACE,
    i.e. the caller's molecule object is mutated and the written geom.xyz
    is the centered geometry -- kept as-is for backward compatibility.
    """
    destdir = os.path.dirname(com_filename)
    basename = os.path.basename(com_filename)
    if basename.endswith(".com"):
        basename = basename[:-4]
    if len(destdir) > 0 and not os.path.isdir(destdir):
        os.makedirs(destdir)
    com_filename = os.path.join(destdir, "%s.com" % basename)
    if not os.path.isfile(com_filename) or overwrite:
        if center:
            # move the coordinates to the origin (mutates molecule, see note)
            molecule.coordinates -= molecule.coordinates.mean(0)
        # open() instead of the py2-only file() builtin; the context manager
        # closes the handle even if a write fails.  (An unused local that
        # precomputed the symbol list was removed; the symbols are taken
        # directly from the periodic table below.)
        with open(com_filename, "w") as f:
            # Write a gaussian file (top)
            atom_lines = "\n".join("% 2s % 12.7f % 12.7f % 12.7f" % (
                periodic[number].symbol, cor[0], cor[1], cor[2]
            ) for number, cor in zip(molecule.numbers, molecule.coordinates/A))
            lot, mem = lot_mem_pairs[0]
            f.write(template_multiopt_top % {
                "basename": basename,
                "nproc": nproc,
                "mem": mem,
                "lot": lot,
                "maxdisk": maxdisk,
                "charge": charge,
                "spin": spin,
                "atom_lines": atom_lines,
                "post": post,
            })
            # one restart section per remaining (lot, mem) pair
            for lot, mem in lot_mem_pairs[1:]:
                f.write(template_multiopt_link % {
                    "basename": basename,
                    "nproc": nproc,
                    "mem": mem,
                    "lot": lot,
                    "maxdisk": maxdisk,
                    "post": post,
                })
        # Write an xyz file
        molecule.write_to_file(os.path.join(destdir, "geom.xyz"))
| woutersmet/Molmodsummer | lib/molmod/io/gaussian03/mkinput.py | Python | gpl-3.0 | 5,030 | [
"Gaussian"
] | c597b71f252d5192e718c25632dbd9bb3c1c413fd775c6ded631801142ea6ebe |
# Build a per-position methylation matrix (chromosome 1 only) across all
# normal B-cell RRBS samples, one column per sample.
import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50) # show up to 50 columns when printing frames
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
# Collect the binary methylation-call files for each cell population
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
totalfiles = normalB + mcell + pcell + cd19cell
print(len(totalfiles))
df_list = []
for file in totalfiles:
    df = pd.read_csv(file)
    # drop the CSV's saved index column
    df = df.drop("Unnamed: 0", axis=1)
    # keep chromosome 1 rows only: positions are strings prefixed "chr1_"
    df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
    df = df[df["chromosome"] == "chr1_"]
    df = df.drop("chromosome", axis=1)
    df_list.append(df)
print(len(df_list))
# outer-join every sample on genomic position (one column per sample)
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
# drop the column restored by reset_index, leaving only per-sample columns
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)  # NOTE(review): no-op expression, likely a notebook leftover
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG"]
print(total_matrix.shape)
# Encode calls as characters: 0/1 stay as ints, missing values become "?"
# (the missing-data symbol in PHYLIP-style alignments).
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
# Collapse each sample column into one long string over all positions.
total_matrix = total_matrix.astype(str).apply(''.join)
# One line per sample: "<sample_name> <call string>"
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("normal_chrom1.phy", header=None, index=None)
print(tott.shape)
| evanbiederstedt/RRBSfun | trees/chrom_scripts/normal_chr01.py | Python | mit | 25,843 | [
"MCell"
] | b8c7f1505618886aff4f551631472fe3219cabdb8838b3f0aeafcd2c1d05154a |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common base structures."""
import tvm._ffi
import tvm.error
import tvm.runtime._ffi_node_api
from tvm.runtime import Object
from . import _ffi_api
from . import json_compact
class Node(Object):
    """Base class of all IR Nodes, implements astext function."""

    def astext(self, show_meta_data=True, annotate=None):
        """Get the text format of the expression.

        Parameters
        ----------
        show_meta_data : bool
            Whether to include meta data section in the text
            if there is meta data.

        annotate: Optional[Object->str]
            Optionally annotate function to provide additional
            information in the comment block.

        Returns
        -------
        text : str
            The text format of the expression.

        Notes
        -----
        The meta data section is necessary to fully parse the text format.
        However, it can contain dumps that are big (e.g constant weights),
        so it can be helpful to skip printing the meta data section.
        """
        return _ffi_api.AsText(self, show_meta_data, annotate)

    def __str__(self):
        # str(node) pretty-prints without the optional meta data section
        return _ffi_api.PrettyPrint(self)
@tvm._ffi.register_object("SourceName")
class SourceName(Object):
    """A identifier for a source location.

    Parameters
    ----------
    name : str
        The name of the source.
    """

    def __init__(self, name):
        # thin wrapper: construction is delegated to the C++ SourceName node
        self.__init_handle_by_constructor__(_ffi_api.SourceName, name)
@tvm._ffi.register_object("Span")
class Span(Object):
    """Specifies a location in a source program.

    Parameters
    ----------
    source_name : SourceName
        The source name.

    line : int
        The starting line number.

    end_line : int
        The ending line number.

    column : int
        The starting column offset.

    end_column : int
        The ending column offset.
    """

    def __init__(self, source_name, line, end_line, column, end_column):
        self.__init_handle_by_constructor__(
            _ffi_api.Span, source_name, line, end_line, column, end_column
        )
@tvm._ffi.register_object
class EnvFunc(Object):
    """Environment function.

    This is a global function object that can be serialized by its name.
    """

    def __call__(self, *args):
        # invoke the wrapped function through the FFI
        return _ffi_api.EnvFuncCall(self, *args)

    @property
    def func(self):
        # the underlying PackedFunc backing this environment function
        return _ffi_api.EnvFuncGetPackedFunc(self)

    @staticmethod
    def get(name):
        """Get a static env function

        Parameters
        ----------
        name : str
            The name of the function.
        """
        return _ffi_api.EnvFuncGet(name)
def load_json(json_str):
    """Load tvm object from json_str.

    Parameters
    ----------
    json_str : str
        The json string

    Returns
    -------
    node : Object
        The loaded tvm node.
    """
    try:
        return tvm.runtime._ffi_node_api.LoadJSON(json_str)
    except tvm.error.TVMError:
        # json produced by an older tvm version: upgrade the schema and retry
        upgraded = json_compact.upgrade_json(json_str)
        return tvm.runtime._ffi_node_api.LoadJSON(upgraded)
def save_json(node):
    """Serialize a tvm object into its json string form.

    Parameters
    ----------
    node : Object
        A TVM object to be saved.

    Returns
    -------
    json_str : str
        Saved json string.
    """
    return tvm.runtime._ffi_node_api.SaveJSON(node)
def structural_equal(lhs, rhs, map_free_vars=False):
    """Check structural equality of lhs and rhs.

    The structural equality is recursively defined in the DAG of IRNodes.
    There are two kinds of nodes:

    - Graph node: a graph node in lhs can only be mapped as equal to
      one and only one graph node in rhs.
    - Normal node: equality is recursively defined without the restriction
      of graph nodes.

    Vars(tir::Var, TypeVar) and non-constant relay expression nodes are graph nodes.
    For example, it means that `%1 = %x + %y; %1 + %1` is not structurally equal
    to `%1 = %x + %y; %2 = %x + %y; %1 + %2` in relay.

    A var-type node(e.g. tir::Var, TypeVar) can be mapped as equal to another var
    with the same type if one of the following condition holds:

    - They appear in a same definition point(e.g. function argument).
    - They points to the same VarNode via the same_as relation.
    - They appear in a same usage point, and map_free_vars is set to be True.

    The rules for var are used to remap variables occurs in function
    arguments and let-bindings.

    Parameters
    ----------
    lhs : Object
        The left operand.

    rhs : Object
        The left operand.

    map_free_vars : bool
        Whether or not shall we map free vars that does
        not bound to any definitions as equal to each other.

    Return
    ------
    result : bool
        The comparison result.

    See Also
    --------
    structural_hash
    assert_structural_equal
    """
    lhs = tvm.runtime.convert(lhs)
    rhs = tvm.runtime.convert(rhs)
    # third argument False = comparison mode (no assertion on mismatch)
    return bool(tvm.runtime._ffi_node_api.StructuralEqual(lhs, rhs, False, map_free_vars))
def assert_structural_equal(lhs, rhs, map_free_vars=False):
    """Assert lhs and rhs are structurally equal to each other.

    Parameters
    ----------
    lhs : Object
        The left operand.

    rhs : Object
        The left operand.

    map_free_vars : bool
        Whether or not shall we map free vars that does
        not bound to any definitions as equal to each other.

    Raises
    ------
    ValueError : if assertion does not hold.

    See Also
    --------
    structural_equal
    """
    converted_lhs = tvm.runtime.convert(lhs)
    converted_rhs = tvm.runtime.convert(rhs)
    # third argument True = assert mode: raise on the first mismatch found
    tvm.runtime._ffi_node_api.StructuralEqual(
        converted_lhs, converted_rhs, True, map_free_vars
    )
def structural_hash(node, map_free_vars=False):
    """Compute structural hash of node

    The structural hash value is recursively defined in the DAG of IRNodes.
    There are two kinds of nodes:

    - Normal node: the hash value is defined by its content and type only.
    - Graph node: each graph node will be assigned a unique index ordered by the
      first occurrence during the visit. The hash value of a graph node is
      combined from the hash values of its contents and the index.

    structural_hash is made to be consistent with structural_equal.
    If two nodes are structurally equal to each other,
    then their structural hash (with the same map_free_vars option)
    should be equal to each other as well.

    If the structural hash of two nodes equals to each other,
    then it is highly likely(except for rare hash value collision cases)
    that the two nodes are structurally equal to each other.

    Parameters
    ----------
    node : Object
        The input to be hashed.

    map_free_vars : bool
        If map_free_vars is set to true, we will hash free variables
        by the order of their occurrences. Otherwise, we will hash by
        their in-memory pointer address.

    Return
    ------
    result : int
        The hash result

    See Also
    --------
    structural_equal
    """
    return tvm.runtime._ffi_node_api.StructuralHash(node, map_free_vars)
| dmlc/tvm | python/tvm/ir/base.py | Python | apache-2.0 | 7,780 | [
"VisIt"
] | 3a2a0d82e18720b25b7301c60093fd127ba5693d93a7620987ca7b370904e6c2 |
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
from scipy.stats import multivariate_normal
from pgmpy.factors.distributions import BaseDistribution
class GaussianDistribution(BaseDistribution):
"""
In its most common representation, a multivariate Gaussian distribution
over X1, X2, ..., Xn is characterized by an n-dimensional mean vector μ,
and a symmetric n x n covariance matrix Σ.
This is the base class for its representation.
"""
def __init__(self, variables, mean, cov):
"""
Parameters
----------
variables: iterable of any hashable python object
The variables for which the distribution is defined.
mean: list, array-like
1-D array of size n where n is the number of variables.
cov: n x n, 2-D array like
n x n dimensional matrix where n is the number of variables.
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.distributions import GaussianDistribution as GD
>>> dis = GD(variables=['x1', 'x2', 'x3'],
... mean=np.array([1, -3, 4]),
... cov=np.array([[4, 2, -2],
... [2, 5, -5],
... [-2, -5, 8]]))
>>> dis.variables
['x1', 'x2', 'x3']
>>> dis.mean
array([[ 1],
[-3],
[4]]))
>>> dis.cov
array([[4, 2, -2],
[2, 5, -5],
[-2, -5, 8]])
>>> dis.assignment([0, 0, 0])
0.0014805631279234139
"""
no_of_var = len(variables)
self.variables = variables
self.mean = np.asarray(np.reshape(mean, (no_of_var, 1)), dtype=float)
self.covariance = np.asarray(cov, dtype=float)
self._precision_matrix = None
if len(mean) != no_of_var:
raise ValueError("Length of mean_vector must be equal to the",
"number of variables.")
if self.covariance.shape != (no_of_var, no_of_var):
raise ValueError("The Covariance matrix should be a square matrix",
" with order equal to the number of variables. ",
"Got: {got_shape}, Expected: {exp_shape}".format
(got_shape=self.covariance.shape,
exp_shape=(no_of_var, no_of_var)))
@property
def pdf(self):
"""
Returns the probability density function(pdf).
Returns
-------
function: The probability density function of the distribution.
Examples
--------
>>> from pgmpy.factors.distributions import GaussianDistribution
>>> dist = GD(variables=['x1', 'x2', 'x3'],
... mean=[1, -3, 4],
... cov=[[4, 2, -2],
... [2, 5, -5],
... [-2, -5, 8]])
>>> dist.pdf
<function pgmpy.factors.distributions.GaussianDistribution.GaussianDistribution.pdf.<locals>.<lambda>>
>>> dist.pdf([0, 0, 0])
0.0014805631279234139
"""
return lambda *args: multivariate_normal.pdf(
args, self.mean.reshape(1, len(self.variables))[0], self.covariance)
def assignment(self, *x):
"""
Returns the probability value of the PDF at the given parameter values.
Parameters
----------
*x: int, float
The point at which the value of the pdf needs to be computed. The
number of values passed should be equal to the number of variables
in the distribution.
Returns
-------
float: float
The probability value at the point.
Examples
--------
>>> from pgmpy.factors.distributions import GaussianDistribution
>>> dist = GaussianDistribution(variables=['x1', 'x2'],
... mean=[0, 0],
... cov=[[1, 0],
[0, 1]])
>>> dist.assignment(0, 0)
0.15915494309189535
"""
return self.pdf(*x)
@property
def precision_matrix(self):
"""
Returns the precision matrix of the distribution.
Precision is defined as the inverse of the variance. This method returns
the inverse matrix of the covariance.
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.distributions import GaussianDistribution as GD
>>> dis = GD(variables=['x1', 'x2', 'x3'],
... mean=[1, -3, 4],
... cov=[[4, 2, -2],
... [2, 5, -5],
... [-2, -5, 8]]))
>>> dis.precision_matrix
array([[ 0.3125 , -0.125 , 0. ],
[-0.125 , 0.58333333, 0.33333333],
[ 0. , 0.33333333, 0.33333333]])
"""
if self._precision_matrix is None:
self._precision_matrix = np.linalg.inv(self.covariance)
return self._precision_matrix
def marginalize(self, variables, inplace=True):
"""
Modifies the distribution with marginalized values.
Parameters
----------
variables: iterator over any hashable object.
List of variables over which marginalization is to be done.
inplace: boolean
If inplace=True it will modify the distribution itself,
else would return a new distribution.
Returns
-------
GaussianDistribution or None :
if inplace=True (default) returns None
if inplace=False return a new GaussianDistribution instance
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.distributions import GaussianDistribution as GD
>>> dis = GD(variables=['x1', 'x2', 'x3'],
... mean=[1, -3, 4],
... cov=[[4, 2, -2],
... [2, 5, -5],
... [-2, -5, 8]]))
>>> dis.variables
['x1', 'x2', 'x3']
>>> dis.mean
array([[ 1],
[-3],
[ 4]])
>>> dis.covariance
array([[ 4, 2, -2],
[ 2, 5, -5],
[-2, -5, 8]])
>>> dis.marginalize(['x3'])
dis.variables
['x1', 'x2']
>>> dis.mean
array([[ 1.],
[-3.]]))
>>> dis.covariance
array([[4., 2.],
[2., 5.]])
"""
if not isinstance(variables, list):
raise TypeError("variables: Expected type list or array-like,"
"got type {var_type}".format(
var_type=type(variables)))
phi = self if inplace else self.copy()
index_to_keep = [self.variables.index(var) for var in self.variables
if var not in variables]
phi.variables = [phi.variables[index] for index in index_to_keep]
phi.mean = phi.mean[index_to_keep]
phi.covariance = phi.covariance[np.ix_(index_to_keep, index_to_keep)]
phi._precision_matrix = None
if not inplace:
return phi
def reduce(self, values, inplace=True):
"""
Reduces the distribution to the context of the given variable values.
The formula for the obtained conditional distribution is given by -
For,
.. math:: N(X_j | X_i = x_i) ~ N(mu_{j.i} ; sig_{j.i})
where,
.. math:: mu_{j.i} = mu_j + sig_{j, i} * {sig_{i, i}^{-1}} * (x_i - mu_i)
.. math:: sig_{j.i} = sig_{j, j} - sig_{j, i} * {sig_{i, i}^{-1}} * sig_{i, j}
Parameters
----------
values: list, array-like
A list of tuples of the form (variable_name, variable_value).
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new ContinuosFactor object.
Returns
-------
GaussianDistribution or None:
if inplace=True (default) returns None
if inplace=False returns a new GaussianDistribution instance.
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.distributions import GaussianDistribution as GD
>>> dis = GD(variables=['x1', 'x2', 'x3'],
... mean=[1, -3, 4],
... cov=[[4, 2, -2],
... [2, 5, -5],
... [-2, -5, 8]])
>>> dis.variables
['x1', 'x2', 'x3']
>>> dis.mean
array([[ 1.],
[-3.],
[ 4.]])
>>> dis.covariance
array([[ 4., 2., -2.],
[ 2., 5., -5.],
[-2., -5., 8.]])
>>> dis.reduce([('x1', 7)])
>>> dis.variables
['x2', 'x3']
>>> dis.mean
array([[ 0.],
[ 1.]])
>>> dis.covariance
array([[ 4., -4.],
[-4., 7.]])
"""
if not isinstance(values, list):
raise TypeError("values: Expected type list or array-like, ",
"got type {var_type}".format(
var_type=type(values)))
phi = self if inplace else self.copy()
var_to_reduce = [var for var, value in values]
# index_to_keep -> j vector
index_to_keep = [self.variables.index(var) for var in self.variables
if var not in var_to_reduce]
# index_to_reduce -> i vector
index_to_reduce = [self.variables.index(var) for var in var_to_reduce]
mu_j = self.mean[index_to_keep]
mu_i = self.mean[index_to_reduce]
x_i = np.array([value for var, value in values]).reshape(len(index_to_reduce), 1)
sig_i_j = self.covariance[np.ix_(index_to_reduce, index_to_keep)]
sig_j_i = self.covariance[np.ix_(index_to_keep, index_to_reduce)]
sig_i_i_inv = np.linalg.inv(self.covariance[np.ix_(index_to_reduce, index_to_reduce)])
sig_j_j = self.covariance[np.ix_(index_to_keep, index_to_keep)]
phi.variables = [self.variables[index] for index in index_to_keep]
phi.mean = mu_j + np.dot(np.dot(sig_j_i, sig_i_i_inv), x_i - mu_i)
phi.covariance = sig_j_j - np.dot(np.dot(sig_j_i, sig_i_i_inv), sig_i_j)
phi._precision_matrix = None
if not inplace:
return phi
def normalize(self, inplace=True):
"""
Normalizes the distribution. In case of a Gaussian Distribution the
distribution is always normalized, therefore this method doesn't do
anything and has been implemented only for a consistent API across
distributions.
"""
phi = self if inplace else self.copy()
# The pdf of a Joint Gaussian distrinution is always
# normalized. Hence, no changes.
if not inplace:
return phi
def copy(self):
"""
Return a copy of the distribution.
Returns
-------
GaussianDistribution: copy of the distribution
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.distributions import GaussianDistribution as GD
>>> gauss_dis = GD(variables=['x1', 'x2', 'x3'],
... mean=[1, -3, 4],
... cov=[[4, 2, -2],
... [2, 5, -5],
... [-2, -5, 8]])
>>> copy_dis = gauss_dis.copy()
>>> copy_dis.variables
['x1', 'x2', 'x3']
>>> copy_dis.mean
array([[ 1],
[-3],
[ 4]])
>>> copy_dis.covariance
array([[ 4, 2, -2],
[ 2, 5, -5],
[-2, -5, 8]])
>>> copy_dis.precision_matrix
array([[ 0.3125 , -0.125 , 0. ],
[-0.125 , 0.58333333, 0.33333333],
[ 0. , 0.33333333, 0.33333333]])
"""
copy_distribution = GaussianDistribution(variables=self.variables,
mean=self.mean.copy(),
cov=self.covariance.copy())
if self._precision_matrix is not None:
copy_distribution._precision_matrix = self._precision_matrix.copy()
return copy_distribution
def to_canonical_factor(self):
u"""
Returns an equivalent CanonicalDistribution object.
The formulas for calculating the cannonical factor parameters
for N(μ; Σ) = C(K; h; g) are as follows -
K = sigma^(-1)
h = sigma^(-1) * mu
g = -(0.5) * mu.T * sigma^(-1) * mu -
log((2*pi)^(n/2) * det(sigma)^(0.5))
where,
K,h,g are the canonical factor parameters
sigma is the covariance_matrix of the distribution,
mu is the mean_vector of the distribution,
mu.T is the transpose of the matrix mu,
and det(sigma) is the determinant of the matrix sigma.
Example
-------
>>> import numpy as np
>>> from pgmpy.factors.distributions import GaussianDistribution as GD
>>> dis = GD(variables=['x1', 'x2', 'x3'],
... mean=[1, -3, 4],
... cov=[[4, 2, -2],
... [2, 5, -5],
... [-2, -5, 8]])
>>> phi = dis.to_canonical_factor()
>>> phi.variables
['x1', 'x2', 'x3']
>>> phi.K
array([[0.3125, -0.125, 0.],
[-0.125, 0.5833, 0.333],
[ 0., 0.333, 0.333]])
>>> phi.h
array([[ 0.6875],
[-0.54166],
[ 0.33333]]))
>>> phi.g
-6.51533
"""
from pgmpy.factors.continuous import CanonicalDistribution
mu = self.mean
sigma = self.covariance
K = self.precision_matrix
h = np.dot(K, mu)
g = -(0.5) * np.dot(mu.T, h)[0, 0] - np.log(
np.power(2 * np.pi, len(self.variables)/2) *
np.power(abs(np.linalg.det(sigma)), 0.5))
return CanonicalDistribution(self.variables, K, h, g)
def _operate(self, other, operation, inplace=True):
"""
Gives the CanonicalDistribution operation (product or divide) with
the other factor.
Parameters
----------
other: CanonicalDistribution
The CanonicalDistribution to be multiplied.
operation: String
'product' for multiplication operation and
'divide' for division operation.
Returns
-------
CanonicalDistribution or None:
if inplace=True (default) returns None
if inplace=False returns a new CanonicalDistribution instance.
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.distributions import GaussianDistribution as GD
>>> dis1 = GD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
... np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
>>> dis2 = GD(['x3', 'x4'], [1, 2], [[2, 3], [5, 6]])
>>> dis3 = dis1 * dis2
>>> dis3.covariance
array([[ 3.6, 1. , -0.4, -0.6],
[ 1. , 2.5, -1. , -1.5],
[-0.4, -1. , 1.6, 2.4],
[-1. , -2.5, 4. , 4.5]])
>>> dis3.mean
array([[ 1.6],
[-1.5],
[ 1.6],
[ 3.5]])
"""
phi = self.to_canonical_factor()._operate(
other.to_canonical_factor(), operation, inplace=False).to_joint_gaussian()
if not inplace:
return phi
    def product(self, other, inplace=True):
        """
        TODO: Make it work when using `*` instead of product.

        Returns the product of two gaussian distributions.

        Parameters
        ----------
        other: GaussianDistribution
            The GaussianDistribution to be multiplied.

        inplace: boolean
            If True, modifies the distribution itself, otherwise returns a new
            distribution object.

        Returns
        -------
        if inplace=True (default) returns None.
        if inplace=False returns the combined distribution (produced by
        ``_operate``, which works in canonical form and converts the result
        back with ``to_joint_gaussian()`` -- presumably a
        GaussianDistribution; confirm against that method).

        Examples
        --------
        >>> import numpy as np
        >>> from pgmpy.factors.distributions import GaussianDistribution as GD
        >>> dis1 = GD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
        ...            np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
        >>> dis2 = GD(['x3', 'x4'], [1, 2], [[2, 3], [5, 6]])
        >>> dis3 = dis1.product(dis2, inplace=False)
        """
        # Delegates to _operate, which multiplies in canonical form.
        return self._operate(other, operation='product', inplace=inplace)
    def divide(self, other, inplace=True):
        """
        Returns the division of two gaussian distributions.

        Parameters
        ----------
        other: GaussianDistribution
            The GaussianDistribution to be divided.

        inplace: boolean
            If True, modifies the distribution itself, otherwise returns a new
            distribution object.

        Returns
        -------
        if inplace=True (default) returns None.
        if inplace=False returns the divided distribution (produced by
        ``_operate``, which works in canonical form and converts the result
        back with ``to_joint_gaussian()`` -- presumably a
        GaussianDistribution; confirm against that method).

        Examples
        --------
        >>> import numpy as np
        >>> from pgmpy.factors.distributions import GaussianDistribution as GD
        >>> dis1 = GD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
        ...            np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
        >>> dis2 = GD(['x3', 'x4'], [1, 2], [[2, 3], [5, 6]])
        >>> dis3 = dis1.divide(dis2, inplace=False)
        """
        # Delegates to _operate, which divides in canonical form.
        return self._operate(other, operation='divide', inplace=inplace)
    def __repr__(self):
        """Unambiguous representation: variable scope plus object identity."""
        return "GaussianDistribution representing N({var}) at {address}".format(
            var=self.variables, address=hex(id(self)))
    def __mul__(self, other):
        # `dist1 * dist2` -> product; always returns a new distribution.
        return self.product(other, inplace=False)

    def __rmul__(self, other):
        # The product is symmetric, so reuse __mul__.
        return self.__mul__(other)

    def __truediv__(self, other):
        # `dist1 / dist2` -> divide; always returns a new distribution.
        return self.divide(other, inplace=False)

    # Python 2 compatibility: classic `/` dispatches to __div__.
    __div__ = __truediv__
def __eq__(self, other):
if not (isinstance(self, GaussianDistribution) and isinstance(self, GaussianDistribution)):
return False
elif set(self.scope()) != set(other.scope()):
return False
else:
# Computing transform_index to be able to easily have variables in same order.
transform_index = [other.index(var) for var in self.variables]
if not np.allclose(self.mean, other.mean[transform_index]):
return False
else:
mid_cov = other.covariance[transform_index, :]
transform_cov = mid_cov[:, transform_index]
if not np.allclose(self.covariance, transform_cov):
return False
return True
| khalibartan/pgmpy | pgmpy/factors/distributions/GaussianDistribution.py | Python | mit | 20,036 | [
"Gaussian"
] | 5ca9ea8c3fb0f2458eb7d640f880e604242850e54c0e78fb357320d3ef2d4382 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to configure .deb packages.
(c) 2014, Brian Coca <briancoca+ansible@gmail.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: debconf
short_description: Configure a .deb package
description:
- Configure a .deb package using debconf-set-selections. Or just query
existing selections.
version_added: "1.6"
notes:
- This module requires the command line debconf tools.
- A number of questions have to be answered (depending on the package).
Use 'debconf-show <package>' on any Debian or derivative with the package
installed to see questions/settings available.
- Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
requirements: [ debconf, debconf-utils ]
options:
name:
description:
- Name of package to configure.
required: true
default: null
aliases: ['pkg']
question:
description:
- A debconf configuration setting
required: false
default: null
aliases: ['setting', 'selection']
vtype:
description:
- The type of the value supplied.
- C(seen) was added in 2.2.
required: false
default: null
choices: [string, password, boolean, select, multiselect, note, error, title, text, seen]
value:
description:
- Value to set the configuration to
required: false
default: null
aliases: ['answer']
unseen:
description:
- Do not set 'seen' flag when pre-seeding
required: false
default: False
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# Set default locale to fr_FR.UTF-8
- debconf:
name: locales
question: locales/default_environment_locale
value: fr_FR.UTF-8
vtype: select
# set to generate locales:
- debconf:
name: locales
question: locales/locales_to_be_generated
value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8
vtype: multiselect
# Accept oracle license
- debconf:
name: oracle-java7-installer
question: shared/accepted-oracle-license-v1-1
value: true
vtype: select
# Specifying package you can register/return the list of questions and current values
- debconf:
name: tzdata
'''
def get_selections(module, pkg):
    """Return the current debconf selections for *pkg* as a dict.

    Runs ``debconf-show <pkg>`` and parses each ``key: value`` line,
    stripping the leading ``*`` marker debconf prints for seen questions.
    Fails the module (via ``fail_json``) if the command exits non-zero.
    """
    debconf_show = module.get_bin_path('debconf-show', True)
    rc, out, err = module.run_command(' '.join([debconf_show, pkg]))

    if rc != 0:
        module.fail_json(msg=err)

    selections = {}
    for line in out.splitlines():
        key, value = line.split(':', 1)
        selections[key.strip('*').strip()] = value.strip()

    return selections
def set_selection(module, pkg, question, vtype, value, unseen):
    """Pre-seed a single debconf answer via ``debconf-set-selections``.

    Feeds one "pkg question vtype value" line on stdin; passes ``-u`` when
    *unseen* is set so the question's 'seen' flag is not touched.
    Returns the (rc, stdout, stderr) triple from run_command.
    """
    setsel = module.get_bin_path('debconf-set-selections', True)
    cmd = [setsel]
    if unseen:
        cmd.append('-u')

    if vtype == 'boolean':
        # debconf expects lowercase booleans; accept any capitalization
        # ('True', 'TRUE', ...) instead of only the exact strings
        # 'True'/'False' the original matched.
        if value.lower() in ('true', 'false'):
            value = value.lower()
    data = ' '.join([pkg, question, vtype, value])

    return module.run_command(cmd, data=data)
def main():
    """Entry point: parse module arguments, query the current debconf
    selections, and (when a question is supplied) pre-seed the answer,
    exiting with changed/diff information."""
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True, aliases=['pkg'], type='str'),
            question = dict(required=False, aliases=['setting', 'selection'], type='str'),
            vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text', 'seen']),
            value = dict(required=False, type='str', aliases=['answer']),
            unseen = dict(required=False, type='bool'),
        ),
        required_together = ( ['question','vtype', 'value'],),
        supports_check_mode=True,
    )

    #TODO: enable passing array of options and/or debconf file from get-selections dump
    pkg = module.params["name"]
    question = module.params["question"]
    vtype = module.params["vtype"]
    value = module.params["value"]
    unseen = module.params["unseen"]

    prev = get_selections(module, pkg)

    changed = False
    msg = ""

    if question is not None:
        if vtype is None or value is None:
            module.fail_json(msg="when supplying a question you must supply a valid vtype and value")

        # Changed when the question is unanswered or answered differently.
        # (Idiom fix: 'question not in prev' over 'not question in prev'.)
        if question not in prev or prev[question] != value:
            changed = True

        if changed:
            if not module.check_mode:
                rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
                if rc:
                    module.fail_json(msg=e)

            # Report only the affected question in current/previous.
            curr = { question: value }
            if question in prev:
                prev = {question: prev[question]}
            else:
                prev[question] = ''
            if module._diff:
                after = prev.copy()
                after.update(curr)
                diff_dict = {'before': prev, 'after': after}
            else:
                diff_dict = {}

            module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)

    # Query-only mode (or no change needed): report all current selections.
    module.exit_json(changed=changed, msg=msg, current=prev)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| camradal/ansible | lib/ansible/modules/system/debconf.py | Python | gpl-3.0 | 5,870 | [
"Brian"
] | 8170345569c56102025dc0ce3dc78261809cafe3738034deaff7df9326151d87 |
"""
===============
GMM covariances
===============
Demonstration of several covariances types for Gaussian mixture models.
See :ref:`gmm` for more information on the estimator.
Although GMM are often used for clustering, we can compare the obtained
clusters with the actual classes from the dataset. We initialize the means
of the Gaussians with the means of the classes from the training set to make
this comparison valid.
We plot predicted labels on both training and held out test data using a
variety of GMM covariance types on the iris dataset.
We compare GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <ronweiss@gmail.com>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
from sklearn.model_selection import StratifiedKFold
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
    """Draw one covariance ellipse per mixture component on *ax*.

    The ellipse axes come from the eigendecomposition of the first two
    dimensions of each component's covariance matrix.
    """
    for n, color in enumerate(colors):
        covariance = gmm._get_covars()[n][:2, :2]
        eigvals, eigvecs = np.linalg.eigh(covariance)
        principal = eigvecs[0] / np.linalg.norm(eigvecs[0])
        angle = np.arctan2(principal[1], principal[0])
        angle = 180 * angle / np.pi  # convert to degrees
        widths = eigvals * 9
        ellipse = mpl.patches.Ellipse(gmm.means_[n, :2], widths[0], widths[1],
                                      180 + angle, color=color)
        ellipse.set_clip_box(ax.bbox)
        ellipse.set_alpha(0.5)
        ax.add_artist(ellipse)
iris = datasets.load_iris()

# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
# Fix: sklearn.model_selection.StratifiedKFold takes ``n_splits``
# (``n_folds`` belonged to the deprecated cross_validation module).
skf = StratifiedKFold(n_splits=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))


X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]

n_classes = len(np.unique(y_train))

# Try GMMs using different types of covariances.
estimators = dict((covar_type,
                   GMM(n_components=n_classes, covariance_type=covar_type,
                       init_params='wc', n_iter=20))
                  for covar_type in ['spherical', 'diag', 'tied', 'full'])

n_estimators = len(estimators)

plt.figure(figsize=(3 * n_estimators / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
                    left=.01, right=.99)


for index, (name, estimator) in enumerate(estimators.items()):
    # Since we have class labels for the training data, we can
    # initialize the GMM parameters in a supervised manner.
    estimator.means_ = np.array([X_train[y_train == i].mean(axis=0)
                                 for i in xrange(n_classes)])

    # Train the other parameters using the EM algorithm.
    estimator.fit(X_train)

    # Fix: subplot indices must be integers; plain ``/`` yields a float
    # under true division (Python 3).
    h = plt.subplot(2, n_estimators // 2, index + 1)
    make_ellipses(estimator, h)

    for n, color in enumerate(colors):
        data = iris.data[iris.target == n]
        plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
                    label=iris.target_names[n])
    # Plot the test data with crosses
    for n, color in enumerate(colors):
        data = X_test[y_test == n]
        plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)

    y_train_pred = estimator.predict(X_train)
    train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
    plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
             transform=h.transAxes)

    y_test_pred = estimator.predict(X_test)
    test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
    plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
             transform=h.transAxes)

    plt.xticks(())
    plt.yticks(())
    plt.title(name)

plt.legend(loc='lower right', prop=dict(size=12))


plt.show()
| DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/mixture/plot_gmm_covariances.py | Python | mit | 4,257 | [
"Gaussian"
] | 9c1d6f04025b97898cffd19ac6efbac2df9644350065193d26ae7452d76acff7 |
'''
Created on Aug 5, 2014
@author: gearsad, alucard
@change: Fixed GetOrientation(), Re-factored Code, added FK [Gears]
@version: v0.5
'''
import vtk
class SceneObject(object):
'''
This is a basic superclass for any object that will be included in the 3D scene.
'''
def __init__(self, renderers, parent = None):
'''
Constructor with the renderer passed in
'''
# Initialize all the variables so that they're unique to self
# The children SceneObjects for this SceneObject - both rotationally and positionally bound to the parent
self.childrenObjects = []
# Set the parent for this SceneObject - makes it positionally and rotationally bound
self.parent = parent
# The actor
# Ref - http://www.vtk.org/doc/nightly/html/classvtkActor.html
self.vtkActor = vtk.vtkActor()
self.__renderers = renderers
# Add the actor to all the renderers
for renderer in renderers:
renderer.AddActor(self.vtkActor)
def RemoveSceneObject(self):
'''
Remove the actor and the children from the scene.
'''
for child in self.childrenObjects:
child.RemoveSceneObject()
self.childrenObjects = []
# Now clear the parent.
for renderer in self.__renderers:
renderer.RemoveActor(self.vtkActor)
self.parent = None
def UpdateFromParent(self):
'''
Update the transform matrices from the parent if it exists - part of the forward kinematics of SceneObject.
'''
if self.parent is not None:
self.parent.vtkActor.ComputeMatrix()
parentMatrix = self.parent.vtkActor.GetMatrix()
self.vtkActor.SetUserMatrix(parentMatrix)
for sceneObject in self.childrenObjects:
sceneObject.UpdateFromParent()
def SetSceneObjectPosition(self, positionVec3):
'''
Set the position of this SceneObject and update the children if they exist - part of the forward kinematics of SceneObject.
'''
self.vtkActor.SetPosition(positionVec3[0], positionVec3[1], positionVec3[2])
# Update all the children)
self.UpdateFromParent()
def GetSceneObjectPosition(self):
'''
You got it, get the current relative position from the vtkActor. Will be a tuple because VTK likes it like that (rowr!).
'''
return self.vtkActor.GetPosition()
def SetSceneObjectOrientation(self, orientationVec3):
'''
Set the orientation of this SceneObject and update the children if they exist - part of the forward kinematics of SceneObject.
'''
self.vtkActor.SetOrientation(orientationVec3[0], orientationVec3[1], orientationVec3[2])
# Update all the children
self.UpdateFromParent()
| GearsAD/semisorted_arnerve | arnerve/scene/SceneObject.py | Python | mit | 2,944 | [
"VTK"
] | 33af763880dd8ba622fa1f2e5803e215312e42fdcda877e91a878ee7c37d7462 |
from __future__ import absolute_import
import fluent.syntax.ast as FTL
from fluent.migrate.helpers import transforms_from
from fluent.migrate.helpers import VARIABLE_REFERENCE, TERM_REFERENCE
from fluent.migrate import REPLACE, COPY
# Legacy .lang source files for this migration.  `index` is referenced
# (unformatted) by the docstring template in migrate(); the transforms below
# all read their strings from `mobile_2019`.
index = "firefox/mobile/index.lang"
mobile_2019 = "firefox/mobile-2019.lang"
def migrate(ctx):
    """Migrate bedrock/firefox/templates/firefox/mobile/index.html, part {index}."""
    # Every source string comes from firefox/mobile-2019.lang.  REPLACE
    # swaps inline brand names and %s placeholders for Fluent term/variable
    # references; plain strings go through transforms_from()/COPY blocks.
    ctx.add_transforms(
        "firefox/mobile.ftl",
        "firefox/mobile.ftl",
        [
            FTL.Message(
                id=FTL.Identifier("firefox-mobile-download-the-firefox-browser"),
                value=REPLACE(
                    mobile_2019,
                    "Download the Firefox Browser on your Mobile for iOS and Android",
                    {
                        "Firefox Browser": TERM_REFERENCE("brand-name-firefox-browser"),
                        "Android": TERM_REFERENCE("brand-name-android"),
                        "iOS": TERM_REFERENCE("brand-name-ios"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-mobile-firefox-browser-for-mobile"),
                value=REPLACE(
                    mobile_2019,
                    "Firefox Browser for Mobile blocks over 2000 trackers by default, giving you the privacy you deserve and the speed you need in a private mobile browser.",
                    {
                        "Firefox Browser": TERM_REFERENCE("brand-name-firefox-browser"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-mobile-firefox = { -brand-name-firefox }
firefox-mobile-firefox-browser = { -brand-name-firefox-browser }
firefox-mobile-get-the-mobile-browser-built = {COPY(mobile_2019, "Get the mobile browser built for you, not advertisers",)}
""", mobile_2019=mobile_2019) + [
            FTL.Message(
                id=FTL.Identifier("firefox-mobile-check-out-firefox-again-its"),
                value=REPLACE(
                    mobile_2019,
                    "Check out Firefox again. It’s fast, private and on your side. For iOS and Android.",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                        "Android": TERM_REFERENCE("brand-name-android"),
                        "iOS": TERM_REFERENCE("brand-name-ios"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-mobile-get-automatic-privacy-on-mobile = {COPY(mobile_2019, "Get automatic privacy on mobile",)}
firefox-mobile-super-fast-private-by-default = {COPY(mobile_2019, "Super fast. Private by default. Blocks 2000+ online trackers.",)}
""", mobile_2019=mobile_2019) + [
            FTL.Message(
                id=FTL.Identifier("firefox-mobile-get-firefox-mobile"),
                value=REPLACE(
                    mobile_2019,
                    "Get Firefox Mobile",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-mobile-block-online-trackers-and = {COPY(mobile_2019, "Block online trackers and invasive ads",)}
firefox-mobile-privacy-protection-by-default = {COPY(mobile_2019, "Privacy protection by default",)}
""", mobile_2019=mobile_2019) + [
            FTL.Message(
                id=FTL.Identifier("firefox-mobile-leave-no-trace-with-private"),
                value=REPLACE(
                    mobile_2019,
                    "Leave no trace with <a href=\"%s\">Private Browsing mode</a>. When you close out, your history and cookies are deleted.",
                    {
                        "%%": "%",
                        "%s": VARIABLE_REFERENCE("url"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-mobile-stop-companies-from-following = {COPY(mobile_2019, "Stop companies from following you",)}
""", mobile_2019=mobile_2019) + [
            FTL.Message(
                id=FTL.Identifier("firefox-mobile-stay-off-their-radar-with"),
                value=REPLACE(
                    mobile_2019,
                    "Stay off their radar with <a href=\"%s\">Firefox Tracking Protection</a>",
                    {
                        "%%": "%",
                        "%s": VARIABLE_REFERENCE("url"),
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-mobile-discover-products-that-keep = {COPY(mobile_2019, "Discover products that keep you safe",)}
firefox-mobile-sync-your-history-passwords = {COPY(mobile_2019, "Sync your history, passwords, and bookmarks. Send tabs across all of your devices.",)}
""", mobile_2019=mobile_2019) + [
            FTL.Message(
                id=FTL.Identifier("firefox-mobile-android-only"),
                value=REPLACE(
                    mobile_2019,
                    "Android only",
                    {
                        "Android": TERM_REFERENCE("brand-name-android"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-mobile-make-android-your-own"),
                value=REPLACE(
                    mobile_2019,
                    "Make Android your own",
                    {
                        "Android": TERM_REFERENCE("brand-name-android"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-mobile-customize-your-firefox-mobile"),
                value=REPLACE(
                    mobile_2019,
                    "Customize your Firefox mobile browser with <a href=\"%s\">extensions</a> to block ads, manage passwords, stop Facebook from tracking you and more.",
                    {
                        "%%": "%",
                        "%s": VARIABLE_REFERENCE("url"),
                        "Facebook": TERM_REFERENCE("brand-name-facebook"),
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-mobile-find-it-fast-with-a-smart = {COPY(mobile_2019, "Find it fast with a smart search bar",)}
""", mobile_2019=mobile_2019) + [
            FTL.Message(
                id=FTL.Identifier("firefox-mobile-firefox-anticipates-your-needs"),
                value=REPLACE(
                    mobile_2019,
                    "Firefox anticipates your needs with smart search suggestions and quick access to the sites you visit most.",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-mobile-the-privacy-you-deserve-the = {COPY(mobile_2019, "The privacy you deserve. The speed you need.",)}
""", mobile_2019=mobile_2019) + [
            FTL.Message(
                id=FTL.Identifier("firefox-mobile-get-firefox-for-mobile"),
                value=REPLACE(
                    mobile_2019,
                    "Get Firefox for mobile",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-mobile-send-a-download-link-to-your = {COPY(mobile_2019, "Send a download link to your phone.",)}
firefox-mobile-scan-the-qr-code-to-get-started = {COPY(mobile_2019, "Scan the QR code to get started",)}
""", mobile_2019=mobile_2019)
    )
| hoosteeno/bedrock | lib/fluent_migrations/firefox/mobile/index.py | Python | mpl-2.0 | 7,671 | [
"VisIt"
] | e307084dd721377ebe5335767c3c03a280c32e9b4af00d4254746fb940dba6f5 |
"""
Tests for the memory estimators on JK objects
"""
import psi4
import pytest
from .utils import *
def _build_system(basis):
    """Build the argon-chain test system and return (orbital basis, aux basis).

    The auxiliary set is the JKFIT fitting basis matching the current
    SCF ``DF_BASIS_SCF`` option, constructed for the same molecule.
    """
    molecule = psi4.geometry("""
    Ar 0 0 0
    Ar 0 0 5
    Ar 0 0 15
    Ar 0 0 25
    Ar 0 0 35
    """)
    orbital_basis = psi4.core.BasisSet.build(molecule, target=basis)
    aux_basis = psi4.core.BasisSet.build(
        orbital_basis.molecule(),
        "DF_BASIS_SCF",
        psi4.core.get_option("SCF", "DF_BASIS_SCF"),
        "JKFIT",
        orbital_basis.name(),
        orbital_basis.has_puream())
    return orbital_basis, aux_basis
@pytest.mark.parametrize("basis,jk_type,estimate",[
    # Algorithms that hold no integral temporaries: estimate must be zero.
    ["cc-pvdz", "DIRECT", 0],
    ["cc-pvdz", "OUT_OF_CORE", 0],
    # cc-pVDZ reference values (doubles of storage, exact counts pinned).
    ["cc-pvdz", "MEM_DF", 1590520],
    ["cc-pvdz", "DISK_DF", 1286244],
    ["cc-pvdz", "CD", 2916000],
    ["cc-pvdz", "PK", 65610000],
    # cc-pV5Z reference values for the density-fitted paths only.
    ["cc-pv5z", "MEM_DF", 57020770],
    ["cc-pv5z", "DISK_DF", 26984120],
]) # yapf: disable
def test_jk_memory_estimate(basis, jk_type, estimate):
    """Check JK.memory_estimate() reproduces the pinned per-algorithm values."""
    basis, aux = _build_system(basis)
    # memory=1e9 is large enough that the estimate is not clipped by the cap.
    jk = psi4.core.JK.build(basis, aux=aux, jk_type=jk_type, do_wK=False, memory=1e9)
    assert compare_integers(estimate, jk.memory_estimate(), "{} memory estimate".format(jk_type))
| jturney/psi4 | tests/pytests/test_jkmemory.py | Python | lgpl-3.0 | 1,305 | [
"Psi4"
] | b91349a1de885da769aa9d73cfc096bbfaf9e8c991bc763046839713c6c5fa8e |
"""
wc_server v0.01
web client server
Copyright 2011 Brian Monkaba
This file is part of ga-bitbot.
ga-bitbot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ga-bitbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>.
"""
#
# server provides a web based client interface
#
__appversion__ = "0.01a"
# Startup banner (Python 2 print statement; this whole module is Python 2 code).
print "Genetic Bitcoin Web Client Server v%s"%__appversion__
# connect to the xml server
#
import gene_server_config
import xmlrpclib
import json
import time
import socket
import paths
from bottle import route, run, static_file, redirect
#define the server port
PORT = 8080  # port this bottle web UI listens on (distinct from the gene server port)
#cross-platform hack to get the local ip address
# Connecting a UDP socket to a public host makes the OS pick the outbound
# interface; getsockname() then reveals the local IP. No packet is sent,
# but NOTE(review): this still needs DNS resolution of www.google.com at
# import time, so the module fails to start without network access.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("www.google.com",80))
ip_address = s.getsockname()[0]
s.close()
# Location of the xmlrpc gene server, taken from gene_server_config.
__server__ = gene_server_config.__server__
__port__ = str(gene_server_config.__port__)
#make sure the port number matches the server.
server = xmlrpclib.Server('http://' + __server__ + ":" + __port__)
print "Connected to",__server__,":",__port__
#utility functions
def ppdict(d, nest=0):
    """Render a (possibly nested) dict as simple HTML.

    Keys are bolded; scalar values follow a colon, and nested dicts recurse
    one level deeper.  Leaf entries are prefixed with one "---> " arrow per
    nesting level.  Anything that does not behave like a dict falls back to
    plain ``str(d)``.

    d    -- dict (or arbitrary object) to render
    nest -- current nesting depth; controls the arrow prefix and leading <br>
    """
    # Nested levels start on a fresh line so the parent key sits alone.
    if nest > 0:
        output = '<br>'
    else:
        output = ''
    try:
        for key in d.keys():
            if not isinstance(d[key], dict):
                # Leaf entry: depth arrows, bold key, stringified value.
                output += "---> "*nest + '<b>' + str(key) + '</b>' + ': ' + str(d[key]) + '<br>'
            else:
                # Sub-dict: recurse; the recursive call supplies its own '<br>'.
                output += '<b>' + str(key) + '</b>'+ ':' + ppdict(d[key],nest + 1) + '<br>'
    except (AttributeError, TypeError):
        # Not dict-like (no .keys() / not indexable): degrade to str().
        # This replaces a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit and hid genuine bugs.
        output += str(d)
    return output
#define client functions
@route('/')
def index():
    """Render the dashboard: fill the system.templ placeholders with live data.

    Queries the xmlrpc gene server for the buy trigger, gene library,
    connected clients, best genes per quartile and process-monitor info,
    then substitutes the {SYS_*} tokens in ./report/system.templ.
    """
    # Load the HTML template fresh on every request.
    f = open('./report/system.templ','r')
    template = f.read()
    f.close()
    # gdhl: list of gene-definition db hashes; dgdh: current default hash.
    gdhl = json.loads(server.get_gene_def_hash_list())
    dgdh = json.loads(server.get_default_gene_def_hash())
    pid = 'WC_SERVER'
    server.pid_register_client(pid,dgdh)
    # --- Buy-trigger section ---------------------------------------------
    trigger = "-"*80 + '<br>'
    trigger += "Current Volitility Quartile: " + str(server.get_active_quartile()) + '<br>'
    trigger += "Buy Order Trigger* @ $"+"%.2f"%json.loads(server.get_target(pid))['buy'] + '<br>' * 2
    trigger += "* Will report $0 if target is too far away from the current price.<br> bcbookie also uses additional logic to screen potential orders.<br>"
    trigger += "-"*80 + '<br>' * 2
    # --- Gene library / active clients section ---------------------------
    clients = "-"*80 + '<br>'
    clients += "Gene Library (" + str(len(gdhl)) + ')<br>'
    for gdh in gdhl:
        # Each hash links to /set_default_db/<hash> so it can be selected.
        clients += "----><a href='./set_default_db/%s'>"%gdh + gdh + "</a><br>"
        try:
            clients += "-------->" + json.loads(server.get_gene_def(gdh))['name'] + '<br>'
        except:
            # NOTE(review): bare except silently drops gene defs without a
            # 'name' (or with unparsable JSON) from the listing.
            pass
    clients += "Default Gene Def Hash: " + dgdh + '<br>'
    clients += "-"*80 + '<br>' * 2
    clients += "-"*80 + '<br>'
    # Clients seen by the gene server within the last 180 seconds.
    pid_list = json.loads(server.pid_list(180))
    clients += "Active Clients (" + str(len(pid_list)) + ')<br>'
    for apid in pid_list:
        clients += "----> "+ apid + '<br>'
    clients += "-"*80 + '<br>' * 2
    # --- Best genes per quartile (last 24h) ------------------------------
    best = "-"*80 + '<br>'
    best += "Highest scoring genes (per quartile)" + '<br>'
    best += "-"*80 + '<br>'
    for quartile in [1,2,3,4]:
        try:
            # 60*60*24: look back one day for the top gene of this quartile.
            ag = json.loads(server.get(60*60*24,quartile,pid))
        except:
            ag = {"Gene server didn't return a dictionary.":"Gene server didn't return a dictionary."}
        best += "-"*80 + '<br>'
        best += "Quartile: " + str(quartile) + " :: " + str(time.ctime()) + '<br>'
        best += ppdict(ag) + '<br>'
    best = best.replace('\n','<br>')
    # --- Process monitor section -----------------------------------------
    pids = json.loads(server.get_pids())
    monitor = "-"*80 + '<br>'
    monitor += "Process monitor info (by PID)" + '<br>'
    monitor += "-"*80 + '<br>'
    monitor += ppdict(pids) + '<br>'*2
    monitor = monitor.replace('\n','<br>')
    # Substitute the assembled sections into the template placeholders.
    template = template.replace('{LAST_UPDATE}',time.ctime())
    template = template.replace('{SYS_TRIGGER}',trigger)
    template = template.replace('{SYS_MONITOR}',monitor)
    template = template.replace('{SYS_CLIENTS}',clients)
    template = template.replace('{SYS_BEST_GENES}',best)
    return template
@route('/set_default_db/<db_hash>')
def set_default_db(db_hash = None):
    """Make *db_hash* the gene server's default gene-definition database.

    Afterwards redirect back to the dashboard so the page shows the
    newly selected default.
    """
    # Push the selection to the xmlrpc gene server, then bounce home.
    server.set_default_gene_def_hash(db_hash)
    return redirect("/")
# Static-file routes.  The originals all reused the name `server_static`, so
# each new `def` shadowed the previous module-level binding (routing still
# worked only because @route captures the function object at decoration
# time).  Distinct names keep the module namespace honest and lint-clean.
@route('/report/<filepath:path>')
def serve_report(filepath):
    """Serve report assets (templates, html) from ./report."""
    return static_file(filepath, root='./report')
@route('/img/<filepath:path>')
def serve_image(filepath):
    """Serve image assets from ./report/img."""
    return static_file(filepath, root='./report/img')
@route('/js/<filepath:path>')
def serve_script(filepath):
    """Serve javascript assets from ./report/js."""
    return static_file(filepath, root='./report/js')
# run() blocks here serving requests until the process is stopped;
# NOTE(review): the print below therefore only executes after shutdown —
# it was presumably meant to appear before run().
run(host=ip_address, port=PORT)
print "http://" + ip_address + ":" + str(PORT)
| stahn/ga-bitbot | wc_server.py | Python | gpl-3.0 | 5,063 | [
"Brian"
] | 1f0b691f8af14099384b5d6b488628d0c139691d517f3901fe48a2a49d51f0af |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Auto-Regressive models for time series data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import distributions
from tensorflow.contrib.timeseries.python.timeseries import model
from tensorflow.contrib.timeseries.python.timeseries import model_utils
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import PredictionFeatures
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
class ARModel(model.TimeSeriesModel):
  """Auto-regressive model, both linear and non-linear.
  Features to the model include time and values of input_window_size timesteps,
  and times for output_window_size timesteps. These are passed through zero or
  more hidden layers, and then fed to a loss function (e.g. squared loss).
  Note that this class can also be used to regress against time only by setting
  the input_window_size to zero.
  """
  # Identifiers accepted by the `loss` constructor argument.
  SQUARED_LOSS = "squared_loss"
  NORMAL_LIKELIHOOD_LOSS = "normal_likelihood_loss"
  def __init__(self,
               periodicities,
               input_window_size,
               output_window_size,
               num_features,
               num_time_buckets=10,
               loss=NORMAL_LIKELIHOOD_LOSS,
               hidden_layer_sizes=None):
    """Constructs an auto-regressive model.
    Args:
      periodicities: periodicities of the input data, in the same units as the
        time feature. Note this can be a single value or a list of values for
        multiple periodicities.
      input_window_size: Number of past time steps of data to look at when doing
        the regression.
      output_window_size: Number of future time steps to predict. Note that
        setting it to > 1 empiricaly seems to give a better fit.
      num_features: number of input features per time step.
      num_time_buckets: Number of buckets into which to divide (time %
        periodicity) for generating time based features.
      loss: Loss function to use for training. Currently supported values are
        SQUARED_LOSS and NORMAL_LIKELIHOOD_LOSS. Note that for
        NORMAL_LIKELIHOOD_LOSS, we train the covariance term as well. For
        SQUARED_LOSS, the evaluation loss is reported based on un-scaled
        observations and predictions, while the training loss is computed on
        normalized data (if input statistics are available).
      hidden_layer_sizes: list of sizes of hidden layers.
    """
    self.input_window_size = input_window_size
    self.output_window_size = output_window_size
    # No hidden layers by default => a purely linear regression.
    if hidden_layer_sizes is None:
      hidden_layer_sizes = []
    self.hidden_layer_sizes = hidden_layer_sizes
    self.window_size = self.input_window_size + self.output_window_size
    self.loss = loss
    super(ARModel, self).__init__(
        num_features=num_features)
    assert num_time_buckets > 0
    self._buckets = int(num_time_buckets)
    # Normalize `periodicities` to a list of positive ints.
    if periodicities is None or not periodicities:
      periodicities = []
    elif (not isinstance(periodicities, list) and
          not isinstance(periodicities, tuple)):
      periodicities = [periodicities]
    self._periods = [int(p) for p in periodicities]
    for p in self._periods:
      assert p > 0
    # The model must have at least one source of features: either time-based
    # features (periodicities) or lagged values (input_window_size > 0).
    assert len(self._periods) or self.input_window_size
    assert output_window_size > 0
  def get_start_state(self):
    """Return a zeroed (times, values) state tuple with the correct shapes/dtypes."""
    # State which matches the format we'll return later. Typically this will not
    # be used by the model directly, but the shapes and dtypes should match so
    # that the serving input_receiver_fn gets placeholder shapes correct.
    return (array_ops.zeros([self.input_window_size], dtype=dtypes.int64),
            array_ops.zeros(
                [self.input_window_size, self.num_features], dtype=self.dtype))
  # TODO(allenl,agarwal): Support sampling for AR.
  def random_model_parameters(self, seed=None):
    """Sampling is not implemented for ARModel (see TODO above)."""
    pass
  def generate(self, number_of_series, series_length,
               model_parameters=None, seed=None):
    """Series generation is not implemented for ARModel (see TODO above)."""
    pass
  def _predicted_covariance_op(self, activations, num_values):
    """Build the predicted covariance from the last activation.

    For NORMAL_LIKELIHOOD_LOSS a log-variance is learned and exponentiated
    (guaranteeing positivity); otherwise a constant tensor of ones with the
    right shape is returned so downstream code can treat both cases alike.
    """
    activation, activation_size = activations[-1]
    if self.loss == ARModel.NORMAL_LIKELIHOOD_LOSS:
      log_sigma_square = model_utils.fully_connected(
          activation,
          activation_size,
          self.output_window_size * num_values,
          name="log_sigma_square",
          activation=None)
      predicted_covariance = gen_math_ops.exp(log_sigma_square)
      predicted_covariance = array_ops.reshape(
          predicted_covariance, [-1, self.output_window_size, num_values])
    else:
      shape = array_ops.stack([
          array_ops.shape(activation)[0],
          constant_op.constant(self.output_window_size),
          constant_op.constant(num_values)
      ])
      predicted_covariance = array_ops.ones(shape=shape, dtype=activation.dtype)
    return predicted_covariance
  def _predicted_mean_op(self, activations):
    """Build the predicted mean [-1, output_window_size, num_features] from the last activation."""
    activation, activation_size = activations[-1]
    predicted_mean = model_utils.fully_connected(
        activation,
        activation_size,
        self.output_window_size * self.num_features,
        name="predicted_mean",
        activation=None)
    return array_ops.reshape(predicted_mean,
                             [-1, self.output_window_size, self.num_features])
  def _create_hidden_stack(self, activation, activation_size):
    """Apply the configured hidden layers, returning [(activation, size), ...]."""
    activations = []
    for layer_number, layer_size in enumerate(self.hidden_layer_sizes):
      # TODO(agarwal): Migrate to fully_connected in tf slim
      activation = model_utils.fully_connected(
          activation, activation_size, layer_size,
          name="layer_{}".format(layer_number))
      activation_size = layer_size
      activations.append((activation, activation_size))
    return activations
  def prediction_ops(self, times, values):
    """Compute model predictions given input data.
    Args:
      times: A [batch size, self.window_size] integer Tensor, the first
          self.input_window_size times in each part of the batch indicating
          input features, and the last self.output_window_size times indicating
          prediction times.
      values: A [batch size, self.input_window_size, self.num_features] Tensor
          with input features.
    Returns:
      A dictionary with keys "activations" (the list of (activation, size)
      pairs from the network), "mean" and "covariance"; the latter two are
      Tensors with shape [batch size, self.output_window_size,
      self.num_features].
    """
    times.get_shape().assert_is_compatible_with([None, self.window_size])
    activations = []
    if self.input_window_size:
      values.get_shape().assert_is_compatible_with(
          [None, self.input_window_size, self.num_features])
    # Create input features.
    if self._periods:
      _, time_features = self._compute_time_features(times)
      activation_size = self.window_size * self._buckets * len(self._periods)
      activation = array_ops.reshape(time_features, [-1, activation_size])
    else:
      activation_size = 0
      activation = None
    if self.input_window_size:
      # Flatten the lagged values and prepend them to the time features.
      inp = array_ops.slice(values, [0, 0, 0], [-1, self.input_window_size, -1])
      inp_size = self.input_window_size * self.num_features
      inp = array_ops.reshape(inp, [-1, inp_size])
      if activation is not None:
        activation = array_ops.concat([inp, activation], 1)
      else:
        activation = inp
      activation_size += inp_size
    assert activation_size
    activations.append((activation, activation_size))
    # Create hidden layers.
    activations += self._create_hidden_stack(activation, activation_size)
    # Create mean and covariance ops.
    predicted_mean = self._predicted_mean_op(activations)
    predicted_covariance = self._predicted_covariance_op(activations,
                                                         self.num_features)
    return {"activations": activations,
            "mean": predicted_mean,
            "covariance": predicted_covariance}
  def loss_op(self, targets, prediction_ops):
    """Create the training loss for one window.

    Args:
      targets: [batch, output_window_size, num_features] ground-truth values.
      prediction_ops: dict from `prediction_ops()`; "mean" is always used and
        "covariance" additionally for NORMAL_LIKELIHOOD_LOSS.
    Returns:
      A scalar loss Tensor, averaged over all target elements.
    """
    prediction = prediction_ops["mean"]
    if self.loss == ARModel.NORMAL_LIKELIHOOD_LOSS:
      covariance = prediction_ops["covariance"]
      # Clamp the variance away from zero for numerical stability.
      sigma = math_ops.sqrt(gen_math_ops.maximum(covariance, 1e-5))
      normal = distributions.Normal(loc=targets, scale=sigma)
      loss_op = -math_ops.reduce_sum(normal.log_prob(prediction))
    else:
      assert self.loss == ARModel.SQUARED_LOSS, self.loss
      loss_op = math_ops.reduce_sum(math_ops.square(prediction - targets))
    # Average over every element of the target tensor.
    loss_op /= math_ops.cast(
        math_ops.reduce_prod(array_ops.shape(targets)), loss_op.dtype)
    return loss_op
  # TODO(allenl, agarwal): Consider better ways of warm-starting predictions.
  def predict(self, features):
    """Computes predictions multiple steps into the future.
    Args:
      features: A dictionary with the following key/value pairs:
        PredictionFeatures.TIMES: A [batch size, predict window size]
          integer Tensor of times, after the window of data indicated by
          `STATE_TUPLE`, to make predictions for.
        PredictionFeatures.STATE_TUPLE: A tuple of (times, values), times with
          shape [batch size, self.input_window_size], values with shape [batch
          size, self.input_window_size, self.num_features] representing a
          segment of the time series before `TIMES`. This data is used
          to start off the autoregressive computation. This should have data for
          at least self.input_window_size timesteps.
    Returns:
      A dictionary with keys, "mean", "covariance". The
      values are Tensors of shape [batch_size, predict window size,
      num_features] and correspond to the values passed in `TIMES`.
    """
    predict_times = math_ops.cast(
        ops.convert_to_tensor(features[PredictionFeatures.TIMES]), dtypes.int32)
    batch_size = array_ops.shape(predict_times)[0]
    num_predict_values = array_ops.shape(predict_times)[1]
    # Ceiling division: how many output windows cover the requested horizon.
    prediction_iterations = ((num_predict_values + self.output_window_size - 1)
                             // self.output_window_size)
    # Pad predict_times so as to have exact multiple of self.output_window_size
    # values per example.
    padding_size = (prediction_iterations * self.output_window_size -
                    num_predict_values)
    padding = array_ops.zeros([batch_size, padding_size], predict_times.dtype)
    predict_times = control_flow_ops.cond(
        padding_size > 0, lambda: array_ops.concat([predict_times, padding], 1),
        lambda: predict_times)
    state = features[PredictionFeatures.STATE_TUPLE]
    (state_times, state_values) = state
    state_times = math_ops.cast(
        ops.convert_to_tensor(state_times), dtypes.int32)
    state_values = ops.convert_to_tensor(state_values, dtype=self.dtype)
    # Seed the loop with the tail of the warm-up state plus the first
    # output window of prediction times.
    initial_input_times = predict_times[:, :self.output_window_size]
    if self.input_window_size > 0:
      initial_input_times = array_ops.concat(
          [state_times[:, -self.input_window_size:], initial_input_times], 1)
      values_size = array_ops.shape(state_values)[1]
      times_size = array_ops.shape(state_times)[1]
      with ops.control_dependencies([
          check_ops.assert_greater_equal(values_size, self.input_window_size),
          check_ops.assert_equal(values_size, times_size)
      ]):
        initial_input_values = state_values[:, -self.input_window_size:, :]
    else:
      initial_input_values = 0
    # Iterate over the predict_times, predicting self.output_window_size values
    # in each iteration.
    def _while_condition(iteration_number, *unused_args):
      return math_ops.less(iteration_number, prediction_iterations)
    def _while_body(iteration_number, input_times, input_values,
                    mean_ta, covariance_ta):
      """Predict self.output_window_size values."""
      prediction_ops = self.prediction_ops(input_times, input_values)
      predicted_mean = prediction_ops["mean"]
      predicted_covariance = prediction_ops["covariance"]
      # Start offset of the *next* iteration's output window (clamped so the
      # final iteration does not index past the padded horizon).
      offset = self.output_window_size * gen_math_ops.minimum(
          iteration_number + 1, prediction_iterations - 1)
      if self.input_window_size > 0:
        if self.output_window_size < self.input_window_size:
          # Slide the window: drop the oldest output_window_size inputs and
          # append the newly predicted means (autoregressive feedback).
          new_input_values = array_ops.concat(
              [input_values[:, self.output_window_size:, :], predicted_mean], 1)
          new_input_times = array_ops.concat([
              input_times[:, self.output_window_size:],
              predict_times[:, offset:offset + self.output_window_size]
          ], 1)
        else:
          # The prediction alone is wide enough to fill the next input window.
          new_input_values = predicted_mean[:, -self.input_window_size:, :]
          new_input_times = predict_times[
              :,
              offset - self.input_window_size:offset + self.output_window_size]
      else:
        new_input_values = input_values
        new_input_times = predict_times[:,
                                        offset:offset + self.output_window_size]
      new_input_times.set_shape(initial_input_times.get_shape())
      new_mean_ta = mean_ta.write(iteration_number, predicted_mean)
      if isinstance(covariance_ta, tensor_array_ops.TensorArray):
        new_covariance_ta = covariance_ta.write(iteration_number,
                                                predicted_covariance)
      else:
        new_covariance_ta = covariance_ta
      return (iteration_number + 1,
              new_input_times,
              new_input_values,
              new_mean_ta,
              new_covariance_ta)
    # Note that control_flow_ops.while_loop doesn't seem happy with None. Hence
    # using 0 for cases where we don't want to predict covariance.
    covariance_ta_init = (tensor_array_ops.TensorArray(
        dtype=self.dtype, size=prediction_iterations)
                          if self.loss != ARModel.SQUARED_LOSS else 0.)
    mean_ta_init = tensor_array_ops.TensorArray(
        dtype=self.dtype, size=prediction_iterations)
    _, _, _, mean_ta, covariance_ta = control_flow_ops.while_loop(
        _while_condition, _while_body, [
            0, initial_input_times, initial_input_values, mean_ta_init,
            covariance_ta_init
        ])
    def _parse_ta(values_ta):
      """Helper function to parse the returned TensorArrays."""
      if not isinstance(values_ta, tensor_array_ops.TensorArray):
        return None
      predictions_length = prediction_iterations * self.output_window_size
      # Shape [prediction_iterations, batch_size, self.output_window_size,
      # self.num_features]
      values_packed = values_ta.stack()
      # Transpose to move batch dimension outside.
      output_values = array_ops.reshape(
          array_ops.transpose(values_packed, [1, 0, 2, 3]),
          array_ops.stack([batch_size, predictions_length, -1]))
      # Clip to desired size (drop the padding added above).
      return output_values[:, :num_predict_values, :]
    predicted_mean = _parse_ta(mean_ta)
    predicted_covariance = _parse_ta(covariance_ta)
    if predicted_covariance is None:
      # SQUARED_LOSS case: report unit covariance.
      predicted_covariance = array_ops.ones_like(predicted_mean)
    # Transform and scale the mean and covariance appropriately.
    predicted_mean = self._scale_back_data(predicted_mean)
    predicted_covariance = self._scale_back_variance(predicted_covariance)
    return {"mean": predicted_mean,
            "covariance": predicted_covariance}
  def _process_window(self, features, mode):
    """Compute model outputs on a single window of data."""
    # TODO(agarwal): Use exogenous features
    times = math_ops.cast(features[TrainEvalFeatures.TIMES], dtypes.int64)
    values = math_ops.cast(features[TrainEvalFeatures.VALUES], dtype=self.dtype)
    # Keep the raw values around: "observed" predictions are reported unscaled.
    original_values = values
    # Extra shape checking for the window size (above that in
    # `head.create_estimator_spec`).
    expected_times_shape = [None, self.window_size]
    if not times.get_shape().is_compatible_with(expected_times_shape):
      raise ValueError(
          ("ARModel with input_window_size={input_window_size} "
           "and output_window_size={output_window_size} expects "
           "feature '{times_feature}' to have shape (batch_size, "
           "{window_size}) (for any batch_size), but got shape {times_shape}. "
           "If you are using RandomWindowInputFn, set "
           "window_size={window_size} or adjust the input_window_size and "
           "output_window_size arguments to ARModel.").format(
               input_window_size=self.input_window_size,
               output_window_size=self.output_window_size,
               times_feature=TrainEvalFeatures.TIMES,
               window_size=self.window_size,
               times_shape=times.get_shape()))
    values = self._scale_data(values)
    if self.input_window_size > 0:
      input_values = values[:, :self.input_window_size, :]
    else:
      input_values = None
    prediction_ops = self.prediction_ops(times, input_values)
    prediction = prediction_ops["mean"]
    covariance = prediction_ops["covariance"]
    # Targets are the last output_window_size (scaled) values of the window.
    targets = array_ops.slice(values, [0, self.input_window_size, 0],
                              [-1, -1, -1])
    targets.get_shape().assert_is_compatible_with(prediction.get_shape())
    if (mode == estimator_lib.ModeKeys.EVAL
        and self.loss == ARModel.SQUARED_LOSS):
      # Report an evaluation loss which matches the expected
      #  (observed - predicted) ** 2.
      # Note that this affects only evaluation; the training loss is unaffected.
      loss = self.loss_op(
          self._scale_back_data(targets),
          {"mean": self._scale_back_data(prediction_ops["mean"])})
    else:
      loss = self.loss_op(targets, prediction_ops)
    # Scale back the prediction.
    prediction = self._scale_back_data(prediction)
    covariance = self._scale_back_variance(covariance)
    return model.ModelOutputs(
        loss=loss,
        end_state=(times[:, -self.input_window_size:],
                   values[:, -self.input_window_size:, :]),
        predictions={"mean": prediction, "covariance": covariance,
                     "observed": original_values[:, -self.output_window_size:]},
        prediction_times=times[:, -self.output_window_size:])
  def get_batch_loss(self, features, mode, state):
    """Computes predictions and a loss.
    Args:
      features: A dictionary (such as is produced by a chunker) with the
        following key/value pairs (shapes are given as required for training):
          TrainEvalFeatures.TIMES: A [batch size, self.window_size] integer
            Tensor with times for each observation. To train on longer
            sequences, the data should first be chunked.
          TrainEvalFeatures.VALUES: A [batch size, self.window_size,
            self.num_features] Tensor with values for each observation.
        When evaluating, `TIMES` and `VALUES` must have a window size of at
        least self.window_size, but it may be longer, in which case the last
        window_size - self.input_window_size times (or fewer if this is not
        divisible by self.output_window_size) will be evaluated on with
        non-overlapping output windows (and will have associated
        predictions). This is primarily to support qualitative
        evaluation/plotting, and is not a recommended way to compute evaluation
        losses (since there is no overlap in the output windows, which for
        window-based models is an undesirable bias).
      mode: The tf.estimator.ModeKeys mode to use (TRAIN or EVAL).
      state: Unused
    Returns:
      A model.ModelOutputs object.
    Raises:
      ValueError: If `mode` is not TRAIN or EVAL, or if static shape information
      is incorrect.
    """
    features = {feature_name: ops.convert_to_tensor(feature_value)
                for feature_name, feature_value in features.items()}
    if mode == estimator_lib.ModeKeys.TRAIN:
      # For training, we require the window size to be self.window_size as
      # iterating sequentially on larger windows could introduce a bias.
      return self._process_window(features, mode=mode)
    elif mode == estimator_lib.ModeKeys.EVAL:
      # For evaluation, we allow the user to pass in a larger window, in which
      # case we try to cover as much of the window as possible without
      # overlap. Quantitative evaluation is more efficient/correct with fixed
      # windows matching self.window_size (as with training), but this looping
      # allows easy plotting of "in-sample" predictions.
      times = features[TrainEvalFeatures.TIMES]
      times.get_shape().assert_has_rank(2)
      static_window_size = times.get_shape()[1].value
      if (static_window_size is not None
          and static_window_size < self.window_size):
        raise ValueError(
            ("ARModel requires a window of at least input_window_size + "
             "output_window_size to evaluate on (input_window_size={}, "
             "output_window_size={}, and got shape {} for feature '{}' (batch "
             "size, window size)).").format(
                 self.input_window_size, self.output_window_size,
                 times.get_shape(), TrainEvalFeatures.TIMES))
      num_iterations = ((array_ops.shape(times)[1] - self.input_window_size)
                        // self.output_window_size)
      output_size = num_iterations * self.output_window_size
      # Rather than dealing with overlapping windows of output, discard a bit at
      # the beginning if output windows don't cover evenly.
      crop_length = output_size + self.input_window_size
      features = {feature_name: feature_value[:, -crop_length:]
                  for feature_name, feature_value in features.items()}
      # Note that, unlike the ARModel's predict() while_loop and the
      # SequentialTimeSeriesModel while_loop, each iteration here can run in
      # parallel, since we are not feeding predictions or state from previous
      # iterations.
      def _while_condition(iteration_number, loss_ta, mean_ta, covariance_ta):
        del loss_ta, mean_ta, covariance_ta  # unused
        return iteration_number < num_iterations
      def _while_body(iteration_number, loss_ta, mean_ta, covariance_ta):
        """Perform a processing step on a single window of data."""
        base_offset = iteration_number * self.output_window_size
        model_outputs = self._process_window(
            features={
                feature_name:
                    feature_value[:, base_offset:base_offset + self.window_size]
                for feature_name, feature_value in features.items()},
            mode=mode)
        # This code needs to be updated if new predictions are added in
        # self._process_window
        assert len(model_outputs.predictions) == 3
        assert "mean" in model_outputs.predictions
        assert "covariance" in model_outputs.predictions
        assert "observed" in model_outputs.predictions
        return (iteration_number + 1,
                loss_ta.write(
                    iteration_number, model_outputs.loss),
                mean_ta.write(
                    iteration_number, model_outputs.predictions["mean"]),
                covariance_ta.write(
                    iteration_number, model_outputs.predictions["covariance"]))
      _, loss_ta, mean_ta, covariance_ta = control_flow_ops.while_loop(
          _while_condition, _while_body,
          [0,
           tensor_array_ops.TensorArray(dtype=self.dtype, size=num_iterations),
           tensor_array_ops.TensorArray(dtype=self.dtype, size=num_iterations),
           tensor_array_ops.TensorArray(dtype=self.dtype, size=num_iterations)])
      values = math_ops.cast(features[TrainEvalFeatures.VALUES],
                             dtype=self.dtype)
      batch_size = array_ops.shape(times)[0]
      prediction_shape = [batch_size, self.output_window_size * num_iterations,
                          self.num_features]
      previous_state_times, previous_state_values = state
      # Make sure returned state always has windows of self.input_window_size,
      # even if we were passed fewer than self.input_window_size points this
      # time.
      if self.input_window_size > 0:
        new_state_times = array_ops.concat(
            [previous_state_times,
             math_ops.cast(times, dtype=dtypes.int64)],
            axis=1)[:, -self.input_window_size:]
        new_state_times.set_shape((None, self.input_window_size))
        new_state_values = array_ops.concat(
            [previous_state_values,
             self._scale_data(values)], axis=1)[:, -self.input_window_size:, :]
        new_state_values.set_shape((None, self.input_window_size,
                                    self.num_features))
      else:
        # There is no state to keep, and the strided slices above do not handle
        # input_window_size=0.
        new_state_times = previous_state_times
        new_state_values = previous_state_values
      return model.ModelOutputs(
          loss=math_ops.reduce_mean(loss_ta.stack(), axis=0),
          end_state=(new_state_times, new_state_values),
          predictions={
              "mean": array_ops.reshape(
                  array_ops.transpose(mean_ta.stack(), [1, 0, 2, 3]),
                  prediction_shape),
              "covariance": array_ops.reshape(
                  array_ops.transpose(covariance_ta.stack(), [1, 0, 2, 3]),
                  prediction_shape),
              "observed": values[:, -output_size:]},
          prediction_times=times[:, -output_size:])
    else:
      raise ValueError(
          "Unknown mode '{}' passed to get_batch_loss.".format(mode))
  def _compute_time_features(self, time):
    """Compute periodic, bucketized features from the raw time values.

    Returns a (window_offset, mod) pair; callers in this class only use
    `mod`, the bucketized (time % period) features.
    """
    batch_size = array_ops.shape(time)[0]
    num_periods = len(self._periods)
    # Reshape to 3D.
    periods = constant_op.constant(
        self._periods, shape=[1, 1, num_periods, 1], dtype=time.dtype)
    time = array_ops.reshape(time, [batch_size, -1, 1, 1])
    # NOTE(review): this divides by the raw python list, which broadcasts
    # along the last axis rather than the period axis used by `periods`
    # above; window_offset is discarded by prediction_ops, so confirm the
    # intended layout before relying on it.
    window_offset = time / self._periods
    # Cast to appropriate type and scale to [0, 1) range
    mod = (math_ops.cast(time % periods, self.dtype) * self._buckets /
           math_ops.cast(periods, self.dtype))
    # Bucketize based on some fixed width intervals. For a value t and interval
    # [a, b), we return (t - a) if a <= t < b, else 0.
    intervals = array_ops.reshape(
        math_ops.range(self._buckets, dtype=self.dtype),
        [1, 1, 1, self._buckets])
    mod = nn_ops.relu(mod - intervals)
    mod = array_ops.where(mod < 1.0, mod, array_ops.zeros_like(mod))
    return window_offset, mod
class AnomalyMixtureARModel(ARModel):
"""Model data as a mixture of normal and anomaly distributions.
Note that this model works by changing the loss function to reduce the penalty
when predicting an anomalous target. However the predictions are still based
on anomalous input features, and this may affect the quality of fit. One
possible solution is to downweight/filter anomalous inputs, but that requires
more sequential processing instead of completely random windows.
"""
GAUSSIAN_ANOMALY = "gaussian"
CAUCHY_ANOMALY = "cauchy"
def __init__(self,
periodicities,
anomaly_prior_probability,
input_window_size,
output_window_size,
num_features,
anomaly_distribution=GAUSSIAN_ANOMALY,
num_time_buckets=10,
hidden_layer_sizes=None):
assert (anomaly_prior_probability < 1.0 and
anomaly_prior_probability > 0.0)
self._anomaly_prior_probability = anomaly_prior_probability
assert anomaly_distribution in [
AnomalyMixtureARModel.GAUSSIAN_ANOMALY,
AnomalyMixtureARModel.CAUCHY_ANOMALY]
self._anomaly_distribution = anomaly_distribution
super(AnomalyMixtureARModel, self).__init__(
periodicities=periodicities,
num_features=num_features,
num_time_buckets=num_time_buckets,
input_window_size=input_window_size,
output_window_size=output_window_size,
loss=ARModel.NORMAL_LIKELIHOOD_LOSS,
hidden_layer_sizes=hidden_layer_sizes)
def _create_anomaly_ops(self, times, values, prediction_ops_dict):
anomaly_log_param = variable_scope.get_variable(
"anomaly_log_param",
shape=[],
dtype=self.dtype,
initializer=init_ops.zeros_initializer())
# Anomaly param is the variance for Gaussian and scale for Cauchy
# distribution.
prediction_ops_dict["anomaly_params"] = gen_math_ops.exp(anomaly_log_param)
def prediction_ops(self, times, values):
prediction_ops_dict = super(AnomalyMixtureARModel, self).prediction_ops(
times, values)
self._create_anomaly_ops(times, values, prediction_ops_dict)
return prediction_ops_dict
def _anomaly_log_prob(self, targets, prediction_ops):
prediction = prediction_ops["mean"]
if self._anomaly_distribution == AnomalyMixtureARModel.GAUSSIAN_ANOMALY:
anomaly_variance = prediction_ops["anomaly_params"]
anomaly_sigma = math_ops.sqrt(
gen_math_ops.maximum(anomaly_variance, 1e-5))
normal = distributions.Normal(loc=targets, scale=anomaly_sigma)
log_prob = normal.log_prob(prediction)
else:
assert self._anomaly_distribution == AnomalyMixtureARModel.CAUCHY_ANOMALY
anomaly_scale = prediction_ops["anomaly_params"]
cauchy = distributions.StudentT(
df=array_ops.ones([], dtype=anomaly_scale.dtype),
loc=targets,
scale=anomaly_scale)
log_prob = cauchy.log_prob(prediction)
return log_prob
  def loss_op(self, targets, prediction_ops):
    """Create loss_op.

    Negative log likelihood of `targets` under a two-component mixture:
    the normal-data Gaussian (weight 1 - anomaly_prior_probability) and the
    anomaly distribution (weight anomaly_prior_probability), averaged over
    all target elements.
    """
    prediction = prediction_ops["mean"]
    covariance = prediction_ops["covariance"]
    # Normal data log probability.
    sigma = math_ops.sqrt(gen_math_ops.maximum(covariance, 1e-5))
    normal1 = distributions.Normal(loc=targets, scale=sigma)
    log_prob1 = normal1.log_prob(prediction)
    log_prob1 += math_ops.log(1 - self._anomaly_prior_probability)
    # Anomaly log probability.
    log_prob2 = self._anomaly_log_prob(targets, prediction_ops)
    log_prob2 += math_ops.log(self._anomaly_prior_probability)
    # We need to compute log(exp(log_prob1) + exp(log_prob2)). For numerical
    # stability, we rewrite the expression as p2 + log(1 + exp(p1 - p2))
    # with p1 <= p2 (a two-term log-sum-exp), so exp() cannot overflow.
    p1 = gen_math_ops.minimum(log_prob1, log_prob2)
    p2 = gen_math_ops.maximum(log_prob1, log_prob2)
    mixed_log_prob = p2 + math_ops.log(1 + gen_math_ops.exp(p1 - p2))
    loss_op = -math_ops.reduce_sum(mixed_log_prob)
    # Normalize by the total number of target elements.
    loss_op /= math_ops.cast(
        math_ops.reduce_prod(array_ops.shape(targets)), self.dtype)
    return loss_op
| eadgarchen/tensorflow | tensorflow/contrib/timeseries/python/timeseries/ar_model.py | Python | apache-2.0 | 31,904 | [
"Gaussian"
] | 0aa9d187f70caa2b333a566a6e6283b5a01014cf7cb234be0cdb4f25fc20caf6 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005, 2006 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
""" General slaves for branch management"""
from kiwi.datatypes import ValidationError
from stoqlib.api import api
from stoqlib.domain.person import Branch, Employee
from stoqlib.gui.editors.baseeditor import BaseEditorSlave
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class BranchDetailsSlave(BaseEditorSlave):
    """Editor slave for the detail fields of a company Branch.

    Widgets come from the BranchDetailsSlave glade file; add_proxy() keeps
    the widgets listed in proxy_widgets in sync with the Branch model.
    """
    gladefile = 'BranchDetailsSlave'
    model_type = Branch
    proxy_widgets = ('active_check',
                     'manager',
                     'crt',
                     'can_execute_foreign_work_orders',
                     'acronym')
    # CRT combo choices as (label, stored value) pairs.
    crt_options = (
        ('1 - Simples Nacional', 1),
        ('2 - Simples Nacional – excesso de sublimite da receita bruta', 2),
        ('3 - Regime Normal', 3),
    )
    def _setup_manager_entry(self):
        # Fill the manager combo with the currently active employees.
        employees = Employee.get_active_employees(self.store)
        self.manager.prefill(api.for_person_combo(employees))
    def _setup_crt_combo(self):
        # Fill the CRT combo from the static option list above.
        self.crt.prefill(self.crt_options)
    def setup_proxies(self):
        # The MAIN_COMPANY branch may not be (de)activated, so disable the
        # activation widgets for it.
        # NOTE(review): inactive_check and label1 are assumed to come from
        # the glade file (they are not in proxy_widgets) -- confirm.
        if api.sysparam.compare_object('MAIN_COMPANY', self.model):
            self.active_check.set_sensitive(False)
            self.inactive_check.set_sensitive(False)
            self.label1.set_sensitive(False)
        self._setup_manager_entry()
        self._setup_crt_combo()
        self.proxy = self.add_proxy(self.model,
                                    BranchDetailsSlave.proxy_widgets)
    def on_acronym__validate(self, widget, value):
        # This will allow the user to set an empty value to this field
        if not value:
            return
        if self.model.check_acronym_exists(value):
            return ValidationError(
                _('A company with this acronym already exists'))
| andrebellafronte/stoq | stoqlib/gui/slaves/branchslave.py | Python | gpl-2.0 | 2,676 | [
"VisIt"
] | d70f768b773c894405b80717d158eb24fd36f27f0bbc40b0e99cfdeba4a2c213 |
# -*- coding: utf-8 -*-
u"""Shadow execution template.
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkio
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdp
from sirepo.template import template_common
from sirepo.template.template_common import ModelUnits
import re
import sirepo.sim_data
# Per-simulation globals: sim-data helper, simulation type name, and schema.
_SIM_DATA, SIM_TYPE, SCHEMA = sirepo.sim_data.template_globals()
# JSON file written by the beam statistics report run.
BEAM_STATS_FILE = 'beam_stats.json'
# Beam output file produced by a shadow run; read back by the report plots.
_SHADOW_OUTPUT_FILE = 'shadow-output.dat'
# Per-model fields registered with a 'cm_to_m' conversion in
# _init_model_units(); shadow works in centimeters natively.
_CENTIMETER_FIELDS = {
    'aperture': ['position', 'horizontalSize', 'verticalSize', 'horizontalOffset', 'verticalOffset'],
    'crl': ['position', 'pilingThickness', 'rmirr', 'focalDistance', 'lensThickness', 'lensDiameter'],
    'crystal': ['position', 'halfWidthX1', 'halfWidthX2', 'halfLengthY1', 'halfLengthY2', 'externalOutlineMajorAxis', 'externalOutlineMinorAxis', 'internalOutlineMajorAxis', 'internalOutlineMinorAxis', 'ssour', 'simag', 'rmirr', 'r_maj', 'r_min', 'param', 'axmaj', 'axmin', 'ell_the', 'thickness', 'r_johansson', 'offx', 'offy', 'offz'],
    'electronBeam': ['sigmax', 'sigmaz', 'epsi_x', 'epsi_z', 'epsi_dx', 'epsi_dz'],
    'emptyElement': ['position'],
    'geometricSource': ['wxsou', 'wzsou', 'sigmax', 'sigmaz', 'wysou', 'sigmay'],
    'grating': ['position', 'halfWidthX1', 'halfWidthX2', 'halfLengthY1', 'halfLengthY2', 'externalOutlineMajorAxis', 'externalOutlineMinorAxis', 'internalOutlineMajorAxis', 'internalOutlineMinorAxis', 'ssour', 'simag', 'rmirr', 'r_maj', 'r_min', 'param', 'axmaj', 'axmin', 'ell_the', 'rulingDensityCenter', 'holo_r1', 'holo_r2', 'dist_fan', 'hunt_h', 'hunt_l', 'offx', 'offy', 'offz'],
    'histogramReport': ['distanceFromSource'],
    'lens': ['position', 'focal_x', 'focal_z'],
    'mirror': ['position', 'halfWidthX1', 'halfWidthX2', 'halfLengthY1', 'halfLengthY2', 'externalOutlineMajorAxis', 'externalOutlineMinorAxis', 'internalOutlineMajorAxis', 'internalOutlineMinorAxis', 'ssour', 'simag', 'rmirr', 'r_maj', 'r_min', 'param', 'axmaj', 'axmin', 'ell_the', 'prereflDensity', 'mlayerSubstrateDensity', 'mlayerEvenSublayerDensity', 'mlayerOddSublayerDensity', 'offx', 'offy', 'offz'],
    'obstacle': ['position', 'horizontalSize', 'verticalSize', 'horizontalOffset', 'verticalOffset'],
    'plotXYReport': ['distanceFromSource'],
    'rayFilter': ['distance', 'x1', 'x2', 'z1', 'z2'],
    'watch': ['position'],
    'zonePlate': ['position', 'diameter'],
}
# Maps schema field names to the Shadow attribute names written into the
# generated script (see _fields()).
_FIELD_ALIAS = PKDict(
    externalOutlineMajorAxis='rwidx2',
    externalOutlineMinorAxis='rlen2',
    halfLengthY1='rlen1',
    halfLengthY2='rlen2',
    halfWidthX1='rwidx1',
    halfWidthX2='rwidx2',
    horizontalOffset='cx_slit[0]',
    horizontalSize='rx_slit[0]',
    internalOutlineMajorAxis='rwidx1',
    internalOutlineMinorAxis='rlen1',
    rulingDensity='ruling',
    rulingDensityCenter='ruling',
    rulingDensityPolynomial='ruling',
    singleEnergyValue='ph1',
    verticalOffset='cz_slit[0]',
    verticalSize='rz_slit[0]',
)
# Fields emitted in lowercase by _field_value(); all others are upper-cased.
_LOWERCASE_FIELDS = set(['focal_x', 'focal_z'])
# Trajectory file name passed to shadow for wiggler sources.
_WIGGLER_TRAJECTORY_FILENAME = 'xshwig.sha'
def stateless_compute_compute_harmonic_photon_energy(data):
    """Sirepo API hook: compute the undulator harmonic photon energy/angle."""
    return _compute_harmonic_photon_energy(data)
def get_data_file(run_dir, model, frame, **kwargs):
    """Return the output data file name for the given report model.

    The beam statistics report has its own JSON output file; every other
    report reads the shadow beam output file.
    """
    is_beam_stats = model == 'beamStatisticsReport'
    return BEAM_STATS_FILE if is_beam_stats else _SHADOW_OUTPUT_FILE
def post_execution_processing(
        success_exit=True,
        is_parallel=False,
        run_dir=None,
        **kwargs
):
    """Return an error message parsed from the run log, or None.

    Only a failed sequential run is inspected; successful or parallel runs
    report nothing.
    """
    if not success_exit and not is_parallel:
        return _parse_shadow_log(run_dir)
    return None
def python_source_for_model(data, model):
    """Return a standalone python script that runs shadow and plots the beam.

    When no model is given, the report targets the last watchpoint on the
    beamline, falling back to plotXYReport if there is none.
    """
    data.report = model
    if not model:
        watch_id = None
        for element in data.models.beamline:
            if element.type == 'watch':
                watch_id = element.id
        if watch_id:
            data.report = '{}{}'.format(_SIM_DATA.WATCHPOINT_REPORT, watch_id)
        else:
            data.report = 'plotXYReport'
    return '''
{}
import Shadow.ShadowTools
Shadow.ShadowTools.plotxy(beam, 1, 3, nbins=100, nolost=1)
'''.format(_generate_parameters_file(data, is_parallel=True))
def remove_last_frame(run_dir):
    """Template API hook; shadow produces no per-frame output to remove."""
    pass
def write_parameters(data, run_dir, is_parallel):
    """Render and write the parameters python file.

    Args:
        data (dict): simulation input
        run_dir (py.path): directory receiving the file
        is_parallel (bool): whether this is a background (parallel) run
    """
    script = _generate_parameters_file(data, run_dir, is_parallel)
    pkio.write_text(run_dir.join(template_common.PARAMETERS_PYTHON_FILE), script)
def _compute_harmonic_photon_energy(data):
    """Compute the resonant photon energy and max angle for an undulator.

    Builds a SourceUndulator (OASYS/syned) from the undulator and beam
    models, tunes it to the selected harmonic, and returns the resulting
    photon energy and maximum angle.
    """
    from orangecontrib.shadow.util.undulator.source_undulator import SourceUndulator
    from syned.storage_ring.electron_beam import ElectronBeam
    from syned.storage_ring.magnetic_structures.undulator import Undulator
    undulator = data.undulator
    ebeam = data.undulatorBeam
    su = SourceUndulator(
        syned_electron_beam=ElectronBeam(energy_in_GeV=ebeam.energy),
        syned_undulator=Undulator(
            K_horizontal=undulator.k_horizontal,
            K_vertical=undulator.k_vertical,
            period_length=undulator.period / 1000,
            number_of_periods=int(undulator.length / (undulator.period / 1000)),
        ),
    )
    su.set_energy_monochromatic_at_resonance(int(undulator.energy_harmonic))
    # NOTE(review): reaches into private attributes of SourceUndulator
    # (_EMIN, _MAXANGLE); the 1e6 factor suggests rad -> urad conversion --
    # confirm against the OASYS source.
    return PKDict(
        photon_energy=su._EMIN,
        maxangle=su._MAXANGLE * 1e6,
    )
def _divide_drifts(beamline, count):
    """Split each drift gap into `count` sections with empty marker elements.

    Disabled elements are dropped. Synthetic elements get ids starting at
    1e5 so they cannot collide with real element ids.
    """
    divided = []
    synthetic_id = 1e5
    prev_pos = 0
    for element in beamline:
        if _is_disabled(element):
            continue
        gap = element.position - prev_pos
        if gap > 1e-3:
            step = gap / count
            marker = prev_pos + step
            while marker < element.position:
                divided.append(PKDict(
                    alpha=0,
                    id=synthetic_id,
                    position=marker,
                    title='D',
                    type='emptyElement',
                ))
                synthetic_id += 1
                marker += step
        divided.append(element)
        prev_pos = element.position
    return divided
def _eq(item, field, *values):
    """Return True if item's enum field matches any of the given enum names."""
    enum_type = SCHEMA.model[item.type][field][1]
    for enum_value, enum_name in SCHEMA.enum[enum_type]:
        if item[field] == enum_value:
            return enum_name in values
    raise AssertionError(
        '{}: value not found for model={} field={} type={}'.format(
            item[field], item.type, field, enum_type))
def _field_value(name, field, value):
    """Format one assignment line for the generated shadow script.

    Field names are upper-cased unless listed in _LOWERCASE_FIELDS.
    """
    attr = field if field in _LOWERCASE_FIELDS else field.upper()
    return "\n{}.{} = {}".format(name, attr, value)
def _fields(name, item, fields):
    """Format assignments for each field, applying _FIELD_ALIAS renames."""
    return ''.join(
        _field_value(name, _FIELD_ALIAS.get(f, f), item[f]) for f in fields)
def _generate_autotune_element(item):
    """Generate autotune (f_central) settings for a crystal or grating."""
    pieces = [_item_field(item, ['f_central'])]
    if item.type == 'grating' or item.f_central == '0':
        pieces.append(_item_field(item, ['t_incidence', 't_reflection']))
    if item.f_central == '1':
        pieces.append(_item_field(item, ['f_phot_cent']))
        if item.f_phot_cent == '0':
            pieces.append(_item_field(item, ['phot_cent']))
        elif item.f_phot_cent == '1':
            pieces.append(_item_field(item, ['r_lambda']))
    return ''.join(pieces)
def _generate_beamline_optics(models, last_id=None, calc_beam_stats=False):
    """Generate python statements tracing the beam through each element.

    Iterates enabled beamline elements in order, emitting Shadow.OE setup
    and a trace call per element. Stops after the watchpoint with id
    `last_id` when given. With calc_beam_stats, drifts are subdivided and
    a calculate_stats() call is appended after every trace.
    """
    beamline = models.beamline
    if calc_beam_stats:
        beamline = _divide_drifts(beamline, models.beamStatisticsReport.driftDivisions)
    res = ''
    prev_position = source_position = 0
    last_element = False
    count = 0
    for i in range(len(beamline)):
        item = beamline[i]
        trace_method = 'traceOE'
        if _is_disabled(item):
            continue
        count += 1
        source_distance = item.position - prev_position
        from_source = item.position - source_position
        # distance to the next enabled, non-empty element (0 if none)
        image_distance = 0
        for j in range(i + 1, len(beamline)):
            next_item = beamline[j]
            if _is_disabled(next_item) or next_item.type == 'emptyElement':
                continue
            image_distance = next_item.position - item.position
            break
        theta_recalc_required = item.type in ('crystal', 'grating') \
            and item.f_default == '1' and item.f_central == '1' \
            and item.fmirr != '5'
        if item.type == 'crl':
            # crl and zonePlate emit their own trace calls and manage count
            count, res = _generate_crl(item, source_distance, count, res, calc_beam_stats)
        elif item.type == 'zonePlate':
            count, res = _generate_zone_plate(item, source_distance, count, res, _photon_energy(models), calc_beam_stats)
        else:
            res += '\n\noe = Shadow.OE()' + _field_value('oe', 'dummy', '1.0')
            if item.type == 'aperture' or item.type == 'obstacle':
                res += _generate_screen(item)
            elif item.type == 'crystal':
                res += _generate_element(item, from_source, image_distance)
                res += _generate_crystal(item)
            elif item.type == 'emptyElement':
                res += "\n" + 'oe.set_empty(ALPHA={})'.format(item.alpha)
            elif item.type == 'grating':
                res += _generate_element(item, from_source, image_distance)
                res += _generate_grating(item)
            elif item.type == 'lens':
                trace_method = 'traceIdealLensOE'
                res += _item_field(item, ['focal_x', 'focal_z'])
            elif item.type == 'mirror':
                res += _generate_element(item, from_source, image_distance)
                res += _generate_mirror(item)
            elif item.type == 'watch':
                res += "\n" + 'oe.set_empty()'
                if last_id and last_id == int(item.id):
                    last_element = True
            else:
                raise RuntimeError('unknown item type: {}'.format(item))
            if theta_recalc_required:
                res += '''
# use shadow to calculate THETA from the default position
# but do not advance the original beam to the image depth
calc_beam = beam.duplicate()
calc_oe = oe.duplicate()
calc_oe.F_DEFAULT = 1
calc_oe.T_SOURCE = calc_oe.SSOUR
calc_oe.T_IMAGE = calc_oe.SIMAG
calc_beam.traceOE(calc_oe, 1)
oe.THETA = calc_oe.T_INCIDENCE * 180.0 / math.pi
'''
            res += _generate_trace(source_distance, trace_method, count)
        if calc_beam_stats:
            res += '\n' + 'pos = calculate_stats(pos, oe)'
        if last_element:
            break
        prev_position = item.position
        # empty elements do not reset the from-source reference position
        if item.type != 'emptyElement':
            source_position = item.position
    return res
def _generate_bending_magnet(data):
    """Generate source settings for a bending magnet source."""
    return _source_field(data.models.electronBeam, ['sigmax', 'sigmaz', 'epsi_x', 'epsi_z', 'bener', 'epsi_dx', 'epsi_dz', 'f_pol']) \
        + _source_field(data.models.sourceDivergence, ['hdiv1', 'hdiv2', 'vdiv1', 'vdiv2']) \
        + _field_value('source', 'f_phot', 0) \
        + _field_value('source', 'fsource_depth', 4) \
        + _field_value('source', 'f_color', 3) \
        + _source_field(data.models.bendingMagnet, ['r_magnet', 'ph1', 'ph2', 'fdistr']) \
        + _field_value('source', 'r_aladdin', 'source.R_MAGNET * 100')
def _generate_crl(item, source_distance, count, res, calc_beam_stats):
    """Append script text for a compound refractive lens.

    Each lens contributes two traced OEs; returns the updated (count, res).
    """
    total = item.numberOfLenses
    for idx in range(total):
        is_first = idx == 0
        is_last = idx == total - 1
        res += _generate_crl_lens(
            item, is_first, is_last, count, source_distance, calc_beam_stats)
        count += 2
    return count - 1, res
def _generate_crl_lens(item, is_first, is_last, count, source, calc_beam_stats):
    """Generate script text for one CRL lens as two refracting half-surfaces.

    The object-side and image-side halves share the settings in `common`;
    _half() adds the per-side refraction/attenuation values and distances.
    """
    half_lens = item.lensThickness / 2.0
    source_width = item.pilingThickness / 2.0 - half_lens
    diameter = item.rmirr * 2.0
    def _half(is_obj, **values):
        # Emit one half-surface; is_obj selects object vs image side.
        is_ima = not is_obj
        values = PKDict(values)
        # "10" is "conic", but it's only valid if useCCC, which
        # are the external coefficients. The "shape" values are still
        # valid
        which = 'obj' if is_obj else 'ima'
        values.update({
            'r_attenuation_' + which: item.attenuationCoefficient,
            'r_ind_' + which: item.refractionIndex,
        })
        if _eq(item, 'fmirr', 'Spherical', 'Paraboloid'):
            ccc = [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, diameter, 0.0]
            if bool(_eq(item, 'initialCurvature', 'Convex')) == is_ima:
                values.f_convex = 1
            else:
                # Inverse diameter for concave surface
                ccc[8] = -ccc[8]
            if _eq(item, 'useCCC', 'Yes'):
                if 'f_convex' in values:
                    del values['f_convex']
                if _eq(item, 'fmirr', 'Paraboloid'):
                    ccc[2] = 0.0
                values.ccc = 'numpy.array([{}])'.format(', '.join(map(str, ccc)))
        if is_ima:
            values.update(
                t_image=half_lens,
                t_source=(source if is_first else 0.0) + source_width,
            )
        else:
            values.update(
                t_image=source_width,
                t_source=half_lens,
            )
        fields = sorted(values.keys())
        res = '''
oe = Shadow.OE(){}
beam.traceOE(oe, {})'''.format(_fields('oe', values, fields), count + is_obj)
        if calc_beam_stats:
            res += '\n' + 'pos = calculate_stats(pos, oe)'
        return res
    common = PKDict(
        dummy=1.0,
        fwrite=3,
    )
    # Same for all lenses (afaict)
    common.update(
        f_ext=1,
        f_refrac=1,
        t_incidence=0.0,
        t_reflection=180.0,
    )
    common.fmirr = item.fmirr
    if not _eq(item, 'fmirr', 'Plane'):
        if _eq(item, 'useCCC', 'Yes'):
            common.fmirr = 10
        if _eq(item, 'fcyl', 'Yes'):
            common.update(
                fcyl=item.fcyl,
                cil_ang=item.cil_ang,
            )
        if _eq(item, 'fmirr', 'Paraboloid'):
            common.param = item.rmirr
        else:
            common.rmirr = item.rmirr
    common.fhit_c = item.fhit_c
    if _eq(item, 'fhit_c', 'Finite'):
        lens_radius = item.lensDiameter / 2.0
        common.update(
            fshape=2,
            rlen2=lens_radius,
            rwidx2=lens_radius,
        )
    return _half(0, **common) + _half(1, **common)
def _generate_crystal(item):
    """Generate OE settings for a crystal, plus the bragg() preprocessor
    call that writes the reflectivity file referenced by file_refl."""
    res = _field_value('oe', 'f_crystal', '1')
    res += _generate_autotune_element(item)
    res += _item_field(item, ['f_refrac', 'f_mosaic'])
    if item.f_mosaic == '0':
        res += _item_field(item, ['f_bragg_a', 'f_johansson'])
        if item.f_bragg_a == '1':
            res += _item_field(item, ['a_bragg', 'thickness'])
            if item.f_refrac == '1':
                res += _item_field(item, ['order'])
        if item.f_johansson == '1':
            res += _field_value('oe', 'f_ext', '1')
            res += _item_field(item, ['r_johansson'])
    elif item.f_mosaic == '1':
        res += _item_field(item, ['spread_mos', 'thickness', 'mosaic_seed'])
    bragg_filename = 'crystal-bragg-{}.txt'.format(item.id)
    res += "\n" + "bragg(interactive=False, DESCRIPTOR='{}', H_MILLER_INDEX={}, K_MILLER_INDEX={}, L_MILLER_INDEX={}, TEMPERATURE_FACTOR={}, E_MIN={}, E_MAX={}, E_STEP={}, SHADOW_FILE='{}')".format(
        item.braggMaterial,
        item.braggMillerH,
        item.braggMillerK,
        item.braggMillerL,
        item.braggTemperaturFactor,
        item.braggMinEnergy,
        item.braggMaxEnergy,
        item.braggEnergyStep,
        bragg_filename,
    )
    res += _field_value('oe', 'file_refl', "b'{}'".format(bragg_filename))
    return res
def _generate_element(item, from_source, to_focus):
    """Generate shared surface-geometry settings for mirror-like elements.

    NOTE: mutates `item` when auto-focusing (f_ext == '0' and
    f_default == '1'): the computed source/image distances and theta are
    written back and f_default is cleared so explicit values are emitted.
    """
    if item.f_ext == '0':
        # always override f_default - generated t_image is always 0.0
        if item.f_default == '1':
            item.ssour = from_source
            item.simag = to_focus
            item.theta = item.t_incidence
            item.f_default = '0'
    res = _item_field(item, ['fmirr', 'alpha', 'fhit_c'])
    # fmirr: surface shape enum (sphere/ellipsoid/toroid/paraboloid/...)
    if item.fmirr in ('1', '2', '3', '4', '7'):
        res += _item_field(item, ['f_ext'])
        if item.f_ext == '0':
            res += _item_field(item, ['f_default', 'ssour', 'simag', 'theta'])
    if item.fmirr in ('1', '2', '4', '7'):
        res += _item_field(item, ['f_convex', 'fcyl'])
        if item.fcyl == '1':
            res += _item_field(item, ['cil_ang'])
    if item.fmirr == '1':
        if item.f_ext == '1':
            res += _item_field(item, ['rmirr'])
    elif item.fmirr in ('2', '7'):
        if item.f_ext == '1':
            res += _item_field(item, ['axmaj', 'axmin', 'ell_the'])
    elif item.fmirr == '3':
        res += _item_field(item, ['f_torus'])
        if item.f_ext == '1':
            res += _item_field(item, ['r_maj', 'r_min'])
    elif item.fmirr == '4':
        if item.f_ext == '0':
            res += _item_field(item, ['f_side'])
        else:
            res += _item_field(item, ['param'])
    # finite element dimensions
    if item.fhit_c == '1':
        res += _item_field(item, ['fshape'])
        if item.fshape == '1':
            res += _item_field(item, ['halfWidthX1', 'halfWidthX2', 'halfLengthY1', 'halfLengthY2'])
        else:
            res += _item_field(item, ['externalOutlineMajorAxis', 'externalOutlineMinorAxis'])
            if item.fshape == '3':
                res += _item_field(item, ['internalOutlineMajorAxis', 'internalOutlineMinorAxis'])
    # misalignment: only emit f_move when at least one offset is non-zero
    if 'offx' in item:
        misalignment = ''
        for f in ('offx', 'offy', 'offz', 'x_rot', 'y_rot', 'z_rot'):
            if item[f] != 0:
                misalignment += _item_field(item, [f])
        if misalignment:
            res += _field_value('oe', 'f_move', '1')
            res += misalignment
    return res
def _generate_geometric_source(data):
    """Generate source settings for a geometric source."""
    geo = data.models.geometricSource
    pieces = [
        _source_field(geo, ['fsour', 'wxsou', 'wzsou', 'sigmax', 'sigmaz', 'fdistr', 'sigdix', 'sigdiz', 'cone_max', 'cone_min', 'fsource_depth', 'wysou', 'sigmay', 'f_color', 'f_polar', 'f_coher', 'pol_angle', 'pol_deg']),
        _source_field(data.models.sourceDivergence, ['hdiv1', 'hdiv2', 'vdiv1', 'vdiv2']),
        _field_value('source', 'f_phot', 0),
    ]
    if geo.f_color == '1':
        # single energy vs energy range
        pieces.append(_source_field(geo, ['singleEnergyValue']))
    else:
        pieces.append(_source_field(geo, ['ph1', 'ph2']))
    return ''.join(pieces)
def _generate_grating(item):
    """Generate OE settings for a grating element.

    f_ruling selects the ruling model (constant, holographic, fan,
    polynomial); only the fields relevant to that model are emitted.
    """
    res = _field_value('oe', 'f_grating', '1')
    res += _generate_autotune_element(item)
    res += _item_field(item, ['f_ruling', 'order'])
    if item.f_ruling in ('0', '1'):
        res += _item_field(item, ['rulingDensity'])
    elif item.f_ruling == '2':
        res += _item_field(item, ['holo_r1', 'holo_r2', 'holo_del', 'holo_gam', 'holo_w', 'holo_rt1', 'holo_rt2', 'f_pw', 'f_pw_c', 'f_virtual'])
    elif item.f_ruling == '3':
        res += _item_field(item, ['rulingDensityCenter'])
    elif item.f_ruling == '5':
        res += _item_field(item, ['rulingDensityPolynomial', 'f_rul_abs', 'rul_a1', 'rul_a2', 'rul_a3', 'rul_a4'])
    if item.f_central == '1':
        res += _item_field(item, ['f_mono'])
        if item.f_mono == '4':
            res += _item_field(item, ['f_hunt', 'hunt_h', 'hunt_l', 'blaze'])
    return res
def _generate_mirror(item):
    """Generate OE settings for a mirror, including an optional reflectivity
    preprocessor call (prerefl or pre_mlayer) that writes file_refl.

    NOTE: mutates `item`, forcing t_reflection equal to t_incidence.
    """
    item.t_reflection = item.t_incidence
    res = _item_field(item, ['t_incidence', 't_reflection'])
    if item.f_reflec in ('1', '2'):
        res += _item_field(item, ['f_reflec'])
        if item.f_refl == '0':
            # single-material reflectivity via prerefl
            prerefl_filename = 'mirror-prerefl-{}.txt'.format(item.id)
            res += "\n" + "prerefl(interactive=False, SYMBOL='{}', DENSITY={}, FILE='{}', E_MIN={}, E_MAX={}, E_STEP={})".format(
                item.prereflElement,
                item.prereflDensity,
                prerefl_filename,
                item.reflectivityMinEnergy,
                item.reflectivityMaxEnergy,
                item.prereflStep,
            )
            res += _field_value('oe', 'file_refl', "b'{}'".format(prerefl_filename))
        elif item.f_refl == '2':
            # multilayer reflectivity via pre_mlayer
            mlayer_filename = 'mirror-pre_mlayer-{}.txt'.format(item.id)
            res += "\n" + "pre_mlayer(interactive=False, FILE='{}',E_MIN={},E_MAX={},S_DENSITY={},S_MATERIAL='{}',E_DENSITY={},E_MATERIAL='{}',O_DENSITY={},O_MATERIAL='{}',N_PAIRS={},THICKNESS={},GAMMA={},ROUGHNESS_EVEN={},ROUGHNESS_ODD={})".format(
                mlayer_filename,
                item.reflectivityMinEnergy,
                item.reflectivityMaxEnergy,
                item.mlayerSubstrateDensity,
                item.mlayerSubstrateMaterial,
                item.mlayerEvenSublayerDensity,
                item.mlayerEvenSublayerMaterial,
                item.mlayerOddSublayerDensity,
                item.mlayerOddSublayerMaterial,
                item.mlayerBilayerNumber,
                item.mlayerBilayerThickness,
                item.mlayerGammaRatio,
                item.mlayerEvenRoughness,
                item.mlayerOddRoughness,
            )
            res += _field_value('oe', 'file_refl', "b'{}'".format(mlayer_filename))
        res += _item_field(item, ['f_refl', 'f_thick'])
    return res
def _generate_parameters_file(data, run_dir=None, is_parallel=False):
    """Render the python script that runs shadow for the requested report.

    Validates and unit-converts `data` in place, fills the jinja context
    from the flattened models, and renders the template for the simulation
    type (the beam statistics report uses its own template).
    """
    _validate_data(data, SCHEMA)
    _scale_units(data)
    v = template_common.flatten_data(data.models, PKDict())
    r = data.report
    report_model = data.models[r]
    beamline = data.models.beamline
    v.shadowOutputFile = _SHADOW_OUTPUT_FILE
    if _has_zone_plate(beamline):
        v.zonePlateMethods = template_common.render_jinja(SIM_TYPE, v, 'zone_plate.py')
    # source-type specific settings
    if v.simulation_sourceType == 'bendingMagnet':
        v.bendingMagnetSettings = _generate_bending_magnet(data)
    elif v.simulation_sourceType == 'geometricSource':
        v.geometricSourceSettings = _generate_geometric_source(data)
    elif v.simulation_sourceType == 'wiggler':
        v.wigglerSettings = _generate_wiggler(data)
        v.wigglerTrajectoryFilename = _WIGGLER_TRAJECTORY_FILENAME
        v.wigglerTrajectoryInput = ''
        if data.models.wiggler.b_from in ('1', '2'):
            v.wigglerTrajectoryInput = _SIM_DATA.shadow_wiggler_file(data.models.wiggler.trajFile)
    elif v.simulation_sourceType == 'undulator':
        v.undulatorSettings = template_common.render_jinja(SIM_TYPE, v, 'undulator.py')
    if r == 'initialIntensityReport':
        v.distanceFromSource = beamline[0].position if beamline else template_common.DEFAULT_INTENSITY_DISTANCE
    elif r == 'beamStatisticsReport':
        v.simulation_npoint = 10000
        v.beamlineOptics = _generate_beamline_optics(data.models, calc_beam_stats=True)
        v.beamStatsFile = BEAM_STATS_FILE
        assert v.simulation_sourceType in ('bendingMagnet', 'geometricSource', 'undulator')
        # pick a representative photon energy for the statistics run
        if v.simulation_sourceType == 'geometricSource':
            if v.geometricSource_f_color == '1':
                v.photonEnergy = v.geometricSource_singleEnergyValue
            else:
                v.photonEnergy = (v.geometricSource_ph1 + v.geometricSource_ph2) / 2
        elif v.simulation_sourceType == 'undulator':
            if v.undulator_select_energy == 'range':
                v.photonEnergy = (v.undulator_emin + v.undulator_emax) / 2
            else:
                v.photonEnergy = v.undulator_photon_energy
        elif v.simulation_sourceType == 'bendingMagnet':
            v.photonEnergy = v.bendingMagnet_ph1
        return template_common.render_jinja(SIM_TYPE, v, 'beam_statistics.py')
    elif _SIM_DATA.is_watchpoint(r):
        v.beamlineOptics = _generate_beamline_optics(data.models, last_id=_SIM_DATA.watchpoint_id(r))
    else:
        v.distanceFromSource = report_model.distanceFromSource
    return template_common.render_jinja(SIM_TYPE, v)
def _generate_screen(item):
    """Generate slit/stop settings for an aperture or obstacle element."""
    parts = [
        "\n" + 'oe.set_empty().set_screens()',
        _field_value('oe', 'i_slit[0]', '1'),
        _field_value('oe', 'k_slit[0]', 0 if item.shape == '0' else 1),
        _field_value('oe', 'i_stop[0]', 0 if item.type == 'aperture' else 1),
        _item_field(item, ['horizontalSize', 'verticalSize', 'horizontalOffset', 'verticalOffset']),
    ]
    return ''.join(parts)
def _generate_trace(source_distance, trace_method, count):
    """Generate the trace call for the current optical element."""
    parts = (
        _field_value('oe', 'fwrite', '3'),
        _field_value('oe', 't_image', 0.0),
        _field_value('oe', 't_source', source_distance),
        "\n" + 'beam.{}(oe, {})'.format(trace_method, count),
    )
    return ''.join(parts)
def _generate_wiggler(data):
    """Generate source settings for a wiggler (trajectory-file based) source."""
    return _source_field(data.models.electronBeam, ['sigmax', 'sigmaz', 'epsi_x', 'epsi_z', 'bener', 'epsi_dx', 'epsi_dz']) \
        + _source_field(data.models.wiggler, ['ph1', 'ph2']) \
        + _field_value('source', 'fdistr', 0) \
        + _field_value('source', 'fsource_depth', 0) \
        + _field_value('source', 'f_wiggler', 1) \
        + _field_value('source', 'conv_fact', 100.0) \
        + _field_value('source', 'hdiv1', 1.0) \
        + _field_value('source', 'hdiv2', 1.0) \
        + _field_value('source', 'vdiv1', 1.0) \
        + _field_value('source', 'vdiv2', 1.0) \
        + _field_value('source', 'f_color', 0) \
        + _field_value('source', 'f_phot', 0) \
        + _field_value('source', 'file_traj', "b'{}'".format(_WIGGLER_TRAJECTORY_FILENAME))
def _generate_zone_plate(item, source_distance, count, res, energy, calc_beam_stats):
    """Append script text for a zone plate element.

    Emits a zone_plate_simulator() setup, a circular aperture, an ideal
    lens with the simulated focal distance, and (except for beam stats)
    a trace through the zone plate itself. Returns (count, res).
    """
    # all conversions to meters should be handled by ModelUnits
    res += f'''
zp = zone_plate_simulator(
    {item.zone_plate_type},
    {item.width_coating},
    {item.height},
    {item.diameter * 1e-2},
    {item.b_min},
    '{item.zone_plate_material}',
    '{item.template_material}',
    {energy} * 1e-3,
    {item.n_points},
)
'''
    # circular aperture
    res += '\noe = Shadow.OE()' + _field_value('oe', 'dummy', '1.0')
    res += _generate_screen(PKDict(
        type='aperture',
        shape='1',
        horizontalSize=item.diameter,
        horizontalOffset=0,
        verticalSize=item.diameter,
        verticalOffset=0,
    )) + _generate_trace(source_distance, 'traceOE', count)
    if calc_beam_stats:
        res += '\n' + 'pos = calculate_stats(pos, oe)'
    # lens
    count += 1
    res += '\n\noe = Shadow.OE()' + _field_value('oe', 'dummy', '1.0')
    res += _item_field(PKDict(
        focal_x='zp.focal_distance * 1e2',
        focal_z='zp.focal_distance * 1e2',
    ), ['focal_x', 'focal_z']) + _generate_trace(0, 'traceIdealLensOE', count)
    if calc_beam_stats:
        res += '\n' + 'pos = calculate_stats(pos, oe)'
    if not calc_beam_stats:
        # do not trace through zone plate for stats - not enough particles
        count += 1
        res += f'\n\ntrace_through_zone_plate(beam, zp, {item.last_index})\n'
    return count, res
def _init_model_units():
    """Build the ModelUnits converter between GUI and shadow-native units.

    Every field in _CENTIMETER_FIELDS gets a 'cm_to_m' conversion; grating
    ruling fields get per-mm/per-cm conversions, with the polynomial
    coefficients scaled by the matching power of ten.
    """
    def _scale(v, factor, is_native):
        # 0.1**factor per power of the length unit in the quantity
        scale = 0.1 ** factor
        return v * scale if is_native else v / scale
    def _mm2_to_cm2(v, is_native):
        return _scale(v, 2, is_native)
    def _mm3_to_cm3(v, is_native):
        return _scale(v, 3, is_native)
    def _mm4_to_cm4(v, is_native):
        return _scale(v, 4, is_native)
    def _mm5_to_cm5(v, is_native):
        return _scale(v, 5, is_native)
    res = ModelUnits(PKDict({
        x: PKDict({ y: 'cm_to_m' for y in _CENTIMETER_FIELDS[x]}) for x in _CENTIMETER_FIELDS.keys()
    }))
    res.unit_def.grating.pkupdate(PKDict({
        'rul_a1': _mm2_to_cm2,
        'rul_a2': _mm3_to_cm3,
        'rul_a3': _mm4_to_cm4,
        'rul_a4': _mm5_to_cm5,
        'rulingDensity': 'mm_to_cm',
        'rulingDensityCenter': 'mm_to_cm',
        'rulingDensityPolynomial': 'mm_to_cm',
    }))
    return res
def _has_zone_plate(beamline):
for item in beamline:
if item.type == 'zonePlate':
return True
return False
def _is_disabled(item):
return 'isDisabled' in item and item.isDisabled
def _item_field(item, fields):
    """Render beamline-element fields as assignments on the `oe` object."""
    return _fields('oe', item, fields)
def _parse_shadow_log(run_dir):
    """Extract a user-facing error message from the run log, if possible."""
    log_path = run_dir.join(template_common.RUN_LOG)
    if log_path.exists():
        for log_line in pkio.read_text(log_path).split("\n"):
            if re.search(r'invalid chemical formula', log_line):
                return 'A mirror contains an invalid reflectivity material'
            m = re.search('ValueError: (.*)?', log_line)
            if m:
                return m.group(1)
    return 'an unknown error occurred'
def _photon_energy(models):
source_type = models.simulation.sourceType
if source_type == 'undulator':
if models.undulator.select_energy == 'range':
return (models.undulator.emin + models.undulator.emax) / 2
return models.undulator.photon_energy
if source_type == 'geometricSource':
if models.geometricSource.f_color == '1':
return models.geometricSource.singleEnergyValue
return (models[source_type].ph1 + models[source_type].ph2) / 2
def _scale_units(data):
    """Convert editor values to shadow's native units, in place."""
    unit_def = _MODEL_UNITS.unit_def
    for model_name in unit_def:
        if model_name in data.models:
            _MODEL_UNITS.scale_to_native(model_name, data.models[model_name])
    for element in data.models.beamline:
        if element.type in unit_def:
            _MODEL_UNITS.scale_to_native(element.type, element)
def _source_field(model, fields):
    """Render model fields as assignments on the `source` object."""
    return _fields('source', model, fields)
def _validate_data(data, schema):
    """Validate models and normalize undulator energy settings, in place.

    A single-energy undulator gets emin == emax == photon_energy, and a
    degenerate energy range collapses to one energy point (ng_e = 1).
    """
    template_common.validate_models(data, schema)
    if data.models.simulation.sourceType != 'undulator':
        return
    und = data.models.undulator
    if und.select_energy == 'single':
        und.emin = und.photon_energy
        und.emax = und.photon_energy
    if und.emin == und.emax:
        und.ng_e = 1
# Module-level units converter, built once at import time.
_MODEL_UNITS = _init_model_units()
| radiasoft/sirepo | sirepo/template/shadow.py | Python | apache-2.0 | 29,799 | [
"CRYSTAL"
] | 2f5c10696c849b6deab1d83accf31698f61490f6d66ba5fb1907b48a90548316 |
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2012-2013 Esteban Tovagliari, Jupiter Jazz Limited
# Copyright (c) 2014-2016 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from sys import hexversion as appleseed_python_hexversion
if appleseed_python_hexversion < 0x030000F0:
# Python 2.x
from _appleseedpython import ILogTarget
else:
# Python 3.x
from ._appleseedpython import ILogTarget
class ConsoleLogTarget(ILogTarget):
    """Log target that forwards appleseed messages to a stream.

    Each line of a multi-line message is prefixed with the message header.
    """

    def __init__(self, stream):
        ILogTarget.__init__(self)
        self.__stream = stream

    def write(self, category, file, line, header, message):
        for message_line in message.split('\n'):
            self.__stream.write(header + message_line + '\n')
class FileLogTarget(ILogTarget):
    """Log target that writes appleseed messages to a file on disk.

    write() is a no-op until open() has been called; each line of a
    multi-line message is prefixed with the message header.
    """

    def __init__(self):
        ILogTarget.__init__(self)
        # File object of the current log file, or None when closed.
        self.__file = None

    def open(self, filename):
        """Open filename for writing, closing any previously open file."""
        if self.is_open():
            self.close()
        self.__file = open(filename, "w")

    def close(self):
        """Close the current log file, if any."""
        if self.is_open():
            self.__file.close()
            self.__file = None

    def is_open(self):
        """Return True if a log file is currently open."""
        # PEP 8: identity comparison with None, not equality.
        return self.__file is not None

    def write(self, category, file, line, header, message):
        """Write each line of message prefixed by header; no-op when closed."""
        if self.is_open():
            # Avoid shadowing the `line` parameter with the loop variable.
            for message_line in message.split('\n'):
                self.__file.write(header + message_line + '\n')
| docwhite/appleseed | src/appleseed.python/logging.py | Python | mit | 2,551 | [
"VisIt"
] | 3d225da51c5a1bdbb6cf82bfdf94aff15947478df0c3bf0c5e8023bd047cfa7e |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import tempfile
from functools import reduce
import numpy
from pyscf import lib
from pyscf import gto, scf, ao2mo
from pyscf.tools import fcidump
import tempfile
# Module-level fixtures shared by all tests: an N2 molecule in a minimal
# (STO-3G) basis with D2h symmetry, solved once with RHF at import time.
mol = gto.Mole()
mol.atom = '''
N 0.0000000000 0.0000000000 0.0000000000
N 0.0000000000 0.0000000000 1.0977000000
'''
mol.basis = 'sto-3g'
mol.symmetry = 'D2h'
mol.charge = 0
mol.spin = 0 #2*S; multiplicity-1
mol.verbose = 0
mol.build(0, 0)
# Converged RHF reference used by the fcidump round-trip tests.
mf = scf.RHF(mol).run()
def tearDownModule():
global mol, mf
del mol, mf
class KnownValues(unittest.TestCase):
def test_from_chkfile(self):
tmpfcidump = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
fcidump.from_chkfile(tmpfcidump.name, mf.chkfile, tol=1e-15,
molpro_orbsym=True)
def test_from_integral(self):
tmpfcidump = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
h1 = reduce(numpy.dot, (mf.mo_coeff.T, mf.get_hcore(), mf.mo_coeff))
h2 = ao2mo.full(mf._eri, mf.mo_coeff)
fcidump.from_integrals(tmpfcidump.name, h1, h2, h1.shape[0],
mol.nelectron, tol=1e-15)
def test_read(self):
with tempfile.NamedTemporaryFile(mode='w+') as f:
f.write('''&FCI NORB=4,
NELEC=4, MS2=0, ISYM=1,
ORBSYM=1,2,3,4,
&END
0.42 1 1 1 1
0.33 1 1 2 2
0.07 1 1 3 1
0.46 1 1 0 0
0.13 1 2 0 0
1.1 0 0 0 0
''')
f.flush()
result = fcidump.read(f.name)
self.assertEqual(result['ISYM'], 1)
with tempfile.NamedTemporaryFile(mode='w+') as f:
f.write('''&FCI NORB=4, NELEC=4, MS2=0, ISYM=1,ORBSYM=1,2,3,4, &END
0.42 1 1 1 1
0.33 1 1 2 2
0.07 1 1 3 1
0.46 1 1 0 0
0.13 1 2 0 0
1.1 0 0 0 0
''')
f.flush()
result = fcidump.read(f.name)
self.assertEqual(result['MS2'], 0)
def test_to_scf(self):
'''Test from_scf and to_scf'''
tmpfcidump = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
fcidump.from_scf(mf, tmpfcidump.name)
mf1 = fcidump.to_scf(tmpfcidump.name)
mf1.init_guess = mf.make_rdm1()
mf1.kernel()
self.assertTrue(abs(mf1.e_tot - mf.e_tot).max() < 1e-9)
self.assertTrue(numpy.array_equal(mf.orbsym, mf1.orbsym))
if __name__ == "__main__":
print("Full Tests for fcidump")
unittest.main()
| sunqm/pyscf | pyscf/tools/test/test_fcidump.py | Python | apache-2.0 | 2,967 | [
"PySCF"
] | ec12c347a77ddb93c591d9d8fb85555f73df10b3c57bdcbf43e9687111690c7d |
# Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classes corresponding to Newick trees, also used for Nexus trees.
See classes in `Bio.Nexus`: Trees.Tree, Trees.NodeData, and Nodes.Chain.
"""
__docformat__ = "restructuredtext en"
from Bio.Phylo import BaseTree
class Tree(BaseTree.Tree):
"""Newick Tree object."""
def __init__(self, root=None, rooted=False, id=None, name=None, weight=1.0):
BaseTree.Tree.__init__(self, root=root or Clade(),
rooted=rooted, id=id, name=name)
self.weight = weight
class Clade(BaseTree.Clade):
"""Newick Clade (sub-tree) object."""
def __init__(self, branch_length=None, name=None, clades=None,
confidence=None, comment=None):
BaseTree.Clade.__init__(self, branch_length=branch_length,
name=name, clades=clades, confidence=confidence)
self.comment = comment
| updownlife/multipleK | dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/Phylo/Newick.py | Python | gpl-2.0 | 1,112 | [
"Biopython"
] | 7774ac0624eabe70ac18e9e320d489c5f5a5e92ff4fe8cb895508e5f558af8d0 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
btab format, used by BLAST and NUCMER, found spec here:
<http://www.agcol.arizona.edu/~matthew/formats.html>
btab format used by aat, found spec here:
<http://ergatis.diagcomputing.org/cgi/documentation.cgi?article=components&page=aat_aa>
"""
import sys
from jcvi.formats.base import LineFile, must_open
from jcvi.formats.gff import valid_gff_type
from jcvi.apps.base import OptionParser, ActionDispatcher
class BtabLine (object):
def __init__(self, row, aat_dialect=False):
args = row.strip().split("\t")
self.nargs = len(args) # number of columns
# query attributes
self.query = args[0].split()[0]
self.qLen = int(args[2])
self.qStart = int(args[6])
self.qStop = int(args[7])
self.qFrame = int(args[16])
self.qStrand = "-" if args[17] == "Minus" else "+"
# subject attributes
self.subject = args[5]
self.sStart = int(args[8])
self.sStop = int(args[9])
self.sDesc = args[15]
self.sLen = int(args[18])
if self.qStrand == "-":
self.sStart, self.sStop = self.sStop, self.sStart
# pct id/sim
self.pctid = float(args[10])
self.pctsim = float(args[11])
# search metadata
self.date = args[1]
self.method = args[3]
self.database = args[4]
if aat_dialect:
self.score = float(args[12]) # domain score
self.chainNum = int(args[13]) # index of subject in btab file
self.segmentNum = int(args[14]) # match_part index of query
# build a unique key by joining query id, subject id and subject index
self.key = "-".join(str(x) for x in (self.query, self.subject, self.chainNum))
else:
self.score = float(args[13])
self.evalue = float(args[19])
self.pvalue = float(args[20]) if len(args) > 20 else None
def __getitem__(self, key):
return getattr(self, key)
@property
def blastline(self):
# some fields are not represented so ignore
return "\t".join((self.query, self.subject + " " + self.sDesc,
"%.2f" % self.pctid,
"0", "0", "0",
"%d" % self.qStart, "%d" % self.qStop,
"%d" % self.sStart, "%d" % self.sStop,
"%.1g" % self.evalue, "%.1f" % self.score))
def gffline(self, source=None, type="match_part", primary_tag="Parent", id=None):
source = self.method if not source else source
if type not in valid_gff_type:
score = "{0:.2f}".format(self.pctid)
target = " ".join(str(x) for x in [self.subject, self.sStart, self.sStop])
attributes = ";".join(str(x) for x in [primary_tag + "=" + id, "Target=" + target])
else:
score = "."
note = "\"{0}\"".format(self.sDesc) if " " in self.sDesc else self.sDesc
attributes = ";".join(str(x) for x in [primary_tag + "=" + id, "Name=" + self.subject, \
"Note=" + note])
line = "\t".join(str(x) for x in [self.query, source, type, self.qStart, self.qStop, \
score, self.qStrand, ".", attributes])
return line
class Btab(LineFile):
def __init__(self, filename, aat_dialect=False):
super(Btab, self).__init__(filename)
for line in must_open(filename):
if line[0] == "#":
continue
self.append(BtabLine(line, aat_dialect=aat_dialect))
def main():
actions = (
('blast', 'convert btab to BLAST -m8 format'),
('bed', 'convert btab to bed format'),
('gff', 'convert from btab (generated by AAT) to gff3 format'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def blast(args):
"""
%prog blast btabfile
Convert to BLAST -m8 format.
"""
p = OptionParser(blast.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
btabfile, = args
btab = Btab(btabfile)
for b in btab:
print b.blastline
def bed(args):
"""
%prog bed btabfile
Convert btab to bed format.
"""
from jcvi.formats.blast import BlastLine
p = OptionParser(bed.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
btabfile, = args
btab = Btab(btabfile)
for b in btab:
Bline = BlastLine(b.blastline)
print Bline.bedline
def gff(args):
"""
%prog gff btabfile
Convert btab file generated by AAT to gff3 format.
"""
from jcvi.utils.range import range_minmax
from jcvi.formats.gff import valid_gff_parent_child, valid_gff_type
p = OptionParser(gff.__doc__)
p.add_option("--source", default=None, help="Specify GFF source." +
" By default, it picks algorithm used to generate btab file." +
" [default: %default]")
p.add_option("--type", default="protein_match", choices=valid_gff_type,
help="GFF feature type [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
btabfile, = args
btabdict = {}
btab = Btab(btabfile, aat_dialect=True)
osource = opts.source or "aat"
otype = opts.type
octype = valid_gff_parent_child[otype]
for b in btab:
nargs = b.nargs
id = b.query + "-" + otype + "{0:05d}".format(b.chainNum)
key = b.key
if key not in btabdict:
btabdict[key] = { 'id': id,
'method': b.method,
'query': b.query,
'subject': b.subject,
'strand': b.qStrand,
'sDesc': b.sDesc,
'coords': [],
'children': []
}
btabdict[key]['coords'].append((b.qStart, b.qStop))
btabdict[key]['children'].append(b.gffline(source=osource, type=octype, id=id))
for v in btabdict.itervalues():
b = BtabLine("\t".join(str(x) for x in [0] * nargs), aat_dialect=True)
id = v['id']
b.query = v['query']
b.method = v['method']
b.subject = v['subject']
b.qStrand = v['strand']
b.sDesc = v['sDesc']
b.qStart, b.qStop = range_minmax(v['coords'])
print b.gffline(source=osource, type=otype, primary_tag="ID", id=id)
print "\n".join(v['children'])
if __name__ == '__main__':
main()
| sgordon007/jcvi_062915 | formats/btab.py | Python | bsd-2-clause | 6,660 | [
"BLAST"
] | 4c58eed4694e7a11a8898d066d573972fe93d2b257cb46e173c20041ed76b2bf |
# -*- coding: utf-8 -*-
"""
sphinx.writers.manpage
~~~~~~~~~~~~~~~~~~~~~~
Manual page writer, extended for Sphinx custom nodes.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
try:
from docutils.writers.manpage import MACRO_DEF, Writer, \
Translator as BaseTranslator
has_manpage_writer = True
except ImportError:
# define the classes in any case, sphinx.application needs it
Writer = BaseTranslator = object
has_manpage_writer = False
from sphinx import addnodes
from sphinx.locale import admonitionlabels, versionlabels, _
from sphinx.util.osutil import ustrftime
class ManualPageWriter(Writer):
def __init__(self, builder):
Writer.__init__(self)
self.builder = builder
def translate(self):
visitor = ManualPageTranslator(self.builder, self.document)
self.visitor = visitor
self.document.walkabout(visitor)
self.output = visitor.astext()
class ManualPageTranslator(BaseTranslator):
"""
Custom translator.
"""
def __init__(self, builder, *args, **kwds):
BaseTranslator.__init__(self, *args, **kwds)
self.builder = builder
self.in_productionlist = 0
# first title is the manpage title
self.section_level = -1
# docinfo set by man_pages config value
self._docinfo['title'] = self.document.settings.title
self._docinfo['subtitle'] = self.document.settings.subtitle
if self.document.settings.authors:
# don't set it if no author given
self._docinfo['author'] = self.document.settings.authors
self._docinfo['manual_section'] = self.document.settings.section
# docinfo set by other config values
self._docinfo['title_upper'] = self._docinfo['title'].upper()
if builder.config.today:
self._docinfo['date'] = builder.config.today
else:
self._docinfo['date'] = ustrftime(builder.config.today_fmt
or _('%B %d, %Y'))
self._docinfo['copyright'] = builder.config.copyright
self._docinfo['version'] = builder.config.version
self._docinfo['manual_group'] = builder.config.project
# since self.append_header() is never called, need to do this here
self.body.append(MACRO_DEF)
# overwritten -- added quotes around all .TH arguments
def header(self):
tmpl = (".TH \"%(title_upper)s\" \"%(manual_section)s\""
" \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
".SH NAME\n"
"%(title)s \- %(subtitle)s\n")
return tmpl % self._docinfo
def visit_start_of_file(self, node):
pass
def depart_start_of_file(self, node):
pass
def visit_desc(self, node):
self.visit_definition_list(node)
def depart_desc(self, node):
self.depart_definition_list(node)
def visit_desc_signature(self, node):
self.visit_definition_list_item(node)
self.visit_term(node)
def depart_desc_signature(self, node):
self.depart_term(node)
def visit_desc_addname(self, node):
pass
def depart_desc_addname(self, node):
pass
def visit_desc_type(self, node):
pass
def depart_desc_type(self, node):
pass
def visit_desc_returns(self, node):
self.body.append(' -> ')
def depart_desc_returns(self, node):
pass
def visit_desc_name(self, node):
pass
def depart_desc_name(self, node):
pass
def visit_desc_parameterlist(self, node):
self.body.append('(')
self.first_param = 1
def depart_desc_parameterlist(self, node):
self.body.append(')')
def visit_desc_parameter(self, node):
if not self.first_param:
self.body.append(', ')
else:
self.first_param = 0
def depart_desc_parameter(self, node):
pass
def visit_desc_optional(self, node):
self.body.append('[')
def depart_desc_optional(self, node):
self.body.append(']')
def visit_desc_annotation(self, node):
pass
def depart_desc_annotation(self, node):
pass
def visit_desc_content(self, node):
self.visit_definition(node)
def depart_desc_content(self, node):
self.depart_definition(node)
def visit_refcount(self, node):
self.body.append(self.defs['emphasis'][0])
def depart_refcount(self, node):
self.body.append(self.defs['emphasis'][1])
def visit_versionmodified(self, node):
self.visit_paragraph(node)
text = versionlabels[node['type']] % node['version']
if len(node):
text += ': '
else:
text += '.'
self.body.append(text)
def depart_versionmodified(self, node):
self.depart_paragraph(node)
def visit_termsep(self, node):
self.body.append(', ')
raise nodes.SkipNode
# overwritten -- we don't want source comments to show up
def visit_comment(self, node):
raise nodes.SkipNode
# overwritten -- added ensure_eol()
def visit_footnote(self, node):
self.ensure_eol()
BaseTranslator.visit_footnote(self, node)
# overwritten -- handle footnotes rubric
def visit_rubric(self, node):
self.ensure_eol()
if len(node.children) == 1:
rubtitle = node.children[0].astext()
if rubtitle in ('Footnotes', _('Footnotes')):
self.body.append('.SH ' + self.deunicode(rubtitle).upper() +
'\n')
raise nodes.SkipNode
else:
self.body.append('.sp\n')
def depart_rubric(self, node):
pass
def visit_seealso(self, node):
self.visit_admonition(node)
def depart_seealso(self, node):
self.depart_admonition(node)
# overwritten -- use our own label translations
def visit_admonition(self, node, name=None):
if name:
self.body.append('.IP %s\n' %
self.deunicode(admonitionlabels.get(name, name)))
def visit_productionlist(self, node):
self.ensure_eol()
names = []
self.in_productionlist += 1
self.body.append('.sp\n.nf\n')
for production in node:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
for production in node:
if production['tokenname']:
lastname = production['tokenname'].ljust(maxlen)
self.body.append(self.defs['strong'][0])
self.body.append(self.deunicode(lastname))
self.body.append(self.defs['strong'][1])
self.body.append(' ::= ')
else:
self.body.append('%s ' % (' '*len(lastname)))
production.walkabout(self)
self.body.append('\n')
self.body.append('\n.fi\n')
self.in_productionlist -= 1
raise nodes.SkipNode
def visit_production(self, node):
pass
def depart_production(self, node):
pass
# overwritten -- don't emit a warning for images
def visit_image(self, node):
if 'alt' in node.attributes:
self.body.append(_('[image: %s]') % node['alt'] + '\n')
self.body.append(_('[image]') + '\n')
raise nodes.SkipNode
# overwritten -- don't visit inner marked up nodes
def visit_reference(self, node):
self.body.append(self.defs['reference'][0])
self.body.append(node.astext())
self.body.append(self.defs['reference'][1])
uri = node.get('refuri', '')
if uri.startswith('mailto:') or uri.startswith('http:') or \
uri.startswith('https:') or uri.startswith('ftp:'):
# if configured, put the URL after the link
if self.builder.config.man_show_urls and \
node.astext() != uri:
if uri.startswith('mailto:'):
uri = uri[7:]
self.body.extend([
' <',
self.defs['strong'][0], uri, self.defs['strong'][1],
'>'])
raise nodes.SkipNode
def visit_centered(self, node):
self.ensure_eol()
self.body.append('.sp\n.ce\n')
def depart_centered(self, node):
self.body.append('\n.ce 0\n')
def visit_compact_paragraph(self, node):
pass
def depart_compact_paragraph(self, node):
pass
def visit_highlightlang(self, node):
pass
def depart_highlightlang(self, node):
pass
def visit_download_reference(self, node):
pass
def depart_download_reference(self, node):
pass
def visit_toctree(self, node):
raise nodes.SkipNode
def visit_index(self, node):
raise nodes.SkipNode
def visit_tabular_col_spec(self, node):
raise nodes.SkipNode
def visit_glossary(self, node):
pass
def depart_glossary(self, node):
pass
def visit_acks(self, node):
self.ensure_eol()
self.body.append(', '.join(n.astext()
for n in node.children[0].children) + '.')
self.body.append('\n')
raise nodes.SkipNode
def visit_hlist(self, node):
self.visit_bullet_list(node)
def depart_hlist(self, node):
self.depart_bullet_list(node)
def visit_hlistcol(self, node):
pass
def depart_hlistcol(self, node):
pass
def visit_literal_emphasis(self, node):
return self.visit_emphasis(node)
def depart_literal_emphasis(self, node):
return self.depart_emphasis(node)
def visit_abbreviation(self, node):
pass
def depart_abbreviation(self, node):
pass
# overwritten: handle section titles better than in 0.6 release
def visit_title(self, node):
if isinstance(node.parent, addnodes.seealso):
self.body.append('.IP "')
return
elif isinstance(node.parent, nodes.section):
if self.section_level == 0:
# skip the document title
raise nodes.SkipNode
elif self.section_level == 1:
self.body.append('.SH %s\n' %
self.deunicode(node.astext().upper()))
raise nodes.SkipNode
return BaseTranslator.visit_title(self, node)
def depart_title(self, node):
if isinstance(node.parent, addnodes.seealso):
self.body.append('"\n')
return
return BaseTranslator.depart_title(self, node)
def visit_raw(self, node):
if 'manpage' in node.get('format', '').split():
self.body.append(node.astext())
raise nodes.SkipNode
def unknown_visit(self, node):
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sphinx/writers/manpage.py | Python | agpl-3.0 | 11,052 | [
"VisIt"
] | bfea26110de611d572c3612825c3549370b6b8e3f5722117c330352d4d84e2b7 |
# -*- coding: utf-8 -*-
#
# String constants for the RHN Register TUI/GUI.
# Copyright (c) 2000--2015 Red Hat, Inc.
#
# Author:
# James Slagle <jslagle@redhat.com>
import gettext
t = gettext.translation('rhn-client-tools', fallback=True)
_ = t.ugettext
COPYRIGHT_TEXT = _(u"Copyright © 2006--2010 Red Hat, Inc. All rights reserved.")
# Connect Window
CONNECT_WINDOW = _("Attempting to contact the Red Hat Network server.")
CONNECT_WINDOW_TEXT = _("We are attempting to contact the Red Hat "
"Network server at %s.")
CONNECT_WINDOW_TEXT2 = _("A proxy was specified at %s.")
# Start Window
START_REGISTER_WINDOW = _("Registering for software updates")
START_REGISTER_TEXT = _("This assistant will guide you through "
"connecting your system to "
"Red Hat Network (RHN) to receive software "
"updates, including "
"security updates, to keep your system supported "
"and compliant. "
"You will need the following at this time:\n\n"
" * A network connection\n"
" * Your Red Hat Login & password\n"
" * The location of a Red Hat Network Satellite "
"or Proxy (optional)\n\n")
# Why Register Window
WHY_REGISTER = _("Why Should I Connect to RHN? ...")
WHY_REGISTER_WINDOW = _("Why Register")
WHY_REGISTER_TEXT = _("Connecting your system to Red Hat Network allows you to take full "
"advantage of the benefits of a paid subscription, including:")
WHY_REGISTER_SEC = _("Security & Updates:")
WHY_REGISTER_DLD = _("Downloads & Upgrades:")
WHY_REGISTER_SUPP = _("Support:")
WHY_REGISTER_COMP = _("Compliance:")
WHY_REGISTER_SEC_TXT = _("Receive the latest software updates, including security updates, keeping this "
"Red Hat Enterprise Linux system updated and secure.")
WHY_REGISTER_DLD_TXT = _("Download installation images for Red Hat Enterprise Linux releases, "
"including new releases.")
WHY_REGISTER_SUPP_TXT = _("Access to the technical support experts at Red Hat or Red Hat's partners for help "
"with any issues you might encounter with this system.")
WHY_REGISTER_COMP_TXT = _("Stay in compliance with your subscription agreement "
"and manage subscriptions "
"for systems connected to your account at "
"http://rhn.redhat.com/.")
WHY_REGISTER_TIP = _("Tip: Red Hat values your privacy: "
"http://www.redhat.com/legal/privacy_statement.html")
BACK_REGISTER = _("Take me back to the registration")
# Confirm Quit Window
CONFIRM_QUIT = _("Software update setup unsuccessful")
CONFIRM_QUIT_SURE = _("Are you sure you don't want to connect your system to Red Hat Network? "
"You'll miss out on the benefits of a Red Hat Enterprise Linux subscription:\n")
CONFIRM_QUIT_WILLNOT = _("You will not be able to take advantage of these subscription privileges without connecting "
"your system to Red Hat Network.\n")
CONTINUE_REGISTERING = _("Take me back to the setup process.")
REGISTER_LATER2 = _("I'll register later.")
# Info Window
REGISTER_WINDOW = _("Enter you account information")
LOGIN_PROMPT = _("Please enter your login information for the %s Red "
"Hat Network Satellite:\n\n")
HOSTED_LOGIN = _("Red Hat Login:")
LOGIN = _("Login:")
PASSWORD = _("Password:")
LOGIN_TIP = _("Tip: Forgot your login or password? Contact your "
"Satellite's Organization Administrator.")
USER_REQUIRED = _("Please enter a desired login.")
PASSWORD_REQUIRED = _("Please enter and verify a password.")
# OS Release Window
SELECT_OSRELEASE = _("Select operating system release")
OS_VERSION = _("Operating System version:")
MINOR_RELEASE = _(" Minor Release: ")
LIMITED_UPDATES = _("Limited Updates Only")
ALL_UPDATES = _("All available updates")
CONFIRM_OS_RELEASE_SELECTION = _("Confirm operating system release selection")
CONFIRM_OS_ALL = _("Your system will be subscribed to the base"
" software channel to receive all available"
" updates.")
# Hardware Window
HARDWARE_WINDOW = _("Create your system profile - Hardware")
HARDWARE_WINDOW_DESC1 = _("A Profile Name is a descriptive name that"
" you choose to identify this System Profile"
" on the Red Hat Network web pages. Optionally,"
" include a computer serial or identification number.")
HARDWARE_WINDOW_DESC2 = _("Additional hardware information including PCI"
" devices, disk sizes and mount points will be"
" included in the profile.")
HARDWARE_WINDOW_CHECKBOX = _("Include the following information about hardware"
" and network:")
# Packages Window
PACKAGES_WINDOW = _("Create your system profile - Packages")
PACKAGES_WINDOW_DESC1 = _("RPM information is important to determine what"
" updated software packages are relevant to this"
" system.")
PACKAGES_WINDOW_DESC2 = _("Include RPM packages installed on this system"
" in my System Profile")
PACKAGES_WINDOW_UNCHECK = _("You may deselect individual packages by"
" unchecking them below.")
PACKAGES_WINDOW_PKGLIST = _("Building Package List")
# Product Window
HOSTED_LOGIN_PROMPT = _("Please enter your login information for Red "
"Hat Network (http://rhn.redhat.com/):\n\n")
HOSTED_LOGIN_TIP = _("Tip: Forgot your login or password? "
"Visit: https://rhn.redhat.com/rhn/sales/LoginInfo.do")
EMAIL = _("*Email Address:")
SYSTEM_ALREADY_SETUP = _("System already registered")
SYSTEM_ALREADY_REGISTERED = _("It appears this system has already been set up for software updates:")
SYSTEM_ALREADY_REGISTERED_CONT = _("Are you sure you would like to continue?")
RHSM_SYSTEM_ALREADY_REGISTERED = _("This system has already been registered with RHN"
" using RHN certficate-based technology.\n\n"
"The tool you are using is attempting to re-register"
" using RHN Classic technology. Red Hat recommends "
"(except in a few cases) that customers only register with RHN once.\n\n"
"To learn more about RHN registration and technologies please consult this"
" Knowledge Base Article: https://access.redhat.com/kb/docs/DOC-45563")
# Send Window
SEND_WINDOW = _("Send Profile Information to Red Hat Network")
SEND_WINDOW_DESC = _("We are finished collecting information for the System Profile.\n\n"
"Press \"Next\" to send this System Profile to Red Hat Network. "
"Click \"Cancel\" and no information will be sent. "
"You can run the registration program later by "
"typing `rhn_register` at the command line.")
# Sending Window
SENDING_WINDOW = _("Sending Profile to Red Hat Network")
# Finish Window
FINISH_WINDOW = _("Finish setting up software updates")
FINISH_WINDOW_TEXT_TUI = _("You may now run 'yum update' from this system's "
"command line to get the latest "
"software updates from Red Hat Network. You will need to run this "
"periodically to "
"get the latest updates. Alternatively, you may configure this "
"system for automatic software updates (also known as 'auto errata update') "
"via the Red Hat Network web interface. (Instructions for this are in chapter 6 "
"of the RHN Reference Guide, available from the 'Help' button in the main Red "
"Hat Network web interface.)")
# Review Window
REVIEW_WINDOW = _("Review system subscription details")
REVIEW_WINDOW_PROMPT = _("Please review the subscription details below:")
SUB_NUM = _("The installation number %s was activated during "
"this system's initial connection to Red Hat Network.")
SUB_NUM_RESULT = _("Subscriptions have been activated for the following "
"Red Hat products/services:")
CHANNELS_TITLE = _("Software channel subscriptions:")
OK_CHANNELS = _("This system will receive updates from the "
"following Red Hat Network software channels:")
CHANNELS_SAT_WARNING = _("Warning: If an installed product on this system "
"is not listed above, you "
"will not receive updates or support for that "
"product. If you would like "
"to receive updates for that product, please "
"login to your satellite web interface "
"and subscribe this system to the appropriate "
"software channels to get updates for that "
"product. See Kbase article 11313 "
"for more details. "
"(http://kbase.redhat.com/faq/docs/DOC-11313)")
CHANNELS_HOSTED_WARNING = _("Warning: If an installed product on this system "
"is not listed above, you "
"will not receive updates or support for that "
"product. If you would like "
"to receive updates for that product, please "
"visit http://rhn.redhat.com/ "
"and subscribe this system to the appropriate "
"software channels to get updates for that "
"product. See Kbase article 11313 "
"for more details. "
"(http://kbase.redhat.com/faq/docs/DOC-11313)")
YUM_PLUGIN_WARNING = _("Warning: yum-rhn-plugin is not present, could not enable it.\n"
"Automatic updates will not work.")
YUM_PLUGIN_CONF_CHANGED = _("Note: yum-rhn-plugin has been enabled.")
YUM_PLUGIN_CONF_ERROR = _("Warning: An error occurred during enabling yum-rhn-plugin.\n"
"yum-rhn-plugin is not enabled.\n"
"Automatic updates will not work.")
FAILED_CHANNELS = _("You were unable to be subscribed to the following "
"software channels because there were insufficient "
"subscriptions available in your account:")
NO_BASE_CHANNEL = _(
"This system was unable to subscribe to any software channels. Your system "
"will not receive any software updates to keep it secure and supported. There "
"are a few things you can try to resolve this situation:\n(1) Log in to "
"http://rhn.redhat.com/ and unentitle an inactive system at "
"Your RHN > Subscription Management > System Entitlements.\n"
"(2) Purchase an additional Red Hat Enterprise Linux subscription at "
"http://www.redhat.com/store/.\n(3) Activate a new "
"installation number at http://www.redhat.com/now/. Once you make the "
"appropriate active subscriptions available in your account, you may browse "
"to this system's profile in the RHN web interface and subscribe this system "
"to software channels via the software > software channels tab.")
SLOTS_TITLE = _("RHN service level:")
OK_SLOTS = _("Depending on what RHN modules are associated with a system, you'll "
"enjoy different benefits of Red Hat Network. The following are the "
"RHN modules associated with this system:")
SLOTS = SLOTS_TITLE + "\n" + OK_SLOTS + "\n%s"
FAILED_SLOTS = _("This system was unable to be associated with the "
"following RHN module(s) because there were "
"insufficient subscriptions available in your account:")
UPDATES = _("Update module: per-system updates, email errata "
"notifications, errata information")
MANAGEMENT = _("Management module: automatic updates, systems "
"grouping, systems permissions, system package profiling")
PROVISIONING = _("Provisioning module: bare-metal provisioning, existing state provisioning, "
"rollbacks, configuration management")
MONITORING = _("Monitoring module: pre-defined and custom system "
"performance probes, system performance email "
"notifications, graphs of system performance")
VIRT = _("Virtualization module: software updates for a limited number of "
"virtual guests on this system.")
VIRT_PLATFORM = _("Virtualization Platform module: software updates for an "
"unlimited number virtual guests of this system, access to additional "
"software channels for guests of this system.")
VIRT_FAILED = _("<b>Warning:</b> Any guest systems you create on this system "
"and register to RHN will consume Red Hat Enterprise Linux "
"subscriptions beyond this host system's subscription. You will need "
"to: (1) make a virtualization or virtualization platform system "
"entitlement available and (2) apply that system entitlement to this "
"system in RHN's web interface if you do not want virtual guests of "
"this system to consume additional subscriptions.")
NO_SYS_ENTITLEMENT = _("This system was unable to be associated with "
"any RHN service level modules. This system will not receive any software "
"updates to keep it secure and supported. There "
"are a few things you can try to resolve this situation:\n(1) Log in to "
"http://rhn.redhat.com/ and unentitle an inactive system at "
"Your RHN > Subscription Management > System Entitlements.\n"
"(2) Purchase an additional Red Hat Enterprise Linux subscription at "
"http://www.redhat.com/store/.\n(3) Activate a new "
"installation number at http://www.redhat.com/now/. Once you make the "
"appropriate active subscriptions available in your account, you may browse "
"to this system's profile in the RHN web interface, delete the profile, and "
"re-connect this system to Red Hat Network.")
ACTIVATION_KEY = _("Universal default activation key detected\n"
"A universal default activation key was detected in your RHN organization. "
"What this means is that a set of properties (software channel subscriptions, "
"Red Hat Network service, package installations, system group memberships, etc.) "
"for your system's connection to Red Hat Network "
"have been determined by the activation key rather than your "
"installation number. "
"You may also refer to the RHN Reference Guide, section 6.4.6 for more details "
"about activation keys (http://rhn.redhat.com/rhn/help/reference/)\n"
"Universal Default activation key: %s")
# Error Messages.
FATAL_ERROR = _("Fatal Error")
WARNING = _("Warning")
HOSTED_CONNECTION_ERROR = _("We can't contact the Red Hat Network Server.\n\n"
"Double check the location provided - is '%s' correct?\n"
"If not, you can correct it and try again.\n\n"
"Make sure that the network connection on this system is operational.\n\n"
"This system will not be able to successfully receive software updates "
"from Red Hat without connecting to a Red Hat Network server")
BASECHANNELERROR = _("Architecture: %s, OS Release: %s, OS "
"Version: %s")
SERVER_TOO_OLD = _("This server doesn't support functionality "
"needed by this version of the software update"
" setup client. Please try again with a newer "
"server.")
SSL_CERT_ERROR_MSG = _("<b><span size=\"16000\">Incompatible Certificate File</span></b>\n\n"
"The certificate you provided, <b>%s</b>, is not compatible with "
" the Red Hat Network server at <b>%s</b>. You may want to double-check"
" that you have provided a valid certificate file."
" Are you sure you have provided the correct certificate, and that"
" the certificate file has not been corrupted?\n\n"
"Please try again with a different certificate file.")
SSL_CERT_EXPIRED = _("<b><span size=\"12000\">Incompatible Certificate File</span></b>\n\n"
" The certificate is expired. Please ensure you have the correct "
" certificate and your system time is correct.")
SSL_CERT_FILE_NOT_FOUND_ERRER = _("Please verify the value of sslCACert in "
"/etc/sysconfig/rhn/up2date")
ACT_KEY_USAGE_LIMIT_ERROR = _("Problem registering system.\n\n"
"A universal default activation key limits the "
"number of systems which can connect to "
"the RHN organization associated with your "
"login. To allow this system to connect, "
"please contact your RHN organization "
"administrator to increase the number of "
"systems allowed to connect or to disable "
"this universal default activation key. "
"More details can be found in Red Hat "
"Knowledgebase Article #7924 at "
"http://kbase.redhat.com/faq/FAQ_61_7924.shtm ")
CHANNEL_PAGE_TIP = _("\n Tip: Minor releases with a '*' are currently"
" supported by Red Hat.\n\n")
CHANNEL_PAGE_WARNING = _("Warning:You will not be able to limit this"
" system to minor release that is older than"
" the recent minor release if you select this"
" option.\n")
CONFIRM_OS_WARNING = _("Your system will be subscribed to %s \n"
"base software channel. You will not be\n"
"able to move this system to an earlier release\n"
"(you will be able to move to a newer release).\n"
"Are you sure you would like to continue?")
# Navigation
OK = _("OK")
ERROR = _("Error")
NEXT = _("Next")
BACK = _("Back")
CANCEL = _("Cancel")
NO_CANCEL = _("No, Cancel")
YES_CONT = _("Yes, Continue")
DESELECT = _("Press <space> to deselect the option.")
| davidhrbac/spacewalk | client/debian/packages-already-in-debian/rhn-client-tools/src/up2date_client/rhnreg_constants.py | Python | gpl-2.0 | 20,209 | [
"VisIt"
] | d449611b88bf14d2415283630b99c685b13881f5f3312164427c6b97e3fb0e9e |
"""
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
# NOTE: the module docstring above is printed at runtime (print(__doc__)),
# so its text is part of the program's output.
print(__doc__)

# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause

import time
import numpy as np
import scipy as sp
import pylab as pl
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering

lena = sp.misc.lena()

# Downsample the image by a factor of 4: each pass sums 2x2 pixel blocks,
# halving both dimensions, and two passes are applied.
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]

# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)

# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi.
# eps keeps every edge weight strictly positive.
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps

# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11

###############################################################################
# Visualize the resulting regions, once per label-assignment strategy.
for assign_labels in ('kmeans', 'discretize'):
    t0 = time.time()
    labels = spectral_clustering(graph, n_clusters=N_REGIONS,
                                 assign_labels=assign_labels,
                                 random_state=1)
    t1 = time.time()
    # Back from the flat graph ordering to image coordinates.
    labels = labels.reshape(lena.shape)
    pl.figure(figsize=(5, 5))
    pl.imshow(lena, cmap=pl.cm.gray)
    for l in range(N_REGIONS):
        # Outline each region with a distinct colormap color.
        # NOTE(review): 'contours' is not a documented matplotlib contour()
        # keyword (the level count is positional) -- confirm it is not
        # silently ignored.
        pl.contour(labels == l, contours=1,
                   colors=[pl.cm.spectral(l / float(N_REGIONS)), ])
    pl.xticks(())
    pl.yticks(())
    pl.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
pl.show()
| JT5D/scikit-learn | examples/cluster/plot_lena_segmentation.py | Python | bsd-3-clause | 2,421 | [
"Brian"
] | facf65b980bc9891fa47ed027d22d6f1f7251cb7a149376d436389cd25915892 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: NOAA APT Satellite Receiver
# Author: Brian McLaughlin
# Generated: Thu Mar 31 13:26:49 2016
##################################################
import threading
if __name__ == '__main__':
    import ctypes
    import sys
    if sys.platform.startswith('linux'):
        try:
            # GRC boilerplate: ask X11 to become thread-safe before the Qt
            # GUI starts.  Best-effort: failure is warned about, not fatal.
            x11 = ctypes.cdll.LoadLibrary('libX11.so')
            x11.XInitThreads()
        except:
            print "Warning: failed to XInitThreads()"
import os
import sys
sys.path.append(os.environ.get('GRC_HIER_PATH', os.path.expanduser('~/.grc_gnuradio')))
from PyQt4 import Qt
from PyQt4.QtCore import QObject, pyqtSlot
from apt_am_demod import apt_am_demod # grc-generated hier_block
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import gr, blocks
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import math
import sip
class apt_rx(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "NOAA APT Satellite Receiver")
Qt.QWidget.__init__(self)
self.setWindowTitle("NOAA APT Satellite Receiver")
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "apt_rx")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
self._lock = threading.RLock()
##################################################
# Variables
##################################################
self.satellite_select = satellite_select = 137.62
self.valid_gains = valid_gains = [0.0, 0.9, 1.4, 2.7, 3.7, 7.7, 8.7, 12.5, 14.4, 15.7, 16.6, 19.7, 20.7, 22.9, 25.4, 28.0, 29.7, 32.8, 33.8, 36.4, 37.2, 38.6, 40.2, 42.1, 43.4, 43.9, 44.5, 48.0, 49.6]
self.satellite_frequency = satellite_frequency = satellite_select * 1e6
self.rf_samp_rate = rf_samp_rate = 2.048e6
self.max_doppler = max_doppler = 3000
self.fsk_deviation_hz = fsk_deviation_hz = 17000
self.am_carrier = am_carrier = 2400
self.tuner_frequency = tuner_frequency = satellite_frequency - (rf_samp_rate / 4)
self.rf_gain = rf_gain = valid_gains[-1]
self.rail_level = rail_level = 0.5
self.processing_rate = processing_rate = 256000
self.fm_bandwidth = fm_bandwidth = (2 * (fsk_deviation_hz + am_carrier)) + max_doppler
self.baud_rate = baud_rate = 4160
##################################################
# Blocks
##################################################
self.tabs_top = Qt.QTabWidget()
self.tabs_top_widget_0 = Qt.QWidget()
self.tabs_top_layout_0 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tabs_top_widget_0)
self.tabs_top_grid_layout_0 = Qt.QGridLayout()
self.tabs_top_layout_0.addLayout(self.tabs_top_grid_layout_0)
self.tabs_top.addTab(self.tabs_top_widget_0, "RF Recieve")
self.tabs_top_widget_1 = Qt.QWidget()
self.tabs_top_layout_1 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tabs_top_widget_1)
self.tabs_top_grid_layout_1 = Qt.QGridLayout()
self.tabs_top_layout_1.addLayout(self.tabs_top_grid_layout_1)
self.tabs_top.addTab(self.tabs_top_widget_1, "APT Signal")
self.tabs_top_widget_2 = Qt.QWidget()
self.tabs_top_layout_2 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tabs_top_widget_2)
self.tabs_top_grid_layout_2 = Qt.QGridLayout()
self.tabs_top_layout_2.addLayout(self.tabs_top_grid_layout_2)
self.tabs_top.addTab(self.tabs_top_widget_2, "APT Baseband")
self.top_grid_layout.addWidget(self.tabs_top, 2, 0, 1, 4)
self.tabs_rf = Qt.QTabWidget()
self.tabs_rf_widget_0 = Qt.QWidget()
self.tabs_rf_layout_0 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tabs_rf_widget_0)
self.tabs_rf_grid_layout_0 = Qt.QGridLayout()
self.tabs_rf_layout_0.addLayout(self.tabs_rf_grid_layout_0)
self.tabs_rf.addTab(self.tabs_rf_widget_0, "Spectrum")
self.tabs_rf_widget_1 = Qt.QWidget()
self.tabs_rf_layout_1 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tabs_rf_widget_1)
self.tabs_rf_grid_layout_1 = Qt.QGridLayout()
self.tabs_rf_layout_1.addLayout(self.tabs_rf_grid_layout_1)
self.tabs_rf.addTab(self.tabs_rf_widget_1, "Waterfall")
self.tabs_rf_widget_2 = Qt.QWidget()
self.tabs_rf_layout_2 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tabs_rf_widget_2)
self.tabs_rf_grid_layout_2 = Qt.QGridLayout()
self.tabs_rf_layout_2.addLayout(self.tabs_rf_grid_layout_2)
self.tabs_rf.addTab(self.tabs_rf_widget_2, "Scope")
self.tabs_top_layout_0.addWidget(self.tabs_rf)
self.tabs_apt_data = Qt.QTabWidget()
self.tabs_apt_data_widget_0 = Qt.QWidget()
self.tabs_apt_data_layout_0 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tabs_apt_data_widget_0)
self.tabs_apt_data_grid_layout_0 = Qt.QGridLayout()
self.tabs_apt_data_layout_0.addLayout(self.tabs_apt_data_grid_layout_0)
self.tabs_apt_data.addTab(self.tabs_apt_data_widget_0, "Scope")
self.tabs_apt_data_widget_1 = Qt.QWidget()
self.tabs_apt_data_layout_1 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tabs_apt_data_widget_1)
self.tabs_apt_data_grid_layout_1 = Qt.QGridLayout()
self.tabs_apt_data_layout_1.addLayout(self.tabs_apt_data_grid_layout_1)
self.tabs_apt_data.addTab(self.tabs_apt_data_widget_1, "Raster")
self.tabs_top_layout_1.addWidget(self.tabs_apt_data)
self._satellite_select_options = [137.62, 137.9125, 137.1]
self._satellite_select_labels = ['NOAA 15 (137.62 MHz)', 'NOAA 18 (137.9125 MHz)', 'NOAA 19 (137.1 MHz)']
self._satellite_select_tool_bar = Qt.QToolBar(self)
self._satellite_select_tool_bar.addWidget(Qt.QLabel("Satellite Select"+": "))
self._satellite_select_combo_box = Qt.QComboBox()
self._satellite_select_tool_bar.addWidget(self._satellite_select_combo_box)
for label in self._satellite_select_labels: self._satellite_select_combo_box.addItem(label)
self._satellite_select_callback = lambda i: Qt.QMetaObject.invokeMethod(self._satellite_select_combo_box, "setCurrentIndex", Qt.Q_ARG("int", self._satellite_select_options.index(i)))
self._satellite_select_callback(self.satellite_select)
self._satellite_select_combo_box.currentIndexChanged.connect(
lambda i: self.set_satellite_select(self._satellite_select_options[i]))
self.top_grid_layout.addWidget(self._satellite_select_tool_bar, 0, 0, 1, 1)
self._rf_gain_options = valid_gains
self._rf_gain_labels = map(str, self._rf_gain_options)
self._rf_gain_tool_bar = Qt.QToolBar(self)
self._rf_gain_tool_bar.addWidget(Qt.QLabel("RF Gain"+": "))
self._rf_gain_combo_box = Qt.QComboBox()
self._rf_gain_tool_bar.addWidget(self._rf_gain_combo_box)
for label in self._rf_gain_labels: self._rf_gain_combo_box.addItem(label)
self._rf_gain_callback = lambda i: Qt.QMetaObject.invokeMethod(self._rf_gain_combo_box, "setCurrentIndex", Qt.Q_ARG("int", self._rf_gain_options.index(i)))
self._rf_gain_callback(self.rf_gain)
self._rf_gain_combo_box.currentIndexChanged.connect(
lambda i: self.set_rf_gain(self._rf_gain_options[i]))
self.top_grid_layout.addWidget(self._rf_gain_tool_bar, 0, 1 , 1, 1)
self.qtgui_waterfall_sink_x_0 = qtgui.waterfall_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
satellite_frequency, #fc
processing_rate // 2, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0.set_update_time(0.50)
self.qtgui_waterfall_sink_x_0.enable_grid(False)
if not False:
self.qtgui_waterfall_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0.set_plot_pos_half(not True)
labels = ["", "", "", "", "",
"", "", "", "", ""]
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0.set_intensity_range(-40, 0)
self._qtgui_waterfall_sink_x_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0.pyqwidget(), Qt.QWidget)
self.tabs_rf_layout_1.addWidget(self._qtgui_waterfall_sink_x_0_win)
self.qtgui_time_sink_x_0_0 = qtgui.time_sink_f(
baud_rate / 2, #size
baud_rate, #samp_rate
'APT Full Line', #name
1 #number of inputs
)
self.qtgui_time_sink_x_0_0.set_update_time(0.10)
self.qtgui_time_sink_x_0_0.set_y_axis(-0.5, 1.5)
self.qtgui_time_sink_x_0_0.set_y_label("Amplitude", "")
self.qtgui_time_sink_x_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0.01, 0, 'SyncA')
self.qtgui_time_sink_x_0_0.enable_autoscale(False)
self.qtgui_time_sink_x_0_0.enable_grid(False)
self.qtgui_time_sink_x_0_0.enable_control_panel(False)
if not False:
self.qtgui_time_sink_x_0_0.disable_legend()
labels = ["", "", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.tabs_apt_data_layout_0.addWidget(self._qtgui_time_sink_x_0_0_win)
self.qtgui_time_sink_x_0 = qtgui.time_sink_c(
1024, #size
processing_rate // 2, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0.set_update_time(0.10)
self.qtgui_time_sink_x_0.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0.set_y_label("Amplitude", "")
self.qtgui_time_sink_x_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_0.enable_autoscale(False)
self.qtgui_time_sink_x_0.enable_grid(False)
self.qtgui_time_sink_x_0.enable_control_panel(False)
if not True:
self.qtgui_time_sink_x_0.disable_legend()
labels = ["", "", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2*1):
if len(labels[i]) == 0:
if(i % 2 == 0):
self.qtgui_time_sink_x_0.set_line_label(i, "Re{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0.set_line_label(i, "Im{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
self.tabs_rf_layout_2.addWidget(self._qtgui_time_sink_x_0_win)
self.qtgui_time_raster_sink_x_0 = qtgui.time_raster_sink_f(
baud_rate,
120*3,
baud_rate // 2,
([]),
([]),
"",
1,
)
self.qtgui_time_raster_sink_x_0.set_update_time(0.10)
self.qtgui_time_raster_sink_x_0.set_intensity_range(-0.5, 1.5)
self.qtgui_time_raster_sink_x_0.enable_grid(False)
labels = ["", "", "", "", "",
"", "", "", "", ""]
colors = [1, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_raster_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_raster_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_raster_sink_x_0.set_color_map(i, colors[i])
self.qtgui_time_raster_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_raster_sink_x_0_win = sip.wrapinstance(self.qtgui_time_raster_sink_x_0.pyqwidget(), Qt.QWidget)
self.tabs_apt_data_layout_1.addWidget(self._qtgui_time_raster_sink_x_0_win)
self.qtgui_freq_sink_x_1_0 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
16.64e3, #bw
"", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_1_0.set_update_time(0.10)
self.qtgui_freq_sink_x_1_0.set_y_axis(-40, 0)
self.qtgui_freq_sink_x_1_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_1_0.enable_autoscale(False)
self.qtgui_freq_sink_x_1_0.enable_grid(False)
self.qtgui_freq_sink_x_1_0.set_fft_average(0.2)
self.qtgui_freq_sink_x_1_0.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_1_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_1_0.set_plot_pos_half(not True)
labels = ["Raw", "AGC Output", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_1_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_1_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_1_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_1_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_1_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_1_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_1_0.pyqwidget(), Qt.QWidget)
self.tabs_top_layout_2.addWidget(self._qtgui_freq_sink_x_1_0_win)
self.qtgui_freq_sink_x_1 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
satellite_frequency, #fc
processing_rate // 2, #bw
"", #name
2 #number of inputs
)
self.qtgui_freq_sink_x_1.set_update_time(0.10)
self.qtgui_freq_sink_x_1.set_y_axis(-100, 0)
self.qtgui_freq_sink_x_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_1.enable_autoscale(False)
self.qtgui_freq_sink_x_1.enable_grid(False)
self.qtgui_freq_sink_x_1.set_fft_average(0.2)
self.qtgui_freq_sink_x_1.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_1.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_1.set_plot_pos_half(not True)
labels = ["Raw", "AGC Output", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_1.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_1.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_1.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_1.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_1_win = sip.wrapinstance(self.qtgui_freq_sink_x_1.pyqwidget(), Qt.QWidget)
self.tabs_rf_layout_0.addWidget(self._qtgui_freq_sink_x_1_win)
self.low_pass_filter_0_0 = filter.fir_filter_ccf(1, firdes.low_pass(
1, processing_rate // 2, fm_bandwidth + 1e3, 1e3, firdes.WIN_HAMMING, 6.76))
self.low_pass_filter_0 = filter.fir_filter_ccf(2, firdes.low_pass(
1, processing_rate, 60e3, 15e3, firdes.WIN_HAMMING, 6.76))
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, processing_rate,True)
self.blocks_float_to_complex_0 = blocks.float_to_complex(1)
self.blocks_file_source_0 = blocks.file_source(gr.sizeof_gr_complex*1, "/Users/bjmclaug/Downloads/noaa-12_256k.dat", False)
self.blocks_file_meta_sink_0 = blocks.file_meta_sink(gr.sizeof_float*1, "/Users/bjmclaug/source/stem_station/noaa12_sample.dat", baud_rate, 1, blocks.GR_FILE_FLOAT, False, baud_rate * (60 * 20), "", True)
self.blocks_file_meta_sink_0.set_unbuffered(False)
self.blocks_complex_to_float_0 = blocks.complex_to_float(1)
self.apt_am_demod_0 = apt_am_demod(
parameter_apt_gain=1,
parameter_samp_rate=processing_rate / 2,
)
self.analog_rail_ff_0_0 = analog.rail_ff(-rail_level, rail_level)
self.analog_rail_ff_0 = analog.rail_ff(-rail_level, rail_level)
self.analog_quadrature_demod_cf_0 = analog.quadrature_demod_cf((processing_rate // 2)/(2*math.pi*fsk_deviation_hz/8.0))
self.analog_agc3_xx_0 = analog.agc3_cc(0.25, 0.5, 0.9, 1.0, 1)
self.analog_agc3_xx_0.set_max_gain(1)
##################################################
# Connections
##################################################
self.connect((self.analog_agc3_xx_0, 0), (self.blocks_complex_to_float_0, 0))
self.connect((self.analog_quadrature_demod_cf_0, 0), (self.apt_am_demod_0, 0))
self.connect((self.analog_rail_ff_0, 0), (self.blocks_float_to_complex_0, 0))
self.connect((self.analog_rail_ff_0_0, 0), (self.blocks_float_to_complex_0, 1))
self.connect((self.apt_am_demod_0, 0), (self.blocks_file_meta_sink_0, 0))
self.connect((self.apt_am_demod_0, 0), (self.qtgui_time_raster_sink_x_0, 0))
self.connect((self.apt_am_demod_0, 0), (self.qtgui_time_sink_x_0_0, 0))
self.connect((self.blocks_complex_to_float_0, 0), (self.analog_rail_ff_0, 0))
self.connect((self.blocks_complex_to_float_0, 1), (self.analog_rail_ff_0_0, 0))
self.connect((self.blocks_file_source_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_float_to_complex_0, 0), (self.low_pass_filter_0_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.low_pass_filter_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.analog_agc3_xx_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.qtgui_freq_sink_x_1, 0))
self.connect((self.low_pass_filter_0_0, 0), (self.analog_quadrature_demod_cf_0, 0))
self.connect((self.low_pass_filter_0_0, 0), (self.qtgui_freq_sink_x_1, 1))
self.connect((self.low_pass_filter_0_0, 0), (self.qtgui_time_sink_x_0, 0))
self.connect((self.low_pass_filter_0_0, 0), (self.qtgui_waterfall_sink_x_0, 0))
    def closeEvent(self, event):
        # Persist the window geometry to QSettings so the layout can be
        # restored on the next run (see restoreGeometry in __init__).
        self.settings = Qt.QSettings("GNU Radio", "apt_rx")
        self.settings.setValue("geometry", self.saveGeometry())
        event.accept()
    # ------------------------------------------------------------------
    # GRC-generated variable accessors.  Each setter takes self._lock and
    # pushes the new value into every dependent variable and flowgraph
    # block, mirroring the dependency graph of the .grc file.
    # ------------------------------------------------------------------

    def get_satellite_select(self):
        return self.satellite_select

    def set_satellite_select(self, satellite_select):
        # Selected satellite in MHz; derives the RF frequency and syncs the UI.
        with self._lock:
            self.satellite_select = satellite_select
            self.set_satellite_frequency(self.satellite_select * 1e6)
            self._satellite_select_callback(self.satellite_select)

    def get_valid_gains(self):
        return self.valid_gains

    def set_valid_gains(self, valid_gains):
        # Resets the RF gain to the largest entry of the new table.
        with self._lock:
            self.valid_gains = valid_gains
            self.set_rf_gain(self.valid_gains[-1])

    def get_satellite_frequency(self):
        return self.satellite_frequency

    def set_satellite_frequency(self, satellite_frequency):
        # Retunes and re-centers the spectrum/waterfall displays.
        with self._lock:
            self.satellite_frequency = satellite_frequency
            self.set_tuner_frequency(self.satellite_frequency - (self.rf_samp_rate / 4))
            self.qtgui_freq_sink_x_1.set_frequency_range(self.satellite_frequency, self.processing_rate // 2)
            self.qtgui_waterfall_sink_x_0.set_frequency_range(self.satellite_frequency, self.processing_rate // 2)

    def get_rf_samp_rate(self):
        return self.rf_samp_rate

    def set_rf_samp_rate(self, rf_samp_rate):
        with self._lock:
            self.rf_samp_rate = rf_samp_rate
            self.set_tuner_frequency(self.satellite_frequency - (self.rf_samp_rate / 4))

    def get_max_doppler(self):
        return self.max_doppler

    def set_max_doppler(self, max_doppler):
        with self._lock:
            self.max_doppler = max_doppler
            self.set_fm_bandwidth((2 * (self.fsk_deviation_hz + self.am_carrier)) + self.max_doppler)

    def get_fsk_deviation_hz(self):
        return self.fsk_deviation_hz

    def set_fsk_deviation_hz(self, fsk_deviation_hz):
        # Affects both the FM bandwidth and the quadrature demod gain.
        with self._lock:
            self.fsk_deviation_hz = fsk_deviation_hz
            self.set_fm_bandwidth((2 * (self.fsk_deviation_hz + self.am_carrier)) + self.max_doppler)
            self.analog_quadrature_demod_cf_0.set_gain((self.processing_rate // 2)/(2*math.pi*self.fsk_deviation_hz/8.0))

    def get_am_carrier(self):
        return self.am_carrier

    def set_am_carrier(self, am_carrier):
        with self._lock:
            self.am_carrier = am_carrier
            self.set_fm_bandwidth((2 * (self.fsk_deviation_hz + self.am_carrier)) + self.max_doppler)

    def get_tuner_frequency(self):
        return self.tuner_frequency

    def set_tuner_frequency(self, tuner_frequency):
        # No dependents; only stores the value.
        with self._lock:
            self.tuner_frequency = tuner_frequency

    def get_rf_gain(self):
        return self.rf_gain

    def set_rf_gain(self, rf_gain):
        with self._lock:
            self.rf_gain = rf_gain
            self._rf_gain_callback(self.rf_gain)

    def get_rail_level(self):
        return self.rail_level

    def set_rail_level(self, rail_level):
        # Symmetric clipping limits for both rail blocks (I and Q paths).
        with self._lock:
            self.rail_level = rail_level
            self.analog_rail_ff_0.set_lo(-self.rail_level)
            self.analog_rail_ff_0.set_hi(self.rail_level)
            self.analog_rail_ff_0_0.set_lo(-self.rail_level)
            self.analog_rail_ff_0_0.set_hi(self.rail_level)

    def get_processing_rate(self):
        return self.processing_rate

    def set_processing_rate(self, processing_rate):
        # Central sample rate: retaps filters, rescales sinks, throttle,
        # demod gain and the APT demodulator.
        with self._lock:
            self.processing_rate = processing_rate
            self.analog_quadrature_demod_cf_0.set_gain((self.processing_rate // 2)/(2*math.pi*self.fsk_deviation_hz/8.0))
            self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.processing_rate, 60e3, 15e3, firdes.WIN_HAMMING, 6.76))
            self.low_pass_filter_0_0.set_taps(firdes.low_pass(1, self.processing_rate // 2, self.fm_bandwidth + 1e3, 1e3, firdes.WIN_HAMMING, 6.76))
            self.qtgui_freq_sink_x_1.set_frequency_range(self.satellite_frequency, self.processing_rate // 2)
            self.qtgui_time_sink_x_0.set_samp_rate(self.processing_rate // 2)
            self.qtgui_waterfall_sink_x_0.set_frequency_range(self.satellite_frequency, self.processing_rate // 2)
            self.apt_am_demod_0.set_parameter_samp_rate(self.processing_rate / 2)
            self.blocks_throttle_0.set_sample_rate(self.processing_rate)

    def get_fm_bandwidth(self):
        return self.fm_bandwidth

    def set_fm_bandwidth(self, fm_bandwidth):
        with self._lock:
            self.fm_bandwidth = fm_bandwidth
            self.low_pass_filter_0_0.set_taps(firdes.low_pass(1, self.processing_rate // 2, self.fm_bandwidth + 1e3, 1e3, firdes.WIN_HAMMING, 6.76))

    def get_baud_rate(self):
        return self.baud_rate

    def set_baud_rate(self, baud_rate):
        with self._lock:
            self.baud_rate = baud_rate
            self.qtgui_time_sink_x_0_0.set_samp_rate(self.baud_rate)
            self.qtgui_time_raster_sink_x_0.set_num_cols(self.baud_rate // 2)
def main(top_block_cls=apt_rx, options=None):
    """Instantiate the flowgraph, start it, and run the Qt event loop."""
    from distutils.version import StrictVersion
    if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
        # Honor the graphics-system preference from the GNU Radio config.
        style = gr.prefs().get_string('qtgui', 'style', 'raster')
        Qt.QApplication.setGraphicsSystem(style)
    qapp = Qt.QApplication(sys.argv)

    tb = top_block_cls()
    tb.start()
    tb.show()

    def quitting():
        # Stop the flowgraph cleanly when the Qt application exits.
        tb.stop()
        tb.wait()
    qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
    qapp.exec_()

if __name__ == '__main__':
    main()
| SpinStabilized/stem_station | grc_files/apt_rx.py | Python | gpl-3.0 | 27,999 | [
"Brian"
] | bbd66f090d352ba80ab1d8565cc71972458ee9ae44e2e7163e1d5eb597bfe45b |
# proxy module
from __future__ import absolute_import
from mayavi.scripts.util import *
| enthought/etsproxy | enthought/mayavi/scripts/util.py | Python | bsd-3-clause | 88 | [
"Mayavi"
] | e8bf2234608071c274c1a59a0f15c660e1fc3aab715bf8c3e1e1eefe323a90d3 |
# Copyright 2004-2010 PyTom <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import renpy
import random
def compiling(loc):
    """Record in renpy.game.exception_info that ATL code at `loc` (a
    (filename, line) pair) is being compiled."""
    filename, lineno = loc
    renpy.game.exception_info = "Compiling ATL code at %s:%d" % (filename, lineno)
def executing(loc):
    """Record in renpy.game.exception_info that ATL code at `loc` (a
    (filename, line) pair) is being executed."""
    filename, lineno = loc
    renpy.game.exception_info = "Executing ATL code at %s:%d" % (filename, lineno)
# Registry mapping each time-warp function's name to the function itself.
warpers = { }

def atl_warper(f):
    """Decorator: register `f` in `warpers` under its own name, then
    return it unchanged."""
    warpers[f.func_name] = f
    return f
# Default warper, used when no explicit warper is given: holds at 0.0
# until the interpolation completes (t >= 1.0), then jumps to 1.0.
@atl_warper
def pause(t):
    return 1.0 if t >= 1.0 else 0.0
# Sentinel type spec meaning "a position value": correct_type() coerces such
# values to the type of the interpolation target rather than to a fixed type.
position = object()

# Property names understood by ATL, mapped to the type spec (or tuple of
# per-component type specs) used when coercing interpolated values.
PROPERTIES = {
    "pos" : (position, position),
    "xpos" : position,
    "ypos" : position,
    "anchor" : (position, position),
    "xanchor" : position,
    "yanchor" : position,
    "xaround" : position,
    "yaround" : position,
    "xanchoraround" : float,
    "yanchoraround" : float,
    "align" : (float, float),
    "xalign" : float,
    "yalign" : float,
    "rotate" : float,
    "xzoom" : float,
    "yzoom" : float,
    "zoom" : float,
    "alpha" : float,
    "around" : (position, position),
    "alignaround" : (float, float),
    "angle" : float,
    "radius" : float,
    "crop" : (float, float, float, float),
    "size" : (int, int),
    "corner1" : (float, float),
    "corner2" : (float, float),
    "subpixel" : bool,
    "delay" : float,
}
def correct_type(v, b, ty):
    """
    Coerce `v` according to the type spec `ty`.  For the `position`
    sentinel, the coercion target is the type of `b` (the value being
    matched); otherwise `ty` itself is applied.
    """
    caster = type(b) if ty is position else ty
    return caster(v)
def interpolate(t, a, b, type):
    """
    Linearly interpolate from `a` toward `b` at completion fraction `t`,
    coercing the result with the type spec `type`.
    """
    # Once the interpolation completes, the target value wins outright.
    if t >= 1.0:
        return b

    # Tuples interpolate componentwise, pairing components with their types.
    if isinstance(b, tuple):
        return tuple([interpolate(t, i, j, ty) for i, j, ty in zip(a, b, type)])

    # Values that cannot be blended (None, booleans) keep the old value
    # until the interpolation is finished.
    if b is None or isinstance(b, bool):
        return a

    # Numeric case: a missing start value counts as zero.
    start = 0 if a is None else a
    return correct_type(start + t * (b - a), b, type)
# Interpolate the value of a spline. This code is based on Aenakume's code,
# from 00splines.rpy.
def interpolate_spline(t, spline):
    """
    Evaluate a Bezier spline with control points `spline` at parameter t.

    Supports 2 control points (linear), 3 (quadratic) or 4 (cubic);
    tuple-valued control points are evaluated componentwise.
    """

    # Tuple-valued control points: recurse on each component.
    if isinstance(spline[-1], tuple):
        return tuple(interpolate_spline(t, i) for i in zip(*spline))

    if len(spline) == 2:
        # Linear interpolation between the two endpoints.
        t_p = 1.0 - t
        rv = t_p * spline[0] + t * spline[-1]

    elif len(spline) == 3:
        # Quadratic Bezier: Bernstein weights (1-t)^2, 2t(1-t), t^2.
        t_pp = (1.0 - t)**2
        t_p = 2 * t * (1.0 - t)
        t2 = t**2
        rv = t_pp * spline[0] + t_p * spline[1] + t2 * spline[2]

    elif len(spline) == 4:
        # Cubic Bezier: Bernstein weights (1-t)^3, 3t(1-t)^2, 3t^2(1-t), t^3.
        t_ppp = (1.0 - t)**3
        t_pp = 3 * t * (1.0 - t)**2
        t_p = 3 * t**2 * (1.0 - t)
        t3 = t**3
        rv = t_ppp * spline[0] + t_pp * spline[1] + t_p * spline[2] + t3 * spline[3]

    else:
        raise Exception("ATL can't interpolate splines of length %d." % len(spline))

    # Coerce to the type of the final control point (position semantics).
    return correct_type(rv, spline[-1], position)
# This is the context used when compiling an ATL statement. It stores the
# scopes that are used to evaluate the various expressions in the statement,
# and has a method to do the evaluation and return a result.
class Context(object):

    def __init__(self, context):
        # Mapping of names used as the local scope when evaluating
        # expressions.  (Accessed directly by ATLTransformBase as
        # self.context.context -- do not rename.)
        self.context = context

    def eval(self, expr):
        """
        Evaluate `expr` with the Ren'Py store as globals and this
        context's mapping as locals, returning the result.
        """
        return eval(expr, renpy.store.__dict__, self.context)
# This is intended to be subclassed by ATLTransform. It takes care of
# managing ATL execution, which allows ATLTransform itself to not care
# much about the contents of this file.
class ATLTransformBase(renpy.object.Object):
# Compatibility with older saves.
parameters = renpy.ast.ParameterInfo([ ], [ ], None, None)
def __init__(self, atl, context, parameters):
super(ATLTransformBase, self).__init__()
if parameters is None:
parameters = ATLTransformBase.parameters
# The parameters that we take.
self.parameters = parameters
# The raw code that makes up this ATL statement.
self.atl = atl
# The context in which execution occurs.
self.context = Context(context)
# The code after it has been compiled into a block.
self.block = None
# The properties of the block, if it contains only an
# Interpolation.
self.properties = None
# The state of the statement we are executing. As this can be
# shared between more than one object (in the case of a hide),
# the data must not be altered.
self.atl_state = None
# Are we done?
self.done = False
# The transform event we are going to process.
self.transform_event = None
# The transform event we last processed.
self.last_transform_event = None
# The child transform event we last processed.
self.last_child_transform_event = None
def take_execution_state(self, t):
"""
Updates self to begin executing from the same point as t. This
requires that t.atl is self.atl.
"""
self.done = t.done
self.block = t.block
self.atl_state = t.atl_state
self.transform_event = t.transform_event
self.last_transform_event = t.last_transform_event
self.last_child_transform_event = t.last_child_transform_event
def __call__(self, *args, **kwargs):
context = self.context.context.copy()
for k, v in self.parameters.parameters:
if v is not None:
context[k] = renpy.python.py_eval(v)
positional = list(self.parameters.positional)
args = list(args)
child = self.child
if not positional and args:
child = args.pop(0)
# Handle positional arguments.
while positional and args:
name = positional.pop(0)
value = args.pop(0)
if name in kwargs:
raise Exception('Parameter %r is used as both a positional and keyword argument to a transition.' % name)
context[name] = value
if args:
raise Exception("Too many arguments passed to ATL transform.")
# Handle keyword arguments.
for k, v in kwargs.iteritems():
if k in positional:
positional.remove(k)
context[k] = v
elif k in context:
context[k] = v
elif k == 'child':
child = v
else:
raise Exception('Parameter %r is not known by ATL Transform.' % k)
# Create a new ATL Transform.
parameters = renpy.ast.ParameterInfo({}, positional, None, None)
rv = renpy.display.motion.ATLTransform(
atl=self.atl,
child=child,
style=self.style_arg,
context=context,
parameters=parameters)
rv.take_state(self)
return rv
    def compile(self):
        """
        Compiles the ATL code into a block. As necessary, updates the
        properties.
        """
        # An unbound positional parameter cannot be evaluated at compile time.
        if self.parameters.positional and self.parameters.positional[0][1] is None:
            raise Exception("Cannot compile ATL Transform, as it's missing positional parameter %s." % self.parameters.positional[0])
        # Preserve the global exception-reporting location across compilation.
        old_exception_info = renpy.game.exception_info
        self.block = self.atl.compile(self.context)
        # A block that is a single zero-duration Interpolation is recorded
        # as a plain property list, so it can be folded into other
        # interpolations (see RawMultipurpose.compile).
        if len(self.block.statements) == 1 \
                and isinstance(self.block.statements[0], Interpolation):
            interp = self.block.statements[0]
            if interp.duration == 0 and interp.properties:
                self.properties = interp.properties[:]
        renpy.game.exception_info = old_exception_info
    def execute(self, trans, st, at):
        """
        Run one step of the compiled ATL block against transform `trans`.

        st -- the showing timebase, at -- the animation timebase.
        Returns the pause until execute should be called again, or None
        when execution is finished.
        """
        if self.done:
            return None
        if not self.block:
            self.compile()
        # Propagate transform_events from children.
        if self.child:
            if self.child.transform_event != self.last_child_transform_event:
                self.last_child_transform_event = self.child.transform_event
                self.transform_event = self.child.transform_event
        # Hide request.
        if trans.hide_request:
            self.transform_event = "hide"
        # Notice transform events; an event fires only on a change.
        if self.transform_event != self.last_transform_event:
            event = self.transform_event
            self.last_transform_event = self.transform_event
        else:
            event = None
        old_exception_info = renpy.game.exception_info
        # Pick the timebase declared by the ATL block.
        if self.atl.animation:
            timebase = at
        else:
            timebase = st
        action, arg, pause = self.block.execute(trans, timebase, self.atl_state, event)
        renpy.game.exception_info = old_exception_info
        # print "Executing", self, self.state, self.xpos, self.ypos
        if action == "continue":
            self.atl_state = arg
        else:
            self.done = True
        return pause
def predict(self, callback):
self.atl.predict(self.context, callback)
def visit(self):
if not self.block:
self.compile()
return self.children + self.block.visit()
# The base class for raw ATL statements.
class RawStatement(renpy.object.Object):

    def __init__(self, loc):
        super(RawStatement, self).__init__()
        # The (filename, line number) this statement came from.
        self.loc = loc

    def compile(self, ctx):
        """Compile this RawStatement into a Statement, using `ctx` to evaluate expressions."""
        raise Exception("Compile not implemented.")

    def predict(self, ctx, callback):
        """Predict the images used by this statement. No-op by default."""
        return
# The base class for compiled ATL Statements.
class Statement(renpy.object.Object):
    def __init__(self, loc):
        super(Statement, self).__init__()
        # The (filename, line number) this statement came from.
        self.loc = loc
    # trans is the transform we're working on.
    # st is the time since this statement started executing.
    # state is the state stored by this statement, or None if
    # we've just started executing this statement.
    # event is an event we're triggering.
    #
    # "continue", state, pause - Causes this statement to execute
    # again, with the given state passed in the second time around.
    #
    #
    # "next", timeleft, pause - Causes the next statement to execute,
    # with timeleft being the amount of time left after this statement
    # finished.
    #
    # "event", (name, timeleft), pause - Causes an event to be reported,
    # and control to head up to the event handler.
    #
    # "repeat", (count, timeleft), pause - Causes the repeat behavior
    # to occur.
    #
    # As the Repeat statement can only appear in a block, only Block
    # needs to deal with the repeat behavior.
    #
    # Pause is the amount of time until execute should be called again,
    # or None if there's no need to call execute ever again.
    def execute(self, trans, st, state, event):
        raise Exception("Not implemented.")
    # Return a list of displayable children.
    def visit(self):
        return [ ]
# This represents a Raw ATL block.
class RawBlock(RawStatement):

    # Should we use the animation timebase or the showing timebase?
    animation = False

    def __init__(self, loc, statements, animation):
        super(RawBlock, self).__init__(loc)
        # A list of RawStatements in this block.
        self.statements = statements
        self.animation = animation

    def compile(self, ctx):
        """Compile every child statement, wrapping the results in a Block."""
        compiling(self.loc)
        compiled = [ ]
        for stmt in self.statements:
            compiled.append(stmt.compile(ctx))
        return Block(self.loc, compiled)

    def predict(self, ctx, callback):
        for stmt in self.statements:
            stmt.predict(ctx, callback)
# A compiled ATL block.
class Block(Statement):
    def __init__(self, loc, statements):
        super(Block, self).__init__(loc)
        # A list of statements in the block.
        self.statements = statements
        # The start times of various statements: (time, index-after-Time).
        self.times = [ ]
        for i, s in enumerate(statements):
            if isinstance(s, Time):
                self.times.append((s.time, i + 1))
        self.times.sort()
    def execute(self, trans, st, state, event):
        """
        Run the statements in this block in sequence, honoring `time`
        markers, repeats, and events. Returns a (action, arg, pause)
        triple as described on Statement.
        """
        executing(self.loc)
        # Unpack the state.
        if state is not None:
            index, start, loop_start, repeats, times, child_state = state
        else:
            index, start, loop_start, repeats, times, child_state = 0, 0, 0, 0, self.times[:], None
        # What we might be returning.
        action = "continue"
        arg = None
        pause = None
        while action == "continue":
            # Target is the time we're willing to execute to.
            # Max_pause is how long we'll wait before executing again.
            # If we have times queued up, then use them to inform target
            # and time.
            if times:
                time, tindex = times[0]
                target = min(time, st)
                max_pause = time - target
            # Otherwise, take the defaults.
            else:
                target = st
                max_pause = 15
            while True:
                # If we've hit the last statement, it's the end of
                # this block.
                if index >= len(self.statements):
                    return "next", target - start, None
                # Find the statement and try to run it.
                stmt = self.statements[index]
                action, arg, pause = stmt.execute(trans, target - start, child_state, event)
                # On continue, persist our state.
                if action == "continue":
                    if pause is None:
                        pause = max_pause
                    action, arg, pause = "continue", (index, start, loop_start, repeats, times, arg), min(max_pause, pause)
                    break
                elif action == "event":
                    return action, arg, pause
                # On next, advance to the next statement in the block.
                elif action == "next":
                    index += 1
                    start = target - arg
                    child_state = None
                # On repeat, either terminate the block, or go to
                # the first statement.
                elif action == "repeat":
                    count, arg = arg
                    loop_end = target - arg
                    duration = loop_end - loop_start
                    # Bug fix: check for a degenerate loop BEFORE dividing
                    # by duration, so a zero-length loop raises the intended
                    # message rather than ZeroDivisionError.
                    if duration <= 0:
                        raise Exception("ATL appears to be in an infinite loop.")
                    # Figure how many durations can occur between the
                    # start of the loop and now.
                    new_repeats = int((target - loop_start) / duration)
                    if count is not None:
                        if repeats + new_repeats >= count:
                            # Finished the requested number of repeats.
                            new_repeats = count - repeats
                            loop_start += new_repeats * duration
                            return "next", target - loop_start, None
                    repeats += new_repeats
                    loop_start = loop_start + new_repeats * duration
                    start = loop_start
                    index = 0
                    child_state = None
            # If a `time` marker has been reached, jump to the statement
            # following it.
            if times:
                time, tindex = times[0]
                if time <= target:
                    times.pop(0)
                    index = tindex
                    start = time
                    child_state = None
                    continue
            return action, arg, pause
    def visit(self):
        return [ j for i in self.statements for j in i.visit() ]
# This can become one of four things:
#
# - A pause.
# - An interpolation (which optionally can also reference other
# blocks, as long as they're not time-dependent, and have the same
# arity as the interpolation).
# - A call to another block.
# - A command to change the image, perhaps with a transition.
#
# We won't decide which it is until runtime, as we need the
# values of the variables here.
class RawMultipurpose(RawStatement):
    warp_function = None
    def __init__(self, loc):
        super(RawMultipurpose, self).__init__(loc)
        self.warper = None
        self.duration = None
        self.properties = [ ]
        self.expressions = [ ]
        self.splines = [ ]
        self.revolution = None
        self.circles = "0"
    def add_warper(self, name, duration, warp_function):
        """Record the warper (or warp function expression) and duration."""
        self.warper = name
        self.duration = duration
        self.warp_function = warp_function
    def add_property(self, name, exprs):
        self.properties.append((name, exprs))
    def add_expression(self, expr, with_clause):
        self.expressions.append((expr, with_clause))
    def add_revolution(self, revolution):
        self.revolution = revolution
    def add_circles(self, circles):
        self.circles = circles
    def add_spline(self, name, exprs):
        self.splines.append((name, exprs))
    def compile(self, ctx):
        """Decide at runtime what this statement is, and compile it."""
        compiling(self.loc)
        # Figure out what kind of statement we have. If there's no
        # interpolator, and no properties, than we have either a
        # call, or a child statement.
        if (self.warper is None and
            self.warp_function is None and
            not self.properties and
            not self.splines and
            len(self.expressions) == 1):
            expr, withexpr = self.expressions[0]
            child = ctx.eval(expr)
            if withexpr:
                transition = ctx.eval(withexpr)
            else:
                transition = None
            # A bare number is a pause.
            if isinstance(child, (int, float)):
                return Interpolation(self.loc, "pause", child, [ ], None, 0, [ ])
            if isinstance(child, ATLTransformBase):
                child.compile()
                return child.block
            else:
                return Child(self.loc, child, transition)
        compiling(self.loc)
        # Otherwise, we probably have an interpolation statement.
        if self.warp_function:
            warper = ctx.eval(self.warp_function)
        else:
            warper = self.warper or "pause"
            if warper not in warpers:
                raise Exception("ATL Warper %s is unknown at runtime." % warper)
        properties = [ ]
        for name, expr in self.properties:
            if name not in PROPERTIES:
                # Bug fix: was `% property` (the builtin), which hid the
                # offending property name from the error message.
                raise Exception("ATL Property %s is unknown at runtime." % name)
            value = ctx.eval(expr)
            properties.append((name, value))
        splines = [ ]
        for name, exprs in self.splines:
            if name not in PROPERTIES:
                # Bug fix: was `% property` (the builtin), as above.
                raise Exception("ATL Property %s is unknown at runtime." % name)
            values = [ ctx.eval(i) for i in exprs ]
            splines.append((name, values))
        for expr, with_ in self.expressions:
            try:
                value = ctx.eval(expr)
            except:
                raise Exception("Could not evaluate expression %r when compiling ATL." % expr)
            if not isinstance(value, ATLTransformBase):
                raise Exception("Expression %r is not an ATL transform, and so cannot be included in an ATL interpolation." % expr)
            value.compile()
            if value.properties is None:
                raise Exception("ATL transform %r is too complicated to be included in interpolation." % expr)
            properties.extend(value.properties)
        duration = ctx.eval(self.duration)
        circles = ctx.eval(self.circles)
        return Interpolation(self.loc, warper, duration, properties, self.revolution, circles, splines)
    def predict(self, ctx, callback):
        """Predict images for each expression that can be evaluated."""
        for i, j in self.expressions:
            try:
                i = ctx.eval(i)
            except:
                continue
            if isinstance(i, ATLTransformBase):
                i.atl.predict(ctx, callback)
                return
            try:
                i = renpy.easy.displayable(i)
            except:
                continue
            if isinstance(i, renpy.display.core.Displayable):
                i.predict(callback)
# This lets us have an ATL transform as our child.
class RawContainsExpr(RawStatement):

    def __init__(self, loc, expr):
        super(RawContainsExpr, self).__init__(loc)
        # Expression evaluating to the child displayable.
        self.expression = expr

    def compile(self, ctx):
        """Evaluate the expression and wrap it in a Child statement."""
        compiling(self.loc)
        return Child(self.loc, ctx.eval(self.expression), None)
# This allows us to have multiple children, inside a Fixed.
class RawChild(RawStatement):

    def __init__(self, loc, child):
        super(RawChild, self).__init__(loc)
        # Child blocks; siblings get merged into this list at parse time.
        self.children = [ child ]

    def compile(self, ctx):
        """Wrap each child in an ATLTransform inside a fixed-layout MultiBox."""
        box = renpy.display.layout.MultiBox(layout='fixed')
        for block in self.children:
            box.add(renpy.display.motion.ATLTransform(block, context=ctx.context))
        return Child(self.loc, box, None)
# This changes the child of this statement, optionally with a transition.
class Child(Statement):

    def __init__(self, loc, child, transition):
        super(Child, self).__init__(loc)
        self.child = renpy.easy.displayable(child)
        self.transition = transition

    def execute(self, trans, st, state, event):
        """Install the new child, running the transition against the old one if possible."""
        executing(self.loc)
        old_child = trans.raw_child
        if self.transition is not None and old_child is not None:
            new_child = self.transition(old_widget=old_child,
                                        new_widget=self.child)
        else:
            new_child = self.child
        trans.set_child(new_child)
        trans.raw_child = self.child
        return "next", st, None

    def visit(self):
        return [ self.child ]
# This causes interpolation to occur.
class Interpolation(Statement):
    def __init__(self, loc, warper, duration, properties, revolution, circles, splines):
        super(Interpolation, self).__init__(loc)
        # Name of the warper (key into `warpers`), or a callable.
        self.warper = warper
        # Duration of the interpolation, in seconds.
        self.duration = duration
        # List of (property-name, target-value) pairs.
        self.properties = properties
        # List of (property-name, knot-values) pairs.
        self.splines = splines
        # The direction we revolve in: cw, ccw, or None.
        self.revolution = revolution
        # The number of complete circles we make.
        self.circles = circles
    def execute(self, trans, st, state, event):
        """
        Interpolate trans.state towards the target properties. On the
        first call (state is None) the start/end differences are computed
        and cached in `state` for subsequent calls.
        """
        executing(self.loc)
        # Resolve the warper; a callable warper passes through unchanged.
        warper = warpers.get(self.warper, self.warper)
        # Fraction of the interpolation completed, warped.
        if self.duration:
            complete = min(1.0, st / self.duration)
        else:
            complete = 1.0
        complete = warper(complete)
        if state is None:
            # Create a new transform state, and apply the property
            # changes to it.
            newts = renpy.display.motion.TransformState()
            newts.take_state(trans.state)
            for k, v in self.properties:
                setattr(newts, k, v)
            # Now, the things we change linearly are in the difference
            # between the new and old states.
            linear = trans.state.diff(newts)
            revolution = None
            splines = [ ]
            # Clockwise revolution.
            if self.revolution is not None:
                # Remove various irrelevant motions.
                for i in [ 'xpos', 'ypos',
                           'xanchor', 'yanchor',
                           'xaround', 'yaround',
                           'xanchoraround', 'yanchoraround',
                           ]:
                    linear.pop(i, None)
                if newts.xaround is not None:
                    # Ensure we rotate around the new point.
                    trans.state.xaround = newts.xaround
                    trans.state.yaround = newts.yaround
                    trans.state.xanchoraround = newts.xanchoraround
                    trans.state.yanchoraround = newts.yanchoraround
                # Get the start and end angles and radii.
                startangle = trans.state.angle
                endangle = newts.angle
                startradius = trans.state.radius
                endradius = newts.radius
                # Make sure the revolution is in the appropriate direction,
                # and contains an appropriate number of circles.
                if self.revolution == "clockwise":
                    if endangle < startangle:
                        startangle -= 360
                    startangle -= self.circles * 360
                elif self.revolution == "counterclockwise":
                    if endangle > startangle:
                        startangle += 360
                    startangle += self.circles * 360
                # Store the revolution.
                revolution = (startangle, endangle, startradius, endradius)
            # Figure out the splines: prepend the current value as the
            # first knot of each spline.
            for name, values in self.splines:
                splines.append((name, [ getattr(trans.state, name) ] + values))
            state = (linear, revolution, splines)
        else:
            linear, revolution, splines = state
        # Linearly interpolate between the things in linear.
        for k, (old, new) in linear.iteritems():
            value = interpolate(complete, old, new, PROPERTIES[k])
            setattr(trans.state, k, value)
        # Handle the revolution.
        if revolution is not None:
            startangle, endangle, startradius, endradius = revolution
            trans.state.angle = interpolate(complete, startangle, endangle, float)
            trans.state.radius = interpolate(complete, startradius, endradius, float)
        # Handle any splines we might have.
        for name, values in splines:
            value = interpolate_spline(complete, values)
            setattr(trans.state, name, value)
        if st >= self.duration:
            return "next", st - self.duration, None
        else:
            # A pure pause need not be re-run until it expires; anything
            # that animates must execute every frame (pause of 0).
            if not self.properties and not self.revolution and not self.splines:
                return "continue", state, self.duration - st
            else:
                return "continue", state, 0
# Implementation of the repeat statement.
class RawRepeat(RawStatement):

    def __init__(self, loc, repeats):
        super(RawRepeat, self).__init__(loc)
        # Expression giving the repeat count, or None for "repeat forever".
        self.repeats = repeats

    def compile(self, ctx):
        """Evaluate the repeat-count expression, if present."""
        compiling(self.loc)
        count = self.repeats
        if count is not None:
            count = ctx.eval(count)
        return Repeat(self.loc, count)
class Repeat(Statement):

    def __init__(self, loc, repeats):
        super(Repeat, self).__init__(loc)
        # Number of repeats, or None to repeat forever.
        self.repeats = repeats

    def execute(self, trans, st, state, event):
        # Hand control back to the enclosing Block, which implements
        # the actual repeat behavior.
        return "repeat", (self.repeats, st), 0
# Parallel statement.
class RawParallel(RawStatement):

    def __init__(self, loc, block):
        super(RawParallel, self).__init__(loc)
        # Sibling parallel statements are merged into this list at parse time.
        self.blocks = [ block ]

    def compile(self, ctx):
        """Compile every parallel block."""
        compiled = [ block.compile(ctx) for block in self.blocks ]
        return Parallel(self.loc, compiled)

    def predict(self, ctx, callback):
        for block in self.blocks:
            block.predict(ctx, callback)
class Parallel(Statement):
    def __init__(self, loc, blocks):
        super(Parallel, self).__init__(loc)
        # The blocks that run simultaneously.
        self.blocks = blocks
    def execute(self, trans, st, state, event):
        """
        Run every still-active block; the statement finishes when the
        last block finishes. State is a list of (block, block-state).
        """
        executing(self.loc)
        if state is None:
            state = [ (i, None) for i in self.blocks ]
        # The amount of time left after finishing this block.
        left = [ ]
        # The duration of the pause.
        pauses = [ ]
        # The new state structure.
        newstate = [ ]
        for i, istate in state:
            action, arg, pause = i.execute(trans, st, istate, event)
            if pause is not None:
                pauses.append(pause)
            if action == "continue":
                newstate.append((i, arg))
            elif action == "next":
                left.append(arg)
            elif action == "event":
                return action, arg, pause
        # Continue while any block is still running; the leftover time is
        # the minimum across blocks (the last to finish).
        if newstate:
            return "continue", newstate, min(pauses)
        else:
            return "next", min(left), None
    def visit(self):
        return [ j for i in self.blocks for j in i.visit() ]
# The choice statement.
class RawChoice(RawStatement):

    def __init__(self, loc, chance, block):
        super(RawChoice, self).__init__(loc)
        # List of (chance-expression, block); siblings merge in at parse time.
        self.choices = [ (chance, block) ]

    def compile(self, ctx):
        """Evaluate each chance and compile each block."""
        compiling(self.loc)
        compiled = [ ]
        for chance, block in self.choices:
            compiled.append((ctx.eval(chance), block.compile(ctx)))
        return Choice(self.loc, compiled)

    def predict(self, ctx, callback):
        for _chance, block in self.choices:
            block.predict(ctx, callback)
class Choice(Statement):
    def __init__(self, loc, choices):
        super(Choice, self).__init__(loc)
        # List of (chance, compiled block) pairs.
        self.choices = choices
    def execute(self, trans, st, state, event):
        """
        On first execution, pick one block at random, weighted by chance;
        thereafter keep executing the chosen block.
        """
        executing(self.loc)
        if state is None:
            # Weighted random selection over the choices.
            total = 0
            for chance, choice in self.choices:
                total += chance
            n = random.uniform(0, total)
            for chance, choice in self.choices:
                if n < chance:
                    break
                n -= chance
            cstate = None
        else:
            choice, cstate = state
        action, arg, pause = choice.execute(trans, st, cstate, event)
        if action == "continue":
            # Remember which choice was made, plus its state.
            return "continue", (choice, arg), pause
        else:
            return action, arg, None
    def visit(self):
        return [ j for i in self.choices for j in i[1].visit() ]
# The Time statement.
class RawTime(RawStatement):

    def __init__(self, loc, time):
        super(RawTime, self).__init__(loc)
        # Expression giving the absolute time of this marker.
        self.time = time

    def compile(self, ctx):
        """Evaluate the time expression and build the Time statement."""
        compiling(self.loc)
        when = ctx.eval(self.time)
        return Time(self.loc, when)
class Time(Statement):

    def __init__(self, loc, time):
        super(Time, self).__init__(loc)
        self.time = time

    def execute(self, trans, st, state, event):
        # Does nothing by itself; Block.__init__ records the times and
        # Block.execute uses them to jump between statements.
        return "continue", None, None
# The On statement.
class RawOn(RawStatement):

    def __init__(self, loc, name, block):
        super(RawOn, self).__init__(loc)
        # Maps event name -> raw handler block; siblings merge at parse time.
        self.handlers = { name : block }

    def compile(self, ctx):
        """Compile each event handler block."""
        compiling(self.loc)
        compiled = { }
        for name, block in self.handlers.iteritems():
            compiled[name] = block.compile(ctx)
        return On(self.loc, compiled)

    def predict(self, ctx, callback):
        for block in self.handlers.itervalues():
            block.predict(ctx, callback)
class On(Statement):
    def __init__(self, loc, handlers):
        super(On, self).__init__(loc)
        # Maps event name -> compiled handler block.
        self.handlers = handlers
    def execute(self, trans, st, state, event):
        """
        Dispatch events to handler blocks. State is (current handler
        name, time the handler started, handler's child state).
        """
        executing(self.loc)
        # If it's our first time through, start in the start state.
        if state is None:
            name, start, cstate = ("start", st, None)
        else:
            name, start, cstate = state
        # If we have an external event, and we have a handler for it,
        # handle it.
        if event in self.handlers:
            # Do not allow people to abort the hide handler with another
            # event.
            if name != "hide":
                name = event
                start = st
                cstate = None
        while True:
            # If we don't have a handler, return until we change event.
            if name not in self.handlers:
                return "continue", (name, start, cstate), None
            action, arg, pause = self.handlers[name].execute(trans, st - start, cstate, event)
            # If we get a continue, save our state.
            if action == "continue":
                # If it comes from a hide block, indicate that.
                if name == "hide":
                    trans.hide_response = False
                return "continue", (name, start, arg), pause
            # If we get a next, then try going to the default
            # event, unless we're already in default, in which case we
            # go to None.
            elif action == "next":
                if name == "default" or name == "hide":
                    name = None
                else:
                    name = "default"
                start = st - arg
                cstate = None
                continue
            # If we get an event, then either handle it if we can, or
            # pass it up the stack if we can't.
            elif action == "event":
                name, arg = arg
                if name in self.handlers:
                    # Clamp the handler start so it never lies more than
                    # 30 seconds in the past.
                    start = max(st - arg, st - 30)
                    cstate = None
                    continue
                return "event", (name, arg), None
    def visit(self):
        return [ j for i in self.handlers.itervalues() for j in i.visit() ]
# Event statement.
class RawEvent(RawStatement):

    def __init__(self, loc, name):
        super(RawEvent, self).__init__(loc)
        self.name = name

    def compile(self, ctx):
        """Events need no evaluation; just wrap the name."""
        return Event(self.loc, self.name)
class Event(Statement):

    def __init__(self, loc, name):
        super(Event, self).__init__(loc)
        self.name = name

    def execute(self, trans, st, state, event):
        # Propagate the named event up to the nearest On statement.
        return "event", (self.name, st), None
class RawFunction(RawStatement):

    def __init__(self, loc, expr):
        super(RawFunction, self).__init__(loc)
        # Expression evaluating to the user callback.
        self.expr = expr

    def compile(self, ctx):
        """Evaluate the expression and wrap the callable in a Function."""
        compiling(self.loc)
        fn = ctx.eval(self.expr)
        return Function(self.loc, fn)
class Function(Statement):

    def __init__(self, loc, function):
        super(Function, self).__init__(loc)
        self.function = function

    def execute(self, trans, st, state, event):
        """Invoke the user function; a non-None result keeps it running."""
        delay = self.function(trans, st, trans.at)
        if delay is None:
            return "next", 0, None
        return "continue", None, delay
# This parses an ATL block.
def parse_atl(l):
    """
    Parse an ATL block from lexer `l`, returning a RawBlock. Adjacent
    parallel/choice/contains/on statements are merged into single
    statements at the end.
    """
    l.advance()
    block_loc = l.get_location()
    statements = [ ]
    animation = False
    while not l.eob:
        loc = l.get_location()
        if l.keyword('repeat'):
            repeats = l.simple_expression()
            statements.append(RawRepeat(loc, repeats))
        elif l.keyword('block'):
            l.require(':')
            l.expect_eol()
            l.expect_block('block')
            block = parse_atl(l.subblock_lexer())
            statements.append(block)
        elif l.keyword('contains'):
            expr = l.simple_expression()
            if expr:
                l.expect_noblock('contains expression')
                statements.append(RawContainsExpr(loc, expr))
            else:
                l.require(':')
                l.expect_eol()
                l.expect_block('contains')
                block = parse_atl(l.subblock_lexer())
                statements.append(RawChild(loc, block))
        elif l.keyword('parallel'):
            l.require(':')
            l.expect_eol()
            l.expect_block('parallel')
            block = parse_atl(l.subblock_lexer())
            statements.append(RawParallel(loc, block))
        elif l.keyword('choice'):
            chance = l.simple_expression()
            if not chance:
                chance = "1.0"
            l.require(':')
            l.expect_eol()
            l.expect_block('choice')
            block = parse_atl(l.subblock_lexer())
            statements.append(RawChoice(loc, chance, block))
        elif l.keyword('on'):
            name = l.require(l.word)
            l.require(':')
            l.expect_eol()
            l.expect_block('on')
            block = parse_atl(l.subblock_lexer())
            statements.append(RawOn(loc, name, block))
        elif l.keyword('time'):
            time = l.require(l.simple_expression)
            l.expect_noblock('time')
            statements.append(RawTime(loc, time))
        elif l.keyword('function'):
            expr = l.require(l.simple_expression)
            l.expect_noblock('function')
            statements.append(RawFunction(loc, expr))
        elif l.keyword('event'):
            name = l.require(l.word)
            l.expect_noblock('event')
            statements.append(RawEvent(loc, name))
        elif l.keyword('pass'):
            l.expect_noblock('pass')
            statements.append(None)
        elif l.keyword('animation'):
            l.expect_noblock('animation')
            animation = True
        else:
            # If we can't assign it it a statement more specifically,
            # we try to parse it into a RawMultipurpose. That will
            # then be turned into another statement, as appropriate.
            # The RawMultipurpose we add things to.
            rm = renpy.atl.RawMultipurpose(loc)
            # Is the last clause an expression?
            last_expression = False
            # Is this clause an expression?
            this_expression = False
            # First, look for a warper.
            cp = l.checkpoint()
            warper = l.name()
            if warper in warpers:
                duration = l.require(l.simple_expression)
                warp_function = None
            elif warper == "warp":
                warper = None
                warp_function = l.require(l.simple_expression)
                duration = l.require(l.simple_expression)
            else:
                l.revert(cp)
                warper = None
                warp_function = None
                duration = "0"
            rm.add_warper(warper, duration, warp_function)
            # Now, look for properties and simple_expressions.
            while True:
                # Update expression status.
                last_expression = this_expression
                this_expression = False
                if l.keyword('pass'):
                    continue
                # Parse revolution keywords.
                if l.keyword('clockwise'):
                    rm.add_revolution('clockwise')
                    continue
                if l.keyword('counterclockwise'):
                    rm.add_revolution('counterclockwise')
                    continue
                if l.keyword('circles'):
                    expr = l.require(l.simple_expression)
                    rm.add_circles(expr)
                # Try to parse a property.
                cp = l.checkpoint()
                prop = l.name()
                if prop in PROPERTIES:
                    expr = l.require(l.simple_expression)
                    # We either have a property or a spline. It's the
                    # presence of knots that determine which one it is.
                    knots = [ ]
                    while l.keyword('knot'):
                        knots.append(l.require(l.simple_expression))
                    if knots:
                        knots.append(expr)
                        rm.add_spline(prop, knots)
                    else:
                        rm.add_property(prop, expr)
                    continue
                # Otherwise, try to parse it as a simple expression,
                # with an optional with clause.
                l.revert(cp)
                expr = l.simple_expression()
                if not expr:
                    break
                if last_expression:
                    l.error('ATL statement contains two expressions in a row; is one of them a misspelled property? If not, separate them with pass.')
                this_expression = True
                if l.keyword("with"):
                    with_expr = l.require(l.simple_expression)
                else:
                    with_expr = None
                rm.add_expression(expr, with_expr)
            l.expect_noblock('ATL')
            statements.append(rm)
        if l.eol():
            l.advance()
            continue
        l.require(",", "comma or end of line")
    # Merge together statements that need to be merged together.
    merged = [ ]
    old = None
    for new in statements:
        if isinstance(old, RawParallel) and isinstance(new, RawParallel):
            old.blocks.extend(new.blocks)
            continue
        elif isinstance(old, RawChoice) and isinstance(new, RawChoice):
            old.choices.extend(new.choices)
            continue
        elif isinstance(old, RawChild) and isinstance(new, RawChild):
            old.children.extend(new.children)
            continue
        elif isinstance(old, RawOn) and isinstance(new, RawOn):
            old.handlers.update(new.handlers)
            continue
        # None is a pause statement, which gets skipped, but also
        # prevents things from combining.
        elif new is None:
            old = new
            continue
        merged.append(new)
        old = new
    return RawBlock(block_loc, merged, animation)
| MSEMJEJME/ReAlistair | renpy/atl.py | Python | gpl-2.0 | 45,015 | [
"VisIt"
] | bddf22d9d6557f6a42774978e9ea457cb2f8619bb24d884e29a609a392e944ff |
"""View routing."""
import calendar
import datetime
import json
import os
import threading
import flask
from flask import (Blueprint, current_app, redirect, render_template, redirect,
request, url_for)
from sqlalchemy import func
from app import global_stats, parse
from app.helpers import add_months
from app.models import AntagObjective, Death, Explosion, Match, PopulationSnapshot as PopSnap, db
from config import basedir
blueprint = Blueprint('blueprint', __name__, static_folder='static')
@blueprint.route('/')
@blueprint.route('/index')
def index():
    """Respond with view for index page."""
    matchesTotal = Match.query.count()
    # Guard against division by zero when no matches have been imported.
    # (Bug fix: was `matchesTotal is 0`, which relies on CPython small-int
    # interning rather than value equality.)
    if matchesTotal == 0:
        matchesTotal = 1
    explosionratio = Explosion.query.count() / float(matchesTotal)
    deathratio = Death.query.count() / float(matchesTotal)
    nuked = Match.query.filter(Match.nuked).count()
    lastmatch = Match.query.order_by(Match.id.desc()).first()
    # All-time playrate per map. (A 30-day filtered version of this query
    # previously existed here but was immediately overwritten, so the
    # effective behavior has always been all-time; the dead query was removed.)
    mapPlayrate = db.session.query(Match.mapname, func.count(Match.id)).group_by(Match.mapname).all()
    return render_template('index.html', matchcount=matchesTotal, nukedcount=nuked, explosionratio=explosionratio,
                           deathratio=deathratio, match=lastmatch,
                           mapPlayrate=mapPlayrate)
# @blueprint.route('/import')
# def test():
# url='http://game.ss13.moe/stats/statistics_2016.31.01.7.txt'
# if request.args.get('url'):
# url = request.args.get('url')
# print(url)
# return parse.parse_url(url)
@blueprint.route('/matchlist')
@blueprint.route('/matchlist/<int:page>')
def matchlist(page=1):
    """Respond with the paginated match list, newest matches first."""
    pagination = Match.query.order_by(Match.id.desc()).paginate(
        page, current_app.config['MATCHES_PER_PAGE'], False)
    return render_template('matchlist.html', matches=pagination.items, pagination=pagination)
@blueprint.route('/global')
def globalpage():
    """Serve the global statistics landing page."""
    page = render_template('global.html')
    return page
@blueprint.route('/global/gamemode')
def globalgamemodes(timespan="monthly", month=None, year=None):
    """Respond with view for global statistics for gamemodes, with optional timespan grouping. Currently only all time or by month.

    NOTE(review): the `timespan`, `month` and `year` parameters are never
    populated by the route; the values are read from request.args instead.
    """
    # Query-string overrides, defaulting to the current month/year.
    query_timespan = request.args.get("timespan") if request.args.get("timespan") else "monthly"
    request_month = int(request.args.get("month") if request.args.get("month") else datetime.datetime.now().month)
    request_year = int(request.args.get("year") if request.args.get("year") else datetime.datetime.now().year)
    # Start of the requested month.
    request_starttime = datetime.datetime(year=request_year, month=request_month, day=1)
    request_timespan = (query_timespan, request_starttime)
    # Previous/next month links for pagination.
    next_page = add_months(request_starttime, 1)
    prev_page = add_months(request_starttime, -1)
    (stats, counts) = global_stats.get_formatted_global_stats(request_timespan)
    return render_template('globalstats.html', matchData=stats,
                           timespan=query_timespan,
                           query_start=request_starttime,
                           matchCounts=counts,
                           nextpage=next_page,
                           prevpage=prev_page)
@blueprint.route('/global/population')
def globalpopulation():
    """Respond with view for global statitics for population, chunked by hour, over the course of the last 30 days."""
    startdate = datetime.datetime.now()
    enddate = startdate - datetime.timedelta(days=30)
    # Average popcount per hour-of-day over the last 30 days.
    # NOTE(review): strftime is SQLite-specific; verify if the DB changes.
    q = db.session.query(
        func.avg(PopSnap.popcount),
        func.strftime('%H', PopSnap.time)
    ).filter(
        PopSnap.time <= startdate,
        PopSnap.time >= enddate
    ).group_by(func.strftime('%H', PopSnap.time)).all()
    counts = [el[0] for el in q]  # first piece of each grouped result
    hours = [el[1] for el in q]  # second piece of each grouped result
    return render_template('populationstats.html', counts=counts, hours=hours)
@blueprint.route('/globalstats')
def globalstats_redir():
    """Legacy URL; forward to the global statistics landing page."""
    return redirect(url_for(".globalpage"))
@blueprint.route('/match/latest')
def latest_match():
    """Redirect to the most recently recorded match."""
    newest = Match.query.order_by(Match.id.desc()).first()
    return redirect(url_for('blueprint.match', id=newest.id))
@blueprint.route('/match/<id>')
def match(id=0):
    """Respond with view for a match, or 404 if it does not exist."""
    match = Match.query.get(id)
    if match is not None:
        # Reuse the already-fetched match instead of querying a second time.
        return render_template('match.html', match=match)
    # Bug fix: `abort` was never imported from flask, so this line raised
    # NameError instead of returning a 404. Use the module attribute.
    flask.abort(404)
# This is the route that the bot will use to notify the app to process files.
@blueprint.route('/alert_new_file')
def alert_new_file():
    """A GET request for this URL will cause the server to check for new statfiles in the configured dir."""
    # Bug fix: checking .locked() and then entering `with` was racy; a
    # non-blocking acquire is atomic. Also removed the unreachable
    # `return 'Already parsing.', 531` that followed the with-block.
    if not current_app.parse_lock.acquire(blocking=False):
        return 'Already parsing.', 530
    try:
        # The worker thread does the parsing; the lock only guards spawn,
        # matching the original behavior.
        thread = threading.Thread(target=parse.batch_parse, args=[current_app._get_current_object()])
        thread.start()
        return 'OK, parsing', 200
    finally:
        current_app.parse_lock.release()
@blueprint.route('/changelog')
def changelog_view():
    """Render the changelog page."""
    return render_template('changelog.html')
@blueprint.route('/error')  # legacy
def errorpage():
    """Render the 500 error page with a 500 status code."""
    body = render_template('500.html')
    return body, 500
@blueprint.context_processor
def utility_processor():
    """Define helper methods for Jinja2 templates."""
    def modethumb(name):
        """Return a URL for an image related to the match mode."""
        name = name.lower()
        # Fall back to a generic thumbnail when no mode-specific one exists.
        if os.path.isfile(os.path.join(basedir,
                          'app', 'static', 'img', 'modethumbs', name + '.png')):
            return flask.url_for('static', filename='img/modethumbs/' + name + '.png')
        else:
            return flask.url_for('static', filename='img/modethumbs/othermode.png')
    def antag_objs(matchid, antagkey):
        """Retrieve the objectives for an antag from a given match."""
        return db.session.query(Match).get(matchid).antagobjs.filter(AntagObjective.mindkey == antagkey)
    # def add_months(sourcedate, months):
    #     """Add months to original date. Returns a datetime."""
    #     month = sourcedate.month - 1 + months
    #     year = int(sourcedate.year + month / 12)
    #     month = month % 12 + 1
    #     day = min(sourcedate.day, calendar.monthrange(year, month)[1])
    #     return datetime.date(year, month, day)
    def population_timeline_chart_data(matchid):
        """Get some population data for Chart.JS in JSON format."""
        ps = Match.query.get(matchid).populationstats.all()
        labels = []
        popcounts = []
        # Track the lowest population seen, for the chart's y-axis floor.
        lowestPop = 100
        for snapshot in ps:
            labels.append(snapshot.time.strftime('%H:%M'))
            popcounts.append(snapshot.popcount)
            if snapshot.popcount is None or snapshot.popcount < lowestPop:
                lowestPop = snapshot.popcount
        return json.dumps(labels), json.dumps(popcounts), lowestPop
    return dict(add_months=add_months, antag_objs=antag_objs, modethumb=modethumb,
                population_timeline_chart_data=population_timeline_chart_data)
@blueprint.app_template_filter('format_timestamp')
def format_timestamp(value, format='matchtime'):
    """Format a dotted 'yyyy.mm.dd.hh.mm[.ss]' timestamp string for display.

    Returns None when `format` is not one of the known names.
    """
    parts = value.split('.')
    if format == 'matchtime':
        # yyyy mm dd hh mm ss
        return "{} {} {}:{}".format(calendar.month_name[int(parts[1])], int(parts[2]), int(parts[3]), parts[4])
    if format == 'shortmatchtime':
        return "{}/{} {}:{}".format(int(parts[1]), int(parts[2]), int(parts[3]), parts[4])
    if format == 'hhmm':  # datetime hour/min
        return "{}:{}".format(parts[4], parts[5])
@blueprint.app_template_filter('obj_successfail')
def obj_successfail(succeeded):
    """Return a styled span to show if an antag was successful or not.

    Keyword arguments:
    succeeded -- Boolean. Did the antag win?
    """
    if not succeeded:
        return "<span class='objective failure'>Failure</span>"
    return "<span class='objective success'>Success</span>"
@blueprint.app_template_filter('obj_pretty')
def obj_pretty(objective):
    """Make antag objectives pretty for template views."""
    if objective.objective_type == u'/datum/objective/assassinate':
        # BUGFIX: corrected the user-visible misspelling "Asassinate".
        return 'Assassinate {} the {}.'.format(objective.target_name, objective.target_role)
    else:
        return objective.objective_desc
| gbasood/vgstation-statistics-viewer | app/public/views.py | Python | mit | 8,987 | [
"MOE"
] | 34afb369b0129bf55c2b233b5d8d5e657b312aefb87b419f8c05be6dafac6be2 |
# shamelessly copied from pliExpertInfo (Vali, Mirakels, Littlesat)
from enigma import iServiceInformation, iPlayableService
from Components.Converter.Converter import Converter
from Components.Element import cached
from Components.config import config
from Tools.Transponder import ConvertToHumanReadable
from Tools.GetEcmInfo import GetEcmInfo
from Poll import Poll
from Components.Converter.ChannelNumbers import channelnumbers
def addspace(text):
    """Return *text* with one trailing space appended, or unchanged when falsy."""
    return text + " " if text else text
class PliExtraInfo(Poll, Converter, object):
def __init__(self, type):
    # "type" is the skin-supplied item name that selects what
    # getText()/getBool() will render (e.g. "CryptoBar", "TransponderInfo").
    Converter.__init__(self, type)
    Poll.__init__(self)
    self.type = type
    self.poll_interval = 1000  # re-evaluate once per second
    self.poll_enabled = True
    # CA system table, one row per conditional-access system:
    # (first caid hex, last caid hex, system name, short bar label,
    #  always shown in the crypto bar even when unavailable).
    self.caid_data = (
        ( "0x100", "0x1ff", "Seca", "S", True ),
        ( "0x500", "0x5ff", "Via", "V", True ),
        ( "0x600", "0x6ff", "Irdeto", "I", True ),
        ( "0x900", "0x9ff", "NDS", "Nd", True ),
        ( "0xb00", "0xbff", "Conax", "Co", True ),
        ( "0xd00", "0xdff", "CryptoW", "Cw", True ),
        ( "0xe00", "0xeff", "PowerVU", "P", False ),
        ("0x1700", "0x17ff", "Beta", "B", True ),
        ("0x1800", "0x18ff", "Nagra", "N", True ),
        ("0x2600", "0x2600", "Biss", "Bi", False ),
        ("0x4ae0", "0x4ae1", "Dre", "D", False ),
        ("0x4aee", "0x4aee", "BulCrypt", "B1", False ),
        ("0x5581", "0x5581", "BulCrypt", "B2", False )
    )
    # Boolean item table used by getBool():
    # (converter type name, short label matching caid_data column 3,
    #  True = ask about the *selected* CA system, False = merely *available*).
    self.ca_table = (
        ("CryptoCaidSecaAvailable", "S", False),
        ("CryptoCaidViaAvailable", "V", False),
        ("CryptoCaidIrdetoAvailable", "I", False),
        ("CryptoCaidNDSAvailable", "Nd", False),
        ("CryptoCaidConaxAvailable", "Co", False),
        ("CryptoCaidCryptoWAvailable", "Cw", False),
        ("CryptoCaidPowerVUAvailable", "P", False),
        ("CryptoCaidBetaAvailable", "B", False),
        ("CryptoCaidNagraAvailable", "N", False),
        ("CryptoCaidBissAvailable", "Bi", False),
        ("CryptoCaidDreAvailable", "D", False),
        ("CryptoCaidBulCrypt1Available","B1", False),
        ("CryptoCaidBulCrypt2Available","B2", False),
        ("CryptoCaidSecaSelected", "S", True),
        ("CryptoCaidViaSelected", "V", True),
        ("CryptoCaidIrdetoSelected", "I", True),
        ("CryptoCaidNDSSelected", "Nd", True),
        ("CryptoCaidConaxSelected", "Co", True),
        ("CryptoCaidCryptoWSelected", "Cw", True),
        ("CryptoCaidPowerVUSelected", "P", True),
        ("CryptoCaidBetaSelected", "B", True),
        ("CryptoCaidNagraSelected", "N", True),
        ("CryptoCaidBissSelected", "Bi", True),
        ("CryptoCaidDreSelected", "D", True),
        ("CryptoCaidBulCrypt1Selected", "B1", True),
        ("CryptoCaidBulCrypt2Selected", "B2", True),
    )
    self.ecmdata = GetEcmInfo()
    # Cached frontend data; refreshed lazily by getText() after a
    # new-program event (see changed()).
    self.feraw = self.fedata = self.updateFEdata = None
def getCryptoInfo(self, info):
    """Cache the current ECM source/caid/provid/ecmpid for a crypted service;
    reset them to defaults for free-to-air services."""
    if info.getInfo(iServiceInformation.sIsCrypted) == 1:
        data = self.ecmdata.getEcmData()
        (self.current_source,
         self.current_caid,
         self.current_provid,
         self.current_ecmpid) = data[0], data[1], data[2], data[3]
    else:
        self.current_source = ""
        self.current_caid = self.current_provid = self.current_ecmpid = "0"
def createCryptoBar(self, info):
    """Build the colored CA-system letter bar for the current service.

    Dim entries are shown only when flagged "always show" in caid_data;
    available systems are highlighted, and the reset escape is appended last.
    """
    res = ""
    available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
    dim = "\c007?7?7?"
    for begin, end, name, label, always in self.caid_data:
        lo, hi = int(begin, 16), int(end, 16)
        color = "\c0000??00" if lo <= int(self.current_caid, 16) <= hi else dim
        try:
            for caid in available_caids:
                if lo <= caid <= hi:
                    color = "\c00????00"
        except:
            pass
        if color != dim or always:
            if res:
                res += " "
            res += color + label
    res += "\c00??????"
    return res
def createCryptoEntry(self, info, lo, hi, label):
    """Render one CA-system label with the shared coloring rules.

    lo/hi -- inclusive caid range (ints); label -- text to display.
    Selected caid -> green, otherwise grey; any available caid in range
    overrides with yellow; the color-reset escape is appended last.
    """
    available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
    if lo <= int(self.current_caid, 16) <= hi:
        color = "\c004c7d3f"
    else:
        color = "\c009?9?9?"
    try:
        for caid in available_caids:
            if lo <= caid <= hi:
                color = "\c00eeee00"
    except:
        pass
    return color + label + "\c00??????"

# The twelve per-system methods below used to be near-identical copies of
# one another; they now delegate to createCryptoEntry with each system's
# original caid range and label, preserving behavior exactly.

def createCryptoSeca(self, info):
    """Colored label for the Seca CA system."""
    return self.createCryptoEntry(info, 0x100, 0x1ff, 'S')

def createCryptoVia(self, info):
    """Colored label for the Viaccess CA system."""
    return self.createCryptoEntry(info, 0x500, 0x5ff, 'V')

def createCryptoIrdeto(self, info):
    """Colored label for the Irdeto CA system."""
    return self.createCryptoEntry(info, 0x600, 0x6ff, 'I')

def createCryptoNDS(self, info):
    """Colored label for the NDS CA system."""
    return self.createCryptoEntry(info, 0x900, 0x9ff, 'NDS')

def createCryptoConax(self, info):
    """Colored label for the Conax CA system."""
    return self.createCryptoEntry(info, 0xb00, 0xbff, 'CO')

def createCryptoCryptoW(self, info):
    """Colored label for the CryptoWorks CA system."""
    return self.createCryptoEntry(info, 0xd00, 0xdff, 'CW')

def createCryptoPowerVU(self, info):
    """Colored label for the PowerVU CA system."""
    return self.createCryptoEntry(info, 0xe00, 0xeff, 'P')

def createCryptoBeta(self, info):
    """Colored label for the Betacrypt CA system."""
    return self.createCryptoEntry(info, 0x1700, 0x17ff, 'B')

def createCryptoNagra(self, info):
    """Colored label for the Nagravision CA system."""
    return self.createCryptoEntry(info, 0x1800, 0x18ff, 'N')

def createCryptoBiss(self, info):
    """Colored label for the BISS CA system."""
    # NOTE: this method historically used the range 0x2600-0x26ff even
    # though caid_data lists Biss as 0x2600 only; kept for compatibility.
    return self.createCryptoEntry(info, 0x2600, 0x26ff, 'BI')

def createCryptoDre(self, info):
    """Colored label for the DreCrypt CA system."""
    return self.createCryptoEntry(info, 0x4ae0, 0x4ae1, 'DC')
def createCryptoSpecial(self, info):
    """Return "<CAname>:caid:provid:sid:ecmpid" for the active ECM.

    Falls back to "FTA" as the system name when the caid matches no known
    range, and to "" when any lookup or conversion fails.
    """
    system = "FTA"
    try:
        current = int(self.current_caid, 16)
        for begin, end, name, label, always in self.caid_data:
            if int(begin, 16) <= current <= int(end, 16):
                system = name
                break
        return system + ":%04x:%04x:%04x:%04x" % (
            current, int(self.current_provid, 16),
            info.getInfo(iServiceInformation.sSID), int(self.current_ecmpid, 16))
    except:
        pass
    return ""
def createResolution(self, info):
    """Return e.g. "1920x1080p25", or "" while no video is decoded."""
    # Width is -1 until the decoder reports a picture.
    xres = info.getInfo(iServiceInformation.sVideoWidth)
    if xres == -1:
        return ""
    yres = info.getInfo(iServiceInformation.sVideoHeight)
    # sProgressive indexes into (interlaced, progressive, "", " ");
    # an unknown value of -1 selects the trailing blank entry.
    mode = ("i", "p", "", " ")[info.getInfo(iServiceInformation.sProgressive)]
    # Frame rate is reported in millihertz; add 500 to round to whole fps.
    # NOTE(review): relies on Python 2 integer division -- under Python 3
    # this would yield a float string and int(fps) would raise; confirm
    # the target interpreter before porting.
    fps = str((info.getInfo(iServiceInformation.sFrameRate) + 500) / 1000)
    if int(fps) <= 0:
        fps = ""
    return str(xres) + "x" + str(yres) + mode + fps
def createVideoCodec(self, info):
    """Map sVideoType to a human-readable codec name ("" for unknown/-1)."""
    codec_names = ("MPEG2", "MPEG4", "MPEG1", "MPEG4-II", "VC1", "VC1-SM", "")
    return codec_names[info.getInfo(iServiceInformation.sVideoType)]
def createPIDInfo(self, info):
    """Format "onid-tsid:sid:vpid:apid:pcrpid", clamping missing (-1) IDs to 0."""
    vpid = max(info.getInfo(iServiceInformation.sVideoPID), 0)
    apid = max(info.getInfo(iServiceInformation.sAudioPID), 0)
    pcrpid = max(info.getInfo(iServiceInformation.sPCRPID), 0)
    sidpid = max(info.getInfo(iServiceInformation.sSID), 0)
    tsid = max(info.getInfo(iServiceInformation.sTSID), 0)
    onid = max(info.getInfo(iServiceInformation.sONID), 0)
    return "%d-%d:%05d:%04d:%04d:%04d" % (onid, tsid, sidpid, vpid, apid, pcrpid)
def createTransponderInfo(self, fedata, feraw):
    """Compose the one-line transponder summary used by the "All" and
    "TransponderInfo" items; "" when frontend data is missing."""
    if not feraw or not fedata:
        return ""
    if "DVB-T" in feraw.get("tuner_type"):
        # NOTE(review): createFrequency expects the *raw* dict but receives
        # fedata here -- fedata may lack a "frequency" key, making the
        # frequency blank for DVB-T; confirm whether feraw was intended.
        tmp = addspace(self.createChannelNumber(fedata, feraw)) + self.createFrequency(fedata) + "/" + self.createPolarization(fedata)
    else:
        tmp = addspace(self.createFrequency(fedata)) + addspace(self.createPolarization(fedata))
    return addspace(self.createTunerSystem(fedata)) + tmp + addspace(self.createSymbolRate(fedata, feraw)) + addspace(self.createFEC(fedata, feraw)) \
        + addspace(self.createModulation(fedata)) + self.createOrbPos(feraw)
def createFrequency(self, feraw):
    """Return the transponder frequency as a string, or "" when unknown."""
    frequency = feraw.get("frequency")
    return str(frequency) if frequency else ""
def createChannelNumber(self, fedata, feraw):
    """Return the terrestrial channel as "CH<n>" (localized prefix), or ""."""
    # Channel number is derived from the raw frequency and the tuner slot.
    channel = channelnumbers.getChannelNumber(feraw.get("frequency"), feraw.get("tuner_number"))
    if channel:
        return _("CH") + "%s" % channel
    return ""
def createSymbolRate(self, fedata, feraw):
    """Return the bandwidth for DVB-T tuners, the symbol rate otherwise;
    "" when the value is missing."""
    if "DVB-T" in feraw.get("tuner_type"):
        value = fedata.get("bandwidth")
        if value:
            return value
    else:
        value = fedata.get("symbol_rate")
        if value:
            return str(value)
    return ""
def createPolarization(self, fedata):
    """Return the polarization abbreviation (e.g. "H"/"V"), or ""."""
    return fedata.get("polarization_abbreviation") or ""
def createFEC(self, fedata, feraw):
    """Return "lp-hp" code rates for DVB-T, otherwise the inner FEC; ""
    when the values are missing."""
    if "DVB-T" in feraw.get("tuner_type"):
        lp = fedata.get("code_rate_lp")
        hp = fedata.get("code_rate_hp")
        if lp and hp:
            return lp + "-" + hp
    else:
        fec = fedata.get("fec_inner")
        if fec:
            return fec
    return ""
def createModulation(self, fedata):
    """Return the constellation for terrestrial tuners, the modulation
    otherwise; "" when missing."""
    # NOTE(review): this compares against the *translated* string
    # _("Terrestrial"), so the branch taken depends on the active locale;
    # confirm ConvertToHumanReadable emits the same localized value.
    if fedata.get("tuner_type") == _("Terrestrial"):
        constellation = fedata.get("constellation")
        if constellation:
            return constellation
    else:
        modulation = fedata.get("modulation")
        if modulation:
            return modulation
    return ""
def createTunerType(self, feraw):
    """Return the raw tuner type string, or "" when unknown."""
    return feraw.get("tuner_type") or ""
def createTunerSystem(self, fedata):
    """Return the delivery system string (e.g. DVB-S2), or "" when unknown."""
    return fedata.get("system") or ""
def createOrbPos(self, feraw):
    """Return the orbital position in degrees with a degree-sign suffix
    (west for encoded values above 1800), or "" when absent/zero."""
    orbpos = feraw.get("orbital_position")
    if orbpos > 1800:
        # Western positions are encoded as 3600 - tenths-of-degree.
        return str(float(3600 - orbpos) / 10.0) + "\xc2\xb0 W"
    if orbpos > 0:
        return str(float(orbpos) / 10.0) + "\xc2\xb0 E"
    return ""
def createOrbPosOrTunerSystem(self, fedata, feraw):
    """Prefer the orbital position string; fall back to the tuner system."""
    orbpos = self.createOrbPos(feraw)
    # BUGFIX: was `orbpos is not ""` -- identity comparison with a string
    # literal is implementation-dependent; compare by equality instead.
    if orbpos != "":
        return orbpos
    return self.createTunerSystem(fedata)
def createTransponderName(self, feraw):
    """Map the orbital position to a well-known satellite name.

    Positions are tenths of a degree; values above 1800 are west of
    Greenwich (encoded as 3600 - position).  Unknown positions fall back to
    "<degrees>W"/"<degrees>E"; a missing or zero position yields "".
    The former 200-line elif chain is replaced by two lookup tables with
    identical contents.
    """
    west_names = {
        3592: 'Thor/Intelsat', 3590: 'Thor/Intelsat', 3560: 'Amos (4',
        3550: 'Atlantic Bird', 3530: 'Nilesat/Atlantic Bird',
        3520: 'Atlantic Bird', 3475: 'Atlantic Bird', 3460: 'Express',
        3450: 'Telstar', 3420: 'Intelsat', 3380: 'Nss', 3355: 'Intelsat',
        3325: 'Intelsat', 3300: 'Hispasat', 3285: 'Intelsat',
        3170: 'Intelsat', 3150: 'Intelsat', 3070: 'Intelsat',
        3045: 'Intelsat', 3020: 'Intelsat 9', 2990: 'Amazonas',
        2985: 'Echostar 3,12', 2900: 'Star One', 2880: 'AMC 6 (72',
        2875: 'Echostar 6', 2860: 'Horizons', 2830: 'Echostar 8',
        2810: 'AMC5', 2780: 'NIMIQ 4', 2690: 'NIMIQ 1', 2630: 'Galaxy 19',
        2502: 'DirectTV 5', 2500: 'Echostar 10,11', 2412: 'DirectTV 7S',
        2410: 'Echostar 7 Anik F3', 2391: 'Galaxy 23', 2390: 'Echostar 9',
        2311: 'Ciel 2', 2310: 'Galaxy 27', 2120: 'Echostar 2',
    }
    east_names = {
        192: 'Astra 1F', 130: 'Hot Bird 6,7A,8', 235: 'Astra 1E',
        1100: 'BSat 1A,2A', 1101: 'N-Sat 110', 1131: 'KoreaSat 5',
        1440: 'SuperBird 7,C2', 1006: 'AsiaSat 2', 1030: 'Express A2',
        1056: 'Asiasat 3S', 1082: 'NSS 11', 881: 'ST1', 900: 'Yamal 201',
        917: 'Mesat', 950: 'Insat 4B', 951: 'NSS 6', 765: 'Telestar',
        785: 'ThaiCom 5', 800: 'Express', 830: 'Insat 4A',
        850: 'Intelsat 709', 750: 'Abs', 720: 'Intelsat',
        705: 'Eutelsat W5', 685: 'Intelsat', 620: 'Intelsat 902',
        600: 'Intelsat 904', 570: 'Nss', 530: 'Express AM22',
        480: 'Eutelsat 2F2', 450: 'Intelsat', 420: 'Turksat 2A',
        400: 'Express AM1', 390: 'Hellas Sat 2', 380: 'Paksat 1',
        360: 'Eutelsat Sesat', 335: 'Astra 1M', 330: 'Eurobird 3',
        328: 'Galaxy 11', 315: 'Astra 5A', 310: 'Turksat', 305: 'Arabsat',
        285: 'Eurobird 1', 284: 'Eurobird/Astra', 282: 'Eurobird/Astra',
        1220: 'AsiaSat', 1380: 'Telstar 18', 260: 'Badr 3/4',
        255: 'Eurobird 2', 216: 'Eutelsat W6', 215: 'Eutelsat',
        210: 'AfriStar 1', 160: 'Eutelsat W2', 100: 'Eutelsat W1',
        90: 'Eurobird 9', 70: 'Eutelsat W3A', 50: 'Sirius 4',
        48: 'Sirius 4', 30: 'Telecom 2',
    }
    orbpos = feraw.get("orbital_position")
    if orbpos > 1800:
        return west_names.get(orbpos, str(float(3600 - orbpos) / 10.0) + "W")
    elif orbpos > 0:
        return east_names.get(orbpos, str(float(orbpos) / 10.0) + "E")
    return ""
def createProviderName(self, info):
    # Provider name as stored in the service information (may be empty).
    return info.getInfoString(iServiceInformation.sProvider)
@cached
def getText(self):
    """Render the output selected by self.type.

    Crypto items honour config.usage.show_cryptoinfo; transponder items
    require valid frontend data and return "" otherwise; an unrecognised
    type yields the localized "invalid type" marker.
    """
    service = self.source.service
    if service is None:
        return ""
    info = service and service.info()
    if not info:
        return ""

    if self.type == "CryptoInfo":
        self.getCryptoInfo(info)
        if int(config.usage.show_cryptoinfo.value) > 0:
            return addspace(self.createCryptoBar(info)) + self.createCryptoSpecial(info)
        else:
            return addspace(self.createCryptoBar(info)) + addspace(self.current_source) + self.createCryptoSpecial(info)

    # All simple per-CA items share one pattern: render only when crypto
    # info display is enabled, otherwise produce an empty string.
    crypto_handlers = {
        "CryptoBar": self.createCryptoBar,
        "CryptoSeca": self.createCryptoSeca,
        "CryptoVia": self.createCryptoVia,
        "CryptoIrdeto": self.createCryptoIrdeto,
        "CryptoNDS": self.createCryptoNDS,
        "CryptoConax": self.createCryptoConax,
        "CryptoCryptoW": self.createCryptoCryptoW,
        "CryptoBeta": self.createCryptoBeta,
        "CryptoNagra": self.createCryptoNagra,
        "CryptoBiss": self.createCryptoBiss,
        "CryptoDre": self.createCryptoDre,
        "CryptoSpecial": self.createCryptoSpecial,
    }
    if self.type in crypto_handlers:
        if int(config.usage.show_cryptoinfo.value) > 0:
            self.getCryptoInfo(info)
            return crypto_handlers[self.type](info)
        return ""

    if self.type == "ResolutionString":
        return self.createResolution(info)
    if self.type == "VideoCodec":
        return self.createVideoCodec(info)

    # Frontend data is refreshed only after a new-program event flagged it.
    if self.updateFEdata:
        feinfo = service.frontendInfo()
        if feinfo:
            self.feraw = feinfo.getAll(config.usage.infobar_frontend_source.value == "settings")
            if self.feraw:
                self.fedata = ConvertToHumanReadable(self.feraw)
    feraw = self.feraw
    fedata = self.fedata
    if not feraw or not fedata:
        return ""

    if self.type == "All":
        self.getCryptoInfo(info)
        if int(config.usage.show_cryptoinfo.value) > 0:
            return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata, feraw) + addspace(self.createTransponderName(feraw)) + "\n" \
                + addspace(self.createCryptoBar(info)) + addspace(self.createCryptoSpecial(info)) + "\n" \
                + addspace(self.createPIDInfo(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info)
        else:
            return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata, feraw) + addspace(self.createTransponderName(feraw)) + "\n" \
                + addspace(self.createCryptoBar(info)) + self.current_source + "\n" \
                + addspace(self.createCryptoSpecial(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info)
    if self.type == "ServiceInfo":
        return addspace(self.createProviderName(info)) + addspace(self.createTunerSystem(fedata)) + addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata)) \
            + addspace(self.createSymbolRate(fedata, feraw)) + addspace(self.createFEC(fedata, feraw)) + addspace(self.createModulation(fedata)) + addspace(self.createOrbPos(feraw)) + addspace(self.createTransponderName(feraw)) \
            + addspace(self.createVideoCodec(info)) + self.createResolution(info)
    if self.type == "TransponderInfo2line":
        return addspace(self.createProviderName(info)) + addspace(self.createTunerSystem(fedata)) + addspace(self.createTransponderName(feraw)) + '\n' \
            + addspace(self.createFrequency(fedata)) + addspace(self.createPolarization(fedata)) \
            + addspace(self.createSymbolRate(fedata, feraw)) + self.createModulation(fedata) + '-' + addspace(self.createFEC(fedata, feraw))
    if self.type == "TransponderInfo":
        return self.createTransponderInfo(fedata, feraw)

    # Remaining items each render a single field.
    if self.type == "TransponderFrequency":
        return self.createFrequency(feraw)
    if self.type == "TransponderSymbolRate":
        return self.createSymbolRate(fedata, feraw)
    if self.type == "TransponderPolarization":
        return self.createPolarization(fedata)
    if self.type == "TransponderFEC":
        return self.createFEC(fedata, feraw)
    if self.type == "TransponderModulation":
        return self.createModulation(fedata)
    if self.type == "OrbitalPosition":
        return self.createOrbPos(feraw)
    if self.type == "TunerType":
        return self.createTunerType(feraw)
    if self.type == "TunerSystem":
        return self.createTunerSystem(fedata)
    if self.type == "OrbitalPositionOrTunerSystem":
        return self.createOrbPosOrTunerSystem(fedata, feraw)
    if self.type == "PIDInfo":
        return self.createPIDInfo(info)
    if self.type == "TerrestrialChannelNumber":
        return self.createChannelNumber(fedata, feraw)
    return _("invalid type")

text = property(getText)
@cached
def getBool(self):
    # Boolean source backing the Crypto*Available / Crypto*Selected items
    # declared in ca_table.
    service = self.source.service
    info = service and service.info()
    if not info:
        return False
    # Resolve this converter's type to (short CA label, selected-vs-available).
    request_caid = None
    for x in self.ca_table:
        if x[0] == self.type:
            request_caid = x[1]
            request_selected = x[2]
            break
    if request_caid is None:
        return False
    # Only crypted services can have an active/available CA system.
    if info.getInfo(iServiceInformation.sIsCrypted) != 1:
        return False
    data = self.ecmdata.getEcmData()
    if data is None:
        return False
    current_caid = data[1]
    available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
    for caid_entry in self.caid_data:
        if caid_entry[3] == request_caid:
            if request_selected:
                # "Selected": the ECM's caid falls inside this system's range.
                if int(caid_entry[0], 16) <= int(current_caid, 16) <= int(caid_entry[1], 16):
                    return True
            else: # request available
                # "Available": any of the service's caids falls in range;
                # available_caids may be None, hence the broad guard.
                try:
                    for caid in available_caids:
                        if int(caid_entry[0], 16) <= caid <= int(caid_entry[1], 16):
                            return True
                except:
                    pass
    return False

boolean = property(getBool)
def changed(self, what):
    # Re-render on service events; frontend data itself is refreshed lazily
    # by getText(), gated on the updateFEdata flag set here.
    if what[0] == self.CHANGED_SPECIFIC:
        self.updateFEdata = False
        if what[1] == iPlayableService.evNewProgramInfo:
            # New program: getText() should re-read the frontend data once.
            self.updateFEdata = True
        if what[1] == iPlayableService.evEnd:
            # Playback ended: drop the cached frontend data.
            self.feraw = self.fedata = None
        Converter.changed(self, what)
    elif what[0] == self.CHANGED_POLL and self.updateFEdata is not None:
        self.updateFEdata = False
        Converter.changed(self, what)
| Open-Plus/opgui | lib/python/Components/Converter/PliExtraInfo.py | Python | gpl-2.0 | 25,040 | [
"Galaxy"
] | fff895bbdd4b707aef7e387a59a859be5d21527b55bd4745e9b2144264198271 |
#!/usr/bin/env python3
desc="""Calculate completeness of transcripts
Assumes sense read orientation (as in direct RNA-seq ONT).
Support spliced alignments and mapping quality filtering.
By default ignores secondary alignments, duplicates and quality failed reads.
Takes into account only the first read from pair.
USAGE: f=minimap2.ref10/20190219.bam; ../src/bam2transcript_completeness.py -v -i $f -b ../ref10/DANRE.gff3 | plot_hist.py -b 25 -t $f -o $f.transcript_completeness.png
"""
epilog="""Author: l.p.pryszcz+git@gmail.com
Oxford, 27/02/2019
"""
import os, sys, pysam
from datetime import datetime
import numpy as np
def description2name(desc):
    """Return the ID attribute from a GFF/GTF attribute string, or "NoName"."""
    attrs = {}
    for kv in desc.split(';'):
        if not kv:
            continue
        key, _, value = kv.partition('=')
        attrs[key] = value
    return attrs.get("ID", "NoName")
def load_intervals(fn, verbose=1, ftypefilter="gene"):
"""Return chr2intervals and number of entries"""
chr2intervals = {}
for i, rec in enumerate(open(fn)):
if rec.startswith('#') or not rec.strip():
continue
rdata = rec.split('\t')
score, strand = 1, "+"
# GTF / GFF
if fn.endswith(('gtf', 'gff', 'gff3')):
chrom, source, ftype, s, e, score, strand = rdata[:7]
s, e = int(s)-1, int(e)
name = description2name(rdata[8])
if name.startswith('gene:'): name=name[5:]
# BED
else:
# unstranded intervals
if len(rdata)<6:
chrom, s, e = rdata[:3]
name = i
else:
chrom, s, e, name, score, strand = rdata[:6]
s, e = int(s), int(e)
ftype = ''
# filter by feature type
if ftypefilter and ftypefilter!=ftype:
continue
if strand=="+":
strand = 0
else:
strand = 1
# add chromosome
if chrom not in chr2intervals:
chr2intervals[chrom] = []
# store interval
data = (s, e, strand, name)
chr2intervals[chrom].append(data)
# define numpy datatype
dtype = np.dtype({'names': ['start', 'end', 'strand', 'entry_id'], \
'formats': ['uint32', 'uint32', 'bool_', 'object']})
for chrom, data in chr2intervals.items():
chr2intervals[chrom] = np.array(data, dtype=dtype)
return chr2intervals, i
def _filter(a, mapq=0):
"""Return True if poor quality alignment"""
if a.mapq<mapq or a.is_secondary or a.is_duplicate or a.is_qcfail:
return True
def buffer_intervals(c2i, ivals, rname, pos, aend, maxp, pref, bufferSize):
    """Return (ivals, maxp, pref): a window of intervals around the current
    alignment for faster per-read selection.

    The buffer is rebuilt whenever the reference changes (rname != pref) or
    the alignment end passes the buffered region (aend > maxp); otherwise the
    previous buffer is returned untouched.
    """
    if rname != pref:
        # new reference: force a rebuild on the next check
        maxp = 0
    if aend > maxp:
        # get ref chrom
        c = rname
        s, e = pos, aend+bufferSize
        # update intervals
        if c in c2i:
            # select intervals that either start, end or encompass current window/buffer
            ivals = c2i[c][np.any([np.all([ c2i[c]['start']>=s, c2i[c]['start']<=e ], axis=0),
                                   np.all([ c2i[c]['end'] >=s, c2i[c]['end'] <=e ], axis=0),
                                   np.all([ c2i[c]['start']< s, c2i[c]['end'] > e ], axis=0)], axis=0)]
        else:
            # reference without any annotated intervals
            ivals = []
        # store current reference and max position
        pref = rname
        maxp = e
    return ivals, maxp, pref
def count_overlapping_intervals(blocks, is_reverse, ivals, out, verbose=0):
    """Count overlapping intervals with given read alignment.
    The algorithm support spliced alignments.

    Writes one tab-separated "name overlap" line to *out* for the
    best-matching interval ("-" and 0.0 when nothing matches).  Returns 0.
    when *ivals* is empty, otherwise None.
    """
    # skip if not ivals
    if not len(ivals):
        return 0.
    ## get intervals overlapping with given alignment blocks
    # an interval matches if any block's midpoint lies inside it on the same
    # strand - here I'm assuming sense read orientation
    d = [np.all([ (s+e)/2.>=ivals['start'], (s+e)/2.<=ivals['end'], is_reverse==ivals['strand'] ], axis=0) for s, e in blocks]
    # select intervals fulfilling any of above
    selected = ivals[np.any(d, axis=0)]
    # check if any matches, as sometimes empty cause problems
    name, overlap = "-", 0.0
    for s, e, strand, _name in selected:
        # clip the read span to the interval boundaries
        re = blocks[-1][-1] if blocks[-1][-1] < e else e
        rs = blocks[0][0] if blocks[0][0] > s else s
        # completeness = clipped read span / interval length; keep best match
        _overlap = 1.*(re-rs)/(e-s)
        if _overlap>overlap:
            name, overlap = _name, _overlap
    out.write("%s\t%s\n"%(name, overlap))
def alignment_iterator(bam, mapq, verbose):
    """Yield (blocks, is_reverse, pos, aend, refname) for each alignment in
    *bam* that passes _filter (MAPQ / secondary / duplicate / QC)."""
    # open BAM
    sam = pysam.AlignmentFile(bam)
    # count alg quality ok
    qok = 0
    # NOTE(review): pa/strands are never used below -- leftovers; confirm.
    pa, strands = 0, []
    for i, a in enumerate(sam, 1):
        # periodic progress report to stderr
        if verbose and not i%1e5:
            sys.stderr.write(' %i algs; %i ok \r'%(i, qok))
        # filter poor quality
        if _filter(a, mapq):
            continue
        qok += 1
        # a.blocks/a.aend are legacy pysam aliases for get_blocks()/reference_end
        yield a.blocks, a.is_reverse, a.pos, a.aend, sam.references[a.rname]
    # info
    if verbose:
        sys.stderr.write(' %i alignments processed.\n'%i)
def bam2overlap(bam, bed, out=sys.stdout, mapq=0, bufferSize=1000000, verbose=1):
    """Calculate coverage for genome intervals.

    For every passing alignment in *bam*, writes one "gene<TAB>overlap" line
    to *out* giving the fraction of the best-matching interval from *bed*
    that the read spans.
    """
    # load intervals
    if verbose:
        sys.stderr.write("Loading intervals...\n")
    c2i, entries = load_intervals(bed, verbose)
    if verbose:
        sys.stderr.write(" %s intervals from %s chromosomes loaded!\n"%(entries, len(c2i)) )
    # parse alignments & count interval overlaps
    if verbose:
        sys.stderr.write("Parsing alignments...\n")
    # write header
    out.write("# gene\tread overlap\n")
    # rolling interval buffer keyed on (reference, max buffered position)
    ivals, maxp, pref = [], 0, 0
    for blocks, strands, pos, aend, rname in alignment_iterator(bam, mapq, verbose):
        # update ivals
        ivals, maxp, pref = buffer_intervals(c2i, ivals, rname, pos, aend, maxp, pref, bufferSize)
        # score this read against the buffered intervals
        count_overlapping_intervals(blocks, strands, ivals, out, verbose)
def main():
    """Parse command-line options and run bam2overlap."""
    import argparse
    usage = "%(prog)s -v"
    parser = argparse.ArgumentParser(description=desc, epilog=epilog, \
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--version', action='version', version='1.1')
    parser.add_argument("-v", "--verbose", default=False, action="store_true",
                        help="verbose")
    parser.add_argument("-i", "--bam", required=True,
                        help="BAM file")
    parser.add_argument("-b", "--bed", required=True,
                        help="BED/GTF/GFF interval file")
    parser.add_argument("-o", "--output", default=sys.stdout, type=argparse.FileType('w'),
                        help="output stream [stdout]")
    parser.add_argument("-q", "--mapq", default=10, type=int,
                        help="min mapping quality for variants [%(default)s]")
    parser.add_argument("--bufferSize", default=100000, type=int,
                        help="buffer size for intervals [%(default)s]")
    o = parser.parse_args()
    if o.verbose:
        sys.stderr.write("Options: %s\n"%str(o))
    # calculate coverage from bam for given intervals
    bam2overlap(o.bam, o.bed, o.output, o.mapq, o.bufferSize, o.verbose)
if __name__=='__main__':
    # Script entry point: run main(), report interruptions and broken pipes
    # gracefully, and always print the elapsed wall-clock time.
    t0 = datetime.now()
    try:
        main()
    except KeyboardInterrupt:
        sys.stderr.write("\nCtrl-C pressed!      \n")
    except IOError as e:
        # e.g. broken pipe when piping the output to head/plotting tools
        sys.stderr.write("I/O error({0}): {1}\n".format(e.errno, e.strerror))
    dt = datetime.now()-t0
    sys.stderr.write("#Time elapsed: %s\n"%dt)
| lpryszcz/bin | bam2transcript_completeness.py | Python | gpl-3.0 | 8,163 | [
"pysam"
] | 093a04c1c9fcb94644c6893b1c4689ccbb72174167621b949db96507addfbdaf |
#!/usr/bin/python
import argparse
import hashlib
import sys
import os
import subprocess
import signal
import getpass
import simplejson
from termcolor import colored
import ConfigParser
import StringIO
import functools
import time
import random
import string
from configobj import ConfigObj
import tempfile
import pwd, grp
import traceback
import uuid
import yaml
import re
import OpenSSL
import glob
from shutil import copyfile
from zstacklib import *
import jinja2
import socket
import struct
import fcntl
import commands
import threading
import itertools
import platform
from datetime import datetime, timedelta
import multiprocessing
# Bash snippet executed on the database host: locates the active my.cnf
# (Ubuntu 16.04 mariadb layout, Ubuntu 14.04, or CentOS) and idempotently
# enables the binlog/connection/charset settings ZStack requires, plus a
# dedicated world-writable mysql tmpdir.  Each `grep ... || sed -i '/a ...'`
# pair only appends a setting when it is not already present.
mysql_db_config_script='''
echo "modify my.cnf"
if [ -f /etc/mysql/mariadb.conf.d/50-server.cnf ]; then
#ubuntu 16.04
mysql_conf=/etc/mysql/mariadb.conf.d/50-server.cnf
elif [ -f /etc/mysql/my.cnf ]; then
# Ubuntu 14.04
mysql_conf=/etc/mysql/my.cnf
elif [ -f /etc/my.cnf ]; then
# centos
mysql_conf=/etc/my.cnf
fi
sed -i 's/^bind-address/#bind-address/' $mysql_conf
sed -i 's/^skip-networking/#skip-networking/' $mysql_conf
sed -i 's/^bind-address/#bind-address/' $mysql_conf
grep 'binlog_format=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "binlog_format=mixed"
sed -i '/\[mysqld\]/a binlog_format=mixed\' $mysql_conf
fi
grep 'log_bin_trust_function_creators=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "log_bin_trust_function_creators=1"
sed -i '/\[mysqld\]/a log_bin_trust_function_creators=1\' $mysql_conf
fi
grep 'expire_logs=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "expire_logs=30"
sed -i '/\[mysqld\]/a expire_logs=30\' $mysql_conf
fi
grep 'max_binlog_size=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "max_binlog_size=500m"
sed -i '/\[mysqld\]/a max_binlog_size=500m\' $mysql_conf
fi
grep 'log-bin=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "log-bin=mysql-binlog"
sed -i '/\[mysqld\]/a log-bin=mysql-binlog\' $mysql_conf
fi
grep 'max_connections' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "max_connections=1024"
sed -i '/\[mysqld\]/a max_connections=1024\' $mysql_conf
else
echo "max_connections=1024"
sed -i 's/max_connections.*/max_connections=1024/g' $mysql_conf
fi
grep '^character-set-server' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "binlog_format=mixed"
sed -i '/\[mysqld\]/a character-set-server=utf8\' $mysql_conf
fi
grep '^skip-name-resolve' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
sed -i '/\[mysqld\]/a skip-name-resolve\' $mysql_conf
fi
grep 'tmpdir' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
mysql_tmp_path="/var/lib/zstack-mysql-tmp"
if [ ! -x "$mysql_tmp_path" ]; then
mkdir "$mysql_tmp_path"
chown mysql:mysql "$mysql_tmp_path"
chmod 1777 "$mysql_tmp_path"
fi
echo "tmpdir=$mysql_tmp_path"
sed -i "/\[mysqld\]/a tmpdir=$mysql_tmp_path" $mysql_conf
fi
'''
def signal_handler(signum, frame):
    """SIGINT handler: terminate the tool cleanly with exit status 0."""
    sys.exit(0)

signal.signal(signal.SIGINT, signal_handler)
def loop_until_timeout(timeout, interval=1):
    """Decorator factory: keep calling the wrapped predicate every *interval*
    seconds until it returns truthy (result True) or *timeout* seconds have
    elapsed (result False)."""
    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            deadline = time.time() + timeout
            while time.time() < deadline:
                if f(*args, **kwargs):
                    return True
                time.sleep(interval)
            return False
        return inner
    return wrap
def find_process_by_cmdline(cmdlines):
    """Return the pid (as a string) of the first /proc entry whose cmdline
    contains every fragment in *cmdlines*, or None when nothing matches.
    Entries that disappear while scanning (IOError) are skipped."""
    for pid in os.listdir('/proc'):
        if not pid.isdigit():
            continue
        try:
            with open(os.path.join('/proc', pid, 'cmdline'), 'r') as fd:
                cmdline = fd.read()
            if all(fragment in cmdline for fragment in cmdlines):
                return pid
        except IOError:
            continue
    return None
def ssh_run_full(ip, cmd, params=[], pipe=True):
    """Copy *cmd* to a temp script on *ip* via an ssh heredoc, run it with
    *params*, delete it, and return the finished ShellCmd; never raises on
    failure (caller inspects return_code/stdout/stderr).

    NOTE(review): the mutable default ``params=[]`` is shared across calls;
    it is never mutated here, but keep it that way.
    """
    remote_path = '/tmp/%s.sh' % uuid.uuid4()
    # The EOF/EOF1 heredoc terminators must stay at column 0, or the
    # remote shell never sees the end of the script.
    script = '''/bin/bash << EOF
cat << EOF1 > %s
%s
EOF1
/bin/bash %s %s
ret=$?
rm -f %s
exit $ret
EOF''' % (remote_path, cmd, remote_path, ' '.join(params), remote_path)
    scmd = ShellCmd('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "%s"' % (ip, script), pipe=pipe)
    scmd(False)
    return scmd
def ssh_run(ip, cmd, params=[]):
    """Run *cmd* on *ip* over ssh; raise CtlError on non-zero exit, else
    return the command's stdout."""
    result = ssh_run_full(ip, cmd, params)
    if result.return_code != 0:
        result.raise_error()
    return result.stdout
def ssh_run_no_pipe(ip, cmd, params=[]):
    """Like ssh_run but with output inherited (not captured); raises
    CtlError on non-zero exit and returns stdout (None when not piped)."""
    result = ssh_run_full(ip, cmd, params, False)
    if result.return_code != 0:
        result.raise_error()
    return result.stdout
class CtlError(Exception):
    """Error type raised throughout zstack-ctl for expected, user-facing failures."""
    pass
def warn(msg):
    """Print a yellow WARNING line on stdout; execution continues."""
    text = 'WARNING: %s\n' % msg
    sys.stdout.write(colored(text, 'yellow'))
def error(msg):
    """Print a red ERROR line on stderr and terminate the process with exit code 1."""
    text = 'ERROR: %s\n' % msg
    sys.stderr.write(colored(text, 'red'))
    sys.exit(1)
def error_not_exit(msg):
    """Print a red ERROR line on stderr WITHOUT terminating the process."""
    text = 'ERROR: %s\n' % msg
    sys.stderr.write(colored(text, 'red'))
def info(*msg):
    """Write message fragments to stdout.

    A single fragment gets a trailing newline appended; multiple fragments
    are concatenated and written verbatim (no newline added).
    """
    if len(msg) == 1:
        sys.stdout.write('%s\n' % msg[0])
    else:
        sys.stdout.write(''.join(msg))
def get_detail_version():
    """Return the content of $ZSTACK_HOME/VERSION, or None when the file is absent."""
    detailed_version_file = os.path.join(ctl.zstack_home, "VERSION")
    if os.path.exists(detailed_version_file):
        with open(detailed_version_file, 'r') as fd:
            detailed_version = fd.read()
        return detailed_version
    else:
        return None
def check_ip_port(host, port):
    """Return True when a TCP connection to host:port succeeds, else False.

    :param host: hostname or IP address string
    :param port: port number (int or numeric string)
    """
    import socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect_ex returns 0 on success instead of raising.
        return sock.connect_ex((host, int(port))) == 0
    finally:
        # Fix: the original leaked the socket on every call.
        sock.close()
def compare_version(version1, version2):
    """Compare dotted version strings, ignoring trailing ".0" groups.

    Returns cmp(version2, version1) on the normalized integer lists, i.e. a
    negative value when version1 is the newer one -- suitable as a sort
    comparator that orders newest-first.  Python 2 only: relies on cmp().
    """
    def normalize(v):
        # Strip trailing ".0" groups so "1.2.0" == "1.2", then compare numerically.
        return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
    return cmp(normalize(version2), normalize(version1))
def get_zstack_version(db_hostname, db_port, db_user, db_password):
    """Return the newest schema version recorded in the zstack database.

    Python 2 only: list.sort(cmp=...) uses the removed cmp keyword.
    """
    query = MySqlCommandLineQuery()
    query.host = db_hostname
    query.port = db_port
    query.user = db_user
    query.password = db_password
    query.table = 'zstack'
    query.sql = "select version from schema_version order by version desc"
    ret = query.query()
    versions = [r['version'] for r in ret]
    # compare_version orders newest-first (it compares v2 against v1).
    versions.sort(cmp=compare_version)
    version = versions[0]
    return version
def get_default_gateway_ip():
    '''This function will return default route gateway ip address'''
    # /proc/net/route columns: Iface Destination Gateway Flags ... Mask,
    # all hex, little-endian addresses.
    with open("/proc/net/route") as gateway:
        try:
            for item in gateway:
                fields = item.strip().split()
                # Skip non-default destinations and routes without the
                # RTF_GATEWAY flag (0x2).
                if fields[1] != '00000000' or not int(fields[3], 16) & 2:
                    continue
                if fields[7] == '00000000':
                    # Convert the little-endian hex gateway to dotted quad.
                    return socket.inet_ntoa(struct.pack("=L", int(fields[2], 16)))
        except ValueError:
            return None
def get_default_ip():
    """Return the first IPv4 address of the default-route interface, or an
    empty string when none can be determined."""
    probe = ShellCmd("""dev=`ip route|grep default|head -n 1|awk -F "dev" '{print $2}' | awk -F " " '{print $1}'`; ip addr show $dev |grep "inet "|awk '{print $2}'|head -n 1 |awk -F '/' '{print $1}'""")
    probe(False)
    return probe.stdout.strip()
def get_yum_repo_from_property():
    """Read the Ansible.var.zstack_repo property, rewriting the *-mn repo
    aliases to their local equivalents (the -mn http server may not have
    started yet during package installation)."""
    yum_repo = ctl.read_property('Ansible.var.zstack_repo')
    if not yum_repo:
        return yum_repo
    for alias, local in (('zstack-mn', 'zstack-local'),
                         ('qemu-kvm-ev-mn', 'qemu-kvm-ev')):
        if alias in yum_repo:
            yum_repo = yum_repo.replace(alias, local)
    return yum_repo
def get_host_list(table_name):
    """Return every row of *table_name* from the live zstack database as a
    list of dicts.

    NOTE(review): table_name is interpolated into the SQL unescaped; only
    call with trusted, internal table names.
    """
    db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
    query = MySqlCommandLineQuery()
    query.host = db_hostname
    query.port = db_port
    query.user = db_user
    query.password = db_password
    query.table = 'zstack'
    query.sql = "select * from %s" % table_name
    host_vo = query.query()
    return host_vo
def get_vrouter_list():
    """Return the management-NIC IP (deviceId 0) of every virtual-router VM."""
    db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
    query = MySqlCommandLineQuery()
    query.host = db_hostname
    query.port = db_port
    query.user = db_user
    query.password = db_password
    query.table = 'zstack'
    query.sql = "select ip from VmNicVO where deviceId = 0 and vmInstanceUuid in (select uuid from VirtualRouterVmVO)"
    return [row['ip'] for row in query.query()]
def get_ha_mn_list(conf_file):
    """Return the management-node host list from an HA config YAML file.

    :param conf_file: path to a YAML file with a comma-separated 'host_list' key
    :return: list of host strings
    """
    with open(conf_file, 'r') as fd:
        # Fix: use safe_load -- plain yaml.load can construct arbitrary
        # Python objects from the config file.
        ha_conf_content = yaml.safe_load(fd.read())
    mn_list = ha_conf_content['host_list'].split(',')
    return mn_list
def stop_mevoco(host_post_info):
    """Stop the management node and UI on a remote host over ssh.

    Exits the whole tool via error() when the remote command fails.
    Python 2 only: uses the removed `commands` module.
    """
    command = "zstack-ctl stop_node && zstack-ctl stop_ui"
    logger.debug("[ HOST: %s ] INFO: starting run shell command: '%s' " % (host_post_info.host, command))
    (status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
                                               (host_post_info.private_key, host_post_info.host, command))
    if status != 0:
        # Fix: the failure branch was mislabelled "INFO"; use "FAIL" to
        # match start_mevoco's logging.
        logger.error("[ HOST: %s ] FAIL: shell command: '%s' failed" % (host_post_info.host, command))
        error("Something wrong on host: %s\n %s" % (host_post_info.host, output))
    else:
        logger.debug("[ HOST: %s ] SUCC: shell command: '%s' successfully" % (host_post_info.host, command))
def start_mevoco(host_post_info):
    """Start the management node and UI on a remote host over ssh.

    Exits the whole tool via error() when the remote command fails.
    Python 2 only: uses the removed `commands` module.
    """
    command = "zstack-ctl start_node && zstack-ctl start_ui"
    logger.debug("[ HOST: %s ] INFO: starting run shell command: '%s' " % (host_post_info.host, command))
    (status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
                                               (host_post_info.private_key, host_post_info.host, command))
    if status != 0:
        logger.error("[ HOST: %s ] FAIL: shell command: '%s' failed" % (host_post_info.host, command))
        error("Something wrong on host: %s\n %s" % (host_post_info.host, output))
    else:
        logger.debug("[ HOST: %s ] SUCC: shell command: '%s' successfully" % (host_post_info.host, command))
class ExceptionWrapper(object):
    """Context manager that turns any exception escaping its block into a
    fatal, user-facing error message (with a full traceback in verbose mode)."""
    def __init__(self, msg):
        self.msg = msg

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Verbose mode: print the traceback first, then fall through to the
        # normal error path.
        if globals().get('verbose', False) and exc_type and exc_val and exc_tb:
            error_not_exit(''.join(traceback.format_exception(exc_type, exc_val, exc_tb)))
        if exc_type == CtlError:
            # CtlError already carries a user-facing message; let it propagate.
            return
        if exc_val:
            # error() terminates the process; self.msg adds context.
            error('%s\n%s' % (str(exc_val), self.msg))
def on_error(msg):
    """Context manager: on any exception inside the block, print *msg* plus
    the exception and exit (see ExceptionWrapper)."""
    return ExceptionWrapper(msg)
def error_if_tool_is_missing(tool):
    """Raise CtlError unless *tool* is found on PATH (probed via `which`)."""
    missing = shell_return('which %s' % tool) != 0
    if missing:
        raise CtlError('cannot find tool "%s", please install it and re-run' % tool)
def expand_path(path):
    """Expand a leading '~' to the user's home directory; otherwise return
    the absolute form of *path*."""
    return os.path.expanduser(path) if path.startswith('~') else os.path.abspath(path)
def validate_ip(s):
    """Return True when *s* is a dotted-quad IPv4 address (four numeric
    fields, each 0-255), else False."""
    fields = s.split('.')
    if len(fields) != 4:
        return False
    for field in fields:
        if not (field.isdigit() and 0 <= int(field) <= 255):
            return False
    return True
def check_host_info_format(host_info, with_public_key=False):
    '''check install ha and install multi mn node info format'''
    # Expected formats: 'root:password@ip[:port]' (password mode) or
    # 'root@ip[:port]' (public-key mode).  error() exits the process, so
    # reaching the final return means parsing succeeded.
    if '@' not in host_info:
        if with_public_key is False:
            error("Host connect info: '%s' is wrong, should follow format: 'root:password@host_ip', please check your input!" % host_info)
        if with_public_key is True:
            error("Host connect info: '%s' is wrong, should follow format: 'root@host_ip', please check your input!" % host_info)
    else:
        # get user and password
        if ':' not in host_info.split('@')[0]:
            if with_public_key is False:
                error("Host connect information should follow format: 'root:password@host_ip', please check your input!")
            else:
                user = host_info.split('@')[0]
                password = ""
        else:
            if with_public_key is False:
                user = host_info.split('@')[0].split(':')[0]
                password = host_info.split('@')[0].split(':')[1]
            # NOTE(review): when with_public_key is True and the user part
            # contains ':', `user` is never assigned before the check below
            # -- confirm whether that input combination can occur.
            if user != "root":
                error("Only root user can be supported, please change user to root")
        # get ip and port
        if ':' not in host_info.split('@')[1]:
            ip = host_info.split('@')[1]
            port = '22'
        else:
            ip = host_info.split('@')[1].split(':')[0]
            port = host_info.split('@')[1].split(':')[1]
        if validate_ip(ip) is False:
            error("Ip : %s is invalid" % ip)
    return (user, password, ip, port)
def check_host_password(password, ip):
    """Verify that password-based ssh to root@ip works (10s timeout);
    exits via error() on failure.  Python 2 only: uses the removed
    `commands` module."""
    command ='timeout 10 sshpass -p "%s" ssh -q -o UserKnownHostsFile=/dev/null -o PubkeyAuthentication=no -o ' \
             'StrictHostKeyChecking=no root@%s echo ""' % (password, ip)
    (status, output) = commands.getstatusoutput(command)
    if status != 0:
        error("Connect to host: '%s' with password '%s' failed! Please check password firstly and make sure you have "
              "disabled UseDNS in '/etc/ssh/sshd_config' on %s" % (ip, password, ip))
def check_host_connection_with_key(ip, user="root", private_key=""):
    """Verify that key-based ssh to user@ip works (5s timeout); exits via
    error() on failure.

    NOTE(review): *private_key* is only used in the error message -- the
    ssh command relies on the default identity/agent, and sshpass is given
    no -p argument here; confirm intent.
    """
    command ='timeout 5 sshpass ssh -q %s@%s echo ""' % (user, ip)
    (status, stdout, stderr) = shell_return_stdout_stderr(command)
    if status != 0:
        error("Connect to host: '%s' with private key: '%s' failed, please transfer your public key "
              "to remote host firstly then make sure the host address is valid" % (ip, private_key))
def get_ip_by_interface(device_name):
    """Return the IPv4 address bound to *device_name* (e.g. 'eth0') via the
    SIOCGIFADDR (0x8915) ioctl.

    Python 2 only: struct.pack('256s', ...) expects a str here.
    NOTE(review): the datagram socket is never closed.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Bytes 20-24 of the returned ifreq hold the interface address.
    return socket.inet_ntoa(fcntl.ioctl(
        s.fileno(),
        0x8915,
        struct.pack('256s', device_name[:15])
    )[20:24])
def start_remote_mn( host_post_info):
    """Start the management node and UI on a remote host using the
    upgrade-HA private key; exits via error() on ssh failure.
    Python 2 only: uses the removed `commands` module."""
    command = "zstack-ctl start_node && zstack-ctl start_ui"
    (status, output) = commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
                                                (UpgradeHACmd.private_key_name, host_post_info.host, command))
    if status != 0:
        error("Something wrong on host: %s\n %s" % (host_post_info.host, output))
    logger.debug("[ HOST: %s ] SUCC: shell command: '%s' successfully" % (host_post_info.host, command))
class SpinnerInfo(object):
    """Plain holder describing one console spinner.

    spinner_status is a CLASS-level dict shared by all spinners, mapping a
    spinner name to a truthy keep-running flag polled by ZstackSpinner.run.
    """
    spinner_status = {}

    def __init__(self):
        # Text shown while spinning / unique key into spinner_status.
        self.output = ""
        self.name = ""
class ZstackSpinner(object):
    """Daemon-thread console spinner started on construction.

    Animates "<output>: ... |/~\\" on stdout until
    SpinnerInfo.spinner_status[self.name] becomes falsy, then prints a
    green PASS line.  Python 2 only: uses the print statement.
    """
    def __init__(self, spinner_info):
        self.output = spinner_info.output
        self.name = spinner_info.name
        self.spinner = itertools.cycle("|/~\\")
        self.thread = threading.Thread(target=self.run, args=())
        self.thread.daemon = True
        self.thread.start()

    def run(self):
        # Small delay so the caller can register its status flag first.
        time.sleep(.2)
        while SpinnerInfo.spinner_status[self.name]:
            sys.stdout.write("\r %s: ... %s " % (self.output, next(self.spinner)))
            sys.stdout.flush()
            time.sleep(.1)
        print "\r %s: ... %s" % (self.output, colored("PASS","green"))
class Ansible(object):
    """Run an ansible-playbook against a single host through a generated
    bash script.

    The playbook YAML is written to a mktemp file; when a key-less ssh
    probe fails, --ask-pass is appended as a fallback.  In the template
    below '$$' is string.Template's escape for a literal '$' that must
    survive into the generated bash.
    """
    def __init__(self, yaml, host='localhost', debug=False, ssh_key='none'):
        self.yaml = yaml
        self.host = host
        self.debug = debug
        self.ssh_key = ssh_key

    def __call__(self, *args, **kwargs):
        error_if_tool_is_missing('ansible-playbook')
        cmd = '''
yaml_file=`mktemp`
cat <<EOF >> $$yaml_file
$yaml
EOF
ansible_cmd="ansible-playbook $$yaml_file -i '$host,'"
if [ $debug -eq 1 ]; then
ansible_cmd="$$ansible_cmd -vvvv"
fi
if [ "$ssh_key" != "none" ]; then
ansible_cmd="$$ansible_cmd --private-key=$ssh_key"
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i $ssh_key $host 'echo hi > /dev/null'
else
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no $host 'echo hi > /dev/null'
fi
if [ $$? -ne 0 ]; then
ansible_cmd="$$ansible_cmd --ask-pass"
fi
eval $$ansible_cmd
ret=$$?
rm -f $$yaml_file
exit $$ret
'''
        t = string.Template(cmd)
        cmd = t.substitute({
            'yaml': self.yaml,
            'host': self.host,
            'debug': int(self.debug),
            'ssh_key': self.ssh_key
        })
        with on_error('Ansible failure'):
            try:
                shell_no_pipe(cmd)
            except CtlError:
                raise Exception('see prior Ansible log for detailed information')
def ansible(yaml, host='localhost', debug=False, ssh_key=None):
    """Convenience wrapper: build an Ansible runner and invoke it at once;
    a missing/empty ssh_key falls back to the 'none' sentinel."""
    key = ssh_key if ssh_key else 'none'
    Ansible(yaml, host, debug, key)()
def reset_dict_value(dict_name, value):
    """Return a new dict with the same keys as *dict_name* (any iterable of
    keys works) and every value set to *value*."""
    return {key: value for key in dict_name}
def check_zstack_user():
    """Ensure both the 'zstack' user and the 'zstack' group exist.

    :raises CtlError: when either account is missing.
    """
    try:
        pwd.getpwnam('zstack')
    except KeyError:
        raise CtlError('cannot find user account "zstack", your installation seems incomplete')
    try:
        grp.getgrnam('zstack')
    except KeyError:
        # Fix: this branch checks the GROUP; the original message wrongly
        # said "user account" again.
        raise CtlError('cannot find group "zstack", your installation seems incomplete')
class UseUserZstack(object):
    """Context manager that temporarily switches the effective uid/gid and
    $HOME to the 'zstack' account, restoring them on exit."""
    def __init__(self):
        self.root_uid = None
        self.root_gid = None
        check_zstack_user()

    def __enter__(self):
        self.root_uid = os.getuid()
        self.root_gid = os.getgid()
        self.root_home = os.environ['HOME']
        # Group is switched before uid: once the effective uid is non-root
        # the process may no longer be allowed to change its gid.
        os.setegid(grp.getgrnam('zstack').gr_gid)
        os.seteuid(pwd.getpwnam('zstack').pw_uid)
        os.environ['HOME'] = os.path.expanduser('~zstack')

    def __exit__(self, exc_type, exc_val, exc_tb):
        os.seteuid(self.root_uid)
        os.setegid(self.root_gid)
        os.environ['HOME'] = self.root_home
def use_user_zstack():
    """Context manager: temporarily drop effective uid/gid (and $HOME) to
    the 'zstack' user; see UseUserZstack."""
    return UseUserZstack()
class PropertyFile(object):
    """Thin wrapper around a ConfigObj-backed key=value properties file.

    When use_zstack is True, writes are performed with the effective uid of
    the 'zstack' user so file ownership stays correct.
    """
    def __init__(self, path, use_zstack=True):
        self.path = path
        self.use_zstack = use_zstack
        if not os.path.isfile(self.path):
            raise CtlError('cannot find property file at %s' % self.path)
        with on_error("errors on reading %s" % self.path):
            self.config = ConfigObj(self.path, write_empty_values=True)

    def read_all_properties(self):
        """Return all (key, value) pairs in the file."""
        with on_error("errors on reading %s" % self.path):
            return self.config.items()

    def delete_properties(self, keys):
        """Remove each key in *keys* (if present) and persist as the zstack user."""
        for k in keys:
            if k in self.config:
                del self.config[k]
        with use_user_zstack():
            self.config.write()

    def read_property(self, key):
        """Return the value stored under *key*, or None when absent."""
        with on_error("errors on reading %s" % self.path):
            return self.config.get(key, None)

    def write_property(self, key, value):
        """Set key=value and persist the file."""
        with on_error("errors on writing (%s=%s) to %s" % (key, value, self.path)):
            if self.use_zstack:
                with use_user_zstack():
                    self.config[key] = value
                    self.config.write()
            else:
                self.config[key] = value
                self.config.write()

    def write_properties(self, lst):
        """Set every (key, value) pair in *lst* and persist the file."""
        with on_error("errors on writing list of key-value%s to %s" % (lst, self.path)):
            if self.use_zstack:
                with use_user_zstack():
                    for key, value in lst:
                        self.config[key] = value
                    self.config.write()
            else:
                for key, value in lst:
                    self.config[key] = value
                self.config.write()
class CtlParser(argparse.ArgumentParser):
    """ArgumentParser that, on a parse error, prints the full help text
    (instead of argparse's terse usage line) and exits with status 1."""
    def error(self, message):
        sys.stderr.write('error:%s\n' % message)
        self.print_help()
        sys.exit(1)
class Ctl(object):
    """Application object for zstack-ctl.

    Holds the registered sub-commands and the argparse machinery, resolves
    ZSTACK_HOME, and provides accessors for the zstack / zstack-ui property
    files and their MySQL connection settings.  A single module-level
    instance (`ctl`) is shared by every command in this file.
    """
    DEFAULT_ZSTACK_HOME = '/usr/local/zstack/apache-tomcat/webapps/zstack/'
    USER_ZSTACK_HOME_DIR = os.path.expanduser('~zstack')
    # env-store keys remembering the last mysql node that answered.
    LAST_ALIVE_MYSQL_IP = "MYSQL_LATEST_IP"
    LAST_ALIVE_MYSQL_PORT = "MYSQL_LATEST_PORT"
    LOGGER_DIR = "/var/log/zstack/"
    LOGGER_FILE = "zstack-ctl.log"
    ZSTACK_UI_HOME = '/usr/local/zstack/zstack-ui/'
    ZSTACK_UI_KEYSTORE = ZSTACK_UI_HOME + 'ui.keystore.p12'
    ZSTACK_UI_KEYSTORE_CP = ZSTACK_UI_KEYSTORE + '.cp'

    def __init__(self):
        self.commands = {}
        self.command_list = []
        self.main_parser = CtlParser(prog='zstackctl', description="ZStack management tool", formatter_class=argparse.RawTextHelpFormatter)
        self.main_parser.add_argument('-v', help="verbose, print execution details", dest="verbose", action="store_true", default=False)
        self.zstack_home = None
        self.properties_file_path = None
        self.ui_properties_file_path = None
        self.verbose = False
        self.extra_arguments = None

    def register_command(self, cmd):
        """Register a Command instance so run() can dispatch to it by name."""
        assert cmd.name, "command name cannot be None"
        assert cmd.description, "command description cannot be None"
        self.commands[cmd.name] = cmd
        self.command_list.append(cmd)

    def locate_zstack_home(self):
        """Resolve ZSTACK_HOME (env file > environment > default) and derive
        the property-file and ssh-key paths; raise CtlError when the
        directory does not exist."""
        env_path = os.path.expanduser(SetEnvironmentVariableCmd.PATH)
        if os.path.isfile(env_path):
            env = PropertyFile(env_path)
            self.zstack_home = env.read_property('ZSTACK_HOME')
        if not self.zstack_home:
            self.zstack_home = os.environ.get('ZSTACK_HOME', None)
        if not self.zstack_home:
            warn('ZSTACK_HOME is not set, default to %s' % self.DEFAULT_ZSTACK_HOME)
            self.zstack_home = self.DEFAULT_ZSTACK_HOME
        if not os.path.isdir(self.zstack_home):
            raise CtlError('cannot find ZSTACK_HOME at %s, please set it in .bashrc or use zstack-ctl setenv ZSTACK_HOME=path' % self.zstack_home)
        os.environ['ZSTACK_HOME'] = self.zstack_home
        self.properties_file_path = os.path.join(self.zstack_home, 'WEB-INF/classes/zstack.properties')
        self.ui_properties_file_path = os.path.join(Ctl.ZSTACK_UI_HOME, 'zstack.ui.properties')
        self.ssh_private_key = os.path.join(self.zstack_home, 'WEB-INF/classes/ansible/rsaKeys/id_rsa')
        self.ssh_public_key = os.path.join(self.zstack_home, 'WEB-INF/classes/ansible/rsaKeys/id_rsa.pub')
        if not os.path.isfile(self.properties_file_path):
            warn('cannot find %s, your ZStack installation may have crashed' % self.properties_file_path)

    def get_env(self, name):
        """Read one variable from the persistent env property file."""
        env = PropertyFile(SetEnvironmentVariableCmd.PATH)
        return env.read_property(name)

    def delete_env(self, name):
        """Delete one variable from the persistent env property file."""
        env = PropertyFile(SetEnvironmentVariableCmd.PATH)
        env.delete_properties([name])

    def put_envs(self, vs):
        """Persist a list of (name, value) pairs, creating the env file
        (owned by zstack) on first use."""
        if not os.path.exists(SetEnvironmentVariableCmd.PATH):
            shell('su - zstack -c "mkdir -p %s"' % os.path.dirname(SetEnvironmentVariableCmd.PATH))
            shell('su - zstack -c "touch %s"' % SetEnvironmentVariableCmd.PATH)
        env = PropertyFile(SetEnvironmentVariableCmd.PATH)
        env.write_properties(vs)

    def run(self):
        """Parse sys.argv, prepare ZSTACK_HOME / zstack user when the chosen
        command needs them, then dispatch to that command."""
        create_log(Ctl.LOGGER_DIR, Ctl.LOGGER_FILE)
        if os.getuid() != 0:
            raise CtlError('zstack-ctl needs root privilege, please run with sudo')
        # NOTE(review): ui_properties_file_path is still None here (it is
        # set by locate_zstack_home, which runs later) -- confirm how this
        # branch behaves on hosts where ZSTACK_UI_HOME exists.
        if os.path.exists(Ctl.ZSTACK_UI_HOME) and not os.path.exists(self.ui_properties_file_path):
            os.mknod(self.ui_properties_file_path)
            os.chmod(self.ui_properties_file_path, 438)  # 438 == 0o666 (rw-rw-rw-)
        metavar_list = []
        for n,cmd in enumerate(self.command_list):
            if cmd.hide is False:
                metavar_list.append(cmd.name)
            else:
                # Hidden commands get no description so they stay out of help.
                self.command_list[n].description = None
        metavar_string = '{' + ','.join(metavar_list) + '}'
        subparsers = self.main_parser.add_subparsers(help="All sub-commands", dest="sub_command_name", metavar=metavar_string)
        for cmd in self.command_list:
            if cmd.description is not None:
                cmd.install_argparse_arguments(subparsers.add_parser(cmd.name, help=cmd.description + '\n\n'))
            else:
                cmd.install_argparse_arguments(subparsers.add_parser(cmd.name))
        args, self.extra_arguments = self.main_parser.parse_known_args(sys.argv[1:])
        self.verbose = args.verbose
        # Mirror the flag into a module global consumed by ExceptionWrapper.
        globals()['verbose'] = self.verbose
        cmd = self.commands[args.sub_command_name]
        if cmd.need_zstack_home():
            self.locate_zstack_home()
        if cmd.need_zstack_user():
            check_zstack_user()
        cmd(args)

    def internal_run(self, cmd_name, args=''):
        """Invoke another registered sub-command programmatically, with
        *args* given as a single whitespace-separated string."""
        cmd = self.commands[cmd_name]
        assert cmd, 'cannot find command %s' % cmd_name
        params = [cmd_name]
        params.extend(args.split())
        args_obj, _ = self.main_parser.parse_known_args(params)
        if cmd.need_zstack_home():
            self.locate_zstack_home()
        if cmd.need_zstack_user():
            check_zstack_user()
        cmd(args_obj)

    def read_property_list(self, key):
        """Return all (name, value) pairs whose name starts with *key*."""
        prop = PropertyFile(self.properties_file_path)
        ret = []
        for name, value in prop.read_all_properties():
            if name.startswith(key):
                ret.append((name, value))
        return ret

    def read_all_properties(self):
        """Return every (key, value) pair from zstack.properties."""
        prop = PropertyFile(self.properties_file_path)
        return prop.read_all_properties()

    def read_property(self, key):
        """Return the value of *key* from zstack.properties (None if absent)."""
        prop = PropertyFile(self.properties_file_path)
        val = prop.read_property(key)
        # our code assume all values are strings
        if isinstance(val, list):
            return ','.join(val)
        else:
            return val

    def write_properties(self, properties):
        """Write a list of (key, value) pairs into zstack.properties."""
        prop = PropertyFile(self.properties_file_path)
        with on_error('property must be in format of "key=value", no space before and after "="'):
            prop.write_properties(properties)

    def write_property(self, key, value):
        """Write one key=value into zstack.properties."""
        prop = PropertyFile(self.properties_file_path)
        with on_error('property must be in format of "key=value", no space before and after "="'):
            prop.write_property(key, value)

    def read_ui_property(self, key):
        """Return the value of *key* from zstack.ui.properties (None if absent)."""
        prop = PropertyFile(self.ui_properties_file_path)
        val = prop.read_property(key)
        # our code assume all values are strings
        if isinstance(val, list):
            return ','.join(val)
        else:
            return val

    def write_ui_properties(self, properties):
        """Write a list of (key, value) pairs into zstack.ui.properties."""
        prop = PropertyFile(self.ui_properties_file_path)
        with on_error('property must be in format of "key=value", no space before and after "="'):
            prop.write_properties(properties)

    def write_ui_property(self, key, value):
        """Write one key=value into zstack.ui.properties."""
        prop = PropertyFile(self.ui_properties_file_path)
        with on_error('property must be in format of "key=value", no space before and after "="'):
            prop.write_property(key, value)

    def get_db_url(self):
        """Return the JDBC url of the zstack DB; raise CtlError when unset."""
        db_url = self.read_property("DB.url")
        if not db_url:
            db_url = self.read_property('DbFacadeDataSource.jdbcUrl')
        if not db_url:
            raise CtlError("cannot find DB url in %s. please set DB.url" % self.properties_file_path)
        return db_url

    def get_ui_db_url(self):
        """Return the JDBC url of the zstack-ui DB; raise CtlError when unset."""
        db_url = self.read_ui_property("db_url")
        if not db_url:
            raise CtlError("cannot find zstack_ui db url in %s. please set db_url" % self.ui_properties_file_path)
        return db_url

    def get_live_mysql_portal(self, ui=False):
        """Return (hostname, port, user, password) of the first reachable
        mysql node, trying the last known-good node first; raise CtlError
        listing every failure when none answers.

        NOTE(review): the failure message below includes the DB password.
        """
        if ui:
            hostname_ports, user, password = self.get_ui_database_portal()
        else:
            hostname_ports, user, password = self.get_database_portal()
        last_ip = ctl.get_env(self.LAST_ALIVE_MYSQL_IP)
        last_port = ctl.get_env(self.LAST_ALIVE_MYSQL_PORT)
        if last_ip and last_port and (last_ip, last_port) in hostname_ports:
            # Move the last alive node to the front of the candidate list.
            first = (last_ip, last_port)
            lst = [first]
            for it in hostname_ports:
                if it != first:
                    lst.append(it)
            hostname_ports = lst
        errors = []
        for hostname, port in hostname_ports:
            if password:
                sql = 'mysql --host=%s --port=%s --user=%s --password=%s -e "select 1"' % (hostname, port, user, password)
            else:
                sql = 'mysql --host=%s --port=%s --user=%s -e "select 1"' % (hostname, port, user)
            cmd = ShellCmd(sql)
            cmd(False)
            if cmd.return_code == 0:
                # record the IP and port, so next time we will try them first
                ctl.put_envs([
                    (self.LAST_ALIVE_MYSQL_IP, hostname),
                    (self.LAST_ALIVE_MYSQL_PORT, port)
                ])
                return hostname, port, user, password
            errors.append('failed to connect to the mysql server[hostname:%s, port:%s, user:%s, password:%s]: %s %s' % (
                hostname, port, user, password, cmd.stderr, cmd.stdout
            ))
        raise CtlError('\n'.join(errors))

    def get_database_portal(self):
        """Return ([(hostname, port), ...], user, password) parsed from the
        zstack DB properties; raise CtlError when user/password is unset."""
        db_user = self.read_property("DB.user")
        if not db_user:
            db_user = self.read_property('DbFacadeDataSource.user')
        if not db_user:
            raise CtlError("cannot find DB user in %s. please set DB.user" % self.properties_file_path)
        db_password = self.read_property("DB.password")
        if db_password is None:
            db_password = self.read_property('DbFacadeDataSource.password')
        if db_password is None:
            raise CtlError("cannot find DB password in %s. please set DB.password" % self.properties_file_path)
        db_url = self.get_db_url()
        host_name_ports = []

        def parse_hostname_ports(prefix):
            # NOTE(review): lstrip() strips a CHARACTER SET, not the literal
            # prefix; it is only safe here because '/' is not in the prefix
            # and therefore stops the strip before the host part.
            ips = db_url.lstrip(prefix).lstrip('/').split('/')[0]
            ips = ips.split(',')
            for ip in ips:
                if ":" in ip:
                    hostname, port = ip.split(':')
                    host_name_ports.append((hostname, port))
                else:
                    # Default mysql port when none is given.
                    host_name_ports.append((ip, '3306'))

        if db_url.startswith('jdbc:mysql:loadbalance:'):
            parse_hostname_ports('jdbc:mysql:loadbalance:')
        elif db_url.startswith('jdbc:mysql:'):
            parse_hostname_ports('jdbc:mysql:')
        return host_name_ports, db_user, db_password

    def get_ui_database_portal(self):
        """Return ([(hostname, port), ...], user, password) parsed from the
        zstack-ui DB properties; raise CtlError when user/password is unset."""
        db_user = self.read_ui_property("db_username")
        if not db_user:
            raise CtlError("cannot find zstack_ui db username in %s. please set db_username" % self.ui_properties_file_path)
        db_password = self.read_ui_property("db_password")
        if db_password is None:
            raise CtlError("cannot find zstack_ui db password in %s. please set db_password" % self.ui_properties_file_path)
        db_url = self.get_ui_db_url()
        host_name_ports = []

        def parse_hostname_ports(prefix):
            # Same char-set lstrip caveat as in get_database_portal above.
            ips = db_url.lstrip(prefix).lstrip('/').split('/')[0]
            ips = ips.split(',')
            for ip in ips:
                if ":" in ip:
                    hostname, port = ip.split(':')
                    host_name_ports.append((hostname, port))
                else:
                    host_name_ports.append((ip, '3306'))

        if db_url.startswith('jdbc:mysql:loadbalance:'):
            parse_hostname_ports('jdbc:mysql:loadbalance:')
        elif db_url.startswith('jdbc:mysql:'):
            parse_hostname_ports('jdbc:mysql:')
        return host_name_ports, db_user, db_password

    def check_if_management_node_has_stopped(self, force=False):
        """Raise CtlError while ManagementNodeVO still lists running nodes.

        With force=True, instead wait 10s and only fail for nodes whose
        heartBeat actually advanced (i.e. genuinely alive, not stale rows).
        """
        db_hostname, db_port, db_user, db_password = self.get_live_mysql_portal()

        def get_nodes():
            query = MySqlCommandLineQuery()
            query.user = db_user
            query.password = db_password
            query.host = db_hostname
            query.port = db_port
            query.table = 'zstack'
            query.sql = 'select hostname,heartBeat from ManagementNodeVO'
            return query.query()

        def check():
            nodes = get_nodes()
            if nodes:
                node_ips = [n['hostname'] for n in nodes]
                raise CtlError('there are some management nodes%s are still running. Please stop all of them before performing the database upgrade.'
                               'If you are sure they have stopped, use option --force and run this command again.\n'
                               'If you are upgrade by all in on installer, use option -F and run all in one installer again.\n'
                               'WARNING: the database may crash if you run this command with --force but without stopping management nodes' % node_ips)

        def bypass_check():
            nodes = get_nodes()
            if nodes:
                node_ips = [n['hostname'] for n in nodes]
                info("it seems some nodes%s are still running. As you have specified option --force, let's wait for 10s to make sure those are stale records. Please be patient." % node_ips)
                time.sleep(10)
                new_nodes = get_nodes()
                for n in new_nodes:
                    for o in nodes:
                        if o['hostname'] == n['hostname'] and o['heartBeat'] != n['heartBeat']:
                            raise CtlError("node[%s] is still Running! Its heart-beat changed from %s to %s in last 10s. Please make sure you really stop it" %
                                           (n['hostname'], o['heartBeat'], n['heartBeat']))

        if force:
            bypass_check()
        else:
            check()
# Module-level singleton shared by every command and helper in this tool.
ctl = Ctl()
def script(cmd, args=None, no_pipe=False):
    """Write *cmd* (optionally string.Template-substituted with *args*) to a
    temporary .sh file, execute it with bash, and always remove the file."""
    if args:
        t = string.Template(cmd)
        cmd = t.substitute(args)
    fd, script_path = tempfile.mkstemp(suffix='.sh')
    os.fdopen(fd, 'w').write(cmd)
    try:
        if ctl.verbose:
            info('execute script:\n%s\n' % cmd)
        if no_pipe:
            shell_no_pipe('bash %s' % script_path)
        else:
            shell('bash %s' % script_path)
    finally:
        os.remove(script_path)
class ShellCmd(object):
    """Wrapper around subprocess for shell=True commands.

    pipe=True captures stdout/stderr; pipe=False inherits the parent's
    streams (stdout/stderr then remain None after the call).
    """
    def __init__(self, cmd, workdir=None, pipe=True):
        self.cmd = cmd
        if pipe:
            self.process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, cwd=workdir)
        else:
            self.process = subprocess.Popen(cmd, shell=True, cwd=workdir)
        self.return_code = None
        self.stdout = None
        self.stderr = None

    def raise_error(self):
        """Raise CtlError describing the failed command and its output."""
        err = []
        err.append('failed to execute shell command: %s' % self.cmd)
        err.append('return code: %s' % self.process.returncode)
        err.append('stdout: %s' % self.stdout)
        err.append('stderr: %s' % self.stderr)
        raise CtlError('\n'.join(err))

    def __call__(self, is_exception=True):
        """Wait for the command; raise CtlError on failure when
        is_exception is True, otherwise record return_code.  Returns stdout.

        NOTE(review): on the raising path, raise_error() fires before
        self.return_code is assigned, so it stays None in that case.
        """
        if ctl.verbose:
            info('executing shell command[%s]:' % self.cmd)
        (self.stdout, self.stderr) = self.process.communicate()
        if is_exception and self.process.returncode != 0:
            self.raise_error()
        self.return_code = self.process.returncode
        if ctl.verbose:
            info(simplejson.dumps({
                "shell" : self.cmd,
                "return_code" : self.return_code,
                "stdout": self.stdout,
                "stderr": self.stderr
            }, ensure_ascii=True, sort_keys=True, indent=4))
        return self.stdout
def shell(cmd, is_exception=True):
    """Run *cmd* through a shell with captured output; return its stdout
    (raising CtlError on failure unless is_exception is False)."""
    runner = ShellCmd(cmd)
    return runner(is_exception)
def shell_no_pipe(cmd, is_exception=True):
    """Run *cmd* with output inherited (not captured); raise CtlError on
    failure unless is_exception is False."""
    runner = ShellCmd(cmd, pipe=False)
    return runner(is_exception)
def shell_return(cmd):
    """Run *cmd* without raising; return only its exit code."""
    runner = ShellCmd(cmd)
    runner(False)
    return runner.return_code
def shell_return_stdout_stderr(cmd):
    """Run *cmd* without raising; return (exit_code, stdout, stderr)."""
    runner = ShellCmd(cmd)
    runner(False)
    return (runner.return_code, runner.stdout, runner.stderr)
class Command(object):
    """Base class for all zstack-ctl sub-commands."""
    def __init__(self):
        self.name = None               # sub-command name on the CLI
        self.description = None        # help text; None hides it from help
        self.hide = False              # exclude from the sub-command list
        self.cleanup_routines = []     # callables run after the command
        self.quiet = False             # suppress the post-run log line

    def install_argparse_arguments(self, parser):
        """Subclasses add their own CLI options here."""
        pass

    def install_cleanup_routine(self, func):
        """Register a no-arg callable executed after run(), even on error."""
        self.cleanup_routines.append(func)

    def need_zstack_home(self):
        """Whether dispatch must resolve ZSTACK_HOME before run()."""
        return True

    def need_zstack_user(self):
        """Whether dispatch must verify the 'zstack' user/group before run()."""
        return True

    def __call__(self, *args, **kwargs):
        try:
            self.run(*args)
            # Logged AFTER run() on purpose: subclasses set self.quiet from
            # their parsed args inside run(), so checking it earlier would
            # ignore a -q/--quiet flag.
            if not self.quiet:
                logger.info('Start running command [ zstack-ctl %s ]' % ' '.join(sys.argv[1:]))
        finally:
            for c in self.cleanup_routines:
                c()

    def run(self, args):
        """Subclasses implement the command body here."""
        raise CtlError('the command is not implemented')
def create_check_ui_status_command(timeout=10, ui_ip='127.0.0.1', ui_port='5000', if_https=False):
    """Build (but do not run) a ShellCmd probing the UI /health endpoint
    with wget, falling back to curl; return None when neither is installed.

    NOTE(review): curl's --noproxy expects a host-list argument; as written
    it would consume '--connect-timeout=1' as that argument -- verify the
    curl branch against the curl manual.
    """
    protocol = 'https' if if_https else 'http'
    if shell_return('which wget') == 0:
        return ShellCmd(
            '''wget --no-proxy -O- --tries=%s --no-check-certificate --timeout=1 %s://%s:%s/health''' % (timeout, protocol, ui_ip, ui_port))
    elif shell_return('which curl') == 0:
        return ShellCmd(
            '''curl -k --noproxy --connect-timeout=1 --retry %s --retry-delay 0 --retry-max-time %s --max-time %s %s://%s:%s/health''' % (
                timeout, timeout, timeout, protocol, ui_ip, ui_port))
    else:
        return None
def create_check_mgmt_node_command(timeout=10, mn_node='127.0.0.1'):
    """Build (but do not run) a ShellCmd that POSTs APIIsReadyToGoMsg to the
    management node's :8080/zstack/api endpoint, preferring curl over wget;
    return None when neither tool is installed.

    NOTE(review): curl's --noproxy expects a host-list argument; as written
    it would consume '--connect-timeout=1' -- verify against the curl manual.
    """
    USE_CURL = 0
    USE_WGET = 1
    NO_TOOL = 2

    def use_tool():
        # Probe for wget first, then curl; report which (if any) exists.
        cmd = ShellCmd('which wget')
        cmd(False)
        if cmd.return_code == 0:
            return USE_WGET
        else:
            cmd = ShellCmd('which curl')
            cmd(False)
            if cmd.return_code == 0:
                return USE_CURL
            else:
                return NO_TOOL

    what_tool = use_tool()
    if what_tool == USE_CURL:
        return ShellCmd('''curl --noproxy --connect-timeout=1 --retry %s --retry-delay 0 --retry-max-time %s --max-time %s -H "Content-Type: application/json" -d '{"org.zstack.header.apimediator.APIIsReadyToGoMsg": {}}' http://%s:8080/zstack/api''' % (timeout, timeout, timeout, mn_node))
    elif what_tool == USE_WGET:
        return ShellCmd('''wget --no-proxy -O- --tries=%s --timeout=1 --header=Content-Type:application/json --post-data='{"org.zstack.header.apimediator.APIIsReadyToGoMsg": {}}' http://%s:8080/zstack/api''' % (timeout, mn_node))
    else:
        return None
def find_process_by_cmdline(keyword):
    """Return the pid (as a string) of the first /proc process whose cmdline
    matches *keyword*, or None.

    Generalized: *keyword* may be a single substring OR a list/tuple of
    substrings that must ALL be present.  NOTE: this definition shadows the
    earlier list-based find_process_by_cmdline; accepting both argument
    forms keeps callers of either version working.

    :param keyword: substring, or list/tuple of substrings, to search for
    :return: matching pid string, or None
    """
    if isinstance(keyword, (list, tuple)):
        fragments = list(keyword)
    else:
        fragments = [keyword]
    pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
    for pid in pids:
        try:
            with open(os.path.join('/proc', pid, 'cmdline'), 'r') as fd:
                cmdline = fd.read()
        except IOError:
            # Process vanished between listdir and open; skip it.
            continue
        if all(f in cmdline for f in fragments):
            return pid
    return None
class MySqlCommandLineQuery(object):
    """Run a SQL query through the mysql CLI and parse its \\G (vertical)
    output into a list of row dicts."""
    def __init__(self):
        self.user = None
        self.password = None
        self.host = 'localhost'
        self.port = 3306
        self.sql = None
        self.table = None   # database name handed to the mysql CLI

    def query(self):
        """Execute self.sql and return [{column: value, ...}, ...].

        Raises CtlError when the CLI output cannot be parsed.
        """
        assert self.user, 'user cannot be None'
        assert self.sql, 'sql cannot be None'
        assert self.table, 'table cannot be None'
        # \G makes mysql print one "column: value" line per field with
        # '*****' row separators, which the loop below parses.
        sql = "%s\G" % self.sql
        if self.password:
            cmd = '''mysql -u %s -p%s --host %s --port %s -t %s -e "%s"''' % (self.user, self.password, self.host,
                                                                              self.port, self.table, sql)
        else:
            cmd = '''mysql -u %s --host %s --port %s -t %s -e "%s"''' % (self.user, self.host, self.port, self.table, sql)
        output = shell(cmd)
        output = output.strip(' \t\n\r')
        ret = []
        if not output:
            return ret
        current = None
        for l in output.split('\n'):
            if current is None and not l.startswith('*********'):
                raise CtlError('cannot parse mysql output generated by the sql "%s", output:\n%s' % (self.sql, output))
            if l.startswith('*********'):
                # Row separator: flush the previous row and start a new one.
                if current:
                    ret.append(current)
                current = {}
            else:
                l = l.strip()
                if ":" in l:
                    key, value = l.split(':', 1)
                    # value[1:] drops the single space mysql prints after ':'.
                    current[key.strip()] = value[1:]
        if current:
            ret.append(current)
        return ret
class ShowStatusCmd(Command):
    """zstack-ctl 'status': print management node paths, DB schema version and
    whether the node is currently serving APIs (locally or via --host)."""
    def __init__(self):
        super(ShowStatusCmd, self).__init__()
        self.name = 'status'
        self.description = 'show ZStack status and information.'
        ctl.register_command(self)
    def install_argparse_arguments(self, parser):
        parser.add_argument('--host', help='SSH URL, for example, root@192.168.0.10, to show the management node status on a remote machine')
        parser.add_argument('--quiet', '-q', help='Do not log this action.', action='store_true', default=False)
    def _stop_remote(self, args):
        # run 'zstack-ctl status' on the remote host over SSH
        shell_no_pipe('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "/usr/bin/zstack-ctl status"' % args.host)
    def run(self, args):
        """Print info paths, the DB version, the node status, and the UI status."""
        self.quiet = args.quiet
        if args.host:
            self._stop_remote(args)
            return
        log_path = os.path.join(ctl.zstack_home, "../../logs/management-server.log")
        log_path = os.path.normpath(log_path)
        info_list = [
            "ZSTACK_HOME: %s" % ctl.zstack_home,
            "zstack.properties: %s" % ctl.properties_file_path,
            "log4j2.xml: %s" % os.path.join(os.path.dirname(ctl.properties_file_path), 'log4j2.xml'),
            "PID file: %s" % os.path.join(os.path.expanduser('~zstack'), "management-server.pid"),
            "log file: %s" % log_path
        ]
        def check_zstack_status():
            # Probe the local API endpoint. Returns False when the node is
            # confirmed stopped/zombie; returns None in all other cases.
            cmd = create_check_mgmt_node_command()
            def write_status(status):
                info('MN status: %s' % status)
            if not cmd:
                write_status('cannot detect status, no wget and curl installed')
                return
            cmd(False)
            pid = get_management_node_pid()
            if cmd.return_code != 0:
                if pid:
                    # process alive but API unreachable: likely a zombie node
                    write_status('%s, the management node seems to become zombie as it stops responding APIs but the '
                                 'process(PID: %s) is still running. Please stop the node using zstack-ctl stop_node' %
                                 (colored('Unknown', 'yellow'), pid))
                else:
                    write_status(colored('Stopped', 'red'))
                return False
            # the API answers 'false' while still booting, 'true' once ready
            if 'false' in cmd.stdout:
                write_status('Starting, should be ready in a few seconds')
            elif 'true' in cmd.stdout:
                write_status(colored('Running', 'green') + ' [PID:%s]' % pid)
            else:
                write_status('Unknown')
        def show_version():
            # Resolve the deployed schema version; needs a live MySQL portal.
            try:
                db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
            except:
                info('version: %s' % colored('unknown, MySQL is not running', 'yellow'))
                return
            if db_password:
                cmd = ShellCmd('''mysql -u %s -p%s --host %s --port %s -t zstack -e "show tables like 'schema_version'"''' %
                               (db_user, db_password, db_hostname, db_port))
            else:
                cmd = ShellCmd('''mysql -u %s --host %s --port %s -t zstack -e "show tables like 'schema_version'"''' %
                               (db_user, db_hostname, db_port))
            cmd(False)
            if cmd.return_code != 0:
                info('version: %s' % colored('unknown, MySQL is not running', 'yellow'))
                return
            out = cmd.stdout
            if 'schema_version' not in out:
                # databases predating the schema_version table are version 0.6
                version = '0.6'
            else:
                version = get_zstack_version(db_hostname, db_port, db_user, db_password)
            detailed_version = get_detail_version()
            if detailed_version is not None:
                info('version: %s (%s)' % (version, detailed_version))
            else:
                info('version: %s' % version)
        info('\n'.join(info_list))
        show_version()
        s = check_zstack_status()
        if s is not None and not s:
            # node is stopped; surface any recorded boot error for the user
            boot_error_log = os.path.join(ctl.USER_ZSTACK_HOME_DIR, 'bootError.log')
            if os.path.exists(boot_error_log):
                info(colored('Management server met an error as below:', 'yellow'))
                with open(boot_error_log, 'r') as fd:
                    info(colored(fd.read(), 'red'))
        ctl.internal_run('ui_status', args='-q')
class DeployDBCmd(Command):
    """zstack-ctl 'deploydb': (re)create the ZStack MySQL database via the
    bundled deploydb.sh script, then record DB.* settings in zstack.properties
    unless --no-update is given."""
    # paths relative to ZSTACK_HOME
    DEPLOY_DB_SCRIPT_PATH = "WEB-INF/classes/deploydb.sh"
    ZSTACK_PROPERTY_FILE = "WEB-INF/classes/zstack.properties"
    def __init__(self):
        super(DeployDBCmd, self).__init__()
        self.name = "deploydb"
        self.description = (
            "deploy a new ZStack database, create a user 'zstack' with password specified in '--zstack-password',\n"
            "and update zstack.properties if --no-update is not set.\n"
            "\nDANGER: this will erase the existing ZStack database.\n"
            "NOTE: If the database is running on a remote host, please make sure you have granted privileges to the root user by:\n"
            "\n\tGRANT ALL PRIVILEGES ON *.* TO 'root'@'%%' IDENTIFIED BY 'your_root_password' WITH GRANT OPTION;\n"
            "\tFLUSH PRIVILEGES;\n"
        )
        ctl.register_command(self)
    def update_db_config(self):
        """Write the module-level mysql_db_config_script to a temp file, run it
        with bash, then delete the temp file."""
        update_db_config_script = mysql_db_config_script
        fd, update_db_config_script_path = tempfile.mkstemp()
        # NOTE(review): the file object is never explicitly closed; this relies
        # on CPython refcounting to flush/close before bash reads it -- confirm
        os.fdopen(fd, 'w').write(update_db_config_script)
        info('update_db_config_script_path is: %s' % update_db_config_script_path)
        ShellCmd('bash %s' % update_db_config_script_path)()
        os.remove(update_db_config_script_path)
    def install_argparse_arguments(self, parser):
        parser.add_argument('--root-password', help='root user password of MySQL. [DEFAULT] empty password')
        parser.add_argument('--zstack-password', help='password of user "zstack". [DEFAULT] empty password')
        parser.add_argument('--host', help='IP or DNS name of MySQL host; default is localhost', default='localhost')
        parser.add_argument('--port', help='port of MySQL host; default is 3306', type=int, default=3306)
        parser.add_argument('--no-update', help='do NOT update database information to zstack.properties; if you do not know what this means, do not use it', action='store_true', default=False)
        parser.add_argument('--drop', help='drop existing zstack database', action='store_true', default=False)
        parser.add_argument('--keep-db', help='keep existing zstack database and not raise error.', action='store_true', default=False)
    def run(self, args):
        """Validate tooling and paths, honor --drop/--keep-db for an existing
        database, run deploydb.sh, and update zstack.properties."""
        error_if_tool_is_missing('mysql')
        script_path = os.path.join(ctl.zstack_home, self.DEPLOY_DB_SCRIPT_PATH)
        if not os.path.exists(script_path):
            error('cannot find %s, your ZStack installation may have been corrupted, please reinstall it' % script_path)
        property_file_path = os.path.join(ctl.zstack_home, self.ZSTACK_PROPERTY_FILE)
        if not os.path.exists(property_file_path):
            error('cannot find %s, your ZStack installation may have been corrupted, please reinstall it' % property_file_path)
        # probe whether a 'zstack' database already exists
        if args.root_password:
            check_existing_db = 'mysql --user=root --password=%s --host=%s --port=%s -e "use zstack"' % (args.root_password, args.host, args.port)
        else:
            check_existing_db = 'mysql --user=root --host=%s --port=%s -e "use zstack"' % (args.host, args.port)
        self.update_db_config()
        cmd = ShellCmd(check_existing_db)
        cmd(False)
        # pass a literal '' to the shell script to represent empty passwords
        if not args.root_password:
            args.root_password = "''"
        if not args.zstack_password:
            args.zstack_password = "''"
        if cmd.return_code == 0 and not args.drop:
            if args.keep_db:
                info('detected existing zstack database and keep it; if you want to drop it, please append parameter --drop, instead of --keep-db\n')
            else:
                raise CtlError('detected existing zstack database; if you are sure to drop it, please append parameter --drop or use --keep-db to keep the database')
        else:
            cmd = ShellCmd('bash %s root %s %s %s %s' % (script_path, args.root_password, args.host, args.port, args.zstack_password))
            cmd(False)
            if cmd.return_code != 0:
                # access-denied errors get a dedicated hint about remote grants
                if ('ERROR 1044' in cmd.stdout or 'ERROR 1044' in cmd.stderr) or ('Access denied' in cmd.stdout or 'Access denied' in cmd.stderr):
                    raise CtlError(
                        "failed to deploy database, access denied; if your root password is correct and you use IP rather than localhost,"
                        "it's probably caused by the privileges are not granted to root user for remote access; please see instructions in 'zstack-ctl -h'."
                        "error details: %s, %s\n" % (cmd.stdout, cmd.stderr)
                    )
                else:
                    cmd.raise_error()
        if not args.no_update:
            # the properties file stores an empty password as an empty string
            if args.zstack_password == "''":
                args.zstack_password = ''
            properties = [
                ("DB.user", "zstack"),
                ("DB.password", args.zstack_password),
                ("DB.url", 'jdbc:mysql://%s:%s' % (args.host, args.port)),
            ]
            ctl.write_properties(properties)
        info('Successfully deployed ZStack database and updated corresponding DB information in %s' % property_file_path)
class DeployUIDBCmd(Command):
    """zstack-ctl 'deploy_ui_db': (re)create the zstack_ui MySQL database via
    the bundled deployuidb.sh script, then record db_* settings in
    zstack.ui.properties unless --no-update is given."""
    # paths relative to ZSTACK_HOME
    DEPLOY_UI_DB_SCRIPT_PATH = "WEB-INF/classes/deployuidb.sh"
    ZSTACK_UI_PROPERTY_FILE = "zstack.ui.properties"
    def __init__(self):
        super(DeployUIDBCmd, self).__init__()
        self.name = "deploy_ui_db"
        self.description = (
            "Deploy a new zstack_ui database.\n"
            "\nDANGER: this will erase the existing zstack_ui database.\n"
            "NOTE: If the database is running on a remote host, please make sure you have granted privileges to the root user by:\n"
            "\n\tGRANT ALL PRIVILEGES ON *.* TO 'root'@'%%' IDENTIFIED BY 'your_root_password' WITH GRANT OPTION;\n"
            "\tFLUSH PRIVILEGES;\n"
        )
        ctl.register_command(self)
    def update_db_config(self):
        """Write the module-level mysql_db_config_script to a temp file, run it
        with bash, then delete the temp file."""
        update_db_config_script = mysql_db_config_script
        fd, update_db_config_script_path = tempfile.mkstemp()
        # NOTE(review): the file object is never explicitly closed; this relies
        # on CPython refcounting to flush/close before bash reads it -- confirm
        os.fdopen(fd, 'w').write(update_db_config_script)
        info('update_db_config_script_path is: %s' % update_db_config_script_path)
        ShellCmd('bash %s' % update_db_config_script_path)()
        os.remove(update_db_config_script_path)
    def install_argparse_arguments(self, parser):
        parser.add_argument('--root-password', help='root user password of MySQL. [DEFAULT] empty password')
        parser.add_argument('--zstack-ui-password', help='password of user "zstack_ui". [DEFAULT] empty password')
        parser.add_argument('--host', help='IP or DNS name of MySQL host; default is localhost', default='localhost')
        parser.add_argument('--port', help='port of MySQL host; default is 3306', type=int, default=3306)
        parser.add_argument('--drop', help='drop existing zstack ui database', action='store_true', default=False)
        parser.add_argument('--no-update', help='do NOT update database information to zstack.ui.properties; if you do not know what this means, do not use it', action='store_true', default=False)
        parser.add_argument('--keep-db', help='keep existing zstack ui database and not raise error.', action='store_true', default=False)
    def run(self, args):
        """Validate tooling and paths, honor --drop/--keep-db for an existing
        zstack_ui database, run deployuidb.sh, and update zstack.ui.properties."""
        error_if_tool_is_missing('mysql')
        script_path = os.path.join(ctl.zstack_home, self.DEPLOY_UI_DB_SCRIPT_PATH)
        if not os.path.exists(script_path):
            error('cannot find %s, your zstack installation may have been corrupted, please reinstall it' % script_path)
        # probe whether a 'zstack_ui' database already exists
        if args.root_password:
            check_existing_db = 'mysql --user=root --password=%s --host=%s --port=%s -e "use zstack_ui"' % (args.root_password, args.host, args.port)
        else:
            check_existing_db = 'mysql --user=root --host=%s --port=%s -e "use zstack_ui"' % (args.host, args.port)
        self.update_db_config()
        cmd = ShellCmd(check_existing_db)
        cmd(False)
        # pass a literal '' to the shell script to represent empty passwords
        if not args.root_password:
            args.root_password = "''"
        if not args.zstack_ui_password:
            args.zstack_ui_password = "''"
        if cmd.return_code == 0 and not args.drop:
            if args.keep_db:
                info('detected existing zstack_ui database and keep it; if you want to drop it, please append parameter --drop, instead of --keep-db\n')
            else:
                raise CtlError('detected existing zstack_ui database; if you are sure to drop it, please append parameter --drop or use --keep-db to keep the database')
        else:
            cmd = ShellCmd('bash %s root %s %s %s %s' % (script_path, args.root_password, args.host, args.port, args.zstack_ui_password))
            cmd(False)
            if cmd.return_code != 0:
                # access-denied errors get a dedicated hint about remote grants
                if ('ERROR 1044' in cmd.stdout or 'ERROR 1044' in cmd.stderr) or ('Access denied' in cmd.stdout or 'Access denied' in cmd.stderr):
                    raise CtlError(
                        "failed to deploy zstack_ui database, access denied; if your root password is correct and you use IP rather than localhost,"
                        "it's probably caused by the privileges are not granted to root user for remote access; please see instructions in 'zstack-ctl -h'."
                        "error details: %s, %s\n" % (cmd.stdout, cmd.stderr)
                    )
                else:
                    cmd.raise_error()
        if not args.no_update:
            # the properties file stores an empty password as an empty string
            if args.zstack_ui_password == "''":
                args.zstack_ui_password = ''
            properties = [
                ("db_url", 'jdbc:mysql://%s:%s' % (args.host, args.port)),
                ("db_username", "zstack_ui"),
                ("db_password", args.zstack_ui_password),
            ]
            ctl.write_ui_properties(properties)
        info('Successfully deployed zstack_ui database')
class TailLogCmd(Command):
    """zstack-ctl 'taillog': follow the management node log on stdout."""

    def __init__(self):
        super(TailLogCmd, self).__init__()
        self.name = 'taillog'
        self.description = "shortcut to print management node log to stdout"
        ctl.register_command(self)

    def run(self, args):
        # the log lives relative to ZSTACK_HOME; normalize the ../.. components
        path = os.path.normpath(os.path.join(ctl.zstack_home, "../../logs/management-server.log"))
        if not os.path.isfile(path):
            raise CtlError('cannot find %s' % path)
        # pipe=False lets 'tail -f' stream straight to the terminal
        ShellCmd('tail -f %s' % path, pipe=False)()
class ConfigureCmd(Command):
    """zstack-ctl 'configure': set key=value entries in zstack.properties,
    locally, on a remote host, or by copying/replacing the whole file."""
    def __init__(self):
        super(ConfigureCmd, self).__init__()
        self.name = 'configure'
        self.description = "configure zstack.properties"
        ctl.register_command(self)
    def install_argparse_arguments(self, parser):
        parser.add_argument('--host', help='SSH URL, for example, root@192.168.0.10, to set properties in zstack.properties on the remote machine')
        parser.add_argument('--duplicate-to-remote', help='SSH URL, for example, root@192.168.0.10, to copy zstack.properties on this machine to the remote machine')
        parser.add_argument('--use-file', help='path to a file that will be used to as zstack.properties')
    def _configure_remote_node(self, args):
        # forward the raw key=value arguments to zstack-ctl on the remote host
        shell_no_pipe('ssh %s "/usr/bin/zstack-ctl configure %s"' % (args.host, ' '.join(ctl.extra_arguments)))
    def _duplicate_remote_node(self, args):
        """Copy the local zstack.properties to a remote host through an SSH
        heredoc and apply it there with 'configure --use-file'."""
        tmp_file_name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
        tmp_file_name = os.path.join('/tmp/', tmp_file_name)
        with open(ctl.properties_file_path, 'r') as fd:
            txt = fd.read()
        # NOTE(review): 'print' inside the heredoc is not a portable shell
        # command; 'echo' may have been intended -- confirm before changing
        cmd = '''ssh -T %s << EOF
cat <<EOT > %s
%s
EOT
if [ $? != 0 ]; then
print "cannot create temporary properties file"
exit 1
fi
/usr/bin/zstack-ctl configure --use-file %s
ret=$?
rm -f %s
exit $ret
EOF
'''
        shell_no_pipe(cmd % (args.duplicate_to_remote, tmp_file_name, txt, tmp_file_name, tmp_file_name))
        info("successfully copied %s to remote machine %s" % (ctl.properties_file_path, args.duplicate_to_remote))
    def _use_file(self, args):
        # replace zstack.properties wholesale with the given file
        path = os.path.expanduser(args.use_file)
        if not os.path.isfile(path):
            raise CtlError('cannot find file %s' % path)
        shell('cp -f %s %s' % (path, ctl.properties_file_path))
    def run(self, args):
        """Dispatch to --use-file / --duplicate-to-remote / --host handling,
        otherwise write the extra key=value arguments into zstack.properties."""
        if args.use_file:
            self._use_file(args)
            return
        if args.duplicate_to_remote:
            self._duplicate_remote_node(args)
            return
        if not ctl.extra_arguments:
            raise CtlError('please input properties that are in format of "key=value" split by space')
        if args.host:
            self._configure_remote_node(args)
            return
        properties = [l.split('=', 1) for l in ctl.extra_arguments]
        ctl.write_properties(properties)
def get_management_node_pid():
    """Return the management node PID, or None when it is not running.

    Lookup order: first scan /proc for a process started with appName=zstack,
    then fall back to the PID file (the pidFilePath property, or the default
    under the zstack user's home) and verify the recorded PID is alive and
    really belongs to a zstack process.
    """
    DEFAULT_PID_FILE_PATH = os.path.join(os.path.expanduser('~zstack'), "management-server.pid")

    pid = find_process_by_cmdline('appName=zstack')
    if pid:
        return pid

    pid_file_path = ctl.read_property('pidFilePath') or DEFAULT_PID_FILE_PATH
    if not os.path.exists(pid_file_path):
        return None

    def is_zstack_process(candidate):
        # guard against a stale PID file pointing at an unrelated process
        with open('/proc/%s/cmdline' % candidate, 'r') as fd:
            return 'appName=zstack' in fd.read()

    with open(pid_file_path, 'r') as fd:
        recorded = fd.read()
    try:
        recorded = int(recorded)
        if os.path.exists('/proc/%s' % recorded) and is_zstack_process(recorded):
            return recorded
        return None
    except Exception:
        # unreadable PID file or a vanished process: treat as not running
        return None
class StopAllCmd(Command):
    """zstack-ctl 'stop': stop the web UI (if installed) and then the
    management node."""

    def __init__(self):
        super(StopAllCmd, self).__init__()
        self.name = 'stop'
        self.description = 'stop all ZStack related services including zstack management node, web UI if those services are installed'
        ctl.register_command(self)

    def run(self, args):
        # stop the UI first so it does not keep talking to a dying node
        virtualenv = '/var/lib/zstack/virtualenv/zstack-dashboard'
        if not os.path.exists(virtualenv) and not os.path.exists(ctl.ZSTACK_UI_HOME):
            info('skip stopping web UI, it is not installed')
        else:
            info(colored('Stopping ZStack web UI, it may take a few minutes...', 'blue'))
            ctl.internal_run('stop_ui')
        info(colored('Stopping ZStack management node, it may take a few minutes...', 'blue'))
        ctl.internal_run('stop_node')
class StartAllCmd(Command):
    """zstack-ctl 'start': start the management node, then the web UI if it
    is installed."""

    def __init__(self):
        super(StartAllCmd, self).__init__()
        self.name = 'start'
        self.description = 'start all ZStack related services including zstack management node, web UI if those services are installed'
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        # NOTE(review): store_true combined with default=True means the flag is
        # effectively always on, so the non-daemon branch in run() never
        # executes -- confirm whether that is intended
        parser.add_argument('--daemon', help='Start ZStack in daemon mode. Only used with systemd.', action='store_true', default=True)

    def run(self, args):
        info(colored('Starting ZStack management node, it may take a few minutes...', 'blue'))
        if args.daemon:
            ctl.internal_run('start_node', '--daemon')
        else:
            ctl.internal_run('start_node')
        # the UI is started only after the management node is up
        virtualenv = '/var/lib/zstack/virtualenv/zstack-dashboard'
        if not os.path.exists(virtualenv) and not os.path.exists(ctl.ZSTACK_UI_HOME):
            info('skip starting web UI, it is not installed')
            return
        info(colored('Starting ZStack web UI, it may take a few minutes...', 'blue'))
        ctl.internal_run('start_ui')
class StartCmd(Command):
    """zstack-ctl 'start_node': start the ZStack management node (a Tomcat
    process) on this machine, or on a remote one via --host, and wait until
    it is ready to serve APIs."""
    # paths relative to ZSTACK_HOME
    START_SCRIPT = '../../bin/startup.sh'
    SET_ENV_SCRIPT = '../../bin/setenv.sh'
    BEAN_CONTEXT_REF_XML = "WEB-INF/classes/beanRefContext.xml"
    MINIMAL_CPU_NUMBER = 4
    #MINIMAL_MEM_SIZE unit is KB, here is 6GB, in Linux, 6GB is 5946428 KB
    #Save some memory for kdump etc. The actual limitation is 5000000KB
    MINIMAL_MEM_SIZE = 5000000
    def __init__(self):
        super(StartCmd, self).__init__()
        self.name = 'start_node'
        self.description = 'start the ZStack management node on this machine'
        ctl.register_command(self)
    def install_argparse_arguments(self, parser):
        parser.add_argument('--host', help='SSH URL, for example, root@192.168.0.10, to start the management node on a remote machine')
        parser.add_argument('--timeout', help='Wait for ZStack Server startup timeout, default is 300 seconds.', default=300)
        parser.add_argument('--daemon', help='Start ZStack in daemon mode. Only used with systemd.', action='store_true', default=False)
        parser.add_argument('--simulator', help='Start Zstack in simulator mode.', action='store_true', default=False)
    def _start_remote(self, args):
        # delegate to zstack-ctl on the remote host; blocks until the node is up
        info('it may take a while because zstack-ctl will wait for management node ready to serve API')
        shell_no_pipe('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "/usr/bin/zstack-ctl start_node --timeout=%s"' % (args.host, args.timeout))
    def check_cpu_mem(self):
        """Refuse to start on machines below the minimal CPU/memory spec."""
        if multiprocessing.cpu_count() < StartCmd.MINIMAL_CPU_NUMBER:
            error("CPU number should not less than %d" % StartCmd.MINIMAL_CPU_NUMBER)
        # MemTotal from /proc/meminfo is reported in KB
        status, output = commands.getstatusoutput("cat /proc/meminfo | grep MemTotal | awk -F \":\" '{print $2}' | awk -F \" \" '{print $1}'")
        if status == 0:
            if int(output) < StartCmd.MINIMAL_MEM_SIZE:
                error("Memory size should not less than %d KB" % StartCmd.MINIMAL_MEM_SIZE)
        else:
            warn("Can't get system memory size from /proc/meminfo")
    def check_hostname(self):
        """Refuse to start when the hostname contains '.' (breaks rabbitmq)."""
        hn = shell('hostname').strip()
        if '.' in hn:
            error("The hostname cannot contain '.', current hostname is '%s'.\n"
                  "Please use the following commands to modify hostname and reset rabbitmq:\n"
                  " # hostnamectl set-hostname $NEW_HOSTNAME\n"
                  " # zstack-ctl reset_rabbitmq" % hn)
    def run(self, args):
        """Run pre-flight checks, prepare the environment, boot Tomcat, and
        wait for the node to become ready -- stopping it again on failure."""
        self.check_cpu_mem()
        self.check_hostname()
        if args.host:
            self._start_remote(args)
            return
        # clean the error log before booting
        boot_error_log = os.path.join(ctl.USER_ZSTACK_HOME_DIR, 'bootError.log')
        shell('rm -f %s' % boot_error_log)
        pid = get_management_node_pid()
        if pid:
            info('the management node[pid:%s] is already running' % pid)
            return
        else:
            # remove a stale PID file left behind by a crashed node
            shell('rm -f %s' % os.path.join(os.path.expanduser('~zstack'), "management-server.pid"))
        def check_java_version():
            ver = shell('java -version 2>&1 | grep -w version')
            if '1.8' not in ver:
                # NOTE(review): the %s placeholder is never filled in -- a
                # "% ver" argument appears to be missing from this raise
                raise CtlError('ZStack requires Java8, your current version is %s\n'
                               'please run "update-alternatives --config java" to set Java to Java8')
        def check_8080():
            # Tomcat serves the API on 8080; refuse to start if it is taken
            if shell_return('netstat -nap | grep :8080[[:space:]] | grep LISTEN > /dev/null') == 0:
                raise CtlError('8080 is occupied by some process. Please use netstat to find out and stop it')
        def check_9090():
            # 9090 must be free too, except for prometheus which is expected
            if shell_return('netstat -nap | grep :9090[[:space:]] | grep LISTEN | grep -v prometheus > /dev/null') == 0:
                raise CtlError('9090 is occupied by some process. Please use netstat to find out and stop it')
        def check_msyql():
            # (name typo kept for compatibility) verify the configured MySQL
            # portal is reachable and accepts the configured credentials
            db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
            if not check_ip_port(db_hostname, db_port):
                raise CtlError('unable to connect to %s:%s, please check if the MySQL is running and the firewall rules' % (db_hostname, db_port))
            with on_error('unable to connect to MySQL'):
                shell('mysql --host=%s --user=%s --password=%s --port=%s -e "select 1"' % (db_hostname, db_user, db_password, db_port))
        def open_iptables_port(protocol, port_list):
            """Idempotently open the given ports in iptables, persisting the
            rule in a distro-specific way."""
            distro = platform.dist()[0]
            if type(port_list) is not list:
                error("port list should be list")
            for port in port_list:
                if distro == 'centos':
                    shell('iptables-save | grep -- "-A INPUT -p %s -m %s --dport %s -j ACCEPT" > /dev/null || '
                          '(iptables -I INPUT -p %s -m %s --dport %s -j ACCEPT && service iptables save)' % (protocol, protocol, port, protocol, protocol, port))
                elif distro == 'Ubuntu':
                    shell('iptables-save | grep -- "-A INPUT -p %s -m %s --dport %s -j ACCEPT" > /dev/null || '
                          '(iptables -I INPUT -p %s -m %s --dport %s -j ACCEPT && /etc/init.d/iptables-persistent save)' % (protocol, protocol, port, protocol, protocol, port))
                else:
                    shell('iptables-save | grep -- "-A INPUT -p %s -m %s --dport %s -j ACCEPT" > /dev/null || '
                          'iptables -I INPUT -p %s -m %s --dport %s -j ACCEPT ' % (protocol, protocol, port, protocol, protocol, port))
        def check_rabbitmq():
            """Verify at least one configured RabbitMQ server is reachable and,
            when credentials are configured, that they are valid."""
            RABBIT_PORT = 5672
            def check_username_password_if_need(ip, username, password):
                if not username or not password:
                    return
                # the management plugin (port 15672) is required to validate credentials
                cmd = ShellCmd('curl -u %s:%s http://%s:15672/api/whoami' % (username, password, ip))
                cmd(False)
                if cmd.return_code == 7:
                    # curl exit code 7 means connection refused
                    warn('unable to connect to the rabbitmq management plugin at %s:15672. The possible reasons are:\n'
                         ' 1) the plugin is not installed, you can install it by "rabbitmq-plugins enable rabbitmq_management,"\n'
                         ' then restart the rabbitmq by "service rabbitmq-server restart"\n'
                         ' 2) the port 15672 is blocked by the firewall\n'
                         'without the plugin, we cannot check the validity of the rabbitmq username/password configured in zstack.properties' % ip)
                elif cmd.return_code != 0:
                    cmd.raise_error()
                else:
                    if 'error' in cmd.stdout:
                        raise CtlError('unable to connect to the rabbitmq server[ip:%s] with username/password configured in zstack.properties.\n'
                                       'If you have reset the rabbimtq server, get the username/password from zstack.properties and do followings on the rabbitmq server:\n'
                                       '1) rabbitmqctl add_user $username $password\n'
                                       '2) rabbitmqctl set_user_tags $username administrator\n'
                                       '3) rabbitmqctl set_permissions -p / $username ".*" ".*" ".*"\n' % ip)
            with on_error('unable to get RabbitMQ server IPs from %s, please check CloudBus.serverIp.0'):
                ips = ctl.read_property_list('CloudBus.serverIp.')
                if not ips:
                    raise CtlError('no RabbitMQ IPs defined in %s, please specify it use CloudBus.serverIp.0=the_ip' % ctl.properties_file_path)
            rabbit_username = ctl.read_property('CloudBus.rabbitmqUsername')
            rabbit_password = ctl.read_property('CloudBus.rabbitmqPassword')
            if rabbit_password and not rabbit_username:
                raise CtlError('CloudBus.rabbitmqPassword is set but CloudBus.rabbitmqUsername is missing in zstack.properties')
            elif not rabbit_password and rabbit_username:
                raise CtlError('CloudBus.rabbitmqUsername is set but CloudBus.rabbitmqPassword is missing in zstack.properties')
            success = False
            workable_ip = None
            for key, ip in ips:
                # entries may carry an explicit port as "ip:port"
                if ":" in ip:
                    ip, port = ip.split(':')
                else:
                    port = RABBIT_PORT
                if check_ip_port(ip, port):
                    workable_ip = ip
                    success = True
                else:
                    # NOTE(review): message prints RABBIT_PORT rather than the
                    # per-entry port parsed above -- confirm intent
                    warn('cannot connect to the RabbitMQ server[ip:%s, port:%s]' % (ip, RABBIT_PORT))
            if not success:
                raise CtlError('cannot connect to all RabbitMQ servers[ip:%s, port:%s] defined in %s, please reset rabbitmq by: "zstack-ctl reset_rabbitmq"' %
                               (ips, RABBIT_PORT, ctl.properties_file_path))
            else:
                check_username_password_if_need(workable_ip, rabbit_username, rabbit_password)
        def prepare_qemu_kvm_repo():
            """Bind-mount the qemu-kvm-ev repo matching the configured qemu
            version over the default repo path, or unmount any existing bind."""
            OLD_QEMU_KVM_VERSION = 'qemu-kvm-ev-2.6.0'
            NEW_QEMU_KVM_VERSION = 'qemu-kvm-ev-2.9.0'
            DEFAULT_QEMU_KVM_PATH = '/opt/zstack-dvd/Extra/qemu-kvm-ev'
            # the shipped centos-release RPM tells us which repo flavor this is
            if len(glob.glob("/opt/zstack-dvd/Packages/centos-release-7-2.*.rpm")) > 0:
                local_repo_version = 'c72'
                EXPERIMENTAL_QEMU_KVM_PATH = '/opt/zstack-dvd/Extra/' + NEW_QEMU_KVM_VERSION
            else:
                local_repo_version = 'c74'
                EXPERIMENTAL_QEMU_KVM_PATH = '/opt/zstack-dvd/Extra/' + OLD_QEMU_KVM_VERSION
            # version combinations that need to mount qemu-kvm-ev
            version_matrix = {'c72': NEW_QEMU_KVM_VERSION, 'c74': OLD_QEMU_KVM_VERSION}
            qemu_version = ctl.read_property('KvmHost.qemu_kvm.version')
            if version_matrix[local_repo_version] == qemu_version:
                cmd = ShellCmd("umount %s; mount --bind %s %s" % (DEFAULT_QEMU_KVM_PATH, EXPERIMENTAL_QEMU_KVM_PATH, DEFAULT_QEMU_KVM_PATH))
            else:
                cmd = ShellCmd("umount %s" % DEFAULT_QEMU_KVM_PATH)
            cmd(False)
        def prepare_setenv():
            """Generate Tomcat's setenv.sh with CATALINA_OPTS assembled from
            defaults, extra CLI arguments and environment overrides."""
            setenv_path = os.path.join(ctl.zstack_home, self.SET_ENV_SCRIPT)
            catalina_opts = [
                '-Djava.net.preferIPv4Stack=true',
                '-Dcom.sun.management.jmxremote=true',
                '-Djava.security.egd=file:/dev/./urandom',
            ]
            if ctl.extra_arguments:
                catalina_opts.extend(ctl.extra_arguments)
            upgrade_params = ctl.get_env('ZSTACK_UPGRADE_PARAMS')
            if upgrade_params:
                catalina_opts.extend(upgrade_params.split(' '))
            co = ctl.get_env('CATALINA_OPTS')
            if co:
                info('use CATALINA_OPTS[%s] set in environment zstack environment variables; check out them by "zstack-ctl getenv"' % co)
                catalina_opts.extend(co.split(' '))
            def has_opt(prefix):
                for opt in catalina_opts:
                    if opt.startswith(prefix):
                        return True
                return False
            # apply default heap sizing only when the user did not set one
            if not has_opt('-Xms'):
                catalina_opts.append('-Xms512M')
            if not has_opt('-Xmx'):
                catalina_opts.append('-Xmx4096M')
            with open(setenv_path, 'w') as fd:
                fd.write('export CATALINA_OPTS=" %s"' % ' '.join(catalina_opts))
        def start_mgmt_node():
            # -DappName=zstack is the marker used by get_management_node_pid()
            shell('sudo -u zstack sh %s -DappName=zstack' % os.path.join(ctl.zstack_home, self.START_SCRIPT))
            info("successfully started Tomcat container; now it's waiting for the management node ready for serving APIs, which may take a few seconds")
        def wait_mgmt_node_start():
            """Poll the local API until ready or until --timeout expires,
            surfacing bootError.log content when the boot fails."""
            log_path = os.path.join(ctl.zstack_home, "../../logs/management-server.log")
            timeout = int(args.timeout)
            @loop_until_timeout(timeout)
            def check():
                if os.path.exists(boot_error_log):
                    with open(boot_error_log, 'r') as fd:
                        raise CtlError('the management server fails to boot; details can be found in the log[%s],'
                                       'here is a brief of the error:\n%s' % (log_path, fd.read()))
                cmd = create_check_mgmt_node_command(1, 'localhost')
                cmd(False)
                return cmd.return_code == 0
            if not check():
                mgmt_ip = ctl.read_property('management.server.ip')
                if mgmt_ip:
                    mgmt_ip = '[ management.server.ip = %s ]' % mgmt_ip
                else:
                    mgmt_ip = ''
                raise CtlError('no management-node-ready message received within %s seconds%s, please check error in log file %s' % (timeout, mgmt_ip, log_path))
        def prepareBeanRefContextXml():
            # select which Spring bean XML boots: the real stack or the simulator
            if args.simulator:
                beanXml = "simulator/zstack-simulator2.xml"
                info("--simulator is set, ZStack will start in simulator mode")
            else:
                beanXml = "zstack.xml"
            shell('sudo -u zstack sed -i "s#<value>.*</value>#<value>%s</value>#" %s' % (beanXml, os.path.join(ctl.zstack_home, self.BEAN_CONTEXT_REF_XML)))
        user = getpass.getuser()
        if user != 'root':
            raise CtlError('please use sudo or root user')
        check_java_version()
        check_8080()
        check_9090()
        check_msyql()
        check_rabbitmq()
        prepare_qemu_kvm_repo()
        prepare_setenv()
        # NTP traffic needs UDP 123 open
        open_iptables_port('udp',['123'])
        prepareBeanRefContextXml()
        start_mgmt_node()
        #sleep a while, since zstack won't start up so quickly
        time.sleep(5)
        try:
            wait_mgmt_node_start()
        except CtlError as e:
            # boot failed: best-effort stop so we do not leave a half-up node
            try:
                info("the management node failed to start, stop it now ...")
                ctl.internal_run('stop_node')
            except:
                pass
            raise e
        if not args.daemon:
            # keep systemd's view of the service in sync when available
            shell('which systemctl >/dev/null 2>&1; [ $? -eq 0 ] && systemctl start zstack', is_exception = False)
        info('successfully started management node')
        ctl.delete_env('ZSTACK_UPGRADE_PARAMS')
class StopCmd(Command):
    """zstack-ctl 'stop_node': stop the local (or, via --host, a remote)
    management node, escalating to kill -9 on timeout or with --force."""
    # path relative to ZSTACK_HOME
    STOP_SCRIPT = "../../bin/shutdown.sh"
    def __init__(self):
        super(StopCmd, self).__init__()
        self.name = 'stop_node'
        self.description = 'stop the ZStack management node on this machine'
        ctl.register_command(self)
    def install_argparse_arguments(self, parser):
        parser.add_argument('--host', help='SSH URL, for example, root@192.168.0.10, to stop the management node on a remote machine')
        parser.add_argument('--force', '-f', help='force kill the java process, without waiting.', action="store_true", default=False)
    def _stop_remote(self, args):
        # run stop_node over SSH, forwarding --force when requested
        if args.force:
            shell_no_pipe('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "/usr/bin/zstack-ctl stop_node --force"' % args.host)
        else:
            shell_no_pipe('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "/usr/bin/zstack-ctl stop_node"' % args.host)
    def run(self, args):
        """Graceful shutdown via shutdown.sh with a 30s wait, then kill -9
        any surviving process; --force skips straight to kill -9."""
        if args.host:
            self._stop_remote(args)
            return
        # for zstack-local repo upgrade
        # undo the qemu-kvm-ev bind mount established by start_node
        DEFAULT_QEMU_KVM_PATH = '/opt/zstack-dvd/Extra/qemu-kvm-ev'
        cmd = ShellCmd("umount %s" % DEFAULT_QEMU_KVM_PATH)
        cmd(False)
        pid = get_management_node_pid()
        if not pid:
            info('the management node has been stopped')
            return
        timeout = 30
        if not args.force:
            # graceful path: run shutdown.sh, then poll up to `timeout` seconds
            @loop_until_timeout(timeout)
            def wait_stop():
                return get_management_node_pid() is None
            shell('bash %s' % os.path.join(ctl.zstack_home, self.STOP_SCRIPT))
            if wait_stop():
                info('successfully stopped management node')
                return
        # either --force was given or the graceful stop timed out
        pid = get_management_node_pid()
        if pid:
            if not args.force:
                info('unable to stop management node within %s seconds, kill it' % timeout)
            with on_error('unable to kill -9 %s' % pid):
                shell('kill -9 %s' % pid)
class RestartNodeCmd(Command):
    """zstack-ctl 'restart_node': stop and then start the management node."""

    def __init__(self):
        super(RestartNodeCmd, self).__init__()
        self.name = 'restart_node'
        self.description = 'restart the management node'
        ctl.register_command(self)

    def run(self, args):
        # a restart is simply the two existing commands chained together
        for subcommand in ('stop_node', 'start_node'):
            ctl.internal_run(subcommand)
class SaveConfigCmd(Command):
    """zstack-ctl 'save_config': copy zstack.properties and the SSH identity
    keys into a backup folder (default ~/.zstack/)."""

    DEFAULT_PATH = '~/.zstack/'

    def __init__(self):
        super(SaveConfigCmd, self).__init__()
        self.name = 'save_config'
        self.description = 'save ZStack configuration from ZSTACK_HOME to specified folder'
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--save-to', help='the folder where ZStack configurations should be saved')

    def run(self, args):
        target_dir = os.path.expanduser(args.save_to or self.DEFAULT_PATH)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        properties_copy = os.path.join(target_dir, 'zstack.properties')
        # 'yes |' answers any interactive overwrite prompt from cp
        shell('yes | cp %s %s' % (ctl.properties_file_path, properties_copy))
        shell('yes | cp %s %s' % (ctl.ssh_private_key, os.path.join(target_dir, 'id_rsa')))
        shell('yes | cp %s %s' % (ctl.ssh_public_key, os.path.join(target_dir, 'id_rsa.pub')))
        info('successfully saved %s to %s' % (ctl.properties_file_path, properties_copy))
class RestoreConfigCmd(Command):
    """zstack-ctl 'restore_config': restore zstack.properties and the SSH
    identity keys from a previously saved folder (see save_config) back into
    their live locations."""

    DEFAULT_PATH = '~/.zstack/'

    def __init__(self):
        super(RestoreConfigCmd, self).__init__()
        self.name = "restore_config"
        self.description = 'restore ZStack configuration from specified folder to ZSTACK_HOME'
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--restore-from', help='the folder where ZStack configurations should be found')

    def run(self, args):
        """Copy zstack.properties and id_rsa/id_rsa.pub from the backup back
        into place. --restore-from may be the backup folder or the properties
        file itself; in the latter case the keys are taken from its folder."""
        path = args.restore_from
        if not path:
            path = self.DEFAULT_PATH
        path = os.path.expanduser(path)
        if os.path.isdir(path):
            config_dir = path
            properties_file_path = os.path.join(path, 'zstack.properties')
        elif os.path.isfile(path):
            # a file was given directly; its containing folder holds the keys.
            # BUG FIX: previously the key paths were joined onto `path` itself
            # even when it was a file, yielding paths like /x/file/id_rsa and
            # making the key copies fail
            config_dir = os.path.dirname(path)
            properties_file_path = path
        else:
            raise CtlError('cannot find zstack.properties at %s' % path)
        # 'yes |' answers any interactive overwrite prompt from cp
        shell('yes | cp %s %s' % (properties_file_path, ctl.properties_file_path))
        ssh_private_key_path = os.path.join(config_dir, 'id_rsa')
        ssh_public_key_path = os.path.join(config_dir, 'id_rsa.pub')
        shell('yes | cp %s %s' % (ssh_private_key_path, ctl.ssh_private_key))
        shell('yes | cp %s %s' % (ssh_public_key_path, ctl.ssh_public_key))
        info('successfully restored zstack.properties and ssh identity keys from %s to %s' % (properties_file_path, ctl.properties_file_path))
class InstallDbCmd(Command):
    def __init__(self):
        """Register the 'install_db' command, which installs MySQL on a local
        or remote machine (via Ansible; see run())."""
        super(InstallDbCmd, self).__init__()
        self.name = "install_db"
        self.description = (
            "install MySQL database on a target machine which can be a remote machine or the local machine."
            "\nNOTE: you may need to set --login-password to password of previous MySQL root user, if the machine used to have MySQL installed and removed."
            "\nNOTE: if you hasn't setup public key for ROOT user on the remote machine, this command will prompt you for password of SSH ROOT user for the remote machine."
        )
        ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='host IP, for example, 192.168.0.212, please specify the real IP rather than "localhost" or "127.0.0.1" when installing on local machine; otherwise management nodes on other machines cannot access the DB.', required=True)
parser.add_argument('--root-password', help="new password of MySQL root user; an empty password is used if both this option and --login-password option are omitted")
parser.add_argument('--login-password', help="login password of MySQL root user; an empty password is used if this option is omitted."
"\n[NOTE] this option is needed only when the machine has MySQL previously installed and removed; the old MySQL root password will be left in the system,"
"you need to input it in order to reset root password for the new installed MySQL.", default=None)
parser.add_argument('--debug', help="open Ansible debug option", action="store_true", default=False)
parser.add_argument('--yum', help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.", default=None)
parser.add_argument('--no-backup', help='do NOT backup the database. If the database is very large and you have manually backup it, using this option will fast the upgrade process. [DEFAULT] false', default=False)
parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)
def run(self, args):
if not args.yum:
args.yum = get_yum_repo_from_property()
script = ShellCmd("ip addr | grep 'inet ' | awk '{print $2}' | awk -F '/' '{print $1}'")
script(True)
current_host_ips = script.stdout.split('\n')
yaml = '''---
- hosts: $host
remote_user: root
vars:
root_password: $root_password
login_password: $login_password
yum_repo: "$yum_repo"
tasks:
- name: pre-install script
script: $pre_install_script
- name: install MySQL for RedHat 6 through user defined repos
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7' and yum_repo != 'false'
shell: yum clean metadata; yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y mysql mysql-server
register: install_result
- name: install MySQL for RedHat 6 through system defined repos
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7' and yum_repo == 'false'
shell: "yum clean metadata; yum --nogpgcheck install -y mysql mysql-server "
register: install_result
- name: install MySQL for RedHat 7 from local
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repo != 'false'
shell: yum clean metadata; yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y mariadb mariadb-server iptables-services
register: install_result
- name: install MySQL for RedHat 7 from local
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repo == 'false'
shell: yum clean metadata; yum --nogpgcheck install -y mariadb mariadb-server iptables-services
register: install_result
- name: install MySQL for Ubuntu
when: ansible_os_family == 'Debian'
apt: pkg={{item}} update_cache=yes
with_items:
- mariadb-client
- mariadb-server
- iptables-persistent
register: install_result
- name: open 3306 port
when: ansible_os_family == 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 3306 -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport 3306 -j ACCEPT && service iptables save)
- name: open 3306 port
when: ansible_os_family != 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 3306 -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport 3306 -j ACCEPT && /etc/init.d/iptables-persistent save)
- name: run post-install script
script: $post_install_script
- name: enable MySQL daemon on RedHat 6
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7'
service: name=mysqld state=restarted enabled=yes
- name: enable MySQL daemon on RedHat 7
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7'
service: name=mariadb state=restarted enabled=yes
- name: enable MySQL on Ubuntu
when: ansible_os_family == 'Debian'
service: name=mysql state=restarted enabled=yes
- name: change root password
shell: $change_password_cmd
register: change_root_result
ignore_errors: yes
- name: grant remote access
when: change_root_result.rc == 0
shell: $grant_access_cmd
- name: rollback MySQL installation on RedHat 6
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7' and change_root_result.rc != 0 and install_result.changed == True
shell: rpm -ev mysql mysql-server
- name: rollback MySQL installation on RedHat 7
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and change_root_result.rc != 0 and install_result.changed == True
shell: rpm -ev mariadb mariadb-server
- name: rollback MySql installation on Ubuntu
when: ansible_os_family == 'Debian' and change_root_result.rc != 0 and install_result.changed == True
apt: pkg={{item}} state=absent update_cache=yes
with_items:
- mysql-client
- mysql-server
- name: failure
fail: >
msg="failed to change root password of MySQL, see prior error in task 'change root password'; the possible cause
is the machine used to have MySQL installed and removed, the previous password of root user is remaining on the
machine; try using --login-password. We have rolled back the MySQL installation so you can safely run install_db
again with --login-password set."
when: change_root_result.rc != 0 and install_result.changed == False
'''
if not args.root_password and not args.login_password:
args.root_password = '''"''"'''
more_cmd = ' '
for ip in current_host_ips:
if not ip:
continue
more_cmd += "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%s' IDENTIFIED BY '' WITH GRANT OPTION;" % ip
grant_access_cmd = '''/usr/bin/mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY '' WITH GRANT OPTION; GRANT ALL PRIVILEGES ON *.* TO 'root'@'%s' IDENTIFIED BY '' WITH GRANT OPTION; %s FLUSH PRIVILEGES;"''' % (args.host, more_cmd)
else:
if not args.root_password:
args.root_password = args.login_password
more_cmd = ' '
for ip in current_host_ips:
if not ip:
continue
more_cmd += "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%s' IDENTIFIED BY '%s' WITH GRANT OPTION;" % (ip, args.root_password)
grant_access_cmd = '''/usr/bin/mysql -u root -p%s -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY '%s' WITH GRANT OPTION; GRANT ALL PRIVILEGES ON *.* TO 'root'@'%s' IDENTIFIED BY '%s' WITH GRANT OPTION; %s FLUSH PRIVILEGES;"''' % (args.root_password, args.root_password, args.host, args.root_password, more_cmd)
if args.login_password is not None:
change_root_password_cmd = '/usr/bin/mysqladmin -u root -p{{login_password}} password {{root_password}}'
else:
change_root_password_cmd = '/usr/bin/mysqladmin -u root password {{root_password}}'
pre_install_script = '''
if [ -f /etc/redhat-release ] ; then
grep ' 7' /etc/redhat-release
if [ $? -eq 0 ]; then
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
else
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
fi
[ -d /etc/yum.repos.d/ ] && echo -e "#aliyun base\n[alibase]\nname=CentOS-\$releasever - Base - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[aliupdates]\nname=CentOS-\$releasever - Updates - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliextras]\nname=CentOS-\$releasever - Extras - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nbaseurl=http://mirrors.aliyun.com/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-aliyun-yum.repo
[ -d /etc/yum.repos.d/ ] && echo -e "#163 base\n[163base]\nname=CentOS-\$releasever - Base - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[163updates]\nname=CentOS-\$releasever - Updates - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n#additional packages that may be useful\n[163extras]\nname=CentOS-\$releasever - Extras - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[ustcepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearch - ustc \nbaseurl=http://centos.ustc.edu.cn/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-163-yum.repo
fi
###################
#Check DNS hijacking
###################
hostname=`hostname`
pintret=`ping -c 1 -W 2 $hostname 2>/dev/null | head -n1`
echo $pintret | grep 'PING' > /dev/null
[ $? -ne 0 ] && exit 0
ip=`echo $pintret | cut -d' ' -f 3 | cut -d'(' -f 2 | cut -d')' -f 1`
ip_1=`echo $ip | cut -d'.' -f 1`
[ "127" = "$ip_1" ] && exit 0
ip addr | grep $ip > /dev/null
[ $? -eq 0 ] && exit 0
echo "The hostname($hostname) of your machine is resolved to IP($ip) which is none of IPs of your machine.
It's likely your DNS server has been hijacking, please try fixing it or add \"ip_of_your_host $hostname\" to /etc/hosts.
DNS hijacking will cause MySQL and RabbitMQ not working."
exit 1
'''
fd, pre_install_script_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(pre_install_script)
def cleanup_pre_install_script():
os.remove(pre_install_script_path)
self.install_cleanup_routine(cleanup_pre_install_script)
post_install_script = mysql_db_config_script
fd, post_install_script_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(post_install_script)
def cleanup_post_install_script():
os.remove(post_install_script_path)
self.install_cleanup_routine(cleanup_post_install_script)
t = string.Template(yaml)
if args.yum:
yum_repo = args.yum
else:
yum_repo = 'false'
yaml = t.substitute({
'host': args.host,
'change_password_cmd': change_root_password_cmd,
'root_password': args.root_password,
'login_password': args.login_password,
'grant_access_cmd': grant_access_cmd,
'pre_install_script': pre_install_script_path,
'yum_folder': ctl.zstack_home,
'yum_repo': yum_repo,
'post_install_script': post_install_script_path
})
ansible(yaml, args.host, args.debug, args.ssh_key)
class UpgradeHACmd(Command):
    '''Upgrade a ZStack-Enterprise HA deployment (2 or 3 management nodes).

    This feature only support zstack offline image currently.
    Steps: sync the yum repo from the new ISO on every node, stop mevoco,
    backup the DB, run the new installer on every node, upgrade the DB,
    restart mevoco everywhere.
    '''
    host_post_info_list = []
    current_dir = os.path.dirname(os.path.realpath(__file__))
    conf_dir = "/var/lib/zstack/ha/"
    private_key_name = conf_dir + "ha_key"
    conf_file = conf_dir + "ha.yaml"
    logger_dir = "/var/log/zstack/"
    logger_file = "ha.log"
    community_iso = "/opt/ZStack-Community-x86_64-DVD-1.4.0.iso"
    bridge = ""
    # progress states displayed by ZstackSpinner
    SpinnerInfo.spinner_status = {'upgrade_repo':False,'stop_mevoco':False, 'upgrade_mevoco':False,'upgrade_db':False,
                                  'backup_db':False, 'check_init':False, 'start_mevoco':False}
    ha_config_content = None

    def __init__(self):
        super(UpgradeHACmd, self).__init__()
        self.name = "upgrade_ha"
        self.description = "upgrade high availability environment for ZStack-Enterprise."
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        # BUG FIX: run() reads args.mevoco_installer, but argparse would
        # derive dest 'zstack_enterprise_installer' from the first long
        # option, making run() fail with AttributeError.  Setting dest
        # explicitly keeps the command-line interface unchanged.
        parser.add_argument('--zstack-enterprise-installer','--enterprise',
                            dest='mevoco_installer',
                            help="The new zstack-enterprise installer package, get it from http://cdn.zstack.io/product_downloads/zstack-enterprise/",
                            required=True)
        parser.add_argument('--iso',
                            help="get it from http://cdn.zstack.io/product_downloads/iso/",
                            required=True)

    def upgrade_repo(self, iso, tmp_iso, host_post_info):
        """Mount the new ISO on the remote host and rsync it over the local
        zstack-dvd repo, installing any not-yet-installed packages first."""
        command = (
            "yum clean --enablerepo=zstack-local metadata && pkg_list=`rsync | grep \"not installed\" | awk"
            " '{ print $2 }'` && for pkg in $pkg_list; do yum --disablerepo=* --enablerepo=zstack-local install "
            "-y $pkg; done;")
        run_remote_command(command, host_post_info)
        command = "mkdir -p %s" % tmp_iso
        run_remote_command(command, host_post_info)
        command = "mount -o loop %s %s" % (iso, tmp_iso)
        run_remote_command(command, host_post_info)
        command = "rsync -au --delete %s /opt/zstack-dvd/" % tmp_iso
        run_remote_command(command, host_post_info)
        command = "umount %s" % tmp_iso
        run_remote_command(command, host_post_info)
        command = "rm -rf %s" % tmp_iso
        run_remote_command(command, host_post_info)

    def check_file_exist(self, file, host_post_info_list):
        """Validate that *file* is an absolute, existing local path and copy
        it to every HA node that does not already have it."""
        if os.path.isabs(file) is False:
            error("Make sure you pass file name with absolute path")
        else:
            if os.path.isfile(file) is False:
                error("Didn't find file %s" % file)
            else:
                for host_post_info in host_post_info_list:
                    if file_dir_exist("path=%s" % file, host_post_info) is False:
                        copy_arg = CopyArg()
                        copy_arg.src = file
                        copy_arg.dest = file
                        copy(copy_arg, host_post_info)

    # do not enable due to lot of customer version
    def check_file_md5sum(self):
        pass

    def check_mn_running(self,host_post_info):
        """Abort unless the management node on *host_post_info* reports a
        running ('true') status."""
        cmd = create_check_mgmt_node_command(timeout=4, mn_node=host_post_info.host)
        cmd(False)
        if cmd.return_code != 0:
            error("Check management node %s status failed, make sure the status is running before upgrade" % host_post_info.host)
        else:
            if 'false' in cmd.stdout:
                error('The management node %s is starting, please wait a few seconds to upgrade' % host_post_info.host)
            elif 'true' in cmd.stdout:
                return 0
            else:
                error('The management node %s status is: Unknown, please start the management node before upgrade' % host_post_info.host)

    def upgrade_mevoco(self, mevoco_installer, host_post_info):
        """Run the enterprise installer in upgrade mode (-u -i) over SSH on
        the remote node; installer output goes to the ha.log logger."""
        mevoco_dir = os.path.dirname(mevoco_installer)
        mevoco_bin = os.path.basename(mevoco_installer)
        command = "rm -rf /tmp/zstack_upgrade.lock && cd %s && bash %s -u -i " % (mevoco_dir, mevoco_bin)
        logger.debug("[ HOST: %s ] INFO: starting run shell command: '%s' " % (host_post_info.host, command))
        (status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
                                                   (UpgradeHACmd.private_key_name, host_post_info.host, command))
        if status != 0:
            error("Something wrong on host: %s\n %s" % (host_post_info.host, output))
        logger.debug("[ HOST: %s ] SUCC: shell command: '%s' successfully" % (host_post_info.host, command))

    def run(self, args):
        # create log
        create_log(UpgradeHACmd.logger_dir, UpgradeHACmd.logger_file)
        spinner_info = SpinnerInfo()
        spinner_info.output = "Checking system and init environment"
        spinner_info.name = 'check_init'
        SpinnerInfo.spinner_status['check_init'] = True
        ZstackSpinner(spinner_info)

        if os.path.isfile(UpgradeHACmd.conf_file) is not True:
            error("Didn't find HA config file %s, please contact support for upgrade" % UpgradeHACmd.conf_file)

        host_inventory = UpgradeHACmd.conf_dir + 'host'
        yum_repo = get_yum_repo_from_property()
        private_key_name = UpgradeHACmd.conf_dir+ "ha_key"

        # NOTE(review): --iso is required=True, so the fallback to the
        # bundled community ISO below is currently dead code; kept for
        # compatibility should --iso ever become optional again.
        if args.iso is None:
            community_iso = UpgradeHACmd.community_iso
        else:
            community_iso = args.iso

        mn_list = get_ha_mn_list(UpgradeHACmd.conf_file)
        host1_ip = mn_list[0]
        host2_ip = mn_list[1]
        if len(mn_list) > 2:
            host3_ip = mn_list[2]

        # init host1 parameter
        self.host1_post_info = HostPostInfo()
        self.host1_post_info.host = host1_ip
        self.host1_post_info.host_inventory = host_inventory
        self.host1_post_info.private_key = private_key_name
        self.host1_post_info.yum_repo = yum_repo
        self.host1_post_info.post_url = ""

        # init host2 parameter
        self.host2_post_info = HostPostInfo()
        self.host2_post_info.host = host2_ip
        self.host2_post_info.host_inventory = host_inventory
        self.host2_post_info.private_key = private_key_name
        self.host2_post_info.yum_repo = yum_repo
        self.host2_post_info.post_url = ""

        if len(mn_list) > 2:
            # init host3 parameter
            self.host3_post_info = HostPostInfo()
            self.host3_post_info.host = host3_ip
            self.host3_post_info.host_inventory = host_inventory
            self.host3_post_info.private_key = private_key_name
            self.host3_post_info.yum_repo = yum_repo
            self.host3_post_info.post_url = ""

        UpgradeHACmd.host_post_info_list = [self.host1_post_info, self.host2_post_info]
        if len(mn_list) > 2:
            UpgradeHACmd.host_post_info_list = [self.host1_post_info, self.host2_post_info, self.host3_post_info]

        for host in UpgradeHACmd.host_post_info_list:
            # to do check mn all running
            self.check_mn_running(host)

        for file in [args.mevoco_installer, community_iso]:
            self.check_file_exist(file, UpgradeHACmd.host_post_info_list)

        spinner_info = SpinnerInfo()
        spinner_info.output = "Starting to upgrade repo"
        spinner_info.name = "upgrade_repo"
        SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
        SpinnerInfo.spinner_status['upgrade_repo'] = True
        ZstackSpinner(spinner_info)
        rand_dir_name = uuid.uuid4()
        tmp_iso = "/tmp/%s/iso/" % rand_dir_name
        for host_post_info in UpgradeHACmd.host_post_info_list:
            self.upgrade_repo(community_iso, tmp_iso, host_post_info)

        spinner_info = SpinnerInfo()
        spinner_info.output = "Stopping mevoco"
        spinner_info.name = "stop_mevoco"
        SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
        SpinnerInfo.spinner_status['stop_mevoco'] = True
        ZstackSpinner(spinner_info)
        for host_post_info in UpgradeHACmd.host_post_info_list:
            stop_mevoco(host_post_info)

        # backup db before upgrade
        spinner_info = SpinnerInfo()
        spinner_info.output = "Starting to backup database"
        spinner_info.name = "backup_db"
        SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
        SpinnerInfo.spinner_status['backup_db'] = True
        ZstackSpinner(spinner_info)
        (status, output) = commands.getstatusoutput("zstack-ctl dump_mysql >> /dev/null 2>&1")

        spinner_info = SpinnerInfo()
        spinner_info.output = "Starting to upgrade mevoco"
        spinner_info.name = "upgrade_mevoco"
        SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
        SpinnerInfo.spinner_status['upgrade_mevoco'] = True
        ZstackSpinner(spinner_info)
        for host_post_info in UpgradeHACmd.host_post_info_list:
            self.upgrade_mevoco(args.mevoco_installer, host_post_info)

        spinner_info = SpinnerInfo()
        spinner_info.output = "Starting to upgrade database"
        spinner_info.name = "upgrade_db"
        SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
        SpinnerInfo.spinner_status['upgrade_db'] = True
        ZstackSpinner(spinner_info)
        (status, output) = commands.getstatusoutput("zstack-ctl upgrade_db")
        if status != 0:
            error("Upgrade mysql failed: %s" % output)
        else:
            logger.debug("SUCC: shell command: 'zstack-ctl upgrade_db' successfully" )

        spinner_info = SpinnerInfo()
        spinner_info.output = "Starting mevoco"
        spinner_info.name = "start_mevoco"
        SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
        SpinnerInfo.spinner_status['start_mevoco'] = True
        ZstackSpinner(spinner_info)
        for host_post_info in UpgradeHACmd.host_post_info_list:
            start_remote_mn(host_post_info)

        SpinnerInfo.spinner_status['start_mevoco'] = False
        time.sleep(.2)
        info(colored("\nUpgrade HA successfully!","blue"))
class AddManagementNodeCmd(Command):
    """Deploy additional ZStack management nodes to a list of hosts.

    For each host: push the local public key, install the management node,
    sync zstack.properties, install the UI and start both node and UI.
    """
    SpinnerInfo.spinner_status = {'check_init':False,'add_key':False,'deploy':False,'config':False,'start':False,'install_ui':False}
    install_pkgs = ['openssl']
    logger_dir = '/var/log/zstack/'
    logger_file = 'zstack-ctl.log'

    def __init__(self):
        super(AddManagementNodeCmd, self).__init__()
        self.name = "add_multi_management"
        self.description = "add multi management node."
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--host-list','--hosts',nargs='+',
                            help="All hosts connect info follow below format, example: 'zstack-ctl add_multi_management --hosts root:passwd1@host1_ip root:passwd2@host2_ip ...' ",
                            required=True)
        parser.add_argument('--force-reinstall','-f',action="store_true", default=False)
        parser.add_argument('--ssh-key',
                            help="the path of private key for SSH login $host; if provided, Ansible will use the "
                                 "specified key as private key to SSH login the $host, default will use zstack private key",
                            default=None)

    def add_public_key_to_host(self, key_path, host_info):
        """Push *key_path* to root@host using sshpass + ssh-copy-id."""
        command ='timeout 10 sshpass -p "%s" ssh-copy-id -o UserKnownHostsFile=/dev/null -o PubkeyAuthentication=no' \
                 ' -o StrictHostKeyChecking=no -i %s root@%s' % (host_info.remote_pass, key_path, host_info.host)
        (status, output) = commands.getstatusoutput(command)
        if status != 0:
            error("Copy public key '%s' to host: '%s' failed:\n %s" % (key_path, host_info.host, output))

    def deploy_mn_on_host(self,args, host_info, key):
        """Install the management node on *host_info* via zstack-ctl."""
        if args.force_reinstall is True:
            command = 'zstack-ctl install_management_node --host=%s --ssh-key="%s" --force-reinstall' % (host_info.remote_user+':'+host_info.remote_pass+'@'+host_info.host, key)
        else:
            command = 'zstack-ctl install_management_node --host=%s --ssh-key="%s"' % (host_info.remote_user+':'+host_info.remote_pass+'@'+host_info.host, key)
        (status, output) = commands.getstatusoutput(command)
        if status != 0:
            error("deploy mn on host %s failed:\n %s" % (host_info.host, output))

    def install_ui_on_host(self, key, host_info):
        """Install the web UI on *host_info* via zstack-ctl."""
        command = 'zstack-ctl install_ui --host=%s --ssh-key=%s' % (host_info.host, key)
        (status, output) = commands.getstatusoutput(command)
        if status != 0:
            error("deploy ui on host %s failed:\n %s" % (host_info.host, output))

    def config_mn_on_host(self, key, host_info):
        """Copy zstack.properties to the host and point its
        management.server.ip at itself."""
        command = "mkdir -p `dirname %s`" % ctl.properties_file_path
        run_remote_command(command, host_info)
        command = "scp -i %s %s root@%s:%s" % (key, ctl.properties_file_path, host_info.host, ctl.properties_file_path)
        (status, output) = commands.getstatusoutput(command)
        if status != 0:
            error("copy config to host %s failed:\n %s" % (host_info.host, output))
        # NOTE(review): the '&& zstack-ctl save_config' part runs locally,
        # not on the remote host (it is outside any remote quoting) — confirm
        # this is intended before changing it.
        command = "ssh -q -i %s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@%s zstack-ctl configure " \
                  "management.server.ip=%s && zstack-ctl save_config" % (key, host_info.host, host_info.host)
        (status, output) = commands.getstatusoutput(command)
        if status != 0:
            error("config management server %s failed:\n %s" % (host_info.host, output))

    def start_mn_on_host(self, host_info, key):
        """Start the management node and the UI on *host_info*."""
        command = "ssh -q -i %s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@%s zstack-ctl " \
                  "start_node " % (key, host_info.host)
        (status, output) = commands.getstatusoutput(command)
        # best-effort: expose the local DVD repo through the UI webapp
        command = "ln -s /opt/zstack-dvd/ /usr/local/zstack/apache-tomcat/webapps/zstack/static/zstack-dvd"
        run_remote_command(command, host_info, True, True)
        if status != 0:
            error("start node on host %s failed:\n %s" % (host_info.host, output))
        command = "ssh -q -i %s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@%s zstack-ctl " \
                  "start_ui" % (key, host_info.host)
        (status, output) = commands.getstatusoutput(command)
        if status != 0:
            error("start ui on host %s failed:\n %s" % (host_info.host, output))

    def install_packages(self, pkg_list, host_info):
        """Install *pkg_list* with the distro's package manager."""
        distro = platform.dist()[0]
        if distro == "centos":
            for pkg in pkg_list:
                yum_install_package(pkg, host_info)
        elif distro == "Ubuntu":
            apt_install_packages(pkg_list, host_info)

    def run(self, args):
        create_log(AddManagementNodeCmd.logger_dir, AddManagementNodeCmd.logger_file)
        host_info_list = []
        if args.ssh_key is None:
            args.ssh_key = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/id_rsa.pub"
        # BUG FIX: the original derived the private key with
        # args.ssh_key.split('.')[0], which truncates the whole path at the
        # first dot anywhere in it (e.g. a dotted directory name).  Only the
        # trailing '.pub' suffix should be stripped.
        if args.ssh_key.endswith('.pub'):
            private_key = args.ssh_key[:-len('.pub')]
        else:
            private_key = args.ssh_key

        spinner_info = SpinnerInfo()
        spinner_info.output = "Checking system and init environment"
        spinner_info.name = 'check_init'
        SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
        SpinnerInfo.spinner_status['check_init'] = True
        ZstackSpinner(spinner_info)

        # parse and validate every 'user:pass@ip' entry, registering the host
        # in the ansible inventory if it is not there yet
        for host in args.host_list:
            inventory_file = ctl.zstack_home + "/../../../ansible/hosts"
            host_info = HostPostInfo()
            host_info.private_key = private_key
            host_info.host_inventory = inventory_file
            (host_info.remote_user, host_info.remote_pass, host_info.host, host_info.remote_port) = check_host_info_format(host)
            check_host_password(host_info.remote_pass, host_info.host)
            command = "cat %s | grep %s || echo %s >> %s" % (inventory_file, host_info.host, host_info.host, inventory_file)
            (status, output) = commands.getstatusoutput(command)
            if status != 0 :
                error(output)
            host_info_list.append(host_info)

        for host_info in host_info_list:
            spinner_info = SpinnerInfo()
            spinner_info.output = "Add public key to host %s" % host_info.host
            spinner_info.name = 'add_key'
            SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
            SpinnerInfo.spinner_status['add_key'] = True
            ZstackSpinner(spinner_info)
            self.add_public_key_to_host(args.ssh_key, host_info)

            spinner_info = SpinnerInfo()
            spinner_info.output = "Deploy management node to host %s" % host_info.host
            spinner_info.name = 'deploy'
            SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
            SpinnerInfo.spinner_status['deploy'] = True
            ZstackSpinner(spinner_info)
            self.deploy_mn_on_host(args, host_info, private_key)
            self.install_packages(AddManagementNodeCmd.install_pkgs, host_info)

            spinner_info = SpinnerInfo()
            spinner_info.output = "Config management node on host %s" % host_info.host
            spinner_info.name = 'config'
            SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
            SpinnerInfo.spinner_status['config'] = True
            ZstackSpinner(spinner_info)
            self.config_mn_on_host(private_key, host_info)

            spinner_info = SpinnerInfo()
            spinner_info.output = "Install UI on host %s" % host_info.host
            spinner_info.name = 'install_ui'
            SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
            SpinnerInfo.spinner_status['install_ui'] = True
            ZstackSpinner(spinner_info)
            self.install_ui_on_host(private_key, host_info)

            spinner_info = SpinnerInfo()
            spinner_info.output = "Start management node on host %s" % host_info.host
            spinner_info.name = 'start'
            SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
            SpinnerInfo.spinner_status['start'] = True
            ZstackSpinner(spinner_info)
            self.start_mn_on_host(host_info,private_key)

        SpinnerInfo.spinner_status['start'] = False
        time.sleep(0.2)
        info(colored("\nAll management nodes add successfully",'blue'))
class RecoverHACmd(Command):
    '''Recover a Mevoco HA cluster after an outage.

    This feature only support zstack offline image currently.
    Steps: bootstrap the galera/MySQL cluster from one surviving node,
    restart haproxy/keepalived/rabbitmq on every node, resync prometheus
    data and start the management nodes.
    '''
    host_post_info_list = []
    current_dir = os.path.dirname(os.path.realpath(__file__))
    conf_dir = "/var/lib/zstack/ha/"
    conf_file = conf_dir + "ha.yaml"
    host_inventory = conf_dir + 'host'
    private_key = conf_dir + 'ha_key'
    logger_dir = "/var/log/zstack/"
    logger_file = "ha.log"
    bridge = ""
    # progress states displayed by ZstackSpinner.
    # FIX: the original literal listed 'cluster' twice; deduplicated, and
    # 'prometheus' added because run() toggles it as well.
    SpinnerInfo.spinner_status = {'cluster':False, 'mysql':False, 'mevoco':False, 'check_init':False, 'prometheus':False}
    ha_config_content = None

    def __init__(self):
        super(RecoverHACmd, self).__init__()
        self.name = "recover_ha"
        self.description = "Recover high availability environment for Mevoco."
        ctl.register_command(self)

    def stop_mysql_service(self, host_post_info):
        """Stop mysql on the host and kill any process still holding the
        galera replication port 4567."""
        command = "service mysql stop"
        run_remote_command(command, host_post_info)
        mysqld_status = run_remote_command("netstat -ltnp | grep :4567[[:space:]]", host_post_info,
                                           return_status=True)
        if mysqld_status is True:
            run_remote_command("lsof -i tcp:4567 | awk 'NR!=1 {print $2}' | xargs kill -9", host_post_info)

    def reboot_cluster_service(self, host_post_info):
        """(Re)start the HA support services on the host."""
        service_status("haproxy", "state=started", host_post_info)
        service_status("keepalived", "state=started", host_post_info)
        service_status("rabbitmq-server", "state=started", host_post_info)

    def recover_mysql(self, host_post_info, host_post_info_list):
        """Bootstrap the MySQL cluster from *host_post_info*: stop mysql on
        all nodes, bootstrap on the candidate, start the rest, then restart
        the candidate.  Returns True on success, False otherwise."""
        for host_info in host_post_info_list:
            self.stop_mysql_service(host_info)
        command = "service mysql bootstrap"
        status, output = run_remote_command(command,host_post_info,True,True)
        if status is False:
            return False
        for host_info in host_post_info_list:
            if host_info.host != host_post_info.host:
                command = "service mysql start"
                status, output = run_remote_command(command,host_info,True,True)
                if status is False:
                    return False
        command = "service mysql restart"
        status, output = run_remote_command(command,host_post_info,True,True)
        return status

    def sync_prometheus(self, host_post_info):
        # sync prometheus data from this node to the peer
        sync_arg = SyncArg()
        sync_arg.src = '/var/lib/zstack/prometheus/'
        sync_arg.dest = '/var/lib/zstack/prometheus/'
        sync(sync_arg, host_post_info)

    def run(self, args):
        create_log(UpgradeHACmd.logger_dir, UpgradeHACmd.logger_file)
        spinner_info = SpinnerInfo()
        spinner_info.output = "Checking system and init environment"
        spinner_info.name = 'check_init'
        SpinnerInfo.spinner_status['check_init'] = True
        ZstackSpinner(spinner_info)

        host3_exist = False
        if os.path.isfile(RecoverHACmd.conf_file) is not True:
            error("Didn't find HA config file %s, please use traditional 'zstack-ctl install_ha' to recover your cluster" % RecoverHACmd.conf_file)
        if os.path.exists(RecoverHACmd.conf_file):
            with open(RecoverHACmd.conf_file, 'r') as f:
                # NOTE(review): yaml.load without an explicit Loader is unsafe
                # on untrusted input; the HA config is written by zstack-ctl
                # itself, but consider yaml.safe_load.
                RecoverHACmd.ha_config_content = yaml.load(f)
        if RecoverHACmd.ha_config_content['host_list'] is None:
            error("Didn't find host_list in config file %s" % RecoverHACmd.conf_file)
        host_list = RecoverHACmd.ha_config_content['host_list'].split(',')
        if len(host_list) < 2:
            error("host_list in config file %s must contain at least two hosts" % RecoverHACmd.conf_file)
        # BUG FIX: the original assigned host1_ip/host2_ip only when exactly
        # two hosts were configured, so a 3-node cluster crashed with
        # NameError below.  Hosts 1 and 2 always exist; host 3 is optional.
        host1_ip = host_list[0]
        host2_ip = host_list[1]
        if len(host_list) == 3:
            host3_exist = True
            host3_ip = host_list[2]

        if os.path.exists(RecoverHACmd.conf_file) and RecoverHACmd.ha_config_content is not None :
            if "bridge_name" in RecoverHACmd.ha_config_content:
                RecoverHACmd.bridge = RecoverHACmd.ha_config_content['bridge_name']
            else:
                error("Didn't find 'bridge_name' in config file %s" % RecoverHACmd.conf_file)

        local_ip = get_ip_by_interface(RecoverHACmd.bridge)

        host_post_info_list = []
        # init host1 parameter
        host1_post_info = HostPostInfo()
        host1_post_info.host = host1_ip
        host1_post_info.host_inventory = RecoverHACmd.host_inventory
        host1_post_info.private_key = RecoverHACmd.private_key
        host_post_info_list.append(host1_post_info)

        host2_post_info = HostPostInfo()
        host2_post_info.host = host2_ip
        host2_post_info.host_inventory = RecoverHACmd.host_inventory
        host2_post_info.private_key = RecoverHACmd.private_key
        host_post_info_list.append(host2_post_info)

        if host3_exist is True:
            host3_post_info = HostPostInfo()
            host3_post_info.host = host3_ip
            host3_post_info.host_inventory = RecoverHACmd.host_inventory
            host3_post_info.private_key = RecoverHACmd.private_key
            host_post_info_list.append(host3_post_info)

        spinner_info = SpinnerInfo()
        spinner_info.output = "Starting to recovery mysql"
        spinner_info.name = "mysql"
        SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
        SpinnerInfo.spinner_status['mysql'] = True
        ZstackSpinner(spinner_info)
        # try to bootstrap the cluster from each node in turn until one works
        mysql_recover_status = False
        for host_post_info in host_post_info_list:
            recover_status = self.recover_mysql(host_post_info, host_post_info_list)
            if recover_status is True:
                mysql_recover_status = True
                break
        if mysql_recover_status is False:
            error("Recover mysql failed! Please check log /var/log/zstack/ha.log")

        spinner_info = SpinnerInfo()
        spinner_info.output = "Starting to recovery cluster"
        spinner_info.name = "cluster"
        SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
        SpinnerInfo.spinner_status['cluster'] = True
        ZstackSpinner(spinner_info)
        for host_post_info in host_post_info_list:
            self.reboot_cluster_service(host_post_info)

        spinner_info = SpinnerInfo()
        spinner_info.output = "Starting to sync monitor data"
        spinner_info.name = "prometheus"
        SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
        SpinnerInfo.spinner_status['prometheus'] = True
        ZstackSpinner(spinner_info)
        for host_post_info in host_post_info_list:
            if host_post_info.host != local_ip:
                self.sync_prometheus(host_post_info)

        spinner_info = SpinnerInfo()
        spinner_info.output = "Starting Mevoco"
        spinner_info.name = "mevoco"
        SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
        SpinnerInfo.spinner_status['mevoco'] = True
        ZstackSpinner(spinner_info)
        for host_post_info in host_post_info_list:
            start_remote_mn(host_post_info)

        SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
        time.sleep(.3)
        info(colored("The cluster has been recovered successfully!", "blue"))
class InstallHACmd(Command):
    '''This feature only support zstack offline image currently'''
    # Shared state read by the HA sub-deployers (MysqlHA, RabbitmqHA,
    # HaproxyKeepalived, ...); run() fills host_post_info_list before
    # instantiating them.
    host_post_info_list = []
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # All HA config (ha.yaml, inventory, ssh key pair) lives under conf_dir.
    conf_dir = "/var/lib/zstack/ha/"
    conf_file = conf_dir + "ha.yaml"
    logger_dir = "/var/log/zstack/"
    logger_file = "ha.log"
    # Bridge device carrying the VIP; defaulted to 'br_eth0' in run().
    bridge = ""
    # NOTE: executed at class-definition time -- this mutates the global
    # SpinnerInfo progress map shared by every spinner in this tool.
    SpinnerInfo.spinner_status = {'mysql':False,'rabbitmq':False, 'haproxy_keepalived':False,
                                  'Mevoco':False, 'stop_mevoco':False, 'check_init':False, 'recovery_cluster':False}
    # Parsed contents of ha.yaml from a previous deploy, if one exists.
    ha_config_content = None

    def __init__(self):
        """Register the 'install_ha' sub-command with the global ctl dispatcher."""
        super(InstallHACmd, self).__init__()
        self.name = "install_ha"
        self.description = "install high availability environment for Mevoco."
        ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host1-info','--h1',
help="The first host connect info follow below format: 'root:password@ip_address' ",
required=True)
parser.add_argument('--host2-info','--h2',
help="The second host connect info follow below format: 'root:password@ip_address' ",
required=True)
parser.add_argument('--host3-info','--h3',
help="The third host connect info follow below format: 'root:password@ip_address' ",
default=False)
parser.add_argument('--vip',
help="The virtual IP address for HA setup",
default=None)
parser.add_argument('--gateway',
help="The gateway IP address for HA setup",
default=None)
parser.add_argument('--bridge',
help="The bridge device name, default is br_eth0",
)
parser.add_argument('--mysql-root-password','--root-pass',
help="Password of MySQL root user", default="zstack123")
parser.add_argument('--mysql-user-password','--user-pass',
help="Password of MySQL user zstack", default="zstack123")
parser.add_argument('--rabbit-password','--rabbit-pass',
help="RabbitMQ password; if set, the password will be created on RabbitMQ for username "
"specified by --rabbit-username. [DEFAULT] rabbitmq default password",
default="zstack123")
parser.add_argument('--drop', action='store_true', default=False,
help="Force delete mysql data for re-deploy HA")
parser.add_argument('--keep-db', action='store_true', default=False,
help='keep existing zstack database and not raise error')
parser.add_argument('--recovery-from-this-host','--recover',
action='store_true', default=False,
help="This argument for admin to recovery mysql from the last shutdown mysql server")
parser.add_argument('--perfect-mode', action='store_true', default=False,
help="This mode will re-connect mysql faster")
def get_formatted_netmask(self, device_name):
'''This function will return formatted netmask. eg. 172.20.12.16/24 will return 24'''
netmask = socket.inet_ntoa(fcntl.ioctl(socket.socket(socket.AF_INET, socket.SOCK_DGRAM),
35099, struct.pack('256s', device_name))[20:24])
formatted_netmask = sum([bin(int(x)).count('1') for x in netmask.split('.')])
return formatted_netmask
def run(self, args):
spinner_info = SpinnerInfo()
spinner_info.output = "Checking system and init environment"
spinner_info.name = 'check_init'
SpinnerInfo.spinner_status['check_init'] = True
ZstackSpinner(spinner_info)
if args.bridge is None:
InstallHACmd.bridge = 'br_eth0'
else:
InstallHACmd.bridge = args.bridge
if os.path.exists(InstallHACmd.conf_file):
with open(InstallHACmd.conf_file, 'r') as f:
InstallHACmd.ha_config_content = yaml.load(f)
if args.vip is None and args.recovery_from_this_host is False:
error("Install HA must assign a vip")
# check gw ip is available
if args.gateway is None:
if get_default_gateway_ip() is None:
error("Can't get the gateway IP address from system, please check your route table or pass specific " \
"gateway through \"--gateway\" argument")
else:
gateway_ip = get_default_gateway_ip()
else:
gateway_ip = args.gateway
(status, output) = commands.getstatusoutput('ping -c 1 %s' % gateway_ip)
if status != 0:
error("The gateway %s unreachable!" % gateway_ip)
# check input host info
host1_info = args.host1_info
host1_connect_info_list = check_host_info_format(host1_info)
args.host1 = host1_connect_info_list[2]
args.host1_password = host1_connect_info_list[1]
host2_info = args.host2_info
host2_connect_info_list = check_host_info_format(host2_info)
args.host2 = host2_connect_info_list[2]
args.host2_password = host2_connect_info_list[1]
if args.host3_info is not False:
host3_info = args.host3_info
host3_connect_info_list = check_host_info_format(host3_info)
args.host3 = host3_connect_info_list[2]
args.host3_password = host3_connect_info_list[1]
# check root password is available
if args.host1_password != args.host2_password:
error("Host1 password and Host2 password must be the same, Please change one of them!")
elif args.host3_info is not False:
if not args.host1_password == args.host2_password == args.host3_password:
error("All hosts root password must be the same. Please check your host password!")
check_host_password(args.host1_password, args.host1)
check_host_password(args.host2_password, args.host2)
if args.host3_info is not False:
check_host_password(args.host3_password, args.host3)
# check image type
zstack_local_repo = os.path.isfile("/etc/yum.repos.d/zstack-local.repo")
galera_repo = os.path.isfile("/etc/yum.repos.d/galera.repo")
if zstack_local_repo is False or galera_repo is False:
error("This feature only support ZStack community CentOS 7 image")
# check network configuration
interface_list = os.listdir('/sys/class/net/')
if InstallHACmd.bridge not in interface_list and args.recovery_from_this_host is False:
error("Make sure you have already run the 'zs-network-setting' to setup the network environment, or set the"
" bridge name with --bridge, default bridge name is br_eth0 ")
if InstallHACmd.bridge.split('br_')[1] not in interface_list:
error("bridge %s should add the interface %s, make sure you have setup the interface or specify the right"
" bridge name" % (InstallHACmd.bridge, InstallHACmd.bridge.split('br_')[1]))
# check keepalived label should not longer than 15 characters
if len(InstallHACmd.bridge) >= 13:
error("bridge name length cannot be longer than 13 characters")
# check user start this command on host1
if args.recovery_from_this_host is False:
local_ip = get_ip_by_interface(InstallHACmd.bridge)
if args.host1 != local_ip:
error("Please run this command at host1 %s, or change your host1 ip to local host ip" % args.host1)
# check user input wrong host2 ip
if args.host2 == args.host1:
error("The host1 and host2 should not be the same ip address!")
elif args.host3_info is not False:
if args.host2 == args.host3 or args.host1 == args.host3:
error("The host1, host2 and host3 should not be the same ip address!")
# create log
create_log(InstallHACmd.logger_dir, InstallHACmd.logger_file)
# create config
if not os.path.exists(InstallHACmd.conf_dir):
os.makedirs(InstallHACmd.conf_dir)
yum_repo = get_yum_repo_from_property()
private_key_name = InstallHACmd.conf_dir+ "ha_key"
public_key_name = InstallHACmd.conf_dir+ "ha_key.pub"
if os.path.isfile(public_key_name) is not True:
command = "echo -e 'y\n'|ssh-keygen -q -t rsa -N \"\" -f %s" % private_key_name
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("Generate private key %s failed! Generate manually or rerun the process!" % private_key_name)
with open(public_key_name) as public_key_file:
public_key = public_key_file.read()
# create inventory file
with open('%s/host' % InstallHACmd.conf_dir,'w') as f:
f.writelines([args.host1+'\n', args.host2+'\n'])
if args.host3_info is not False:
with open('%s/host' % InstallHACmd.conf_dir,'w') as f:
f.writelines([args.host1+'\n', args.host2+'\n', args.host3+'\n'])
#host_inventory = '%s,%s' % (args.host1, args.host2)
host_inventory = InstallHACmd.conf_dir + 'host'
# init host1 parameter
self.host1_post_info = HostPostInfo()
self.host1_post_info.host = args.host1
self.host1_post_info.host_inventory = host_inventory
self.host1_post_info.private_key = private_key_name
self.host1_post_info.yum_repo = yum_repo
self.host1_post_info.vip = args.vip
self.host1_post_info.gateway_ip = gateway_ip
self.host1_post_info.rabbit_password = args.rabbit_password
self.host1_post_info.mysql_password = args.mysql_root_password
self.host1_post_info.mysql_userpassword = args.mysql_user_password
self.host1_post_info.post_url = ""
self.host_post_info_list.append(self.host1_post_info)
# init host2 parameter
self.host2_post_info = HostPostInfo()
self.host2_post_info.host = args.host2
self.host2_post_info.host_inventory = host_inventory
self.host2_post_info.private_key = private_key_name
self.host2_post_info.yum_repo = yum_repo
self.host2_post_info.vip = args.vip
self.host2_post_info.gateway_ip = gateway_ip
self.host2_post_info.rabbit_password = args.rabbit_password
self.host2_post_info.mysql_password = args.mysql_root_password
self.host2_post_info.mysql_userpassword = args.mysql_user_password
self.host2_post_info.post_url = ""
self.host_post_info_list.append(self.host2_post_info)
if args.host3_info is not False:
# init host3 parameter
self.host3_post_info = HostPostInfo()
self.host3_post_info.host = args.host3
self.host3_post_info.host_inventory = host_inventory
self.host3_post_info.private_key = private_key_name
self.host3_post_info.yum_repo = yum_repo
self.host3_post_info.vip = args.vip
self.host3_post_info.gateway_ip = gateway_ip
self.host3_post_info.rabbit_password = args.rabbit_password
self.host3_post_info.mysql_password = args.mysql_root_password
self.host3_post_info.mysql_userpassword = args.mysql_user_password
self.host3_post_info.post_url = ""
self.host_post_info_list.append(self.host3_post_info)
# init all variables in map
local_map = {
"mysql_connect_timeout" : 60000,
"mysql_socket_timeout" : 60000
}
if args.perfect_mode is True:
local_map['mysql_connect_timeout'] = 2000
local_map['mysql_socket_timeout'] = 2000
add_public_key_command = 'if [ ! -d ~/.ssh ]; then mkdir -p ~/.ssh; chmod 700 ~/.ssh; fi && if [ ! -f ~/.ssh/authorized_keys ]; ' \
'then touch ~/.ssh/authorized_keys; chmod 600 ~/.ssh/authorized_keys; fi && pub_key="%s";grep ' \
'"%s" ~/.ssh/authorized_keys > /dev/null; if [ $? -eq 1 ]; ' \
'then echo "%s" >> ~/.ssh/authorized_keys; fi && exit 0;'\
% (public_key.strip('\n'), public_key.strip('\n'), public_key.strip('\n'))
# add ha public key to host1
ssh_add_public_key_command = "sshpass -p \"%s\" ssh -q -o UserKnownHostsFile=/dev/null -o " \
"PubkeyAuthentication=no -o StrictHostKeyChecking=no root@%s '%s'" % \
(args.host1_password, args.host1, add_public_key_command)
(status, output) = commands.getstatusoutput(ssh_add_public_key_command)
if status != 0:
error(output)
# add ha public key to host2
ssh_add_public_key_command = "sshpass -p \"%s\" ssh -q -o UserKnownHostsFile=/dev/null -o " \
"PubkeyAuthentication=no -o StrictHostKeyChecking=no root@%s '%s' " % \
(args.host2_password, args.host2, add_public_key_command)
(status, output) = commands.getstatusoutput(ssh_add_public_key_command)
if status != 0:
error(output)
# add ha public key to host3
if args.host3_info is not False:
ssh_add_public_key_command = "sshpass -p \"%s\" ssh -q -o UserKnownHostsFile=/dev/null -o " \
"PubkeyAuthentication=no -o StrictHostKeyChecking=no root@%s '%s' " % \
(args.host3_password, args.host3, add_public_key_command)
(status, output) = commands.getstatusoutput(ssh_add_public_key_command)
if status != 0:
error(output)
# sync ansible key in two host
copy_arg = CopyArg()
copy_arg.src = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/"
copy_arg.dest = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/"
copy(copy_arg,self.host2_post_info)
command = "chmod 600 %s" % copy_arg.src + "id_rsa"
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
copy(copy_arg,self.host3_post_info)
run_remote_command(command, self.host3_post_info)
# check whether to recovery the HA cluster
if args.recovery_from_this_host is True:
if os.path.exists(InstallHACmd.conf_file) and InstallHACmd.ha_config_content is not None and args.bridge is None:
if "bridge_name" in InstallHACmd.ha_config_content:
InstallHACmd.bridge = InstallHACmd.ha_config_content['bridge_name']
local_ip = get_ip_by_interface(InstallHACmd.bridge)
if local_ip != args.host1 and local_ip != args.host2:
if args.host3_info is not False:
if local_ip != args.host3:
error("Make sure you are running the 'zs-network-setting' command on host1 or host2 or host3")
else:
error("Make sure you are running the 'zs-network-setting' command on host1 or host2")
# stop mevoco
spinner_info = SpinnerInfo()
spinner_info.output = "Stop Mevoco on all management nodes"
spinner_info.name = "stop_mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
SpinnerInfo.spinner_status['stop_mevoco'] = True
ZstackSpinner(spinner_info)
for host_info in self.host_post_info_list:
stop_mevoco(host_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to recovery mysql from this host"
spinner_info.name = "recovery_cluster"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['recovery_cluster'] = True
ZstackSpinner(spinner_info)
# kill mysql process to make sure mysql bootstrap can work
service_status("mysql", "state=stopped", self.host1_post_info)
mysqld_status = run_remote_command("netstat -ltnp | grep :4567[[:space:]]", self.host1_post_info, return_status=True)
if mysqld_status is True:
run_remote_command("lsof -i tcp:4567 | awk 'NR!=1 {print $2}' | xargs kill -9", self.host1_post_info)
service_status("mysql", "state=stopped", self.host2_post_info)
mysqld_status = run_remote_command("netstat -ltnp | grep :4567[[:space:]] ", self.host2_post_info, return_status=True)
if mysqld_status is True:
run_remote_command("lsof -i tcp:4567 | awk 'NR!=1 {print $2}' | xargs kill -9", self.host2_post_info)
if args.host3_info is not False:
service_status("mysql", "state=stopped", self.host3_post_info)
mysqld_status = run_remote_command("netstat -ltnp | grep :4567[[:space:]]", self.host3_post_info, return_status=True)
if mysqld_status is True:
run_remote_command("lsof -i tcp:4567 | awk 'NR!=1 {print $2}' | xargs kill -9", self.host3_post_info)
command = "service mysql bootstrap"
(status, output) = commands.getstatusoutput(command)
if status != 0:
error(output)
else:
#command = "service mysql start"
if local_ip == self.host1_post_info.host:
# make sure vip will be on this host, so start haproxy firstly
service_status("haproxy","state=started", self.host1_post_info)
service_status("keepalived","state=started", self.host1_post_info)
service_status("rabbitmq-server","state=started", self.host1_post_info)
#run_remote_command(command, self.host2_post_info)
service_status("mysql","state=started", self.host2_post_info)
service_status("haproxy","state=started", self.host2_post_info)
service_status("keepalived","state=started", self.host2_post_info)
service_status("rabbitmq-server","state=started", self.host2_post_info)
if args.host3_info is not False:
#run_remote_command(command, self.host3_post_info)
service_status("mysql","state=started", self.host3_post_info)
service_status("haproxy","state=started", self.host3_post_info)
service_status("keepalived","state=started", self.host3_post_info)
service_status("rabbitmq-server","state=started", self.host3_post_info)
#command = "service mysql restart"
#run_remote_command(command, self.host1_post_info)
service_status("mysql","state=restarted", self.host1_post_info)
elif local_ip == self.host2_post_info.host:
service_status("haproxy","state=started", self.host2_post_info)
service_status("keepalived","state=started", self.host2_post_info)
service_status("rabbitmq-server","state=started", self.host2_post_info)
#run_remote_command(command, self.host1_post_info)
service_status("mysql","state=started", self.host1_post_info)
service_status("haproxy","state=started", self.host1_post_info)
service_status("keepalived","state=started", self.host1_post_info)
service_status("rabbitmq-server","state=started", self.host1_post_info)
if args.host3_info is not False:
#run_remote_command(command, self.host3_post_info)
service_status("mysql","state=started", self.host3_post_info)
service_status("haproxy","state=started", self.host3_post_info)
service_status("keepalived","state=started", self.host3_post_info)
service_status("rabbitmq-server","state=started", self.host3_post_info)
#command = "service mysql restart"
#run_remote_command(command, self.host2_post_info)
service_status("mysql","state=restarted", self.host2_post_info)
else:
# localhost must be host3
service_status("haproxy","state=started", self.host3_post_info)
service_status("keepalived","state=started", self.host3_post_info)
service_status("rabbitmq-server","state=started", self.host3_post_info)
#run_remote_command(command, self.host1_post_info)
service_status("mysql","state=started", self.host1_post_info)
service_status("haproxy","state=started", self.host1_post_info)
service_status("keepalived","state=started", self.host1_post_info)
service_status("rabbitmq-server","state=started", self.host1_post_info)
service_status("mysql","state=started", self.host2_post_info)
service_status("haproxy","state=started", self.host2_post_info)
service_status("keepalived","state=started", self.host2_post_info)
service_status("rabbitmq-server","state=started", self.host2_post_info)
#command = "service mysql restart"
#run_remote_command(command, self.host2_post_info)
service_status("mysql","state=restarted", self.host3_post_info)
# sync prometheus data
sync_arg = SyncArg()
sync_arg.src = '/var/lib/zstack/prometheus/'
sync_arg.dest = '/var/lib/zstack/prometheus/'
sync(sync_arg, self.host2_post_info)
if args.host3_info is not False:
sync(sync_arg, self.host3_post_info)
# start mevoco
spinner_info.output = "Starting Mevoco"
spinner_info.name = "mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['mevoco'] = True
ZstackSpinner(spinner_info)
for host_info in self.host_post_info_list:
start_mevoco(host_info)
SpinnerInfo.spinner_status['mevoco'] = False
time.sleep(.2)
info("The cluster has been recovered!")
sys.exit(0)
# generate ha config
host_list = "%s,%s" % (self.host1_post_info.host, self.host2_post_info.host)
if args.host3_info is not False:
host_list = "%s,%s,%s" % (self.host1_post_info.host, self.host2_post_info.host, self.host3_post_info.host)
ha_conf_file = open(InstallHACmd.conf_file, 'w')
ha_info = {'vip':args.vip, 'gateway':self.host1_post_info.gateway_ip, 'bridge_name':InstallHACmd.bridge,
'mevoco_url':'http://' + args.vip + ':8888', 'cluster_url':'http://'+ args.vip +':9132/zstack', 'host_list':host_list}
yaml.dump(ha_info, ha_conf_file, default_flow_style=False)
command = "mkdir -p %s" % InstallHACmd.conf_dir
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
copy_arg = CopyArg()
copy_arg.src = InstallHACmd.conf_dir
copy_arg.dest = InstallHACmd.conf_dir
copy(copy_arg,self.host2_post_info)
command = "chmod 600 %s" % InstallHACmd.conf_dir + "ha_key"
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
copy(copy_arg,self.host3_post_info)
run_remote_command(command, self.host3_post_info)
# get iptables from system config
service_status("iptables","state=restarted",self.host1_post_info)
service_status("iptables","state=restarted",self.host2_post_info)
if args.host3_info is not False:
service_status("iptables","state=restarted",self.host3_post_info)
# remove mariadb for avoiding conflict with mevoco install process
command = "rpm -q mariadb | grep 'not installed' || yum remove -y mariadb"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
run_remote_command(command, self.host3_post_info)
command = "hostnamectl set-hostname zstack-1"
run_remote_command(command, self.host1_post_info)
command = "hostnamectl set-hostname zstack-2"
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
command = "hostnamectl set-hostname zstack-3"
run_remote_command(command, self.host3_post_info)
# remove old zstack-1 and zstack-2 in hosts file
update_file("/etc/hosts", "regexp='\.*zstack\.*' state=absent", self.host1_post_info)
update_file("/etc/hosts", "regexp='\.*zstack\.*' state=absent", self.host2_post_info)
update_file("/etc/hosts", "line='%s zstack-1'" % args.host1, self.host1_post_info)
update_file("/etc/hosts", "line='%s zstack-2'" % args.host2, self.host1_post_info)
if args.host3_info is not False:
update_file("/etc/hosts", "line='%s zstack-3'" % args.host3, self.host1_post_info)
update_file("/etc/hosts", "line='%s zstack-1'" % args.host1, self.host2_post_info)
update_file("/etc/hosts", "line='%s zstack-2'" % args.host2, self.host2_post_info)
if args.host3_info is not False:
update_file("/etc/hosts", "line='%s zstack-3'" % args.host3, self.host2_post_info)
if args.host3_info is not False:
update_file("/etc/hosts", "line='%s zstack-1'" % args.host1, self.host3_post_info)
update_file("/etc/hosts", "line='%s zstack-2'" % args.host2, self.host3_post_info)
update_file("/etc/hosts", "line='%s zstack-3'" % args.host3, self.host3_post_info)
#save iptables at last
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host2_post_info.host, self.host2_post_info.host)
run_remote_command(command, self.host1_post_info)
if args.host3_info is not False:
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host3_post_info.host, self.host3_post_info.host)
run_remote_command(command, self.host1_post_info)
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host1_post_info.host, self.host1_post_info.host)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host3_post_info.host, self.host3_post_info.host)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host1_post_info.host, self.host1_post_info.host)
run_remote_command(command, self.host3_post_info)
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host2_post_info.host, self.host2_post_info.host)
run_remote_command(command, self.host3_post_info)
# stop haproxy and keepalived service for avoiding terminal status disturb
command = "service keepalived stop && service haproxy stop || echo True"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
run_remote_command(command, self.host3_post_info)
#pass all the variables to other HA deploy process
InstallHACmd.host_post_info_list = [self.host1_post_info, self.host2_post_info]
if args.host3_info is not False:
InstallHACmd.host_post_info_list = [self.host1_post_info, self.host2_post_info, self.host3_post_info]
# setup mysql ha
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to deploy Mysql HA"
spinner_info.name = 'mysql'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['mysql'] = True
ZstackSpinner(spinner_info)
MysqlHA()()
# setup rabbitmq ha
spinner_info = SpinnerInfo()
spinner_info.output ="Starting to deploy Rabbitmq HA"
spinner_info.name = 'rabbitmq'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['rabbitmq'] = True
ZstackSpinner(spinner_info)
RabbitmqHA()()
# setup haproxy and keepalived
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to deploy Haproxy and Keepalived"
spinner_info.name = 'haproxy_keepalived'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['haproxy_keepalived'] = True
ZstackSpinner(spinner_info)
HaproxyKeepalived()()
#install database on local management node
command = "zstack-ctl stop"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
run_remote_command(command, self.host3_post_info)
if args.keep_db is True:
command = "zstack-ctl deploydb --keep-db --host=%s --port=3306 --zstack-password=%s --root-password=%s" \
% (args.host1, args.mysql_user_password, args.mysql_root_password)
run_remote_command(command, self.host1_post_info)
elif args.drop is True:
command = "zstack-ctl deploydb --drop --host=%s --port=3306 --zstack-password=%s --root-password=%s" \
% (args.host1, args.mysql_user_password, args.mysql_root_password)
run_remote_command(command, self.host1_post_info)
else:
command = "zstack-ctl deploydb --host=%s --port=3306 --zstack-password=%s --root-password=%s" \
% (args.host1, args.mysql_user_password, args.mysql_root_password)
run_remote_command(command, self.host1_post_info)
command = "zstack-ctl configure DB.url=jdbc:mysql://%s:53306/{database}?connectTimeout=%d\&socketTimeout=%d"\
% (args.vip, local_map['mysql_connect_timeout'], local_map['mysql_socket_timeout'])
run_remote_command(command, self.host1_post_info)
command = "zstack-ctl configure CloudBus.rabbitmqPassword=%s" % args.mysql_user_password
run_remote_command(command, self.host1_post_info)
# copy zstack-1 property to zstack-2 and update the management.server.ip
# update zstack-1 firstly
update_file("%s" % ctl.properties_file_path,
"regexp='^CloudBus\.serverIp\.0' line='CloudBus.serverIp.0=%s'" % args.vip, self.host1_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='^CloudBus\.serverIp\.1' state=absent" , self.host1_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='^CloudBus\.rabbitmqUsername' line='CloudBus.rabbitmqUsername=zstack'",
self.host1_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='^CloudBus\.rabbitmqPassword' line='CloudBus.rabbitmqPassword=%s'"
% args.rabbit_password, self.host1_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='^CloudBus\.rabbitmqHeartbeatTimeout' line='CloudBus.rabbitmqHeartbeatTimeout=10'",
self.host1_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='management\.server\.ip' line='management.server.ip = %s'" %
args.host1, self.host1_post_info)
copy_arg = CopyArg()
copy_arg.src = ctl.properties_file_path
copy_arg.dest = ctl.properties_file_path
copy(copy_arg, self.host2_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='management\.server\.ip' line='management.server.ip = %s'"
% args.host2, self.host2_post_info)
if args.host3_info is not False:
copy(copy_arg, self.host3_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='management\.server\.ip' line='management.server.ip = %s'"
% args.host3, self.host3_post_info)
#finally, start zstack-1 and zstack-2
spinner_info = SpinnerInfo()
spinner_info.output = "Starting Mevoco"
spinner_info.name = "mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['mevoco'] = True
ZstackSpinner(spinner_info)
# Add zstack-ctl start to rc.local for auto recovery when system reboot
command = "service iptables save"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
run_remote_command(command, self.host3_post_info)
command = "zstack-ctl install_ui"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
run_remote_command(command, self.host3_post_info)
command = "zstack-ctl start"
(status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(private_key_name, args.host1, command))
if status != 0:
error("Something wrong on host: %s\n %s" % (args.host1, output))
(status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(private_key_name, args.host2, command))
if status != 0:
error("Something wrong on host: %s\n %s" % (args.host2, output))
if args.host3_info is not False:
(status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(private_key_name, args.host3, command))
if status != 0:
error("Something wrong on host: %s\n %s" % (args.host3, output))
SpinnerInfo.spinner_status['mevoco'] = False
time.sleep(0.2)
#sync imagestore key
copy_arg = CopyArg()
copy_arg.src = ctl.zstack_home + "/../../../imagestore/bin/certs/"
copy_arg.dest = ctl.zstack_home + "/../../../imagestore/bin/certs/"
copy(copy_arg, self.host2_post_info)
if args.host3_info is not False:
copy(copy_arg, self.host2_post_info)
print '''HA deploy finished!
Mysql user 'root' password: %s
Mysql user 'zstack' password: %s
Rabbitmq user 'zstack' password: %s
Mevoco is running, visit %s in Chrome or Firefox with default user/password : %s
You can check the cluster status at %s with user/passwd : %s
''' % (args.mysql_root_password, args.mysql_user_password, args.rabbit_password,
colored('http://%s:8888' % args.vip, 'blue'), colored('admin/password', 'yellow'),
colored('http://%s:9132/zstack' % args.vip, 'blue'), colored('zstack/zstack123', 'yellow'))
class HaproxyKeepalived(InstallHACmd):
    """Install and configure haproxy + keepalived on the HA management nodes.

    haproxy load-balances MySQL (53306->3306), RabbitMQ (55672->5672) and the
    UI (8888->5000) across the 2 (or 3) hosts; keepalived floats the VIP
    between them (all nodes BACKUP + nopreempt so recovery never races for
    the VIP).  All hosts share one rendered haproxy.cfg; each host gets its
    own keepalived.conf (differing only in priority).

    Fixes over the previous revision:
      * host2's keepalived.conf was written through host1's (already closed)
        file descriptor, leaving host2's config file empty;
      * the 3-node keepalived render omitted the 'bridge' variable, so
        jinja2 emitted blank 'interface'/'label' lines for host3.
    """
    def __init__(self):
        super(HaproxyKeepalived, self).__init__()
        self.name = "haproxy and keepalived init"
        self.description = "haproxy and keepalived setup"
        # Per-host ansible/ssh connection info; host3 only exists for 3-node HA.
        self.host_post_info_list = InstallHACmd.host_post_info_list
        self.host1_post_info = self.host_post_info_list[0]
        self.host2_post_info = self.host_post_info_list[1]
        if len(self.host_post_info_list) == 3:
            self.host3_post_info = self.host_post_info_list[2]
        self.yum_repo = self.host1_post_info.yum_repo
        self.vip = self.host1_post_info.vip
        self.gateway = self.host1_post_info.gateway_ip

    def __call__(self):
        # Install haproxy/keepalived, but only the packages rpm reports missing.
        command = ("yum clean --enablerepo=zstack-local metadata && pkg_list=`rpm -q haproxy keepalived"
                   " | grep \"not installed\" | awk '{ print $2 }'` && for pkg in $pkg_list; do yum "
                   "--disablerepo=* --enablerepo=%s install -y $pkg; done;") % self.yum_repo
        run_remote_command(command, self.host1_post_info)
        run_remote_command(command, self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            run_remote_command(command, self.host3_post_info)
        # Let rsyslog accept remote/UDP log input so haproxy can log via local2.
        update_file("/etc/sysconfig/rsyslog","regexp='^SYSLOGD_OPTIONS=\"\"' line='SYSLOGD_OPTIONS=\"-r -m 0\"'", self.host1_post_info)
        update_file("/etc/sysconfig/rsyslog","regexp='^SYSLOGD_OPTIONS=\"\"' line='SYSLOGD_OPTIONS=\"-r -m 0\"'", self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            update_file("/etc/sysconfig/rsyslog","regexp='^SYSLOGD_OPTIONS=\"\"' line='SYSLOGD_OPTIONS=\"-r -m 0\"'", self.host3_post_info)
        update_file("/etc/rsyslog.conf","line='$ModLoad imudp'", self.host1_post_info)
        update_file("/etc/rsyslog.conf","line='$UDPServerRun 514'", self.host1_post_info)
        update_file("/etc/rsyslog.conf","line='local2.* /var/log/haproxy.log'", self.host1_post_info)
        update_file("/etc/rsyslog.conf","line='$ModLoad imudp'", self.host2_post_info)
        update_file("/etc/rsyslog.conf","line='$UDPServerRun 514'", self.host2_post_info)
        update_file("/etc/rsyslog.conf","line='local2.* /var/log/haproxy.log'", self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            update_file("/etc/rsyslog.conf","line='$ModLoad imudp'", self.host3_post_info)
            update_file("/etc/rsyslog.conf","line='$UDPServerRun 514'", self.host3_post_info)
            update_file("/etc/rsyslog.conf","line='local2.* /var/log/haproxy.log'", self.host3_post_info)
        # Pre-create the log file owned by haproxy so the daemon can write it.
        command = "touch /var/log/haproxy.log"
        run_remote_command(command, self.host1_post_info)
        run_remote_command(command, self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            run_remote_command(command, self.host3_post_info)
        file_operation("/var/log/haproxy.log","owner=haproxy group=haproxy", self.host1_post_info)
        file_operation("/var/log/haproxy.log","owner=haproxy group=haproxy", self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            file_operation("/var/log/haproxy.log","owner=haproxy group=haproxy", self.host3_post_info)
        service_status("rsyslog","state=restarted enabled=yes", self.host1_post_info)
        service_status("rsyslog","state=restarted enabled=yes", self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            service_status("rsyslog","state=restarted enabled=yes", self.host3_post_info)
        # 2-node haproxy template; {{ hostN }} filled in by jinja2 below.
        haproxy_raw_conf = '''
global
log 127.0.0.1 local2 emerg alert crit err warning notice info debug
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 1m
timeout check 1m
timeout tunnel 60m
maxconn 6000
listen admin_stats 0.0.0.0:9132
mode http
stats uri /zstack
stats realm Global\ statistics
stats auth zstack:zstack123
listen proxy-mysql 0.0.0.0:53306
mode tcp
option tcplog
balance source
option httpchk OPTIONS * HTTP/1.1\\r\\nHost:\ www
server zstack-1 {{ host1 }}:3306 weight 10 check port 6033 inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:3306 backup weight 10 check port 6033 inter 3s rise 2 fall 2
option tcpka
listen proxy-rabbitmq 0.0.0.0:55672
mode tcp
balance source
timeout client 3h
timeout server 3h
server zstack-1 {{ host1 }}:5672 weight 10 check inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:5672 backup weight 10 check inter 3s rise 2 fall 2
option tcpka
# dashboard not installed, so haproxy will report error
listen proxy-ui 0.0.0.0:8888
mode http
option http-server-close
balance source
server zstack-1 {{ host1 }}:5000 weight 10 check inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:5000 weight 10 check inter 3s rise 2 fall 2
option tcpka
'''
        if len(self.host_post_info_list) == 3:
            # 3-node variant: identical except for the extra zstack-3 backends.
            haproxy_raw_conf = '''
global
log 127.0.0.1 local2 emerg alert crit err warning notice info debug
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 1m
timeout check 1m
timeout tunnel 60m
maxconn 6000
listen admin_stats 0.0.0.0:9132
mode http
stats uri /zstack
stats realm Global\ statistics
stats auth zstack:zstack123
listen proxy-mysql 0.0.0.0:53306
mode tcp
option tcplog
balance source
option httpchk OPTIONS * HTTP/1.1\\r\\nHost:\ www
server zstack-1 {{ host1 }}:3306 weight 10 check port 6033 inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:3306 backup weight 10 check port 6033 inter 3s rise 2 fall 2
server zstack-3 {{ host3 }}:3306 backup weight 10 check port 6033 inter 3s rise 2 fall 2
option tcpka
listen proxy-rabbitmq 0.0.0.0:55672
mode tcp
balance source
timeout client 3h
timeout server 3h
server zstack-1 {{ host1 }}:5672 weight 10 check inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:5672 backup weight 10 check inter 3s rise 2 fall 2
server zstack-3 {{ host3 }}:5672 backup weight 10 check inter 3s rise 2 fall 2
option tcpka
# dashboard not installed, so haproxy will report error
listen proxy-ui 0.0.0.0:8888
mode http
option http-server-close
balance source
server zstack-1 {{ host1 }}:5000 weight 10 check inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:5000 weight 10 check inter 3s rise 2 fall 2
server zstack-3 {{ host3 }}:5000 weight 10 check inter 3s rise 2 fall 2
option tcpka
'''
        haproxy_conf_template = jinja2.Template(haproxy_raw_conf)
        haproxy_host1_conf = haproxy_conf_template.render({
            'host1' : self.host1_post_info.host,
            'host2' : self.host2_post_info.host
        })
        if len(self.host_post_info_list) == 3:
            haproxy_host1_conf = haproxy_conf_template.render({
                'host1' : self.host1_post_info.host,
                'host2' : self.host2_post_info.host,
                'host3' : self.host3_post_info.host
            })
        # host1, host2 (and host3) all use the same rendered haproxy config file.
        host1_config, haproxy_host1_conf_file = tempfile.mkstemp()
        f1 = os.fdopen(host1_config, 'w')
        f1.write(haproxy_host1_conf)
        f1.close()
        def cleanup_haproxy_config_file():
            os.remove(haproxy_host1_conf_file)
        self.install_cleanup_routine(cleanup_haproxy_config_file)
        copy_arg = CopyArg()
        copy_arg.src = haproxy_host1_conf_file
        copy_arg.dest = "/etc/haproxy/haproxy.cfg"
        copy(copy_arg,self.host1_post_info)
        copy(copy_arg,self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            copy(copy_arg,self.host3_post_info)
        # Open the firewall for every proxied/checked port, idempotently
        # (-C tests for the rule before -I inserts it).
        command = "iptables -C INPUT -p tcp -m tcp --dport 53306 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 53306 -j ACCEPT; " \
                  "iptables -C INPUT -p tcp -m tcp --dport 58080 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 58080 -j ACCEPT ; " \
                  "iptables -C INPUT -p tcp -m tcp --dport 55672 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 55672 -j ACCEPT ; " \
                  "iptables -C INPUT -p tcp -m tcp --dport 80 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 80 -j ACCEPT ; " \
                  "iptables -C INPUT -p tcp -m tcp --dport 9132 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 9132 -j ACCEPT ; " \
                  "iptables -C INPUT -p tcp -m tcp --dport 8888 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 8888 -j ACCEPT ; " \
                  "iptables -C INPUT -p tcp -m tcp --dport 6033 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 6033 -j ACCEPT; service iptables save "
        run_remote_command(command, self.host1_post_info)
        run_remote_command(command, self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            run_remote_command(command, self.host3_post_info)
        # keepalived config: every node is BACKUP with nopreempt so a recovered
        # node does not steal the VIP back; priority decides initial ownership.
        keepalived_raw_config = '''
! Configuration File for keepalived
global_defs {
router_id HAPROXY_LOAD
}
vrrp_script Monitor_Haproxy {
script "/usr/local/bin/keepalived-kill.sh"
interval 2
weight 5
}
vrrp_instance VI_1 {
# use the same state with host2, so no master node, recovery will not race to control the vip
state BACKUP
interface {{ bridge }}
virtual_router_id {{ vrouter_id }}
priority {{ priority }}
nopreempt
advert_int 1
authentication {
auth_type PASS
auth_pass {{ auth_passwd }}
}
track_script {
Monitor_Haproxy
}
virtual_ipaddress {
{{ vip }}/{{ netmask }} label {{ bridge }}:0
}
}
'''
        virtual_router_id = random.randint(1, 255)
        # NOTE: random (not secrets) — the VRRP auth password only guards LAN
        # advertisements; kept as-is for Python 2 compatibility.
        auth_pass = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(15))
        master_priority = 92
        slave_priority = 91
        second_slave_priority = 90
        keepalived_template = jinja2.Template(keepalived_raw_config)
        keepalived_host1_config = keepalived_template.render({
            'bridge' : InstallHACmd.bridge,
            'vrouter_id': virtual_router_id,
            'priority': master_priority,
            'auth_passwd': auth_pass,
            'vip': self.vip,
            'netmask': self.get_formatted_netmask(InstallHACmd.bridge)
        })
        keepalived_host2_config = keepalived_template.render({
            'bridge' : InstallHACmd.bridge,
            'vrouter_id': virtual_router_id,
            'priority': slave_priority,
            'auth_passwd': auth_pass,
            'vip': self.vip,
            'netmask': self.get_formatted_netmask(InstallHACmd.bridge)
        })
        if len(self.host_post_info_list) == 3:
            keepalived_host3_config = keepalived_template.render({
                # fix: 'bridge' was missing here, so jinja2 rendered empty
                # 'interface'/'label' lines in host3's keepalived.conf
                'bridge' : InstallHACmd.bridge,
                'vrouter_id': virtual_router_id,
                'priority': second_slave_priority,
                'auth_passwd': auth_pass,
                'vip': self.vip,
                'netmask': self.get_formatted_netmask(InstallHACmd.bridge)
            })
        host1_config, keepalived_host1_config_file = tempfile.mkstemp()
        f1 = os.fdopen(host1_config, 'w')
        f1.write(keepalived_host1_config)
        f1.close()
        host2_config, keepalived_host2_config_file = tempfile.mkstemp()
        # fix: was os.fdopen(host1_config, ...) — host1's fd is already closed,
        # which left host2's keepalived config file empty
        f2 = os.fdopen(host2_config, 'w')
        f2.write(keepalived_host2_config)
        f2.close()
        if len(self.host_post_info_list) == 3:
            host3_config, keepalived_host3_config_file = tempfile.mkstemp()
            f3 = os.fdopen(host3_config, 'w')
            f3.write(keepalived_host3_config)
            f3.close()
        def cleanup_keepalived_config_file():
            os.remove(keepalived_host1_config_file)
            os.remove(keepalived_host2_config_file)
            if len(self.host_post_info_list) == 3:
                os.remove(keepalived_host3_config_file)
        self.install_cleanup_routine(cleanup_keepalived_config_file)
        copy_arg = CopyArg()
        copy_arg.src = keepalived_host1_config_file
        copy_arg.dest = "/etc/keepalived/keepalived.conf"
        copy(copy_arg, self.host1_post_info)
        copy_arg = CopyArg()
        copy_arg.src = keepalived_host2_config_file
        copy_arg.dest = "/etc/keepalived/keepalived.conf"
        copy(copy_arg, self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            copy_arg = CopyArg()
            copy_arg.src = keepalived_host3_config_file
            copy_arg.dest = "/etc/keepalived/keepalived.conf"
            copy(copy_arg, self.host3_post_info)
        # Deploy the health-check script keepalived runs every 2s (see template).
        copy_arg = CopyArg()
        copy_arg.src = "%s/conf/keepalived-kill.sh" % InstallHACmd.current_dir
        copy_arg.dest = "/usr/local/bin/keepalived-kill.sh"
        copy_arg.args = "mode='u+x,g+x,o+x'"
        copy(copy_arg, self.host1_post_info)
        copy(copy_arg, self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            copy(copy_arg, self.host3_post_info)
        # Restart both daemons so the new configs take effect.
        service_status("keepalived", "state=restarted enabled=yes", self.host1_post_info)
        service_status("keepalived", "state=restarted enabled=yes", self.host2_post_info)
        service_status("haproxy", "state=restarted enabled=yes", self.host1_post_info)
        service_status("haproxy", "state=restarted enabled=yes", self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            service_status("keepalived", "state=restarted enabled=yes", self.host3_post_info)
            service_status("haproxy", "state=restarted enabled=yes", self.host3_post_info)
class MysqlHA(InstallHACmd):
    """Install and configure a MariaDB Galera cluster across the HA hosts.

    Installs MariaDB-Galera-server/xinetd/rsync, renders a per-host galera.cnf,
    bootstraps the cluster from host1, creates the MySQL 'zstack' and 'root'
    accounts, and deploys the xinetd mysqlchk health check (port 6033, used by
    haproxy), a cron network watchdog, and cron DB backup jobs.
    """
    def __init__(self):
        super(MysqlHA, self).__init__()
        # Per-host connection info; host3 only exists for 3-node setups.
        self.host_post_info_list = InstallHACmd.host_post_info_list
        self.host1_post_info = self.host_post_info_list[0]
        self.host2_post_info = self.host_post_info_list[1]
        if len(self.host_post_info_list) == 3:
            self.host3_post_info = self.host_post_info_list[2]
        self.yum_repo = self.host1_post_info.yum_repo
        self.mysql_password = self.host1_post_info.mysql_password
    def __call__(self):
        # Install only the packages rpm reports as "not installed".
        command = ("yum clean --enablerepo=zstack-local metadata && pkg_list=`rpm -q MariaDB-Galera-server xinetd rsync openssl-libs "
                   " | grep \"not installed\" | awk '{ print $2 }'` && for pkg in $pkg_list; do yum "
                   "--disablerepo=* --enablerepo=%s,mariadb install -y $pkg; done;") % self.yum_repo
        run_remote_command(command, self.host1_post_info)
        run_remote_command(command, self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            run_remote_command(command, self.host3_post_info)
        # Generate galera config file and copy to host1 host2.
        # {{ host1 }} is the node's own bind address; the gcomm:// list names
        # its peers (rendered per host below).
        galera_raw_config= '''[mysqld]
skip-name-resolve=1
character-set-server=utf8
binlog_format=ROW
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
innodb_locks_unsafe_for_binlog=1
max_connections=2048
query_cache_size=0
query_cache_type=0
bind_address= {{ host1 }}
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_name="galera_cluster"
wsrep_cluster_address="gcomm://{{ host2 }},{{ host1 }}"
wsrep_slave_threads=1
wsrep_certify_nonPK=1
wsrep_max_ws_rows=131072
wsrep_max_ws_size=1073741824
wsrep_debug=0
wsrep_convert_LOCK_to_trx=0
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_drupal_282555_workaround=0
wsrep_causal_reads=0
wsrep_notify_cmd=
wsrep_sst_method=rsync
'''
        if len(self.host_post_info_list) == 3:
            # Generate galera config file and copy to host1 host2 host3
            # (3-node variant: gcomm:// lists both peers).
            galera_raw_config= '''[mysqld]
skip-name-resolve=1
character-set-server=utf8
binlog_format=ROW
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
innodb_locks_unsafe_for_binlog=1
max_connections=2048
query_cache_size=0
query_cache_type=0
bind_address= {{ host1 }}
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_name="galera_cluster"
wsrep_cluster_address="gcomm://{{ host3 }},{{ host2 }},{{ host1 }}"
wsrep_slave_threads=1
wsrep_certify_nonPK=1
wsrep_max_ws_rows=131072
wsrep_max_ws_size=1073741824
wsrep_debug=0
wsrep_convert_LOCK_to_trx=0
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_drupal_282555_workaround=0
wsrep_causal_reads=0
wsrep_notify_cmd=
wsrep_sst_method=rsync
'''
        # Render one config per host; 'host1' is always the target host itself,
        # the remaining keys are its peers.
        galera_config_template = jinja2.Template(galera_raw_config)
        galera_config_host1 = galera_config_template.render({
            'host1' : self.host1_post_info.host,
            'host2' : self.host2_post_info.host
        })
        if len(self.host_post_info_list) == 3:
            galera_config_host1 = galera_config_template.render({
                'host1' : self.host1_post_info.host,
                'host2' : self.host2_post_info.host,
                'host3' : self.host3_post_info.host
            })
        galera_config_host2 = galera_config_template.render({
            'host1' : self.host2_post_info.host,
            'host2' : self.host1_post_info.host
        })
        if len(self.host_post_info_list) == 3:
            galera_config_host2 = galera_config_template.render({
                'host1' : self.host2_post_info.host,
                'host2' : self.host3_post_info.host,
                'host3' : self.host1_post_info.host
            })
        if len(self.host_post_info_list) == 3:
            galera_config_host3 = galera_config_template.render({
                'host1' : self.host3_post_info.host,
                'host2' : self.host1_post_info.host,
                'host3' : self.host2_post_info.host
            })
        # Write each rendered config to a temp file and register cleanup.
        host1_config, galera_config_host1_file = tempfile.mkstemp()
        f1 = os.fdopen(host1_config, 'w')
        f1.write(galera_config_host1)
        f1.close()
        host2_config, galera_config_host2_file = tempfile.mkstemp()
        f2 = os.fdopen(host2_config, 'w')
        f2.write(galera_config_host2)
        f2.close()
        if len(self.host_post_info_list) == 3:
            host3_config, galera_config_host3_file = tempfile.mkstemp()
            f3 = os.fdopen(host3_config, 'w')
            f3.write(galera_config_host3)
            f3.close()
        def cleanup_galera_config_file():
            os.remove(galera_config_host1_file)
            os.remove(galera_config_host2_file)
            if len(self.host_post_info_list) == 3:
                os.remove(galera_config_host3_file)
        self.install_cleanup_routine(cleanup_galera_config_file)
        copy_arg = CopyArg()
        copy_arg.src = galera_config_host1_file
        copy_arg.dest = "/etc/my.cnf.d/galera.cnf"
        copy(copy_arg, self.host1_post_info)
        copy_arg = CopyArg()
        copy_arg.src = galera_config_host2_file
        copy_arg.dest = "/etc/my.cnf.d/galera.cnf"
        copy(copy_arg, self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            copy_arg = CopyArg()
            copy_arg.src = galera_config_host3_file
            copy_arg.dest = "/etc/my.cnf.d/galera.cnf"
            copy(copy_arg, self.host3_post_info)
        # restart mysql service to enable galera config
        command = "service mysql stop || true"
        #service_status("mysql", "state=stopped", self.host1_post_info)
        run_remote_command(command, self.host2_post_info)
        #last stop node should be the first node to do bootstrap
        run_remote_command(command, self.host1_post_info)
        if len(self.host_post_info_list) == 3:
            run_remote_command(command, self.host3_post_info)
        # host1 bootstraps the new cluster; the others then join it.
        command = "service mysql bootstrap"
        run_remote_command(command, self.host1_post_info)
        run_remote_command("service mysql start && chkconfig mysql on", self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            run_remote_command("service mysql start && chkconfig mysql on", self.host3_post_info)
        run_remote_command("service mysql restart && chkconfig mysql on", self.host1_post_info)
        # If root still has an empty password this is a fresh install, so the
        # zstack/root accounts must be created (replicated cluster-wide).
        init_install = run_remote_command("mysql -u root --password='' -e 'exit' ", self.host1_post_info, return_status=True)
        if init_install is True:
            #command = "mysql -u root --password='' -Bse \"show status like 'wsrep_%%';\""
            #galera_status = run_remote_command(command, self.host2_post_info)
            #create zstack user
            command =" mysql -u root --password='' -Bse 'grant ALL PRIVILEGES on *.* to zstack@\"localhost\" Identified by \"%s\"; " \
                     "grant ALL PRIVILEGES on *.* to zstack@\"zstack-1\" Identified by \"%s\"; " \
                     "grant ALL PRIVILEGES on *.* to zstack@\"%%\" Identified by \"%s\"; " \
                     "grant ALL PRIVILEGES on *.* to root@\"%%\" Identified by \"%s\";" \
                     "grant ALL PRIVILEGES on *.* to root@\"localhost\" Identified by \"%s\"; " \
                     "grant ALL PRIVILEGES ON *.* TO root@\"%%\" IDENTIFIED BY \"%s\" WITH GRANT OPTION; " \
                     "flush privileges;'" % (self.host1_post_info.mysql_userpassword, self.host1_post_info.mysql_userpassword,
                                             self.host1_post_info.mysql_userpassword,self.host1_post_info.mysql_password,
                                             self.host1_post_info.mysql_password, self.host1_post_info.mysql_password)
            # One retry after 5s: the freshly (re)started cluster may not yet
            # accept writes.
            (status, output) = run_remote_command(command, self.host1_post_info, True, True)
            if status is False:
                time.sleep(5)
                (status, output) = run_remote_command(command, self.host1_post_info, True, True)
                if status is False:
                    error("Failed to set mysql 'zstack' and 'root' password, the reason is %s" % output)
        # config mysqlchk_status.sh on zstack-1 and zstack-2
        # (HTTP-over-xinetd health probe that haproxy checks on port 6033).
        mysqlchk_raw_script = '''#!/bin/sh
MYSQL_HOST="{{ host1 }}"
MYSQL_PORT="3306"
MYSQL_USERNAME="{{ mysql_username }}"
MYSQL_PASSWORD="{{ mysql_password }}"
/usr/bin/mysql -h$MYSQL_HOST -u$MYSQL_USERNAME -p$MYSQL_PASSWORD -e "show databases;" > /dev/null
if [ "$?" -eq 0 ]
then
# mysql is fine, return http 200
/bin/echo -e "HTTP/1.1 200 OK"
/bin/echo -e "Content-Type: Content-Type: text/plain"
/bin/echo -e "MySQL is running."
else
# mysql is fine, return http 503
/bin/echo -e "HTTP/1.1 503 Service Unavailable"
/bin/echo -e "Content-Type: Content-Type: text/plain"
/bin/echo -e "MySQL is *down*."
fi
'''
        mysqlchk_template = jinja2.Template(mysqlchk_raw_script)
        mysqlchk_script_host1 = mysqlchk_template.render({
            'host1' : self.host1_post_info.host,
            'mysql_username' : "zstack",
            'mysql_password' : self.host1_post_info.mysql_userpassword
        })
        mysqlchk_script_host2 = mysqlchk_template.render({
            'host1' : self.host2_post_info.host,
            'mysql_username' : "zstack",
            'mysql_password' : self.host2_post_info.mysql_userpassword
        })
        if len(self.host_post_info_list) == 3:
            mysqlchk_script_host3 = mysqlchk_template.render({
                'host1' : self.host3_post_info.host,
                'mysql_username' : "zstack",
                'mysql_password' : self.host3_post_info.mysql_userpassword
            })
        host1_config, mysqlchk_script_host1_file = tempfile.mkstemp()
        f1 = os.fdopen(host1_config, 'w')
        f1.write(mysqlchk_script_host1)
        f1.close()
        host2_config, mysqlchk_script_host2_file = tempfile.mkstemp()
        f2 = os.fdopen(host2_config, 'w')
        f2.write(mysqlchk_script_host2)
        f2.close()
        if len(self.host_post_info_list) == 3:
            host3_config, mysqlchk_script_host3_file = tempfile.mkstemp()
            f3 = os.fdopen(host3_config, 'w')
            f3.write(mysqlchk_script_host3)
            f3.close()
        def cleanup_mysqlchk_script():
            os.remove(mysqlchk_script_host1_file)
            os.remove(mysqlchk_script_host2_file)
            if len(self.host_post_info_list) == 3:
                os.remove(mysqlchk_script_host3_file)
        self.install_cleanup_routine(cleanup_mysqlchk_script)
        copy_arg = CopyArg()
        copy_arg.src = mysqlchk_script_host1_file
        copy_arg.dest = "/usr/local/bin/mysqlchk_status.sh"
        copy_arg.args = "mode='u+x,g+x,o+x'"
        copy(copy_arg,self.host1_post_info)
        copy_arg = CopyArg()
        copy_arg.src = mysqlchk_script_host2_file
        copy_arg.dest = "/usr/local/bin/mysqlchk_status.sh"
        copy_arg.args = "mode='u+x,g+x,o+x'"
        copy(copy_arg,self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            copy_arg = CopyArg()
            copy_arg.src = mysqlchk_script_host3_file
            copy_arg.dest = "/usr/local/bin/mysqlchk_status.sh"
            copy_arg.args = "mode='u+x,g+x,o+x'"
            copy(copy_arg,self.host3_post_info)
        # check network: watchdog run from cron with args $1=peer-ip $2=gateway.
        # If the peer is unreachable but the gateway is, this node becomes the
        # Galera primary; if both are unreachable, MySQL is killed to avoid
        # split-brain.
        check_network_raw_script='''#!/bin/bash
MYSQL_HOST="{{ host }}"
MYSQL_PORT="3306"
MYSQL_USERNAME="root"
MYSQL_PASSWORD="{{ mysql_root_password }}"
# Checking partner ...
ping -c 4 -w 4 $1 > /dev/null 2>&1
if [ $? -ne 0 ]; then
# Checking gateway ...
ping -c 4 -w 4 $2 > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Network ERROR! Kill MySQL NOW!" >> /var/log/check-network.log
pgrep -f mysql | xargs kill -9
else
echo "Setting the primary of Galera." >> /var/log/check-network.log
/usr/bin/mysql -h$MYSQL_HOST -u$MYSQL_USERNAME -p$MYSQL_PASSWORD -e "SET GLOBAL wsrep_provider_options='pc.bootstrap=YES';" > /dev/null
fi
fi
TIMEST=`date`
echo $TIMEST >> /var/log/check-network.log
'''
        galera_check_network = jinja2.Template(check_network_raw_script)
        galera_check_network_host1 = galera_check_network.render({
            'host' : self.host1_post_info.host,
            'mysql_root_password' : self.host1_post_info.mysql_password
        })
        galera_check_network_host2 = galera_check_network.render({
            'host' : self.host2_post_info.host,
            # NOTE(review): rendered with host1's root password — presumably the
            # root password is identical cluster-wide; confirm.
            'mysql_root_password' : self.host1_post_info.mysql_password
        })
        host1_config, galera_check_network_host1_file = tempfile.mkstemp()
        f1 = os.fdopen(host1_config, 'w')
        f1.write(galera_check_network_host1)
        f1.close()
        host2_config, galera_check_network_host2_file = tempfile.mkstemp()
        f2 = os.fdopen(host2_config, 'w')
        f2.write(galera_check_network_host2)
        f2.close()
        def cleanup_gelerachk_script():
            os.remove(galera_check_network_host1_file)
            os.remove(galera_check_network_host2_file)
        self.install_cleanup_routine(cleanup_gelerachk_script)
        copy_arg = CopyArg()
        copy_arg.src = galera_check_network_host1_file
        copy_arg.dest = "/usr/local/zstack/check-network.sh"
        copy_arg.args = "mode='u+x,g+x,o+x'"
        copy(copy_arg,self.host1_post_info)
        copy_arg = CopyArg()
        copy_arg.src = galera_check_network_host2_file
        copy_arg.dest = "/usr/local/zstack/check-network.sh"
        copy_arg.args = "mode='u+x,g+x,o+x'"
        copy(copy_arg,self.host2_post_info)
        # set cron task for network status: each node pings its peer twice a
        # minute (second job sleeps 30s to offset within the minute).
        cron("check_node_2_status1","job=\"/usr/local/zstack/check-network.sh %s %s\"" % (self.host2_post_info.host,
                                                                                         self.host2_post_info.gateway_ip),
             self.host1_post_info)
        cron("check_node_2_status2","job=\"sleep 30;/usr/local/zstack/check-network.sh %s %s\"" % (self.host2_post_info.host,
                                                                                                   self.host2_post_info.gateway_ip),
             self.host1_post_info)
        cron("check_node_1_status1","job=\"/usr/local/zstack/check-network.sh %s %s\"" % (self.host1_post_info.host,
                                                                                         self.host1_post_info.gateway_ip),
             self.host2_post_info)
        cron("check_node_1_status2","job=\"sleep 30;/usr/local/zstack/check-network.sh %s %s\"" % (self.host1_post_info.host,
                                                                                                   self.host1_post_info.gateway_ip),
             self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            # NOTE(review): for 3 nodes the pairwise watchdog jobs are removed
            # (state=absent) and no 3-node variant is added — presumably Galera
            # quorum makes the 2-node split-brain guard unnecessary; confirm.
            cron("check_node_1_status1","job=\"/usr/local/zstack/check-network.sh %s %s\" state=absent" %
                 (self.host1_post_info.host, self.host1_post_info.gateway_ip), self.host2_post_info)
            cron("check_node_1_status2","job=\"sleep 30;/usr/local/zstack/check-network.sh %s %s\" state=absent" %
                 (self.host1_post_info.host, self.host1_post_info.gateway_ip), self.host2_post_info)
            cron("check_node_2_status1","job=\"/usr/local/zstack/check-network.sh %s %s\" state=absent" %
                 (self.host2_post_info.host, self.host2_post_info.gateway_ip), self.host1_post_info)
            cron("check_node_2_status2","job=\"sleep 30;/usr/local/zstack/check-network.sh %s %s\" state=absent" %
                 (self.host2_post_info.host, self.host2_post_info.gateway_ip), self.host1_post_info)
        # config xinetd for service check (serves mysqlchk_status.sh on 6033).
        copy_arg = CopyArg()
        copy_arg.src = "%s/conf/mysql-check" % InstallHACmd.current_dir
        copy_arg.dest = "/etc/xinetd.d/mysql-check"
        copy(copy_arg,self.host1_post_info)
        copy(copy_arg,self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            copy(copy_arg,self.host3_post_info)
        # add service name so xinetd can bind the mysqlcheck service.
        update_file("/etc/services", "line='mysqlcheck 6033/tcp #MYSQL status check'", self.host1_post_info)
        update_file("/etc/services", "line='mysqlcheck 6033/tcp #MYSQL status check'", self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            update_file("/etc/services", "line='mysqlcheck 6033/tcp #MYSQL status check'", self.host3_post_info)
        # start service
        command = "systemctl daemon-reload"
        run_remote_command(command,self.host1_post_info)
        run_remote_command(command,self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            run_remote_command(command,self.host3_post_info)
        service_status("xinetd","state=restarted enabled=yes",self.host1_post_info)
        service_status("xinetd","state=restarted enabled=yes",self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            service_status("xinetd","state=restarted enabled=yes",self.host3_post_info)
        # add crontab for backup mysql; 2-node: host1 at 01:00/13:00, host2 at
        # 07:00/19:00.
        cron("backup_zstack_db","minute='0' hour='1,13' job='/usr/bin/zstack-ctl dump_mysql >>"
                                " /var/log/zstack/ha.log 2>&1' ", self.host1_post_info)
        cron("backup_zstack_db","minute='0' hour='7,19' job='/usr/bin/zstack-ctl dump_mysql >>"
                                " /var/log/zstack/ha.log 2>&1' ", self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            # 3-node: the same cron name re-registers the job, staggering one
            # daily dump per host (01:00 / 09:00 / 17:00).
            cron("backup_zstack_db","minute='0' hour='1' job='/usr/bin/zstack-ctl dump_mysql >>"
                                    " /var/log/zstack/ha.log 2>&1' ", self.host1_post_info)
            cron("backup_zstack_db","minute='0' hour='9' job='/usr/bin/zstack-ctl dump_mysql >>"
                                    " /var/log/zstack/ha.log 2>&1' ", self.host2_post_info)
            cron("backup_zstack_db","minute='0' hour='17' job='/usr/bin/zstack-ctl dump_mysql >>"
                                    " /var/log/zstack/ha.log 2>&1' ", self.host3_post_info)
        service_status("crond","state=started enabled=yes",self.host1_post_info)
        service_status("crond","state=started enabled=yes",self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            service_status("crond","state=started enabled=yes",self.host3_post_info)
class RabbitmqHA(InstallHACmd):
    """Install rabbitmq-server on every HA node and provision the 'zstack' user.

    Each node is installed and configured independently (user creation, tags,
    permissions, management plugin); haproxy in front of port 5672 provides
    the failover.
    """
    def __init__(self):
        super(RabbitmqHA, self).__init__()
        self.name = "rabbitmq ha"
        self.description = "rabbitmq HA setup"
        # Per-host connection info; host3 only exists for 3-node setups.
        self.host_post_info_list = InstallHACmd.host_post_info_list
        self.host1_post_info = self.host_post_info_list[0]
        self.host2_post_info = self.host_post_info_list[1]
        if len(self.host_post_info_list) == 3:
            self.host3_post_info = self.host_post_info_list[2]
        self.yum_repo = self.host1_post_info.yum_repo
        self.rabbit_password = self.host1_post_info.rabbit_password
    def __call__(self):
        # Install only the packages rpm reports as "not installed".
        command = ("yum clean --enablerepo=zstack-local metadata && pkg_list=`rpm -q rabbitmq-server"
                   " | grep \"not installed\" | awk '{ print $2 }'` && for pkg in $pkg_list; do yum "
                   "--disablerepo=* --enablerepo=%s,mariadb install -y $pkg; done;") % self.yum_repo
        run_remote_command(command, self.host1_post_info)
        run_remote_command(command, self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            run_remote_command(command, self.host3_post_info)
        # clear erlang process for new deploy
        # NOTE(review): "echo True ||" always succeeds, so the pkill/rm after it
        # never executes — the clean-up looks intentionally disabled; confirm.
        command = "echo True || pkill -f .*erlang.* > /dev/null 2>&1 && rm -rf /var/lib/rabbitmq/* "
        run_remote_command(command, self.host1_post_info)
        run_remote_command(command, self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            run_remote_command(command, self.host3_post_info)
        # to stop rabbitmq-server for new installation (ignore_error=True: it
        # may not be running yet).
        service_status("rabbitmq-server","state=stopped", self.host1_post_info, True)
        service_status("rabbitmq-server", "state=stopped", self.host2_post_info, True)
        if len(self.host_post_info_list) == 3:
            service_status("rabbitmq-server", "state=stopped", self.host3_post_info, True)
        # to start rabbitmq-server
        service_status("rabbitmq-server","state=started enabled=yes", self.host1_post_info)
        service_status("rabbitmq-server", "state=started enabled=yes", self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            service_status("rabbitmq-server", "state=started enabled=yes", self.host3_post_info)
        # add zstack user in this cluster
        command = "rabbitmqctl add_user zstack %s" % self.rabbit_password
        run_remote_command(command, self.host1_post_info)
        run_remote_command(command, self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            run_remote_command(command, self.host3_post_info)
        command = "rabbitmqctl set_user_tags zstack administrator"
        run_remote_command(command, self.host1_post_info)
        run_remote_command(command, self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            run_remote_command(command, self.host3_post_info)
        # change_password covers the case where the user already existed and
        # add_user was a no-op.
        command = "rabbitmqctl change_password zstack %s" % self.rabbit_password
        run_remote_command(command, self.host1_post_info)
        run_remote_command(command, self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            run_remote_command(command, self.host3_post_info)
        # full configure/write/read permissions on the default vhost
        command = 'rabbitmqctl set_permissions -p \/ zstack ".*" ".*" ".*"'
        run_remote_command(command, self.host1_post_info)
        run_remote_command(command, self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            run_remote_command(command, self.host3_post_info)
        command = "rabbitmq-plugins enable rabbitmq_management"
        run_remote_command(command, self.host1_post_info)
        run_remote_command(command, self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            run_remote_command(command, self.host3_post_info)
        # restart so the management plugin takes effect
        service_status("rabbitmq-server","state=restarted enabled=yes", self.host1_post_info)
        service_status("rabbitmq-server", "state=restarted enabled=yes", self.host2_post_info)
        if len(self.host_post_info_list) == 3:
            service_status("rabbitmq-server", "state=restarted enabled=yes", self.host3_post_info)
class ResetRabbitCmd(Command):
    """zstack-ctl command that resets the local RabbitMQ broker.

    Recreates the account named by CloudBus.rabbitmqUsername (with the
    configured password, administrator tag and full permissions on the
    default vhost) and refreshes the hostname mapping in /etc/hosts.
    """
    def __init__(self):
        super(ResetRabbitCmd, self).__init__()
        self.name = "reset_rabbitmq"
        self.description = "Reset RabbitMQ message broker on local machine based on current configuration in zstack.properties."
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        # no command-specific flags
        pass

    def run(self, args):
        # Broker settings as currently recorded in zstack.properties.
        rabbitmq_ip = ctl.read_property('CloudBus.serverIp.0')
        rabbitmq_user = ctl.read_property('CloudBus.rabbitmqUsername')
        rabbitmq_passwd = ctl.read_property('CloudBus.rabbitmqPassword')
        new_hostname = shell("hostname").strip()
        info("hostname is %s now" % new_hostname)
        if shell_return("service rabbitmq-server restart") != 0:
            error("restart rabbitmq failed")
        # Every rabbitmqctl call runs with HOSTNAME pinned to the current
        # hostname; build the shared prefix once.
        env_prefix = "export HOSTNAME=" + new_hostname + " && "
        existing_users = shell(env_prefix + "rabbitmqctl list_users | awk '{print $1}'").split("\n")
        # Drop the configured user if present, then recreate it from scratch.
        if rabbitmq_user in existing_users:
            shell(env_prefix + "rabbitmqctl delete_user %s" % rabbitmq_user)
        shell(env_prefix + "rabbitmqctl add_user %s %s" % (rabbitmq_user, rabbitmq_passwd))
        shell(env_prefix + "rabbitmqctl set_user_tags %s administrator" % rabbitmq_user)
        shell(env_prefix + "rabbitmqctl set_permissions -p / %s \".*\" \".*\" \".*\"" % rabbitmq_user)
        if shell_return("service rabbitmq-server restart") != 0:
            error("restart rabbitmq failed")
        info("reset rabbitmq success")
        # Rewrite this machine's /etc/hosts entry to the new hostname
        # (dots escaped so sed treats the IP literally in its address).
        ip = get_default_ip()
        escaped_ip = ip.replace(".", "\.")
        shell("sed -i '/%s /c\%s %s' /etc/hosts" % (escaped_ip, ip, new_hostname))
class InstallRabbitCmd(Command):
    """Install and configure RabbitMQ on a local or remote machine via Ansible."""
    def __init__(self):
        super(InstallRabbitCmd, self).__init__()
        self.name = "install_rabbitmq"
        self.description = "install RabbitMQ message broker on local or remote machine."
        ctl.register_command(self)
    def install_argparse_arguments(self, parser):
        parser.add_argument('--host', help='host IP, for example, 192.168.0.212, please specify the real IP rather than "localhost" or "127.0.0.1" when installing on local machine; otherwise management nodes on other machines cannot access the RabbitMQ.', required=True)
        parser.add_argument('--debug', help="open Ansible debug option", action="store_true", default=False)
        parser.add_argument('--no-update', help="don't update the IP address to 'CloudBus.serverIp.0' in zstack.properties", action="store_true", default=False)
        parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)
        parser.add_argument('--rabbit-username', help="RabbitMQ username; if set, the username will be created on RabbitMQ. [DEFAULT] rabbitmq default username", default=None)
        parser.add_argument('--rabbit-password', help="RabbitMQ password; if set, the password will be created on RabbitMQ for username specified by --rabbit-username. [DEFAULT] rabbitmq default password", default=None)
        parser.add_argument('--yum', help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.", default=None)
    def run(self, args):
        # BUG FIX: the original test repeated the same clause twice
        # ("password is None and username" OR "username and password is None"),
        # so a --rabbit-password given without --rabbit-username slipped through
        # silently and was later ignored. Require both or neither.
        if bool(args.rabbit_username) != bool(args.rabbit_password):
            raise CtlError('--rabbit-username and --rabbit-password must be both set or not set')
        if not args.yum:
            args.yum = get_yum_repo_from_property()
        # Ansible playbook; $-placeholders are filled via string.Template below.
        yaml = '''---
- hosts: $host
  remote_user: root
  vars:
      yum_repo: "$yum_repo"
  tasks:
    - name: pre-install script
      script: $pre_install_script
    - name: install RabbitMQ on RedHat OS from user defined yum repo
      when: ansible_os_family == 'RedHat' and yum_repo != 'false'
      shell: yum clean metadata; yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y rabbitmq-server libselinux-python iptables-services
    - name: install RabbitMQ on RedHat OS from online
      when: ansible_os_family == 'RedHat' and yum_repo == 'false'
      shell: yum clean metadata; yum --nogpgcheck install -y rabbitmq-server libselinux-python iptables-services
    - name: install iptables-persistent for Ubuntu
      when: ansible_os_family == 'Debian'
      apt: pkg={{item}} update_cache=yes
      with_items:
        - iptables-persistent
    - name: install RabbitMQ on Ubuntu OS
      when: ansible_os_family == 'Debian'
      apt: pkg={{item}} update_cache=yes
      with_items:
        - rabbitmq-server
    - name: open 5672 port
      when: ansible_os_family != 'RedHat'
      shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 5672 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 5672 -j ACCEPT
    - name: open 5673 port
      when: ansible_os_family != 'RedHat'
      shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 5673 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 5673 -j ACCEPT
    - name: open 15672 port
      when: ansible_os_family != 'RedHat'
      shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 15672 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 15672 -j ACCEPT
    - name: save iptables
      when: ansible_os_family != 'RedHat'
      shell: /etc/init.d/iptables-persistent save
    - name: open 5672 port
      when: ansible_os_family == 'RedHat'
      shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 5672 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 5672 -j ACCEPT
    - name: open 5673 port
      when: ansible_os_family == 'RedHat'
      shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 5673 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 5673 -j ACCEPT
    - name: open 15672 port
      when: ansible_os_family == 'RedHat'
      shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 15672 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 15672 -j ACCEPT
    - name: save iptables
      when: ansible_os_family == 'RedHat'
      shell: service iptables save
    - name: install rabbitmq management plugin
      shell: rabbitmq-plugins enable rabbitmq_management
    - name: enable RabbitMQ
      service: name=rabbitmq-server state=restarted enabled=yes
    - name: post-install script
      script: $post_install_script
'''
        # Shell script executed on the target before installing: sets up yum repos
        # (on RedHat) and aborts early if DNS resolves the hostname to a foreign IP.
        pre_script = '''
if [ -f /etc/redhat-release ] ; then
grep ' 7' /etc/redhat-release
if [ $? -eq 0 ]; then
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
else
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
fi
[ -d /etc/yum.repos.d/ ] && echo -e "#aliyun base\n[alibase]\nname=CentOS-\$releasever - Base - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[aliupdates]\nname=CentOS-\$releasever - Updates - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliextras]\nname=CentOS-\$releasever - Extras - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nbaseurl=http://mirrors.aliyun.com/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-aliyun-yum.repo
[ -d /etc/yum.repos.d/ ] && echo -e "#163 base\n[163base]\nname=CentOS-\$releasever - Base - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[163updates]\nname=CentOS-\$releasever - Updates - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n#additional packages that may be useful\n[163extras]\nname=CentOS-\$releasever - Extras - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[ustcepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearch - ustc \nbaseurl=http://centos.ustc.edu.cn/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-163-yum.repo
fi
###################
#Check DNS hijacking
###################
hostname=`hostname`
pintret=`ping -c 1 -W 2 $hostname 2>/dev/null | head -n1`
echo $pintret | grep 'PING' > /dev/null
[ $? -ne 0 ] && exit 0
ip=`echo $pintret | cut -d' ' -f 3 | cut -d'(' -f 2 | cut -d')' -f 1`
ip_1=`echo $ip | cut -d'.' -f 1`
[ "127" = "$ip_1" ] && exit 0
ip addr | grep $ip > /dev/null
[ $? -eq 0 ] && exit 0
echo "The hostname($hostname) of your machine is resolved to IP($ip) which is none of IPs of your machine.
It's likely your DNS server has been hijacking, please try fixing it or add \"ip_of_your_host $hostname\" to /etc/hosts.
DNS hijacking will cause MySQL and RabbitMQ not working."
exit 1
'''
        fd, pre_script_path = tempfile.mkstemp()
        # close the temp file deterministically instead of relying on GC
        with os.fdopen(fd, 'w') as pre_script_file:
            pre_script_file.write(pre_script)
        def cleanup_prescript():
            os.remove(pre_script_path)
        self.install_cleanup_routine(cleanup_prescript)
        if args.rabbit_username and args.rabbit_password:
            # $$ escapes a literal '$' for string.Template, so the shell sees "$?"
            post_script = '''set -x
rabbitmqctl list_users|grep 'zstack'
if [ $$? -ne 0 ]; then
set -e
rabbitmqctl add_user $username $password
rabbitmqctl set_user_tags $username administrator
rabbitmqctl set_permissions -p / $username ".*" ".*" ".*"
fi
'''
            t = string.Template(post_script)
            post_script = t.substitute({
                'username': args.rabbit_username,
                'password': args.rabbit_password
            })
        else:
            post_script = ''
        fd, post_script_path = tempfile.mkstemp()
        with os.fdopen(fd, 'w') as post_script_file:
            post_script_file.write(post_script)
        def cleanup_postscript():
            os.remove(post_script_path)
        self.install_cleanup_routine(cleanup_postscript)
        t = string.Template(yaml)
        if args.yum:
            yum_repo = args.yum
        else:
            yum_repo = 'false'
        yaml = t.substitute({
            'host': args.host,
            'pre_install_script': pre_script_path,
            # not referenced by the template; kept for compatibility with substitute()
            'yum_folder': ctl.zstack_home,
            'yum_repo': yum_repo,
            'post_install_script': post_script_path
        })
        ansible(yaml, args.host, args.debug, args.ssh_key)
        if not args.no_update:
            ctl.write_property('CloudBus.serverIp.0', args.host)
            info('updated CloudBus.serverIp.0=%s in %s' % (args.host, ctl.properties_file_path))
        if args.rabbit_username and args.rabbit_password:
            ctl.write_property('CloudBus.rabbitmqUsername', args.rabbit_username)
            info('updated CloudBus.rabbitmqUsername=%s in %s' % (args.rabbit_username, ctl.properties_file_path))
            ctl.write_property('CloudBus.rabbitmqPassword', args.rabbit_password)
            info('updated CloudBus.rabbitmqPassword=%s in %s' % (args.rabbit_password, ctl.properties_file_path))
class ChangeMysqlPasswordCmd(Command):
    """Change the mysql password of the 'root' or 'zstack' user."""
    def __init__(self):
        super(ChangeMysqlPasswordCmd, self).__init__()
        self.name = "change_mysql_password"
        self.description = (
            "Change mysql password for root or normal user"
        )
        ctl.register_command(self)
    def install_argparse_arguments(self, parser):
        parser.add_argument('--root-password','-root',
                            help="Current mysql root password",
                            required=True)
        parser.add_argument('--user-name','-user',
                            help="The user you want to change password",
                            required=True)
        parser.add_argument('--new-password','-new',
                            help="New mysql password of root or normal user",
                            required=True)
        parser.add_argument('--remote-ip','-ip',
                            help="Mysql ip address if didn't install on localhost",
                            )
    def check_username_password(self,args):
        # Verify the supplied root password with a harmless query before changing anything.
        # FIX: quote the password ('-p%s' -> "-p'%s'") so shell metacharacters in it do
        # not break the command -- run() below already quotes it this way.
        if args.remote_ip is not None:
            status, output = commands.getstatusoutput("mysql -u root -p'%s' -h '%s' -e 'show databases;'" % (args.root_password, args.remote_ip))
        else:
            status, output = commands.getstatusoutput("mysql -u root -p'%s' -e 'show databases;'" % args.root_password)
        if status != 0:
            error(output)
    def run(self, args):
        self.check_username_password(args)
        if args.user_name == 'zstack':
            # update the zstack account directly in mysql.user (and pin Host to the
            # remote IP when the DB is not local)
            if args.remote_ip is not None:
                sql = "mysql -u root -p'%s' -h '%s' -e \"UPDATE mysql.user SET Password=PASSWORD(\'%s\') , Host = \'%s\' WHERE USER=\'%s\';FLUSH PRIVILEGES;\"" % (args.root_password, args.remote_ip, args.new_password,args.remote_ip, args.user_name)
            else:
                sql = "mysql -u root -p'%s' -e \"UPDATE mysql.user SET Password=PASSWORD(\'%s\') WHERE USER=\'%s\';FLUSH PRIVILEGES;\"" % (args.root_password, args.new_password, args.user_name)
            status, output = commands.getstatusoutput(sql)
            if status != 0:
                error(output)
            info("Change mysql password for user '%s' successfully! " % args.user_name)
            info(colored("Please change 'DB.password' in 'zstack.properties' then restart zstack to make the changes effective" , 'yellow'))
        elif args.user_name == 'root':
            # root's own password is changed via mysqladmin
            if args.remote_ip is not None:
                status, output = commands.getstatusoutput("mysqladmin -u %s -p'%s' password %s -h %s" % (args.user_name, args.root_password, args.new_password, args.remote_ip))
            else:
                status, output = commands.getstatusoutput("mysqladmin -u %s -p'%s' password %s" % (args.user_name, args.root_password, args.new_password))
            if status != 0:
                error(output)
            info("Change mysql password for user '%s' successfully!" % args.user_name)
        else:
            error("Only support change 'zstack' and 'root' password")
class DumpMysqlCmd(Command):
    """Dump the zstack/zstack_rest and zstack_ui databases into a gzipped backup,
    prune old backups, and optionally sync backups to a remote host."""
    mysql_backup_dir = "/var/lib/zstack/mysql-backup/"
    remote_backup_dir = "/var/lib/zstack/from-zstack-remote-backup/"
    ui_backup_dir = "/var/lib/zstack/ui/"
    def __init__(self):
        super(DumpMysqlCmd, self).__init__()
        self.name = "dump_mysql"
        self.description = (
            "Dump mysql database for backup"
        )
        ctl.register_command(self)
    def install_argparse_arguments(self, parser):
        parser.add_argument('--file-name',
                            help="The filename prefix you want to save the backup database under local backup dir, default filename "
                                 "prefix is 'zstack-backup-db', local backup dir is '/var/lib/zstack/mysql-backup/'",
                            default="zstack-backup-db")
        parser.add_argument('--keep-amount',type=int,
                            help="The amount of backup files you want to keep, older backup files will be deleted, default number is 60",
                            default=60)
        parser.add_argument('--host-info','--host','--h',
                            help="ZStack will sync the backup database and ui data to remote host dir '/var/lib/zstack/from-zstack-remote-backup/', "
                                 "the host-info format: 'root@ip_address' ",
                            required=False)
        parser.add_argument('--delete-expired-file','--delete','--d',
                            action='store_true',
                            help="ZStack will delete expired files under remote host backup dir /var/lib/zstack/from-zstack-remote-backup/ "
                                 "to make sure the content under remote host backup dir synchronize with local backup dir",
                            required=False)
    def sync_local_backup_db_to_remote_host(self, args, user, private_key, remote_host_ip):
        """rsync the local backup dir and ui data dir into remote_backup_dir on the remote
        host; with --delete the remote dir is made an exact mirror of the local dirs."""
        (status, output, stderr) = shell_return_stdout_stderr("mkdir -p %s" % self.ui_backup_dir)
        if status != 0:
            error(stderr)
        command ='timeout 10 sshpass ssh -q -i %s %s@%s "mkdir -p %s"' % (private_key, user, remote_host_ip, self.remote_backup_dir)
        (status, output, stderr) = shell_return_stdout_stderr(command)
        if status != 0:
            error(stderr)
        if args.delete_expired_file is True:
            sync_command = "rsync -lr --delete -e 'ssh -i %s' %s %s %s:%s" % (private_key, self.mysql_backup_dir,
                                                                              self.ui_backup_dir, remote_host_ip, self.remote_backup_dir)
        else:
            sync_command = "rsync -lr -e 'ssh -i %s' %s %s %s:%s" % (private_key, self.mysql_backup_dir,
                                                                     self.ui_backup_dir, remote_host_ip, self.remote_backup_dir)
        (status, output, stderr) = shell_return_stdout_stderr(sync_command)
        if status != 0:
            error(stderr)
    @staticmethod
    def _mysqldump_command(user, password, host, port, databases):
        """Build a mysqldump command line for `databases`; --host is omitted for a
        local endpoint, and the -p option is omitted for an empty password."""
        password_opt = "" if password is None or password == "" else "-p" + password
        if host == "localhost" or host == "127.0.0.1":
            return "mysqldump --databases -u %s %s -P %s %s" % (user, password_opt, port, databases)
        return "mysqldump --databases -u %s %s --host %s -P %s %s" % (user, password_opt, host, port, databases)
    def run(self, args):
        (db_hostname, db_port, db_user, db_password) = ctl.get_live_mysql_portal()
        (ui_db_hostname, ui_db_port, ui_db_user, ui_db_password) = ctl.get_live_mysql_portal(ui=True)
        file_name = args.file_name
        keep_amount = args.keep_amount
        backup_timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        # use the class-level constant instead of re-hardcoding the path
        db_backup_dir = self.mysql_backup_dir
        if os.path.exists(db_backup_dir) is False:
            os.mkdir(db_backup_dir)
        db_backup_name = db_backup_dir + file_name + "-" + backup_timestamp
        if args.delete_expired_file is not False and args.host_info is None:
            error("Please specify remote host info with '--host' before you want to delete remote host expired files")
        if args.host_info is not None:
            # validate the remote endpoint and key pair up front, before dumping
            host_info = args.host_info
            host_connect_info_list = check_host_info_format(host_info, with_public_key=True)
            remote_host_user = host_connect_info_list[0]
            remote_host_ip = host_connect_info_list[2]
            key_path = os.path.expanduser('~%s' % remote_host_user) + "/.ssh/"
            private_key = key_path + "id_rsa"
            public_key = key_path + "id_rsa.pub"
            if os.path.isfile(public_key) is not True:
                error("Didn't find public key: %s" % public_key)
            if os.path.isfile(private_key) is not True:
                error("Didn't find private key: %s" % private_key)
            check_host_connection_with_key(remote_host_ip, remote_host_user, private_key)
        command_1 = self._mysqldump_command(db_user, db_password, db_hostname, db_port, "zstack zstack_rest")
        command_2 = self._mysqldump_command(ui_db_user, ui_db_password, ui_db_hostname, ui_db_port, "zstack_ui")
        cmd = ShellCmd("(%s; %s) | gzip > %s" % (command_1, command_2, db_backup_name + ".gz"))
        cmd(True)
        info("Backup mysql successfully! You can check the file at %s.gz" % db_backup_name)
        # prune old backups, keeping only the newest keep_amount files.
        # (previously the guard counted directories too and each file was checked
        # against a slice with `in`, an O(n^2) scan; a single slice is equivalent)
        backup_files_list = [s for s in os.listdir(db_backup_dir) if os.path.isfile(os.path.join(db_backup_dir, s))]
        if len(backup_files_list) > keep_amount:
            backup_files_list.sort(key=lambda s: os.path.getmtime(os.path.join(db_backup_dir, s)))
            for expired_file in backup_files_list[:-keep_amount]:
                os.remove(db_backup_dir + expired_file)
        # remote backup
        if args.host_info is not None:
            self.sync_local_backup_db_to_remote_host(args, remote_host_user, private_key, remote_host_ip)
            if args.delete_expired_file is False:
                info("Sync ZStack backup to remote host %s:%s successfully! " % (remote_host_ip, self.remote_backup_dir))
            else:
                info("Sync ZStack backup to remote host %s:%s and delete expired files on remote successfully! " % (remote_host_ip, self.remote_backup_dir))
class RestoreMysqlCmd(Command):
    """Restore the zstack, zstack_rest and zstack_ui databases from a dump_mysql backup."""
    # captured once at import time; used below to decide whether a DB host is this
    # machine. NOTE(review): `host in all_local_ip` is a substring match against the
    # raw `ip a` output, so e.g. '10.0.0.1' also matches '10.0.0.11' -- confirm acceptable
    status, all_local_ip = commands.getstatusoutput("ip a")
    def __init__(self):
        super(RestoreMysqlCmd, self).__init__()
        self.name = "restore_mysql"
        self.description = (
            "Restore mysql data from backup file"
        )
        self.hide = True
        ctl.register_command(self)
    def install_argparse_arguments(self, parser):
        parser.add_argument('--from-file', '-f',
                            help="The backup filename under /var/lib/zstack/mysql-backup/ ",
                            required=True)
        parser.add_argument('--mysql-root-password',
                            help="mysql root password of zstack database",
                            default=None)
        parser.add_argument('--ui-mysql-root-password',
                            help="mysql root password of zstack_ui database, same as --mysql-root-password by default",
                            default=None)
    def test_mysql_connection(self, db_connect_password, db_port, db_hostname):
        """Abort with an error if root cannot connect to the given mysql endpoint.
        db_connect_password is either '' or '-pPASSWORD'; db_hostname either '' or '--host H'."""
        command = "mysql -uroot %s -P %s %s -e 'show databases' >> /dev/null 2>&1" \
                  % (db_connect_password, db_port, db_hostname)
        try:
            shell_no_pipe(command)
        except:
            # strip the '-p' prefix so the error message shows the bare password
            db_connect_password = db_connect_password.split('-p')[1] if db_connect_password.startswith('-p') else db_connect_password
            error("Failed to connect to jdbc:mysql://%s:%s with root password '%s'" % (db_hostname, db_port, db_connect_password))
    def run(self, args):
        (db_hostname, db_port, _, _) = ctl.get_live_mysql_portal()
        (ui_db_hostname, ui_db_port, _, _) = ctl.get_live_mysql_portal(ui=True)
        # only root user can restore database
        db_password = args.mysql_root_password
        ui_db_password = args.ui_mysql_root_password if args.ui_mysql_root_password is not None else args.mysql_root_password
        db_backup_name = args.from_file
        # keep the original hostnames: they are substituted into the DEFINER rewrite below
        db_hostname_origin_cp = db_hostname
        ui_db_hostname_origin_cp = ui_db_hostname
        if os.path.exists(db_backup_name) is False:
            error("Didn't find file: %s ! Stop recover database! " % db_backup_name)
        error_if_tool_is_missing('gunzip')
        # test mysql connection
        if db_password is None or db_password == "":
            db_connect_password = ""
        else:
            db_connect_password = "-p" + db_password
        if db_hostname == "localhost" or db_hostname == "127.0.0.1" or (db_hostname in RestoreMysqlCmd.all_local_ip):
            db_hostname = ""
        else:
            db_hostname = "--host %s" % db_hostname
        self.test_mysql_connection(db_connect_password, db_port, db_hostname)
        if ui_db_password is None or ui_db_password == "":
            ui_db_connect_password = ""
        else:
            ui_db_connect_password = "-p" + ui_db_password
        if ui_db_hostname == "localhost" or ui_db_hostname == "127.0.0.1" or (ui_db_hostname in RestoreMysqlCmd.all_local_ip):
            ui_db_hostname = ""
        else:
            ui_db_hostname = "--host %s" % ui_db_hostname
        self.test_mysql_connection(ui_db_connect_password, ui_db_port, ui_db_hostname)
        info("Backup mysql before restore data ...")
        shell_no_pipe('zstack-ctl dump_mysql')
        shell_no_pipe('zstack-ctl stop')
        info("Starting restore zstack data ...")
        for database in ['zstack','zstack_rest']:
            command = "mysql -uroot %s -P %s %s -e 'drop database if exists %s; create database %s' >> /dev/null 2>&1" \
                      % (db_connect_password, db_port, db_hostname, database, database)
            shell_no_pipe(command)
            # modify DEFINER of view, trigger and so on
            # from: /* ... */ /*!50017 DEFINER=`old_user`@`old_hostname`*/ /*...
            # to:   /* ... */ /*!50017 DEFINER=`root`@`new_hostname`*/ /*...
            command = "gunzip < %s | sed -e '/DROP DATABASE IF EXISTS/d' -e '/CREATE DATABASE .* IF NOT EXISTS/d' |sed 's/DEFINER=`[^\*\/]*`@`[^\*\/]*`/DEFINER=`root`@`%s`/' | mysql -uroot %s %s -P %s --one-database %s" \
                      % (db_backup_name, db_hostname_origin_cp, db_connect_password, db_hostname, db_port, database)
            shell_no_pipe(command)
        info("Starting restore zstack_ui data ...")
        # BUG FIX: this drop/create previously used db_port instead of ui_db_port,
        # which broke restore whenever the UI database runs on a different port
        command = "mysql -uroot %s -P %s %s -e 'drop database if exists zstack_ui; create database zstack_ui' >> /dev/null 2>&1" \
                  % (ui_db_connect_password, ui_db_port, ui_db_hostname)
        shell_no_pipe(command)
        command = "gunzip < %s | sed -e '/DROP DATABASE IF EXISTS/d' -e '/CREATE DATABASE .* IF NOT EXISTS/d' |sed 's/DEFINER=`[^\*\/]*`@`[^\*\/]*`/DEFINER=`root`@`%s`/' | mysql -uroot %s %s -P %s --one-database zstack_ui" \
                  % (db_backup_name, ui_db_hostname_origin_cp, ui_db_connect_password, ui_db_hostname, ui_db_port)
        shell_no_pipe(command)
        #shell_no_pipe('zstack-ctl start_node')
        info("Recover data successfully! You can start node by: zstack-ctl start")
class CollectLogCmd(Command):
    """Collect logs from management nodes, hosts, storages and virtual routers for diagnosis."""
    # well-known log locations
    zstack_log_dir = "/var/log/zstack/"
    vrouter_log_dir = "/home/vyos/zvr/"
    # per-role log file names, collected relative to zstack_log_dir
    host_log_list = ['zstack.log','zstack-kvmagent.log','zstack-iscsi-filesystem-agent.log',
                     'zstack-agent/collectd.log','zstack-agent/server.log']
    bs_log_list = ['zstack-sftpbackupstorage.log','ceph-backupstorage.log','zstack-store/zstore.log',
                   'fusionstor-backupstorage.log']
    ps_log_list = ['ceph-primarystorage.log','fusionstor-primarystorage.log']
    # management-server.log is not in the same dir, will collect separately
    mn_log_list = ['deploy.log', 'ha.log', 'zstack-console-proxy.log', 'zstack.log', 'zstack-cli', 'zstack-ui.log',
                   'zstack-dashboard.log', 'zstack-ctl.log']
    # trailing lines taken from each log when a full collection is not requested
    collect_lines = 100000
    logger_dir = '/var/log/zstack/'
    logger_file = 'zstack-ctl.log'
    # flipped to True when collection on some host fails (see the except blocks below)
    failed_flag = False
    def __init__(self):
        # register this command as `zstack-ctl collect_log`
        super(CollectLogCmd, self).__init__()
        self.name = "collect_log"
        self.description = (
            "Collect log for diagnose"
        )
        ctl.register_command(self)
    def install_argparse_arguments(self, parser):
        """Register collect_log's command-line options on the given argparse parser."""
        parser.add_argument('--db', help='collect database for diagnose ', action="store_true", default=False)
        parser.add_argument('--mn-only', help='only collect management log', action="store_true", default=False)
        parser.add_argument('--full', help='collect full management logs and host logs', action="store_true", default=False)
        parser.add_argument('--host', help='only collect management log and specific host log')
def get_db(self, collect_dir):
command = "cp `zstack-ctl dump_mysql | awk '{ print $10 }'` %s" % collect_dir
shell(command, False)
    def compress_and_fetch_log(self, local_collect_dir, tmp_log_dir, host_post_info):
        """Tar the remote staging dir, fetch the tarball locally, clean up the remote
        side, then unpack and remove the tarball under local_collect_dir."""
        command = "cd %s && tar zcf ../collect-log.tar.gz ." % tmp_log_dir
        run_remote_command(command, host_post_info)
        fetch_arg = FetchArg()
        fetch_arg.src = "%s/../collect-log.tar.gz " % tmp_log_dir
        fetch_arg.dest = local_collect_dir
        fetch_arg.args = "fail_on_missing=yes flat=yes"
        fetch(fetch_arg, host_post_info)
        # remove both the staging dir and the tarball on the remote host
        command = "rm -rf %s %s/../collect-log.tar.gz" % (tmp_log_dir, tmp_log_dir)
        run_remote_command(command, host_post_info)
        (status, output) = commands.getstatusoutput("cd %s && tar zxf collect-log.tar.gz" % local_collect_dir)
        if status != 0:
            warn("Uncompress %s/collect-log.tar.gz meet problem: %s" % (local_collect_dir, output))
        # best-effort local cleanup; result deliberately ignored
        (status, output) = commands.getstatusoutput("rm -f %s/collect-log.tar.gz" % local_collect_dir)
def get_system_log(self, host_post_info, tmp_log_dir):
# collect uptime and last reboot log and dmesg
host_info_log = tmp_log_dir + "host_info"
command = "uptime > %s && last reboot >> %s && free -h >> %s && cat /proc/cpuinfo >> %s && ip addr >> %s && df -h >> %s" % \
(host_info_log, host_info_log, host_info_log, host_info_log, host_info_log, host_info_log)
run_remote_command(command, host_post_info, True, True)
command = "cp /var/log/dmesg* /var/log/messages %s" % tmp_log_dir
run_remote_command(command, host_post_info)
command = "route -n > %s/route_table" % tmp_log_dir
run_remote_command(command, host_post_info)
command = "iptables-save > %s/iptables_info" % tmp_log_dir
run_remote_command(command, host_post_info)
command = "journalctl -x > %s/journalctl_info" % tmp_log_dir
run_remote_command(command, host_post_info)
def get_pkg_list(self, host_post_info, tmp_log_dir):
command = "rpm -qa | sort > %s/pkg_list" % tmp_log_dir
run_remote_command(command, host_post_info)
def get_vrouter_log(self, host_post_info, collect_dir):
#current vrouter log is very small, so collect all logs for debug
if check_host_reachable(host_post_info) is True:
info("Collecting log from vrouter: %s ..." % host_post_info.host)
local_collect_dir = collect_dir + 'vrouter-%s/' % host_post_info.host
tmp_log_dir = "%s/tmp-log/" % CollectLogCmd.vrouter_log_dir
command = "mkdir -p %s " % tmp_log_dir
run_remote_command(command, host_post_info)
command = "/opt/vyatta/sbin/vyatta-save-config.pl && cp /config/config.boot %s" % tmp_log_dir
run_remote_command(command, host_post_info)
command = "cp %s/*.log %s/*.json %s" % (CollectLogCmd.vrouter_log_dir, CollectLogCmd.vrouter_log_dir,tmp_log_dir)
run_remote_command(command, host_post_info)
self.compress_and_fetch_log(local_collect_dir, tmp_log_dir, host_post_info)
else:
warn("Vrouter %s is unreachable!" % host_post_info.host)
    def get_host_log(self, host_post_info, collect_dir, collect_full_log=False):
        """Collect agent logs plus system info from one KVM host into collect_dir.

        Returns 1 on failure, 0 when the host had no logs; otherwise returns None
        after fetching the staged logs. Failures set CollectLogCmd.failed_flag.
        """
        if check_host_reachable(host_post_info) is True:
            info("Collecting log from host: %s ..." % host_post_info.host)
            tmp_log_dir = "%s/tmp-log/" % CollectLogCmd.zstack_log_dir
            local_collect_dir = collect_dir + 'host-%s/' % host_post_info.host
            try:
                # file system broken shouldn't block collect log process
                if not os.path.exists(local_collect_dir):
                    os.makedirs(local_collect_dir)
                command = "mkdir -p %s " % tmp_log_dir
                run_remote_command(command, host_post_info)
                for log in CollectLogCmd.host_log_list:
                    if 'zstack-agent' in log:
                        command = "mkdir -p %s" % tmp_log_dir + '/zstack-agent/'
                        run_remote_command(command, host_post_info)
                    host_log = CollectLogCmd.zstack_log_dir + '/' + log
                    collect_log = tmp_log_dir + '/' + log
                    if file_dir_exist("path=%s" % host_log, host_post_info):
                        if collect_full_log:
                            # also grab up to 15 rotated .gz archives of the log
                            for num in range(1, 16):
                                log_name = "%s.%s.gz" % (host_log, num)
                                command = "/bin/cp -rf %s %s/" % (log_name, tmp_log_dir)
                                (status, output) = run_remote_command(command, host_post_info, True, True)
                            command = "/bin/cp -rf %s %s/" % (host_log, tmp_log_dir)
                            (status, output) = run_remote_command(command, host_post_info, True, True)
                        else:
                            command = "tail -n %d %s > %s " % (CollectLogCmd.collect_lines, host_log, collect_log)
                            run_remote_command(command, host_post_info)
            except SystemExit:
                # NOTE(review): run_remote_command/error appear to raise SystemExit
                # on failure -- caught here so one bad host doesn't stop collection
                warn("collect log on host %s failed" % host_post_info.host)
                logger.warn("collect log on host %s failed" % host_post_info.host)
                command = 'rm -rf %s' % tmp_log_dir
                CollectLogCmd.failed_flag = True
                run_remote_command(command, host_post_info)
                return 1
            command = 'test "$(ls -A "%s" 2>/dev/null)" || echo The directory is empty' % tmp_log_dir
            (status, output) = run_remote_command(command, host_post_info, return_status=True, return_output=True)
            if "The directory is empty" in output:
                warn("Didn't find log on host: %s " % (host_post_info.host))
                command = 'rm -rf %s' % tmp_log_dir
                run_remote_command(command, host_post_info)
                return 0
            self.get_system_log(host_post_info, tmp_log_dir)
            self.get_pkg_list(host_post_info, tmp_log_dir)
            self.compress_and_fetch_log(local_collect_dir,tmp_log_dir,host_post_info)
        else:
            warn("Host %s is unreachable!" % host_post_info.host)
    def get_storage_log(self, host_post_info, collect_dir, storage_type, collect_full_log=False):
        """Collect storage-agent logs plus system info from a primary ('*_ps') or
        backup ('*_bs') storage host into collect_dir.

        Returns 0 when the host had no logs; failures set CollectLogCmd.failed_flag.
        """
        collect_log_list = []
        if check_host_reachable(host_post_info) is True:
            info("Collecting log from %s storage: %s ..." % (storage_type, host_post_info.host))
            tmp_log_dir = "%s/tmp-log/" % CollectLogCmd.zstack_log_dir
            local_collect_dir = collect_dir + storage_type + '-' + host_post_info.host+ '/'
            try:
                # file system broken shouldn't block collect log process
                if not os.path.exists(local_collect_dir):
                    os.makedirs(local_collect_dir)
                command = "rm -rf %s && mkdir -p %s " % (tmp_log_dir, tmp_log_dir)
                run_remote_command(command, host_post_info)
                # pick the log list by storage role encoded in storage_type
                if '_ps' in storage_type:
                    collect_log_list = CollectLogCmd.ps_log_list
                elif '_bs' in storage_type:
                    collect_log_list = CollectLogCmd.bs_log_list
                else:
                    warn("unknown storage type: %s" % storage_type)
                for log in collect_log_list:
                    if 'zstack-store' in log:
                        command = "mkdir -p %s" % tmp_log_dir + '/zstack-store/'
                        run_remote_command(command, host_post_info)
                    storage_agent_log = CollectLogCmd.zstack_log_dir + '/' + log
                    collect_log = tmp_log_dir + '/' + log
                    if file_dir_exist("path=%s" % storage_agent_log, host_post_info):
                        if collect_full_log:
                            # also grab up to 15 rotated .gz archives of the log
                            for num in range(1, 16):
                                log_name = "%s.%s.gz" % (storage_agent_log, num)
                                command = "/bin/cp -rf %s %s/" % (log_name, tmp_log_dir)
                                (status, output) = run_remote_command(command, host_post_info, True, True)
                            command = "/bin/cp -rf %s %s/" % (storage_agent_log, tmp_log_dir)
                            (status, output) = run_remote_command(command, host_post_info, True, True)
                        else:
                            command = "tail -n %d %s > %s " % (CollectLogCmd.collect_lines, storage_agent_log, collect_log)
                            run_remote_command(command, host_post_info)
            except SystemExit:
                # NOTE(review): run_remote_command/error appear to raise SystemExit on
                # failure; unlike get_host_log this branch does not return early
                logger.warn("collect log on storage: %s failed" % host_post_info.host)
                command = 'rm -rf %s' % tmp_log_dir
                CollectLogCmd.failed_flag = True
                run_remote_command(command, host_post_info)
            command = 'test "$(ls -A "%s" 2>/dev/null)" || echo The directory is empty' % tmp_log_dir
            (status, output) = run_remote_command(command, host_post_info, return_status=True, return_output=True)
            if "The directory is empty" in output:
                warn("Didn't find log on storage host: %s " % host_post_info.host)
                command = 'rm -rf %s' % tmp_log_dir
                run_remote_command(command, host_post_info)
                return 0
            self.get_system_log(host_post_info, tmp_log_dir)
            self.get_pkg_list(host_post_info, tmp_log_dir)
            self.compress_and_fetch_log(local_collect_dir,tmp_log_dir, host_post_info)
        else:
            warn("%s storage %s is unreachable!" % (storage_type, host_post_info.host))
def get_host_ssh_info(self, host_ip, type):
db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
query = MySqlCommandLineQuery()
query.host = db_hostname
query.port = db_port
query.user = db_user
query.password = db_password
query.table = 'zstack'
if type == 'host':
query.sql = "select * from HostVO where managementIp='%s'" % host_ip
host_uuid = query.query()[0]['uuid']
query.sql = "select * from KVMHostVO where uuid='%s'" % host_uuid
ssh_info = query.query()[0]
username = ssh_info['username']
password = ssh_info['password']
ssh_port = ssh_info['port']
return (username, password, ssh_port)
elif type == "sftp_bs":
query.sql = "select * from SftpBackupStorageVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['username']
password = ssh_info['password']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "ceph_bs":
query.sql = "select * from CephBackupStorageMonVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['sshUsername']
password = ssh_info['sshPassword']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "fusionStor_bs":
query.sql = "select * from FusionstorPrimaryStorageMonVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['sshUsername']
password = ssh_info['sshPassword']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "imageStore_bs":
query.sql = "select * from ImageStoreBackupStorageVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['username']
password = ssh_info['password']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "ceph_ps":
query.sql = "select * from CephPrimaryStorageMonVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['sshUsername']
password = ssh_info['sshPassword']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "fusionStor_ps":
query.sql = "select * from FusionstorPrimaryStorageMonVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['sshUsername']
password = ssh_info['sshPassword']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "vrouter":
query.sql = "select value from GlobalConfigVO where name='vrouter.password'"
password = query.query()
username = "vyos"
ssh_port = 22
return (username, password, ssh_port)
else:
warn("unknown target type: %s" % type)
def get_management_node_log(self, collect_dir, host_post_info, collect_full_log=False):
    """Collect logs from a remote management node into collect_dir.

    management-server.log may not exist under its usual name, so the newest
    file matching management-serve* is copied instead (it may be a tarball).

    :param collect_dir: local directory that receives the fetched logs
    :param host_post_info: connection info (host, user, port) for the remote node
    :param collect_full_log: when True also copy the last 15 days of
                             management-server-YYYY-MM-DD* logs
    """
    if check_host_reachable(host_post_info) is True:
        mn_ip = host_post_info.host
        info("Collecting log from management node %s ..." % mn_ip)
        local_collect_dir = collect_dir + "/management-node-%s/" % mn_ip + '/'
        if not os.path.exists(local_collect_dir):
            os.makedirs(local_collect_dir)
        # Stage everything in a scratch dir on the remote side, then fetch once.
        tmp_log_dir = "%s/../../logs/tmp-log/" % ctl.zstack_home
        command = 'rm -rf %s && mkdir -p %s' % (tmp_log_dir, tmp_log_dir)
        run_remote_command(command, host_post_info)
        # Pick the most recently modified management-serve* file (sort by mtime).
        command = "mn_log=`find %s/../../logs/management-serve* -maxdepth 1 -type f -printf" \
                  " '%%T+\\t%%p\\n' | sort -r | awk '{print $2; exit}'`; /bin/cp -rf $mn_log %s" % (ctl.zstack_home, tmp_log_dir)
        (status, output) = run_remote_command(command, host_post_info, True, True)
        if status is not True:
            warn("get management-server log failed: %s" % output)
        command = "/bin/cp -f %s/../../logs/zstack-api.log %s" % (ctl.zstack_home, tmp_log_dir)
        (status, output) = run_remote_command(command, host_post_info, True, True)
        if status is not True:
            warn("get zstack-api log failed: %s" % output)
        if collect_full_log:
            # Last 15 days of dated management-server logs; failures are ignored
            # since a given day's log may simply not exist.
            for item in range(0, 15):
                log_name = "management-server-" + (datetime.today() - timedelta(days=item)).strftime("%Y-%m-%d")
                command = "/bin/cp -rf %s/../../logs/%s* %s/" % (ctl.zstack_home, log_name, tmp_log_dir)
                (status, output) = run_remote_command(command, host_post_info, True, True)
        # For the remaining known logs only keep the last collect_lines lines.
        for log in CollectLogCmd.mn_log_list:
            if file_dir_exist("path=%s/%s" % (CollectLogCmd.zstack_log_dir, log), host_post_info):
                command = "tail -n %d %s/%s > %s/%s " \
                          % (CollectLogCmd.collect_lines, CollectLogCmd.zstack_log_dir, log, tmp_log_dir, log)
                run_remote_command(command, host_post_info)
        self.get_system_log(host_post_info, tmp_log_dir)
        self.get_pkg_list(host_post_info, tmp_log_dir)
        # Tar up the staging dir remotely and pull it to local_collect_dir.
        self.compress_and_fetch_log(local_collect_dir, tmp_log_dir, host_post_info)
    else:
        warn("Management node %s is unreachable!" % host_post_info.host)
def get_local_mn_log(self, collect_dir, collect_full_log=False):
    """Collect logs from this (local) management node into collect_dir.

    Gathers the newest management-serve* file, zstack-api.log, optionally the
    last 15 days of dated management-server logs, the tails of the other
    zstack logs, host information, system logs (dmesg/messages/journal), the
    git commit marker, and the installed RPM package list.

    :param collect_dir: local directory that receives the collected logs
    :param collect_full_log: when True also copy the last 15 days of
                             management-server-YYYY-MM-DD* logs
    """
    info("Collecting log from this management node ...")
    mn_log_dir = collect_dir + 'management-node-%s' % get_default_ip()
    if not os.path.exists(mn_log_dir):
        os.makedirs(mn_log_dir)
    # Pick the most recently modified management-serve* file (sort by mtime).
    command = "mn_log=`find %s/../..//logs/management-serve* -maxdepth 1 -type f -printf '%%T+\\t%%p\\n' | sort -r | " \
              "awk '{print $2; exit}'`; /bin/cp -rf $mn_log %s/" % (ctl.zstack_home, mn_log_dir)
    (status, output) = commands.getstatusoutput(command)
    if status != 0:
        warn("get management-server log failed: %s" % output)
    command = "/bin/cp -f %s/../../logs/zstack-api.log %s" % (ctl.zstack_home, mn_log_dir)
    (status, output) = commands.getstatusoutput(command)
    if status != 0:
        warn("get zstack-api log failed: %s" % output)
    if collect_full_log:
        # Last 15 days of dated logs; a missing day is not an error.
        for item in range(0, 15):
            log_name = "management-server-" + (datetime.today() - timedelta(days=item)).strftime("%Y-%m-%d")
            command = "/bin/cp -rf %s/../../logs/%s* %s/" % (ctl.zstack_home, log_name, mn_log_dir)
            (status, output) = commands.getstatusoutput(command)
    # For the remaining known logs only keep the last collect_lines lines.
    for log in CollectLogCmd.mn_log_list:
        if os.path.exists(CollectLogCmd.zstack_log_dir + log):
            command = ( "tail -n %d %s/%s > %s/%s " % (CollectLogCmd.collect_lines, CollectLogCmd.zstack_log_dir, log, mn_log_dir, log))
            (status, output) = commands.getstatusoutput(command)
            if status != 0:
                warn("get %s failed: %s" % (log, output))
    # Basic host information: uptime, reboots, memory, CPU, network, disks.
    host_info_log = mn_log_dir + "/host_info"
    command = "uptime > %s && last reboot >> %s && free -h >> %s && cat /proc/cpuinfo >> %s && ip addr >> %s && df -h >> %s" % \
              (host_info_log, host_info_log, host_info_log, host_info_log, host_info_log, host_info_log)
    commands.getstatusoutput(command)
    command = "cp /var/log/dmesg* /var/log/messages %s/" % mn_log_dir
    commands.getstatusoutput(command)
    command = "cp %s/*git-commit %s/" % (ctl.zstack_home, mn_log_dir)
    commands.getstatusoutput(command)
    # Fix: this command used to be issued twice back-to-back; run it once.
    command = " rpm -qa | sort > %s/pkg_list" % mn_log_dir
    commands.getstatusoutput(command)
    command = "journalctl -x > %s/journalctl_info" % mn_log_dir
    commands.getstatusoutput(command)
def generate_tar_ball(self, run_command_dir, detail_version, time_stamp):
    """Pack collect-log-<version>-<timestamp>/ into a .tar.gz next to it."""
    tar_cmd = "cd %s && tar zcf collect-log-%s-%s.tar.gz collect-log-%s-%s" \
              % (run_command_dir, detail_version, time_stamp, detail_version, time_stamp)
    (status, output) = commands.getstatusoutput(tar_cmd)
    if status != 0:
        error("Generate tarball failed: %s " % output)
def generate_host_post_info(self, host_ip, type):
    """Build a HostPostInfo for host_ip, looking up ssh credentials by type.

    Also ensures host_ip is present in the local ansible inventory so later
    plays against it can resolve the host.
    """
    inventory_path = ctl.zstack_home + "/../../../ansible/hosts"
    # Register the host in the inventory if it is not already listed.
    with open(inventory_path) as f:
        old_hosts = f.read()
    if host_ip not in old_hosts:
        with open(inventory_path, "w") as f:
            f.write(host_ip + "\n" + old_hosts)
    (host_user, host_password, host_port) = self.get_host_ssh_info(host_ip, type)
    host_post_info = HostPostInfo()
    # Non-root users with a password need privilege escalation.
    if host_user != 'root' and host_password is not None:
        host_post_info.become = True
    host_post_info.remote_user = host_user
    host_post_info.remote_pass = host_password
    host_post_info.remote_port = host_port
    host_post_info.host = host_ip
    host_post_info.host_inventory = ctl.zstack_home + "/../../../ansible/hosts"
    host_post_info.private_key = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/id_rsa"
    host_post_info.post_url = ""
    return host_post_info
def run(self, args):
    """Entry point of collect_log: gather logs from every reachable component.

    Collects from the management node(s), all backup/primary storages, the
    virtual routers, optionally the database dump and all KVM hosts, then
    packs everything into one tarball in the current working directory.
    """
    run_command_dir = os.getcwd()
    time_stamp = datetime.now().strftime("%Y-%m-%d_%H-%M")
    # create log
    create_log(CollectLogCmd.logger_dir, CollectLogCmd.logger_file)
    if get_detail_version() is not None:
        detail_version = get_detail_version().replace(' ','_')
    else:
        # Fall back to reading the version straight from the live database.
        hostname, port, user, password = ctl.get_live_mysql_portal()
        detail_version = get_zstack_version(hostname, port, user, password)
    # collect_dir used to store the collect-log
    collect_dir = run_command_dir + '/collect-log-%s-%s/' % (detail_version, time_stamp)
    if not os.path.exists(collect_dir):
        os.makedirs(collect_dir)
    if os.path.exists(InstallHACmd.conf_file) is not True:
        self.get_local_mn_log(collect_dir, args.full)
    else:
        # this only for HA due to db will lost mn info if mn offline
        mn_list = get_ha_mn_list(InstallHACmd.conf_file)
        for mn_ip in mn_list:
            host_post_info = HostPostInfo()
            host_post_info.remote_user = 'root'
            # this will be changed in the future
            host_post_info.remote_port = '22'
            host_post_info.host = mn_ip
            host_post_info.host_inventory = InstallHACmd.conf_dir + 'host'
            host_post_info.post_url = ""
            host_post_info.private_key = InstallHACmd.conf_dir + 'ha_key'
            self.get_management_node_log(collect_dir, host_post_info, args.full)
    #collect bs log
    sftp_bs_vo = get_host_list("SftpBackupStorageVO")
    for bs in sftp_bs_vo:
        bs_ip = bs['hostname']
        self.get_storage_log(self.generate_host_post_info(bs_ip, "sftp_bs"), collect_dir, "sftp_bs")
    ceph_bs_vo = get_host_list("CephBackupStorageMonVO")
    for bs in ceph_bs_vo:
        bs_ip = bs['hostname']
        self.get_storage_log(self.generate_host_post_info(bs_ip, "ceph_bs"), collect_dir, "ceph_bs")
    fusionStor_bs_vo = get_host_list("FusionstorBackupStorageMonVO")
    for bs in fusionStor_bs_vo:
        bs_ip = bs['hostname']
        self.get_storage_log(self.generate_host_post_info(bs_ip, "fusionStor_bs"), collect_dir, "fusionStor_bs")
    imageStore_bs_vo = get_host_list("ImageStoreBackupStorageVO")
    for bs in imageStore_bs_vo:
        bs_ip = bs['hostname']
        self.get_storage_log(self.generate_host_post_info(bs_ip, "imageStore_bs"), collect_dir, "imageStore_bs")
    #collect ps log
    ceph_ps_vo = get_host_list("CephPrimaryStorageMonVO")
    for ps in ceph_ps_vo:
        ps_ip = ps['hostname']
        self.get_storage_log(self.generate_host_post_info(ps_ip,"ceph_ps"), collect_dir, "ceph_ps")
    fusionStor_ps_vo = get_host_list("FusionstorPrimaryStorageMonVO")
    for ps in fusionStor_ps_vo:
        ps_ip = ps['hostname']
        self.get_storage_log(self.generate_host_post_info(ps_ip,"fusionStor_ps"), collect_dir, "fusionStor_ps")
    #collect vrouter log
    vrouter_ip_list = get_vrouter_list()
    for vrouter_ip in vrouter_ip_list:
        self.get_vrouter_log(self.generate_host_post_info(vrouter_ip, "vrouter"),collect_dir)
    if args.db is True:
        self.get_db(collect_dir)
    if args.mn_only is not True:
        host_vo = get_host_list("HostVO")
        #collect host log
        for host in host_vo:
            # --host restricts collection to one explicitly-named host.
            if args.host is not None:
                host_ip = args.host
            else:
                host_ip = host['managementIp']
            host_type = host['hypervisorType']
            if host_type == "KVM":
                self.get_host_log(self.generate_host_post_info(host_ip, "host"), collect_dir, args.full)
            else:
                warn("host %s is not a KVM host, skip..." % host_ip)
            if args.host is not None:
                break
    self.generate_tar_ball(run_command_dir, detail_version, time_stamp)
    if CollectLogCmd.failed_flag is True:
        info("The collect log generate at: %s/collect-log-%s-%s.tar.gz" % (run_command_dir, detail_version, time_stamp))
        info(colored("Please check the reason of failed task in log: %s\n" % (CollectLogCmd.logger_dir + CollectLogCmd.logger_file), 'yellow'))
    else:
        info("The collect log generate at: %s/collect-log-%s-%s.tar.gz" % (run_command_dir, detail_version, time_stamp))
class ChangeIpCmd(Command):
    """zstack-ctl change_ip: point the management node at a new IP address.

    Updates zstack.properties and the zstack_ui properties, refreshes
    /etc/hosts and the hostname, rewrites iptables rules for the MySQL and
    RabbitMQ ports, and resets RabbitMQ so the broker binds the new address.
    """

    def __init__(self):
        super(ChangeIpCmd, self).__init__()
        self.name = "change_ip"
        self.description = (
            "update new management ip address to zstack property file"
        )
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--ip', help='The new IP address of management node.'
                                         'This operation will update the new ip address to '
                                         'zstack config file' , required=True)
        parser.add_argument('--cloudbus_server_ip', help='The new IP address of CloudBus.serverIp.0, default will use value from --ip', required=False)
        parser.add_argument('--mysql_ip', help='The new IP address of DB.url, default will use value from --ip', required=False)

    def run(self, args):
        """Apply the new IP everywhere; returns 1 on any validation failure."""
        if args.ip == '0.0.0.0':
            raise CtlError('for your data safety, please do NOT use 0.0.0.0 as the listen address')
        # CloudBus and MySQL addresses default to the management IP when not given.
        if args.cloudbus_server_ip is not None:
            cloudbus_server_ip = args.cloudbus_server_ip
        else:
            cloudbus_server_ip = args.ip
        if args.mysql_ip is not None:
            mysql_ip = args.mysql_ip
        else:
            mysql_ip = args.ip
        zstack_conf_file = ctl.properties_file_path
        # NOTE(review): pattern would ideally be a raw string; the escapes still
        # work as written, so behavior is unchanged.
        ip_check = re.compile('^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')
        for input_ip in [cloudbus_server_ip, mysql_ip]:
            if not ip_check.match(input_ip):
                info("The ip address you input: %s seems not a valid ip" % input_ip)
                return 1
        # Update /etc/hosts
        if os.path.isfile(zstack_conf_file):
            old_ip = ctl.read_property('management.server.ip')
            if old_ip is not None:
                if not ip_check.match(old_ip):
                    info("The ip address[%s] read from [%s] seems not a valid ip" % (old_ip, zstack_conf_file))
                    return 1
            # read from env other than /etc/hostname in case of impact of DHCP SERVER
            old_hostname = shell("hostname").replace("\n","")
            new_hostname = args.ip.replace(".","-")
            # Keep a real hostname if one is already configured.
            if old_hostname != "localhost" and old_hostname != "localhost.localdomain":
                new_hostname = old_hostname
            # Drop the stale /etc/hosts entry: by old IP when known, else by hostname.
            if old_ip != None:
                shell('sed -i "/^%s .*$/d" /etc/hosts' % old_ip)
            else:
                shell('sed -i "/^.* %s$/d" /etc/hosts' % new_hostname)
            shell('echo "%s %s" >> /etc/hosts' % (args.ip, new_hostname))
            shell('hostnamectl set-hostname %s' % new_hostname)
            shell('export HOSTNAME=%s' % new_hostname)
            if old_ip != None:
                info("Update /etc/hosts, old_ip:%s, new_ip:%s" % (old_ip, args.ip))
            else:
                info("Update /etc/hosts, new_ip:%s" % args.ip)
        else:
            info("Didn't find %s, skip update new ip" % zstack_conf_file )
            return 1
        # Update zstack config file
        if os.path.isfile(zstack_conf_file):
            # Keep a .bak copy before rewriting any property.
            shell("yes | cp %s %s.bak" % (zstack_conf_file, zstack_conf_file))
            ctl.write_properties([
                ('CloudBus.serverIp.0', cloudbus_server_ip),
            ])
            info("Update cloudbus server ip %s in %s " % (cloudbus_server_ip, zstack_conf_file))
            ctl.write_properties([
                ('management.server.ip', args.ip),
            ])
            info("Update management server ip %s in %s " % (args.ip, zstack_conf_file))
            # update zstack db url
            # Replace the first IPv4-looking token inside the JDBC URL with mysql_ip.
            db_url = ctl.read_property('DB.url')
            db_old_ip = re.findall(r'[0-9]+(?:\.[0-9]{1,3}){3}', db_url)
            db_new_url = db_url.split(db_old_ip[0])[0] + mysql_ip + db_url.split(db_old_ip[0])[1]
            ctl.write_properties([
                ('DB.url', db_new_url),
            ])
            info("Update mysql new url %s in %s " % (db_new_url, zstack_conf_file))
            # update zstack_ui db url
            db_url = ctl.read_ui_property('db_url')
            db_old_ip = re.findall(r'[0-9]+(?:\.[0-9]{1,3}){3}', db_url)
            db_new_url = db_url.split(db_old_ip[0])[0] + mysql_ip + db_url.split(db_old_ip[0])[1]
            ctl.write_ui_properties([
                ('db_url', db_new_url),
            ])
            info("Update mysql new url %s in %s " % (db_new_url, ctl.ui_properties_file_path))
        else:
            info("Didn't find %s, skip update new ip" % zstack_conf_file )
            return 1
        # Update iptables
        mysql_ports = {3306}
        mq_ports = {4369, 5672, 15672, 25672}
        ports = mysql_ports | mq_ports
        # Collect existing INPUT rules mentioning any of the managed ports.
        cmd = "/sbin/iptables-save | grep INPUT | grep '%s'" % '\\|'.join('dport %s ' % port for port in ports)
        o = ShellCmd(cmd)
        o(False)
        if o.return_code == 0:
            old_rules = o.stdout.splitlines()
        else:
            old_rules = []
        # Reload the full rule set minus those stale entries.
        iptstrs = shell("/sbin/iptables-save").splitlines()
        for rule in old_rules:
            iptstrs.remove(rule)
        (tmp_fd, tmp_path) = tempfile.mkstemp()
        tmp_fd = os.fdopen(tmp_fd, 'w')
        tmp_fd.write('\n'.join(iptstrs))
        tmp_fd.close()
        shell('/sbin/iptables-restore < %s' % tmp_path)
        os.remove(tmp_path)
        # Only re-open ports for the services that live on this node's new IP.
        if mysql_ip != args.ip:
            ports -= mysql_ports
        if cloudbus_server_ip != args.ip:
            ports -= mq_ports
        for port in ports:
            shell('iptables -A INPUT -p tcp --dport %s -j REJECT' % port)
            shell('iptables -I INPUT -p tcp --dport %s -d %s -j ACCEPT' % (port, args.ip))
            shell('iptables -I INPUT -p tcp --dport %s -d 127.0.0.1 -j ACCEPT' % port)
        info("update iptables rules successfully")
        # Reset RabbitMQ
        info("Starting reset rabbitmq...")
        if shell_return("zstack-ctl reset_rabbitmq") != 0:
            error("Reset rabbitMQ failed\n"
                  "Change ip failed")
        info("Reset rabbitMQ successfully")
        info("Change ip successfully")
class InstallManagementNodeCmd(Command):
    """zstack-ctl install_management_node: provision a remote management node.

    The current (already configured) node acts as the source: its Apache
    Tomcat ZIP, zstack.war, local pypi mirror, yum repos and zstack.properties
    are copied to the target host, which is then set up by a generated
    ansible playbook plus several helper shell scripts.
    """

    def __init__(self):
        super(InstallManagementNodeCmd, self).__init__()
        self.name = "install_management_node"
        self.description = (
            "install ZStack management node from current machine to a remote machine with zstack.properties."
            "\nNOTE: please configure current node before installing node on other machines"
        )
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--host', help='target host IP user and password, for example, root:password@192.168.0.212, to install ZStack management node to a remote machine', required=True)
        parser.add_argument('--install-path', help='the path on remote machine where Apache Tomcat will be installed, which must be an absolute path; [DEFAULT]: /usr/local/zstack', default='/usr/local/zstack')
        parser.add_argument('--source-dir', help='the source folder containing Apache Tomcat package and zstack.war, if omitted, it will default to a path related to $ZSTACK_HOME')
        parser.add_argument('--debug', help="open Ansible debug option", action="store_true", default=False)
        parser.add_argument('--force-reinstall', help="delete existing Apache Tomcat and resinstall ZStack", action="store_true", default=False)
        parser.add_argument('--yum', help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.", default=None)
        parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)

    def add_public_key_to_host(self, key_path, host_info):
        """Push our public key to the target so later ansible runs can use key auth."""
        command ='timeout 10 sshpass -p "%s" ssh-copy-id -o UserKnownHostsFile=/dev/null -o PubkeyAuthentication=no' \
                 ' -o StrictHostKeyChecking=no -i %s root@%s' % (host_info.remote_pass, key_path, host_info.host)
        (status, output) = commands.getstatusoutput(command)
        if status != 0:
            error("Copy public key '%s' to host: '%s' failed:\n %s" % (key_path, host_info.host, output))

    def run(self, args):
        """Validate inputs, build the playbook + helper scripts, and run ansible.

        Raises CtlError on invalid paths or when the Tomcat ZIP / zstack.war /
        pypi mirror cannot be located locally.
        """
        if not os.path.isabs(args.install_path):
            raise CtlError('%s is not an absolute path' % args.install_path)
        if not args.source_dir:
            args.source_dir = os.path.join(ctl.zstack_home, "../../../")
        if not os.path.isdir(args.source_dir):
            raise CtlError('%s is not an directory' % args.source_dir)
        if not args.yum:
            args.yum = get_yum_repo_from_property()
        if args.ssh_key is None:
            args.ssh_key = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/id_rsa.pub"
        # The private key path is the public key path with its extension removed.
        private_key = args.ssh_key.split('.')[0]
        inventory_file = ctl.zstack_home + "/../../../ansible/hosts"
        host_info = HostPostInfo()
        host_info.private_key = private_key
        host_info.host_inventory = inventory_file
        (host_info.remote_user, host_info.remote_pass, host_info.host, host_info.remote_port) = check_host_info_format(args.host)
        check_host_password(host_info.remote_pass, host_info.host)
        self.add_public_key_to_host(args.ssh_key, host_info)
        # Locate the Tomcat ZIP and zstack.war in the source directory.
        apache_tomcat = None
        zstack = None
        apache_tomcat_zip_name = None
        for file in os.listdir(args.source_dir):
            full_path = os.path.join(args.source_dir, file)
            if file.startswith('apache-tomcat') and file.endswith('zip') and os.path.isfile(full_path):
                apache_tomcat = full_path
                apache_tomcat_zip_name = file
            if file == 'zstack.war':
                zstack = full_path
        if not apache_tomcat:
            raise CtlError('cannot find Apache Tomcat ZIP in %s, please use --source-dir to specify the directory containing the ZIP' % args.source_dir)
        if not zstack:
            raise CtlError('cannot find zstack.war in %s, please use --source-dir to specify the directory containing the WAR file' % args.source_dir)
        # Pack the local pypi mirror so the target can install pip/ansible offline.
        pypi_path = os.path.join(ctl.zstack_home, "static/pypi/")
        if not os.path.isdir(pypi_path):
            raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % pypi_path)
        pypi_tar_path = os.path.join(ctl.zstack_home, "static/pypi.tar.bz")
        static_path = os.path.join(ctl.zstack_home, "static")
        shell('cd %s; tar jcf pypi.tar.bz pypi' % static_path)
        # Ansible playbook template; $-placeholders are filled via string.Template
        # at the end of this method ({{...}} is ansible's own templating).
        yaml = '''---
- hosts: $host
  remote_user: root
  vars:
      root: $install_path
      yum_repo: "$yum_repo"
  tasks:
    - name: check remote env on RedHat OS 6
      when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7'
      script: $pre_script_on_rh6
    - name: prepare remote environment
      script: $pre_script
    - name: sync repo from remote management node
      script: $sync_repo
    - name: install dependencies on RedHat OS from user defined repo
      when: ansible_os_family == 'RedHat' and yum_repo != 'false'
      shell: yum clean metadata; yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y dmidecode java-1.8.0-openjdk wget python-devel gcc autoconf tar gzip unzip python-pip openssh-clients sshpass bzip2 ntp ntpdate sudo libselinux-python python-setuptools iptables-services libffi-devel openssl-devel
    - name: install dependencies on RedHat OS from system repos
      when: ansible_os_family == 'RedHat' and yum_repo == 'false'
      shell: yum clean metadata; yum --nogpgcheck install -y dmidecode java-1.8.0-openjdk wget python-devel gcc autoconf tar gzip unzip python-pip openssh-clients sshpass bzip2 ntp ntpdate sudo libselinux-python python-setuptools iptables-services libffi-devel openssl-devel
    - name: set java 8 as default runtime
      when: ansible_os_family == 'RedHat'
      shell: update-alternatives --install /usr/bin/java java /usr/lib/jvm/jre-1.8.0/bin/java 0; update-alternatives --set java /usr/lib/jvm/jre-1.8.0/bin/java
    - name: add ppa source for openjdk-8 on Ubuntu 14.04
      when: ansible_os_family == 'Debian' and ansible_distribution_version == '14.04'
      shell: add-apt-repository ppa:openjdk-r/ppa -y; apt-get update
    - name: install openjdk on Ubuntu 14.04
      when: ansible_os_family == 'Debian' and ansible_distribution_version == '14.04'
      apt: pkg={{item}} update_cache=yes
      with_items:
        - openjdk-8-jdk
    - name: install openjdk on Ubuntu 16.04
      when: ansible_os_family == 'Debian' and ansible_distribution_version == '16.04'
      apt: pkg={{item}} update_cache=yes
      with_items:
        - openjdk-8-jdk
    - name: set java 8 as default runtime
      when: ansible_os_family == 'Debian' and ansible_distribution_version == '14.04'
      shell: update-alternatives --install /usr/bin/java java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java 0; update-alternatives --install /usr/bin/javac javac /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/javac 0; update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java; update-alternatives --set javac /usr/lib/jvm/java-8-openjdk-amd64/bin/javac
    - name: install dependencies Debian OS
      when: ansible_os_family == 'Debian'
      apt: pkg={{item}} update_cache=yes
      with_items:
        - wget
        - python-dev
        - gcc
        - autoconf
        - tar
        - gzip
        - unzip
        - python-pip
        - sshpass
        - bzip2
        - ntp
        - ntpdate
        - sudo
        - python-setuptools
    - stat: path=/usr/bin/mysql
      register: mysql_path
    - name: install MySQL client for RedHat 6 from user defined repos
      when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7' and yum_repo != 'false' and (mysql_path.stat.exists == False)
      shell: yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y mysql
    - name: install MySQL client for RedHat 6 from system repo
      when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7' and yum_repo == 'false' and (mysql_path.stat.exists == False)
      shell: yum --nogpgcheck install -y mysql
    - name: install MySQL client for RedHat 7 from user defined repos
      when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repo != 'false' and (mysql_path.stat.exists == False)
      shell: yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y mariadb
    - name: install MySQL client for RedHat 7 from system repos
      when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repo == 'false' and (mysql_path.stat.exists == False)
      shell: yum --nogpgcheck install -y mariadb
    - name: install MySQL client for Ubuntu
      when: ansible_os_family == 'Debian' and (mysql_path.stat.exists == False)
      apt: pkg={{item}}
      with_items:
        - mysql-client
    - name: copy pypi tar file
      copy: src=$pypi_tar_path dest=$pypi_tar_path_dest
    - name: untar pypi
      shell: "cd /tmp/; tar jxf $pypi_tar_path_dest"
    - name: install pip from local source
      shell: "easy_install -i file://$pypi_path/simple --upgrade pip"
    - name: install ansible from local source
      pip: name="ansible" extra_args="-i file://$pypi_path/simple --ignore-installed --trusted-host localhost"
    - name: install virtualenv
      pip: name="virtualenv" extra_args="-i file://$pypi_path/simple --ignore-installed --trusted-host localhost"
    - name: copy Apache Tomcat
      copy: src=$apache_path dest={{root}}/$apache_tomcat_zip_name
    - name: copy zstack.war
      copy: src=$zstack_path dest={{root}}/zstack.war
    - name: install ZStack
      script: $post_script
    - name: copy zstack.properties
      copy: src=$properties_file dest={{root}}/apache-tomcat/webapps/zstack/WEB-INF/classes/zstack.properties
    - name: mount zstack-dvd
      file:
        src: /opt/zstack-dvd
        dest: $install_path/apache-tomcat/webapps/zstack/static/zstack-dvd
        state: link
        force: yes
    - name: setup zstack account
      script: $setup_account
    - name: change owner of /var/lib/zstack/
      shell: "mkdir -p /var/lib/zstack/; chown -R zstack:zstack /var/lib/zstack/"
'''
        # Remote prep script: seed epel/aliyun/163 yum repo files, stop any
        # running zstack-ctl node, refuse to clobber an existing Tomcat unless
        # --force-reinstall was given.  $$ escapes a literal $ for string.Template.
        pre_script = '''
if [ -f /etc/redhat-release ] ; then
grep ' 7' /etc/redhat-release
if [ $$? -eq 0 ]; then
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$$releasever - \$$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=\$$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
else
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$$releasever - \$$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=\$$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
fi
[ -d /etc/yum.repos.d/ ] && echo -e "#aliyun base\n[alibase]\nname=CentOS-\$$releasever - Base - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$$releasever/os/\$$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[aliupdates]\nname=CentOS-\$$releasever - Updates - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$$releasever/updates/\$$basearch/\nenabled=0\ngpgcheck=0\n \n[aliextras]\nname=CentOS-\$$releasever - Extras - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$$releasever/extras/\$$basearch/\nenabled=0\ngpgcheck=0\n \n[aliepel]\nname=Extra Packages for Enterprise Linux \$$releasever - \$$basearce - mirrors.aliyun.com\nbaseurl=http://mirrors.aliyun.com/epel/\$$releasever/\$$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-aliyun-yum.repo
[ -d /etc/yum.repos.d/ ] && echo -e "#163 base\n[163base]\nname=CentOS-\$$releasever - Base - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$$releasever/os/\$$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[163updates]\nname=CentOS-\$$releasever - Updates - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$$releasever/updates/\$$basearch/\nenabled=0\ngpgcheck=0\n \n#additional packages that may be useful\n[163extras]\nname=CentOS-\$$releasever - Extras - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$$releasever/extras/\$$basearch/\nenabled=0\ngpgcheck=0\n \n[ustcepel]\nname=Extra Packages for Enterprise Linux \$$releasever - \$$basearch - ustc \nbaseurl=http://centos.ustc.edu.cn/epel/\$$releasever/\$$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-163-yum.repo
fi
whereis zstack-ctl
if [ $$? -eq 0 ]; then
    zstack-ctl stop_node
fi
apache_path=$install_path/apache-tomcat
if [[ -d $$apache_path ]] && [[ $force_resinstall -eq 0 ]]; then
    echo "found existing Apache Tomcat directory $$apache_path; please use --force-reinstall to delete it and re-install"
    exit 1
fi
rm -rf $install_path
mkdir -p $install_path
'''
        t = string.Template(pre_script)
        pre_script = t.substitute({
            'force_resinstall': int(args.force_reinstall),
            'install_path': args.install_path
        })
        fd, pre_script_path = tempfile.mkstemp(suffix='.sh')
        os.fdopen(fd, 'w').write(pre_script)
        # RH6-only guard: ansible's pycrypto conflicts with a system python-crypto.
        pre_script_on_rh6 = '''
ZSTACK_INSTALL_LOG='/tmp/zstack_installation.log'
rpm -qi python-crypto >/dev/null 2>&1
if [ $? -eq 0 ]; then
    echo "Management node remote installation failed. You need to manually remove python-crypto by \n\n \`rpm -ev python-crypto\` \n\n in remote management node; otherwise it will conflict with ansible's pycrypto." >>$ZSTACK_INSTALL_LOG
    exit 1
fi
'''
        # NOTE(review): this Template is created but never substituted -- the raw
        # script text is written below, so the unescaped $? / $ZSTACK_INSTALL_LOG
        # placeholders are harmless dead code.
        t = string.Template(pre_script_on_rh6)
        fd, pre_script_on_rh6_path = tempfile.mkstemp(suffix='.sh')
        os.fdopen(fd, 'w').write(pre_script_on_rh6)

        def cleanup_pre_script():
            # Remove the temporary helper scripts after the install finishes.
            os.remove(pre_script_path)
            os.remove(pre_script_on_rh6_path)
        self.install_cleanup_routine(cleanup_pre_script)
        # Remote install script: unzip Tomcat + zstack.war, install
        # zstack-ctl/zstack-cli, export ZSTACK_HOME, ensure ansible exists.
        post_script = '''
set -e
filename=$apache_tomcat_zip_name
foldername="$${filename%.*}"
apache_path=$install_path/apache-tomcat
unzip $apache -d $install_path
ln -s $install_path/$$foldername $$apache_path
unzip $zstack -d $$apache_path/webapps/zstack
chmod a+x $$apache_path/bin/*
cat >> $$apache_path/bin/setenv.sh <<EOF
export CATALINA_OPTS=" -Djava.net.preferIPv4Stack=true -Dcom.sun.management.jmxremote=true"
EOF
install_script="$$apache_path/webapps/zstack/WEB-INF/classes/tools/install.sh"
eval "bash $$install_script zstack-ctl"
eval "bash $$install_script zstack-cli"
set +e
grep "ZSTACK_HOME" ~/.bashrc > /dev/null
if [ $$? -eq 0 ]; then
    sed -i "s#export ZSTACK_HOME=.*#export ZSTACK_HOME=$$apache_path/webapps/zstack#" ~/.bashrc
else
    echo "export ZSTACK_HOME=$$apache_path/webapps/zstack" >> ~/.bashrc
fi
which ansible-playbook &> /dev/null
if [ $$? -ne 0 ]; then
    pip install -i file://$pypi_path/simple --trusted-host localhost ansible
fi
'''
        t = string.Template(post_script)
        post_script = t.substitute({
            'install_path': args.install_path,
            'apache': os.path.join(args.install_path, apache_tomcat_zip_name),
            'zstack': os.path.join(args.install_path, 'zstack.war'),
            'apache_tomcat_zip_name': apache_tomcat_zip_name,
            'pypi_path': '/tmp/pypi/'
        })
        fd, post_script_path = tempfile.mkstemp(suffix='.sh')
        os.fdopen(fd, 'w').write(post_script)

        def cleanup_post_script():
            os.remove(post_script_path)
        self.install_cleanup_routine(cleanup_post_script)
        # Remote account script: create/adjust the 'zstack' user, grant sudo,
        # and register ZSTACK_HOME with zstack-ctl.
        setup_account = '''id -u zstack >/dev/null 2>&1
if [ $$? -eq 0 ]; then
    usermod -d $install_path zstack
else
    useradd -d $install_path zstack && mkdir -p $install_path && chown -R zstack.zstack $install_path
fi
grep 'zstack' /etc/sudoers >/dev/null || echo 'zstack ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers
grep '^root' /etc/sudoers >/dev/null || echo 'root ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers
sed -i '/requiretty$$/d' /etc/sudoers
chown -R zstack.zstack $install_path
mkdir /home/zstack && chown -R zstack.zstack /home/zstack
zstack-ctl setenv ZSTACK_HOME=$install_path/apache-tomcat/webapps/zstack
'''
        t = string.Template(setup_account)
        setup_account = t.substitute({
            'install_path': args.install_path
        })
        fd, setup_account_path = tempfile.mkstemp()
        os.fdopen(fd, 'w').write(setup_account)

        def clean_up():
            os.remove(setup_account_path)
        self.install_cleanup_routine(clean_up)
        # Remote repo-sync script: mirror this node's yum repos (base, ceph,
        # uek4, galera, qemu-kvm-ev, virtio-win) into /opt/zstack-dvd on the
        # target; ${BASEURL} / ${repo_version} are Template placeholders.
        sync_repo = '''
# check /opt/zstack-dvd
if [ ! -d /opt/zstack-dvd ]; then
    echo "/opt/zstack-dvd not found, please download ZStack ISO and execute '# zstack-upgrade -r PATH_TO_ZSTACK_ISO'"
    exit 1
fi
# prepare yum repo file
cat > /etc/yum.repos.d/zstack-online-base.repo << EOF
[zstack-online-base]
name=zstack-online-base
baseurl=${BASEURL}
gpgcheck=0
enabled=0
EOF
cat > /etc/yum.repos.d/zstack-online-ceph.repo << EOF
[zstack-online-ceph]
name=zstack-online-ceph
baseurl=${BASEURL}/Extra/ceph
gpgcheck=0
enabled=0
EOF
cat > /etc/yum.repos.d/zstack-online-uek4.repo << EOF
[zstack-online-uek4]
name=zstack-online-uek4
baseurl=${BASEURL}/Extra/uek4
gpgcheck=0
enabled=0
EOF
cat > /etc/yum.repos.d/zstack-online-galera.repo << EOF
[zstack-online-galera]
name=zstack-online-galera
baseurl=${BASEURL}/Extra/galera
gpgcheck=0
enabled=0
EOF
cat > /etc/yum.repos.d/zstack-online-qemu-kvm-ev.repo << EOF
[zstack-online-qemu-kvm-ev]
name=zstack-online-qemu-kvm-ev
baseurl=${BASEURL}/Extra/qemu-kvm-ev
gpgcheck=0
enabled=0
EOF
cat > /etc/yum.repos.d/zstack-online-virtio-win.repo << EOF
[zstack-online-virtio-win]
name=zstack-online-virtio-win
baseurl=${BASEURL}/Extra/virtio-win
gpgcheck=0
enabled=0
EOF
# close epel
yum clean all >/dev/null 2>&1
if [ -f /etc/yum.repos.d/epel.repo ]; then
    sed -i 's/enabled=1/enabled=0/g' /etc/yum.repos.d/epel.repo
fi
# install necessary packages
pkg_list="createrepo curl yum-utils"
yum -y --disablerepo=* --enablerepo=zstack-online-base install $${pkg_list} >/dev/null 2>&1 || exit 1
# reposync
mkdir -p /opt/zstack-dvd/Base/ >/dev/null 2>&1
umount /opt/zstack-dvd/Extra/qemu-kvm-ev >/dev/null 2>&1
mv /opt/zstack-dvd/Packages /opt/zstack-dvd/Base/ >/dev/null 2>&1
reposync -r zstack-online-base -p /opt/zstack-dvd/Base/ --norepopath -m -d
reposync -r zstack-online-ceph -p /opt/zstack-dvd/Extra/ceph --norepopath -d
reposync -r zstack-online-uek4 -p /opt/zstack-dvd/Extra/uek4 --norepopath -d
reposync -r zstack-online-galera -p /opt/zstack-dvd/Extra/galera --norepopath -d
reposync -r zstack-online-qemu-kvm-ev -p /opt/zstack-dvd/Extra/qemu-kvm-ev --norepopath -d
reposync -r zstack-online-virtio-win -p /opt/zstack-dvd/Extra/virtio-win --norepopath -d
rm -f /etc/yum.repos.d/zstack-online-*.repo
# createrepo
createrepo -g /opt/zstack-dvd/Base/comps.xml /opt/zstack-dvd/Base/ >/dev/null 2>&1 || exit 1
rm -rf /opt/zstack-dvd/repodata >/dev/null 2>&1
mv /opt/zstack-dvd/Base/* /opt/zstack-dvd/ >/dev/null 2>&1
rm -rf /opt/zstack-dvd/Base/ >/dev/null 2>&1
createrepo /opt/zstack-dvd/Extra/ceph/ >/dev/null 2>&1 || exit 1
createrepo /opt/zstack-dvd/Extra/uek4/ >/dev/null 2>&1 || exit 1
createrepo /opt/zstack-dvd/Extra/galera >/dev/null 2>&1 || exit 1
createrepo /opt/zstack-dvd/Extra/qemu-kvm-ev >/dev/null 2>&1 || exit 1
createrepo /opt/zstack-dvd/Extra/virtio-win >/dev/null 2>&1 || exit 1
# sync .repo_version
echo ${repo_version} > /opt/zstack-dvd/.repo_version
# clean up
rm -f /opt/zstack-dvd/comps.xml
yum clean all >/dev/null 2>&1
'''
        # Resolve the baseurl this node serves its repo from; fall back to the
        # local Tomcat static path when the zstack-mn repo is not configured.
        command = "yum --disablerepo=* --enablerepo=zstack-mn repoinfo | grep Repo-baseurl | awk -F ' : ' '{ print $NF }'"
        (status, baseurl, stderr) = shell_return_stdout_stderr(command)
        if status != 0:
            baseurl = 'http://localhost:8080/zstack/static/zstack-dvd/'
        with open('/opt/zstack-dvd/.repo_version') as f:
            repoversion = f.readline().strip()
        t = string.Template(sync_repo)
        sync_repo = t.substitute({
            'BASEURL': baseurl.strip(),
            'repo_version': repoversion
        })
        fd, sync_repo_path = tempfile.mkstemp()
        os.fdopen(fd, 'w').write(sync_repo)

        # NOTE(review): re-defining clean_up shadows the earlier one, but both
        # were already registered with install_cleanup_routine, so both run.
        def clean_up():
            os.remove(sync_repo_path)
        self.install_cleanup_routine(clean_up)
        # Fill the playbook template and run it against the target host.
        t = string.Template(yaml)
        if args.yum:
            yum_repo = args.yum
        else:
            yum_repo = 'false'
        yaml = t.substitute({
            'host': host_info.host,
            'install_path': args.install_path,
            'apache_path': apache_tomcat,
            'zstack_path': zstack,
            'pre_script': pre_script_path,
            'pre_script_on_rh6': pre_script_on_rh6_path,
            'post_script': post_script_path,
            'properties_file': ctl.properties_file_path,
            'apache_tomcat_zip_name': apache_tomcat_zip_name,
            'pypi_tar_path': pypi_tar_path,
            'pypi_tar_path_dest': '/tmp/pypi.tar.bz',
            'pypi_path': '/tmp/pypi/',
            'yum_folder': ctl.zstack_home,
            'yum_repo': yum_repo,
            'setup_account': setup_account_path,
            'sync_repo' : sync_repo_path
        })
        ansible(yaml, host_info.host, args.debug, private_key)
        info('successfully installed new management node on machine(%s)' % host_info.host)
class ShowConfiguration(Command):
    """zstack-ctl show_configuration: dump zstack.properties to the console."""

    def __init__(self):
        super(ShowConfiguration, self).__init__()
        self.name = "show_configuration"
        self.description = "a shortcut that prints contents of zstack.properties to screen"
        ctl.register_command(self)

    def run(self, args):
        # print the properties file verbatim, keeping stdout attached
        cat_cmd = 'cat %s' % ctl.properties_file_path
        shell_no_pipe(cat_cmd)
class SetEnvironmentVariableCmd(Command):
    """zstack-ctl setenv: persist key=value pairs into the ctl env file."""

    PATH = os.path.join(ctl.USER_ZSTACK_HOME_DIR, "zstack-ctl/ctl-env")

    def __init__(self):
        super(SetEnvironmentVariableCmd, self).__init__()
        self.name = "setenv"
        self.description = "set variables to zstack-ctl variable file at %s" % self.PATH
        ctl.register_command(self)

    def need_zstack_home(self):
        # setenv must be usable before a management node is installed
        return False

    def run(self, args):
        # everything after the command name is treated as key=value pairs
        if not ctl.extra_arguments:
            raise CtlError('please input variables that are in format of "key=value" split by space')
        if not os.path.isdir(ctl.USER_ZSTACK_HOME_DIR):
            raise CtlError('cannot find home directory(%s) of user "zstack"' % ctl.USER_ZSTACK_HOME_DIR)

        with use_user_zstack():
            env_dir = os.path.dirname(self.PATH)
            if not os.path.isdir(env_dir):
                os.makedirs(env_dir)

            # touch the env file so PropertyFile always has a file to read
            open(self.PATH, 'a').close()

            joined = ' '.join(ctl.extra_arguments)
            prop_file = PropertyFile(self.PATH)
            prop_file.write_properties([joined.split('=', 1)])
class UnsetEnvironmentVariableCmd(Command):
    """zstack-ctl unsetenv: remove variables from the ctl env file."""

    NAME = 'unsetenv'

    def __init__(self):
        super(UnsetEnvironmentVariableCmd, self).__init__()
        self.name = self.NAME
        self.description = 'unset variables in %s' % SetEnvironmentVariableCmd.PATH
        ctl.register_command(self)

    def run(self, args):
        # nothing to do if the env file was never created
        if not os.path.exists(SetEnvironmentVariableCmd.PATH):
            return

        if not ctl.extra_arguments:
            raise CtlError('please input a list of variable names you want to unset')

        PropertyFile(SetEnvironmentVariableCmd.PATH).delete_properties(ctl.extra_arguments)
        info('unset zstack environment variables: %s' % ctl.extra_arguments)
class GetEnvironmentVariableCmd(Command):
    """zstack-ctl getenv: print variables stored in the ctl env file."""

    NAME = 'getenv'

    def __init__(self):
        super(GetEnvironmentVariableCmd, self).__init__()
        self.name = self.NAME
        self.description = "get variables from %s" % SetEnvironmentVariableCmd.PATH
        ctl.register_command(self)

    def run(self, args):
        if not os.path.exists(SetEnvironmentVariableCmd.PATH):
            raise CtlError('cannot find the environment variable file at %s' % SetEnvironmentVariableCmd.PATH)

        prop_file = PropertyFile(SetEnvironmentVariableCmd.PATH)
        if ctl.extra_arguments:
            # only the requested keys, skipping ones that are unset/empty
            lines = []
            for key_name in ctl.extra_arguments:
                value = prop_file.read_property(key_name)
                if value:
                    lines.append('%s=%s' % (key_name, value))
        else:
            # no arguments: dump every stored property
            lines = ['%s=%s' % (k, v) for k, v in prop_file.read_all_properties()]

        info('\n'.join(lines))
# For UI 1.x
class InstallDashboardCmd(Command):
    """zstack-ctl install_ui: install the legacy (1.x) zstack-dashboard web UI.

    Without --host the bundled install.sh performs a local install; with
    --host the zstack_dashboard pip package plus an offline pypi mirror are
    pushed to the remote machine via Ansible and installed into a dedicated
    virtualenv.
    """

    def __init__(self):
        super(InstallDashboardCmd, self).__init__()
        self.name = "install_ui"
        self.description = "install ZStack Web UI"
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--host', help='target host IP, for example, 192.168.0.212, to install ZStack web UI; if omitted, it will be installed on local machine')
        parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)
        parser.add_argument('--yum', help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.", default=None)
        parser.add_argument('--force', help="delete existing virtualenv and resinstall zstack ui and all dependencies", action="store_true", default=False)

    def _install_to_local(self, args):
        # local install delegates entirely to the install.sh shipped inside
        # the management node's war file
        install_script = os.path.join(ctl.zstack_home, "WEB-INF/classes/tools/install.sh")
        if not os.path.isfile(install_script):
            raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % install_script)

        info('found installation script at %s, start installing ZStack web UI' % install_script)
        if args.force:
            # 'force' makes install.sh wipe and recreate the existing virtualenv
            shell('bash %s zstack-dashboard force' % install_script)
        else:
            shell('bash %s zstack-dashboard' % install_script)

    def run(self, args):
        if not args.host:
            self._install_to_local(args)
            return

        # remote install: fall back to the yum repo configured in
        # zstack.properties when --yum is not given
        if not args.yum:
            args.yum = get_yum_repo_from_property()

        tools_path = os.path.join(ctl.zstack_home, "WEB-INF/classes/tools/")
        if not os.path.isdir(tools_path):
            raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % tools_path)

        # locate the zstack_dashboard pip package shipped with the server
        ui_binary = None
        for l in os.listdir(tools_path):
            if l.startswith('zstack_dashboard'):
                ui_binary = l
                break

        if not ui_binary:
            raise CtlError('cannot find zstack-dashboard package under %s, please make sure you have installed ZStack management node' % tools_path)

        ui_binary_path = os.path.join(tools_path, ui_binary)

        # the offline pypi mirror is shipped with the server; build the
        # tarball on demand if it does not exist yet
        pypi_path = os.path.join(ctl.zstack_home, "static/pypi/")
        if not os.path.isdir(pypi_path):
            raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % pypi_path)

        pypi_tar_path = os.path.join(ctl.zstack_home, "static/pypi.tar.bz")
        if not os.path.isfile(pypi_tar_path):
            static_path = os.path.join(ctl.zstack_home, "static")
            os.system('cd %s; tar jcf pypi.tar.bz pypi' % static_path)

        # Ansible playbook: $-placeholders are filled by string.Template
        # below; {{...}} expressions are evaluated by Ansible itself
        yaml = '''---
- hosts: $host
  remote_user: root

  vars:
    virtualenv_root: /var/lib/zstack/virtualenv/zstack-dashboard
    yum_repo: "$yum_repo"

  tasks:
    - name: pre-install script
      when: ansible_os_family == 'RedHat' and yum_repo != 'false'
      script: $pre_install_script

    - name: install Python pip for RedHat OS from user defined repo
      when: ansible_os_family == 'RedHat' and yum_repo != 'false'
      shell: yum clean metadata; yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y libselinux-python python-pip bzip2 python-devel gcc autoconf

    - name: install Python pip for RedHat OS from system repo
      when: ansible_os_family == 'RedHat' and yum_repo == 'false'
      shell: yum clean metadata; yum --nogpgcheck install -y libselinux-python python-pip bzip2 python-devel gcc autoconf

    - name: copy zstack-dashboard package
      copy: src=$src dest=$dest

    - name: copy pypi tar file
      copy: src=$pypi_tar_path dest=$pypi_tar_path_dest

    - name: untar pypi
      shell: "cd /tmp/; tar jxf $pypi_tar_path_dest"

    - name: install Python pip for Ubuntu
      when: ansible_os_family == 'Debian'
      apt: pkg={{item}} update_cache=yes
      with_items:
        - python-pip
        - iptables-persistent

    - name: install pip from local source
      shell: "cd $pypi_path/simple/pip/; pip install --ignore-installed pip*.tar.gz"

    - shell: virtualenv --version | grep "12.1.1"
      register: virtualenv_ret
      ignore_errors: True

    - name: install virtualenv
      pip: name=virtualenv version=12.1.1 extra_args="--ignore-installed --trusted-host localhost -i file://$pypi_path/simple"
      when: virtualenv_ret.rc != 0

    - name: create virtualenv
      shell: "rm -rf {{virtualenv_root}} && virtualenv {{virtualenv_root}}"

    - name: install zstack-dashboard
      pip: name=$dest extra_args="--trusted-host localhost -i file://$pypi_path/simple" virtualenv="{{virtualenv_root}}"
'''

        # pre-install shell script run by the playbook on RedHat-family
        # hosts: seeds epel/aliyun/163 yum repo files (written to a temp
        # file; $? / $releasever below are shell, not Template, syntax)
        pre_script = '''
if [ -f /etc/redhat-release ] ; then
grep ' 7' /etc/redhat-release
if [ $? -eq 0 ]; then
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
else
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
fi
[ -d /etc/yum.repos.d/ ] && echo -e "#aliyun base\n[alibase]\nname=CentOS-\$releasever - Base - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[aliupdates]\nname=CentOS-\$releasever - Updates - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliextras]\nname=CentOS-\$releasever - Extras - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nbaseurl=http://mirrors.aliyun.com/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-aliyun-yum.repo
[ -d /etc/yum.repos.d/ ] && echo -e "#163 base\n[163base]\nname=CentOS-\$releasever - Base - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[163updates]\nname=CentOS-\$releasever - Updates - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n#additional packages that may be useful\n[163extras]\nname=CentOS-\$releasever - Extras - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[ustcepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearch - ustc \nbaseurl=http://centos.ustc.edu.cn/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-163-yum.repo
fi
'''

        fd, pre_script_path = tempfile.mkstemp()
        os.fdopen(fd, 'w').write(pre_script)

        def cleanup_prescript():
            # remove the temp script once the command finishes
            os.remove(pre_script_path)

        self.install_cleanup_routine(cleanup_prescript)
        t = string.Template(yaml)
        if args.yum:
            yum_repo = args.yum
        else:
            # the playbook compares against the literal string 'false'
            yum_repo = 'false'
        yaml = t.substitute({
            "src": ui_binary_path,
            "dest": os.path.join('/tmp', ui_binary),
            "host": args.host,
            'pre_install_script': pre_script_path,
            'pypi_tar_path': pypi_tar_path,
            'pypi_tar_path_dest': '/tmp/pypi.tar.bz',
            'pypi_path': '/tmp/pypi/',
            'yum_folder': ctl.zstack_home,
            'yum_repo': yum_repo
        })

        ansible(yaml, args.host, ssh_key=args.ssh_key)
# For UI 2.0
class InstallZstackUiCmd(Command):
    """zstack-ctl install_ui: install the 2.0 zstack-ui web UI.

    Without --host the bundled install.sh performs a local install; with
    --host the zstack-ui zip is copied to the remote machine via Ansible
    and unpacked under the remote ZSTACK_UI_HOME.

    NOTE(review): this registers the same command name ('install_ui') as
    the UI 1.x InstallDashboardCmd; whichever class is instantiated last
    wins the registration.
    """

    def __init__(self):
        super(InstallZstackUiCmd, self).__init__()
        self.name = "install_ui"
        self.description = "install ZStack web UI"
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--host', help='target host IP, for example, 192.168.0.212, to install ZStack web UI; if omitted, it will be installed on local machine')
        parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)

    def _install_to_local(self, args):
        # local install delegates to the install.sh shipped with the server
        install_script = os.path.join(ctl.zstack_home, "WEB-INF/classes/tools/install.sh")
        if not os.path.isfile(install_script):
            raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % install_script)

        info('found installation script at %s, start installing ZStack web UI' % install_script)
        shell('bash %s zstack-ui' % install_script)

    def run(self, args):
        if not args.host:
            self._install_to_local(args)
            return

        # remote install
        tools_path = os.path.join(ctl.zstack_home, "WEB-INF/classes/tools/")
        if not os.path.isdir(tools_path):
            raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % tools_path)

        # locate the zstack-ui zip shipped with the management node
        ui_binary = None
        for l in os.listdir(tools_path):
            if l.startswith('zstack-ui'):
                ui_binary = l
                break

        if not ui_binary:
            raise CtlError('cannot find zstack-ui package under %s, please make sure you have installed ZStack management node' % tools_path)

        ui_binary_path = os.path.join(tools_path, ui_binary)

        # BUGFIX: the playbook previously used Ansible-style {{ui_home}} and
        # {{dest}} placeholders, but no such Ansible vars are defined anywhere;
        # the values are supplied to string.Template.substitute() below, so
        # they must be $-style template placeholders instead.
        yaml = '''---
- hosts: $host
  remote_user: root

  tasks:
    - name: create zstack-ui directory
      shell: "mkdir -p $ui_home/tmp"

    - name: copy zstack-ui package
      copy: src=$src dest=$dest

    - name: decompress zstack-ui package
      shell: "rm -rf $ui_home/tmp; unzip $dest -d $ui_home/tmp"
'''
        t = string.Template(yaml)
        yaml = t.substitute({
            "src": ui_binary_path,
            "dest": os.path.join(ctl.ZSTACK_UI_HOME, ui_binary),
            "ui_home": ctl.ZSTACK_UI_HOME,
            "host": args.host
        })

        ansible(yaml, args.host, ssh_key=args.ssh_key)
class BootstrapCmd(Command):
    """zstack-ctl bootstrap: create the 'zstack' system user/group and grant
    it passwordless sudo; needed only by the installer or manual installs."""

    def __init__(self):
        super(BootstrapCmd, self).__init__()
        self.name = 'bootstrap'
        self.description = (
            'create user and group of "zstack" and add "zstack" to sudoers;'
            '\nthis command is only needed by installation script'
            ' and users that install ZStack manually'
        )
        ctl.register_command(self)

    def need_zstack_user(self):
        # bootstrap runs before the 'zstack' user exists, so it must not
        # require one
        return False

    def run(self, args):
        # create the 'zstack' user (no login shell) and its home dir if missing
        shell('id -u zstack 2>/dev/null || (useradd -d %s zstack -s /bin/false && mkdir -p %s && chown -R zstack.zstack %s)' % (ctl.USER_ZSTACK_HOME_DIR, ctl.USER_ZSTACK_HOME_DIR, ctl.USER_ZSTACK_HOME_DIR))
        # grant passwordless sudo; NOTE(review): the grep matches any line
        # containing 'zstack', so even a commented-out entry skips the append
        shell("grep 'zstack' /etc/sudoers || echo 'zstack ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers")
        # ensure the home dir exists with correct ownership even when the
        # user already existed
        shell('mkdir -p %s && chown zstack:zstack %s' % (ctl.USER_ZSTACK_HOME_DIR, ctl.USER_ZSTACK_HOME_DIR))
class UpgradeManagementNodeCmd(Command):
    """zstack-ctl upgrade_management_node: replace the deployed zstack.war
    with a new version.

    The war file may be a local path or an HTTP/HTTPS URL (fetched with
    wget). Without --host the local node is backed up, stopped and upgraded
    in place; with --host an Ansible playbook drives the same procedure on
    the remote machine by invoking zstack-ctl there.
    """

    def __init__(self):
        super(UpgradeManagementNodeCmd, self).__init__()
        self.name = "upgrade_management_node"
        self.description = 'upgrade the management node to a specified version'
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--host', help='IP or DNS name of the machine to upgrade the management node, for example, zstack-ctl upgrade_management_node --host=192.168.0.212 --war-file=/usr/local/zstack/zstack.war, to upgrade ZStack management node to a remote machine', default=None)
        parser.add_argument('--war-file', help='path to zstack.war. A HTTP/HTTPS url or a path to a local zstack.war', required=True)
        parser.add_argument('--debug', help="open Ansible debug option", action="store_true", default=False)
        parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)

    def run(self, args):
        error_if_tool_is_missing('unzip')

        need_download = args.war_file.startswith('http')
        if need_download:
            error_if_tool_is_missing('wget')

        # per-run scratch directory holding backups and the downloaded war
        upgrade_tmp_dir = os.path.join(ctl.USER_ZSTACK_HOME_DIR, 'upgrade', time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()))
        shell('mkdir -p %s' % upgrade_tmp_dir)

        property_file_backup_path = os.path.join(upgrade_tmp_dir, 'zstack.properties')

        class NewWarFilePath(object):
            # Mutable holder so nested functions can rebind the war path
            # (Python 2 has no 'nonlocal').
            # BUGFIX: this used to read 'self.path = None', which assigned
            # the attribute onto the enclosing command instance (run()'s
            # 'self' is in scope inside the class body), leaving instances
            # of this class without a 'path' attribute and crashing remote
            # upgrades from a URL with AttributeError.
            path = None

        new_war = NewWarFilePath()

        if not need_download:
            new_war.path = expand_path(args.war_file)
            if not os.path.exists(new_war.path):
                raise CtlError('%s not found' % new_war.path)

        def local_upgrade():
            def backup():
                # save zstack.properties and the whole webapp directory
                ctl.internal_run('save_config', '--save-to %s' % os.path.dirname(property_file_backup_path))
                shell('cp -r %s %s' % (ctl.zstack_home, upgrade_tmp_dir))
                info('backup %s to %s' % (ctl.zstack_home, upgrade_tmp_dir))

            def download_war_if_needed():
                if need_download:
                    new_war.path = os.path.join(upgrade_tmp_dir, 'new', 'zstack.war')
                    # BUGFIX: 'wget -O' cannot create intermediate
                    # directories, so create the 'new' subdirectory first
                    shell('mkdir -p %s' % os.path.dirname(new_war.path))
                    shell_no_pipe('wget --no-check-certificate %s -O %s' % (args.war_file, new_war.path))
                    info('downloaded new zstack.war to %s' % new_war.path)

            def stop_node():
                info('start to stop the management node ...')
                ctl.internal_run('stop_node')

            def upgrade():
                info('start to upgrade the management node ...')
                shell('rm -rf %s' % ctl.zstack_home)

                # webapp_dir is the tomcat 'webapps' directory above zstack_home
                if ctl.zstack_home.endswith('/'):
                    webapp_dir = os.path.dirname(os.path.dirname(ctl.zstack_home))
                else:
                    webapp_dir = os.path.dirname(ctl.zstack_home)

                shell('cp %s %s' % (new_war.path, webapp_dir))
                ShellCmd('unzip %s -d zstack' % os.path.basename(new_war.path), workdir=webapp_dir)()

                #create local repo folder for possible zstack local yum repo
                zstack_dvd_repo = '%s/zstack/static/zstack-dvd' % webapp_dir
                shell('rm -f %s; ln -s /opt/zstack-dvd %s' % (zstack_dvd_repo, zstack_dvd_repo))

            def restore_config():
                info('restoring the zstack.properties ...')
                ctl.internal_run('restore_config', '--restore-from %s' % os.path.dirname(property_file_backup_path))

            def install_tools():
                info('upgrading zstack-cli, zstack-ctl; this may cost several minutes ...')
                install_script = os.path.join(ctl.zstack_home, "WEB-INF/classes/tools/install.sh")
                if not os.path.isfile(install_script):
                    raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % install_script)

                shell("bash %s zstack-cli" % install_script)
                shell("bash %s zstack-ctl" % install_script)
                info('successfully upgraded zstack-cli, zstack-ctl')

            def save_new_war():
                # keep a copy of the new war next to the tomcat installation
                sdir = os.path.join(ctl.zstack_home, "../../../")
                shell('yes | cp %s %s' % (new_war.path, sdir))

            def chown_to_zstack():
                info('change permission to user zstack')
                shell('chown -R zstack:zstack %s' % os.path.join(ctl.zstack_home, '../../'))

            backup()
            download_war_if_needed()
            stop_node()
            upgrade()
            restore_config()
            install_tools()
            save_new_war()
            chown_to_zstack()

            info('----------------------------------------------\n'
                 'Successfully upgraded the ZStack management node to a new version.\n'
                 'We backup the old zstack as follows:\n'
                 '\tzstack.properties: %s\n'
                 '\tzstack folder: %s\n'
                 'Please test your new ZStack. If everything is OK and stable, you can manually delete those backup by deleting %s.\n'
                 'Otherwise you can use them to rollback to the previous version\n'
                 '-----------------------------------------------\n' %
                 (property_file_backup_path, os.path.join(upgrade_tmp_dir, 'zstack'), upgrade_tmp_dir))

        def remote_upgrade():
            # if the war is a URL the remote machine downloads it itself;
            # otherwise copy the local war over and delete it afterwards
            need_copy = 'true'
            src_war = new_war.path
            dst_war = '/tmp/zstack.war'

            if need_download:
                need_copy = 'false'
                src_war = args.war_file
                dst_war = args.war_file

            # '$$?' is a string.Template escape for a literal shell '$?'
            upgrade_script = '''
zstack-ctl upgrade_management_node --war-file=$war_file
if [ $$? -ne 0 ]; then
    echo 'failed to upgrade the remote management node'
    exit 1
fi

if [ "$need_copy" == "true" ]; then
    rm -f $war_file
fi
'''
            t = string.Template(upgrade_script)
            upgrade_script = t.substitute({
                'war_file': dst_war,
                'need_copy': need_copy
            })

            fd, upgrade_script_path = tempfile.mkstemp(suffix='.sh')
            os.fdopen(fd, 'w').write(upgrade_script)

            def cleanup_upgrade_script():
                os.remove(upgrade_script_path)

            self.install_cleanup_routine(cleanup_upgrade_script)

            yaml = '''---
- hosts: $host
  remote_user: root

  vars:
    need_copy: "$need_copy"

  tasks:
    - name: copy zstack.war to remote
      copy: src=$src_war dest=$dst_war
      when: need_copy == 'true'

    - name: upgrade management node
      script: $upgrade_script
      register: output
      ignore_errors: yes

    - name: failure
      fail: msg="failed to upgrade the remote management node. {{ output.stdout }} {{ output.stderr }}"
      when: output.rc != 0
'''
            t = string.Template(yaml)
            yaml = t.substitute({
                "src_war": src_war,
                "dst_war": dst_war,
                "host": args.host,
                "need_copy": need_copy,
                "upgrade_script": upgrade_script_path
            })

            info('start to upgrade the remote management node; the process may cost several minutes ...')
            if args.ssh_key is None:
                args.ssh_key = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/id_rsa.pub"
            # NOTE(review): deriving the private key by splitting on '.' only
            # works for key paths whose sole dot is the '.pub' suffix
            private_key = args.ssh_key.split('.')[0]
            ansible(yaml, args.host, args.debug, ssh_key=private_key)
            info('upgraded the remote management node successfully')

        if args.host:
            remote_upgrade()
        else:
            local_upgrade()
class UpgradeMultiManagementNodeCmd(Command):
    """zstack-ctl upgrade_multi_management_node: upgrade every management
    node in the cluster -- the local node first (via the installer bin),
    then each remote node (via 'zstack-ctl upgrade_management_node')."""

    logger_dir = '/var/log/zstack'
    logger_file = 'zstack-ctl.log'
    # progress-spinner states shared with ZstackSpinner; per-host stop
    # entries are added dynamically in run()
    SpinnerInfo.spinner_status = {'stop_local':False, 'upgrade_local':False , 'start_local':False, 'upgrade':False, 'stop':False, 'start':False}

    def __init__(self):
        super(UpgradeMultiManagementNodeCmd, self).__init__()
        self.name = "upgrade_multi_management_node"
        self.description = 'upgrade the management cluster'
        ctl.register_command(self)

    def start_mn(self, host_post_info):
        command = "zstack-ctl start_node && zstack-ctl start_ui"
        #Ansible finish command will lead mn stop, so use ssh native connection to start mn
        (status, output) = commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
                                                    (host_post_info.private_key, host_post_info.host, command))
        if status != 0:
            error("Something wrong on host: %s\n %s" % (host_post_info.host, output))
        logger.debug("[ HOST: %s ] SUCC: shell command: '%s' successfully" % (host_post_info.host, command))

    def install_argparse_arguments(self, parser):
        parser.add_argument('--installer-bin','--bin',
                            help="The new version installer package with absolute path",
                            required=True)
        parser.add_argument('--force', '-F',
                            help="Force upgrade when database upgrading dry-run failed",
                            action='store_true', default=False)

    def run(self, args):
        if os.path.isfile(args.installer_bin) is not True:
            error("Didn't find install package %s" % args.installer_bin)
        create_log(UpgradeMultiManagementNodeCmd.logger_dir, UpgradeMultiManagementNodeCmd.logger_file)
        mn_vo = get_host_list("ManagementNodeVO")
        local_mn_ip = get_default_ip()
        mn_ip_list = []
        # refuse to proceed unless the local node reports Running
        cmd = create_check_mgmt_node_command()
        cmd(False)
        if 'true' not in cmd.stdout:
            error("Local management node status is not Running, can't make sure ZStack status is healthy")
        for mn in mn_vo:
            mn_ip_list.append(mn['hostName'])
        # move the local node to the front so it is handled first
        mn_ip_list.insert(0, mn_ip_list.pop(mn_ip_list.index(local_mn_ip)))
        all_mn_ip = ' '.join(mn_ip_list)
        info(" Will upgrade all 'Running' management nodes: %s" % colored(all_mn_ip,'green'))
        ssh_key = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/id_rsa.pub"
        # NOTE(review): splitting on '.' only works for paths whose sole dot
        # is the '.pub' suffix
        private_key = ssh_key.split('.')[0]
        inventory_file = ctl.zstack_home + "/../../../ansible/hosts"

        # phase 1: stop every management node; unreachable remote nodes abort
        # the whole upgrade
        for mn_ip in mn_ip_list:
            if mn_ip != local_mn_ip:
                host_info = HostPostInfo()
                host_info.host = mn_ip
                host_info.private_key = private_key
                host_info.host_inventory = inventory_file
                host_reachable = check_host_reachable(host_info, True)
                if host_reachable is True:
                    spinner_info = SpinnerInfo()
                    spinner_info.output = "Stop remote management node %s" % mn_ip
                    spinner_info.name = "stop_%s" % mn_ip
                    SpinnerInfo.spinner_status['stop_%s' % mn_ip] = False
                    SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
                    SpinnerInfo.spinner_status['stop_%s' % mn_ip] = True
                    ZstackSpinner(spinner_info)
                    command = "zstack-ctl stop_node"
                    run_remote_command(command, host_info)
                else:
                    # running management node will block upgrade process
                    error("Management node %s is unreachable, please sync public key %s to other management nodes" % (mn_ip, ssh_key))
            else:
                spinner_info = SpinnerInfo()
                spinner_info.output = "Stop local management node %s" % mn_ip
                spinner_info.name = "stop_local"
                SpinnerInfo.spinner_status['stop_local'] = False
                SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
                SpinnerInfo.spinner_status['stop_local'] = True
                ZstackSpinner(spinner_info)
                command = "zstack-ctl stop_node"
                shell(command)

        # phase 2: upgrade and restart each node; the local node runs the
        # installer bin, remote nodes get the freshly-installed local war
        for mn_ip in mn_ip_list:
            host_info = HostPostInfo()
            host_info.host = mn_ip
            host_info.private_key = private_key
            host_info.host_inventory = inventory_file
            if mn_ip == local_mn_ip:
                spinner_info = SpinnerInfo()
                spinner_info.output = "Upgrade management node on localhost(%s)" % local_mn_ip
                spinner_info.name = 'upgrade_local'
                SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
                SpinnerInfo.spinner_status['upgrade_local'] = True
                ZstackSpinner(spinner_info)
                # '-F' forwards --force to the installer's db dry-run check
                if args.force is True:
                    shell("rm -rf /tmp/zstack_upgrade.lock && bash %s -u -F" % args.installer_bin)
                else:
                    shell("rm -rf /tmp/zstack_upgrade.lock && bash %s -u" % args.installer_bin)
                spinner_info = SpinnerInfo()
                spinner_info.output = "Start management node on localhost(%s)" % local_mn_ip
                spinner_info.name = 'start'
                SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
                SpinnerInfo.spinner_status['start_local'] = True
                ZstackSpinner(spinner_info)
                shell("zstack-ctl start_node && zstack-ctl start_ui")
            else:
                spinner_info = SpinnerInfo()
                spinner_info.output = "Upgrade management node on host %s" % mn_ip
                spinner_info.name = 'upgrade'
                SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
                SpinnerInfo.spinner_status['upgrade'] = True
                ZstackSpinner(spinner_info)
                war_file = ctl.zstack_home + "/../../../apache-tomcat-7.0.35/webapps/zstack.war"
                ssh_key = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/id_rsa"
                status,output = commands.getstatusoutput("zstack-ctl upgrade_management_node --host %s --ssh-key %s --war-file %s" % (mn_ip, ssh_key, war_file))
                if status != 0:
                    error(output)
                spinner_info = SpinnerInfo()
                spinner_info.output = "Start management node on host %s" % mn_ip
                spinner_info.name = 'start'
                SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
                SpinnerInfo.spinner_status['start'] = True
                ZstackSpinner(spinner_info)
                self.start_mn(host_info)

        # reset spinner state so the last spinner terminates cleanly
        SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
        time.sleep(0.3)
        info(colored("All management nodes upgrade successfully!",'blue'))
class UpgradeDbCmd(Command):
    """zstack-ctl upgrade_db: migrate the 'zstack' database with flyway to
    the schema version shipped with the installed management node."""

    def __init__(self):
        super(UpgradeDbCmd, self).__init__()
        self.name = 'upgrade_db'
        self.description = (
            'upgrade the database from current version to a new version'
        )
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--force', help='bypass management nodes status check.'
                            '\nNOTE: only use it when you know exactly what it does', action='store_true', default=False)
        # NOTE(review): '--no-backup' lacks action='store_true', so argparse
        # expects a value (e.g. '--no-backup 1') and any supplied value is
        # truthy -- confirm before changing, as callers may rely on it
        parser.add_argument('--no-backup', help='do NOT backup the database. If the database is very large and you have manually backup it, using this option will fast the upgrade process. [DEFAULT] false', default=False)
        parser.add_argument('--dry-run', help='Check if db could be upgraded. [DEFAULT] not set', action='store_true', default=False)

    def run(self, args):
        error_if_tool_is_missing('mysqldump')
        error_if_tool_is_missing('mysql')

        # keep only scheme and host:port from the configured url, then force
        # the 'zstack' database name
        db_url = ctl.get_db_url()
        db_url_params = db_url.split('//')
        db_url = db_url_params[0] + '//' + db_url_params[1].split('/')[0]
        if 'zstack' not in db_url:
            db_url = '%s/zstack' % db_url.rstrip('/')

        db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()

        flyway_path = os.path.join(ctl.zstack_home, 'WEB-INF/classes/tools/flyway-3.2.1/flyway')
        if not os.path.exists(flyway_path):
            raise CtlError('cannot find %s. Have you run upgrade_management_node?' % flyway_path)

        upgrading_schema_dir = os.path.join(ctl.zstack_home, 'WEB-INF/classes/db/upgrade/')
        if not os.path.exists(upgrading_schema_dir):
            raise CtlError('cannot find %s. Have you run upgrade_management_node?' % upgrading_schema_dir)

        # refuse to migrate while management nodes run (unless --force)
        ctl.check_if_management_node_has_stopped(args.force)

        if args.dry_run:
            info('Dry run finished. Database could be upgraded. ')
            return True

        def backup_current_database():
            # mysqldump the whole 'zstack' database to a timestamped file
            if args.no_backup:
                return

            info('start to backup the database ...')

            db_backup_path = os.path.join(ctl.USER_ZSTACK_HOME_DIR, 'db_backup', time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()), 'backup.sql')
            shell('mkdir -p %s' % os.path.dirname(db_backup_path))
            if db_password:
                shell('mysqldump -u %s -p%s --host %s --port %s zstack > %s' % (db_user, db_password, db_hostname, db_port, db_backup_path))
            else:
                shell('mysqldump -u %s --host %s --port %s zstack > %s' % (db_user, db_hostname, db_port, db_backup_path))

            info('successfully backup the database to %s' % db_backup_path)

        def create_schema_version_table_if_needed():
            # databases without flyway metadata are baselined at version 0.6
            if db_password:
                out = shell('''mysql -u %s -p%s --host %s --port %s -t zstack -e "show tables like 'schema_version'"''' %
                            (db_user, db_password, db_hostname, db_port))
            else:
                out = shell('''mysql -u %s --host %s --port %s -t zstack -e "show tables like 'schema_version'"''' %
                            (db_user, db_hostname, db_port))

            if 'schema_version' in out:
                return

            info('version table "schema_version" is not existing; initializing a new version table first')

            if db_password:
                shell_no_pipe('bash %s baseline -baselineVersion=0.6 -baselineDescription="0.6 version" -user=%s -password=%s -url=%s' %
                              (flyway_path, db_user, db_password, db_url))
            else:
                shell_no_pipe('bash %s baseline -baselineVersion=0.6 -baselineDescription="0.6 version" -user=%s -url=%s' %
                              (flyway_path, db_user, db_url))

        def migrate():
            # -outOfOrder lets flyway apply skipped older migrations too
            schema_path = 'filesystem:%s' % upgrading_schema_dir

            if db_password:
                shell_no_pipe('bash %s migrate -outOfOrder=true -user=%s -password=%s -url=%s -locations=%s' % (flyway_path, db_user, db_password, db_url, schema_path))
            else:
                shell_no_pipe('bash %s migrate -outOfOrder=true -user=%s -url=%s -locations=%s' % (flyway_path, db_user, db_url, schema_path))

            info('Successfully upgraded the database to the latest version.\n')

        backup_current_database()
        create_schema_version_table_if_needed()
        migrate()
class UpgradeUIDbCmd(Command):
    """zstack-ctl upgrade_ui_db: migrate the 'zstack_ui' database with flyway
    to the schema shipped with the installed zstack-ui."""

    def __init__(self):
        super(UpgradeUIDbCmd, self).__init__()
        self.name = 'upgrade_ui_db'
        self.description = (
            'upgrade the zstack_ui database from current version to a new version'
        )
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--force', help='bypass zstack ui status check.'
                            '\nNOTE: only use it when you know exactly what it does', action='store_true', default=False)
        # NOTE(review): '--no-backup' lacks action='store_true', so argparse
        # expects a value (e.g. '--no-backup 1') and any supplied value is
        # truthy -- confirm before changing, as callers may rely on it
        parser.add_argument('--no-backup', help='do NOT backup the zstack_ui database. If the database is very large and you have manually backup it, using this option will fast the upgrade process. [DEFAULT] false', default=False)
        parser.add_argument('--dry-run', help='Check if zstack_ui database could be upgraded. [DEFAULT] not set', action='store_true', default=False)

    def run(self, args):
        error_if_tool_is_missing('mysqldump')
        error_if_tool_is_missing('mysql')

        # keep only scheme and host:port from the configured url, then force
        # the 'zstack_ui' database name
        db_url = ctl.get_ui_db_url()
        db_url_params = db_url.split('//')
        db_url = db_url_params[0] + '//' + db_url_params[1].split('/')[0]
        if 'zstack_ui' not in db_url:
            db_url = '%s/zstack_ui' % db_url.rstrip('/')

        # True presumably selects the UI database portal -- verify against
        # get_live_mysql_portal's definition
        db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal(True)

        flyway_path = os.path.join(ctl.zstack_home, 'WEB-INF/classes/tools/flyway-3.2.1/flyway')
        if not os.path.exists(flyway_path):
            raise CtlError('cannot find %s. Have you run upgrade_management_node?' % flyway_path)

        upgrading_schema_dir = os.path.join(ctl.ZSTACK_UI_HOME, 'tmp/WEB-INF/classes/db/migration/')
        if not os.path.exists(upgrading_schema_dir):
            raise CtlError('cannot find %s' % upgrading_schema_dir)

        # refuse to migrate while the UI is running (unless --force)
        if not args.force:
            (status, output) = commands.getstatusoutput("zstack-ctl ui_status")
            if status == 0 and 'Running' in output:
                raise CtlError('ZStack UI is still running. Please stop it before upgrade zstack_ui database.')

        if args.dry_run:
            info('Dry run finished. zstack_ui database could be upgraded. ')
            return True

        def backup_current_database():
            # mysqldump the whole 'zstack_ui' database to a timestamped file
            if args.no_backup:
                return

            info('start to backup the zstack_ui database ...')

            db_backup_path = os.path.join(ctl.USER_ZSTACK_HOME_DIR, 'db_backup', time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()), 'ui_backup.sql')
            shell('mkdir -p %s' % os.path.dirname(db_backup_path))
            if db_password:
                shell('mysqldump -u %s -p%s --host %s --port %s zstack_ui > %s' % (db_user, db_password, db_hostname, db_port, db_backup_path))
            else:
                shell('mysqldump -u %s --host %s --port %s zstack_ui > %s' % (db_user, db_hostname, db_port, db_backup_path))

            info('successfully backup the zstack_ui database to %s' % db_backup_path)

        def create_schema_version_table_if_needed():
            # databases without flyway metadata are baselined at version 2.3.1
            if db_password:
                out = shell('''mysql -u %s -p%s --host %s --port %s -t zstack_ui -e "show tables like 'schema_version'"''' %
                            (db_user, db_password, db_hostname, db_port))
            else:
                out = shell('''mysql -u %s --host %s --port %s -t zstack_ui -e "show tables like 'schema_version'"''' %
                            (db_user, db_hostname, db_port))

            if 'schema_version' in out:
                return

            info('version table "schema_version" is not existing; initializing a new version table first')

            if db_password:
                shell_no_pipe('bash %s baseline -baselineVersion=2.3.1 -baselineDescription="2.3.1 version" -user=%s -password=%s -url=%s' %
                              (flyway_path, db_user, db_password, db_url))
            else:
                shell_no_pipe('bash %s baseline -baselineVersion=2.3.1 -baselineDescription="2.3.1 version" -user=%s -url=%s' %
                              (flyway_path, db_user, db_url))

        def migrate():
            # -outOfOrder lets flyway apply skipped older migrations too
            schema_path = 'filesystem:%s' % upgrading_schema_dir

            if db_password:
                shell_no_pipe('bash %s migrate -outOfOrder=true -user=%s -password=%s -url=%s -locations=%s' % (flyway_path, db_user, db_password, db_url, schema_path))
            else:
                shell_no_pipe('bash %s migrate -outOfOrder=true -user=%s -url=%s -locations=%s' % (flyway_path, db_user, db_url, schema_path))

            info('Successfully upgraded the zstack_ui database to the latest version.\n')

        backup_current_database()
        create_schema_version_table_if_needed()
        migrate()
class UpgradeCtlCmd(Command):
    """zstack-ctl upgrade_ctl: reinstall zstack-ctl from a given package into
    a fresh virtualenv, using the local pypi mirror shipped with ZStack."""

    def __init__(self):
        super(UpgradeCtlCmd, self).__init__()
        self.name = 'upgrade_ctl'
        self.description = (
            'upgrade the zstack-ctl to a new version'
        )
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--package', help='the path to the new zstack-ctl package', required=True)

    def run(self, args):
        error_if_tool_is_missing('pip')
        path = expand_path(args.package)
        if not os.path.exists(path):
            raise CtlError('%s not found' % path)

        # offline pypi mirror bundled inside the management node's war
        pypi_path = os.path.join(ctl.zstack_home, "static/pypi/")
        if not os.path.isdir(pypi_path):
            raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % pypi_path)

        # NOTE: this string goes through string.Template in script(), so `$$`
        # is a literal `$` and $pypi_path/$package are substituted below.
        # bug fix: `--trusted-host` requires a HOST argument; it previously
        # read `--trusted-host --ignore-installed`, which made pip treat
        # `--ignore-installed` as the trusted host and dropped the flag.
        install_script = '''set -e
which virtualenv &>/dev/null
if [ $$? != 0 ]; then
    pip install -i file://$pypi_path/simple --trusted-host localhost virtualenv
fi

CTL_VIRENV_PATH=/var/lib/zstack/virtualenv/zstackctl
rm -rf $$CTL_VIRENV_PATH
virtualenv $$CTL_VIRENV_PATH
. $$CTL_VIRENV_PATH/bin/activate

pip install -i file://$pypi_path/simple --trusted-host localhost --ignore-installed $package || exit 1
chmod +x /usr/bin/zstack-ctl
'''
        script(install_script, {"pypi_path": pypi_path, "package": args.package})
        info('successfully upgraded zstack-ctl to %s' % args.package)
class RollbackManagementNodeCmd(Command):
    """zstack-ctl rollback_management_node: restore a previous zstack.war
    (and its zstack.properties) after a failed upgrade, either locally or
    on a remote host through ansible."""

    def __init__(self):
        super(RollbackManagementNodeCmd, self).__init__()
        self.name = "rollback_management_node"
        self.description = "rollback the management node to a previous version if the upgrade fails"
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--host', help='the IP or DNS name of machine to rollback the management node')
        parser.add_argument('--war-file', help='path to zstack.war. A HTTP/HTTPS url or a path to a local zstack.war', required=True)
        parser.add_argument('--debug', help="open Ansible debug option", action="store_true", default=False)
        parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)
        parser.add_argument('--property-file', help="the path to zstack.properties. If omitted, the current zstack.properties will be used", default=None)

    def run(self, args):
        error_if_tool_is_missing('unzip')

        # everything backed up for this rollback goes under a timestamped dir
        rollback_tmp_dir = os.path.join(ctl.USER_ZSTACK_HOME_DIR, 'rollback', time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()))
        shell('mkdir -p %s' % rollback_tmp_dir)

        # a http(s) url means the war must be downloaded rather than copied
        need_download = args.war_file.startswith('http')

        # simple mutable holder shared by the nested helpers below
        class Info(object):
            def __init__(self):
                self.war_path = None
                self.property_file = None

        rollbackinfo = Info()

        def local_rollback():
            def backup_current_zstack():
                # keep a copy of the current installation so this rollback
                # itself can be undone
                info('start to backup the current zstack ...')
                shell('cp -r %s %s' % (ctl.zstack_home, rollback_tmp_dir))
                info('backup %s to %s' % (ctl.zstack_home, rollback_tmp_dir))
                info('successfully backup the current zstack to %s' % os.path.join(rollback_tmp_dir, os.path.basename(ctl.zstack_home)))

            def download_war_if_needed():
                if need_download:
                    rollbackinfo.war_path = os.path.join(rollback_tmp_dir, 'zstack.war')
                    shell_no_pipe('wget --no-check-certificate %s -O %s' % (args.war_file, rollbackinfo.war_path))
                    info('downloaded zstack.war to %s' % rollbackinfo.war_path)
                else:
                    rollbackinfo.war_path = expand_path(args.war_file)
                    if not os.path.exists(rollbackinfo.war_path):
                        raise CtlError('%s not found' % rollbackinfo.war_path)

            def save_property_file_if_needed():
                # use the currently installed zstack.properties unless one
                # was explicitly supplied
                if not args.property_file:
                    ctl.internal_run('save_config', '--save-to %s' % rollback_tmp_dir)
                    rollbackinfo.property_file = os.path.join(rollback_tmp_dir, 'zstack.properties')
                else:
                    rollbackinfo.property_file = args.property_file
                    if not os.path.exists(rollbackinfo.property_file):
                        raise CtlError('%s not found' % rollbackinfo.property_file)

            def stop_node():
                info('start to stop the management node ...')
                ctl.internal_run('stop_node')

            def rollback():
                # wipe the current install and unpack the old war in its place
                info('start to rollback the management node ...')
                shell('rm -rf %s' % ctl.zstack_home)
                shell('unzip %s -d %s' % (rollbackinfo.war_path, ctl.zstack_home))

            def restore_config():
                info('restoring the zstack.properties ...')
                ctl.internal_run('restore_config', '--restore-from %s' % rollbackinfo.property_file)

            def install_tools():
                # reinstall zstack-cli/zstack-ctl matching the rolled-back war
                info('rollback zstack-cli, zstack-ctl to the previous version. This may cost several minutes ...')
                install_script = os.path.join(ctl.zstack_home, "WEB-INF/classes/tools/install.sh")
                if not os.path.isfile(install_script):
                    raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % install_script)
                shell("bash %s zstack-cli" % install_script)
                shell("bash %s zstack-ctl" % install_script)
                info('successfully upgraded zstack-cli, zstack-ctl')

            backup_current_zstack()
            download_war_if_needed()
            save_property_file_if_needed()
            stop_node()
            rollback()
            restore_config()
            install_tools()

            info('----------------------------------------------\n'
                 'Successfully rollback the ZStack management node to a previous version.\n'
                 'We backup the current zstack as follows:\n'
                 '\tzstack.properties: %s\n'
                 '\tzstack folder: %s\n'
                 'Please test your ZStack. If everything is OK and stable, you can manually delete those backup by deleting %s.\n'
                 '-----------------------------------------------\n' %
                 (rollbackinfo.property_file, os.path.join(rollback_tmp_dir, os.path.basename(ctl.zstack_home)), rollback_tmp_dir))

        def remote_rollback():
            error_if_tool_is_missing('wget')

            # when the war is a url the remote host downloads it itself
            # (need_copy='false'); otherwise the local war is copied over
            need_copy = 'true'
            # NOTE(review): rollbackinfo.war_path is still None here when the
            # war file is a local path (it is only set inside local_rollback),
            # so the ansible copy task would get src=None — verify intent
            src_war = rollbackinfo.war_path
            dst_war = '/tmp/zstack.war'
            if need_download:
                need_copy = 'false'
                src_war = args.war_file
                dst_war = args.war_file

            # shell script executed on the remote host via ansible; `$$` is a
            # literal `$` for string.Template
            rollback_script = '''
zstack-ctl rollback_management_node --war-file=$war_file
if [ $$? -ne 0 ]; then
    echo 'failed to rollback the remote management node'
    exit 1
fi

if [ "$need_copy" == "true" ]; then
    rm -f $war_file
fi
'''
            t = string.Template(rollback_script)
            rollback_script = t.substitute({
                'war_file': dst_war,
                'need_copy': need_copy
            })

            fd, rollback_script_path = tempfile.mkstemp(suffix='.sh')
            # NOTE(review): the file object is never explicitly closed; the
            # write is flushed on GC in CPython — consider a with-statement
            os.fdopen(fd, 'w').write(rollback_script)

            def cleanup_rollback_script():
                os.remove(rollback_script_path)
            self.install_cleanup_routine(cleanup_rollback_script)

            # ansible playbook: optionally copy the war, then run the script
            # (this local deliberately shadows the imported yaml module)
            yaml = '''---
- hosts: $host
  remote_user: root
  vars:
    need_copy: "$need_copy"
  tasks:
    - name: copy zstack.war to remote
      copy: src=$src_war dest=$dst_war
      when: need_copy == 'true'
    - name: rollback the management node
      script: $rollback_script
      register: output
      ignore_errors: yes
    - name: failure
      fail: msg="failed to rollback the remote management node. {{ output.stdout }} {{ output.stderr }}"
      when: output.rc != 0
'''
            t = string.Template(yaml)
            yaml = t.substitute({
                "src_war": src_war,
                "dst_war": dst_war,
                "host": args.host,
                "need_copy": need_copy,
                "rollback_script": rollback_script_path
            })

            info('start to rollback the remote management node; the process may cost several minutes ...')
            ansible(yaml, args.host, args.debug, ssh_key=args.ssh_key)
            info('successfully rollback the remote management node')

        # --host selects the remote path; otherwise roll back this machine
        if args.host:
            remote_rollback()
        else:
            local_rollback()
class RollbackDatabaseCmd(Command):
    """zstack-ctl rollback_db: restore the zstack database from a dump file
    taken before an upgrade."""

    def __init__(self):
        super(RollbackDatabaseCmd, self).__init__()
        self.name = 'rollback_db'
        self.description = "rollback the database to the previous version if the upgrade fails"
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--db-dump', help="the previous database dump file", required=True)
        parser.add_argument('--root-password', help="the password for mysql root user. [DEFAULT] empty password")
        parser.add_argument('--force', help='bypass management nodes status check.'
                            '\nNOTE: only use it when you know exactly what it does', action='store_true', default=False)

    def run(self, args):
        error_if_tool_is_missing('mysql')
        ctl.check_if_management_node_has_stopped(args.force)

        if not os.path.exists(args.db_dump):
            raise CtlError('%s not found' % args.db_dump)

        host, port, _, _ = ctl.get_live_mysql_portal()

        # probe the server first so a wrong root password fails fast
        root_opt = ('-p%s ' % args.root_password) if args.root_password else ''
        probe = ShellCmd('mysql -u root %s--host %s --port %s -e "select 1"' % (root_opt, host, port))
        probe(False)
        if probe.return_code != 0:
            error_not_exit('failed to test the mysql server. You may have provided a wrong password of the root user. Please use --root-password to provide the correct password')
            probe.raise_error()

        info('start to rollback the database ...')
        shell('mysql -u root %s--host %s --port %s -t zstack < %s' % (root_opt, host, port, args.db_dump))
        info('successfully rollback the database to the dump file %s' % args.db_dump)
# For UI 1.x
class StopDashboardCmd(Command):
    """Stop the 1.x dashboard UI server on the local or a remote host."""

    def __init__(self):
        super(StopDashboardCmd, self).__init__()
        self.name = 'stop_ui'
        self.description = "stop UI server on the local or remote host"
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--host', help="UI server IP. [DEFAULT] localhost", default='localhost')

    def _remote_stop(self, host):
        ssh_run_no_pipe(host, '/etc/init.d/zstack-dashboard stop')

    def run(self, args):
        if args.host != 'localhost':
            self._remote_stop(args.host)
            return

        pidfile = '/var/run/zstack/zstack-dashboard.pid'
        if os.path.exists(pidfile):
            # politely kill the recorded pid first; ignore failures (stale pid)
            with open(pidfile, 'r') as pid_fd:
                recorded_pid = pid_fd.readline().strip(' \t\n\r')
                shell('kill %s >/dev/null 2>&1' % recorded_pid, is_exception=False)

        # then force-kill every remaining dashboard process, one at a time
        while True:
            leftover = find_process_by_cmdline('zstack_dashboard')
            if not leftover:
                break
            shell('kill -9 %s >/dev/null 2>&1' % leftover)

        info('successfully stopped the UI server')
# For UI 2.0
class StopUiCmd(Command):
    """Stop the 2.0 UI server on the local or a remote host."""

    def __init__(self):
        super(StopUiCmd, self).__init__()
        self.name = 'stop_ui'
        self.description = "stop UI server on the local or remote host"
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--host', help="UI server IP. [DEFAULT] localhost", default='localhost')

    def _remote_stop(self, host):
        ssh_run_no_pipe(host, '/etc/init.d/zstack-ui stop')

    def run(self, args):
        if args.host != 'localhost':
            self._remote_stop(args.host)
            return

        pidfile = '/var/run/zstack/zstack-ui.pid'
        portfile = '/var/run/zstack/zstack-ui.port'
        if os.path.exists(pidfile):
            # politely kill the recorded pid first; failures ignored (stale file)
            with open(pidfile, 'r') as pid_fd:
                recorded_pid = pid_fd.readline().strip(' \t\n\r')
                shell('kill %s >/dev/null 2>&1' % recorded_pid, is_exception=False)

        # force-kill any UI process still alive, then drop the pid/port files
        while True:
            leftover = find_process_by_cmdline('zstack-ui')
            if not leftover:
                break
            shell('kill -9 %s >/dev/null 2>&1' % leftover)

        shell('rm -f %s' % pidfile)
        shell('rm -f %s' % portfile)

        info('successfully stopped the UI server')
# For VDI UI 2.1
class StopVDIUiCmd(Command):
    """Stop the VDI UI server on the local host."""

    def __init__(self):
        super(StopVDIUiCmd, self).__init__()
        self.name = 'stop_vdi'
        self.description = "stop VDI server on the local host"
        ctl.register_command(self)

    def run(self, args):
        pidfile = '/var/run/zstack/zstack-vdi.pid'
        if os.path.exists(pidfile):
            # politely kill the recorded pid first; ignore failures (stale pid)
            with open(pidfile, 'r') as pid_fd:
                recorded_pid = pid_fd.readline().strip(' \t\n\r')
                shell('kill %s >/dev/null 2>&1' % recorded_pid, is_exception=False)

        # force-kill every remaining VDI process, one at a time
        while True:
            leftover = find_process_by_cmdline('zstack-vdi')
            if not leftover:
                break
            shell('kill -9 %s >/dev/null 2>&1' % leftover)

        info('successfully stopped the VDI server')
# For UI 1.x
class DashboardStatusCmd(Command):
    """zstack-ctl ui_status (UI 1.x): report Running/Zombie/Stopped for the
    dashboard server, locally or on a remote host."""

    def __init__(self):
        super(DashboardStatusCmd, self).__init__()
        self.name = "ui_status"
        self.description = "check the UI server status on the local or remote host."
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--host', help="UI server IP. [DEFAULT] localhost", default='localhost')
        parser.add_argument('--quiet', '-q', help='Do not log this action.', action='store_true', default=False)

    def _remote_status(self, host):
        # delegate to the init script on the remote host
        cmd = '/etc/init.d/zstack-dashboard status'
        ssh_run_no_pipe(host, cmd)

    def run(self, args):
        self.quiet = args.quiet
        if args.host != 'localhost':
            self._remote_status(args.host)
            return

        ha_info_file = '/var/lib/zstack/ha/ha.yaml'
        pidfile = '/var/run/zstack/zstack-dashboard.pid'
        portfile = '/var/run/zstack/zstack-dashboard.port'
        if os.path.exists(pidfile):
            with open(pidfile, 'r') as fd:
                pid = fd.readline()
                pid = pid.strip(' \t\n\r')
                # is the recorded pid actually alive?
                check_pid_cmd = ShellCmd('ps -p %s > /dev/null' % pid)
                check_pid_cmd(is_exception=False)
                if check_pid_cmd.return_code == 0:
                    # HA setup: report through the virtual IP (the HA frontend
                    # port 8888 is hard-coded here)
                    if os.path.exists(ha_info_file):
                        with open(ha_info_file, 'r') as fd2:
                            ha_conf = yaml.load(fd2)
                            if check_ip_port(ha_conf['vip'], 8888):
                                info('UI status: %s [PID:%s] http://%s:8888' % (colored('Running', 'green'), pid, ha_conf['vip']))
                            else:
                                info('UI status: %s' % colored('Unknown', 'yellow'))
                            return
                    default_ip = get_default_ip()
                    if not default_ip:
                        info('UI status: %s [PID:%s]' % (colored('Running', 'green'), pid))
                    else:
                        # listen port persisted by start_ui; default 5000
                        if os.path.exists(portfile):
                            with open(portfile, 'r') as fd2:
                                port = fd2.readline()
                                port = port.strip(' \t\n\r')
                        else:
                            port = 5000
                        info('UI status: %s [PID:%s] http://%s:%s' % (colored('Running', 'green'), pid, default_ip, port))
                    return

        # pid file missing or stale: distinguish a zombie (process alive but
        # untracked) from a fully stopped server
        pid = find_process_by_cmdline('zstack_dashboard')
        if pid:
            info('UI status: %s [PID: %s]' % (colored('Zombie', 'yellow'), pid))
        else:
            info('UI status: %s [PID: %s]' % (colored('Stopped', 'red'), pid))
# For UI 2.0
class UiStatusCmd(Command):
    """zstack-ctl ui_status (UI 2.0): probe the UI's health endpoint and
    report Running/Starting/Zombie/Stopped/Unknown."""

    def __init__(self):
        super(UiStatusCmd, self).__init__()
        self.name = "ui_status"
        self.description = "check the UI server status on the local or remote host."
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--host', help="UI server IP. [DEFAULT] localhost", default='localhost')
        parser.add_argument('--quiet', '-q', help='Do not log this action.', action='store_true', default=False)

    def _remote_status(self, host):
        # run this same command over ssh on the remote host
        shell_no_pipe('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "/usr/bin/zstack-ctl ui_status"' % host)

    def run(self, args):
        self.quiet = args.quiet
        if args.host != 'localhost':
            self._remote_status(args.host)
            return

        # no need to consider ha because it's not supported any more
        #ha_info_file = '/var/lib/zstack/ha/ha.yaml'
        pidfile = '/var/run/zstack/zstack-ui.pid'
        portfile = '/var/run/zstack/zstack-ui.port'

        # listen port persisted by start_ui; default 5000
        if os.path.exists(portfile):
            with open(portfile, 'r') as fd2:
                port = fd2.readline()
                port = port.strip(' \t\n\r')
        else:
            port = 5000

        def write_status(status):
            info('UI status: %s' % status)

        pid = ''
        output = ''
        if os.path.exists(pidfile):
            with open(pidfile, 'r') as fd:
                pid = fd.readline()
                pid = pid.strip(' \t\n\r')
                # `ps <pid>` output includes the command line, which reveals
                # whether the server was started with SSL enabled
                check_pid_cmd = ShellCmd('ps %s' % pid)
                output = check_pid_cmd(is_exception=False)

        cmd = create_check_ui_status_command(ui_port=port, if_https='--ssl.enabled=true' in output)
        if not cmd:
            write_status('cannot detect status, no wget and curl installed')
            return

        cmd(False)
        if cmd.return_code != 0:
            # NOTE(review): `and` binds tighter than `or`, so this evaluates
            # as `cmd.stdout or ('Failed' in cmd.stdout and pid)` — possibly
            # not the intended grouping; confirm before changing
            if cmd.stdout or 'Failed' in cmd.stdout and pid:
                write_status('Starting, should be ready in a few seconds')
            elif pid:
                write_status(
                    '%s, the ui seems to become zombie as it stops responding APIs but the '
                    'process(PID: %s) is still running. Please stop the node using zstack-ctl stop_ui' %
                    (colored('Zombie', 'yellow'), pid))
            else:
                write_status(colored('Stopped', 'red'))
            return False
        elif 'UP' in cmd.stdout:
            # health endpoint answered UP: report the reachable URL
            default_ip = get_default_ip()
            if not default_ip:
                info('UI status: %s [PID:%s]' % (colored('Running', 'green'), pid))
            else:
                http = 'https' if '--ssl.enabled=true' in output else 'http'
                info('UI status: %s [PID:%s] %s://%s:%s' % (
                    colored('Running', 'green'), pid, http, default_ip, port))
        else:
            write_status(colored('Unknown', 'yellow'))
# For VDI UI 2.1
class VDIUiStatusCmd(Command):
    """zstack-ctl vdi_status: report Running/Zombie/Stopped for the local
    VDI UI server."""

    def __init__(self):
        super(VDIUiStatusCmd, self).__init__()
        self.name = "vdi_status"
        self.description = "check the VDI server status on the local host."
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--quiet', '-q', help='Do not log this action.', action='store_true', default=False)

    def run(self, args):
        self.quiet = args.quiet
        pidfile = '/var/run/zstack/zstack-vdi.pid'
        portfile = '/var/run/zstack/zstack-vdi.port'
        port = 9000  # default VDI port when no port file was written
        if os.path.exists(pidfile):
            with open(pidfile, 'r') as fd:
                pid = fd.readline()
                pid = pid.strip(' \t\n\r')
                # is the recorded pid actually alive?
                check_pid_cmd = ShellCmd('ps -p %s > /dev/null' % pid)
                check_pid_cmd(is_exception=False)
                if check_pid_cmd.return_code == 0:
                    default_ip = get_default_ip()
                    if not default_ip:
                        # bug fix: this branch previously printed a URL built
                        # from default_ip=None ("http://None:9000") under the
                        # inconsistent label 'VDI status'; omit the URL like
                        # the other status commands do
                        info('VDI UI status: %s [PID:%s]' % (colored('Running', 'green'), pid))
                    else:
                        if os.path.exists(portfile):
                            with open(portfile, 'r') as fd2:
                                port = fd2.readline()
                                port = port.strip(' \t\n\r')
                        info('VDI UI status: %s [PID:%s] http://%s:%s' % (colored('Running', 'green'), pid, default_ip, port))
                    return

        # pid file missing or stale: zombie vs fully stopped
        pid = find_process_by_cmdline('zstack-vdi')
        if pid:
            info('VDI UI status: %s [PID: %s]' % (colored('Zombie', 'yellow'), pid))
        else:
            info('VDI UI status: %s [PID: %s]' % (colored('Stopped', 'red'), pid))
def mysql(cmd):
    """Execute the SQL statement *cmd* against the local zstack database via
    the mysql CLI and return its stripped stdout."""
    (host, port, _user, password) = ctl.get_live_mysql_portal()
    password_opt = "-p" + password if password else ""
    # omit --host for local addresses so the client can use the local conn
    if host in ("localhost", "127.0.0.1") or host in RestoreMysqlCmd.all_local_ip:
        host_opt = ""
    else:
        host_opt = "--host %s" % host
    command = 'mysql -uzstack %s -P %s %s zstack -e "%s"' % (password_opt, port, host_opt, cmd)
    return shell(command).strip()
class ShowSessionCmd(Command):
    """zstack-ctl show_session_list: print session counts per account."""

    def __init__(self):
        super(ShowSessionCmd, self).__init__()
        self.name = "show_session_list"
        self.description = "show user session list"
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--account', '-c', help='Show the designated account session lists')

    def run(self, args):
        query = "select a.name, count(1) from AccountVO a, SessionVO s where s.accountUuid = a.uuid group by a.name"
        result = mysql(query)
        if result is None:
            return
        lines = result.splitlines()
        info("account sessions")
        info("---------------")
        total = 0
        # lines[0] is the column header row from the mysql client
        for line in lines[1:]:
            fields = line.split()
            # with --account, only the matching account's row is shown/counted
            if args.account is not None and args.account != fields[0]:
                continue
            info(line)
            total = int(fields[1]) + total
        info("---------------")
        info("total %d" % total)
class DropSessionCmd(Command):
    """zstack-ctl drop_account_session: delete sessions for one account, or
    (with --all) for every non-admin account."""

    def __init__(self):
        super(DropSessionCmd, self).__init__()
        self.name = "drop_account_session"
        self.description = "drop account session"
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--all', '-a', help='Drop all sessions except which belong to admin account', action='store_true', default=False)
        parser.add_argument('--account', '-c', help='Drop the designated account sessions')

    def run(self, args):
        # without --all, an account name is mandatory; silently do nothing
        if not args.all and args.account is None:
            return
        if args.all:
            count_sql = "select count(1) from SessionVO where accountUuid not in (select uuid from AccountVO where type='SystemAdmin')"
            delete_sql = "delete from SessionVO where accountUuid not in (select uuid from AccountVO where type='SystemAdmin')"
        else:
            count_sql = ("select count(1) from SessionVO where accountUuid = (select distinct(a.uuid) from AccountVO a, (select * from SessionVO)"
                         " as s where s.accountUuid = a.uuid and a.name='%s')" % args.account)
            delete_sql = ("delete from SessionVO where accountUuid = (select distinct(a.uuid) from AccountVO a, (select * from SessionVO)"
                          " as s where s.accountUuid = a.uuid and a.name='%s')" % args.account)
        # rows[0] is the header; rows[1] the count value
        rows = mysql(count_sql).splitlines()
        if rows is not None and len(rows) > 0 and int(rows[1]) > 0:
            mysql(delete_sql)
            info("drop %d sessions totally" % int(rows[1]))
        else:
            info("drop 0 session")
class InstallLicenseCmd(Command):
    """zstack-ctl install_license: copy the license (and optional private
    key) into /var/lib/zstack/license owned by the zstack user."""

    def __init__(self):
        super(InstallLicenseCmd, self).__init__()
        self.name = "install_license"
        self.description = "install zstack license"
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--license', '-f', help="path to the license file", required=True)
        parser.add_argument('--prikey', help="[OPTIONAL] the path to the private key used to generate license request")

    def run(self, args):
        license_path = expand_path(args.license)
        if not os.path.isfile(license_path):
            raise CtlError('cannot find the license file at %s' % args.license)

        key_path = None
        if args.prikey:
            key_path = expand_path(args.prikey)
            if not os.path.isfile(key_path):
                raise CtlError('cannot find the private key file at %s' % args.prikey)

        license_folder = '/var/lib/zstack/license'
        shell('mkdir -p %s' % license_folder)
        shell('chown zstack:zstack %s' % license_folder)
        # `yes |` forces cp to overwrite any existing file without prompting
        shell('yes | cp %s %s/license.txt' % (license_path, license_folder))
        shell('chown zstack:zstack %s/license.txt' % license_folder)
        info("successfully installed the license file to %s/license.txt" % license_folder)

        if key_path:
            shell('yes | cp %s %s/pri.key' % (key_path, license_folder))
            shell('chown zstack:zstack %s/pri.key' % license_folder)
            info("successfully installed the private key file to %s/pri.key" % license_folder)
# For UI 1.x
class StartDashboardCmd(Command):
    """zstack-ctl start_ui (UI 1.x): start the dashboard server locally or
    on a remote host.

    The dashboard talks to the management node through RabbitMQ; the broker
    list is assembled from UI.vip.* / CloudBus.serverIp.* properties.
    """
    PID_FILE = '/var/run/zstack/zstack-dashboard.pid'

    def __init__(self):
        super(StartDashboardCmd, self).__init__()
        self.name = "start_ui"
        self.description = "start UI server on the local or remote host"
        ctl.register_command(self)
        # make sure the runtime directories exist before any start attempt
        if not os.path.exists(os.path.dirname(self.PID_FILE)):
            shell("mkdir -p %s" % os.path.dirname(self.PID_FILE))
            shell("mkdir -p /var/log/zstack")

    def install_argparse_arguments(self, parser):
        parser.add_argument('--host', help="UI server IP. [DEFAULT] localhost", default='localhost')
        parser.add_argument('--port', help="UI server port. [DEFAULT] 5000", default='5000')

    def _remote_start(self, host, params):
        # delegate to the init script on the remote host
        cmd = '/etc/init.d/zstack-dashboard start --rabbitmq %s' % params
        ssh_run_no_pipe(host, cmd)
        info('successfully start the UI server on the remote host[%s]' % host)

    def _check_status(self, port):
        """Return True when it is OK to start a new server (after killing any
        zombie), False when a healthy server is already running."""
        if os.path.exists(self.PID_FILE):
            with open(self.PID_FILE, 'r') as fd:
                pid = fd.readline()
                pid = pid.strip(' \t\n\r')
                check_pid_cmd = ShellCmd('ps -p %s > /dev/null' % pid)
                check_pid_cmd(is_exception=False)
                if check_pid_cmd.return_code == 0:
                    default_ip = get_default_ip()
                    if not default_ip:
                        info('UI server is still running[PID:%s]' % pid)
                    else:
                        info('UI server is still running[PID:%s], http://%s:%s' % (pid, default_ip, port))
                    return False
        # pid file stale or absent: kill any process matching the cmdline
        pid = find_process_by_cmdline('zstack_dashboard')
        if pid:
            info('found a zombie UI server[PID:%s], kill it and start a new one' % pid)
            shell('kill -9 %s > /dev/null' % pid)
        return True

    def run(self, args):
        # collect RabbitMQ endpoints: UI.vip.* overrides CloudBus.serverIp.*
        ips = ctl.read_property_list("UI.vip.")
        if not ips:
            ips = ctl.read_property_list("CloudBus.serverIp.")
        if not ips:
            raise CtlError('no RabbitMQ IPs found in %s. The IPs should be configured as CloudBus.serverIp.0, CloudBus.serverIp.1 ... CloudBus.serverIp.N' % ctl.properties_file_path)
        ips = [v for k, v in ips]

        # credentials must be configured in pairs (or not at all)
        username = ctl.read_property("CloudBus.rabbitmqUsername")
        password = ctl.read_property("CloudBus.rabbitmqPassword")
        if username and not password:
            raise CtlError('CloudBus.rabbitmqUsername is configured but CloudBus.rabbitmqPassword is not. They must be both set or not set. Check %s' % ctl.properties_file_path)
        if not username and password:
            raise CtlError('CloudBus.rabbitmqPassword is configured but CloudBus.rabbitmqUsername is not. They must be both set or not set. Check %s' % ctl.properties_file_path)
        if username and password:
            urls = ["%s:%s@%s" % (username, password, ip) for ip in ips]
        else:
            urls = ips
        param = ','.join(urls)

        if args.host != 'localhost':
            self._remote_start(args.host, param)
            return

        virtualenv = '/var/lib/zstack/virtualenv/zstack-dashboard'
        if not os.path.exists(virtualenv):
            raise CtlError('%s not found. Are you sure the UI server is installed on %s?' % (virtualenv, args.host))

        if not self._check_status(args.port):
            return

        # open the firewall for the UI port.
        # bug fix: the insert rule previously hard-coded `--dport 5000`, so a
        # non-default --port was checked for but never actually opened
        distro = platform.dist()[0]
        if distro == 'centos':
            shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT && service iptables save)' % (args.port, args.port))
        elif distro == 'Ubuntu':
            shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT && /etc/init.d/iptables-persistent save)' % (args.port, args.port))
        else:
            shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT ' % (args.port, args.port))

        # launch the dashboard inside its virtualenv, detached from this shell
        scmd = '. %s/bin/activate\nZSTACK_DASHBOARD_PORT=%s nohup python -c "from zstack_dashboard import web; web.main()" --rabbitmq %s >/var/log/zstack/zstack-dashboard.log 2>&1 </dev/null &' % (virtualenv, args.port, param)
        script(scmd, no_pipe=True)

        # poll until the new process shows up, then record its pid
        @loop_until_timeout(5, 0.5)
        def write_pid():
            pid = find_process_by_cmdline('zstack_dashboard')
            if pid:
                with open(self.PID_FILE, 'w') as fd:
                    fd.write(str(pid))
                return True
            else:
                return False

        write_pid()
        pid = find_process_by_cmdline('zstack_dashboard')
        if not pid:
            info('fail to start UI server on the local host. Use zstack-ctl start_ui to restart it. zstack UI log could be found in /var/log/zstack/zstack-dashboard.log')
            return False

        default_ip = get_default_ip()
        if not default_ip:
            info('successfully started UI server on the local host, PID[%s]' % pid)
        else:
            info('successfully started UI server on the local host, PID[%s], http://%s:%s' % (pid, default_ip, args.port))

        # remember the port for ui_status / stop_ui
        os.system('mkdir -p /var/run/zstack/')
        with open('/var/run/zstack/zstack-dashboard.port', 'w') as fd:
            fd.write(args.port)
# For UI 2.0
class StartUiCmd(Command):
PID_FILE = '/var/run/zstack/zstack-ui.pid'
PORT_FILE = '/var/run/zstack/zstack-ui.port'
    def __init__(self):
        """Register the 'start_ui' command and ensure the pid-file directory exists."""
        super(StartUiCmd, self).__init__()
        self.name = "start_ui"
        self.description = "start UI server on the local or remote host"
        ctl.register_command(self)
        if not os.path.exists(os.path.dirname(self.PID_FILE)):
            shell("mkdir -p %s" % os.path.dirname(self.PID_FILE))
def install_argparse_arguments(self, parser):
ui_logging_path = os.path.normpath(os.path.join(ctl.zstack_home, "../../logs/"))
parser.add_argument('--host', help="UI server IP. [DEFAULT] localhost", default='localhost')
parser.add_argument('--mn-host', help="ZStack Management Host IP.")
parser.add_argument('--mn-port', help="ZStack Management Host port.")
parser.add_argument('--webhook-host', help="Webhook Host IP.")
parser.add_argument('--webhook-port', help="Webhook Host port.")
parser.add_argument('--server-port', help="UI server port.")
parser.add_argument('--log', help="UI log folder.")
parser.add_argument('--timeout', help='Wait for ZStack UI startup timeout, default is 120 seconds.',
default=120)
# arguments for https
parser.add_argument('--enable-ssl', help="Enable HTTPS for ZStack UI.", action="store_true", default=False)
parser.add_argument('--ssl-keyalias', help="HTTPS SSL KeyAlias.")
parser.add_argument('--ssl-keystore', help="HTTPS SSL KeyStore Path.")
parser.add_argument('--ssl-keystore-type', help="HTTPS SSL KeyStore Type (PKCS12/JKS).")
parser.add_argument('--ssl-keystore-password', help="HTTPS SSL KeyStore Password.")
# arguments for ui_db
parser.add_argument('--db-url', help="zstack_ui database jdbc url")
parser.add_argument('--db-username', help="zstack_ui database username")
parser.add_argument('--db-password', help="zstack_ui database password")
    def _remote_start(self, host, mn_host, mn_port, webhook_host, webhook_port, server_port, log, enable_ssl, ssl_keyalias, ssl_keystore, ssl_keystore_type, ssl_keystore_password, db_url, db_username, db_password):
        """Start the UI on *host* by invoking its init script over SSH,
        forwarding every option; the --ssl-* group is only appended when SSL
        is enabled."""
        if enable_ssl:
            cmd = '/etc/init.d/zstack-ui start --mn-host %s --mn-port %s --webhook-host %s --webhook-port %s --server-port %s --log %s --enable-ssl --ssl-keyalias %s --ssl-keystore %s --ssl-keystore-type %s --ssl-keystore-password %s --db-url %s --db-username %s --db-password %s' % (mn_host, mn_port, webhook_host, webhook_port, server_port, log, ssl_keyalias, ssl_keystore, ssl_keystore_type, ssl_keystore_password, db_url, db_username, db_password)
        else:
            cmd = '/etc/init.d/zstack-ui start --mn-host %s --mn-port %s --webhook-host %s --webhook-port %s --server-port %s --log %s --db-url %s --db-username %s --db-password %s' % (mn_host, mn_port, webhook_host, webhook_port, server_port, log, db_url, db_username, db_password)
        ssh_run_no_pipe(host, cmd)
        info('successfully start the UI server on the remote host[%s:%s]' % (host, server_port))
    def _check_status(self):
        """Return True when it is OK to start a new UI (killing any zombie
        zstack-ui.war process first), False when a live server is running."""
        # listen port recorded by the previous start; default 5000
        port = 5000
        if os.path.exists(self.PORT_FILE):
            with open(self.PORT_FILE, 'r') as fd:
                port = fd.readline()
                port = port.strip(' \t\n\r')
        if os.path.exists(self.PID_FILE):
            with open(self.PID_FILE, 'r') as fd:
                pid = fd.readline()
                pid = pid.strip(' \t\n\r')
                # is the recorded pid actually alive?
                check_pid_cmd = ShellCmd('ps -p %s > /dev/null' % pid)
                check_pid_cmd(is_exception=False)
                if check_pid_cmd.return_code == 0:
                    default_ip = get_default_ip()
                    if not default_ip:
                        info('UI server is still running[PID:%s]' % pid)
                    else:
                        info('UI server is still running[PID:%s], http://%s:%s' % (pid, default_ip, port))
                    return False
        # stale/absent pid file: force-kill any leftover war process
        pid = find_process_by_cmdline('zstack-ui.war')
        if pid:
            info('found a zombie UI server[PID:%s], kill it and start a new one' % pid)
            shell('kill -9 %s > /dev/null' % pid)
        return True
def _gen_default_ssl_keystore(self):
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
cert = OpenSSL.crypto.X509()
cert.set_serial_number(0)
cert.get_subject().CN = "localhost"
cert.set_issuer(cert.get_subject())
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(10*365*24*60*60)
cert.set_pubkey(key)
cert.sign(key, 'sha256')
p12 = OpenSSL.crypto.PKCS12()
p12.set_privatekey(key)
p12.set_certificate(cert)
p12.set_friendlyname('zstackui')
open(ctl.ZSTACK_UI_KEYSTORE, 'w').write(p12.export(b'password'))
def _get_db_info(self):
# get default db_url, db_username, db_password etc.
db_url_params = ctl.get_ui_db_url().split('//')
self.db_url = db_url_params[0] + '//' + db_url_params[1].split('/')[0]
if 'zstack_ui' not in self.db_url:
self.db_url = '%s/zstack_ui' % self.db_url.rstrip('/')
_, _, self.db_username, self.db_password = ctl.get_live_mysql_portal(True)
def run(self, args):
    """Start the UI server, locally or on a remote host.

    Order matters throughout: CLI args are backfilled from
    zstack.ui.properties, SSL defaults are applied, the keystore is
    staged, DB settings are resolved, and only then is the server
    launched and health-checked.
    """
    ui_logging_path = os.path.normpath(os.path.join(ctl.zstack_home, "../../logs/"))
    # Remote start: delegate everything over SSH and stop here.
    if args.host != 'localhost':
        self._remote_start(args.host, args.mn_host, args.mn_port, args.webhook_host, args.webhook_port, args.server_port, args.log, args.enable_ssl, args.ssl_keyalias, args.ssl_keystore, args.ssl_keystore_type, args.ssl_keystore_password, args.db_url, args.db_username, args.db_password)
        return
    # init zstack.ui.properties
    ctl.internal_run('config_ui')
    # combine with zstack.ui.properties
    cfg_mn_host = ctl.read_ui_property("mn_host")
    cfg_mn_port = ctl.read_ui_property("mn_port")
    cfg_webhook_host = ctl.read_ui_property("webhook_host")
    cfg_webhook_port = ctl.read_ui_property("webhook_port")
    cfg_server_port = ctl.read_ui_property("server_port")
    cfg_log = ctl.read_ui_property("log")
    cfg_enable_ssl = ctl.read_ui_property("enable_ssl")
    cfg_ssl_keyalias = ctl.read_ui_property("ssl_keyalias")
    cfg_ssl_keystore = ctl.read_ui_property("ssl_keystore")
    cfg_ssl_keystore_type = ctl.read_ui_property("ssl_keystore_type")
    cfg_ssl_keystore_password = ctl.read_ui_property("ssl_keystore_password")
    # CLI arguments win; properties fill in anything left unset.
    if not args.mn_host:
        args.mn_host = cfg_mn_host
    if not args.mn_port:
        args.mn_port = cfg_mn_port
    if not args.webhook_host:
        args.webhook_host = cfg_webhook_host
    if not args.webhook_port:
        args.webhook_port = cfg_webhook_port
    if not args.server_port:
        args.server_port = cfg_server_port
    if not args.log:
        args.log = cfg_log
    if not args.enable_ssl:
        args.enable_ssl = True if cfg_enable_ssl == 'true' else False
    if not args.ssl_keyalias:
        args.ssl_keyalias = cfg_ssl_keyalias
    if not args.ssl_keystore:
        args.ssl_keystore = cfg_ssl_keystore
    if not args.ssl_keystore_type:
        args.ssl_keystore_type = cfg_ssl_keystore_type
    if not args.ssl_keystore_password:
        args.ssl_keystore_password = cfg_ssl_keystore_password
    # create default ssl keystore anyway
    if not os.path.exists(ctl.ZSTACK_UI_KEYSTORE):
        self._gen_default_ssl_keystore()
    # server_port default value is 5443 if enable_ssl is True
    if args.enable_ssl and args.webhook_port == '5000':
        args.webhook_port = '5443'
    if args.enable_ssl and args.server_port == '5000':
        args.server_port = '5443'
    if not os.path.exists(args.ssl_keystore):
        raise CtlError('%s not found.' % args.ssl_keystore)
    # copy args.ssl_keystore to ctl.ZSTACK_UI_KEYSTORE_CP
    if args.ssl_keystore != ctl.ZSTACK_UI_KEYSTORE and args.ssl_keystore != ctl.ZSTACK_UI_KEYSTORE_CP:
        copyfile(args.ssl_keystore, ctl.ZSTACK_UI_KEYSTORE_CP)
        args.ssl_keystore = ctl.ZSTACK_UI_KEYSTORE_CP
    # ui_db: fall back to the live MySQL portal for anything unset/blank
    self._get_db_info()
    if not args.db_url or args.db_url.strip() == '':
        args.db_url = self.db_url
    if not args.db_username or args.db_username.strip() == '':
        args.db_username = self.db_username
    if not args.db_password or args.db_password.strip() == '':
        args.db_password = self.db_password
    shell("mkdir -p %s" % args.log)
    zstackui = ctl.ZSTACK_UI_HOME
    if not os.path.exists(zstackui):
        raise CtlError('%s not found. Are you sure the UI server is installed on %s?' % (zstackui, args.host))
    # ui still running
    if not self._check_status():
        return
    # Open the server and webhook ports in the firewall (idempotent grep-or-insert).
    distro = platform.dist()[0]
    if distro == 'centos':
        shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT && service iptables save)' % (args.server_port, args.server_port))
        shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT && service iptables save)' % (args.webhook_port, args.webhook_port))
    elif distro == 'Ubuntu':
        shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT && /etc/init.d/iptables-persistent save)' % (args.server_port, args.server_port))
        shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT && /etc/init.d/iptables-persistent save)' % (args.webhook_port, args.webhook_port))
    else:
        shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT ' % (args.server_port, args.server_port))
        shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT ' % (args.webhook_port, args.webhook_port))
    # Launch the war as the 'zstack' user, backgrounded, logging to <log>/zstack-ui.log.
    if args.enable_ssl:
        scmd = "runuser -l zstack -c 'LOGGING_PATH=%s java -jar %szstack-ui.war --mn.host=%s --mn.port=%s --webhook.host=%s --webhook.port=%s --server.port=%s --ssl.enabled=true --ssl.keyalias=%s --ssl.keystore=%s --ssl.keystore-type=%s --ssl.keystore-password=%s --db.url=%s --db.username=%s --db.password=%s >>%s/zstack-ui.log 2>&1 &'" % (args.log, zstackui, args.mn_host, args.mn_port, args.webhook_host, args.webhook_port, args.server_port, args.ssl_keyalias, args.ssl_keystore, args.ssl_keystore_type, args.ssl_keystore_password, args.db_url, args.db_username, args.db_password, args.log)
    else:
        scmd = "runuser -l zstack -c 'LOGGING_PATH=%s java -jar %szstack-ui.war --mn.host=%s --mn.port=%s --webhook.host=%s --webhook.port=%s --server.port=%s --db.url=%s --db.username=%s --db.password=%s >>%s/zstack-ui.log 2>&1 &'" % (args.log, zstackui, args.mn_host, args.mn_port, args.webhook_host, args.webhook_port, args.server_port, args.db_url, args.db_username, args.db_password, args.log)
    script(scmd, no_pipe=True)

    # Poll until the new process shows up, then record its PID.
    @loop_until_timeout(5, 0.5)
    def write_pid():
        pid = find_process_by_cmdline('zstack-ui.war')
        if pid:
            with open(self.PID_FILE, 'w') as fd:
                fd.write(str(pid))
            return True
        else:
            return False
    write_pid()
    os.system('mkdir -p /var/run/zstack/')
    with open('/var/run/zstack/zstack-ui.port', 'w') as fd:
        fd.write(args.server_port)
    # args.timeout presumably comes from the argparse setup outside this view — TODO confirm.
    timeout = int(args.timeout)

    # Poll `zstack-ctl ui_status` until it reports "Running" or the timeout expires.
    @loop_until_timeout(timeout)
    def check_ui_status():
        command = 'zstack-ctl ui_status'
        (status, output) = commands.getstatusoutput(command)
        if status != 0:
            return False
        return "Running" in output
    if not check_ui_status():
        # Start failed: tear down and clean the pid/port files.
        info('fail to start UI server on the localhost. Use zstack-ctl start_ui to restart it. zstack UI log could be found in %s/zstack-ui.log' % args.log)
        shell('zstack-ctl stop_ui')
        shell('rm -rf /var/run/zstack/zstack-ui.port')
        shell('rm -rf /var/run/zstack/zstack-ui.pid')
        return False
    pid = find_process_by_cmdline('zstack-ui')
    default_ip = get_default_ip()
    if not default_ip:
        info('successfully started UI server on the local host, PID[%s]' % pid)
    else:
        info('successfully started UI server on the local host, PID[%s], %s://%s:%s' % (pid, 'https' if args.enable_ssl else 'http', default_ip, args.server_port))
# For UI 2.0
class ConfigUiCmd(Command):
    """zstack-ctl 'config_ui' command: read/write zstack.ui.properties.

    Execution order in run(): remote delegation, then seeding of any
    missing properties with defaults, then (optionally) restoring all
    defaults, then applying the explicitly supplied CLI overrides.
    """
    def __init__(self):
        super(ConfigUiCmd, self).__init__()
        self.name = "config_ui"
        self.description = "configure zstack.ui.properties"
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        """Declare all CLI options; every value is optional (None when absent)."""
        ui_logging_path = os.path.normpath(os.path.join(ctl.zstack_home, "../../logs/"))
        parser.add_argument('--host', help='SSH URL, for example, root@192.168.0.10, to set properties in zstack.ui.properties on the remote machine')
        parser.add_argument('--restore', help='restore zstack ui properties to default values', action="store_true", default=False)
        parser.add_argument('--mn-host', help="ZStack Management Host IP. [DEFAULT] 127.0.0.1")
        parser.add_argument('--mn-port', help="ZStack Management Host port. [DEFAULT] 8080")
        parser.add_argument('--webhook-host', help="Webhook Host IP. [DEFAULT] 127.0.0.1")
        parser.add_argument('--webhook-port', help="Webhook Host port. [DEFAULT] 5000")
        parser.add_argument('--server-port', help="UI server port. [DEFAULT] 5000")
        parser.add_argument('--log', help="UI log folder. [DEFAULT] %s" % ui_logging_path)
        # arguments for https
        parser.add_argument('--enable-ssl', help="Enable HTTPS for ZStack UI. [DEFAULT] False")
        parser.add_argument('--ssl-keyalias', help="HTTPS SSL KeyAlias. [DEFAULT] zstackui")
        parser.add_argument('--ssl-keystore', help="HTTPS SSL KeyStore Path. [DEFAULT] %s" % ctl.ZSTACK_UI_KEYSTORE)
        parser.add_argument('--ssl-keystore-type', help="HTTPS SSL KeyStore Type (PKCS12/JKS). [DEFAULT] PKCS12")
        parser.add_argument('--ssl-keystore-password', help="HTTPS SSL KeyStore Password. [DEFAULT] password")
        # arguments for ui_db
        parser.add_argument('--db-url', help="zstack_ui database jdbc url.")
        parser.add_argument('--db-username', help="username of zstack_ui database.")
        parser.add_argument('--db-password', help="password of zstack_ui database.")

    def _configure_remote_node(self, args):
        # Re-invoke this same command over SSH, forwarding the extra CLI arguments.
        shell_no_pipe('ssh %s "/usr/bin/zstack-ctl config_ui %s"' % (args.host, ' '.join(ctl.extra_arguments)))

    def run(self, args):
        """Seed missing properties, optionally restore defaults, then apply overrides."""
        ui_logging_path = os.path.normpath(os.path.join(ctl.zstack_home, "../../logs/"))
        if args.host:
            self._configure_remote_node(args)
            return
        zstackui = ctl.ZSTACK_UI_HOME
        if not os.path.exists(zstackui):
            raise CtlError('%s not found. Are you sure the UI server is installed?' % zstackui)
        # init zstack.ui.properties: write a default for every property not yet set
        if not ctl.read_ui_property("mn_host"):
            ctl.write_ui_property("mn_host", '127.0.0.1')
        if not ctl.read_ui_property("mn_port"):
            ctl.write_ui_property("mn_port", '8080')
        if not ctl.read_ui_property("webhook_host"):
            ctl.write_ui_property("webhook_host", '127.0.0.1')
        if not ctl.read_ui_property("webhook_port"):
            ctl.write_ui_property("webhook_port", '5000')
        if not ctl.read_ui_property("server_port"):
            ctl.write_ui_property("server_port", '5000')
        if not ctl.read_ui_property("log"):
            ctl.write_ui_property("log", ui_logging_path)
        if not ctl.read_ui_property("enable_ssl"):
            ctl.write_ui_property("enable_ssl", 'false')
        if not ctl.read_ui_property("ssl_keyalias"):
            ctl.write_ui_property("ssl_keyalias", 'zstackui')
        if not ctl.read_ui_property("ssl_keystore"):
            ctl.write_ui_property("ssl_keystore", ctl.ZSTACK_UI_KEYSTORE)
        if not ctl.read_ui_property("ssl_keystore_type"):
            ctl.write_ui_property("ssl_keystore_type", 'PKCS12')
        if not ctl.read_ui_property("ssl_keystore_password"):
            ctl.write_ui_property("ssl_keystore_password", 'password')
        if not ctl.read_ui_property("db_url"):
            ctl.write_ui_property("db_url", 'jdbc:mysql://127.0.0.1:3306')
        if not ctl.read_ui_property("db_username"):
            ctl.write_ui_property("db_username", 'zstack_ui')
        if not ctl.read_ui_property("db_password"):
            ctl.write_ui_property("db_password", 'zstack.ui.password')
        # restore to default values (note: db_* properties are NOT restored here)
        if args.restore:
            ctl.write_ui_property("mn_host", '127.0.0.1')
            ctl.write_ui_property("mn_port", '8080')
            ctl.write_ui_property("webhook_host", '127.0.0.1')
            ctl.write_ui_property("webhook_port", '5000')
            ctl.write_ui_property("server_port", '5000')
            ctl.write_ui_property("log", ui_logging_path)
            ctl.write_ui_property("enable_ssl", 'false')
            ctl.write_ui_property("ssl_keyalias", 'zstackui')
            ctl.write_ui_property("ssl_keystore", ctl.ZSTACK_UI_KEYSTORE)
            ctl.write_ui_property("ssl_keystore_type", 'PKCS12')
            ctl.write_ui_property("ssl_keystore_password", 'password')
            return
        # use 5443 instead if enable_ssl: bump the ports off their plain-HTTP default
        if args.enable_ssl and args.enable_ssl.lower() == 'true':
            if args.webhook_port == '5000':
                args.webhook_port = '5443'
            if args.server_port == '5000':
                args.server_port = '5443'
        # copy args.ssl_keystore to ctl.ZSTACK_UI_KEYSTORE_CP
        if args.ssl_keystore and args.ssl_keystore != ctl.ZSTACK_UI_KEYSTORE:
            if not os.path.exists(args.ssl_keystore):
                raise CtlError('%s not found.' % args.ssl_keystore)
            if args.ssl_keystore != ctl.ZSTACK_UI_KEYSTORE_CP:
                copyfile(args.ssl_keystore, ctl.ZSTACK_UI_KEYSTORE_CP)
            args.ssl_keystore = ctl.ZSTACK_UI_KEYSTORE_CP
        # Persist only the values the caller supplied explicitly.
        if args.mn_host:
            ctl.write_ui_property("mn_host", args.mn_host)
        if args.mn_port:
            ctl.write_ui_property("mn_port", args.mn_port)
        if args.webhook_host:
            ctl.write_ui_property("webhook_host", args.webhook_host)
        if args.webhook_port:
            ctl.write_ui_property("webhook_port", args.webhook_port)
        if args.server_port:
            ctl.write_ui_property("server_port", args.server_port)
        if args.log:
            ctl.write_ui_property("log", args.log)
        # https
        if args.enable_ssl:
            ctl.write_ui_property("enable_ssl", args.enable_ssl.lower())
        if args.ssl_keyalias:
            ctl.write_ui_property("ssl_keyalias", args.ssl_keyalias)
        if args.ssl_keystore:
            ctl.write_ui_property("ssl_keystore", args.ssl_keystore)
        if args.ssl_keystore_type:
            ctl.write_ui_property("ssl_keystore_type", args.ssl_keystore_type)
        if args.ssl_keystore_password:
            ctl.write_ui_property("ssl_keystore_password", args.ssl_keystore_password)
        # ui_db
        if args.db_url:
            ctl.write_ui_property("db_url", args.db_url)
        if args.db_username:
            ctl.write_ui_property("db_username", args.db_username)
        if args.db_password:
            ctl.write_ui_property("db_password", args.db_password)
# For UI 2.0
class ShowUiCfgCmd(Command):
    """zstack-ctl 'show_ui_config': dump zstack.ui.properties to stdout."""

    def __init__(self):
        super(ShowUiCfgCmd, self).__init__()
        self.name = "show_ui_config"
        self.description = "a shortcut that prints contents of zstack.ui.properties to screen"
        ctl.register_command(self)

    def run(self, args):
        """Print the properties file, failing fast when the UI is not installed."""
        ui_home = ctl.ZSTACK_UI_HOME
        if not os.path.exists(ui_home):
            raise CtlError('%s not found. Are you sure the UI server is installed?' % ui_home)
        shell_no_pipe('cat %s' % ctl.ui_properties_file_path)
# For VDI PORTAL 2.1
class StartVDIUICmd(Command):
    """zstack-ctl 'start_vdi': launch the VDI portal war on the local host."""
    # Files recording the running server's PID and port.
    PID_FILE = '/var/run/zstack/zstack-vdi.pid'
    PORT_FILE = '/var/run/zstack/zstack-vdi.port'

    def __init__(self):
        super(StartVDIUICmd, self).__init__()
        self.name = "start_vdi"
        self.description = "start VDI UI server on the local host"
        ctl.register_command(self)
        # Ensure /var/run/zstack exists so the PID/port files can be written.
        if not os.path.exists(os.path.dirname(self.PID_FILE)):
            shell("mkdir -p %s" % os.path.dirname(self.PID_FILE))

    def install_argparse_arguments(self, parser):
        ui_logging_path = os.path.normpath(os.path.join(ctl.zstack_home, "../../logs/"))
        parser.add_argument('--mn-port', help="ZStack Management Host port. [DEFAULT] 8080", default='8080')
        parser.add_argument('--webhook-port', help="Webhook Host port. [DEFAULT] 9000", default='9000')
        parser.add_argument('--server-port', help="UI server port. [DEFAULT] 9000", default='9000')
        parser.add_argument('--vdi-path', help="VDI path. [DEFAULT] /opt/zstack-dvd/zstack-vdi.war", default='/opt/zstack-dvd/zstack-vdi.war')
        parser.add_argument('--log', help="UI log folder. [DEFAULT] %s" % ui_logging_path, default=ui_logging_path)

    def _check_status(self):
        """Return True when it is safe to start; kill zombie zstack-vdi first."""
        VDI_UI_PORT = 9000
        if os.path.exists(self.PORT_FILE):
            with open(self.PORT_FILE, 'r') as fd:
                VDI_UI_PORT = fd.readline()
                VDI_UI_PORT = VDI_UI_PORT.strip(' \t\n\r')
        if os.path.exists(self.PID_FILE):
            with open(self.PID_FILE, 'r') as fd:
                pid = fd.readline()
                pid = pid.strip(' \t\n\r')
            # ps -p exits 0 only when the recorded PID is still alive.
            check_pid_cmd = ShellCmd('ps -p %s > /dev/null' % pid)
            check_pid_cmd(is_exception=False)
            if check_pid_cmd.return_code == 0:
                default_ip = get_default_ip()
                if not default_ip:
                    info('VDI UI is still running[PID:%s]' % pid)
                else:
                    info('VDI UI is still running[PID:%s], http://%s:%s' % (pid, default_ip, VDI_UI_PORT))
                return False
        pid = find_process_by_cmdline('zstack-vdi')
        if pid:
            info('found a zombie VDI UI server[PID:%s], kill it and start a new one' % pid)
            shell('kill -9 %s > /dev/null' % pid)
        return True

    def run(self, args):
        """Open firewall ports, launch the VDI war, record PID and port."""
        shell("mkdir -p %s" % args.log)
        zstack_vdi = args.vdi_path
        if not os.path.exists(zstack_vdi):
            raise CtlError('%s not found. Are you sure the VDI UI server is installed ?' % (zstack_vdi))
        # vdi ui still running
        if not self._check_status():
            return
        # Idempotently open server/webhook ports (grep-or-insert per distro).
        distro = platform.dist()[0]
        if distro == 'centos':
            shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT && service iptables save)' % (args.server_port, args.server_port))
            shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT && service iptables save)' % (args.webhook_port, args.webhook_port))
        elif distro == 'Ubuntu':
            shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT && /etc/init.d/iptables-persistent save)' % (args.server_port, args.server_port))
            shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT && /etc/init.d/iptables-persistent save)' % (args.webhook_port, args.webhook_port))
        else:
            shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT ' % (args.server_port, args.server_port))
            shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT ' % (args.webhook_port, args.webhook_port))
        # Launch the war backgrounded as the 'zstack' user.
        scmd = "runuser -l zstack -c 'LOGGING_PATH=%s java -jar -Dmn.port=%s -Dwebhook.port=%s -Dserver.port=%s %s >>%s/zstack-vdi.log 2>&1 &'" % (args.log, args.mn_port, args.webhook_port, args.server_port, args.vdi_path, args.log)
        script(scmd, no_pipe=True)

        # Poll until the new process appears, then record its PID.
        @loop_until_timeout(5, 0.5)
        def write_pid():
            pid = find_process_by_cmdline('zstack-vdi')
            if pid:
                with open(self.PID_FILE, 'w') as fd:
                    fd.write(str(pid))
                return True
            else:
                return False
        write_pid()
        pid = find_process_by_cmdline('zstack-vdi')
        if not pid:
            info('fail to start VDI UI server on the localhost. Use zstack-ctl start_vdi to restart it. zstack VDI portal log could be found in %s/zstack-vdi.log' % args.log)
            return False
        default_ip = get_default_ip()
        if not default_ip:
            info('successfully started VDI UI server on the local host, PID[%s]' % pid)
        else:
            info('successfully started VDI UI server on the local host, PID[%s], http://%s:%s' % (pid, default_ip, args.server_port))
        os.system('mkdir -p /var/run/zstack/')
        with open('/var/run/zstack/zstack-vdi.port', 'w') as fd:
            fd.write(args.server_port)
class ResetAdminPasswordCmd(Command):
    """zstack-ctl 'reset_password': reset the admin account password in the DB."""

    # Account type whose password rows are rewritten.
    SYSTEM_ADMIN_TYPE = 'SystemAdmin'

    def __init__(self):
        super(ResetAdminPasswordCmd, self).__init__()
        self.name = "reset_password"
        self.description = "reset ZStack admin account password, if not set, default is 'password'"
        ctl.register_command(self)

    def install_argparse_arguments(self, parser):
        parser.add_argument('--password', help="the new password of admin. If not set, the default is 'password'")

    def run(self, args):
        """Hash the requested password and update all SystemAdmin accounts."""
        info("start reset password")
        new_password = args.password if args.password is not None else 'password'
        # The stored value is the SHA-512 hex digest; being pure [0-9a-f] it is
        # safe to interpolate into the SQL string below.
        hashed = hashlib.sha512(new_password).hexdigest()
        db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
        query = MySqlCommandLineQuery()
        query.host = db_hostname
        query.port = db_port
        query.user = db_user
        query.password = db_password
        query.table = 'zstack'
        query.sql = "update AccountVO set password='%s' where type='%s'" % (hashed, self.SYSTEM_ADMIN_TYPE)
        query.query()
        info("reset password succeed")
def main():
    """Register every zstack-ctl command, then dispatch via ctl.run().

    Each constructor call registers the command with `ctl` as a side
    effect; the UI command set depends on whether the zstack-ui war is
    shipped with this installation.
    """
    AddManagementNodeCmd()
    BootstrapCmd()
    ChangeIpCmd()
    CollectLogCmd()
    ConfigureCmd()
    DumpMysqlCmd()
    ChangeMysqlPasswordCmd()
    DeployDBCmd()
    DeployUIDBCmd()
    GetEnvironmentVariableCmd()
    InstallHACmd()
    InstallDbCmd()
    InstallRabbitCmd()
    InstallManagementNodeCmd()
    InstallLicenseCmd()
    ShowConfiguration()
    SetEnvironmentVariableCmd()
    RollbackManagementNodeCmd()
    RollbackDatabaseCmd()
    ResetAdminPasswordCmd()
    ResetRabbitCmd()
    RestoreConfigCmd()
    RestartNodeCmd()
    RestoreMysqlCmd()
    RecoverHACmd()
    ShowStatusCmd()
    StartCmd()
    StopCmd()
    SaveConfigCmd()
    StartAllCmd()
    StopAllCmd()
    TailLogCmd()
    UnsetEnvironmentVariableCmd()
    UpgradeManagementNodeCmd()
    UpgradeMultiManagementNodeCmd()
    UpgradeDbCmd()
    UpgradeUIDbCmd()
    UpgradeCtlCmd()
    UpgradeHACmd()
    StartVDIUICmd()
    StopVDIUiCmd()
    VDIUiStatusCmd()
    ShowSessionCmd()
    DropSessionCmd()
    # If tools/zstack-ui.war exists, then install zstack-ui
    # else, install zstack-dashboard
    ctl.locate_zstack_home()
    if os.path.exists(ctl.zstack_home + "/WEB-INF/classes/tools/zstack-ui.war"):
        InstallZstackUiCmd()
        StartUiCmd()
        StopUiCmd()
        UiStatusCmd()
        ConfigUiCmd()
        ShowUiCfgCmd()
    else:
        InstallDashboardCmd()
        StartDashboardCmd()
        StopDashboardCmd()
        DashboardStatusCmd()
    try:
        ctl.run()
    except CtlError as e:
        # Full traceback only in verbose mode; always print the message and exit.
        if ctl.verbose:
            error_not_exit(traceback.format_exc())
        error(str(e))
# Script entry point.
if __name__ == '__main__':
    main()
| live4thee/zstack-utility | zstackctl/zstackctl/ctl.py | Python | apache-2.0 | 375,657 | [
"VisIt"
] | 40230361ad8eeb302fa39de35c8ec5b86684b72706e19a942d726f2308c9b6f3 |
import numpy as np
import subprocess as subp
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import mayavi.mlab as mlab
from matplotlib.patches import Ellipse
import ipdb, re
import os.path
import time
from js.utils.plot.colors import colorScheme
from js.utils.config import Config2String
from vpCluster.manifold.karcherMean import karcherMeanSphere_propper
from vpCluster.manifold.sphere import Sphere
import matplotlib as mpl
# Two alternative figure configurations; only the later ("paper") block is
# effective because it overwrites the rc settings and figSize of the
# earlier "poster" block.
#poster
mpl.rc('font',size=35)
mpl.rc('lines',linewidth=4.)
figSize = (12, 14)
#paper
mpl.rc('font',size=25)
mpl.rc('lines',linewidth=4.)
figSize = (14, 6.5)
def mutualInfo(z, zGt):
    """Empirical mutual information (in nats) between two label assignments.

    z, zGt: 1D integer arrays of cluster labels over the same N points,
    with labels in 0..K-1 / 0..Kgt-1 (empty label pairs contribute 0).

    Fixes: removed the stray Python-2 ``print Kgt, K`` debug statement
    (a syntax error under Python 3 and noise on stdout) and hoisted the
    loop-invariant cluster counts out of the inner loop.
    """
    N = float(z.size)
    Kgt = int(np.max(zGt) + 1)
    K = int(np.max(z) + 1)
    # Per-cluster counts are invariant w.r.t. the other loop; compute once.
    Nks = [(zGt == k).sum() for k in range(Kgt)]
    mi = 0.0
    for j in range(K):
        zj = (z == j)
        Nj = zj.sum()
        for k in range(Kgt):
            Njk = np.logical_and(zj, zGt == k).sum()
            if Njk > 0:
                mi += Njk / N * np.log(N * Njk / (Nj * Nks[k]))
    return mi
def entropy(z):
    """Empirical Shannon entropy (in nats) of a label assignment.

    z: 1D integer array of cluster labels in 0..K-1; empty labels
    contribute nothing to the sum.
    """
    total = float(z.size)
    counts = [(z == k).sum() for k in range(int(np.max(z) + 1))]
    return -sum(c / total * np.log(c / total) for c in counts if c > 0)
# Experiment data selection.  The repeated reassignments below are a manual
# history of datasets tried over time -- only the LAST assignment of
# dataPath (and rootPath) is effective.
rootPath = '../results/dpMM_spherical/expres1/aistatsResubmission/'
dataPath = './rndSphereData.csv';
dataPath = './rndSphereDataIw.csv';
dataPath = './rndSphereDataElipticalCovs.csv';
dataPath = './rndSphereDataElipticalCovs1.csv';
dataPath = './rndSphereDataElipticalCovs2.csv'; # 10k datapoints with 30 classes
dataPath = './rndSphereDataElipticalCovs3.csv'; # 10k datapoints with 30 classes
# for final eval
dataPath = './rndSphereDataElipticalCovs4.csv'; # 10k datapoints with 30 classes less spread
dataPath = './rndSphereDataIwUncertain.csv';
# rebuttal
dataPath = './rndSphereDataNu9D20.csv';
dataPath = './rndSphereNu9D20N30000.csv';
dataPath = './rndSphereDataNu29D20N30000.csv';
dataPath = './rndSphereDataNu25D20N30000.csv';
dataPath = './rndSphereDataNu26D20N30000.csv';
dataPath = './rndSphereDataIwUncertain.csv'; # still works well in showing advantage of DpNiwSphereFull
dataPath = './rndSphereDataElipticalCovs4.csv'; # 10k datapoints with 30 classes less spread
# DP-vMF-means
dataPath = './rndSphereDataElipticalCovs4.csv'; # 10k datapoints with 30 classes less spread
dataPath = './rndSphereDataIwUncertain.csv';
# aistats resubmission
dataPath = './rndSphereDataIwUncertain.csv'; # still works well in showing advantage of DpNiwSphereFull
dataPath = './rndSphereDataNu25D3N30000NonOverLap.csv' # a few anisotropic clusters; used in paper
dataPath = './rndSphereDataNu10D3N30000NonOverLap.csv' # very isotropic
dataPath = './rndSphereminAngle_15.0-K_30-N_30000-delta_4.0-nu_3.001-D_3.csv' # used in paper
dataPath = './rndSphereminAngle_15.0-K_30-N_30000-delta_100.0-nu_21.0-D_20.csv'
dataPath = '././rndSphereminAngle_15.0-K_30-N_30000-delta_30.0-nu_21.0-D_20.csv'
dataPath = './rndSphereminAngle_15.0-K_30-N_30000-delta_30.0-nu_21.0-D_20.csv'
dataPath = './rndSphereminAngle_10.0-K_60-N_60000-delta_30.0-nu_21.0-D_20.csv'
dataPath = '././rndSphereminAngle_10.0-K_60-N_60000-delta_25.0-nu_21.0-D_20.csv'
# aistats final
dataPath = './rndSphereDataIwUncertain.csv'; # still works well in showing advantage of DpNiwSphereFull
dataPath = './rndSphereminAngle_15.0-K_30-N_30000-delta_4.0-nu_3.001-D_3.csv' # used in paper
dataPath = './rndSphereDataNu25D3N30000NonOverLap.csv' # a few anisotropic clusters; used in paper
# Load ground-truth labels <data>_gt.lbl next to the dataset.
# NOTE(review): if the file is missing, zGt/Kgt stay undefined and the
# evaluation loop below will raise NameError -- confirm intent.
if os.path.isfile(re.sub('.csv','_gt.lbl',rootPath+dataPath)):
    zGt = np.loadtxt(re.sub('.csv','_gt.lbl',rootPath+dataPath),dtype=int,delimiter=' ')
    Kgt = np.max(zGt)+1
else:
    print "groundtruth not found"
# Experiment configuration.  As with dataPath above, the many `bases`
# reassignments are a history of algorithm subsets tried -- only the LAST
# list is effective.  Base names may carry a '_<K0>' suffix encoding the
# initial number of clusters.
cfg = dict()
bases = ['NiwSphereUnifNoise','spkm','spkmKarcher','NiwSphere','kmeans'];
bases = ['spkm','spkmKarcher','kmeans','NiwSphere'];
bases = ['DpNiw'];
bases = ['spkm'];
bases = ['DpNiwSphereFull','DpNiw','DpNiwTangent','DpNiwSphere'];
bases = ['DpNiwSphereFull'];
bases = ['NiwSphere','DpNiwSphereFull','DpNiwSphere']
bases = ['spkm','spkmKarcher','kmeans','NiwSphere']
bases = ['kmeans','NiwSphere']
bases = ['spkm','spkmKarcher','kmeans','NiwSphere','DpNiwSphereFull','DpNiwSphere']
bases = ['spkm','spkmKarcher','kmeans']
bases = ['spkm','spkmKarcher','kmeans','DpNiwSphereFull']
bases = ['DpNiwSphereFull']
bases = ['spkm','spkmKarcher','kmeans','NiwSphere']
bases = ['DpNiwSphereFull','DpNiw','DpNiwTangent','DpNiwSphere'];
bases = ['spkm'];
bases = ['DpNiwSphereFull'];
bases = ['spkm','kmeans','NiwSphere','DpNiwSphereFull']
bases = ['DpNiwSphereFull','DpNiw','DpNiwTangent','DpNiwSphere'];
bases = ['spkm'];
bases = ['spkm','kmeans','NiwSphere']
bases = ['spkm','kmeans','NiwSphere','DpNiw']
bases = ['DpNiwSphereFull'];
bases = ['spkm','kmeans','NiwSphere','DpNiw','DpNiwSphereFull']
bases = ['spkm','kmeans','DpNiw','DpNiwSphereFull']
bases = ['spkm_15','spkm_30','spkm_45','kmeans_15','kmeans_30','kmeans_45','DpNiw','DpNiwSphereFull']
bases = ['spkm_30','kmeans_30','DpNiw_1','DpNiw_10','DpNiwSphereFull_1','DpNiwSphereFull_10']
bases = ['DpNiwSphereFull_1','DpNiw_1'];
bases = ['spkm_27','kmeans_27']
# final eval
bases = ['spkm_30','kmeans_30','DpNiw_1','DpNiw_10','DpNiwSphereFull_1','DpNiwSphereFull_10']
#rebuttal
bases = ['DpNiwSphereFull_1','DpNiw_1']
bases = ['DpNiw_1']
bases = ['spkm_30','kmeans_30']
bases = ['spkm_30','kmeans_30','DpNiw_1','DpNiwSphereFull_1']
bases = ['DPvMFmeans_10']
# DP-vMF-means
bases = ['spkm_30','kmeans_30','DPvMFmeans_1','DPvMFmeans_10']#,'DpNiwSphereFull_1','DpNiwSphereFull_10']
# aistats resubmission
bases = ['spkm_30','kmeans_30','DpNiw_1','DpNiw_10','DpNiwSphereFull_1','DpNiwSphereFull_10']
bases = ['spkm_30','kmeans_30','DpNiw_1','DpNiwSphereFull_1', 'DpNiwSphereFullNoPropose_1', 'DpNiwSphereFullNoPropose_30']
bases = ['DpNiwSphereFullNoPropose_50']
bases = ['DirNiwSphereFull_50','DpNiw_1','DpNiwSphereFull_1']
bases = ['DirNiwSphereFull_100','DpNiwSphereFull_1']
bases = ['spkm_30','kmeans_30','DpNiwSphereFull_1']
bases = ['DirNiwSphereFull_100']
bases = ['DpNiwSphereFull_1']
bases = ['DpNiw_1']
bases = ['spkm_30','kmeans_30']
bases = ['DirNiwSphereFull_100','spkm_30','kmeans_30','DpNiw_1','DpNiwSphereFull_1']
bases = ['DpNiw_1']
# Data matrix is D x N: one datapoint per COLUMN.
x=np.loadtxt(rootPath+dataPath,delimiter=' ')
N = x.shape[1]
D = x.shape[0]
K = 30 # 15 30 45
# reRun=True forces re-execution of the external sampler instead of
# loading cached label/metric files (the second assignment wins).
reRun = False
reRun = True
cfg['K'] = K;
cfg['T'] = 200
cfg['T'] = 100
cfg['J'] = 1
# One result row per (base, repetition j); one column per iteration.
nmis = np.zeros((len(bases)*cfg['J'],cfg['T']))
vMeasures = np.zeros((len(bases)*cfg['J'],cfg['T']))
mis = np.zeros((len(bases)*cfg['J'],cfg['T']))
Ns = np.zeros((len(bases)*cfg['J'],cfg['T']))
# Main experiment loop: for every algorithm (base) and repetition, build
# the sampler command line, run the external dpmmSampler binary, load the
# per-iteration labels it wrote, and compute MI/entropy-based scores.
for i,base in enumerate(bases):
    for j in range(cfg['J']):
        cfg['j']=j
        cfg['base']=base
        # NOTE(review): with the final `bases` list every entry carries a
        # '_<K0>' suffix, so this first membership test never matches and
        # the else-branch always parses base/K0 -- confirm intent.
        if cfg['base'] in ["DpNiwSphere","DpNiwSphereFull",'DpNiw','DpNiwTangent']:
            info = cfg['base'].split('_')
            cfg['base'] = info[0]
            cfg['K'] = int(info[1])
#            cfg['K'] = 1 #10
            alpha = np.ones(cfg['K']) *1.;
        else:
            # get K from base string
            print cfg['base']
            info = cfg['base'].split('_')
            cfg['base'] = info[0]
            cfg['K'] = int(info[1])
            alpha = np.ones(cfg['K']) * 10.;
        # 'NoPropose' variant maps onto DpNiwSphereFull plus a '-n' flag.
        if cfg['base'] == "DpNiwSphereFullNoPropose":
            cfg['noPropose'] = True
            cfg['base'] = "DpNiwSphereFull"
        if cfg['base'] == 'NiwSphereUnifNoise':
            alpha[cfg['K']-1] = 1.0;
        if cfg['base'] == 'DirNiwSphereFull':
            alpha = alpha/cfg['K']
        # Prior parameters: nu and the (D-1)x(D-1) tangent-space scatter Delta.
        nu = D+1. #+N/10.
        Delta = nu* (1.*np.pi)/180. * np.eye(D-1)
        params = np.array([nu])
        params = np.r_[params,Delta.ravel()]
        # DpNiw works in the ambient D-dim space and needs kappa/theta too.
        if cfg['base'] in ['DpNiw']:
            Delta = nu* (0.1*np.pi)/180. * np.eye(D)
            kappa = 1.0
            thetaa = np.zeros(D)
            params = np.array([nu,kappa])
            params = np.r_[params,thetaa.ravel(),Delta.ravel()]
        # DPvMFmeans takes a single concentration-angle parameter.
        if cfg['base'] in ['DPvMFmeans']:
            params = np.array([np.cos(15.0*np.pi/180.0)-1])
        # Output file name encodes the full config (cfg was mutated above,
        # so the encoded base/K are the parsed ones).
        outName,_ = os.path.splitext(rootPath+dataPath)
        outName += '_'+Config2String(cfg).toString()
        #args = ['../build/dpSubclusterSphereGMM',
        # args = ['../build/dpStickGMM',
        args = ['../../dpMMshared/build/dpmmSampler',
            '--seed {}'.format(int(time.time()*100000) - 100000*int(time.time())),
            '-s', # print silhouette
            '-N {}'.format(N),
            '-D {}'.format(D),
            '-K {}'.format(cfg['K']),
            '-T {}'.format(cfg['T']),
            '--alpha '+' '.join([str(a) for a in alpha]),
            #'--base NiwSphereUnifNoise',
            '--base '+cfg['base'],
            '-i {}'.format(rootPath+dataPath),
            '-o {}'.format(outName+'.lbl'),
            '--params '+' '.join([str(p) for p in params])]
        if 'noPropose' in cfg.keys():
            args.append('-n')
        if reRun:
            print ' '.join(args)
            print ' --------------------- '
            time.sleep(3)
            err = subp.call(' '.join(args),shell=True)
            if err:
                print 'error when executing'
                raw_input()
        # z is T x N: one row of labels per sampler iteration.
        z = np.loadtxt(outName+'.lbl',dtype=int,delimiter=' ')
        # Sub-cluster samplers emit 2 labels per cluster; fold them together.
        if cfg['base'] in ["DpNiwSphere","DpNiwSphereFull","DpNiw",'DpNiwTangent']:
            z = np.floor(z/2)
        # compute MI and entropies - if not already computed and stored
        MI = np.zeros(cfg['T'])
        Hz = np.zeros(cfg['T'])
        Hgt = np.zeros(cfg['T'])
        if not reRun and os.path.exists('./'+outName+'_MI.csv'):
            MI = np.loadtxt(outName+'_MI.csv')
            Hgt = np.loadtxt(outName+'_Hgt.csv')
            Hz = np.loadtxt(outName+'_Hz.csv')
            Ns[i*cfg['J']+j,:] = np.loadtxt(outName+'_Ns.csv')
        else:
            for t in range(cfg['T']):
                MI[t] = mutualInfo(z[t,:],zGt)
                Hz[t] = entropy(z[t,:])
                Hgt[t] = entropy(zGt)
#        ipdb.set_trace()
                Ns[i*cfg['J']+j,t] = np.unique(z[t,:]).size
#      Ns[i,t] = int(np.max(z[t,:])+1)
            print Ns[i*cfg['J']+j,:]
            np.savetxt(outName+'_MI.csv',MI);
            np.savetxt(outName+'_Hgt.csv',Hgt);
            np.savetxt(outName+'_Hz.csv',Hz);
            np.savetxt(outName+'_Ns.csv',Ns[i*cfg['J']+j,:]);
        # Derived scores per iteration: NMI, raw MI and V-measure.
        for t in range(cfg['T']):
            nmis[i*cfg['J']+j,t] = MI[t] / np.sqrt(Hz[t]*Hgt[t])
#      nmis[i*cfg['J']+j,t] = MI*2. / (Hz+Hgt)
            mis[i*cfg['J']+j,t] = MI[t]
            vMeasures[i*cfg['J']+j,t] = 2.* MI[t] / (Hz[t]+Hgt[t])
            print nmis[i*cfg['J']+j,t], 2.*MI[t], Hz[t], Hgt[t]
# Pretty legend labels per algorithm id.
baseMap={'spkm':'spkm','kmeans':'k-means','NiwSphere':'DirSNIW', \
    'DpNiw':'DP-GMM','DpNiwSphere':'DpSNIW opt','DpNiwSphereFull':'DP-TGMM', \
    'DPvMFmeans':'DPvMFmeans',"DirNiwSphereFull":"Dir-TGMM"}
# Colormap lookup table (255 entries); successive reassignments -- the
# final cm.spectral table is the one used for plotting.
#cl = cm.gnuplot2(np.arange(len(bases)))
cl = cm.hsv(np.arange(255))
cl = cm.brg(np.arange(255))
cl = cm.gist_rainbow(np.arange(255))
cl = cm.gnuplot2(np.arange(255))
cl = cm.gnuplot(np.arange(255))
cl = cm.spectral(np.arange(255))
#print cltlib
# I spreads the colormap indices over the algorithms (+1 to avoid the ends).
I = len(bases) +1
print nmis.shape
# Plot NMI over iterations, one curve per algorithm (first repetition gets
# the legend entry; further repetitions share its color).
# NOTE(review): cl[(i+1)*255/I] relies on Python-2 integer division -- under
# Python 3 the float index would raise; this whole script is Python 2.
fig = plt.figure(figsize=figSize, dpi=80, facecolor='w', edgecolor='k')
for i,base in enumerate(bases):
    # Split off a '_<K0>' suffix to show the initial cluster count in the legend.
    if not re.search('_',base) is None:
        info = base.split('_')
        base = info[0]
        Ki = int(info[1])
        plt.plot(np.arange(cfg['T']),nmis[i*cfg['J'],:],label=baseMap[base]+' ($K_0={}$)'.format(Ki),c=cl[(i+1)*255/I])
        for j in range(1,cfg['J']):
            plt.plot(np.arange(cfg['T']),nmis[i*cfg['J']+j,:],c=cl[(i+1)*255/I])
    else:
        plt.plot(np.arange(cfg['T']),nmis[i*cfg['J'],:],label=baseMap[base],c=cl[(i+1)*255/I])
        for j in range(1,cfg['J']):
            plt.plot(np.arange(cfg['T']),nmis[i*cfg['J']+j,:],c=cl[(i+1)*255/I])
    print i*255/len(bases)
plt.xlabel('iterations')
plt.ylabel('NMI')
plt.ylim([0,1])
plt.legend(loc='lower right')
plt.tight_layout()
#plt.title(rootPath+dataPath)
plt.savefig(outName+'_NMI.png',figure=fig)
# Same plot as the NMI figure above, but for the V-measure score.
fig = plt.figure(figsize=figSize, dpi=80, facecolor='w', edgecolor='k')
for i,base in enumerate(bases):
    if not re.search('_',base) is None:
        info = base.split('_')
        base = info[0]
        Ki = int(info[1])
        plt.plot(np.arange(cfg['T']),vMeasures[i*cfg['J'],:],label=baseMap[base]+' ($K_0={}$)'.format(Ki),c=cl[(i+1)*255/I])
        for j in range(1,cfg['J']):
            plt.plot(np.arange(cfg['T']),vMeasures[i*cfg['J']+j,:],c=cl[(i+1)*255/I])
    else:
        plt.plot(np.arange(cfg['T']),vMeasures[i*cfg['J'],:],label=baseMap[base],c=cl[(i+1)*255/I])
        for j in range(1,cfg['J']):
            plt.plot(np.arange(cfg['T']),vMeasures[i*cfg['J']+j,:],c=cl[(i+1)*255/I])
plt.xlabel('iterations')
plt.ylabel('V-Measure')
plt.ylim([0,1])
plt.legend(loc='lower right')
plt.tight_layout()
#plt.title(rootPath+dataPath)
plt.savefig(outName+'_VMeasure.png',figure=fig)
fig = plt.figure(figsize=figSize, dpi=80, facecolor='w', edgecolor='k')
for i,base in enumerate(bases):
if not re.search('_',base) is None:
info = base.split('_')
base = info[0]
Ki = int(info[1])
plt.plot(np.arange(cfg['T']),mis[i*cfg['J'],:],label=baseMap[base]+' ($K_0={}$)'.format(Ki),c=cl[(i+1)*255/I])
for j in range(1,cfg['J']):
plt.plot(np.arange(cfg['T']),mis[i*cfg['J']+j,:],c=cl[(i+1)*255/I])
else:
plt.plot(np.arange(cfg['T']),mis[i*cfg['J'],:],label=baseMap[base],c=cl[(i+1)*255/I])
for j in range(1,cfg['J']):
plt.plot(np.arange(cfg['T']),mis[i*cfg['J']+j,:],c=cl[(i+1)*255/I])
plt.xlabel('iterations')
plt.ylabel('MI')
plt.legend(loc='lower right')
plt.tight_layout()
#plt.title(rootPath+dataPath)
plt.savefig(outName+'_MI.png',figure=fig)
#plt.show()
fig = plt.figure(figsize=figSize, dpi=80, facecolor='w', edgecolor='k')
plt.plot(np.arange(1),np.ones(1)*K)
#plt.plot(np.arange(1),np.ones(1)*K)
#plt.plot(np.arange(1),np.ones(1)*K)
#plt.plot(np.arange(cfg['T']),np.ones(cfg['T'])*15,label='spkm, k-means (k=15)')
plt.plot(np.arange(cfg['T']),np.ones(cfg['T'])*30,label='spkm, k-means',c=cl[(1+1)*255/I])
#plt.plot(np.arange(cfg['T']),np.ones(cfg['T'])*45,label='spkm, k-means (k=45)')
for i,base in enumerate(bases):
if not re.search('_',base) is None:
info = base.split('_')
base = info[0]
if base in ['DpNiw','DpNiwSphere','DpNiwSphereFull','DPvMFmeans',"DirNiwSphereFull"]:
Ki = int(info[1])
plt.plot(np.arange(cfg['T']),Ns[i*cfg['J'],:],c=cl[(i+1)*255/I])
for j in range(1,cfg['J']):
plt.plot(np.arange(cfg['T']),Ns[i*cfg['J']+j,:],c=cl[(i+1)*255/I])
# plt.plot(np.arange(cfg['T']),Ns[i*cfg['J']:(i+1)*cfg['J'],:],label=baseMap[base]+' ($K_0={}$)'.format(Ki),c=cl[(i+1)*255/I])
plt.plot(np.arange(cfg['T']),np.ones(cfg['T'])*Kgt,'--',label='ground-truth',c=cl[0])
plt.xlabel('iterations')
plt.ylabel('# clusters')
plt.ylim([0,np.max(Ns)+1])
plt.legend(loc='lower right')
plt.tight_layout()
#plt.title(dataPath)
plt.savefig(outName+'_nClusters.png',figure=fig)
plt.show()
| jstraub/dpMM | python/evalSphereMMsynthetic.py | Python | mit | 14,704 | [
"Mayavi"
] | 52ad56d7f5611fd2a5ae0072f7a56d22723e14f1b6d6c03d61c40c32db20ba76 |
#!/usr/bin/env python3
#
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import re
import os
# Parse the Doxygen build logs and write a de-duplicated, filtered summary
# to dox_warnings_summary.log.  An empty summary file signals "no warnings".
# collect list of Doxygen warnings
with open('doc/doxygen/warnings.log') as f:
    content = f.read()
# each match is a (filepath, lineno, possibly multi-line message) triple
raw_warnings = re.findall(
    r'(?:^|\n)doxygen:(.+?):(\d+): warning: (.+?)(?=\n\S|\n*$)',
    content, re.DOTALL)
# collect list of empty @param and @tparam blocks
with open('doc/doxygen/empty-params.log') as f:
    content = f.read().strip()
if content:
    # only report empty params found in actual source / doc files
    source_code_ext = set(['.hpp', '.cpp', '.hh', '.cc', '.h', '.c', '.cuh',
                           '.cu', '.dox'])
    for line in content.strip().split('\n'):
        # NOTE(review): the regex is assumed to always match; a malformed log
        # line would raise AttributeError on m.groups() -- confirm the format
        # of empty-params.log is guaranteed by the generating step.
        m = re.search(r'^(.+):(\d+):[\s\*]*([@\\]t?param).*\s(\S+)\s*$', line)
        filepath, lineno, paramtype, varname = m.groups()
        ext = os.path.splitext(filepath)[1]
        if ext.lower() not in source_code_ext:
            continue
        warning = ('argument \'{0}\' of {1} has no description, either add one'
                   ' or remove {1}'.format(varname, paramtype))
        raw_warnings.append((filepath, lineno, warning))
# remove duplicated warnings
n_all = len(raw_warnings)
# key: (file, line, first message line); value: set of remaining message lines
raw_warnings = {(filepath, int(lineno), warning.split('\n')[0]):
                set(warning.split('\n')[1:])
                for filepath, lineno, warning in raw_warnings}
n_unique_raw = len(raw_warnings)
# filter out non-critical warnings
warnings = {}
for (filepath, lineno, warning), warning_list in raw_warnings.items():
    if re.search(r'^member \S+ belongs to two different groups\. '
                 r'The second one found here will be ignored\.$', warning):
        # happens when a function is declared in a group in the .hpp file but
        # defined in another group in the .cpp file; this is usually caused by
        # the "Private functions" and "Exported functions" groups in .hpp files
        continue
    if re.search(r'^documented symbol `\S+\' was not declared or defined\.$',
                 warning):
        # known bug, fixed in 1.8.16
        continue
    if re.search('^no uniquely matching class member found for $', warning):
        # known bug, not fixed yet
        continue
    if re.search(
            '^The following parameters? of .+ (is|are) not documented:$', warning):
        # non-critical warning, enforcing it would encourage bad behavior, i.e.
        # inserting "@param argument" without a description to silence the
        # warning, when in reality the warning is silenced because the text on
        # the following line is captured and becomes the argument description
        continue
    # strip everything before src/ so reported paths are repo-relative
    filepath = re.sub(r'^.*(?=src/)', '', filepath)
    if filepath not in warnings:
        warnings[filepath] = {}
    warnings[filepath][(lineno, warning)] = warning_list
n_unique = sum(map(len, warnings.values()))
if n_unique == 0:
    # truncate/create an empty summary file to signal success
    with open('dox_warnings_summary.log', 'w') as f:
        pass
    # NOTE(review): prefer sys.exit() over the site-provided exit() builtin
    exit()
# generate a log file
with open('dox_warnings_summary.log', 'w') as f:
    f.write('The Doxygen documentation generated {} unique warnings (total: {},'
            ' ignored: {}):\n'.format(n_unique, n_all, n_unique_raw - n_unique))
    for filepath in sorted(warnings.keys()):
        f.write(filepath + ':\n')
        for (lineno, warning) in sorted(warnings[filepath].keys()):
            warning_list = warnings[filepath][(lineno, warning)]
            # collapse argument lists so similar warnings read uniformly
            s = re.sub(r'\(.*\)', '()', warning)
            if warning_list:
                s += ': ' + ', '.join(x.strip() for x in warning_list)
            f.write('  line {}: {}\n'.format(lineno, s))
| fweik/espresso | maintainer/CI/dox_warnings.py | Python | gpl-3.0 | 4,195 | [
"ESPResSo"
] | 141fd833f87d00ce33d036a7525c791fcbc3bdae5eebf33ba6f8d15b2d2ce60d |
"""
KeepNote
MultiEditor widget in main window
This editor contain multiple editors that can be switched based on
the content-type of the node.
"""
#
# KeepNote
# Copyright (c) 2008-2009 Matt Rasmussen
# Author: Matt Rasmussen <rasmus@mit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# pygtk imports
import pygtk
pygtk.require('2.0')
from gtk import gdk
import gtk.glade
import gobject
# keepnote imports
import keepnote
from keepnote.gui.editor import KeepNoteEditor
_ = keepnote.translate
class MultiEditor (KeepNoteEditor):
    """
    Manager for switching between multiple editors

    Holds at most one active child editor at a time and forwards the whole
    KeepNoteEditor interface to it, re-emitting the child's signals as its
    own so callers never need to know which concrete editor is active.
    """
    def __init__(self, app):
        KeepNoteEditor.__init__(self, app)
        self.show_all()
        self._notebook = None   # notebook currently attached (or None)
        self._pages = []        # pages currently being viewed
        self._editor = None     # active child editor (or None)
        self._window = None     # window whose UI the active editor extends
        # child-editor signals proxied (re-emitted) by this widget
        self._signals = ["view-node",
                         "visit-node",
                         "modified",
                         "font-change",
                         "error",
                         "child-activated",
                         "window-request",
                         "make-link"]
        self._signal_ids = []   # handler ids, kept for later disconnect
    def set_editor(self, editor):
        """Swap the active child editor, tearing down the old one first.

        Passing None simply removes the current editor."""
        # do nothing if editor is already set
        if editor == self._editor:
            return
        # tear down old editor, if it exists
        if self._editor:
            self._editor.view_pages([])
            self._editor.save_preferences(self._app.pref)
            self._disconnect_signals(self._editor)
            if self._window:
                self._editor.remove_ui(self._window)
            self._editor.set_notebook(None)
            self.remove(self._editor)
        self._editor = editor
        # start up new editor, if it exists
        if self._editor:
            self.pack_start(self._editor, True, True, 0)
            self._editor.show()
            self._connect_signals(self._editor)
            self._editor.set_notebook(self._notebook)
            if self._window:
                self._editor.add_ui(self._window)
            self._editor.load_preferences(self._app.pref)
            # restore the view the previous editor was showing
            self._editor.view_pages(self._pages)
    def get_editor(self):
        """Return the currently active child editor (or None)"""
        return self._editor
    def _connect_signals(self, editor):
        """Connect to the child's signals, re-emitting them from self"""
        # factory needed so each lambda captures its own 'sig'
        # (args[0] is the emitting child widget and is dropped)
        def make_callback(sig):
            return lambda *args: self.emit(sig, *args[1:])
        for sig in self._signals:
            self._signal_ids.append(
                editor.connect(sig, make_callback(sig)))
    def _disconnect_signals(self, editor):
        """Disconnect all handlers previously attached to the child editor"""
        for sigid in self._signal_ids:
            editor.disconnect(sigid)
        self._signal_ids = []
    #========================================
    # Editor Interface
    def set_notebook(self, notebook):
        """Set notebook for editor"""
        self._notebook = notebook
        if self._editor:
            self._editor.set_notebook(notebook)
    def get_textview(self):
        """Return the textview"""
        if self._editor:
            return self._editor.get_textview()
        return None
    def is_focus(self):
        """Return True if text editor has focus"""
        if self._editor:
            return self._editor.is_focus()
        return False
    def grab_focus(self):
        """Pass focus to textview"""
        if self._editor:
            return self._editor.grab_focus()
    def clear_view(self):
        """Clear editor view"""
        if self._editor:
            return self._editor.clear_view()
    def view_pages(self, pages):
        """View a page in the editor"""
        # remember the pages so a later set_editor() can restore the view
        self._pages = pages[:]
        if self._editor:
            return self._editor.view_pages(pages)
    def save(self):
        """Save the loaded page"""
        if self._editor:
            return self._editor.save()
    def save_needed(self):
        """Returns True if textview is modified"""
        if self._editor:
            return self._editor.save_needed()
        return False
    def load_preferences(self, app_pref, first_open=False):
        """Load application preferences"""
        if self._editor:
            return self._editor.load_preferences(app_pref, first_open)
    def save_preferences(self, app_pref):
        """Save application preferences"""
        if self._editor:
            return self._editor.save_preferences(app_pref)
    def add_ui(self, window):
        """Add the active editor's UI (menus/toolbars) to 'window'"""
        self._window = window
        if self._editor:
            return self._editor.add_ui(window)
    def remove_ui(self, window):
        """Remove the active editor's UI from 'window'"""
        self._window = None
        if self._editor:
            return self._editor.remove_ui(window)
    def undo(self):
        """Undo the last edit in the active editor"""
        if self._editor:
            return self._editor.undo()
    def redo(self):
        """Redo the last undone edit in the active editor"""
        if self._editor:
            return self._editor.redo()
class ContentEditor (MultiEditor):
    """
    Dispatch to one of several registered editors based on the
    content-type of the node being viewed.
    """
    def __init__(self, app):
        MultiEditor.__init__(self, app)
        # maps a content-type prefix (e.g. "text" or "text/xhtml+xml")
        # to the editor widget that handles it
        self._editors = {}
        # editor used when no registered content-type matches
        self._default_editor = None
    def add_editor(self, content_type, editor):
        """Register 'editor' to handle nodes of 'content_type'"""
        self._editors[content_type] = editor
    def removed_editor(self, content_type):
        """Unregister the editor for 'content_type'"""
        del self._editors[content_type]
    def get_editor_content(self, content_type):
        """Return the editor registered for 'content_type'"""
        return self._editors[content_type]
    def set_default_editor(self, editor):
        """Set the fallback editor"""
        self._default_editor = editor
    #=============================
    # Editor Interface
    def view_pages(self, pages):
        """View pages, switching first to the editor that matches
        their content-type."""
        if len(pages) != 1:
            # multi-page (or empty) views are not supported; clear instead
            MultiEditor.view_pages(self, [])
            return
        # try successively shorter prefixes of the content-type
        # (e.g. "text/xhtml+xml", then "text") until an editor matches
        parts = pages[0].get_attr("content_type").split("/")
        chosen = None
        for end in xrange(len(parts), 0, -1):
            chosen = self._editors.get("/".join(parts[:end]), None)
            if chosen:
                break
        if chosen:
            self.set_editor(chosen)
        else:
            self.set_editor(self._default_editor)
        MultiEditor.view_pages(self, pages)
| reshadh/Keepnote-LaTeX | keepnote/gui/editor_multi.py | Python | gpl-2.0 | 6,682 | [
"VisIt"
] | e2b848427dc4423727a1dd0cc2c31cffc90a62cdd3142ce3918f88bc9cac7146 |
import json
def path_ID(start, end):
    """Return the canonical id "<a>-<b>" of an undirected path.

    The two endpoint names are joined in sorted order so that
    (start, end) and (end, start) map to the same identifier.
    """
    return "-".join(sorted((start, end)))
# Adjacency of walked locations: paths[source][target] -> path info.
with open("walked_paths.json") as f:
    paths = json.load(f)
# Per-day link data, each link carrying its list of visitor ids.
with open("../data/speeding_graph_data.json") as f:
    speeding_data = json.load(f)
# Maps visitor/car id -> metadata; only "car-type" is used below.
with open("../old/id_data.json", "r") as f:
    id_data = json.load(f)
ids = []
busyness = {}
max_per_day = 0
# All car-type categories expected in id_data[...]["car-type"].
cars = ["1", "2", "2P", "3", "4", "5", "6"]
# Collect every distinct undirected path id from the walked paths.
for source in paths.keys():
    for target in paths[source].keys():
        ID = path_ID(source, target)
        if ID not in ids:
            ids.append(ID)
for ID in ids:
    busyness[ID] = []
# For every day, record per-path visitor counts broken down by car type.
for date in speeding_data.keys():
    checked = []
    pathdata = speeding_data[date]["links"]
    for link in pathdata:
        ID = path_ID(link["source"], link["target"])
        visitors = link["visitors"]
        # "x" is the time index: number of days already recorded for this path
        temp = {"x": len(busyness[ID])}
        for car in cars:
            temp[car] = []
        for visit in visitors:
            car = id_data[visit]["car-type"]
            temp[car].append(visit)
        # replace each visitor list by its count (0 when empty)
        for car2 in cars:
            if temp[str(car2)] == []:
                temp[str(car2)] = 0
            else:
                temp[str(car2)] = len(temp[str(car2)])
        # only the first occurrence of a path per day is recorded
        # NOTE(review): duplicate links on the same day are dropped, not
        # merged -- confirm that is intended.
        if ID not in checked:
            busyness[ID].append(temp)
            checked.append(ID)
        if len(link["visitors"]) > max_per_day:
            max_per_day = len(link["visitors"])
with open("../data/path_busyness.json", "w") as f:
    json.dump(busyness, f)
print(max_per_day)
| SvenvDam/programmeerproject | Pre-processing/path_busyness.py | Python | unlicense | 1,529 | [
"VisIt"
] | 9b30b02585c97dfdcedbff6a2563feaa7517bd15c98a6d5127af0d89bd0300ed |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# conda execute
# env:
# - python >=3
# Already-reviewed article titles, one list per batch (01-12).
# Empty lists are batches that have not been filled in yet.
revisados01 = [
]
revisados02 = [
]
revisados03 = [
]
revisados04 = [
]
# last 1305 from the first (batch)  [translated from Spanish:
# "1305 ultimos de primero"]
revisados05 = [
'Liga de la Justicia',
'Leyenda negra española',
'Tarzán',
'Dick Grayson',
'República Cromañón',
'María Díaz de Haro (c.1270-1342)',
'El Tabo',
'Gobernantes de Argentina',
'Partido Socialista (Argentina)',
'Detective Conan',
'Viswanathan Anand',
'Monkey D. Luffy',
'Košice',
'Laodicea',
'Club Atlético Universidad Nacional San Agustín',
'Monasterio de Vallbona de les Monges',
'Final Fantasy VI',
'Opportunity',
'Provincia de El Oro',
'Galga extensiométrica',
'Mustafar',
'Montículo (informática)',
'Orden del Císter',
'Linterna Verde',
'Leslie Cheung',
'Alto Perú',
'Sodoma',
'Política del Sahara Occidental',
'Frente Revolucionario Antifascista y Patriota',
'Paintball',
'Monster (manga)',
'Invasiones Inglesas',
'Codificación Huffman',
'Violadores del Verso',
'Atizapán de Zaragoza',
'Foot Ball Club Melgar',
'Teoría conspirativa',
'Ren & Stimpy',
'Universidad de Carabobo',
'Guerra de Arauco',
'Chipiona',
'Monte Plata',
'María Trinidad Sánchez (provincia)',
'Hato Mayor',
'Historia medieval de España',
'Club Sporting Cristal',
'Thalía',
'Injerto',
'Orihuela',
'Glenn L. Martin Company',
'Rodrigo Rato',
'El lejano país de los estanques',
'República de Weimar',
'Paraná (Argentina)',
'Árbol AVL',
'Recife',
'El Chapulín Colorado',
'Árbol rojo-negro',
'Árbol binario',
'Võ Nguyên Giáp',
'Morcín',
'Langreo',
'Cianuro',
'Curare',
'Jackie Chan',
'Historia de Málaga',
'Nueva Caledonia',
'Montemayor del Río',
'Onís',
'Ailuropoda melanoleuca',
'Benito Juárez (Distrito Federal)',
'Vegetación de España',
'Relación interespecífica',
'Golpe de Estado',
'George Stephenson',
'CBS',
'Micción',
'Epífisis',
'National Broadcasting Company',
'La princesa Mononoke',
'Gustavo A. Madero (Distrito Federal)',
'Pamplona',
'Harry S. Truman',
'Aculco',
'Chimaltenango (departamento)',
'Música de Argentina',
'Unión Soviética',
'Extensión de cuerpos',
'Pascal (lenguaje de programación)',
'Tlaxcala',
'Reino Unido',
'1982',
'Karma',
'Horario de verano en el mundo',
'Plantae',
'Magnoliaceae',
'Linaceae',
'Xosé Manuel Pereiro',
'Complejo Industrial La Plata',
'Jean-Loup Dabadie',
'Owodog',
'San Miguel de Horcasitas',
'Jorge Luis Rojas',
'Reynosa F.C.',
'Parque Robert Allerton',
'Santa María Atarasquillo',
'Steven Sinofsky',
'Carranco (San Luis Potosí)',
'Fe (Los 80)',
'Cássio Gabus Mendes',
'Ewen Leslie',
'Texca',
'San Pedro las Playas',
'Takrur',
'Antje Traue',
'Solenn Adea Heussaff',
'WrestleMania 29',
'Atencingo',
'Ott Lepland',
'Andy Milder',
'Sophie Choudry',
'Qi Wei',
'Garett Bischoff',
'Give Up the Funk (Tear the Roof off the Sucker)',
'Tony Award por Mejor dirección en obra',
'Cristian González y Orquesta La Bohemia',
'Cubitán de Dolores',
'Coste social',
'Hoodie Allen',
'DDR4 SDRAM',
'Medal of Honor: Frontline',
'Helga',
'Sisters of War',
'Batalla de Monte Tumbledown',
'Gypsy Heart Tour',
'Monster in My Pocket',
'El problema del costo social',
'Dōjutsu',
'Elizabeth Eichhorn',
'Plataforma HD',
'Suzy (cantante)',
'Jeremy Scahill',
'Caxuxi',
'Marbella Corella',
'Boris Kodjoe',
'Carol Cleveland',
'Joseph Morgan',
'Aidan Alexander',
'Sentispac',
'Nuestra Belleza Latina 2011',
'Alice: Madness Returns',
'Exiliado jedi',
'The Kids Are Alright (película)',
'Mariscal Francisco Solano López (distrito)',
'Renato Munster',
'Revolución egipcia',
'Royal Pains',
'Hebe Tien',
'The weeks Top Charts',
'My Only Wish',
'Leandro Sapetti',
'Živilė Raudonienė',
'Motorola 68060',
'Who Are You (canción)',
'Leonard Mlodinow',
'Alejandro Urdapilleta',
'Luciano Abecasis',
'Luciano Arruga',
'Xaltianguis',
'Nuevas series animadas de Hanna-Barbera',
"Can't Be Tamed",
'Packed to the Rafters',
'Christoph Eschenbach',
"Gyro apó t'óneiro",
'Mi tío Napoleón',
'Jodi Gordon',
'Academia canadiense de Cine y Televisión',
'Christina Pickles',
'Auttapon Prakopkong',
'Tecolutilla',
'Sociedad Secreta (DC Comics)',
'Wendy O. Williams',
'Lisa McCune',
'3T',
'Krrish',
'My Name is Barbra, Two',
'GMA Network',
'Veronica Ferres',
'Stella McCartney',
'Ampex',
'Saga Golden Axe',
'John Debney',
'La Joya (Morelos)',
'P. N. Elrod',
'Carolyn Omine',
'Cuentos de Eva Luna',
'Dubai City Tower',
'Bienvenida Pérez',
'Antônio Fagundes',
'Santa María Acuitlapilco',
'Ángel Quesada',
'Evolution I (Stargate SG-1)',
'Homecoming (desambiguación)',
'San Pedro Buenavista',
'Los Flippers',
'León (apellido)',
'Anchiornis',
'Internet Protocol Datacast',
'Oscar Goldman',
'James Byng',
'The Saturdays',
'The Prime Time of Your Life',
'Alegato antisemita',
'Lomas de San Juan',
'La morena',
'Jeff Conaway',
'Dunav Kuzmanich',
'Chris Garver',
'Julian Iantzi',
'Thrill Seekers (serie de televisión)',
'Jesús Morales (actriz)',
'Felipe Armas',
'Midtown Madness (videojuego)',
'Ivanka Trump',
'Grant Goodeve',
'Josh Schwartz',
'Lo siento',
'Tejido de conducción',
'Mark Moses',
'La vida World Tour',
'El detective cantante',
'Lucía Palacios',
'Disney Channel Latinoamérica',
'Leeland',
'György Spiró',
'Bleeding Love',
'Howard Blake',
'MADtv',
'Canal 9 Litoral',
'Conquista Online',
'Scansoriopteryx',
'Carlos Hauptvogel',
'Adolf Ehrnrooth',
'Ray Liotta',
'Selección femenina de fútbol de España',
'Leandro Rivera',
'Canal 33 Madrid',
'Kate Nash',
'Javier Hernán García',
'Jeffrey Dean Morgan',
'El planeta salvaje',
'Rosa... de lejos',
'Soukous',
'Escalerillas',
'Mujer contra mujer',
'Frank Serpico',
'Torres de Malory',
'Felipe Caicedo',
'Habbo',
'Marta Robles',
'Lea Salonga',
'Sarah Connor (personaje)',
'Al Jarreau',
'Thursday',
'Distrito de Baños del Inca',
'El baile en TVN',
'TVXQ',
'Dzitbalché',
'Danny Phantom',
'Robert Cochran',
'Joel Surnow',
'Envy',
'Will Vinton',
'Cobra (Manga)',
'Acció Cultural del País Valencià',
'Texcoco',
'Richard Curtis',
'SECEMIE',
'Selling England by the Pound',
'Club Atlético Aguada',
'Robin Söderling',
'The Videos 86-98',
'Mosaico genético',
'Paul Kagame',
'Sonic the Hedgehog 3',
'Rainbow (álbum de Ayumi Hamasaki)',
'Madness',
'The Strongest',
'Emma Bunton',
'Wolf Vostell',
'GP2X',
'Hosni Mubarak',
'Gabriela Ruffo',
'Virginia',
'Caiga quien caiga',
'Roberto Ayala',
'Mataró',
'TV Aichi',
'Liverpool Football Club',
'Daniel Bertoni',
'Ahí está el detalle',
'Santiago Creel',
'Mil Mi-24',
'She-Hulk',
'Estadio Alberto J. Armando',
'Selección de fútbol de Panamá',
'VDSL',
'Las aventuras de Alicia en el país de las maravillas',
'Lluvia de animales',
'Luis Alberto Spinetta',
'Terrance y Phillip',
'Club Sportivo Cienciano',
'Arcadia (desambiguación)',
'Singleton',
'Paradoja',
'Colegio Hogwarts de Magia y Hechicería',
'Quiz Show: El dilema',
'Ricardo Lagos',
'Infierno: Canto Duodécimo',
'Sony Ericsson c510',
'Stylo',
'Isla Pamban',
'Sergio Zárate',
'Vaidika Prastisthana Sangha',
'Lyn Scully',
'Adiós amor',
'Crocodile Dundee in Los Angeles',
'The Piano Lesson',
'Rap alternativo',
'National Airlines (Chile)',
'Sony Ericsson W995',
'Robert Ortiz',
'César Montiglio',
'Louis Chevrolet',
'Taking Chance',
'N. N. (serie de televisión)',
'Leonor Martín',
'Miguel Moly',
'Germán Caffa',
'La Fiamma',
'Enzo Bearzot',
'Carlos Guichandut',
'Slaughterhouse',
'Vesikko',
'Family Values Tour 2007',
'Need for Speed: World',
'Lucas Rimoldi',
'Atomizer',
'Clásica de San Sebastián 2010',
'Infierno: Canto Vigésimo primero',
'Mayoi Neko Overrun!',
'El Generico',
'Cecilia Rossetto',
'Pastel de papa',
'Milan Kalina',
'Misael Acosta Solís',
'Citroën Junior Team',
'Citroën World Rally Team',
'Ciguapa',
'Citroën Xsara WRC',
'Manizales',
'Entre el amor y el deseo',
'Nagato (Personaje de Naruto)',
'Infierno: Canto Vigésimo',
'Historia de Cuitzeo',
'Cementerio de Zaragoza',
'Toti Pasman',
'SummerSlam (2010)',
'Ricardo Peralta',
'Luis Elordi',
'Happenstance',
'Reserva de la biosfera Zicuirán Infiernillo',
'Ángeles Vicente',
'Luis Alarcón',
'Danny Casolaro',
'Club Deportivo Motril Atlético',
'Corona (heráldica)',
'La Sombra (luchador)',
'Bandera de Tlaxcala',
'Ichiban Ushiro no Dai Maō',
'Explosiones Nucleares para la Economía Nacional',
'Iker Cobo',
'Modelo de escorrentía',
'Ralph Santolla',
'Rakuten',
'Sacrebleu',
'Demoliendo teles',
'Habitación 23',
'William Stanley (inventor)',
'Tezontle',
'Lamivudina',
'Peter Freund',
'Finales de la NBA de 1962',
'Medal of Honor (videojuego de 2010)',
'Keep Bikini Bottom Beautiful',
'Fernando Bujones',
'Santuario nacional Pampa Hermosa',
'Serena Deeb',
'El Atolón de Funafuti',
'Los Sims 3',
'Roderick Strong',
'Fallout: New Vegas',
'Estadio Independencia de La Rinconada',
'The I Heart Revolution: With Hearts as One',
'The Love of Siam',
'Intuición femenina',
'Proceso de Rivonia',
'The Scythe',
'Presa de Tucuruí',
'Condado de Río Blanco',
'Estación Blas Durañona',
'Fondo blanco',
'Condado de Pitkin',
'Chismes',
'Alejandro Murature',
'Antonio Susini',
'Historia del Club Atlético River Plate',
'Río Cottica',
'Estrellas del Porno',
'City CarShare',
'Condado de Conejos',
'Spookyfish',
'Compuestos orgánicos persistentes',
'Chocolates Matías López',
'Moe Letter Blues',
'Estanques de Milicz',
'Estación Amalia',
'La leyenda del beso',
'Roland Morillot (S613)',
'Gormiti (serie animada)',
'The Beautiful People (lucha libre)',
'Rednecks and Broomsticks',
'Gothic (serie)',
'Rock cristiano',
'Primera B de Chile 2010',
'Nina Radio Online',
'Adolfo Ypey',
'Juan Manuel Insaurralde',
'Lance Hoyt',
'Eisblume',
'Delfín Leguizamón',
'XENA-AM',
'Maxence Caron',
'XEJX-AM',
'Mighty Mightor',
'Cloudy with a chance of meatballs',
'Desgranadora',
'Chepe Fortuna',
'Veolia Redbus Urbano',
'Lectura rápida',
'Cristóbal de la Mancha y Velazco',
'Edificio La Inmobiliaria',
'Alborosie',
'Neofascismo',
'Lo esencial de... Alejandro Sanz',
'Ricardo García Pico',
'Arquitectura rupestre',
'She Came In Through the Bathroom Window',
'Botafogo Futebol Clube',
'Yoel Hernández',
'Ética militar',
'Gasteracantha cancriformis',
'Exist † trace',
'Roger Amuruz Gallegos',
'The Pagemaster',
'San Salvador el Seco',
'South Park (videojuego)',
'Mujeres de arena (telenovela de 1993)',
'Museo Municipal de Ciencias Naturales y Arqueología de San Antonio',
'Temperamento (Artista)',
'X-Time',
'Jump Square',
'Iglesia Arciprestal de Santa María del Salvador (Chinchilla de Montearagón)',
'René Segura',
'Grand Slam Cup',
'Iglesia de San Andrés (Valladolid)',
'Llanura Indo-Gangética',
'Son Sardina',
'Transcarga',
'Carmelo Hurtado',
'Yautepec de Zaragoza',
'Esteban Terralla y Landa',
'Mohamed Rouicha',
'Almacén 13',
'Ektor Rivera',
'Maximiliano Caire',
'Luis Rivero del Val',
'Minería ilegal en Perú',
'21st Century Breakdown (canción)',
'Jorge Uliarte',
'Zona Metropolitana de Tuxtla Gutiérrez',
'Luis Medina Castro',
'Vuelta a Guatemala',
'Rostros Ocultos',
'A Little Less Sixteen Candles, a Little More "Touch Me"',
'Club Atlético San Fernando',
'Busseto',
'Batalla de Las Cangrejeras',
'Manuel Atanasio Cabañas',
'Carimagua',
'Scipione Riva-Rocci',
'Bukit Timah',
'Dalmiro Adaro',
'Uncharted 2: El Reino de los Ladrones',
'Río Citarum',
'Las aventuras de Miss Spider',
'Aída García',
'Ot Pi',
'Religión en Melena del Sur',
'Pseudomyrmex triplarinus',
'Joaquín Villalón Díez',
'Krab Borg',
'Maleficio (novela)',
'Cheat Engine',
'Dave Sanders',
'Shotgun Messiah',
'Pasión (emoción)',
'Castella Vetula',
'Smiley Face (película)',
'Todd (Beavis and Butthead)',
'Halloween II (película del 2009)',
'Cantón Chone',
'Nuestra Señora de las Nieves (La Palma)',
'This Is Us Tour',
'La cúpula (novela)',
'José María Salvadores',
'Raoul Bova',
'Cañón de Saturban',
'Jah Cure',
'Nada es color de rosa',
'Matthew McGrory',
'Verónica Coronel',
'Jesús Moreno Rodríguez',
'Morgan Gould',
'Esko Aho',
'Joe Rogan',
'Rachel Dawes',
'Scarve',
'Tess Mercer',
'The World Ends with You',
'Sangre de lobos',
'Salud en México',
'Enugu',
'Saber vivir',
'Mazda 323',
'Carlos Antonio Vélez',
'TVMax',
'HB (banda)',
'K-Paz de la Sierra',
'Jason X',
'Área Metropolitana de Ciudad del Este',
'Enredados',
'Oerlikon 20 mm',
'Río Grande (Oaxaca)',
'Pretty Guardian Sailor Moon',
'Sonic Underground',
'Violetta Villas',
'Colonia Narvarte',
"Mexico's Next Top Model",
'Censura en Cuba',
'Kronos (banda)',
'Heros severus',
'Valero Lecha',
'90210',
'Jeton Kelmendi',
'Jumper (novela)',
'Command & Conquer 3: La ira de Kane',
'Beba Bidart',
'Craig Mabbitt',
'Amor ciego (película)',
'Port Royale 2: Imperio y Piratas',
'Rodrigo Marangoni',
'Richie Ray',
'Cinderella III: A Twist in Time',
'Prosthechea cochleata',
'Miraflores (Boyacá)',
'Michèle Bennett',
'Las aventuras de Rocky y Bullwinkle',
'Alejandro Morera Soto',
'Alonso Núñez de Haro',
'William Edward Forster',
'La ciudad de los tísicos',
'Oxtotitlán',
'Grace (Stargate SG-1)',
'Orlando Colón',
'Oteruelo (La Rioja)',
'Colonia Nápoles',
'Nuestra Señora de Zapopan',
'Huracán de tipo Cabo Verde',
'Hugo Leicht',
'Causas de la Segunda Guerra Mundial',
'Adolfo Díaz Recinos',
'Petra Pau',
'Radio Panamericana',
'Voltaire García',
'Esmeraldas del Caos',
'Parroquia Urbana Punta Cardón',
'UCoz',
'Unfortunate Snort',
'Celal Bayar',
'Pinkly Smooth',
'Santa Cruz Acalpixca',
'Joakim Soria',
'Leon Wood',
'Estación Intermodal Bellavista de La Florida',
'Río La Pasión',
'La otra mitad del sol (telenovela colombiana)',
'Mandy Moore (álbum)',
'Dragon Quest: Dai no Daibouken',
'Los viajes de Clovis Dardentor',
'Yuki Nagato',
'Movimiento Revolucionario Liberal',
'Cookin’ Soul',
'El rostro de Analía',
'La paisana Jacinta',
'I Wanna Be With You',
'Fairchild AC-119',
'Entre el mar y una estrella',
'Impel Down',
'Beto Casella',
'Najib Tun Razak',
'Vuélveme a querer',
'Estudios sociales',
'Louisa Hanoune',
'Luigi Galleani',
'Poneglyph',
'Resident Evil 2 DualShock Edition',
'We Made You',
'Fernando Béjar Durá',
'Renault R202',
'Inmigración argentina en España',
'Inmigración mexicana en Estados Unidos',
'Kenny Washington',
'Escuela Industrial Salesiana San Ramón',
'Fernando Navarro Morán',
'Saint Seiya Next Dimension',
'Mimí',
'Sekirei',
'Inmigración en Chile',
'Tableta digitalizadora',
'Parque nacional natural El Cocuy',
'Chía (Cundinamarca)',
'Tuone Udaina',
'Club de Deportes Puerto Montt',
'Shihan',
'Ross Geller',
'Remo (deporte)',
'Incubus (banda)',
'Provincia de Pichincha',
'Callosa de Segura',
'Prong',
'Princesa Daisy',
'Navegantes del Magallanes',
'Mort Cinder',
'Vince Clarke',
'Quiste ovárico',
'Liga Deportiva Universitaria de Quito',
'Johnny Cash',
'El Alto',
'Trigun',
'Nick Fury',
'Terremoto de México de 1985',
'Ciudad Bolívar (Bogotá)',
'Santa Ana Chiautempan',
'Automodelismo',
'Portsmouth Football Club',
'Iker Casillas',
'Silent Hill',
'Escuela Militar (estación)',
'Bestia de Gévaudan',
'Rafael Pombo',
'Palamós Club de Futbol',
'Terran',
'Kennedy (Bogotá)',
'Capcom',
'Monaguillo',
'Instituto Tecnológico de Ciudad Madero',
'Unión Balompédica Conquense',
'AMD Am386',
'La Robla',
'Sociedad Deportiva Huesca',
'Santa Rosa de Calamuchita',
'Naturalización',
'TV Brasil',
'Gillian Anderson',
'Bowser',
'Dr. No (película)',
'Operación Trueno (película)',
'Club Deportes Concepción',
'Jet Set Radio',
'All Grown Up!',
'Calle 13 Universal',
'Réquiem',
'Darth Revan',
'Jim Root',
'Tentación (telenovela)',
'Olombrada',
'Antonello Venditti',
'Rino Gaetano',
'Rol en vivo',
'Club de Deportes Copiapó',
'Coquimbo Unido',
'Seguridad y salud laboral',
'Xuxa',
'Mantenimiento de software',
'Parma Football Club',
'René Lavand',
'Flans',
'César Borgia',
'Società Sportiva Lazio',
'Timol',
'Cine de terror',
'Majin Boo',
'Cell (personaje de Dragon Ball)',
'Desastre',
'Ivy Queen',
'Mariano Mores',
'Cultural y Deportiva Leonesa',
'Club Deportivo Alcoyano',
'Defensor Sporting Club',
'Michael Rasmussen',
'Andreas Klöden',
'José Manuel de la Sota',
'Fuente Álamo de Murcia',
'Despina Vandi',
'Club Deportivo Arturo Fernández Vial',
'Harley Quinn',
'Club Deportivo Castellón',
'Riddler',
'Microsoft Visual Studio',
'Real Murcia Club de Fútbol',
'Carlos Anwandter',
'Monasterio de Montserrat',
'Fabian Cancellara',
'Tom Boonen',
'Ryō Hirohashi',
'AeroSur',
'República de Ragusa',
'Florinda Meza',
'Partido de Chascomús',
'Jay Kay',
'Chiquimula (departamento)',
'Berri Txarrak',
'Español centroamericano',
'Doñinos de Salamanca',
'Sucre',
'Virreinato del Perú',
'Departamento de Tacna',
'Áster',
'Che',
'A sangre fría (película de 1967)',
'Satanás',
'Christiaan Neethling Barnard',
'Aelo',
'Aguascalientes (Aguascalientes)',
'Donnie Darko',
'Juliaca',
'Sōkaku Takeda',
'Misery (película)',
'Aurora de Chile',
'Candelario',
'Mu (continente perdido)',
'Γ',
'Cartismo',
'Diego Rivera',
'Barruecopardo',
'Wall Street (película)',
'Full Metal Jacket (película)',
'Vejer de la Frontera',
'Grupo Televisa',
'Eiichirō Oda',
'Tetraciclina',
'Religión en España',
'Tom Berenger',
'Kendō',
'Psicosis (película de 1960)',
'Fantasma',
'Aeropuerto Internacional Ministro Pistarini',
'Panthera tigris',
'Teoría de categorías',
'Cumbres borrascosas (película de 1939)',
'Dinero',
'Detector de mentiras',
'Sistema de control',
'Historia de América del Norte',
'Instituto Tecnológico de Durango',
'Tikal',
'Victoria de Durango',
'Embarazo',
'534',
'260',
'Espacio métrico',
'Werner Herzog',
'El Bierzo',
'BMW',
'Batalla de Arica',
'Mitología japonesa',
'The Cure',
'Vibrio cholerae',
'Gaita zuliana',
'Campylobacter',
'Aa! Megami-sama',
'Real Club Deportivo de La Coruña',
'Historia de Libia',
'Mole de caderas',
'Wilfredo Gómez',
'Historia económica',
'Cultura de Bolivia',
'Boro',
'Panamá (ciudad)',
'Amala',
'Texas',
'Hunter × Hunter',
'Instalación industrial',
'Plantago lanceolata',
'Provincia de Jujuy',
'Provincia de La Pampa',
'Provincia de Misiones',
'Provincia de Santiago del Estero',
'Provincia de Camaná',
'Necton',
'Sherlock Holmes',
'Portaaviones',
'César Milstein',
'Michael Jordan',
'Huella filogenética',
'Ghana',
'Managua',
'Isla Grande de Tierra del Fuego',
'Energía solar térmica',
'Hiragana',
'ARN no codificante',
'Antofagasta',
'Ruby',
'Terapia génica',
'Ronald Reagan',
'Mangaka',
'Cultura de Colombia',
'Historia de Honduras',
'The Silence of the Lambs (película)',
'Gosu (expresión)',
'Control de gestión',
'Contabilidad financiera',
'Bandera nacional',
'Política de México',
'Medicamento',
'Historia de Panamá',
'Departamento de Potosí',
'1992',
'Ciudad de Salta',
'Aguascalientes',
'Prostitución',
'Morelos',
'11 de marzo',
'Thomas Malthus',
'Topografía',
'La Rioja (España)',
'Midget',
'Cultura de México',
'Historia de la inteligencia artificial',
'Cadmio',
'América del Norte',
'Colonia Cuauhtémoc',
'The Twilight Saga: Eclipse',
'Emanuele Molin',
'Historia del Atlético Nacional',
'R. J. Mitchell',
'Ejército del Pueblo Paraguayo',
'Lala Satalin Deviluke',
'Carlos de la Fuente',
'Club Caza Monstruos',
'Terry Tyler',
'Somos tú y yo: un nuevo día',
'Fairchild C-123 Provider',
'Destroy All Humans!',
'Idioma eudeve',
'Vallnord',
'FOX (España)',
'Hayate no Gotoku!',
'Chino & Nacho',
'Empire EP',
'Luis Izquierdo Fredes',
'FIFA 10',
'Rozen Maiden',
'Them Crooked Vultures',
'Tetraquark',
'Lo de Carranza',
'Juan de Limpias Carvajal',
'Expedición Auxiliar de Santa Cruz a Quito',
'Pattaya',
'Raymond Thevenot',
'Historia del Club de Deportes Cobreloa',
'Región Sierra Norte',
'Club Atlético Banfield',
'Ladyhawke (película)',
'Diego Valdés',
'Our Song',
'Elecciones municipales de Nicaragua de 2009',
'Fernando Cordero Fonseca',
'Rosita Pelayo',
'Basílica de Nuestra Señora del Rosario de Chiquinquirá',
'Bristol Jupiter',
'Hilda de Polaris',
'Fearless (película de 1993)',
'Ministerio de Comercio, Industria y Turismo de Colombia',
'Ronnie Aldrich',
'Eva Cortés',
'Kid vs. Kat',
'Sativanorte',
'Kaija Kärkinen',
'Daniel Sturridge',
'Jardín Balbuena',
'Transporte en la ciudad de Rosario',
'Capitán McCluskey',
'Isla Paradise',
'Phan Thị Kim Phúc',
'Cementerio de Podunk',
'Batalla de Tablada',
'Johan Rodríguez',
'Los guardianes del tiempo',
'Instituto Nacional de Migración (México)',
'Julián Cerdá Vicente',
'Federación Fajardina',
'Back-side bus',
'María Colón de la Cueva',
'Santuario de fauna y flora Iguaque',
'Tronic',
'Mirtha Medina',
'Diego (álbum de Diego González)',
'Doña Bella',
'Crimson (cómic)',
'Ricky Hatton',
'Tailandia en la Segunda Guerra Mundial',
'Blue Gender',
'Redes inalambricas',
'Club Atlético Huracán (Moquegua)',
'Delegacion Benito Juarez',
'Batalla de Moquegua',
'Manuel Castañer',
'Castle',
'Bakemonogatari',
'BlueTrack',
'Partido Socialista de la Revolución Nacional',
'Sellos de España en 1999',
'Petróleo',
'Taken by Cars',
'Serie Mundial de béisbol de 1996',
'Disa Records',
'IPhone 3GS',
'Germano argentinos',
'Víllodas',
'Ricardo Steer',
'Giro de Italia 2001',
'Team Argos-Shimano',
'Krisztián Vadócz',
'Matthias Rettner',
'Kick Buttowski: Suburban Daredevil',
'Tengo todo excepto a ti',
'Bardaisan',
'Spin (álbum)',
'Luke Jacobz',
'Luis Alberto Pozzi',
'Zeke & Luther',
'Ivantosaurus',
'Ksenia Sujínova',
'Nowhere Islands',
'Chris Coghill',
'Alaa Abdul-Zahra Khashan',
'Xicohtzinco',
"Madalyn Murray O'Hair",
'Montalbán (Carabobo)',
'Indigo (álbum)',
'Nohmul',
'Liga Nacional de Guatemala 1993/94',
'Edurne Uriarte',
'Teemu Pukki',
'Anillo del Pescador',
'Liga Nacional de Guatemala 1998/99',
'Tulio Pizzi',
'José María Morelos',
'Tauro Sport Auto',
'Sun Channel',
'Clase (biología)',
'Wifredo Espina Claveras',
'Juan Almeida Bosque',
'Alexander Corvinus',
'Jesús Casillas Romero',
'Botellita de Jerez',
'Nicolás Romero (coronel)',
'Festival Cultural de Mayo',
'Bi Sheng',
'La otra cara del alma',
'Gabriela Rivero',
'Martín Del Pomar',
'Brendon Urie',
'Kirt Hector',
'Carlos Pellegrini',
'Dominique',
'José Rosas Moreno',
'Incendio de la Iglesia de la Compañía',
'Tuxtla Gutiérrez',
'Jaques Leclercq',
'We Butter The Bread With Butter',
'Tina de Jarque',
'Rocky Balboa',
'Joan Fuster Bonnin',
'Thrust SSC',
'Manuel Alas Sardaneta y Pompa',
'María de Cleves',
'José Saturnino Cardozo',
'Robin Henderson',
'The Dr. Oz Show',
'Francisco Díaz Pimienta',
'Giezi',
'Donato Fuejo',
'Prelatura de Humahuaca',
'Malcolm X',
'Quinta de Anauco',
'Isla Baltra',
'Agencia EFE',
'Julio César Aráoz',
'Maximiliano I de México',
'Vicepresidente de Venezuela',
'Javier Alfaya',
'Sidney Bechet',
]
revisados06 = [
]
revisados07 = [
]
revisados08 = [
]
revisados09 = [
]
revisados10 = [
]
revisados11 = [
]
revisados12 = [
]
# Union of all batches: a set collapses duplicates across batches and makes
# membership tests O(1).
revisados = set(revisados01+revisados02+revisados03+revisados04+revisados05+revisados06+revisados07+revisados08+revisados09+revisados10+revisados11+revisados12)
| races1986/SafeLanguage | CEM/_s/revisados.py | Python | epl-1.0 | 23,672 | [
"MOE"
] | 10e9ac02a0efef46648b902058be870939a8b610e0bd0a26756f7729ba329c02 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import warnings
from math import pi
import numpy as np
import pytest
from monty.os.path import which
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Molecule, Structure
from pymatgen.analysis.local_env import (
BrunnerNN_real,
BrunnerNN_reciprocal,
BrunnerNN_relative,
CovalentBondNN,
Critic2NN,
CrystalNN,
CutOffDictNN,
EconNN,
JmolNN,
LocalStructOrderParams,
MinimumDistanceNN,
MinimumOKeeffeNN,
MinimumVIRENN,
NearNeighbors,
OpenBabelNN,
ValenceIonicRadiusEvaluator,
VoronoiNN,
get_neighbors_of_site_with_index,
site_is_of_motif_type,
solid_angle,
IsayevNN,
)
from pymatgen.util.testing import PymatgenTest
class ValenceIonicRadiusEvaluatorTest(PymatgenTest):
    """Check valence and ionic-radius assignment on an MgO rocksalt cell."""

    def setUp(self):
        """Build a conventional MgO rocksalt structure and evaluate it."""
        lattice = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
        species = ["Mg"] * 4 + ["O"] * 4
        frac_coords = [
            [0, 0, 0],
            [0.5, 0.5, 0],
            [0.5, 0, 0.5],
            [0, 0.5, 0.5],
            [0.5, 0, 0],
            [0, 0.5, 0],
            [0, 0, 0.5],
            [0.5, 0.5, 0.5],
        ]
        self._mgo_uc = Structure(lattice, species, frac_coords, True, True)
        self._mgo_valrad_evaluator = ValenceIonicRadiusEvaluator(self._mgo_uc)

    def test_valences_ionic_structure(self):
        # Mg should come out +2 and O -2; no other valences are expected.
        for valence in self._mgo_valrad_evaluator.valences.values():
            self.assertIn(valence, (2, -2))

    def test_radii_ionic_structure(self):
        # Shannon radii for 6-coordinate Mg2+ (0.86) and O2- (1.26).
        for radius in self._mgo_valrad_evaluator.radii.values():
            self.assertIn(radius, (0.86, 1.26))

    def tearDown(self):
        del self._mgo_uc
        del self._mgo_valrad_evaluator
class VoronoiNNTest(PymatgenTest):
    """Tests for the Voronoi-tessellation near-neighbor strategy."""

    def setUp(self):
        # LiFePO4 with an O-only target filter, plus a disordered Si/C
        # structure to exercise the unfiltered code path.
        self.s = self.get_structure("LiFePO4")
        self.nn = VoronoiNN(targets=[Element("O")])
        self.s_sic = self.get_structure("Si")
        self.s_sic["Si"] = {"Si": 0.5, "C": 0.5}
        self.nn_sic = VoronoiNN()
    def test_get_voronoi_polyhedra(self):
        self.assertEqual(len(self.nn.get_voronoi_polyhedra(self.s, 0).items()), 8)
    def test_get_cn(self):
        # Weighted CN is a float (solid-angle weighted), hence almost-equal.
        self.assertAlmostEqual(self.nn.get_cn(self.s, 0, use_weights=True), 5.809265748999465, 7)
        self.assertAlmostEqual(self.nn_sic.get_cn(self.s_sic, 0, use_weights=True), 4.5381161643940668, 7)
    def test_get_coordinated_sites(self):
        self.assertEqual(len(self.nn.get_nn(self.s, 0)), 8)
    def test_volume(self):
        # The Voronoi cells of all sites must tile the unit cell exactly.
        self.nn.targets = None
        volume = 0
        for n in range(len(self.s)):
            for nn in self.nn.get_voronoi_polyhedra(self.s, n).values():
                volume += nn["volume"]
        self.assertAlmostEqual(self.s.volume, volume)
    def test_solid_angle(self):
        # Solid angles around any site must sum to the full sphere (4*pi).
        self.nn.targets = None
        for n in range(len(self.s)):
            angle = 0
            for nn in self.nn.get_voronoi_polyhedra(self.s, n).values():
                angle += nn["solid_angle"]
            self.assertAlmostEqual(4 * np.pi, angle)
        self.assertEqual(solid_angle([0, 0, 0], [[1, 0, 0], [-1, 0, 0], [0, 1, 0]]), pi)
    def test_nn_shell(self):
        # First, make a SC lattice. Make my math easier
        s = Structure([[1, 0, 0], [0, 1, 0], [0, 0, 1]], ["Cu"], [[0, 0, 0]])
        # Get the 1NN shell
        self.nn.targets = None
        nns = self.nn.get_nn_shell_info(s, 0, 1)
        self.assertEqual(6, len(nns))
        # Test the 2nd NN shell
        nns = self.nn.get_nn_shell_info(s, 0, 2)
        self.assertEqual(18, len(nns))
        self.assertArrayAlmostEqual([1] * 6, [x["weight"] for x in nns if max(np.abs(x["image"])) == 2])
        self.assertArrayAlmostEqual([2] * 12, [x["weight"] for x in nns if max(np.abs(x["image"])) == 1])
        # Test the 3rd NN shell
        nns = self.nn.get_nn_shell_info(s, 0, 3)
        for nn in nns:
            # Check that the coordinates were set correctly
            self.assertArrayAlmostEqual(nn["site"].frac_coords, nn["image"])
        # Test with a structure that has unequal faces
        cscl = Structure(
            Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
            ["Cl1-", "Cs1+"],
            [[2.1045, 2.1045, 2.1045], [0, 0, 0]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.nn.weight = "area"
        nns = self.nn.get_nn_shell_info(cscl, 0, 1)
        self.assertEqual(14, len(nns))
        self.assertEqual(6, np.isclose([x["weight"] for x in nns], 0.125 / 0.32476).sum())  # Square faces
        self.assertEqual(8, np.isclose([x["weight"] for x in nns], 1).sum())
        nns = self.nn.get_nn_shell_info(cscl, 0, 2)
        # Weight of getting back on to own site
        # Square-square hop: 6*5 options times (0.125/0.32476)^2 weight each
        # Hex-hex hop: 8*7 options times 1 weight each
        self.assertAlmostEqual(
            60.4444,
            np.sum([x["weight"] for x in nns if x["site_index"] == 0]),
            places=3,
        )
    def test_adj_neighbors(self):
        # Make a simple cubic structure
        s = Structure([[1, 0, 0], [0, 1, 0], [0, 0, 1]], ["Cu"], [[0, 0, 0]])
        # Compute the NNs with adjacency
        self.nn.targets = None
        neighbors = self.nn.get_voronoi_polyhedra(s, 0)
        # Each neighbor has 4 adjacent neighbors, all orthogonal
        for nn_key, nn_info in neighbors.items():
            self.assertEqual(4, len(nn_info["adj_neighbors"]))
            for adj_key in nn_info["adj_neighbors"]:
                self.assertEqual(0, np.dot(nn_info["normal"], neighbors[adj_key]["normal"]))
    def test_all_at_once(self):
        # Get all of the sites for LiFePO4
        all_sites = self.nn.get_all_voronoi_polyhedra(self.s)
        # Make sure they are the same as the single-atom ones
        for i, site in enumerate(all_sites):
            # Compute the tessellation using only one site
            by_one = self.nn.get_voronoi_polyhedra(self.s, i)
            # Match the coordinates the of the neighbors, as site matching does not seem to work?
            all_coords = np.sort([x["site"].coords for x in site.values()], axis=0)
            by_one_coords = np.sort([x["site"].coords for x in by_one.values()], axis=0)
            self.assertArrayAlmostEqual(all_coords, by_one_coords)
        # Test the nn_info operation
        all_nn_info = self.nn.get_all_nn_info(self.s)
        for i, info in enumerate(all_nn_info):
            # Compute using the by-one method
            by_one = self.nn.get_nn_info(self.s, i)
            # Get the weights
            all_weights = sorted([x["weight"] for x in info])
            by_one_weights = sorted([x["weight"] for x in by_one])
            self.assertArrayAlmostEqual(all_weights, by_one_weights)
    def test_Cs2O(self):
        """A problematic structure in the Materials Project"""
        strc = Structure(
            [
                [4.358219, 0.192833, 6.406960],
                [2.114414, 3.815824, 6.406960],
                [0.311360, 0.192833, 7.742498],
            ],
            ["O", "Cs", "Cs"],
            [[0, 0, 0], [0.264318, 0.264318, 0.264318], [0.735682, 0.735682, 0.735682]],
            coords_are_cartesian=False,
        )
        # Compute the voronoi tessellation
        result = VoronoiNN().get_all_voronoi_polyhedra(strc)
        self.assertEqual(3, len(result))
    def test_filtered(self):
        nn = VoronoiNN(weight="area")
        # Make a bcc crystal
        bcc = Structure(
            [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
            ["Cu", "Cu"],
            [[0, 0, 0], [0.5, 0.5, 0.5]],
            coords_are_cartesian=False,
        )
        # Compute the weight of the little face
        big_face_area = np.sqrt(3) * 3 / 2 * (2 / 4 / 4)
        small_face_area = 0.125
        little_weight = small_face_area / big_face_area
        # Run one test where you get the small neighbors
        nn.tol = little_weight * 0.99
        nns = nn.get_nn_info(bcc, 0)
        self.assertEqual(14, len(nns))
        # Run a second test where we screen out little faces
        nn.tol = little_weight * 1.01
        nns = nn.get_nn_info(bcc, 0)
        self.assertEqual(8, len(nns))
        # Make sure it works for the `get_all` operation
        all_nns = nn.get_all_nn_info(bcc * [2, 2, 2])
        self.assertEqual(
            [
                8,
            ]
            * 16,
            [len(x) for x in all_nns],
        )
    def tearDown(self):
        del self.s
        del self.nn
class JmolNNTest(PymatgenTest):
    """Tests for the Jmol bonding-distance near-neighbor strategy."""

    def setUp(self):
        self.jmol = JmolNN()
        # Same strategy with an enlarged Li radius override.
        self.jmol_update = JmolNN(el_radius_updates={"Li": 1})

    def test_get_nn(self):
        struct = self.get_structure("LiFePO4")
        # Expected coordination numbers with default Jmol radii; with those
        # radii Li registers as 0-coordinated in LiFePO4.
        expected_cn = {Element("Li"): 0, Element("Fe"): 6, Element("P"): 4}
        nsites_checked = 0
        for site_idx, site in enumerate(struct):
            if site.specie in expected_cn:
                self.assertEqual(self.jmol.get_cn(struct, site_idx), expected_cn[site.specie])
                nsites_checked += 1
        # 4 Li + 4 Fe + 4 P sites should have been covered.
        self.assertEqual(nsites_checked, 12)
        # The user override makes Li show up as 2-coordinated.
        self.assertEqual(self.jmol_update.get_cn(struct, 0), 2)
        # get_nn must agree with get_cn.
        self.assertEqual(len(self.jmol_update.get_nn(struct, 0)), 2)

    def tearDown(self):
        del self.jmol
        del self.jmol_update
class TestIsayevNN(PymatgenTest):
    """Tests for the Isayev et al. near-neighbor method."""

    def test_get_nn(self):
        inn = IsayevNN()
        struct = self.get_structure("LiFePO4")
        # Representative Li (0), Fe (5) and P (10) sites.
        for site_idx, expected_cn in ((0, 2), (5, 6), (10, 4)):
            self.assertEqual(inn.get_cn(struct, site_idx), expected_cn)
        # get_nn must agree with get_cn for the Li site.
        self.assertEqual(len(inn.get_nn(struct, 0)), 2)
class OpenBabelNNTest(PymatgenTest):
    """Tests for the OpenBabel bond-perception near-neighbor strategy."""

    def setUp(self):
        # Whole class is skipped when the openbabel bindings are absent.
        pytest.importorskip("openbabel", reason="OpenBabel not installed")
        self.benzene = Molecule.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "benzene.xyz"))
        self.acetylene = Molecule.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "acetylene.xyz"))

    def test_nn_orders(self):
        strat = OpenBabelNN()
        # Acetylene carbon: triple bond to the other C, single bond to H.
        info = strat.get_nn_info(self.acetylene, 0)
        self.assertEqual(info[0]["weight"], 3)
        self.assertEqual(info[1]["weight"], 1)
        # Currently, benzene bonds register either as double or single,
        # not aromatic.
        # Instead of searching for aromatic bonds, we check that bonds are
        # detected in the same way from both sides.
        weight_from_0 = strat.get_nn_info(self.benzene, 0)[0]["weight"]
        weight_from_1 = strat.get_nn_info(self.benzene, 1)[0]["weight"]
        self.assertEqual(weight_from_0, weight_from_1)

    def test_nn_length(self):
        # With order=False the weight is the bond length in Angstrom.
        strat = OpenBabelNN(order=False)
        bonds = strat.get_nn_info(self.benzene, 0)
        cc_lengths = [b["weight"] for b in bonds if str(b["site"].specie) == "C"]
        ch_lengths = [b["weight"] for b in bonds if str(b["site"].specie) == "H"]
        self.assertAlmostEqual(cc_lengths[0], 1.41, 2)
        self.assertAlmostEqual(ch_lengths[0], 1.02, 2)
        self.assertAlmostEqual(strat.get_nn_info(self.acetylene, 0)[0]["weight"], 1.19, 2)

    def tearDown(self):
        del self.benzene
        del self.acetylene
class CovalentBondNNTest(PymatgenTest):
    """Tests for the covalent-bond-length near-neighbor strategy on molecules."""

    def setUp(self):
        test_dir = PymatgenTest.TEST_FILES_DIR
        self.benzene = Molecule.from_file(os.path.join(test_dir, "benzene.xyz"))
        self.acetylene = Molecule.from_file(os.path.join(test_dir, "acetylene.xyz"))

    def test_nn_orders(self):
        strat = CovalentBondNN()
        # Acetylene carbon: triple bond to C, single bond to H.
        info = strat.get_nn_info(self.acetylene, 0)
        self.assertEqual(info[0]["weight"], 3)
        self.assertEqual(info[1]["weight"], 1)
        # Benzene carbon: fractional (resonance-averaged) bond order.
        info = strat.get_nn_info(self.benzene, 0)
        self.assertAlmostEqual(info[0]["weight"], 1.6596, places=4)

    def test_nn_length(self):
        # With order=False the weight is the bond length in Angstrom.
        strat = CovalentBondNN(order=False)
        bonds = strat.get_nn_info(self.benzene, 0)
        cc_lengths = [b["weight"] for b in bonds if str(b["site"].specie) == "C"]
        ch_lengths = [b["weight"] for b in bonds if str(b["site"].specie) == "H"]
        self.assertAlmostEqual(cc_lengths[0], 1.41, 2)
        self.assertAlmostEqual(ch_lengths[0], 1.02, 2)
        self.assertAlmostEqual(strat.get_nn_info(self.acetylene, 0)[0]["weight"], 1.19, places=2)

    def test_bonded_structure(self):
        strat = CovalentBondNN()
        # Benzene graph contains exactly one ring; acetylene has four nodes.
        self.assertEqual(len(strat.get_bonded_structure(self.benzene).find_rings()), 1)
        self.assertEqual(len(strat.get_bonded_structure(self.acetylene).graph.nodes), 4)

    def tearDown(self):
        del self.benzene
        del self.acetylene
class MiniDistNNTest(PymatgenTest):
    """Cross-checks coordination numbers from all distance-based NN classes
    on a fixed set of reference structures (diamond, NaCl, CsCl, MoS2,
    LiFePO4)."""

    def setUp(self):
        # All coordinate fixtures below are Cartesian (Angstrom).
        self.diamond = Structure(
            Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264], [0, 0, 2.528]]),
            ["C0+", "C0+"],
            [[2.554, 1.806, 4.423], [0.365, 0.258, 0.632]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.nacl = Structure(
            Lattice([[3.485, 0, 2.012], [1.162, 3.286, 2.012], [0, 0, 4.025]]),
            ["Na1+", "Cl1-"],
            [[0, 0, 0], [2.324, 1.643, 4.025]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.cscl = Structure(
            Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
            ["Cl1-", "Cs1+"],
            [[2.105, 2.105, 2.105], [0, 0, 0]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        # Layered structure: in-plane vs out-of-plane neighbors differ.
        self.mos2 = Structure(
            Lattice([[3.19, 0, 0], [-1.595, 2.763, 0], [0, 0, 17.44]]),
            ["Mo", "S", "S"],
            [[-1e-06, 1.842, 3.72], [1.595, 0.92, 5.29], [1.595, 0.92, 2.155]],
            coords_are_cartesian=True,
        )
        self.lifepo4 = self.get_structure("LiFePO4")
        self.lifepo4.add_oxidation_state_by_guess()
    def test_all_nn_classes(self):
        # Expected CNs differ per algorithm; CsCl in particular splits the
        # methods between 8 (first shell only) and 14 (first + second shell).
        self.assertEqual(MinimumDistanceNN(cutoff=5, get_all_sites=True).get_cn(self.cscl, 0), 14)
        self.assertEqual(MinimumDistanceNN().get_cn(self.diamond, 0), 4)
        self.assertEqual(MinimumDistanceNN().get_cn(self.nacl, 0), 6)
        self.assertEqual(MinimumDistanceNN().get_cn(self.lifepo4, 0), 6)
        self.assertEqual(MinimumDistanceNN(tol=0.01).get_cn(self.cscl, 0), 8)
        self.assertEqual(MinimumDistanceNN(tol=0.1).get_cn(self.mos2, 0), 6)
        for image in MinimumDistanceNN(tol=0.1).get_nn_images(self.mos2, 0):
            self.assertTrue(image in [(0, 0, 0), (0, 1, 0), (-1, 0, 0), (0, 0, 0), (0, 1, 0), (-1, 0, 0)])
        okeeffe = MinimumOKeeffeNN(tol=0.01)
        self.assertEqual(okeeffe.get_cn(self.diamond, 0), 4)
        self.assertEqual(okeeffe.get_cn(self.nacl, 0), 6)
        self.assertEqual(okeeffe.get_cn(self.cscl, 0), 8)
        self.assertEqual(okeeffe.get_cn(self.lifepo4, 0), 2)
        virenn = MinimumVIRENN(tol=0.01)
        self.assertEqual(virenn.get_cn(self.diamond, 0), 4)
        self.assertEqual(virenn.get_cn(self.nacl, 0), 6)
        self.assertEqual(virenn.get_cn(self.cscl, 0), 8)
        self.assertEqual(virenn.get_cn(self.lifepo4, 0), 2)
        brunner_recip = BrunnerNN_reciprocal(tol=0.01)
        self.assertEqual(brunner_recip.get_cn(self.diamond, 0), 4)
        self.assertEqual(brunner_recip.get_cn(self.nacl, 0), 6)
        self.assertEqual(brunner_recip.get_cn(self.cscl, 0), 14)
        self.assertEqual(brunner_recip.get_cn(self.lifepo4, 0), 6)
        brunner_rel = BrunnerNN_relative(tol=0.01)
        self.assertEqual(brunner_rel.get_cn(self.diamond, 0), 4)
        self.assertEqual(brunner_rel.get_cn(self.nacl, 0), 6)
        self.assertEqual(brunner_rel.get_cn(self.cscl, 0), 14)
        self.assertEqual(brunner_rel.get_cn(self.lifepo4, 0), 6)
        brunner_real = BrunnerNN_real(tol=0.01)
        self.assertEqual(brunner_real.get_cn(self.diamond, 0), 4)
        self.assertEqual(brunner_real.get_cn(self.nacl, 0), 6)
        self.assertEqual(brunner_real.get_cn(self.cscl, 0), 14)
        self.assertEqual(brunner_real.get_cn(self.lifepo4, 0), 30)
        econn = EconNN()
        self.assertEqual(econn.get_cn(self.diamond, 0), 4)
        self.assertEqual(econn.get_cn(self.nacl, 0), 6)
        self.assertEqual(econn.get_cn(self.cscl, 0), 14)
        self.assertEqual(econn.get_cn(self.lifepo4, 0), 6)
        voroinn = VoronoiNN(tol=0.5)
        self.assertEqual(voroinn.get_cn(self.diamond, 0), 4)
        self.assertEqual(voroinn.get_cn(self.nacl, 0), 6)
        self.assertEqual(voroinn.get_cn(self.cscl, 0), 8)
        self.assertEqual(voroinn.get_cn(self.lifepo4, 0), 6)
        crystalnn = CrystalNN()
        self.assertEqual(crystalnn.get_cn(self.diamond, 0), 4)
        self.assertEqual(crystalnn.get_cn(self.nacl, 0), 6)
        self.assertEqual(crystalnn.get_cn(self.cscl, 0), 8)
        self.assertEqual(crystalnn.get_cn(self.lifepo4, 0), 6)
    def test_get_local_order_params(self):
        # Near-perfect tetrahedral / octahedral order parameters (~1.0).
        nn = MinimumDistanceNN()
        ops = nn.get_local_order_parameters(self.diamond, 0)
        self.assertAlmostEqual(ops["tetrahedral"], 0.9999934389036574)
        ops = nn.get_local_order_parameters(self.nacl, 0)
        self.assertAlmostEqual(ops["octahedral"], 0.9999995266669)
class MotifIdentificationTest(PymatgenTest):
    """Tests for site_is_of_motif_type and get_neighbors_of_site_with_index."""

    def setUp(self):
        # Reference structures/clusters for each motif the classifier knows.
        self.silicon = Structure(
            Lattice.cubic(5.47),
            ["Si", "Si", "Si", "Si", "Si", "Si", "Si", "Si"],
            [
                [0.000000, 0.000000, 0.500000],
                [0.750000, 0.750000, 0.750000],
                [0.000000, 0.500000, 1.000000],
                [0.750000, 0.250000, 0.250000],
                [0.500000, 0.000000, 1.000000],
                [0.250000, 0.750000, 0.250000],
                [0.500000, 0.500000, 0.500000],
                [0.250000, 0.250000, 0.750000],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=False,
            site_properties=None,
        )
        self.diamond = Structure(
            Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264], [0, 0, 2.528]]),
            ["C0+", "C0+"],
            [[2.554, 1.806, 4.423], [0.365, 0.258, 0.632]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.nacl = Structure(
            Lattice([[3.485, 0, 2.012], [1.162, 3.286, 2.012], [0, 0, 4.025]]),
            ["Na1+", "Cl1-"],
            [[0, 0, 0], [2.324, 1.643, 4.025]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.cscl = Structure(
            Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
            ["Cl1-", "Cs1+"],
            [[2.105, 2.105, 2.105], [0, 0, 0]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        # Isolated clusters in a huge box so there are no periodic neighbors.
        self.square_pyramid = Structure(
            Lattice([[100, 0, 0], [0, 100, 0], [0, 0, 100]]),
            ["C", "C", "C", "C", "C", "C"],
            [[0, 0, 0], [1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0], [0, 0, 1]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.trigonal_bipyramid = Structure(
            Lattice([[100, 0, 0], [0, 100, 0], [0, 0, 100]]),
            ["P", "Cl", "Cl", "Cl", "Cl", "Cl"],
            [
                [0, 0, 0],
                [0, 0, 2.14],
                [0, 2.02, 0],
                [1.74937, -1.01, 0],
                [-1.74937, -1.01, 0],
                [0, 0, -2.14],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
    def test_site_is_of_motif_type(self):
        for i in range(self.diamond.num_sites):
            self.assertEqual(site_is_of_motif_type(self.diamond, i), "tetrahedral")
        for i in range(self.nacl.num_sites):
            self.assertEqual(site_is_of_motif_type(self.nacl, i), "octahedral")
        for i in range(self.cscl.num_sites):
            self.assertEqual(site_is_of_motif_type(self.cscl, i), "bcc")
        # Only the central site of each cluster matches a motif; the ligand
        # sites should come back "unrecognized".
        self.assertEqual(site_is_of_motif_type(self.square_pyramid, 0), "square pyramidal")
        for i in range(1, self.square_pyramid.num_sites):
            self.assertEqual(site_is_of_motif_type(self.square_pyramid, i), "unrecognized")
        self.assertEqual(site_is_of_motif_type(self.trigonal_bipyramid, 0), "trigonal bipyramidal")
        for i in range(1, self.trigonal_bipyramid.num_sites):
            self.assertEqual(site_is_of_motif_type(self.trigonal_bipyramid, i), "unrecognized")
    def test_get_neighbors_of_site_with_index(self):
        # Diamond site 0 has 4 neighbors regardless of approach/parameters.
        self.assertEqual(len(get_neighbors_of_site_with_index(self.diamond, 0)), 4)
        self.assertEqual(len(get_neighbors_of_site_with_index(self.nacl, 0)), 6)
        self.assertEqual(len(get_neighbors_of_site_with_index(self.cscl, 0)), 8)
        self.assertEqual(len(get_neighbors_of_site_with_index(self.diamond, 0, delta=0.01)), 4)
        self.assertEqual(len(get_neighbors_of_site_with_index(self.diamond, 0, cutoff=6)), 4)
        self.assertEqual(
            len(get_neighbors_of_site_with_index(self.diamond, 0, approach="voronoi")),
            4,
        )
        self.assertEqual(
            len(get_neighbors_of_site_with_index(self.diamond, 0, approach="min_OKeeffe")),
            4,
        )
        self.assertEqual(
            len(get_neighbors_of_site_with_index(self.diamond, 0, approach="min_VIRE")),
            4,
        )
    def tearDown(self):
        del self.silicon
        del self.diamond
        del self.nacl
        del self.cscl
class NearNeighborTest(PymatgenTest):
    """Conformance checks shared by all NearNeighbors subclasses."""

    def setUp(self):
        self.diamond = Structure(
            Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264], [0, 0, 2.528]]),
            ["C0+", "C0+"],
            [[2.554, 1.806, 4.423], [0.365, 0.258, 0.632]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )

    def set_nn_info(self):
        # check conformance
        # implicitly assumes that all NearNeighbors subclasses
        # will correctly identify bonds in diamond, if it
        # can't there are probably bigger problems
        for subclass in NearNeighbors.__subclasses__():
            # Critic2NN has external dependency, is tested separately
            if "Critic2" in str(subclass):
                continue
            nn_info = subclass().get_nn_info(self.diamond, 0)
            self.assertEqual(nn_info[0]["site_index"], 1)
            self.assertEqual(nn_info[0]["image"][0], 1)

    def tearDown(self):
        del self.diamond
class LocalStructOrderParamsTest(PymatgenTest):
    """Tests for LocalStructOrderParams on ideal geometric motifs.

    setUp builds one fixture per motif (isolated clusters in large boxes,
    plus periodic cubic/bcc/fcc/hcp/diamond cells); the order-parameter
    assertions below index into the op_types list defined in
    test_get_order_parameters, so positions are significant.
    """

    def setUp(self):
        self.single_bond = Structure(
            Lattice.cubic(10),
            ["H", "H", "H"],
            [[1, 0, 0], [0, 0, 0], [6, 0, 0]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.linear = Structure(
            Lattice.cubic(10),
            ["H", "H", "H"],
            [[1, 0, 0], [0, 0, 0], [2, 0, 0]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.bent45 = Structure(
            Lattice.cubic(10),
            ["H", "H", "H"],
            [[0, 0, 0], [0.707, 0.707, 0], [0.707, 0, 0]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.cubic = Structure(
            Lattice.cubic(1),
            ["H"],
            [[0, 0, 0]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=False,
            site_properties=None,
        )
        self.bcc = Structure(
            Lattice.cubic(1),
            ["H", "H"],
            [[0, 0, 0], [0.5, 0.5, 0.5]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=False,
            site_properties=None,
        )
        self.fcc = Structure(
            Lattice.cubic(1),
            ["H", "H", "H", "H"],
            [[0, 0, 0], [0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=False,
            site_properties=None,
        )
        # Ideal c/a ratio for hcp is 1.633.
        self.hcp = Structure(
            Lattice.hexagonal(1, 1.633),
            ["H", "H"],
            [[0.3333, 0.6667, 0.25], [0.6667, 0.3333, 0.75]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=False,
            site_properties=None,
        )
        self.diamond = Structure(
            Lattice.cubic(1),
            ["H", "H", "H", "H", "H", "H", "H", "H"],
            [
                [0, 0, 0.5],
                [0.75, 0.75, 0.75],
                [0, 0.5, 0],
                [0.75, 0.25, 0.25],
                [0.5, 0, 0],
                [0.25, 0.75, 0.25],
                [0.5, 0.5, 0.5],
                [0.25, 0.25, 0.75],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=False,
            site_properties=None,
        )
        self.trigonal_off_plane = Structure(
            Lattice.cubic(100),
            ["H", "H", "H", "H"],
            [
                [0.50, 0.50, 0.50],
                [0.25, 0.75, 0.25],
                [0.25, 0.25, 0.75],
                [0.75, 0.25, 0.25],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.regular_triangle = Structure(
            Lattice.cubic(30),
            ["H", "H", "H", "H"],
            [[15, 15.28867, 15.65], [14.5, 15, 15], [15.5, 15, 15], [15, 15.866, 15]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.trigonal_planar = Structure(
            Lattice.cubic(30),
            ["H", "H", "H", "H"],
            [[15, 15.28867, 15], [14.5, 15, 15], [15.5, 15, 15], [15, 15.866, 15]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.square_planar = Structure(
            Lattice.cubic(30),
            ["H", "H", "H", "H", "H"],
            [
                [15, 15, 15],
                [14.75, 14.75, 15],
                [14.75, 15.25, 15],
                [15.25, 14.75, 15],
                [15.25, 15.25, 15],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.square = Structure(
            Lattice.cubic(30),
            ["H", "H", "H", "H", "H"],
            [
                [15, 15, 15.707],
                [14.75, 14.75, 15],
                [14.75, 15.25, 15],
                [15.25, 14.75, 15],
                [15.25, 15.25, 15],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.T_shape = Structure(
            Lattice.cubic(30),
            ["H", "H", "H", "H"],
            [[15, 15, 15], [15, 15, 15.5], [15, 15.5, 15], [15, 14.5, 15]],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.square_pyramid = Structure(
            Lattice.cubic(30),
            ["H", "H", "H", "H", "H", "H"],
            [
                [15, 15, 15],
                [15, 15, 15.3535],
                [14.75, 14.75, 15],
                [14.75, 15.25, 15],
                [15.25, 14.75, 15],
                [15.25, 15.25, 15],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.pentagonal_planar = Structure(
            Lattice.cubic(30),
            ["Xe", "F", "F", "F", "F", "F"],
            [
                [0, -1.6237, 0],
                [1.17969, 0, 0],
                [-1.17969, 0, 0],
                [1.90877, -2.24389, 0],
                [-1.90877, -2.24389, 0],
                [0, -3.6307, 0],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.pentagonal_pyramid = Structure(
            Lattice.cubic(30),
            ["Xe", "F", "F", "F", "F", "F", "F"],
            [
                [0, -1.6237, 0],
                [0, -1.6237, 1.17969],
                [1.17969, 0, 0],
                [-1.17969, 0, 0],
                [1.90877, -2.24389, 0],
                [-1.90877, -2.24389, 0],
                [0, -3.6307, 0],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.pentagonal_bipyramid = Structure(
            Lattice.cubic(30),
            ["Xe", "F", "F", "F", "F", "F", "F", "F"],
            [
                [0, -1.6237, 0],
                [0, -1.6237, -1.17969],
                [0, -1.6237, 1.17969],
                [1.17969, 0, 0],
                [-1.17969, 0, 0],
                [1.90877, -2.24389, 0],
                [-1.90877, -2.24389, 0],
                [0, -3.6307, 0],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.hexagonal_planar = Structure(
            Lattice.cubic(30),
            ["H", "C", "C", "C", "C", "C", "C"],
            [
                [0, 0, 0],
                [0.71, 1.2298, 0],
                [-0.71, 1.2298, 0],
                [0.71, -1.2298, 0],
                [-0.71, -1.2298, 0],
                [1.4199, 0, 0],
                [-1.4199, 0, 0],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.hexagonal_pyramid = Structure(
            Lattice.cubic(30),
            ["H", "Li", "C", "C", "C", "C", "C", "C"],
            [
                [0, 0, 0],
                [0, 0, 1.675],
                [0.71, 1.2298, 0],
                [-0.71, 1.2298, 0],
                [0.71, -1.2298, 0],
                [-0.71, -1.2298, 0],
                [1.4199, 0, 0],
                [-1.4199, 0, 0],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.hexagonal_bipyramid = Structure(
            Lattice.cubic(30),
            ["H", "Li", "Li", "C", "C", "C", "C", "C", "C"],
            [
                [0, 0, 0],
                [0, 0, 1.675],
                [0, 0, -1.675],
                [0.71, 1.2298, 0],
                [-0.71, 1.2298, 0],
                [0.71, -1.2298, 0],
                [-0.71, -1.2298, 0],
                [1.4199, 0, 0],
                [-1.4199, 0, 0],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.trigonal_pyramid = Structure(
            Lattice.cubic(30),
            ["P", "Cl", "Cl", "Cl", "Cl"],
            [
                [0, 0, 0],
                [0, 0, 2.14],
                [0, 2.02, 0],
                [1.74937, -1.01, 0],
                [-1.74937, -1.01, 0],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.trigonal_bipyramidal = Structure(
            Lattice.cubic(30),
            ["P", "Cl", "Cl", "Cl", "Cl", "Cl"],
            [
                [0, 0, 0],
                [0, 0, 2.14],
                [0, 2.02, 0],
                [1.74937, -1.01, 0],
                [-1.74937, -1.01, 0],
                [0, 0, -2.14],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.cuboctahedron = Structure(
            Lattice.cubic(30),
            ["H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H"],
            [
                [15, 15, 15],
                [15, 14.5, 14.5],
                [15, 14.5, 15.5],
                [15, 15.5, 14.5],
                [15, 15.5, 15.5],
                [14.5, 15, 14.5],
                [14.5, 15, 15.5],
                [15.5, 15, 14.5],
                [15.5, 15, 15.5],
                [14.5, 14.5, 15],
                [14.5, 15.5, 15],
                [15.5, 14.5, 15],
                [15.5, 15.5, 15],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.see_saw_rect = Structure(
            Lattice.cubic(30),
            ["H", "H", "H", "H", "H"],
            [
                [0.0, 0.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, -1.0, 0.0],
                [0.0, 0.0, -1.0],
                [-1.0, 0.0, 0.0],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
        self.sq_face_capped_trig_pris = Structure(
            Lattice.cubic(30),
            ["H", "H", "H", "H", "H", "H", "H", "H"],
            [
                [0, 0, 0],
                [-0.6546536707079771, -0.37796447300922725, 0.6546536707079771],
                [0.6546536707079771, -0.37796447300922725, 0.6546536707079771],
                [0.0, 0.7559289460184545, 0.6546536707079771],
                [-0.6546536707079771, -0.37796447300922725, -0.6546536707079771],
                [0.6546536707079771, -0.37796447300922725, -0.6546536707079771],
                [0.0, 0.7559289460184545, -0.6546536707079771],
                [0.0, -1.0, 0.0],
            ],
            validate_proximity=False,
            to_unit_cell=False,
            coords_are_cartesian=True,
            site_properties=None,
        )
    def test_init(self):
        self.assertIsNotNone(LocalStructOrderParams(["cn"], parameters=None, cutoff=0.99))
        # The constructor must deep-copy its parameters: mutating the
        # caller's dict afterwards must not change the stored values.
        parameters = [{"norm": 2}]
        lostops = LocalStructOrderParams(["cn"], parameters=parameters)
        tmp = lostops.get_parameters(0)
        parameters[0]["norm"] = 3
        self.assertEqual(tmp, lostops.get_parameters(0))
    def test_get_order_parameters(self):
        # Set up everything.
        # NOTE: op_vals indices in the assertions below correspond to
        # positions in this op_types list.
        op_types = [
            "cn",
            "bent",
            "bent",
            "tet",
            "oct",
            "bcc",
            "q2",
            "q4",
            "q6",
            "reg_tri",
            "sq",
            "sq_pyr_legacy",
            "tri_bipyr",
            "sgl_bd",
            "tri_plan",
            "sq_plan",
            "pent_plan",
            "sq_pyr",
            "tri_pyr",
            "pent_pyr",
            "hex_pyr",
            "pent_bipyr",
            "hex_bipyr",
            "T",
            "cuboct",
            "see_saw_rect",
            "hex_plan_max",
            "tet_max",
            "oct_max",
            "tri_plan_max",
            "sq_plan_max",
            "pent_plan_max",
            "cuboct_max",
            "tet_max",
            "sq_face_cap_trig_pris",
        ]
        op_params = [None for i in range(len(op_types))]
        op_params[1] = {"TA": 1, "IGW_TA": 1.0 / 0.0667}
        op_params[2] = {"TA": 45.0 / 180, "IGW_TA": 1.0 / 0.0667}
        op_params[33] = {
            "TA": 0.6081734479693927,
            "IGW_TA": 18.33,
            "fac_AA": 1.5,
            "exp_cos_AA": 2,
        }
        # One evaluator per neighbor-search cutoff; suffix encodes the cutoff.
        ops_044 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.44)
        ops_071 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.71)
        ops_087 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.87)
        ops_099 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.99)
        ops_101 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=1.01)
        ops_501 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=5.01)
        ops_voro = LocalStructOrderParams(op_types, parameters=op_params)
        # Single bond.
        op_vals = ops_101.get_order_parameters(self.single_bond, 0)
        self.assertAlmostEqual(int(op_vals[13] * 1000), 1000)
        op_vals = ops_501.get_order_parameters(self.single_bond, 0)
        self.assertAlmostEqual(int(op_vals[13] * 1000), 799)
        op_vals = ops_101.get_order_parameters(self.linear, 0)
        self.assertAlmostEqual(int(op_vals[13] * 1000), 0)
        # Linear motif.
        op_vals = ops_101.get_order_parameters(self.linear, 0)
        self.assertAlmostEqual(int(op_vals[1] * 1000), 1000)
        # 45 degrees-bent motif.
        op_vals = ops_101.get_order_parameters(self.bent45, 0)
        self.assertAlmostEqual(int(op_vals[2] * 1000), 1000)
        # T-shape motif.
        op_vals = ops_101.get_order_parameters(self.T_shape, 0, indices_neighs=[1, 2, 3])
        self.assertAlmostEqual(int(op_vals[23] * 1000), 1000)
        # Cubic structure.
        op_vals = ops_099.get_order_parameters(self.cubic, 0)
        self.assertAlmostEqual(op_vals[0], 0.0)
        self.assertIsNone(op_vals[3])
        self.assertIsNone(op_vals[4])
        self.assertIsNone(op_vals[5])
        self.assertIsNone(op_vals[6])
        self.assertIsNone(op_vals[7])
        self.assertIsNone(op_vals[8])
        op_vals = ops_101.get_order_parameters(self.cubic, 0)
        self.assertAlmostEqual(op_vals[0], 6.0)
        self.assertAlmostEqual(int(op_vals[3] * 1000), 23)
        self.assertAlmostEqual(int(op_vals[4] * 1000), 1000)
        self.assertAlmostEqual(int(op_vals[5] * 1000), 333)
        self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
        self.assertAlmostEqual(int(op_vals[7] * 1000), 763)
        self.assertAlmostEqual(int(op_vals[8] * 1000), 353)
        self.assertAlmostEqual(int(op_vals[28] * 1000), 1000)
        # Bcc structure.
        op_vals = ops_087.get_order_parameters(self.bcc, 0)
        self.assertAlmostEqual(op_vals[0], 8.0)
        self.assertAlmostEqual(int(op_vals[3] * 1000), 200)
        self.assertAlmostEqual(int(op_vals[4] * 1000), 145)
        self.assertAlmostEqual(int(op_vals[5] * 1000 + 0.5), 1000)
        self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
        self.assertAlmostEqual(int(op_vals[7] * 1000), 509)
        self.assertAlmostEqual(int(op_vals[8] * 1000), 628)
        # Fcc structure.
        op_vals = ops_071.get_order_parameters(self.fcc, 0)
        self.assertAlmostEqual(op_vals[0], 12.0)
        self.assertAlmostEqual(int(op_vals[3] * 1000), 36)
        self.assertAlmostEqual(int(op_vals[4] * 1000), 78)
        self.assertAlmostEqual(int(op_vals[5] * 1000), -2)
        self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
        self.assertAlmostEqual(int(op_vals[7] * 1000), 190)
        self.assertAlmostEqual(int(op_vals[8] * 1000), 574)
        # Hcp structure.
        op_vals = ops_101.get_order_parameters(self.hcp, 0)
        self.assertAlmostEqual(op_vals[0], 12.0)
        self.assertAlmostEqual(int(op_vals[3] * 1000), 33)
        self.assertAlmostEqual(int(op_vals[4] * 1000), 82)
        # self.assertAlmostEqual(int(op_vals[5] * 1000), -26)
        self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
        self.assertAlmostEqual(int(op_vals[7] * 1000), 97)
        self.assertAlmostEqual(int(op_vals[8] * 1000), 484)
        # Diamond structure.
        op_vals = ops_044.get_order_parameters(self.diamond, 0)
        self.assertAlmostEqual(op_vals[0], 4.0)
        self.assertAlmostEqual(int(op_vals[3] * 1000), 1000)
        self.assertAlmostEqual(int(op_vals[4] * 1000), 37)
        self.assertAlmostEqual(op_vals[5], 0.75)
        self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
        self.assertAlmostEqual(int(op_vals[7] * 1000), 509)
        self.assertAlmostEqual(int(op_vals[8] * 1000), 628)
        self.assertAlmostEqual(int(op_vals[27] * 1000), 1000)
        # Trigonal off-plane molecule.
        op_vals = ops_044.get_order_parameters(self.trigonal_off_plane, 0)
        self.assertAlmostEqual(op_vals[0], 3.0)
        self.assertAlmostEqual(int(op_vals[3] * 1000), 1000)
        self.assertAlmostEqual(int(op_vals[33] * 1000), 1000)
        # Trigonal-planar motif.
        op_vals = ops_101.get_order_parameters(self.trigonal_planar, 0)
        self.assertEqual(int(op_vals[0] + 0.5), 3)
        self.assertAlmostEqual(int(op_vals[14] * 1000 + 0.5), 1000)
        self.assertAlmostEqual(int(op_vals[29] * 1000 + 0.5), 1000)
        # Regular triangle motif.
        op_vals = ops_101.get_order_parameters(self.regular_triangle, 0)
        self.assertAlmostEqual(int(op_vals[9] * 1000), 999)
        # Square-planar motif.
        op_vals = ops_101.get_order_parameters(self.square_planar, 0)
        self.assertAlmostEqual(int(op_vals[15] * 1000 + 0.5), 1000)
        self.assertAlmostEqual(int(op_vals[30] * 1000 + 0.5), 1000)
        # Square motif.
        op_vals = ops_101.get_order_parameters(self.square, 0)
        self.assertAlmostEqual(int(op_vals[10] * 1000), 1000)
        # Pentagonal planar.
        op_vals = ops_101.get_order_parameters(self.pentagonal_planar.sites, 0, indices_neighs=[1, 2, 3, 4, 5])
        self.assertAlmostEqual(int(op_vals[12] * 1000 + 0.5), 126)
        self.assertAlmostEqual(int(op_vals[16] * 1000 + 0.5), 1000)
        self.assertAlmostEqual(int(op_vals[31] * 1000 + 0.5), 1000)
        # Trigonal pyramid motif.
        op_vals = ops_101.get_order_parameters(self.trigonal_pyramid, 0, indices_neighs=[1, 2, 3, 4])
        self.assertAlmostEqual(int(op_vals[18] * 1000 + 0.5), 1000)
        # Square pyramid motif.
        op_vals = ops_101.get_order_parameters(self.square_pyramid, 0)
        self.assertAlmostEqual(int(op_vals[11] * 1000 + 0.5), 1000)
        self.assertAlmostEqual(int(op_vals[12] * 1000 + 0.5), 667)
        self.assertAlmostEqual(int(op_vals[17] * 1000 + 0.5), 1000)
        # Pentagonal pyramid motif.
        op_vals = ops_101.get_order_parameters(self.pentagonal_pyramid, 0, indices_neighs=[1, 2, 3, 4, 5, 6])
        self.assertAlmostEqual(int(op_vals[19] * 1000 + 0.5), 1000)
        # Hexagonal pyramid motif.
        op_vals = ops_101.get_order_parameters(self.hexagonal_pyramid, 0, indices_neighs=[1, 2, 3, 4, 5, 6, 7])
        self.assertAlmostEqual(int(op_vals[20] * 1000 + 0.5), 1000)
        # Trigonal bipyramidal.
        op_vals = ops_101.get_order_parameters(self.trigonal_bipyramidal.sites, 0, indices_neighs=[1, 2, 3, 4, 5])
        self.assertAlmostEqual(int(op_vals[12] * 1000 + 0.5), 1000)
        # Pentagonal bipyramidal.
        op_vals = ops_101.get_order_parameters(self.pentagonal_bipyramid.sites, 0, indices_neighs=[1, 2, 3, 4, 5, 6, 7])
        self.assertAlmostEqual(int(op_vals[21] * 1000 + 0.5), 1000)
        # Hexagonal bipyramid motif.
        op_vals = ops_101.get_order_parameters(self.hexagonal_bipyramid, 0, indices_neighs=[1, 2, 3, 4, 5, 6, 7, 8])
        self.assertAlmostEqual(int(op_vals[22] * 1000 + 0.5), 1000)
        # Cuboctahedral motif.
        op_vals = ops_101.get_order_parameters(self.cuboctahedron, 0, indices_neighs=[i for i in range(1, 13)])
        self.assertAlmostEqual(int(op_vals[24] * 1000 + 0.5), 1000)
        self.assertAlmostEqual(int(op_vals[32] * 1000 + 0.5), 1000)
        # See-saw motif.
        op_vals = ops_101.get_order_parameters(self.see_saw_rect, 0, indices_neighs=[i for i in range(1, 5)])
        self.assertAlmostEqual(int(op_vals[25] * 1000 + 0.5), 1000)
        # Hexagonal planar motif.
        op_vals = ops_101.get_order_parameters(self.hexagonal_planar, 0, indices_neighs=[1, 2, 3, 4, 5, 6])
        self.assertAlmostEqual(int(op_vals[26] * 1000 + 0.5), 1000)
        # Square face capped trigonal prism.
        op_vals = ops_101.get_order_parameters(
            self.sq_face_capped_trig_pris, 0, indices_neighs=[i for i in range(1, 8)]
        )
        self.assertAlmostEqual(int(op_vals[34] * 1000 + 0.5), 1000)
        # Test providing explicit neighbor lists.
        op_vals = ops_101.get_order_parameters(self.bcc, 0, indices_neighs=[1])
        self.assertIsNotNone(op_vals[0])
        self.assertIsNone(op_vals[3])
        with self.assertRaises(ValueError):
            ops_101.get_order_parameters(self.bcc, 0, indices_neighs=[2])
    def tearDown(self):
        del self.single_bond
        del self.linear
        del self.bent45
        del self.cubic
        del self.fcc
        del self.bcc
        del self.hcp
        del self.diamond
        del self.regular_triangle
        del self.square
        del self.square_pyramid
        del self.trigonal_off_plane
        del self.trigonal_pyramid
        del self.trigonal_planar
        del self.square_planar
        del self.pentagonal_pyramid
        del self.hexagonal_pyramid
        del self.pentagonal_bipyramid
        del self.T_shape
        del self.cuboctahedron
        del self.see_saw_rect
class CrystalNNTest(PymatgenTest):
    """Integration tests for the CrystalNN near-neighbour algorithm on
    structures from the pymatgen test library (LiFePO4 and bcc He)."""

    def setUp(self):
        # LiFePO4 with guessed oxidation states exercises the oxidation-state
        # dependent code paths; bcc He exercises the noble-gas corner case.
        self.lifepo4 = self.get_structure("LiFePO4")
        self.lifepo4.add_oxidation_state_by_guess()
        self.he_bcc = self.get_structure("He_BCC")
        self.he_bcc.add_oxidation_state_by_guess()
        # Silence warnings for the duration of each test; restored in tearDown.
        self.prev_warnings = warnings.filters
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.filters = self.prev_warnings

    def test_sanity(self):
        # use_weights must be consistent with the weighted_cn construction flag;
        # a mismatch in either direction raises ValueError.
        with self.assertRaises(ValueError):
            cnn = CrystalNN()
            cnn.get_cn(self.lifepo4, 0, use_weights=True)
        with self.assertRaises(ValueError):
            cnn = CrystalNN(weighted_cn=True)
            cnn.get_cn(self.lifepo4, 0, use_weights=False)

    def test_discrete_cn(self):
        cnn = CrystalNN()
        cn_array = []
        # One expected integer coordination number per site of LiFePO4.
        expected_array = [
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            4,
            4,
            4,
            4,
            4,
            4,
            4,
            4,
            4,
            4,
            4,
            4,
            4,
            4,
            4,
            4,
            4,
            4,
            4,
            4,
        ]
        for idx, _ in enumerate(self.lifepo4):
            cn_array.append(cnn.get_cn(self.lifepo4, idx))
        self.assertSequenceEqual(cn_array, expected_array)

    def test_weighted_cn(self):
        cnn = CrystalNN(weighted_cn=True)
        cn_array = []
        # Fractional (weighted) coordination numbers, one per site,
        # compared to 2 decimal places below.
        expected_array = [
            5.863,
            5.8716,
            5.863,
            5.8716,
            5.7182,
            5.7182,
            5.719,
            5.7181,
            3.991,
            3.991,
            3.991,
            3.9907,
            3.5997,
            3.525,
            3.4133,
            3.4714,
            3.4727,
            3.4133,
            3.525,
            3.5997,
            3.5997,
            3.525,
            3.4122,
            3.4738,
            3.4728,
            3.4109,
            3.5259,
            3.5997,
        ]
        for idx, _ in enumerate(self.lifepo4):
            cn_array.append(cnn.get_cn(self.lifepo4, idx, use_weights=True))
        self.assertArrayAlmostEqual(expected_array, cn_array, 2)

    def test_weighted_cn_no_oxid(self):
        cnn = CrystalNN(weighted_cn=True)
        cn_array = []
        # Removing oxidation states changes the weighting, hence the expected
        # values differ from test_weighted_cn above.
        expected_array = [
            5.8962,
            5.8996,
            5.8962,
            5.8996,
            5.7195,
            5.7195,
            5.7202,
            5.7194,
            4.0012,
            4.0012,
            4.0012,
            4.0009,
            3.3897,
            3.2589,
            3.1218,
            3.1914,
            3.1914,
            3.1218,
            3.2589,
            3.3897,
            3.3897,
            3.2589,
            3.1207,
            3.1924,
            3.1915,
            3.1207,
            3.2598,
            3.3897,
        ]
        s = self.lifepo4.copy()
        s.remove_oxidation_states()
        for idx, _ in enumerate(s):
            cn_array.append(cnn.get_cn(s, idx, use_weights=True))
        self.assertArrayAlmostEqual(expected_array, cn_array, 2)

    def test_fixed_length(self):
        # fingerprint_length fixes the size of the returned NN data to 30 bins.
        cnn = CrystalNN(fingerprint_length=30)
        nndata = cnn.get_nn_data(self.lifepo4, 0)
        self.assertEqual(len(nndata.cn_weights), 30)
        self.assertEqual(len(nndata.cn_nninfo), 30)

    def test_cation_anion(self):
        cnn = CrystalNN(weighted_cn=True, cation_anion=True)
        self.assertAlmostEqual(cnn.get_cn(self.lifepo4, 0, use_weights=True), 5.8630, 2)

    def test_x_diff_weight(self):
        # x_diff_weight=0 should still reproduce the same weighted CN for
        # site 0 to 2 decimal places (same value as test_cation_anion).
        cnn = CrystalNN(weighted_cn=True, x_diff_weight=0)
        self.assertAlmostEqual(cnn.get_cn(self.lifepo4, 0, use_weights=True), 5.8630, 2)

    def test_noble_gas_material(self):
        # With default cutoffs He has no neighbours; widening the distance
        # cutoffs recovers the 8 bcc neighbours.
        cnn = CrystalNN()
        self.assertEqual(cnn.get_cn(self.he_bcc, 0, use_weights=False), 0)
        cnn = CrystalNN(distance_cutoffs=(1.25, 5))
        self.assertEqual(cnn.get_cn(self.he_bcc, 0, use_weights=False), 8)

    def test_shifted_sites(self):
        cnn = CrystalNN()
        sites = [[0.0, 0.2, 0.2], [0, 0, 0]]
        struct = Structure([7, 0, 0, 0, 7, 0, 0, 0, 7], ["I"] * len(sites), sites)
        bonded_struct = cnn.get_bonded_structure(struct)
        # Shifting a site by a full lattice vector must not change the number
        # of connected sites found.
        sites_shifted = [[1.0, 0.2, 0.2], [0, 0, 0]]
        struct_shifted = Structure([7, 0, 0, 0, 7, 0, 0, 0, 7], ["I"] * len(sites_shifted), sites_shifted)
        bonded_struct_shifted = cnn.get_bonded_structure(struct_shifted)
        self.assertEqual(
            len(bonded_struct.get_connected_sites(0)),
            len(bonded_struct_shifted.get_connected_sites(0)),
        )
class CutOffDictNNTest(PymatgenTest):
    """Tests for CutOffDictNN, which assigns neighbours via per-element-pair
    distance cutoffs."""

    def setUp(self):
        # Two-atom diamond cell specified in Cartesian coordinates.
        self.diamond = Structure(
            Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264], [0, 0, 2.528]]),
            ["C", "C"],
            [[2.554, 1.806, 4.423], [0.365, 0.258, 0.632]],
            coords_are_cartesian=True,
        )
        self.prev_warnings = warnings.filters
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.filters = self.prev_warnings

    def test_cn(self):
        # A C-C cutoff of 2 finds the four nearest neighbours in diamond;
        # an empty cutoff dict finds none.
        nn = CutOffDictNN({("C", "C"): 2})
        self.assertEqual(nn.get_cn(self.diamond, 0), 4)
        nn_null = CutOffDictNN()
        self.assertEqual(nn_null.get_cn(self.diamond, 0), 0)

    def test_from_preset(self):
        nn = CutOffDictNN.from_preset("vesta_2019")
        self.assertEqual(nn.get_cn(self.diamond, 0), 4)
        # test error thrown on unknown preset
        self.assertRaises(ValueError, CutOffDictNN.from_preset, "test")
@unittest.skipIf(not which("critic2"), "critic2 executable not present")
class Critic2NNTest(PymatgenTest):
    """Smoke test for Critic2NN; skipped unless the critic2 binary is on PATH."""

    def setUp(self):
        # Same two-atom diamond cell as CutOffDictNNTest.
        self.diamond = Structure(
            Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264], [0, 0, 2.528]]),
            ["C", "C"],
            [[2.554, 1.806, 4.423], [0.365, 0.258, 0.632]],
            coords_are_cartesian=True,
        )
        self.prev_warnings = warnings.filters
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.filters = self.prev_warnings

    def test_cn(self):
        # Only construction is exercised; the CN assertion is disabled.
        # NOTE(review): presumably disabled for CI speed/fragility -- confirm
        # before re-enabling.
        nn = Critic2NN()
        # self.assertEqual(nn.get_cn(self.diamond, 0), 4)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| davidwaroquiers/pymatgen | pymatgen/analysis/tests/test_local_env.py | Python | mit | 53,478 | [
"CRYSTAL",
"Jmol",
"pymatgen"
] | 6a3440fb47fd02bea6b21aee49d90bbfcfaaa9c1080a5855e76760683797c248 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in image_grad.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@test_util.for_all_test_methods(test_util.disable_xla,
                                'align_corners=False not supported by XLA')
class ResizeNearestNeighborOpTestBase(test.TestCase):
  """Shape and gradient tests for image_ops.resize_nearest_neighbor."""

  # Dtypes each test below is repeated for.
  TYPES = [np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype]

  def testShapeIsCorrectAfterOp(self):
    in_shape = [1, 2, 2, 1]
    out_shape = [1, 4, 6, 1]
    for nptype in self.TYPES:
      x = np.arange(0, 4).reshape(in_shape).astype(nptype)
      input_tensor = constant_op.constant(x, shape=in_shape)
      resize_out = image_ops.resize_nearest_neighbor(input_tensor,
                                                     out_shape[1:3])
      with self.cached_session():
        # Static shape must already be known before evaluation.
        self.assertEqual(out_shape, list(resize_out.get_shape()))
        resize_out = self.evaluate(resize_out)
        self.assertEqual(out_shape, list(resize_out.shape))

  def testGradFromResizeToLargerInBothDims(self):
    in_shape = [1, 2, 3, 1]
    out_shape = (1, 4, 6, 1)
    for nptype in self.TYPES:
      x = np.arange(0, 6).reshape(in_shape).astype(nptype)

      def resize_nn(t, shape=out_shape):
        return image_ops.resize_nearest_neighbor(t, shape[1:3])

      with self.cached_session():
        input_tensor = constant_op.constant(x, shape=in_shape)
        # delta=1/8 is a power of two, presumably chosen so the finite
        # difference step is exact in float16/bfloat16 -- confirm.
        err = gradient_checker_v2.max_error(
            *gradient_checker_v2.compute_gradient(
                resize_nn, [input_tensor], delta=1 / 8))
        self.assertLess(err, 1e-3)

  def testGradFromResizeToSmallerInBothDims(self):
    in_shape = [1, 4, 6, 1]
    out_shape = (1, 2, 3, 1)
    for nptype in self.TYPES:
      x = np.arange(0, 24).reshape(in_shape).astype(nptype)

      def resize_nn(t, shape=out_shape):
        return image_ops.resize_nearest_neighbor(t, shape[1:3])

      with self.cached_session():
        input_tensor = constant_op.constant(x, shape=in_shape)
        err = gradient_checker_v2.max_error(
            *gradient_checker_v2.compute_gradient(
                resize_nn, [input_tensor], delta=1 / 8))
        self.assertLess(err, 1e-3)

  def testCompareGpuVsCpu(self):
    in_shape = [1, 4, 6, 3]
    out_shape = (1, 8, 16, 3)
    for nptype in self.TYPES:
      x = np.arange(0, np.prod(in_shape)).reshape(in_shape).astype(nptype)
      for align_corners in [True, False]:

        def resize_nn(t, shape=out_shape, align_corners=align_corners):
          return image_ops.resize_nearest_neighbor(
              t, shape[1:3], align_corners=align_corners)

        # Same Jacobian computed on CPU (use_gpu=False) and on the default
        # device must agree closely.
        with self.cached_session(use_gpu=False):
          input_tensor = constant_op.constant(x, shape=in_shape)
          grad_cpu = gradient_checker_v2.compute_gradient(
              resize_nn, [input_tensor], delta=1 / 8)
        with self.cached_session():
          input_tensor = constant_op.constant(x, shape=in_shape)
          grad_gpu = gradient_checker_v2.compute_gradient(
              resize_nn, [input_tensor], delta=1 / 8)
        self.assertAllClose(grad_cpu, grad_gpu, rtol=1e-5, atol=1e-5)
class ResizeBilinearOpTestBase(test.TestCase, parameterized.TestCase):
  """Shape and gradient tests for image_ops.resize_bilinear."""

  def _itGen(self, smaller_shape, larger_shape):
    """Yields (in_shape, out_shape, align_corners, half_pixel_centers) cases."""
    up_sample = (smaller_shape, larger_shape)
    down_sample = (larger_shape, smaller_shape)
    pass_through = (larger_shape, larger_shape)
    shape_pairs = (up_sample, down_sample, pass_through)
    # Align corners is deprecated in TF2.0, but align_corners==False is not
    # supported by XLA.
    options = [(True, False)]
    if not test_util.is_xla_enabled():
      options += [(False, True), (False, False)]
    for align_corners, half_pixel_centers in options:
      for in_shape, out_shape in shape_pairs:
        yield in_shape, out_shape, align_corners, half_pixel_centers

  def _getJacobians(self,
                    in_shape,
                    out_shape,
                    align_corners=False,
                    half_pixel_centers=False,
                    dtype=np.float32,
                    use_gpu=False,
                    force_gpu=False):
    """Returns (analytical, numerical) Jacobians of resize_bilinear."""
    with self.cached_session(use_gpu=use_gpu, force_gpu=force_gpu):
      # Input values should not influence gradients
      x = np.arange(np.prod(in_shape)).reshape(in_shape).astype(dtype)
      input_tensor = constant_op.constant(x, shape=in_shape)

      def func(in_tensor):
        return image_ops.resize_bilinear(
            in_tensor,
            out_shape[1:3],
            align_corners=align_corners,
            half_pixel_centers=half_pixel_centers)

      return gradient_checker_v2.compute_gradient(func, [input_tensor])

  @parameterized.parameters(set((True, context.executing_eagerly())))
  def _testShapesParameterized(self, use_tape):

    TEST_CASES = [[1, 1], [2, 3], [5, 4]]  # pylint: disable=invalid-name

    for batch_size, channel_count in TEST_CASES:
      smaller_shape = [batch_size, 2, 3, channel_count]
      larger_shape = [batch_size, 4, 6, channel_count]
      for in_shape, out_shape, _, _ in self._itGen(smaller_shape, larger_shape):
        with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
          # Input values should not influence shapes
          x = np.arange(np.prod(in_shape)).reshape(in_shape).astype(np.float32)
          input_tensor = constant_op.constant(x, shape=in_shape)
          tape.watch(input_tensor)
          resized_tensor = image_ops.resize_bilinear(input_tensor,
                                                     out_shape[1:3])
          self.assertEqual(out_shape, list(resized_tensor.get_shape()))
          grad_tensor = tape.gradient(resized_tensor, input_tensor)
          # The gradient has the shape of the *input*, not the output.
          self.assertEqual(in_shape, list(grad_tensor.get_shape()))
          with self.cached_session():
            resized_values = self.evaluate(resized_tensor)
            self.assertEqual(out_shape, list(resized_values.shape))
            grad_values = self.evaluate(grad_tensor)
            self.assertEqual(in_shape, list(grad_values.shape))

  @parameterized.parameters({
      'batch_size': 1,
      'channel_count': 1
  }, {
      'batch_size': 4,
      'channel_count': 3
  }, {
      'batch_size': 3,
      'channel_count': 2
  })
  def testGradients(self, batch_size, channel_count):
    smaller_shape = [batch_size, 2, 3, channel_count]
    larger_shape = [batch_size, 5, 6, channel_count]
    for in_shape, out_shape, align_corners, half_pixel_centers in \
        self._itGen(smaller_shape, larger_shape):
      # Analytical and numerical Jacobians must agree for every combination
      # of shapes and interpolation flags.
      jacob_a, jacob_n = self._getJacobians(in_shape, out_shape, align_corners,
                                            half_pixel_centers)
      threshold = 5e-3
      self.assertAllClose(jacob_a, jacob_n, threshold, threshold)

  def testTypes(self):
    in_shape = [1, 4, 6, 1]
    out_shape = [1, 2, 3, 1]
    for use_gpu in [False, True]:
      for dtype in [
          np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
      ]:
        jacob_a, jacob_n = self._getJacobians(
            in_shape, out_shape, dtype=dtype, use_gpu=use_gpu)
        if dtype in (np.float16, dtypes.bfloat16.as_numpy_dtype):
          # Compare fp16/bf16 analytical gradients to fp32 numerical gradients,
          # since fp16/bf16 numerical gradients are too imprecise unless great
          # care is taken with choosing the inputs and the delta. This is
          # a weaker, but pragmatic, check (in particular, it does not test
          # the op itself, only its gradient).
          _, jacob_n = self._getJacobians(
              in_shape, out_shape, dtype=np.float32, use_gpu=use_gpu)
        threshold = 1e-3
        if dtype == np.float64:
          threshold = 1e-5
        self.assertAllClose(jacob_a, jacob_n, threshold, threshold)

  @parameterized.parameters(set((True, context.executing_eagerly())))
  def testGradOnUnsupportedType(self, use_tape):
    in_shape = [1, 4, 6, 1]
    out_shape = [1, 2, 3, 1]
    with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
      # No gradient flows for uint8 input: tape.gradient yields None.
      x = np.arange(0, 24).reshape(in_shape).astype(np.uint8)
      input_tensor = constant_op.constant(x, shape=in_shape)
      tape.watch(input_tensor)
      resize_out = image_ops.resize_bilinear(input_tensor, out_shape[1:3])
      with self.cached_session():
        grad = tape.gradient(resize_out, [input_tensor])
        self.assertEqual([None], grad)

  def _gpuVsCpuCase(self, in_shape, out_shape, align_corners,
                    half_pixel_centers, dtype):
    """Asserts CPU and GPU Jacobians agree for one parameter combination."""
    grad = {}
    for use_gpu in [False, True]:
      grad[use_gpu] = self._getJacobians(
          in_shape,
          out_shape,
          align_corners,
          half_pixel_centers,
          dtype=dtype,
          use_gpu=use_gpu)
    threshold = 1e-4
    # Note that this is comparing both analytical and numerical Jacobians
    self.assertAllClose(grad[False], grad[True], rtol=threshold, atol=threshold)

  @parameterized.parameters({
      'batch_size': 1,
      'channel_count': 1
  }, {
      'batch_size': 2,
      'channel_count': 3
  }, {
      'batch_size': 5,
      'channel_count': 4
  })
  def testCompareGpuVsCpu(self, batch_size, channel_count):
    smaller_shape = [batch_size, 4, 6, channel_count]
    larger_shape = [batch_size, 8, 16, channel_count]
    for params in self._itGen(smaller_shape, larger_shape):
      self._gpuVsCpuCase(*params, dtype=np.float32)

  def testCompareGpuVsCpuFloat64(self):
    in_shape = [1, 5, 7, 1]
    out_shape = [1, 9, 11, 1]
    # Note that there is no 16-bit floating-point format registered for GPU
    self._gpuVsCpuCase(
        in_shape,
        out_shape,
        align_corners=True,
        half_pixel_centers=False,
        dtype=np.float64)
class ResizeBicubicOpTestBase(test.TestCase, parameterized.TestCase):
  """Tests resize bicubic ops."""

  def testShapeIsCorrectAfterOp(self):
    in_shape = [1, 2, 2, 1]
    out_shape = [1, 4, 6, 1]
    x = np.arange(0, 4).reshape(in_shape).astype(np.float32)
    for align_corners in [True, False]:
      input_tensor = constant_op.constant(x, shape=in_shape)
      resize_out = image_ops.resize_bicubic(
          input_tensor, out_shape[1:3], align_corners=align_corners)
      with self.cached_session():
        # Static shape must already be known before evaluation.
        self.assertEqual(out_shape, list(resize_out.get_shape()))
        resize_out = self.evaluate(resize_out)
        self.assertEqual(out_shape, list(resize_out.shape))

  def testGradFromResizeToLargerInBothDims(self):
    in_shape = [1, 2, 3, 1]
    out_shape = [1, 4, 6, 1]
    x = np.arange(0, 6).reshape(in_shape).astype(np.float32)
    input_tensor = constant_op.constant(x, shape=in_shape)
    for align_corners in [True, False]:

      def func(input_tensor, align_corners=align_corners):
        return image_ops.resize_bicubic(
            input_tensor, out_shape[1:3], align_corners=align_corners)

      with self.cached_session():
        err = gradient_checker_v2.max_error(
            *gradient_checker_v2.compute_gradient(func, [input_tensor]))
        self.assertLess(err, 1e-3)

  def testGradFromResizeToSmallerInBothDims(self):
    in_shape = [1, 4, 6, 1]
    out_shape = [1, 2, 3, 1]
    x = np.arange(0, 24).reshape(in_shape).astype(np.float32)
    input_tensor = constant_op.constant(x, shape=in_shape)
    for align_corners in [True, False]:

      def func(input_tensor, align_corners=align_corners):
        return image_ops.resize_bicubic(
            input_tensor, out_shape[1:3], align_corners=align_corners)

      with self.cached_session():
        err = gradient_checker_v2.max_error(
            *gradient_checker_v2.compute_gradient(func, [input_tensor]))
        self.assertLess(err, 1e-3)

  @parameterized.parameters(set((True, context.executing_eagerly())))
  def testGradOnUnsupportedType(self, use_tape):
    with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
      in_shape = [1, 4, 6, 1]
      out_shape = [1, 2, 3, 1]
      # No gradient flows for uint8 input: tape.gradient yields None.
      x = np.arange(0, 24).reshape(in_shape).astype(np.uint8)
      input_tensor = constant_op.constant(x, shape=in_shape)
      tape.watch(input_tensor)
      resize_out = image_ops.resize_bicubic(input_tensor, out_shape[1:3])
      with self.cached_session():
        grad = tape.gradient(resize_out, [input_tensor])
        self.assertEqual([None], grad)
class ScaleAndTranslateOpTestBase(test.TestCase):
  """Tests scale and translate op."""

  def testGrads(self):
    in_shape = [1, 2, 3, 1]
    out_shape = [1, 4, 6, 1]
    x = np.arange(0, 6).reshape(in_shape).astype(np.float32)
    # Cross product of every kernel, scale, translation and antialias setting.
    kernel_types = [
        'lanczos1', 'lanczos3', 'lanczos5', 'gaussian', 'box', 'triangle',
        'keyscubic', 'mitchellcubic'
    ]
    scales = [(1.0, 1.0), (0.37, 0.47), (2.1, 2.1)]
    translations = [(0.0, 0.0), (3.14, 1.19), (2.1, 3.1), (100.0, 200.0)]
    for scale in scales:
      for translation in translations:
        for kernel_type in kernel_types:
          for antialias in [True, False]:
            with self.cached_session():
              input_tensor = constant_op.constant(x, shape=in_shape)

              def scale_trans(input_tensor,
                              scale=scale,
                              translation=translation,
                              kernel_type=kernel_type,
                              antialias=antialias):
                # pylint: disable=cell-var-from-loop
                return image_ops.scale_and_translate(
                    input_tensor,
                    out_shape[1:3],
                    scale=constant_op.constant(scale),
                    translation=constant_op.constant(translation),
                    kernel_type=kernel_type,
                    antialias=antialias)

              err = gradient_checker_v2.max_error(
                  *gradient_checker_v2.compute_gradient(scale_trans,
                                                        [input_tensor]))
              self.assertLess(err, 1e-3)

  def testIdentityGrads(self):
    """Tests that Gradients for 1.0 scale should be ones for some kernels."""
    in_shape = [1, 2, 3, 1]
    out_shape = [1, 4, 6, 1]
    x = np.arange(0, 6).reshape(in_shape).astype(np.float32)
    kernel_types = ['lanczos1', 'lanczos3', 'lanczos5', 'triangle', 'keyscubic']
    scale = (1.0, 1.0)
    translation = (0.0, 0.0)
    antialias = True
    for kernel_type in kernel_types:
      with self.cached_session():
        input_tensor = constant_op.constant(x, shape=in_shape)
        with backprop.GradientTape() as tape:
          tape.watch(input_tensor)
          scale_and_translate_out = image_ops.scale_and_translate(
              input_tensor,
              out_shape[1:3],
              scale=constant_op.constant(scale),
              translation=constant_op.constant(translation),
              kernel_type=kernel_type,
              antialias=antialias)
        grad = tape.gradient(scale_and_translate_out, input_tensor)[0]
        grad_v = self.evaluate(grad)
        self.assertAllClose(np.ones_like(grad_v), grad_v)
class CropAndResizeOpTestBase(test.TestCase):
  """Shape and gradient tests for image_ops.crop_and_resize."""

  def testShapeIsCorrectAfterOp(self):
    batch = 2
    image_height = 3
    image_width = 4
    crop_height = 4
    crop_width = 5
    depth = 2
    num_boxes = 2
    image_shape = [batch, image_height, image_width, depth]
    crop_size = [crop_height, crop_width]
    crops_shape = [num_boxes, crop_height, crop_width, depth]
    image = np.arange(0, batch * image_height * image_width *
                      depth).reshape(image_shape).astype(np.float32)
    # Boxes in normalized [y1, x1, y2, x2] coordinates, one batch index each.
    boxes = np.array([[0, 0, 1, 1], [.1, .2, .7, .8]], dtype=np.float32)
    box_ind = np.array([0, 1], dtype=np.int32)
    crops = image_ops.crop_and_resize(
        constant_op.constant(image, shape=image_shape),
        constant_op.constant(boxes, shape=[num_boxes, 4]),
        constant_op.constant(box_ind, shape=[num_boxes]),
        constant_op.constant(crop_size, shape=[2]))
    with self.session():
      self.assertEqual(crops_shape, list(crops.get_shape()))
      crops = self.evaluate(crops)
      self.assertEqual(crops_shape, list(crops.shape))

  def _randomUniformAvoidAnchors(self, low, high, anchors, radius, num_samples):
    """Generate samples that are far enough from a set of anchor points.

    We generate uniform samples in [low, high], then reject those that are less
    than radius away from any point in anchors. We stop after we have accepted
    num_samples samples.

    Args:
      low: The lower end of the interval.
      high: The upper end of the interval.
      anchors: A list of length num_crops with anchor points to avoid.
      radius: Distance threshold for the samples from the anchors.
      num_samples: How many samples to produce.

    Returns:
      samples: A list of length num_samples with the accepted samples.
    """
    self.assertTrue(low < high)
    self.assertTrue(radius >= 0)
    num_anchors = len(anchors)
    # Make sure that at least half of the interval is not forbidden.
    self.assertTrue(2 * radius * num_anchors < 0.5 * (high - low))
    anchors = np.reshape(anchors, num_anchors)
    samples = []
    while len(samples) < num_samples:
      sample = np.random.uniform(low, high)
      if np.all(np.fabs(sample - anchors) > radius):
        samples.append(sample)
    return samples

  def testGradRandomBoxes(self):
    """Test that the gradient is correct for randomly generated boxes.

    The mapping is piecewise differentiable with respect to the box coordinates.
    The points where the function is not differentiable are those which are
    mapped to image pixels, i.e., the normalized y coordinates in
    np.linspace(0, 1, image_height) and normalized x coordinates in
    np.linspace(0, 1, image_width). Make sure that the box coordinates are
    sufficiently far away from those rectangular grid centers that are points of
    discontinuity, so that the finite difference Jacobian is close to the
    computed one.
    """
    np.random.seed(1)  # Make it reproducible.
    delta = 1e-3
    radius = 2 * delta
    low, high = -0.5, 1.5  # Also covers the case of extrapolation.
    image_height = 4
    for image_width in range(1, 3):
      for crop_height in range(1, 3):
        for crop_width in range(2, 4):
          for depth in range(1, 3):
            for num_boxes in range(1, 3):
              batch = num_boxes
              image_shape = [batch, image_height, image_width, depth]
              crop_size = [crop_height, crop_width]
              image = np.arange(0, batch * image_height * image_width *
                                depth).reshape(image_shape).astype(np.float32)
              boxes = []
              for _ in range(num_boxes):
                # pylint: disable=unbalanced-tuple-unpacking
                y1, y2 = self._randomUniformAvoidAnchors(
                    low, high, np.linspace(0, 1, image_height), radius, 2)
                x1, x2 = self._randomUniformAvoidAnchors(
                    low, high, np.linspace(0, 1, image_width), radius, 2)
                # pylint: enable=unbalanced-tuple-unpacking
                boxes.append([y1, x1, y2, x2])
              boxes = np.array(boxes, dtype=np.float32)
              box_ind = np.arange(batch, dtype=np.int32)
              image_tensor = constant_op.constant(image, shape=image_shape)
              boxes_tensor = constant_op.constant(boxes, shape=[num_boxes, 4])
              box_ind_tensor = constant_op.constant(box_ind, shape=[num_boxes])

              def crop_resize(image_tensor, boxes_tensor):
                # pylint: disable=cell-var-from-loop
                return image_ops.crop_and_resize(
                    image_tensor, boxes_tensor, box_ind_tensor,
                    constant_op.constant(crop_size, shape=[2]))

              with test_util.device(use_gpu=True):
                with self.cached_session():
                  # Gradients are checked both w.r.t. the image and the boxes.
                  # pylint: disable=cell-var-from-loop
                  err1 = gradient_checker_v2.max_error(
                      *gradient_checker_v2.compute_gradient(
                          lambda x: crop_resize(x, boxes_tensor),
                          [image_tensor]))
                  err2 = gradient_checker_v2.max_error(
                      *gradient_checker_v2.compute_gradient(
                          lambda x: crop_resize(image_tensor, x),
                          [boxes_tensor]))
                  err = max(err1, err2)
                  self.assertLess(err, 2e-3)
@test_util.run_all_in_graph_and_eager_modes
class RGBToHSVOpTestBase(test.TestCase):
  """Shape and gradient tests for gen_image_ops.rgb_to_hsv."""

  # Dtypes each test below is repeated for.
  TYPES = [np.float32, np.float64]

  def testShapeIsCorrectAfterOp(self):
    in_shape = [2, 20, 30, 3]
    out_shape = [2, 20, 30, 3]
    for nptype in self.TYPES:
      x = np.random.randint(0, high=255, size=[2, 20, 30, 3]).astype(nptype)
      rgb_input_tensor = constant_op.constant(x, shape=in_shape)
      hsv_out = gen_image_ops.rgb_to_hsv(rgb_input_tensor)
      with self.cached_session():
        self.assertEqual(out_shape, list(hsv_out.get_shape()))
        hsv_out = self.evaluate(hsv_out)
        self.assertEqual(out_shape, list(hsv_out.shape))

  def testRGBToHSVGradSimpleCase(self):

    def f(x):
      return gen_image_ops.rgb_to_hsv(x)

    # Building a simple input tensor to avoid any discontinuity
    x = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8,
                                                     0.9]]).astype(np.float32)
    rgb_input_tensor = constant_op.constant(x, shape=x.shape)
    # Computing Analytical and Numerical gradients of f(x)
    analytical, numerical = gradient_checker_v2.compute_gradient(
        f, [rgb_input_tensor])
    self.assertAllClose(numerical, analytical, atol=1e-4)

  def testRGBToHSVGradRandomCase(self):

    def f(x):
      return gen_image_ops.rgb_to_hsv(x)

    np.random.seed(0)
    # Building a simple input tensor to avoid any discontinuity
    x = np.random.rand(1, 5, 5, 3).astype(np.float32)
    rgb_input_tensor = constant_op.constant(x, shape=x.shape)
    # Computing Analytical and Numerical gradients of f(x)
    self.assertLess(
        gradient_checker_v2.max_error(
            *gradient_checker_v2.compute_gradient(f, [rgb_input_tensor])), 1e-4)

  def testRGBToHSVGradSpecialCaseRGreatest(self):
    # This test tests a specific subset of the input space
    # with a dummy function implemented with native TF operations.
    in_shape = [2, 10, 20, 3]

    def f(x):
      return gen_image_ops.rgb_to_hsv(x)

    def f_dummy(x):
      # This dummy function is a implementation of RGB to HSV using
      # primitive TF functions for one particular case when R>G>B.
      r = x[..., 0]
      g = x[..., 1]
      b = x[..., 2]
      # Since MAX = r and MIN = b, we get the following h,s,v values.
      v = r
      s = 1 - math_ops.div_no_nan(b, r)
      h = 60 * math_ops.div_no_nan(g - b, r - b)
      h = h / 360
      return array_ops.stack([h, s, v], axis=-1)

    # Building a custom input tensor where R>G>B
    x_reds = np.ones((in_shape[0], in_shape[1], in_shape[2])).astype(np.float32)
    x_greens = 0.5 * np.ones(
        (in_shape[0], in_shape[1], in_shape[2])).astype(np.float32)
    x_blues = 0.2 * np.ones(
        (in_shape[0], in_shape[1], in_shape[2])).astype(np.float32)
    x = np.stack([x_reds, x_greens, x_blues], axis=-1)
    rgb_input_tensor = constant_op.constant(x, shape=in_shape)
    # Computing Analytical and Numerical gradients of f(x)
    analytical, numerical = gradient_checker_v2.compute_gradient(
        f, [rgb_input_tensor])
    # Computing Analytical and Numerical gradients of f_dummy(x)
    analytical_dummy, numerical_dummy = gradient_checker_v2.compute_gradient(
        f_dummy, [rgb_input_tensor])
    self.assertAllClose(numerical, analytical, atol=1e-4)
    self.assertAllClose(analytical_dummy, analytical, atol=1e-4)
    self.assertAllClose(numerical_dummy, numerical, atol=1e-4)
# Allow running this test module directly.
if __name__ == '__main__':
  test.main()
| annarev/tensorflow | tensorflow/python/ops/image_grad_test_base.py | Python | apache-2.0 | 25,063 | [
"Gaussian"
] | c0f2c743135df020fd891ae4de5d4305fda265b66fde24e095a6d5efadfb5e72 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Ruggero Marchei <ruggero.marchei@daemonzone.net>
# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: find
author: Brian Coca (based on Ruggero Marchei's Tidy)
version_added: "2.0"
short_description: Return a list of files based on specific criteria
description:
- Return a list of files based on specific criteria. Multiple criteria are AND'd together.
- For Windows targets, use the M(win_find) module instead.
options:
age:
description:
- Select files whose age is equal to or greater than the specified time.
Use a negative age to find files equal to or less than the specified time.
You can choose seconds, minutes, hours, days, or weeks by specifying the
first letter of any of those words (e.g., "1w").
patterns:
default: '*'
description:
- One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
- The patterns restrict the list of files to be returned to those whose basenames match at
least one of the patterns specified. Multiple patterns can be specified using a list.
aliases: ['pattern']
contains:
description:
- One or more regex patterns which should be matched against the file content.
paths:
required: true
aliases: [ name, path ]
description:
- List of paths of directories to search. All paths must be fully qualified.
file_type:
description:
- Type of file to select.
- The 'link' and 'any' choices were added in version 2.3.
choices: [ any, directory, file, link ]
default: file
recurse:
default: 'no'
choices: [ 'no', 'yes' ]
description:
- If target is a directory, recursively descend into the directory looking for files.
size:
description:
- Select files whose size is equal to or greater than the specified size.
Use a negative size to find files equal to or less than the specified size.
Unqualified values are in bytes, but b, k, m, g, and t can be appended to specify
bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
Size is not evaluated for directories.
age_stamp:
default: mtime
choices: [ atime, ctime, mtime ]
description:
- Choose the file property against which we compare age.
hidden:
default: 'no'
choices: [ 'no', 'yes' ]
description:
- Set this to true to include hidden files, otherwise they'll be ignored.
follow:
default: 'no'
choices: [ 'no', 'yes' ]
description:
- Set this to true to follow symlinks in path for systems with python 2.6+.
get_checksum:
default: 'no'
choices: [ 'no', 'yes' ]
description:
- Set this to true to retrieve a file's sha1 checksum.
use_regex:
default: 'no'
choices: [ 'no', 'yes' ]
description:
- If false the patterns are file globs (shell) if true they are python regexes.
notes:
- For Windows targets, use the M(win_find) module instead.
'''
EXAMPLES = r'''
- name: Recursively find /tmp files older than 2 days
find:
paths: /tmp
age: 2d
recurse: yes
- name: Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
find:
paths: /tmp
age: 4w
size: 1m
recurse: yes
- name: Recursively find /var/tmp files with last access time greater than 3600 seconds
find:
paths: /var/tmp
age: 3600
age_stamp: atime
recurse: yes
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
find:
paths: /var/log
patterns: '*.old,*.log.gz'
size: 10m
# Note that YAML double quotes require escaping backslashes but yaml single quotes do not.
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
find:
paths: /var/log
patterns: "^.*?\\.(?:old|log\\.gz)$"
size: 10m
use_regex: yes
'''
RETURN = r'''
files:
description: all matches found with the specified criteria (see stat module for full output of each dictionary)
returned: success
type: list
sample: [
{ path: "/var/tmp/test1",
mode: "0644",
"...": "...",
checksum: 16fac7be61a6e4591a33ef4b729c5c3302307523
},
{ path: "/var/tmp/test2",
"...": "..."
},
]
matched:
description: number of matches
returned: success
type: string
sample: 14
examined:
description: number of filesystem objects looked at
returned: success
type: string
sample: 34
'''
import fnmatch
import os
import re
import stat
import sys
import time
from ansible.module_utils.basic import AnsibleModule
def pfilter(f, patterns=None, use_regex=False):
    '''Return True if basename f matches at least one pattern.

    Patterns are Python regexes (anchored at the start, via re.match) when
    use_regex is true, shell globs otherwise. No patterns means match all.
    '''
    if patterns is None:
        return True
    if use_regex:
        # re.match caches compiled patterns internally.
        return any(re.match(p, f) for p in patterns)
    return any(fnmatch.fnmatch(f, p) for p in patterns)
def agefilter(st, now, age, timestamp):
    '''Return True if the file passes the age criterion.

    Args:
        st: os.stat_result for the file.
        now: current time, seconds since the epoch.
        age: threshold in seconds, or None to disable the filter.
            A non-negative age selects files at least that old; a negative
            age selects files no older than abs(age).
        timestamp: which stat field to compare: 'atime', 'ctime' or 'mtime'.
    '''
    if age is None:
        return True
    # getattr is the idiomatic (and faster) replacement for the original
    # st.__getattribute__("st_%s" % timestamp) call.
    file_age = now - getattr(st, "st_%s" % timestamp)
    if age >= 0:
        return file_age >= abs(age)
    return file_age <= abs(age)
def sizefilter(st, size):
    '''filter files greater than size'''
    if size is None:
        return True
    limit = abs(size)
    # Non-negative threshold: at least `limit` bytes; negative: at most.
    if size >= 0:
        return st.st_size >= limit
    return st.st_size <= limit
def contentfilter(fsname, pattern):
    '''Return True if any line of the file at fsname matches pattern.

    A None pattern matches everything. Unreadable files and invalid
    patterns are treated as non-matching (best effort, as before) rather
    than aborting the whole directory scan.
    '''
    if pattern is None:
        return True
    try:
        prog = re.compile(pattern)
        # 'with' guarantees the handle is closed even if reading raises,
        # fixing the descriptor leak of the original open()/close() pair.
        with open(fsname) as f:
            for line in f:
                if prog.match(line):
                    return True
    except Exception:
        # Best effort, but no longer a bare 'except:' that would also
        # swallow KeyboardInterrupt/SystemExit.
        pass
    return False
def statinfo(st):
    '''Translate an os.stat()/os.lstat() result into the flat dict
    reported for each matched file.'''
    mode = st.st_mode
    info = {
        'mode': "%04o" % stat.S_IMODE(mode),
        'isdir': stat.S_ISDIR(mode),
        'ischr': stat.S_ISCHR(mode),
        'isblk': stat.S_ISBLK(mode),
        'isreg': stat.S_ISREG(mode),
        'isfifo': stat.S_ISFIFO(mode),
        'islnk': stat.S_ISLNK(mode),
        'issock': stat.S_ISSOCK(mode),
        'uid': st.st_uid,
        'gid': st.st_gid,
        'size': st.st_size,
        'inode': st.st_ino,
        'dev': st.st_dev,
        'nlink': st.st_nlink,
        'atime': st.st_atime,
        'mtime': st.st_mtime,
        'ctime': st.st_ctime,
    }
    # Permission bits expanded one flag per key ('wusr' = owner write, ...),
    # derived from the stat constants S_I{W,R,X}{USR,GRP,OTH}.
    for who in ('usr', 'grp', 'oth'):
        for what in ('w', 'r', 'x'):
            flag = getattr(stat, 'S_I%s%s' % (what.upper(), who.upper()))
            info[what + who] = bool(mode & flag)
    info['isuid'] = bool(mode & stat.S_ISUID)
    info['isgid'] = bool(mode & stat.S_ISGID)
    return info
def main():
    """Entry point of the Ansible ``find`` module.

    Parses the module parameters, normalises ``age`` and ``size`` into
    seconds/bytes, walks each requested path and reports every filesystem
    object that passes all of the configured filters.
    """
    module = AnsibleModule(
        argument_spec=dict(
            paths=dict(type='list', required=True, aliases=['name', 'path']),
            patterns=dict(type='list', default=['*'], aliases=['pattern']),
            contains=dict(type='str'),
            file_type=dict(type='str', default="file", choices=['any', 'directory', 'file', 'link']),
            age=dict(type='str'),
            age_stamp=dict(type='str', default="mtime", choices=['atime', 'mtime', 'ctime']),
            size=dict(type='str'),
            recurse=dict(type='bool', default='no'),
            hidden=dict(type='bool', default='no'),
            follow=dict(type='bool', default='no'),
            get_checksum=dict(type='bool', default='no'),
            use_regex=dict(type='bool', default='no'),
        ),
        supports_check_mode=True,
    )

    params = module.params
    filelist = []

    if params['age'] is None:
        age = None
    else:
        # convert age to seconds:
        m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
        seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
        if m:
            # a missing unit suffix defaults to seconds (factor 1)
            age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(age=params['age'], msg="failed to process age")

    if params['size'] is None:
        size = None
    else:
        # convert size to bytes:
        m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
        bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
        if m:
            # a missing unit suffix defaults to bytes (factor 1)
            size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(size=params['size'], msg="failed to process size")

    now = time.time()
    msg = ''
    looked = 0
    for npath in params['paths']:
        npath = os.path.expanduser(os.path.expandvars(npath))
        if os.path.isdir(npath):
            ''' ignore followlinks for python version < 2.6 '''
            for root, dirs, files in (sys.version_info < (2, 6, 0) and os.walk(npath)) or os.walk(npath, followlinks=params['follow']):
                looked = looked + len(files) + len(dirs)
                for fsobj in (files + dirs):
                    fsname = os.path.normpath(os.path.join(root, fsobj))

                    # hidden (dot-prefixed) entries are skipped unless asked for
                    if os.path.basename(fsname).startswith('.') and not params['hidden']:
                        continue

                    try:
                        # lstat so that symlinks themselves are classified
                        st = os.lstat(fsname)
                    except:
                        # best effort: note the failure and keep walking
                        msg += "%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
                        continue

                    r = {'path': fsname}
                    if params['file_type'] == 'any':
                        if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)
                    elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
                        if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)
                    elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
                        # regular files additionally honour size/contains filters
                        if pfilter(fsobj, params['patterns'], params['use_regex']) and \
                           agefilter(st, now, age, params['age_stamp']) and \
                           sizefilter(st, size) and \
                           contentfilter(fsname, params['contains']):
                            r.update(statinfo(st))
                            if params['get_checksum']:
                                r['checksum'] = module.sha1(fsname)
                            filelist.append(r)
                    elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
                        if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)

                if not params['recurse']:
                    # only the top directory level was requested
                    break
        else:
            msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath

    matched = len(filelist)
    module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)

if __name__ == '__main__':
    main()
| tsdmgz/ansible | lib/ansible/modules/files/find.py | Python | gpl-3.0 | 12,572 | [
"Brian"
] | 5cb9a9c194f1565d6cc2cb094d735463bc977d33e4a43b87610643bac3a28bac |
from functools import wraps
import multiprocessing
import numpy
import logging
import geodat.units
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# pyferret is an optional dependency: when it cannot be imported the module
# still loads, but every entry point that needs Ferret re-raises the stored
# ImportError at call time (see the PYFERRET_INSTALLED checks below).
try:
    import pyferret
    PYFERRET_INSTALLED = True
    _IMPORT_PYFERRET_ERROR = None
except ImportError:
    logger.warning("Failed to load pyferret.")
    PYFERRET_INSTALLED = False
    _IMPORT_PYFERRET_ERROR = ImportError("Failed to load pyferret")
def num2fer(data, coords, dimunits,
            varname="UNKNOWN", data_units=None, missing_value=None,
            cartesian_axes=None, dimnames=None):
    ''' Create a dictionary that resembles the Ferret data variable
    structure to be passed to pyferret.putdata

    Args:
        data (numpy.ndarray)
        coords (a list of numpy.ndarray)
        dimunits (a list of str): dimension units (e.g. ['months','degrees_N'])
        varname (str, optional)
        data_units (str, optional)
        missing_value (numeric)
        cartesian_axes (a list of characters): specifies the cartesian axes
           e.g. ['T','Y','X'].  If this is not specified, guesses will be
           made using the dimension units (say unit month will be
           interpreted as a [T]IME axis).  Specifying cartesian_axes
           overwrites the guesses.
        dimnames (a list of str): dimension names (e.g. ['time','lat','lon'])

    Returns:
        dict

    Raises:
        ImportError: if pyferret could not be imported
        Exception: if the length of coords/dimunits/cartesian_axes/dimnames
            does not match data.ndim
    '''
    if not PYFERRET_INSTALLED:
        raise _IMPORT_PYFERRET_ERROR
    if len(dimunits) != data.ndim:
        raise Exception("Number of dimunits does not match data.ndim")
    if len(coords) != data.ndim:
        raise Exception("Number of coords does not match data.ndim")
    fer_var = {}
    # Define the variable (copy so later mutation cannot affect the caller)
    fer_var['data'] = data.copy()
    # Variable name
    fer_var['name'] = varname
    # Dataset
    fer_var['dset'] = None
    # Title = variable name
    fer_var['title'] = fer_var['name']
    # Set missing value
    if missing_value is not None:
        fer_var['missing_value'] = missing_value
    # Set data unit
    if data_units is not None:
        fer_var['data_unit'] = data_units
    # Map cartesian axis letters to Ferret axis types
    cax2ax_type = {'X': pyferret.AXISTYPE_LONGITUDE,
                   'Y': pyferret.AXISTYPE_LATITUDE,
                   'Z': pyferret.AXISTYPE_LEVEL,
                   'T': pyferret.AXISTYPE_CUSTOM}
    # Make guesses for the axis type from the dimension units
    if cartesian_axes is None:
        cartesian_axes = [geodat.units.assign_caxis(dimunit)
                          for dimunit in dimunits]
    if len(cartesian_axes) != data.ndim:
        raise Exception("Number of cartesian_axes/dimunits does"+\
                        " not match data.ndim")
    # Convert to the PyFerret convention; dict.get replaces the former
    # "cax in cax2ax_type.keys()" anti-idiom
    fer_var['axis_types'] = [cax2ax_type.get(cax, pyferret.AXISTYPE_NORMAL)
                             for cax in cartesian_axes]
    if dimnames is not None:
        if len(dimnames) != data.ndim:
            raise Exception("Number of dimnames does not match data.ndim")
        fer_var['axis_names'] = dimnames
    fer_var['axis_units'] = dimunits
    fer_var['axis_coords'] = coords
    # This will be used as the second argument to pyferret.putdata
    axis_pos_dict = {'X': pyferret.X_AXIS,
                     'Y': pyferret.Y_AXIS,
                     'Z': pyferret.Z_AXIS,
                     'T': pyferret.T_AXIS}
    # Force axis position; unrecognised axes fall back to their index
    fer_var['axis_pos'] = [axis_pos_dict.get(cax, cartesian_axes.index(cax))
                           for cax in cartesian_axes]
    return fer_var
def fer2num(var):
    ''' Filter the dictionary returned by pyferret.getdata

    PyFerret usually returns data with extra singlet dimensions; those
    are squeezed out here.

    Args:
        var (dict): as is returned by pyferret.getdata

    Returns:
        dict: {'data': a numpy ndarray, 'varname': the name of the variable,
        'coords': a list of numpy ndarrays for the dimensions,
        'dimunits': a list of strings, the units for the dimensions,
        'dimnames': a list of strings, the names for the dimensions}

    Raises:
        ImportError: if pyferret could not be imported
        Exception: if an axis of type TIME is encountered (not handled yet)
    '''
    if not PYFERRET_INSTALLED:
        raise _IMPORT_PYFERRET_ERROR
    results = {}
    # Axes whose coordinate array is None are the singlet placeholders
    results['coords'] = [ax for ax in var['axis_coords']
                         if ax is not None]
    if var['axis_names'] is not None:
        results['dimnames'] = [name
                               for name, ax in zip(var['axis_names'],
                                                   var['axis_coords'])
                               if ax is not None]
    # If the axis_type is TIME, the axis_unit is the calendar type which
    # is not considered yet
    if pyferret.AXISTYPE_TIME in var['axis_types']:
        raise Exception("Immature function: axis_type from Ferret is TIME,"+\
                        "not CUSTOM; a situation not taken into yet.")
    results['dimunits'] = [unit
                           for unit, ax in zip(var['axis_units'],
                                               var['axis_coords'])
                           if ax is not None]
    # NumPy requires a tuple (not a list) for multi-dimensional indexing;
    # indexing with a list here is deprecated and an error in recent
    # NumPy releases.
    sliceobj = tuple(0 if ax is None else slice(None)
                     for ax in var['axis_coords'])
    results['data'] = var['data'][sliceobj]
    results['varname'] = var['title']
    return results
def run_worker(f):
    '''Decorator that runs *f* in a throw-away single-worker process.

    This is a workaround for reclaiming the memory used by PyFerret: the
    work happens in a child process that is torn down after every call.
    '''
    @wraps(f)
    def run_func(*args, **kwargs):
        pool = multiprocessing.Pool(1)
        answer = pool.apply(f, args, kwargs)
        pool.close()
        pool.terminate()
        pool.join()
        return answer
    return run_func
def regrid_once_primitive(var, ref_var, axis,
                          verbose=False, prerun=None, transform='@ave'):
    ''' A generic function that regrids a variable without the dependence of
    geodat.nc.Variable

    Args:
        var (dict) : arguments for num2fer
            Required keys: data,coords,dimunits
        ref_var (dict) : arguments for num2fer.
            This supplies the grid for regridding
            Required keys: coords,dimunits
        axis (str) : the axis for regridding e.g. 'X'/'Y'/'XY'/"YX"
        verbose (bool) : whether to print progress (default: False)
        prerun (str or a list of str) : commands to be run at the start
            (default: None)
        transform (str): "@ave" (Conserve area average),
            "@lin" (Linear interpolation),...see Ferret doc

    Returns:
        dict: the input *var* updated in place with the regridded data,
        coords and dimension metadata

    Raises:
        ImportError: if pyferret could not be imported
        Exception: if prerun is neither a string nor a list of strings
    '''
    if not PYFERRET_INSTALLED:
        raise _IMPORT_PYFERRET_ERROR

    pyferret.start(quiet=True, journal=verbose,
                   verify=False, server=True)
    # commands to run before regridding
    if prerun is not None:
        if type(prerun) is str:
            pyferret.run(prerun)
        elif type(prerun) is list:
            for s in prerun:
                if type(s) is str:
                    # Bug fix: run each command in turn; the original
                    # passed the whole list (prerun) to pyferret.run here.
                    pyferret.run(s)
                else:
                    raise Exception("prerun has to be either a string or "+\
                                    "a list of string")
        else:
            raise Exception("prerun has to be either a string or a list of "+\
                            "string")

    assert isinstance(axis, str)
    axis = axis.upper()

    # Construct the source data read by pyferret.putdata
    source_fer = num2fer(**var)
    source_fer["name"] = "source"
    # Fill in unnecessary input for Ferret
    if "data" not in ref_var:
        ref_var['data'] = numpy.zeros((1,)*len(ref_var['coords']))
    # Construct the destination data read by pyferret.putdata
    dest_fer = num2fer(**ref_var)
    dest_fer["name"] = "dest"
    if verbose:
        print(source_fer)
        print(dest_fer)
    pyferret.putdata(source_fer, axis_pos=source_fer['axis_pos'])
    if verbose:
        print("Put source variable")
        pyferret.run('show grid source')
    pyferret.putdata(dest_fer, axis_pos=dest_fer['axis_pos'])
    if verbose:
        print("Put destination variable")
        pyferret.run('show grid dest')
    pyfer_command = 'let result = source[g'+axis.lower()+'=dest'+transform+']'
    pyferret.run(pyfer_command)
    if verbose:
        print("Regridded in FERRET")
        pyferret.run('show grid result')

    # Get results
    result_ref = pyferret.getdata('result')
    if verbose:
        print("Get data from FERRET")
    # Convert from ferret data structure to geodat.nc.Variable
    tmp_result = fer2num(result_ref)
    if 'varname' in var:
        tmp_result['varname'] = var['varname']
    tmp_caxes = [geodat.units.assign_caxis(dimunit)
                 for dimunit in tmp_result['dimunits']]
    var_caxes = [geodat.units.assign_caxis(dimunit)
                 for dimunit in var['dimunits']]
    # Preserve dimension order (Ferret reverts the order)
    neworder = [tmp_caxes.index(cax)
                for cax in var_caxes]
    # Change the dimension order of the result to match with the input
    tmp_result['coords'] = [tmp_result['coords'][iax] for iax in neworder]
    tmp_result['dimunits'] = [tmp_result['dimunits'][iax] for iax in neworder]
    if 'dimnames' in tmp_result:
        tmp_result['dimnames'] = [tmp_result['dimnames'][iax]
                                  for iax in neworder]
    tmp_result['data'] = tmp_result['data'].transpose(neworder).astype(
        var['data'].dtype)
    # Return the input var with the data and dimensions replaced by
    # the regridded ones
    var.update(tmp_result)
    result = var
    status = pyferret.stop()
    if verbose:
        if status:
            print("PyFerret stopped.")
        else:
            print("PyFerret failed to stop.")
    return result
# Public entry point: regrid_once_primitive wrapped so that each call runs
# in a disposable subprocess (PyFerret memory is released with the worker).
regrid_primitive = run_worker(regrid_once_primitive)


if __name__ == '__main__':
    # Manual smoke test: regrid a high-resolution land mask onto the
    # low-resolution grid, reading both from netCDF files in the CWD.
    import scipy.io.netcdf as netcdf
    ncfile_low = netcdf.netcdf_file("land_mask_lowres.nc")
    newvar = dict(data=ncfile_low.variables['land_mask'].data,
                  coords=[ncfile_low.variables[dim].data
                          for dim in ncfile_low.variables['land_mask'].\
                          dimensions],
                  dimunits=[ncfile_low.variables[dim].units
                            for dim in ncfile_low.variables['land_mask'].\
                            dimensions])
    ncfile_high = netcdf.netcdf_file("land_mask_highres.nc")
    var_high = dict(data=ncfile_high.variables['land_mask'].data,
                    coords=[ncfile_high.variables[dim].data
                            for dim in ncfile_high.variables['land_mask'].\
                            dimensions],
                    dimunits=[ncfile_high.variables[dim].units
                              for dim in ncfile_high.variables['land_mask'].\
                              dimensions])
    regridded = regrid_primitive(var_high, newvar, 'XY')
| kitchoi/geodat | geodat/pyferret_func.py | Python | mit | 11,096 | [
"NetCDF"
] | 366d309ebd5ae6dfc0abe68191dfc2678f52cda2dd1972236527a8530c4f6445 |
import collections
import contextlib
import heapq
import inspect
import traceback
import weakref
import six
import chainer
from chainer import _backprop_utils
from chainer import backend
from chainer.backends import cuda
from chainer import configuration
from chainer import function_hook
from chainer.graph_optimizations.static_graph_utilities \
import static_forward_optimizations
from chainer import utils
from chainer.utils import type_check
from chainer import variable
import chainerx
def _to_variable_with_chainerx_fallback_array(
        chainerx_device, chainerx_array, fallback_array):
    """Wrap a ChainerX array (possibly None) in a Variable that caches
    the given non-ChainerX fallback array."""
    if chainerx_array is None:
        requires_grad = False
    else:
        # the array must already live on the requested device
        assert chainerx_array.device == chainerx_device.device
        requires_grad = chainerx_array.is_backprop_required()
    var = variable.Variable._init_unchecked(
        chainerx_array,
        device=chainerx_device,
        requires_grad=requires_grad)
    var._chainerx_fallback_array = fallback_array
    return var
class FunctionNode(object):
"""Function node of the computational graph.
FunctionNode is a class representing a node in a computational graph. The
node corresponds to an application of a differentiable function to input
variables.
When a differentiable function is applied to :class:`~chainer.Variable`
objects,
it creates an instance of FunctionNode implementation and calls its
:meth:`apply` method. The :meth:`apply` method basically does the following
three things.
1. Adding an edge from the function node to the variable node corresponding
to each input. The node of each input is extracted by
:attr:`Variable.node <chainer.Variable.node>`.
2. Computing the output arrays of the function.
3. Creating a :class:`~chainer.Variable` object for each output array and
adding an edge from the node of the variable to the function node.
The output variables are then returned.
.. admonition:: Example
Let ``x`` be an instance of :class:`~chainer.Variable` and ``f`` be an
instance of :class:`FunctionNode` taking only one argument.
Then the following code
>>> import numpy, chainer
>>> x = chainer.Variable(numpy.zeros(10))
>>> f = chainer.functions.math.identity.Identity()
>>> y = f.apply((x,))[0]
computes a new variable ``y`` and creates backward references. The
backward references are actually set as per the following diagram::
x.node <--- f <--- y.node
If an application of another function ``g`` occurs as
>>> g = chainer.functions.math.identity.Identity()
>>> z = g.apply((x,))[0]
then the graph grows with a branch::
|--- f <--- y.node
x.node <-+
|--- g <--- z.node
Note that the branching is correctly managed on backward computation,
i.e. the gradients from ``f`` and ``g`` are accumulated to the gradient
of ``x``.
Every function-node implementation should provide :meth:`forward` and
:meth:`backward`. Instead of overriding :meth:`forward`, one can also
implement :meth:`forward_cpu` and :meth:`forward_gpu` when the
implementations for CPU and GPU arrays are totally different.
Note that the input and output variables are inaccessible from
:meth:`backward` by default. If it needs accesses to these variables, the
:meth:`forward` method (or its CPU/GPU variants) has to call
:meth:`retain_inputs` and :meth:`retain_outputs` appropriately. The
retained input/output variables can be accessed from :meth:`backward` by
calling :meth:`get_retained_inputs` and :meth:`get_retained_outputs`.
.. note::
There are two types of differentiable functions in Chainer (since v3).
The first type is of a function using a subclass of
:class:`~chainer.Function`,
which is called *old-style differentiable function*. The second type is
of a function using a subclass of :class:`FunctionNode`, which is called
**new-style differentiable function**. There are several advantages on
using the new-style differentiable function.
- The new-style differentiable function supports *differentiable
backpropagation*. The backpropagated gradients computed through the
new-style differentiable functions themselves support further
backpropagations so that the automatic higher-order differentiation is
available.
- The backpropagation of the new-style differentiable function can be
more computationally efficient because the interface allows an
implementation to omit the computation of unneeded input gradients.
Note that the new-style differentiable function is the standard way of
defining a function node of the computational graph in Chainer; old-
style differentiable functions are implemented as wrappers of the new-
style differentiable functions.
Attributes:
~FunctionNode.inputs: A tuple of the input
:class:`~chainer.variable.VariableNode` objects.
~FunctionNode.outputs: A tuple of weak references to the output
:class:`~chainer.variable.VariableNode` objects.
~FunctionNode.rank (int): An ordinal following the topological order
of the computational graph.
~FunctionNode.stack: Stack trace retrieved at the forward computation.
The stack trace is available only in the debug mode.
.. versionadded:: 3.0.0
"""
inputs = None
outputs = None
_output_count = None
rank = 0
stack = None
_input_indexes_to_retain = None
_output_indexes_to_retain = None
_retained_output_data = None
_local_function_hooks = None
_supports_static_optimizations = False
# True if the function node is operating on ChainerX arrays and it falls
# back to NumPy/CuPy implementation.
_is_chainerx_fallback_mode = False
# chainerx.Device instance if _is_chainerx_fallback_mode == True
chainerx_device = None
_chainerx_retained_inputs = None
_chainerx_retained_outputs = None
lazy_grad_sum = False
@property
def local_function_hooks(self):
"""Ordered dictionary of registered function hooks.
Contrary to ``chainer.thread_local.function_hooks``,
which registers its elements to all functions,
Function hooks in this property is specific to this function.
"""
if self._local_function_hooks is None:
self._local_function_hooks = collections.OrderedDict()
return self._local_function_hooks
@property
def _n_local_function_hooks(self):
return (0 if self._local_function_hooks is None
else len(self._local_function_hooks))
@property
def label(self):
"""Short text that represents the function.
The default implementation returns its type name.
Each function should override it to give more information.
"""
return self.__class__.__name__
    @property
    def output_data(self):
        """A tuple of the retained output arrays.

        This property is mainly used by :class:`Function`. Users basically do
        not have to use this property; use :meth:`get_retained_outputs`
        instead.
        """
        if self._is_chainerx_fallback_mode:
            # In fallback mode the retained outputs are kept as Variables;
            # unwrap them to arrays here.
            retained_output_data = [
                None if var is None
                else var.array
                for var in self._chainerx_retained_outputs]
        else:
            if self._retained_output_data is None:
                raise RuntimeError('retained output data is gone')
            retained_output_data = self._retained_output_data
        # Scatter the retained arrays back to their original output
        # positions; outputs that were not retained are reported as None.
        out_data = [None] * self._output_count
        for index, data in six.moves.zip(self._output_indexes_to_retain,
                                         retained_output_data):
            out_data[index] = data
        return tuple(out_data)
@property
def _impl_name(self):
return self.__class__.__name__
    def __call__(self, *args, **kwargs):
        # FunctionNode instances are not callable; raise a descriptive
        # error guiding users who call one by mistake towards apply()
        # (or, for built-in functions, the snake_case functional alias).
        if self.__class__.__module__.startswith('chainer.'):
            msg = '''\
Chainer's built-in function class object ({}) which is derived from \
chainer.FunctionNode has been called as if it were a callable. \
Use FunctionNode.apply() method instead.
Furthermore, it's not recommended that you use built-in function classes \
directly; use corresponding function aliases (those with snake_case name, \
such as F.convolution_nd) instead.\
'''.format(self.__class__.__name__)
        else:
            msg = '''\
A function class object ({}) which is derived from \
chainer.FunctionNode has been called as if it were a callable. \
Use apply() method instead.\
'''.format(self.__class__.__name__)
        raise RuntimeError(msg)
    def apply(self, inputs):
        """Computes output variables and grows the computational graph.

        Basic behavior is expressed in the documentation of
        :class:`FunctionNode`.

        .. note::

           If the :data:`~Variable.data` attributes of the input variables
           exist on a GPU device, that device is made current before calling
           :meth:`forward`, so implementers do not need to take care of device
           selection in most cases.

        Args:
            inputs: Tuple of input variables. Each element can be either
                :class:`~chainer.Variable` or :ref:`ndarray`. If the element
                is an ndarray, it is automatically wrapped with
                :class:`~chainer.Variable`.

        Returns:
            A tuple of output :class:`~chainer.Variable` objects.
        """
        chainerx_in_data = None
        chainerx_device = None
        is_chainerx, in_data = _extract_apply_in_data(inputs)
        utils._check_arrays_forward_compatible(in_data, self.label)

        if is_chainerx:
            # Try ChainerX C++ implementation.
            # If it's supported, the output arrays are wrapped with Variables
            # and returned.
            # If not supported, FunctionNode.forward_chainerx should return
            # Fallback.
            # In that case the input arrays are converted to numpy.ndarray
            # or cupy.ndarray (depending on the ChainerX backend) and
            # forward computation falls back to the conventional
            # FunctionNode.forward() implementaion.
            outputs = self.forward_chainerx(in_data)

            if outputs is not chainer.Fallback:
                # Supported. Wrap with variables and return
                assert isinstance(outputs, tuple)
                return tuple([
                    variable.Variable._init_unchecked(
                        y, requires_grad=y.is_backprop_required(),
                        is_chainerx_array=True)
                    for y in outputs])

            # Fall back to FunctionNode.forward()
            chainerx_in_data, in_data, chainerx_device = (
                self._chainerx_apply_fallback_preprocess(in_data, inputs))
            self._is_chainerx_fallback_mode = True
            self.chainerx_device = chainerx_device

        is_debug = chainer.is_debug()
        if is_debug:
            # Keep stack trace for debug
            self.stack = traceback.extract_stack()

        if configuration.config.type_check:
            self._check_data_type_forward(in_data)

        # Merge thread-local hooks with this node's local hooks.
        hooks = chainer.get_function_hooks()
        if self._n_local_function_hooks > 0:
            hooks = collections.OrderedDict(hooks)
            hooks.update(self.local_function_hooks)
        hooks = hooks.values()  # avoid six for performance

        for hook in hooks:
            hook.forward_preprocess(self, in_data)

        # Forward propagation
        with chainer.using_device(backend.get_device_from_array(*in_data)):
            self._input_indexes_to_retain = None
            self._output_indexes_to_retain = None
            if chainer.config.schedule_func is not None:
                outputs = static_forward_optimizations(self, in_data)
            elif self._is_chainerx_fallback_mode:
                # In ChainerX fallback, __class__ is temporarily replaced with
                # the fabricated one with automatic attirbute fallback.
                with _chainerx_attribute_fallback(self, chainerx_device):
                    outputs = self.forward(in_data)
            else:
                # In normal case, simply run the forward method.
                outputs = self.forward(in_data)

        # Check for output array types
        if not isinstance(outputs, tuple):
            raise TypeError(
                'forward output must be a tuple ({})\n'
                'Actual: {}'.format(self.label, type(outputs)))

        if not chainer.is_arrays_compatible(outputs):
            raise TypeError(
                'incompatible array types are mixed in the forward output '
                '({}).\n'
                'Actual: {}'.format(
                    self.label,
                    ', '.join(str(type(x)) for x in outputs)))

        for hook in hooks:
            hook.forward_postprocess(self, in_data)

        # NaN check of output values
        if is_debug:
            for out in outputs:
                if out is not None and chainer.backend._contains_nan(out):
                    msg = ('NaN is detected on forward computation of '
                           '{}'.format(self.label))
                    raise RuntimeError(msg)

        self._output_count = len(outputs)

        if self._is_chainerx_fallback_mode:
            ret = self._chainerx_apply_fallback_postprocess(
                chainerx_device,
                chainerx_in_data, inputs, outputs)

        else:
            input_vars = [chainer.as_variable(x) for x in inputs]
            requires_grad = any([x.requires_grad for x in input_vars])

            ret = tuple(
                [variable.Variable(y, requires_grad=requires_grad)
                 for y in outputs])

            if configuration.config.enable_backprop:
                # Topological ordering
                self.rank = max(
                    [x.rank for x in input_vars]) if input_vars else 0
                # Add backward edges
                for y in ret:
                    y.creator_node = self
                self.inputs = tuple([x.node for x in input_vars])
                # Add forward edges (must be weak references)
                self.outputs = tuple([weakref.ref(y.node) for y in ret])

                if self._input_indexes_to_retain is not None:
                    for index in self._input_indexes_to_retain:
                        input_vars[index].retain_data()

            if self._output_indexes_to_retain is not None:
                retained_data = []
                for index in self._output_indexes_to_retain:
                    ret[index].retain_data()
                    retained_data.append(outputs[index])
                self._retained_output_data = tuple(retained_data)

            self.lazy_grad_sum = configuration.config.lazy_grad_sum

        return ret
def _check_data_type_forward(self, in_data):
in_type = type_check.get_light_types(in_data)
try:
with type_check.light_mode:
self.check_type_forward(in_type)
return
except type_check.InvalidType:
# Ignore errors on first run
pass
in_type = type_check.get_types(in_data, 'in_types', False)
with type_check.get_function_check_context(self):
self.check_type_forward(in_type)
def check_type_forward(self, in_types):
"""Checks types of input data before forward propagation.
This method is called before :meth:`forward` and validates the types of
input variables using
:ref:`the type checking utilities <type-check-utils>`.
Args:
in_types (~chainer.utils.type_check.TypeInfoTuple): The type
information of input variables for :meth:`forward`.
"""
pass
    def _chainerx_apply_fallback_preprocess(self, in_data, inputs):
        # Converts the ChainerX input arrays to their NumPy/CuPy fallback
        # counterparts, reusing (and updating) the per-Variable fallback
        # cache where possible.  Returns the original ChainerX arrays, the
        # converted arrays and the ChainerX device.
        chainerx_in_data = in_data
        in_data = []
        device = None
        for data, x in six.moves.zip(chainerx_in_data, inputs):
            if data is None:
                fallback_data = None
            else:
                # Use the cached fallback arrays as inputs if they exist.
                x_is_variable = isinstance(x, variable.Variable)
                if x_is_variable and x._chainerx_fallback_array is not None:
                    fallback_data = x._chainerx_fallback_array
                    if device is None:
                        device = x.device
                else:
                    fallback_data = backend.from_chx(data)
                    if device is None:
                        device = backend.ChainerxDevice(data.device)

                    # Update the fallback cache if possible.
                    if x_is_variable:
                        x._chainerx_fallback_array = fallback_data

            in_data.append(fallback_data)
        in_data = tuple(in_data)
        return chainerx_in_data, in_data, device
    def _chainerx_apply_fallback_postprocess(
            self, chainerx_device, chainerx_in_data, inputs, outputs):
        # Sends the fallback (NumPy/CuPy) outputs back to the ChainerX
        # device and registers this node in the ChainerX graph so that
        # backprop will come back through FunctionNode.backward.
        # TODO(hvy): Take configuration.config.enable_backprop into
        # account?
        chainerx_out_data = chainerx_device.send(outputs)

        # Insert a ChainerX op-node that calls FunctionNode.backward in
        # backprop. Note that chainerx_out_data may not require gradients.
        chainerx._core._function_node_forward(
            self, chainerx_in_data, chainerx_out_data,
            [] if self._input_indexes_to_retain is None
            else self._input_indexes_to_retain,
            [] if self._output_indexes_to_retain is None
            else self._output_indexes_to_retain)

        # Inputs are recorded as light-weight node-property proxies.
        self.inputs = tuple([
            None if x is None
            else variable._ChainerxVariableNodeProps(x) for x in inputs])

        ret = tuple([
            _to_variable_with_chainerx_fallback_array(
                chainerx_device,
                chainerx_out_array, out_array)
            for chainerx_out_array, out_array
            in six.moves.zip(chainerx_out_data, outputs)])

        return ret
def forward_chainerx(self, inputs):
"""Computes the output arrays from the input ChainerX arrays.
This method may check the input arrays and other attributes to see
if the computation can be done using ChainerX implementation.
If it's not supported, :data:`chainer.Fallback` should be returned
instead of output arrays. In that case, computation using conventional
Python implementation will be performed.
Args:
inputs: Tuple of input array(s).
Returns:
Tuple of output array(s) or :data:`chainer.Fallback`\\ .
"""
return chainer.Fallback
def forward(self, inputs):
"""Computes the output arrays from the input arrays.
It delegates the procedure to :meth:`forward_cpu` or
:meth:`forward_gpu` by default. Which of them this method selects is
determined by the type of input arrays. Implementations of
:class:`FunctionNode` must implement either CPU/GPU methods or this
method.
Args:
inputs: Tuple of input array(s).
Returns:
Tuple of output array(s).
.. warning::
Implementations of :class:`FunctionNode` must take care that the
return value must be a tuple even if it returns only one array.
"""
assert len(inputs) > 0
if isinstance(inputs[0], cuda.ndarray):
return self.forward_gpu(inputs)
return self.forward_cpu(inputs)
def forward_cpu(self, inputs):
"""Computes the output arrays from the input NumPy arrays.
Args:
inputs: Tuple of input :class:`numpy.ndarray` objects.
Returns:
Tuple of output arrays. Each element can be NumPy or CuPy arrays.
.. warning::
Implementation of :class:`FunctionNode` must take care that the
return value must be a tuple even if it returns only one array.
"""
raise NotImplementedError
def forward_gpu(self, inputs):
"""Computes the output arrays from the input CuPy arrays.
Args:
inputs: Tuple of input :class:`cupy.ndarray` objects.
Returns:
Tuple of output arrays. Each element can be NumPy or CuPy arrays.
.. warning::
Implementation of :class:`FunctionNode` must take care that the
return value must be a tuple even if it returns only one array.
"""
raise NotImplementedError
def retain_inputs(self, indexes):
"""Lets specified input variable nodes keep data arrays.
By calling this method from :meth:`forward`, the function node can
specify which inputs are required for backprop. The input variables
with retained arrays can then be obtained by calling
:meth:`get_retained_inputs` from inside :meth:`backward`.
Unlike :class:`~chainer.Function`, the function node **DOES NOT** keep
input
arrays by default. If you want to keep some or all input arrays, do not
forget to call this method.
Note that **this method must not be called from the outside of**
:meth:`forward`.
Args:
indexes (iterable of int): Indexes of input variables that the
function will require for backprop.
"""
self._input_indexes_to_retain = indexes
def retain_outputs(self, indexes):
"""Lets specified output variable nodes keep data arrays.
By calling this method from :meth:`forward`, the function node can
specify which outputs are required for backprop. If this method is not
called, no output variables will be marked to keep their data array at
the point of returning from :meth:`apply`. The output variables with
retained arrays can then be obtained by calling
:meth:`get_retained_outputs` from inside :meth:`backward`.
.. note::
It is recommended to use this method if the function requires some
or all output arrays in backprop. The function can also use output
arrays just by keeping references to them directly, although it
might affect the performance of later function applications on the
output variables.
Note that **this method must not be called from the outside of**
:meth:`forward`.
Args:
indexes (iterable of int): Indexes of output variables that the
function will require for backprop.
"""
self._output_indexes_to_retain = indexes
def backward(self, target_input_indexes, grad_outputs):
    """Computes gradients w.r.t.\\ specified inputs given output gradients.

    One step of backpropagation for this function node. Given the
    gradients w.r.t. the outputs, it computes the gradients w.r.t. the
    requested inputs only; gradients outside ``target_input_indexes``
    need not be produced.

    Both the incoming and outgoing gradients are
    :class:`~chainer.Variable` objects, so a returned gradient can carry
    its full computational history, enabling differentiable backprop
    (higher-order differentiation).

    Args:
        target_input_indexes (tuple of int): Sorted indices of the
            inputs whose gradients are required; guaranteed non-empty.
        grad_outputs (tuple of :class:`~chainer.Variable`\\ s): Gradients
            w.r.t. the outputs; entries may be ``None`` when the
            corresponding gradient is not given.

    Returns:
        Tuple of gradient variables. Its length may equal either
        ``len(target_input_indexes)`` or the total number of inputs; in
        the latter case the non-target entries are discarded.

    .. seealso::
        :meth:`backward_accumulate` for an interface that fuses the
        backward computation with gradient accumulation.
    """
    # Default: the function is treated as non-differentiable.
    return (None,) * len(target_input_indexes)
def backward_accumulate(self, target_input_indexes, grad_outputs,
                        grad_inputs):
    """Computes gradients w.r.t.\\ specified inputs and accumulates them.

    Fuses the backward computation with gradient accumulation, which is
    useful when several functions are applied to the same variable.
    Override either this method or :meth:`backward`; implementing
    :meth:`backward` is usually simpler and is recommended unless you
    need efficient accumulation.

    Args:
        target_input_indexes (tuple of int): Sorted indices of the
            inputs whose gradients are required; guaranteed non-empty.
        grad_outputs (tuple of Variable): Gradients w.r.t. the outputs;
            entries may be ``None``.
        grad_inputs (tuple of Variable): Previously accumulated
            gradients w.r.t. the target inputs, computed by other paths;
            ``None`` where no gradient exists yet.

    Returns:
        Tuple of gradient variables, one per entry of
        ``target_input_indexes`` (the length **must** match exactly,
        unlike :meth:`backward`).

    .. note::
        Gradient variables in ``grad_outputs`` are distinct even when a
        variable is passed to multiple input arguments; usually only the
        first corresponding position of ``grad_inputs`` carries the
        gradient and the rest are ``None`` (except with the
        ``lazy_grad_sum`` feature). This convention may change.
    """
    # Reference implementation in terms of backward(); it is only used
    # when a subclass does not override this method.
    assert isinstance(target_input_indexes, tuple)
    assert isinstance(grad_outputs, tuple)
    assert isinstance(grad_inputs, tuple)
    gxs = self._backward_target_inputs(target_input_indexes, grad_outputs)
    accumulated = []
    for gx, prior in zip(gxs, grad_inputs):
        if prior is None:
            accumulated.append(gx)
        elif gx is None:
            accumulated.append(prior)
        else:
            accumulated.append(gx + prior)
    return tuple(accumulated)
def _backward_chainerx(self, target_input_indexes, grad_outputs,
                       retained_inputs, retained_outputs):
    # Backward wrapper that is called from C++ via a Python binding in case
    # self.apply was called with chainerx.ndarrays.
    #
    # Wraps the raw chainerx arrays (retained inputs/outputs and output
    # gradients) into chainer Variables, runs the Python-side backward,
    # and hands raw chainerx arrays back to the C++ caller.
    assert self._is_chainerx_fallback_mode
    assert len(target_input_indexes) > 0
    # Retention bookkeeping must be consistent: either nothing was
    # retained, or exactly as many arrays arrive as were requested.
    assert (
        (self._input_indexes_to_retain is None
         and len(retained_inputs) == 0)
        or (len(self._input_indexes_to_retain) == len(retained_inputs)))
    assert (
        (self._output_indexes_to_retain is None
         and len(retained_outputs) == 0)
        or (len(self._output_indexes_to_retain) == len(retained_outputs)))
    assert all([
        a is None or isinstance(a, chainerx.ndarray)
        for a in grad_outputs])
    # Expose the retained arrays as Variables so that
    # get_retained_inputs/get_retained_outputs work in this mode.
    self._chainerx_retained_inputs = tuple([
        None if array is None
        else variable.Variable(
            array, requires_grad=array.is_backprop_required())
        for array in retained_inputs])
    self._chainerx_retained_outputs = tuple([
        None if array is None
        else variable.Variable(
            array, requires_grad=(
                # NOTE: the inner None check is redundant (array is
                # already known non-None on this branch) but harmless.
                False if array is None else array.is_backprop_required()))
        for array in retained_outputs])
    device = backend.get_device_from_array(
        *(retained_inputs + retained_outputs + grad_outputs))
    with chainer.using_device(device):
        gxs = self._backward_target_inputs(
            tuple(target_input_indexes),
            tuple([
                None
                if gy is None
                else chainer.Variable(
                    gy, requires_grad=gy.is_backprop_required())
                for gy in grad_outputs]))
    # Unwrap to raw chainerx arrays for the C++ side.
    gx_arrs = [gx._data[0] for gx in gxs]
    assert all([isinstance(gx, chainerx.ndarray) for gx in gx_arrs])
    return gx_arrs
def _backward_target_inputs(self, target_input_indexes, grad_outputs):
# Filters out input gradients that are not required and returns the
# rest.
gxs = self.backward(target_input_indexes, grad_outputs)
len_gxs = len(gxs)
if len_gxs == len(self.inputs):
gxs = tuple([gxs[i] for i in target_input_indexes])
else:
assert len_gxs == len(target_input_indexes)
return gxs
def _get_error_message(self, message):
lines = [
message,
' function={} ({})'.format(self._impl_name, self.label)
]
if self.inputs:
for i, input in enumerate(self.inputs):
lines.append(
' input {}: shape={} dtype={}'.format(
i, input.shape, input.dtype))
if self.outputs:
for i, output_ref in enumerate(self.outputs):
output = output_ref()
if output is None:
lines.append(
' output {}: not available')
else:
lines.append(
' output {}: shape={} dtype={}'.format(
i, output.shape, output.dtype))
return '\n'.join(lines)
def get_retained_inputs(self):
    """Returns a tuple of retained input variables.

    Retrieves the input variables that :meth:`retain_inputs` marked
    during :meth:`forward`.

    Returns:
        A tuple of the retained input variables, with ``None`` in place
        of any input whose data is unavailable; an empty tuple when
        nothing was retained.
    """
    if self._is_chainerx_fallback_mode:
        return self._chainerx_retained_inputs
    if self._input_indexes_to_retain is None or self.inputs is None:
        return ()
    return tuple(
        None if self.inputs[idx].data is None
        else self.inputs[idx].get_variable()
        for idx in self._input_indexes_to_retain)
def get_retained_outputs(self):
    """Returns a tuple of retained output variables.

    This method is used to retrieve the output variables retained in
    :meth:`forward`.

    Returns:
        A tuple of retained output variables, if available. Otherwise
        return `None`.

    .. note::
        This method does a tricky thing to support the case of an output
        node garbage-collected before this method is called; in this case,
        this method creates a fresh variable node that acts as an output
        node of the function node.
    """
    if self._is_chainerx_fallback_mode:
        return self._chainerx_retained_outputs
    if self._output_indexes_to_retain is None or self.outputs is None:
        return ()
    # TODO(hvy): It should be safe to remove this check.
    if self._retained_output_data is None:
        raise ValueError(self._get_error_message(
            'retain_outputs is not called in forward.'))
    ret = []
    outputs = self.outputs
    # Work on a copy so self.outputs is only swapped once, at the end.
    new_outputs = list(outputs)
    outputs_modified = False
    for index, data in six.moves.zip(self._output_indexes_to_retain,
                                     self._retained_output_data):
        output = outputs[index]()  # dereference the weakref
        if output is None:
            # The output node is garbage collected, so create a fresh
            # Variable object.
            output_var = variable.Variable(data)
            output_var.creator_node = self
            new_outputs[index] = weakref.ref(output_var.node)
            outputs_modified = True
        else:
            output_var = output.get_variable()
        # Entries without data are reported as None.
        if output_var.array is None:
            ret.append(None)
        else:
            ret.append(output_var)
    if outputs_modified:
        self.outputs = tuple(new_outputs)
    return tuple(ret)
def unchain(self):
    """Purges in/out nodes and this function node itself from the graph."""
    if self._is_chainerx_fallback_mode:
        raise NotImplementedError(
            'Unchaining is not yet supported in ChainerX fallback mode.')
    # Unchain every output node that is still alive (outputs are weakrefs).
    for output_ref in self.outputs:
        output_node = output_ref()
        if output_node is not None:
            output_node.unchain()
    self.inputs = None
    self.outputs = None
def add_hook(self, hook, name=None):
    """Registers a function hook on this function node.

    Args:
        hook (~chainer.FunctionHook): Function hook to be registered.
        name (str): Unique name for the hook among the hooks registered
            to this function; when ``None``, the hook's default name is
            used.

    Raises:
        TypeError: If ``hook`` is not a :class:`~chainer.FunctionHook`.
        KeyError: If a hook with the chosen name is already registered.
    """
    if not isinstance(hook, function_hook.FunctionHook):
        raise TypeError('Hook must be of type FunctionHook')
    hook_name = hook.name if name is None else name
    registry = self.local_function_hooks
    if hook_name in registry:
        raise KeyError('Hook %s already exists' % hook_name)
    registry[hook_name] = hook
    hook.added(self)
def delete_hook(self, name):
    """Unregisters the function hook of the given name.

    Args:
        name (str): Name of the function hook to be unregistered.

    Raises:
        KeyError: If no hook with that name is registered.
    """
    if name not in self.local_function_hooks:
        raise KeyError('Hook %s does not exist' % name)
    # Notify the hook before removing it from the registry.
    self.local_function_hooks[name].deleted(self)
    del self.local_function_hooks[name]
def grad(outputs, inputs, grad_outputs=None, grad_inputs=None, set_grad=False,
         retain_grad=False, enable_double_backprop=False, loss_scale=None):
    """Computes the gradient of output variables w.r.t.\\ the input variables.

    This function implements the backpropagation algorithm. While
    :meth:`Variable.backward` also implements backprop, this function selects
    the smallest paths in the computational graph needed to compute the
    gradients w.r.t. inputs. The error is backpropagated only through these
    selected paths, which may reduce the overall computational cost.

    This function also differs from :meth:`Variable.backward` in the way to
    return the gradients; it directly returns the gradient variables as a list
    instead of setting gradients to the :attr:`Variable.grad_var` attribute of
    the original variable. It means users do not need to clear the gradient
    w.r.t. each variable before computing the gradient using this function.
    If ``set_grad`` option is set to ``True``, the computed gradient is also
    stored in the :attr:`Variable.grad_var` attribute of each variable, in
    which case any original value of :attr:`Variable.grad_var` will be updated
    even if it had already been set.

    Args:
        outputs (tuple or list of :class:`~chainer.Variable`):
            A sequence of output variables from which backprop starts.
        inputs (tuple or list of :class:`~chainer.Variable`):
            A sequence of input variables each of which this function computes
            the gradient w.r.t.
        grad_outputs (tuple or list of :class:`~chainer.Variable` or None):
            A sequence of variables that gives the initial value of each output
            gradient.
            If an element is set to ``None``, an array filled with 1 is used.
            If this argument itself is ``None``, it is treated as a sequence of
            ``None``\\ s.
        grad_inputs (tuple or list of :class:`~chainer.Variable` or None):
            A sequence of variables that gives the initial value of each input
            gradient. The gradients computed by the backprop
            algorithm are accumulated to them (not in-place). If an element
            is set to ``None``, the gradient is not accumulated to this value.
            If this argument itself is ``None``, it is treated as a sequence of
            ``None``\\ s.
        set_grad (bool): If it is ``True``, the :attr:`Variable.grad_var`
            attribute of each input variable is set to the corresponding
            computed gradient variable.
        retain_grad (bool): If it is ``True``, the gradients w.r.t. all the
            intermediate variables are stored in the :attr:`Variable.grad_var`
            attribute. In this case, the ``set_grad`` option is ignored.
        enable_double_backprop (bool): If it is ``True``, the computed
            gradients can be further backpropagated. Enabling it may increase
            the memory consumption (and possibly the computational time) to
            remember the intermediate gradient values for the second
            backpropagation.
        loss_scale (float): Loss scaling factor. Loss scaling is a useful
            technique to mitigate vanishing gradient issue that tends to happen
            when low precision data type like float16 is used during training.
            If you set loss scaling factor, gradients of loss values are to be
            multiplied by the factor before backprop starts. The factor is
            propagated to whole gradients in a computational graph along the
            backprop. The gradients of parameters are divided by the factor
            just before the parameters are to be updated.

    Returns:
        A list of gradient variables w.r.t. the inputs.
    """
    # --- Argument validation -------------------------------------------
    if not isinstance(outputs, (tuple, list)):
        raise TypeError(
            'outputs must be a tuple or a list, not {}.'.format(type(outputs)))
    if not isinstance(inputs, (tuple, list)):
        raise TypeError(
            'inputs must be a tuple or a list, not {}.'.format(type(inputs)))
    if grad_outputs is not None:
        if not isinstance(grad_outputs, (tuple, list)):
            raise TypeError(
                'grad_outputs must be a tuple or a list or None, not {}.'
                .format(type(grad_outputs)))
        if len(outputs) != len(grad_outputs):
            raise ValueError(
                'grad_outputs must be of the same length as outputs.\n'
                'len(outputs) = {}, len(grad_outputs) = {}'
                .format(len(outputs), len(grad_outputs)))
    if grad_inputs is not None:
        if not isinstance(grad_inputs, (tuple, list)):
            raise TypeError(
                'grad_inputs must be a tuple or a list or None, not {}.'
                .format(type(grad_inputs)))
        if len(inputs) != len(grad_inputs):
            raise ValueError(
                'grad_inputs must be of the same length as inputs.\n'
                'len(inputs) = {}, len(grad_inputs) = {}'
                .format(len(inputs), len(grad_inputs)))
    # Check if all the inputs are chainerx arrays and if so
    # Relies in chainerx.grad function
    n_chx_inputs = sum([False if x is None else x._has_chainerx_array
                        for x in inputs])
    if n_chx_inputs == len(inputs):
        # All-ChainerX fast path: delegate entirely to chainerx.grad.
        if loss_scale is not None:
            raise ValueError(
                'loss_scale is not supported on chainerx.grad interface')
        # Need to access the arrays to invoke the chainer grad function
        if grad_outputs:
            grad_outputs_chx = [x._data[0] for x in grad_outputs]
        else:
            grad_outputs_chx = []
        outputs_chx = [x._data[0] for x in outputs]
        inputs_chx = [x._data[0] for x in inputs]
        grads = chainerx.grad(outputs_chx, inputs_chx,
                              backprop_id=None,
                              enable_double_backprop=enable_double_backprop,
                              set_grad=set_grad,
                              retain_grad=retain_grad,
                              grad_outputs=grad_outputs_chx)
        # Accumulate initial input gradients, mirroring the non-chainerx
        # path's behavior for grad_inputs.
        if grad_inputs:
            grads = [g+gi._data[0] for g, gi in zip(grads, grad_inputs)]
        return [variable.Variable(g, requires_grad=g.is_backprop_required())
                for g in grads]
    elif n_chx_inputs > 0:
        raise TypeError(
            'Mixing chainerx and non-chainerx variables is not allowed')
    for v in outputs:
        # Raise error here if v is created by Function.backward.
        # In such case, we don't know exact inputs of the creator.
        v.node._check_old_style_gradient()
    # The implementation consists of three steps.

    # 1. Backward enumeration: all the nodes reachable backward from the output
    #    nodes are enumerated. The forward direction links are collected in
    #    this step. Note that the variable nodes whose requires_grad is false
    #    are ignored and their creators are not searched.
    candidate_funcs = [v.creator_node for v in outputs
                       if v.creator_node is not None]
    visited_funcs = set()
    forward_graph = collections.defaultdict(list)
    while candidate_funcs:
        func = candidate_funcs.pop()
        if func in visited_funcs:
            continue
        visited_funcs.add(func)
        for x in func.inputs:
            # Raise error here if x is created by Function.backward.
            # In such case, we don't know exact inputs of the creator.
            x._check_old_style_gradient()
            if not x.requires_grad:
                continue
            forward_graph[x].append(func)
            creator = x.creator_node
            if creator is not None and creator not in visited_funcs:
                candidate_funcs.append(creator)

    # 2. Forward enumeration: all the nodes in the subgraph reachable from the
    #    input nodes are enumerated. The extracted (sub-)subgraph is the union
    #    of all paths that backpropagation will visit.
    candidate_vars = [x.node for x in inputs]
    visited_funcs = set()
    grad_required = set()
    while candidate_vars:
        x = candidate_vars.pop()
        grad_required.add(x)
        for func in forward_graph[x]:
            if func in visited_funcs:
                continue
            visited_funcs.add(func)
            for y_ref in func.outputs:
                y = y_ref()
                if y is not None and y in forward_graph:
                    candidate_vars.append(y)

    # 3. Backpropagation: the backpropagation is executed along the
    #    (sub-)subgraph. It uses the topological order of the subgraph which is
    #    induced by the reversed order of function applications ("rank").
    grads = _backprop_utils.GradTable()
    # Initialize the gradient mapping.
    if grad_outputs is None:
        grad_outputs = (None,) * len(outputs)
    for y, gy in zip(outputs, grad_outputs):
        if gy is None:
            # No initial gradient supplied: use an array of ones.
            with chainer.using_device(y.device):
                gy_data = y.device.xp.ones_like(y.array)
                gy = variable.Variable(gy_data, requires_grad=False)
            if loss_scale is not None:
                gy.data *= loss_scale
        grads[y.node] = gy
    if grad_inputs is not None:
        for x, gx in zip(inputs, grad_inputs):
            if gx is not None:
                grads[x.node] = gx
    # Backprop implementation. It edits grads which will only contain the
    # gradients w.r.t. the inputs.
    with chainer.using_config('enable_backprop', enable_double_backprop):
        ret_dict = _backprop(
            outputs, inputs, grad_required, retain_grad, grads, loss_scale)
    # Extract the gradients w.r.t. the inputs and return them.
    ret = [ret_dict[x.node] for x in inputs]
    if set_grad:
        for x, gx in zip(inputs, ret):
            x.grad_var = gx
    return ret
def _backprop(outputs, inputs, grad_required, retain_grad, grads, loss_scale):
    """Runs backprop over the pruned subgraph selected by ``grad()``.

    Pops function nodes in reverse topological order (highest rank first)
    and accumulates gradients into ``grads``; returns a dict mapping each
    input node to its gradient variable.
    """
    candidate_funcs, push_candidate, pop_candidate = _get_ordered_func_heap()
    for y in outputs:
        creator = y.creator_node
        if creator is not None:
            push_candidate(creator)
    input_nodes = set(x.node for x in inputs)
    ret_dict = {}
    is_debug = chainer.is_debug()
    base_hooks = chainer.get_function_hooks().values()
    while candidate_funcs:
        func = pop_candidate()
        # Collect the gradients w.r.t. the outputs
        ys = [y() for y in func.outputs]  # access via weak ref
        gys = tuple([grads.pop(y)
                     if y is not None and y.creator_node is not None else None
                     for y in ys])
        for node, gy in six.moves.zip(ys, gys):
            if node is not None:
                if node in input_nodes:
                    ret_dict[node] = gy
                if retain_grad:
                    y = node.get_variable_or_none()
                    if y is not None:
                        y.grad_var = gy
                        y._loss_scale = loss_scale
        # Collect the gradients w.r.t. the inputs
        input_indexes = []
        x_grads = collections.OrderedDict()
        for i, x in enumerate(func.inputs):
            if x not in grad_required:
                continue
            input_indexes.append(i)
            if x not in x_grads:
                x_grads[x] = grads.get_as_list(x)
        if not input_indexes:
            # No input of this function needs a gradient; skip it.
            continue
        input_indexes = tuple(input_indexes)
        # Do backward

        # Call pre-backward hooks
        if func._n_local_function_hooks != 0:
            local_hooks = collections.OrderedDict(chainer.get_function_hooks())
            local_hooks.update(func.local_function_hooks)
            hooks = local_hooks.values()  # avoid six for performance
        else:
            hooks = base_hooks
        in_data = [x.data for x in func.inputs]
        out_grad_data = [None if g is None else g.data for g in gys]
        with chainer.using_device(backend.get_device_from_array(*in_data)):
            for hook in hooks:
                hook.backward_preprocess(
                    func, tuple(in_data), tuple(out_grad_data))
            _backprop_utils.backprop_step(func, input_indexes, gys, x_grads,
                                          is_debug)
            # Call post-backward hooks
            for hook in hooks:
                hook.backward_postprocess(
                    func, tuple(in_data), tuple(out_grad_data))
        # Update grads
        for node, g in x_grads.items():
            if not g:  # gradient == None
                continue
            creator = node.creator_node
            if creator is not None:
                push_candidate(creator)
    # Any requested input that was never reached gets its (possibly
    # pre-seeded) gradient from the table.
    for x in input_nodes:
        if x not in ret_dict:
            ret_dict[x] = grads.pop(x)
    return ret_dict
def _extract_apply_in_data(inputs):
    # Extracts arrays from FunctionNode.apply() inputs.
    #
    # A flag that indicates whether inputs are chainerx arrays is also
    # returned.
    #
    # Each object in `inputs` may be `Variable` or an array.
    # If it's a `Variable` and its underlying array is a chainerx array,
    # `Variable._data[0]` (which is backproppable in contrast to
    # `Variable.array`) is returned.
    #
    # If at least one of the arrays is a ChainerX array, all other
    # arrays need to be ChainerX arrays.
    if not inputs:
        return False, ()
    if chainerx.is_available():
        has_chainerx_array = False
        # Unwrap arrays
        arrays = []
        for x in inputs:
            if isinstance(x, variable.Variable):
                if x._has_chainerx_array:
                    arrays.append(x._data[0])
                    has_chainerx_array = True
                else:
                    arrays.append(x.array)
            else:  # x is ndarray
                arrays.append(x)
                if not has_chainerx_array:
                    if isinstance(x, chainerx.ndarray):
                        has_chainerx_array = True
        return has_chainerx_array, tuple(arrays)
    else:
        # ChainerX unavailable: plain unwrap, never a chainerx array.
        return False, tuple([
            x.array if isinstance(x, variable.Variable) else x
            for x in inputs])
def _get_ordered_func_heap():
heap = []
visited_funcs = set()
def push_heap(func):
if func not in visited_funcs:
# Negate since heapq is min-heap
# The second element is used to make each item unique
ordered_func = -func.rank, len(visited_funcs), func
visited_funcs.add(func)
heapq.heappush(heap, ordered_func)
def pop_heap():
_, _, func = heapq.heappop(heap)
return func
return heap, push_heap, pop_heap
def _make_chainerx_attribute_fallback_class(obj, device):
    # Creates a fabricated class based on a concerete class
    # (either FunctionNode or Function),
    # equipped with the automatic attribute fallback. This is enabled
    # during FunctionNode.forward(), Function.forward() and
    # Function.backward().
    #
    # In the fallback mechanism, when an array with the fallback ndarray
    # type (e.g. numpy.ndarray for ChainerX native devices) is assigned
    # as an attribute, it's automatically converted to a ChainerX ndarray
    # with the corresponding ChainerX device and stored in that form.
    # Conversely, when an attribute with ChainerX ndarray type is queried,
    # it's converted to the fallback ndarray before being returned.
    # That way, concrete function implementations can use attributes
    # as ndarray storage, without converting from/to ChainerX manually.
    #
    # Note that it works only if the attribute has an ndarray type. If the
    # array is wrapped in a tuple, for example, no automatic conversion
    # will be taken place.
    fallback_device = device.fallback_device
    # Bound super proxy; closed over by the generated dunder methods below.
    sup = super(obj.__class__, obj)
    # Cache to avoid converting same arrays multiple times
    fallback_array_cache = {}

    # self.__getattribute__ for fallback arrays
    def getattribute(self, name):
        value = sup.__getattribute__(name)
        if isinstance(value, chainerx.ndarray):
            fallback_arr = fallback_array_cache.get(name)
            if fallback_arr is None:
                fallback_arr = backend.from_chx(value)
                fallback_array_cache[name] = fallback_arr
            return fallback_arr
        return value

    # self.__setattr__ for fallback arrays
    def setattr(self, name, value):
        if isinstance(value, fallback_device.xp.ndarray):
            # Cache the fallback-side array; store the ChainerX version.
            fallback_array_cache[name] = value
            sup.__setattr__(name, backend.to_chx(value))
            return
        sup.__setattr__(name, value)

    # Return a fabricated FunctionNode class
    new_class = type(
        obj.__class__.__name__,
        inspect.getmro(obj.__class__),
        {
            '__getattribute__': getattribute,
            '__setattr__': setattr,
        })
    return new_class
@contextlib.contextmanager
def _chainerx_attribute_fallback(obj, chainerx_device):
    """Temporarily swaps *obj*'s class for a fallback-enabled clone.

    While the context is active, attribute access on ``obj`` goes through
    the conversion logic installed by
    :func:`_make_chainerx_attribute_fallback_class`; the original class
    is always restored on exit, even on error.
    """
    original_class = obj.__class__
    obj.__class__ = _make_chainerx_attribute_fallback_class(
        obj, chainerx_device)
    try:
        yield
    finally:
        obj.__class__ = original_class
| okuta/chainer | chainer/function_node.py | Python | mit | 53,362 | [
"VisIt"
] | 4fd92aaeffc782bcbd62c65e2758b472d233235578cee93459397fcc51165274 |
"""
Tool for converting from a suntans to untrim netcdf formats
"""
from sunpy import Spatial, Grid
import othertime
from datetime import datetime
from netCDF4 import Dataset
import numpy as np
from untrim_tools import untrim_ugrid as ugrid
import pdb
#Dictionary containing the suntans-untrim equivalent grid variables
# Maps SUNTANS variable names (keys) to UnTRIM/UGRID names (values).
# NOTE(review): the key 'mark' appears twice; the duplicate entry is
# redundant (same value) but harmless in a dict literal.
untrim_gridvars = {\
        'xp':'Mesh2_node_x',\
        'yp':'Mesh2_node_y',\
        'xv':'Mesh2_face_x',\
        'yv':'Mesh2_face_y',\
        'xe':'Mesh2_edge_x',\
        'ye':'Mesh2_edge_y',\
        'mark':'Mesh2_edge_bc',\
        'edges':'Mesh2_edge_nodes',\
        'grad':'Mesh2_edge_faces',\
        'cells':'Mesh2_face_nodes',\
        'face':'Mesh2_face_edges',\
        'dv':'Mesh2_face_depth',\
        'de':'Mesh2_edge_depth',\
        'z_r':'Mesh2_layer_3d',\
        'time':'Mesh2_data_time',\
        'mark':'Mesh2_edge_bc',\
        'facemark':'Mesh2_face_bc'\
        }
# Dictionary containing the suntans-untrim equivalent grid dimensions
untrim_griddims = {\
        'Np':'nMesh2_node',\
        'Ne':'nMesh2_edge',\
        'Nc':'nMesh2_face',\
        'Nkmax':'nMesh2_layer_3d',\
        'numsides':'nMaxMesh2_face_nodes',\
        'time':'nMesh2_data_time'\
        }
# Dimensions with hard-wired values
other_dims = {\
        'Three':3,\
        'Two':2,\
        'nsp':1,\
        'date_string_length':19,\
        'nMesh2_time':1\
        }
# physical variables that are directly transferable
# NOTE(review): this mapping appears unused in this module; the output
# variables are driven by `varnames` below instead — confirm before removal.
untrim_vars = {\
        'salt':'Mesh2_salinity_3d',\
        'nu_v':'Mesh2_vertical_diffusivity',\
        'eta':'Mesh2_sea_surface_elevation'
        }
# Time-varying variables created in the output file (in write order).
varnames = ['Mesh2_salinity_3d',\
        'Mesh2_vertical_diffusivity_3d',\
        'Mesh2_sea_surface_elevation',\
        'h_flow_avg',\
        'v_flow_avg',\
        'Mesh2_edge_wet_area',\
        'Mesh2_face_wet_area',\
        'Mesh2_edge_bottom_layer',\
        'Mesh2_edge_top_layer',\
        'Mesh2_face_bottom_layer',\
        'Mesh2_face_top_layer',\
        'Mesh2_face_water_volume',\
        ]
# Sentinel written into masked (invalid) cell/face index entries.
FILLVALUE=-9999
def suntans2untrim(ncfile,outfile,tstart,tend,grdfile=None):
    """
    Converts a suntans averages netcdf file into untrim format
    for use in particle tracking

    Parameters (NOTE: this module is written in Python 2):
        ncfile  - path to the SUNTANS averages netCDF file
        outfile - path of the UnTRIM-format netCDF file to create
        tstart/tend - time-range strings understood by sun.getTstep
        grdfile - optional ascii grid file used to overwrite `grad`
    """
    ####
    # Step 1: Load the suntans data object
    ####
    sun = Spatial(ncfile,klayer=[-99])
    # Calculate some other variables
    sun.de = sun.get_edgevar(sun.dv,method='min')
    # Remap SUNTANS edge boundary marks to UnTRIM conventions.
    sun.mark[sun.mark==5]=0
    sun.mark[sun.mark==3]=2
    # NOTE(review): np.int is deprecated in modern numpy; `int` or
    # np.int_ would be needed on upgrade.
    sun.facemark = np.zeros((sun.Nc,),dtype=np.int)
    # Update the grad variable from the ascii grid file if supplied
    # NOTE(review): `is not None` is the preferred comparison here.
    if not grdfile == None:
        print 'Updating grid with ascii values...'
        grd = Grid(grdfile)
        sun.grad = grd.grad[:,::-1]
    ###
    # Step 2: Write the grid variables to a netcdf file
    ###
    nc = Dataset(outfile,'w',format='NETCDF4_CLASSIC')
    # Global variable
    nc.Description = 'UnTRIM history file converted from SUNTANS output'
    # Write the dimensions
    for dd in untrim_griddims.keys():
        if dd == 'time':
            # Unlimited (record) dimension.
            nc.createDimension(untrim_griddims[dd],0)
        elif dd =='numsides':
            nc.createDimension(untrim_griddims[dd],sun.maxfaces)
        else:
            nc.createDimension(untrim_griddims[dd],sun[dd])
    for dd in other_dims:
        nc.createDimension(dd,other_dims[dd])
    ###
    # Step 3: Initialize all of the grid variables
    ###
    def create_nc_var(name, dimensions, attdict,data=None, \
        dtype='f8',zlib=False,complevel=0,fill_value=999999.0):
        # Helper: create one netCDF variable, copy its attributes and,
        # when given, write its data in one shot.
        tmp=nc.createVariable(name, dtype, dimensions,\
            zlib=zlib,complevel=complevel,fill_value=fill_value)
        for aa in attdict.keys():
            tmp.setncattr(aa,attdict[aa])
        # NOTE(review): `data is not None` would be safer; `== None` on
        # an ndarray relies on legacy numpy comparison behavior.
        if not data==None:
            nc.variables[name][:] = data
    # Make sure the masked cells have a value of -1
    mask = sun['cells'].mask.copy()
    sun['cells'][mask]=FILLVALUE
    sun['face'][mask]=FILLVALUE
    for vv in untrim_gridvars.keys():
        vname = untrim_gridvars[vv]
        print 'Writing grid variable %s (%s)...'%(vname,vv)
        if vv=='time':
            # Time is created separately below.
            continue
        # add dz_min attribute to z_r variable
        if vv == 'z_r':
            ugrid[vname]['attributes'].update({'dz_min':1e-5})
            #sun[vv][:]=sun[vv][::-1]
            sun[vv][:]=sun['z_w'][0:-1][::-1]
        # Reverse the order of grad(???)
        if vv=='grad':
            sun[vv][:]=sun[vv][:,::-1]
        ## Fix one-based indexing
        #if vv in ['cells','edges','grad']:
        #    mask = sun[vv][:]==-1
        #    tmp = sun[vv][:]+1
        #    tmp[mask]=-1
        #    #sun[vv][:]=sun[vv][:]+1
        #    create_nc_var(vname,ugrid[vname]['dimensions'],ugrid[vname]['attributes'],\
        #        data=tmp,dtype=ugrid[vname]['dtype'])
        create_nc_var(vname,ugrid[vname]['dimensions'],ugrid[vname]['attributes'],\
            data=sun[vv],dtype=ugrid[vname]['dtype'])
    # Initialize the two time variables
    vname=untrim_gridvars['time']
    create_nc_var(vname,ugrid[vname]['dimensions'],ugrid[vname]['attributes'],\
        dtype=ugrid[vname]['dtype'])
    vname = 'Mesh2_data_time_string'
    create_nc_var(vname,ugrid[vname]['dimensions'],ugrid[vname]['attributes'],\
        dtype=ugrid[vname]['dtype'])
    ###
    # Step 4: Initialize all of the time-varying variables (but don't write)
    ###
    for vname in varnames:
        print 'Creating variable %s...'%(vname)
        create_nc_var(vname,ugrid[vname]['dimensions'],ugrid[vname]['attributes'],\
            dtype=ugrid[vname]['dtype'],zlib=True,complevel=1,fill_value=999999.)
    ###
    # Step 5: Loop through all of the time steps and write the variables
    ###
    tsteps = sun.getTstep(tstart,tend)
    tdays = othertime.DaysSince(sun.time,basetime=datetime(1899,12,31))
    for ii, tt in enumerate(tsteps):
        # Convert the time to the untrim formats
        timestr = datetime.strftime(sun.time[tt],'%Y-%m-%d %H:%M:%S')
        print 'Writing data at time %s (%d of %d)...'%(timestr,tt,tsteps[-1])
        #Write the time variables
        # NOTE(review): tdays is indexed by the enumeration counter `ii`
        # while sun.time is indexed by `tt`; these only agree when tstart
        # is the first record — confirm against callers.
        nc.variables['Mesh2_data_time'][ii]=tdays[ii]
        nc.variables['Mesh2_data_time_string'][:,ii]=timestr
        # Load each variable or calculate it and convert it to the untrim format
        sun.tstep=[tt]
        ###
        # Compute a few terms first
        eta = sun.loadData(variable='eta' )
        U = sun.loadData(variable='U_F' )
        dzz = sun.getdzz(eta)
        dzf = sun.getdzf(eta)
        # All 3-D fields below are written with axes swapped to
        # (cell/edge, layer) and the layer order reversed, since UnTRIM
        # counts layers from the bed upward.
        vname='Mesh2_sea_surface_elevation'
        #print '\tVariable: %s...'%vname
        nc.variables[vname][:,ii]=eta
        vname = 'Mesh2_salinity_3d'
        #print '\tVariable: %s...'%vname
        tmp3d = sun.loadData(variable='salt' )
        nc.variables[vname][:,:,ii]=tmp3d.swapaxes(0,1)[:,::-1]
        vname = 'Mesh2_vertical_diffusivity_3d'
        #print '\tVariable: %s...'%vname
        tmp3d = sun.loadData(variable='nu_v' )
        nc.variables[vname][:,:,ii]=tmp3d.swapaxes(0,1)[:,::-1]
        vname = 'h_flow_avg'
        #print '\tVariable: %s...'%vname
        nc.variables[vname][:,:,ii]=U.swapaxes(0,1)[:,::-1]
        vname = 'v_flow_avg'
        #print '\tVariable: %s...'%vname
        tmp3d = sun.loadData(variable='w' ) * sun.Ac # m^3/s
        nc.variables[vname][:,:,ii]=tmp3d.swapaxes(0,1)[:,::-1]
        # Need to calculate a few terms for the other variables
        vname = 'Mesh2_edge_wet_area'
        #print '\tVariable: %s...'%vname
        #dzf = sun.loadData(variable='dzf')
        tmp3d = dzf*sun.df
        nc.variables[vname][:,:,ii]=tmp3d.swapaxes(0,1)[:,::-1]
        vname = 'Mesh2_face_water_volume'
        #print '\tVariable: %s...'%vname
        #dzz = sun.loadData(variable='dzz')
        tmp3d = dzz*sun.Ac
        nc.variables[vname][:,:,ii]=tmp3d.swapaxes(0,1)[:,::-1]
        vname = 'Mesh2_face_wet_area'
        #print '\tVariable: %s...'%vname
        tmp3d = np.repeat(sun.Ac[np.newaxis,...],sun.Nkmax,axis=0)
        nc.variables[vname][:,:,ii]=tmp3d.swapaxes(0,1)[:,::-1]
        # UnTRIM references from bottom to top i.e.
        # k = 0 @ bed ; k = Nkmax-1 @ top
        vname = 'Mesh2_edge_bottom_layer'
        #print '\tVariable: %s...'%vname
        #tmp2d = sun.Nkmax-sun.Nke # zero based
        tmp2d = sun.Nkmax-sun.Nke+1 # one based
        nc.variables[vname][:,ii]=tmp2d
        vname = 'Mesh2_edge_top_layer'
        #print '\tVariable: %s...'%vname
        etop = sun.loadData(variable='etop')
        #tmp2d = sun.Nkmax-etop-1 # zero based
        tmp2d = sun.Nkmax-etop # one based
        nc.variables[vname][:,ii]=tmp2d
        vname = 'Mesh2_face_bottom_layer'
        #print '\tVariable: %s...'%vname
        #tmp2d = sun.Nkmax-sun.Nk + 1 # zero based
        tmp2d = sun.Nkmax-sun.Nk # one based
        nc.variables[vname][:,ii]=tmp2d
        vname = 'Mesh2_face_top_layer'
        #print '\tVariable: %s...'%vname
        ctop = sun.loadData(variable='ctop')
        #tmp2d = sun.Nkmax-ctop-1 # zero based
        tmp2d = sun.Nkmax-ctop # one based
        nc.variables[vname][:,ii]=tmp2d
    print 72*'#'
    print '\t Finished SUNTANS->UnTRIM conversion'
    print 72*'#'
    # close the file
    nc.close()
##############
# Testing
##############
##################
# Command-line entry point: converts the example Racetrack run using
# hard-coded paths and time range.
if __name__=="__main__":
    # Inputs
    ncfile = '../data/Racetrack_AVG_0000.nc'
    outfile = '../data/Racetrack_untrim.nc'
    tstart = '20020629.0000'
    tend = '20060701.1200'
    suntans2untrim(ncfile,outfile,tstart,tend)
| UT-CWE/Hyospy | Hyospy_ensemble/lib/SUNTANS/UNTRIM/suntans2untrim.py | Python | mit | 9,710 | [
"NetCDF"
] | c540080792bd94e7e1b990e8dc9a4a3fcf00ca0ebb0bca47dddae6d55d9c12a8 |
#%pylab inline
# Analysis-notebook style script: gamma-spectrum energy calibration.
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from scipy.special import cbrt
import os
import sys
# Make the sibling PythonLibrary package importable from one level up.
sys.path.append("..")
from PythonLibrary.residuals import *
from PythonLibrary.selectDomain import selectdomain
from PythonLibrary.estError import est_error
from matplotlib import rc
# Use a LaTeX-rendered serif font for all figure text.
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)
def calibration(plot_cal):
    """Calibrate MCA channel number against gamma-line energy (keV).

    Three photopeaks of known energy are located by Gaussian fits --
    Na-22 at 511 keV and 1275 keV, Cs-137 at 662 keV -- and a straight
    line through the (fitted channel centroid, known energy) points
    gives the channel-to-energy conversion.

    :param plot_cal: when True, save diagnostic plots of the peak fits
        and of the calibration line under plots/.
    :return: best-fit parameters of ``linear`` (slope, intercept), i.e.
        energy_keV = linear(channel, *params).
    """
    # Exported spectra do not all share one column layout; fall back to
    # reading every column when columns (0, 2) are unavailable.
    try:
        data = np.genfromtxt("data/run_3/na_22_cal_2.tsv", skip_header=26, usecols=(0,2))
    except Exception:
        data = np.genfromtxt("data/run_3/na_22_cal_2.tsv", skip_header=26)
    data_err = np.sqrt(data[:,1])  # Poisson (sqrt-N) counting error per bin
    # --- Na-22 511 keV peak: Gaussian + background, channels 120-180 ---
    peak_1_x, peak_1_y = selectdomain(data[:,0], data[:,1], [120, 180])
    # Fit seeds; element [2] is the peak centre (channel) in all fits below.
    p1 = [1e6, 50, 170, -10, 200]
    popt1, pcov1 = curve_fit(gaussian_back, peak_1_x, peak_1_y, p0 = p1)
    yFit1 = gaussian_back(peak_1_x, *popt1)
    yuFit1 = gaussian_back(peak_1_x, *p1)
    #print popt1[2]
    # --- Na-22 1275 keV peak: plain Gaussian, channels 330-430 ---
    peak_2_x, peak_2_y = selectdomain(data[:,0], data[:,1], [330, 430])
    p2 = [1e6, 100, 350, 0]
    popt2, pcov2 = curve_fit(gaussian, peak_2_x, peak_2_y, p0 = p2)
    yFit2 = gaussian(peak_2_x, *popt2)
    yuFit2 = gaussian(peak_2_x, *p2)
    #print popt2[2]
    # --- Cs-137 662 keV peak: Gaussian + background, channels 160-230 ---
    data_cs = np.genfromtxt("data/run_3/cs_137_cal_2.tsv", skip_header=26, usecols=(0,2))
    cs_err = np.sqrt(data_cs[:,1])
    peak_3_x, peak_3_y = selectdomain(data_cs[:,0], data_cs[:,1], [160, 230])
    p3 = [1e3, 50, 200, -10, 200]
    popt3, pcov3 = curve_fit(gaussian_back, peak_3_x, peak_3_y, p0 = p3)
    yFit3 = gaussian_back(peak_3_x, *popt3)
    yuFit3 = gaussian_back(peak_3_x, *p3)
    #print popt3[2]
    if plot_cal:
        # Annotated overview of both calibration spectra with the three
        # fitted peaks overlaid.
        plt.figure(figsize=(10,10))
        plt.annotate("Na-22 511 keV", (popt1[2], 1.7e4), (popt1[2]+50, 1.7e4), arrowprops = dict(width=2, headwidth=4, facecolor="red"))
        plt.annotate("Cs-137 662 keV", (popt3[2], .98e4), (popt3[2], 1.3e4), arrowprops = dict(width=2, headwidth=4, facecolor="red"))
        plt.annotate("Na-22 1275 keV", (popt2[2], 3e3), (popt2[2], 4e3), arrowprops = dict(width=2, headwidth=4, facecolor="red"))
        plt.errorbar(data[:,0], data[:,1], data_err)
        # plt.plot(peak_1_x, yuFit1)
        plt.plot(peak_1_x, yFit1, alpha=.8, lw=3, label="Center: %.0f $\pm$ %.2f channel" % (popt1[2], np.sqrt(pcov1[2,2])))
        # plt.plot(peak_2_x, yuFit2)
        plt.plot(peak_2_x, yFit2, alpha=.8, lw=3, label="Center: %.0f $\pm$ %.2f channel" % (popt2[2], np.sqrt(pcov2[2,2])))
        #plt.figure(figsize=(10, 10))
        plt.errorbar(data_cs[:,0], data_cs[:,1], cs_err)
        # plt.plot(peak_3_x, yuFit3)
        plt.plot(peak_3_x, yFit3, alpha=.8, lw=3, label="Center: %.0f $\pm$ %.2f channel" % (popt3[2], np.sqrt(pcov3[2,2])))
        plt.xlabel("Channel")
        plt.ylabel("Counts")
        plt.title("Energy Calibration Using Known Sources")
        plt.legend()
        plt.savefig("plots/calibration_peaks.pdf")
    # Straight-line fit of known energy against fitted channel centroid.
    cal_line_x = np.array([popt1[2], popt3[2], popt2[2]])
    cal_line_y = np.array([511, 662, 1275])
    x_err = np.array([np.sqrt(pcov1[2,2]), np.sqrt(pcov3[2,2]), np.sqrt(pcov2[2,2])])
    p_lin = [2.0, 150.0]
    lin, lin_pcov = curve_fit(linear, cal_line_x, cal_line_y, p0 = p_lin)
    # print lin
    yFit = linear(cal_line_x, *lin)
    yuFit = linear(cal_line_x, *p_lin)
    if plot_cal:
        plt.figure(figsize=(10, 10))
        # Marker size / line width are used to visualise the fit
        # uncertainties directly on the calibration plot.
        plt.errorbar(cal_line_x, cal_line_y, x_err, fmt='o', ms=np.average(x_err), label="Point Uncertainty: %.3f channel" % np.average(x_err))
        plt.plot(cal_line_x, yFit, alpha = .7, lw = np.sqrt(lin_pcov[0,0]), label="Slope Uncertainty: %.3f keV/channel" % np.sqrt(lin_pcov[0,0]))
        plt.xlabel("Channel")
        plt.ylabel("Energy (keV)")
        plt.text(175, 1100, "Ax + B \n %.3fx + %.3f" % (lin[0], lin[1]))
        plt.title("Calibrating Channel to Energy")
        plt.legend(loc=4)
        plt.savefig("plots/channel_energy_cal.pdf")
    return lin
# save figure, etc.
def spectrum(dataset, plot_full = False):
    """Locate and fit the photopeak in one spectrum file.

    The channel axis is converted to energy via ``calibration``, an
    exponential background is fitted outside the peak region and
    subtracted under it, and a Gaussian is fitted to the residual peak.
    Plots of the isolated peak and of the fitted centre are written
    under plots/.

    :param dataset: path to a tab-separated spectrum file (expects a
        path of the form data/<dir>/<name>.tsv for the plot filenames).
    :param plot_full: when True, also save the background/ROI overview
        plot.
    """
    conversion = calibration(False)
    # Column layout varies between exports; fall back to columns (0, 1).
    try:
        data = np.genfromtxt(dataset, skip_header=26, usecols=(0,2))
    except ValueError:
        data = np.genfromtxt(dataset, skip_header=26, usecols=(0,1))
    data_x = linear(data[:,0], *conversion)  # channel -> energy (keV)
    domain = [2000, 2450]  # region of interest around the photopeak (keV)
    peak_x, peak_y = selectdomain(data_x, data[:,1], domain)
    # Background sample: 800-3500 keV with the peak region excluded.
    back_x, back_y = selectdomain(data_x, data[:,1], [800, 3500], domain)
    back_x_full, back_y_full = selectdomain(data_x, data[:,1], [800, 3500])
    p_back = np.array([1e4, -1e-3, 6e2])
    back_popt, back_pcov = curve_fit(exponential, back_x, back_y, p0 = p_back, maxfev=int(1e4))
    back_yFit = exponential(back_x_full, *back_popt)
    back_yuFit = exponential(back_x_full, *p_back)
    # Background model evaluated only under the peak, for subtraction.
    to_subtract_x, to_subtract_y = selectdomain(back_x_full, back_yFit, domain)
    if plot_full:
        plt.figure(figsize=(10, 10))
        plt.errorbar(data_x, data[:,1], np.sqrt(data[:,1]), fmt='o', ms=1)
        plt.errorbar(peak_x, peak_y, np.sqrt(peak_y), fmt='o', ms=1, label = "Region of Interest")
        plt.plot(back_x_full, back_yFit, label = "Background Fit")
        plt.ylabel("Counts")
        plt.xlabel("Energy (keV)")
        plt.title("Isolating the Peak")
        plt.legend()
        plt.savefig("plots/peak_isolation_%s.pdf" % dataset.split("/")[2].split(".")[0])
    # NOTE(review): bins where the fitted background exceeds the data go
    # negative here, so np.sqrt(flat_peak) below yields NaN error bars --
    # TODO confirm whether clipping at zero is intended.
    flat_peak = peak_y - to_subtract_y
    peak_p = [450, 18, 2200, 11]
    peak_popt, peak_pcov = curve_fit(gaussian, peak_x, flat_peak, p0 = peak_p)
    peak_yFit = gaussian(peak_x, *peak_popt)
    # print peak_popt
    # The acquisition live time sits at a fixed byte offset in the file
    # header; older exports use a different offset, hence the fallback.
    # The with-statement closes the file -- no explicit close() needed.
    with open(dataset) as f:
        f.seek(298)
        try:
            livetime = float(f.read(6))
        except Exception:
            f.seek(404)
            livetime = float(f.read(6))
    # print livetime
    plt.figure(figsize=(10, 10))
    plt.errorbar(peak_x, flat_peak, np.sqrt(flat_peak), fmt='o')
    plt.plot(peak_x, peak_yFit, label = "Gaussian Fit\nCenter: %.0f $\pm$ %.0f keV\nCountrate: %.2f $\pm$ %.2f counts/s" % (peak_popt[2], np.absolute(peak_popt[1]/np.sqrt(len(peak_x))), peak_popt[0]/livetime, np.sqrt(peak_pcov[0,0])/livetime))
    plt.ylabel("Counts (after subtracting background)")
    plt.xlabel("Energy (keV)")
    plt.title("Finding the Peak Center")
    plt.legend()
    plt.savefig("plots/peak_center_%s.pdf" % dataset.split("/")[2].split(".")[0])
def cross_section(dataset, plot = False):
    """Integrate the high-energy tail of one absorber spectrum and
    append the resulting countrate to countrates.tsv.

    The countrate above the region-of-interest threshold is estimated
    repeatedly with a randomised lower integration bound; the mean is
    recorded as the countrate and the spread as its uncertainty.

    :param dataset: spectrum filename relative to data/cross_section/.
    :param plot: when True, draw the spectrum with the region of
        interest highlighted.
    """
    def calibrate():
        # One-point calibration via the Na-22 1275 keV Compton edge.
        # Kept for reference; not called in the normal flow.
        data = np.genfromtxt("data/cross_section/na_compton.tsv", skip_header=26)
        xdata, ydata = selectdomain(data[:,0], data[:,1], [100, 2048])
        plt.figure(figsize=(10, 10))
        plt.plot(xdata, ydata)
        plt.annotate("Na-22 1275 keV Compton Edge", (500, 50), (750, 200), arrowprops = dict(width=2, headwidth=4, facecolor="red"))
        plt.xlabel("Channel")
        plt.ylabel("Counts")
        plt.title("One Point Calibration")
        plt.savefig("plots/calibration.pdf")
        return 500
    # calibrate()
    data = np.genfromtxt("data/cross_section/%s" % dataset, skip_header=26)
    def sim_data(n_runs):
        # Monte-Carlo the countrate: jitter the lower integration bound
        # (channel ~ N(1500, 50)) and integrate the spectrum above it.
        # The live time is a property of the acquisition, not of the
        # randomised bound, so read it once rather than once per run.
        # Header offset varies between exports, hence the fallback; the
        # with-statement closes the file -- no explicit close() needed.
        with open("data/cross_section/%s" % dataset) as f:
            f.seek(300)
            try:
                livetime = float(f.read(6))
            except Exception:
                f.seek(404)
                livetime = float(f.read(6))
        #print livetime
        countrates = []
        for i in range(0, n_runs):
            start = np.random.randn(1)*50 + 1500
            xdata, ydata = selectdomain(data[:,0], data[:,1], [start, 2048])
            countrates.append(np.trapz(ydata, xdata)/livetime)
        return countrates
    countrates = sim_data(1500)
    countrate = np.average(countrates)
    dc = np.std(countrates)
    # Append "<rate>\t<dataset>\t<error>" to the running results table.
    with open("data/cross_section/countrates.tsv", 'a+') as f:
        f.write(str(countrate))
        f.write("\t")
        f.write(dataset)
        f.write("\t")
        f.write(str(dc))
        f.write("\n")
    #print np.trapz(ydata, xdata)/livetime
    if plot:
        xdata, ydata = selectdomain(data[:,0], data[:,1], [1500, 2048])
        plt.figure(figsize=(10, 10))
        plt.semilogy(data[:,0], data[:,1])
        plt.fill_between(xdata, np.min(ydata), ydata, alpha = 0.5, label="Region of Interest: \n 3 x 1275 keV Compton Edge - End = channel 1500-2048")
        plt.xlabel("Channel")
        plt.ylabel("Counts (log(counts))")
        plt.title("Showing Region of Interest - 3.75 cm Al")
        plt.legend(loc=0)
        # plt.savefig("plots/example_cross_section_highlight_roi.pdf")
def countrates(dataset):
    """Fit an exponential attenuation curve for one absorber material
    and derive its removal cross-section.

    Reads the accumulated countrates table, gathers the (thickness,
    countrate) points for the requested material, fits an exponential
    decay, converts the decay constant k into a cross-section via
    k = rho*Na*sigma/A, appends (sigma, dsigma, A) to neutron_rad.tsv
    and draws the attenuation plot.

    :param dataset: material key -- one of "cu", "al", "carbon", "pb".
    """
    ydata = np.genfromtxt("data/cross_section/countrates.tsv", usecols=(0))
    xdata_1 = np.genfromtxt("data/cross_section/countrates.tsv", dtype="string", usecols=(1))
    dy_1 = np.genfromtxt("data/cross_section/countrates.tsv", usecols=(2))
    # Zero-thickness reference point (unattenuated countrate and its
    # error), seeded into every material's list.
    xdata_cu = [0]
    ydata_cu = [23.4563458529]
    xdata_al = [0]
    ydata_al = [23.4563458529]
    xdata_c = [0]
    ydata_c = [23.4563458529]
    xdata_pb = [0]
    ydata_pb = [23.4563458529]
    dy_cu = [3.90536763219]
    dy_al = [3.90536763219]
    dy_c = [3.90536763219]
    dy_pb = [3.90536763219]
    Na = 6.022e23  # Avogadro's number (1/mol)
    # Bind the working variables to the requested material's lists along
    # with its density rho (g/cm^3) and molar mass A (g/mol).
    # NOTE: dataset_x/dataset_y/dy are *aliases* of the per-material
    # lists -- the parsing loop below appends into those lists and so
    # fills these variables; do not reorder this block past the loop.
    if dataset == "cu":
        dataset_x = xdata_cu
        dataset_y = ydata_cu
        dy = dy_cu
        rho = 8.92
        A = 63.546
    elif dataset == "al":
        dataset_x = xdata_al
        dataset_y = ydata_al
        dy = dy_al
        rho = 2.70
        A = 26.98
    elif dataset == "carbon":
        dataset_x = xdata_c
        dataset_y = ydata_c
        dy = dy_c
        rho = 2.26
        A = 12.01
    elif dataset == "pb":
        dataset_x = xdata_pb
        dataset_y = ydata_pb
        dy = dy_pb
        rho = 11.34
        A = 207.2
    # NOTE(review): any other key leaves dataset_x etc. unbound and the
    # code below raises NameError -- confirm callers only pass the four
    # keys above.
    # Parse the table: filenames apparently encode the thickness as
    # "<material>_<cm>_<hundredths>" -- TODO confirm naming convention.
    # Rows that do not match are skipped by the blanket except.
    for i in range(0, len(xdata_1)):
        try:
            if xdata_1[i].split("_")[0] == "cu":
                xdata_cu.append(float(xdata_1[i].split("_")[1])+.01*float(xdata_1[i].split("_")[2]))
                ydata_cu.append(ydata[i])
                dy_cu.append(dy_1[i])
            elif xdata_1[i].split("_")[0] == "al":
                xdata_al.append(float(xdata_1[i].split("_")[1])+.01*float(xdata_1[i].split("_")[2]))
                ydata_al.append(ydata[i])
                dy_al.append(dy_1[i])
            elif xdata_1[i].split("_")[0] == "carbon":
                xdata_c.append(float(xdata_1[i].split("_")[1])+.01*float(xdata_1[i].split("_")[2]))
                ydata_c.append(ydata[i])
                dy_c.append(dy_1[i])
            elif xdata_1[i].split("_")[0] == "pb":
                xdata_pb.append(float(xdata_1[i].split("_")[1])+.01*float(xdata_1[i].split("_")[2]))
                ydata_pb.append(ydata[i])
                dy_pb.append(dy_1[i])
        except Exception:
            pass
    baseline = 4.23917702529 * np.ones_like(dataset_x)  # background countrate level
    # Pair thicknesses (descending) with countrates (ascending), i.e.
    # thickest absorber <-> smallest rate.  NOTE(review): this assumes
    # the rates are monotone in thickness, and dy is re-sorted
    # independently of its points -- TODO confirm both are intended.
    dataset_x = np.array(sorted(dataset_x, reverse=True))
    dataset_y = np.array(sorted(dataset_y))
    dy = np.array(sorted(dy))
    p = [15.44, -.3, 7]
    popt, pcov = curve_fit(exponential, dataset_x, dataset_y, p0 = p)
    fitx = np.linspace(dataset_x[0], dataset_x[-1], 500)
    yFit = exponential(fitx, *popt)
    yuFit = exponential(fitx, *p)
    #print popt
    # popt[1] is the fitted decay constant k = rho*Na*sigma/A, so
    # sigma = |k| * A / (rho * Na); dsigma propagates its fit error.
    sigma = np.absolute(popt[1])/(rho*Na/A)
    dsigma = np.absolute(np.sqrt(pcov[1,1]))/(rho*Na/A)
    # Append "<sigma>\t<dsigma>\t<A>" for neutron_radius() to consume.
    with open("data/cross_section/neutron_rad.tsv", 'a+') as f:
        f.write(str(sigma))
        f.write("\t")
        f.write(str(dsigma))
        f.write("\t")
        f.write(str(A))
        f.write("\n")
        f.close()  # NOTE(review): redundant -- the with-statement already closes f
    plt.errorbar(dataset_x, dataset_y, dy, fmt='o')
    plt.plot(dataset_x, baseline, label="Baseline/Background Level")
    plt.plot(fitx, yFit, lw=np.sqrt(pcov[1,1]))
    plt.text(6, 20, r"Fit Form: $R(0)e^{\frac{\rho N_A \sigma}{A} x} + C$"\
    "\n"\
    r"$\frac{\rho N_A \sigma}{A} = (%.2f\,\pm\,%.2f)\,cm^{-1}$"\
    "\n"\
    r"$\sigma = (%.2e\,\pm\,%.1e)\,cm^{2}$" % (np.absolute(popt[1]), np.sqrt(pcov[1,1]), sigma, dsigma))
    plt.xlabel("Absorber Thickenss (cm)")
    plt.ylabel("Countrate (count/sec)")
    plt.title("Countrate Plots")
    plt.legend()
    plt.savefig("plots/countrate_%s.pdf" % dataset)
def neutron_radius():
    """Extract the neutron radius parameter r0 and a deBroglie
    wavelength from the accumulated cross-sections.

    Plots sqrt(sigma/(2*pi)) against A^(1/3) and fits a straight line
    (see the on-plot label ``r_0 A^{1/3} + lambda``): the slope is r0
    and the intercept lambda.  Uncertainties come from a Monte-Carlo
    refit of noise-perturbed data.  Saves plots/neutron_radius.pdf and
    shows the figure.
    """
    data = np.genfromtxt("data/cross_section/neutron_rad.tsv", usecols=(0,1,2))
    sigma = data[:,0]   # cross-section (cm^2), written by countrates()
    dsigma = data[:,1]  # its uncertainty
    A = data[:,2]       # molar mass (g/mol)
    ydata = np.sqrt(sigma/(2*np.pi))
    xdata = cbrt(A)
    dy = np.sqrt(dsigma/(2*np.pi))
    #print ydata
    def sim_data(nruns):
        # Monte-Carlo error propagation: perturb ydata with Gaussian
        # noise and refit a line each time; the spread of the fitted
        # parameters gives their uncertainty.
        # NOTE(review): the noise scale is np.min(dy) for *every* point,
        # not each point's own error -- TODO confirm intended.
        r0 = []
        dbw = [] # de broglie wavelength
        for i in range(0, nruns):
            yd = np.random.randn(len(ydata))*np.min(dy) + ydata
            popt = np.polyfit(xdata, yd, 1)
            r0.append(popt[0])
            dbw.append(popt[1])
        return r0, dbw
    r0, dbw = sim_data(1500)
    # Central values and 1-sigma spreads of the simulated fits.
    popt = [np.average(r0), np.average(dbw)]
    pcov = [np.std(r0), np.std(dbw)]
    yFit = linear(xdata, *popt)
    yFit_max = linear(xdata, *np.add(popt,pcov))    # +1 sigma band edge
    yFit_min = linear(xdata, *np.subtract(popt,pcov))  # -1 sigma band edge
    plt.figure(figsize=(10, 10))
    plt.errorbar(xdata, ydata, yerr=dy, fmt='o')
    plt.plot(xdata, yFit, label="Best Fit")
    plt.plot(xdata, yFit_max, label="Plus 1$\sigma$")
    plt.plot(xdata, yFit_min, label="Minus 1$\sigma$")
    # Parameters are converted from cm to fm (1e13) for display.
    plt.text(xdata[0], ydata[0] + 1e-12, "$r_0A^{1/3} + \lambda$ \n $r_{0} = %.1f\,\pm\,%.1f\,fm$ \n $\lambda = %.0f\,\pm\,%.0f\,fm$" % (popt[0]*1e13, pcov[0]*1e13, popt[1]*1e13, pcov[1]*1e13))
    plt.xlabel("$A^{1/3}$ $(g/mol)^{1/3}$")
    plt.ylabel(r"$\sqrt{\frac{\sigma}{2 \pi}}$ $(\sqrt{\frac{cm^2}{rad}})$")
    plt.title("Radius and deBroglie Wavelength of the Neutron")
    plt.legend(loc=3)
    plt.savefig("plots/neutron_radius.pdf")
    plt.show()
if __name__ == '__main__':
    # calibration(True)
    # Run the photopeak analysis on every run-3 spectrum whose filename
    # has fewer than three underscore-separated fields (a third field
    # marks files that are skipped).
    datasets = [name for name in os.listdir("data/run_3")
                if len(name.split("_")) < 3]
    for dataset in datasets:
        spectrum("data/run_3/%s" % dataset, True)
    # Other entry points, disabled by default:
    # spectrum("data/run_3/shielded_carbon.tsv", True)
    # for item in ["al", "cu", "pb", "carbon"]:
    #     for f in os.listdir("data/cross_section/%s" % item):
    #         if f.split(".")[1] == "tsv":
    #             cross_section("%s/%s" % (item, f))
    # cross_section("0_thickness.tsv")
    # cross_section("pb_blocked.tsv")
    # countrates("pb")
    # neutron_radius()
| bzcheeseman/phys211 | Neutron_Studies/data_processing.py | Python | lgpl-3.0 | 14,509 | [
"Gaussian"
] | 6837137b4bc79e81b76b9212e7ff0425d6ff8927fdecde5b983bbb45556c15e2 |
import os, sys, site, re, urllib
site.addsitedir(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../core'))
from db import SQLite3
from db.mapper import Pages
from summary import *
from classifier import get_words, FisherClassifier
from pprint import pprint
from datetime import datetime
pages = [
['http://www.creativeapplications.net/air/boks-air/','air'],
['http://www.creativeapplications.net/air/buro-destruct-designer-iphone-flash/','air'],
['http://www.creativeapplications.net/air/colorbrowser-air/','air'],
['http://www.creativeapplications.net/air/destroy-twitter-air/','air'],
['http://www.creativeapplications.net/air/fifa-development-globe-visualizing-fifas-work-across-the-world/','air'],
['http://www.creativeapplications.net/air/flash-on-the-beach-events/','air'],
['http://www.creativeapplications.net/air/fractal4d-air/','air'],
['http://www.creativeapplications.net/air/jelly-swarm-air/','air'],
['http://www.creativeapplications.net/air/little-boxes-music-box-for-the-ipad-by-joelle-aeschlimann-ecal/','air'],
['http://www.creativeapplications.net/air/pyramid-cascades-by-ostrario-projection-mapping-meets-balldroppings/','air'],
['http://www.creativeapplications.net/air/spin-motion-collaborative-gyro-stop-motions-by-simeon-brandner-ecal/','air'],
['http://www.creativeapplications.net/air/tiltshift-air/','air'],
['http://www.creativeapplications.net/android/antimap-of-android-iphone-javascript-processing/','android'],
['http://www.creativeapplications.net/android/architecture-in-your-hand-iphone-android/','android'],
['http://www.creativeapplications.net/android/captured-processing-arduino/','android'],
['http://www.creativeapplications.net/android/from-processing-to-the-android-market-tutorial/','android'],
['http://www.creativeapplications.net/android/g1-android-reviews-android/','android'],
['http://www.creativeapplications.net/android/lamp-north-arduino-objects/','android'],
['http://www.creativeapplications.net/android/mobile-app-development-processing-android-tutorial/','android'],
['http://www.creativeapplications.net/android/precocious-mouse-1-android-sound/','android'],
['http://www.creativeapplications.net/android/pss-studie-processing-arduino/','android'],
['http://www.creativeapplications.net/android/rymdkapsel-new-game-by-martin-grapefrukt-jonasson/','android'],
['http://www.creativeapplications.net/android/slow-listening-re-enabling-focus-in-music-listening/','android'],
['http://www.creativeapplications.net/android/sonaur-android-processing/','android'],
['http://www.creativeapplications.net/android/soundbow-drawing-based-musical-instrument-for-android/','android'],
['http://www.creativeapplications.net/android/space-physics-android-games/','android'],
['http://www.creativeapplications.net/android/stonespray-3d-printing-with-sand/','android'],
['http://www.creativeapplications.net/android/the-hanging-garden-arduino/','android'],
['http://www.creativeapplications.net/android/touch-vision-interface-openframeworks-arduino-android/','android'],
['http://www.creativeapplications.net/android/untitled-faces-openframeworks-processing-arduino/','android'],
['http://www.creativeapplications.net/android/wtph-what-the-phonics-pronouncing-street-names-in-denmark/','android'],
['http://www.creativeapplications.net/cinder/difluxe-by-avoka-production-upsetting-the-balance-of-a-microcosm-cinder/','cinder'],
['http://www.creativeapplications.net/cinder/first-experiments-with-leap-motion-and-cinder/','cinder'],
['http://www.creativeapplications.net/cinder/jacobs-cave-cinder/','cinder'],
['http://www.creativeapplications.net/cinder/observations-cinder/','cinder'],
['http://www.creativeapplications.net/cinder/real-time-electromagnet-dot-display-by-breakfast-ny/','cinder'],
['http://www.creativeapplications.net/cinder/rubans-for-ipad-new-version-of-cinder-cocoa-touch-wrapper/','cinder'],
['http://www.creativeapplications.net/cinder/so-i-was-at-a-party-last-night-cinder/','cinder'],
['http://www.creativeapplications.net/cinder/solyaris-ipad-cinder/','cinder'],
['http://www.creativeapplications.net/cinder/space-colonization-cinder-plask-javascript-ipad/','cinder'],
['http://www.creativeapplications.net/cinder/tether-cinder/','cinder'],
['http://www.creativeapplications.net/cinder/the-company-cinder/','cinder'],
['http://www.creativeapplications.net/environment/0-1-blink-and-the-lights-go-out-light-installation-by-michal-kohut/','environment'],
['http://www.creativeapplications.net/environment/555-kubik-environment/','environment'],
['http://www.creativeapplications.net/environment/a-colour-field-point-of-no-return-matthew-biedermans-event-horizon/','environment'],
['http://www.creativeapplications.net/environment/along-the-line-florian-gronds-linear-space-time-recordings/','environment'],
['http://www.creativeapplications.net/environment/amphibious-architecture-environment/','environment'],
['http://www.creativeapplications.net/environment/aspect-ratio-array-rafael-rozendaals-popular-screen-sizes/','environment'],
['http://www.creativeapplications.net/environment/best-and-most-memorable-projects-of-2011/','environment'],
['http://www.creativeapplications.net/environment/candle-light-environment/','environment'],
['http://www.creativeapplications.net/environment/caten-by-david-letellier/','environment'],
['http://www.creativeapplications.net/environment/constructed-land-65000-variations-on-a-theme/','environment'],
['http://www.creativeapplications.net/environment/cpu-central-processing-unit/','environment'],
['http://www.creativeapplications.net/environment/crackle-canvas-environment-sound/','environment'],
['http://www.creativeapplications.net/environment/datamorphose-processing-environment/','environment'],
['http://www.creativeapplications.net/environment/dotje-environment-objects-nxc/','environment'],
['http://www.creativeapplications.net/environment/eyjafjallajokull-vvvv-events-environment-inspiration/','environment'],
['http://www.creativeapplications.net/environment/feelspace/','environment'],
['http://www.creativeapplications.net/environment/fragments-in-space-olivier-ratsis-white-roads-in-the-red-matrix/','environment'],
['http://www.creativeapplications.net/environment/framework-f5x5x5-environment/','environment'],
['http://www.creativeapplications.net/environment/hyper-matrix-thousands-of-physical-pixels-in-a-180o-vertical-landscape/','environment'],
['http://www.creativeapplications.net/environment/lotus-dome-objects/','environment'],
['http://www.creativeapplications.net/environment/me-wonderland-environment/','environment'],
['http://www.creativeapplications.net/environment/moving-picture-show-by-jurg-lehni-and-contributors/','environment'],
['http://www.creativeapplications.net/environment/processing-10-mac-windows-linux/','environment'],
['http://www.creativeapplications.net/environment/revolving-realities/','environment'],
['http://www.creativeapplications.net/environment/sticky-light-smart-laser-environment/','environment'],
['http://www.creativeapplications.net/environment/syndyn-maxmsp-game-environment/','environment'],
['http://www.creativeapplications.net/environment/the-one-way-ticket-voyage-into-deep-space-with-no-return-di-rca-2012/','environment'],
['http://www.creativeapplications.net/environment/the-stealth-project-environment-objects/','environment'],
['http://www.creativeapplications.net/environment/unpacking-the-cinechamber-naut-humon-on-nomadic-av-performance/','environment'],
['http://www.creativeapplications.net/events/','events'],
['http://www.creativeapplications.net/events/3d-printshow-london-20th-21st-october/','events'],
['http://www.creativeapplications.net/events/alpha-ville-festival-events/','events'],
['http://www.creativeapplications.net/events/art-code-events/','events'],
['http://www.creativeapplications.net/events/beyond-tellerrand-events/','events'],
['http://www.creativeapplications.net/events/data-event-46-0-events/','events'],
['http://www.creativeapplications.net/events/decoded-events/','events'],
['http://www.creativeapplications.net/events/eta-2012-a-branding-singularity/','events'],
['http://www.creativeapplications.net/events/euphorie-events-openframeworks/','events'],
['http://www.creativeapplications.net/events/eyeo-2012-afterthoughts-and-asides/','events'],
['http://www.creativeapplications.net/events/flash-on-the-beach-events-2/','events'],
['http://www.creativeapplications.net/events/gli-tch-events/','events'],
['http://www.creativeapplications.net/events/high-arctic-by-uva-c-events/','events'],
['http://www.creativeapplications.net/events/just-another-day-at-the-lab-mutek-avisions-2012/','events'],
['http://www.creativeapplications.net/events/kikk-festival-events/','events'],
['http://www.creativeapplications.net/events/momas-paola-antonelli-on-talk-to-me-events/','events'],
['http://www.creativeapplications.net/events/node10-forum-for-digital-arts-events-vvvv/','events'],
['http://www.creativeapplications.net/events/oarn-2012-the-shape-of-ar-to-come/','events'],
['http://www.creativeapplications.net/events/offf-2010-book-giveaway-events/','events'],
['http://www.creativeapplications.net/events/onedotzero-23%e2%80%9327-november-bfi-london-events/','events'],
['http://www.creativeapplications.net/events/onedotzero-code-warriors-events/','events'],
['http://www.creativeapplications.net/events/open-gdnm-2011-events/','events'],
['http://www.creativeapplications.net/events/open-gdnm-2012-london-special-discount-code-for-can-readers/','events'],
['http://www.creativeapplications.net/events/particles-openframeworks-arduino-events/','events'],
['http://www.creativeapplications.net/events/quadrotors-at-the-saatchi-saatchi-new-directors-showcase-2012-by-mlf-details/','events'],
['http://www.creativeapplications.net/events/reasons-to-be-creative-brighton-3-5-september/','events'],
['http://www.creativeapplications.net/events/resonate-events/','events'],
['http://www.creativeapplications.net/events/resonate-festival-2013-exploring-boundaries-of-art-media-and-technology/','events'],
['http://www.creativeapplications.net/events/resonate-platform-for-art-and-technology/','events'],
['http://www.creativeapplications.net/events/rgbdepth-workshop-with-james-george-and-alexander-porter-barcelona-spain-28th-april/','events'],
['http://www.creativeapplications.net/events/seiko-mikami-desire-of-codes-ycam/','events'],
['http://www.creativeapplications.net/events/sosolimited-reconstitution-profile-events-c/','events'],
['http://www.creativeapplications.net/events/the-16th-japan-media-arts-festival-call-for-entries/','events'],
['http://www.creativeapplications.net/events/the-immortal-life-support-machines-modified-to-breathe-in-a-closed-circuit/','events'],
['http://www.creativeapplications.net/events/the-r18-ultra-chair-custom-chair-geometry-from-crowd-sourced-collected-data-milan-2012/','events'],
['http://www.creativeapplications.net/events/voice-array-and-last-breath-by-rafeal-lozano-hemmer/','events'],
['http://www.creativeapplications.net/events/zef-santo-3d-real-time-performance-workflow-and-collaboration/','events'],
['http://www.creativeapplications.net/featured/anatomy-of-a-series-santiago-ortiz-lostalgic/','featured'],
['http://www.creativeapplications.net/featured/buchstabengewitter-dynamic-type-that-morphs-from-a-to-z-vvvv/','featured'],
['http://www.creativeapplications.net/featured/ice-angel-by-dominic-harris-from-snow-angels-to-angelic-forms/','featured'],
['http://www.creativeapplications.net/featured/the-carp-and-the-seagull-interactive-short-film-by-evan-boehm/','featured'],
['http://www.creativeapplications.net/featured/the-making-of-the-mill-touch-cinder/','featured'],
['http://www.creativeapplications.net/featured/visual-complexity-mapping-patterns-of-information-books/','featured'],
['http://www.creativeapplications.net/featured/zzz-archive-film-animation-videogame-image-analysis/','featured'],
['http://www.creativeapplications.net/flash/adidas-megalizer-processing-flash-sound/','flash'],
['http://www.creativeapplications.net/flash/audiotool-flash-sound-webapp/','flash'],
['http://www.creativeapplications.net/flash/av-clash-flash-webapp-sound/','flash'],
['http://www.creativeapplications.net/flash/bear-71-interactive-documentary-flash-review/','flash'],
['http://www.creativeapplications.net/flash/can-at-flashbelt-2010-contest-events/','flash'],
['http://www.creativeapplications.net/flash/coordy-flash/','flash'],
['http://www.creativeapplications.net/flash/depict1-games-flash/','flash'],
['http://www.creativeapplications.net/flash/digging-in-the-crates-flash-sound/','flash'],
['http://www.creativeapplications.net/flash/every-day-the-same-dream-flash-games/','flash'],
['http://www.creativeapplications.net/flash/flixel-2-tutorial-flash-tutorials-games/','flash'],
['http://www.creativeapplications.net/flash/hello-world-mac-windows-flash/','flash'],
['http://www.creativeapplications.net/flash/helvetica-face-flash/','flash'],
['http://www.creativeapplications.net/flash/history-of-video-games-flash/','flash'],
['http://www.creativeapplications.net/flash/hypo-flash/','flash'],
['http://www.creativeapplications.net/flash/hyundai-i40-reveal-openframeworks-flash/','flash'],
['http://www.creativeapplications.net/flash/inside-a-dead-skyscraper-flash-not-games/','flash'],
['http://www.creativeapplications.net/flash/jared-tarbell-profile-events/','flash'],
['http://www.creativeapplications.net/flash/link-openframeworks-ipad-flash-vvvv/','flash'],
['http://www.creativeapplications.net/flash/newspeak-flash/','flash'],
['http://www.creativeapplications.net/flash/seaquence-flash-sound/','flash'],
['http://www.creativeapplications.net/flash/the-pirata-boat-race-iphone-flash-games/','flash'],
['http://www.creativeapplications.net/flash/today-i-die-iphone-games-flash/','flash'],
['http://www.creativeapplications.net/flash/toneglue-webapp-flash/','flash'],
['http://www.creativeapplications.net/flash/trauma-games-flash/','flash'],
['http://www.creativeapplications.net/flash/unverse-created-by-ian-snyder-and-ported-to-ios-by-lucky-frame/','flash'],
['http://www.creativeapplications.net/flash/visualator-flash-iphone-ipad/','flash'],
['http://www.creativeapplications.net/games/%e2%84%a2-iphone-games-preview/','games'],
['http://www.creativeapplications.net/games/2sleep1-games/','games'],
['http://www.creativeapplications.net/games/alt-ctrl-openframeworks-games/','games'],
['http://www.creativeapplications.net/games/bad-at-sports-interview-with-jason-rohrer-games/','games'],
['http://www.creativeapplications.net/games/boom-shakalaka-openframeworks/','games'],
['http://www.creativeapplications.net/games/chasing-aurora-2d-aerial-action-game-by-broken-rules/','games'],
['http://www.creativeapplications.net/games/edge-iphone/','games'],
['http://www.creativeapplications.net/games/eliss-iphone/','games'],
['http://www.creativeapplications.net/games/empty-black-by-mary-rose-cook-fun-html5-2d-platform-shooter/','games'],
['http://www.creativeapplications.net/games/eufloria-intergalactic-guerrilla-gardening-for-the-ipad-games/','games'],
['http://www.creativeapplications.net/games/exhausting-gameplay-by-douglas-edric-stanley-theory-games/','games'],
['http://www.creativeapplications.net/games/faraway-iphone-ipad/','games'],
['http://www.creativeapplications.net/games/flight-of-the-fireflies-atmospheric-journey-through-places-and-emotions-ipad-games/','games'],
['http://www.creativeapplications.net/games/games-games-events/','games'],
['http://www.creativeapplications.net/games/hard-wired-devices-objects-games/','games'],
['http://www.creativeapplications.net/games/horror-vacui-iphone/','games'],
['http://www.creativeapplications.net/games/how-to-do-things-with-videogames-by-ian-bogost-books-review-games/','games'],
['http://www.creativeapplications.net/games/indie-game-the-movie-released/','games'],
['http://www.creativeapplications.net/games/jigazo/','games'],
['http://www.creativeapplications.net/games/limbo-games-mac/','games'],
['http://www.creativeapplications.net/games/memory-of-a-broken-dimension-glitch-and-narrative/','games'],
['http://www.creativeapplications.net/games/motionbeam-ipod-touch-c-games/','games'],
['http://www.creativeapplications.net/games/mr-bounce','games'],
['http://www.creativeapplications.net/games/neven-mrgans-curious-incident-games-iphone-ipad/','games'],
['http://www.creativeapplications.net/games/no-more-sweden-2010-games-events/','games'],
['http://www.creativeapplications.net/games/oko-interactive-journey-through-nasa-image-database/','games'],
['http://www.creativeapplications.net/games/on-simulation-aesthetics-and-play-artifactual-playground/','games'],
['http://www.creativeapplications.net/games/parallax-games/','games'],
['http://www.creativeapplications.net/games/photonykto-ipad-openframeworks/','games'],
['http://www.creativeapplications.net/games/ping-augmented-pixel-tutorials-games/','games'],
['http://www.creativeapplications.net/games/pulsus-ipad-games-sound/','games'],
['http://www.creativeapplications.net/games/robert-overweg-profile-games/','games'],
['http://www.creativeapplications.net/games/sensible-software-1986-1999-by-read-only-memory-2/','games'],
['http://www.creativeapplications.net/games/shot-shot-shoot-iphone-games-of/','games'],
['http://www.creativeapplications.net/games/show-me-how-not-to-fight-openframeworks-games/','games'],
['http://www.creativeapplications.net/games/sir-you-are-being-hunted-generative-english-landscape-and-tea-drinking-robots/','games'],
['http://www.creativeapplications.net/games/soundshapes-by-queasy-games-now-available-for-ps-vita-ps3/','games'],
['http://www.creativeapplications.net/games/soundstory-1000pm-an-interactive-musical-vignette-by-matthew-lopresti/','games'],
['http://www.creativeapplications.net/games/space-invaders-infinity-gene-3-0-iphone-games/','games'],
['http://www.creativeapplications.net/games/space-invaders-infinity-gene-iphone-2/','games'],
['http://www.creativeapplications.net/games/spelltower-ipad-games-openframeworks/','games'],
['http://www.creativeapplications.net/games/spirits-ipad-iphone-games-preview/','games'],
['http://www.creativeapplications.net/games/sword-sworcery-ep-games/','games'],
['http://www.creativeapplications.net/games/the-ballad-of-the-psychotropic-robots-eddie-lee/','games'],
['http://www.creativeapplications.net/games/the-black-forest','games'],
['http://www.creativeapplications.net/games/the-culture-of-game-jams-games/','games'],
['http://www.creativeapplications.net/games/this-is-infinity-games/','games'],
['http://www.creativeapplications.net/games/where-is-my-heart-games/','games'],
['http://www.creativeapplications.net/games/windosill-ipad-games/','games'],
['http://www.creativeapplications.net/games/world-war-ii-redux-games/','games'],
['http://www.creativeapplications.net/inspiration/1024-projections-inspiration/','inspiration'],
['http://www.creativeapplications.net/inspiration/agricultural-landscapes-seen-from-space-inspiration/','inspiration'],
['http://www.creativeapplications.net/inspiration/dutch-wife-inspiration/','inspiration'],
['http://www.creativeapplications.net/inspiration/enigmatica-inspiration/','inspiration'],
['http://www.creativeapplications.net/inspiration/golden-tiger-moving-projection-on-the-streets-of-paris/','inspiration'],
['http://www.creativeapplications.net/inspiration/in-statu-nascendi-inspiration/','inspiration'],
['http://www.creativeapplications.net/inspiration/organic-electric-inspiration/','inspiration'],
['http://www.creativeapplications.net/inspiration/start-from-the-beginning-inspiration/','inspiration'],
['http://www.creativeapplications.net/inspiration/supernova-by-siggi-eggertsson-inspiration/','inspiration'],
['http://www.creativeapplications.net/inspiration/the-transcendent-city-inspiration/','inspiration'],
['http://www.creativeapplications.net/ipad/configuration-space-ipad/','ipad'],
['http://www.creativeapplications.net/ipad/hunters-moon-ipad/','ipad'],
['http://www.creativeapplications.net/ipad/konfetti-for-ipad-playful-mirror-image-using-openscenegraph-stephan-huber/','ipad'],
['http://www.creativeapplications.net/ipad/lygia-clarks-livro-obra-1983-for-the-ipad/','ipad'],
['http://www.creativeapplications.net/ipad/making-future-magic-ipad/','ipad'],
['http://www.creativeapplications.net/ipad/motion-phone-ipad/','ipad'],
['http://www.creativeapplications.net/ipad/numby-by-pitaru-and-paterson-learning-counting-in-weird-and-wonky-ways/','ipad'],
['http://www.creativeapplications.net/ipad/phaidon-design-classics-ipad/','ipad'],
['http://www.creativeapplications.net/ipad/playart-uniting-classic-art-and-childrens-creativity/','ipad'],
['http://www.creativeapplications.net/ipad/rorschach-cards-and-balloon-ipad/','ipad'],
['http://www.creativeapplications.net/ipad/satromizer-os-ipad/','ipad'],
['http://www.creativeapplications.net/ipad/sketching-dynamic-geometry-on-the-ipad/','ipad'],
['http://www.creativeapplications.net/iphone/12-tone-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/15-best-and-must-have-iphone-apps-of-2009-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/a-prism-for-interface-design-theory/','iphone'],
['http://www.creativeapplications.net/iphone/addlib-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/an-interview-with-bowyer-theory-ipad-interview/','iphone'],
['http://www.creativeapplications.net/iphone/arart-by-kei-shiratori-new-stories-with-ar/','iphone'],
['http://www.creativeapplications.net/iphone/asdfbmp-iphone-of-canapps/','iphone'],
['http://www.creativeapplications.net/iphone/automaton-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/begotten-iphone-mobilizing-c/','iphone'],
['http://www.creativeapplications.net/iphone/best-iphone-and-ipad-projects-of-2011/','iphone'],
['http://www.creativeapplications.net/iphone/bitboxland-iphone-sound/','iphone'],
['http://www.creativeapplications.net/iphone/bjork-biophilia-iphone-ipad-sound/','iphone'],
['http://www.creativeapplications.net/iphone/bjork-biophilia-virus-iphone-ipad-sound/','iphone'],
['http://www.creativeapplications.net/iphone/bla-bla-bla-iphone-of-processing-sound/','iphone'],
['http://www.creativeapplications.net/iphone/brion-gysin-dream-machine-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/c4-new-creative-coding-framework-for-the-ios/','iphone'],
['http://www.creativeapplications.net/iphone/c74-maxmsp-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/cambox-iphone-sound/','iphone'],
['http://www.creativeapplications.net/iphone/camera-apps-iphone-tutorial-openframeworks/','iphone'],
['http://www.creativeapplications.net/iphone/circlo-iphone-ipad-sound/','iphone'],
['http://www.creativeapplications.net/iphone/circuit-synth-iphone-sound/','iphone'],
['http://www.creativeapplications.net/iphone/clock01-06-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/cloudie-iphone-sound/','iphone'],
['http://www.creativeapplications.net/iphone/creative-review-annual-2010-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/curious-iphones-by-interactivelab-arduino-processing-opencv-ios-of/','iphone'],
['http://www.creativeapplications.net/iphone/d0tsechoplex-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/daisyphone-iphone-java-sound/','iphone'],
['http://www.creativeapplications.net/iphone/dmesh-delaunay-your-photos-directly-on-the-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/dmitry-fyodorov-shapeless-iphone-sound/','iphone'],
['http://www.creativeapplications.net/iphone/dopplerpad-iphone-sound/','iphone'],
['http://www.creativeapplications.net/iphone/dscan-slit-scanning-for-the-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/eboy-fixpix-iphone-preview/','iphone'],
['http://www.creativeapplications.net/iphone/edit-app-visual-blog-reader-%e2%86%92-visual-content-management-system/','iphone'],
['http://www.creativeapplications.net/iphone/euphonics-iphone-sound/','iphone'],
['http://www.creativeapplications.net/iphone/extending-the-touchscreen-of-arduino-ipad-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/faust-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/feel-me-by-marco-triverio-digital-touch-and-new-interactive-channels-for-bit-intimacy/','iphone'],
['http://www.creativeapplications.net/iphone/fish-a-tap-essay-for-iphone-by-robin-sloan/','iphone'],
['http://www.creativeapplications.net/iphone/floating-forecaster-maxmsp-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/flowerium-iphone-ipad-openframeworks/','iphone'],
['http://www.creativeapplications.net/iphone/g-ipad-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/glory-math-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/hana-by-andreas-muller-allows-ios-devices-to-dream-about-flowers/','iphone'],
['http://www.creativeapplications.net/iphone/horizons-iphone-ipad-of/','iphone'],
['http://www.creativeapplications.net/iphone/inkstrumental-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/instacrt-a-real-world-iphone-filter-for-photos/','iphone'],
['http://www.creativeapplications.net/iphone/it%e2%80%99s-about-time-%e2%80%93-volume-2-iphone-ipad/','iphone'],
['http://www.creativeapplications.net/iphone/its-about-time-volume-1-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/jasuto-iphone-sound/','iphone'],
['http://www.creativeapplications.net/iphone/konkreet-performer-iphone-ipad/','iphone'],
['http://www.creativeapplications.net/iphone/lost-in-lace-iphone-openframeworks-events/','iphone'],
['http://www.creativeapplications.net/iphone/luminair-iphone-quarzcomposer/','iphone'],
['http://www.creativeapplications.net/iphone/mcsweeney%e2%80%99s-iphone-ipad/','iphone'],
['http://www.creativeapplications.net/iphone/mini-composer-ipad-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/minipops-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/museum-of-the-phantom-city-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/myfry-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/nabit-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/nlug-iphone-ipad-openframeworks/','iphone'],
['http://www.creativeapplications.net/iphone/noby-noby-boy-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/nodebeat-iphone-ipad-of/','iphone'],
['http://www.creativeapplications.net/iphone/nothings-for-iphone-game-of-life-through-abstract-imagery/','iphone'],
['http://www.creativeapplications.net/iphone/nui-framework-objectivec-alternatives-iphone-c/','iphone'],
['http://www.creativeapplications.net/iphone/passion-pit-gossamer-new-interactive-music-app-by-scott-snibbe-studio/','iphone'],
['http://www.creativeapplications.net/iphone/philia02-iphone-openframeworks/','iphone'],
['http://www.creativeapplications.net/iphone/pixelwave-iphone-ipad-sound/','iphone'],
['http://www.creativeapplications.net/iphone/poolga-collection-1-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/popclock-ipad-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/proloop-iphone-sound/','iphone'],
['http://www.creativeapplications.net/iphone/pulsate-iphone-sound/','iphone'],
['http://www.creativeapplications.net/iphone/radiobones-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/reactable-mobile-iphone-ipad-sound/','iphone'],
['http://www.creativeapplications.net/iphone/rgb-petri-of-processing-iphone-ipad/','iphone'],
['http://www.creativeapplications.net/iphone/runxt-life-iphone-sound/','iphone'],
['http://www.creativeapplications.net/iphone/sampletoy-iphone-sound/','iphone'],
['http://www.creativeapplications.net/iphone/scott-snibbe-profile-iphone-ipad-of/','iphone'],
['http://www.creativeapplications.net/iphone/situationist-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/sktch-2-0-iphone-ipad-openframeworks/','iphone'],
['http://www.creativeapplications.net/iphone/sonar-ruler-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/springmesh-iphone-ipad-openframeworks/','iphone'],
['http://www.creativeapplications.net/iphone/star6-iphone-sound/','iphone'],
['http://www.creativeapplications.net/iphone/stilla-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/strange-rain-iphone-ipad/','iphone'],
['http://www.creativeapplications.net/iphone/sum05-by-lia-generative-experience-for-iphone-and-ipad/','iphone'],
['http://www.creativeapplications.net/iphone/sunvox-iphone-sound/','iphone'],
['http://www.creativeapplications.net/iphone/suwappu-prototype-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/textify-it-webapp-javascript-ipad-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/the-grix-by-eboy-new-kind-of-pixel-editor-for-the-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/the-three-little-pigs-and-cinderella-interactive-storytelling-by-nosy-crow/','iphone'],
['http://www.creativeapplications.net/iphone/time-as-color-and-place-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/tweakybeat-iphone-sound/','iphone'],
['http://www.creativeapplications.net/iphone/twitter-to-facebook-tutorial/','iphone'],
['http://www.creativeapplications.net/iphone/what-the-ipad-means-to-developers-news-iphone/','iphone'],
['http://www.creativeapplications.net/iphone/whatthefont-iphone-webapp/','iphone'],
['http://www.creativeapplications.net/iphone/zio-iphone-in-development/','iphone'],
['http://www.creativeapplications.net/java/interactive-fabrication-of-functional-mechanical-devices/','java'],
['http://www.creativeapplications.net/java/phenomene-java/','java'],
['http://www.creativeapplications.net/java/quakescapes-java/','java'],
['http://www.creativeapplications.net/java/tween-java/','java'],
['http://www.creativeapplications.net/java/xylobot-java/','java'],
['http://www.creativeapplications.net/linux/fohnseher-linux/','linux'],
['http://www.creativeapplications.net/linux/instaprint-by-breakfast-nyc-physical-instagrams-prints-for-your-home/','linux'],
['http://www.creativeapplications.net/linux/iographica-mac-windows-linux/','linux'],
['http://www.creativeapplications.net/linux/maya-interface-overview-tutorial/','linux'],
['http://www.creativeapplications.net/linux/maya-modeling-tutorial/','linux'],
['http://www.creativeapplications.net/linux/maya-rendering-tutorial/','linux'],
['http://www.creativeapplications.net/linux/overbug-processing-java-sound/','linux'],
['http://www.creativeapplications.net/mac/ballon-experiment-processing-mac-windows/','mac'],
['http://www.creativeapplications.net/mac/bandwidth-openframeworks-mac-windows/','mac'],
['http://www.creativeapplications.net/mac/bupp-mac-windows/','mac'],
['http://www.creativeapplications.net/mac/carbon-mac-windows/','mac'],
['http://www.creativeapplications.net/mac/cells-mac/','mac'],
['http://www.creativeapplications.net/mac/cr-annual-cover-c-mac/','mac'],
['http://www.creativeapplications.net/mac/cubie-java-mac-windows/','mac'],
['http://www.creativeapplications.net/mac/dmesh-cinder-mac/','mac'],
['http://www.creativeapplications.net/mac/embers-by-tda-1-kilobyte-to-rule-them-all/','mac'],
['http://www.creativeapplications.net/mac/extrafile-file-formats-mac/','mac'],
['http://www.creativeapplications.net/mac/fugu-procedural-modelling-system-for-3d-graphics/','mac'],
['http://www.creativeapplications.net/mac/gestural-music-sequencer-processing-sound/','mac'],
['http://www.creativeapplications.net/mac/hairrrr-mac-cinder/','mac'],
['http://www.creativeapplications.net/mac/nodebox-2-mac-windows/','mac'],
['http://www.creativeapplications.net/mac/refreq-sound-openframeworks-mac/','mac'],
['http://www.creativeapplications.net/mac/ribbonpaint-cinder-mac/','mac'],
['http://www.creativeapplications.net/mac/speakatron-sound-openframeworks-mac/','mac'],
['http://www.creativeapplications.net/mac/textoy-mac-windows-cinder/','mac'],
['http://www.creativeapplications.net/mac/the-hypercard-legacy-theory-mac/','mac'],
['http://www.creativeapplications.net/mac/visuals-for-sonar-festival-vdmx-to-unity3d-tutorials/','mac'],
['http://www.creativeapplications.net/mac/youglitch-openframeworks-mac/','mac'],
['http://www.creativeapplications.net/maxmsp/city-symphonies-the-future-sound-of-traffic-by-mark-mckeague-di-rca-2012/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/data-anatomy-civic-by-ryoji-ikeda-berlin-19-april-1st-may/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/dazzled-cinder-maxmsp/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/dj-light-maxmsp/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/drawing-machine-maxmsp-processing/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/ground-maxmsp-sound/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/helioscillator-visualising-the-oscillations-of-sunspot-size-and-density/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/illucia-processing-maxmsp/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/interactive-swarm-space-c-maxmsp/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/l-s-d-light-sequencer-iphone-pd-arduino-sound/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/photobiennale-sites-processing-maxmsp/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/rheo-5-horizons-maxmsp-sound/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/richti-areal-maxmap-ipad/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/skube-tangible-interface-to-last-fm-spotify-radio/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/soundaffects-maxmsp-sound-arduino/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/talking-tree-maxmsp/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/tele-present-water-maxmsp-arduino/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/the-infinite-adventure-machine-maxmsp/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/tripwire-by-j-m-albert-and-a-fure-a-wall-of-strings-in-constant-flux/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/visual-music-collaborative-events-results/','maxmsp'],
['http://www.creativeapplications.net/maxmsp/voice-lessons-maxmsp/','maxmsp'],
['http://www.creativeapplications.net/news/','news'],
['http://www.creativeapplications.net/news/1000-posts-news/','news'],
['http://www.creativeapplications.net/news/3d-freehand-drawing-with-field-news/','news'],
['http://www.creativeapplications.net/news/blogs-we-read-love-news/','news'],
['http://www.creativeapplications.net/news/call-for-guest-writers-on-can-news/','news'],
['http://www.creativeapplications.net/news/can-fundraiser-new-t-shirt-free-apps-news/','news'],
['http://www.creativeapplications.net/news/can-offf2011-workshop-collaborative-events/','news'],
['http://www.creativeapplications.net/news/disney-researchers-augment-touch-sensation-with-revel/','news'],
['http://www.creativeapplications.net/news/drone-as-metaphor-interview-with-artistdirector-alex-rivera/','news'],
['http://www.creativeapplications.net/news/first-digital-3d-rendered-film-1972/','news'],
['http://www.creativeapplications.net/news/formcode-book-giveaway-news/','news'],
['http://www.creativeapplications.net/news/frieze-magazine-talks-to-julius-von-bismarck-cerns-artist-in-residence/','news'],
['http://www.creativeapplications.net/news/general-indifference-towards-the-digital-divide/','news'],
['http://www.creativeapplications.net/news/happy-new-year/','news'],
['http://www.creativeapplications.net/news/interactivos12-dublin-hack-the-city-cfp/','news'],
['http://www.creativeapplications.net/news/kinect-opensource-news/','news'],
['http://www.creativeapplications.net/news/laser-cutter-plays-super-mario-news/','news'],
['http://www.creativeapplications.net/news/new-artscience-affinities-news-books/','news'],
['http://www.creativeapplications.net/news/new-ca-logo-css-news/','news'],
['http://www.creativeapplications.net/news/new-on-the-web-section-on-can/','news'],
['http://www.creativeapplications.net/news/processing-org-exhibition-now-curated-by-fv-news/','news'],
['http://www.creativeapplications.net/news/substratum-interview-series-news/','news'],
['http://www.creativeapplications.net/news/support-can-news/','news'],
['http://www.creativeapplications.net/news/tedxvancouver-jer-thorp-the-weight-of-data/','news'],
['http://www.creativeapplications.net/news/thank-you-to-cans-sponsors-news-10/','news'],
['http://www.creativeapplications.net/news/the-future-of-art-by-ks12-news/','news'],
['http://www.creativeapplications.net/news/the-secret-war-between-downloading-and-uploading-books-ipad-cinder/','news'],
['http://www.creativeapplications.net/news/the-sketchbook-of-susan-kare-news/','news'],
['http://www.creativeapplications.net/news/upverter-news/','news'],
['http://www.creativeapplications.net/news/written-images-books-news/','news'],
['http://www.creativeapplications.net/news/youworkforthem-how-proce55ing-typeface-came-to-be-sponsor/','news'],
['http://www.creativeapplications.net/objects/a-colloidal-display-membrane-screen-that-combines-transparency-and-3d-volume/','objects'],
['http://www.creativeapplications.net/objects/a-tool-to-deceive-and-slaughter-objects/','objects'],
['http://www.creativeapplications.net/objects/apparel-by-the-normals-clothes-that-evolve-in-real-time-with-the-user/','objects'],
['http://www.creativeapplications.net/objects/best-friends-objects/','objects'],
['http://www.creativeapplications.net/objects/beta-objects/','objects'],
['http://www.creativeapplications.net/objects/bill-never-ending-plane-in-the-sky-by-emmanuel-le-cerf/','objects'],
['http://www.creativeapplications.net/objects/blissbomb-procedurally-drawn-soundpattern-game-for-sifteo-cubes/','objects'],
['http://www.creativeapplications.net/objects/collaborative-instrument-objects/','objects'],
['http://www.creativeapplications.net/objects/crash-photography-objects-inspiration/','objects'],
['http://www.creativeapplications.net/objects/daily-stack-objects/','objects'],
['http://www.creativeapplications.net/objects/denki-puzzle-by-yuri-suzuki-technology-will-save-us-at-the-design-museum/','objects'],
['http://www.creativeapplications.net/objects/devices-for-mindless-communication-objects/','objects'],
['http://www.creativeapplications.net/objects/electrolibrary-by-waldek-wegrzyn-paper-book-as-interface/','objects'],
['http://www.creativeapplications.net/objects/esper-domino-objects/','objects'],
['http://www.creativeapplications.net/objects/faith-condition-2012-by-lukas-franciszkiewicz-an-out-of-body-experience/','objects'],
['http://www.creativeapplications.net/objects/flutter-objects-inspiration/','objects'],
['http://www.creativeapplications.net/objects/free-universal-construction-kit-a-disruption-at-the-society-f-a-t-lab/','objects'],
['http://www.creativeapplications.net/objects/giffi-objects/','objects'],
['http://www.creativeapplications.net/objects/glitch-reality-ii-objects/','objects'],
['http://www.creativeapplications.net/objects/hot-networks-complexities-and-opportunities-in-collaborative-robotics/','objects'],
['http://www.creativeapplications.net/objects/hyphae-cinder-objects/','objects'],
['http://www.creativeapplications.net/objects/inpulse-objects/','objects'],
['http://www.creativeapplications.net/objects/introspectre-and-optocoupler-objects/','objects'],
['http://www.creativeapplications.net/objects/kindle-as-an-art-object-two-projects/','objects'],
['http://www.creativeapplications.net/objects/little-printer-objects/','objects'],
['http://www.creativeapplications.net/objects/lumibots-arduino-objects/','objects'],
['http://www.creativeapplications.net/objects/machine-to-keep-a-feather-in-the-air-openframeworks/','objects'],
['http://www.creativeapplications.net/objects/monolith-vvvv-objects-arduino/','objects'],
['http://www.creativeapplications.net/objects/neri-oxman-and-mediated-matter-at-the-mit-media-lab/','objects'],
['http://www.creativeapplications.net/objects/new-angles-objects/','objects'],
['http://www.creativeapplications.net/objects/new-angles-objects/attachment/newangle1-2/','objects'],
['http://www.creativeapplications.net/objects/new-angles-objects/attachment/newangle10/','objects'],
['http://www.creativeapplications.net/objects/new-angles-objects/attachment/newangle11/','objects'],
['http://www.creativeapplications.net/objects/new-angles-objects/attachment/newangle12/','objects'],
['http://www.creativeapplications.net/objects/new-angles-objects/attachment/newangle13/','objects'],
['http://www.creativeapplications.net/objects/new-angles-objects/attachment/newangle2/','objects'],
['http://www.creativeapplications.net/objects/new-angles-objects/attachment/newangle3/','objects'],
['http://www.creativeapplications.net/objects/new-angles-objects/attachment/newangle7/','objects'],
['http://www.creativeapplications.net/objects/new-angles-objects/attachment/newangle8/','objects'],
['http://www.creativeapplications.net/objects/new-angles-objects/attachment/newangle9/','objects'],
['http://www.creativeapplications.net/objects/nokia-hackerbox-objects-maemo/','objects'],
['http://www.creativeapplications.net/objects/novel-hospital-toys-machines-that-keep-us-alive-or-not/','objects'],
['http://www.creativeapplications.net/objects/o-system-the-future-of-personal-electronics-by-peter-krige-rca-ide/','objects'],
['http://www.creativeapplications.net/objects/olars-objects/','objects'],
['http://www.creativeapplications.net/objects/perpetual-energy-wasting-machine-by-niklas-roy/','objects'],
['http://www.creativeapplications.net/objects/perpetual-storytelling-apparatus-objects/','objects'],
['http://www.creativeapplications.net/objects/playing-with-a-flat-screen-from-the-trash-objects/','objects'],
['http://www.creativeapplications.net/objects/point-cloud-by-james-lang-arduino-controlled-structure-breathes-weather-data/','objects'],
['http://www.creativeapplications.net/objects/printed-optics-3d-printed-devices/','objects'],
['http://www.creativeapplications.net/objects/prismatica-kit-webster-uses-a-3d-crystal-surface-as-a-lens/','objects'],
['http://www.creativeapplications.net/objects/pristitrope-zoetrope-with-18-lcd-screens-and-smart-rotation/','objects'],
['http://www.creativeapplications.net/objects/rgb-colorspace-atlas-by-tauba-auerbach/','objects'],
['http://www.creativeapplications.net/objects/robo-rainbow-objects/','objects'],
['http://www.creativeapplications.net/objects/signal-to-noise-by-labau-512-mechanical-split-flaps-and-the-noise-of-data/','objects'],
['http://www.creativeapplications.net/objects/six-forty-by-four-eighty-objects/','objects'],
['http://www.creativeapplications.net/objects/skal-objects/','objects'],
['http://www.creativeapplications.net/objects/solar-sinter-objects/','objects'],
['http://www.creativeapplications.net/objects/still-life-unity-c-objects/','objects'],
['http://www.creativeapplications.net/objects/the-bright-eyes-kit-diy-led-glasses-to-inspire-programming/','objects'],
['http://www.creativeapplications.net/objects/the-kernels-of-chimaera-living-artefacts-by-stefan-schwabe-di-rca-2012/','objects'],
['http://www.creativeapplications.net/objects/the-transparency-grenade-by-julian-oliver-design-fiction-for-leaking-data/','objects'],
['http://www.creativeapplications.net/objects/troblion-objects/','objects'],
['http://www.creativeapplications.net/objects/tsumiki-c-objects/','objects'],
['http://www.creativeapplications.net/objects/versus-c-objects/','objects'],
['http://www.creativeapplications.net/objects/we-play-bodies-inspiration-objects/','objects'],
['http://www.creativeapplications.net/objects/words-of-a-middle-man-human-to-machine-and-machine-to-machine-dialogues/','objects'],
['http://www.creativeapplications.net/ontheweb/3d-printing-photobooth/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/baxter-hobbyist-hardware/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/drift-transients/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/easy-interactive-building-blocks-for-kids/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/eta-2012/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/evan-roth-solitaire-exe/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/gaming-the-imagination/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/golan-levin-on-radically-local/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/i-had-the-pleasure-of-discussing-the-modulor-at/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/la-game-space-kickstarter-campaign/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/large-stag-head/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/medialab-prado-media-facade-cfp/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/nandan-ghiya/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/new-bjork-video/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/pandabot-new-affordable-3d-printer-by-panda-robotics/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/pc-games-are-all-about-terrain/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/the-futility-of-media-art-in-a-contemporary-art-world/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/unfolding-maps/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/upgrade-soul-preview/','ontheweb'],
['http://www.creativeapplications.net/ontheweb/who-works-with-creative-coders/','ontheweb'],
['http://www.creativeapplications.net/openframeworks/action-painting-redux-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/ai-controller-ipad-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/all-eyes-on-you-openframeworks-javascript-kinect-arduino/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/assembly-by-kimchi-and-chips-5500-physical-pixels-illuminated-by-digital-light/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/augmented-perspective-c/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/augmented-sound-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/barcode-by-smallfly-interactive-exploration-of-everyday-objects-around-us/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/bernhard-willhelm-spring-summer-2013/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/composite-ipad-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/directors-showcase-cannes-openframeworks-cinder-kinect-ipad/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/dolfball-ipad-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/fabricate-yourself-openframeworks-kinect/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/hand-tracking-gesture-experiment-with-iisu-middleware-and-of/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/here-to-there-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/holler-logo-bash/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/image-cloning-library-openframeworks-c/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/interactive-laser-sculpture-by-jayson-haebich/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/interactive-puppet-prototype-w-kinect-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/interactive-wall-at-ud-openframeworks-kinect/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/iq-font-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/kinect-rgbdepth-filmmaking-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/latest-drawing-experiments-by-kynd-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/light-form-interactive-landscape-by-mathieu-rivier-ecal/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/magic-and-storytelling-at-ted-collaboration-marco-tempest-onformative-checksum5/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/mormor-ingrid-interactive-3d-drawing-by-oculart/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/overscan-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/paik-times-five-by-flightphase/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/painting-with-a-digital-brush-ascii-art-in-physical-space/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/pencil-drawing-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/pennant-ipad-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/puppet-parade-kinect-arm-tracker-by-design-io/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/puppet-parade-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/realtime-dithered-screen-capture-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/recent-work-by-daito-manabe-motoi-ishibashi-of-profile/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/recompose-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/scramble-suit-face-tracking-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/selective-memory-theatre-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/shadowplay-by-kyle-mcdonald-experimental-interaction-paradigm-at-ycam/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/silo468-permanent-light-installation-in-helsinki/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/sketchsynth-drawable-user-interface-by-billy-keyes/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/snake-the-planet-adopting-classic-mobile-phone-game-for-the-urban-canvas/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/stutterspot-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/submap-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/the-maccabees-in-the-dark-live-performance-recording-with-10-kinect-cameras/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/the-variable-city-francois-quevillons-derive/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/the-wombats-techno-fan-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/vincent-van-goghs-starry-night-interactive-by-petros-vrellis-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/visua-musio-by-wow-music-compositions-using-geometric-shapes/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/voyagers-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/openframeworks/wind-tunnel-msafluid-openframeworks/','openframeworks'],
['http://www.creativeapplications.net/other/alan-gilbert-music-director-for-the-new-yorks-philharmonic-explains-his-gestures/','other'],
['http://www.creativeapplications.net/other/algorithmic-architecture-by-charlie-behrens/','other'],
['http://www.creativeapplications.net/other/botanicus-interacticus-designing-interactive-plants-at-disney-research/','other'],
['http://www.creativeapplications.net/other/dancing-with-swarming-particles-kinect-unity/','other'],
['http://www.creativeapplications.net/other/glitch-textiles-phillip-stearns-uses-short-circuited-cameras-to-create-blanket-patterns/','other'],
['http://www.creativeapplications.net/other/hydro-fold-by-christophe-guberan-self-folding-inkjet-printed-paper/','other'],
['http://www.creativeapplications.net/other/l-train-notwork-events/','other'],
['http://www.creativeapplications.net/processing/115c8-and-edf0-by-raven-kwok-recursive-and-transforming/','processing'],
['http://www.creativeapplications.net/processing/3d-soundclash-processing/','processing'],
['http://www.creativeapplications.net/processing/actelion-imagery-processing/','processing'],
['http://www.creativeapplications.net/processing/affection-station-by-salvador-orara-reconnecting-to-our-devices/','processing'],
['http://www.creativeapplications.net/processing/aleph-openframeworks-processing/','processing'],
['http://www.creativeapplications.net/processing/an-instrument-for-the-sonification-of-everyday-things/','processing'],
['http://www.creativeapplications.net/processing/anthem-of-hearts-processing-sound/','processing'],
['http://www.creativeapplications.net/processing/antivj-profile-openframeworks-processing/','processing'],
['http://www.creativeapplications.net/processing/augmented-reality-with-processing-tutorial-processing/','processing'],
['http://www.creativeapplications.net/processing/binary-counting-processing/','processing'],
['http://www.creativeapplications.net/processing/bloom-urban-toy-invites-participants-to-seed-new-formations/','processing'],
['http://www.creativeapplications.net/processing/boo-processing/','processing'],
['http://www.creativeapplications.net/processing/cascades-processing/','processing'],
['http://www.creativeapplications.net/processing/chronotape-by-peter-bennett-tangible-timeline-for-family-history/','processing'],
['http://www.creativeapplications.net/processing/cinemetrics-processing/','processing'],
['http://www.creativeapplications.net/processing/circuit-explorations-openframeworks-processing/','processing'],
['http://www.creativeapplications.net/processing/cloud-pink-by-everyware-another-world-above/','processing'],
['http://www.creativeapplications.net/processing/connexions-processing/','processing'],
['http://www.creativeapplications.net/processing/daiku-processing-sound/','processing'],
['http://www.creativeapplications.net/processing/decode-recode-processing/','processing'],
['http://www.creativeapplications.net/processing/digital-chronophotography-processing/','processing'],
['http://www.creativeapplications.net/processing/digital-natives-glitched-realities-3d-printed-in-colour-resin/','processing'],
['http://www.creativeapplications.net/processing/dromolux-processing-objects/','processing'],
['http://www.creativeapplications.net/processing/electroplastique-and-grid-distortions-processing/','processing'],
['http://www.creativeapplications.net/processing/emoto-data-sculpture-by-studio-nand-and-moritz-stefaner-drew-hemment/','processing'],
['http://www.creativeapplications.net/processing/fadertouch-3-0-with-av-instruments-processing/','processing'],
['http://www.creativeapplications.net/processing/fbfaces-processing-scripts/','processing'],
['http://www.creativeapplications.net/processing/fluid-vase-processing-objects/','processing'],
['http://www.creativeapplications.net/processing/four-letter-words-arduino-processing/','processing'],
['http://www.creativeapplications.net/processing/fragments-of-rgb-processing/','processing'],
['http://www.creativeapplications.net/processing/generative-art-a-practical-guide-books-processing/','processing'],
['http://www.creativeapplications.net/processing/generative-art-in-html5-processing-javascript-tutorial/','processing'],
['http://www.creativeapplications.net/processing/generative-jigsaw-puzzles-nervous-system/','processing'],
['http://www.creativeapplications.net/processing/graphic-narratives-generative-book-covers-by-ligia-duro/','processing'],
['http://www.creativeapplications.net/processing/hemesh-and-hemeshgui-processing/','processing'],
['http://www.creativeapplications.net/processing/idill-2011-trophy-processing-objects/','processing'],
['http://www.creativeapplications.net/processing/infobjects-by-johannes-tsopanides-shaping-objects-by-co%c2%b2-emissions/','processing'],
['http://www.creativeapplications.net/processing/interactive-experiments-in-the-p3-gallery-events-processing/','processing'],
['http://www.creativeapplications.net/processing/invisible-cities-processing/','processing'],
['http://www.creativeapplications.net/processing/iris-by-hybe-new-kind-of-monochrome-lcd/','processing'],
['http://www.creativeapplications.net/processing/kinect-cloth-simulations-processing/','processing'],
['http://www.creativeapplications.net/processing/kinect-physics-tutorial-for-processing/','processing'],
['http://www.creativeapplications.net/processing/kinect-physics-tutorial-for-processing/comment-page-2/','processing'],
['http://www.creativeapplications.net/processing/known-unknowns-processing-objects/','processing'],
['http://www.creativeapplications.net/processing/la-gabbia-the-cage-by-aurorameccanica-releasing-the-untangible/','processing'],
['http://www.creativeapplications.net/processing/lowres-media-facade-dockville-2010-processing/','processing'],
['http://www.creativeapplications.net/processing/lux4j-processing-java/','processing'],
['http://www.creativeapplications.net/processing/mclaren-p12-teaser-mclaren-vs-aerodynamics-by-mlf/','processing'],
['http://www.creativeapplications.net/processing/met3d-remix-we-met-heads-on-by-matthew-plummer-fernandez/','processing'],
['http://www.creativeapplications.net/processing/microsonic-landscapes-by-realitat-transforming-sound-into-matter/','processing'],
['http://www.creativeapplications.net/processing/moullinex-catalina-processing-kinect/','processing'],
['http://www.creativeapplications.net/processing/mycelium-processing/','processing'],
['http://www.creativeapplications.net/processing/nature-of-code-by-daniel-shiffman-natural-systems-using-processing/','processing'],
['http://www.creativeapplications.net/processing/new-identity-for-pigmentpol-by-feld-and-atmo/','processing'],
['http://www.creativeapplications.net/processing/on-journalism-2-typewriter-by-julian-koschwitz/','processing'],
['http://www.creativeapplications.net/processing/one-perfect-cube-processing/','processing'],
['http://www.creativeapplications.net/processing/onedotzero-app-processing/','processing'],
['http://www.creativeapplications.net/processing/onedotzero-bfi-london-processing-events/','processing'],
['http://www.creativeapplications.net/processing/paper-note-a-tangible-paper-waveform-with-processing/','processing'],
['http://www.creativeapplications.net/processing/particle-field-form-finding-processing/','processing'],
['http://www.creativeapplications.net/processing/paysages-processing/','processing'],
['http://www.creativeapplications.net/processing/physical-cellular-automata-time-lapse-by-jan-vantomme/','processing'],
['http://www.creativeapplications.net/processing/playing-with-glsl-in-processing-initial-experiments-tutorials/','processing'],
['http://www.creativeapplications.net/processing/playing-with-pixels-javascript-processing/','processing'],
['http://www.creativeapplications.net/processing/poetry-on-the-road-processing/','processing'],
['http://www.creativeapplications.net/processing/process-compendium-processing/','processing'],
['http://www.creativeapplications.net/processing/processing-paris-events/','processing'],
['http://www.creativeapplications.net/processing/processing-paris-workshops-with-andreas-gysin-v3ga-and-marius-watz/','processing'],
['http://www.creativeapplications.net/processing/quantum-parallelograph-processing-arduino-objects/','processing'],
['http://www.creativeapplications.net/processing/radius-music-processing/','processing'],
['http://www.creativeapplications.net/processing/republica-2012-analogue-twitter-wall-by-precious-38378-printed-tweets/','processing'],
['http://www.creativeapplications.net/processing/schwarm-autonomous-drawing-agents-by-andreas-nicolas-fischer/','processing'],
['http://www.creativeapplications.net/processing/silenc-at-ciid-visualisation-of-silent-letters-in-a-language-processing/','processing'],
['http://www.creativeapplications.net/processing/silica-esc-processing/','processing'],
['http://www.creativeapplications.net/processing/sms-to-paper-airplanes-processing/','processing'],
['http://www.creativeapplications.net/processing/soak-dye-in-light-processing-kinect/','processing'],
['http://www.creativeapplications.net/processing/soundmachines-objects-sound/','processing'],
['http://www.creativeapplications.net/processing/spike-solutions-processing/','processing'],
['http://www.creativeapplications.net/processing/spiral-wall-processing/','processing'],
['http://www.creativeapplications.net/processing/stem-by-diana-lange-building-stems-from-colour-with-processing/','processing'],
['http://www.creativeapplications.net/processing/synclost-processing/','processing'],
['http://www.creativeapplications.net/processing/tangible-processing-objects/','processing'],
['http://www.creativeapplications.net/processing/the-abyss-tutorial/','processing'],
['http://www.creativeapplications.net/processing/the-digital-rube-goldberg-processor-processing/','processing'],
['http://www.creativeapplications.net/processing/the-frankenfont-processing/','processing'],
['http://www.creativeapplications.net/processing/the-space-beyond-me-openframeworks-arduino-processing/','processing'],
['http://www.creativeapplications.net/processing/these-are-patterns-processing/','processing'],
['http://www.creativeapplications.net/processing/timecode-vinyl-processing/','processing'],
['http://www.creativeapplications.net/processing/titan-and-beyond-the-infinite-jean-pierre-aube-visualizes-an-epic-descent/','processing'],
['http://www.creativeapplications.net/processing/underwater-by-david-bowen-hundreds-of-servos-controlling-wave-patterns/','processing'],
['http://www.creativeapplications.net/processing/unfolded-liquid-processing/','processing'],
['http://www.creativeapplications.net/processing/unnamed-soundsculpture-by-daniel-franke-cedric-kiefer-kinect-processing/','processing'],
['http://www.creativeapplications.net/processing/untitled-246-processing/','processing'],
['http://www.creativeapplications.net/processing/valse-automatique-symbiosis-between-humans-and-technology-processing-rhino/','processing'],
['http://www.creativeapplications.net/processing/variations-on-pi-processing-objects/','processing'],
['http://www.creativeapplications.net/processing/videorative-portrait-of-randall-okita-processing/','processing'],
['http://www.creativeapplications.net/processing/visual-music-collaborative-2010-events/','processing'],
['http://www.creativeapplications.net/processing/visual-music-collaborative-events/','processing'],
['http://www.creativeapplications.net/processing/visualizing-fontanes-brucke-am-tay-processing/','processing'],
['http://www.creativeapplications.net/processing/visualizing-pressible-processing/','processing'],
['http://www.creativeapplications.net/processing/watercolor-sediment-processing/','processing'],
['http://www.creativeapplications.net/processing/wordcollider-colliding-phrases-and-signing-letters-with-phonetics/','processing'],
['http://www.creativeapplications.net/processing/working-with-toxiclibs-processing-tutorial/','processing'],
['http://www.creativeapplications.net/processing/ymyi-processing/','processing'],
['http://www.creativeapplications.net/sound/avouching-avisions-at-mutek-2011-events-sound/','sound'],
['http://www.creativeapplications.net/sound/baroque-me-javascript/','sound'],
['http://www.creativeapplications.net/sound/capture-to-sound-by-satoru-higa/','sound'],
['http://www.creativeapplications.net/sound/commute-as-composition-brian-houses-forty-eight-to-sixteen/','sound'],
['http://www.creativeapplications.net/sound/cube-with-magic-ribbons-by-simon-katan-openframeworks-and-supercollider/','sound'],
['http://www.creativeapplications.net/sound/dial-openframeworks-sound/','sound'],
['http://www.creativeapplications.net/sound/domestic-sound-scapes-objects-sound/','sound'],
['http://www.creativeapplications.net/sound/egregore-pure-data-sound/','sound'],
['http://www.creativeapplications.net/sound/electrostatic-bell-choir-feedback-babies-recent-work-by-darsha-hewitt/','sound'],
['http://www.creativeapplications.net/sound/faceshift-studio-experiments-by-kyle-mcdonald/','sound'],
['http://www.creativeapplications.net/sound/goethe-institute-in-barcelona-sebastian-neitsch-vvvv/','sound'],
['http://www.creativeapplications.net/sound/harmophon-vvvv-sound/','sound'],
['http://www.creativeapplications.net/sound/hexagrama-vvvv-sound/','sound'],
['http://www.creativeapplications.net/sound/keita-onishi-forest-and-trees-dynamics-of-the-subway/','sound'],
['http://www.creativeapplications.net/sound/knote-by-matthieu-minguet-rope-as-sound-and-visual-interface-for-iphone-ecal/','sound'],
['http://www.creativeapplications.net/sound/konkreet-performer-ipad-sound/','sound'],
['http://www.creativeapplications.net/sound/kulbuto-quarz-composer-sound/','sound'],
['http://www.creativeapplications.net/sound/muze-arduino-sound/','sound'],
['http://www.creativeapplications.net/sound/offf-can-workshop-collaborative-2011-cinder-of-js-events/','sound'],
['http://www.creativeapplications.net/sound/orphion-ipad-sound/','sound'],
['http://www.creativeapplications.net/sound/partitura-vvvv-sound/','sound'],
['http://www.creativeapplications.net/sound/ryoji-ikeda-profile/','sound'],
['http://www.creativeapplications.net/sound/samplr-for-ipad-music-at-your-fingertips/','sound'],
['http://www.creativeapplications.net/sound/simple-harmonic-motion-openframeworks-sound/','sound'],
['http://www.creativeapplications.net/sound/sona-by-ruslan-gaynutdinov-a-game-of-sound-networks-ecal/','sound'],
['http://www.creativeapplications.net/sound/super-pikix-caanoo-sound/','sound'],
['http://www.creativeapplications.net/sound/tangible-color-music-instrument-openframeworks-sound/','sound'],
['http://www.creativeapplications.net/sound/the-evil-eye-optical-audio-record-by-indianen/','sound'],
['http://www.creativeapplications.net/sound/the-infinite-music-machine-for-ipad-zone-out-and-drift-away/','sound'],
['http://www.creativeapplications.net/sound/touchdesigner-plastikman-windows-sound/','sound'],
['http://www.creativeapplications.net/sound/waveshaper-ipad-openframeworks-sound/','sound'],
['http://www.creativeapplications.net/sound/wireless-arduino-of-tutorial/','sound'],
['http://www.creativeapplications.net/sound/zimoun-volume-294-prepared-dc-motors-cork-balls-and-cardboard-boxes/','sound'],
['http://www.creativeapplications.net/theory/a-buttload-of-schmick-words-language-in-motion-by-helge-hjorth-bentsen-ecal/','theory'],
['http://www.creativeapplications.net/theory/a-philosophy-of-computer-art-by-dominic-lopes-books-review/','theory'],
['http://www.creativeapplications.net/theory/daito-manabe-being-real-about-being-material-theory/','theory'],
['http://www.creativeapplications.net/theory/designing-programs-theory/','theory'],
['http://www.creativeapplications.net/theory/from-0-to-c-teaching-programming-using-a-tangible-approach/','theory'],
['http://www.creativeapplications.net/theory/growing-objects-programming-biological-systems-theory/','theory'],
['http://www.creativeapplications.net/theory/interview-on-the-a-n-d-project-theory/','theory'],
['http://www.creativeapplications.net/theory/invisible-airs-a-short-documentary-on-yoha/','theory'],
['http://www.creativeapplications.net/theory/mediated-cityscapes-01-four-statements-about-urban-computing/','theory'],
['http://www.creativeapplications.net/theory/mediated-cityscapes-02-memory-and-the-city/','theory'],
['http://www.creativeapplications.net/theory/mediated-cityscapes-03-diy-cartography-theory/','theory'],
['http://www.creativeapplications.net/theory/meibach-and-posavec-data-visualization-poetry-and-sculpture/','theory'],
['http://www.creativeapplications.net/theory/morse-code-as-interaction-input-methodology-theory/','theory'],
['http://www.creativeapplications.net/theory/patch-schematics-%e2%80%93-the-aesthetics-of-constraint-best-practices-theory/','theory'],
['http://www.creativeapplications.net/theory/paul-prudence-interviews-mitchell-whitelaw-theory/','theory'],
['http://www.creativeapplications.net/theory/the-politics-of-the-new-aesthetic-electric-anthropology-and-ecological-vision/','theory'],
['http://www.creativeapplications.net/theory/the-psychoeconomy-war-room-table-theory/','theory'],
['http://www.creativeapplications.net/theory/what-is-at-stake-in-animate-design-theory/','theory'],
['http://www.creativeapplications.net/tutorials/arduino-servo-opencv-tutorial-openframeworks/','tutorials'],
['http://www.creativeapplications.net/tutorials/guide-to-meshes-in-cinder-cinder-tutorials/','tutorials'],
['http://www.creativeapplications.net/tutorials/images-in-cinder-tutorials-cinder/','tutorials'],
['http://www.creativeapplications.net/tutorials/shaders-geometry-and-the-kinect-part-1-cinder-tutorials/','tutorials'],
['http://www.creativeapplications.net/vvvv/clavilux-2000-vvvv/','vvvv'],
['http://www.creativeapplications.net/vvvv/computer-augmented-crafts-vvvv/','vvvv'],
['http://www.creativeapplications.net/vvvv/lucid-vvvv-kinect/','vvvv'],
['http://www.creativeapplications.net/vvvv/maintenance-repair-and-operations-av-vvvv/','vvvv'],
['http://www.creativeapplications.net/vvvv/nemore-vvvv/','vvvv'],
['http://www.creativeapplications.net/vvvv/noteput-vvvv/','vvvv'],
['http://www.creativeapplications.net/vvvv/parhelia-vvvv/','vvvv'],
['http://www.creativeapplications.net/vvvv/sonic-art-%e2%80%93-bagatelle-i-vvvv/','vvvv'],
['http://www.creativeapplications.net/vvvv/soundframe-festival-lightrails-vvvv/','vvvv'],
['http://www.creativeapplications.net/vvvv/white-light-white-heat-vvvv/','vvvv'],
['http://www.creativeapplications.net/vvvv/years-vvvv-arduino/','vvvv'],
['http://www.creativeapplications.net/webapp/5-twitter-art-projects-volume-3-webapp/','webapp'],
['http://www.creativeapplications.net/webapp/5-twitter-art-projects-webapp/','webapp'],
['http://www.creativeapplications.net/webapp/above-the-cloud-archaeology-of-social-networks/','webapp'],
['http://www.creativeapplications.net/webapp/cphsignals-connecting-copenhagen-neighbourhoods-using-morse-code/','webapp'],
['http://www.creativeapplications.net/webapp/edding-850-font-by-buro-destruct-collaborative-realtime-text-editor/','webapp'],
['http://www.creativeapplications.net/webapp/experimental-study-on-web-asynchronicity-webapp/','webapp'],
['http://www.creativeapplications.net/webapp/fever-webapp/','webapp'],
['http://www.creativeapplications.net/webapp/fractal-lab-webapp/','webapp'],
['http://www.creativeapplications.net/webapp/human-interference-project-webapp/','webapp'],
['http://www.creativeapplications.net/webapp/lines-webapp/','webapp'],
['http://www.creativeapplications.net/webapp/little-sun-by-olafur-eliasson-and-frederik-ottesen/','webapp'],
['http://www.creativeapplications.net/webapp/livehoods-use-based-urban-analytics/','webapp'],
['http://www.creativeapplications.net/webapp/mindmeto-development-webapps/','webapp'],
['http://www.creativeapplications.net/webapp/multiuser-sketchpad-webapp/','webapp'],
['http://www.creativeapplications.net/webapp/rot-sketch-webapp/','webapp'],
['http://www.creativeapplications.net/webapp/street-views-patchwork-by-julien-levesque/','webapp'],
['http://www.creativeapplications.net/webapp/temporary-cc-webapp/','webapp'],
['http://www.creativeapplications.net/webapp/the-wallpaper-application-webapp/','webapp'],
['http://www.creativeapplications.net/webapp/twitter-art-projects-part-2-webapp/','webapp']
]
def index_entry(data):
    """Store one (url, category) training pair as a classified Pages record.

    ``data`` is a two-element sequence: ``data[0]`` is the page URL and
    ``data[1]`` its category label.  Returns whatever ``Pages.save()``
    returns for the newly created record.
    """
    url, category = data[0], data[1]
    record = Pages()
    record.data['url'] = url
    record.data['category'] = category
    # Flag the record as already classified and active (not deleted).
    record.data['classified'] = 1
    record.data['deleted'] = 0
    return record.save()
def main():
    """Fetch every training URL, index it and train the Fisher classifier.

    Iterates over the module-level ``pages`` list of (url, category) pairs,
    downloads each page, extracts its main body text and, when extraction
    succeeds, stores the entry via ``index_entry`` and feeds the body to the
    classifier.  NOTE(review): Python 2 code (print statements,
    ``urllib.urlopen``) -- do not run under Python 3 without porting.
    """
    classifier = FisherClassifier(get_words)
    for page in pages:
        # page[0] is the URL, page[1] the category label.
        res = extract(urllib.urlopen(page[0]).read())
        if res and res['body']:
            # Persist the pair, then train on the extracted body text.
            index_entry(page)
            classifier.train(res['body'], page[1])
            print '[%s] Classified %s %s' % (datetime.today().strftime('%Y-%m-%d %H:%M:%S'), page[0], page[1])
        else:
            # Extraction failed or the page had no usable body text.
            print '[%s] No body %s %s' % (datetime.today().strftime('%Y-%m-%d %H:%M:%S'), page[0], page[1])
if __name__ == '__main__': main() | after12am/expecto | machine_learning/feed_classifier/src/web/train.py | Python | mit | 75,349 | [
"Brian",
"CRYSTAL"
] | fa9efdcc7857dcc5df0301c748e9e2a57b2c5277051dce94d347409306e86a09 |
#!/usr/bin/env python
# Regression test for VTK's XML unstructured-grid I/O: the same dataset is
# written in ASCII, appended and binary data modes (with pieces and ghost
# levels), read back, and rendered for visual comparison.
import os
import vtk
from vtk.util.misc import vtkGetDataRoot
from vtk.util.misc import vtkGetTempDir
VTK_DATA_ROOT = vtkGetDataRoot()
VTK_TEMP_DIR = vtkGetTempDir()
# Scratch output files, one per writer data mode exercised below.
file0 = VTK_TEMP_DIR + '/ugFile0.vtu'
file1 = VTK_TEMP_DIR + '/ugFile1.vtu'
file2 = VTK_TEMP_DIR + '/ugFile2.vtu'
# read in some unstructured grid data
ugReader = vtk.vtkUnstructuredGridReader()
ugReader.SetFileName(VTK_DATA_ROOT + "/Data/blow.vtk")
ugReader.SetScalarsName("thickness9")
ugReader.SetVectorsName("displacement9")
extract = vtk.vtkExtractUnstructuredGridPiece()
extract.SetInputConnection(ugReader.GetOutputPort())
# write various versions
# NOTE: a single writer instance is reused; each Write() below inherits the
# settings from the previous configuration unless explicitly overridden,
# so the statement order here is significant.
ugWriter = vtk.vtkXMLUnstructuredGridWriter()
ugWriter.SetFileName(file0)
ugWriter.SetDataModeToAscii()
ugWriter.SetInputConnection(ugReader.GetOutputPort())
ugWriter.Write()
# Appended mode, two pieces, fed through the piece-extraction filter.
ugWriter.SetFileName(file1)
ugWriter.SetInputConnection(extract.GetOutputPort())
ugWriter.SetDataModeToAppended()
ugWriter.SetNumberOfPieces(2)
ugWriter.Write()
# Binary mode with two ghost levels (piece settings carried over from above).
ugWriter.SetFileName(file2)
ugWriter.SetDataModeToBinary()
ugWriter.SetGhostLevel(2)
ugWriter.Write()
# read the ASCII version
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(file0)
reader.Update()
# Deep-copy the output so the data survives the reader being re-pointed
# at file1 below.
ug0 = vtk.vtkUnstructuredGrid()
ug0.DeepCopy(reader.GetOutput())
sF = vtk.vtkDataSetSurfaceFilter()
sF.SetInputData(ug0)
mapper0 = vtk.vtkPolyDataMapper()
mapper0.SetInputConnection(sF.GetOutputPort())
actor0 = vtk.vtkActor()
actor0.SetMapper(mapper0)
actor0.SetPosition(0, 40, 20)
# read appended piece 0
reader.SetFileName(file1)
sF1 = vtk.vtkDataSetSurfaceFilter()
sF1.SetInputConnection(reader.GetOutputPort())
mapper1 = vtk.vtkPolyDataMapper()
mapper1.SetInputConnection(sF1.GetOutputPort())
mapper1.SetPiece(1)
mapper1.SetNumberOfPieces(2)
actor1 = vtk.vtkActor()
actor1.SetMapper(mapper1)
# read binary piece 0 (with ghost level)
reader2 = vtk.vtkXMLUnstructuredGridReader()
reader2.SetFileName(file2)
sF2 = vtk.vtkDataSetSurfaceFilter()
sF2.SetInputConnection(reader2.GetOutputPort())
mapper2 = vtk.vtkPolyDataMapper()
mapper2.SetInputConnection(sF2.GetOutputPort())
mapper2.SetPiece(1)
mapper2.SetNumberOfPieces(2)
mapper2.SetGhostLevel(2)
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
actor2.SetPosition(0, 0, 30)
# Create the RenderWindow, Renderer and both Actors
#
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren.AddActor(actor0)
ren.AddActor(actor1)
ren.AddActor(actor2)
ren.ResetCamera()
ren.GetActiveCamera().SetPosition(180, 55, 65)
ren.GetActiveCamera().SetFocalPoint(3.5, 32, 15)
renWin.SetSize(300, 300)
renWin.Render()
# Cleanup intentionally disabled -- keeps the written files for inspection.
#os.remove(file0)
#os.remove(file1)
#os.remove(file2)
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/IO/XML/Testing/Python/TestXMLUnstructuredGridIO.py | Python | gpl-3.0 | 2,784 | [
"VTK"
] | 92cda563f1a1546cc0476c767421ccc2d7f49bb2e47f2bf24ef74b093f2a4e02 |
""" The Script class provides a simple way for users to specify an executable
or file to run (and is also a simple example of a workflow module).
"""
import os
import sys
import re
import stat
import distutils.spawn
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC import gLogger
from DIRAC.Workflow.Modules.ModuleBase import ModuleBase
class Script( ModuleBase ):
  """ Workflow module that runs a user-supplied executable or python script.

      The executable, its arguments and the log file name are resolved from
      the workflow/step parameters; the command is run through shellCall and
      its stdout/stderr are captured into self.applicationLog.
  """

  #############################################################################
  def __init__( self ):
    """ c'tor
    """
    self.log = gLogger.getSubLogger( 'Script' )
    super( Script, self ).__init__( self.log )

    # Set defaults for all workflow parameters here
    self.executable = ''
    self.applicationName = ''
    self.applicationVersion = ''
    self.applicationLog = ''
    self.arguments = ''
    self.step_commons = {}
    self.environment = None
    self.callbackFunction = None
    self.bufferLimit = 52428800  # 50 MiB cap on captured stdout/stderr

  #############################################################################
  def _resolveInputVariables( self ):
    """ By convention the workflow parameters are resolved here.
    """
    super( Script, self )._resolveInputVariables()
    super( Script, self )._resolveInputStep()

    # 'in' instead of the deprecated dict.has_key() (removed in python 3)
    if 'arguments' in self.step_commons:
      self.arguments = self.step_commons['arguments']

  #############################################################################
  def _initialize( self ):
    """ simple checks: an executable must have been defined
    """
    if not self.executable:
      raise RuntimeError( 'No executable defined' )

  def _setCommand( self ):
    """ set the command that will be executed

        Resolution order: a file in the cwd (made readable/executable if
        needed), a '*.py' script run with the current interpreter, or a
        name found on the PATH. self.arguments, if any, are appended.
    """
    self.command = self.executable
    if os.path.exists( os.path.basename( self.executable ) ):
      self.executable = os.path.basename( self.executable )
      # os.R_OK | os.X_OK replaces the magic mode constant 5 (== 4 | 1)
      if not os.access( '%s/%s' % ( os.getcwd(), self.executable ), os.R_OK | os.X_OK ):
        os.chmod( '%s/%s' % ( os.getcwd(), self.executable ), stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH )
      self.command = '%s/%s' % ( os.getcwd(), self.executable )
    elif re.search( r'\.py$', self.executable ):
      # escaped dot: the old pattern '.py$' also matched names like 'xpy'
      self.command = '%s %s' % ( sys.executable, self.executable )
    elif distutils.spawn.find_executable( self.executable ):
      self.command = self.executable

    if self.arguments:
      self.command = '%s %s' % ( self.command, self.arguments )

    self.log.info( 'Command is: %s' % self.command )

  def _executeCommand( self ):
    """ execute the self.command (uses shellCall)

        Writes stdout/stderr to self.applicationLog and raises RuntimeError
        on a failed shell call or a non-zero exit status.
    """
    failed = False

    outputDict = shellCall( 0, self.command,
                            env = self.environment,
                            callbackFunction = self.callbackFunction,
                            bufferLimit = self.bufferLimit )
    if not outputDict['OK']:
      failed = True
      self.log.error( 'Shell call execution failed:', '\n' + str( outputDict['Message'] ) )
    status, stdout, stderr = outputDict['Value'][0:3]
    if status:
      failed = True
      self.log.error( "Non-zero status while executing", "%s: %s" % ( status, self.command ) )
    else:
      self.log.info( "%s execution completed with status %s" % ( self.executable, status ) )

    self.log.verbose( stdout )
    self.log.verbose( stderr )

    if os.path.exists( self.applicationLog ):
      self.log.verbose( 'Removing existing %s' % self.applicationLog )
      os.remove( self.applicationLog )

    # 'with' guarantees the log file is closed even if a write fails
    with open( '%s/%s' % ( os.getcwd(), self.applicationLog ), 'w' ) as fopen:
      fopen.write( "<<<<<<<<<< %s Standard Output >>>>>>>>>>\n\n%s " % ( self.executable, stdout ) )
      if stderr:
        fopen.write( "<<<<<<<<<< %s Standard Error >>>>>>>>>>\n\n%s " % ( self.executable, stderr ) )
    self.log.info( "Output written to %s, execution complete." % ( self.applicationLog ) )

    if failed:
      raise RuntimeError( "'%s' Exited With Status %s" % ( os.path.basename( self.executable ), status ) )

  def _finalize( self ):
    """ simply finalize: report a success status through the base class
    """
    status = "%s (%s %s) Successful" % ( os.path.basename( self.executable ),
                                         self.applicationName,
                                         self.applicationVersion )

    super( Script, self )._finalize( status )
| vmendez/DIRAC | Workflow/Modules/Script.py | Python | gpl-3.0 | 4,256 | [
"DIRAC"
] | 75fd1ab2d38b04dd4d5ee9faf772029f21677fdf3a4c159e955945ee033da5d6 |
"""
A buffered iterator for big arrays.
This module solves the problem of iterating over a big file-based array
without having to read it into memory. The ``Arrayterator`` class wraps
an array object, and when iterated it will return subarrays with at most
``buf_size`` elements.
The algorithm works by first finding a "running dimension", along which
the blocks will be extracted. Given an array of dimensions (d1, d2, ...,
dn), eg, if ``buf_size`` is smaller than ``d1`` the first dimension will
be used. If, on the other hand,
d1 < buf_size < d1*d2
the second dimension will be used, and so on. Blocks are extracted along
this dimension, and when the last block is returned the process continues
from the next dimension, until all elements have been read.
"""
from __future__ import division
from operator import mul
__all__ = ['Arrayterator']
class Arrayterator(object):
    """
    Buffered iterator for big arrays.
    This class creates a buffered iterator for reading big arrays in small
    contiguous blocks. The class is useful for objects stored in the
    filesystem. It allows iteration over the object *without* reading
    everything in memory; instead, small blocks are read and iterated over.
    The class can be used with any object that supports multidimensional
    slices, like variables from Scientific.IO.NetCDF, pynetcdf and ndarrays.

    NOTE(review): legacy Python 2 module -- it uses the builtin ``long``
    and ``reduce`` without an import, and ``__iter__`` terminates with
    ``raise StopIteration`` (disallowed inside generators since PEP 479).
    """
    def __init__(self, var, buf_size=None):
        # var: the wrapped array-like; buf_size: maximum number of elements
        # per yielded block (None means "everything in one block").
        self.var = var
        self.buf_size = buf_size
        # Per-dimension read window (start/stop/step), initially spanning
        # the whole wrapped array.
        self.start = [0 for dim in var.shape]
        self.stop = [dim for dim in var.shape]
        self.step = [1 for dim in var.shape]
    def __getattr__(self, attr):
        # Delegate unknown attribute lookups (dtype, etc.) to the wrapped
        # variable so the arrayterator is a transparent proxy.
        return getattr(self.var, attr)
    def __getitem__(self, index):
        """
        Return a new arrayterator restricted by the given index.
        """
        # Fix index, handling ellipsis and incomplete slices.
        if not isinstance(index, tuple): index = (index,)
        fixed = []
        length, dims = len(index), len(self.shape)
        for slice_ in index:
            if slice_ is Ellipsis:
                # Expand '...' into as many full slices as needed.
                fixed.extend([slice(None)] * (dims-length+1))
                length = len(fixed)
            elif isinstance(slice_, (int, long)):
                # Scalar index becomes a length-1 slice (dimension is kept).
                fixed.append(slice(slice_, slice_+1, 1))
            else:
                fixed.append(slice_)
        index = tuple(fixed)
        if len(index) < dims:
            # Pad trailing dimensions with full slices.
            index += (slice(None),) * (dims-len(index))
        # Return a new arrayterator object.
        out = self.__class__(self.var, self.buf_size)
        for i, (start, stop, step, slice_) in enumerate(
                zip(self.start, self.stop, self.step, index)):
            # Compose the new slice with the current window: offsets are
            # relative to this arrayterator's own start/step.
            out.start[i] = start + (slice_.start or 0)
            out.step[i] = step * (slice_.step or 1)
            out.stop[i] = start + (slice_.stop or stop-start)
            out.stop[i] = min(stop, out.stop[i])
        return out
    def __array__(self):
        """
        Return corresponding data as one in-memory array.
        """
        slice_ = tuple(slice(*t) for t in zip(
            self.start, self.stop, self.step))
        return self.var[slice_]
    @property
    def flat(self):
        # Generator over individual elements, produced block by block so
        # only one buffered block is resident at a time.
        for block in self:
            for value in block.flat:
                yield value
    @property
    def shape(self):
        # Shape of the (possibly sliced) window; each extent is computed
        # like the length of range(start, stop, step).
        return tuple(((stop-start-1)//step+1) for start, stop, step in
                zip(self.start, self.stop, self.step))
    def __iter__(self):
        # Skip arrays with degenerate dimensions
        if [dim for dim in self.shape if dim <= 0]: raise StopIteration
        start = self.start[:]
        stop = self.stop[:]
        step = self.step[:]
        ndims = len(self.var.shape)
        while 1:
            # Elements allowed in the next block (unbounded if buf_size
            # is None: the whole remaining window).
            count = self.buf_size or reduce(mul, self.shape)
            # iterate over each dimension, looking for the
            # running dimension (ie, the dimension along which
            # the blocks will be built from)
            rundim = 0
            for i in range(ndims-1, -1, -1):
                # if count is zero we ran out of elements to read
                # along higher dimensions, so we read only a single position
                if count == 0:
                    stop[i] = start[i]+1
                elif count <= self.shape[i]: # limit along this dimension
                    stop[i] = start[i] + count*step[i]
                    rundim = i
                else:
                    stop[i] = self.stop[i] # read everything along this
                    # dimension
                stop[i] = min(self.stop[i], stop[i])
                count = count//self.shape[i]
            # yield a block
            slice_ = tuple(slice(*t) for t in zip(start, stop, step))
            yield self.var[slice_]
            # Update start position, taking care of overflow to
            # other dimensions
            start[rundim] = stop[rundim] # start where we stopped
            for i in range(ndims-1, 0, -1):
                if start[i] >= self.stop[i]:
                    # Carry into the next-slower dimension, odometer-style.
                    start[i] = self.start[i]
                    start[i-1] += self.step[i-1]
            if start[0] >= self.stop[0]:
                raise StopIteration
| illume/numpy3k | numpy/lib/arrayterator.py | Python | bsd-3-clause | 5,149 | [
"NetCDF"
] | d9db88c3477ce47825a436f01437496b44209a7d163699428203ec353f424363 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***********************************
espressopp.analysis.Autocorrelation
***********************************
.. function:: espressopp.analysis.Autocorrelation(system)
:param system:
:type system:
.. function:: espressopp.analysis.Autocorrelation.clear()
:rtype:
.. function:: espressopp.analysis.Autocorrelation.compute()
:rtype:
.. function:: espressopp.analysis.Autocorrelation.gather(value)
:param value:
:type value:
:rtype:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from _espressopp import analysis_Autocorrelation
class AutocorrelationLocal(analysis_Autocorrelation):
    """Worker-side wrapper around the C++ ``analysis_Autocorrelation`` class."""
    def __init__(self, system):
        # Only construct the C++ object on ranks that belong to the active
        # PMI CPU group (or always, when no PMI communicator is active).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, analysis_Autocorrelation, system)
    def gather(self, value):
        """Forward ``gather(value)`` to the underlying C++ implementation."""
        return self.cxxclass.gather(self, value)
    def clear(self):
        """Forward ``clear()`` to the underlying C++ implementation."""
        return self.cxxclass.clear(self)
    def compute(self):
        """Forward ``compute()`` to the underlying C++ implementation."""
        return self.cxxclass.compute(self)
if pmi.isController:
    # Controller-side PMI proxy; method calls are dispatched to the
    # AutocorrelationLocal instances on the workers.
    class Autocorrelation(metaclass=pmi.Proxy):
        """PMI proxy for :class:`AutocorrelationLocal`."""
        pmiproxydefs = dict(
            # fully qualified name of the worker-side class
            cls = 'espressopp.analysis.AutocorrelationLocal',
            # methods broadcast to all workers via PMI
            pmicall = [ "gather", "clear", "compute" ],
            # methods executed only on the controller-local instance
            localcall = ["__getitem__", "all"],
            # attributes exposed through PMI property access
            pmiproperty = ["size"]
        )
| espressopp/espressopp | src/analysis/Autocorrelation.py | Python | gpl-3.0 | 2,306 | [
"ESPResSo"
] | f22f9ab22b17831bd96f8078da3b4142b17cf260e43cd99fea3cb0cf6ae049b6 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for Google Connection and Authentication classes.
Information about setting up your Google OAUTH2 credentials:
For libcloud, there are two basic methods for authenticating to Google using
OAUTH2: Service Accounts and Client IDs for Installed Applications.
Both are initially set up from the Cloud Console Console -
https://cloud.google.com/console
Setting up Service Account authentication (note that you need the cryptography
package installed to use this):
- Go to the Console
- Go to your project and then to "APIs & auth" on the left
- Click on "Credentials"
- Click on "Create New Client ID..."
- Select "Service account" and click on "Create Client ID"
- Download the Private Key (should happen automatically). The key you download
is in JSON format.
- Move the .json file to a safe location.
- Optionally, you may choose to Generate a PKCS12 key from the Console.
It needs to be converted to the PEM format. Please note, the PKCS12 format
is deprecated and may be removed in a future release.
- Convert the key using OpenSSL (the default password is 'notasecret').
- Move the .pem file to a safe location.
- To Authenticate, you will need to pass the Service Account's "Email
address" in as the user_id and the path to the .pem file as the key.
Setting up Installed Application authentication:
- Go to the Console
- Go to your project and then to "APIs & auth" on the left
- Click on "Credentials"
- Select "Installed application" and "Other" then click on
"Create Client ID"
- To Authenticate, pass in the "Client ID" as the user_id and the "Client
secret" as the key
- The first time that you do this, the libcloud will give you a URL to
visit. Copy and paste the URL into a browser.
- When you go to the URL it will ask you to log in (if you aren't already)
and ask you if you want to allow the project access to your account.
- Click on Accept and you will be given a code.
- Paste that code at the prompt given to you by the Google libcloud
connection.
- At that point, a token & refresh token will be stored in your home
directory and will be used for authentication.
Please remember to secure your keys and access tokens.
"""
from __future__ import with_statement
try:
import simplejson as json
except ImportError:
import json # type: ignore
import logging
import base64
import errno
import time
import datetime
import os
import socket
import sys
from libcloud.utils.connection import get_response_object
from libcloud.utils.py3 import b, httplib, urlencode, urlparse, PY3
from libcloud.common.base import (ConnectionUserAndKey, JsonResponse,
PollingConnection)
from libcloud.common.base import BaseDriver
from libcloud.common.types import (ProviderError,
LibcloudError)
try:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.hashes import SHA256
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
except ImportError:
# The cryptography library is unavailable
SHA256 = None
UTC_TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
LOG = logging.getLogger(__name__)
def _utcnow():
"""
Mocked in libcloud.test.common.google.GoogleTestCase.
"""
return datetime.datetime.utcnow()
def _utc_timestamp(datetime_obj):
"""
Return string of datetime_obj in the UTC Timestamp Format
"""
return datetime_obj.strftime(UTC_TIMESTAMP_FORMAT)
def _from_utc_timestamp(timestamp):
"""
Return datetime obj where date and time are pulled from timestamp string.
"""
return datetime.datetime.strptime(timestamp, UTC_TIMESTAMP_FORMAT)
def _get_gce_metadata(path=''):
try:
url = 'http://metadata/computeMetadata/v1/' + path.lstrip('/')
headers = {'Metadata-Flavor': 'Google'}
response = get_response_object(url, headers=headers)
return response.status, '', response.body
except Exception as e:
return -1, str(e), None
class GoogleAuthError(LibcloudError):
    """Generic Error class for various authentication errors."""
    def __init__(self, value):
        # ``value`` holds the human-readable error description
        self.value = value
    def __repr__(self):
        return repr(self.value)
class GoogleBaseError(ProviderError):
    """Base error for Google API failures, carrying the Google error code."""
    def __init__(self, value, http_code, code, driver=None):
        # ``code`` is the Google-specific error code (str, int or None)
        self.code = code
        super(GoogleBaseError, self).__init__(value, http_code, driver)
class InvalidRequestError(GoogleBaseError):
    """Raised for HTTP 400 (Bad Request) responses."""
    pass
class JsonParseError(GoogleBaseError):
    """Raised when a successful HTTP response body is not valid JSON."""
    pass
class ResourceNotFoundError(GoogleBaseError):
    """Raised for HTTP 404 (Not Found) responses."""
    def __init__(self, value, http_code, code, driver=None):
        self.code = code
        # A 404 mentioning exactly one project path is usually a credentials
        # mismatch rather than a truly missing resource; append a hint.
        if isinstance(value, dict) and 'message' in value and \
           value['message'].count('/') == 1 and \
           value['message'].count('projects/') == 1:
            value['message'] = value['message'] + ". A missing project " \
                "error may be an authentication issue. " \
                "Please ensure your auth credentials match " \
                "your project. "
        super(ResourceNotFoundError, self).__init__(value, http_code, driver)
class QuotaExceededError(GoogleBaseError):
    """Raised when the API reports the 'QUOTA_EXCEEDED' error code."""
    pass
class ResourceExistsError(GoogleBaseError):
    """Raised when the API reports that the resource already exists."""
    pass
class ResourceInUseError(GoogleBaseError):
    """Raised when the API reports a 'RESOURCE_IN_USE*' error code."""
    pass
class GoogleResponse(JsonResponse):
    """
    Google Base Response class.

    All responses are tagged successful; error mapping to exception types
    happens in :meth:`parse_body`.
    """

    def success(self):
        """
        Determine if the request was successful.

        For the Google response class, tag all responses as successful and
        raise appropriate Exceptions from parse_body.

        :return: C{True}
        """
        return True

    def _get_error(self, body):
        """
        Get the error code and message from a JSON response.

        Return just the first error if there are multiple errors.

        :param body: The body of the JSON response dictionary
        :type body: ``dict``

        :return: Tuple containing error code and message
        :rtype: ``tuple`` of ``str`` or ``int`` (code may also be ``None``)
        """
        if 'errors' in body['error']:
            err = body['error']['errors'][0]
        else:
            err = body['error']

        if 'code' in err:
            code = err.get('code')
            message = err.get('message')
        else:
            code = None
            if 'reason' in err:
                code = err.get('reason')
            message = body.get('error_description', err)

        return (code, message)

    def parse_body(self):
        """
        Parse the JSON response body, or raise exceptions as appropriate.

        :return: JSON dictionary
        :rtype: ``dict``
        """
        if len(self.body) == 0 and not self.parse_zero_length_body:
            return self.body

        json_error = False
        try:
            body = json.loads(self.body)
        except Exception:
            # If there is both a JSON parsing error and an unsuccessful http
            # response (like a 404), we want to raise the http error and not
            # the JSON one, so don't raise JsonParseError here.
            body = self.body
            json_error = True

        valid_http_codes = [
            httplib.OK,
            httplib.CREATED,
            httplib.ACCEPTED,
            httplib.CONFLICT,
        ]
        if self.status in valid_http_codes:
            if json_error:
                raise JsonParseError(body, self.status, None)
            elif 'error' in body:
                (code, message) = self._get_error(body)
                if code == 'QUOTA_EXCEEDED':
                    raise QuotaExceededError(message, self.status, code)
                elif code == 'RESOURCE_ALREADY_EXISTS':
                    raise ResourceExistsError(message, self.status, code)
                elif code == 'alreadyExists':
                    raise ResourceExistsError(message, self.status, code)
                elif isinstance(code, str) and \
                        code.startswith('RESOURCE_IN_USE'):
                    # _get_error may return None or an int code; calling
                    # startswith on those raised AttributeError before.
                    raise ResourceInUseError(message, self.status, code)
                else:
                    raise GoogleBaseError(message, self.status, code)
            else:
                return body
        elif self.status == httplib.NOT_FOUND:
            if (not json_error) and ('error' in body):
                (code, message) = self._get_error(body)
            else:
                message = body
                code = None
            raise ResourceNotFoundError(message, self.status, code)
        elif self.status == httplib.BAD_REQUEST:
            if (not json_error) and ('error' in body):
                (code, message) = self._get_error(body)
            else:
                message = body
                code = None
            raise InvalidRequestError(message, self.status, code)
        else:
            if (not json_error) and ('error' in body):
                (code, message) = self._get_error(body)
            else:
                message = body
                code = None
            raise GoogleBaseError(message, self.status, code)
class GoogleBaseDriver(BaseDriver):
    """Minimal driver used by the Google auth/base connections."""
    name = "Google API"
class GoogleBaseAuthConnection(ConnectionUserAndKey):
    """
    Base class for Google Authentication. Should be subclassed for specific
    types of authentication.
    """
    driver = GoogleBaseDriver
    responseCls = GoogleResponse
    name = 'Google Auth'
    host = 'accounts.google.com'
    auth_path = '/o/oauth2/auth'
    def __init__(self, user_id, key=None, scopes=None,
                 redirect_uri='urn:ietf:wg:oauth:2.0:oob',
                 login_hint=None, **kwargs):
        """
        :param user_id: The email address (for service accounts) or Client ID
                        (for installed apps) to be used for authentication.
        :type user_id: ``str``
        :param key: The RSA Key (for service accounts) or file path containing
                    key or Client Secret (for installed apps) to be used for
                    authentication.
        :type key: ``str``
        :param scopes: A list of urls defining the scope of authentication
                       to grant.
        :type scopes: ``list``
        :keyword redirect_uri: The Redirect URI for the authentication
                               request. See Google OAUTH2 documentation for
                               more info.
        :type redirect_uri: ``str``
        :keyword login_hint: Login hint for authentication request. Useful
                             for Installed Application authentication.
        :type login_hint: ``str``
        """
        scopes = scopes or []
        # OAuth2 expects scopes as a single space-separated string
        self.scopes = " ".join(scopes)
        self.redirect_uri = redirect_uri
        self.login_hint = login_hint
        super(GoogleBaseAuthConnection, self).__init__(user_id, key, **kwargs)
    def add_default_headers(self, headers):
        """
        Add defaults for 'Content-Type' and 'Host' headers.
        """
        headers['Content-Type'] = "application/x-www-form-urlencoded"
        headers['Host'] = self.host
        return headers
    def _token_request(self, request_body):
        """
        Return an updated token from a token request body.

        :param request_body: A dictionary of values to send in the body of the
                             token request.
        :type request_body: ``dict``

        :return: A dictionary with updated token information
        :rtype: ``dict``
        """
        data = urlencode(request_body)
        try:
            response = self.request('/o/oauth2/token', method='POST',
                                    data=data)
        except AttributeError:
            raise GoogleAuthError('Invalid authorization response, please '
                                  'check your credentials and time drift.')
        token_info = response.object
        if 'expires_in' in token_info:
            # store an absolute expiry timestamp alongside the relative TTL
            expire_time = _utcnow() + datetime.timedelta(
                seconds=token_info['expires_in'])
            token_info['expire_time'] = _utc_timestamp(expire_time)
        return token_info
    def refresh_token(self, token_info):
        """
        Refresh the current token.

        Fetch an updated refresh token from internal metadata service.

        :param token_info: Dictionary containing token information.
                           (Not used, but here for compatibility)
        :type token_info: ``dict``

        :return: A dictionary containing updated token information.
        :rtype: ``dict``
        """
        # get_new_token() is supplied by the concrete subclass
        # pylint: disable=no-member
        return self.get_new_token()
class GoogleInstalledAppAuthConnection(GoogleBaseAuthConnection):
    """Authentication connection for "Installed Application" authentication."""
    def get_code(self):
        """
        Give the user a URL that they can visit to authenticate and obtain a
        code. This method will ask for that code that the user can paste in.

        Mocked in libcloud.test.common.google.GoogleTestCase.

        :return: Code supplied by the user after authenticating
        :rtype: ``str``
        """
        auth_params = {'response_type': 'code',
                       'client_id': self.user_id,
                       'redirect_uri': self.redirect_uri,
                       'scope': self.scopes,
                       'state': 'Libcloud Request'}
        if self.login_hint:
            auth_params['login_hint'] = self.login_hint
        data = urlencode(auth_params)
        url = 'https://%s%s?%s' % (self.host, self.auth_path, data)
        # Interactive flow: the user signs in in a browser, then pastes the
        # resulting one-time code back at the prompt below.
        print('\nPlease Go to the following URL and sign in:')
        print(url)
        if PY3:
            code = input('Enter Code: ')
        else:
            code = raw_input('Enter Code: ')  # NOQA pylint: disable=undefined-variable
        return code
    def get_new_token(self):
        """
        Get a new token. Generally used when no previous token exists or there
        is no refresh token

        :return: Dictionary containing token information
        :rtype: ``dict``
        """
        # Ask the user for a code
        code = self.get_code()
        token_request = {'code': code,
                         'client_id': self.user_id,
                         'client_secret': self.key,
                         'redirect_uri': self.redirect_uri,
                         'grant_type': 'authorization_code'}
        return self._token_request(token_request)
    def refresh_token(self, token_info):
        """
        Use the refresh token supplied in the token info to get a new token.

        :param token_info: Dictionary containing current token information
        :type token_info: ``dict``

        :return: A dictionary containing updated token information.
        :rtype: ``dict``
        """
        if 'refresh_token' not in token_info:
            return self.get_new_token()
        refresh_request = {'refresh_token': token_info['refresh_token'],
                           'client_id': self.user_id,
                           'client_secret': self.key,
                           'grant_type': 'refresh_token'}
        new_token = self._token_request(refresh_request)
        # Google does not resend the refresh token; carry the old one forward
        if 'refresh_token' not in new_token:
            new_token['refresh_token'] = token_info['refresh_token']
        return new_token
class GoogleServiceAcctAuthConnection(GoogleBaseAuthConnection):
    """Authentication class for "Service Account" authentication."""
    def __init__(self, user_id, key, *args, **kwargs):
        """
        Check to see if cryptography is available, and convert key file path
        into a key string if the key is in a file.

        :param user_id: Email address to be used for Service Account
                        authentication.
        :type user_id: ``str``

        :param key: The RSA Key or path to file containing the key.
        :type key: ``str``
        """
        if SHA256 is None:
            raise GoogleAuthError('cryptography library required for '
                                  'Service Account Authentication.')
        # Check to see if 'key' is a file and read the file if it is.
        if key.find("PRIVATE KEY---") == -1:
            # key is a file
            keypath = os.path.expanduser(key)
            is_file_path = os.path.exists(keypath) and os.path.isfile(keypath)
            if not is_file_path:
                raise ValueError("Missing (or not readable) key "
                                 "file: '%s'" % key)
            with open(keypath, 'r') as f:
                contents = f.read()
            try:
                # JSON key files from the Cloud Console hold the PEM key
                # under the 'private_key' entry
                key = json.loads(contents)
                key = key['private_key']
            except ValueError:
                # not JSON; assume the file body is the PEM key itself
                key = contents
        super(GoogleServiceAcctAuthConnection, self).__init__(
            user_id, key, *args, **kwargs)
    def get_new_token(self):
        """
        Get a new token using the email address and RSA Key.

        :return: Dictionary containing token information
        :rtype: ``dict``
        """
        # The header is always the same
        header = {'alg': 'RS256', 'typ': 'JWT'}
        header_enc = base64.urlsafe_b64encode(b(json.dumps(header)))
        # Construct a claim set
        claim_set = {'iss': self.user_id,
                     'scope': self.scopes,
                     'aud': 'https://accounts.google.com/o/oauth2/token',
                     'exp': int(time.time()) + 3600,
                     'iat': int(time.time())}
        claim_set_enc = base64.urlsafe_b64encode(b(json.dumps(claim_set)))
        # The message contains both the header and claim set
        message = b'.'.join((header_enc, claim_set_enc))
        # Then the message is signed using the key supplied
        key = serialization.load_pem_private_key(
            b(self.key),
            password=None,
            backend=default_backend()
        )
        signature = key.sign(
            data=b(message),
            padding=PKCS1v15(),
            algorithm=SHA256()
        )
        signature = base64.urlsafe_b64encode(signature)
        # Finally the message and signature are sent to get a token
        jwt = b'.'.join((message, signature))
        request = {'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
                   'assertion': jwt}
        return self._token_request(request)
class GoogleGCEServiceAcctAuthConnection(GoogleBaseAuthConnection):
    """Self-authentication for GCE instances that have a service account
    enabled (token is served by the internal metadata service).
    """
    def get_new_token(self):
        """
        Request a fresh access token from the internal metadata service.

        :return: Dictionary containing token information
        :rtype: ``dict``
        """
        path = '/instance/service-accounts/default/token'
        status, reason, token_info = _get_gce_metadata(path)
        if status == httplib.NOT_FOUND:
            raise ValueError("Service Accounts are not enabled for this "
                             "GCE instance.")
        if status != httplib.OK:
            raise ValueError("Internal GCE Authorization failed: "
                             "'%s'" % str(reason))
        token_info = json.loads(token_info)
        if 'expires_in' in token_info:
            # record an absolute expiry timestamp next to the relative TTL
            expiry = _utcnow() + datetime.timedelta(
                seconds=token_info['expires_in'])
            token_info['expire_time'] = _utc_timestamp(expiry)
        return token_info
class GoogleAuthType(object):
    """
    Supported authentication mechanisms:

    SA (Service Account),
    IA (Installed Application),
    GCE (Auth from a GCE instance with service account enabled)
    GCS_S3 (Cloud Storage S3 interoperability authentication)
    """
    SA = 'SA'
    IA = 'IA'
    GCE = 'GCE'
    GCS_S3 = 'GCS_S3'

    ALL_TYPES = [SA, IA, GCE, GCS_S3]
    OAUTH2_TYPES = [SA, IA, GCE]

    @classmethod
    def guess_type(cls, user_id):
        """Infer the auth type from ``user_id`` and the runtime environment."""
        if cls._is_sa(user_id):
            return cls.SA
        if cls._is_gcs_s3(user_id):
            return cls.GCS_S3
        return cls.GCE if cls._is_gce() else cls.IA

    @classmethod
    def is_oauth2(cls, auth_type):
        """Return True when ``auth_type`` uses the OAuth2 flow."""
        return auth_type in cls.OAUTH2_TYPES

    @staticmethod
    def _is_gce():
        """
        Checks if we can access the GCE metadata server.

        Mocked in libcloud.test.common.google.GoogleTestCase.
        """
        status, _, body = _get_gce_metadata()
        return status == httplib.OK and bool(body)

    @staticmethod
    def _is_gcs_s3(user_id):
        """GCS S3-interoperability keys are alphanumeric, starting 'GOOG'."""
        return user_id.startswith('GOOG')

    @staticmethod
    def _is_sa(user_id):
        """Service account e-mails end with '.gserviceaccount.com'."""
        return user_id.endswith('.gserviceaccount.com')
class GoogleOAuth2Credential(object):
    """Obtains, caches (on disk) and transparently refreshes OAuth2 tokens."""
    # token cache path; the user_id is appended to make it per-account
    default_credential_file = '~/.google_libcloud_auth'
    def __init__(self, user_id, key, auth_type=None, credential_file=None,
                 scopes=None, **kwargs):
        """
        :param user_id: Service account e-mail or Installed App client ID.
        :param key: RSA key / key file path (SA) or client secret (IA).
        :keyword auth_type: One of GoogleAuthType's OAuth2 types; guessed
                            from ``user_id`` when not supplied.
        :keyword credential_file: Override for the on-disk token cache path.
        :keyword scopes: OAuth2 scope URLs; defaults to compute/storage/dns.
        """
        self.auth_type = auth_type or GoogleAuthType.guess_type(user_id)
        if self.auth_type not in GoogleAuthType.ALL_TYPES:
            raise GoogleAuthError('Invalid auth type: %s' % self.auth_type)
        if not GoogleAuthType.is_oauth2(self.auth_type):
            raise GoogleAuthError(('Auth type %s cannot be used with OAuth2' %
                                   self.auth_type))
        self.user_id = user_id
        self.key = key
        default_credential_file = '.'.join([self.default_credential_file,
                                            user_id])
        self.credential_file = credential_file or default_credential_file
        # Default scopes to read/write for compute, storage, and dns.
        self.scopes = scopes or [
            'https://www.googleapis.com/auth/compute',
            'https://www.googleapis.com/auth/devstorage.full_control',
            'https://www.googleapis.com/auth/ndev.clouddns.readwrite',
        ]
        # Try the on-disk cache first; fall back to a fresh token below.
        self.token = self._get_token_from_file()
        if self.auth_type == GoogleAuthType.GCE:
            self.oauth2_conn = GoogleGCEServiceAcctAuthConnection(
                self.user_id, self.scopes, **kwargs)
        elif self.auth_type == GoogleAuthType.SA:
            self.oauth2_conn = GoogleServiceAcctAuthConnection(
                self.user_id, self.key, self.scopes, **kwargs)
        elif self.auth_type == GoogleAuthType.IA:
            self.oauth2_conn = GoogleInstalledAppAuthConnection(
                self.user_id, self.key, self.scopes, **kwargs)
        else:
            raise GoogleAuthError('Invalid auth_type: %s' %
                                  str(self.auth_type))
        if self.token is None:
            self.token = self.oauth2_conn.get_new_token()
            self._write_token_to_file()
    @property
    def access_token(self):
        """Return a valid access token, refreshing it first if expired."""
        if self.token_expire_utc_datetime < _utcnow():
            self._refresh_token()
        return self.token['access_token']
    @property
    def token_expire_utc_datetime(self):
        """Expiry of the current token as a naive UTC datetime."""
        return _from_utc_timestamp(self.token['expire_time'])
    def _refresh_token(self):
        # refresh via the auth connection and persist the new token
        self.token = self.oauth2_conn.refresh_token(self.token)
        self._write_token_to_file()
    def _get_token_from_file(self):
        """
        Read credential file and return token information.

        Mocked in libcloud.test.common.google.GoogleTestCase.

        :return: Token information dictionary, or None
        :rtype: ``dict`` or ``None``
        """
        token = None
        filename = os.path.realpath(os.path.expanduser(self.credential_file))
        try:
            with open(filename, 'r') as f:
                data = f.read()
            token = json.loads(data)
        except (IOError, ValueError) as e:
            # Note: File related errors (IOError) and errors related to json
            # parsing of the data (ValueError) are not fatal.
            LOG.info('Failed to read cached auth token from file "%s": %s',
                     filename, str(e))
        return token
    def _write_token_to_file(self):
        """
        Write token to credential file.

        Mocked in libcloud.test.common.google.GoogleTestCase.
        """
        filename = os.path.expanduser(self.credential_file)
        filename = os.path.realpath(filename)
        try:
            data = json.dumps(self.token)
            # 0600 permissions: the token grants account access
            write_flags = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
            with os.fdopen(os.open(filename, write_flags,
                                   int('600', 8)), 'w') as f:
                f.write(data)
        except Exception as e:
            # Note: Failure to write (cache) token in a file is not fatal. It
            # simply means degraded performance since we will need to acquire a
            # new token each time script runs.
            LOG.info('Failed to write auth token to file "%s": %s',
                     filename, str(e))
class GoogleBaseConnection(ConnectionUserAndKey, PollingConnection):
    """Base connection class for interacting with Google APIs."""
    driver = GoogleBaseDriver
    responseCls = GoogleResponse
    host = 'www.googleapis.com'
    poll_interval = 2.0
    timeout = 180
    def __init__(self, user_id, key=None, auth_type=None,
                 credential_file=None, scopes=None, **kwargs):
        """
        Determine authentication type, set up appropriate authentication
        connection and get initial authentication information.

        :param user_id: The email address (for service accounts) or Client ID
                        (for installed apps) to be used for authentication.
        :type user_id: ``str``

        :param key: The RSA Key (for service accounts) or file path containing
                    key or Client Secret (for installed apps) to be used for
                    authentication.
        :type key: ``str``

        :keyword auth_type: See GoogleAuthType class for list and description
                            of accepted values.
                            If not supplied, auth_type will be guessed based
                            on value of user_id or if the code is running
                            on a GCE instance.
        :type auth_type: ``str``

        :keyword credential_file: Path to file for caching authentication
                                  information.
        :type credential_file: ``str``

        :keyword scopes: List of OAuth2 scope URLs. The empty default sets
                         read/write access to Compute, Storage, and DNS.
        :type scopes: ``list``
        """
        super(GoogleBaseConnection, self).__init__(user_id, key, **kwargs)
        self.oauth2_credential = GoogleOAuth2Credential(
            user_id, key, auth_type, credential_file, scopes, **kwargs)
        # advertise the Python version/platform in the User-Agent
        python_ver = '%s.%s.%s' % (sys.version_info[0], sys.version_info[1],
                                   sys.version_info[2])
        ver_platform = 'Python %s/%s' % (python_ver, sys.platform)
        self.user_agent_append(ver_platform)
    def add_default_headers(self, headers):
        """
        @inherits: :class:`Connection.add_default_headers`
        """
        headers['Content-Type'] = 'application/json'
        headers['Host'] = self.host
        return headers
    def pre_connect_hook(self, params, headers):
        """
        Check to make sure that token hasn't expired. If it has, get an
        updated token. Also, add the token to the headers.

        @inherits: :class:`Connection.pre_connect_hook`
        """
        # access_token refreshes itself when expired
        headers['Authorization'] = ('Bearer ' +
                                    self.oauth2_credential.access_token)
        return params, headers
    def encode_data(self, data):
        """Encode data to JSON"""
        return json.dumps(data)
    def request(self, *args, **kwargs):
        """
        @inherits: :class:`Connection.request`
        """
        # Adds some retry logic for the occasional
        # "Connection Reset by peer" error.
        retries = 4
        tries = 0
        while tries < (retries - 1):
            try:
                return super(GoogleBaseConnection, self).request(
                    *args, **kwargs)
            except socket.error as e:
                if e.errno == errno.ECONNRESET:
                    tries = tries + 1
                else:
                    raise e
        # One more time, then give up.
        return super(GoogleBaseConnection, self).request(*args, **kwargs)
    def has_completed(self, response):
        """
        Determine if operation has completed based on response.

        :param response: JSON response
        :type response: I{responseCls}

        :return: True if complete, False otherwise
        :rtype: ``bool``
        """
        if response.object['status'] == 'DONE':
            return True
        else:
            return False
    def get_poll_request_kwargs(self, response, context, request_kwargs):
        """
        @inherits: :class:`PollingConnection.get_poll_request_kwargs`
        """
        # poll the operation's own URL until it reports DONE
        return {'action': response.object['selfLink']}
    def morph_action_hook(self, action):
        """
        Update action to correct request path.

        In many places, the Google API returns a full URL to a resource.
        This will strip the scheme and host off of the path and just return
        the request. Otherwise, it will prepend the base request_path to
        the action.

        :param action: The action to be called in the http request
        :type action: ``str``

        :return: The modified request based on the action
        :rtype: ``str``
        """
        if action.startswith('https://'):
            u = urlparse.urlsplit(action)
            request = urlparse.urlunsplit(('', '', u[2], u[3], u[4]))
        else:
            request = self.request_path + action
        return request
| mistio/libcloud | libcloud/common/google.py | Python | apache-2.0 | 30,831 | [
"VisIt"
] | 98ade432fd6181605fd50ad306c7f9ad88fbb791e8721bd81681ed8c2741e585 |
import numpy as np
import scipy.signal as signal
from scipy.special import erf
from scipy.interpolate import interp1d
import matplotlib.mlab as ml
from ..misc import nextpow2
from ..simulate import SimLightcurve
from math import factorial
def estimate_noise_ps(lightcurve, estfrac=0.5):
    """
    Use the high frequency part of the power spectrum of a light curve
    to estimate the time domain noise standard deviation of the
    data. This avoids the estimate being contaminated by low-frequency lines
    and flare signals.

    Parameters
    ----------
    lightcurve : :class:`.Lightcurve`
        A :class:`.Lightcurve` instance containing the time series data.
    estfrac : float, optional, default: 0.5
        The fraction of the spectrum (from the high frequency end)
        with which to estimate the noise. The default is 0.5 i.e. use
        the final half of the spectrum for the estimation.

    Returns
    -------
    sqrt(sk) : float
        The noise standard deviation
    sk : float
        The noise variance
    noise_v : :class:`numpy.array`
        A vector of noise variance values
    """
    # get the power spectrum of the lightcurve data
    sk, f = lightcurve.psd()
    # mean of the high-frequency `estfrac` fraction of the spectrum.
    # The slice index must be an int: passing np.floor's float directly
    # raises a TypeError on modern numpy.
    sk = np.mean(sk[int(np.floor((1. - estfrac) * len(sk))):])
    # scale to give noise variance
    sk = sk * lightcurve.fs() / 2.
    noise_v = np.ones(nextpow2(2 * len(lightcurve.clc) - 1)) * sk
    return np.sqrt(sk), sk, noise_v
def estimate_noise_tv(d, sigma=1.0):
    """
    Estimate the noise level of a time series while ignoring large outliers.

    The cumulative distribution of the data is built and only the central
    probability mass covered by ``+/- sigma`` of a Gaussian is used, so the
    tails (which contain the outliers) do not bias the result. This is
    mainly suitable for data whose underlying noise is Gaussian.

    Parameters
    ----------
    d : array-like
        The time series of data (either a :class:`numpy.array` or a list).
    sigma : float
        The number of standard deviations giving the cumulative probability
        to be included in the noise calculation e.g. if sigma=1 then the
        central 68% of the cumulative probability distribution is used.

    Returns
    -------
    std : float
        The noise standard deviation
    mean : float
        The value at the middle of the distribution
    """
    nbins = len(d)
    # normalised histogram and the centre of each bin
    counts, edges = np.histogram(d, bins=nbins, density=True)
    centres = (edges[:-1] + edges[1:]) / 2.
    # cumulative probability at each bin centre
    cdf = np.cumsum(counts * (edges[1] - edges[0]))
    # keep unique CDF values only, so the interpolation is well defined
    cdf_u, keep = np.unique(cdf, return_index=True)
    centres_u = centres[keep]
    # probability mass inside +/- sigma of a Gaussian
    prob = erf(sigma / np.sqrt(2.))
    invcdf = interp1d(cdf_u, centres_u)
    # data values bounding the central `prob` mass, and the mid-point
    lower = invcdf(0.5 - prob / 2.)
    upper = invcdf(0.5 + prob / 2.)
    middle = invcdf(0.5)
    std = (upper - lower) / (2. * sigma)
    return std, middle
def addNoise(z, mean, stdev):
    """
    Adds gaussian noise to the time series.

    Parameters
    ----------
    z : :class:`numpy.ndarray`
        An array containing a time series.
    mean : float
        The mean of the desired noise.
    stdev : float
        The standard deviation of the desired noise.

    Returns
    -------
    z : :class:`numpy.ndarray`
        The input time series with added Gaussian noise (modified in place).
    """
    # NOTE: previously the import preceded the docstring, so the docstring
    # above was never actually attached to the function.
    import random
    z += [random.gauss(mean, stdev) for _ in range(len(z))]
    return z
def make_noise_lightcurve(dt=1765.55929, length=33.5, sigma=0.5, mean=1):
    """
    Produce a time series of gaussian noise, which can be used to
    estimate confidence thresholds.

    Parameters
    ----------
    dt : float, optional
        The sample time of the required data in seconds.
        Default is 1765.55929, the sample time of the quarter 1
        *Kepler* data. (This argument used to be silently overwritten
        inside the function; it is now honoured.)
    length : float, optional
        The number of days long the required data should be.
        Default is 33.5, the length of the quarter 1 *Kepler* data.
    sigma : float, optional
        The standard deviation of the noise in the time series.
        Default is 0.5.
    mean : float, optional
        The mean of the noise in the time series.
        The default is 1.

    Returns
    -------
    a : :class:`.Lightcurve`
        The generated light curve.
    """
    secsperday = 86400
    x = np.arange(0, length * secsperday, dt)  # time stamps in seconds
    z = np.zeros_like(x)   # the data array
    ze = np.zeros_like(x)  # the data error array
    # Add Gaussian noise
    z = addNoise(z, mean, sigma)
    x = x / secsperday  # convert time stamps to days
    a = SimLightcurve()
    a.sigma = sigma
    a.ts.append(x)
    a.lc.append(z)
    a.le.append(ze)
    a.combine()
    return a
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    """
    Smooth (and optionally differentiate) data with a Savitzky-Golay filter.

    The Savitzky-Golay filter removes high frequency noise from data.
    It has the advantage of preserving the original shape and
    features of the signal better than other types of filtering
    approaches, such as moving averages techniques. This implementation is
    taken from [3]_.

    Parameters
    ----------
    y : array_like, shape (N,)
        The values of the time history of the signal.
    window_size : int
        The length of the window. Must be an odd integer number.
    order : int
        The order of the polynomial used in the filtering.
        Must be less then `window_size` - 1.
    deriv: int, default: 0
        the order of the derivative to compute (default = 0 means only
        smoothing)

    Returns
    -------
    ys : :class:`numpy.ndarray`, shape (N)
        the smoothed signal (or it's n-th derivative).

    Notes
    -----
    The Savitzky-Golay is a type of low-pass filter, particularly
    suited for smoothing noisy data. The main idea behind this
    approach is to make for each point a least-square fit with a
    polynomial of high order over a odd-sized window centered at
    the point.

    Examples
    --------
    >>> t = np.linspace(-4, 4, 500)
    >>> y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
    >>> ysg = savitzky_golay(y, window_size=31, order=4)

    References
    ----------
    .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
       Data by Simplified Least Squares Procedures. Analytical
       Chemistry, 1964, 36 (8), pp 1627-1639.
    .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
       W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
       Cambridge University Press ISBN-13: 9780521880688
    .. [3] http://wiki.scipy.org/Cookbook/SavitzkyGolay
    """
    try:
        # np.int was removed from numpy (>=1.24); the builtin int keeps the
        # original intent and still raises ValueError for bad input.
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = list(range(order + 1))
    half_window = (window_size - 1) // 2
    # precompute coefficients; use a plain ndarray design matrix rather than
    # the deprecated np.mat/np.matrix API
    b = np.array([[k**i for i in order_range]
                  for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * rate**deriv * factorial(deriv)
    # pad the signal at the extremes with
    # values taken from the signal itself
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
def highpass_filter_lightcurve(lightcurve, knee=(1./(0.3*86400.))):
    """
    Detrends a light curve by high-pass filtering it using a third order Butterworth
    filter (:func:`scipy.signal.butter`).
    Parameters
    -----------
    lightcurve : light curve object
        An object providing `clc` (the time series data as a
        :class:`numpy.ndarray`) and a `dt()` method returning the sample
        interval in seconds.
    knee : float, optional, default: 3.858e-05
        The high-pass filter knee frequency in Hz (default is 3.858e-05 Hz or (1/0.3)/day).
    Returns
    -------
    z : :class:`numpy.ndarray`
        An array which contains the high-pass-filtered time series.
    Raises
    ------
    NameError
        If the light curve's sample interval is not positive.  (NameError is
        kept for backward compatibility with existing callers.)
    """
    z = lightcurve.clc
    dt = lightcurve.dt()
    if dt <= 0:
        # preserve the historical exception type, but give it a message
        raise NameError("light curve sample interval must be positive")
    # Normalized cutoff for scipy.signal.butter: knee divided by Nyquist (1/(2*dt)).
    highcut = knee / (1. / (2. * dt))
    # Prepend the reversed series so the filter's start-up transient lands in
    # the mirrored half instead of corrupting the real data (phase-offset trick).
    zd = np.concatenate((z[::-1], z))
    b, a = signal.butter(3, highcut, btype='highpass')
    y = signal.lfilter(b, a, zd)
    # Keep only the second (real) half.  The index must be an int: the original
    # used np.floor(), which returns a float and is rejected as an index by
    # modern NumPy.  (The unused reads of lightcurve.cts and lightcurve.fs()
    # in the original were dropped.)
    return y[int(len(y) // 2):]
def running_median(y, window):
    """
    Compute a running (moving) median of a data time series, e.g. for use in
    detrending by subtraction.
    Parameters
    ----------
    y : :class:`numpy.ndarray`
        A 1D array containing the data time series.
    window : int
        The number of time bins to use for the running median. At edges
        the window will be shorter.
    Returns
    -------
    ffit : :class:`numpy.ndarray`
        A 1D array (same length as *y*) containing the running median of the
        data time series.
    """
    y = np.asarray(y)
    n = len(y)
    halfwin = int(window / 2)
    # Preallocate the output: the original grew the array with np.append inside
    # the loop, copying the whole result each iteration (O(n^2) overall).
    ffit = np.empty(n, dtype=float)
    for i in range(n):
        # Same (exclusive) window as the original boolean-mask version:
        # indices j with i - halfwin < j < i + halfwin, clipped to the array.
        lo = max(0, i - halfwin + 1)
        hi = min(n, i + halfwin)
        ffit[i] = np.median(y[lo:hi])
    return ffit
| BayesFlare/bayesflare | bayesflare/noise/noise.py | Python | gpl-2.0 | 10,350 | [
"Gaussian"
] | b4309b6d40570799fa0f42d775e470b81ce17d94d250b3c456154346211a69f5 |
# -*- coding: utf-8 -*-
"""
pbm_ImageAnalysis is an analysis module for ACQ4.
This module provides:
1. Bleaching correction of image stacks
2. Normalization of image stacks
3. ROI's on Z-stacks (or T-stacks), including saving and retrieving the ROI files
(the format is the same as in PyImageAnalysis - simple text file)
4. Display of simultaneously recorded physiology:
simple spike detection (on cell, intracellular)
5. Cross-correlation of ROI signals in the imaging data (pairwise), and some
display of the results
6. Cross-correlation of ROI and spike trains.
Fall, 2011
Jan, 2012.
Paul B. Manis, Ph.D.
UNC Chapel Hill
Supported by NIH/NIDCD Grants:
DC004551 (Cellular mechanisms of auditory information processing)
DC000425 (Physiology of the Dorsal Cochlear Nucleus Molecular Layer)
DC009809 (Auditory Cortex: Synaptic organization and plasticity)
Has potential dependency on openCV for some functions.
"""
from PyQt4 import QtGui, QtCore
from acq4.analysis.AnalysisModule import AnalysisModule
from collections import OrderedDict
import os
import shutil
import csv
import os.path
import pickle
import acq4.pyqtgraph as pg
import acq4.pyqtgraph.debug as debug
import acq4.util.DatabaseGui as DatabaseGui
import PIL as Image
from acq4.util.metaarray import MetaArray
import numpy as np
import scipy
import ctrlTemplate
import ctrlROIsTemplate
import ctrlAnalysisTemplate
import ctrlPhysiologyTemplate
from acq4.analysis.tools import Utility
from acq4.analysis.tools import Fitting
from acq4.analysis.tools import PlotHelpers as PH # matlab plotting helpers
from acq4.util import functions as FN
from acq4.util.HelpfulException import HelpfulException
from acq4.devices.Scanner.scan_program import rect
try:
import cv2
#import cv2.cv as cv
openCVInstalled = True
except:
openCVInstalled = False
#import smc as SMC # Vogelstein's OOPSI analysis for calcium transients
import pylab as PL
#from mpl_toolkits.axes_grid1 import AxesGrid
#
# We use matplotlib/pylab for *some* figure generation.
#
class pbm_ImageAnalysis(AnalysisModule):
    def __init__(self, host, flowchartDir=None, dbIdentity="ImageAnalysis"):
        """
        Build the image-analysis module: initialize per-instance analysis
        state, construct the four Qt control panels (imaging parameters, ROI,
        analysis, physiology), declare the ACQ4 window element layout, and
        connect every GUI control to its handler method.

        :param host: the ACQ4 host application (passed to AnalysisModule).
        :param flowchartDir: accepted for AnalysisModule signature
            compatibility; not used here.
        :param dbIdentity: identity string used for database access.
        """
        AnalysisModule.__init__(self, host)
        self.dbIdentity = dbIdentity
        # per-instance parameters:
        self.currentDataDirectory = None # currently selected data directory (if valid)
        self.refImage = None # Reference image data used for ratio calculations
        # This image may come from a separate file or a calculation on the present file
        self.physData = None # physiology data associated with the current image
        self.dataStruct = 'flat' # 'flat' or 'interleaved' are valid at present.
        self.imageInfo = []
        self.ignoreFirst = 1 # ImagePhys_ignoreFirst # note this is a number of images, not T/F
        self.rectSelect = True # rubber-band (rect) selection mode in the plots
        self.tStart = 0.0 # baseline time start = applies to the image: ImagePhys_BaseStart
        self.tEnd = 50.0 # baseline time end (msec) : ImagePhys_BaseEnd
        self.imageLPF = 0.0 # low pass filter of the image data, Hz: ImagePhys_ImgLPF
        self.physLPF = 0.0 # low pass filter of the physiology data, Hz (0 = no filtering): ImagePhys_PhysLPF
        self.physLPFChanged = False # flag in case the physiology LPF changes (avoid recalculation)
        # self.physSign = 0.0 # ImagePhys_PhysSign (detection sign for events)
        self.physThresh = -50.0 # ImagePhys_PhysThresh (threshold in pA to detect events)
        self.physThreshLine = None
        self.ratioImages = False # only set true once a ratio (reference) image is loaded
        self.ROIfig = None
        self.baseImages = []
        self.viewFlag = False # false if viewing movie, true if viewing fixed image
        self.referenceImage = []
        self.ratioImage = None
        self.useRatio = False
        self.AllRois = []
        self.nROI = 0 # count of ROI's in the window
        self.rois = []
        self.currentRoi = None
        self.imageData = np.array(None) # Image Data array, information about the data is in the dataState dictionary
        self.lastROITouched=[]
        self.spikesFound = None
        self.burstsFound = None
        self.spikeTimes = []
        self.burstTimes = []
        self.specImage = []
        self.specImageCalcFlag = False
        self.stdImage = []
        self.avgImage = []
        self.imageType = 'camera' # frames for camera (all pixels simultaneous); scanner for scanner (need scan timing)
        self.analogMode = True # if false, we are using digital mode.
        self.csvFileName = None
        self.csvData = None
        self.spikesFoundpk = None
        self.withinBurstsFound = None
        self.FData = []
        self.MPLFig = None # We keep one instance of a matplotlib figure, create and destroy as needed
        self.floatingWindow = None # one instance of a pyqtgraph window that floats.
        self.pgwin = None
        # ------ Graphical Elements ------
        # Each control panel is a plain QWidget with a Ui_Form (from the
        # corresponding *Template module) set up on it.
        self._sizeHint = (1280, 900) # try to establish size of window
        self.ctrlWidget = QtGui.QWidget()
        self.ctrl = ctrlTemplate.Ui_Form()
        self.ctrl.setupUi(self.ctrlWidget)
        self.ctrlROIFuncWidget = QtGui.QWidget()
        self.ctrlROIFunc = ctrlROIsTemplate.Ui_Form()
        self.ctrlROIFunc.setupUi(self.ctrlROIFuncWidget)
        self.ctrlImageFuncWidget = QtGui.QWidget()
        self.ctrlImageFunc = ctrlAnalysisTemplate.Ui_Form()
        self.ctrlImageFunc.setupUi(self.ctrlImageFuncWidget)
        self.ctrlPhysFuncWidget = QtGui.QWidget()
        self.ctrlPhysFunc = ctrlPhysiologyTemplate.Ui_Form()
        self.ctrlPhysFunc.setupUi(self.ctrlPhysFuncWidget)
        self.initDataState()
        self.RGB = Utility.makeRGB()
        ## Setup basic GUI: window element layout handed to initializeElements()
        self._elements_ = OrderedDict([
            ('File Loader', {'type': 'fileInput', 'size': (150, 300), 'host': self, 'showFileTree': True}),
            ('Image', {'type': 'imageView', 'pos': ('right', 'File Loader'), 'size': (500, 500)}),
            ('Analysis', {'type': 'ctrl', 'object': self.ctrlImageFuncWidget, 'host': self, 'size': (150,300)}),
            ('Physiology', {'type': 'ctrl', 'object': self.ctrlPhysFuncWidget, 'pos' : ('above', 'Analysis'), 'size': (150,300)}),
            ('ROI', {'type': 'ctrl', 'object': self.ctrlROIFuncWidget, 'pos' : ('above', 'Physiology'), 'size': (150,300)}),
            ('Imaging Parameters', {'type': 'ctrl', 'object': self.ctrlWidget, 'pos' : ('above', 'ROI'), 'size': (150,300)}),
            ('Background Plot', {'type': 'plot', 'pos': ('right', 'Imaging Parameters'),'size': (1000, 100)}),
            ('ROI Plot', {'type': 'plot', 'pos': ('bottom', 'Background Plot'),'size': (1000, 300)}),
            ('Phys Plot', {'type': 'plot', 'pos': ('bottom', 'ROI Plot'),'size': (1000, 300)}),
            # ('Line Scan', {'type': 'imageView', 'size': (1000, 300)}),
            #('Data Table', {'type': 'table', 'pos': ('below', 'Time Plot')}),
        ])
        self.initializeElements()
        self.ctrl.ImagePhys_RectSelect.stateChanged.connect(self.updateRectSelect)
        self.ctrl.ImagePhys_Update.clicked.connect(self.updateAnalysis)
        self.ROI_Plot = self.getElement('ROI Plot', create=True)
        self.backgroundPlot = self.getElement('Background Plot', create=True)
        self.physPlot = self.getElement('Phys Plot', create = True)
        self.lr = pg.LinearRegionItem([0, 1])
        # self.ROI_Plot.addItem(self.lr)
        self.updateRectSelect()
        self.ROI_Plot.plotItem.vb.setXLink('Phys Plot') # not sure - this seems to be at the wrong level in the window manager
        self.imageView = self.getElement('Image', create=True)
        self.imageItem = self.imageView.imageItem
        self.fileLoaderInstance = self.getElement('File Loader', create=True)
        # Plots are updated when the selected region changes
        self.lr.sigRegionChanged.connect(self.updateAnalysis)
        self.imageView.sigProcessingChanged.connect(self.processData)
        # main image processing buttons
        self.ctrl.ImagePhys_getRatio.clicked.connect(self.loadRatioImage)
        self.ctrl.ImagePhys_clearRatio.clicked.connect(self.clearRatioImage)
        self.ctrl.ImagePhys_ImgNormalize.clicked.connect(self.doNormalize)
        self.ctrl.ImagePhys_View.currentIndexChanged.connect(self.changeView)
        self.ctrl.ImagePhys_GetFileInfo.clicked.connect(self.getFileInfo)
        self.ctrl.ImagePhys_RegisterStack.clicked.connect(self.RegisterStack)
        self.ctrl.ImagePhys_DisplayTraces.clicked.connect(self.makeROIDataFigure)
        self.ctrl.ImagePhys_ExportTiff.clicked.connect(self.ExportTiff)
        self.ctrl.ImagePhys_PhysROIPlot.toggled.connect(self.setupPhysROIPlot)
        # PMT scan data adjustments
        self.ctrl.ImagePhys_Restore_decomb.clicked.connect(self.restoreDecomb)
        self.ctrl.ImagePhys_PMT_decomb.valueChanged.connect(self.processPMT)
        self.ctrl.ImagePhys_PMT_autoButton.clicked.connect(self.processPMT)
        # ROI function buttons and controls
        self.ctrlROIFunc.ImagePhys_addRoi.clicked.connect(self.addOneROI)
        self.ctrlROIFunc.ImagePhys_clearRoi.clicked.connect(self.clearAllROI)
        self.ctrlROIFunc.ImagePhys_UnBleach.clicked.connect(self.unbleachImage)
        self.ctrlROIFunc.ImagePhys_SpecCalc.clicked.connect(self.spectrumCalc)
        self.ctrlROIFunc.ImagePhys_RecalculateROIs.clicked.connect(self.calculateAllROIs)
        self.ctrlROIFunc.ImagePhys_RetrieveROI.clicked.connect(self.restoreROI)
        self.ctrlROIFunc.ImagePhys_SaveROI.clicked.connect(self.saveROI)
        self.ctrlROIFunc.ImagePhys_findROIs.clicked.connect(self.findROIs)
        # self.ctrl.ImagePhys_CorrTool_BL1.clicked.connect(self.Baseline1) # these are checkboxes now...
        self.ctrlROIFunc.ImagePhys_CorrTool_HPF.stateChanged.connect(self.refilterCurrentROI) # corr tool is the checkbox
        self.ctrlROIFunc.ImagePhys_CorrTool_LPF.stateChanged.connect(self.refilterCurrentROI)
        self.ctrlROIFunc.ImagePhys_ImgHPF.editingFinished.connect(self.refilterCurrentROI) # ImgHPF is the is the spinbox
        self.ctrlROIFunc.ImagePhys_ImgLPF.editingFinished.connect(self.refilterCurrentROI)
        # Physiology analysis buttons and controls
        self.ctrlPhysFunc.ImagePhys_DetectSpikes.clicked.connect(self.detectSpikes)
        self.ctrlPhysFunc.ImagePhys_PhysThresh.valueChanged.connect(self.showPhysTrigger)
        #self.ctrlPhysFunc.ImagePhysFuncs_RevSTA.clicked.connect(self.RevSTA)
        self.ctrlPhysFunc.ImagePhys_STA.clicked.connect(self.computeSTA)
        self.ctrlPhysFunc.ImagePhys_BTA.clicked.connect(self.computeBTA)
        self.ctrlPhysFunc.ImagePhys_PhysLPF.valueChanged.connect(self.physLPF_valueChanged)
        #
        # Imaging analysis buttons
        #
        self.ctrlImageFunc.IAFuncs_Distance.clicked.connect(self.ROIDistances)
        self.ctrlImageFunc.IAFuncs_DistanceStrength.clicked.connect(self.ROIDistStrength)
        self.ctrlImageFunc.IAFuncs_NetworkGraph.clicked.connect(self.NetworkGraph)
        self.ctrlImageFunc.IAFuncs_Analysis_AXCorr_Individual.clicked.connect(self.Analog_Xcorr_Individual)
        self.ctrlImageFunc.IAFuncs_Analysis_AXCorr.clicked.connect(self.Analog_Xcorr)
        self.ctrlImageFunc.IAFuncs_Analysis_UnbiasedXC.clicked.connect(self.Analog_Xcorr_unbiased)
        self.ctrlImageFunc.IAFuncs_DistanceStrengthPrint.clicked.connect(self.printDistStrength)
        self.ctrlImageFunc.IAFuncs_AnalogRadioBtn.clicked.connect(self.setAnalogMode)
        self.ctrlImageFunc.IAFuncs_DigitalRadioBtn.clicked.connect(self.setDigitalMode)
        self.ctrlImageFunc.IAFuncs_GetCSVFile.clicked.connect(self.getCSVFile)
def initDataState(self):
"""
Create clean data State (self.dataState) for new files
:return nothing:
"""
self.dataState = {'Loaded': False, 'bleachCorrection': False, 'Normalized': False,
'NType' : None, 'Structure': 'Flat', 'NTrials': 0, 'ratioLoaded': False}
self.ctrlROIFunc.ImagePhys_BleachInfo.setText('None')
self.ctrl.ImagePhys_NormInfo.setText('None')
self.IXC_Strength = []
self.ROIDistanceMap = []
self.tc_bleach = []
def setAnalogMode(self):
"""
:return:
"""
self.analogMode = True
self.ctrlImageFunc.IA_Funcs.AnalogRadioBtn.checked(True)
self.ctrlImageFunc.IA_Funcs.DigitalRadioBtn.checked(False)
def setDigitalMode(self):
self.digitalMode = False
self.ctrlImageFunc.IA_Funcs.AnalogRadioBtn.checked(False)
self.ctrlImageFunc.IA_Funcs.DigitalRadioBtn.checked(True)
def updateRectSelect(self):
self.rectSelect = self.ctrl.ImagePhys_RectSelect.isChecked()
if self.rectSelect:
self.ROI_Plot.plotItem.vb.setLeftButtonAction(mode='rect') # use the rubber band box instead
self.physPlot.plotItem.vb.setLeftButtonAction(mode='rect') # use the rubber band box instead
else:
self.ROI_Plot.plotItem.vb.setLeftButtonAction(mode='pan') # use the standard pan mode instead
self.physPlot.plotItem.vb.setLeftButtonAction(mode='pan') # use the standard pan modeinstead
def changeView(self):
view = self.ctrl.ImagePhys_View.currentText()
if self.dataState['ratioLoaded'] is True:
if view == 'Ratio Image':
self.imageView.setImage(self.ratioImage)
self.viewFlag = True
if self.dataState['Loaded'] is False:
return # no data - so skip this.
if view == 'Reference Image':
self.imageView.setImage(np.mean(self.imageData[self.baseImages, :, :], axis=0))
self.viewFlag = True
if view == 'Average Image':
self.imageView.setImage(self.aveImage)
if view == 'Std Image':
self.imageView.setImage(self.stdImage)
if view == 'Spectrum Image':
self.imageView.setImage(self.specImageDisplay)
if view == 'Movie':
self.imageView.setImage(self.imageData)
self.viewFlag = False
    def processData(self):
        """
        Rebuild self.normData and self.imageData from self.rawData by applying
        the imageView's current normalization to every frame of every trial.
        For each raw stack, appends the normalized stack to self.normData and
        a dict of per-pixel mean/std images ('procMean', 'procStd') to
        self.imageData.
        """
        self.normData = []
        self.imageData = []
        print 'in processData...'
        for img in self.rawData:
            print 'doing image processdata'
            # normalize frame-by-frame using the imageView's settings
            n = np.empty(img.shape, dtype=img.dtype)
            for i in range(img.shape[0]):
                n[i] = self.imageView.normalize(img[i])
            self.normData.append(n)
            imgSet = {'procMean': n.mean(axis=0), 'procStd': n.std(axis=0)}
            print 'appending...'
            self.imageData.append(imgSet)
    def updateAnalysis(self):
        """
        Re-plot the mean signal of the current ROI over the time window
        selected by the linear region (self.lr), one trace per normalized
        data array, into the 'Background Plot' element. Re-reads the
        physiology first if its low-pass filter setting changed. No-op when
        no ROI is selected.
        """
        self.getDataStruct()
        roi = self.currentRoi
        plot = self.getElement('Background Plot')
        plot.clearPlots()
        # print 'LPF Changed?: ', self.physLPFChanged
        if self.physLPFChanged: # only update if the LPF filter has changed
            self.readPhysiology(self.currentDataDirectory) # re-read in case LPF has changed
        c = 0
        if self.currentRoi is None:
            return
        for img in self.normData: # pull from all the normalized data arrays (in a list)
            #img = img.mean(axis=1)
            # average the frames inside the selected time region
            rgn = self.lr.getRegion()
            img = img[:, rgn[0]:rgn[1]].mean(axis=1)
            data = roi.getArrayRegion(img, self.imageItem, axes=(1,2))
            m = data.mean(axis=1).mean(axis=1)
            #data = roi.getArrayRegion(img, self.view.imageView, axes=(1,2))
            #s = data.mean(axis=1).mean(axis=1)
            # hue advances per trace so each data array gets its own color
            plot.plot(m, pen=pg.hsvColor(c*0.2, 1.0, 1.0))
            #self.plot.plot(m-s, pen=pg.hsvColor(c*0.2, 1.0, 0.4))
            #self.plot.plot(m+s, pen=pg.hsvColor(c*0.2, 1.0, 0.4))
            c += 1
        #if c == 1:
            #self.getElement('Line Scan').setImage(data.mean(axis=2))
        #if self.traces is None:
            #return
        #rgn = self.lr.getRegion()
        #data = self.traces['Time': rgn[0]:rgn[1]]
        #self.plot2.plot(data.mean(axis=1), clear=True)
        #self.plot2.plot(data.max(axis=1))
        #self.plot2.plot(data.min(axis=1))
    def loadFileRequested(self, dh):
        """Called by file loader when a file load is requested.
        In this case, we request a directory, corresponding to a sample run,
        which may contain both physiology and image data.
        If multiple files are selected, this routine will be called for each one...

        :param dh: directory handle (or list of handles) to load from.
        :raises HelpfulException: if multiple files are selected while not in
            PhysROIPlot mode (only single-file loads are supported then).
        """
        # ds = self.dataModel.isSequence(dh[0])
        #dirtype = self.dataModel.dirType(dh[0])
        # if dirtype == 'ProtocolSequence':
        #     dsp = self.dataModel.listSequenceParams(dh[0])
        dlh = self.fileLoaderInstance.selectedFiles()
        if self.ctrl.ImagePhys_PhysROIPlot.isChecked():
            # multi-file mode: build the combined physiology/ROI figure
            print 'multiple file load, for # of files: ', len(dlh)
            self.makePhysROIPlot(dh, dlh)
        else:
            if len(dlh) > 1:
                raise HelpfulException("pbm_ImageAnalysis: loadFileRequested Error\nCan only load from single file", msgType='status')
            else:
                self.loadSingleFile(dh[0])
def setupPhysROIPlot(self):
if self.ctrl.ImagePhys_PhysROIPlot.isChecked():
self.checkMPL()
self.firstPlot = False
self.plotCount = 0
    def makePhysROIPlot(self, dh, dlh):
        """
        Add one dataset to a multi-panel matplotlib figure comparing ROI
        fluorescence (upper panel) with the physiology trace (lower panel).
        Called once per selected file; on the first call the full subplot
        grid is created, and when the last file has been plotted the figure
        is shown and saved to disk.

        :param dh: directory handle (or list; only the first entry is used)
            for the dataset to load and plot.
        :param dlh: the full list of selected file handles (determines the
            grid size and the total number of plots).
        """
        if type(dh) is list:
            dh = dh[0]
        fname = dh.name()
        (head, tail) = os.path.split(fname)
        # two columns; each dataset occupies two stacked rows (fluor + phys)
        self.MPRncolumns = 2
        self.MPRnrows = len(dlh)
        if len(dlh) % 2 == 1:
            self.MPRnrows += 2
        if self.firstPlot is False:
            # first call: build the whole subplot grid
            (self.MPLFig, self.MPPhysPlots) = PL.subplots(num="Physiology-Fluor comparison",
                nrows=self.MPRnrows, ncols=self.MPRncolumns, sharex=True, sharey=False)
            self.MPLFig.suptitle('Dataset: %s' % (head) , fontsize=10)
            self.nPhysPlots = len(dlh)
            c = 0
            r = 0
            for i in range(0, self.MPRnrows*self.MPRncolumns, 2):
                self.MPPhysPlots[r, c].sharey = True
                r = r + 2
                if r >= self.MPRnrows:
                    r = 0
                    c += 1
        self.firstPlot = True
        try:
            self.loadSingleFile(dh)
        except:
            # NOTE(review): bare except deliberately skips unreadable files so
            # the remaining datasets still get plotted.
            print 'problem loading data... skipping'
            self.plotCount += 1
            return
        self.unbleachImage()
        self.calculateAllROIs()
        # locate this dataset's (row, column) position in the grid
        c = 0
        r = self.plotCount*2
        if r >= self.MPRnrows-1:
            c += 1
            r = self.plotCount*2 % self.MPRnrows
        self.MPPhysPlots[r+1, c].plot(self.tdat, self.physData, 'k-', linewidth=0.5)
        self.MPPhysPlots[r+1, c].set_title(tail)
        # plot each ROI trace as percent change from baseline (F/F0 - 1)*100
        for i in range(self.nROI):
            ndpt = len(self.FData[i, :])
            self.MPPhysPlots[r, c].plot(self.imageTimes[0:ndpt], (self.FData[i, :]-1.0)*100.)
        self.plotCount += 1
        PL.draw()
        if self.plotCount >= self.nPhysPlots:
            # all datasets plotted: show, then save with a name built from the
            # last three path components.
            # NOTE(review): the save path below is hard-coded to one machine.
            PL.show()
            self.ctrl.ImagePhys_PhysROIPlot.setCheckState(False) # turn off now - to properly sequence reload
            (d1, s1) = os.path.split(self.currentFileName)
            (d2, s2) = os.path.split(d1)
            (d3, s3) = os.path.split(s2)
            sfn = s3+'-'+s2+'-'+s1
            PL.savefig('/Users/Experimenters/Desktop/ePhysPlots/%s.png' % (sfn), dpi=600, format='png')
def readDataTypes(self):
requestType = []
if self.ctrl.ImagePhys_Camera_check.isChecked():
requestType.append('camera')
if self.ctrl.ImagePhys_PMT_check.isChecked():
requestType.append('PMT')
if self.ctrl.ImagePhys_Image_check.isChecked():
requestType.append('imaging')
return requestType
def clearImageTypes(self):
self.ctrl.ImagePhys_Camera_check.setText('Camera')
self.ctrl.ImagePhys_PMT_check.setText('PMT')
self.ctrl.ImagePhys_Image_check.setText('Imaging')
def loadSingleFile(self, dh):
"""
:param dh:
:return:
"""
self.imageView.setFocus()
self.downSample = int(self.ctrl.ImagePhys_Downsample.currentText())
if self.downSample <= 0:
self.downSample = 1 # same as "none"
self.initDataState()
self.shiftFlag = False # eventually, but at the moment it does NOT work
self.getDataStruct()
if type(dh) is list:
dh = dh[0]
self.currentFileName = dh.name()
self.imageScaleUnit = 'pixels'
self.imageTimes = np.array(None)
self.imageType = None # 'camera' for camera (all pixels simultaneous); imaging for scanner (need scan timing); PMT for photomultipler raw data
self.rs = None
img = None
self.clearImageTypes()
if self.dataStruct is 'flat':
#print 'getting Flat data structure!'
if dh.isFile():
fhandle = dh
else:
# test data type for the imaging
requestType = self.readDataTypes() # selection of image types for analysis - can exclude imaging for example.
if os.path.isfile(os.path.join(dh.name(), 'Camera/frames.ma')) and 'camera' in requestType:
fhandle = dh['Camera/frames.ma'] # get data from ccd camera
self.imageType = 'camera'
self.ctrl.ImagePhys_Camera_check.setText(u'Camera \u2713')
if self.downSample == 1:
imt = MetaArray(file=fhandle.name())
self.imageInfo = imt.infoCopy()
img = imt.asarray()
#img = fhandle.read() # read the image stack directly
else:
(img, info) = self.tryDownSample(fhandle)
self.imageInfo = info
self.imageTimes = self.imageInfo[0]['values']
self.imageData = img.view(np.ndarray)
sh = self.imageData.shape
self.scanTimes = np.zeros(sh[1]*sh[2]).reshape((sh[1], sh[2]))
self.prepareImages()
elif os.path.isfile(os.path.join(dh.name(), 'PMT.ma')) and 'PMT' in requestType:
fhandle = dh['PMT.ma'] # get data from PMT, as raw trace information
self.pmtData = MetaArray(file=fhandle.name())
self.imageType = 'PMT'
self.ctrl.ImagePhys_PMT_check.setText(u'PMT \u2713')
self.rs = rect.RectScan()
scanInfo = dh.info()['Scanner']['program'][0]['scanInfo']
self.rs.restoreState(scanInfo)
decombInfo = dh.info()['protocol']['analysis']['Imaging']['children']['decomb']
auto = decombInfo['children']['auto']['value']
subpixel = decombInfo['children']['subpixel']['value']
self.PMTInfo = {'scanInfo': scanInfo, 'decombInfo': decombInfo, 'auto': auto, 'subpixel': subpixel}
self.imageInfo = self.pmtData.infoCopy()
self.restoreDecomb() # restore the original decomb settings and process the image.
elif os.path.isfile(os.path.join(dh.name(), 'imaging.ma')) and 'imaging' in requestType:
fhandle = dh['imaging.ma'] # get data from a pre-processed imaging file of PMT data
self.imageType = 'imaging'
self.ctrl.ImagePhys_Image_check.setText(u'Imaging \u2713')
if self.downSample == 1:
imt = MetaArray(file=fhandle.name())
self.imageInfo = imt.infoCopy()
img = imt.asarray()
else:
(img, info) = self.tryDownSample(fhandle)
self.imageInfo = info
self.imageData = img.view(np.ndarray)
self.imageTimes = self.imageInfo[0]['values']
itdt = (np.max(self.imageTimes)/len(self.imageTimes)) # time between scans (duration)
sh = self.imageData.shape
self.scanTimes = np.linspace(0., itdt, sh[1]*sh[2]).reshape((sh[1], sh[2])) # estimated times for each point in the image.
self.prepareImages()
else:
raise Exception("No valid imaging data found")
self.clearPhysiologyInfo() # clear the physiology data currently in memory to avoid confusion
if not dh.isFile():
self.readPhysiology(dh)
if img is None:
return False
#self.processData()
else: # interleaved data structure (Deepti Rao's calcium imaging data)
dirs = dh.subDirs() # find out what kind of data we
images = [[], [], [], []]
## Iterate over sequence
minFrames = None
for d in dirs: # each of the directories contains a data set
d = dh[d]
try:
ind = d.info()[('Clamp1', 'amp')]
except:
print 'unable to read clamp data from : ', d
print d.info()
raise
img = d['Camera/frames.ma'].read()
images[ind].append(img)
if minFrames is None or img.shape[0] < minFrames:
minFrames = img.shape[0]
self.rawData = np.array(None)
self.imageData = np.array(None)
# print "len images: %d " % (len(images))
while len(images) > 0:
imgs = images.pop(0)
img = np.concatenate([i[np.newaxis, :minFrames, ...] for i in imgs], axis=0)
self.rawData.append(img.astype(np.float32))
#img /= self.background
## remove bleaching curve from first two axes
ctrlMean = self.rawData[0].mean(axis=2).mean(axis=2)
trialCurve = ctrlMean.mean(axis=1)[:, np.newaxis, np.newaxis, np.newaxis]
timeCurve = ctrlMean.mean(axis=0)[np.newaxis,:, np.newaxis, np.newaxis]
del ctrlMean
for img in self.rawData:
img /= trialCurve
img /= timeCurve
#for img in self.rawData:
#m = img.mean(axis=0)
#s = img.std(axis=0)
#if self.background is not None:
#m = m.astype(np.float32)
#m /= self.background
#s = s.astype(np.float32)
#s /= self.background
#imgSet = {'mean': m, 'std': s}
#self.data.append(imgSet)
#self.imgMeans.append(m)
#self.imgStds.append(s)
self.imageItem.setImage(self.rawData[1].mean(axis=0))
self.processData()
## set up the selection region correctly and prepare IV curves
#if len(dirs) > 0:
#end = cmd.xvals('Time')[-1]
#self.lr.setRegion([end *0.5, end * 0.6])
#self.updateAnalysis()
#info = [
#{'name': 'Command', 'units': cmd.axisUnits(-1), 'values': np.array(values)},
#data.infoCopy('Time'),
#data.infoCopy(-1)]
#self.traces = MetaArray(np.vstack(traces), info=info)
self.imageData = self.rawData
self.ROI_Plot.clearPlots()
self.getDataStruct()
self.currentDataDirectory = dh
self.ctrl.ImagePhys_View.setCurrentIndex(0) # always set to show the movie
self.specImageCalcFlag = False # we need to recalculate the spectrum
npts = self.imageData.shape[0]/2
freq = np.fft.fftfreq(npts, d=self.imagedT)
freq = freq[0:npts/2 + 1]
self.ctrlROIFunc.ImagePhys_SpecHPF.setMinimum(0.0)
self.ctrlROIFunc.ImagePhys_SpecHPF.setMaximum(np.max(freq))
self.ctrlROIFunc.ImagePhys_SpecHPF.setValue(freq[1])
self.ctrlROIFunc.ImagePhys_SpecLPF.setMinimum(freq[1])
self.ctrlROIFunc.ImagePhys_SpecLPF.setMaximum(np.max(freq))
self.ctrlROIFunc.ImagePhys_SpecLPF.setValue(np.max(freq))
#print dir(self.ctrl.ImagePhys_ImgNormalize)
self.ctrl.ImagePhys_ImgNormalize.setEnabled(True)
self.updateAvgStdImage() # make sure mean and std are properly updated
self.calculateAllROIs() # recompute the ROIS
self.updateThisROI(self.lastROITouched) # and make sure plot reflects current ROI (not old data)
return True
def restoreDecomb(self):
"""
Retrieve the original decombing value for the file, and reset the image
:return:
"""
self.ctrl.ImagePhys_PMT_decomb.setValue(1e6*self.PMTInfo['decombInfo']['value'])
self.ctrl.ImagePhys_PMT_auto_check.setChecked(self.PMTInfo['auto'])
self.ctrl.ImagePhys_PMT_decomb_subpixel.setChecked(self.PMTInfo['subpixel'])
self.processPMT()
def filterPMT(self, sdt, lpf):
if self.ctrl.ImagePhys_PMT_LPF_check.isChecked():
lpf = self.ctrl.ImagePhys_PMT_LPF.value()*1e3 # convert kHz to Hz
# print sdt, lpf
if 1./sdt < lpf/2.: # force nyquist happiness
lpf = 0.5/sdt
print 'reset lpf to ', lpf
filtdata = Utility.SignalFilter_LPFBessel(self.pmtData.asarray()[0], lpf, 1.0/sdt, NPole=4, bidir=True)
return filtdata
# img = self.rs.extractImage(filtdata, offset=lag, subpixel=subpixel)
else: # no filtering - just return original array
return self.pmtData.asarray()[0]
#img = self.rs.extractImage(self.pmtData.asarray()[0], offset=lag, subpixel=subpixel)
    def processPMT(self):
        """
        read, adjust and set up PMT data for analysis and display.
        Includes decombing for bidirectional scans.
        The mirror lag ('decomb') is either measured automatically from the
        data or taken from the UI spin box (microseconds); the filtered trace
        is then extracted into an image via the RectScan, transposed for
        display, and per-pixel scan times are derived from the time axis.
        :return: Nothing
        """
        if self.imageType != 'PMT':
            return
        sdt = self.pmtData.xvals('Time')[1] - self.pmtData.xvals('Time')[0]
        lpf = self.ctrl.ImagePhys_PMT_LPF.value()*1e3 # convert kHz to Hz
        pmt_d = self.filterPMT(sdt, lpf) # filter data first
        if self.ctrl.ImagePhys_PMT_auto_check.isChecked():
            # measure the mirror lag from the data itself
            (decombed, lag) = self.rs.measureMirrorLag(pmt_d, transpose=True, maxShift=100)
            lag *= sdt/2. # lag from measureMirrorLag is expressed in pixels - convert to time.
            self.ctrl.ImagePhys_PMT_decomb.setValue(lag*1e6)
        else:
            # manual lag: spin box is in microseconds
            lag = self.ctrl.ImagePhys_PMT_decomb.value() * 1e-6
        subpixel = self.ctrl.ImagePhys_PMT_decomb_subpixel.isChecked()
#        if self.ctrl.ImagePhys_PMT_LPF_check.isChecked():
#            # print sdt, lpf
#            if 1./sdt < lpf/2.: # force nyquist happiness
#                lpf = 0.5/sdt
#                print 'reset lpf to ', lpf
#            filtdata = Utility.SignalFilter_LPFBessel(self.pmtData.asarray()[0], lpf, 1.0/sdt, NPole=4)
#            img = self.rs.extractImage(filtdata, offset=lag, subpixel=subpixel)
#        else:
#            img = self.rs.extractImage(self.pmtData.asarray()[0], offset=lag, subpixel=subpixel)
        img = self.rs.extractImage(pmt_d, offset=lag, subpixel=subpixel)
        self.imageData = img.view(np.ndarray)
        self.imageData = self.imageData.transpose(0, 2, 1)
        # compute global transform
        tr = self.rs.imageTransform()
        st = pg.QtGui.QTransform()
        st.scale(self.downSample, 1)
        tr = st * tr
        self.pmtTransform = pg.SRTTransform3D(tr)
        # extract per-pixel acquisition times the same way the image was extracted
        itx = self.rs.extractImage(self.pmtData.xvals('Time'), offset=lag, subpixel=subpixel)
        self.imageTimes = itx[:,0,0]
        self.scanTimes = itx[0,:,:] # use times from first scan; will adjust offset later
        self.prepareImages()
    def prepareImages(self):
        """
        set up image data for analysis, and display image.
        Drops the first self.ignoreFirst frames, keeps a raw copy, computes
        per-frame timing and background, shows the stack in the image view,
        marks the data state as loaded, and refreshes reference images/ROIs.
        :return: Nothing
        """
        fi = self.ignoreFirst
        self.rawData = self.imageData.copy()[fi:] # save the raw data.
        self.imageData = self.imageData[fi:]
        self.imageTimes = self.imageTimes[fi:]
        self.baseImages = range(1) # identify which images to show as the "base image"
        if self.downSample > 1:
            self.imageTimes = self.imageTimes[0:-1:self.downSample]
        self.imagedT = np.mean(np.diff(self.imageTimes))
        self.imageView.setImage(self.imageData)
        self.imageView.getView().setAspectLocked(True)
        self.imageView.imageItem.resetTransform()
        if self.imageType == 'PMT':
            # correct the display aspect for the scan's physical width/height
            self.imageView.imageItem.scale((self.rs.width/self.rs.height)/(float(self.imageData.shape[1])/float(self.imageData.shape[2])), 1.0)
        self.imageView.autoRange()
        self.dataState['Loaded'] = True
        self.dataState['Structure'] = 'Flat'
        # per-frame mean intensity, used as the background trace
        self.background = self.rawData.mean(axis=2).mean(axis=1)
        self.backgroundmean = self.background.mean(axis=0)
        # if any ROIs available, update them.
        self.updateAvgStdImage() # make sure mean and std are properly updated
        self.calculateAllROIs() # recompute the ROIS
        self.updateThisROI(self.lastROITouched) # and make sure plot reflects current ROI (not old data)
    def getCSVFile(self):
        """ read the CSV file for the ROI timing data

        Opens a file dialog, and when an existing file is chosen, resets the
        ROI-timing state and reads the first CSV row.
        :return: list containing the first CSV row (empty if no valid file).
        """
        fd = QtGui.QFileDialog(self)
        self.fileName = fd.getOpenFileName()
        from os.path import isfile
        allcsvdata = []
        if isfile(self.fileName):
            # NOTE(review): statusBar()/show() presumably come from a Qt widget
            # base; AnalysisModule itself may not provide them - confirm.
            self.statusBar().showMessage( "Loading: %s..." % (self.fileName) )
            self.show()
            csvfile = csv.reader(open(self.fileName), delimiter=",")
            self.times = []
            self.nROI = 0
            self.bkgd=[]
            self.bkgdpos = None
            self.timepos = 0
            self.roilist = []
            # .next() is the Python 2 iterator protocol (py3 would use next())
            firstline = csvfile.next()
            allcsvdata.append(firstline)
            return allcsvdata
def updateAvgStdImage(self):
""" update the reference image types and then make sure display agrees.
"""
self.aveImage = np.mean(self.imageData, axis=0)
self.stdImage = np.std(self.imageData, axis=0)
self.changeView()
    def spectrumCalc(self):
        """
        Calculate the spectrum and display the power across time in a frequency band as the image
        intensity at each point. Useful for finding areas of activity.
        The per-pixel FFT magnitude is cached in self.freim; the displayed
        image is the mean magnitude over the HPF-LPF band, Gaussian smoothed.
        """
        # sh = self.imageData.shape
        if self.specImageCalcFlag is False: # calculate spectrum info
            # normalized FFT magnitude along the time axis, cached until the
            # underlying data changes
            self.freim = np.abs(np.fft.fft(self.imageData, axis=0)/self.imageData.shape[0])
            self.specImageCalcFlag = True
        # NOTE: npts/2 and npts/2 + 1 rely on Python 2 integer division
        npts = self.imageData.shape[0]/2
        freq = np.fft.fftfreq(npts, d=self.imagedT) # get frequency list
        freq = freq[0:npts/2 + 1]
        hpf = self.ctrlROIFunc.ImagePhys_SpecHPF.value()
        lpf = self.ctrlROIFunc.ImagePhys_SpecLPF.value()
        u = np.where(freq > hpf) # from frequencies, select those from the window
        v = np.where(freq < lpf)
        frl = list(set(u[0]).intersection(set(v[0])))
        if len(frl) == 0: # catch bad selection
            return
        si = self.freim.take(frl, axis=0) # % make selection
        self.specImage = np.mean(si, axis=0) # and get the average across the frequenies selected
        sigma = self.ctrlROIFunc.ImagePhys_FFTSmooth.value()
        self.specImageDisplay = scipy.ndimage.filters.gaussian_filter(self.specImage, sigma) # smooth a bit
        # switch the view combo to the spectrum image and refresh
        self.ctrl.ImagePhys_View.setCurrentIndex(3)
        self.changeView()
def getImageScaling(self):
"""
Retrieve scaling factor and set imageScaleUnit from the info on the image file
In the case where the information is missing, we just set units to pixels.
"""
if 'pixelSize' in self.imageInfo[3]:
pixelsize = self.imageInfo[3]['pixelSize']
region = self.imageInfo[3]['region']
# binning = self.imageInfo[3]['binning']
self.imageScaleUnit = 'um'
sf = 1.0e6
else:
print 'Old File without full scaling information on image, setting to defaults of pixels.'
sh = self.imageData.shape
region = [0, 0, sh[1], sh[2]] # make region from image data directly [x0,y0,y1,y1]
px = [1.0, 1.0] # scaling is now in pixels directly
self.imageScaleUnit = 'pixels'
sf = 1.0
pixelsize = [1.0, 1.0]
sx = region[2]-region[0]
sy = region[3]-region[1]
px = [0, 0]
px[0] = pixelsize[0] * sf
px[1] = pixelsize[1] * sf
sx = sx*px[0]
sy = sy*px[1]
#print "sx, sy, px", sx, sy, px
return(sx, sy, px)
    def getFileInfo(self):
        """
        Print a summary of the currently selected image file (frame count,
        frame size, frame rate, and the frame count that the current
        downsample setting would produce) without loading the full stack.
        """
        dh = self.fileLoaderInstance.selectedFiles()
        dh = dh[0]
        imt = MetaArray(file=dh.name()) # , subset=(slice(block_pos,block_pos+block_size),slice(None), slice(None)))
        sh = imt.shape
        info = imt.infoCopy()
        self.downSample = int(self.ctrl.ImagePhys_Downsample.currentText())
        if self.downSample <= 0:
            self.downSample = 1 # same as "none"
        totframes = int(np.ceil(sh[0]/self.downSample))
        imageTimes = info[0].values()[1]
        dt = np.mean(np.diff(imageTimes))
        print '\n'
        print '*'*80
        print 'File %s\n Contains %d frames of %d x %d' % (dh.name(), sh[0], sh[1], sh[2])
        print ' (would downsample to %d frames at downsample = %d ' % (totframes, self.downSample)
        print 'Frame rate is: %12.5f s per frame or %8.2f Hz' % (dt, 1.0/dt)
    def tryDownSample(self, dh):
        """
        Read the image MetaArray for file handle *dh* and downsample it in
        time by self.downSample, averaging each group of downSample frames.
        The file is read in blocks of 1000 frames to bound memory use, with
        a progress dialog that allows cancellation.

        :param dh: file handle of the image data file
        :return: (downsampled float32 image array, MetaArray info list)
        :raises HelpfulException: if the file cannot be read
        :raises Exception: if the user cancels the progress dialog
        """
        imt = MetaArray(file=dh.name()) # , subset=(slice(block_pos,block_pos+block_size),slice(None), slice(None)))
        if imt is None:
            raise HelpfulException("Failed to read file %s in tryDownSample" % dh.name(), msgType='status')
        sh = imt.shape
        info = imt.infoCopy()
        # NOTE(review): under Python 2 this floors (int/int) before np.ceil,
        # consistent with the per-block floor arithmetic below -- TODO confirm
        # trailing frames are intentionally dropped rather than averaged short.
        outframes = int(np.ceil(sh[0]/self.downSample))
        bigblock = 1000
        nbigblocks = int(np.floor(sh[0]/bigblock))
        nlastblock = sh[0] - nbigblocks*bigblock
        if nlastblock > 0:
            nbigblocks += 1
        nframesperblock = bigblock/self.downSample
        print 'Reducing from %d frames to %d frames, downsample = %d ' % (sh[0], outframes, self.downSample)
        imt_out = np.empty((outframes, sh[1], sh[2]), dtype=np.float32)
        tfr = 0
        # nfr = 0
        with pg.ProgressDialog("Downsampling", 0, outframes) as dlg:
            avgflag = True
            dlg.setLabelText("Reading images...")
            dlg.setValue(0)
            dlg.setMaximum(outframes)
            # bbcount = 0
            for bb in range(nbigblocks):
                img = imt[bb*bigblock:(bb+1)*bigblock, :, :]
                try:
                    # MetaArray slices may need conversion to a plain ndarray
                    img = img.asarray()
                except:
                    pass
                if bb == nbigblocks-1:
                    # the final block is short: only read complete groups
                    nframesperblock = int(np.floor(nlastblock/self.downSample))
                    print "reading last block of short..."
                for fr in range(nframesperblock):
                    dlg.setLabelText("Reading block %d of %d" % (tfr, outframes))
                    block_pos = fr * self.downSample
                    #print 'tfr: %d block: %5d, frame: %d ' % (tfr, block_pos, nfr)
                    if avgflag:
                        # average each group of downSample frames into one output frame
                        imt_out[tfr] = np.mean(img[block_pos:(block_pos+self.downSample)], axis=0)
                        # imt_out[fr] = np.mean(imt[block_pos:(block_pos+self.downSample)], axis=0)
                    else:
                        try:
                            imt_out[tfr] = img[block_pos,:,:]
                        except:
                            print 'Failing!!! fr: %d blockpos: %d bb: %d' % (fr, block_pos, bb)
                    dlg += 1
                    tfr += 1
                    # nfr = tfr*self.downSample
                if dlg.wasCanceled():
                    raise Exception("Downample input canceled by user.")
        return(imt_out, info)
def clearPhysiologyInfo(self):
self.physPlot.clearPlots()
self.physData = []
self.physThreshLine = None
self.spikesFound = None
self.spikeFoundpk = None
self.burstsFound = None
self.withinBurstsFound = None
self.makeSpikePointers() # prepare the graph
    def readPhysiology(self, dh=None):
        """
        Read the physiology from the primary data channel, scale it to
        natural units (mV for current clamp, pA otherwise), optionally
        low-pass filter it, then plot a decimated copy along with the spike
        threshold line and any detected spikes.

        :param dh: handle to the directory where the data is stored (not the file itself)
        :return: Nothing
        """
        if dh is None:
            return
        self.clearPhysiologyInfo()
        data = self.dataModel.getClampFile(dh).read() # retrieve the physiology traces
        self.physData = self.dataModel.getClampPrimary(data).asarray()
        if self.dataModel.getClampMode(data) == 'IC':
            self.physData = self.physData * 1e3 # convert to mV
            units = 'mV'
            self.ctrlPhysFunc.ImagePhys_PhysThresh.setSuffix(units)
        else:
            self.physData = self.physData * 1e12 # convert to pA, best for on-cell patches
            units = 'pA'
        info1 = data.infoCopy()
        self.samplefreq = info1[2]['DAQ']['primary']['rate']
        if self.physLPF >= 250.0 and self.physLPF < 0.5*self.samplefreq: # respect Nyquist, just minimally
            self.physData = Utility.SignalFilter_LPFBessel(self.physData, self.physLPF, self.samplefreq, NPole=8)
        self.physLPFChanged = False # we have updated now, so flag is reset
        # decimate for display only: keep at most ~maxplotpts on screen
        maxplotpts = 50000
        shdat = self.physData.shape
        decimate_factor = 1
        if shdat[0] > maxplotpts:
            decimate_factor = int(np.floor(shdat[0]/maxplotpts))
            if decimate_factor < 1:
                decimate_factor = 1
        else:
            pass
            # store primary channel data and read command amplitude
        #print 'decimate factor: %d' % (decimate_factor)
        #print 'Number of points in original data set: ', shdat
        tdat = data.infoCopy()[1]['values']
        tdat = tdat[::decimate_factor]
        # note: self.tdat keeps the full (undecimated) time base for spike detection
        self.tdat = data.infoCopy()[1]['values'] # / 1000. NOT
        self.physPlot.plot(tdat, self.physData[::decimate_factor], pen=pg.mkPen('w')) # , decimate=decimate_factor)
        self.showPhysTrigger()
        # best effort: spike detection may fail on odd data; display still proceeds
        try:
            self.detectSpikes()
        except:
            pass
    def loadRatioImage(self):
        """
        Load the image selected in the file loader as the ratio image and
        switch the display to the ratio view; if nothing was read, disable
        ratio mode and fall back to the standard view.
        """
        print 'loading ratio image'
        dh = self.fileLoaderInstance.selectedFiles()
        # add a leading frame axis so the single image matches the stack layout
        self.ratioImage = dh[0].read()[np.newaxis,...].astype('float')
        print self.ratioImage
        #self.background /= self.background.max()
        if self.ratioImage is None:
            # nothing was loaded: disable ratio processing
            self.dataState['ratioLoaded'] = False
            self.useRatio = False
            view = self.ctrl.ImagePhys_View.currentText()
            if view == 'Ratio Image':
                view = self.ctrl.ImagePhys_View.setCurrentIndex(0)
        else:
            self.useRatio = True
            self.dataState['ratioLoaded'] = True
            view = self.ctrl.ImagePhys_View.setCurrentIndex(4)  # switch to the ratio view
        self.changeView()
def clearRatioImage(self):
self.ratioImage = None
self.dataState['ratioLoaded'] = False
self.useRatio = False
self.ctrl.ImagePhys_View.setCurrentIndex(0)
self.changeView()
def getDataStruct(self):
ds = self.ctrl.ImagePhys_DataStruct.currentIndex()
if ds == 0:
self.dataStruct = 'flat'
else:
self.dataStruct = 'interleaved'
self.ignoreFirst = self.ctrl.ImagePhys_ignoreFirst.value()
lpf = self.ctrlPhysFunc.ImagePhys_PhysLPF.value()
if lpf == 0.0:
self.physLPF = 0.0
else:
self.physLPF = lpf
#print "data struct = %s" % self.dataStruct
#print "ignore First: ", self.ignoreFirst
#print "lpf: %8.1f" % self.physLPF
    def physLPF_valueChanged(self):
        """UI callback: record that the physiology LPF setting changed (applied on the next read)."""
        self.physLPFChanged = True # just note that it has changed
def doNormalize(self):
method = self.ctrl.ImagePhys_ImgMethod.currentIndex()
if method == 0: # (F-Fo)/Fo # referenced to a baseline of the first image
self.StandarddFFImage()
if method == 1: # reference to a baseline of images over a time window
self.StandarddFFImage(baseline=True)
if method == 2:
self.MediandFFImage() # Referenced to median of each image
if method == 3:
self.normalizeImage() # another normalization
if method == 4:
self.slowFilterImage() # slow filtering normalization: (F-Fslow)/Fslow on pixel basis over time
print 'normalize method: ', method
print self.dataState['ratioLoaded']
print self.useRatio
if method == 4: # g/r ratio - future: requires image to be loaded (hooks in place, no code yet)
if self.dataState['ratioLoaded'] and self.useRatio:
self.GRFFImage() # convert using the ratio
self.updateAvgStdImage()
self.calculateAllROIs()
    def ExportTiff(self):
        """ Take the current image data and make a directory with individual TIFF files
            for the frames, using PIL.
            Useful for making movies (read the tiffs into ImageJ, export QT or QVI file)
        """
        # self.ImageData
        tiffpath = '../TiffStacks/'
        if not os.path.isdir(tiffpath):
            os.makedirs(tiffpath)
        else: # overwrite the directory - by deleting existing files first
            if os.path.isdir(tiffpath): # keep the working directory clean.
                for root, dirs, files in os.walk(tiffpath):
                    for f in files:
                        os.unlink(os.path.join(root, f))
                    for d in dirs:
                        # NOTE(review): requires a module-level shutil import -- verify
                        shutil.rmtree(os.path.join(root, d))
        image_sh = self.imageData.shape
        nframes = image_sh[0]
        # xsize = image_sh[1]
        # ysize = image_sh[2]
        print 'Writing tiff images to %s\n' % (tiffpath)
        #print dir(Image.Image)
        for i in range(0, nframes):
            # NOTE(review): PIL exposes fromarray at module level (Image.fromarray);
            # Image.Image.fromarray looks wrong -- confirm which Image binding is in use
            ai = Image.Image.fromarray(self.imageData[i, :, :]*8192.0)
            fn = tiffpath + 'acq4_ImageAnalysis_%05d.tiff' % (i)
            ai.save(fn)
#
#---------baseline correction routines --------------------
#
def Baseline0(self, roi=None):
if roi is None:
lrois = range(0, self.nROI)
else:
lrois = [roi]
t0 = self.ctrlROIFunc.ImagePhys_BaseStart.value()
t1 = self.ctrlROIFunc.ImagePhys_BaseEnd.value()
dt = np.mean(np.diff(self.imageTimes))
it0 = int(t0/dt)
it1 = int(t1/dt)
for roi in lrois:
bl = np.mean(self.FData[roi.ID][it0:it1])
self.BFData[roi.ID] /= bl
def Baseline1(self, roi=None):
### data correction routine to smooth out the baseline
###
self.FilterKernel = 11
self.FilterOrder = 3
thr = 2.0 # self.ui.CorrTool_Threshold.value()
dds = self.BFData[:,0:-1].copy()
if roi is None:
lrois = range(0, self.nROI)
else:
lrois = [roi]
for roi in lrois:
d = self.BFData[roi.ID].copy().T
ds = Utility.savitzky_golay(d, kernel=31, order=5) # smooth data
dds[roi.ID] = np.diff(ds) # take derivative of smoothed data
stdev = np.std(dds[roi.ID])
pts = np.where(np.abs(dds[roi.ID]) < thr*stdev) # get subset of points to fit
dds2 = np.diff(np.diff(ds))
stdev2 = np.std(dds2)
pts2 = np.where(np.abs(dds2) < thr*stdev2)
s0 = set(np.transpose(pts).flat)
s1 = set(np.transpose(pts2).flat)
ptsok = list(s1.intersection(s0))
if len(ptsok) == 0:
return
tf = self.imageTimes[ptsok]
df = d[ptsok]
p = np.polyfit(tf, df, 5)
bd = np.polyval(p, self.imageTimes)
# dm = np.mean(d[0:10])
bl = Utility.savitzky_golay(d/bd, kernel=self.FilterKernel,
order=self.FilterOrder)
self.BFData[roi.ID] = bl
return(self.BFData)
#self.FData[roi, :] = self.BFData[roi,:]
#self.plotdata(self.times, 100*(self.BFData-1.0), datacolor = 'blue', erase = True,
# background = False, scaleReset=False, yMinorTicks=0, yMajorTicks=3,
# yLabel = u'\u0394F/F<sub>ROI %d</sub>')
# self.makeROIDataFigure(clear=False, gcolor='g')
def SignalBPF(self, roi):
""" data correction
try to decrease baseline drift by high-pass filtering the data.
"""
#self.BFData = np.array(self.FData).copy()
HPF = self.ctrlROIFunc.ImagePhys_ImgHPF.value()
LPF = self.ctrlROIFunc.ImagePhys_ImgLPF.value() # 100.0
if LPF < 4.0*HPF:
print "please make lpf/hpf further apart in frequency"
return
dt = np.mean(np.diff(self.imageTimes))
samplefreq = 1.0/dt
if (LPF > 0.5*samplefreq):
LPF = 0.5*samplefreq
d = self.BFData[roi.ID].copy().T
return(Utility.SignalFilter(d, LPF, HPF, samplefreq))
def SignalHPF(self, roi):
""" data correction
try to decrease baseline drift by high-pass filtering the data.
"""
HPF = self.ctrlROIFunc.ImagePhys_ImgHPF.value()
dt = np.mean(np.diff(self.imageTimes))
samplefreq = 1.0/dt
d = self.BFData[roi.ID].copy().T
return(Utility.SignalFilter_HPFButter(d, HPF, samplefreq))
def SignalLPF(self, roi):
""" data correction
Low-pass filter the data.
"""
LPF = self.ctrlROIFunc.ImagePhys_ImgLPF.value() # 100.0
dt = np.mean(np.diff(self.imageTimes))
samplefreq = 1.0/dt
if (LPF > 0.5*samplefreq):
LPF = 0.5*samplefreq
d = self.BFData[roi.ID].copy().T
return(Utility.SignalFilter_LPFButter(d, LPF, samplefreq))
#
# detect spikes in physiology trace
#
def showPhysTrigger(self):
thr = self.ctrlPhysFunc.ImagePhys_PhysThresh.value()
if self.physThreshLine is None:
self.physThreshLine = self.physPlot.plot(x=np.array([self.tdat[0], self.tdat[-1]]),
y=np.array([thr, thr]), pen=pg.mkPen('r'), clear=False)
else:
self.physThreshLine.setData(x=np.array([self.tdat[0], self.tdat[-1]]),
y=np.array([thr, thr]))
    def detectSpikes(self, burstMark=None):
        """
        Find spikes in the physiology trace that cross the UI threshold,
        group them into bursts, and mark spikes, spike peaks, and bursts on
        the physiology plot.

        :param burstMark: unused here (accepted for API compatibility)
        :return: Nothing; sets self.SpikeTimes and self.burstTimes.
        """
        spikescale = 1.0 # or 1e-12...
        thr = spikescale*self.ctrlPhysFunc.ImagePhys_PhysThresh.value()
        # a negative threshold detects downward-going events: flip the trace
        if thr < 0:
            ysign = -1.0
        else:
            ysign = 1.0
        (sptimes, sppts) = Utility.findspikes(self.tdat, ysign*self.physData, np.abs(thr)*spikescale, t0=None, t1=None,
                                              dt=1.0/self.samplefreq, mode='peak', interpolate=False, debug=False)
        self.SpikeTimes = sptimes
        if len(sptimes) <= 1:
            return  # not enough spikes for burst analysis or display
        yspmarks = thr*spikescale
        bList = self.defineSpikeBursts()
        self.burstTimes = bList
        # stagger marker heights so spike/burst/within-burst symbols do not overlap
        yburstMarks = thr*0.9*spikescale
        ywithinBurstMarks = thr*0.8*spikescale
        self.makeSpikePointers(spikes=(sptimes, yspmarks), spikespk=(sptimes, self.physData[sppts]),
                               bursts = (bList, yburstMarks, ywithinBurstMarks))
        print 'spikes detected: %d' % (len(sptimes))
    def makeSpikePointers(self, spikes=None, spikespk=None, bursts=None):
        """
        Create or update the scatter-plot overlays marking spikes, spike
        peaks, burst-onset spikes, and within-burst spikes on the
        physiology plot. Items are created empty on first use and their
        points replaced on subsequent calls.

        :param spikes: (times, ymark) for spike markers at threshold level
        :param spikespk: (times, yvalues) for markers at the spike peaks
        :param bursts: (burstList, yburst, ywithin) burst marker data
        :return: Nothing
        """
        # add scatterplot items to physiology trace - these start out empty, but we can replace
        # the points in the arrays later.
        if spikes is not None and len(spikes[0]) > 0:
            if self.spikesFound is None:
                self.spikesFound = pg.ScatterPlotItem(size=6, pen=pg.mkPen('g'), brush=pg.mkBrush(0, 255, 0, 200),
                                                      symbol = 't', identical=True)
                #self.clearPhysiologyInfosetPoints(x=[], y=spikes[1])
                self.physPlot.addItem(self.spikesFound)
            else:
                self.spikesFound.setPoints(x=spikes[0], y=spikes[1]*np.ones(len(spikes[0])))
        if spikespk is not None and len(spikespk[0]) > 0:
            if self.spikesFoundpk is None:
                self.spikesFoundpk = pg.ScatterPlotItem(size=4, pen=pg.mkPen('r'), brush=pg.mkBrush(0, 255, 0, 200),
                                                        symbol = 'o', identical=True)
                #self.spikesFoundpk.setPoints(x=spikespk[0], y=spikespk[1])
                self.physPlot.addItem(self.spikesFoundpk)
            else:
                self.spikesFoundpk.setPoints(x=spikespk[0], y=spikespk[1]*np.ones(len(spikespk[0])))
        if bursts is not None and len(bursts[0]) > 0:
            if self.burstsFound is None:
                self.burstsFound = pg.ScatterPlotItem(size=7, pen=pg.mkPen('y'), brush=pg.mkBrush(255, 255, 0, 200),
                                                      symbol = 's', identical = True)
                #self.burstsFound.setPoints(x=bursts[0], y = bursts[1])
                self.physPlot.addItem(self.burstsFound)
            if self.withinBurstsFound is None:
                self.withinBurstsFound = pg.ScatterPlotItem(size=7, pen=pg.mkPen('b'), brush=pg.mkBrush(0, 0, 255, 200),
                                                            symbol = 'o', identical = True)
                #self.withinBurstsFound.addPoints(x=withinbursts[0], y = withinbursts[1])
                self.physPlot.addItem(self.withinBurstsFound)
            # split each burst into its onset spike and the remaining in-burst spikes
            onsetSpikes = []
            burstSpikes= []
            for b in range(len(bursts[0])):
                bdat = bursts[0][b]
                onsetSpikes.append(bdat[0])
                burstSpikes.extend(bdat[1:].tolist())
            self.burstsFound.setPoints(x=onsetSpikes, y = [bursts[1] for x in range(len(onsetSpikes))])
            self.withinBurstsFound.setPoints(x=burstSpikes, y = [bursts[2] for x in range(len(burstSpikes))])
def checkMPL(self):
if self.MPLFig is not None:
PL.close()
self.MPLFig = None
    def RevSTA(self):
        """Reverse spike-triggered average: placeholder, not implemented."""
        pass
    def computeSTA(self):
        """
        Compute the spike-triggered average of the ROI signals, given the spike train.
        This one is just the basic spike-triggered average: it delegates to
        computeBTA with singleSpike=True, so every detected spike (not just
        burst onsets) triggers an average.
        """
        self.computeBTA(singleSpike=True)
    def computeBTA(self, singleSpike=False):
        """
        Compute the spike-triggered average of the ROI signals, given the spike train.
        The following criteria are available to select from within the spike train:
        1. minimum time before a spike
        2. minimum rate AFTER the spike (for the next N spikes)
        3. minimum # of spikes (N) for minimum rate determination (define burst)

        :param singleSpike: when True, trigger on every detected spike;
            when False (default), trigger on burst-onset spikes only.
        :return: Nothing; opens a matplotlib figure with per-ROI traces,
            per-event clips, and the mean +/- std triggered response.
        """
        if not singleSpike: # normal processing is to do bursts, using first spike of burst
            if self.burstTimes == []:
                bList = self.defineSpikeBursts()
                self.burstTimes = bList
            onsetSpikes = []
            burstSpikes = []
            bList = self.burstTimes
            for b in range(len(bList)):
                bdat = bList[b]
                onsetSpikes.append(bdat[0])
                burstSpikes.extend(bdat[1:].tolist())
            plotTitle = 'Burst-Onset-Triggered Fluorescence'
        else: # but we can also handle just regular spike trains...
            onsetSpikes = self.SpikeTimes
            plotTitle = 'All-Spikes-Triggered Fluorescence'
        self.calculateAllROIs()
        N = len(onsetSpikes)
        # per-roi, per-event clips of fluorescence (avCaF) and spike-relative time (avCaT)
        avCaF = [[0]*N for i in xrange(self.nROI)]
        avCaT = [[0]*N for i in xrange(self.nROI)]
        for roi in range(0, self.nROI):
            i = 0
            for onSp in onsetSpikes:
                # clip the roi trace from 100 ms before to 500 ms after each trigger
                (x, y) = Utility.clipdata(self.FData[roi], self.imageTimes, onSp-0.1, onSp+0.5)
                avCaF[roi][i] = y
                avCaT[roi][i] = (x.tolist()-onSp)
                i = i + 1
        self.checkMPL()
        (self.MPLFig, self.MPL_plots) = PL.subplots(num="Image Analysis", nrows=self.nROI+1, ncols=2,
                                                    sharex=False, sharey=False)
        self.MPLFig.suptitle('%s:\n %s' % (plotTitle, self.currentFileName), fontsize=11)
        # common time base (half the mean frame interval) for interpolation of all clips
        dt = np.mean(np.diff(self.imageTimes))/2.
        tbase = np.arange(-0.1, 0.5, dt)
        axmin = 1e6
        axmax = -1e6
        ave = [[]]*self.nROI
        std = [[]]*self.nROI
        CaAmin = 1e6
        CaAmax = -1e6
        for roi in range(0, self.nROI):
            self.MPL_plots[self.nROI][0].plot(self.imageTimes, self.BFData[roi])
            interCaF = np.zeros((N, len(tbase)))
            for i in range(0, len(onsetSpikes)):
                #sp = self.MPL_plots.scatter(avCaT, avCaF, s=15, color='tomato')
                self.MPL_plots[roi][0].plot(avCaT[roi][i], avCaF[roi][i]*100., color='k', linestyle='-')
                # resample each clip onto the common time base so they can be averaged
                f_int = scipy.interpolate.interp1d(avCaT[roi][i], avCaF[roi][i]*100., bounds_error=False)
                interCaF[i, :] = f_int(tbase)
                CaAmin = np.nanmin([np.nanmin(avCaF[roi][i]), CaAmin])
                CaAmax = np.nanmax([np.nanmax(avCaF[roi][i]), CaAmax])
                # self.MPL_plots[roi][1].plot(tbase, interCaF[roi,i,:], 'r')
            ave[roi] = scipy.stats.nanmean(interCaF, axis=0)
            std[roi] = scipy.stats.nanstd(interCaF, axis=0)
            self.MPL_plots[roi][1].errorbar(tbase, ave[roi]*100., yerr=std[roi]*100., color='r')
            self.MPL_plots[roi][0].set_xlabel('T (sec)')
            self.MPL_plots[roi][0].set_ylabel('dF/F (%)')
            axmin = np.nanmin([np.nanmin(ave[roi]-std[roi]), axmin])
            axmax = np.nanmax([np.nanmax(ave[roi]+std[roi]), axmax])
        # second pass: apply the common axis limits found above to every roi panel
        for roi in range(0, self.nROI):
            self.MPL_plots[roi][1].set_ylim((axmin*100., axmax*100.))
            self.MPL_plots[roi][0].set_ylim((CaAmin*100., CaAmax*100.))
            # self.MPL_plots[roi][1].errorbar(tbase, ave[roi], yerr=std[roi], color='r')
        PL.show()
def defineSpikeBursts(self):
"""
The following criteria are avaiable to select from within the spike train:
1. minimum time before a spike
2. minimum rate AFTER the spike (for the next N spikes)
3. minimum # of spikes (N) for minimum rate determination (define burst length)
The return arrays are the times of first spikes
2 Feb 2012 P. B. Manis (working version)
"""
#minTime = 0.100 # in milliseconds
#maxInterval = 0.040 # maximum time between spikes to be counted in a burst
#minNspikes = 3 # minimum number of spikes for event to count as a burst
minTime = self.ctrlPhysFunc.ImagePhys_burstISI.value()/1000.0
maxInterval = self.ctrlPhysFunc.ImagePhys_withinBurstISI.value()/1000.0
minNspikes = self.ctrlPhysFunc.ImagePhys_minBurstSpikes.value()
# first we find the indices of all events that meet the above criteria:
if len(self.SpikeTimes) < 3:
return([], [])
isis = np.diff(self.SpikeTimes)
burstOnsetCandidates = np.where(isis > minTime)[0].tolist()
burstOnsetCandidates = [x + 1 for x in burstOnsetCandidates]
# those are candidate events...
allBurstList = []
burstOnsetList = []
for i in burstOnsetCandidates:
tempWithinBurst = [i] # list of spike times that follow this one
for j in range(i, len(self.SpikeTimes)-1):
if isis[j] <= maxInterval: # if interspike interval is long, we terminate
tempWithinBurst.append(j+1) # keep track of spikes that are "within" a burst
else: # if isi is too long, terminate burst
break
if len(tempWithinBurst) >= (minNspikes-1) and i not in burstOnsetList: # note, tempWithinBurst does not include the first spike.
burstOnsetList.append(i)
allBurstList.append(tempWithinBurst)
burstTList = []
for j in range(len(allBurstList)):
burstTList.append(self.SpikeTimes[allBurstList[j]])
return(burstTList)
    def ROIDistStrength(self):
        """
        Create a plot of the strength of the cross correlation (peak value) versus the distance
        between the (center) of all pairs of ROIs.
        Uses matplotlib or pyqtgraph for display depending on the UI checkbox.
        """
        if self.ROIDistanceMap == []:
            self.ROIDistances() # make sure we have valid distance information
        if self.IXC_Strength == []:
            self.Analog_Xcorr_Individual(plottype=None)
        threshold = self.ctrlImageFunc.IAFuncs_XCorrThreshold.value()
        # x-range of the threshold guide lines spans the observed distances
        x0 = np.nanmin(np.nanmin(self.ROIDistanceMap))
        x1 = np.nanmax(np.nanmax(self.ROIDistanceMap))
        thrliney = [threshold, threshold]
        nthrliney = [-threshold, -threshold]
        thrlinex = [x0, x1]
        self.use_MPL = self.ctrlImageFunc.IAFuncs_MatplotlibCheckBox.checkState()
        mean = scipy.stats.nanmean(self.IXC_Strength.flatten())
        std = scipy.stats.nanstd(self.IXC_Strength.flatten())
        print 'Mean XC: %f std: %f' % (mean, std)
        if self.use_MPL:
            self.checkMPL()
            (self.MPLFig, self.MPL_plots) = PL.subplots(num="Image Analysis", nrows=1, ncols=1,
                                                        sharex = True, sharey = True)
            self.MPLFig.suptitle('Analog XCorr: %s' % self.currentFileName, fontsize=11)
            self.MPL_plots.scatter(self.ROIDistanceMap, self.IXC_Strength, s=15, color='tomato')
            self.MPL_plots.plot(thrlinex, thrliney)
            self.MPL_plots.set_xlabel('Distance (%s)' % self.imageScaleUnit)
            self.MPL_plots.set_ylabel('Correlation (R)')
            self.MPL_plots.set_ylim((-1,1))
            PL.show()
        else:
            self.floatingDistWin = pyqtgrwindow(title = 'ROI Distance Strength')
            self.floatingDistWin.setWindowTitle('ROI Distance Strength: %s' % self.currentFileName)
            self.floatingDistWin.layout.clear()
            self.floatingDistWin.layout.setWindowTitle("New Title?")
            s1 = pg.ScatterPlotItem(size=7, pen=pg.mkPen(None), brush=pg.mkBrush(255, 0, 0, 255))
            # flatten and drop NaN entries before plotting
            X = np.reshape(self.ROIDistanceMap, -1)
            X = X[~np.isnan(X)]
            Y = np.reshape(self.IXC_Strength, -1)
            Y = Y[~np.isnan(Y)]
            p = self.floatingDistWin.layout.addPlot(0,0)
            s1.addPoints(X, Y)
            p.addItem(s1)
            p.plot(thrlinex, thrliney, pen=pg.mkPen(width=0.75, color='c'))
            p.plot(thrlinex, nthrliney, pen=pg.mkPen(width=0.75, color='c'))
            p.setLabel('bottom', 'Distance (%s)' % self.imageScaleUnit)
            p.setLabel('left', 'Correlation (R)')
            p.setYRange(-1, 1)
            (xm, xn) = self._calcMinMax(X)
            p.setXRange(0., xn);
def _calcMinMax(self, x, p=0.05):
'''
Compute initial min and max axis scaling points.
Approach:
a) with buffer:
reserve a fraction p of the total span of an axis as buffer and
round to next order of magnitude
b) strict (p==0):
just round to the next order of magnitude
Special cases:
x_min==x_max : assign symmetric interval or [0,1], if zero.
From:
F. Oliver Gathmann (gathmann@scar.utoronto.ca)
Surface and Groundwater Ecology Research Group
University of Toronto
phone: (416) - 287 7420 ; fax: (416) - 287 7423
web: http://www.scar.utoronto.ca/~gathmann
'''
if len(x) > 0: # not an empty array passed
x_max, x_min = np.maximum.reduce(x),np.minimum.reduce(x)
if x_min != x_max: # esp. not both x_min,x_max equal to zero
span = x_max - x_min
buffer = p * span
if x_min-buffer > 0: # both (x_min-buffer),(x_max+buffer) > 0
x_min = round(x_min - buffer, -int((np.floor(np.log10(buffer) - 1))))
x_max = round(x_max + buffer, -int((np.ceil(np.log10(buffer) - 1))))
elif x_max+buffer < 0: # both (x_min-buffer),(x_max+buffer) < 0
x_min = round(x_min - buffer, -int((np.ceil(np.log10(buffer) - 1))))
x_max = round(x_max + buffer, -int((np.floor(np.log10(buffer) - 1))))
else: # (x_min-buffer </= 0)and(x_max+buffer >/= 0)
try:
x_min = round(x_min - buffer, -int((np.ceil(np.log10(buffer) - 1))))
except OverflowError: # buffer == 0
x_min = 0
try:
x_max = round(x_max + buffer, -int((np.ceil(np.log10(buffer) - 1))))
except OverflowError: # buffer == 0
x_max = 0
else:
if x_min != 0:
x_min = x_min - x_min/2.0
x_max = x_max + x_max/2.0
else:
x_min = 0
x_max = 1
else:
x_min = 0
x_max = 1
return x_min,x_max
    def printDistStrength(self):
        """Print a table of pairwise ROI distances and cross-correlation strengths."""
        print '\n\n----------------------------------\nROI Distance Map\nFile: %s '% self.currentFileName
        print 'roi1\troi2\td (um)\t R'
        sh = self.ROIDistanceMap.shape
        for i in range(0, sh[0]):
            # upper triangle only: each pair printed once
            for j in range(i+1, sh[1]):
                print '%d\t%d\t%8.0f\t%6.3f' % (i, j, self.ROIDistanceMap[i, j], self.IXC_Strength[i, j])
        print '-------------------------------\n'
    def NetworkGraph(self):
        """
        Create a graph showing the network. Each node is an ROI, and the lines connecting
        the nodes have a thickness that corresponds to the strength of the cross correlation.
        Positive correlations above threshold are drawn red, negative blue;
        sub-threshold pairs are not drawn. Uses matplotlib or pyqtgraph
        depending on the UI checkbox.
        """
        if self.ROIDistanceMap == []:
            self.ROIDistances() # make sure we have valid distance information
        if self.IXC_Strength == []:
            self.Analog_Xcorr_Individual(plottype=None)
        self.use_MPL = self.ctrlImageFunc.IAFuncs_MatplotlibCheckBox.checkState()
        if self.use_MPL:
            self.checkMPL()
            (self.MPLFig, self.MPL_plots) = PL.subplots(num="Network Graph", nrows=1, ncols=1,
                                                        sharex=True, sharey=True)
            self.MPLFig.suptitle('Network Graph: %s' % self.currentFileName, fontsize=11)
            yFlip_flag = False
        else:
            self.floatingDistWin = pyqtgrwindow(title = 'Network Graph')
            self.floatingDistWin.setWindowTitle('Network Graph: %s' % self.currentFileName)
            self.floatingDistWin.layout.clear()
            self.floatingDistWin.layout.setWindowTitle("Network Graph?")
            plt = self.floatingDistWin.layout.addPlot(0,0)
            # pyqtgraph's y axis runs opposite to the image convention here
            yFlip_flag = True
        (sx, sy, px) = self.getImageScaling()
        maxStr = np.abs(np.nanmax(self.IXC_Strength))
        # minStr = np.nanmin(self.IXC_Strength)
        # line width scales between minline and maxline with correlation strength
        maxline = 4.0
        minline = 0.20
        threshold = self.ctrlImageFunc.IAFuncs_XCorrThreshold.value()
        nd = len(self.AllRois)
        X = np.zeros(nd)
        Y = np.zeros(nd)
        for i in range(0, nd):
            # node position: center of the roi's bounding rect, in scaled units
            wpos1 = [self.AllRois[i].pos().x(), self.AllRois[i].pos().y(),
                     self.AllRois[i].boundingRect().width(), self.AllRois[i].boundingRect().height()]
            x1 = (wpos1[0]+0.5*wpos1[2])*px[0]
            y1 = (wpos1[1]+0.5*wpos1[3])*px[1]
            if yFlip_flag:
                y1 = sy - y1
            X[i] = x1
            Y[i] = y1
            for j in range(i+1, nd):
                wpos2 = [self.AllRois[j].pos().x(), self.AllRois[j].pos().y(),
                         self.AllRois[j].boundingRect().width(), self.AllRois[j].boundingRect().height()]
                x2 = (wpos2[0]+0.5*wpos2[2])*px[0]
                y2 = (wpos2[1]+0.5*wpos2[3])*px[1]
                if yFlip_flag:
                    y2 = sy-y2
                if np.abs(self.IXC_Strength[i,j]) < threshold:
                    pass
                    # if self.use_MPL:
                    #     self.MPL_plots.plot([x1, x2], [y1, y2],
                    #                         linestyle = '--', color='grey', marker='o', linewidth=minline)
                    # else:
                    #     pn = pg.mkPen(width=minline, color=[128, 128, 128, 192], style=QtCore.Qt.DashLine)
                    #     plt.plot([x1, x2], [y1, y2], pen = pn)
                else:
                    lw = maxline*(abs(self.IXC_Strength[i, j])-threshold)/(maxStr-threshold)+minline
                    if self.IXC_Strength[i, j] >= threshold:
                        pn = pg.mkPen(width=lw, color=[255, 128, 128, 255])
                        mcolor = 'tomato'
                    else: # self.IXC_Strength[i,j] <= threshold:
                        pn = pg.mkPen(width=lw, color=[128, 128, 255, 255])
                        mcolor = 'blue'
                    if self.use_MPL:
                        self.MPL_plots.plot([x1, x2], [y1, y2], linewidth=lw,
                                            linestyle='-', color=mcolor, marker='o')
                    else:
                        plt.plot([x1, x2], [y1, y2], pen=pn)
        if self.use_MPL:
            self.MPL_plots.set_xlim((0, sx))
            self.MPL_plots.set_ylim((sy, 0))
            self.MPL_plots.set_xlabel('X (%s)' % self.imageScaleUnit)
            self.MPL_plots.set_ylabel('Y (%s)' % self.imageScaleUnit)
            PL.show()
        else:
            s1 = pg.ScatterPlotItem(size=7, pen=pg.mkPen(None), brush=pg.mkBrush(255, 0, 0, 255))
            s1.addPoints(X, Y)
            plt.addItem(s1)
            plt.setLabel('bottom', 'X (%s)' % self.imageScaleUnit)
            plt.setLabel('left', 'Y (%s)' % self.imageScaleUnit)
            plt.setXRange(0., sx)
            plt.setYRange(0., sy)
#--------------- From PyImageAnalysis3.py: -----------------------------
#---------------- ROI routines on Images ------------------------------
def clearAllROI(self):
""" remove all rois and all references to the rois """
for i, roi in enumerate(self.AllRois):
roi.hide()
self.AllRois = []
self.nROI = 0
self.FData = [] # FData is the raw ROI data before any corrections
self.BFData = [] # ROI data after all corrections
self.lastROITouched = []
self.ROI_Plot.clear()
#self.clearPlots()
    def deleteLastTouchedROI(self):
        """ remove the currently (last) selected roi and all references to it,
        then select and display a new ROI """
        ourWidget = self.lastROITouched
        if ourWidget not in self.AllRois:
            raise Exception("Delete ROI - Error: Last ROI was not in ROI list?")
        id = ourWidget.ID # get the id of the roi
        self.AllRois.remove(ourWidget) # remove it from our list
        ourWidget.hide()
        del ourWidget
        self.nROI = len(self.AllRois)
        for roi in self.AllRois:
            roi.ID = self.AllRois.index(roi) # renumber the roi list.
        # clamp the selection to a valid roi id
        if id < 0:
            id = self.AllRois[0].ID # pick first
        # NOTE(review): after renumbering, IDs run 0..nROI-1, so id > nROI can
        # never hold; presumably intended as id >= nROI -- confirm
        if id > self.nROI:
            id = self.AllRois[-1].ID # pick last
        self.FData = []
        self.BFData = []
        for roi in self.AllRois: # navigate the list one more time
            if id == roi.ID:
                self.updateThisROI(roi) # display the next chosen ROI in the box below the image
        # now update the overall ROI plot
        self.plotdata(yMinorTicks=0, yMajorTicks=3,
                      yLabel=u'F0<sub>ROI %d</sub>')
    def addOneROI(self, pos=(0, 0), hw=None):
        """
        append one roi to the self.AllRois list, put it on the screen (scene), and
        make sure it is actively connected to code.
        :param pos: Initial roi posistion (tuple, (x, y))
        :param hw: Initial ROI height and position (tuple (h,w)). If not defined, will get from current roi default
        :return: The roi handle is returned.
        """
        if hw is None:
            # default to the square size chosen in the UI
            dr = self.ctrlROIFunc.ImagePhys_ROISize.value()
            hw = [dr, dr]
        roi = pg.RectROI(pos, hw, scaleSnap=True, translateSnap=True)
        roi.addRotateHandle(pos=(0, 0), center=(0.5, 0.5))  # handle at left top, rotation about center
        # roi = qtgraph.widgets.EllipseROI(pos, hw, scaleSnap=True, translateSnap=True)
        # roi = qtgraph.widgets.MultiLineROI([[0,0], [5,5], [10,10]], 3, scaleSnap=True, translateSnap=True)
        roi.ID = self.nROI # give each ROI a unique identification number
        rgb = self.RGB[self.nROI]  # color assigned from the preset palette by index
        self.nROI = self.nROI + 1
        roi.setPen(QtGui.QPen(QtGui.QColor(rgb[0], rgb[1], rgb[2])))
        roi.color = rgb
        self.AllRois.append(roi)
        self.imageView.addItem(roi)
        self.updateThisROI(self.AllRois[-1]) # compute the new ROI data
        roi.sigRegionChanged.connect(self.updateThisROI) # if data region changes, update the information
        roi.sigHoverEvent.connect(self.showThisROI) # a hover just causes the display below to show what is hre already.
        return (roi)
# def plotImageROIs(self, ourWidget):
# """ plots a single ROIs in the image - as an initial instantiation.
# """
# if ourWidget in self.AllRois: # must be in the list of our rois - ignore other widgets
# tr = ourWidget.getArrayRegion(self.imageData, self.imageItem, axes=(1,2))
# tr = tr.mean(axis=2).mean(axis=1) # compute average over the ROI against time
# if self.datatype == 'int16':
# tr = tr / ourWidget.getArrayRegion(self.im_filt, self.imageItem, axes=(0,1)).mean(axis=1).mean(axis=0)
# sh = np.shape(self.FData)
# if sh[0] is 0:
# self.FData = atleast_2d(tr) # create a new trace in this place
# #sh = shape(self.FData)
# if sh[0] > ourWidget.ID: # did we move an existing widget?
# self.FData[ourWidget.ID,:] = np.array(tr) # then replace the trace
# else: # the widget is not in the list yet...
# self.FData = append(self.FData, atleast_2d(tr), 0)
# self.plotdata(roiUpdate=[ourWidget.ID], showplot=False, datacolor = ourWidget.color)
# def roiChanged(self, roi):
# if isinstance(roi, int):
# roi = self.currentRoi
# if roi is None:
# return
# self.ROI_Plot.clearPlots()
# lineScans = []
# for imgSet in self.imageData:
# data = roi.getArrayRegion(imgSet['procMean'], self.imageItem, axes=(1,2))
# m = data.mean(axis=1).mean(axis=1)
# lineScans.append(data.mean(axis=2))
# spacer = np.empty((lineScans[-1].shape[0], 1), dtype = lineScans[-1].dtype)
# spacer[:] = lineScans[-1].min()
# lineScans.append(spacer)
# data = roi.getArrayRegion(imgSet['procStd'], self.imageItem, axes=(1,2))
# s = data.mean(axis=1).mean(axis=1)
# self.ROI_Plot.plot(m, pen=pg.hsvColor(c*0.2, 1.0, 1.0))
# self.ROI_Plot.plot(m-s, pen=pg.hsvColor(c*0.2, 1.0, 0.4))
# self.ROI_Plot.plot(m+s, pen=pg.hsvColor(c*0.2, 1.0, 0.4))
#
# lineScan = np.hstack(lineScans)
# self.getElement('Line Scan').setImage(lineScan)
# self.currentRoi = roi
    def updateThisROI(self, roi, livePlot=True):
        """
        called when we need to update the ROI result plot for a particular ROI widget

        Extracts the mean trace over the roi region, normalizes it to its
        own mean if the image has not already been normalized, stores it in
        FData, applies the selected filters, and redisplays the roi.
        :param roi: handle to the ROI
        :param livePlot: flag for live plotting, passed to showThisROI
        :return: the (possibly normalized) mean trace, or None if roi is
            not in self.AllRois
        """
        if roi in self.AllRois:
            tr = roi.getArrayRegion(self.imageData, self.imageView.imageItem, axes=(1, 2))
            tr = tr.mean(axis=2).mean(axis=1) # compute average over the ROI against time
            # trx = tr.copy()
            if self.dataState['Normalized'] is False:
                # trm = tr.mean() # mean value across all time
                tr = tr/tr.mean() # (self.background[0:tr.shape[0]]*trm/self.backgroundmean)
            self.FData = self.insertFData(self.FData, tr.copy(), roi)
            self.applyROIFilters(roi)
            self.showThisROI(roi, livePlot)
            return(tr)
    def scannerTimes(self, roi):
        """
        compute mean time over the roi from the scanned time information estimates

        For scanned images each pixel has its own acquisition time offset
        (self.scanTimes); the mean offset over the roi is added to the
        frame start times.
        :params: roi - the roi information
        :returns: time array with mean roi collection time offset + base image time
        """
        tr = roi.getArrayRegion(self.scanTimes, self.imageView.imageItem, axes=(0, 1))
        tr = tr.mean(axis=1).mean(axis=0) # compute average over the ROI against time
        times = self.imageTimes[0:len(self.BFData[roi.ID])] + tr
        # print tr
        return times
    def showThisROI(self, roi, livePlot=True):
        """
        Show one ROI, highlighting it and brining it to the top of the traces
        other rois are dimmed and thinned
        If the plot of the roi does not exist, the plot is created.
        :param roi: the handle to the selected ROI
        :param livePlot: flag to allow update of plot in real time (if livePlot is not set, the roi
        may not be created at this time. (is this ever used?)
        :return: Nothing
        """
        if roi in self.AllRois:
            if livePlot is True:
                # choose the time base appropriate to how the images were acquired
                if self.imageType == 'camera':
                    times = self.imageTimes[0:len(self.BFData[roi.ID])]
                elif self.imageType in ['imaging', 'PMT']:
                    times = self.scannerTimes(roi)
                else:
                    raise ValueError('Image type for time array not known: %s', self.imageType)
                # setData fails with AttributeError on first use (no roi.plot
                # yet); the except branch creates the plot item instead
                try:
                    roi.plot.setData(times, self.BFData[roi.ID],
                                     pen=pg.mkPen(np.append(roi.color[0:3], 255), width=1.0)) #, pen=pg.mkPen(roi.color), clear=True)
                except:
                    roi.plot = self.ROI_Plot.plot(times, self.BFData[roi.ID],
                                                  pen=pg.mkPen(np.append(roi.color[0:3], 255), width=1.0), clear=False) # pg.mkPen('r'), clear=True)
            # selected roi: fully opaque, thicker pen, drawn on top
            c = np.append(roi.color[0:3], 255)
            roi.plot.setPen(pg.mkPen(color=c, width=2.0))
            roi.plot.setZValue(1000)
            roi.show()  # make sure the roi is visible
            # all other rois: semi-transparent, thin, behind the selected one
            # NOTE(review): assumes every other roi already has a .plot -- confirm
            for otherroi in self.AllRois:
                if otherroi != roi:
                    c = np.append(otherroi.color[0:3], 128)
                    otherroi.plot.setPen(pg.mkPen(color=c, width=1.0))
                    otherroi.plot.setZValue(500)
    def markROITouched(self, roi):
        """
        Highlight the last touched ROI in the field

        The previously touched roi's pen is restored and the newly touched
        roi is marked with a different pen width; self.lastROITouched is
        updated to the new roi.
        """
        if self.lastROITouched == []:
            # first touch ever: nothing to restore
            self.lastROITouched = roi
            roi.pen.setWidth(0.18) # just bump up the width
        if roi != self.lastROITouched:
            self.lastROITouched.pen.setWidth(0.18)
            # NOTE(review): mixes setWidth (int) and setWidthF (float) with
            # sub-pixel values -- confirm which is intended
            roi.pen.setWidthF(0.12)
            self.lastROITouched = roi # save the most recent one
def calculateAllROIs(self):
"""
calculateAllROIs forces a fresh recalculation of all ROI values from the current image
"""
self.FData = []
self.BFData = []
currentROI = self.lastROITouched
for ourWidget in self.AllRois:
tr = self.updateThisROI(ourWidget, livePlot=False)
self.FData = self.insertFData(self.FData, tr, ourWidget)
self.applyROIFilters(self.AllRois)
self.updateThisROI(currentROI) # just update the latest plot with the new format.
def refilterCurrentROI(self):
"""
calculateCurrentROI forces a fresh recalculation of the most recently touched ROI
"""
roi = self.lastROITouched
if roi in self.AllRois:
self.applyROIFilters(roi)
self.ROI_Plot.plot(self.imageTimes, self.BFData[roi.ID], pen=pg.mkPen('r'), clear=True)
def insertFData(self, FData, tr, roi):
sh = np.shape(FData)
if sh[0] == 0:
FData = np.atleast_2d(tr) # create a new trace in this place
if sh[0] > roi.ID: # did we move an existing widget?
FData[roi.ID] = np.array(tr) # then replace the trace
else: # the widget is not in the list yet...
FData = np.append(FData, np.atleast_2d(tr), 0)
return(FData)
    def applyROIFilters(self, rois):
        """
        If checked, apply LPF, HPF, and baseline corrections to the resulting ROI data

        :param rois: one ROI or a list of ROIs; each ROI's raw trace
            (self.FData[roi.ID]) is filtered into self.BFData.
        :return: Nothing; self.BFData is updated in place via insertFData.
        """
        if type(rois) is not list:
            rois = [rois]
        # try:
        #     l = len(self.BFData)
        # except:
        #     self.BFData = []
        for roi in rois:
            # always restart from the raw trace so filters are never applied twice
            self.BFData = self.insertFData(self.BFData, self.FData[roi.ID], roi) # replace current data with raw data
            # NOTE(review): the baseline checkbox is read from self.ctrl, but the
            # LPF/HPF checkboxes come from self.ctrlROIFunc -- confirm intended
            if self.ctrl.ImagePhys_CorrTool_BL1.isChecked():
                bl = self.Baseline1(roi)
                self.BFData = self.insertFData(self.BFData, bl, roi)
            if self.ctrlROIFunc.ImagePhys_CorrTool_LPF.isChecked() and self.ctrlROIFunc.ImagePhys_CorrTool_HPF.isChecked():
                # both boxes checked: apply a single band-pass instead of LPF+HPF
                bpf = self.SignalBPF(roi)
                self.BFData = self.insertFData(self.BFData, bpf, roi)
            else:
                if self.ctrlROIFunc.ImagePhys_CorrTool_LPF.isChecked():
                    lpf = self.SignalLPF(roi)
                    self.BFData = self.insertFData(self.BFData, lpf, roi)
                if self.ctrlROIFunc.ImagePhys_CorrTool_HPF.isChecked():
                    hpf = self.SignalHPF(roi)
                    self.BFData = self.insertFData(self.BFData, hpf, roi)
def optimizeAll(self):
for roi in self.AllRois:
self.optimizeThisROI(roi)
def optimizeOne(self):
if self.lastROITouched in self.AllRois:
self.optimizeThisROI(self.lastROITouched)
    def optimizeThisROI(self, ourWidget, livePlot=True):
        """ This routine determines the best (largest) signal in a region in and
        around the current ROI, by moving (dithering) the ROI. The ROI is left
        positioned at the "best" location

        :param ourWidget: the ROI to dither (must be in self.AllRois)
        :param livePlot: unused here; the live plotting code is commented out
        :return: Nothing; the ROI position is updated via setPos.
        """
        # ditherX = self.ui.ditherX.value()
        # ditherY = self.ui.ditherY.value()
        # ditherMode = self.ui.ditherMode.currentIndex()
        ditherX = 2
        ditherY = 2
        ditherMode = 0
        if ourWidget in self.AllRois:
            #(tr_test, trDither) = self.__measDither(ditherMode, ourWidget)
            wpos = ourWidget.state['pos']
            tr_best = 0.0
            tr_X = wpos[0]
            tr_Y = wpos[1]
            # NOTE(review): range(-d, d) never tests the +d offset -- confirm intended
            for x in range(-ditherX, ditherX):
                for y in range(-ditherY, ditherY):
                    px = wpos[0]+x
                    py = wpos[1]+y
                    ourWidget.setPos([px, py])  # move, then measure at the new spot
                    (tr_test, trDither) = self.__measDither(ditherMode, ourWidget)
                    if tr_test > tr_best:
                        tr_X = px
                        tr_Y = py
                        tr_best = tr_test
                        tr = trDither # save peak signal (only used by the commented plot below)
            ourWidget.setPos([tr_X, tr_Y])  # leave the ROI at the best position found
        # if livePlot:
        #     MPlots.updatePlot(self.ui.liveROIPlot, range(0, np.shape(tr)[0]), tr, 'liveROI',
        #        color=self.RGB[ourWidget.ID-1])
def __measDither(self, ditherMode, ourWidget):
"""Compute the value that we are optimizing for the dithering."""
trDither = ourWidget.getArrayRegion(self.normData[0], self.imageItem, axes=(1,2))
trDither = trDither.mean(axis=2).mean(axis=1) # compute average over the ROI against time
if ditherMode is 0: # peak to peak
tr_test = np.amax(trDither) - np.amin(trDither)
elif ditherMode is 1: # baseline to peak
tr_test = np.amax(trDither)
elif ditherMode is 2: # standard deviation
tr_test = np.std(trDither)
else:
tr_test = 0.
return(tr_test, trDither)
def ROIDistances(self):
"""
measure the distances between all possible pairs of ROIs, store result in matrix...
The distances are scaled into microns or pixels.
"""
print 'Calculating ROI to ROI distances'
nd = len(self.AllRois)
self.ROIDistanceMap = np.empty((nd, nd)) # could go sparse, but this is simple...
self.ROIDistanceMap.fill(np.nan)
(sx, sy, px) = self.getImageScaling()
for i in range(0, nd):
wpos1 = [self.AllRois[i].pos().x(), self.AllRois[i].pos().y(),
self.AllRois[i].boundingRect().width(), self.AllRois[i].boundingRect().height()]
x1 = (wpos1[0]+0.5*wpos1[2])*px[0]
y1 = (wpos1[1]+0.5*wpos1[3])*px[1]
for j in range(i+1, nd):
wpos2 = [self.AllRois[j].pos().x(), self.AllRois[j].pos().y(),
self.AllRois[j].boundingRect().width(), self.AllRois[j].boundingRect().height()]
x2 = (wpos2[0]+0.5*wpos2[2])*px[0]
y2 = (wpos2[1]+0.5*wpos2[3])*px[1]
self.ROIDistanceMap[i,j] = np.sqrt((x1-x2)**2+(y1-y2)**2)
def newpgImageWindow(self, title='', border='w'):
newWin = pyqtgrwindow(title=title)
view = pg.GraphicsView()
newWin.setCentralWidget(view)
newWin.show()
img = pg.ImageItem(border=border)
view.scene().addItem(img)
view.setRange(QtCore.QRectF(0, 0, 500, 500))
return(newWin, view, img)
def saveROI(self, fileName=None):
"""Save the ROI information (locations) to a disk file."""
self.calculateAllROIs()
if self.FData == []:
print 'self.FData is empty!'
return
sh = np.shape(self.FData)
data = np.empty((sh[0]+2, sh[1]))
data[0] = np.arange(0,sh[1])
data[1] = self.imageTimes.copy()
roiData = []
for i in range(0, sh[0]):
data[i+2] = self.FData[i]
roiData.append([self.AllRois[i].pos().x(), self.AllRois[i].pos().y(),
self.AllRois[i].boundingRect().height(), self.AllRois[i].boundingRect().width()])
data = data.T ## transpose
if fileName is None or fileName is False:
fileName= QtGui.QFileDialog.getSaveFileName(None, "Save ROI as csv file", "",
self.tr("CSV Files (*.csv)"))
if not fileName:
return
(fnc, extc) = os.path.splitext(fileName)
fName = fnc + '.csv'
fd = open(fName, 'w')
stringVals=''
for col in range(0, data.shape[1]): # write a header for our formatting.
if col is 0:
fd.write('time(index),')
elif col is 1:
fd.write('time(sec),')
stringVals = ['R%03d' % x for x in range(0, data.shape[1]-2)]
fd.write(",".join(stringVals) + "\n")
for row in range(0, data.shape[0]):
stringVals = ["%f" % x for x in data[row]]
fd.write(",".join(stringVals) + "\n")
# print 'Wrote: %s\n' % (fName)
fd.close()
(fnc, extc) = os.path.splitext(fileName)
fName = fnc + '.roi'
fd = open(fName, 'w')
for rd in roiData:
fd.write(' '.join(map(str, rd)) + '\n')
# print 'Wrote: %s\n' % fName
fd.close()
def restoreROI(self, fileName=None):
"""Retrieve the ROI locations from a file, plot them on the image, and compute the traces."""
self.clearAllROI() # always start with a clean slate.
if fileName is False or fileName is None:
fileName = QtGui.QFileDialog.getOpenFileName(None, u'Retrieve ROI data', u'', u'ROIs (*.roi)')
if fileName:
fd = open(fileName, 'r')
for line in fd:
roixy = np.fromstring(line, sep=' ')
self.addOneROI(pos=[roixy[0], roixy[1]], hw=[roixy[2], roixy[3]])
fd.close()
self.calculateAllROIs()
#self.makeROIDataFigure(clear=True)
    def makeROIDataFigure(self, clear = True, gcolor = 'k'):
        """
        Plot all ROI traces in a matplotlib figure, one subplot per ROI.

        :param clear: unused in this routine
        :param gcolor: matplotlib color for the trace lines
        """
        self.checkMPL()
        # NOTE(review): when self.nROI == 1, PL.subplots returns a single Axes
        # rather than an array, so MPL_plots[i] below would fail -- confirm
        # this is only called with nROI > 1
        (self.MPLFig, self.MPL_plots) = PL.subplots(num="ROI Data", nrows = self.nROI, ncols=1,
                sharex = True, sharey=True)
        self.MPLFig.suptitle('ROI Traces: %s' % self.currentFileName, fontsize=10)
        ndpt = len(self.FData[0,])  # limit x axis to the trace length
        for i in range(self.nROI):
            self.MPL_plots[i].plot(self.imageTimes[0:ndpt], self.FData[i,:], color = gcolor)
            #self.MPL_plots[i].hold(True)
        PL.show()
#----------------------Stack Ops (math on images) ---------------------------------
def stackOp_absmax(self): # absolute maximum
"""Make an image that is the maximum of each pixel across the image stack."""
self.clearAllROI()
sh = np.shape(self.imageData)
if len(sh) == 4:
self.image = np.amax(self.imageData[:,1,:,:], axis=0).astype('float32')
elif len(sh) == 3:
self.image = np.amax(self.imageData[:, :, :], axis=0).astype('float32')
self.paintImage(image=self.image, focus=False)
def stackOp_normmax(self): # normalized maximum
"""
Make an image that is the maximum of each pixel, normalized within each image, across the image stack.
"""
self.clearAllROI()
levindex = self.ui.stackOp_levels.currentIndex()
levels = [8., 16., 256., 4096., 65536.]
id_shape = np.shape(self.imageData)
id = np.zeros(id_shape)
self.imageLevels = levels[-1]
if len(id_shape) == 4:
plane = 1
amaxd = np.amax(self.imageData[:, plane, :, :], axis=0).astype('float32')
amind = np.amin(self.imageData[:, plane, :, :], axis=0).astype('float32')
id = np.floor((levels[levindex]/amaxd)*(self.imageData[:, plane, :, :].astype('float32')-amind))
elif len(id_shape) == 3:
amaxd = np.amax(self.imageData[:, :, :], axis=0).astype('float32')
amind = np.amin(self.imageData[:, :, :], axis=0).astype('float32')
id = np.floor((levels[levindex]/amaxd)*(self.imageData[:, :, :].astype('float32')-amind))
self.image = np.amax(id, axis = 0)
self.paintImage(image=self.image, focus=False)
def stackOp_std(self):
"""Make an image that is the standard deviation of each pixel across the image stack."""
self.clearAllROI()
sh = np.shape(self.imageData);
if len(sh) == 4:
self.image = np.std(self.imageData[:,1,:,:], axis = 0)
elif len(sh) == 3:
self.image = np.std(self.imageData[:,:,:], axis = 0)
self.paintImage(image=self.image, focus=False)
def stackOp_mean(self):
"""Make an image that is the mean of each pixel across the image stack."""
sh = np.shape(self.imageData);
self.clearAllROI()
if len(sh) == 4:
self.image = np.mean(self.imageData[:,1,:,:], axis = 0)
elif len(sh) == 3:
self.image = np.mean(self.imageData[:,:,:], axis = 0)
self.paintImage(image=self.image, focus=False)
def stackOp_restore(self):
"""Redraw the original image stack."""
self.paintImage(updateTools=True, focus=True) # return to the original imagedata
#----------------------Image Processing methods ----------------
# Includes bleach correction, filtering (median and gaussian), and deltaF/F calculation
    def unbleachImage(self):
        """
        Correct the image stack for photobleaching and repaint the display.

        Three correction modes are coded, selected by the local 'bleachmode'
        variable (fixed here to '2DPoly'):
          'exp2'   - fit the mean-intensity time course with a double exponential
          'SG'     - smooth the time course with a Savitzky-Golay filter
          '2DPoly' - whole-field correction: frame-by-frame linear regression
                     of each image against a Gaussian-blurred mean image
        Side effects: replaces self.imageData, sets self.tc_bleach (and
        self.tc_offset for 2DPoly), updates dataState flags, and plots the raw
        and corrected mean time courses in self.backgroundPlot.
        """
        self.dataState['bleachCorrection'] = False # reset flag...
        self.imageData = self.rawData.copy() # starts over, no matter what.
        self.dataState['Normalized'] = False
        bleachmode = '2DPoly'
        imshape = np.shape(self.imageData)
        tc_bleach = np.zeros(imshape[0])
        b_corr = np.zeros(imshape[0])
        Fits = Fitting.Fitting()
        # mean intensity of each frame: the raw bleaching time course
        for k in range(0, imshape[0]):
            tc_bleach[k] = np.mean(self.imageData[k, :, :])
        dt = np.mean(np.diff(self.imageTimes)) # sampling rate, seconds
        endT = np.amax(self.imageTimes)
        mFluor = tc_bleach[0]
        # replace tc_bleach with a smoothed version - 4th order polynomial
        fitx = np.arange(0, np.shape(tc_bleach)[0])
        if bleachmode == 'exp2':
            # use a double exponential fit
            (fpar, xf, yf, names) = Fits.FitRegion([0], 0, fitx, tc_bleach, 0.0, np.amax(fitx),
                                          fitFunc = 'exp2', fitPars=[0.9, 0.5, endT/5.0, 0.5, endT/2.0],
                                          plotInstance = None)
            # (a0, a1, tau) = Fits.expfit(fitx, tc_bleach)
            # print("fit result = a0: %f a1: %f tau: %f\n", (a0, a1, tau))
            # print fpar
            DC = fpar[0][0]
            A0 = fpar[0][1]
            tau1 = fpar[0][2]
            A1 = fpar[0][3]
            tau2 = fpar[0][4]
            self.tc_bleach = (DC + A0*np.exp(-fitx/tau1) + A1*np.exp(-fitx/tau2)) # convert start value to 1.0, take it from there
        if bleachmode == 'SG':
            windur = endT/5.0
            k = int(windur/dt) # make k the number of points in 2 second window
            if k % 2 == 0:  # Savitzky-Golay kernel must be odd
                k += 1
            self.tc_bleach = Utility.savitzky_golay(tc_bleach, kernel = k, order = 5)
        if bleachmode == '2DPoly':
            import itertools
            def polyfit2d(x, y, z, order=5):
                # least-squares fit of a 2D polynomial surface
                # (currently unused; the Gaussian-blurred mean image is used instead)
                ncols = (order + 1)**2
                G = np.zeros((x.size, ncols))
                ij = itertools.product(range(order+1), range(order+1))
                for k, (i,j) in enumerate(ij):
                    G[:,k] = x**i * y**j
                m, _, _, _ = np.linalg.lstsq(G, z)
                return m
            def polyval2d(x, y, m):
                # evaluate the 2D polynomial produced by polyfit2d (currently unused)
                order = int(np.sqrt(len(m))) - 1
                ij = itertools.product(range(order+1), range(order+1))
                z = np.zeros_like(x)
                for a, (i,j) in zip(m, ij):
                    z += a * x**i * y**j
                return z
            # x = np.repeat(np.arange(imshape[1]), imshape[2])
            # y = np.tile(np.arange(imshape[1]), imshape[2]) # get array shape
            mi = np.mean(self.imageData, axis=0)
            z = np.reshape(mi, (imshape[1]*imshape[2], 1))
            # nx = int(imshape[1]/10)
            # ny = int(imshape[2]/10)
            # blurred mean image = spatial illumination/bleach profile
            blimg = scipy.ndimage.filters.gaussian_filter(mi, 15, order = 0, mode='reflect')
            #m = polyfit2d(x, y, z, order=3)
            #xx, yy = np.meshgrid(np.linspace(x.min(), x.max(), imshape[1]), np.linspace(y.min(), y.max(), imshape[2]))
            #blimg = polyval2d(xx, yy, m)
            #PL.imshow(blimg, extent=(x.min(), y.max(), x.max(), y.min()))
            #PL.show()
            self.tc_offset = np.zeros(imshape[0])
            zz = blimg.reshape(blimg.size, 1)
            self.tc_bleach = np.zeros(imshape[0])
            A = np.vstack([zz.reshape(1, zz.size), np.ones(zz.size)]).T
            # per-frame regression: frame ~ tc_bleach[k]*profile + tc_offset[k]
            for k in range(0, imshape[0]):
                z, u, r, s = np.linalg.lstsq(A, self.imageData[k,:,:].reshape(imshape[1]*imshape[2], 1))
                if k == 0:
                    print z
                self.tc_bleach[k] = z[0]
                self.tc_offset[k] = z[1]
        BleachPct = 100.0*(self.tc_bleach[-1]-self.tc_bleach[0])/self.tc_bleach[0]
        scaled_blimg = blimg/np.amax(np.amax(blimg)) # scale to max of 1.0
        self.tc_bleach = self.tc_bleach/self.tc_bleach[0]
        mean_orig = np.mean(tc_bleach)
        for k in range(0, len(self.imageData)):
            # avgint = np.mean(np.mean(self.imageData[k], axis=1), axis=0) # get the corrected value here
            if bleachmode == '2DPoly': # whole field correction, not just linear with time
                # print np.amax(np.amax(scaled_blimg, 0), 0)*tc_bleach[k], self.tc_offset[k]
                self.imageData[k, :, :] = (self.imageData[k, :, :] - self.tc_offset[k]) / (scaled_blimg*self.tc_bleach[k])
            else:
                self.imageData[k, :, :] = self.imageData[k ,:, :] / (self.tc_bleach[k]/mFluor)
            b_corr[k] = np.mean(self.imageData[k,:,:]) # get the corrected value here
            # self.rawData[k,:,:] = self.rawData[k,:,:] / self.tc_bleach[k]
            # NOTE(review): mean_final is recomputed each pass and therefore ends
            # as the LAST frame's mean -- confirm whether a whole-stack mean was intended
            mean_final = np.mean(np.mean(np.mean(self.imageData[k], axis=1), axis=0))
        # rescale so the overall intensity matches the original mean
        for k in range(0, len(self.imageData)):
            self.imageData[k, :, :] = self.imageData[k, :, :] * mean_orig/mean_final
            b_corr[k] = np.mean(self.imageData[k, :, :]) # get the corrected value here
        self.ctrlROIFunc.ImagePhys_BleachInfo.setText('B=%6.2f%%' % BleachPct)
        ndl = len(tc_bleach)
        self.backgroundPlot.plot(y=tc_bleach, x=self.imageTimes[0:ndl], pen=pg.mkPen('r'), clear=True)
        #self.backgroundPlot.plot(y=self.tc_bleach, x=self.imageTimes[0:ndl], clear=False, pen=pg.mkPen('b'))
        self.backgroundPlot.plot(y=b_corr, x=self.imageTimes[0:ndl], clear=False, pen=pg.mkPen('g'))
        self.paintImage(focus = False)
        self.updateAvgStdImage()
        self.dataState['bleachCorrection'] = True # now set the flag
#------------------------------------------------------------------------------------
# Helpers for ROI finding, and the ROI finding routine:
def angle_cos(self, p0, p1, p2):
d1, d2 = p0-p1, p2-p1
return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1)*np.dot(d2, d2)))
def pOpen(self, img, block_size):
""" consists of Dilation followed by erosion """
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (block_size, block_size))
dimg = cv2.dilate(img, kernel)
oimg = cv2.erode(dimg, kernel)
return(oimg)
def pClose(self, img, block_size):
""" consists of Erosion followed by Dilation """
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (block_size, block_size))
eimg = cv2.erode(img, kernel)
dimg = cv2.dilate(eimg, kernel)
return(dimg)
def ProperOpen(self, img, block_size):
return(self.pOpen(self.pClose(self.pOpen(img, block_size), block_size), block_size))
    def findROIs(self):
        """ find potential regions of interest in an image series.
        This algorithm does the following:
        1. We use the standard deviation or power spectrum of the image. A series of thresholds
           are then set and contours identified. Each contour includes an area in which
           the standard deviation of the image exceeds the threshold. The contours are checked for
           minimum and maximum area.
        2. Next, for each threshold level:
            for each contour at that threshold, we identify contours at the next thresholded
            level up whose center of mass is inside ours. There are 2 possibilities:
                a. no contours fall inside the current site. This site is a "peak", and
                   its center of mass is stored as an ROI location.
                b. one or more contours have a CM at the next level that falls inside
                   the current site. This means that the peak is higher than the current
                   threshold.
                    i. If we are not at the next to the highest threshold, we do not save this
                       location as a potential ROI (it will be identified when looking at the
                       next threshold level).
                    ii. If we are at the next to the highest threshold, then those locations
                       are saved as candidate ROIs.
        3. We filter candidate ROIs by distances, so that there are no overlapping ROIs.
        """
        if openCVInstalled is False:
            return
        # source image: per-pixel std deviation or power spectrum, per the UI
        if self.ctrlROIFunc.ImagePhys_StdRB.isChecked():
            imstd = self.stdImage
        else:
            imstd = self.specImage
        dr = 3.0 # Roi size
        dr = self.ctrlROIFunc.ImagePhys_ROISize.value() # get roi size from the control
        diag = np.hypot(dr,dr)# note we only accept ROIs that are more than this distance apart - nonoverlapping
        stdmax = np.amax(imstd)
        imstd = 255.0*imstd/stdmax
        imstd = scipy.ndimage.gaussian_filter(imstd, sigma=0.002)
        block_size2 = int(self.ctrlROIFunc.ImagePhys_ROIKernel.currentText())
        # Note: block_size must be odd, so control has only odd values and no edit.
        stdmax = np.amax(imstd)
        imstd = 255.0*imstd/stdmax
        reconst2 = self.ProperOpen(imstd.astype('uint8'), block_size2)
        maxt = int(np.amax(reconst2))
        # mint = int(np.amin(reconst2))
        meant = int(np.mean(reconst2))/2.0
        # sqs = {}
        pols = {}
        thr_low = self.ctrlROIFunc.ImagePhys_ROIThrLow.value()
        thr_high = self.ctrlROIFunc.ImagePhys_ROIThrHigh.value()
        thrlist = np.arange(thr_low, thr_high*1.2, 0.05) # start at lowest and work up
        import matplotlib.colors as mc
        thrcols = list(mc.cnames.keys()) # ['r', 'orange', 'y', 'g', 'teal', 'c', 'b', 'violet', 'gray', '']
        # find contours for each threshold level
        for t in thrlist:
            thr = (maxt-meant)*t
            imctr = reconst2.copy() # cv2 method may modify input argument
            retval, ci = cv2.threshold(imctr.astype('uint8'), thr, maxt, cv2.THRESH_BINARY)
            contours, heirarchy = cv2.findContours(ci, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            oth = []
            m = []
            pols[t] = []
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                m.append(cv2.minAreaRect(cnt))
                area = cv2.contourArea(cnt)
                if len(cnt) == 4 and area > 2.0 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                # keep contours within the acceptable area range only
                if area > 2.0 and area < 400:
                    cnt = cnt.reshape(-1,2)
                    cnt = np.append(cnt, cnt[0]) # add the first point to the array to make sure it is closed
                    oth.append([cnt, True])
            pols[t] = oth
        # now check for the polygons whose center of mass is inside other polygons
        # if, from lowest threshold upwards,
        savpols = pols.copy()
        # roi = []
        npolys = 0
        for t in thrlist:
            npolys += len(pols[t])
        regthresh = {} # we save the region threshold [Region: thresh]
        finalregions = {} # and the location [Region: (x,y)]
        nregs = 0
        with pg.ProgressDialog("Searching for ROIs ...", 0, 100) as dlg:
            for i in range(len(thrlist)-1): # work through all thresholds, starting at the bottom
                t = thrlist[i]
                # print '\n\n>>>>>>>>>>testing for threshold = %9.3f<<<<<<<<' % t,
                if len(pols[t]) == 0:
                    # print ' (found no candidates at threshold) ', t
                    continue
                #print ' found %d candidates' % len(pols[t])
                for k1, s1 in enumerate(pols[t]): # for each region at this threshold
                    dlg.setMaximum(len(pols[t]))
                    dlg.setValue(k1)
                    if dlg.wasCanceled():
                        raise HelpfulException("The search for ROIs was canceled by the user.", msgType='status')
                    poly_low = np.array([s1[0].reshape(-1,2)]) # this is needed for cv2.moments to take the argument.
                    t2 = thrlist[i+1] # examine the next higher threshold
                    oneabove = False
                    m = cv2.moments(poly_low)
                    cm_low = (m['m10']/m['m00'], m['m01']/m['m00']) # compute center of mass of this point
                    for k2, s2 in enumerate(pols[t2]): # for each region identified at the next threshold level:
                        poly_high = np.array([s2[0].reshape(-1,2)])
                        m_high = cv2.moments(poly_high)
                        cm_high = (m_high['m10']/m_high['m00'], m_high['m01']/m_high['m00']) # compute center of mass of this point
                        test = cv2.pointPolygonTest(poly_low, cm_high, False) # is that center of mass
                        if test >= 0: # a higher threshold center is definitely INSIDE the polygon of the lower threshold
                            oneabove = True # we just need to find one - there could be more
                            break
                    if oneabove is False: # no CM's were found above us, so save this value
                        finalregions[nregs] = cm_low # Accept this polygon at this threshold as a candidate.
                        regthresh[nregs] = t
                        nregs += 1
        # finally, also accept all peaks at the highest threshold level - they were "deferred" in the loop above
        t = thrlist[-1]
        for k1, s1 in enumerate(pols[t]):
            poly=np.array([s1[0].reshape(-1,2)])
            m = cv2.moments(poly)
            cm = (m['m10']/m['m00'], m['m01']/m['m00'])
            finalregions[nregs] = cm # all polygons at this level are accepted
            regthresh[nregs] = t
            nregs += 1
        print 'Regions detected: %d' % (nregs)
        # clean up the final regions - accept only those whose centers are more than
        # "diag" of an ROI apart.
        # first convert the dictionary to a simple list in order
        fp = []
        for u in finalregions:
            fp.append(finalregions[u])
        tree = scipy.spatial.KDTree(fp) # make a tree
        candidates = {} # isolated
        candidates_n = {} # the neighbors not selected
        excluded = []
        for i, p in enumerate(finalregions.keys()):
            if p in excluded: # or p in candidates_n:
                continue
            set_close = tree.query(fp[i], k=100, distance_upper_bound=diag) # find all pairs that are close together
            neighbors = []
            allth = [] # get the thresholds for all the neighbors
            for p2 in list(set_close[1]):
                if p2 == len(fp): # return values include self and inf.
                    continue
                if p2 in excluded or p2 in candidates_n:
                    continue
                neighbors.append(p2) # build a list of local friends
                allth.append(regthresh[p2])
            if len(neighbors) == 1: # we are our only neighbor
                candidates[p] = (finalregions[p], regthresh[p]) # no decision to make, this one is isolated
                excluded.append(p)
                continue
            k = int(np.argmax(allth)) # find the one with the highest signal
            candidates[p] = (finalregions[neighbors[k]], allth[k]) # candidates will have only the keys that are picked.
            for n in neighbors:
                excluded.append(n) # add these to the excluded list
        print 'Found %d ROIs' % (len(candidates))
        # next we verify that there are no close ROI pairs left:
        # this may not be needed, but sometimes with the pairwise-comparison, it is
        # possible for a proposed ROI to slip through.
        nc = {}
        for i, c in enumerate(candidates):
            nc[i] = candidates[c] # just copy over with a new key
        cp = []
        # th = []
        excluded = []
        for i, u in enumerate(nc):
            cp.append(nc[u][0]) # just get the coordinates
        tree = scipy.spatial.KDTree(cp) # make a tree
        for i, p in enumerate(nc.keys()):
            if p in excluded:
                continue
            set_close = tree.query(cp[i], k=10, distance_upper_bound=diag) # find all pairs that are close together
            allth = [] # get the thresholds for all the neighbors
            neighbors=[]
            # NOTE(review): set_close is the (distances, indices) tuple from
            # KDTree.query, so this enumerate yields only j = 0, 1 -- confirm
            # whether iterating over set_close[1] was intended
            for j, p1 in enumerate(set_close):
                if set_close[0][j] == np.inf: # return values include self and inf.
                    continue
                p2 = set_close[1][j] # indexed into cp
                if p2 in excluded: # already kicked out
                    continue
                neighbors.append(p2) # build a list of local friends, mapped to main list
                allth.append(nc[p2][1]) # get the threshold
            if len(neighbors) == 1: # we are our only neighbor
                continue
            k = int(np.argmax(allth)) # find the one with the highest signal
            # NOTE(review): 'n == p2' compares against the loop-leftover p2, not
            # the strongest neighbor neighbors[k] -- confirm which should be kept
            for i, n in enumerate(neighbors):
                if n == p2:
                    continue
                excluded.append(neighbors[i])
                nc.pop(n) # remove the duplicates
        print 'Reduced to %d ROIs' % (len(nc))
        candidates = nc.copy()
        self.oldROIs = self.AllRois
        self.clearAllROI()
        plotContours = False
        if plotContours:
            PL.subplot(111)
            PL.cla()
            PL.imshow(imstd, cmap=PL.cm.gray)
            PL.axis('off')
            # import matplotlib.cm as cmx
            # import matplotlib.colors as colors
            # jet = PL.get_cmap('jet')
            # cNorm = colors.normalize(vmin=0, vmax=max(thrlist))
            # scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
            for i, t in enumerate(thrlist):
                col = thrcols[i] # scalarMap.to_rgba(t)
                if len(pols[t]) == 0:
                    continue
                for p in savpols[t]: # for each region identified at this threshold:
                    if p[1]:
                        sr = p[0].reshape(-1,2)
                        PL.plot(sr[:,0], sr[:,1], color = col, linestyle='-')
        # place one ROI at each surviving candidate center (note x/y swap)
        for i, ra in enumerate(candidates):
            rxy = candidates[ra][0]
            if plotContours:
                PL.plot(rxy[0], rxy[1], 'r+')
            self.addOneROI(pos = [rxy[1]-dr/2, rxy[0]-dr/2], hw=[dr, dr])
        if plotContours:
            PL.show()
#-------------------------Corrections and/or Normalization---------------------------------
#
#
def slowFilterImage(self):
""" try automated signal extraction
Mellon and Tuong NeuroImage 47: 1331, 2009 """
if self.dataState['bleachCorrection'] is False:
print 'No Bleaching done, copy rawdata to image'
self.imageData = self.rawData.copy() # just copy over without a correction print 'Normalizing'
if self.dataState['Normalized'] is True and self.dataState['bleachCorrection'] is True:
print 'Data is already Normalized, type = %s ' % (self.dataState['NType'])
return
else:
self.imageData = self.rawData.copy() # just start over with the raw data...
sh = self.imageData.shape
t_delay = 0.2 # secs
t_targetSmooth = 0.25 # secs
t_subSmooth = 0.5 # secs
dt = np.mean(np.diff(self.imageTimes))
print dt
n_delay = t_delay/dt
n_targetSmooth = int(t_targetSmooth/dt)
n_subSmooth = int(t_subSmooth/dt)
# j_delay = 0
# k_delay = 0
smi = scipy.ndimage.filters.uniform_filter1d(self.imageData, axis = 0, size=n_targetSmooth)
smd = scipy.ndimage.filters.uniform_filter1d(self.imageData, axis = 0, size=n_subSmooth)
self.imageData = smi[n_delay:sh[0],:,:] - smd[0:sh[0]-n_delay+1,:,:] # shifted subtraction, reduces data set by the time involved
imstd = np.std(self.imageData, axis=0)
imstd = scipy.ndimage.gaussian_filter(imstd, sigma=0.002)
# isize = 1
# immax = scipy.ndimage.maximum_filter(imstd, size=isize, mode='constant')
imm = np.mean(np.mean(self.imageData, axis=2), axis=1)
ndl = imm.shape[0]
self.backgroundPlot.plot(y=imm, x=self.imageTimes[0:ndl], clear=True)
self.paintImage()
self.dataState['Normalized'] = True
self.dataState['NType'] = 'Slow Filter'
# self.ctrl.ImagePhys_NormInfo.setText('Slow Filter')
# this completes the "normalization for the "slow filtering mode"
# remainder of code here is for ROI detection.
    def normalizeImage(self):
        """
        Each image is normalized to the mean of the whole series, instead
        of using the starting images as the baseline

        :return: Nothing; self.imageData is replaced by the normalized stack
            and the dataState flags / UI label are updated.
        """
        if self.dataState['bleachCorrection'] is False:
            print 'No Bleaching done, copy rawdata to image'
            self.imageData = self.rawData.copy() # just copy over without a correction print 'Normalizing'
        if self.dataState['Normalized'] is True and self.dataState['bleachCorrection'] is True:
            print 'Data is already Normalized, type = %s ' % (self.dataState['NType'])
            return
        else:
            self.imageData = self.rawData.copy() # just start over with the raw data...
        meanimage = np.mean(self.imageData, axis=0)
        #meanimage = scipy.ndimage.filters.gaussian_filter(meanimage, (3,3))
        sh = meanimage.shape
        print 'mean image shape: ', sh
        # dF/F-style normalization about the series mean, frame by frame
        # (in-place assignment keeps the stack's original dtype)
        for i in range(len(self.imageData)):
            self.imageData[i,:,:] = 1.0+(self.imageData[i,:,:] - meanimage)/meanimage
        # imstd = np.std(self.imageData, axis=0)
        # imstd = scipy.ndimage.gaussian_filter(imstd, sigma=0.002)
        # isize = 1
        # immax = scipy.ndimage.maximum_filter(imstd, size=isize, mode='constant')
        # imm = np.mean(np.mean(self.imageData, axis=2), axis=1)
        # ndl = imm.shape[0]
        # self.backgroundPlot.plot(y=imm, x=self.imageTimes[0:ndl], clear=True)
        self.dataState['Normalized'] = True
        self.dataState['NType'] = 'norm'
        self.paintImage()
        self.ctrl.ImagePhys_NormInfo.setText('Norm')
        # print 'norm: ', np.mean(self.imageData[1])
    def MediandFFImage(self, data=None):
        """
        Normalize the stack against the low-pass-filtered median intensity of
        each frame (a median-based dF/F).

        :param data: unused; kept for interface compatibility
        :return: Nothing; self.imageData is normalized in place and the
            dataState flags / UI label are updated.
        """
        if self.dataState['bleachCorrection'] is False:
            print 'No Bleaching done, copy rawdata to image'
            self.imageData = self.rawData.copy() # just copy over without a correction print 'Normalizing'
        if self.dataState['Normalized'] is True and self.dataState['bleachCorrection'] is True:
            print 'Data is already Normalized, type = %s ' % (self.dataState['NType'])
            return
        else:
            self.imageData = self.rawData.copy() # just start over with the raw data...
        # sh = self.imageData.shape
        imm = np.median(np.median(self.imageData, axis=2), axis=1)
        samplefreq = 1.0/np.mean(np.diff(self.imageTimes))
        # low-pass the median trace; corner frequency capped at 20 Hz
        if samplefreq < 100.0:
            lpf = samplefreq/5.0
        else:
            lpf = 20.0
        imm = Utility.SignalFilter_LPFButter(imm, lpf, samplefreq, NPole = 8)
        print np.amin(imm), np.amax(imm)
        # dF/F against the smoothed per-frame median
        for i in range(len(self.imageData)):
            self.imageData[i,:,:] = 1.0+(self.imageData[i,:,:] - imm[i])/imm[i]
        # imm = np.median(np.median(self.imageData, axis=2), axis=1)
        # ndl = imm.shape[0]
        # self.backgroundPlot.plot(y=imm, x=self.imageTimes[0:ndl], clear=True)
        self.dataState['Normalized'] = True
        self.dataState['NType'] = 'median'
        self.ctrl.ImagePhys_NormInfo.setText('Median')
        self.paintImage()
    def StandarddFFImage(self, baseline = False):
        """
        Convert the stack to dF/F, relative either to a baseline time window
        or to the first frame.

        :param baseline: when True, F0 is the mean over the window set by the
            BaseStart/BaseEnd controls; when False, F0 is the first frame.
        :raises ValueError: if the baseline window spans fewer than 2 frames.
        :return: Nothing; self.imageData is replaced and flags/UI are updated.
        """
        if self.dataState['bleachCorrection'] is False:
            print 'No Bleach Corrections: copying rawdata to image'
            self.imageData = self.rawData.copy() # just copy over without a correction
        if self.dataState['Normalized'] is True and self.dataState['bleachCorrection'] is True:
            print 'Data is already Normalized, type = %s ' % (self.dataState['NType'])
            return
        else:
            self.imageData = self.rawData.copy() # start over with the raw data...
        if baseline is True:
            t0 = self.ctrlROIFunc.ImagePhys_BaseStart.value()
            t1 = self.ctrlROIFunc.ImagePhys_BaseEnd.value()
            dt = np.mean(np.diff(self.imageTimes))
            # convert the time window to frame indices
            it0 = int(t0/dt)
            it1 = int(t1/dt)
            if it1-it0 > 1:
                F0 = np.mean(self.imageData[it0:it1,:,:], axis=0) # save the reference
                self.ctrl.ImagePhys_NormInfo.setText('(F-Fb)/Fb')
            else:
                self.ctrl.ImagePhys_NormInfo.setText('no Fb')
                raise ValueError('baseline has < 2 points')
        else:
            F0= np.mean(self.imageData[0:1,:,:], axis=0) # save the reference (first frame only)
            self.ctrl.ImagePhys_NormInfo.setText('(F-F0)/F0')
        self.imageData = (self.imageData - F0) / F0 # do NOT replot!
        self.dataState['Normalized'] = True
        self.dataState['NType'] = 'dF/F'
        # imm = np.mean(np.mean(self.imageData, axis=2), axis=1)
        # ndl = imm.shape[0]
        # self.backgroundPlot.plot(y=imm, x=self.imageTimes[0:ndl], clear=True)
        self.paintImage()
    def GRRatioImage(self):
        """
        Normalize the stack as a green/red ratio against self.ratioImage.

        Requires that a ratio image has been loaded (dataState['ratioLoaded']).
        :return: Nothing; self.imageData is divided by self.ratioImage in place
            and the dataState flags / UI label are updated.
        """
        print 'Doing G/R Ratio calculation'
        if self.dataState['bleachCorrection'] is False:
            print 'No Bleaching done, copy rawdata to image'
            self.imageData = self.rawData.copy() # just copy over without a correction print 'Normalizing'
        if self.dataState['ratioLoaded'] is False:
            print 'NO ratio image loaded - so try again'
            return
        if self.dataState['Normalized'] is True and self.dataState['bleachCorrection'] is True:
            print 'Data is already Normalized, type = %s ' % (self.dataState['NType'])
            return
        else:
            self.imageData = self.rawData.copy() # just start over with the raw data...
        #F0= np.mean(self.imageData[0:3,:,:], axis=0) # save the reference
        self.imageData = self.imageData/self.ratioImage # do NOT replot!
        self.dataState['Normalized'] = True
        self.dataState['NType'] = 'GRRatio'
        self.ctrl.ImagePhys_NormInfo.setText('G/R')
        # imm = np.mean(np.mean(self.imageData, axis=2), axis=1)
        # ndl = imm.shape[0]
        # self.backgroundPlot.plot(y=imm, x=self.imageTimes[0:ndl], clear=True)
        self.paintImage()
def smoothImage(self):
self.imageData = scipy.ndimage.filters.gaussian_filter(self.imageData, (3,3,3))
self.paintImage()
def paintImage(self, image = None, updateTools = True, focus=True):
if image == None:
pImage = self.imageData
else:
pImage = image
pImage = np.squeeze(pImage)
#self.initImage(len(pImage))
self.imageView.setImage(pImage)
def ccf(self, x, y, axis=None):
"""Computes the cross-correlation function of two series `x` and `y`.
Note that the computations are performed on anomalies (deviations from
average).
Returns the values of the cross-correlation at different lags.
Lags are given as [0,1,2,...,n,n-1,n-2,...,-2,-1] (not any more)
:Parameters:
`x` : 1D MaskedArray
Time series.
`y` : 1D MaskedArray
Time series.
`axis` : integer *[None]*
Axis along which to compute (0 for rows, 1 for cols).
If `None`, the array is flattened first.
"""
assert x.ndim == y.ndim, "Inconsistent shape !"
# assert(x.shape == y.shape, "Inconsistent shape !")
if axis is None:
if x.ndim > 1:
x = x.ravel()
y = y.ravel()
npad = x.size + y.size
xanom = (x - x.mean(axis=None))
yanom = (y - y.mean(axis=None))
Fx = np.fft.fft(xanom, npad, )
Fy = np.fft.fft(yanom, npad, )
iFxy = np.fft.ifft(Fx.conj()*Fy).real
varxy = np.sqrt(np.inner(xanom,xanom) * np.inner(yanom,yanom))
else:
npad = x.shape[axis] + y.shape[axis]
if axis == 1:
if x.shape[0] != y.shape[0]:
raise ValueError, "Arrays should have the same length!"
xanom = (x - x.mean(axis=1)[:,None])
yanom = (y - y.mean(axis=1)[:,None])
varxy = np.sqrt((xanom*xanom).sum(1) * (yanom*yanom).sum(1))[:,None]
else:
if x.shape[1] != y.shape[1]:
raise ValueError, "Arrays should have the same width!"
xanom = (x - x.mean(axis=0))
yanom = (y - y.mean(axis=0))
varxy = np.sqrt((xanom*xanom).sum(0) * (yanom*yanom).sum(0))
Fx = np.fft.fft(xanom, npad, axis=axis)
Fy = np.fft.fft(yanom, npad, axis=axis)
iFxy = np.fft.ifft(Fx.conj()*Fy,n=npad,axis=axis).real
# We juste turn the lags into correct positions:
iFxy = np.concatenate((iFxy[len(iFxy)/2:len(iFxy)],iFxy[0:len(iFxy)/2]))
return iFxy/varxy
#
#------------- cross correlation calculations -----------------
#
def Analog_Xcorr(self, FData = None, dt = None):
"""Average cross correlation of all traces"""
self.calculateAllROIs()
if not FData:
FData = self.FData
if dt is None:
if self.imageTimes is []:
dt = 1
else:
dt = np.mean(np.diff(self.imageTimes))
self.calculate_all_xcorr(FData, dt)
self.use_MPL = self.ctrlImageFunc.IAFuncs_MatplotlibCheckBox.checkState()
if not self.use_MPL:
self.floatingWindow = pyqtgrwindow(title = 'Analog_Xcorr_Average')
self.floatingWindow.setWindowTitle('Average XCorr: %s' % self.currentFileName)
# print dir(self.floatingWindow)
# print dir(self.floatingWindow.layout)
self.floatingWindow.layout.clear()
self.floatingWindow.layout.setWindowTitle("New Title?")
p = self.floatingWindow.layout.addPlot(0,0)
p.plot(self.lags,self.xcorr)
p.setXRange(np.min(self.lags), np.max(self.lags))
else:
self.checkMPL()
(self.MPLFig, self.MPL_plots) = PL.subplots(num = "Average XCorr", nrows = 1, ncols=1,
sharex = True, sharey = True)
self.MPLFig.suptitle('Average XCorr: %s' % self.currentFileName, fontsize=11)
self.MPL_plots.plot(self.lags, self.xcorr)
self.MPL_plots.plot(self.lags,np.zeros(self.lags.shape), color = '0.5')
self.MPL_plots.plot([0,0], [-0.5, 1.0], color = '0.5')
self.MPL_plots.set_title('Average XCorr', fontsize=10)
self.MPL_plots.set_xlabel('T (sec)', fontsize=10)
self.MPL_plots.set_ylabel('Corr (R)', fontsize=10)
PH.cleanAxes(self.MPL_plots)
PL.show()
def calculate_all_xcorr(self, FData = None, dt = None):
if FData is None:
FData = self.FData
nROI = self.nROI
else:
nROI = len(FData)
if dt is None:
if self.imageTimes is []:
dt = 1
else:
dt = np.mean(np.diff(self.imageTimes))
ndl = len(FData[0,:])
itime = self.imageTimes[0:ndl]
self.IXC_corr = [[]]*(sum(range(1,nROI)))
self.xcorr = []
xtrace = 0
for roi1 in range(0, len(FData)-1):
for roi2 in range(roi1+1, len(FData)):
(a1, b1) = np.polyfit(itime, FData[roi1,:], 1)
(a2, b2) = np.polyfit(itime, FData[roi2,:], 1)
y1 = np.polyval([a1, b1], itime)
y2 = np.polyval([a2, b2], itime)
sc = self.ccf(FData[roi1,:]-y1, FData[roi2,:]-y2)
self.IXC_corr[xtrace] = sc
if xtrace == 0:
self.xcorr = sc
else:
self.xcorr = self.xcorr + sc
xtrace += 1
self.xcorr = self.xcorr/xtrace
s = np.shape(self.xcorr)
self.lags = dt*(np.arange(0, s[0])-s[0]/2.0)
    def Analog_Xcorr_unbiased(self, FData = None, dt = None):
        """Placeholder: unbiased cross-correlation analysis (currently disabled).

        The original grid-of-ROIs implementation is kept below, commented out,
        for reference.
        """
        # baseline
        pass
    # def Analog_Xcorr_unbiased(self, FData = None, dt = None):
    #     self.oldROIs = self.AllRois
    #     self.clearAllROI()
    #     img_sh = self.rawData.shape
    #     img_x = img_sh[1]
    #     img_y = img_sh[2]
    #     nx = 10
    #     ny = 10
    #     dx = int(img_x/nx)
    #     dy = int(img_y/ny)
    #     print dx, dy
    #     for i in range(0, nx):
    #         for j in range(0, ny):
    #             self.addOneROI(pos=[i*dx, j*dy], hw=[dx, dy])
    #     self.Analog_Xcorr_Individual(plottype = 'image')
    def Analog_Xcorr_Individual(self, FData = None, dt = None, plottype = 'traces'):
        """ compute and display the individual cross correlations between pairs of traces
        in the data set

        plottype 'traces' shows every pairwise correlation trace in a grid;
        'image' shows a matrix of zero-lag correlation strengths; None
        computes only. Uses pyqtgraph or matplotlib per the GUI checkbox.
        """
        print 'Calculating cross-correlations between all ROIs'
        self.use_MPL = self.ctrlImageFunc.IAFuncs_MatplotlibCheckBox.checkState()
        self.calculateAllROIs()
        if self.ROIDistanceMap == []:
            self.ROIDistances()
        if not FData:
            FData = self.FData
            nROI = self.nROI
        else:
            nROI = len(FData)
        if dt is None:
            # NOTE(review): "is []" is an identity test against a fresh list
            # and is always False, so the dt = 1 fallback can never be taken.
            if self.imageTimes is []:
                dt = 1
            else:
                dt = np.mean(np.diff(self.imageTimes))
        # NOTE(review): passes self.FData rather than the local FData argument;
        # verify this is intended when a caller supplies its own FData.
        self.calculate_all_xcorr(self.FData, dt)
        # nxc = 0
        # rows = nROI-1
        # cols = rows
        self.IXC_plots = [[]]*(sum(range(1,nROI)))
        self.IXC_Strength = np.empty((nROI, nROI))
        self.IXC_Strength_Zero = np.empty((nROI, nROI))
        self.IXC_Strength.fill(np.nan)
        # NOTE(review): xtrace is initialised twice; the second line is redundant.
        xtrace = 0
        xtrace = 0
        lag_zero = np.argmin(np.abs(self.lags)) # find lag closest to zero
        for xtrace1 in range(0, nROI-1):
            for xtrace2 in range(xtrace1+1, nROI):
                # NOTE(review): the first assignment (peak value) is immediately
                # overwritten by the zero-lag value on the next line.
                self.IXC_Strength[xtrace1, xtrace2] = self.IXC_corr[xtrace].max()
                self.IXC_Strength[xtrace1, xtrace2] = self.IXC_corr[xtrace][lag_zero]
                xtrace = xtrace + 1
        # yMinorTicks = 0
        # bLegend = self.ctrlImageFunc.IAFuncs_checkbox_TraceLabels.isChecked()
        # gridFlag = True
        if plottype is None:
            return
        # if self.nROI > 8:
        #     gridFlag = False
        # build the grid of plot panels (pyqtgraph branch)
        if not self.use_MPL:
            #if self.floatingWindow is None:
            self.floatingWindow = pyqtgrwindow(title = 'Analog_Xcorr_Individual')
            self.floatingWindow.layout.clear()
            # self.gview = pg.GraphicsView()
            # if self.pgwin is None:
            #     self.pgwin = pg.GraphicsLayout()
            # self.pgwin.clear()
            xtrace = 0
            for xtrace1 in range(0, nROI-1):
                for xtrace2 in range(xtrace1+1, nROI):
                    # print 'xtrace: ', xtrace
                    self.IXC_plots[xtrace] = self.floatingWindow.layout.addPlot(xtrace1, xtrace2)
                    # if xtrace == 0:
                    #     print dir(self.IXC_plots[xtrace])
                    if xtrace > 0:
                        self.IXC_plots[xtrace].hideButtons()
                    xtrace = xtrace + 1
                self.floatingWindow.layout.nextRow()
        else:  # matplotlib branch
            self.checkMPL()
            if plottype == 'traces':
                (self.MPLFig, self.IXC_plots) = PL.subplots(num="Individual ROI Cross Correlations",
                            nrows = self.nROI-1, ncols=self.nROI-1,
                            sharex = True, sharey = True)
                self.MPLFig.suptitle('XCorr: %s' % self.currentFileName, fontsize=11)
            else:
                self.MPLFig = PL.subplot(111)
        # ndl = len(FData[0,:])
        # itime = self.imageTimes[0:ndl]
        dlg = 0
        xtrace = 0
        # plot each pair's correlation trace, with a cancelable progress dialog
        with pg.ProgressDialog("Analyzing ROIs...", 0, 100) as dlg:
            for xtrace1 in range(0, nROI-1):
                # dlg.setLabelText("I")
                dlg.setValue(0)
                dlg.setMaximum(nROI)
                # temp_F = FData[xtrace1,:] #-y1
                for xtrace2 in range(xtrace1+1, nROI):
                    # if bLegend:
                    #     legend = legend=('%d vs %d' % (xtrace1, xtrace2))
                    # else:
                    #     legend = None
                    if plottype == 'traces':
                        if not self.use_MPL: # pyqtgraph
                            self.IXC_plots[xtrace].plot(self.lags, self.IXC_corr[xtrace])
                            if xtrace == 0:
                                self.IXC_plots[0].registerPlot(name='xcorr_%03d' % xtrace)
                            if xtrace > 0:
                                self.IXC_plots[xtrace].vb.setXLink('xcorr_000') # not sure - this seems to be at the wrong level in the window manager
                        else: # pylab
                            plx = self.IXC_plots[xtrace1, xtrace2-1]
                            plx.plot(self.lags,self.IXC_corr[xtrace])
                            plx.hold = True
                            # zero-correlation and zero-lag reference lines
                            plx.plot(self.lags,np.zeros(self.lags.shape), color = '0.5')
                            plx.plot([0,0], [-0.5, 1.0], color = '0.5')
                            if xtrace1 == 0:
                                plx.set_title('ROI: %d' % (xtrace2), fontsize=8)
                            PH.cleanAxes(plx)
                    xtrace = xtrace + 1
                dlg += 1
                if dlg.wasCanceled():
                    raise HelpfulException("Calculation canceled by user.", msgType='status')
        # now rescale all the plot Y axes by getting the min/max "viewRange" across all, then setting them all the same
        if not self.use_MPL and plottype == 'traces':
            ymin = 0
            ymax = 0
            bmin = []
            bmax = []
            for i in range(0, xtrace):
                bmin.append(np.amin(self.IXC_plots[i].vb.viewRange()[1]))
                bmax.append(np.amax(self.IXC_plots[i].vb.viewRange()[1]))
            ymin = np.amin(bmin)
            ymax = np.amax(bmax)
            # NOTE(review): outside the loop, so this only sets the X range of
            # the last plot; the others follow via the X link to plot 0.
            self.IXC_plots[i].setXRange(np.min(self.lags), np.max(self.lags))
            for i in range(0, xtrace):
                self.IXC_plots[i].setYRange(ymin, ymax) # remember, all are linked to the 0'th plot
                self.IXC_plots[i].setLabel('left', text="R")
                self.IXC_plots[i].setLabel('bottom', text="Time (s)")
                if i == 0:
                    pass
                    #self.IXC_plots[i].setYlabel("R")
                    #self.IXC_plots[i].setXlabel("Time (s)")
                if i > 0:
                    self.IXC_plots[i].hideAxis('left')
                    self.IXC_plots[i].hideAxis('bottom')
                # self.IXC_plots[i].hideButtons()
        elif plottype == 'traces':
            # matplotlib: label only the outer axes, then clean up and show
            for xtrace1 in range(0, nROI-1):
                for xtrace2 in range(0, xtrace1):
                    plx = self.IXC_plots[xtrace1-1, xtrace2]
                    if xtrace1 == nROI-1:
                        plx.set_xlabel('T (sec)', fontsize=10)
                    if xtrace2 == 0:
                        plx.set_ylabel('R (%d)' % xtrace1, fontsize=10)
                    PH.cleanAxes(self.IXC_plots[xtrace1, xtrace2])
            PL.show()
        elif plottype == 'image':
            # print self.IXC_Strength.shape
            self.MPLFig.imshow(self.IXC_Strength)
            PL.show()
#----------------Fourier Map (reports phase)----------------------------
    def Analog_AFFT(self):
        """Placeholder for analog FFT analysis of all traces (not implemented)."""
        pass
    def Analog_AFFT_Individual(self):
        """Placeholder for per-ROI analog FFT analysis (not implemented)."""
        pass
    def Analysis_FourierMap(self):
        """Fit a sine/cosine pair at the stimulus period to every pixel's mean
        cycle and store per-pixel amplitude and phase images; the results are
        also pickled to img_phase.dat / img_amplitude.dat."""
        # print "times: ", self.times # self.times has the actual frame times in it.
        # first squeeze the image to 3d if it is 4d
        sh = np.shape(self.imageData);
        if len(sh) == 4:
            self.imageData = np.squeeze(self.imageData)
            sh = np.shape(self.imageData)
        print '**********************************\nImage shape: ', sh
        self.imagePeriod = 6.0 # image period in seconds.
        # NOTE(review): angular frequency is conventionally 2*pi/period; this
        # computes 2*pi*period — confirm which is intended before trusting
        # the resulting phase map.
        w = 2.0 * np.pi * self.imagePeriod
        # identify an interpolation for the image for one cycle of time
        dt = np.mean(np.diff(self.imageTimes)) # get the mean dt
        maxt = np.amax(self.imageTimes) # find last image time
        n_period = int(np.floor(maxt/self.imagePeriod)) # how many full periods in the image set?
        n_cycle = int(np.floor(self.imagePeriod/dt)) # estimate image points in a stimulus cycle
        ndt = self.imagePeriod/n_cycle
        i_times = np.arange(0, n_period*n_cycle*ndt, ndt) # interpolation times
        n_times = np.arange(0, n_cycle*ndt, ndt) # just one cycle
        print "dt: %f maxt: %f # images %d" % (dt, maxt, len(self.imageTimes))
        print "# full per: %d pts/cycle: %d ndt: %f #i_times: %d" % (n_period, n_cycle, ndt, len(i_times))
        B = np.zeros([sh[1], sh[2], n_period, n_cycle])
        #for i in range(0, sh[1]):
        #    for j in range(0, sh[2]):
        #        B[i,j,:] = np.interp(i_times, self.times, self.imageData[:,i,j])
        # keep only whole stimulus cycles, then fold into (cycle, period, x, y)
        B = self.imageData[range(0, n_period*n_cycle),:,:]
        print 'new image shape: ', np.shape(self.imageData)
        print "B shape: ", np.shape(B)
        C = np.reshape(B, (n_cycle, n_period, sh[1], sh[2]))
        print 'C: ', np.shape(C)
        # average across periods -> one mean cycle per pixel
        D = np.mean(C, axis=1)
        print "D: ", np.shape(D)
        sh = np.shape(D)
        # design matrix: one sine and one cosine column at the stimulus frequency
        A = np.zeros((sh[0], 2), float)
        print "A: ", np.shape(A)
        A[:, 0] = np.sin(w*n_times)
        A[:, 1] = np.cos(w*n_times)
        sparse = 1 # pixel stride; 1 = fit every pixel
        self.phaseImage = np.zeros((sh[1], sh[2]))
        self.amplitudeImage = np.zeros((sh[1], sh[2]))
        for i in range(0, sh[1], sparse):
            for j in range(0, sh[2], sparse):
                # least-squares fit of each pixel's mean cycle to the basis
                (p, residulas, rank, s) = np.linalg.lstsq(A, D[:,i,j])
                self.amplitudeImage[i,j] = np.hypot(p[0],p[1])
                self.phaseImage[i, j] = np.arctan2(p[1],p[0])
        # pickle results for offline inspection
        f = open('img_phase.dat', 'w')
        pickle.dump(self.phaseImage, f)
        f.close()
        f = open('img_amplitude.dat', 'w')
        pickle.dump(self.amplitudeImage, f)
        f.close()
        # PL.figure()
        # PL.imshow(self.phaseImage)
        # PL.show()
#
# ---------------SMC (oopsi, Vogelstein method) detection of calcium events in ROIs----------------
    def Analysis_smcAnalyze(self):
        """Run the SMC (Vogelstein) particle-filter event detection on each
        ROI's baseline-corrected trace and print the weighted estimates."""
        try:
            import SMC
        # NOTE(review): bare except also hides unrelated errors; catching
        # ImportError only would be safer.
        except:
            raise ImportError ("SMC not importable")
        # pull SMC model parameters from the analysis control panel
        self.smc_A = self.ctrlAnalysis.smc_Amplitude.value()
        self.smc_Kd = self.ctrlAnalysis.smc_Kd.value()
        self.smc_C0 = self.ctrlAnalysis.smc_C0.value()
        self.smc_TCa = self.ctrlAnalysis.smc_TCa.value()
        # NOTE(review): "is []" is always False (identity test against a fresh
        # list), so the 30 fps fallback can never be taken; same below for
        # self.BFData.
        if self.imageTimes is []:
            dt = 1.0/30.0 # fake it... 30 frames per second
        else:
            dt = np.mean(np.diff(self.imageTimes))
        print "Mean time between frames: %9.4f" % (dt)
        if self.BFData is []:
            print "No baseline corrected data to use!!!"
            return
    #    dataIDString = 'smc_'
        for roi in range(0, self.nROI):
            print "ROI: %d" % (roi)
            # normalized the data:
            ndat = (self.BFData[roi,:] - np.min(self.BFData[roi,:]))/np.max(self.BFData[roi,:])
            self.smc_V = SMC.Variables(ndat, dt)
            self.smc_P = SMC.Parameters(self.smc_V, A=self.smc_A, k_d=self.smc_Kd, C_0=self.smc_C0, tau_c =self.smc_TCa)
            self.smc_S = SMC.forward(self.smc_V, self.smc_P)
            # particle-weighted averages of calcium (C) and spikes (n) per frame
            cbar = np.zeros(self.smc_P.V.T)
            nbar = np.zeros(self.smc_P.V.T)
            for t in xrange(self.smc_P.V.T):
                for i in xrange(self.smc_P.V.Nparticles):
                    weight = self.smc_S.w_f[i,t]
                    cbar[t] += weight * self.smc_S.C[i,t]
                    nbar[t] += weight * self.smc_S.n[i,t]
            print "ROI: %d cbar: " % (roi)
            print cbar
            print "ROI: %dnbar: " % (roi)
            print nbar
        # MPlots.PlotLine(self.plots[roi], self.imageTimes, cbar, color = 'black',
        #                 dataID = ('%s%d' % (dataIDString, roi)))
        print "finis"
# Use matlab to do the analysis with J. Vogelstein's code, store result on disk
    def smc_AnalyzeMatlab(self):
        """Run J. Vogelstein's analysis in MATLAB (hard-coded R2010b path);
        the MATLAB script stores its results on disk."""
        import subprocess
        subprocess.call(['/Applications/MATLAB_R2010b.app/bin/matlab', '-r', 'FigSimNoisy.m'], bufsize=1)
    def Analysis_SpikeXCorr(self):
        """Placeholder for spike-based cross-correlation analysis (not implemented)."""
        pass
    def RegisterStack(self):
        """
        Align a stack of images using openCV. We calculate a rigid transform
        referenced to the first image, and transform each subsequent image
        based on that.
        It is fast, and better than nothing, but not perfect.
        """
        # import scipy.ndimage.interpolation
        # outstack = self.imageData.copy()
        shd = self.imageData.shape
        maximg = np.amax(self.imageData)
        # reference frame: 8-bit scaled mean image of the whole stack
        refimg = (255*np.mean(self.imageData, axis=0)/maximg).astype('uint8')
        for i in range(0,shd[0]):
            # scale each frame to 8-bit for openCV, register, then rescale back
            timage = (255*self.imageData[i,:,:]/maximg).astype('uint8')
            # NOTE(review): estimateRigidTransform may return None when no
            # transform is found, which would make warpAffine below fail.
            affineMat = cv2.estimateRigidTransform(refimg, timage, False)
            print timage.shape, self.imageData[i].shape
            self.imageData[i,:,:] = cv2.warpAffine(timage, affineMat, dsize=timage.shape, borderMode = cv2.BORDER_REPLICATE).astype('float32')*maximg/255.
            #x = scipy.ndimage.interpolation.affine_transform(self.imageData[i,:,:], affineMat[0:2,0:2] )
        self.updateAvgStdImage()
def RegisterStack2(self):
""" THIS IS NOT IN USE!!!
Align a stack to one of its images using recursiveRegisterImages
from util/functions.py
Parameters:
imgstack: a list containing images
imgi: index of the standard position image inside imgstack
thresh: not used :threshold to use on the reference image; if it is
zero, then use the ImageP.graythresh algorithm
invert: note used: if True, invert the reference image
cut: if True, cut to the common area after shift
ROI: list or tuple of ndices i0,i1,j0,j1 so that the
subimage: img[i0:i1,j0:j1] shall be used for the
alignment.
verbose: plot actual convolution image
Return:
a list of the aligned images
"""
try:
from acq4.analysis.tools import ImageP # avaialable as part of the STXMPy package
except:
raise ImportError('cann import ImageP for stack registration')
imgstack = self.imageData
cut = False
imgi = 0 # use first image as reference
N = len(imgstack)
if imgi < 0 or imgi >= N:
print "Invalid index: %d not in 0 - %d" %(imgi, N)
return None
#end if
a = imgstack[imgi].copy()
# sh = a.shape
thresh = np.mean(a)*1.25
print "threshold is set to: %.3f" % thresh
#initialize result stack:
outstack = []
indx = np.zeros(imgstack[0].shape, dtype='bool') + True
imgN = 0
# print imgstack.shape
for i, img in enumerate(imgstack):
x = 0.
y = 0.
if i != imgi:
#c = ImageP.ConvFilter(a > thresh, img)
# print c
c = FN.recursiveRegisterImages(img, imgstack[imgi], maxDist=10)
x,y = (c == c.max()).nonzero()
x = x[0] - (c.shape[0]/2 -1)
y = y[0] - (c.shape[1]/2 -1)
img2 = ImageP.shift(img, x, y)
print 'n: %d shift: x %f y %f' % (imgN, x, y)
outstack.append(img2)
indx = indx * (img2 > 0)
imgN = imgN + 1
if cut is True:
ix, iy = indx.nonzero()
i0 = ix.min()
#+1 for the indexing limit...
i1 = ix.max()+1
j0 = iy.min()
j1 = iy.max()+1
print "Common boundaries:",i0,i1,j0,j1
#cut the list elements:
for i in xrange(N):
outstack[i] = outstack[i][i0:i1,j0:j1]
for i in range(self.imageData.shape[0]):
self.imageData[i,:,:] = outstack[i]
return np.atleast_2d(outstack)
#end of registerStack
#---------------------Database Operations ----------------------------- #
    def storeToDB(self, data=None):
        """Write detected event records (plus protocol directory links) to the
        selected database table, first deleting any previous rows that came
        from the same source files."""
        p = debug.Profiler("ImageAnalysis.storeToDB", disabled=True)
        if data is None:
            data = self.flowchart.output()['events']
        if len(data) == 0:
            return
        # resolve the target table and DB from the Database GUI element
        dbui = self.getElement('Database')
        table = dbui.getTableName(self.dbIdentity)
        db = dbui.getDb()
        if db is None:
            raise Exception("No DB selected")
        p.mark("DB prep done")
        # column schema inferred from the data, plus the two directory links
        columns = db.describeData(data)
        columns.update({
            'ProtocolSequenceDir': 'directory:ProtocolSequence',
            'ProtocolDir': 'directory:Protocol',
            #'SourceFile': 'file'
        })
        p.mark("field list done")
        ## Make sure target table exists and has correct columns, links to input file
        db.checkTable(table, owner=self.dbIdentity, columns=columns, create=True, addUnknownColumns=True)
        p.mark("data prepared")
        ## collect all protocol/Sequence dirs
        prots = {}
        seqs = {}
        for fh in set(data['SourceFile']):
            prots[fh] = fh.parent()
            seqs[fh] = self.dataModel.getParent(fh, 'ProtocolSequence')
        ## delete all records from table for current input files
        for fh in set(data['SourceFile']):
            db.delete(table, where={'SourceFile': fh})
        p.mark("previous records deleted")
        ## assemble final list of records
        records = {}
        for col in data.dtype.names:
            records[col] = data[col]
        # Python 2 map() returns a list here, aligned with the data rows
        records['ProtocolSequenceDir'] = map(seqs.get, data['SourceFile'])
        records['ProtocolDir'] = map(prots.get, data['SourceFile'])
        p.mark("record list assembled")
        ## insert all data to DB
        with pg.ProgressDialog("Storing events...", 0, 100) as dlg:
            for n, nmax in db.iterInsert(table, records):
                dlg.setMaximum(nmax)
                dlg.setValue(n)
                if dlg.wasCanceled():
                    raise HelpfulException("Scan store canceled by user.", msgType='status')
        p.mark("records inserted")
        p.finish()
    def readFromDb(self, sequenceDir=None, sourceFile=None):
        """Read events from DB that originate in sequenceDir.
        If sourceFile is specified, only return events that came from that file.

        Returns None when the table does not exist; otherwise a record array
        (possibly empty, but carrying the table's field names).
        """
        dbui = self.getElement('Database')
        table = dbui.getTableName(self.dbIdentity)
        db = dbui.getDb()
        if db is None:
            raise Exception("No DB selected")
        #identity = self.dbIdentity+'.events'
        #table = dbui.getTableName(identity)
        if not db.hasTable(table):
            #return None, None
            return None
            #return np.empty(0)
        #pRow = db.getDirRowID(sourceDir)
        #if pRow is None:
            #return None, None
        # select by file when given, otherwise by the protocol sequence dir
        if sourceFile is not None:
            events = db.select(table, '*', where={'SourceFile': sourceFile}, toArray=True)
        else:
            events = db.select(table, '*', where={'ProtocolSequenceDir': sequenceDir}, toArray=True)
        if events is None:
            ## need to make an empty array with the correct field names
            schema = db.tableSchema(table)
            ## NOTE: dtype MUST be specified as {names: formats: } since the names are unicode objects
            ## [(name, format), ..] does NOT work.
            events = np.empty(0, dtype={'names': [k for k in schema], 'formats': [object]*len(schema)})
        return events
class DBCtrl(QtGui.QWidget):
    """Small widget bundling the database selector GUI with a store button.

    Table-name and DB lookups are delegated directly to the embedded
    DatabaseGui instance.
    """
    def __init__(self, host, identity):
        QtGui.QWidget.__init__(self)
        self.host = host
        # build the child widgets first...
        self.dbgui = DatabaseGui.DatabaseGui(dm=host.dataManager(), tables={identity: 'EventDetector_events'})
        self.storeBtn = pg.FeedbackButton("Store to DB")
        #self.storeBtn.clicked.connect(self.storeClicked)
        # ...then stack them vertically
        self.layout = QtGui.QVBoxLayout()
        self.setLayout(self.layout)
        self.layout.addWidget(self.dbgui)
        self.layout.addWidget(self.storeBtn)
        # expose the DatabaseGui's lookup methods on this widget
        self.getTableName = self.dbgui.getTableName
        self.getDb = self.dbgui.getDb
class pyqtgrwindow(QtGui.QMainWindow):
    """Top-level window hosting a pyqtgraph GraphicsView/GraphicsLayout pair.

    The layout is exposed as self.layout so callers can add plots directly.
    """
    def __init__(self, parent=None, title = '', size=(500,500)):
        super(pyqtgrwindow, self).__init__(parent)
        # window chrome
        self.setWindowTitle(title)
        width, height = size
        self.resize(width, height)
        # graphics canvas: a GraphicsLayout shown inside a GraphicsView
        self.view = pg.GraphicsView()
        self.layout = pg.GraphicsLayout(border=None) # pg.mkPen(0, 0, 255))
        self.view.setCentralItem(self.layout)
        self.view.show()
| mgraupe/acq4 | acq4/analysis/modules/pbm_ImageAnalysis/pbm_ImageAnalysis.py | Python | mit | 145,987 | [
"Gaussian"
] | 90af00e62c4584387710dbc988a5390f83eea96a376463488cede7f1ccfce3cf |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
# Run this test like so:
# vtkpython TestGlobFileNames.py -D $VTK_DATA_ROOT
import re
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestGlobFileNames(vtk.test.Testing.vtkTest):

    def testGlobFileNames(self):
        """Glob for the headsq/quarter.* data files, verify the result list is
        self-consistent, then reset the glob object and repeat for the
        financial.* files."""
        globFileNames = vtk.vtkGlobFileNames()

        globFileNames.SetDirectory(VTK_DATA_ROOT + "/Data/")
        # globs do not include Kleene star support for pattern repetitions thus
        # we insert a pattern for both single and double digit file extensions.
        globFileNames.AddFileNames("headsq/quarter.[1-9]")
        globFileNames.AddFileNames("headsq/quarter.[1-9][0-9]")

        fileNames = globFileNames.GetFileNames()
        n = globFileNames.GetNumberOfFileNames()

        if n != 93:
            # dump a directory listing to help diagnose the mismatch, then fail
            for i in range(0, n):
                print("File:", i, " ", fileNames.GetValue(i))
            print("GetNumberOfValues should return 93, returned", n)
            print("Listing of ", VTK_DATA_ROOT, "/Data/headsq")
            directory = vtk.vtkDirectory()
            directory.Open(VTK_DATA_ROOT + "/Data/headsq")
            m = directory.GetNumberOfFiles()
            for j in range(0, m):
                print(directory.GetFile(j))
            exit(1)

        for i in range(0, n):
            filename = fileNames.GetValue(i)
            if filename != globFileNames.GetNthFileName(i):
                print("mismatched filename for pattern quarter.*:", filename)
                exit(1)
            # raw string avoids the invalid \w / \W escape warning;
            # "is None" rather than "== None" for the match test
            m = re.search(r"[\w|\W]*quarter.*", filename)
            if m is None:
                print("string does not match pattern quarter.*:", filename)
                # exit on failure, consistent with the financial.* check below
                exit(1)

        # check that we can re-use the Glob object
        globFileNames.Reset()
        globFileNames.SetDirectory(VTK_DATA_ROOT + "/Data/")
        globFileNames.AddFileNames(VTK_DATA_ROOT + "/Data/financial.*")
        fileNames = globFileNames.GetFileNames()

        n = fileNames.GetNumberOfValues()
        for i in range(0, n):
            filename = fileNames.GetValue(i)
            if filename != globFileNames.GetNthFileName(i):
                print("mismatched filename for pattern financial.*: ", filename)
                exit(1)
            m = re.search(r"[\w|\W]*financial.*", filename)
            if m is None:
                print("string does not match pattern financial.*:", filename)
                exit(1)

        vtk.test.Testing.interact()
if __name__ == "__main__":
    # standard VTK Python test entry point
    vtk.test.Testing.main([(TestGlobFileNames, 'test')])
| hlzz/dotfiles | graphics/VTK-7.0.0/IO/Core/Testing/Python/TestGlobFileNames.py | Python | bsd-3-clause | 3,266 | [
"VTK"
] | f58cb180b55d4a205696e28a75962cc4762c1870cedafd6786982688d4a175a1 |
"""
sender - SimplE Neuron DEleteR
Deletes the least significant neurons. Value of neuron is defined as
sum of absolute values of weights outgoing from neuron divided by
sum of absolute values of weights outgoing from entire layer.
"""
import numpy as np
from athenet.layers import FullyConnectedLayer
from athenet.algorithm.utils import list_of_percentage_rows, delete_row
from athenet.algorithm.deleting import delete_weights_by_global_fraction
def simple_neuron_indicators(layers, p, layer_limit):
    """
    Returns list of indicators.

    This function, for a given set of layers,
    computes importance indicators for every weight.
    Weights are considered in sets corresponding to neurons in network,
    which in fully connected layers are represented as rows if weight's
    matrix.
    If some weight is not going to be erased then its indicator
    is set to -1.

    :param layers: list of layers
    :type layers: list of instances of athenet.layers.FullyConnectedLayer
    :param p: float between 0 and 1, fraction of neurons to be considered
    :param layer_limit: float between 0 and 1, maximal fraction of neurons
                        which will be considered in a single layer.
    """
    assert p >= 0. and p <= 1.
    assert layer_limit >= 0. and layer_limit <= 1.
    if layer_limit < p:
        p = layer_limit
    for layer in layers:
        assert(isinstance(layer, FullyConnectedLayer))

    # counter of neurons
    neurons_for_layer = np.zeros((len(layers),))
    neurons_in_general = 0
    # counter of deleted neurons
    deleted_for_layer = np.zeros((len(layers),))
    deleted_in_general = 0
    # results
    results = []

    # list of all neurons (interpreted as rows of matrices)
    considered_neurons = []
    for i in xrange(len(layers)):
        layer = layers[i]
        considered_neurons += list_of_percentage_rows(i, layer)
        neurons_for_layer[i] = layer.W.shape[0]
        neurons_in_general += neurons_for_layer[i]
        results.append(-np.ones_like(layer.W))

    # least significant neurons (smallest indicator value) come first
    considered_neurons = sorted(considered_neurons)
    for val, row, layer_id in considered_neurons:
        if deleted_in_general >= p * neurons_in_general:
            break
        # bugfix: the per-layer cap must use this neuron's own layer size
        # (neurons_for_layer[layer_id]); the previous code used the stale
        # loop index ``i`` left over from the loop above, which always
        # pointed at the last layer.
        if 1 + deleted_for_layer[layer_id] > \
                layer_limit * neurons_for_layer[layer_id]:
            continue
        deleted_for_layer[layer_id] += 1
        results[layer_id][row] = val
        deleted_in_general += 1
    return results
def simple_neuron_deleter(network, p, layer_limit):
    """
    :param network: an instance of athenet.Network.
    :param p: float between 0 and 1, fraction of neurons to be deleted
              from fully connected layers
    :param layer_limit: float between 0 and 1, maximal fraction of neurons
                        which will be deleted from a single layer.

    Modifies [network]. Deletes [p] neurons from layers connected directly
    to fully connected layers, without deleting more than [layer_limit]
    neurons from any single layer (so if [layer_limit] < [p], at most
    [layer_limit] neurons are deleted).

    Deletion of a neuron is simulated by zeroing all weights outgoing to
    it, which in athenet are represented as rows of the next layer's
    weight matrix.
    """
    # only fully connected layers take part in neuron deletion
    fc_layers = [layer for layer in network.weighted_layers
                 if isinstance(layer, FullyConnectedLayer)]
    indicators = simple_neuron_indicators(fc_layers, p, layer_limit)
    delete_weights_by_global_fraction(fc_layers, p, indicators)
| heurezjusz/Athenet | athenet/algorithm/simple_neuron_deleter.py | Python | bsd-2-clause | 3,741 | [
"NEURON"
] | 5667a1960cd4ddce88b90441eefa169fcb560d0a2c7dec0202f9afade9d68d37 |
# -*- coding: utf-8 -*-
"""Utilities for PyBEL testing."""
from uuid import uuid4
from requests.compat import urlparse
from ..manager import Manager
from ..manager.models import Namespace, NamespaceEntry
from ..struct import BELGraph
from ..struct.summary import (
get_annotation_values_by_annotation,
iter_annotation_value_pairs,
)
from ..struct.summary.node_summary import get_names
_FRAUNHOFER_RESOURCES = "https://owncloud.scai.fraunhofer.de/index.php/s/JsfpQvkdx3Y5EMx/download?path="
def get_uri_name(url: str) -> str:
    """Extract the file name from the end of the URL."""
    parsed = urlparse(url)
    if url.startswith(_FRAUNHOFER_RESOURCES):
        # Fraunhofer ownCloud links carry the real file name in the query
        # string (…download?path=<name>)
        return parsed.query.split("=")[-1]
    # otherwise the file name is the last path segment
    return parsed.path.split("/")[-1]
def n() -> str:
    """Return a fresh 15-character random identifier for use in tests."""
    token = uuid4()
    return str(token)[:15]
def make_dummy_namespaces(manager: Manager, graph: BELGraph) -> None:
    """Make dummy namespaces for the test.

    For each namespace keyword used in the graph, points the graph at a
    random stub URL and registers a Namespace with one NamespaceEntry per
    name, then commits.
    """
    for keyword, names in get_names(graph).items():
        url = n()
        graph.namespace_url[keyword] = url

        dummy = Namespace(keyword=keyword, url=url)
        manager.session.add(dummy)

        for name in names:
            manager.session.add(NamespaceEntry(name=name, namespace=dummy))

    manager.session.commit()
def make_dummy_annotations(manager: Manager, graph: BELGraph):
    """Make dummy annotations for the test.

    Creates one Namespace per annotation keyword (cached, so repeated
    keywords share a single namespace) and one NamespaceEntry per
    annotation value, then commits.
    """
    namespaces = {}
    for keyword, entity in iter_annotation_value_pairs(graph):
        namespace = namespaces.get(keyword)
        if namespace is None:
            graph.annotation_url[keyword] = url = n()
            namespace = Namespace(keyword=keyword, url=url, is_annotation=True)
            manager.session.add(namespace)
            # bugfix: remember the namespace so later values of the same
            # keyword reuse it — the cache was never populated before, so
            # every (keyword, value) pair created a duplicate Namespace row.
            namespaces[keyword] = namespace

        entry = NamespaceEntry(name=entity.name, identifier=entity.identifier, namespace=namespace)
        manager.session.add(entry)

    manager.session.commit()
| pybel/pybel | src/pybel/testing/utils.py | Python | mit | 1,985 | [
"Pybel"
] | 81ddeca2c9e2a56a509eddec3787fbf19cba04abfc59f250cda0f0d7dd4c81f3 |
#! /usr/bin/env python
if __name__ == '__main__':
    # Sample a scalar field from a VTK unstructured-grid file on a regular
    # 2D grid at z=0 and write the values as a tab-separated matrix.
    print "Importing..."
    from enthought.mayavi import mlab
    from enthought.mayavi.modules.surface import Surface
    import numpy
    import sys
    print " done."
    if(len(sys.argv) != 5):
        raise RuntimeError("Wrong number of arguments!\nUsage: grid_to_m.py sol.vtu var_name width step")
    # off-screen rendering: we only probe values, no window is needed
    mlab.options.offscreen = True
    engine = mlab.get_engine()
    vtk_file_reader = engine.open(sys.argv[1])
    vtk_file_reader.point_scalars_name = sys.argv[2]
    #ug = vtk_file_reader.outputs[0]
    surface = Surface()
    engine.add_filter(surface, vtk_file_reader)
    # getting the data
    # you need to set the vars
    w = int(sys.argv[3])   # half-width of the sampling square
    s = int(sys.argv[4])   # number of sample points along each axis
    x_min = -w
    x_max = w
    x_step = s*1j          # imaginary step: mgrid treats it as a point count
    y_min = -w
    y_max = w
    y_step = s*1j
    # sample the surface scalar field on an s x s grid in the z=0 plane
    x_g, y_g, z_g = numpy.mgrid[x_min:x_max:x_step, y_min:y_max:y_step, 0:1:1j]
    res = mlab.pipeline.probe_data(surface, x_g, y_g, z_g, type="scalars")
    print "Writing samples to %s_%i.txt..."%(sys.argv[2],w)
    f = open("%s_%i.txt"%(sys.argv[2],w), "w")
    # now the data are accessible for you, so you can easly construct your matrix (x, y, val):
    for i in range(int(x_step.imag)):
        for j in range(int(y_step.imag)):
            f.write("%g\t" % res[i,j,0])
        f.write("\n")
    f.close()
    print " done."
    #mlab.show()
| cdiener/bar1_project | tasks/grid_to_m.py | Python | gpl-3.0 | 1,273 | [
"Mayavi"
] | c474849d24406f32b2dd7bfe802bf876a36057e82e855a25459fa78d186e0a21 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.