input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Calculation of DDEC charges based on data parsed by cclib."""
import copy
import random
import numpy
import logging
import math
import os
import sys
from cclib.method.calculationmethod import Method
from cclib.method.volume import electrondensity_spin
from cclib.parser.utils import convertor
from cclib.parser.utils import find_package
from typing import List
class MissingInputError(Exception):
    """Raised when a required input (e.g. the proatom density files) cannot be found."""
class DDEC6(Method):
"""DDEC6 charges."""
# All of these are required for DDEC6 charges.
required_attrs = ("homos", "mocoeffs", "nbasis", "gbasis")
def __init__(
    self, data, volume, proatom_path=None, progress=None, loglevel=logging.INFO, logname="Log"
):
    """Initialize the DDEC6 method and load neutral-atom reference densities."""
    # Inputs are:
    # data -- ccData object that describe target molecule.
    # volume -- Volume object that describe target Cartesian grid.
    # proatom_path -- path to proatom densities
    # (directory containing atoms.h5 in horton or c2_001_001_000_400_075.txt in chargemol)
    super(DDEC6, self).__init__(data, progress, loglevel, logname)
    self.volume = volume
    self.fragresults = None
    self.proatom_path = proatom_path
    if numpy.sum(self.data.coreelectrons) != 0:
        # TODO: Pseudopotentials should be added back
        pass
    # Check whether proatom_path is a valid directory or not.
    assert os.path.isdir(
        proatom_path
    ), "Directory that contains proatom densities should be added as an input."
    # Read in reference charges.
    # Charge 0 (neutral atoms) is used here; `calculate` later reloads the
    # densities with the refined reference charges.
    self.proatom_density = []
    self.radial_grid_r = []
    for atom_number in self.data.atomnos:
        density, r = self._read_proatom(proatom_path, atom_number, 0)
        self.proatom_density.append(density)
        self.radial_grid_r.append(r)
def __str__(self):
"""Return a string representation of the object."""
return "DDEC6 charges of {}".format(self.data)
def __repr__(self):
"""Return a representation of the object."""
return "DDEC6({})".format(self.data)
def _check_required_attributes(self):
    """Delegate the check of `required_attrs` to the base Method class."""
    super()._check_required_attributes()
def _cartesian_dist(self, pt1, pt2):
""" Small utility function that calculates Euclidian distance between two points
pt1 and pt2 are numpy arrays representing a point in Cartesian coordinates. """
return numpy.sqrt(numpy.dot(pt1 - pt2, pt1 - pt2))
def _read_proatom(
self, directory, atom_num, charge # type = str # type = int # type = float
):
# type: (...) -> numpy.ndarray, numpy.ndarray
"""Return a list containing proatom reference densities."""
# TODO: Treat calculations with psuedopotentials
# TODO: Modify so that proatom densities are read only once for horton
# [https://github.com/cclib/cclib/pull/914#discussion_r464039991]
# File name format:
# ** Chargemol **
# c2_[atom number]_[nuclear charge]_[electron count]_[cutoff radius]_[# shells]
# ** Horton **
# atoms.h5
# File format:
# Starting from line 13, each line contains the charge densities for each shell
# If `charge` is not an integer, proatom densities have to be linearly interpolated between
# the densities of the ion/atom with floor(charge) and ceiling(charge)
charge_floor = int(math.floor(charge))
charge_ceil = int(math.ceil(charge))
chargemol_path_floor = os.path.join(
directory,
"c2_{:03d}_{:03d}_{:03d}_500_100.txt".format(
atom_num, atom_num, atom_num - charge_floor
),
)
chargemol_path_ceil = os.path.join(
directory,
"c2_{:03d}_{:03d}_{:03d}_500_100.txt".format(
atom_num, atom_num, atom_num - charge_ceil
),
)
horton_path = os.path.join(directory, "atoms.h5")
if os.path.isfile(chargemol_path_floor) or os.path.isfile(chargemol_path_ceil):
# Use chargemol proatom densities
# Each shell is .05 angstroms apart (uniform).
# *scalefactor* = 10.58354497764173 bohrs in module_global_parameter.f08
if atom_num <= charge_floor:
density_floor = numpy.array([0])
else:
density_floor = numpy.loadtxt(chargemol_path_floor, skiprows=12, dtype=float)
if atom_num >= charge_ceil:
density_ceil = numpy.array([0])
else:
density_ceil = numpy.loadtxt(chargemol_path_ceil, skiprows=12, dtype=float)
density = (charge_ceil - charge) * density_floor + (
charge - charge_floor
) * density_ceil
radiusgrid = numpy.arange(1, len(density) + 1) * 0.05
elif os.path.isfile(horton_path):
# Use horton proatom densities
assert find_package("h5py"), "h5py is needed to read in proatom densities from horton."
import h5py
with h5py.File(horton_path, "r") as proatomdb:
if atom_num <= charge_floor:
density_floor = numpy.array([0])
radiusgrid = numpy.array([0])
else:
keystring_floor = "Z={}_Q={:+d}".format(atom_num, charge_floor)
density_floor = numpy.asanyarray(list(proatomdb[keystring_floor]["rho"]))
# gridspec is specification of integration grid for proatom densities in horton.
# Example -- ['PowerRTransform', '1.1774580743206259e-07', '20.140888089596444', '41']
# is constructed using PowerRTransform grid
# with rmin = 1.1774580743206259e-07
# rmax = 20.140888089596444
# and ngrid = 41
# PowerRTransform is default in horton-atomdb.py.
gridtype, gridmin, gridmax, gridn = (
proatomdb[keystring_floor].attrs["rtransform"].split()
)
gridmin = convertor(float(gridmin), "bohr", "Angstrom")
gridmax = convertor(float(gridmax), "bohr", "Angstrom")
gridn = int(gridn)
# Convert byte to string in Python3
if sys.version[0] == "3":
gridtype = gridtype.decode("UTF-8")
# First verify that it is one of recognized grids
assert gridtype in [
"LinearRTransform",
"ExpRTransform",
"PowerRTransform",
], "Grid type not recognized."
if gridtype == "LinearRTransform":
# Linear transformation. r(t) = rmin + t*(rmax - rmin)/(npoint - 1)
gridcoeff = (gridmax - gridmin) / (gridn - 1)
radiusgrid = gridmin + numpy.arange(1, gridn + 1) * gridcoeff
elif gridtype == "ExpRTransform":
# Exponential transformation. r(t) = rmin*exp(t*log(rmax/rmin)/(npoint - 1))
gridcoeff = math.log(gridmax / gridmin) / (gridn - 1)
radiusgrid = gridmin * numpy.exp(numpy.arange(1, gridn + 1) * gridcoeff)
elif gridtype == "PowerRTransform":
# Power transformation. r(t) = rmin*t^power
# with power = log(rmax/rmin)/log(npoint)
gridcoeff = math.log(gridmax / gridmin) / math.log(gridn)
radiusgrid = gridmin * numpy.power(numpy.arange(1, gridn + 1), gridcoeff)
if atom_num <= charge_ceil:
density_ceil = numpy.array([0])
else:
keystring_ceil = "Z={}_Q={:+d}".format(atom_num, charge_ceil)
density_ceil = numpy.asanyarray(list(proatomdb[keystring_ceil]["rho"]))
density = (charge_ceil - charge) * density_floor + (
charge - charge_floor
) * density_ceil
del h5py
else:
raise MissingInputError("Pro-atom densities were not found in the specified path.")
if charge == charge_floor:
density = density_floor
return density, radiusgrid
def calculate(self, indices=None, fupdate=0.05):
    """
    Calculate DDEC6 charges based on doi: 10.1039/c6ra04656h paper.
    Cartesian, uniformly spaced grids are assumed for this function.
    """
    # Obtain charge densities on the grid if it does not contain one.
    if not numpy.any(self.volume.data):
        self.logger.info("Calculating charge densities on the provided empty grid.")
        if len(self.data.mocoeffs) == 1:
            # Restricted case. `homos[0]` is the *index* of the HOMO, so the
            # occupied orbitals are 0..homos[0] inclusive and the slice must end
            # at homos[0] + 1 (the old `[: homos[0]]` silently dropped the HOMO).
            # Each spatial orbital is doubly occupied, hence the factor of 2.
            self.chgdensity = electrondensity_spin(
                self.data, self.volume, [self.data.mocoeffs[0][: self.data.homos[0] + 1]]
            )
            self.chgdensity.data *= 2
        else:
            # Unrestricted case: alpha and beta occupied orbitals separately.
            self.chgdensity = electrondensity_spin(
                self.data,
                self.volume,
                [
                    self.data.mocoeffs[0][: self.data.homos[0] + 1],
                    self.data.mocoeffs[1][: self.data.homos[1] + 1],
                ],
            )
    # If charge densities are provided beforehand, log this information
    # `Volume` object does not contain (nor rely on) information about the constituent atoms.
    else:
        self.logger.info("Using charge densities from the provided Volume object.")
        self.chgdensity = self.volume
    # STEP 1
    # Carry out step 1 of DDEC6 algorithm [Determining ion charge value]
    # Refer to equations 49-57 in doi: 10.1039/c6ra04656h
    self.logger.info("Creating first reference charges.")
    ref, loc, stock = self.calculate_refcharges()
    self.refcharges = [ref]
    self._localizedcharges = [loc]
    self._stockholdercharges = [stock]
    # STEP 2
    # Load new proatom densities using the first set of reference charges.
    self.logger.info("Creating second reference charges.")
    self.proatom_density = []
    self.radial_grid_r = []
    for i, atom_number in enumerate(self.data.atomnos):
        density, r = self._read_proatom(
            self.proatom_path, atom_number, float(self.refcharges[0][i])
        )
        self.proatom_density.append(density)
        self.radial_grid_r.append(r)
    # Carry out step 2 of DDEC6 algorithm [Determining ion charge value again]
    ref, loc, stock = self.calculate_refcharges()
    self.refcharges.append(ref)
    self._localizedcharges.append(loc)
    self._stockholdercharges.append(stock)
    # STEP 3
    # Load new proatom densities using the second set of reference charges.
    self.proatom_density = []
    self.radial_grid_r = []
    for i, atom_number in enumerate(self.data.atomnos):
        density, r = self._read_proatom(
            self.proatom_path, atom_number, float(self.refcharges[1][i])
        )
        self.proatom_density.append(density)
        self.radial_grid_r.append(r)
    # Carry out step 3 of DDEC6 algorithm [Determine conditioned charge density and tau]
    self.logger.info("Conditioning charge densities.")
    self.condition_densities()
def calculate_refcharges(self):
    """Calculate reference charges from proatom and molecular densities.
    [STEP 1 and 2]
    """
    nx, ny, nz = self.chgdensity.data.shape
    natoms = len(self.data.atomnos)
    stockholder_w = numpy.zeros((natoms, nx, ny, nz))
    self.closest_r_index = numpy.zeros((natoms, nx, ny, nz), dtype=int)
    # For every (atom, grid point) pair, find the proatom radial shell nearest
    # to the point and take the proatom density there as the stockholder weight.
    for atomi in range(natoms):
        atomcoord = self.data.atomcoords[-1][atomi]
        rgrid = self.radial_grid_r[atomi]
        prodensity = self.proatom_density[atomi]
        for ix in range(nx):
            for iy in range(ny):
                for iz in range(nz):
                    # Distance of this grid point from the atom position.
                    dist_r = self._cartesian_dist(
                        atomcoord, self.chgdensity.coordinates([ix, iy, iz])
                    )
                    nearest = numpy.abs(rgrid - dist_r).argmin()
                    self.closest_r_index[atomi][ix][iy][iz] = nearest
                    # Equation 54 in doi: 10.1039/c6ra04656h
                    stockholder_w[atomi][ix][iy][iz] = prodensity[nearest]
    # Equation 55 in doi: 10.1039/c6ra04656h
    localized_w = numpy.power(stockholder_w, 4)
    # Equation 53 in doi: 10.1039/c6ra04656h
    stockholder_bigW = numpy.sum(stockholder_w, axis=0)
    localized_bigW = numpy.sum(localized_w, axis=0)
    refcharges = numpy.zeros((natoms))
    localizedcharges = numpy.zeros((natoms))
    stockholdercharges = numpy.zeros((natoms))
    for atomi in range(natoms):
        # Equation 52 and 51 in doi: 10.1039/c6ra04656h
        localizedcharges[atomi] = self.data.atomnos[atomi] - self.chgdensity.integrate(
            weights=(localized_w[atomi] / localized_bigW)
        )
        stockholdercharges[atomi] = self.data.atomnos[atomi] - self.chgdensity.integrate(
            weights=(stockholder_w[atomi] / stockholder_bigW)
        )
        # In DDEC6, weights of 1/3 and 2/3 are assigned for stockholder and localized charges.
        # (Equation 50 and 58 in doi: 10.1039/c6ra04656h)
        refcharges[atomi] = (stockholdercharges[atomi] / 3.0) + (
            localizedcharges[atomi] * 2.0 / 3.0
        )
    return refcharges, localizedcharges, stockholdercharges
def condition_densities(self):
""" Calculate conditioned densities
[STEP 3]
"""
# Generator object to iterate over the grid
xshape, yshape, zshape = | |
York",
},
{
"city": "Arcadia",
"growth_from_2000_to_2013": "8.3%",
"latitude": 34.1397292,
"longitude": -118.0353449,
"population": "57639",
"rank": "626",
"state": "California",
},
{
"city": "Redmond",
"growth_from_2000_to_2013": "26.0%",
"latitude": 47.6739881,
"longitude": -122.121512,
"population": "57530",
"rank": "627",
"state": "Washington",
},
{
"city": "Lake Elsinore",
"growth_from_2000_to_2013": "96.5%",
"latitude": 33.6680772,
"longitude": -117.3272615,
"population": "57525",
"rank": "628",
"state": "California",
},
{
"city": "Ocala",
"growth_from_2000_to_2013": "20.8%",
"latitude": 29.1871986,
"longitude": -82.14009229999999,
"population": "57468",
"rank": "629",
"state": "Florida",
},
{
"city": "Tinley Park",
"growth_from_2000_to_2013": "16.3%",
"latitude": 41.5731442,
"longitude": -87.7932939,
"population": "57282",
"rank": "630",
"state": "Illinois",
},
{
"city": "Port Orange",
"growth_from_2000_to_2013": "22.8%",
"latitude": 29.1383165,
"longitude": -80.9956105,
"population": "57203",
"rank": "631",
"state": "Florida",
},
{
"city": "Medford",
"growth_from_2000_to_2013": "2.7%",
"latitude": 42.4184296,
"longitude": -71.1061639,
"population": "57170",
"rank": "632",
"state": "Massachusetts",
},
{
"city": "Oak Lawn",
"growth_from_2000_to_2013": "3.3%",
"latitude": 41.719978,
"longitude": -87.7479528,
"population": "57073",
"rank": "633",
"state": "Illinois",
},
{
"city": "Rocky Mount",
"growth_from_2000_to_2013": "-3.1%",
"latitude": 35.9382103,
"longitude": -77.7905339,
"population": "56954",
"rank": "634",
"state": "North Carolina",
},
{
"city": "Kokomo",
"growth_from_2000_to_2013": "21.3%",
"latitude": 40.486427,
"longitude": -86.13360329999999,
"population": "56895",
"rank": "635",
"state": "Indiana",
},
{
"city": "Coconut Creek",
"growth_from_2000_to_2013": "28.4%",
"latitude": 26.2517482,
"longitude": -80.17893509999999,
"population": "56792",
"rank": "636",
"state": "Florida",
},
{
"city": "Bowie",
"growth_from_2000_to_2013": "8.6%",
"latitude": 39.0067768,
"longitude": -76.77913649999999,
"population": "56759",
"rank": "637",
"state": "Maryland",
},
{
"city": "Berwyn",
"growth_from_2000_to_2013": "5.1%",
"latitude": 41.85058739999999,
"longitude": -87.7936685,
"population": "56758",
"rank": "638",
"state": "Illinois",
},
{
"city": "Midwest City",
"growth_from_2000_to_2013": "4.5%",
"latitude": 35.4495065,
"longitude": -97.3967019,
"population": "56756",
"rank": "639",
"state": "Oklahoma",
},
{
"city": "Fountain Valley",
"growth_from_2000_to_2013": "3.0%",
"latitude": 33.7091847,
"longitude": -117.9536697,
"population": "56707",
"rank": "640",
"state": "California",
},
{
"city": "Buckeye",
"growth_from_2000_to_2013": "480.9%",
"latitude": 33.3703197,
"longitude": -112.5837766,
"population": "56683",
"rank": "641",
"state": "Arizona",
},
{
"city": "Dearborn Heights",
"growth_from_2000_to_2013": "-3.0%",
"latitude": 42.3369816,
"longitude": -83.27326269999999,
"population": "56620",
"rank": "642",
"state": "Michigan",
},
{
"city": "Woodland",
"growth_from_2000_to_2013": "13.8%",
"latitude": 38.67851570000001,
"longitude": -121.7732971,
"population": "56590",
"rank": "643",
"state": "California",
},
{
"city": "Noblesville",
"growth_from_2000_to_2013": "88.1%",
"latitude": 40.0455917,
"longitude": -86.0085955,
"population": "56540",
"rank": "644",
"state": "Indiana",
},
{
"city": "Valdosta",
"growth_from_2000_to_2013": "22.3%",
"latitude": 30.8327022,
"longitude": -83.2784851,
"population": "56481",
"rank": "645",
"state": "Georgia",
},
{
"city": "Diamond Bar",
"growth_from_2000_to_2013": "0.1%",
"latitude": 34.0286226,
"longitude": -117.8103367,
"population": "56449",
"rank": "646",
"state": "California",
},
{
"city": "Manhattan",
"growth_from_2000_to_2013": "22.8%",
"latitude": 39.18360819999999,
"longitude": -96.57166939999999,
"population": "56143",
"rank": "647",
"state": "Kansas",
},
{
"city": "Santee",
"growth_from_2000_to_2013": "5.7%",
"latitude": 32.8383828,
"longitude": -116.9739167,
"population": "56105",
"rank": "648",
"state": "California",
},
{
"city": "Taunton",
"growth_from_2000_to_2013": "0.0%",
"latitude": 41.900101,
"longitude": -71.0897674,
"population": "56069",
"rank": "649",
"state": "Massachusetts",
},
{
"city": "Sanford",
"growth_from_2000_to_2013": "42.8%",
"latitude": 28.8028612,
"longitude": -81.269453,
"population": "56002",
"rank": "650",
"state": "Florida",
},
{
"city": "Kettering",
"growth_from_2000_to_2013": "-3.1%",
"latitude": 39.68950359999999,
"longitude": -84.1688274,
"population": "55870",
"rank": "651",
"state": "Ohio",
},
{
"city": "New Brunswick",
"growth_from_2000_to_2013": "15.5%",
"latitude": 40.4862157,
"longitude": -74.4518188,
"population": "55831",
"rank": "652",
"state": "New Jersey",
},
{
"city": "Decatur",
"growth_from_2000_to_2013": "3.1%",
"latitude": 34.6059253,
"longitude": -86.9833417,
"population": "55816",
"rank": "653",
"state": "Alabama",
},
{
"city": "Chicopee",
"growth_from_2000_to_2013": "1.7%",
"latitude": 42.1487043,
"longitude": -72.6078672,
"population": "55717",
"rank": "654",
"state": "Massachusetts",
},
{
"city": "Anderson",
"growth_from_2000_to_2013": "-6.6%",
"latitude": 40.1053196,
"longitude": -85.6802541,
"population": "55670",
"rank": "655",
"state": "Indiana",
},
{
"city": "Margate",
"growth_from_2000_to_2013": "2.7%",
"latitude": 26.2445263,
"longitude": -80.206436,
"population": "55456",
"rank": "656",
"state": "Florida",
},
{
"city": "Weymouth Town",
"growth_from_2000_to_2013": "",
"latitude": 42.2180724,
"longitude": -70.94103559999999,
"population": "55419",
"rank": "657",
"state": "Massachusetts",
},
{
"city": "Hempstead",
"growth_from_2000_to_2013": "4.0%",
"latitude": 40.7062128,
"longitude": -73.6187397,
"population": "55361",
"rank": "658",
"state": "New York",
},
{
"city": "Corvallis",
"growth_from_2000_to_2013": "11.8%",
"latitude": 44.5645659,
"longitude": -123.2620435,
"population": "55298",
"rank": "659",
"state": "Oregon",
},
{
"city": "Eastvale",
"growth_from_2000_to_2013": "",
"latitude": 33.952463,
"longitude": -117.5848025,
"population": "55191",
"rank": "660",
"state": "California",
},
{
"city": "Porterville",
"growth_from_2000_to_2013": "20.1%",
"latitude": 36.06523,
"longitude": -119.0167679,
"population": "55174",
"rank": "661",
"state": "California",
},
{
"city": "West Haven",
"growth_from_2000_to_2013": "5.1%",
"latitude": 41.2705484,
"longitude": -72.9469711,
"population": "55046",
"rank": "662",
"state": "Connecticut",
},
{
"city": "Brentwood",
"growth_from_2000_to_2013": "122.3%",
"latitude": 37.931868,
"longitude": -121.6957863,
"population": "55000",
"rank": "663",
"state": "California",
},
{
"city": "Paramount",
"growth_from_2000_to_2013": "-0.7%",
"latitude": 33.8894598,
"longitude": -118.1597911,
"population": "54980",
"rank": "664",
"state": "California",
},
{
"city": "Grand Forks",
"growth_from_2000_to_2013": "11.5%",
"latitude": 47.9252568,
"longitude": -97.0328547,
"population": "54932",
"rank": "665",
"state": "North Dakota",
},
{
"city": "Georgetown",
"growth_from_2000_to_2013": "91.9%",
"latitude": 30.6332618,
"longitude": -97.6779842,
"population": "54898",
"rank": "666",
"state": "Texas",
},
{
"city": "<NAME>",
"growth_from_2000_to_2013": "6.5%",
"latitude": 38.7874699,
"longitude": -90.6298922,
"population": "54842",
"rank": "667",
"state": "Missouri",
},
{
"city": "Shoreline",
"growth_from_2000_to_2013": "2.9%",
"latitude": 47.7556531,
"longitude": -122.3415178,
"population": "54790",
"rank": "668",
"state": "Washington",
},
{
"city": "Mount Prospect",
"growth_from_2000_to_2013": "-2.5%",
"latitude": 42.0664167,
"longitude": -87.9372908,
"population": "54771",
"rank": "669",
"state": "Illinois",
},
{
"city": "Hanford",
"growth_from_2000_to_2013": "30.3%",
"latitude": 36.3274502,
"longitude": -119.6456844,
"population": "54686",
"rank": "670",
"state": "California",
},
{
"city": "Normal",
"growth_from_2000_to_2013": "19.7%",
"latitude": 40.5142026,
"longitude": -88.9906312,
"population": "54664",
"rank": "671",
"state": "Illinois",
},
{
"city": "Rosemead",
"growth_from_2000_to_2013": "1.7%",
"latitude": 34.0805651,
"longitude": -118.072846,
"population": "54561",
"rank": "672",
"state": "California",
},
{
"city": "Lehi",
"growth_from_2000_to_2013": "176.3%",
"latitude": 40.3916172,
"longitude": -111.8507662,
"population": "54382",
"rank": "673",
"state": "Utah",
},
{
"city": "Pocatello",
"growth_from_2000_to_2013": "5.4%",
"latitude": 42.8713032,
"longitude": -112.4455344,
"population": "54350",
"rank": "674",
"state": "Idaho",
},
{
"city": "Highland",
"growth_from_2000_to_2013": "21.0%",
"latitude": 34.1283442,
"longitude": -117.2086513,
"population": "54291",
"rank": "675",
"state": "California",
},
{
"city": "Novato",
"growth_from_2000_to_2013": "13.3%",
"latitude": 38.1074198,
"longitude": -122.5697032,
"population": "54194",
"rank": "676",
"state": "California",
},
{
"city": "Port Arthur",
"growth_from_2000_to_2013": "-6.0%",
"latitude": 29.8849504,
"longitude": -93.93994699999999,
"population": "54135",
"rank": "677",
"state": "Texas",
},
{
"city": "Carson City",
"growth_from_2000_to_2013": "2.9%",
"latitude": 39.1637984,
"longitude": -119.7674034,
"population": "54080",
"rank": "678",
"state": "Nevada",
},
{
"city": "San Marcos",
"growth_from_2000_to_2013": "48.5%",
"latitude": 29.8832749,
"longitude": -97.9413941,
"population": "54076",
"rank": "679",
"state": "Texas",
},
{
"city": "Hendersonville",
"growth_from_2000_to_2013": "31.7%",
"latitude": 36.3047735,
"longitude": -86.6199957,
"population": "54068",
"rank": "680",
"state": "Tennessee",
},
{
"city": "Elyria",
"growth_from_2000_to_2013": "-3.7%",
"latitude": 41.3683798,
"longitude": -82.10764859999999,
"population": "53956",
"rank": "681",
"state": "Ohio",
},
{
"city": "Revere",
"growth_from_2000_to_2013": "13.4%",
"latitude": 42.4084302,
"longitude": -71.0119948,
"population": "53756",
"rank": "682",
"state": "Massachusetts",
},
{
"city": "Pflugerville",
"growth_from_2000_to_2013": "123.4%",
"latitude": 30.4393696,
"longitude": -97.62000429999999,
"population": "53752",
"rank": "683",
"state": "Texas",
},
{
"city": "Greenwood",
"growth_from_2000_to_2013": "46.0%",
"latitude": 39.6136578,
"longitude": -86.10665259999999,
"population": "53665",
"rank": "684",
"state": "Indiana",
},
{
"city": "Bellevue",
"growth_from_2000_to_2013": "20.5%",
"latitude": 41.1543623,
"longitude": -95.9145568,
"population": "53663",
"rank": "685",
"state": "Nebraska",
},
{
"city": "Wheaton",
"growth_from_2000_to_2013": "-3.4%",
"latitude": 41.8661403,
"longitude": -88.1070127,
"population": "53648",
"rank": "686",
"state": "Illinois",
},
{
"city": "Smyrna",
"growth_from_2000_to_2013": "20.0%",
"latitude": 33.8839926,
"longitude": -84.51437609999999,
"population": "53438",
"rank": "687",
"state": "Georgia",
},
{
"city": "Sarasota",
"growth_from_2000_to_2013": "1.4%",
"latitude": 27.3364347,
"longitude": -82.53065269999999,
"population": "53326",
"rank": "688",
"state": "Florida",
},
{
"city": "Blue Springs",
"growth_from_2000_to_2013": "9.9%",
"latitude": 39.0169509,
"longitude": -94.2816148,
"population": "53294",
"rank": "689",
"state": "Missouri",
},
{
"city": "Colton",
"growth_from_2000_to_2013": "10.8%",
"latitude": 34.0739016,
"longitude": -117.3136547,
"population": "53243",
"rank": "690",
"state": "California",
},
{
"city": "Euless",
"growth_from_2000_to_2013": "15.1%",
"latitude": 32.8370727,
"longitude": -97.08195409999999,
"population": "53224",
"rank": "691",
"state": "Texas",
},
{
"city": "Castle Rock",
"growth_from_2000_to_2013": "153.5%",
"latitude": 39.3722121,
"longitude": -104.8560902,
"population": "53063",
"rank": "692",
"state": "Colorado",
},
{
"city": "Cathedral City",
"growth_from_2000_to_2013": "23.2%",
"latitude": 33.7805388,
"longitude": -116.4668036,
"population": "52977",
"rank": "693",
"state": "California",
},
{
"city": "Kingsport",
"growth_from_2000_to_2013": "16.7%",
"latitude": 36.548434,
"longitude": -82.5618186,
"population": "52962",
"rank": "694",
"state": "Tennessee",
},
{
"city": "Lake Havasu City",
"growth_from_2000_to_2013": "24.6%",
"latitude": 34.483901,
"longitude": -114.3224548,
"population": "52844",
"rank": "695",
"state": "Arizona",
},
{
"city": "Pensacola",
"growth_from_2000_to_2013": "-6.0%",
"latitude": 30.42130899999999,
"longitude": -87.2169149,
"population": "52703",
"rank": "696",
"state": "Florida",
},
{
"city": "Hoboken",
"growth_from_2000_to_2013": "35.8%",
"latitude": 40.7439905,
"longitude": -74.0323626,
"population": "52575",
"rank": "697",
"state": "New Jersey",
},
{
"city": "Yucaipa",
"growth_from_2000_to_2013": "26.8%",
"latitude": 34.033625,
"longitude": -117.0430865,
"population": "52536",
"rank": "698",
"state": "California",
},
{
"city": "Watsonville",
"growth_from_2000_to_2013": "12.7%",
"latitude": 36.910231,
"longitude": -121.7568946,
"population": "52477",
"rank": "699",
"state": "California",
},
{
"city": "Richland",
"growth_from_2000_to_2013": "34.6%",
"latitude": 46.2856907,
"longitude": -119.2844621,
"population": "52413",
"rank": "700",
"state": "Washington",
},
{
"city": "Delano",
"growth_from_2000_to_2013": "31.8%",
"latitude": 35.7688425,
"longitude": -119.2470536,
"population": "52403",
"rank": "701",
"state": "California",
},
{
"city": "Hoffman Estates",
"growth_from_2000_to_2013": "5.4%",
"latitude": 42.0629915,
"longitude": -88.12271989999999,
"population": "52398",
"rank": "702",
"state": "Illinois",
},
{
"city": "Florissant",
"growth_from_2000_to_2013": "-2.8%",
"latitude": 38.789217,
"longitude": -90.322614,
"population": "52363",
"rank": "703",
"state": "Missouri",
},
{
| |
void a(){
}
void foo(){
a();
}
void main(){
}
"""
expect = "Unreachable Function: foo"
self.assertTrue(TestChecker.test(input,expect,441))
def test_unreachable_func_nested_block(self):
    """b is never called (main -> foo -> a only), so it must be reported."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
void a(){
}
void b(){}
void foo(){
{
a();
}
}
void main(){
{
{
foo();
}
}
}
"""
    expect = "Unreachable Function: b"
    self.assertTrue(TestChecker.test(source, expect, 442))
def test_unreachable_func_with_recursion(self):
    """foo only calls itself and is never reached from main."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
void a(){
a();
}
void b(){
b();
b();
}
void foo(){
foo();
}
void main(){
a();
{
b();
}
}
"""
    expect = "Unreachable Function: foo"
    self.assertTrue(TestChecker.test(source, expect, 443))
def test_not_left_value_with_var(self):
    """An arithmetic expression cannot be the target of an assignment."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int a ;
void main(){
a + 1 = 10 ;
}
"""
    expect = "Not Left Value: BinaryOp(+,Id(a),IntLiteral(1))"
    self.assertTrue(TestChecker.test(source, expect, 444))
def test_not_left_value_with_exp_left_not_storage(self):
    """A literal cannot be assigned to."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int a ;
void main(){
3 = a;
}
"""
    expect = "Not Left Value: IntLiteral(3)"
    self.assertTrue(TestChecker.test(source, expect, 445))
def test_not_left_value_with_exp_left_not_storage_function(self):
    """A constant expression on the left of `=` is not an lvalue."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int foo(){
return 1;
}
void main(){
3 + 1 = foo();
}
"""
    expect = "Not Left Value: BinaryOp(+,IntLiteral(3),IntLiteral(1))"
    self.assertTrue(TestChecker.test(source, expect, 446))
def test_not_left_value_with_function(self):
    """A function call result is not an lvalue."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int foo(){
return 1;
}
void main(){
foo() = 10 ;
}
"""
    expect = "Not Left Value: CallExpr(Id(foo),[])"
    self.assertTrue(TestChecker.test(source, expect, 447))
def test_not_left_value_with_arraycell(self):
    """An expression built from an array cell is still not an lvalue."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int a[6],b[7],c[8];
void main(){
b[1] + 1 = c[7] + 10;
}
"""
    expect = "Not Left Value: BinaryOp(+,ArrayCell(Id(b),IntLiteral(1)),IntLiteral(1))"
    self.assertTrue(TestChecker.test(source, expect, 448))
def test_correct_left_value_with_var(self):
    """Chained assignment to plain variables is valid (no error expected)."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int a,b,c;
void main(){
a = b = c = 10;
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 449))
def test_correct_left_value_with_var_2(self):
    """Chained assignment with an arithmetic RHS is valid (no error expected).

    Renamed from `test_correct_left_value_with_var` (with a `_2` suffix like
    the `_arraycell_2` test below): the original redefined the method declared
    just above, silently shadowing it so that test never ran.
    """
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int a,b,c;
void main(){
a = b = c = (10+3)/4 + 5*9 - 2;
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 450))
def test_correct_left_value_with_arraycell(self):
    """Array cells are valid lvalues in chained assignment."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int a[6],b[7],c[8];
void main(){
a[1] = a[2] = a[4] + 10;
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 451))
def test_correct_left_value_with_arraycell_2(self):
    """Array cells of different arrays are valid lvalues in chained assignment."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int a[6],b[7],c[8];
void main(){
a[1] = b[1] = c[7] + 10%5;
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 452))
def test_TypeMismatchExpr_ArrayCell_with_type_idx_float(self):
    """A float index into an array is a type mismatch."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
void main(){
int a[5];
a[1.2] = 5;
}
"""
    expect = "Type Mismatch In Expression: ArrayCell(Id(a),FloatLiteral(1.2))"
    self.assertTrue(TestChecker.test(source, expect, 453))
def test_TypeMismatchExpr_ArrayCell_with_type_idx_string(self):
    """A string index into an array is a type mismatch."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
void main(){
int a[5];
a["1"] = 9;
}
"""
    expect = "Type Mismatch In Expression: ArrayCell(Id(a),StringLiteral(1))"
    self.assertTrue(TestChecker.test(source, expect, 454))
def test_TypeMismatchExpr_ArrayCell_with_type_idx_bool(self):
    """A boolean index into an array is a type mismatch."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
void main(){
int a[5];
a[true] = 9;
}
"""
    expect = "Type Mismatch In Expression: ArrayCell(Id(a),BooleanLiteral(true))"
    self.assertTrue(TestChecker.test(source, expect, 455))
def test_TypeMismatchExpr_ArrayCell_with_type_arr_int(self):
    """Indexing a scalar int as if it were an array is a type mismatch."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
void main(){
int a;
a[5] = 10;
}
"""
    expect = "Type Mismatch In Expression: ArrayCell(Id(a),IntLiteral(5))"
    self.assertTrue(TestChecker.test(source, expect, 456))
def test_TypeMismatchExpr_ArrayCell_with_type_arr_string(self):
    """Indexing a scalar string as if it were an array is a type mismatch."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
void main(){
string a;
a[5] = 10;
}
"""
    expect = "Type Mismatch In Expression: ArrayCell(Id(a),IntLiteral(5))"
    self.assertTrue(TestChecker.test(source, expect, 457))
def test_correct_ArrayCell_with_type_arr_ArrayPointer(self):
    """Indexing an array-pointer parameter is valid (no error expected)."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
void main(int a[], float b){
a[1] = 9;
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 458))
def test_TypeMismatchInExpression_IntType_BoolType(self):
    """Assigning a boolean to an int variable is a type mismatch."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
void main(){int a; a = true;}
"""
    expect = "Type Mismatch In Expression: BinaryOp(=,Id(a),BooleanLiteral(true))"
    self.assertTrue(TestChecker.test(source, expect, 459))
def test_TypeMismatchInExpression_Sum_IntType_StringType(self):
    """Adding an int and a string is a type mismatch."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
void main(){int a; string b; a = 1 + b;}
"""
    expect = "Type Mismatch In Expression: BinaryOp(+,IntLiteral(1),Id(b))"
    self.assertTrue(TestChecker.test(source, expect, 460))
def test__simple_arrayInttype_and_exp_on_int(self):
    """Int array cells in arithmetic and as assignment targets are valid."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int a[12];
int func(){
a[0] = 2;
a[1] = 3;
return a[0] + 4 + a[1];
}
void main(){
a[2] = func();
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 461))
def test__simple_nested_LB_and_RB_and_exp_on_string(self):
    """Nested parentheses in conditions and int returns are valid."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int a[12];
int func(){
if(((1 == 2))){
return a[2];
}
else return a[2]+a[3];
}
void main(){
a[4] = func();
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 462))
def test__arraycell_type_int_and_return_float(self):
    """Assigning a float-returning call to an int array cell is a mismatch."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int a[12];
float func(){
if(((1 == 2))){
return a[2];
}
else return a[2]+a[3];
}
void main(){
a[4] = func();
}
"""
    expect = "Type Mismatch In Expression: BinaryOp(=,ArrayCell(Id(a),IntLiteral(4)),CallExpr(Id(func),[]))"
    self.assertTrue(TestChecker.test(source, expect, 463))
def test__nested_func(self):
    """Calls to functions declared after their use sites are valid."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int a[12];
float func(){
if(true){
return foo1();
}
else return a[2]+a[3]*foo1()-foo2(a[4]);
}
void main(){
float b;
b = func();
}
int foo1(){
return 1;
}
float foo2(int x){
return x;
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 464))
def test__simple_redecl_arraytype(self):
    """A local scalar shadows the global array, so indexing it is a mismatch."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int a[12];
int func(){
a[0] = 2;
a[1] = 3;
return a[0] + 4 + a[1];
}
void main(){
int a;
a[2] = func();
}
"""
    expect = "Type Mismatch In Expression: ArrayCell(Id(a),IntLiteral(2))"
    self.assertTrue(TestChecker.test(source, expect, 465))
def test_rename_main_func(self):
    """Locals named `main`, `f`, `i` may shadow globals without error."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int i;
int f(){
int main;
main =200;
return main;
}
void main(){
int main;
main = f();
putIntLn(main);
{
int i;
float main;
int f;
main = f = i = 100;
putIntLn(f);
}
putIntLn(main);
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 466))
def test_nested_nested_func(self):
    """Deeply nested recursive calls type-check cleanly."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int a[12];
int func(int x){
return func(func(func(func(x))));
}
void main(){
a[2] = func(a[0]);
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 467))
def test_arraypointer_and_array_nested_in_exp(self):
    """Heavily nested array-pointer and array-cell expressions are valid."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
int[] func(int x){
int a[10];
return a;
}
void main(){
int x,a,b[5],c[9];
func(x+a)[x+b[1]*c[b[2]-a%b[c[6]%2]]]= b[func(b[1-c[5]])[x+c[b[c[4]]]]*c[b[4]-1]];
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 468))
def test_not_left_value_in_nested_assigmnet(self):
    """An addition chain on the left of an inner `=` is not an lvalue."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
void main(){
float a,b,c,d;
a = b+c+d;
a = b+c+d=d;
}
"""
    expect = "Not Left Value: BinaryOp(+,BinaryOp(+,Id(b),Id(c)),Id(d))"
    self.assertTrue(TestChecker.test(source, expect, 469))
def test_type_miss_match_exp_float_in_if(self):
    """A float condition in an if statement is a statement type mismatch."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
void main(){
float a,b,c,d;
if(a){
a = a+1;
}
else b = b-c;
}
"""
    expect = "Type Mismatch In Statement: If(Id(a),Block([BinaryOp(=,Id(a),BinaryOp(+,Id(a),IntLiteral(1)))]),BinaryOp(=,Id(b),BinaryOp(-,Id(b),Id(c))))"
    self.assertTrue(TestChecker.test(source, expect, 470))
def test_type_nested_unaryOp(self):
    """Stacked unary minus operators mixed with int/float operands are valid."""
    # Renamed `input` -> `source`: the old name shadowed the builtin input().
    source = """
void main(){
float a,b;
int c,d,e;
a = -----c+--d--b----e;
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 471))
def test_break_in_dowhile_not_in_block(self):
    """break/continue inside a block-less do-while body are accepted (test 472)."""
    # `input` shadowed the builtin of the same name; renamed to `source`.
    source = """
void main(){
int a,b,c;
do
a = 1;
b = a*2;
c = b-a+b*b;
break;
continue;
while(!(a>0));
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 472))
def test_simple_arraytype_string(self):
    """String literals may be assigned into a string array (test 473)."""
    # `input` shadowed the builtin of the same name; renamed to `source`.
    source = """
void main(){
int a,b,c;
string f[5];
f[0] = "hello";
f[1] = "hi";
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 473))
def test_simple_index_in_array_not_integer(self):
    """A float index into an array is a type mismatch (test 474)."""
    # `input` shadowed the builtin of the same name; renamed to `source`.
    source = """
void main(){
int a,c;
float b;
b = 1;
string f[5];
f[0] = "hello";
f[b] = "hi";
}
"""
    expect = "Type Mismatch In Expression: ArrayCell(Id(f),Id(b))"
    self.assertTrue(TestChecker.test(source, expect, 474))
def test_simple_return_stmt_and_unaryop_and_assigment(self):
    """A boolean expression with an embedded assignment may be returned (test 475)."""
    # `input` shadowed the builtin of the same name; renamed to `source`.
    source = """
boolean main(){
int a,b,c;
return !(a>=(b = c));
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 475))
def test_simple__un_not_left_value_in_nested_assigmnet(self):
    """Parenthesised inner assignments are valid left values (test 476)."""
    # `input` shadowed the builtin of the same name; renamed to `source`.
    source = """
void main(){
float a,b,c,d;
a = b+c+d;
a = b-(c = d*(a = a-1));
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 476))
def test_another_func_call_main_func_int_in_float(self):
    """An int return value coerces into a float-returning function (test 477)."""
    # `input` shadowed the builtin of the same name; renamed to `source`.
    source = """
boolean main(){
float a;
a = foo();
return true;
}
float foo(){
int a[5];
if(main()){
int a;
return a;
}
return a[2];
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 477))
def test_type_miss_match_stmt_Int_pointer_in_Float_pointer(self):
    """Returning an int array where a float array pointer is expected fails (test 478)."""
    # `input` shadowed the builtin of the same name; renamed to `source`.
    source = """
boolean main(){
float a[5];
foo();
return true;
}
float[] foo(){
int a[5];
if(main()){
float a[4];
return a;
}
return a;
}
"""
    expect = "Type Mismatch In Statement: Return(Id(a))"
    self.assertTrue(TestChecker.test(source, expect, 478))
def test_simple_return_in_dowhile_and_for_not_in_block(self):
    """return statements inside block-less for/do-while bodies are accepted (test 479)."""
    # `input` shadowed the builtin of the same name; renamed to `source`.
    source = """
float main(){
int a,b,c,d;
for(1; true; 2) return a;
do
return b;
while(!(a==b));
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 479))
def test_simple_Prime_number_program(self):
    """A complete prime-scanning program type-checks without errors (test 480)."""
    # `input` shadowed the builtin of the same name; renamed to `source`.
    source = """
int main()
{
int low, high, i, flag;
putIntLn(low);
putIntLn(high);
do
{
flag = 0;
for(i = 2; i <= low/2; i = i+1)
{
if(low % i == 0)
{
flag = 1;
break;
}
}
if (flag == 0)
i = getInt();
low = low +1;
}while (low < high);
return 0;
}
"""
    expect = ""
    self.assertTrue(TestChecker.test(source, expect, 480))
def test_lhs_of_assignment_operator_is_not_array_cell(self):
    """LHS of the assignment operator is an expression, not an array cell (test 481)."""
    # `input` shadowed the builtin of the same name; renamed to `source`.
    source = """
int hung(int a, float b[], string c) {
return 23123;
}
void main(){
float hunG[23];
int a;
a = a + 1;
putInt(a);
a = a* 1;
hung(2, hunG, "32");
(hunG[23] + 2) = 43;
return;
}
"""
    expect = "Not Left Value: BinaryOp(+,ArrayCell(Id(hunG),IntLiteral(23)),IntLiteral(2))"
    self.assertTrue(TestChecker.test(source, expect, 481))
def test_factorial_program(self):
    """Factorial program: the never-called func2 is reported unreachable (test 482)."""
    # `input` shadowed the builtin of the same name; renamed to `source`.
    source = """
int func1(int n){
if (n == 0)
return 1;
else
return n * func1(n - 1);
}
int func2(int x){
return 32;
}
int main() {
int n, result;
n = 4;
result = func1(n);
return result;
}
"""
    expect = "Unreachable Function: func2"
    self.assertTrue(TestChecker.test(source, expect, 482))
def test_random_program(self):
"""Random program """
input = """
int a, b, c, d, t;
void main1() {
foo();
return;
}
int foo () {
main1();
if (a+1 == 4) {{{{if(b+a == 2) foo();}}}} else {if (c+d == 32) | |
outer_radius, angle_range):
"""
arguments:
angle_range (tuple): (start_angle, stop_angle) in deg from [0,360)
"""
super(Post_sector_mask, self).__init__(nn)
self.masktype = 'Sector mask'
self.centre = centre
self.r_i = inner_radius
self.r_o = outer_radius
self.tmin, self.tmax = np.deg2rad(angle_range)
self.create_post_mask()
self.qxyz = np.zeros((self.nn, self.nn, 3))
#------------------------------------------------------------------------------
def create_post_mask(self):
    """Build the boolean sector mask and store it in self.mask.

    The mask combines an annulus (r_i <= r <= r_o around `centre`)
    with the angular wedge [tmin, tmax].
    """
    cx, cy = self.centre
    ax0, ax1 = np.ogrid[:self.nn, :self.nn]
    # ensure the stop angle is greater than the start angle
    if self.tmax < self.tmin:
        self.tmax += 2 * np.pi
    # polar coordinates relative to the mask centre
    dist_sq = (ax0 - cx) ** 2 + (ax1 - cy) ** 2
    # wrap the relative angle into [0, 2*pi)
    rel_angle = (np.arctan2(ax0 - cx, ax1 - cy) - self.tmin) % (2 * np.pi)
    # annulus: inside the outer circle, outside the inner one
    inside_outer = dist_sq <= self.r_o * self.r_o
    outside_inner = dist_sq >= self.r_i * self.r_i
    # angular wedge
    in_wedge = rel_angle <= (self.tmax - self.tmin)
    self.mask = inside_outer * outside_inner * in_wedge
#------------------------------------------------------------------------------
def every_q(self):
    """
    Calculates the qx, qy, qz value of a neutron arriving at a certain detector pixel,
    considering the center of the mask to be the direct beam spot at on the detector.

    Fills self.qxyz only for pixels within (r_o + 1) of the mask centre.
    NOTE(review): written for Python 2 (`xrange`); raises NameError on Python 3.
    """
    cx, cy = self.centre
    # 2*pi over a hard-coded wavelength of 6.0 -- presumably Angstrom; TODO confirm units
    qq = (2*np.pi/6.0)
    # loop over a square window just larger than the outer radius
    for x in xrange(cx - (self.r_o + 1), cx + (self.r_o + 2)):
        for y in xrange(cy - (self.r_o + 1), cy + (self.r_o + 2)):
            # flight path length from sample to this pixel
            n_path_length = np.sqrt(self.d_SD**2 + self.pixelsize**2*(x-cx)**2 + self.pixelsize**2*(y-cy)**2)
            try:
                self.qxyz[y,x,0] = self.pixelsize*(x-cx)/n_path_length * qq
                self.qxyz[y,x,1] = self.pixelsize*(y-cy)/n_path_length * qq
                self.qxyz[y,x,2] = (self.d_SD/n_path_length - 1) * qq
            except IndexError:
                # indices beyond the detector edge are skipped;
                # NOTE(review): negative indices wrap around silently instead
                # of raising -- confirm this is intended.
                pass
    # Previous full-detector variant, kept for reference:
    # for x in xrange(self.nn):
    #     for y in xrange(self.nn):
    #         n_path_length = np.sqrt(self.d_SD**2 + self.pixelsize**2*(x-cx)**2 + self.pixelsize**2*(y-cy)**2)
    #         self.qxyz[y,x,0] = self.pixelsize*(x-cx)/n_path_length * qq
    #         self.qxyz[y,x,1] = self.pixelsize*(y-cy)/n_path_length * qq
    #         self.qxyz[y,x,2] = (self.d_SD/n_path_length - 1) * qq
#------------------------------------------------------------------------------
def q(self, counter = 0):
    """
    Calculates the average |q| value of a sector mask.

    Returns:
        (q_abs, q_abs_err): mean |q| over the masked pixels and its
        standard error, or None if qxyz could not be populated after
        one retry via every_q().
    """
    while counter < 2:
        # |q| per pixel, then the mask-weighted mean and its spread
        q_mag = np.sqrt(np.sum(self.qxyz**2, axis = 2))
        q_abs = np.sum(q_mag * self.mask) / self.mask.sum()
        q_abs_err = np.sqrt(1.0/(self.mask.sum() - 1) * np.sum(((q_mag - q_abs) * self.mask)**2))
        if q_abs.any() != 0:
            return q_abs, q_abs_err
        else:
            # qxyz was still all zeros: fill it and retry once.
            self.every_q()
            # BUG FIX: the retry's result was discarded (no `return`), so
            # callers received None even when the retry succeeded.
            return self.q(counter + 1)
    return None
#------------------------------------------------------------------------------
def show_post_mask(self):
    """Display this sector mask as a 0/1 image via Mask_Base.show_mask."""
    as_int = np.where(self.mask == True, 1, 0)
    Mask_Base.show_mask(as_int, self.masktype)
    return None
###############################################################################
class Post_square_mask(Mask_Base):
    """
    Post mask with rectangular shape(s)
    """
    def __init__(self, nn, llbh, *args):
        """
        arguments:
            nn (int): edge length of the (nn x nn) detector grid
            llbh (tuple): (left, length, bottom, height) in pixels
            args (tuple): for more squares in one map
                args = (left2, length2, bottom2, height2, left3, ...)
        """
        super(Post_square_mask, self).__init__(nn)
        self.masktype = 'Square mask'
        self.lefts, self.lengths, self.bottoms, self.heights = [[val] for val in llbh]
        # Optional extra rectangles: groups of four values in the same
        # (left, length, bottom, height) order as `llbh`.
        if len(args) % 4 == 0 and len(args) != 0:
            for i, el in enumerate(args):
                # BUG FIX: the original mapped i % 4 == 0 to `bottoms`
                # (and 2 -> lefts, 3 -> lengths, 1 -> heights), scrambling
                # every extra rectangle relative to the documented order.
                if i % 4 == 0:
                    self.lefts.append(el)
                elif i % 4 == 1:
                    self.lengths.append(el)
                elif i % 4 == 2:
                    self.bottoms.append(el)
                elif i % 4 == 3:
                    self.heights.append(el)
        # np.bool was a deprecated alias of the builtin bool and was removed
        # in NumPy 1.24; use bool directly.
        self.mask = self.mask.astype(bool)
        # mark every requested rectangle as True in the boolean mask
        for rect in xrange(len(self.lefts)):
            self.mask[self.lefts[rect]:self.lefts[rect] + self.lengths[rect],
                      self.bottoms[rect]:self.bottoms[rect] + self.heights[rect]] = True
#------------------------------------------------------------------------------
###############################################################################
###############################################################################
###############################################################################
#class ContrastFit(DataFrame_Base, Post_sector_mask, Pre_mask):
class ContrastFit(DataFrame_Base):
"""
Sinus fits to grouped Data sets
"""
def __init__(self, dfile, drootpath = ''):
    # Load all measurement data up front; 'load_alljobsdata' also fetches
    # the monitor counts, so the older explicit loaders stay disabled.
    super(ContrastFit, self).__init__(dfile, drootpath)
    # self.load_jobs()
    self.load_alljobsdata()
    # obsolete since 'load_alljobsdata' gets the monitor counts
    # self.__load_monitor_counts()
    # Registered masks, keyed by category then by insertion index.
    self.maskdict = {'pre_masks' : {}, 'post_masks' : {}}
    # self.masktype = 'Ambiguous'
    self.local_memory = {} # memory for intermediate results
#------------------------------------------------------------------------------
def dump_to_memory(self, key, item):
    """Store `item` under `key` in the instance-local result cache."""
    self.local_memory[key] = item
    return None
#------------------------------------------------------------------------------
def get_from_memory(self, key):
    """Fetch a previously dumped intermediate result (KeyError if absent)."""
    cached = self.local_memory[key]
    return cached
#------------------------------------------------------------------------------
def remove_from_memory(self, key):
    """Discard the cached item stored under `key` (KeyError if absent)."""
    self.local_memory.pop(key)
    return None
#------------------------------------------------------------------------------
def update_maskdict(self, mask, key):
    """Register `mask` under the category `key`.

    `key` must be 'pre_masks' or 'post_masks'; the new entry is keyed by
    the current number of masks in that category. Any other `key` is
    silently ignored.
    """
    if key in ('pre_masks', 'post_masks'):
        category = self.maskdict[key]
        category[len(category)] = mask
#------------------------------------------------------------------------------
def initialize_pre_mask(self, nn, tile_size):
    """Create a Pre_mask and register it in the 'pre_masks' category."""
    new_mask = Pre_mask(nn, tile_size)
    self.update_maskdict(new_mask, 'pre_masks')
#------------------------------------------------------------------------------
def initialize_post_sector_mask(self, nn, centre, inner_radius, outer_radius, angle_range):
    """Create a Post_sector_mask and register it in the 'post_masks' category."""
    new_mask = Post_sector_mask(nn, centre, inner_radius, outer_radius, angle_range)
    self.update_maskdict(new_mask, 'post_masks')
#------------------------------------------------------------------------------
def initialize_post_square_mask(self, nn, llbh, *args):
    """Create a Post_square_mask and register it in the 'post_masks' category."""
    new_mask = Post_square_mask(nn, llbh, *args)
    self.update_maskdict(new_mask, 'post_masks')
#------------------------------------------------------------------------------
@staticmethod
def _contract_data(pre_mask, data_set):
    """
    Sum `data_set` tile-by-tile according to `pre_mask`.

    assuming that input is pre_mask instance; pre_mask.mask labels each
    pixel with its tile index, so tile i's value is the NaN-safe sum of
    all pixels labelled i. Returns a (tiles_per_row, tiles_per_row) array.
    NOTE(review): Python 2 semantics -- `nn/tile_size` relies on integer
    division (a float under Python 3 would break np.zeros/reshape) and
    `xrange` does not exist on Python 3.
    """
    tiles_per_row = pre_mask.nn/pre_mask.tile_size
    temp = np.zeros(tiles_per_row*tiles_per_row)
    for i in xrange(tiles_per_row*tiles_per_row):
        # 1.0 where the pixel belongs to tile i, else 0.0
        mask_tile = np.where(pre_mask.mask == i, 1., 0.)
        temp[i] = np.nansum(mask_tile*data_set)
    return temp.reshape((tiles_per_row, tiles_per_row))
#------------------------------------------------------------------------------
def contract_data(self, mask_key, jobind, foil = (7,), tc = (0,), dump = False):
    """
    more easy for CF-object

    Contracts the value plane (index 0) and error plane (index 1) of job
    `jobind` for every requested foil/time-channel combination, using the
    registered pre-mask `mask_key`. Returns an array of shape
    (2, len(foil), len(tc), tiles_per_row, tiles_per_row).
    If `dump` is a truthy key (not just True), the result is also cached
    in local_memory under that key.
    NOTE(review): Python 2 file -- `print` statement and integer division
    in `shape`.
    """
    shape = (len(foil), len(tc), self.maskdict['pre_masks'][mask_key].nn/self.maskdict['pre_masks'][mask_key].tile_size, self.maskdict['pre_masks'][mask_key].nn/self.maskdict['pre_masks'][mask_key].tile_size)
    temp_contr = np.zeros(shape)
    temp_contr_err = np.zeros(shape)
    for find, f in enumerate(foil):
        for tind, t in enumerate(tc):
            # plane 0: counts; plane 1: errors
            temp_contr[find, tind] = ContrastFit._contract_data(self.maskdict['pre_masks'][mask_key],\
                self.data_dict[self.jobs[jobind]][0, f, t, :, :])
            temp_contr_err[find, tind] = ContrastFit._contract_data(self.maskdict['pre_masks'][mask_key],\
                self.data_dict[self.jobs[jobind]][1, f, t, :, :])
    # line above from real calcumlation dR = sum(dr_i) where dr_i are all the error summed up equivalent to R = sum(r)
    # temp_contr_err = np.sqrt(temp_contr)
    if dump:
        try:
            # dump == True would not be a usable memory key, so only real
            # keys are cached
            if dump != True: self.dump_to_memory(dump, np.array([temp_contr, temp_contr_err]))
        except KeyError:
            print "No valid key was passed in 'dump'!"
        finally:
            return np.array([temp_contr, temp_contr_err])
    else:
        return np.array([temp_contr, temp_contr_err])
    # =============================================================================
    # if norm_mon:
    #     return ContrastFit._contract_data(self.maskdict['pre_masks'][mask_key],\
    #         self.data_dict[self.jobs[jobind]][foil, tc, :, :]\
    #         /self.monitor[self.jobs[jobind]])
    # else:
    #     return ContrastFit._contract_data(self.maskdict['pre_masks'][mask_key],\
    #         self.data_dict[self.jobs[jobind]][foil, tc, :, :])
    # =============================================================================
#------------------------------------------------------------------------------
@staticmethod
def _expand_data(pre_mask, data_set):
    """
    Blow each tile value of `data_set` up to a tile_size x tile_size patch
    on the full (nn x nn) grid -- the layout inverse of _contract_data.
    Assumes `pre_mask` is the Pre_mask used for the prior contraction.
    """
    ts = pre_mask.tile_size
    expanded = np.zeros((pre_mask.nn, pre_mask.nn))
    for row_idx, row in enumerate(data_set):
        for col_idx, value in enumerate(row):
            expanded[row_idx*ts:(row_idx + 1)*ts, col_idx*ts:(col_idx + 1)*ts] = value
    return expanded
#------------------------------------------------------------------------------
def expand_data(self, mask_key, memory_keys = (), dump_again = True):
    """
    assuming that input is something stored in the local memory which was processed

    Expands each cached result named in `memory_keys` back to detector
    resolution; optionally re-dumps each expansion under 'exp_<key>'.
    Returns the list of expanded arrays, or None when no keys are given.
    NOTE(review): this method looks broken as written -- see the inline
    notes; confirm intent before relying on it.
    """
    if len(memory_keys) != 0:
        expanded_data = []
        for mkey in memory_keys:
            temp = np.zeros(self.get_from_memory(mkey).shape)
            for find, f in enumerate(temp[0,:]):
                # NOTE(review): `for tind, t in f:` iterates raw row values,
                # not enumerate(f) -- this raises for scalar rows.
                for tind, t in f:
                    # make easier by using t-variable!
                    # NOTE(review): the arguments look swapped -- the mask
                    # object is indexed with [0, find, tind] while the whole
                    # cached array is passed as the data set; compare
                    # _expand_data(pre_mask, data_set).
                    temp[0, find, tind] = self._expand_data(self.maskdict['pre_masks'][mask_key][0, find, tind], self.get_from_memory(mkey))
                    temp[1, find, tind] = self._expand_data(self.maskdict['pre_masks'][mask_key][1, find, tind], self.get_from_memory(mkey))
            expanded_data.append(temp)
            if dump_again:
                dump_key = 'exp_{}'.format(mkey)
                self.dump_to_memory(dump_key, temp)
        return expanded_data
    else:
        return None
#------------------------------------------------------------------------------
def def_ROI(self):
    """
    Placeholder for region-of-interest definition; not implemented yet.
    """
    pass
#------------------------------------------------------------------------------
def apply_pre_mask(self, pre_key, jobind, tc, foil = 7, contracted = True, contract = True):
    """
    Apply the registered pre-mask `pre_key` to one time bin (value and
    error planes) of job `jobind`.

    Returns the contracted tile sums when `contract` is True, else None.
    """
    selected_mask = self.maskdict['pre_masks'][pre_key]
    selected_data = self.data_dict[self.jobs[jobind]][:, foil, tc]
    if contract:
        return ContrastFit._contract_data(selected_mask, selected_data)
    return None
#------------------------------------------------------------------------------
def apply_post_mask(self, pre_key, post_key, jobind, tc, foil = 7, contracted = True):
    """
    Return the boolean mask of registered post-mask `post_key` when
    `contracted` is True, otherwise None.
    NOTE: pre_key, jobind, tc and foil are currently unused.
    """
    if not contracted:
        return None
    return self.maskdict['post_masks'][post_key].mask
#------------------------------------------------------------------------------
@staticmethod
def single_sinus_fit(tc_data, eps_tc_data, plot = False):
"""
Filter out "nan" values more sophisticated than np.ma.masked_equal() ...
"""
offset_est = np.mean(tc_data)
# omega_est = np.pi/2. / np.abs(np.argmax(tc_data) - np.argmin(np.abs(tc_data-offset_est))) #max to zero
# omega_est = np.pi / np.abs(np.argmin(tc_data) - np.argmax(tc_data))
dphi_1 = tc_data[1]-tc_data[0]
dphi_2 = tc_data[2]-tc_data[0]
params = Parameters()
params.add('offset', value=offset_est, min = 0.)
# params.add('omega', value=omega_est, min=0, max=np.pi/4.) # is a fixed parameter!!!
params.add('omega', value=np.pi/8.,vary = False)
params.add('pol_bound', value = 0.5, min = 0., max = 1., vary = True)
params.add('amp', value=(max(tc_data)-min(tc_data))/2., min = 0., expr = 'pol_bound*offset')
# params.add('amp', value=(max(tc_data)-min(tc_data))/2., min = 0.)
# params.add('phase', value=0, min = 0, max = 2.*np.pi)
if tc_data[0] > params['offset'] and dphi_1 > 0. and dphi_2 > 0.:
params.add('phase', value = np.pi/4., min = -np.pi/4., max = 3.*np.pi/4.)
elif tc_data[0] > params['offset'] and dphi_1 < 0. and dphi_2 < 0.:
params.add('phase', value = 3*np.pi/4., min = np.pi/4., max = 5.*np.pi/4.)
elif tc_data[0] < params['offset'] and dphi_1 < 0. and dphi_2 < 0.:
params.add('phase', value = 5*np.pi/4., min = 3./4.*np.pi, max = 7./4.*np.pi)
elif tc_data[0] < params['offset'] and dphi_1 > 0. and dphi_2 > 0.:
params.add('phase', value = 7*np.pi/4., min = 5./4.*np.pi, max = 9.*np.pi/4.)
elif tc_data[0] > params['offset'] and dphi_2 > 0.:
params.add('phase', value = np.pi/4., min = -np.pi/4., max = 3*np.pi/4.)
elif tc_data[0] > | |
# megatron/optimizers.py
import torch
from torch.optim import Optimizer
def _compute_sparse_update(beta, acc, grad_values, grad_indices):
# In the sparse case, a single accumulator is used.
update_values = torch.gather(acc, 0, grad_indices[0])
if beta > 0.:
update_values.mul_(beta)
update_values.addcmul_(grad_values, grad_values, value=1. - beta)
return update_values
def _compute_update(beta, acc_list, grad):
rank = len(acc_list)
update = acc_list[0].clone()
for i in range(1, rank):
# We rely on broadcasting to get the proper end shape.
update = torch.min(update, acc_list[i])
if beta > 0.:
update.mul_(beta)
update.addcmul_(grad, grad, value=1. - beta)
return update
def _key(i):
# Returns key used for accessing accumulators
return 'accumulator_' + str(i)
def _add_initial_accumulators(state, grad):
# Creates initial accumulators. For a dense tensor of shape (n1, n2, n3),
# then our initial accumulators are of shape (n1, 1, 1), (1, n2, 1) and
# (1, 1, n3). For a sparse tensor of shape (n, *), we use a single
# accumulator of shape (n,).
shape = grad.shape
rank = len(shape)
defaults = {'device': grad.device, 'dtype': grad.dtype}
acc = {}
if grad.is_sparse:
acc[_key(0)] = torch.zeros(shape[0], **defaults)
elif rank == 0:
# The scalar case is handled separately
acc[_key(0)] = torch.zeros(shape, **defaults)
else:
for i in range(rank):
acc_shape = [1] * i + [shape[i]] + [1] * (rank - 1 - i)
acc[_key(i)] = torch.zeros(acc_shape, **defaults)
state.update(acc)
def _max_reduce_except_dim(tensor, dim):
# Computes max along all dimensions except the given dim.
# If tensor is a scalar, it returns tensor.
rank = len(tensor.shape)
result = tensor
if rank > 0:
assert dim < rank
for d in range(rank):
if d != dim:
result = result.max(dim=d, keepdim=True).values
return result
class SM3(Optimizer):
    """Implements SM3 algorithm.

    It has been proposed in `Memory-Efficient Adaptive Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): coefficient that scale delta before it is applied
            to the parameters (default: 0.1)
        momentum (float, optional): coefficient used to scale prior updates
            before adding. This drastically increases memory usage if
            `momentum > 0.0`. This is ignored if the parameter's gradient
            is sparse. (default: 0.0)
        beta (float, optional): coefficient used for exponential moving
            averages (default: 0.0)
        eps (float, optional): Term added to square-root in denominator to
            improve numerical stability (default: 1e-30)

    .. _Memory-Efficient Adaptive Optimization:
        https://arxiv.org/abs/1901.11150
    """
    def __init__(self, params, lr=0.1, momentum=0.0, beta=0.0, eps=1e-30):
        # Validate hyper-parameters up front so bad configs fail loudly.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {0}".format(lr))
        if not 0.0 <= momentum < 1.0:
            raise ValueError("Invalid momentum: {0}".format(momentum))
        if not 0.0 <= beta < 1.0:
            raise ValueError("Invalid beta: {0}".format(beta))
        if not 0.0 <= eps:
            raise ValueError("Invalid eps: {0}".format(eps))
        defaults = {'lr': lr, 'momentum': momentum, 'beta': beta, 'eps': eps}
        super(SM3, self).__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            momentum = group['momentum']
            beta = group['beta']
            eps = group['eps']
            for p in group['params']:
                # BUG FIX: the original tested `p is None`, which is never
                # true for registered parameters. Parameters without a
                # gradient must be skipped here, otherwise `grad` below is
                # None and the update crashes.
                if p.grad is None:
                    continue
                grad = p.grad
                state = self.state[p]
                shape = grad.shape
                rank = len(shape)
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['momentum_buffer'] = 0.
                    _add_initial_accumulators(state, grad)
                if grad.is_sparse:
                    # the update is non-linear so indices must be unique.
                    # BUG FIX: coalesce() is not in-place; the original
                    # discarded its result and kept duplicated indices.
                    grad = grad.coalesce()
                    grad_indices = grad._indices()
                    grad_values = grad._values()
                    # Transform update_values into sparse tensor
                    def make_sparse(values):
                        constructor = grad.new
                        if grad_indices.dim() == 0 or values.dim() == 0:
                            return constructor().resize_as_(grad)
                        return constructor(grad_indices, values, grad.size())
                    acc = state[_key(0)]
                    update_values = _compute_sparse_update(beta, acc, grad_values, grad_indices)
                    self._update_sparse_accumulator(beta, acc, make_sparse(update_values))
                    # Add small amount for numerical stability
                    update_values.add_(eps).rsqrt_().mul_(grad_values)
                    update = make_sparse(update_values)
                else:
                    # Get previous accumulators mu_{t-1}
                    if rank > 1:
                        acc_list = [state[_key(i)] for i in range(rank)]
                    else:
                        acc_list = [state[_key(0)]]
                    # Get update from accumulators and gradients
                    update = _compute_update(beta, acc_list, grad)
                    # Update accumulators.
                    self._update_accumulator(beta, acc_list, update)
                    # Add small amount for numerical stability
                    update.add_(eps).rsqrt_().mul_(grad)
                    # Momentum only applies to dense gradients (see class doc).
                    if momentum > 0.:
                        m = state['momentum_buffer']
                        update.mul_(1. - momentum).add_(m, alpha=momentum)
                        state['momentum_buffer'] = update.detach()
                p.sub_(update, alpha=group['lr'])
                state['step'] += 1
        return loss

    @staticmethod
    def _update_accumulator(beta, acc_list, update):
        # Each accumulator tracks the max of the update along its own
        # dimension; with beta > 0 the max is taken against the EMA value.
        for i, acc in enumerate(acc_list):
            nu_max = _max_reduce_except_dim(update, i)
            if beta > 0.:
                torch.max(acc, nu_max, out=acc)
            else:
                # No need to compare - nu_max is bigger because of grad ** 2
                acc.copy_(nu_max)

    @staticmethod
    def _update_sparse_accumulator(beta, acc, update):
        # The sparse path keeps a single dense row accumulator.
        nu_max = _max_reduce_except_dim(update.to_dense(), 0).squeeze()
        if beta > 0.:
            torch.max(acc, nu_max, out=acc)
        else:
            # No need to compare - nu_max is bigger because of grad ** 2
            acc.copy_(nu_max)
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# modifications - 4/4/2021 @lessw2020 (decay issue spotted by @nestordemeure )
# weight decay has been implemented AdamW style instead of the original madgrad Adam style.
# in initial image classification testing, this outperformed 0 weight decay or original style weight decay.
# closure is checked if callable or not since some code passes loss directly, rather than in closure param
import math
from typing import Collection, TYPE_CHECKING, Any, Callable, Optional
import torch
import torch.optim
import collections
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
class madgrad_wd(torch.optim.Optimizer):
"""
MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic
Optimization.
.. _MADGRAD: https://arxiv.org/abs/2101.11075
MADGRAD is a general purpose optimizer that can be used in place of SGD or
Adam may converge faster and generalize better. Currently GPU-only.
Typically, the same learning rate schedule that is used for SGD or Adam may
be used. The overall learning rate is not comparable to either method and
should be determined by a hyper-parameter sweep.
MADGRAD requires less weight decay than other methods, often as little as
zero. Momentum values used for SGD or Adam's beta1 should work here also.
On sparse problems both weight_decay and momentum should be set to 0.
Arguments:
params (iterable):
Iterable of parameters to optimize or dicts defining parameter groups.
lr (float):
Learning rate (default: 1e-2).
momentum (float):
Momentum value in the range [0,1) (default: 0.9).
weight_decay (float):
Weight decay, i.e. a L2 penalty (default: 0).
eps (float):
Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6).
"""
def __init__(
    self,
    params: _params_t,
    lr: float = 1e-2,
    momentum: float = 0.9,
    weight_decay: float = 0,
    eps: float = 1e-6,
):
    """Validate hyper-parameters and delegate to torch's Optimizer base.

    Raises:
        ValueError: if any hyper-parameter is outside its valid range.
    """
    if momentum < 0 or momentum >= 1:
        # BUG FIX: the message claimed the closed range [0,1] although
        # momentum == 1 is rejected.
        raise ValueError(f"Momentum {momentum} must be in the range [0,1)")
    if lr <= 0:
        raise ValueError(f"Learning rate {lr} must be positive")
    if weight_decay < 0:
        raise ValueError(f"Weight decay {weight_decay} must be non-negative")
    if eps < 0:
        # BUG FIX: the original f-string had no placeholder, so the
        # offending value never appeared in the message.
        raise ValueError(f"Eps {eps} must be non-negative")
    defaults = dict(lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay)
    super().__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self) -> bool:
    # Capability flag queried by fairseq-style trainers: this optimizer
    # does not implement memory-efficient fp16 state.
    return False
@property
def supports_flat_params(self) -> bool:
    # Parameters may be flattened into one contiguous tensor.
    return True
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None and isinstance(closure, collections.Callable):
loss = closure()
# step counter must be stored in state to ensure correct behavior under
# optimizer sharding
if "k" not in self.state:
self.state["k"] = torch.tensor([0], dtype=torch.long)
k = self.state["k"].item()
for group in self.param_groups:
eps = group["eps"]
lr = group["lr"] + eps
decay = group["weight_decay"]
momentum = group["momentum"]
ck = 1 - momentum
lamb = lr * math.pow(k + 1, 0.5)
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
if "grad_sum_sq" not in state:
state["grad_sum_sq"] = torch.zeros_like(p.data).detach()
state["s"] = torch.zeros_like(p.data).detach()
if momentum != 0:
state["x0"] = torch.clone(p.data).detach()
if momentum != 0.0 and grad.is_sparse:
raise RuntimeError(
"momentum != 0 is not compatible with sparse gradients"
)
grad_sum_sq = state["grad_sum_sq"]
s = state["s"]
# Apply weight decay - L2 / AdamW style
if decay:
p.data.mul_(1 - lr * decay)
""" original impl:
if decay != 0:
if grad.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients")
grad.add_(p.data, alpha=decay)
"""
if grad.is_sparse:
grad = grad.coalesce()
grad_val = grad._values()
p_masked = p.sparse_mask(grad)
grad_sum_sq_masked = | |
endpoint,
get_params,
json={'data': {}}
)
response.json.assert_called_once()
_request.reset_mock()
response.json.reset_mock()
action = utils.CREATE
kwargs = {
'data': {'serialized_id': 98},
'headers': {'Accept': 'text/plain'},
'other_param_for_requests_lib': True
}
result = utils.request(action, endpoint, get_params, **kwargs)
self.assertEqual(result, response.json.return_value)
_request.assert_called_once_with(
utils.VERBS[action],
endpoint,
get_params,
json={'data': {'serialized_id': 98}},
headers={'Accept': 'text/plain'},
other_param_for_requests_lib=True
)
response.json.assert_called_once()
response.json.return_value = {'errors': [{'error': {'details': 'test error message'}}]}
response.status_code = 401
_request.reset_mock()
response.json.reset_mock()
action = utils.RETRIEVE
kwargs = {}
with self.assertRaises(exceptions.BaseCRMAPIUnauthorized):
utils.request(action, endpoint, get_params, **kwargs)
response.status_code = 403
_request.reset_mock()
response.json.reset_mock()
action = utils.RETRIEVE
kwargs = {}
with self.assertRaises(Exception):
utils.request(action, endpoint, get_params, **kwargs)
response.status_code = 404
_request.reset_mock()
response.json.reset_mock()
action = utils.RETRIEVE
kwargs = {}
with self.assertRaises(Exception):
utils.request(action, endpoint, get_params, **kwargs)
response.status_code = 404
get_params = {'id': 99}
_request.reset_mock()
response.json.reset_mock()
action = utils.RETRIEVE
kwargs = {}
with self.assertRaises(exceptions.BaseCRMNoResult):
utils.request(action, endpoint, get_params, **kwargs)
response.status_code = 422
get_params = None
_request.reset_mock()
response.json.reset_mock()
action = utils.RETRIEVE
kwargs = {}
with self.assertRaises(exceptions.BaseCRMValidationError):
utils.request(action, endpoint, get_params, **kwargs)
class HelperMethodTests(TestCase):
@mock.patch('basecrm.utils.request')
@mock.patch('basecrm.utils.parse')
def test_get_contacts(self, parse, request):
    """get_contacts issues a RETRIEVE on 'contacts' (keyword filters become
    the GET-parameter dict) and returns the parsed response."""
    request.return_value = {
        'items': [{'id': 23, 'name': 'hello'}, {'id': 99, 'name': 'world'}],
        'meta': {'count': 2}
    }
    parse.return_value = [{'id': 23, 'name': 'hello'}, {'id': 99, 'name': 'world'}]
    # no filters -> empty GET-parameter dict
    result = helpers.get_contacts()
    self.assertEqual(result, parse.return_value)
    request.assert_called_once_with(utils.RETRIEVE, 'contacts', {})
    parse.assert_called_once_with(request.return_value)
    request.reset_mock()
    parse.reset_mock()
    # arbitrary keyword arguments are passed through as GET parameters
    result = helpers.get_contacts(id=456, hello='world')
    self.assertEqual(result, parse.return_value)
    request.assert_called_once_with(utils.RETRIEVE, 'contacts', {'id': 456, 'hello': 'world'})
    parse.assert_called_once_with(request.return_value)
@mock.patch('basecrm.utils.validate_contact_dict')
@mock.patch('basecrm.utils.request')
@mock.patch('basecrm.utils.parse')
def test_create_contact(self, parse, request, validate):
    """create_contact validates, then POSTs and parses; validation errors
    propagate, and a falsy validation result short-circuits the request."""
    request.return_value = {
        'data': {'id': 23, 'name': '<NAME>'},
        'meta': {'count': 1}
    }
    parse.return_value = {'id': 23, 'name': '<NAME>'}
    validate.return_value = True
    data = {'id': 999, 'name': '<NAME>'}
    # happy path: validate -> request -> parse
    result = helpers.create_contact(data)
    self.assertEqual(result, parse.return_value)
    validate.assert_called_once_with(utils.CREATE, data)
    request.assert_called_once_with(utils.CREATE, 'contacts', None, data=data)
    parse.assert_called_once_with(request.return_value)
    request.reset_mock()
    parse.reset_mock()
    validate.reset_mock()
    # a raising validator bubbles up unchanged
    validate.side_effect = exceptions.BaseCRMValidationError()
    with self.assertRaises(exceptions.BaseCRMValidationError):
        helpers.create_contact(data)
    request.reset_mock()
    parse.reset_mock()
    validate.reset_mock()
    # a falsy validation result suppresses the API call entirely
    validate.side_effect = None
    validate.return_value = False
    result = helpers.create_contact(data)
    validate.assert_called_once_with(utils.CREATE, data)
    self.assertFalse(request.called)
    self.assertFalse(parse.called)
@mock.patch('basecrm.utils.validate_contact_dict')
@mock.patch('basecrm.utils.request')
@mock.patch('basecrm.utils.parse')
def test_update_contact(self, parse, request, validate):
    """update_contact validates (skipping the id check), then PUTs and
    parses; validation errors propagate, a falsy result short-circuits."""
    request.return_value = {
        'data': {'id': 23, 'name': '<NAME>'},
        'meta': {'count': 1}
    }
    parse.return_value = {'id': 23, 'name': '<NAME>'}
    validate.return_value = True
    data = {'name': '<NAME>'}
    # `id` shadowed the builtin of the same name; renamed to `contact_id`.
    contact_id = 33
    # happy path: validate -> request -> parse
    result = helpers.update_contact(contact_id, data)
    self.assertEqual(result, parse.return_value)
    validate.assert_called_once_with(utils.UPDATE, data, skip_id=True)
    request.assert_called_once_with(utils.UPDATE, 'contacts', {'id': contact_id}, data=data)
    parse.assert_called_once_with(request.return_value)
    request.reset_mock()
    parse.reset_mock()
    validate.reset_mock()
    # a raising validator bubbles up unchanged
    validate.side_effect = exceptions.BaseCRMValidationError()
    with self.assertRaises(exceptions.BaseCRMValidationError):
        helpers.update_contact(contact_id, data)
    request.reset_mock()
    parse.reset_mock()
    validate.reset_mock()
    # a falsy validation result suppresses the API call entirely
    validate.side_effect = None
    validate.return_value = False
    result = helpers.update_contact(contact_id, data)
    validate.assert_called_once_with(utils.UPDATE, data, skip_id=True)
    self.assertFalse(request.called)
    self.assertFalse(parse.called)
@mock.patch('basecrm.utils.request')
@mock.patch('basecrm.utils.parse')
def test_get_deals(self, parse, request):
    """get_deals issues a RETRIEVE on 'deals' (keyword filters become the
    GET-parameter dict) and returns the parsed response."""
    request.return_value = {
        'items': [{'id': 23, 'name': 'hello'}, {'id': 99, 'name': 'world'}],
        'meta': {'count': 2}
    }
    parse.return_value = [{'id': 23, 'name': 'hello'}, {'id': 99, 'name': 'world'}]
    # no filters -> empty GET-parameter dict
    result = helpers.get_deals()
    self.assertEqual(result, parse.return_value)
    request.assert_called_once_with(utils.RETRIEVE, 'deals', {})
    parse.assert_called_once_with(request.return_value)
    request.reset_mock()
    parse.reset_mock()
    # arbitrary keyword arguments are passed through as GET parameters
    result = helpers.get_deals(id=456, hello='world')
    self.assertEqual(result, parse.return_value)
    request.assert_called_once_with(utils.RETRIEVE, 'deals', {'id': 456, 'hello': 'world'})
    parse.assert_called_once_with(request.return_value)
@mock.patch('basecrm.utils.validate_deal_dict')
@mock.patch('basecrm.utils.request')
@mock.patch('basecrm.utils.parse')
def test_create_deal(self, parse, request, validate):
    """create_deal validates the payload, then posts it and parses the reply."""
    request.return_value = {
        'data': {'id': 23, 'name': 'Создание советской атомной бомбы'},
        'meta': {'count': 1}
    }
    parse.return_value = {'id': 23, 'name': 'Создание советской атомной бомбы'}
    validate.return_value = True
    payload = {'id': 999, 'name': 'Создание советской атомной бомбы'}
    # Happy path.
    self.assertEqual(helpers.create_deal(payload), parse.return_value)
    validate.assert_called_once_with(utils.CREATE, payload)
    request.assert_called_once_with(utils.CREATE, 'deals', None, data=payload)
    parse.assert_called_once_with(request.return_value)
    for mocked in (request, parse, validate):
        mocked.reset_mock()
    # Validation errors propagate.
    validate.side_effect = exceptions.BaseCRMValidationError()
    with self.assertRaises(exceptions.BaseCRMValidationError):
        helpers.create_deal(payload)
    for mocked in (request, parse, validate):
        mocked.reset_mock()
    validate.side_effect = None
    # A falsy validation result stops before the API call.
    validate.return_value = False
    result = helpers.create_deal(payload)
    validate.assert_called_once_with(utils.CREATE, payload)
    self.assertFalse(request.called)
    self.assertFalse(parse.called)
@mock.patch('basecrm.utils.validate_deal_dict')
@mock.patch('basecrm.utils.request')
@mock.patch('basecrm.utils.parse')
def test_update_deal(self, parse, request, validate):
    """update_deal validates (skipping the id), then delegates to request/parse."""
    request.return_value = {
        'data': {'id': 23, 'name': 'Manhattan Project'},
        'meta': {'count': 1}
    }
    parse.return_value = {'id': 23, 'name': 'Manhattan Project'}
    validate.return_value = True
    payload = {'id': 999, 'name': 'Manhattan Project'}
    pk = 23
    # Happy path.
    self.assertEqual(helpers.update_deal(pk, payload), parse.return_value)
    validate.assert_called_once_with(utils.UPDATE, payload, skip_id=True)
    request.assert_called_once_with(utils.UPDATE, 'deals', {'id': pk}, data=payload)
    parse.assert_called_once_with(request.return_value)
    for mocked in (request, parse, validate):
        mocked.reset_mock()
    # Validation errors propagate.
    validate.side_effect = exceptions.BaseCRMValidationError()
    with self.assertRaises(exceptions.BaseCRMValidationError):
        helpers.update_deal(pk, payload)
    for mocked in (request, parse, validate):
        mocked.reset_mock()
    validate.side_effect = None
    # A falsy validation result stops before the API call.
    validate.return_value = False
    result = helpers.update_deal(pk, payload)
    validate.assert_called_once_with(utils.UPDATE, payload, skip_id=True)
    self.assertFalse(request.called)
    self.assertFalse(parse.called)
@mock.patch('basecrm.utils.request')
@mock.patch('basecrm.utils.parse')
def test_get_leads(self, parse, request):
    """get_leads forwards any keyword filters straight to the API layer."""
    request.return_value = {
        'items': [{'id': 23, 'name': 'hello'}, {'id': 99, 'name': 'world'}],
        'meta': {'count': 2}
    }
    parse.return_value = [{'id': 23, 'name': 'hello'}, {'id': 99, 'name': 'world'}]
    # No filters: an empty params dict is sent.
    self.assertEqual(helpers.get_leads(), parse.return_value)
    request.assert_called_once_with(utils.RETRIEVE, 'leads', {})
    parse.assert_called_once_with(request.return_value)
    request.reset_mock()
    parse.reset_mock()
    # Keyword arguments become the params dict verbatim.
    self.assertEqual(helpers.get_leads(id=456, hello='world'), parse.return_value)
    request.assert_called_once_with(utils.RETRIEVE, 'leads', {'id': 456, 'hello': 'world'})
    parse.assert_called_once_with(request.return_value)
@mock.patch('basecrm.utils.validate_lead_dict')
@mock.patch('basecrm.utils.request')
@mock.patch('basecrm.utils.parse')
def test_create_lead(self, parse, request, validate):
    """create_lead validates the payload, then posts it and parses the reply."""
    request.return_value = {
        'data': {
            'id': 23,
            'last_name': 'Бортников',
            'organization_name': 'Федеральная Служба Безопасности Российской Федерации'
        },
        'meta': {
            'count': 1
        }
    }
    parse.return_value = {
        'id': 23,
        'last_name': 'Бортников',
        'organization_name': 'Федеральная Служба Безопасности Российской Федерации'
    }
    validate.return_value = True
    payload = {
        'id': 999,
        'last_name': 'Бортников',
        'organization_name': 'Федеральная Служба Безопасности Российской Федерации'
    }
    # Happy path.
    self.assertEqual(helpers.create_lead(payload), parse.return_value)
    validate.assert_called_once_with(utils.CREATE, payload)
    request.assert_called_once_with(utils.CREATE, 'leads', None, data=payload)
    parse.assert_called_once_with(request.return_value)
    for mocked in (request, parse, validate):
        mocked.reset_mock()
    # Validation errors propagate.
    validate.side_effect = exceptions.BaseCRMValidationError()
    with self.assertRaises(exceptions.BaseCRMValidationError):
        helpers.create_lead(payload)
    for mocked in (request, parse, validate):
        mocked.reset_mock()
    validate.side_effect = None
    # A falsy validation result stops before the API call.
    validate.return_value = False
    result = helpers.create_lead(payload)
    validate.assert_called_once_with(utils.CREATE, payload)
    self.assertFalse(request.called)
    self.assertFalse(parse.called)
@mock.patch('basecrm.utils.validate_lead_dict')
@mock.patch('basecrm.utils.request')
@mock.patch('basecrm.utils.parse')
def test_update_lead(self, parse, request, validate):
    """update_lead validates (skipping the id), then delegates to request/parse."""
    request.return_value = {
        'data': {'id': 23, 'name': 'Manhattan Project'},
        'meta': {'count': 1}
    }
    parse.return_value = {'id': 23, 'name': 'Manhattan Project'}
    validate.return_value = True
    payload = {'id': 999, 'name': 'Manhattan Project'}
    pk = 23
    # Happy path.
    self.assertEqual(helpers.update_lead(pk, payload), parse.return_value)
    validate.assert_called_once_with(utils.UPDATE, payload, skip_id=True)
    request.assert_called_once_with(utils.UPDATE, 'leads', {'id': pk}, data=payload)
    parse.assert_called_once_with(request.return_value)
    for mocked in (request, parse, validate):
        mocked.reset_mock()
    # Validation errors propagate.
    validate.side_effect = exceptions.BaseCRMValidationError()
    with self.assertRaises(exceptions.BaseCRMValidationError):
        helpers.update_lead(pk, payload)
    for mocked in (request, parse, validate):
        mocked.reset_mock()
    validate.side_effect = None
    # A falsy validation result stops before the API call.
    validate.return_value = False
    result = helpers.update_lead(pk, payload)
    validate.assert_called_once_with(utils.UPDATE, payload, skip_id=True)
    self.assertFalse(request.called)
    self.assertFalse(parse.called)
@mock.patch('basecrm.utils.request')
@mock.patch('basecrm.utils.parse')
def test_get_notes(self, parse, request):
    """get_notes builds its params from resource_type/resource_id/kwargs and
    rejects invalid resource types."""
    request.return_value = {
        'items': [{'id': 23, 'content': 'hello'}, {'id': 99, 'content': 'world'}],
        'meta': {'count': 2}
    }
    parse.return_value = [{'id': 23, 'content': 'hello'}, {'id': 99, 'content': 'world'}]
    note_resource_id = 55
    # No arguments: empty params dict.
    self.assertEqual(helpers.get_notes(), parse.return_value)
    request.assert_called_once_with(utils.RETRIEVE, 'notes', {})
    parse.assert_called_once_with(request.return_value)
    request.reset_mock()
    parse.reset_mock()
    # resource_type only.
    self.assertEqual(helpers.get_notes('contact'), parse.return_value)
    request.assert_called_once_with(utils.RETRIEVE, 'notes', {'resource_type': 'contact'})
    parse.assert_called_once_with(request.return_value)
    request.reset_mock()
    parse.reset_mock()
    # resource_type plus resource_id.
    self.assertEqual(helpers.get_notes('lead', note_resource_id), parse.return_value)
    request.assert_called_once_with(utils.RETRIEVE, 'notes', {'resource_type': 'lead', 'resource_id': 55})  # noqa
    parse.assert_called_once_with(request.return_value)
    request.reset_mock()
    parse.reset_mock()
    # Extra keyword arguments are forwarded too.
    self.assertEqual(helpers.get_notes('deal', note_resource_id, page=5), parse.return_value)
    request.assert_called_once_with(utils.RETRIEVE, 'notes', {'resource_type': 'deal', 'resource_id': 55, 'page': 5})  # noqa
    parse.assert_called_once_with(request.return_value)
    # Anything that is not a recognised resource-type string is rejected.
    with self.assertRaises(exceptions.BaseCRMValidationError):
        helpers.get_notes(note_resource_id)
    with self.assertRaises(exceptions.BaseCRMValidationError):
        helpers.get_notes('contacts')
    with self.assertRaises(exceptions.BaseCRMValidationError):
        helpers.get_notes(56)
@mock.patch('basecrm.utils.request')
@mock.patch('basecrm.utils.parse')
def test_create_note(self, parse, request):
    """create_note validates its arguments before hitting the API."""
    request.return_value = {
        'items': [{'id': 23, 'content': 'hello'}, {'id': 99, 'content': 'world'}],
        'meta': {'count': 2}
    }
    parse.return_value = [{'id': 23, 'content': 'hello'}, {'id': 99, 'content': 'world'}]
    note_resource_id = 55
    text = "Hi, this is å t€st note"
    expected_payload = {
        'resource_type': 'contact',
        'resource_id': note_resource_id,
        'content': text
    }
    # Invalid resource types and a missing resource id are rejected up front.
    with self.assertRaises(exceptions.BaseCRMValidationError):
        helpers.create_note(None, note_resource_id, text)
    with self.assertRaises(exceptions.BaseCRMValidationError):
        helpers.create_note('foo', note_resource_id, text)
    with self.assertRaises(exceptions.BaseCRMValidationError):
        helpers.create_note(5577, note_resource_id, text)
    with self.assertRaises(exceptions.BaseCRMValidationError):
        helpers.create_note('contact', None, text)
    # Valid input goes through request/parse with the assembled payload.
    self.assertEqual(helpers.create_note('contact', note_resource_id, text), parse.return_value)
    request.assert_called_once_with(utils.CREATE, 'notes', data=expected_payload)
    parse.assert_called_once_with(request.return_value)
@mock.patch('basecrm.utils.instantiate_if_necessary')
@mock.patch('basecrm.helpers.django_apps')
def test_get_pipelines(self, _apps, instantiate):
    """get_pipelines returns the pipeline cached on the Django app config."""
    conf = mock.Mock()
    conf.pipeline = {'id': 6456, 'name': 'default'}
    _apps.get_app_config.return_value = conf
    self.assertEqual(helpers.get_pipelines(), conf.pipeline)
    _apps.get_app_config.assert_called_once_with('basecrm')
    conf.instantiate_pipeline.assert_called_once_with()
@mock.patch('basecrm.helpers.get_stages_from_api')
@mock.patch('basecrm.helpers.settings')
@mock.patch('basecrm.helpers.django_apps')
def test_get_stages(self, _apps, settings, get_from_api):
    """get_stages serves the cached copy only when BASECRM_CACHE_STAGES is on."""
    conf = mock.Mock()
    conf.stages = [{'id': 6456, 'name': 'new'}, {'id': 6577, 'name': 'updated'}]
    get_from_api.return_value = [{'id': 6993, 'name': 'won'}, {'id': 7004, 'name': 'lost'}]
    _apps.get_app_config.return_value = conf
    # Cache enabled: served from the app config; API untouched.
    settings.BASECRM_CACHE_STAGES = True
    self.assertEqual(helpers.get_stages(), conf.stages)
    _apps.get_app_config.assert_called_once_with('basecrm')
    conf.instantiate_stages.assert_called_once_with()
    self.assertFalse(get_from_api.called)
    # Cache disabled: fetched from the API; app config untouched.
    settings.BASECRM_CACHE_STAGES = False
    conf.instantiate_stages.reset_mock()
    _apps.get_app_config.reset_mock()
    self.assertEqual(helpers.get_stages(), get_from_api.return_value)
    self.assertFalse(_apps.get_app_config.called)
    self.assertFalse(conf.instantiate_stages.called)
    get_from_api.assert_called_once()
@mock.patch('basecrm.helpers.get_users_from_api')
@mock.patch('basecrm.helpers.settings')
@mock.patch('basecrm.helpers.django_apps')
def test_get_users(self, _apps, settings, get_from_api):
    """get_users serves the cached copy only when BASECRM_CACHE_USERS is on."""
    conf = mock.Mock()
    conf.users = [{'id': 6456, 'name': 'Albert'}, {'id': 6577, 'name': 'Bertie'}]
    get_from_api.return_value = [{'id': 6993, 'name': 'Charlie'}, {'id': 7004, 'name': 'Davie'}]
    _apps.get_app_config.return_value = conf
    # Cache enabled: served from the app config; API untouched.
    settings.BASECRM_CACHE_USERS = True
    self.assertEqual(helpers.get_users(), conf.users)
    _apps.get_app_config.assert_called_once_with('basecrm')
    conf.instantiate_users.assert_called_once()
    self.assertFalse(get_from_api.called)
    # Cache disabled: fetched from the API; app config untouched.
    settings.BASECRM_CACHE_USERS = False
    conf.instantiate_users.reset_mock()
    _apps.get_app_config.reset_mock()
    self.assertEqual(helpers.get_users(), get_from_api.return_value)
    self.assertFalse(_apps.get_app_config.called)
    self.assertFalse(conf.instantiate_users.called)
    get_from_api.assert_called_once_with()
    # Keyword arguments are forwarded to the API fetch.
    result = helpers.get_users(per_page=999, status='foo')
    get_from_api.assert_called_with(per_page=999, status='foo')
@mock.patch('basecrm.helpers.get_stages')
def test_get_stage_ids(self, get_stages):
    """get_stage_ids flattens the stage dicts to a bare list of ids."""
    get_stages.return_value = [{'id': 8888, 'name': 'New'}, {'id': 9999, 'name': 'In Progress'}]
    self.assertEqual(helpers.get_stage_ids(), [8888, 9999])
    get_stages.assert_called_once()
@mock.patch('basecrm.helpers.get_users')
def test_get_user_ids(self, get_users):
    """get_user_ids flattens the user dicts to a bare list of ids."""
    get_users.return_value = [{'id': 8, 'name': 'Albert'}, {'id': 9, 'name': 'Berties'}]
    self.assertEqual(helpers.get_user_ids(), [8, 9])
    get_users.assert_called_once()
@mock.patch('basecrm.utils.request')
@mock.patch('basecrm.utils.parse')
@mock.patch('basecrm.utils.count')
def test_get_pipelines_from_api(self, count, parse, request):
    """get_pipelines_from_api only supports accounts with exactly one pipeline."""
    request.return_value = {
        'items': [{'id': 23, 'name': 'hello'}, {'id': 99, 'name': 'world'}],
        'meta': {'count': 2}
    }
    parse.return_value = [{'id': 23, 'name': 'hello'}, {'id': 99, 'name': 'world'}]
    # More than one pipeline is not implemented.
    count.return_value = 2
    with self.assertRaises(NotImplementedError):
        helpers.get_pipelines_from_api()
    request.assert_called_once_with(utils.RETRIEVE, 'pipelines', {})
    count.assert_called_once_with(request.return_value)
    for mocked in (request, count, parse):
        mocked.reset_mock()
    # Exactly one pipeline: the parsed result is returned.
    count.return_value = 1
    self.assertEqual(helpers.get_pipelines_from_api(), parse.return_value)
    request.assert_called_once_with(utils.RETRIEVE, 'pipelines', {})
    count.assert_called_once_with(request.return_value)
    parse.assert_called_once_with(request.return_value)
    for mocked in (request, count, parse):
        mocked.reset_mock()
    # Keyword filters are forwarded verbatim.
    self.assertEqual(helpers.get_pipelines_from_api(id=456, hello='world'), parse.return_value)
    request.assert_called_once_with(utils.RETRIEVE, 'pipelines', {'id': 456, 'hello': 'world'})
    count.assert_called_once_with(request.return_value)
    parse.assert_called_once_with(request.return_value)
@mock.patch('basecrm.utils.request')
@mock.patch('basecrm.utils.parse')
def test_get_stages_from_api(self, parse, request):
request.return_value = {
'items':[{'id':23, | |
"""This file contains code used in "Think Bayes",
by <NAME>, available from greenteapress.com
Copyright 2012 <NAME>
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import matplotlib.pyplot as pyplot
import thinkplot
import numpy
import csv
import random
import shelve
import sys
import time
import thinkbayes2
import warnings
warnings.simplefilter('error', RuntimeWarning)
FORMATS = ['pdf', 'eps', 'png']
class Locker(object):
    """Encapsulates a shelf for storing key-value pairs.

    Keys are coerced to str, since shelve only accepts string keys.
    """

    def __init__(self, shelf_file):
        """Opens (creating if necessary) the shelf backing file."""
        self.shelf = shelve.open(shelf_file)

    def Close(self):
        """Closes the shelf."""
        self.shelf.close()

    def Add(self, key, value):
        """Adds a key-value pair, overwriting any previous value."""
        self.shelf[str(key)] = value

    def Lookup(self, key):
        """Looks up a key; returns None if it is absent."""
        return self.shelf.get(str(key))

    def Keys(self):
        """Returns an iterator of keys."""
        # Bug fix: shelve.Shelf has no iterkeys() on Python 3; iterating
        # the keys view is the equivalent, version-safe spelling.
        return iter(self.shelf.keys())

    def Read(self):
        """Returns the contents of the shelf as a map."""
        return dict(self.shelf)
class Subject(object):
    """Represents a subject from the belly button study."""

    def __init__(self, code):
        """
        code: string ID
        species: sequence of (int count, string species) pairs
        """
        self.code = code
        # (count, name) pairs; kept sorted by Done().
        self.species = []
        # Posterior suite over n and prevalences; set by Process().
        self.suite = None
        self.num_reads = None
        self.num_species = None
        # Totals of the matched (complete) subject; set by Match().
        self.total_reads = None
        self.total_species = None
        self.prev_unseen = None
        self.pmf_n = None
        self.pmf_q = None
        # Predictive distribution of additional species; set by
        # MakePrediction()/MakeQuickPrediction().
        self.pmf_l = None

    def Add(self, species, count):
        """Add a species-count pair.

        It is up to the caller to ensure that species names are unique.

        species: string species/genus name
        count: int number of individuals
        """
        self.species.append((count, species))

    def Done(self, reverse=False, clean_param=0):
        """Called when we are done adding species counts.

        reverse: which order to sort in
        clean_param: if nonzero, passed to Clean() to drop suspect species
        """
        if clean_param:
            self.Clean(clean_param)

        self.species.sort(reverse=reverse)
        counts = self.GetCounts()
        self.num_species = len(counts)
        self.num_reads = sum(counts)

    def Clean(self, clean_param=50):
        """Identifies and removes bogus data.

        clean_param: parameter that controls the number of legit species
        """
        def prob_bogus(k, r):
            """Compute the probability that a species is bogus."""
            q = clean_param / r
            p = (1 - q) ** k
            return p

        print(self.code, clean_param)

        counts = self.GetCounts()
        r = 1.0 * sum(counts)

        # Keep each species with probability 1 - prob_bogus(count, r).
        species_seq = []
        for k, species in sorted(self.species):
            if random.random() < prob_bogus(k, r):
                continue
            species_seq.append((k, species))
        self.species = species_seq

    def GetM(self):
        """Gets number of observed species."""
        return len(self.species)

    def GetCounts(self):
        """Gets the list of species counts.

        Should be in increasing order, if Sort() has been invoked.
        """
        return [count for count, _ in self.species]

    def MakeCdf(self):
        """Makes a CDF of total prevalence vs rank."""
        counts = self.GetCounts()
        counts.sort(reverse=True)
        cdf = thinkbayes2.Cdf(dict(enumerate(counts)))
        return cdf

    def GetNames(self):
        """Gets the names of the seen species."""
        return [name for _, name in self.species]

    def PrintCounts(self):
        """Prints the counts and species names, largest count first."""
        for count, name in reversed(self.species):
            print(count, name)

    def GetSpecies(self, index):
        """Gets the count and name of the indicated species.

        Returns: count-species pair
        """
        return self.species[index]

    def GetCdf(self):
        """Returns cumulative prevalence vs number of species."""
        counts = self.GetCounts()
        items = enumerate(counts)
        cdf = thinkbayes2.Cdf(items)
        return cdf

    def GetPrevalences(self):
        """Returns a sequence of prevalences (normalized counts)."""
        counts = self.GetCounts()
        total = sum(counts)
        # Bug fix: numpy.float was a deprecated alias for the builtin
        # float and was removed in NumPy 1.24; use float directly.
        prevalences = numpy.array(counts, dtype=float) / total
        return prevalences

    def Process(self, low=None, high=500, conc=1, iters=100):
        """Computes the posterior distribution of n and the prevalences.

        Sets attribute: self.suite

        low: minimum number of species
        high: maximum number of species
        conc: concentration parameter
        iters: number of iterations to use in the estimator
        """
        counts = self.GetCounts()
        m = len(counts)
        if low is None:
            low = max(m, 2)
        ns = range(low, high + 1)

        # Species5 is the estimator class defined elsewhere in this module.
        self.suite = Species5(ns, conc=conc, iters=iters)
        self.suite.Update(counts)

    def MakePrediction(self, num_sims=100):
        """Make predictions for the given subject.

        Precondition: Process has run

        num_sims: how many simulations to run for predictions

        Adds attribute:
        pmf_l: predictive distribution of additional species
        """
        add_reads = self.total_reads - self.num_reads
        curves = self.RunSimulations(num_sims, add_reads)
        self.pmf_l = self.MakePredictive(curves)

    def MakeQuickPrediction(self, num_sims=100):
        """Make predictions for the given subject.

        Precondition: Process has run

        num_sims: how many simulations to run for predictions

        Adds attribute:
        pmf_l: predictive distribution of additional species
        """
        add_reads = self.total_reads - self.num_reads
        pmf = thinkbayes2.Pmf()
        _, seen = self.GetSeenSpecies()

        for _ in range(num_sims):
            _, observations = self.GenerateObservations(add_reads)
            all_seen = seen.union(observations)
            l = len(all_seen) - len(seen)
            pmf.Incr(l)

        pmf.Normalize()
        self.pmf_l = pmf

    def DistL(self):
        """Returns the distribution of additional species, l."""
        return self.pmf_l

    def MakeFigures(self):
        """Makes figures showing distribution of n and the prevalences."""
        self.PlotDistN()
        self.PlotPrevalences()

    def PlotDistN(self):
        """Plots distribution of n."""
        pmf = self.suite.DistN()
        print('90% CI for N:', pmf.CredibleInterval(90))
        pmf.label = self.code

        thinkplot.Clf()
        thinkplot.PrePlot(num=1)
        thinkplot.Pmf(pmf)

        root = 'species-ndist-%s' % self.code
        thinkplot.Save(root=root,
                       xlabel='Number of species',
                       ylabel='Prob',
                       formats=FORMATS,
                       )

    def PlotPrevalences(self, num=5):
        """Plots dist of prevalence for several species.

        num: how many species (starting with the highest prevalence)
        """
        thinkplot.Clf()
        thinkplot.PrePlot(num=5)
        for rank in range(1, num + 1):
            self.PlotPrevalence(rank)

        root = 'species-prev-%s' % self.code
        thinkplot.Save(root=root,
                       xlabel='Prevalence',
                       ylabel='Prob',
                       formats=FORMATS,
                       axis=[0, 0.3, 0, 1],
                       )

    def PlotPrevalence(self, rank=1, cdf_flag=True):
        """Plots dist of prevalence for one species.

        rank: rank order of the species to plot.
        cdf_flag: whether to plot the CDF
        """
        # Convert rank (1 = most prevalent) to index into self.species,
        # which Done() leaves sorted in increasing order of count.
        index = self.GetM() - rank

        _, mix = self.suite.DistOfPrevalence(index)
        count, _ = self.GetSpecies(index)
        mix.label = '%d (%d)' % (rank, count)

        print('90%% CI for prevalence of species %d:' % rank, end=' ')
        print(mix.CredibleInterval(90))

        if cdf_flag:
            cdf = mix.MakeCdf()
            thinkplot.Cdf(cdf)
        else:
            thinkplot.Pmf(mix)

    def PlotMixture(self, rank=1):
        """Plots dist of prevalence for all n, and the mix.

        rank: rank order of the species to plot
        """
        # Convert rank to index (see PlotPrevalence).
        index = self.GetM() - rank

        print(self.GetSpecies(index))
        print(self.GetCounts()[index])

        metapmf, mix = self.suite.DistOfPrevalence(index)

        thinkplot.Clf()
        for pmf in metapmf.Values():
            thinkplot.Pmf(pmf, color='blue', alpha=0.2, linewidth=0.5)

        thinkplot.Pmf(mix, color='blue', alpha=0.9, linewidth=2)

        root = 'species-mix-%s' % self.code
        thinkplot.Save(root=root,
                       xlabel='Prevalence',
                       ylabel='Prob',
                       formats=FORMATS,
                       axis=[0, 0.3, 0, 0.3],
                       legend=False)

    def GetSeenSpecies(self):
        """Makes a set of the names of seen species.

        Returns: number of species, set of string species names
        """
        names = self.GetNames()
        m = len(names)
        # SpeciesGenerator is defined elsewhere in this module.
        seen = set(SpeciesGenerator(names, m))
        return m, seen

    def GenerateObservations(self, num_reads):
        """Generates a series of random observations.

        num_reads: number of reads to generate

        Returns: number of species, sequence of string species names
        """
        n, prevalences = self.suite.SamplePosterior()

        names = self.GetNames()
        name_iter = SpeciesGenerator(names, n)
        items = zip(name_iter, prevalences)

        cdf = thinkbayes2.Cdf(dict(items))
        observations = cdf.Sample(num_reads)

        return n, observations

    def Resample(self, num_reads):
        """Choose a random subset of the data (without replacement).

        num_reads: number of reads in the subset

        Returns: new Subject with the subsampled reads
        """
        # Expand counts into one entry per read, shuffle, take a prefix.
        t = []
        for count, species in self.species:
            t.extend([species] * count)

        random.shuffle(t)
        reads = t[:num_reads]

        subject = Subject(self.code)
        hist = thinkbayes2.Hist(reads)
        for species, count in hist.Items():
            subject.Add(species, count)
        subject.Done()

        return subject

    def Match(self, match):
        """Match up a rarefied subject with a complete subject.

        match: complete Subject

        Assigns attributes:
        total_reads:
        total_species:
        prev_unseen:
        """
        self.total_reads = match.num_reads
        self.total_species = match.num_species

        # Compute the prevalence of unseen species (at least approximately,
        # based on all species counts in match).
        _, seen = self.GetSeenSpecies()

        seen_total = 0.0
        unseen_total = 0.0
        for count, species in match.species:
            if species in seen:
                seen_total += count
            else:
                unseen_total += count

        self.prev_unseen = unseen_total / (seen_total + unseen_total)

    def RunSimulation(self, num_reads, frac_flag=False, jitter=0.01):
        """Simulates additional observations and returns a rarefaction curve.

        k is the number of additional observations
        num_new is the number of new species seen

        num_reads: how many new reads to simulate
        frac_flag: whether to convert to fraction of species seen
        jitter: size of jitter added if frac_flag is true

        Returns: list of (k, num_new) pairs
        """
        m, seen = self.GetSeenSpecies()
        n, observations = self.GenerateObservations(num_reads)

        curve = []
        for i, obs in enumerate(observations):
            seen.add(obs)

            if frac_flag:
                frac_seen = len(seen) / float(n)
                # Jitter separates overlapping curves when plotted.
                frac_seen += random.uniform(-jitter, jitter)
                curve.append((i + 1, frac_seen))
            else:
                num_new = len(seen) - m
                curve.append((i + 1, num_new))

        return curve

    def RunSimulations(self, num_sims, num_reads, frac_flag=False):
        """Runs simulations and returns a list of curves.

        Each curve is a sequence of (k, num_new) pairs.

        num_sims: how many simulations to run
        num_reads: how many samples to generate in each simulation
        frac_flag: whether to convert num_new to fraction of total
        """
        curves = [self.RunSimulation(num_reads, frac_flag)
                  for _ in range(num_sims)]
        return curves

    def MakePredictive(self, curves):
        """Makes a predictive distribution of additional species.

        curves: list of (k, num_new) curves

        Returns: Pmf of num_new
        """
        pred = thinkbayes2.Pmf(label=self.code)
        for curve in curves:
            _, last_num_new = curve[-1]
            pred.Incr(last_num_new)
        pred.Normalize()
        return pred
def MakeConditionals(curves, ks):
"""Makes Cdfs of the distribution of | |
#<NAME>
#<EMAIL>
#201507225
import time
import sys
import random
import copy
import statistics
import math
import interfaceUtils
import mapUtils
from worldLoader import WorldSlice
# x position, z position, x size, z size
area = (0, 0, 128, 128)

# Prefer the build area configured on the GDMC HTTP server, if any;
# requestBuildArea() returns -1 when none has been set.
buildArea = interfaceUtils.requestBuildArea()
if buildArea != -1:
    x1 = buildArea["xFrom"]
    z1 = buildArea["zFrom"]
    x2 = buildArea["xTo"]
    z2 = buildArea["zTo"]
    print(buildArea)
    # Convert the two corner points into (x, z, xSize, zSize).
    area = (x1, z1, x2 - x1, z2 - z1)

# Snapshot of the world in the target area, plus a terrain heightmap for it.
worldSlice = WorldSlice(area)
heightmap = mapUtils.calcGoodHeightmap(worldSlice)

# Block ids that should not count as ground when measuring terrain height
# (logs, leaves, giant mushrooms).
treeList = ["minecraft:oak_log", "minecraft:spruce_log", "minecraft:birch_log", "minecraft:jungle_log", "minecraft:acacia_log",
            "minecraft:dark_oak_log", "minecraft:brown_mushroom_block", "minecraft:red_mushroom_block",
            "minecraft:mushroom_stem", "minecraft:oak_leaves", "minecraft:spruce_leaves", "minecraft:birch_leaves",
            "minecraft:jungle_leaves", "minecraft:acacia_leaves", "minecraft:dark_oak_leaves"]
def heightAt(x, z):
    """Terrain height at world coordinates (x, z), from the area-relative heightmap."""
    local_x = x - area[0]
    local_z = z - area[1]
    return heightmap[(local_x, local_z)]
def setBlock(x, y, z, block):
    # Thin wrapper over the HTTP interface; `block` is a namespaced block
    # id such as "minecraft:stone".
    interfaceUtils.setBlock(x, y, z, block)
def getBlock(x, y, z):
    # Returns the block id at the given world coordinates via the HTTP
    # interface (one request per call).
    return interfaceUtils.getBlock(x, y, z)
def isIn(node, nodeList):
    """Return True if grid node `node` ([x, z]) occurs in nodeList.

    Only the first two components are compared, so entries carrying extra
    data still match on coordinates.
    """
    # Idiomatic replacement for the original index loop over range(len(...)).
    return any(node[0] == other[0] and node[1] == other[1] for other in nodeList)
#Helper for mst: gets rid of useless or duplicate paths.
def trim(edgeList):
    """Drop trivial paths and duplicate paths between the same endpoints.

    A path with two or fewer nodes is useless (the cells are adjacent), and
    of two paths joining the same pair of endpoints (in either direction)
    only the shorter one is kept.
    """
    trimmedList = [edge for edge in edgeList if len(edge) > 2]
    for i in range(len(trimmedList)):
        for j in range(len(trimmedList)):
            # `is not None` instead of `!= None` (identity, not equality).
            if i == j or trimmedList[i] is None or trimmedList[j] is None:
                continue
            same_dir = (trimmedList[i][0] == trimmedList[j][0]
                        and trimmedList[i][-1] == trimmedList[j][-1])
            reverse_dir = (trimmedList[i][0] == trimmedList[j][-1]
                           and trimmedList[i][-1] == trimmedList[j][0])
            if same_dir or reverse_dir:
                # Mark the longer duplicate for removal (ties drop j).
                if len(trimmedList[i]) > len(trimmedList[j]):
                    trimmedList[i] = None
                else:
                    trimmedList[j] = None
    return [edge for edge in trimmedList if edge]
#area - stores given area
#division - how many minecraft blocks each cell will contain
class Grid:
def __init__(self, area, division):
    """Create a grid over `area` with `division` world blocks per cell."""
    self.area = area
    self.division = division
    cols = int(area[2] / division)
    rows = int(area[3] / division)
    # NOTE(review): the rest of the class indexes this as grid[x][z] while
    # the outer dimension here is built from area[3] -- consistent only for
    # square build areas; confirm.
    self.grid = [[Cell() for _ in range(cols)] for _ in range(rows)]
#Displays the grid in an easily readable way.
#Cell identifiers:
# 0 == blank cell
# 1 == path
# 2 == blocked cell
# 3 onward == structures
def __str__(self):
    lines = ["Grid: "]
    for z in range(len(self.grid[0])):
        row = [str(self.grid[x][z].identifier) for x in range(len(self.grid))]
        lines.append("[ " + " ".join(row) + " ]")
    return "\n".join(lines) + "\n"
#Helper for populate: counts how many of the four orthogonal neighbours
#of (x, z) hold a structure (identifier 3).
def getNeighbours(self, x, z):
    width = len(self.grid)
    depth = len(self.grid[0])
    count = 0
    if z > 0 and self.grid[x][z-1].identifier == 3:
        count += 1
    if x > 0 and self.grid[x-1][z].identifier == 3:
        count += 1
    if z + 1 < depth and self.grid[x][z+1].identifier == 3:
        count += 1
    if x + 1 < width and self.grid[x+1][z].identifier == 3:
        count += 1
    return count
#Like getNeighbours, but reports per direction whether the neighbour is
#usable (neither blocked (2) nor blank (0)); order: -z, -x, +z, +x.
def returnNeighbours(self, x, z):
    def usable(cx, cz):
        ident = self.grid[cx][cz].identifier
        return ident != 2 and ident != 0

    nbs = [False, False, False, False]
    if z - 1 >= 0:
        nbs[0] = usable(x, z-1)
    if x - 1 >= 0:
        nbs[1] = usable(x-1, z)
    if z + 1 < len(self.grid[0]):
        nbs[2] = usable(x, z+1)
    if x + 1 < len(self.grid):
        nbs[3] = usable(x+1, z)
    return nbs
#This function is what places the structures.
#Every cell is first independently seeded alive with `aliveChance` percent
#probability; if steps > 0, a cellular automaton (death/birth-limit rules)
#is then run to produce clusters of structures.
def populate(self, aliveChance, deathLimit=0, birthLimit=0, steps=0):
    """Seed structure cells, optionally smoothing with a cellular automaton.

    aliveChance: percent chance (1-100) a cell starts as a structure
    deathLimit: alive cells with fewer neighbours than this die
    birthLimit: dead cells with more neighbours than this are born
    steps: automaton iterations; 0 keeps the raw random seeding
    """
    for z in range(len(self.grid[0])):
        for x in range(len(self.grid)):
            if random.randint(1, 100) <= aliveChance:
                self.grid[x][z].identifier = 3
    if steps == 0:
        return
    elif steps > 0:
        for step in range(steps):
            # Bug fix: the original wrote each generation into a deepcopy
            # that was never assigned back to self.grid, so the automaton
            # had no effect.  Write into a fresh copy (so every rule reads
            # the previous generation) and commit it after each step.
            nextGrid = copy.deepcopy(self.grid)
            for z in range(len(self.grid[0])):
                for x in range(len(self.grid)):
                    nbs = self.getNeighbours(x, z)
                    if self.grid[x][z].identifier == 3:
                        nextGrid[x][z].identifier = 3 if nbs >= deathLimit else 0
                    else:
                        nextGrid[x][z].identifier = 3 if nbs > birthLimit else 0
            self.grid = nextGrid
    else:
        print("Error; Using default setting")
        return
#This function looks through all of the cells to find out if there are any big obstructions such as a cliff
#If it finds such a obstruction it sets the cell identifier to 2 and therefore blocks it from use
def checkMountains(self, limit=2):
    # NOTE(review): this first loop is a testing stub -- it randomly blocks
    # ~35% of non-structure cells and then returns, which makes the real
    # height-scan below unreachable.  Remove the stub (and its return) to
    # enable the actual terrain check.
    #Randomly set obstructions for testing purposes
    for z in range(len(self.grid[0])):
        for x in range(len(self.grid)):
            rand = random.randint(1, 100)
            if rand <= 35 and self.grid[x][z].identifier != 3:
                self.grid[x][z].identifier = 2
    return
    # Unreachable while the stub above is in place.
    for z in range(len(self.grid[0])):
        for x in range(len(self.grid)):
            heights = []
            startX = self.area[0] + x*self.division
            startZ = self.area[1] + z*self.division
            # Sample a 16x16 patch per cell, skipping tree/mushroom blocks
            # so the canopy does not count as terrain.
            # NOTE(review): the 16 here assumes division == 16 -- confirm
            # against how the Grid was constructed.
            for i in range(startX, startX+16):
                for j in range(startZ, startZ+16):
                    if (not getBlock(i, heightAt(i, j)-1, j) in treeList):
                        heights.append(heightAt(i, j))
            heights = sorted(heights)
            # A height spread above `limit` marks the cell as blocked (2).
            if abs(heights[-1]-heights[0]) > limit:
                self.grid[x][z].identifier = 2
#Helper for getEdges (BFS): collects every structure cell in the grid
#other than (_x, _z) itself, as [x, z] pairs.
def getOtherCells(self, _x, _z):
    return [[x, z]
            for z in range(len(self.grid[0]))
            for x in range(len(self.grid))
            if self.grid[x][z].identifier == 3 and (x != _x or z != _z)]
#This is the function that finds the optimal paths from one cell to another
#It uses BFS to spread out from the initial cell and find paths
#There is a limit function which you can set for how many paths to find
#If you leave it empty it will find all paths which can be very space and time consuming
def getEdges(self, limit=-1):
    # Breadth-first search from every structure cell.  Each queue entry is
    # a whole path (list of [x, z] nodes), so popping index 0 always
    # expands a shortest frontier path first; paths found are stored on
    # the start cell's `edges` attribute.
    for z in range(len(self.grid[0])):
        for x in range(len(self.grid)):
            if self.grid[x][z].identifier == 3:
                goalList = self.getOtherCells(x, z)
                openList = [[[x, z]]]
                while(len(openList) > 0):
                    node = openList.pop(0)
                    #print(node)
                    nodeX = node[-1][0]
                    nodeZ = node[-1][1]
                    #print(node[-1])
                    # Reached another structure cell: record a copy of the path.
                    if isIn(node[-1], goalList):
                        tempNode = copy.deepcopy(node)
                        self.grid[x][z].edges.append(tempNode)
                        # NOTE(review): with a limit set this stops only
                        # after limit+1 edges ('>' rather than '>='), and
                        # only for the current start cell -- confirm intended.
                        if limit != -1 and len(self.grid[x][z].edges) > limit:
                            break
                    # Expand into the four orthogonal neighbours unless the
                    # neighbour is blocked (identifier 2) or already on this
                    # path (prevents cycles).
                    if nodeZ-1 >= 0 and self.grid[nodeX][nodeZ-1].identifier != 2 and not isIn([nodeX, nodeZ-1], node):
                        tempNode = copy.deepcopy(node)
                        tempNode.append([nodeX, nodeZ-1])
                        openList.append(tempNode)
                    if nodeX-1 >= 0 and self.grid[nodeX-1][nodeZ].identifier != 2 and not isIn([nodeX-1, nodeZ], node):
                        tempNode = copy.deepcopy(node)
                        tempNode.append([nodeX-1, nodeZ])
                        openList.append(tempNode)
                    if nodeZ+1 < len(self.grid[0]) and self.grid[nodeX][nodeZ+1].identifier != 2 and not isIn([nodeX, nodeZ+1], node):
                        tempNode = copy.deepcopy(node)
                        tempNode.append([nodeX, nodeZ+1])
                        openList.append(tempNode)
                    if nodeX+1 < len(self.grid) and self.grid[nodeX+1][nodeZ].identifier != 2 and not isIn([nodeX+1, nodeZ], node):
                        tempNode = copy.deepcopy(node)
                        tempNode.append([nodeX+1, nodeZ])
                        openList.append(tempNode)
#Helper for mst: checks whether every structure cell has been visited,
#i.e. a path reaches every structure.
def allVisited(self):
    """Return True iff every structure cell (identifier 3) is visited."""
    for z in range(len(self.grid[0])):
        for x in range(len(self.grid)):
            cell = self.grid[x][z]
            # A single unvisited structure is enough to answer False --
            # return early instead of counting them all (the original
            # tallied every cell before comparing the count to zero).
            if cell.identifier == 3 and not cell.visited:
                return False
    return True
#Marks every structure cell connected to (x, z) as visited, so a path to
#one house in a cluster counts as a path to the whole cluster.
def setNeighboursVisited(self, x, z):
    """Flood-fill `visited` over the structure cells orthogonally
    connected to (x, z).

    Rewritten iteratively: the original recursed once per cell and could
    hit Python's recursion limit on large clusters.  As before, (x, z)
    itself is not marked unless the fill loops back to it.
    """
    stack = [(x, z)]
    while stack:
        cx, cz = stack.pop()
        for nx, nz in ((cx, cz - 1), (cx - 1, cz), (cx, cz + 1), (cx + 1, cz)):
            if 0 <= nx < len(self.grid) and 0 <= nz < len(self.grid[0]):
                cell = self.grid[nx][nz]
                if cell.identifier == 3 and not cell.visited:
                    cell.visited = True
                    stack.append((nx, nz))
#Function for setting a cell as visited
#Used in mst function
def setVisited(self, edge):
x = edge[0][0]
x2 = edge[-1][0]
z = edge[0][1]
z2 = edge[-1][1]
self.grid[x][z].visited = True
self.grid[x2][z2].visited = True
self.setNeighboursVisited(x, z)
self.setNeighboursVisited(x2, z2)
#Function which was to produce a mst
#It does not however produce a mst
#Very similarly to Kruskals MST however it takes from a list the best edge (shortest path)
#It then check for a certain condition and if it fails the edge is thrown away
#This continues until all cells are visited
#I tried using a DFS to check for cycles but was unsuccesful so i resorted to visiting all nodes instead
def mst(self):
edgeBank = []
nodes = 0
for z in range(len(self.grid[0])):
for x in range(len(self.grid)):
if self.grid[x][z].identifier == 3:
nodes += 1
for i in range(len(self.grid[x][z].edges)):
edgeBank.append(self.grid[x][z].edges[i])
for i in range(len(edgeBank)):
if len(edgeBank[i]) <= 2:
edgeBank[i] = None
edgeBank = [i for i in edgeBank if i]
edgeBank = sorted(edgeBank, key=len)
mst = []
while(not self.allVisited() and len(edgeBank) > 0):
edge = edgeBank.pop(0)
if (not self.grid[edge[0][0]][edge[0][1]].visited or not self.grid[edge[-1][0]][edge[-1][1]].visited):
mst.append(edge)
self.setVisited(edge)
return mst
#Simple function which takes a list of cells and | |
# ietf/ydk/models/ietf/ietf_yang_library.py
""" ietf_yang_library
This module contains monitoring information about the YANG
modules and submodules that are used within a YANG\-based
server.
Copyright (c) 2016 IETF Trust and the persons identified as
authors of the code. All rights reserved.
Redistribution and use in source and binary forms, with or
without modification, is permitted pursuant to, and subject
to the license terms contained in, the Simplified BSD License
set forth in Section 4.c of the IETF Trust's Legal Provisions
Relating to IETF Documents
(http\://trustee.ietf.org/license\-info).
This version of this YANG module is part of RFC 7895; see
the RFC itself for full legal notices.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class ModulesState(Entity):
"""
Contains YANG module monitoring information.
.. attribute:: module_set_id
Contains a server\-specific identifier representing the current set of modules and submodules. The server MUST change the value of this leaf if the information represented by the 'module' list instances has changed
**type**\: str
**mandatory**\: True
.. attribute:: module
Each entry represents one revision of one module currently supported by the server
**type**\: list of :py:class:`Module <ydk.models.ietf.ietf_yang_library.ModulesState.Module>`
"""
_prefix = 'yanglib'
_revision = '2016-06-21'
    def __init__(self):
        """Build the top-level 'modules-state' container with empty leaf/list slots."""
        super(ModulesState, self).__init__()
        self._top_entity = None
        # YANG names of this node and of the module that defines it.
        self.yang_name = "modules-state"
        self.yang_parent_name = "ietf-yang-library"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        # No child containers; one child list: the 'module' entries.
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([("module", ("module", ModulesState.Module))])
        # Leaf nodes of this container (python attribute -> typed YLeaf).
        self._leafs = OrderedDict([
            ('module_set_id', YLeaf(YType.str, 'module-set-id')),
        ])
        self.module_set_id = None
        self.module = YList(self)
        # Path segment used when building this node's absolute XPath.
        self._segment_path = lambda: "ietf-yang-library:modules-state"
    def __setattr__(self, name, value):
        # Route attribute writes through ydk so leaf changes are validated
        # and tracked; only 'module_set_id' is a settable leaf here.
        self._perform_setattr(ModulesState, ['module_set_id'], name, value)
    class Module(Entity):
        """
        Each entry represents one revision of one module
        currently supported by the server.
        .. attribute:: name (key)
            The YANG module or submodule name
            **type**\: str
            **pattern:** [a\-zA\-Z\_][a\-zA\-Z0\-9\\\-\_.]\*
        .. attribute:: revision (key)
            The YANG module or submodule revision date. A zero\-length string is used if no revision statement is present in the YANG module or submodule
            **type**\: union of the below types:
                **type**\: str
                **pattern:** \\d{4}\-\\d{2}\-\\d{2}
                **type**\: str
                **length:** 0
        .. attribute:: schema
            Contains a URL that represents the YANG schema resource for this module or submodule. This leaf will only be present if there is a URL available for retrieval of the schema for this entry
            **type**\: str
        .. attribute:: namespace
            The XML namespace identifier for this module
            **type**\: str
            **mandatory**\: True
        .. attribute:: feature
            List of YANG feature names from this module that are supported by the server, regardless of whether they are defined in the module or any included submodule
            **type**\: list of str
            **pattern:** [a\-zA\-Z\_][a\-zA\-Z0\-9\\\-\_.]\*
        .. attribute:: deviation
            List of YANG deviation module names and revisions used by this server to modify the conformance of the module associated with this entry. Note that the same module can be used for deviations for multiple modules, so the same entry MAY appear within multiple 'module' entries. The deviation module MUST be present in the 'module' list, with the same name and revision values. The 'conformance\-type' value will be 'implement' for the deviation module
            **type**\: list of :py:class:`Deviation <ydk.models.ietf.ietf_yang_library.ModulesState.Module.Deviation>`
        .. attribute:: conformance_type
            Indicates the type of conformance the server is claiming for the YANG module identified by this entry
            **type**\: :py:class:`ConformanceType <ydk.models.ietf.ietf_yang_library.ModulesState.Module.ConformanceType>`
            **mandatory**\: True
        .. attribute:: submodule
            Each entry represents one submodule within the parent module
            **type**\: list of :py:class:`Submodule <ydk.models.ietf.ietf_yang_library.ModulesState.Module.Submodule>`
        """
        # Prefix/revision of the generating YANG model (RFC 7895).
        _prefix = 'yanglib'
        _revision = '2016-06-21'
        def __init__(self):
            """Build one 'module' list entry with empty leaf/list slots."""
            super(ModulesState.Module, self).__init__()
            self.yang_name = "module"
            self.yang_parent_name = "modules-state"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            # 'name' + 'revision' together form the YANG list key.
            self.ylist_key_names = ['name','revision']
            self._child_container_classes = OrderedDict([])
            self._child_list_classes = OrderedDict([("deviation", ("deviation", ModulesState.Module.Deviation)), ("submodule", ("submodule", ModulesState.Module.Submodule))])
            # Leaf nodes (python attribute -> typed YLeaf with YANG name).
            self._leafs = OrderedDict([
                ('name', YLeaf(YType.str, 'name')),
                ('revision', YLeaf(YType.str, 'revision')),
                ('schema', YLeaf(YType.str, 'schema')),
                ('namespace', YLeaf(YType.str, 'namespace')),
                ('feature', YLeafList(YType.str, 'feature')),
                ('conformance_type', YLeaf(YType.enumeration, 'conformance-type')),
            ])
            self.name = None
            self.revision = None
            self.schema = None
            self.namespace = None
            self.feature = []
            self.conformance_type = None
            self.deviation = YList(self)
            self.submodule = YList(self)
            # List entries are addressed by both key leafs in the path.
            self._segment_path = lambda: "module" + "[name='" + str(self.name) + "']" + "[revision='" + str(self.revision) + "']"
            self._absolute_path = lambda: "ietf-yang-library:modules-state/%s" % self._segment_path()
        def __setattr__(self, name, value):
            # Route writes through ydk so leaf changes are validated/tracked.
            self._perform_setattr(ModulesState.Module, ['name', 'revision', 'schema', 'namespace', 'feature', 'conformance_type'], name, value)
        class ConformanceType(Enum):
            """
            ConformanceType (Enum Class)
            Indicates the type of conformance the server is claiming
            for the YANG module identified by this entry.
            .. data:: implement = 0
                Indicates that the server implements one or more
                protocol-accessible objects defined in the YANG module
                identified in this entry. This includes deviation
                statements defined in the module.
                For YANG version 1.1 modules, there is at most one
                module entry with conformance type 'implement' for a
                particular module name, since YANG 1.1 requires that,
                at most, one revision of a module is implemented.
                For YANG version 1 modules, there SHOULD NOT be more
                than one module entry for a particular module name.
            .. data:: import_ = 1
                Indicates that the server imports reusable definitions
                from the specified revision of the module but does
                not implement any protocol-accessible objects from
                this revision.
                Multiple module entries for the same module name MAY
                exist. This can occur if multiple modules import the
                same module but specify different revision dates in
                the import statements.
            """
            implement = Enum.YLeaf(0, "implement")
            # Trailing underscore avoids clashing with the 'import' keyword.
            import_ = Enum.YLeaf(1, "import")
        class Deviation(Entity):
            """
            List of YANG deviation module names and revisions
            used by this server to modify the conformance of
            the module associated with this entry. Note that
            the same module can be used for deviations for
            multiple modules, so the same entry MAY appear
            within multiple 'module' entries.
            The deviation module MUST be present in the 'module'
            list, with the same name and revision values.
            The 'conformance\-type' value will be 'implement' for
            the deviation module.
            .. attribute:: name (key)
                The YANG module or submodule name
                **type**\: str
                **pattern:** [a\-zA\-Z\_][a\-zA\-Z0\-9\\\-\_.]\*
            .. attribute:: revision (key)
                The YANG module or submodule revision date. A zero\-length string is used if no revision statement is present in the YANG module or submodule
                **type**\: union of the below types:
                    **type**\: str
                    **pattern:** \\d{4}\-\\d{2}\-\\d{2}
                    **type**\: str
                    **length:** 0
            """
            _prefix = 'yanglib'
            _revision = '2016-06-21'
            def __init__(self):
                """Build one 'deviation' list entry (keyed by name+revision)."""
                super(ModulesState.Module.Deviation, self).__init__()
                self.yang_name = "deviation"
                self.yang_parent_name = "module"
                self.is_top_level_class = False
                # Parent 'module' is itself a keyed list entry.
                self.has_list_ancestor = True
                self.ylist_key_names = ['name','revision']
                self._child_container_classes = OrderedDict([])
                self._child_list_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('name', YLeaf(YType.str, 'name')),
                    ('revision', YLeaf(YType.str, 'revision')),
                ])
                self.name = None
                self.revision = None
                self._segment_path = lambda: "deviation" + "[name='" + str(self.name) + "']" + "[revision='" + str(self.revision) + "']"
            def __setattr__(self, name, value):
                # Route writes through ydk so leaf changes are validated/tracked.
                self._perform_setattr(ModulesState.Module.Deviation, ['name', 'revision'], name, value)
        class Submodule(Entity):
            """
            Each entry represents one submodule within the
            parent module.
            .. attribute:: name (key)
                The YANG module or submodule name
                **type**\: str
                **pattern:** [a\-zA\-Z\_][a\-zA\-Z0\-9\\\-\_.]\*
            .. attribute:: revision (key)
                The YANG module or submodule revision date. A zero\-length string is used if no revision statement is present in the YANG module or submodule
                **type**\: union of the below types:
                    **type**\: str
                    **pattern:** \\d{4}\-\\d{2}\-\\d{2}
                    **type**\: str
                    **length:** 0
            .. attribute:: schema
                Contains a URL that represents the YANG schema resource for this module or submodule. This leaf will only be present if there is a URL available for retrieval of the schema for this entry
                **type**\: str
            """
            _prefix = 'yanglib'
            _revision = '2016-06-21'
            def __init__(self):
                """Build one 'submodule' list entry (keyed by name+revision)."""
                super(ModulesState.Module.Submodule, self).__init__()
                self.yang_name = "submodule"
                self.yang_parent_name = "module"
                self.is_top_level_class = False
                # Parent 'module' is itself a keyed list entry.
                self.has_list_ancestor = True
                self.ylist_key_names = ['name','revision']
                self._child_container_classes = OrderedDict([])
                self._child_list_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('name', YLeaf(YType.str, 'name')),
                    ('revision', YLeaf(YType.str, 'revision')),
                    ('schema', YLeaf(YType.str, 'schema')),
                ])
                self.name = None
                self.revision = None
                self.schema = None
                self._segment_path = lambda: "submodule" + "[name='" + str(self.name) + "']" + "[revision='" + str(self.revision) + "']"
            def __setattr__(self, name, value):
                # Route writes through ydk so leaf changes are validated/tracked.
                self._perform_setattr(ModulesState.Module.Submodule, ['name', 'revision', 'schema'], name, value)
def clone_ptr(self):
self._top_entity = | |
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.decorators import login_required
from django_xhtml2pdf.utils import generate_pdf
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.contrib import messages
from django.db.models import Q
from docxtpl import DocxTemplate
from peticions.models import *
from peticions.forms import *
from cole.forms import *
import io
#
# staff
#
@user_passes_test(lambda u: u.is_staff)
def preview_docx(request, junta_id):
    """Render the junta into a .docx template and return it as a download.

    On any error, notify superusers with the exception text and fall back to
    the juntes list.
    """
    # TODO(review): hard-coded developer-machine path — move to settings or
    # a packaged template so this works outside the original dev box.
    template_path = "/home/jprats/git/django-ampa/test.docx"
    try:
        junta_instance = Junta.objects.filter(id=junta_id)[0]
        junta_instance.render_text_version()
        tpl = DocxTemplate(template_path)
        context = {
            'junta_instance': junta_instance,
            'issue_add_comments': False,
            'issue_title_size': 'h4',
            'user_admin': True,
            'is_pdf': True
        }
        tpl.render(context)
        # Save to an in-memory buffer and stream it back to the client.
        tpl_io = io.BytesIO()
        tpl.save(tpl_io)
        tpl_io.seek(0)
        response = HttpResponse(tpl_io.read())
        # Content-Disposition header makes the file downloadable.
        response["Content-Disposition"] = "attachment; filename=preview.docx"
        # Set the appropriate Content-Type for a docx file.
        response["Content-Type"] = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
        return response
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def preview_pdf(request, junta_id):
    """Render the junta as an inline PDF preview for staff users."""
    try:
        junta_instance = Junta.objects.filter(id=junta_id)[0]
        pdf_response = HttpResponse(content_type='application/pdf')
        pdf_context = {
            'junta_instance': junta_instance,
            'issue_add_comments': False,
            'issue_title_size': 'h4',
            'user_admin': True,
            'is_pdf': True
        }
        return generate_pdf('peticions/juntes/render_pdf.html', file_object=pdf_response, context=pdf_context)
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def delete_comment(request, issue_id, comment_id):
    """Confirmation page (GET) and deletion (valid POST) for one comment."""
    try:
        comment_instance = Comment.objects.filter(id=comment_id, issue__id=issue_id)[0]
        if request.method == 'POST':
            form = AreYouSureForm(request.POST)
            if form.is_valid():
                comment_instance.delete()
                # BUG FIX: redirect() forwards positional args to reverse();
                # the old positional dict raised NoReverseMatch. URL kwargs
                # must be passed as keyword arguments (as sibling views do).
                return redirect('peticions.edit.issue', issue_id=issue_id)
            else:
                messages.error(request, 'Error eliminant comentari')
        else:
            form = AreYouSureForm(request.GET)
        return render(request, 'peticions/comments/delete.html', { 'comment': comment_instance })
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        # Same fix as above for the error path.
        return redirect('peticions.edit.issue', issue_id=issue_id)
@user_passes_test(lambda u: u.is_staff)
def delete_junta(request, junta_id):
    """Confirmation page (GET) and deletion (valid POST) for a Junta."""
    try:
        junta_instance = Junta.objects.filter(id=junta_id)[0]
        if request.method != 'POST':
            form = AreYouSureForm(request.GET)
        else:
            form = AreYouSureForm(request.POST)
            if form.is_valid():
                junta_instance.delete()
                return redirect('peticions.list.juntes')
            messages.error(request, 'Error eliminant la junta')
        return render(request, 'peticions/juntes/delete.html', {
            'junta_instance': junta_instance,
            'issue_title_size': 'h4',
        })
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def delete_representant(request, representant_id):
    """Confirmation page (GET) and deletion (valid POST) for a Representant."""
    try:
        instance_representant = Representant.objects.filter(id=representant_id)[0]
        if request.method != 'POST':
            form = AreYouSureForm(request.GET)
        else:
            form = AreYouSureForm(request.POST)
            if form.is_valid():
                instance_representant.delete()
                return redirect('peticions.list.representants')
            messages.error(request, 'Error eliminant representant')
        return render(request, 'peticions/representants/delete.html', { 'instance_representant': instance_representant })
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        return redirect('peticions.list.representants')
@user_passes_test(lambda u: u.is_staff)
def delete_category(request, category_id):
    """Confirmation page (GET) and deletion (valid POST) for a Category."""
    try:
        instance_category = Category.objects.filter(id=category_id)[0]
        if request.method != 'POST':
            form = AreYouSureForm(request.GET)
        else:
            form = AreYouSureForm(request.POST)
            if form.is_valid():
                instance_category.delete()
                return redirect('peticions.list.categories')
            messages.error(request, 'Error eliminant la categoria')
        return render(request, 'peticions/categories/delete.html', { 'instance_category': instance_category })
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        return redirect('peticions.list.categories')
@user_passes_test(lambda u: u.is_staff)
def delete_issue(request, issue_id):
    """Confirmation page (GET) and deletion (valid POST) for an Issue."""
    try:
        instance_issue = Issue.objects.filter(id=issue_id)[0]
        if request.method != 'POST':
            form = AreYouSureForm(request.GET)
        else:
            form = AreYouSureForm(request.POST)
            if form.is_valid():
                instance_issue.delete()
                return redirect('peticions.list.issues')
            messages.error(request, 'Error eliminant la petició')
        return render(request, 'peticions/issues/delete.html', { 'issue_instance': instance_issue })
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        return redirect('peticions.list.issues')
@user_passes_test(lambda u: u.is_staff)
def close_junta(request, junta_id):
    """Close a junta: every issue on its agenda is moved to CLOSED."""
    try:
        junta_instance = Junta.objects.filter(id=junta_id)[0]
        if request.method != 'POST':
            form = AreYouSureForm(request.GET)
        else:
            form = AreYouSureForm(request.POST)
            if form.is_valid():
                for agenda_issue in junta_instance.issues.all():
                    agenda_issue.status = ISSUE_STATUS_CLOSED
                    agenda_issue.save()
                junta_instance.save()
                messages.info(request, 'Junta tancada')
                return redirect('peticions.list.juntes')
            messages.error(request, 'Error tancant la junta')
        return render(request, 'peticions/juntes/close.html', {
            'junta_instance': junta_instance,
            'issue_title_size': 'h4',
        })
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def publish_junta(request, junta_id):
    """Publish a junta: close its agenda issues and flag it as public."""
    try:
        junta_instance = Junta.objects.filter(id=junta_id)[0]
        if request.method != 'POST':
            form = AreYouSureForm(request.GET)
        else:
            form = AreYouSureForm(request.POST)
            if form.is_valid():
                for agenda_issue in junta_instance.issues.all():
                    agenda_issue.status = ISSUE_STATUS_CLOSED
                    agenda_issue.save()
                junta_instance.public = True
                junta_instance.save()
                messages.info(request, 'Junta publicada')
                return redirect('peticions.list.juntes')
            messages.error(request, 'Error publicant junta')
        return render(request, 'peticions/juntes/publish.html', {
            'junta_instance': junta_instance,
            'issue_title_size': 'h4',
        })
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def edit_representant(request, representant_id=None):
    """Create (no id given) or update an existing Representant."""
    try:
        if representant_id:
            representant_instance = Representant.objects.filter(id=representant_id)[0]
        else:
            representant_instance = Representant()
        if request.method != 'POST':
            form = RepresentantForm(instance=representant_instance)
        else:
            form = RepresentantForm(request.POST, instance=representant_instance)
            if form.is_valid():
                form.save()
                messages.info(request, 'Representant guardat correctament')
                return redirect('peticions.list.representants')
            messages.error(request, 'Formulari incorrecte')
        # Single render covers both the GET form and the invalid POST re-display.
        return render(request, 'peticions/representants/edit.html', {
            'form': form,
            'representant_instance': representant_instance,
        })
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        return redirect('peticions.list.representants')
@user_passes_test(lambda u: u.is_staff)
def edit_category(request, category_id=None):
    """Create (no id given) or update an existing Category."""
    try:
        if category_id:
            category_instance = Category.objects.filter(id=category_id)[0]
        else:
            category_instance = Category()
        if request.method != 'POST':
            form = CategoryForm(instance=category_instance)
        else:
            form = CategoryForm(request.POST, instance=category_instance)
            if form.is_valid():
                form.save()
                messages.info(request, 'Categoria guardada correctament')
                return redirect('peticions.list.categories')
            messages.error(request, 'Formulari incorrecte')
        # Single render covers both the GET form and the invalid POST re-display.
        return render(request, 'peticions/categories/edit.html', {
            'form': form,
            'category_instance': category_instance,
        })
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        return redirect('peticions.list.categories')
@user_passes_test(lambda u: u.is_staff)
def list_categories(request):
    """Paginated staff listing of all categories (10 per page)."""
    paginator = Paginator(Category.objects.all(), 10)
    # Paginator.get_page() reproduces the old explicit try/except exactly:
    # non-integer page -> first page, out-of-range page -> last page.
    list_categories = paginator.get_page(request.GET.get('page', 1))
    return render(request, 'peticions/categories/list.html', {
        'list_categories': list_categories,
        'public': False,
        'user_admin': request.user.is_staff
    })
@user_passes_test(lambda u: u.is_staff)
def list_representants(request):
    """Paginated staff listing of all representants (10 per page)."""
    paginator = Paginator(Representant.objects.all(), 10)
    # Paginator.get_page() reproduces the old explicit try/except exactly:
    # non-integer page -> first page, out-of-range page -> last page.
    list_representants = paginator.get_page(request.GET.get('page', 1))
    return render(request, 'peticions/representants/list.html', {
        'list_representants': list_representants,
        'public': False,
        'user_admin': request.user.is_staff
    })
@user_passes_test(lambda u: u.is_staff)
def forward_open_peticions(request):
    """Move every public DRAFT issue to OPEN after staff confirmation."""
    try:
        list_issues = Issue.objects.filter(public=True, status=ISSUE_STATUS_DRAFT)
        config = Entitat.objects.first()
        if request.method != 'POST':
            form = AreYouSureForm(request.GET)
        else:
            form = AreYouSureForm(request.POST)
            if form.is_valid():
                for draft in list_issues:
                    draft.status = ISSUE_STATUS_OPEN
                    draft.save()
                messages.info(request, 'Canviat l\'estat de les peticions')
                return redirect('peticions.list.issues')
            messages.error(request, 'Error fent el canvi d\'estat')
        return render(request, 'peticions/issues/forward_open.html', {'list_issues': list_issues, 'config': config})
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        return redirect('peticions.list.issues')
@user_passes_test(lambda u: u.is_staff)
def edit_junta_peu(request, junta_id):
    """Edit only the footer ('peu') text of an existing Junta."""
    try:
        junta_instance = Junta.objects.filter(id=junta_id)[0]
        if request.method != 'POST':
            form = JuntaPeuForm(instance=junta_instance)
        else:
            form = JuntaPeuForm(request.POST, instance=junta_instance)
            if form.is_valid():
                form.save()
                messages.info(request, 'Peu de junta guardat correctament')
                return redirect('peticions.edit.junta', junta_id=junta_id)
            messages.error(request, 'Formulari incorrecte')
        # Single render covers both the GET form and the invalid POST re-display.
        return render(request, 'peticions/juntes/edit_peu.html', {
            'form': form,
            'junta_instance': junta_instance,
        })
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        return redirect('peticions.edit.junta', junta_id=junta_id)
@user_passes_test(lambda u: u.is_staff)
def edit_junta(request, junta_id=None):
    """Create (no id given) or update a Junta; after a successful save,
    redirect according to which submit button was pressed.

    Buttons: 'queixarem' -> agenda list, 'veure' -> show, 'pudor' -> footer
    editor, 'presentar' -> present, 'tancar' -> close; 'votarem' or anything
    else falls back to the juntes list.
    """
    try:
        if junta_id:
            junta_instance = Junta.objects.filter(id=junta_id)[0]
        else:
            junta_instance = Junta()
        # BUG FIX: removed leftover debug loop that printed every category
        # (it also iterated the related manager without .all()).
        if request.method == 'POST':
            form = JuntaForm(request.POST, instance=junta_instance)
            if form.is_valid():
                form.save()
                messages.info(request, 'Junta guardada correctament')
                # Flat membership tests replace the old six-level nested
                # try/except pyramid around form.data[...] KeyErrors.
                # 'votarem' keeps its original behaviour: fall through to
                # the default redirect without checking the other buttons.
                if 'votarem' not in form.data:
                    if 'queixarem' in form.data:
                        return redirect('peticions.edit.junta.list.peticions', junta_id=junta_instance.id)
                    if 'veure' in form.data:
                        return redirect('peticions.show.junta', junta_id=junta_instance.id)
                    if 'pudor' in form.data:
                        return redirect('peticions.edit.peu.junta', junta_id=junta_instance.id)
                    if 'presentar' in form.data:
                        return redirect('peticions.present.junta', junta_id=junta_instance.id)
                    if 'tancar' in form.data:
                        return redirect('peticions.close.junta', junta_id=junta_instance.id)
                return redirect('peticions.list.juntes')
            messages.error(request, 'Formulari incorrecte')
        else:
            form = JuntaForm(instance=junta_instance)
        return render(request, 'peticions/juntes/edit.html', {
            'form': form,
            'junta_instance': junta_instance,
        })
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def list_junta_peticio(request, junta_id):
    """Show which public OPEN issues can be added to or removed from a junta."""
    try:
        junta_instance = Junta.objects.filter(id=junta_id)[0]
        already_scheduled = junta_instance.issues.all()
        candidates = Issue.objects.filter(public=True, status=ISSUE_STATUS_OPEN).exclude(id__in=junta_instance.issues.values('id'))
        return render(request, 'peticions/juntes/add_to_junta_list.html', {
            'list_issues_add': candidates,
            'list_issues_remove': already_scheduled,
            'public': False,
            'user_admin': request.user.is_staff,
            'junta_instance': junta_instance
        })
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def add_all_junta_peticio(request, junta_id):
    """Attach every public OPEN issue to the junta agenda after confirmation."""
    try:
        junta_instance = Junta.objects.filter(id=junta_id)[0]
        if request.method != 'POST':
            form = AreYouSureForm(request.GET)
        else:
            form = AreYouSureForm(request.POST)
            if form.is_valid():
                for candidate in Issue.objects.filter(public=True, status=ISSUE_STATUS_OPEN):
                    candidate.status = ISSUE_STATUS_WAITING
                    candidate.save()
                    junta_instance.issues.add(candidate)
                junta_instance.save()
                return redirect('peticions.edit.junta', junta_id=junta_id)
            messages.error(request, 'Error afegit peticions a la junta')
        return render(request, 'peticions/juntes/add_all_issues.html', { 'junta_instance': junta_instance, 'list_issues_add': Issue.objects.filter(public=True, status=ISSUE_STATUS_OPEN) })
    except Exception as e:
        if request.user.is_superuser:
            messages.error(request, str(e))
        return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def add_junta_peticio(request, junta_id, issue_id):
    """POST-only: put one issue on the junta agenda and mark it WAITING."""
    try:
        if request.method == "POST":
            junta_instance = Junta.objects.filter(id=junta_id)[0]
            issue_instance = Issue.objects.filter(id=issue_id)[0]
            issue_instance.status = ISSUE_STATUS_WAITING
            issue_instance.save()
            junta_instance.issues.add(issue_instance)
            junta_instance.save()
    except Exception as e:
        messages.error(request, "Error afegint petició a l'ordre del dia")
        if request.user.is_superuser:
            messages.error(request, str(e))
    # Always return to the junta editor, even after a failure.
    return redirect('peticions.edit.junta', junta_id=junta_id)
@user_passes_test(lambda u: u.is_staff)
def remove_junta_peticio(request, junta_id, issue_id):
    """POST-only: take one issue off the junta agenda and reopen it."""
    try:
        if request.method == "POST":
            junta_instance = Junta.objects.filter(id=junta_id)[0]
            issue_instance = Issue.objects.filter(id=issue_id)[0]
            issue_instance.status = ISSUE_STATUS_OPEN
            issue_instance.save()
            junta_instance.issues.remove(issue_instance)
            junta_instance.save()
    except Exception as e:
        messages.error(request, "Error eliminant petició de l'ordre del dia")
        if request.user.is_superuser:
            messages.error(request, str(e))
    # Always return to the junta editor, even after a failure.
    return redirect('peticions.edit.junta', junta_id=junta_id)
#
# registered
#
@user_passes_test(lambda u: u.is_staff)
def like_issue(request, issue_id):
    """POST-only: record the user's like, flipping a previous dislike."""
    try:
        if request.method == "POST":
            issue_instance = Issue.objects.filter(id=issue_id)[0]
            if request.user not in issue_instance.likes.all():
                # A like replaces any earlier dislike by the same user.
                if request.user in issue_instance.dislikes.all():
                    issue_instance.dislikes.remove(request.user)
                issue_instance.likes.add(request.user)
                issue_instance.save()
    except Exception as e:
        messages.error(request, "Error fent like")
        if request.user.is_superuser:
            messages.error(request, str(e))
    return redirect('peticions.show.issue', issue_id=issue_id)
@user_passes_test(lambda u: u.is_staff)
def dislike_issue(request, issue_id):
try:
if request.method == "POST":
issue_instance = Issue.objects.filter(id=issue_id)[0]
if not request.user in issue_instance.dislikes.all():
if request.user in issue_instance.likes.all():
issue_instance.likes.remove(request.user)
issue_instance.dislikes.add(request.user)
else:
| |
# reponame: mihadyuk/gdal
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: gdalbuildvrt testing
# Author: <NAME> <even dot rouault @ mines-paris dot org>
#
###############################################################################
# Copyright (c) 2008-2013, <NAME> <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
import os
sys.path.append( '../pymod' )
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
import gdaltest
import test_cli_utilities
###############################################################################
def test_gdalbuildvrt_check():
    """Validate tmp/mosaic.vrt: projection, geotransform, raster size,
    band count and pixel checksum (gdaltest convention: 'success'/'fail')."""
    ds = gdal.Open('tmp/mosaic.vrt')
    if ds.GetProjectionRef().find('WGS 84') == -1:
        gdaltest.post_reason('Expected WGS 84\nGot : %s' % (ds.GetProjectionRef()) )
        return 'fail'
    gt = ds.GetGeoTransform()
    expected_gt = [ 2, 0.1, 0, 49, 0, -0.1 ]
    for i in range(6):
        # BUG FIX: abs() must wrap the difference, not the boolean.
        # The old abs(gt[i] - expected_gt[i] > 1e-5) took abs() of a
        # True/False value, so negative deviations were never detected.
        if abs(gt[i] - expected_gt[i]) > 1e-5:
            gdaltest.post_reason('Expected : %s\nGot : %s' % (expected_gt, gt) )
            return 'fail'
    if ds.RasterXSize != 20 or ds.RasterYSize != 20:
        gdaltest.post_reason('Wrong raster dimensions : %d x %d' % (ds.RasterXSize, ds.RasterYSize) )
        return 'fail'
    if ds.RasterCount != 1:
        gdaltest.post_reason('Wrong raster count : %d ' % (ds.RasterCount) )
        return 'fail'
    if ds.GetRasterBand(1).Checksum() != 3508:
        gdaltest.post_reason('Wrong checksum')
        return 'fail'
    return 'success'
###############################################################################
# Simple test
def test_gdalbuildvrt_1():
    """Create four 10x10 WGS84 tiles in a 2x2 layout and mosaic them."""
    if test_cli_utilities.get_gdalbuildvrt_path() is None:
        return 'skip'
    drv = gdal.GetDriverByName('GTiff')
    srs = osr.SpatialReference()
    srs.SetWellKnownGeogCS( 'WGS84' )
    wkt = srs.ExportToWkt()
    # (filename suffix, top-left x origin, top-left y origin, fill value)
    tiles = ((1, 2, 49, 0), (2, 3, 49, 63), (3, 2, 48, 127), (4, 3, 48, 255))
    for suffix, x_origin, y_origin, fill_value in tiles:
        ds = drv.Create('tmp/gdalbuildvrt%d.tif' % suffix, 10, 10, 1)
        ds.SetProjection( wkt )
        ds.SetGeoTransform( [ x_origin, 0.1, 0, y_origin, 0, -0.1 ] )
        ds.GetRasterBand(1).Fill(fill_value)
        ds = None
    (out, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalbuildvrt_path() + ' tmp/mosaic.vrt tmp/gdalbuildvrt1.tif tmp/gdalbuildvrt2.tif tmp/gdalbuildvrt3.tif tmp/gdalbuildvrt4.tif')
    if not (err is None or err == '') :
        gdaltest.post_reason('got error/warning')
        print(err)
        return 'fail'
    return test_gdalbuildvrt_check()
###############################################################################
# Test with tile index
def test_gdalbuildvrt_2():
    """Build the mosaic VRT from a gdaltindex tile index and validate it."""
    if test_cli_utilities.get_gdalbuildvrt_path() is None:
        return 'skip'
    if test_cli_utilities.get_gdaltindex_path() is None:
        return 'skip'

    # Remove leftovers from previous runs; a missing file is fine.
    # (Replaces four copy-pasted bare `except:` blocks, which also
    # swallowed KeyboardInterrupt/SystemExit.)
    for filename in ('tmp/tileindex.shp', 'tmp/tileindex.dbf',
                     'tmp/tileindex.shx', 'tmp/mosaic.vrt'):
        try:
            os.remove(filename)
        except OSError:
            pass

    gdaltest.runexternal(test_cli_utilities.get_gdaltindex_path() + ' tmp/tileindex.shp tmp/gdalbuildvrt1.tif tmp/gdalbuildvrt2.tif tmp/gdalbuildvrt3.tif tmp/gdalbuildvrt4.tif')
    gdaltest.runexternal(test_cli_utilities.get_gdalbuildvrt_path() + ' tmp/mosaic.vrt tmp/tileindex.shp')

    return test_gdalbuildvrt_check()
###############################################################################
# Test with file list
def test_gdalbuildvrt_3():
    """Build the mosaic VRT from an -input_file_list text file and validate it."""
    if test_cli_utilities.get_gdalbuildvrt_path() is None:
        return 'skip'

    # Use a context manager so the list file is closed (and therefore
    # flushed) before the external gdalbuildvrt process reads it; the
    # original `open(...).write(...)` relied on GC to close the handle.
    with open('tmp/filelist.txt', 'wt') as f:
        f.write('tmp/gdalbuildvrt1.tif\ntmp/gdalbuildvrt2.tif\ntmp/gdalbuildvrt3.tif\ntmp/gdalbuildvrt4.tif')

    gdaltest.runexternal(test_cli_utilities.get_gdalbuildvrt_path() + ' -input_file_list tmp/filelist.txt tmp/mosaic.vrt')

    return test_gdalbuildvrt_check()
###############################################################################
# Try adding a raster in another projection
def test_gdalbuildvrt_4():
    """Add a tile in another projection (WGS 72); the mosaic must be unaffected."""
    if test_cli_utilities.get_gdalbuildvrt_path() is None:
        return 'skip'

    # Create a fifth tile whose SRS differs from the four WGS84 tiles.
    wkt = 'GEOGCS[\"WGS 72\",DATUM[\"WGS_1972\"]]'
    tile = gdal.GetDriverByName('GTiff').Create('tmp/gdalbuildvrt5.tif', 10, 10, 1)
    tile.SetProjection( wkt )
    tile.SetGeoTransform( [ 47, 0.1, 0, 2, 0, -0.1 ] )
    tile = None

    gdaltest.runexternal(test_cli_utilities.get_gdalbuildvrt_path() + ' tmp/mosaic.vrt tmp/gdalbuildvrt1.tif tmp/gdalbuildvrt2.tif tmp/gdalbuildvrt3.tif tmp/gdalbuildvrt4.tif tmp/gdalbuildvrt5.tif')
    return test_gdalbuildvrt_check()
###############################################################################
# Try adding a raster with different band count
def test_gdalbuildvrt_5():
    """Add a tile with a different band count; the mosaic must be unaffected."""
    if test_cli_utilities.get_gdalbuildvrt_path() is None:
        return 'skip'

    srs = osr.SpatialReference()
    srs.SetWellKnownGeogCS( 'WGS84' )

    # Re-create tile 5 with two bands instead of the single band the
    # other tiles use.
    tile = gdal.GetDriverByName('GTiff').Create('tmp/gdalbuildvrt5.tif', 10, 10, 2)
    tile.SetProjection( srs.ExportToWkt() )
    tile.SetGeoTransform( [ 47, 0.1, 0, 2, 0, -0.1 ] )
    tile = None

    gdaltest.runexternal(test_cli_utilities.get_gdalbuildvrt_path() + ' tmp/mosaic.vrt tmp/gdalbuildvrt1.tif tmp/gdalbuildvrt2.tif tmp/gdalbuildvrt3.tif tmp/gdalbuildvrt4.tif tmp/gdalbuildvrt5.tif')
    return test_gdalbuildvrt_check()
###############################################################################
# Test -separate option
def test_gdalbuildvrt_6():
    """Test the -separate option: stack the four tiles as four bands."""
    if test_cli_utilities.get_gdalbuildvrt_path() is None:
        return 'skip'
    gdaltest.runexternal(test_cli_utilities.get_gdalbuildvrt_path() + ' -separate tmp/stacked.vrt tmp/gdalbuildvrt1.tif tmp/gdalbuildvrt2.tif tmp/gdalbuildvrt3.tif tmp/gdalbuildvrt4.tif')
    ds = gdal.Open('tmp/stacked.vrt')
    if ds.GetProjectionRef().find('WGS 84') == -1:
        gdaltest.post_reason('Expected WGS 84\nGot : %s' % (ds.GetProjectionRef()) )
        return 'fail'
    gt = ds.GetGeoTransform()
    expected_gt = [ 2, 0.1, 0, 49, 0, -0.1 ]
    for i in range(6):
        # BUG FIX: abs() must wrap the difference, not the comparison.
        # `abs(gt[i] - expected_gt[i] > 1e-5)` took abs of a boolean, so
        # negative deviations were never detected.
        if abs(gt[i] - expected_gt[i]) > 1e-5:
            gdaltest.post_reason('Expected : %s\nGot : %s' % (expected_gt, gt) )
            return 'fail'
    if ds.RasterXSize != 20 or ds.RasterYSize != 20:
        gdaltest.post_reason('Wrong raster dimensions : %d x %d' % (ds.RasterXSize, ds.RasterYSize) )
        return 'fail'
    if ds.RasterCount != 4:
        gdaltest.post_reason('Wrong raster count : %d ' % (ds.RasterCount) )
        return 'fail'
    if ds.GetRasterBand(1).Checksum() != 0:
        gdaltest.post_reason('Wrong checksum')
        return 'fail'
    return 'success'
###############################################################################
# Test source rasters with nodata
def test_gdalbuildvrt_7():
    """Test mosaicking of two overlapping sources that carry a nodata value:
    the VRT must composite the valid pixels of both."""
    if test_cli_utilities.get_gdalbuildvrt_path() is None:
        return 'skip'
    # First source: 20x10, 3-band UInt16 RGB; left half of band 1 set to 0xff.
    out_ds = gdal.GetDriverByName('GTiff').Create('tmp/vrtnull1.tif', 20, 10, 3, gdal.GDT_UInt16)
    out_ds.SetGeoTransform([2,0.1,0,49,0,-0.1])
    srs = osr.SpatialReference()
    srs.SetFromUserInput('EPSG:4326')
    out_ds.SetProjection(srs.ExportToWkt())
    out_ds.GetRasterBand(1).SetRasterColorInterpretation(gdal.GCI_RedBand)
    out_ds.GetRasterBand(2).SetRasterColorInterpretation(gdal.GCI_GreenBand)
    out_ds.GetRasterBand(3).SetRasterColorInterpretation(gdal.GCI_BlueBand)
    # NOTE(review): nodata is 256 while the written sample values are
    # 0/255 — presumably chosen so no written pixel matches; confirm.
    out_ds.GetRasterBand(1).SetNoDataValue(256)
    # Python 3 WriteRaster needs a bytes buffer; on Python 2 the
    # encode() path may fail and the plain str is used instead.
    try:
        ff = '\xff'.encode('latin1')
    except:
        ff = '\xff'
    out_ds.GetRasterBand(1).WriteRaster( 0, 0, 10, 10, ff, buf_type = gdal.GDT_Byte, buf_xsize = 1, buf_ysize = 1 )
    out_ds.GetRasterBand(2).WriteRaster( 0, 0, 10, 10, '\x00', buf_type = gdal.GDT_Byte, buf_xsize = 1, buf_ysize = 1 )
    out_ds.GetRasterBand(3).WriteRaster( 0, 0, 10, 10, '\x00', buf_type = gdal.GDT_Byte, buf_xsize = 1, buf_ysize = 1 )
    out_ds = None
    # Second source: same footprint, but the RIGHT half is written and
    # the green band carries the 0xff values.
    out_ds = gdal.GetDriverByName('GTiff').Create('tmp/vrtnull2.tif', 20, 10, 3, gdal.GDT_UInt16)
    out_ds.SetGeoTransform([2,0.1,0,49,0,-0.1])
    srs = osr.SpatialReference()
    srs.SetFromUserInput('EPSG:4326')
    out_ds.SetProjection(srs.ExportToWkt())
    out_ds.GetRasterBand(1).SetRasterColorInterpretation(gdal.GCI_RedBand)
    out_ds.GetRasterBand(2).SetRasterColorInterpretation(gdal.GCI_GreenBand)
    out_ds.GetRasterBand(3).SetRasterColorInterpretation(gdal.GCI_BlueBand)
    out_ds.GetRasterBand(1).SetNoDataValue(256)
    out_ds.GetRasterBand(1).WriteRaster( 10, 0, 10, 10, '\x00', buf_type = gdal.GDT_Byte, buf_xsize = 1, buf_ysize = 1 )
    out_ds.GetRasterBand(2).WriteRaster( 10, 0, 10, 10, ff, buf_type = gdal.GDT_Byte, buf_xsize = 1, buf_ysize = 1 )
    out_ds.GetRasterBand(3).WriteRaster( 10, 0, 10, 10, '\x00', buf_type = gdal.GDT_Byte, buf_xsize = 1, buf_ysize = 1 )
    out_ds = None
    gdaltest.runexternal(test_cli_utilities.get_gdalbuildvrt_path() + ' tmp/gdalbuildvrt7.vrt tmp/vrtnull1.tif tmp/vrtnull2.tif')
    ds = gdal.Open('tmp/gdalbuildvrt7.vrt')
    # Reference checksums: band 1 from source 1, band 2 from source 2,
    # band 3 all zero.
    if ds.GetRasterBand(1).Checksum() != 1217:
        gdaltest.post_reason('Wrong checksum')
        return 'fail'
    if ds.GetRasterBand(2).Checksum() != 1218:
        gdaltest.post_reason('Wrong checksum')
        return 'fail'
    if ds.GetRasterBand(3).Checksum() != 0:
        gdaltest.post_reason('Wrong checksum')
        return 'fail'
    ds = None
    return 'success'
###############################################################################
# Test -tr option
def test_gdalbuildvrt_8():
    """Test the -tr option (explicit target resolution)."""
    if test_cli_utilities.get_gdalbuildvrt_path() is None:
        return 'skip'
    gdaltest.runexternal(test_cli_utilities.get_gdalbuildvrt_path() + ' -tr 0.05 0.05 tmp/mosaic2.vrt tmp/gdalbuildvrt1.tif tmp/gdalbuildvrt2.tif tmp/gdalbuildvrt3.tif tmp/gdalbuildvrt4.tif')
    ds = gdal.Open('tmp/mosaic2.vrt')
    gt = ds.GetGeoTransform()
    expected_gt = [ 2, 0.05, 0, 49, 0, -0.05 ]
    for i in range(6):
        # BUG FIX: abs() must wrap the difference, not the comparison.
        # `abs(gt[i] - expected_gt[i] > 1e-5)` took abs of a boolean, so
        # negative deviations were never detected.
        if abs(gt[i] - expected_gt[i]) > 1e-5:
            gdaltest.post_reason('Expected : %s\nGot : %s' % (expected_gt, gt) )
            return 'fail'
    if ds.RasterXSize != 40 or ds.RasterYSize != 40:
        gdaltest.post_reason('Wrong raster dimensions : %d x %d' % (ds.RasterXSize, ds.RasterYSize) )
        return 'fail'
    # Resample back to 0.1 degree and reuse the reference checks.
    gdaltest.runexternal(test_cli_utilities.get_gdalbuildvrt_path() + ' -tr 0.1 0.1 tmp/mosaic.vrt tmp/mosaic2.vrt')
    return test_gdalbuildvrt_check()
###############################################################################
# Test -te option
def test_gdalbuildvrt_9():
    """Test the -te option (explicit target extent)."""
    if test_cli_utilities.get_gdalbuildvrt_path() is None:
        return 'skip'
    gdaltest.runexternal(test_cli_utilities.get_gdalbuildvrt_path() + ' -te 1 46 5 50 tmp/mosaic2.vrt tmp/gdalbuildvrt1.tif tmp/gdalbuildvrt2.tif tmp/gdalbuildvrt3.tif tmp/gdalbuildvrt4.tif')
    ds = gdal.Open('tmp/mosaic2.vrt')
    gt = ds.GetGeoTransform()
    expected_gt = [ 1, 0.1, 0, 50, 0, -0.1 ]
    for i in range(6):
        # BUG FIX: abs() must wrap the difference, not the comparison.
        # `abs(gt[i] - expected_gt[i] > 1e-5)` took abs of a boolean, so
        # negative deviations were never detected.
        if abs(gt[i] - expected_gt[i]) > 1e-5:
            gdaltest.post_reason('Expected : %s\nGot : %s' % (expected_gt, gt) )
            return 'fail'
    if ds.RasterXSize != 40 or ds.RasterYSize != 40:
        gdaltest.post_reason('Wrong raster dimensions : %d x %d' % (ds.RasterXSize, ds.RasterYSize) )
        return 'fail'
    # Crop back to the original extent and reuse the reference checks.
    gdaltest.runexternal(test_cli_utilities.get_gdalbuildvrt_path() + ' -te 2 47 4 49 tmp/mosaic.vrt tmp/mosaic2.vrt')
    return test_gdalbuildvrt_check()
###############################################################################
# Test explicit nodata setting (#3254)
def test_gdalbuildvrt_10():
    """Test explicit nodata setting via -srcnodata (#3254) with 1-bit
    MINISWHITE sources."""
    if test_cli_utilities.get_gdalbuildvrt_path() is None:
        return 'skip'
    # First source: 10x10 1-bit tile with a single pixel set at (1, 1).
    out_ds = gdal.GetDriverByName('GTiff').Create('tmp/test_gdalbuildvrt_10_1.tif', 10, 10, 1, gdal.GDT_Byte, options = ['NBITS=1', 'PHOTOMETRIC=MINISWHITE'])
    out_ds.SetGeoTransform([2,0.1,0,49,0,-0.1])
    srs = osr.SpatialReference()
    srs.SetFromUserInput('EPSG:4326')
    out_ds.SetProjection(srs.ExportToWkt())
    out_ds.GetRasterBand(1).WriteRaster( 1, 1, 3, 3, '\x01', buf_type = gdal.GDT_Byte, buf_xsize = 1, buf_ysize = 1 )
    out_ds = None
    # Second source: same footprint, single pixel set at (6, 6).
    out_ds = gdal.GetDriverByName('GTiff').Create('tmp/test_gdalbuildvrt_10_2.tif', 10, 10, 1, gdal.GDT_Byte, options = ['NBITS=1', 'PHOTOMETRIC=MINISWHITE'])
    out_ds.SetGeoTransform([2,0.1,0,49,0,-0.1])
    srs = osr.SpatialReference()
    srs.SetFromUserInput('EPSG:4326')
    out_ds.SetProjection(srs.ExportToWkt())
    out_ds.GetRasterBand(1).WriteRaster( 6, 6, 3, 3, '\x01', buf_type = gdal.GDT_Byte, buf_xsize = 1, buf_ysize = 1 )
    out_ds = None
    # With -srcnodata 0, the zero pixels of each source must not mask
    # the set pixel of the other source.
    gdaltest.runexternal(test_cli_utilities.get_gdalbuildvrt_path() + ' -srcnodata 0 tmp/gdalbuildvrt10.vrt tmp/test_gdalbuildvrt_10_1.tif tmp/test_gdalbuildvrt_10_2.tif')
    ds = gdal.Open('tmp/gdalbuildvrt10.vrt')
    # Reference checksum for the composited band.
    if ds.GetRasterBand(1).Checksum() != 18:
        print(ds.GetRasterBand(1).Checksum())
        gdaltest.post_reason('Wrong checksum')
        return 'fail'
    ds = None
    return 'success'
###############################################################################
# Test that we can stack ungeoreference single band images with -separate (#3432)
def test_gdalbuildvrt_11():
if test_cli_utilities.get_gdalbuildvrt_path() is None:
return 'skip'
out_ds = gdal.GetDriverByName('GTiff').Create('tmp/test_gdalbuildvrt_11_1.tif', 10, 10, 1)
out_ds.GetRasterBand(1).Fill(255)
cs1 = out_ds.GetRasterBand(1).Checksum()
out_ds = None
out_ds = | |
# coding: utf-8
import numpy as np
from numpy import matrix as mat
import cv2
import os
import math
def undistort(img,  # image data
              fx, fy, cx, cy,  # camera intrinsics
              k1, k2,  # radial distortion parameters
              p1=None, p2=None,  # tangential distortion parameters
              radial_ud_only=True):
    """
    Undistort an image with the polynomial distortion model.

    Every destination pixel is back-projected to normalized camera
    coordinates, pushed through the distortion model, re-projected, and
    the source pixel is sampled with nearest-neighbour rounding.
    Works on 2-D (grayscale) and 3-D (BGR) uint8 arrays; returns None
    on invalid input.
    """
    if img is None:
        print('[Err]: empty image.')
        return

    is_bgr = len(img.shape) == 3
    if is_bgr:
        H, W, C = img.shape
    elif len(img.shape) == 2:
        H, W = img.shape
    else:
        print('[Err]: image format wrong!')
        return

    result = np.zeros_like(img, dtype=np.uint8)

    for row in range(H):
        for col in range(W):  # (col, row) are destination pixel coords
            # Pixel -> normalized camera coordinates.
            xn = (col - cx) / fx
            yn = (row - cy) / fy
            r2 = (xn * xn) + (yn * yn)
            r4 = r2 * r2
            radial = 1.0 + k1 * r2 + k2 * r4
            if radial_ud_only:  # radial term only
                xd = xn * radial
                yd = yn * radial
            else:  # radial + tangential terms
                xd = xn * radial + \
                    2.0 * p1 * xn * yn + p2 * (r2 + 2.0 * xn * xn)
                yd = yn * radial + \
                    p1 * (r2 + 2.0 * yn * yn) + 2.0 * p2 * xn * yn
            # Normalized -> pixel coordinates, nearest-neighbour rounding.
            # @Todo: using bilinear interpolation...
            src_u = int(fx * xd + cx + 0.5)
            src_v = int(fy * yd + cy + 0.5)
            # Source samples that fall outside the image become black.
            if src_u < 0 or src_u >= W or src_v < 0 or src_v >= H:
                if is_bgr:
                    result[row, col, :] = 0
                else:
                    result[row, col] = 0
            else:
                if is_bgr:
                    result[row, col, :] = img[src_v, src_u, :]  # y, x
                else:
                    result[row, col] = img[src_v, src_u]  # y, x
    return result.astype('uint8')
def test_undistort_img():
    """Optimize k1/k2 with Levenberg-Marquardt, then undistort the sample image."""
    img_path = './distorted.png'

    # Camera intrinsics.
    fx = 458.654
    fy = 457.296
    cx = 367.215
    cy = 248.375
    camera_intrinsics = [fx, fy, cx, cy]

    # Reference distortion coefficients.
    k1 = -0.28340811
    k2 = 0.07395907
    p1 = 0.00019359
    p2 = 1.76187114e-05

    # Initial guess for the parameters to be optimized (k1, k2).
    params = np.array([[-0.1],
                       [0.1]])

    # ---------- Run the LM optimization (params is updated in place).
    LM_Optimize(params)
    k1 = params[0][0]
    k2 = params[1][0]
    # ----------

    undistort_img(img_path, camera_intrinsics, k1, k2, p1, p2)
def undistort_img(img_path,
                  camera_intrinsics,
                  k1, k2, p1=None, p2=None,
                  is_color=True):
    """
    Load an image from disk, undistort it with the given camera
    intrinsics and distortion coefficients, and display both versions.
    """
    fx = camera_intrinsics[0]
    fy = camera_intrinsics[1]
    cx = camera_intrinsics[2]
    cy = camera_intrinsics[3]

    if not os.path.isfile(img_path):
        print('[Err]: invalid image path.')
        return

    # Keep the raw file for the side-by-side comparison window.
    img_orig = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
    read_flag = cv2.IMREAD_COLOR if is_color else cv2.IMREAD_GRAYSCALE
    img = cv2.imread(img_path, read_flag)
    if img is None:
        print('[Err]: empty image.')
        return

    # ---------- Apply the distortion-model correction.
    corrected = undistort(img,
                          fx, fy, cx, cy,
                          k1, k2, p1, p2)
    # ----------

    cv2.imshow('origin', img_orig)
    cv2.imshow('undistort', corrected)
    cv2.waitKey()
def show_points_of_curve():
    """
    Visualize the sampled curve points, their centroid, and the chord
    between the two endpoints, drawn on the distorted image.
    """
    pts_on_curve = [
        [546, 20], [545, 40], [543, 83],
        [536, 159], [535, 170], [534, 180],
        [531, 200], [530, 211], [529, 218],
        [526, 236], [524, 253], [521, 269],
        [519, 281], [517, 293], [515, 302],
        [514, 310], [512, 320], [510, 329],
        [508, 341], [506, 353], [505, 357]
    ]
    print('Total {:d} points on the curve.'.format(len(pts_on_curve)))

    img_path = './distorted.png'
    if not os.path.isfile(img_path):
        print('[Err]: invalid image path.')
        return
    img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
    if img is None:
        print('[Err]: empty image.')
        return

    # Draw each sample point (green) while accumulating the centroid.
    sum_x = 0.0
    sum_y = 0.0
    for pt in pts_on_curve:
        sum_x += pt[0]
        sum_y += pt[1]
        cv2.circle(img, tuple(pt), 5, (0, 255, 0), -1)
    n_pts = float(len(pts_on_curve))
    centroid = (int(sum_x / n_pts + 0.5), int(sum_y / n_pts + 0.5))
    cv2.circle(img, centroid, 7, (0, 0, 255), -1)  # centroid in red

    # Chord between the first and last sample point (blue).
    cv2.line(img, tuple(pts_on_curve[0]), tuple(
        pts_on_curve[-1]), (255, 0, 0), 2)

    cv2.imshow('Curve', img)
    cv2.waitKey()
def line_equation(first_x, first_y, second_x, second_y):
    """
    Line through two points in implicit form A*x + B*y + C = 0.

    Returns:
        (A, B, C) with A = dy, B = -dx, C = x2*y1 - x1*y2.
    """
    delta_y = second_y - first_y
    delta_x = second_x - first_x
    cross = second_x * first_y - first_x * second_y
    return delta_y, -delta_x, cross
def dist_of_pt_to_line(pt, A, B, C):
    """
    Signed distance from a 2D point to the line A*x + B*y + C = 0.

    The sign is deliberately kept (no abs): a signed residual stays
    differentiable for the downstream optimization.
    """
    numerator = A * pt[0] + B * pt[1] + C
    return -numerator / math.sqrt(A * A + B * B)
def undistort_point(u, v,
                    fx, fy, cx, cy,
                    k1, k2, p1=None, p2=None,
                    radial_ud_only=True):
    """
    Apply the polynomial distortion model to pixel coordinates (u, v).

    Accepts scalars or numpy arrays (vectorized arithmetic only).
    Returns [u_corrected, v_corrected] as sub-pixel (float) coordinates.
    """
    # convert to camera coordinates by camera intrinsic parameters
    x1 = (u - cx) / fx
    y1 = (v - cy) / fy
    # compute r^2 and r^4
    r_square = (x1 * x1) + (y1 * y1)
    r_quadric = r_square * r_square
    if radial_ud_only:  # do radial undistortion only
        x2 = x1 * (1.0 + k1 * r_square + k2 * r_quadric)
        y2 = y1 * (1.0 + k1 * r_square + k2 * r_quadric)
    else:  # do radial undistortion and tangential undistortion
        x2 = x1 * (1.0 + k1 * r_square + k2 * r_quadric) + \
            2.0 * p1 * x1 * y1 + p2 * (r_square + 2.0 * x1 * x1)
        # BUG FIX: the last term used the undefined name `y` instead of
        # `y1`, raising NameError whenever the tangential path ran.
        y2 = y1 * (1.0 + k1 * r_square + k2 * r_quadric) + \
            p1 * (r_square + 2.0 * y1 * y1) + 2.0 * p2 * x1 * y1
    # convert back to pixel coordinates (sub-pixel precision kept)
    u_corrected = fx * x2 + cx
    v_corrected = fy * y2 + cy
    return [u_corrected, v_corrected]
# the function
def test_undistort_pts_on_curve():
    """
    Undistort the sampled curve points with the reference k1/k2.
    """
    # Camera intrinsics.
    fx = 458.654
    fy = 457.296
    cx = 367.215
    cy = 248.375
    # Reference radial distortion coefficients.
    k1 = -0.28340811
    k2 = 0.07395907
    k1k2 = np.array([[k1],
                     [k2]])
    # BUG FIX: the point list must be a numpy array — the column
    # indexing pts_orig[:, 0] / pts_orig[:, 1] below raises TypeError
    # on a plain Python list.
    pts_orig = np.array([
        [546, 20], [545, 40], [543, 83],
        [536, 159], [535, 170], [534, 180],
        [531, 200], [530, 211], [529, 218],
        [526, 236], [524, 253], [521, 269],
        [519, 281], [517, 293], [515, 302],
        [514, 310], [512, 320], [510, 329],
        [508, 341], [506, 353], [505, 357]
    ])
    # pts_corrected is [u_array, v_array] of undistorted coordinates.
    pts_corrected = undistort_point(
        pts_orig[:, 0], pts_orig[:, 1],
        fx, fy, cx, cy,
        k1k2[0][0], k1k2[1][0]
    )
    img_path = './distorted.png'
    img_orig = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
def Func(fx, fy, cx, cy, k1k2, input_list):
    """
    Residual function for the LM optimization.

    For each point set in input_list: undistort the points, then take
    the signed distance from their centroid to the straight line
    through the two undistorted endpoints (zero when the curve has
    been perfectly straightened). Returns an (N, 1) residual vector.
    """
    ret = np.zeros(len(input_list))
    for i, input_i in enumerate(input_list):
        # numpy array so undistort_point vectorizes over all points
        pts_orig = np.array(input_i)
        # pts_corrected is [u_array, v_array]
        pts_corrected = undistort_point(
            pts_orig[:, 0], pts_orig[:, 1],
            fx, fy, cx, cy,
            k1k2[0][0], k1k2[1][0]
        )
        # centroid of the undistorted points: [mean_u, mean_v]
        centroid = np.sum(pts_corrected, axis=1)  # get sum by column
        centroid /= float(pts_orig.shape[0])
        # Line through the two undistorted endpoints.
        # BUG FIX: pts_corrected is [u_array, v_array], so the first
        # endpoint is (pts_corrected[0][0], pts_corrected[1][0]) and the
        # last is (pts_corrected[0][-1], pts_corrected[1][-1]); the
        # original passed (u[0], u[1]) and (v[0], v[1]) instead.
        A, B, C = line_equation(pts_corrected[0][0], pts_corrected[1][0],
                                pts_corrected[0][-1], pts_corrected[1][-1])
        # loss: signed centroid-to-line distance
        dist = dist_of_pt_to_line(centroid, A, B, C)
        ret[i] = dist
    ret = np.reshape(ret, (-1, 1))
    return ret
def Deriv(fx, fy, cx, cy,
          k1k2,
          input_list,
          i):
    """
    Numerical derivative of Func w.r.t. the i-th entry of k1k2,
    computed with a symmetric (central) finite difference.
    """
    below = k1k2.copy()
    above = k1k2.copy()
    below[i, 0] -= 0.000001
    above[i, 0] += 0.000001
    f_below = Func(fx, fy, cx, cy, below, input_list)
    f_above = Func(fx, fy, cx, cy, above, input_list)
    # slope over the 2e-6 wide interval
    return (f_above - f_below) * 1.0 / (0.000002)
def test_func():
    """Evaluate the residual once with the reference k1/k2 and print it."""
    curve_pts = [
        [546, 20], [545, 40], [543, 83],
        [536, 159], [535, 170], [534, 180],
        [531, 200], [530, 211], [529, 218],
        [526, 236], [524, 253], [521, 269],
        [519, 281], [517, 293], [515, 302],
        [514, 310], [512, 320], [510, 329],
        [508, 341], [506, 353], [505, 357]
    ]
    input_list = [curve_pts]

    # Camera intrinsics.
    fx = 458.654
    fy = 457.296
    cx = 367.215
    cy = 248.375

    # Reference radial distortion coefficients.
    k1 = -0.28340811
    k2 = 0.07395907
    k1k2 = np.array([[k1],
                     [k2]])

    dists = Func(fx, fy, cx, cy, k1k2, input_list)  # N×1 residual vector
    print('Dist: {:.3f}'.format(dists[0][0]))
def LM_Optimize(params, max_iter=100):
"""
"""
# Known parameters(camera intrinsics)
fx = 458.654
fy = 457.296
cx = 367.215
cy = 248.375
# Input
pts_orig = [
[546, 20], [545, 40], | |
2*m.b42*m.b139 - 2*m.b42*m.b140 -
2*m.b42*m.b143 - 2*m.b42*m.b144 - 2*m.b42*m.b146 - 2*m.b42*m.b147 + 2*m.b42*m.b149 + 2*m.b42*
m.b150 - 2*m.b42*m.b152 + 2*m.b42*m.b153 + 2*m.b42*m.b155 + 2*m.b42*m.b156 + 2*m.b42*m.b157 +
2*m.b42*m.b158 + 2*m.b42*m.b160 + 2*m.b42*m.b161 + 2*m.b42*m.b162 + 2*m.b42*m.b163 - 2*m.b42*
m.b165 + 2*m.b42*m.b166 - 2*m.b42*m.b169 - 2*m.b42*m.b170 - 2*m.b42*m.b172 - 2*m.b42*m.b173 -
2*m.b42*m.b174 - 2*m.b42*m.b176 - 2*m.b42*m.b177 + 2*m.b42*m.b178 + 2*m.b42*m.b180 + 2*m.b42*
m.b181 - 2*m.b42*m.b182 - 2*m.b43*m.b94 - 4*m.b43 + 2*m.b43*m.b96 + 2*m.b43*m.b98 + 2*m.b43*
m.b100 - 2*m.b43*m.b101 + 2*m.b43*m.b103 + 2*m.b43*m.b104 + 2*m.b43*m.b105 + 2*m.b43*m.b106 +
2*m.b43*m.b108 + 2*m.b43*m.b110 - 2*m.b43*m.b111 + 2*m.b43*m.b114 + 2*m.b43*m.b116 - 2*m.b43*
m.b130 + 2*m.b43*m.b131 + 2*m.b43*m.b132 - 2*m.b43*m.b135 - 2*m.b43*m.b139 + 2*m.b43*m.b140 +
2*m.b43*m.b141 - 2*m.b43*m.b144 - 2*m.b43*m.b147 + 2*m.b43*m.b148 + 2*m.b43*m.b149 - 2*m.b43*
m.b152 + 2*m.b43*m.b155 + 2*m.b43*m.b156 + 2*m.b43*m.b157 + 2*m.b43*m.b158 + 2*m.b43*m.b160 +
2*m.b43*m.b161 - 2*m.b43*m.b163 - 2*m.b43*m.b164 - 2*m.b43*m.b165 - 2*m.b43*m.b166 - 2*m.b43*
m.b167 - 2*m.b43*m.b168 - 2*m.b43*m.b169 - 2*m.b43*m.b170 - 2*m.b43*m.b171 - 2*m.b43*m.b172 -
2*m.b43*m.b174 - 2*m.b43*m.b177 + 2*m.b43*m.b180 + 2*m.b43*m.b181 - 2*m.b44*m.b92 - 12*m.b44
- 2*m.b44*m.b94 - 2*m.b44*m.b95 + 2*m.b44*m.b96 - 2*m.b44*m.b97 + 2*m.b44*m.b98 - 2*m.b44*
m.b99 + 2*m.b44*m.b100 - 2*m.b44*m.b101 - 2*m.b44*m.b103 + 2*m.b44*m.b104 - 2*m.b44*m.b105 + 2
*m.b44*m.b106 - 2*m.b44*m.b107 + 2*m.b44*m.b108 - 2*m.b44*m.b109 + 2*m.b44*m.b110 - 2*m.b44*
m.b111 - 2*m.b44*m.b113 + 2*m.b44*m.b114 + 2*m.b44*m.b116 + 2*m.b44*m.b117 + 2*m.b44*m.b118 +
2*m.b44*m.b119 + 2*m.b44*m.b121 + 2*m.b44*m.b122 + 2*m.b44*m.b123 + 2*m.b44*m.b124 + 2*m.b44*
m.b126 + 2*m.b44*m.b127 - 2*m.b44*m.b130 - 2*m.b44*m.b135 + 2*m.b44*m.b137 - 2*m.b44*m.b139 -
2*m.b44*m.b144 + 2*m.b44*m.b146 - 2*m.b44*m.b147 - 2*m.b44*m.b152 + 2*m.b44*m.b154 + 2*m.b44*
m.b155 + 2*m.b44*m.b156 + 2*m.b44*m.b157 + 2*m.b44*m.b158 + 2*m.b44*m.b160 + 2*m.b44*m.b161 -
2*m.b44*m.b165 + 2*m.b44*m.b167 - 2*m.b44*m.b170 + 2*m.b44*m.b172 - 2*m.b44*m.b174 + 2*m.b44*
m.b176 - 2*m.b44*m.b177 + 2*m.b44*m.b179 + 2*m.b44*m.b180 + 2*m.b44*m.b181 + 2*m.b44*m.b182 -
2*m.b45*m.b92 + 2*m.b45 - 2*m.b45*m.b94 + 2*m.b45*m.b96 + 2*m.b45*m.b98 - 2*m.b45*m.b99 + 2*
m.b45*m.b100 - 2*m.b45*m.b103 + 2*m.b45*m.b104 - 2*m.b45*m.b105 + 2*m.b45*m.b106 + 2*m.b45*
m.b108 - 2*m.b45*m.b109 + 2*m.b45*m.b110 - 2*m.b45*m.b113 + 2*m.b45*m.b114 - 2*m.b45*m.b115 +
2*m.b45*m.b116 + 2*m.b45*m.b117 + 2*m.b45*m.b118 + 2*m.b45*m.b119 + 2*m.b45*m.b121 + 2*m.b45*
m.b122 + 2*m.b45*m.b123 + 2*m.b45*m.b124 + 2*m.b45*m.b126 + 2*m.b45*m.b127 - 2*m.b45*m.b129 -
2*m.b45*m.b131 - 2*m.b45*m.b132 - 2*m.b45*m.b134 - 2*m.b45*m.b136 - 2*m.b45*m.b137 - 2*m.b45*
m.b138 - 2*m.b45*m.b140 - 2*m.b45*m.b141 - 2*m.b45*m.b143 - 2*m.b45*m.b145 - 2*m.b45*m.b146 +
2*m.b45*m.b150 + 2*m.b45*m.b163 + 2*m.b45*m.b168 - 2*m.b45*m.b173 - 2*m.b45*m.b175 - 2*m.b45*
m.b176 - 2*m.b46*m.b92 - 14*m.b46 - 2*m.b46*m.b94 + 2*m.b46*m.b95 + 2*m.b46*m.b96 + 2*m.b46*
m.b98 + 2*m.b46*m.b99 + 2*m.b46*m.b100 + 2*m.b46*m.b104 + 2*m.b46*m.b105 + 2*m.b46*m.b106 + 2*
m.b46*m.b108 + 2*m.b46*m.b110 + 2*m.b46*m.b113 + 2*m.b46*m.b114 + 2*m.b46*m.b116 + 2*m.b46*
m.b117 + 2*m.b46*m.b118 + 2*m.b46*m.b119 + 2*m.b46*m.b121 + 2*m.b46*m.b122 + 2*m.b46*m.b123 +
2*m.b46*m.b124 + 2*m.b46*m.b126 + 2*m.b46*m.b127 - 2*m.b46*m.b128 - 2*m.b46*m.b131 - 2*m.b46*
m.b133 - 2*m.b46*m.b134 - 2*m.b46*m.b137 + 2*m.b46*m.b138 + 2*m.b46*m.b141 + 2*m.b46*m.b145 -
2*m.b46*m.b148 - 2*m.b46*m.b150 - 2*m.b46*m.b151 - 2*m.b46*m.b154 + 2*m.b46*m.b162 + 2*m.b46*
m.b166 - 2*m.b46*m.b168 - 2*m.b46*m.b169 - 2*m.b46*m.b172 + 2*m.b46*m.b175 + 2*m.b46*m.b178 -
2*m.b46*m.b182 - 2*m.b47*m.b92 + 9*m.b47 - 2*m.b47*m.b94 - 2*m.b47*m.b95 + 2*m.b47*m.b96 + 2*
m.b47*m.b98 - 2*m.b47*m.b99 + 2*m.b47*m.b100 - 2*m.b47*m.b101 + 2*m.b47*m.b102 - 2*m.b47*
m.b103 + 2*m.b47*m.b104 - 2*m.b47*m.b105 + 2*m.b47*m.b106 - 2*m.b47*m.b107 + 2*m.b47*m.b108 -
2*m.b47*m.b109 + 2*m.b47*m.b110 - 2*m.b47*m.b111 + 2*m.b47*m.b112 - 2*m.b47*m.b113 + 2*m.b47*
m.b114 - 2*m.b47*m.b115 + 2*m.b47*m.b117 + 2*m.b47*m.b118 + 2*m.b47*m.b119 + 2*m.b47*m.b120 +
2*m.b47*m.b121 + 2*m.b47*m.b122 + 2*m.b47*m.b123 + 2*m.b47*m.b124 + 2*m.b47*m.b125 + 2*m.b47*
m.b126 + 2*m.b47*m.b128 - 2*m.b47*m.b137 - 2*m.b47*m.b138 - 2*m.b47*m.b139 - 2*m.b47*m.b140 -
2*m.b47*m.b141 - 2*m.b47*m.b142 - 2*m.b47*m.b143 - 2*m.b47*m.b144 - 2*m.b47*m.b145 - 2*m.b47*
m.b146 - 2*m.b47*m.b154 - 2*m.b47*m.b161 - 2*m.b47*m.b167 - 2*m.b47*m.b172 - 2*m.b47*m.b176 -
2*m.b47*m.b179 - 2*m.b47*m.b181 - 2*m.b47*m.b182 - 2*m.b48*m.b92 - 7*m.b48 + 2*m.b48*m.b97 + 2
*m.b48*m.b99 + 2*m.b48*m.b101 + 2*m.b48*m.b107 + 2*m.b48*m.b109 + 2*m.b48*m.b117 + 2*m.b48*
m.b118 + 2*m.b48*m.b119 + 2*m.b48*m.b120 + 2*m.b48*m.b121 + 2*m.b48*m.b122 + 2*m.b48*m.b123 +
2*m.b48*m.b124 + 2*m.b48*m.b125 + 2*m.b48*m.b126 + 2*m.b48*m.b128 + 2*m.b48*m.b129 + 2*m.b48*
m.b130 + 2*m.b48*m.b133 + 2*m.b48*m.b134 - 2*m.b48*m.b140 - 2*m.b48*m.b141 - 2*m.b48*m.b144 -
2*m.b48*m.b145 - 2*m.b48*m.b148 - 2*m.b48*m.b149 - 2*m.b48*m.b152 - 2*m.b48*m.b153 - 2*m.b48*
m.b155 - 2*m.b48*m.b156 - 2*m.b48*m.b159 - 2*m.b48*m.b160 + 2*m.b48*m.b163 + 2*m.b48*m.b164 +
2*m.b48*m.b168 + 2*m.b48*m.b169 - 2*m.b48*m.b174 - 2*m.b48*m.b175 - 2*m.b48*m.b177 - 2*m.b48*
m.b178 - 2*m.b49*m.b94 - m.b49 + 2*m.b49*m.b96 + 2*m.b49*m.b98 - 2*m.b49*m.b99 + 2*m.b49*
m.b100 - 2*m.b49*m.b101 + 2*m.b49*m.b102 + 2*m.b49*m.b104 + 2*m.b49*m.b106 + 2*m.b49*m.b108 -
2*m.b49*m.b109 + 2*m.b49*m.b110 - 2*m.b49*m.b111 + 2*m.b49*m.b112 + 2*m.b49*m.b114 - 2*m.b49*
m.b129 - 2*m.b49*m.b130 - 2*m.b49*m.b134 - 2*m.b49*m.b135 - 2*m.b49*m.b138 - 2*m.b49*m.b139 -
2*m.b49*m.b143 - 2*m.b49*m.b144 + 2*m.b49*m.b148 + 2*m.b49*m.b149 + 2*m.b49*m.b150 + 2*m.b49*
m.b153 + 2*m.b49*m.b155 + 2*m.b49*m.b156 + 2*m.b49*m.b157 + 2*m.b49*m.b160 - 2*m.b49*m.b164 -
2*m.b49*m.b165 - 2*m.b49*m.b169 - 2*m.b49*m.b170 - 2*m.b49*m.b173 - 2*m.b49*m.b174 + 2*m.b49*
m.b178 + 2*m.b49*m.b180 + 2*m.b50*m.b95 + 5*m.b50 + 2*m.b50*m.b101 + 2*m.b50*m.b105 + 2*m.b50*
m.b109 + 2*m.b50*m.b111 - 2*m.b50*m.b115 - 2*m.b50*m.b128 - 2*m.b50*m.b129 - 2*m.b50*m.b131 -
2*m.b50*m.b133 - 2*m.b50*m.b136 - 2*m.b50*m.b137 + 2*m.b50*m.b139 + 2*m.b50*m.b141 + 2*m.b50*
m.b143 + 2*m.b50*m.b144 - 2*m.b50*m.b146 + 2*m.b50*m.b147 + 2*m.b50*m.b149 + 2*m.b50*m.b151 +
2*m.b50*m.b152 - 2*m.b50*m.b154 - 2*m.b50*m.b155 - 2*m.b50*m.b157 - 2*m.b50*m.b160 - 2*m.b50*
m.b161 + 2*m.b50*m.b162 + 2*m.b50*m.b164 + 2*m.b50*m.b165 - 2*m.b50*m.b167 - 2*m.b50*m.b168 -
2*m.b50*m.b171 - 2*m.b50*m.b172 + 2*m.b50*m.b173 + 2*m.b50*m.b174 - 2*m.b50*m.b176 - 2*m.b50*
m.b178 - 2*m.b50*m.b179 - 2*m.b50*m.b180 - 2*m.b50*m.b181 - 2*m.b50*m.b182 - 2*m.b51*m.b94 +
10*m.b51 + 2*m.b51*m.b96 + 2*m.b51*m.b98 - 2*m.b51*m.b99 + 2*m.b51*m.b100 - 2*m.b51*m.b101 + 2
*m.b51*m.b102 - 2*m.b51*m.b103 + 2*m.b51*m.b104 + 2*m.b51*m.b106 + 2*m.b51*m.b108 - 2*m.b51*
m.b109 + 2*m.b51*m.b110 - 2*m.b51*m.b111 + 2*m.b51*m.b112 + 2*m.b51*m.b114 - 2*m.b51*m.b115 -
2*m.b51*m.b129 - 2*m.b51*m.b130 - 2*m.b51*m.b131 - 2*m.b51*m.b134 - 2*m.b51*m.b135 - 2*m.b51*
m.b137 - 2*m.b51*m.b138 - 2*m.b51*m.b139 - 2*m.b51*m.b140 - 2*m.b51*m.b143 - 2*m.b51*m.b144 -
2*m.b51*m.b146 + 2*m.b51*m.b149 + 2*m.b51*m.b150 + 2*m.b51*m.b153 - 2*m.b51*m.b154 + 2*m.b51*
m.b156 + 2*m.b51*m.b157 + 2*m.b51*m.b160 - 2*m.b51*m.b161 + 2*m.b51*m.b162 + 2*m.b51*m.b163 +
2*m.b51*m.b166 - 2*m.b51*m.b167 - 2*m.b51*m.b169 - 2*m.b51*m.b170 - 2*m.b51*m.b172 - 2*m.b51*
m.b173 - 2*m.b51*m.b174 - 2*m.b51*m.b176 + 2*m.b51*m.b178 - 2*m.b51*m.b179 + 2*m.b51*m.b180 -
2*m.b51*m.b181 - 2*m.b51*m.b182 - 2*m.b52*m.b94 + 2*m.b52*m.b96 + 2*m.b52*m.b98 + 2*m.b52*
m.b100 + 2*m.b52*m.b102 + 2*m.b52*m.b103 + 2*m.b52*m.b104 + 2*m.b52*m.b105 + 2*m.b52*m.b106 +
2*m.b52*m.b108 + 2*m.b52*m.b110 + 2*m.b52*m.b112 + 2*m.b52*m.b114 - 2*m.b52*m.b115 + 2*m.b52*
m.b131 + 2*m.b52*m.b132 - 2*m.b52*m.b137 + 2*m.b52*m.b140 + 2*m.b52*m.b141 - 2*m.b52*m.b146 +
2*m.b52*m.b148 + 2*m.b52*m.b149 - 2*m.b52*m.b154 + 2*m.b52*m.b155 + 2*m.b52*m.b156 - 2*m.b52*
m.b161 - 2*m.b52*m.b163 - 2*m.b52*m.b164 - 2*m.b52*m.b165 - 2*m.b52*m.b166 - 2*m.b52*m.b167 -
2*m.b52*m.b168 - 2*m.b52*m.b169 - 2*m.b52*m.b170 - 2*m.b52*m.b171 - 2*m.b52*m.b172 - 2*m.b52*
m.b176 - 2*m.b52*m.b179 - 2*m.b52*m.b181 - 2*m.b52*m.b182 - 2*m.b53*m.b92 - 8*m.b53 - 2*m.b53*
m.b94 - 2*m.b53*m.b95 + 2*m.b53*m.b96 - 2*m.b53*m.b97 + 2*m.b53*m.b98 - 2*m.b53*m.b99 + 2*
m.b53*m.b100 - 2*m.b53*m.b101 + 2*m.b53*m.b102 - 2*m.b53*m.b103 + 2*m.b53*m.b104 - 2*m.b53*
m.b105 + 2*m.b53*m.b106 - 2*m.b53*m.b107 + 2*m.b53*m.b108 - 2*m.b53*m.b109 + 2*m.b53*m.b110 -
2*m.b53*m.b111 + 2*m.b53*m.b112 - 2*m.b53*m.b113 + 2*m.b53*m.b114 + 2*m.b53*m.b117 + 2*m.b53*
m.b118 + 2*m.b53*m.b119 + 2*m.b53*m.b120 + 2*m.b53*m.b121 + 2*m.b53*m.b122 + 2*m.b53*m.b123 +
2*m.b53*m.b124 + 2*m.b53*m.b125 + 2*m.b53*m.b126 - 2*m.b54*m.b92 + 7*m.b54 - 2*m.b54*m.b94 + 2
*m.b54*m.b96 + 2*m.b54*m.b98 - 2*m.b54*m.b99 + 2*m.b54*m.b100 + 2*m.b54*m.b102 - 2*m.b54*
m.b103 + 2*m.b54*m.b104 - 2*m.b54*m.b105 + 2*m.b54*m.b106 + 2*m.b54*m.b108 - 2*m.b54*m.b109 +
2*m.b54*m.b110 + 2*m.b54*m.b112 - 2*m.b54*m.b113 + 2*m.b54*m.b114 - 2*m.b54*m.b115 + 2*m.b54*
m.b117 + 2*m.b54*m.b118 + 2*m.b54*m.b119 + 2*m.b54*m.b120 + 2*m.b54*m.b121 + 2*m.b54*m.b122 +
2*m.b54*m.b123 + 2*m.b54*m.b124 + 2*m.b54*m.b125 + 2*m.b54*m.b126 - 2*m.b54*m.b129 - 2*m.b54*
m.b131 - 2*m.b54*m.b132 - 2*m.b54*m.b134 - 2*m.b54*m.b136 - 2*m.b54*m.b137 - 2*m.b54*m.b138 -
2*m.b54*m.b140 - 2*m.b54*m.b141 - 2*m.b54*m.b143 - 2*m.b54*m.b145 - 2*m.b54*m.b146 + 2*m.b54*
m.b147 + 2*m.b54*m.b150 + 2*m.b54*m.b152 - 2*m.b54*m.b154 - 2*m.b54*m.b155 - 2*m.b54*m.b156 -
2*m.b54*m.b158 - 2*m.b54*m.b160 - 2*m.b54*m.b161 + 2*m.b54*m.b163 + 2*m.b54*m.b165 - 2*m.b54*
m.b167 + 2*m.b54*m.b168 + 2*m.b54*m.b170 - 2*m.b54*m.b172 - 2*m.b54*m.b173 - 2*m.b54*m.b175 -
2*m.b54*m.b176 + 2*m.b54*m.b177 - 2*m.b54*m.b179 | |
# <gh_stars>0  -- repository-scrape artifact; kept as a comment so the module parses
import pinocchio as pin
import numpy as np
import pybullet as p
import pybullet_data
import torch
from torch import tensor
from pinocchio.robot_wrapper import RobotWrapper
import os
import matplotlib.pyplot as plt
import time
from cep.utils import numpy2torch, torch2numpy
from cep.liegroups.torch import SO3, SE3
number_iteration = 500 # Define max iteration number
dt = 0.01 # Define time step
first_time = True
# TODO: add on 07.19
def MoveIt_generate_traj(): #TODO: 06.14
    """Placeholder for MoveIt-based trajectory generation (not implemented yet)."""
    return
def getPosVelJoints(robotId, joint_indexes):  # Function to get the position of the selected joints from pybullet
    """
    Read the current positions of the given joints from pybullet.

    Each pybullet joint state is a tuple
    (position, velocity, reaction forces, applied joint motor torque);
    only the position (index 0) is used here.

    Returns:
        numpy column vector of shape (len(joint_indexes), 1).
    """
    joint_states = p.getJointStates(robotId, joint_indexes)
    positions = [state[0] for state in joint_states]
    joint_pos = np.array([positions]).transpose()
    return joint_pos
def add_debug_lines(X_w, Y_w, Z_w, XYZ_ee):
    """
    Draw the debug axes: world-origin->axis-tip lines (l1..l3) and
    end-effector->axis-tip lines (w1..w3).

    The pybullet line ids are kept in module globals so the previous
    frame's lines can be removed before redrawing. The six
    addUserDebugLine calls were duplicated in both branches of the
    original if/else; they are now emitted once.
    """
    global l1
    global l2
    global l3
    global w1
    global w2
    global w3
    if not first_time:
        # Drop the lines from the previous frame before redrawing.
        p.removeUserDebugItem(l1)
        p.removeUserDebugItem(l2)
        p.removeUserDebugItem(l3)
        p.removeUserDebugItem(w1)
        p.removeUserDebugItem(w2)
        p.removeUserDebugItem(w3)
    # World axes from the origin (RGB = XYZ).
    l1 = p.addUserDebugLine([0, 0, 0], X_w, [1, 0, 0], lineWidth=2)
    l2 = p.addUserDebugLine([0, 0, 0], Y_w, [0, 1, 0], lineWidth=2)
    l3 = p.addUserDebugLine([0, 0, 0], Z_w, [0, 0, 1], lineWidth=2)
    # Lines from the end-effector position to each axis tip.
    w1 = p.addUserDebugLine(XYZ_ee, X_w, [1, 0, 0], lineWidth=2)
    w2 = p.addUserDebugLine(XYZ_ee, Y_w, [0, 1, 0], lineWidth=2)
    w3 = p.addUserDebugLine(XYZ_ee, Z_w, [0, 0, 1], lineWidth=2)
    return l1, l2, l3, w1, w2, w3
def add_xyz_text(X_w, Y_w, Z_w):
    """
    Add 'X'/'Y'/'Z' debug labels at the world-frame axis tips.

    Labels are only created on the first call (they persist in the GUI
    afterwards); the pybullet text ids are cached in module globals.

    NOTE(review): if the very first call happens with first_time False,
    the return statement hits uninitialized globals (NameError) —
    confirm callers always draw once before clearing first_time.
    """
    global x_text
    global y_text
    global z_text
    if first_time:
        x_text = p.addUserDebugText('X', X_w)
        y_text = p.addUserDebugText('Y', Y_w)
        z_text = p.addUserDebugText('Z', Z_w)
    return x_text, y_text, z_text
def put_obj_in_world(X_w, Y_w, Z_w):
    """
    Load small marker objects (sphere/cube/lego) at the world-frame
    axis-tip positions, once.

    Objects are only loaded on the first call; the pybullet body ids
    are cached in module globals.

    NOTE(review): as with add_xyz_text, a first call with first_time
    False would return uninitialized globals — confirm call order.
    """
    global lego_x
    global lego_y
    global lego_z
    if first_time:
        lego_x = p.loadURDF('sphere_small.urdf', X_w, p.getQuaternionFromEuler([0, 0, 0]),
                            useFixedBase=True, useMaximalCoordinates=True)
        lego_y = p.loadURDF('cube_small.urdf', Y_w, p.getQuaternionFromEuler([0, 0, 0]),
                            useFixedBase=True, useMaximalCoordinates=True)
        lego_z = p.loadURDF('/lego/lego.urdf', Z_w, p.getQuaternionFromEuler([0, 0, 0]),
                            useFixedBase=True, useMaximalCoordinates=True)
    return lego_x, lego_y, lego_z
def calculate_xyz_world(T: tensor):
    """
    Transform the three 0.33-long axis points from the end-effector
    frame into the world frame using homogeneous transform T (4x4).

    Returns:
        Three [x, y, z] lists: the world-frame tips of the local X, Y
        and Z axes.
    """
    basis_vectors = (
        tensor([[.33, 0., 0., 1.]]).reshape(4, 1),
        tensor([[0., .33, 0., 1.]]).reshape(4, 1),
        tensor([[0., 0., .33, 1.]]).reshape(4, 1),
    )
    # Apply T to each homogeneous axis point and convert to nested lists.
    tips = [torch2numpy(torch.matmul(T, vec)).tolist() for vec in basis_vectors]
    # Keep only the x/y/z components (drop the homogeneous 1).
    X_w = [tips[0][i][0] for i in range(3)]
    Y_w = [tips[1][i][0] for i in range(3)]
    Z_w = [tips[2][i][0] for i in range(3)]
    return X_w, Y_w, Z_w
def joint_control(robot, q, K: int, dt: float):
    """One Euler step of a proportional joint-space controller.

    Drives all joints toward a fixed target of 0.6 (rad, presumably — the
    desired value is hard-coded).

    Args:
        robot: object exposing ``nq``, the number of joints.
        q: (nq, 1) array of current joint positions.
        K: proportional gain.
        dt: integration time step in seconds.

    Returns:
        (nq, 1) array with the next joint positions.
    """
    q_des = np.ones((robot.nq, 1)) * 0.6
    # BUGFIX: the velocity must point toward the target, i.e. K * (q_des - q).
    # The previous K * (q - q_des) had the sign inverted, so the error grew
    # by a factor of (1 + K*dt) every step instead of shrinking.
    dq = K * (q_des - q)
    return q + dq * dt
def start_pybullet():  # load Tiago in PyBullet
    """Start a PyBullet GUI session and populate the world.

    Loads the ground plane, the modified single-arm Tiago URDF (fixed base)
    and a 1 cm sphere marking the task-space target at (0.8, 0, 0.8).

    Returns:
        (robotId, planeId, joint_indexes): pybullet body ids plus the list of
        the 7 arm joint indices driven by the controller.
    """
    physicsClient = p.connect(p.GUI)  # or p.DIRECT for non-graphical version
    p.setAdditionalSearchPath(pybullet_data.getDataPath())
    p.resetSimulation(p.RESET_USE_DEFORMABLE_WORLD)
    p.setGravity(0, 0, -9.81)
    # Resolve the robot URDF relative to this file's repository layout.
    base_dir = os.path.abspath(os.path.dirname(__file__) + '../../..')
    robot_dir = os.path.join(base_dir, 'robots/tiago/')
    urdf_filename = os.path.join(robot_dir, 'tiago_single_modified.urdf')
    planeId = p.loadURDF("plane.urdf")
    startPos = [0., 0., 0.]
    startOrientation = [0., 0., 0.]  # NOTE(review): unused; orientation is passed inline below
    robotId = p.loadURDF(urdf_filename, startPos, p.getQuaternionFromEuler([0., 0., 0.]), useFixedBase=1)
    # p.loadURDF("cube_small.urdf", np.array([0.4, -0.5, 0.5]),
    #            p.getQuaternionFromEuler([0, 0, 0]),
    #            useFixedBase=True, useMaximalCoordinates=True)
    p.loadURDF('sphere_1cm.urdf', np.array([0.8, 0., 0.8]),  # TODO: put an object at the target position
               p.getQuaternionFromEuler([0, 0, 0]),
               useFixedBase=True)
    # Arm joint indices — assumes the modified Tiago URDF ordering; verify against the model.
    joint_indexes = [31, 32, 33, 34, 35, 36, 37]
    return robotId, planeId, joint_indexes
def plot_mu(mu_values: list, num: int):
    """Plot the six task-space velocity components over time in a 2x3 grid.

    Args:
        mu_values: per-iteration 6-vectors indexable as
            (wx, wy, wz, vx, vy, vz); elements expose ``.item()``
            (e.g. torch scalars).
        num: number of iterations to plot.
    """
    fig, axs = plt.subplots(2, 3)
    t = np.arange(0, num, 1)
    # Titles kept byte-identical to the originals (incl. the stray space).
    titles = ('wx: task space', 'wy: task space', 'wz: task space',
              'vx: task space', 'vy: task space ', 'vz: task space')
    series = [[mu_values[i][k].item() for i in range(num)] for k in range(6)]
    # BUGFIX: every subplot previously plotted wx (copy-paste); now each
    # component is drawn on its own axis.
    for k, (ys, title) in enumerate(zip(series, titles)):
        ax = axs[k // 3, k % 3]
        ax.plot(t, ys)
        ax.set_title(title)
        ax.set_ylim(min(ys) - 10, max(ys) + 10)
    plt.show()
def plot_joints(joint_values: list, joint_pos_values: list, num: int):
    """Plot the 7 arm joint trajectories in a 2x4 grid of subplots.

    Args:
        joint_values: per-iteration sequences of 7 joint values.
        joint_pos_values: currently unused; kept for interface compatibility.
        num: number of iterations to plot.
    """
    fig, axs = plt.subplots(2, 4)
    t = np.arange(0, num, 1)
    titles = ('1st Joint', '2nd Joint', '3rd Joint', '4th Joint',
              '5th Joint', '6th Joint', '7th Joint')
    # BUGFIX: iterate over the `num` parameter instead of the global
    # `number_iteration`, which this function never received — a mismatch
    # made plot() fail on length disagreement with `t`.
    for j, title in enumerate(titles):
        ys = [joint_values[i][j] for i in range(num)]
        ax = axs[j // 4, j % 4]
        ax.plot(t, ys)
        ax.set_title(title)
        ax.set_ylim(min(ys) - 1, max(ys) + 1)
    plt.show()
def plot_euclidiean_dist(dist_values, num):
    """Plot the Euclidean distance to the target over `num` iterations."""
    fig, ax = plt.subplots(1, 1)
    steps = np.arange(0, num, 1)
    ax.plot(steps, dist_values)
    ax.set_title('Euclidean distance')
    plt.show()
def plot_error(error_values, num):
    """Plot the tracking error, save the figure as 'Error', then display it."""
    fig, ax = plt.subplots(1, 1)
    steps = np.arange(0, num, 1)
    ax.plot(steps, error_values)
    ax.set_title('Error')
    plt.savefig('Error')
    plt.show()
def plot_xyz(x_values: list, y_values: list, z_values: list, num: int):
    """Plot current vs. desired end-effector x/y/z coordinates over time.

    The desired position is the fixed target (0.8, 0.0, 0.8).

    Args:
        x_values, y_values, z_values: per-iteration coordinate histories.
        num: number of iterations to plot.
    """
    fig, axs = plt.subplots(1, 3)
    t = np.arange(0, num, 1)
    # (title, current series, desired constant, y-axis margin) per subplot.
    per_axis = (('X', x_values, 0.8, 0.1),
                ('Y', y_values, 0., .3),
                ('Z', z_values, 0.8, .3))
    for ax, (title, values, target, margin) in zip(axs, per_axis):
        coord = title.lower()
        ax.plot(t, values, label='Current ' + coord)
        ax.set_title(title)
        ax.plot(t, np.ones((num, )) * target, label='Desired ' + coord)
        # FIX: the legend was only drawn on the X subplot although all three
        # have labeled current/desired lines; show it consistently.
        ax.legend()
        ax.set_ylim(min(values) - margin, max(values) + margin)
    plt.show()
def se3ToTransfrom(SE3):
    """Convert an SE3 object (rotation + translation) to a (4, 4) homogeneous matrix."""
    rotation = numpy2torch(SE3.rotation)
    translation = numpy2torch(SE3.translation)
    upper = torch.cat((rotation, translation.reshape(3, 1)), 1)
    bottom_row = torch.tensor([[0, 0, 0, 1]])
    return torch.cat((upper, bottom_row), 0)
def load_tiago():  # TODO: Modify the urdf path
    """Build a RobotWrapper for Tiago from the repository's URDF.

    Returns:
        RobotWrapper loaded from robots/tiago/tiago_single_modified.urdf
        (presumably pinocchio's RobotWrapper — confirm the import site).
    """
    # Repository root, resolved relative to this file.
    base_dir = os.path.abspath(os.path.dirname(__file__) + '../../..')
    robot_dir = os.path.join(base_dir, 'robots/tiago/')
    urdf_filename = os.path.join(robot_dir, 'tiago_single_modified.urdf')
    robot = RobotWrapper.BuildFromURDF(urdf_filename, [robot_dir])
    # robot.initViewer()
    return robot
def set_context(state, R):
    """Compute a damped task-space velocity command steering the EE toward R.

    Args:
        state: state[0] is the current end-effector pose, a (4, 4) tensor;
               state[1] its spatial velocity (6-vector tensor).
        R: (4, 4) tensor, target end-effector pose.

    Returns:
        mu: 6-vector tensor velocity command (P-term on the log-map pose
        error minus a damping term on the current velocity).
    """
    x = state[0]  # Tensor(4, 4), end-effector rotation and position SE(3)
    v = state[1]  # Tensor (1, 6), end-effector spatial velocity V_b
    print('state:', state)
    R_inv = torch.inverse(R)
    Htl = torch.matmul(R_inv, x)  # pose of the EE expressed in the target frame
    print('Htl: ', Htl)
    Xe = SE3.from_matrix(Htl, normalize=True)  # <cep.liegroups.torch.se3.SE3Matrix>, SE(3)
    print('Xe: ', Xe)
    xtl = Xe.log()  # Tensor(1, 6), twist (omega, V) of the relative pose
    print('xtl: ', xtl)
    vtl = -xtl  # desired twist: move back toward the target
    A = SE3.from_matrix(R)  # SE(3) element for the target pose
    print('A: ', A)
    Adj_lw = A.adjoint()  # Adjoint map (spatial velocity from one frame to another), Tensor (6, 6)
    print('Adj_lw: ', Adj_lw)
    ve_w = torch.matmul(Adj_lw, vtl)  # desired twist re-expressed via the adjoint
    print('v_ew: ', ve_w)
    ###########################################
    scale = 20.
    # Proportional term on the pose error, damped by the current velocity.
    mu = scale * ve_w - 1.2 * scale * v
    return mu
    # (unreachable) print('mu: ', mu)  # Tensor(6, 1)
def calculate_mu(state, R): # TODO: Acceleration control
'''
params:
state: Tensor -> contains end-effector rotation and position s[0], spatial velocity s[1]
R: Tensor (4, 4) -> Homogenous transformation matrix of end-effector
return:
mu: Tensor (1, 6). ddx, contains (dw, dv),
then ddq = J_pinv * ddx, | |
1 * u.m**2/u.s),
(1 * u.m, 0 * u.m, 1 * u.m, 1 * u.m**2/u.s))
for i in passChecks:
with self.subTest(i=i):
pc.flow_hagen(*i)
    def test_flow_hagen_warning(self):
        """flow_hagen should raise warnings when passed deprecated parameters.

        Mixing old and new names for the same parameter, or omitting a
        required one, is a TypeError; using only the deprecated
        HeadLossFric alias works but emits a UserWarning.
        """
        error_checks = (lambda: pc.flow_hagen(1 * u.m, HeadLossMajor=1 * u.m, Length=1 * u.m, Nu=1 * u.m**2/u.s, HeadLossFric=1 * u.m),
                        lambda: pc.flow_hagen(1 * u.m, Length=1 * u.m, Nu=1 * u.m**2/u.s),
                        lambda: pc.flow_hagen(1 * u.m, HeadLossMajor=1 * u.m, Nu=1 * u.m**2/u.s),
                        lambda: pc.flow_hagen(1 * u.m, HeadLossMajor=1 * u.m, Length=1 * u.m))
        for i in error_checks:
            with self.subTest(i=i):
                self.assertRaises(TypeError, i)
        warning_checks = (lambda: pc.flow_hagen(1 * u.m, HeadLossFric=1 * u.m, Length=1 * u.m, Nu=1 * u.m**2/u.s),)
        for i in warning_checks:
            with self.subTest(i=i):
                self.assertWarns(UserWarning, i)
    def test_flow_swamee(self):
        """flow_swamee should return known value for known inputs."""
        # Expected value is a regression pin, not an independently derived figure.
        checks = (([2 * u.m, 0.04 * u.m, 3 * u.m, 0.1 * u.m**2/u.s, 0.37 * u.m], 2.9565931732010045 * u.m**3/u.s),)
        for i in checks:
            with self.subTest(i=i):
                self.assertAlmostEqualQuantity(pc.flow_swamee(*i[0]), i[1])
    def test_flow_swamee_range(self):
        """flow_swamee should raise errors when inputs are out of bounds.

        Non-positive diameter/head-loss/length/viscosity or negative
        roughness must raise ValueError; zero roughness is allowed.
        """
        failChecks = ((0 * u.m, 1 * u.m, 1 * u.m, 1 * u.m**2/u.s, 1 * u.m),
                      (1 * u.m, 0 * u.m, 1 * u.m, 1 * u.m**2/u.s, 1 * u.m),
                      (1 * u.m, 1 * u.m, 0 * u.m, 1 * u.m**2/u.s, 1 * u.m),
                      (1 * u.m, 1 * u.m, 1 * u.m, 0 * u.m**2/u.s, 1 * u.m),
                      (1 * u.m, 1 * u.m, 1 * u.m, 1 * u.m**2/u.s, -0.1 * u.m),
                      (1 * u.m, 1 * u.m, 1 * u.m, 1 * u.m**2/u.s, -2 * u.m))
        for i in failChecks:
            with self.subTest(i=i):
                self.assertRaises(ValueError, pc.flow_swamee, *i)
        passChecks = ((1 * u.m, 1 * u.m, 1 * u.m, 1 * u.m**2/u.s, 1 * u.m),
                      (1 * u.m, 1 * u.m, 1 * u.m, 1 * u.m**2/u.s, 0 * u.m))
        for i in passChecks:
            with self.subTest(i=i):
                pc.flow_swamee(*i)
    def test_flow_swamee_warning(self):
        """flow_swamee should raise warnings when passed deprecated parameters.

        Duplicated old/new names or missing required parameters raise
        TypeError; using only a deprecated alias emits a UserWarning.
        """
        error_checks = (lambda: pc.flow_swamee(1 * u.m, HeadLossMajor=1 * u.m, Length=1 * u.m, Nu=1 * u.m**2/u.s, Roughness=1 * u.m, HeadLossFric=1 * u.m),
                        lambda: pc.flow_swamee(1 * u.m, Length=1 * u.m, Nu=1 * u.m**2/u.s, Roughness=1 * u.m),
                        lambda: pc.flow_swamee(1 * u.m, HeadLossMajor=1 * u.m, Nu=1 * u.m**2/u.s, Roughness=1 * u.m),
                        lambda: pc.flow_swamee(1 * u.m, HeadLossMajor=1 * u.m, Length=1 * u.m, Roughness=1 * u.m),
                        lambda: pc.flow_swamee(1 * u.m, HeadLossMajor=1 * u.m, Length=1 * u.m, Nu=1 * u.m**2/u.s, Roughness=1 * u.m, PipeRough=1 * u.m),
                        lambda: pc.flow_swamee(1 * u.m, HeadLossMajor=1 * u.m, Length=1 * u.m, Nu=1 * u.m**2/u.s))
        for i in error_checks:
            with self.subTest(i=i):
                self.assertRaises(TypeError, i)
        warning_checks = (lambda: pc.flow_swamee(1 * u.m, HeadLossFric=1 * u.m, Length=1 * u.m, Nu=1 * u.m**2/u.s, Roughness=1 * u.m),
                          lambda: pc.flow_swamee(1 * u.m, HeadLossMajor=1 * u.m, Length=1 * u.m, Nu=1 * u.m**2/u.s, PipeRough=1 * u.m))
        for i in warning_checks:
            with self.subTest(i=i):
                self.assertWarns(UserWarning, i)
def test_flow_pipemajor(self):
self.assertWarns(UserWarning, pc.flow_pipemajor, *(1 * u.m, 0.97 * u.m, 0.5 * u.m, 0.025 * u.m**2/u.s, 0.06 * u.m))
    def test_flow_major_pipe(self):
        """flow_major_pipe should return known result for known inputs."""
        # Expected values are regression pins against previous library output.
        checks = (([1 * u.m, 0.97 * u.m, 0.5 * u.m, 0.025 * u.m**2/u.s, 0.06 * u.m], 18.677652880272845 * u.m**3/u.s),
                  ([2 * u.m, 0.62 * u.m, 0.5 * u.m, 0.036 * u.m**2/u.s, 0.23 * u.m], 62.457206502701297 * u.m**3/u.s))
        for i in checks:
            with self.subTest(i=i):
                self.assertAlmostEqualQuantity(pc.flow_major_pipe(*i[0]), i[1])
def test_flow_pipeminor(self):
self.assertWarns(UserWarning, pc.flow_pipeminor, *(1 * u.m, 0.125 * u.m, 3))
def test_flow_minor_pipe(self):
"""flow_minor_pipe should return known results for known input."""
self.assertAlmostEqualQuantity(pc.flow_minor_pipe(1 * u.m, 0.125 * u.m, 3),
0.71000203931611083 * u.m**3/u.s)
    def test_flow_minor_pipe_range(self):
        """flow_minor_pipe should raise errors when inputs are out of bounds.

        Negative head loss or a zero minor-loss coefficient must raise
        ValueError; zero head loss is allowed.
        """
        failChecks = ((1 * u.m, -1 * u.m, 1),
                      (1 * u.m, 1 * u.m, 0 * u.dimensionless))
        for i in failChecks:
            with self.subTest(i=i):
                self.assertRaises(ValueError, pc.flow_minor_pipe, *i)
        passChecks = ((1 * u.m, 1 * u.m, 1), (1 * u.m, 0 * u.m, 1))
        for i in passChecks:
            with self.subTest(i=i):
                pc.flow_minor_pipe(*i)
    def test_flow_pipe(self):
        """flow_pipe should return known value for known inputs."""
        # Second case differs only in a nonzero minor-loss coefficient.
        checks = (([0.25 * u.m, 0.4 * u.m, 2 * u.m, 0.58 * u.m**2/u.s, 0.029 * u.m, 0], 0.000324207170118938 * u.m**3/u.s),
                  ([0.25 * u.m, 0.4 * u.m, 2 * u.m, 0.58 * u.m**2/u.s, 0.029 * u.m, 0.35 * u.dimensionless], 0.000324206539183988 * u.m**3/u.s))
        for i in checks:
            with self.subTest(i=i):
                self.assertAlmostEqualQuantity(pc.flow_pipe(*i[0]), i[1])
    def test_flow_pipe_warning(self):
        """flow_pipe should raise warnings when passed deprecated parameters.

        Duplicated old/new names or missing required parameters raise
        TypeError; the deprecated PipeRough alias emits a UserWarning.
        """
        error_checks = (lambda: pc.flow_pipe(1 * u.m, 1 * u.m, 1 * u.m, 1 * u.m**2/u.s, Roughness=1 * u.m, KMinor=1, PipeRough=1 * u.m),
                        lambda: pc.flow_pipe(1 * u.m, 1 * u.m, 1 * u.m, 1 * u.m**2/u.s, KMinor=1),
                        lambda: pc.flow_pipe(1 * u.m, 1 * u.m, 1 * u.m, 1 * u.m**2/u.s, Roughness=1 * u.m),)
        for i in error_checks:
            with self.subTest(i=i):
                self.assertRaises(TypeError, i)
        warning_checks = (lambda: pc.flow_pipe(1 * u.m, 1 * u.m, 1 * u.m, 1 * u.m**2/u.s, PipeRough=1 * u.m, KMinor=1),)
        for i in warning_checks:
            with self.subTest(i=i):
                self.assertWarns(UserWarning, i)
class DiamFuncsTest(QuantityTest):
"""Test the diameter functions."""
    def test_diam_hagen(self):
        """diam_hagen should return known value for known inputs."""
        # Expected value is a regression pin against previous library output.
        self.assertAlmostEqualQuantity(pc.diam_hagen(0.006 * u.m**3/u.s, 0.00025 * u.m, 0.75 * u.m, 0.0004 * u.m**2/u.s),
                                       0.4158799465199102 * u.m)
    def test_diam_hagen_range(self):
        """diam_hagen should raise errors when inputs are out of bounds.

        Each case zeroes exactly one of the four required quantities.
        """
        failChecks = ((0 * u.m**3/u.s, 1 * u.m, 1 * u.m, 1 * u.m**2/u.s),
                      (1 * u.m**3/u.s, 0 * u.m, 1 * u.m, 1 * u.m**2/u.s),
                      (1 * u.m**3/u.s, 1 * u.m, 0 * u.m, 1 * u.m**2/u.s),
                      (1 * u.m**3/u.s, 1 * u.m, 1 * u.m, 0 * u.m**2/u.s))
        for i in failChecks:
            with self.subTest(i=i):
                self.assertRaises(ValueError, pc.diam_hagen, *i)
    def test_diam_hagen_warning(self):
        """diam_hagen should raise warnings when passed deprecated parameters.

        (Docstring previously said "flow_hagen" — copy-paste; this method
        exercises diam_hagen.)
        """
        error_checks = (lambda: pc.diam_hagen(1 * u.m**3/u.s, HeadLossMajor=1 * u.m, Length=1 * u.m, Nu=1 * u.m**2/u.s, HeadLossFric=1 * u.m),
                        lambda: pc.diam_hagen(1 * u.m**3/u.s, Length=1 * u.m, Nu=1 * u.m**2/u.s),
                        lambda: pc.diam_hagen(1 * u.m**3/u.s, HeadLossMajor=1 * u.m, Nu=1 * u.m**2/u.s),
                        lambda: pc.diam_hagen(1 * u.m**3/u.s, HeadLossMajor=1 * u.m, Length=1 * u.m))
        for i in error_checks:
            with self.subTest(i=i):
                self.assertRaises(TypeError, i)
        warning_checks = (lambda: pc.diam_hagen(1 * u.m**3/u.s, HeadLossFric=1 * u.m, Length=1 * u.m, Nu=1 * u.m**2/u.s),)
        for i in warning_checks:
            with self.subTest(i=i):
                self.assertWarns(UserWarning, i)
    def test_diam_swamee(self):
        """diam_swamee should return known value for known input."""
        # Expected value is a regression pin against previous library output.
        self.assertAlmostEqualQuantity(pc.diam_swamee(0.06 * u.m**3/u.s, 1.2 * u.m, 7 * u.m, 0.2* u.m**2/u.s, 0.0004 * u.m),
                                       0.19286307314945772 * u.m)
    def test_diam_swamee_range(self):
        """diam_swamee should raise errors if inputs are out of bounds.

        Non-positive flow/head-loss/length/viscosity or negative roughness
        must raise ValueError; zero roughness is allowed.
        """
        failChecks = ((0 * u.m**3/u.s, 1 * u.m, 1 * u.m, 1 * u.m**2/u.s, 1 * u.m),
                      (1 * u.m**3/u.s, 0 * u.m, 1 * u.m, 1 * u.m**2/u.s, 1 * u.m),
                      (1 * u.m**3/u.s, 1 * u.m, 0 * u.m, 1 * u.m**2/u.s, 1 * u.m),
                      (1 * u.m**3/u.s, 1 * u.m, 1 * u.m, 0 * u.m**2/u.s, 1 * u.m),
                      (1 * u.m**3/u.s, 1 * u.m, 1 * u.m, 1 * u.m**2/u.s, -2 * u.m),
                      (1 * u.m**3/u.s, 1 * u.m, 1 * u.m, 1 * u.m**2/u.s, -1 * u.m))
        for i in failChecks:
            with self.subTest(i=i):
                self.assertRaises(ValueError, pc.diam_swamee, *i)
        passChecks = ((1 * u.m**3/u.s, 1 * u.m, 1 * u.m, 1 * u.m**2/u.s, 1 * u.m),
                      (1 * u.m**3/u.s, 1 * u.m, 1 * u.m, 1 * u.m**2/u.s, 0 * u.m))
        for i in passChecks:
            with self.subTest(i=i):
                pc.diam_swamee(*i)
def test_diam_swamee_warning(self):
"""diam_swamee should raise warnings when passed deprecated parameters"""
error_checks = (lambda: pc.diam_swamee(1 * u.m**3/u.s, HeadLossMajor=1 * u.m, Length=1 * u.m, Nu=1 * u.m**2/u.s, Roughness=1 * u.m, HeadLossFric=1 * u.m),
lambda: pc.diam_swamee(1 * u.m**3/u.s, Length=1 * u.m, Nu=1 * u.m**2/u.s, Roughness=1 * u.m),
lambda: pc.diam_swamee(1 * u.m**3/u.s, HeadLossMajor=1 * u.m, Nu=1 * u.m**2/u.s, Roughness=1 * u.m),
lambda: pc.diam_swamee(1 * u.m**3/u.s, HeadLossMajor=1 * u.m, Length=1 * u.m, Roughness=1 * u.m),
lambda: pc.diam_swamee(1 * u.m**3/u.s, HeadLossMajor=1 * u.m, Length=1 * u.m, Nu=1 * u.m**2/u.s, Roughness=1 * u.m, PipeRough=1 * u.m),
lambda: pc.diam_swamee(1 * u.m**3/u.s, HeadLossMajor=1 * u.m, Length=1 * u.m, Nu=1 * u.m**2/u.s))
for i in error_checks:
with self.subTest(i=i):
self.assertRaises(TypeError, i)
warning_checks = (lambda: pc.diam_swamee(1 * u.m**3/u.s, HeadLossFric=1 * u.m, Length=1 * u.m, Nu=1 * u.m**2/u.s, Roughness=1 * u.m),
lambda: pc.diam_swamee(1 * u.m**3/u.s, HeadLossMajor=1 | |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for dealing with FuzzerStats."""
from builtins import object
import datetime
import functools
import itertools
import json
import os
import random
import re
from base import memoize
from base import utils
from datastore import data_handler
from datastore import data_types
from datastore import fuzz_target_utils
from google_cloud_utils import big_query
from google_cloud_utils import storage
from metrics import fuzzer_logs
from metrics import logs
from system import environment
from system import shell
# Suffix of per-testcase stats files on disk (see TestcaseRun.get_stats_filename).
STATS_FILE_EXTENSION = '.stats2'
# URL template for the performance report viewer page.
PERFORMANCE_REPORT_VIEWER_PATH = '/performance-report/{fuzzer}/{job}/{date}'
# BigQuery table schema for JobRun stats rows (see JobRun.SCHEMA below).
# 'crashes' is a repeated record holding per-crash-signature counts.
JOB_RUN_SCHEMA = {
    'fields': [{
        'name': 'testcases_executed',
        'type': 'INTEGER',
        'mode': 'NULLABLE'
    }, {
        'name': 'build_revision',
        'type': 'INTEGER',
        'mode': 'NULLABLE'
    }, {
        'name': 'new_crashes',
        'type': 'INTEGER',
        'mode': 'NULLABLE'
    }, {
        'name': 'job',
        'type': 'STRING',
        'mode': 'NULLABLE'
    }, {
        'name': 'timestamp',
        'type': 'FLOAT',
        'mode': 'NULLABLE'
    }, {
        'name':
            'crashes',
        'type':
            'RECORD',
        'mode':
            'REPEATED',
        'fields': [{
            'name': 'crash_type',
            'type': 'STRING',
            'mode': 'NULLABLE'
        }, {
            'name': 'is_new',
            'type': 'BOOLEAN',
            'mode': 'NULLABLE'
        }, {
            'name': 'crash_state',
            'type': 'STRING',
            'mode': 'NULLABLE'
        }, {
            'name': 'security_flag',
            'type': 'BOOLEAN',
            'mode': 'NULLABLE'
        }, {
            'name': 'count',
            'type': 'INTEGER',
            'mode': 'NULLABLE'
        }]
    }, {
        'name': 'known_crashes',
        'type': 'INTEGER',
        'mode': 'NULLABLE'
    }, {
        'name': 'fuzzer',
        'type': 'STRING',
        'mode': 'NULLABLE'
    }, {
        'name': 'kind',
        'type': 'STRING',
        'mode': 'NULLABLE'
    }]
}
class FuzzerStatsException(Exception):
  """Base exception raised for fuzzer stats errors."""
class BaseRun(object):
  """Base class for a single fuzzer run's stats.

  Behaves like a dict of stats fields. Custom field names assigned via
  item access are validated against VALID_FIELDNAME_PATTERN.
  """
  # Field names must start with a letter and contain only [a-zA-Z0-9_].
  VALID_FIELDNAME_PATTERN = re.compile(r'[a-zA-Z][a-zA-Z0-9_]*')

  def __init__(self, fuzzer, job, build_revision, timestamp):
    self._stats_data = {
        'fuzzer': fuzzer,
        'job': job,
        'build_revision': build_revision,
        'timestamp': timestamp,
    }

  def __getitem__(self, key):
    return self._stats_data.__getitem__(key)

  def __setitem__(self, key, value):
    # BUGFIX: this previously evaluated `re.compile(pattern)`, whose result
    # (a pattern object) is always truthy, so the key was never validated.
    # Actually match the key against the pattern instead.
    if not self.VALID_FIELDNAME_PATTERN.match(key):
      raise ValueError('Invalid key name.')
    return self._stats_data.__setitem__(key, value)

  def __delitem__(self, key):
    return self._stats_data.__delitem__(key)

  def __contains__(self, key):
    return self._stats_data.__contains__(key)

  def to_json(self):
    """Return JSON representation of the stats."""
    return json.dumps(self._stats_data)

  def update(self, other):
    """Update stats with a dict."""
    self._stats_data.update(other)

  @property
  def data(self):
    return self._stats_data

  @property
  def kind(self):
    return self._stats_data['kind']

  @property
  def fuzzer(self):
    return self._stats_data['fuzzer']

  @property
  def job(self):
    return self._stats_data['job']

  @property
  def build_revision(self):
    return self._stats_data['build_revision']

  @property
  def timestamp(self):
    return self._stats_data['timestamp']

  @staticmethod
  def from_json(json_data):
    """Convert JSON to a TestcaseRun/JobRun instance.

    Returns None for malformed JSON, non-dict payloads, missing keys, or
    an unknown 'kind'.
    """
    try:
      data = json.loads(json_data)
    except (ValueError, TypeError):
      return None

    if not isinstance(data, dict):
      return None

    result = None
    try:
      kind = data['kind']
      if kind == 'TestcaseRun':
        result = TestcaseRun(data['fuzzer'], data['job'],
                             data['build_revision'], data['timestamp'])
      elif kind == 'JobRun':
        result = JobRun(data['fuzzer'], data['job'], data['build_revision'],
                        data['timestamp'], data['testcases_executed'],
                        data['new_crashes'], data['known_crashes'],
                        data.get('crashes'))
    except KeyError:
      return None

    if result:
      # Carry over any extra fields present in the serialized form.
      result.update(data)

    return result
class JobRun(BaseRun):
  """Represents stats for a particular job run."""

  # BigQuery schema used when uploading JobRun rows.
  SCHEMA = JOB_RUN_SCHEMA

  # `crashes` is a new field that will replace `new_crashes` and `old_crashes`.
  def __init__(self, fuzzer, job, build_revision, timestamp,
               number_of_testcases, new_crashes, known_crashes, crashes):
    super(JobRun, self).__init__(fuzzer, job, build_revision, timestamp)
    # 'kind' is the discriminator used by BaseRun.from_json.
    self._stats_data.update({
        'kind': 'JobRun',
        'testcases_executed': number_of_testcases,
        'new_crashes': new_crashes,
        'known_crashes': known_crashes,
        'crashes': crashes
    })
class TestcaseRun(BaseRun):
  """Represents stats for a particular testcase run."""

  # TestcaseRun rows have no fixed schema (fields vary by fuzzer).
  SCHEMA = None

  def __init__(self, fuzzer, job, build_revision, timestamp):
    super(TestcaseRun, self).__init__(fuzzer, job, build_revision, timestamp)
    # 'kind' is the discriminator used by BaseRun.from_json.
    self._stats_data.update({
        'kind': 'TestcaseRun',
    })

    # Optional environment tag recording where these stats came from.
    source = environment.get_value('STATS_SOURCE')
    if source:
      self._stats_data['source'] = source

  @staticmethod
  def get_stats_filename(testcase_file_path):
    """Get stats filename for the given testcase."""
    return testcase_file_path + STATS_FILE_EXTENSION

  @staticmethod
  def read_from_disk(testcase_file_path, delete=False):
    """Read the TestcaseRun for the given testcase.

    Returns None if no stats file exists. When |delete| is True the stats
    file is removed after being read.
    """
    stats_file_path = TestcaseRun.get_stats_filename(testcase_file_path)
    if not os.path.exists(stats_file_path):
      return None

    fuzzer_run = None
    with open(stats_file_path) as f:
      fuzzer_run = BaseRun.from_json(f.read())

    if delete:
      shell.remove_file(stats_file_path)

    return fuzzer_run

  @staticmethod
  def write_to_disk(testcase_run, testcase_file_path):
    """Write the given TestcaseRun for |testcase_file_path| to disk."""
    if not testcase_run:
      return

    stats_file_path = TestcaseRun.get_stats_filename(testcase_file_path)
    with open(stats_file_path, 'w') as f:
      f.write(testcase_run.to_json())
class QueryGroupBy(object):
  """GroupBy enum for stats queries (see group_by_to_field_name)."""

  GROUP_BY_NONE = 0
  GROUP_BY_REVISION = 1
  GROUP_BY_DAY = 2
  GROUP_BY_TIME = 3
  GROUP_BY_JOB = 4
  GROUP_BY_FUZZER = 5
def group_by_to_field_name(group_by):
  """Convert a QueryGroupBy value to its corresponding field name.

  Returns None for GROUP_BY_NONE and unknown values.
  """
  field_names = {
      QueryGroupBy.GROUP_BY_REVISION: 'build_revision',
      QueryGroupBy.GROUP_BY_DAY: 'date',
      QueryGroupBy.GROUP_BY_TIME: 'time',
      QueryGroupBy.GROUP_BY_JOB: 'job',
      QueryGroupBy.GROUP_BY_FUZZER: 'fuzzer',
  }
  return field_names.get(group_by)
class BuiltinFieldData(object):
  """A single rendered cell for a builtin stats field.

  Carries the display value plus an optional sort key and hyperlink target.
  """

  def __init__(self, value, sort_key=None, link=None):
    self.value, self.sort_key, self.link = value, sort_key, link
class BuiltinFieldSpecifier(object):
  """Represents a builtin field.

  Holds the field's registered name (a key in BUILTIN_FIELD_CONSTRUCTORS)
  and an optional display alias.
  """

  def __init__(self, name, alias=None):
    self.name = name
    self.alias = alias

  def create(self, ctx=None):
    """Create the actual BuiltinField, or None for an unknown name."""
    constructor = BUILTIN_FIELD_CONSTRUCTORS.get(self.name)
    if not constructor:
      return None

    return constructor(ctx)

  def field_class(self):
    """Return the class for the field, or None for an unknown name."""
    constructor = BUILTIN_FIELD_CONSTRUCTORS.get(self.name)
    if not constructor:
      return None

    # Constructors may be functools.partial wrappers; unwrap to the class.
    if isinstance(constructor, functools.partial):
      return constructor.func

    return constructor
class BuiltinField(object):
  """Base builtin field; concrete fields override get()."""

  def __init__(self, ctx=None):
    self.ctx = ctx

  def get(self, group_by, group_by_value):  # pylint: disable=unused-argument
    """Return BuiltinFieldData for one cell. Base implementation: no data."""
    return None
class BuiltinFieldContext(object):
  """Context for builtin fields: the fuzzer and job filters in effect."""

  def __init__(self, fuzzer=None, jobs=None):
    self.fuzzer = fuzzer
    self.jobs = jobs

  def single_job_or_none(self):
    """Return the job if exactly one is specified, or None."""
    jobs = self.jobs
    return jobs[0] if jobs and len(jobs) == 1 else None
class CoverageFieldContext(BuiltinFieldContext):
  """Coverage field context. Acts as a cache."""

  def __init__(self, fuzzer=None, jobs=None):
    super(CoverageFieldContext, self).__init__(fuzzer=fuzzer, jobs=jobs)

  # Memoized so repeated cells for the same (fuzzer, date) hit the cache.
  @memoize.wrap(memoize.FifoInMemory(256))
  def get_coverage_info(self, fuzzer, date=None):
    """Return coverage info of child fuzzers."""
    if fuzzer in data_types.BUILTIN_FUZZERS:
      # Get coverage info for a job (i.e. a project).
      job = self.single_job_or_none()
      project = data_handler.get_project_name(job)
      return get_coverage_info(project, date)

    fuzz_target = data_handler.get_fuzz_target(fuzzer)
    if fuzz_target:
      # Use the project-qualified name for fuzz targets.
      fuzzer = fuzz_target.project_qualified_name()

    return get_coverage_info(fuzzer, date)
class BaseCoverageField(object):
  """Base builtin field class for coverage related fields."""

  # Context class shared by coverage fields (acts as a memoizing cache).
  CONTEXT_CLASS = CoverageFieldContext

  def __init__(self, ctx):
    self.ctx = ctx

  def get_coverage_info(self, group_by, group_by_value):
    """Return coverage information for the current row, or None.

    Which (fuzzer, date) pair is queried depends on the group-by mode;
    unsupported modes fall through and return None.
    """
    coverage_info = None
    if group_by == QueryGroupBy.GROUP_BY_DAY:
      # Return coverage data for the fuzzer and the day.
      coverage_info = self.ctx.get_coverage_info(self.ctx.fuzzer,
                                                 group_by_value)
    elif group_by == QueryGroupBy.GROUP_BY_FUZZER:
      # Return latest coverage data for each fuzzer.
      coverage_info = self.ctx.get_coverage_info(group_by_value)
    elif group_by == QueryGroupBy.GROUP_BY_JOB:
      # Return the latest coverage data for the fuzzer. Even though we group by
      # job here, coverage information does not differ across jobs. As of now,
      # it only depends on the fuzzer name and the date.
      coverage_info = self.ctx.get_coverage_info(self.ctx.fuzzer)

    return coverage_info
class CoverageField(BaseCoverageField):
  """Coverage field: edge or function coverage as a percentage cell."""

  EDGE = 0
  FUNCTION = 1
  VALUE_TYPE = float

  def __init__(self, coverage_type, ctx=None):
    super(CoverageField, self).__init__(ctx)
    # One of EDGE or FUNCTION; selects which counters are displayed.
    self.coverage_type = coverage_type

  def get(self, group_by, group_by_value):
    """Return a cell like '12.34% (n/m)', or None when data is unavailable."""
    coverage_info = self.get_coverage_info(group_by, group_by_value)
    if not coverage_info:
      return None

    if self.coverage_type == self.EDGE:
      covered = coverage_info.edges_covered
      total = coverage_info.edges_total
    else:
      covered = coverage_info.functions_covered
      total = coverage_info.functions_total

    if covered is None or total is None:
      return None

    if not total:
      # Avoid division by zero and surface the bad data in logs.
      logs.log_error(
          'Invalid coverage info: total equals 0 for "%s".' % self.ctx.fuzzer)
      return BuiltinFieldData('No coverage', sort_key=0.0)

    percentage = 100.0 * float(covered) / total
    display_value = '%.2f%% (%d/%d)' % (percentage, covered, total)
    return BuiltinFieldData(display_value, sort_key=percentage)
class CorpusBackupField(BaseCoverageField):
  """Link to the latest corpus backup archive."""

  VALUE_TYPE = str

  def __init__(self, ctx=None):
    super(CorpusBackupField, self).__init__(ctx)

  def get(self, group_by, group_by_value):
    """Return a 'Download' cell linking to the backup directory, or None."""
    info = self.get_coverage_info(group_by, group_by_value)
    if not info or not info.corpus_backup_location:
      return None

    # Google Cloud console does not support linking to a specific file, so we
    # link to the containing directory instead.
    backup_dir = os.path.dirname(info.corpus_backup_location)
    return BuiltinFieldData('Download', link=backup_dir)
class CorpusSizeField(BaseCoverageField):
  """Corpus size field: '<units> (<bytes>)' with a link to the corpus."""

  CORPUS = 0
  QUARANTINE = 1
  VALUE_TYPE = int

  def __init__(self, corpus_type, ctx=None):
    super(CorpusSizeField, self).__init__(ctx)
    # One of CORPUS or QUARANTINE; selects which size counters are shown.
    self.corpus_type = corpus_type

  def get(self, group_by, group_by_value):
    """Return the size cell, or None when not applicable/unavailable."""
    if (self.ctx.fuzzer in data_types.BUILTIN_FUZZERS and
        group_by == QueryGroupBy.GROUP_BY_DAY):
      # Explicitly return None here, as coverage_info below might exist and have
      # default corpus size of 0, which might look confusing on the stats page.
      return None

    coverage_info = self.get_coverage_info(group_by, group_by_value)
    if not coverage_info:
      return None

    if self.corpus_type == self.CORPUS:
      corpus_size_units = coverage_info.corpus_size_units
      corpus_size_bytes = coverage_info.corpus_size_bytes
      corpus_location = coverage_info.corpus_location
    else:
      corpus_size_units = coverage_info.quarantine_size_units
      corpus_size_bytes = coverage_info.quarantine_size_bytes
      corpus_location = coverage_info.quarantine_location

    # If the values aren't specified, return None to show the default '--' text.
    if corpus_size_units is None or corpus_size_bytes is None:
      return None

    display_value = '%d (%s)' % (corpus_size_units,
                                 utils.get_size_string(corpus_size_bytes))

    return BuiltinFieldData(
        display_value, sort_key=corpus_size_units, link=corpus_location)
class CoverageReportField(BaseCoverageField):
  """Coverage report field: link to the HTML coverage report."""

  VALUE_TYPE = str

  def __init__(self, ctx=None):
    super(CoverageReportField, self).__init__(ctx)

  def get(self, group_by, group_by_value):
    """Return a 'Coverage' cell linking to the HTML report, or None."""
    info = self.get_coverage_info(group_by, group_by_value)
    if info and info.html_report_url:
      return BuiltinFieldData('Coverage', link=info.html_report_url)
    return None
def _logs_bucket_key_fn(func, args, kwargs): # pylint: | |
# <gh_stars>0
import logging
import simplejson
import string
import time
import traceback
# Command identifier: with conventional commands this is the command name.
ID="api"
# Minimum permission required to run the command (0 = lowest tier).
permission=0
# Module-level logger for the ModDota API poller.
MDAPI_logger = logging.getLogger("NEMPolling")
class ModDotaAPI:
    """Parser turning Valve's vscript API dump into a class/method database."""

    def __init__(self):
        pass

    def ReadDump(self):
        """Parse commands/ModDota/vscript-dump.txt into self.db.

        self.db maps class names to {"methods": {...}, "comment": ...}
        (plus "base" for derived classes); free functions land under the
        pseudo-class "##GLOBALS##". Any parse error is caught and printed.
        """
        with open("commands/ModDota/vscript-dump.txt", "r") as f:
            try:
                curClass = "##GLOBALS##"
                db = {
                    "##GLOBALS##" : {"methods" : {},"comment" : " "}
                }
                lineNum = 0
                prevLine = ""
                for line in f:
                    lineMsg = line.lstrip(" ").split(" ")
                    lineNum = lineNum + 1
                    if lineMsg[0] == "Class":
                        # Class header line: start a new method bucket.
                        if lineMsg[1] in db:
                            # NOTE(review): this exception class is not defined
                            # anywhere visible — a duplicate class in the dump
                            # would actually raise NameError here.
                            raise ClassAlreadyExistsDontDoThisToMeException()
                        else:
                            if lineMsg[1][-1:] == ",":
                                # Trailing comma => derived class: "Class Name, Base ..."
                                curClass = lineMsg[1][:-1]
                                db[curClass] = {"methods" : {}}
                                db[curClass]["base"] = lineMsg[2]
                                db[curClass]["comment"] = " ".join(lineMsg[4:])
                            else:
                                curClass = lineMsg[1]
                                db[curClass] = {"methods" : {}}
                                db[curClass]["comment"] = " ".join(lineMsg[3:])
                    elif "//" in line:
                        # Comment line; kept in prevLine so the following
                        # method line can pick it up as its description.
                        pass
                    elif lineMsg[0] == "":
                        # Blank line - nothing to record.
                        pass
                    else:
                        # Method line of the form "<return> <name>(<args>) ..."
                        if len(lineMsg) > 1:
                            method = {
                                "return" : lineMsg[0],
                                "args" : []
                            }
                            if "()" in lineMsg[1]:
                                # No-arg method: strip the "()" suffix.
                                methodName = lineMsg[1][:-4]
                            else:
                                # Strip the trailing "(" and collect args
                                # until the closing ")" token.
                                methodName = lineMsg[1][:-1]
                                i = 2
                                while lineMsg[i].rstrip() != ")":
                                    method["args"].append(lineMsg[i].rstrip(","))
                                    i = i + 1
                            # Attach the preceding //-comment as the description.
                            commentStart = prevLine.find("//")
                            if commentStart > -1:  # meaning it exists
                                method["comment"] = prevLine[commentStart+3:]
                            print("Class: "+curClass)
                            print("Method: "+methodName)
                            db[curClass]["methods"][methodName] = method
                    prevLine = line
                self.db = db
            except:
                print(traceback.format_exc())
class ModDota_Api_HTMLCompiler:
    def __init__(self):
        """Load the HTML doc templates and the community docs JSON.

        self.init becomes True only if all templates loaded; self.community
        becomes True only if docdb.json parsed successfully.
        """
        self.init = False
        self.community = False
        self.communityDocs = {}
        try:
            self.templates = {}
            with open("commands/ModDota/doc_template_arg.txt", "r") as f:
                self.templates["arg"] = f.read()
            with open("commands/ModDota/doc_template_class.txt", "r") as f:
                self.templates["class"] = f.read()
            with open("commands/ModDota/doc_template_class-tableofcontents.txt", "r") as f:
                self.templates["class-tableofcontents"] = f.read()
            with open("commands/ModDota/doc_template_function.txt", "r") as f:
                self.templates["function"] = f.read()
            with open("commands/ModDota/doc_template_index.txt", "r") as f:
                self.templates["index"] = f.read()
            with open("commands/ModDota/doc_template_tableofcontents.txt", "r") as f:
                self.templates["tableofcontents"] = f.read()
            self.init = True
        except:
            # Templates missing: leave self.init False so rendering is skipped.
            pass
        try:
            with open("commands/ModDota/docdb.json", "r") as f:
                self.communityDocs = simplejson.load(f)
            print(self.communityDocs)
            print("community docs loaded")
            self.community = True
        except:
            # No community docs available; keep self.community False.
            print("OHNO it didn't work, ah well")
            print(traceback.format_exc())
def GenerateClass(self, className, description=""):
if self.community:
if className not in self.communityDocs:
self.communityDocs[className] = {
"description" : description,
"funcs" : {}
}
print("Generated class "+className)
def GenerateFunction(self, className, funcName, description=""):
if self.community:
if className not in self.communityDocs:
self.GenerateClass(className)
if funcName not in self.communityDocs[className]:
self.communityDocs[className]["funcs"][funcName] = {
"description" : description,
"args" : []
}
print("Generated function "+funcName)
def RenderHTML(self, db):
if self.init:
print("Compiling")
try:
alphabetDict = list(string.ascii_lowercase)
alphabetDict.insert(0, "death and decay")
tableOfContents = ""
for Class in sorted(db):
tableOfContents = tableOfContents + self.templates["tableofcontents"].format(name=(Class))
contents=""
classInfo = []
for Class, ClassInfo in sorted(db.iteritems()):
print(Class)
communityClass = False
#print("Do we have this class defined")
if self.community:
if Class in self.communityDocs:
communityClass = True
#print("Ok, in theory we should be fine")
classTableOfContents = ""
functionText = ""
for Func, FuncInfo in sorted(ClassInfo["methods"].iteritems()):
print(Func)
communityFunc = False
if communityClass == True:
#print("Do we have this func defined")
if Func in self.communityDocs[Class]["funcs"]:
communityFunc = True
#print("Ok, in theory we should be double fine")
#print(FuncInfo)
if "comment" not in FuncInfo:
FuncInfo["comment"] = "Valve didn't give us a description, sucks.."
communityArg = False
if communityClass and communityFunc:
if len(self.communityDocs[Class]["funcs"][Func]["args"]) > 0:
communityArg = True
i = 0
newArgs = []
for arg in FuncInfo["args"]:
#print(FuncInfo)
i = i + 1
#print(communityClass, communityFunc)
if communityArg:
if len(self.communityDocs[Class]["funcs"][Func]["args"]) >= i:
#print(self.communityDocs[Class]["funcs"][Func]["args"][i-1])
if self.communityDocs[Class]["funcs"][Func]["args"][i-1]:
newArgs.append(self.templates["arg"].format(type=(arg), name=(self.communityDocs[Class]["funcs"][Func]["args"][i-1]) ))
continue
newArgs.append(self.templates["arg"].format(type=(arg), name=(alphabetDict[i]) ))
classTableOfContents = classTableOfContents + self.templates["class-tableofcontents"].format(className=(Class), func=(Func), args=(", ".join(newArgs)), entry=(FuncInfo))
communityDescText = " "
if communityFunc:
communityDescText = self.communityDocs[Class]["funcs"][Func]["description"]
functionText = functionText + self.templates["function"].format(className=(Class), func=(Func), entry=(FuncInfo), args=(", ".join(newArgs)), communityDesc=(communityDescText), arg=("This is coming soon!"))
del Func, FuncInfo
if "base" not in ClassInfo:
ClassInfo["base"] = "n/a"
desc = " "
if communityClass:
desc = self.communityDocs[Class]["description"]
classInfo.append(self.templates["class"].format(name=(Class),entry=ClassInfo, classtableofcontents=(classTableOfContents), function=(functionText), description=(desc) ))
del Class, ClassInfo
contents = "<hr class=\"line-class-seperate\">\r\n ".join(classInfo)
with open("commands/ModDota/docSite/index.html", "w") as f:
f.write(self.templates["index"].format(tableofcontents=(tableOfContents), contents=(contents)))
except:
print("OHNO it didn't work, ah well")
print(traceback.format_exc())
try:
if self.community:
with open("commands/ModDota/docdb.json", "w") as f:
simplejson.dump(self.communityDocs, fp = f, sort_keys=True, indent=4 * ' ', encoding = "utf-8")
except:
print("uhh, wat")
# Module-level singletons shared by every command handler below: the VScript
# dump parser and the HTML documentation compiler.
modDotaAPI = ModDotaAPI()
modhtml = ModDota_Api_HTMLCompiler()
# A chat message must start with this prefix to be treated as a bot command.
mainPrefix = "@"
#called when the bot has loaded everything and is connected
def __initialize__(self, Startup):
    """Plugin entry point: (re)parse the VScript dump, persist it as JSON,
    and (re)register the chat-event handler."""
    modDotaAPI.ReadDump()
    # Persist the freshly parsed API database for external consumers.
    with open("commands/ModDota/vscript-dump.json", "w") as f:
        f.write(simplejson.dumps(modDotaAPI.db, sort_keys=True, indent=4 * ' '))
    # Drop any stale handler left over from a previous (re)load first.
    if self.events["chat"].doesExist("ModDota_Docs"):
        self.events["chat"].removeEvent("ModDota_Docs")
    self.events["chat"].addEvent("ModDota_Docs", onPrivmsg)
def onPrivmsg(self, channels, userdata, message, currChannel):
    """Chat-event handler: parse a prefixed command and dispatch it to the
    matching handler, enforcing the sender's rank."""
    # Ignore anything that does not start with the command prefix.
    if message[0:1] != mainPrefix:
        return
    params = message.split(" ")
    params[0] = params[0][1:]  # strip the prefix from the command word
    # Reply in the originating channel, or privately when there is none.
    channel = currChannel if currChannel else userdata["name"]
    # Permission level of the sender.
    rank = self.userGetRankNum(currChannel, userdata["name"])
    # Command lookup/dispatch may raise KeyError; treat that as "no such
    # command" rather than spamming the channel about a typo'd prefix.
    try:
        # if userdata["name"] in banList:
        #     self.sendNotice(userdata["name"], "You have been banned from using this command.")
        #     return
        cmd = False
        if params[0] in aliases:
            print("using alias")
            cmd = aliases[params[0]]
        elif params[0] in commands:
            print("using actual command")
            cmd = params[0]
        if not cmd:
            print("Failwhale typo")
        elif rank >= commands[cmd]["rank"]:
            # Authorized: invoke the handler.
            handler = commands[cmd]["function"]
            handler(self, userdata["name"], params[1:], channel, userdata, rank)
        else:
            self.sendNotice(userdata["name"], "You do not have permissions for this command!")
    except KeyError:
        print(traceback.format_exc())
#the command entry point from '=api" or something
def execute(self, name, params, channel, userdata, rank):
    """Chat command: substring-search the API database for methods and send
    each match with its (IRC colour-coded) signature."""
    msg = " ".join(params)
    methods = []
    output = channel
    # Case-insensitive substring match over every method name in the DB.
    for Class, ClassInfo in modDotaAPI.db.iteritems():
        for MethodName, MethodInfo in ClassInfo["methods"].iteritems():
            #print(MethodName)
            if msg.lower() in MethodName.lower():
                #MDAPI_logger.info("Found a method, "+MethodName)
                methods.append((Class, MethodName))
    # NOTE: no return here — the loop below is simply a no-op when empty.
    if len(methods) == 0:
        self.sendMessage(channel, "No results found.")
    if len(methods) > 5:
        # Too chatty for a channel: private-message the results instead,
        # and bail out entirely above 20 matches.
        if len(methods) > 20:
            self.sendMessage(channel, "Too many functions matched ("+str(len(methods))+"). Please refine your search.")
            return
        else:
            output = name
            self.sendMessage(channel, "Too many functions matched ("+str(len(methods))+"). replying privately.")
    # mIRC control codes: 0x02 toggles bold, 0x03 starts/ends a colour.
    colBold = chr(2)
    colBlue = chr(3)+"02"
    colEnd = chr(3)
    for method in methods:
        args = []
        msg = ""
        if len(modDotaAPI.db[method[0]]["methods"][method[1]]["args"]) > 0:
            if (modhtml.community
                and method[0] in modhtml.communityDocs
                and method[1] in modhtml.communityDocs[method[0]]["funcs"]
                and len(modhtml.communityDocs[method[0]]["funcs"][method[1]]["args"]) > 0):
                #args exist.
                # Pair each argument type with its community-supplied name
                # where one is available (False marks "no name").
                i=0
                for arg in modDotaAPI.db[method[0]]["methods"][method[1]]["args"]:
                    if len(modhtml.communityDocs[method[0]]["funcs"][method[1]]["args"]) > i:
                        if modhtml.communityDocs[method[0]]["funcs"][method[1]]["args"][i] != False:
                            args.append(colBlue+arg+colEnd +" "+ str(modhtml.communityDocs[method[0]]["funcs"][method[1]]["args"][i]))
                        else:
                            args.append(colBlue+arg+colEnd)
                    else:
                        args.append(colBlue+arg+colEnd)
                    i = i + 1
                msg = ", ".join(args)
            else:
                # No community names: just colour the raw argument types.
                sep = colEnd + ", " + colBlue
                msg = " " + colBlue + sep.join(modDotaAPI.db[method[0]]["methods"][method[1]]["args"]) + colEnd + " "
        comment = ""
        if "comment" in modDotaAPI.db[method[0]]["methods"][method[1]]:
            comment = " -- "+modDotaAPI.db[method[0]]["methods"][method[1]]["comment"]
        self.sendMessage(output, "["+method[0]+"] "+modDotaAPI.db[method[0]]["methods"][method[1]]["return"] + " " + method[1] + colBold+"(" + colBold + msg + colBold+")" + colBold + comment)
def command_class(self, name, params, channel, userdata, rank):
    """Chat command: define a class in the community docs, or update the
    description of one that already exists, then re-render the site."""
    if len(params) <= 1:
        self.sendMessage(channel, "ERROR: Not enough arguments")
        return
    if not modhtml.community:
        self.sendMessage(channel, "Sorry, community documentation is out of service at the moment")
        return
    # todo: check vscript dump if this class even exists before creating
    className = params[0]
    desc = " ".join(params[1:])
    if className in modhtml.communityDocs:
        print("changing description rather than defining class")
        modhtml.communityDocs[className]["description"] = desc
        self.sendMessage(channel, "Set the description of "+className+" and set it to "+desc)
    else:
        modhtml.GenerateClass(className, description=desc)
        self.sendMessage(channel, "Defined "+className+", and set the description to "+desc)
    modhtml.RenderHTML(modDotaAPI.db)
def command_function(self, name, params, channel, userdata, rank):
if len(params) > 2:
#todo: check vscript dump if this function even exists before creating
if modhtml.community:
if params[1] in modhtml.communityDocs[params[0]]["funcs"]:
| |
# Repository: EricCousineau-TRI/deformable-ravens — file: load.py
#!/usr/bin/env python
"""Strictly for loading agents to inspect. Based on `main.py`."""
import datetime
import os
import time
import argparse
import cv2
import pickle
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from os.path import join
from ravens import Dataset, Environment, cameras, agents, tasks
from ravens import utils as U
# Of critical importance! See the top of main.py for details.
MAX_ORDER = 4
# See Task().
PIXEL_SIZE = 0.003125
CAMERA_CONFIG = cameras.RealSenseD415.CONFIG
BOUNDS = np.array([[0.25, 0.75], [-0.5, 0.5], [0, 0.28]])
def goal_similarity(obs, goal):
    """Measure how close the current image is to the goal (goal-conditioning).

    Both `obs` and `goal` carry 'color'/'depth' keys with 3 camera views,
    as in rollout(); `obs` is a list and `goal` a np.array. Reports L2 and
    SSIM for now; use the task reward for pose-based metrics.
    """
    # Requires pip install scikit-image
    from skimage.metrics import structural_similarity
    fused_obs, _ = get_heightmap(obs=obs)
    fused_goal, _ = get_heightmap(obs=goal)
    l2_dist = np.linalg.norm(fused_obs - fused_goal) / np.prod(fused_obs.shape)
    ssim_val = structural_similarity(fused_obs, fused_goal, multichannel=True)
    return {'L2': round(l2_dist, 4), 'SSIM': round(ssim_val, 4)}
def get_heightmap(obs):
    """Reconstruct orthographic heightmaps with segmentation masks.

    `obs` may be a current or goal observation; either works. Copied
    pattern from transporter.py (also in regression.py, task.py, dummy.py,
    dataset.py).
    """
    heightmaps, colormaps = U.reconstruct_heightmaps(
        obs['color'], obs['depth'], CAMERA_CONFIG, BOUNDS, PIXEL_SIZE)
    colormaps = np.float32(colormaps)
    heightmaps = np.float32(heightmaps)
    # Fuse the camera views: average colors over the views that saw each
    # pixel (avoid division by zero where no view did).
    seen = np.sum(colormaps, axis=3) > 0
    counts = np.sum(seen, axis=0)
    counts[counts == 0] = 1
    fused_color = np.sum(colormaps, axis=0) / counts[..., None]
    fused_color = np.uint8(np.round(fused_color))
    fused_height = np.max(heightmaps, axis=0)
    return fused_color, fused_height
def load(path, iepisode, field):
    """Load the `field` pickle of episode `iepisode` from `path`.

    Adapted from `dataset.py` so we can sample goal images: locates the
    file whose name contains the zero-padded episode index, so we don't
    need to know the episode length in advance.

    Raises AssertionError unless exactly one file matches the index.
    """
    field_path = os.path.join(path, field)
    data_list = [os.path.join(field_path, x) for x in os.listdir(field_path)]
    fname = [x for x in data_list if f'{iepisode:06d}' in x]
    assert len(fname) == 1, fname
    # Use a context manager so the handle is closed promptly; the original
    # `pickle.load(open(...))` leaked the file object until GC.
    with open(fname[0], 'rb') as f:
        return pickle.load(f)
def debug_time_step(t, epidx, obs, act, extras, goal=None):
    """Save images and other stuff from time `t` in episode `epidx`.

    Writes PNGs of the per-camera and fused color images (plus the goal
    images at t == 1 when `goal` is given), the attention input/heatmap,
    and one transport heatmap per rotation, all into the `tmp/` directory.
    """
    pth = 'tmp'
    tt = str(t).zfill(2)
    # Convert from BGR to RGB to match what we see in the GUI.
    def save(fname, c_img):
        cv2.imwrite(fname, img=cv2.cvtColor(c_img, cv2.COLOR_BGR2RGB))
    # Save current color images from camera angles and the fused version.
    for img_idx, c_img in enumerate(obs['color']):
        fname = join(pth, f'ep_{epidx}_t{tt}_cimg_{img_idx}.png')
        save(fname, c_img)
    colormap_o, _ = get_heightmap(obs=obs)
    fname = join(pth, f'ep_{epidx}_t{tt}_cimg_fused.png')
    save(fname, colormap_o)
    # (If applicable) save the goal color images.
    if (goal is not None) and t == 1:
        for img_idx, c_img in enumerate(goal['color']):
            fname = join(pth, f'ep_{epidx}_t{tt}_cimg_{img_idx}_goal.png')
            save(fname, c_img)
        colormap_g, _ = get_heightmap(obs=goal)
        fname = join(pth, f'ep_{epidx}_t{tt}_cimg_fused_goal.png')
        save(fname, colormap_g)
    # Print the action.
    pose0 = act['params']['pose0']
    pose1 = act['params']['pose1']
    print(f"  pose0, pose1: {U.round_pose(pose0)}, {U.round_pose(pose1)}")
    # Attention. (Well, attn_input.png is also input to Transport...)
    fname1 = join(pth, f'ep_{epidx}_t{tt}_attn_input.png')
    fname2 = join(pth, f'ep_{epidx}_t{tt}_attn_heat_bgr.png')
    cv2.imwrite(fname1, extras['input_c'])
    cv2.imwrite(fname2, extras['attn_heat_bgr'])
    # Transport heatmaps, one per rotation; mark the argmax rotation.
    for idx, tran_heat in enumerate(extras['tran_heat_bgr']):
        idxstr = str(idx).zfill(2)
        fname = join(pth, f'ep_{epidx}_t{tt}_tran_rot_{idxstr}.png')
        if idx == extras['tran_rot_argmax']:
            fname = fname.replace('.png', '_rot_chosen.png')
        cv2.imwrite(fname, tran_heat)
def rollout(agent, env, task, goal_conditioned, args, num_finished, debug=False):
    """Standard gym environment rollout.
    Adding more debugging options (enable with debug=True), such as printing
    the pose and saving the images and heatmaps. We can also run `dataset.py`
    and see goal images in the `goals_out` directory.
    :goal_conditioned: a boolean to check if we have goal-conditioning.
    :num_finished: to track how many episodes we have finished. Ignores any
        episodes drawn and then discarded due to initial states that were
        already done. Also used to sample the goal states for
        goal-conditioned policies. We have a fixed number of testing episodes
        (characterized by goal images), so `num_finished` is the identifier.
    Returns `t` to track episode length. Update (21 Aug 2020): also returns
    last_stuff=(obs,info), consistent with main.py and generate_goals.py.
    (13 Oct 2020): fixing so that we will always append stuff in the episode
    list for gt_state agents. The problem is that the first time step (start_t=1)
    wasn't saving because len(obs) = 0, but in gt_state we actually want to save.
    Otherwise, a length 1 episode will have len(episode)==0 later. It's not a huge
    deal because we still save the final info correctly, so that we can report
    correct stats, but it helps to have the initial info because that gives us the
    deltas over the starting state.
    """
    if debug:
        if not os.path.exists('tmp/'):
            os.makedirs('tmp/')
        print('')
    # gt_state agents start at t=1 so the initial info is saved (see docstring).
    start_t = 0
    if args.agent in ['gt_state', 'gt_state_2_step']:
        start_t = 1
    episode = []
    total_reward = 0
    # Before task.reset(), need goal info for goal episode at idx `num_finished`.
    if goal_conditioned:
        task.goal_cond_testing = True
        path = os.path.join('goals', args.task)
        goal = {}
        goal['color'] = load(path, num_finished, 'last_color')
        goal['depth'] = load(path, num_finished, 'last_depth')
        goal['info'] = load(path, num_finished, 'last_info')
    goal_imgs = goal if goal_conditioned else None
    # Reset env and call task.reset(), len(obs)=0 but info will have stuff for gt_state.
    if goal_conditioned:
        obs = env.reset(task, last_info=goal['info'])
    else:
        obs = env.reset(task)
    info = env.info
    for t in range(start_t, task.max_steps):
        # The debug branch additionally returns heatmap extras for saving.
        if debug and t > 0:
            act, extras = agent.act(obs, info, goal=goal_imgs, debug_imgs=True)
        else:
            act = agent.act(obs, info, goal=goal_imgs)
        # Optional debugging to save images, etc. Do before we get new obs.
        if debug and 'params' in act:
            debug_time_step(t, num_finished, obs, act, extras, goal=goal_imgs)
        # (13 Oct 2020) Ah, if gt_state, we won't save at start_t=1, so let's fix that!
        if (len(obs) > 0 and act['primitive']) or (args.agent in ['gt_state', 'gt_state_2_step']):
            episode.append((act, info)) # don't save obs
        (obs, reward, done, info) = env.step(act)
        # If goal-conditioning, additionally compute image-based metrics.
        if goal_conditioned and ('color' in obs and 'depth' in obs):
            info['image_metrics'] = goal_similarity(obs, goal_imgs)
        else:
            info['image_metrics'] = None
        if debug:
            print('  {}/{}, rew: {:0.3f}, len(epis): {}, act: {}, info: {}'.format(t,
                    task.max_steps, reward, len(episode), act['primitive'], info['extras']))
            if goal_conditioned:
                print('  goal-conditioning image metrics: {}'.format(info['image_metrics']))
        total_reward += reward
        last_obs_info = (obs, info)
        if done:
            break
    return total_reward, episode, t, last_obs_info
def is_goal_conditioned(args):
    """Return True when the task is one of the goal-based tasks.

    Be careful with checking this condition; see `generate_goals.py`. As an
    extra safety measure, a goal task also requires a 'goal' (or gt_state)
    agent name.
    """
    goal_tasks = ('insertion-goal', 'cable-shape-notarget', 'cable-line-notarget',
                  'cloth-flat-notarget', 'bag-color-goal')
    if args.task not in goal_tasks:
        return False
    assert 'goal' in args.agent or 'gt_state' in args.agent, \
        'Agent should be a goal-based agent, or gt_state agent.'
    return True
def ignore_this_demo(args, reward, t, last_extras):
    """Decide whether a demonstration should be filtered out.

    A demo is ignored when t == 0 (the initial state was already a success)
    or when the episode exited gracefully — for the bag-items tasks this can
    mean no item was visible at the start.
    """
    if 'exit_gracefully' in last_extras:
        assert last_extras['exit_gracefully']
        return True
    return t == 0
if __name__ == '__main__':
# Parse command line arguments.
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default='0')
parser.add_argument('--disp', action='store_true')
parser.add_argument('--task', default='hanoi')
parser.add_argument('--agent', default='transporter')
parser.add_argument('--num_demos', default=1000, type=int)
parser.add_argument('--train_run', default=0, type=int)
parser.add_argument('--num_test_eps', default=20, type=int)
parser.add_argument('--num_rots', default=24, type=int,
help='Transporter rotations used from the trained model, usually 24')
parser.add_argument('--num_rots_inf', default=24, type=int,
help='Transporter rotations we want FOR INFERENCE time; it can be 1')
parser.add_argument('--hz', default=240.0, type=float)
parser.add_argument('--crop_bef_q', default=0, type=int, help='CoRL paper used 1')
parser.add_argument('--gpu_mem_limit', default=None)
parser.add_argument('--subsamp_g', action='store_true')
args = parser.parse_args()
# Configure which GPU to use.
cfg = tf.config.experimental
gpus = cfg.list_physical_devices('GPU')
if len(gpus) == 0:
print('No GPUs detected. Running with CPU.')
else:
cfg.set_visible_devices(gpus[int(args.gpu)], 'GPU')
# Configure how much GPU to use.
if args.gpu_mem_limit is not None:
MEM_LIMIT = int(1024 * float(args.gpu_mem_limit))
print(args.gpu_mem_limit)
dev_cfg = [cfg.VirtualDeviceConfiguration(memory_limit=MEM_LIMIT)]
cfg.set_virtual_device_configuration(gpus[0], dev_cfg)
# Initialize task, set to 'test,' but I think this only matters for kitting.
task = tasks.names[args.task]()
task.mode = 'test'
# Evaluate on saved snapshots. Go backwards to get better results first.
snapshot_itrs = [i*2000 for i in range(1,10+1)] # Do 10 snapshots to save on compute.
snapshot_itrs = snapshot_itrs[::-1]
if not os.path.exists('test_results'):
os.makedirs('test_results')
| |
# File: src/decifer/__main__.py
"""
decifer.py
author: <NAME>
date: 2020-05-21
"""
import sys, os
import warnings
import datetime
import traceback
import multiprocessing as mp
import random as rand
from collections import defaultdict
from copy import deepcopy
from multiprocessing import Lock, Value, Pool, Manager
import numpy as np
import math
from bisect import bisect_left
# decifer
from decifer.parse_args import args
from decifer.fileio import write_results, write_results_CIs, read_in_state_trees
from decifer.new_coordinate_ascent import coordinate_descent, objective
from decifer.mutation import create_mutations
from decifer.process_input import PURITY, MUTATION_DF
def main():
    """Entry point: log arguments, build mutation objects, run model
    selection (binary or iterative), and optionally dump the per-iteration
    objective record to record.log.tsv."""
    sys.stderr.write('\n'.join(['Arguments:'] + ['\t{} : {}'.format(a, args[a]) for a in args]) + '\n')
    # create dictionary of sample indices and labels for printing later
    sample_ids = { int(i[0]) : i[1] for i in zip(MUTATION_DF['#sample_index'].unique(), MUTATION_DF['sample_label'].unique()) }
    for i in sample_ids:
        print(i, sample_ids[i])
    num_samples = len(sample_ids)
    # Clamp the cluster-count bounds so there is room for the fixed clusters.
    if args['mink'] < 2 + num_samples:
        args['mink'] = 2 + num_samples
        sys.stderr.write('## The minimum number of clusters has been increased to {} to account for fixed clusters!\n'.format(args['mink']))
    if args['maxk'] < args['mink']:
        args['maxk'] = args['mink']
        sys.stderr.write('## The maximum number of clusters has been increased to {} to be higher than the minimum!\n'.format(args['maxk']))
    # state_trees dict: keys are potential CN observed CN states (x,y), e.g. ((1, 0), (1, 1), (0, 0))
    # values are lists of lists, where each list contains all possible genotypes (x,y,m), e.g.
    # [(1, 0, 0), (0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 0, 0)]
    # these lists are always even, and each pair (i,i+1) is an edge in the state tree
    state_trees = read_in_state_trees(args['statetrees'])
    # store info from MUTATION_DF pd.DataFrame in mutations list, containing Mutation objects as elements
    # infer purity from SNV data, but overwrite purity dict if purity file provided (next line)
    mutations = create_mutations(MUTATION_DF, state_trees, not args['ccf'])
    if args['record']:
        # Manager-backed list so worker processes can append records safely.
        manager = mp.Manager()
        record = manager.list()
    if not args['iterative']:
        print("Using binary-search model selection")
        run_coordinator_binary(mutations, num_samples, PURITY, args, record if args['record'] else None)
    else:
        print("Using iterative model selection")
        run_coordinator_iterative(mutations, sample_ids, num_samples, PURITY, args, record if args['record'] else None)
    if args['record']:
        with open('record.log.tsv', 'w') as o:
            o.write('#NUM_CLUSTERS\tRESTART\tSEED\tITERATION\tOBJECTIVE\n')
            for r in record:
                o.write('{}\t{}\t{}\t{}\t{}\n'.format(r[0], r[1], r[2], r[3], r[4]))
def run_coordinator_iterative(mutations, sample_ids, num_samples, PURITY, args, record):
    """Run coordinate descent for every (restart, k) pair, select the best k
    via an elbow criterion on the objective, compute confidence intervals,
    and write the result files."""
    mink, maxk, maxit, prefix, restarts, ubleft, J = unpck(args)
    jobs = [(x, k, np.random.randint(low=0, high=2**10)) for x in range(restarts) for k in range(mink, maxk+1)]
    # run in single-thread mode for development/debugging
    if args['debug']:
        shared = defaultdict(dict)
        # make objects global
        init_descent(mutations, num_samples, maxit, shared, record, args['betabinomial'], PURITY)
        for job in jobs:
            run_descent(job)
    else:
        manager, shared = setup_shared()
        initargs = (mutations, num_samples, maxit, shared, record, args['betabinomial'], PURITY)
        pool = Pool(processes=min(J, len(jobs)), initializer=init_descent, initargs=initargs)
        bar = ProgressBar(total=len(jobs), length=30, verbose=False, lock=Lock(), counter=Value('i', 0))
        bar.progress(advance=False, msg="Started")
        report = (lambda r : bar.progress(advance=True, msg="Completed {} for k={} [Iterations: {}]".format(r[0], r[1], r[3])))
        list(map(report, pool.imap_unordered(run_descent, jobs)))
    # best[cluster number k] = min objective across runs/restarts
    best = {k : min(list(filter(lambda j : j[1] == k, jobs)), key=(lambda j : shared['objs'][j])) for k in range(mink, maxk+1)}
    #ubleft = .25 * len(mutations) * num_samples * 10
    objs = {k : shared['objs'][best[k]] for k in best}
    # Enforce monotonically non-increasing objective in k: a larger k should
    # never be worse than a smaller one, so carry the better solution forward.
    for k in range(mink+1, maxk+1):
        if objs[k - 1] < objs[k]:
            best[k] = best[k - 1]
            objs[k] = objs[k - 1]
    # Elbow score: relative improvement gained at k minus that gained at k+1
    # (chk guards against division by a zero objective).
    chk = (lambda v : v if v != 0.0 else 0.01)
    left = (lambda k : min((objs[k - 1] - objs[k]) / abs(chk(objs[k - 1])), ubleft) if k > mink else ubleft)
    right = (lambda k : (objs[k] - objs[k+1]) / abs(chk(objs[k])))
    elbow = {k : left(k) - right(k) for k in range(mink, maxk)}
    if mink < maxk:
        selected = max(range(mink, maxk), key=(lambda k : elbow[k]))
    else:
        selected = mink
    print('\t'.join(['#NUM_CLUSTERS', 'BEST_OBJ', 'ELBOW_SCORE', 'SELECTED']))
    for k in range(mink, maxk+1):
        print('\t'.join(map(str, [k, objs[k], elbow[k] if k < maxk else 'NaN', selected==k])))
    C, bmut, clus, conf, objs = map(lambda D : shared[D][best[selected]], ['C', 'bmut', 'clus', 'conf', 'objs'])
    # C is list of lists; rows are samples, columns are cluster IDs, values are CCFs
    #CIs = [[()]*len(C[i]) for i in range(len(C))] # list of lists to store CIs, same structure as C
    CIs, PDFs = compute_CIs_mp(set(clus), bmut, num_samples, args['betabinomial'], J, C)
    """
    # FOR TESTING
    #print_PDF(set(clus), bmut, num_samples, args['betabinomial'], C)
    #print_feasibleVAFs(set(clus), bmut, num_samples, args['betabinomial'], C)
    with open("pdfs.txt", 'w') as f:
        for c in set(clus):
            for s in range(num_samples):
                i = str(c) + "_" + str(s) + " "
                f.write(i)
                f.write(" ".join( list(map(str, PDFs[s][c]))))
                f.write("\n")
    with open("max_dcfs.txt", 'w') as f:
        for c in set(clus):
            for s in range(num_samples):
                print c, s, C[s][c], CIs[s][c][0], CIs[s][c][1]
                f.write(" ".join( list(map(str, [c, s, C[s][c], CIs[s][c][0], CIs[s][c][1]] ))))
                f.write("\n")
    """
    write_results_CIs(prefix, num_samples, clus, sample_ids, CIs)
    write_results(prefix, C, CIs, clus, conf, bmut, PURITY, args['betabinomial'], 'CCF' if args['ccf'] else 'DCF')
    #write_results_decifer_format(bmut, clus, prefix, selected, num_samples, C)
def print_feasibleVAFs(cluster_ids, muts, num_samples, bb, C):
    """Write, per cluster/sample pair, the intersection of the feasible CF
    intervals of the cluster's assigned mutations to feasibleVAFs.txt."""
    with open("feasibleVAFs.txt", 'w') as out:
        for cid in cluster_ids:
            members = [m for m in muts if m.assigned_cluster == cid]
            for s in range(0, num_samples):
                bounds = [m.assigned_config.cf_bounds(s) for m in members]
                lo = max(b[0] for b in bounds)
                hi = min(b[1] for b in bounds)
                out.write(" ".join(list(map(str, [cid, s, lo, hi]))))
                out.write("\n")
def print_PDF(cluster_ids, muts, num_samples, bb, C):
    """Dump a brute-force sampled, normalized PDF of CF values (1000 grid
    points on [0, 1]) for every cluster/sample pair to pdfs.txt."""
    with open("pdfs.txt", 'w') as out:
        for cid in cluster_ids:
            members = [m for m in muts if m.assigned_cluster == cid]
            for s in range(0, num_samples):
                max_dcf = C[s][cid]  # dcf value that maximizes posterior for this sample and cluster ID
                # Vestigial: delta was only used by the rescaled prob that is
                # commented out below; kept so the objective call still happens.
                delta = (-1*objective(max_dcf, members, s, bb))-2
                #prob = (lambda x: math.exp(-1*(x+delta))) # convert neg log to probability
                prob = (lambda x: math.exp(-1*(x))) # convert neg log to probability
                samples = [prob(objective(j, members, s, bb)) for j in np.linspace(0, 1, 1000)]
                total = np.sum(samples)
                normed = [x/total for x in samples]
                out.write(" ".join(list(map(str, [cid, s] + normed))))
                out.write("\n")
def compute_CIs_mp(cluster_ids, muts, num_samples, bb, J, C):
    """Compute confidence intervals (and sampled PDFs) for every
    cluster/sample pair in parallel via CI().

    Returns (CIs, PDFs), both indexed [sample][cluster] like C.
    """
    CIs = [[()]*len(C[i]) for i in range(len(C))] # list of lists to store CIs, same structure as C
    PDFs = [[()]*len(C[i]) for i in range(len(C))] # list of lists to store CIs, same structure as C
    num_tests = float(len(cluster_ids)*num_samples) # bonferroni correction for multiple hypothesis testing
    #C[s][i] is the putative mode of the pdf
    jobs = [(c, s, muts, num_tests, bb) for c in cluster_ids for s in range(num_samples)]
    pool = Pool(processes=min(J, len(jobs)))
    results = pool.imap_unordered(CI, jobs)
    pool.close()
    pool.join()
    # Scatter the unordered worker results back into the [sample][cluster] grids.
    for i in results:
        clust, samp, lower, upper, pdf = i[0], i[1], i[2], i[3], i[4]
        CIs[samp][clust] = (lower,upper)
        PDFs[samp][clust] = pdf
    return CIs, PDFs
def CI(job):
"""
Computes CIs for a sample-cluster combination
There have been two issues in dealing with the "objective" function to characterize the PDF of CCF/DCF values, needed for obtaining CIs.
1.) converting -log(unnormalized probability) from objective to an unnormalized probability produced prohibitively large numbers (-log numbers
are very negative such that e^-x huge), so we rescaled all values based on the most negative -log value observed from many samples from objective
(previously we tried using cluster centers, but these do not succeed in finding mode especially when PDF is extremely narrow/disjoint from
infeasible VAF values truncating distribution).
2.) across the support of the CCF/DCF PDF distribution, some sample-cluster combinations have 0 values everywhere except an extremely narrow range,
so trying to integrate approximately with functions like scipy.integrate.quad to get a normalization constant produces 0, because all sampled
points yield 0.
Thus, we have used a brute force method where we sample num_pts from the objective function, and use these to create and cahracterize the PDF of
the CCF/DCF distribution.
"""
c, s, muts, num_tests, bb = job # c is cluster, s is sample
mut = list(filter(lambda m : m.assigned_cluster == c, muts))
num_pts = 10000
grid = [objective(j, mut, s, bb) for j in np.linspace(0, 1, num_pts)]
min_log = min(grid)
delta = (-1*min_log)-2 # constant to make -log(pdf) values less negative
prob = (lambda x: math.exp(-1*(x+delta))) # convert -log(pdf) to unnormalized probability
total = sum([prob(x) for x in grid]) # unnormalized probabilities across support
pdf = [prob(x)/total for x in grid] # normalized probabilities across support
cdf = np.cumsum(pdf)
low_ci = 0.025/num_tests # divide the desired CI quantile by the number of tests, bonferonni correction
high_ci = 1 - low_ci
low_index = take_closest(cdf, low_ci)
high_index = | |
# Repository: jblackb1/triagelib
#!/usr/bin/env python
import json
import logging
import requests
from requests import Request, Session
# NOTE(review): ``global`` at module scope is a no-op; ``triagelog`` is
# presumably bound elsewhere (e.g. by the importing application) before the
# methods below use it — confirm.
global triagelog
class TriageError(Exception):
    """Base exception class for all Triage related errors.

    Also raised directly when a Triage error does not map to any of the
    more specific subclasses below.
    """
class TriageStateError(TriageError):
    """Raised when the session is not in a valid state for the requested method."""
    pass
class TriageClientError(TriageError):
    """Raised when the Triage box or an intermediate proxy responds with an HTTP 4xx status code."""
    pass
class TriageAuthError(TriageClientError):
    """Raised when the Triage box or a transparent proxy responds with HTTP 401 (authentication failure)."""
    pass
class TriageServerError(TriageError):
    """Raised when the Triage box or an intermediate proxy responds with an HTTP 5xx status code."""
    pass
class TriageFailureError(TriageError):
    """Raised when the Triage box returns a failure result for the last request."""
    pass
class TriageSession:
"""Class maintaining Triage connectivity through API."""
apiver = '1.0.0' # API version implemented/required
debug = False
def __init__(self, host, email, apikey, ssl=True, uag='Python CoFense Triage Client'):
"""
Set ssl to False if you like to connect using plain HTTP (Triage must not redirect to HTTPS),
Set uag to a desired User-Agent header value.
"""
if not isinstance(ssl, bool): raise TypeError(__name__ + u': ssl parameter must be True or False')
if not isinstance(uag, str): raise TypeError(__name__ + u': uag parameter must be a string')
# ------- Private class instance attributes -------
self._triagehost = host # Hostname of an Triage box to connect to
self._usessl = ssl # Use SSL encryption for Triage communication
self._userag = uag # User-Agent string to use in HTTP headers
self._email = email # Email address of user with API key
self._apikey = apikey # API key of user accessing Triage
self._headers = {'Accept': 'application/vnd.ve.v1.0+json',
'VE-API-Version': TriageSession.apiver,
'user-agent': self._userag,
'Authorization': 'Token token={0}:{1}'.format(self._email, self._apikey)}
    def _reqsend(self, prep, host=''):
        """Sends prepared request.
        Used by all other methods.
        Returns raw response.

        Raises TriageAuthError on HTTP 401, TriageClientError on other 4xx,
        TriageServerError on 5xx, and TriageError on any other non-200 status.
        """
        triagelog.info(u'------- Sending {0} request to host {1} -------'.format(prep.method,
                                                                                 host))
        s = Session()
        # NOTE(review): verify=False disables TLS certificate validation —
        # presumably intentional for appliances with self-signed certificates,
        # but worth confirming / making configurable.
        resp = s.send(prep, verify=False)
        triagelog.debug(u'server response: {0}'.format(resp.text))
        if resp.status_code == 401:
            triagelog.error(u'Could not authenticate to Triage box {0}.'.format(host))
            raise TriageAuthError(__name__ + u': Could not authenticate to Triage box {0}.'.format(host))
        if resp.status_code != 200:
            # NOTE(review): this message reports self._triagehost while the 401
            # branch above reports the `host` argument — confirm which is intended.
            desc = __name__ + u': Triage box {0} returned HTTP error {1}.'.format(self._triagehost,
                                                                                  resp.status_code)
            triagelog.error(desc)
            if 400 <= resp.status_code < 500:
                raise TriageClientError(desc)
            elif 500 <= resp.status_code < 600:
                raise TriageServerError(desc)
            else:
                raise TriageError(desc)
        return resp
def _parse(self, src):
"""Parses source text as json entity.
Returns result of parser(src).
Raises TriageError if resp is not json, or json does not contain the values expected by parser.
"""
triagelog.info(u'------- Parsing {0}-byte server response -------'.format(len(src)))
triagelog.debug(u'text to parse = "{0}"'.format(src))
try:
res = json.loads(src)
except ValueError as e:
triagelog.error(u'Triage box {0} did not return a valid json output.'.format(self._triagehost))
raise TriageError(
__name__ + u': Triage box {0} did not return a valid json output.'.format(self._triagehost))
triagelog.debug(u'json data = "{0}"'.format(res))
return res
# ===== Public class methods =====
def reports(self, match_priority=None, category_id=None, start_date=None,
tags=None, end_date=None, page=None, per_page=None, report_id=None):
"""Searches all Triage reports.
tags (list) - One or more tags of processed reports to filter on.
page (int) - The page number for the results.
per_page (int) - The number of results rendered per page. The maximum value is 50 results per page.
start_date (str) - The start date and time of the query. The default is six days ago.
end_date (str) - The end date and time of the query. The default is the current time.
report_id (int) - The Numeric ID of a Triage report.
category_id (int) - The category ID (1-5) for processed reports.
match_priority (int) - The highest match priority based on rule hits for the report.
"""
triagelog.info(u'------- Opening new session to server {0} with user {1} -------'.format(self._triagehost,
self._email))
if not isinstance(self._email, str):
raise TypeError(__name__ + u': email parameter must be a string')
if not isinstance(self._apikey, str):
raise TypeError(__name__ + u': apikey parameter must be a string')
if tags and not isinstance(tags, list):
raise TypeError(__name__ + u': tags parameter must be a list of strings')
if page and not isinstance(page, int):
raise TypeError(__name__ + u': page parameter must be an integer')
if per_page and not isinstance(per_page, int):
raise TypeError(__name__ + u': per_page parameter must be an integer')
if start_date and not isinstance(start_date, str):
raise TypeError(__name__ + u': start_date parameter must be a string')
if end_date and not isinstance(end_date, str):
raise TypeError(__name__ + u': end_date parameter must be a string')
if report_id and not isinstance(report_id, int):
raise TypeError(__name__ + u': report_id parameter must be an integer')
if category_id and not isinstance(category_id, int):
raise TypeError(__name__ + u': caterory_id parameter must be an integer')
if match_priority and not isinstance(match_priority, int):
raise TypeError(__name__ + u': match_priority parameter must be an integer')
params = {}
if tags: params["tags"] = tags
if page: params["page"] = page
if per_page: params["per_page"] = per_page
if start_date: params["start_date"] = start_date
if end_date: params["end_date"] = end_date
if match_priority: params["match_priority"] = match_priority
if category_id: params["category_id"] = category_id
if report_id:
url = 'http{0}://{1}/api/public/v1/reports/{2}'.format(('s' if self._usessl else ''),
self._triagehost,
report_id)
else:
url = 'http{0}://{1}/api/public/v1/reports'.format(('s' if self._usessl else ''),
self._triagehost)
headers = self._headers.copy()
headers.update({'Content-Type': 'application/json'})
triagelog.debug(u'url = "{0}", headers = "{1}", params = "{2}"'.format(url, headers, params))
req = Request('GET', url, headers=headers, params=params)
prep = req.prepare()
resp = self._reqsend(prep, self._triagehost)
return self._parse(resp.text)
def processed_reports(self, match_priority=None, category_id=None, start_date=None,
tags=None, end_date=None, page=None, per_page=None):
"""Searches all Triage reports.
tags (list) - One or more tags of processed reports to filter on.
page (int) - The page number for the results.
per_page (int) - The number of results rendered per page. The maximum value is 50 results per page.
start_date (str) - The start date and time of the query. The default is six days ago.
end_date (str) - The end date and time of the query. The default is the current time.
category_id (int) - The category ID (1-5) for processed reports.
match_priority (int) - The highest match priority based on rule hits for the report.
"""
triagelog.info(u'------- Opening new session to server {0} with user {1} -------'.format(self._triagehost,
self._email))
if not isinstance(self._email, str):
raise TypeError(__name__ + u': email parameter must be a string')
if not isinstance(self._apikey, str):
raise TypeError(__name__ + u': apikey parameter must be a string')
if tags and not isinstance(tags, list):
raise TypeError(__name__ + u': tags parameter must be a list of strings')
if page and not isinstance(page, int):
raise TypeError(__name__ + u': page parameter must be an integer')
if per_page and not isinstance(per_page, int):
raise TypeError(__name__ + u': per_page parameter must be an integer')
if start_date and not isinstance(start_date, str):
raise TypeError(__name__ + u': start_date parameter must be a string')
if end_date and not isinstance(end_date, str):
raise TypeError(__name__ + u': end_date parameter must be a string')
if category_id and not isinstance(category_id, int):
raise TypeError(__name__ + u': caterory_id parameter must be an integer')
if match_priority and not isinstance(match_priority, int):
raise TypeError(__name__ + u': match_priority parameter must be an integer')
params = {}
if tags: params["tags"] = tags
if page: params["page"] = page
if per_page: params["per_page"] = per_page
if start_date: params["start_date"] = start_date
if end_date: params["end_date"] = end_date
if category_id: params["category_id"] = category_id
if match_priority: params["match_priority"] = match_priority
url = 'http{0}://{1}/api/public/v1/processed_reports'.format(('s' if self._usessl else ''),
self._triagehost)
headers = self._headers.copy()
headers.update({'Content-Type': 'application/json'})
triagelog.debug(u'url = "{0}", headers = "{1}", params = "{2}"'.format(url, headers, params))
req = Request('GET', url, headers=headers, params=params)
prep = req.prepare()
resp = self._reqsend(prep, self._triagehost)
return self._parse(resp.text)
def inbox_reports(self, match_priority=None, start_date=None,
end_date=None, page=None, per_page=None):
"""Searches current Triage inbox for new email reports.
page (int) - The page number for the results.
per_page (int) - The number of results rendered per page. The maximum value is 50 results per page.
start_date (str) - The start date and time of the query. The default is six days ago.
end_date (str) - The end date and time of the query. The default is the current time.
match_priority (int) - The highest match priority based on rule hits for the report.
"""
triagelog.info(u'------- Opening new session to server {0} with user {1} -------'.format(self._triagehost,
self._email))
| |
# certomancer/integrations/animator.py
import logging
import os
from dataclasses import dataclass
from datetime import datetime
from io import BytesIO
from typing import Optional, Dict, List, Callable
import tzlocal
from asn1crypto import ocsp, tsp, pem
from werkzeug.wrappers import Request, Response
from dateutil.parser import parse as parse_dt
from werkzeug.routing import Map, Rule, BaseConverter, Submount
from werkzeug.exceptions import HTTPException, NotFound, InternalServerError, \
BadRequest
from certomancer.config_utils import ConfigurationError
from certomancer.crypto_utils import pyca_cryptography_present
from certomancer.registry import (
PKIArchitecture, ServiceLabel, CertLabel,
CertomancerObjectNotFoundError, CertomancerConfig, ArchLabel, EntityLabel,
AttributeCertificateSpec,
CertificateSpec, PluginLabel, PluginServiceRequestError
)
from certomancer.services import CertomancerServiceError
# Module-level logger and a one-time probe for pyca/cryptography availability
# (needed for the PKCS#12 download feature).
logger = logging.getLogger(__name__)
pfx_possible = pyca_cryptography_present()
def _now():
    """Return the current time as a timezone-aware datetime in the local timezone."""
    return datetime.now(tz=tzlocal.get_localzone())
# Request header that lets clients override the effective service time.
FAKE_TIME_HEADER = 'X-Certomancer-Fake-Time'
class PemExtensionConverter(BaseConverter):
    """URL converter matching a certificate/CRL file extension.

    Matches any of the configured extensions, optionally followed by
    ``.pem``; converts to/from a boolean indicating whether the
    PEM-armored variant was requested.
    """

    def __init__(self, map, exts=('crt', 'cert', 'cer')):
        if isinstance(exts, str):
            exts = (exts,)
        self.expected_exts = exts
        alternatives = '|'.join(exts)
        self.regex = r"({0})(\.pem)?".format(alternatives)
        super().__init__(map)

    def to_python(self, value):
        # True iff the URL asked for PEM armoring.
        return value.endswith('.pem')

    def to_url(self, value):
        suffix = '.pem' if value else ''
        return self.expected_exts[0] + suffix
@dataclass(frozen=True)
class AnimatorCertInfo:
    """Display information about one certificate spec (used by the web UI)."""
    spec: CertificateSpec
    pfx_available: bool
    subject_dn: str

    @staticmethod
    def gather_cert_info(pki_arch: PKIArchitecture):
        """Return a dict mapping each issuer to info on the certs it issued."""
        def _describe(spec: CertificateSpec):
            # PKCS#12 download requires pyca/cryptography and an available key.
            can_pfx = pfx_possible and pki_arch.is_subject_key_available(spec.label)
            return AnimatorCertInfo(
                spec=spec, pfx_available=can_pfx,
                subject_dn=pki_arch.entities[spec.subject].human_friendly
            )
        return {
            issuer: [_describe(spec) for spec in issued]
            for issuer, issued in pki_arch.enumerate_certs_by_issuer()
        }
@dataclass(frozen=True)
class AnimatorAttrCertInfo:
    """Display information about one attribute certificate spec."""
    spec: AttributeCertificateSpec
    holder_dn: str

    @staticmethod
    def gather_cert_info(pki_arch: PKIArchitecture):
        """Return a dict mapping each issuer to info on its attribute certs."""
        def _describe(spec: AttributeCertificateSpec):
            return AnimatorAttrCertInfo(
                spec=spec,
                holder_dn=pki_arch.entities[spec.holder.name].human_friendly
            )
        return {
            issuer: [_describe(spec) for spec in issued]
            for issuer, issued in pki_arch.enumerate_attr_certs_by_issuer()
        }
@dataclass(frozen=True)
class ArchServicesDescription:
    """Summary of the services exposed by one PKI architecture (for the index page)."""
    arch: ArchLabel
    # Service listings as returned by the architecture's service registry.
    tsa: list
    ocsp: list
    crl: list
    cert_repo: list
    attr_cert_repo: list
    certs_by_issuer: Dict[EntityLabel, List[AnimatorCertInfo]]
    # Annotation fixed: gather_cert_info() populates this with
    # AnimatorAttrCertInfo, not AnimatorCertInfo.
    attr_certs_by_issuer: Dict[EntityLabel, List[AnimatorAttrCertInfo]]
    @classmethod
    def compile(cls, pki_arch: PKIArchitecture):
        """Collect service listings and per-issuer cert info for pki_arch."""
        services = pki_arch.service_registry
        cert_info = AnimatorCertInfo.gather_cert_info(pki_arch)
        attr_cert_info = AnimatorAttrCertInfo.gather_cert_info(pki_arch)
        return ArchServicesDescription(
            pki_arch.arch_label,
            tsa=services.list_time_stamping_services(),
            ocsp=services.list_ocsp_responders(),
            crl=services.list_crl_repos(),
            cert_repo=services.list_cert_repos(),
            attr_cert_repo=services.list_attr_cert_repos(),
            certs_by_issuer=cert_info,
            attr_certs_by_issuer=attr_cert_info
        )
# URL prefix under which the web UI convenience endpoints are mounted.
WEB_UI_URL_PREFIX = '_certomancer'
def web_ui_rules():
    """Return the werkzeug URL rules for the (optional) web UI endpoints."""
    return [
        Rule('/', endpoint='index', methods=('GET',)),
        Submount("/" + WEB_UI_URL_PREFIX, [
            # convenience endpoint that serves certs without regard for
            # checking whether they belong to any particular (logical)
            # cert repo (these URLs aren't part of the "PKI API", for lack
            # of a better term)
            Rule('/any-cert/<arch>/<label>.<ext:use_pem>',
                 endpoint='any-cert', methods=('GET',)),
            Rule('/any-attr-cert/<arch>/<label>.attr.<ext:use_pem>',
                 endpoint='any-attr-cert', methods=('GET',)),
            Rule('/attr-certs-of/<arch>/<entity_label>-all.attr.cert.pem',
                 endpoint='attr-certs-of', methods=('GET',)),
            Rule('/cert-bundle/<arch>', endpoint='cert-bundle',
                 methods=('GET',)),
            Rule('/pfx-download/<arch>', endpoint='pfx-download',
                 methods=('POST',)),
        ])
    ]
def service_rules():
    """Return the werkzeug URL rules for the PKI service endpoints."""
    return [
        # OCSP responder pattern
        Rule('/<arch>/ocsp/<label>', endpoint='ocsp', methods=('POST',)),
        # Time stamping service pattern
        Rule('/<arch>/tsa/<label>', endpoint='tsa', methods=('POST',)),
        # Plugin endpoint pattern
        Rule('/<arch>/plugin/<plugin_label>/<label>', endpoint='plugin',
             methods=('POST',)),
        # latest CRL pattern
        Rule("/<arch>/crls/<label>/latest.<ext(exts='crl'):use_pem>",
             endpoint='crls', methods=('GET',), defaults={'crl_no': None}),
        # CRL archive pattern
        Rule("/<arch>/crls/<label>"
             "/archive-<int:crl_no>.<ext(exts='crl'):use_pem>",
             endpoint='crls', methods=('GET',)),
        # Cert repo authority pattern
        Rule('/<arch>/certs/<label>/ca.<ext:use_pem>',
             defaults={'cert_label': None}, endpoint='certs', methods=('GET',)),
        # Cert repo generic pattern
        Rule('/<arch>/certs/<label>/issued/<cert_label>.<ext:use_pem>',
             endpoint='certs', methods=('GET',)),
        # Attr cert repo authority pattern
        Rule('/<arch>/attr-certs/<label>/aa.<ext:use_pem>',
             defaults={'cert_label': None},
             endpoint='attr-certs', methods=('GET',)),
        # Attr cert repo generic pattern
        Rule("/<arch>/attr-certs/<label>/issued/<cert_label>.attr.<ext:use_pem>",
             endpoint='attr-certs', methods=('GET',)),
        Rule("/<arch>/attr-certs/<label>/by-holder/<entity_label>-all.attr.cert.pem",
             endpoint='attr-certs-by-holder', methods=('GET',)),
    ]
def gen_index(architectures):
    """Render the static index page listing all architectures and their services.

    Raises CertomancerServiceError if Jinja2 is not installed.
    """
    try:
        from jinja2 import Environment, PackageLoader
    except ImportError as e:  # pragma: nocover
        raise CertomancerServiceError(
            "Web UI requires Jinja2 to be installed"
        ) from e
    # the index is fixed from the moment the server is launched, so
    # just go ahead and render it
    jinja_env = Environment(
        loader=PackageLoader('certomancer.integrations', 'animator_templates'),
        autoescape=True
    )
    template = jinja_env.get_template('index.html')
    return template.render(
        pki_archs=[
            ArchServicesDescription.compile(arch) for arch in architectures
        ],
        pfx_possible=pfx_possible, web_ui_prefix=WEB_UI_URL_PREFIX
    )
class AnimatorArchStore:
    """Mapping wrapper turning unknown architecture lookups into HTTP 404."""

    def __init__(self, architectures: Dict[ArchLabel, PKIArchitecture]):
        self.architectures = architectures

    def __getitem__(self, arch: ArchLabel) -> PKIArchitecture:
        try:
            result = self.architectures[arch]
        except KeyError:
            # Surface missing architectures as a 404 to the web layer.
            raise NotFound()
        return result

    def __iter__(self):
        # Iterates over the architectures themselves, not their labels.
        return iter(self.architectures.values())
class Animator:
    """WSGI application exposing the services of one or more PKI architectures.

    Routes OCSP, TSA, CRL, cert-repo and plugin requests (plus an optional
    web UI) through a werkzeug URL map to the serve_* handler methods.
    """
    def __init__(self, architectures: AnimatorArchStore,
                 at_time: Optional[datetime] = None, with_web_ui=True,
                 allow_time_override=True):
        # Fixed clock for all services; None means "use the real time".
        self.fixed_time = at_time
        self.architectures = architectures
        self.with_web_ui = with_web_ui
        self.url_map = None
        # When True, clients may override the effective time per request via
        # the X-Certomancer-Fake-Time header (see at_time()).
        self.allow_time_override = allow_time_override
        self.url_map = Map(
            service_rules() + (web_ui_rules() if with_web_ui else []),
            converters={'ext': PemExtensionConverter}
        )
        # Maps routing endpoint names to bound handler methods.
        handlers: Dict[str, Callable] = {
            'ocsp': self.serve_ocsp_response,
            'tsa': self.serve_timestamp_response,
            'crls': self.serve_crl,
            'certs': self.serve_cert,
            'attr-certs': self.serve_attr_cert,
            'attr-certs-by-holder': self.serve_attr_certs_of_holder,
            'plugin': self.serve_plugin
        }
        if with_web_ui:
            # The index page is rendered once, up front.
            self.index_html = gen_index(iter(architectures))
            handlers.update({
                'any-cert': self.serve_any_cert,
                'any-attr-cert': self.serve_any_attr_cert,
                'attr-certs-of': self.serve_all_attr_certs_of_holder,
                'cert-bundle': self.serve_zip, 'pfx-download': self.serve_pfx
            })
        self._handlers = handlers
    def at_time(self, request):
        """Determine the effective time for a request.

        Priority: fake-time header (when overrides are allowed), then the
        fixed time configured at construction, then the current time.
        """
        fake_time = None
        if self.allow_time_override:
            fake_time = request.headers.get(FAKE_TIME_HEADER, type=parse_dt)
        return fake_time or self.fixed_time or _now()
    def serve_ocsp_response(self, request: Request, *, label: str, arch: str):
        """Build and return an OCSP response for the given responder label."""
        pki_arch = self.architectures[ArchLabel(arch)]
        ocsp_resp = pki_arch.service_registry.summon_responder(
            ServiceLabel(label), self.at_time(request)
        )
        data = request.stream.read()
        req: ocsp.OCSPRequest = ocsp.OCSPRequest.load(data)
        response = ocsp_resp.build_ocsp_response(req)
        return Response(response.dump(), mimetype='application/ocsp-response')
    def serve_timestamp_response(self, request: Request, *,
                                 label: str, arch: str):
        """Answer a time stamp request for the given TSA label."""
        pki_arch = self.architectures[ArchLabel(arch)]
        tsa = pki_arch.service_registry.summon_timestamper(
            ServiceLabel(label), self.at_time(request)
        )
        data = request.stream.read()
        req: tsp.TimeStampReq = tsp.TimeStampReq.load(data)
        response = tsa.request_tsa_response(req)
        return Response(response.dump(), mimetype='application/timestamp-reply')
    def serve_crl(self, request: Request, *,
                  label: ServiceLabel, arch: str, crl_no, use_pem):
        """Serve the latest CRL, or an archived one when crl_no is given."""
        pki_arch = self.architectures[ArchLabel(arch)]
        mime = 'application/x-pem-file' if use_pem else 'application/pkix-crl'
        if crl_no is not None:
            # Archived CRL, selected by number.
            crl = pki_arch.service_registry.get_crl(label, number=crl_no)
        else:
            # Latest CRL as of the effective request time.
            crl = pki_arch.service_registry.get_crl(
                label, self.at_time(request)
            )
        data = crl.dump()
        if use_pem:
            data = pem.armor('X509 CRL', data)
        return Response(data, mimetype=mime)
    def serve_any_cert(self, _request: Request, *,
                       arch: str, label: str, use_pem):
        """Serve any certificate by label, regardless of repo membership."""
        mime = 'application/x-pem-file' if use_pem else 'application/pkix-cert'
        pki_arch = self.architectures[ArchLabel(arch)]
        cert = pki_arch.get_cert(CertLabel(label))
        data = cert.dump()
        if use_pem:
            data = pem.armor('certificate', data)
        return Response(data, mimetype=mime)
    def serve_any_attr_cert(self, _request: Request, *,
                            arch: str, label: str, use_pem):
        """Serve any attribute certificate by label."""
        mime = (
            'application/x-pem-file'
            if use_pem else 'application/pkix-attr-cert'
        )
        pki_arch = self.architectures[ArchLabel(arch)]
        cert = pki_arch.get_attr_cert(CertLabel(label))
        data = cert.dump()
        if use_pem:
            data = pem.armor('attribute certificate', data)
        return Response(data, mimetype=mime)
    def serve_cert(self, _request: Request, *, label: str, arch: str,
                   cert_label: Optional[str], use_pem):
        """Serve a cert from a repo; cert_label=None selects the CA's own cert."""
        mime = 'application/x-pem-file' if use_pem else 'application/pkix-cert'
        pki_arch = self.architectures[ArchLabel(arch)]
        cert_label = CertLabel(cert_label) if cert_label is not None else None
        cert = pki_arch.service_registry.get_cert_from_repo(
            ServiceLabel(label), cert_label
        )
        if cert is None:
            raise NotFound()
        data = cert.dump()
        if use_pem:
            data = pem.armor('certificate', data)
        return Response(data, mimetype=mime)
    def serve_attr_cert(self, _request: Request, *, label: str, arch: str,
                        cert_label: Optional[str], use_pem):
        """Serve an attr cert from a repo; cert_label=None selects the AA's cert."""
        pki_arch = self.architectures[ArchLabel(arch)]
        svc_reg = pki_arch.service_registry
        svc_label = ServiceLabel(label)
        if cert_label is None:
            mime = (
                'application/x-pem-file' if use_pem else 'application/pkix-cert'
            )
            # retrieve the AA's certificate
            cert = pki_arch.get_cert(
                svc_reg.determine_repo_issuer_cert(
                    svc_reg.get_attr_cert_repo_info(svc_label),
                )
            )
        else:
            mime = (
                'application/x-pem-file'
                if use_pem else 'application/pkix-attr-cert'
            )
            cert = svc_reg.get_attr_cert_from_repo(
                svc_label, CertLabel(cert_label)
            )
        if cert is None:
            raise NotFound()
        data = cert.dump()
        if use_pem:
            # The PEM label depends on whether this is the AA cert or an attr cert.
            data = pem.armor(
                'attribute certificate'
                if cert_label is not None else 'certificate',
                data
            )
        return Response(data, mimetype=mime)
    def _build_attr_cert_payload(self, pki_arch, cert_specs):
        """Concatenate PEM dumps of the given attr cert specs; 404 when empty."""
        # TODO support non-PEM with p7b
        data_buf = BytesIO()
        for cert_spec in cert_specs:
            cert = pki_arch.get_attr_cert(cert_spec.label)
            data_buf.write(
                pem.armor(
                    'attribute certificate',
                    cert.dump()
                )
            )
        data = data_buf.getvalue()
        if not data:
            raise NotFound()
        return data
    def serve_all_attr_certs_of_holder(self, _request: Request, *,
                                       arch: str, entity_label: str):
        """Serve all attr certs held by an entity, across all issuers."""
        pki_arch = self.architectures[ArchLabel(arch)]
        cert_specs = pki_arch.enumerate_attr_certs_of_holder(
            EntityLabel(entity_label),
        )
        data = self._build_attr_cert_payload(pki_arch, cert_specs)
        return Response(data, mimetype='application/pkix-attr-cert')
    def serve_attr_certs_of_holder(self, _request: Request, *,
                                   label: str, arch: str, entity_label: str):
        """Serve one repo's attr certs for a holder (if publish_by_holder is set)."""
        pki_arch = self.architectures[ArchLabel(arch)]
        svc_label = ServiceLabel(label)
        info = pki_arch.service_registry.get_attr_cert_repo_info(svc_label)
        if not info.publish_by_holder:
            raise NotFound()
        cert_specs = pki_arch.enumerate_attr_certs_of_holder(
            EntityLabel(entity_label), info.for_issuer,
        )
        data = self._build_attr_cert_payload(pki_arch, cert_specs)
        return Response(data, mimetype='application/pkix-attr-cert')
    def serve_plugin(self, request: Request, plugin_label: str, *, label: str,
                     arch: str):
        """Forward the request body to a configured service plugin."""
        pki_arch = self.architectures[ArchLabel(arch)]
        services = pki_arch.service_registry
        plugin_label = PluginLabel(plugin_label)
        label = ServiceLabel(label)
        try:
            plugin_info = services.get_plugin_info(plugin_label, label)
        except ConfigurationError:
            raise NotFound()
        content_type = plugin_info.content_type
        req_content = request.stream.read()
        try:
            response_bytes = services.invoke_plugin(
                plugin_label, label, req_content, at_time=self.at_time(request)
            )
        except PluginServiceRequestError as e:
            raise BadRequest(e.user_msg)
        return Response(response_bytes, mimetype=content_type)
    def serve_zip(self, _request: Request, *, arch):
        """Serve a zip archive with all certificates of an architecture."""
        try:
            pki_arch = self.architectures[ArchLabel(arch)]
        except KeyError:
            # NOTE(review): AnimatorArchStore.__getitem__ raises NotFound, not
            # KeyError, so this branch looks unreachable — confirm.
            raise NotFound()
        zip_buffer = BytesIO()
        pki_arch.zip_certs(zip_buffer)
        zip_buffer.seek(0)
        data = zip_buffer.read()
        cd_header = f'attachment; filename="{arch}-certificates.zip"'
        return Response(data, mimetype='application/zip',
                        headers={'Content-Disposition': cd_header})
    def serve_pfx(self, request: Request, *, arch):
        """Build and serve a PKCS#12 bundle for the cert named in the POST form."""
        pki_arch = self.architectures[ArchLabel(arch)]
        try:
            cert = request.form['cert']
        except KeyError:
            raise BadRequest()
        cert = CertLabel(cert)
        if not (pyca_cryptography_present() and
                pki_arch.is_subject_key_available(cert)):
            raise NotFound()
        # An absent/empty passphrase yields an unencrypted bundle (password=None).
        pass_bytes = request.form.get('passphrase', '').encode('utf8')
        data = pki_arch.package_pkcs12(cert, password=pass_bytes or None)
        cd_header = f'attachment; filename="{cert}.pfx"'
        return Response(data, mimetype='application/x-pkcs12',
                        headers={'Content-Disposition': cd_header})
    def dispatch(self, request: Request):
        """Route a request to its handler; map service errors to HTTP errors."""
        adapter = self.url_map.bind_to_environ(request.environ)
        # TODO even though this is a testing tool, inserting some safeguards
        # to check request size etc. might be prudent
        try:
            endpoint, values = adapter.match()
            assert isinstance(endpoint, str)
            if endpoint == 'index' and self.with_web_ui:
                return Response(self.index_html, mimetype='text/html')
            handler = self._handlers[endpoint]
            return handler(request, **values)
        except CertomancerObjectNotFoundError as e:
            logger.info(e)
            return NotFound()
        except CertomancerServiceError as e:
            logger.error(e)
            return InternalServerError()
        except HTTPException as e:
            return e
    def __call__(self, environ, start_response):
        """WSGI entry point."""
        request = Request(environ)
        resp = self.dispatch(request)
        return resp(environ, start_response)
def _check_env_flag(env, flag_name):
val = env.get(flag_name, '0')
try:
return bool(int(val))
except ValueError:
return False
class LazyAnimator:
def __init__(self):
self.animator = None
def _load(self):
if self.animator is not None:
return
env = os.environ
cfg_file = env['CERTOMANCER_CONFIG']
key_dir = env['CERTOMANCER_KEY_DIR']
config_dir = env.get('CERTOMANCER_EXTRA_CONFIG_DIR', None)
with_web_ui = not _check_env_flag(env, 'CERTOMANCER_NO_WEB_UI')
extl_config = not _check_env_flag(env, 'CERTOMANCER_NO_EXTRA_CONFIG')
allow_time_override = not _check_env_flag(
env, 'CERTOMANCER_NO_TIME_OVERRIDE'
)
cfg = CertomancerConfig.from_file(
cfg_file, key_search_dir=key_dir, config_search_dir=config_dir,
allow_external_config=extl_config
)
self.animator = Animator(
AnimatorArchStore(cfg.pki_archs), | |
= np.zeros(vec[0].shape) + np.nan
y = np.zeros(vec[0].shape) + np.nan
x[w] = flip * vec[1][w] / vec[0][w]
y[w] = vec[2][w] / vec[0][w]
return x, y
vec2xy.__doc__ = SphericalProj.ang2xy.__doc__ % (name, name)
    def xy2vec(self, x, y=None, direct=False):
        # Inverse projection: map plane coordinates (x, y) back to a unit
        # vector.  x may also be a packed (x, y) pair when y is omitted.
        flip = self._flip
        if y is None:
            x, y = x
        x, y = np.asarray(x), np.asarray(y)
        # Normalization factor 1/|(1, x, y)|.
        rm1 = 1.0 / np.sqrt(1.0 + x ** 2 + y ** 2)
        vec = (rm1, flip * rm1 * x, rm1 * y)
        if not direct:
            # Undo the projector's rotation to return to the original frame.
            return self.rotator.I(vec)
        else:
            return vec
    xy2vec.__doc__ = SphericalProj.xy2vec.__doc__ % (name, name)
    def ang2xy(self, theta, phi=None, lonlat=False, direct=False):
        # Convert angles to a unit vector, then project onto the plane.
        vec = R.dir2vec(theta, phi, lonlat=lonlat)
        return self.vec2xy(vec, direct=direct)
    ang2xy.__doc__ = SphericalProj.ang2xy.__doc__ % (name, name)
    def xy2ang(self, x, y=None, lonlat=False, direct=False):
        # Inverse-project to a unit vector, then convert to angles.
        return R.vec2dir(self.xy2vec(x, y, direct=direct), lonlat=lonlat)
    xy2ang.__doc__ = SphericalProj.xy2ang.__doc__ % (name, name)
def xy2ij(self, x, y=None):
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for " "this projector"
)
xsize = int(self.arrayinfo["xsize"])
ysize = int(self.arrayinfo["ysize"])
reso = self.arrayinfo["reso"]
if y is None:
x, y = x
dx = reso / 60.0 * dtor
xc, yc = 0.5 * (xsize - 1), 0.5 * (ysize - 1)
j = np.around(xc + x / dx).astype(np.long)
i = np.around(yc + y / dx).astype(np.long)
return i, j
xy2ij.__doc__ = SphericalProj.xy2ij.__doc__ % (name, name)
    def ij2xy(self, i=None, j=None):
        # Convert array indices to plane coordinates.  With no arguments,
        # returns coordinate grids for the whole plane; i may also be a
        # packed (i, j) pair when j is omitted.
        if self.arrayinfo is None:
            raise TypeError(
                "No projection plane array information defined for " "this projector"
            )
        xsize = int(self.arrayinfo["xsize"])
        ysize = int(self.arrayinfo["ysize"])
        reso = self.arrayinfo["reso"]
        # Pixel size in radians (reso/60 degrees).
        dx = reso / 60.0 * dtor
        xc, yc = 0.5 * (xsize - 1), 0.5 * (ysize - 1)
        if i is None and j is None:
            idx = np.outer(np.ones(ysize), np.arange(xsize))
            x = (idx - xc) * dx  # astro= '-' sign, geo '+' sign
            idx = np.outer(np.arange(ysize), np.ones(xsize))
            y = (idx - yc) * dx  # (idx-yc) * dx
        elif i is not None and j is not None:
            x = (np.asarray(j) - xc) * dx
            y = (np.asarray(i) - yc) * dx  # (asarray(i)-yc) * dx
        elif i is not None and j is None:
            i, j = i
            x = (np.asarray(j) - xc) * dx
            y = (np.asarray(i) - yc) * dx  # (i-yc) * dx
        else:
            raise TypeError("Wrong parameters")
        return x, y
    ij2xy.__doc__ = SphericalProj.ij2xy.__doc__ % (name, name)
    def get_extent(self):
        """Return (left, right, bottom, top) plane coordinates of the corner pixels."""
        xsize, ysize = self.arrayinfo["xsize"], self.arrayinfo["ysize"]
        left, bottom = self.ij2xy(0, 0)
        right, top = self.ij2xy(ysize - 1, xsize - 1)
        return (left, right, bottom, top)
    def get_fov(self):
        """Return the field of view in radians: twice the angle between the
        projection axis and the direction of corner pixel (0, 0)."""
        vx, vy, vz = self.xy2vec(self.ij2xy(0, 0), direct=True)
        a = np.arccos(vx)
        return 2.0 * a
class MollweideProj(SphericalProj):
    """This class provides class methods for Mollweide projection."""

    name = "Mollweide"
    # Cached (latitude -> auxiliary angle) interpolation tables; filled
    # lazily, once per process, by __initialise_data().
    __molldata = []

    def __init__(self, rot=None, coord=None, xsize=800, **kwds):
        self.__initialise_data()
        super(MollweideProj, self).__init__(rot=rot, coord=coord, xsize=xsize, **kwds)

    def set_proj_plane_info(self, xsize):
        # Only xsize is configurable; the Mollweide plane uses ysize = xsize // 2
        # (see xy2ij/ij2xy below).
        super(MollweideProj, self).set_proj_plane_info(xsize=xsize)

    # Forward projection: unit vector -> plane coordinates.
    def vec2xy(self, vx, vy=None, vz=None, direct=False):
        if not direct:
            theta, phi = R.vec2dir(self.rotator(vx, vy, vz))
        else:
            theta, phi = R.vec2dir(vx, vy, vz)
        flip = self._flip
        X, Y = MollweideProj.__molldata
        # set phi in [-pi,pi]
        phi = (phi + pi) % (2 * pi) - pi
        lat = pi / 2.0 - theta
        # Interpolate the Mollweide auxiliary angle A for this latitude.
        A = MollweideProj.__lininterp(X, Y, lat)
        x = flip * 2.0 / pi * phi * np.cos(A)
        y = np.sin(A)
        return x, y

    vec2xy.__doc__ = SphericalProj.vec2xy.__doc__ % (name, name)

    # Inverse projection: plane coordinates -> unit vector.  Points outside
    # the projection ellipse x**2/4 + y**2 = 1 map to NaN.
    def xy2vec(self, x, y=None, direct=False):
        flip = self._flip
        if y is None:
            x, y = x
        mask = np.asarray(x) ** 2 / 4.0 + np.asarray(y) ** 2 > 1.0
        w = np.where(mask == False)
        if not mask.any():
            mask = np.ma.nomask
        if not hasattr(x, "__len__"):
            # Scalar input.
            if mask is not np.ma.nomask:
                return np.nan, np.nan, np.nan
            else:
                s = np.sqrt((1 - y) * (1 + y))
                a = np.arcsin(y)
                z = 2.0 / pi * (a + y * s)
                phi = flip * pi / 2.0 * x / np.maximum(s, 1.0e-6)
                sz = np.sqrt((1 - z) * (1 + z))
                vec = sz * np.cos(phi), sz * np.sin(phi), z
                if not direct:
                    return self.rotator.I(vec)
                else:
                    return vec
        else:
            # Array input: compute only the valid entries, leave the rest NaN.
            vec = (
                np.zeros(x.shape) + np.nan,
                np.zeros(x.shape) + np.nan,
                np.zeros(x.shape) + np.nan,
            )
            s = np.sqrt((1 - y[w]) * (1 + y[w]))
            a = np.arcsin(y[w])
            vec[2][w] = 2.0 / pi * (a + y[w] * s)
            phi = flip * pi / 2.0 * x[w] / np.maximum(s, 1.0e-6)
            sz = np.sqrt((1 - vec[2][w]) * (1 + vec[2][w]))
            vec[0][w] = sz * np.cos(phi)
            vec[1][w] = sz * np.sin(phi)
            if not direct:
                return self.rotator.I(vec)
            else:
                return vec

    xy2vec.__doc__ = SphericalProj.xy2vec.__doc__ % (name, name)

    def ang2xy(self, theta, phi=None, lonlat=False, direct=False):
        return self.vec2xy(R.dir2vec(theta, phi, lonlat=lonlat), direct=direct)

    ang2xy.__doc__ = SphericalProj.ang2xy.__doc__ % (name, name)

    def xy2ang(self, x, y=None, lonlat=False, direct=False):
        vec = self.xy2vec(x, y, direct=direct)
        return R.vec2dir(vec, lonlat=lonlat)

    xy2ang.__doc__ = SphericalProj.xy2ang.__doc__ % (name, name)

    # Plane coordinates -> array indices; out-of-ellipse points are masked
    # (array input) or NaN (scalar input).
    def xy2ij(self, x, y=None):
        if self.arrayinfo is None:
            raise TypeError(
                "No projection plane array information defined for " "this projector"
            )
        xsize = int(self.arrayinfo["xsize"])
        ysize = xsize // 2
        if y is None:
            x, y = x
        xc, yc = (xsize - 1.0) / 2.0, (ysize - 1.0) / 2.0
        if hasattr(x, "__len__"):
            # np.long was removed in NumPy 1.24; use np.int64 instead.
            j = np.around(x * xc / 2.0 + xc).astype(np.int64)
            i = np.around(yc + y * yc).astype(np.int64)
            mask = x ** 2 / 4.0 + y ** 2 > 1.0
            if not mask.any():
                mask = np.ma.nomask
            j = np.ma.array(j, mask=mask)
            i = np.ma.array(i, mask=mask)
        else:
            if x ** 2 / 4.0 + y ** 2 > 1.0:
                i, j = np.nan, np.nan
            else:
                j = np.around(x * xc / 2.0 + xc).astype(np.int64)
                i = np.around(yc + y * yc).astype(np.int64)
        return i, j

    xy2ij.__doc__ = SphericalProj.xy2ij.__doc__ % (name, name)

    # Array indices -> plane coordinates; with no arguments, returns masked
    # coordinate grids for the full plane.
    def ij2xy(self, i=None, j=None):
        if self.arrayinfo is None:
            raise TypeError(
                "No projection plane array information defined for " "this projector"
            )
        xsize = int(self.arrayinfo["xsize"])
        ysize = xsize // 2
        xc, yc = (xsize - 1.0) / 2.0, (ysize - 1.0) / 2.0
        if i is None and j is None:
            idx = np.outer(np.arange(ysize), np.ones(xsize))
            y = (idx - yc) / yc
            idx = np.outer(np.ones(ysize), np.arange(xsize))
            x = 2.0 * (idx - xc) / xc
            mask = x ** 2 / 4.0 + y ** 2 > 1.0
            if not mask.any():
                mask = np.ma.nomask
            x = np.ma.array(x, mask=mask)
            y = np.ma.array(y, mask=mask)
        elif i is not None and j is not None:
            y = (np.asarray(i) - yc) / yc
            x = 2.0 * (np.asarray(j) - xc) / xc
            if x ** 2 / 4.0 + y ** 2 > 1.0:
                x, y = np.nan, np.nan
        elif i is not None and j is None:
            # i holds a packed (i, j) pair.
            i, j = i
            y = (np.asarray(i) - yc) / yc
            x = 2.0 * (np.asarray(j) - xc) / xc
            if x ** 2 / 4.0 + y ** 2 > 1.0:
                x, y = np.nan, np.nan
        else:
            raise TypeError("i and j must be both given or both not given")
        return x, y

    ij2xy.__doc__ = SphericalProj.ij2xy.__doc__ % (name, name)

    def get_extent(self):
        """Return the fixed plane extent (left, right, bottom, top) of the projection."""
        return (-2.0, 2.0, -1.0, 1.0)

    @staticmethod
    def __initialise_data():
        # Build the (lat -> auxiliary angle) lookup table once; subsequent
        # instances reuse the cached arrays.
        if len(MollweideProj.__molldata) == 0:
            X = (np.arange(1.0, 180.0, 1.0) - 90.0) * dtor
            Y = MollweideProj.__findRoot(
                MollweideProj.__fmoll, MollweideProj.__dfmoll, X.copy(), X, niter=10
            )
            X = np.concatenate([[-pi / 2], X, [pi / 2]])
            Y = np.concatenate([[-pi / 2], Y, [pi / 2]])
            MollweideProj.__molldata.append(X)
            MollweideProj.__molldata.append(Y)
        return

    @staticmethod
    def __findRoot(f, df, x0, argsf=None, argsdf=None, niter=100):
        # Vectorized Newton iteration: x <- x - f(x)/df(x), capped at 1000 steps.
        x = x0
        niter = min(abs(niter), 1000)
        i = 0
        while i < niter:
            dx = -f(x, argsf) / df(x, argsdf)
            x += dx
            i += 1
        return x

    @staticmethod
    def __fmoll(x, args):
        # Mollweide equation residual: 2A + sin(2A) - pi*sin(lat).
        return 2.0 * x + np.sin(2.0 * x) - pi * np.sin(args)

    @staticmethod
    def __dfmoll(x, args):
        # Derivative of __fmoll with respect to x.
        return 2.0 * (1.0 + np.cos(2.0 * x))

    @staticmethod
    def __lininterp(X, Y, x):
        # Piecewise-linear interpolation of (X, Y) at x; X must be sorted.
        idx = X.searchsorted(x)
        y = Y[idx - 1] + (Y[idx] - Y[idx - 1]) / (X[idx] - X[idx - 1]) * (
            x - X[idx - 1]
        )
        return y
class CartesianProj(SphericalProj):
"""This class provides class methods for Cartesian projection.
"""
name = "Cartesian"
def __init__(
self,
| |
resources.html Required
"""
  class ActionTypeValueValuesEnum(_messages.Enum):
    """The type of action that Robo should perform on the specified element.
    Required.
    Values:
      ACTION_TYPE_UNSPECIFIED: DO NOT USE. For proto versioning only.
      SINGLE_CLICK: Direct Robo to click on the specified element. No-op if
        specified element is not clickable.
      ENTER_TEXT: Direct Robo to enter text on the specified element. No-op if
        specified element is not enabled or does not allow text entry.
    """
    # NOTE(review): looks auto-generated; the integer values presumably mirror
    # the proto enum numbers — do not renumber without regenerating.
    ACTION_TYPE_UNSPECIFIED = 0
    SINGLE_CLICK = 1
    ENTER_TEXT = 2
actionType = _messages.EnumField('ActionTypeValueValuesEnum', 1)
inputText = _messages.StringField(2)
resourceName = _messages.StringField(3)
class StandardQueryParameters(_messages.Message):
  """Query parameters accepted by all methods.
  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.
  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    bearer_token: OAuth bearer token.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    pp: Pretty-print response.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """
  # NOTE(review): this message looks machine-generated; the integer arguments
  # to the _messages.*Field constructors are field tags and presumably must
  # stay in sync with the service definition — do not renumber by hand.
  class AltValueValuesEnum(_messages.Enum):
    """Data format for response.
    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2
  class FXgafvValueValuesEnum(_messages.Enum):
    """V1 error format.
    Values:
      _1: v1 error format
      _2: v2 error format
    """
    _1 = 0
    _2 = 1
  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
  bearer_token = _messages.StringField(4)
  callback = _messages.StringField(5)
  fields = _messages.StringField(6)
  key = _messages.StringField(7)
  oauth_token = _messages.StringField(8)
  pp = _messages.BooleanField(9, default=True)
  prettyPrint = _messages.BooleanField(10, default=True)
  quotaUser = _messages.StringField(11)
  trace = _messages.StringField(12)
  uploadType = _messages.StringField(13)
  upload_protocol = _messages.StringField(14)
class TestDetails(_messages.Message):
    """Additional details about the progress of the running test.

    Fields:
      errorMessage: If the TestState is ERROR, then this string will contain
        human-readable details about the error. @OutputOnly
      progressMessages: Human-readable, detailed descriptions of the test's
        progress. For example: "Provisioning a device", "Starting Test". During
        the course of execution new data may be appended to the end of
        progress_messages. @OutputOnly
    """
    # Field numbers are part of the generated wire mapping.
    errorMessage = _messages.StringField(1)
    progressMessages = _messages.StringField(2, repeated=True)
class TestEnvironmentCatalog(_messages.Message):
    """A description of a test environment.

    Fields:
      androidDeviceCatalog: Android devices suitable for running Android
        Instrumentation Tests.
      networkConfigurationCatalog: Supported network configurations
    """
    # Field numbers are part of the generated wire mapping.
    androidDeviceCatalog = _messages.MessageField('AndroidDeviceCatalog', 1)
    networkConfigurationCatalog = _messages.MessageField('NetworkConfigurationCatalog', 2)
class TestExecution(_messages.Message):
    """Specifies a single test to be executed in a single environment.

    Enums:
      StateValueValuesEnum: Indicates the current progress of the test execution
        (e.g., FINISHED). @OutputOnly

    Fields:
      environment: How the host machine(s) are configured. @OutputOnly
      id: Unique id set by the backend. @OutputOnly
      matrixId: Id of the containing TestMatrix. @OutputOnly
      projectId: The cloud project that owns the test execution. @OutputOnly
      state: Indicates the current progress of the test execution (e.g.,
        FINISHED). @OutputOnly
      testDetails: Additional details about the running test. @OutputOnly
      testSpecification: How to run the test. @OutputOnly
      timestamp: The time this test execution was initially created. @OutputOnly
      toolResultsStep: Where the results for this execution are written.
        @OutputOnly
    """

    class StateValueValuesEnum(_messages.Enum):
        """Indicates the current progress of the test execution (e.g., FINISHED).
        @OutputOnly

        Values:
          TEST_STATE_UNSPECIFIED: Do not use. For proto versioning only.
          VALIDATING: The execution or matrix is being validated.
          PENDING: The execution or matrix is waiting for resources to become
            available.
          RUNNING: The execution is currently being processed. Can only be set on
            an execution.
          FINISHED: The execution or matrix has terminated normally. On a matrix
            this means that the matrix level processing completed normally, but
            individual executions may be in an ERROR state.
          ERROR: The execution or matrix has stopped because it encountered an
            infrastructure failure.
          UNSUPPORTED_ENVIRONMENT: The execution was not run because it
            corresponds to a unsupported environment. Can only be set on an
            execution.
          INCOMPATIBLE_ENVIRONMENT: The execution was not run because the provided
            inputs are incompatible with the requested environment. Example:
            requested AndroidVersion is lower than APK's minSdkVersion Can only
            be set on an execution.
          INCOMPATIBLE_ARCHITECTURE: The execution was not run because the
            provided inputs are incompatible with the requested architecture.
            Example: requested device does not support running the native code in
            the supplied APK Can only be set on an execution.
          CANCELLED: The user cancelled the execution. Can only be set on an
            execution.
          INVALID: The execution or matrix was not run because the provided inputs
            are not valid. Examples: input file is not of the expected type, is
            malformed/corrupt, or was flagged as malware
        """
        TEST_STATE_UNSPECIFIED = 0
        VALIDATING = 1
        PENDING = 2
        RUNNING = 3
        FINISHED = 4
        ERROR = 5
        UNSUPPORTED_ENVIRONMENT = 6
        INCOMPATIBLE_ENVIRONMENT = 7
        INCOMPATIBLE_ARCHITECTURE = 8
        CANCELLED = 9
        INVALID = 10

    # Field numbers are part of the generated wire mapping; do not renumber.
    environment = _messages.MessageField('Environment', 1)
    id = _messages.StringField(2)
    matrixId = _messages.StringField(3)
    projectId = _messages.StringField(4)
    state = _messages.EnumField('StateValueValuesEnum', 5)
    testDetails = _messages.MessageField('TestDetails', 6)
    testSpecification = _messages.MessageField('TestSpecification', 7)
    timestamp = _messages.StringField(8)
    toolResultsStep = _messages.MessageField('ToolResultsStep', 9)
class TestMatrix(_messages.Message):
"""A group of one or more TestExecutions, built by taking a product of
values over a pre-defined set of axes.
Enums:
InvalidMatrixDetailsValueValuesEnum: Describes why the matrix is
considered invalid. Only useful for matrices in the INVALID state.
@OutputOnly
StateValueValuesEnum: Indicates the current progress of the test matrix
(e.g., FINISHED) @OutputOnly
Fields:
clientInfo: Information about the client which invoked the test. Optional
environmentMatrix: How the host machine(s) are configured. Required
invalidMatrixDetails: Describes why the matrix is considered invalid. Only
useful for matrices in the INVALID state. @OutputOnly
projectId: The cloud project that owns the test matrix. @OutputOnly
resultStorage: Where the results for the matrix are written. Required
state: Indicates the current progress of the test matrix (e.g., FINISHED)
@OutputOnly
testExecutions: The list of test executions that the service creates for
this matrix. @OutputOnly
testMatrixId: Unique id set by the service. @OutputOnly
testSpecification: How to run the test. Required
timestamp: The time this test matrix was initially created. @OutputOnly
"""
class InvalidMatrixDetailsValueValuesEnum(_messages.Enum):
"""Describes why the matrix is considered invalid. Only useful for
matrices in the INVALID state. @OutputOnly
Values:
INVALID_MATRIX_DETAILS_UNSPECIFIED: Do not use. For proto versioning
only.
DETAILS_UNAVAILABLE: The matrix is INVALID, but there are no further
details available.
MALFORMED_APK: The input app APK could not be parsed.
MALFORMED_TEST_APK: The input test APK could not be parsed.
NO_MANIFEST: The AndroidManifest.xml could not be found.
NO_PACKAGE_NAME: The APK manifest does not declare a package name.
TEST_SAME_AS_APP: The test package and app package are the same.
NO_INSTRUMENTATION: The test apk does not declare an instrumentation.
NO_LAUNCHER_ACTIVITY: A main launcher activity could not be found.
FORBIDDEN_PERMISSIONS: The app declares one or more permissions that are
not allowed.
INVALID_ROBO_DIRECTIVES: There is a conflict in the provided
robo_directives.
TEST_LOOP_INTENT_FILTER_NOT_FOUND: There is no test loop intent
filter, or the one that is given is not formatted correctly.
SCENARIO_LABEL_NOT_DECLARED: The request contains a scenario label that
was not declared in the manifest.
SCENARIO_LABEL_MALFORMED: There was an error when parsing a label's
value.
SCENARIO_NOT_DECLARED: The request contains a scenario number that was
not declared in the manifest.
DEVICE_ADMIN_RECEIVER: Device administrator applications are not
allowed.
"""
INVALID_MATRIX_DETAILS_UNSPECIFIED = 0
DETAILS_UNAVAILABLE = 1
MALFORMED_APK = 2
MALFORMED_TEST_APK = 3
NO_MANIFEST = 4
NO_PACKAGE_NAME = 5
TEST_SAME_AS_APP = 6
NO_INSTRUMENTATION = 7
NO_LAUNCHER_ACTIVITY = 8
FORBIDDEN_PERMISSIONS = 9
INVALID_ROBO_DIRECTIVES = 10
TEST_LOOP_INTENT_FILTER_NOT_FOUND = 11
SCENARIO_LABEL_NOT_DECLARED = 12
SCENARIO_LABEL_MALFORMED = 13
SCENARIO_NOT_DECLARED = 14
DEVICE_ADMIN_RECEIVER = 15
class StateValueValuesEnum(_messages.Enum):
"""Indicates the current progress of the test matrix (e.g., FINISHED)
@OutputOnly
Values:
TEST_STATE_UNSPECIFIED: Do not use. For proto versioning only.
VALIDATING: The execution or matrix is being validated.
PENDING: The execution or matrix is waiting for resources to become
available.
RUNNING: The execution is currently | |
tmp[1][:, :, :, :]
out = self._apply_array_spin123(nh1e, nh2e, nh3e, (dveca, dvecb),
(evecaa, evecab, evecba, evecbb))
estr = 'ikmojlnp,mnopxy->ijklxy'
nevecaa = numpy.einsum(estr, h4e[:norb, :norb, :norb, :norb, \
:norb, :norb, :norb, :norb], evecaa) \
+ 2.0 * numpy.einsum(estr, h4e[:norb, :norb, :norb, norb:, \
:norb, :norb, :norb, norb:], evecab) \
+ numpy.einsum(estr, h4e[:norb, :norb, norb:, norb:, \
:norb, :norb, norb:, norb:], evecbb)
nevecab = numpy.einsum(estr, h4e[:norb, norb:, :norb, :norb, \
:norb, norb:, :norb, :norb], evecaa) \
+ 2.0 * numpy.einsum(estr, h4e[:norb, norb:, :norb, norb:, \
:norb, norb:, :norb, norb:], evecab) \
+ numpy.einsum(estr, h4e[:norb, norb:, norb:, norb:, \
:norb, norb:, norb:, norb:], evecbb)
nevecbb = numpy.einsum(estr, h4e[norb:, norb:, :norb, :norb, \
norb:, norb:, :norb, :norb], evecaa) \
+ 2.0 * numpy.einsum(estr, h4e[norb:, norb:, :norb, norb:, \
norb:, norb:, :norb, norb:], evecab) \
+ numpy.einsum(estr, h4e[norb:, norb:, norb:, norb:, \
norb:, norb:, norb:, norb:], evecbb)
dveca2 = numpy.zeros(dveca.shape, dtype=self._dtype)
dvecb2 = numpy.zeros(dvecb.shape, dtype=self._dtype)
for i in range(norb):
for j in range(norb):
dveca[:, :, :, :] = nevecaa[i, j, :, :, :, :]
dvecb[:, :, :, :] = nevecab[i, j, :, :, :, :]
cvec = self.calculate_coeff_spin_with_dvec((dveca, dvecb))
dveca2[i, j, :, :] += cvec[:, :]
dveca[:, :, :, :] = nevecab[:, :, i, j, :, :]
dvecb[:, :, :, :] = nevecbb[i, j, :, :, :, :]
cvec = self.calculate_coeff_spin_with_dvec((dveca, dvecb))
dvecb2[i, j, :, :] += cvec[:, :]
out += self.calculate_coeff_spin_with_dvec((dveca2, dvecb2))
return out
def apply_inplace_s2(self) -> None:
    """Apply the total-spin-squared (S^2) operator to self, in place."""
    norb = self.norb()
    saved = numpy.copy(self.coeff)
    # Diagonal contribution: S_z + S_z^2 + n_beta.
    sz = 0.5 * (self.nalpha() - self.nbeta())
    self.coeff *= sz + sz * sz + self.nbeta()
    # The off-diagonal (spin-flip) part vanishes when the alpha sector is
    # completely filled or there are no beta electrons.
    if self.nalpha() == norb or self.nbeta() == 0:
        return
    dvec = numpy.zeros((norb, norb, self.lena(), self.lenb()),
                       dtype=self._dtype)
    for p in range(norb):
        for q in range(norb):
            for source, target, parity in self.alpha_map(p, q):
                dvec[p, q, target, :] += saved[source, :] * parity
    for p in range(norb):
        for q in range(norb):
            for source, target, parity in self.beta_map(q, p):
                self.coeff[:, source] -= dvec[q, p, :, target] * parity
def apply_individual_nbody(self, coeff: complex, daga: List[int],
                           undaga: List[int], dagb: List[int],
                           undagb: List[int]) -> 'FqeData':
    """
    Apply function with an individual operator represented in arrays.
    It is assumed that the operator is spin conserving

    Args:
        coeff: scalar prefactor multiplying the operator string
        daga: orbital indices of alpha creation operators
        undaga: orbital indices of alpha annihilation operators
        dagb: orbital indices of beta creation operators
        undagb: orbital indices of beta annihilation operators

    Returns:
        A new FqeData containing the result; self is not modified.
    """
    # Spin conservation: equal numbers of creators and annihilators in
    # each spin channel.
    assert len(daga) == len(undaga) and len(dagb) == len(undagb)
    alphamap = []
    betamap = []

    def make_mapping_each(alpha: bool) -> None:
        # Collect (source index, target index, sign) triples for every
        # determinant string on which the operator does not vanish.
        (dag, undag) = (daga, undaga) if alpha else (dagb, undagb)
        for index in range(self.lena() if alpha else self.lenb()):
            if alpha:
                current = self._core.string_alpha(index)
            else:
                current = self._core.string_beta(index)
            check = True
            # Every annihilated orbital must be occupied ...
            for i in undag:
                if not check:
                    break
                check &= bool(get_bit(current, i))
            # ... and every created orbital must be empty, unless the same
            # orbital is annihilated first in this string.
            for i in dag:
                if not check:
                    break
                check &= i in undag or not bool(get_bit(current, i))
            if check:
                # Accumulate the fermionic sign while updating the
                # occupation bitstring operator by operator.
                parity = 0
                for i in reversed(undag):
                    parity += count_bits_above(current, i)
                    current = unset_bit(current, i)
                for i in reversed(dag):
                    parity += count_bits_above(current, i)
                    current = set_bit(current, i)
                if alpha:
                    alphamap.append((index, self._core.index_alpha(current),
                                     (-1)**parity))
                else:
                    betamap.append((index, self._core.index_beta(current),
                                    (-1)**parity))

    make_mapping_each(True)
    make_mapping_each(False)
    out = copy.deepcopy(self)
    out.coeff.fill(0.0)
    sourceb_vec = numpy.array([xx[0] for xx in betamap])
    targetb_vec = numpy.array([xx[1] for xx in betamap])
    parityb_vec = numpy.array([xx[2] for xx in betamap])
    if len(alphamap) == 0 or len(betamap) == 0:
        # Operator annihilates every determinant in one spin sector:
        # result is the zero wave function.
        return out
    else:
        # Vectorized over the beta mapping; Python loop over the alpha
        # mapping entries.
        for sourcea, targeta, paritya in alphamap:
            out.coeff[targeta, targetb_vec] = \
                coeff * paritya * numpy.multiply(
                    self.coeff[sourcea, sourceb_vec], parityb_vec)
        # # TODO: THIS SHOULD BE CHECKED THOROUGHLY
        # # NOTE: Apparently the meshgrid construction overhead
        # # slows down this line so it is a little slower than the previous
        # sourcea_vec = numpy.array([xx[0] for xx in alphamap])
        # targeta_vec = numpy.array([xx[1] for xx in alphamap])
        # paritya_vec = numpy.array([xx[2] for xx in alphamap])
        # target_xi, target_yj = numpy.meshgrid(targeta_vec, targetb_vec)
        # source_xi, source_yj = numpy.meshgrid(sourcea_vec, sourceb_vec)
        # parity_xi, parity_yj = numpy.meshgrid(paritya_vec, parityb_vec)
        # out.coeff[target_xi, target_yj] = coeff * \
        #     (self.coeff[source_xi, source_yj] * parity_xi * parity_yj)
        return out
def rdm1(self, bradata: Optional['FqeData'] = None) -> 'Nparray':
    """
    Compute the 1-particle RDM of this wave function.  If *bradata* is
    supplied the transition RDM between *bradata* (bra) and self (ket) is
    returned instead.  The result is a 1-tuple holding the matrix.
    """
    bra = self if bradata is None else bradata
    bra_dvec = bra.calculate_dvec_spatial()
    return (numpy.einsum('jikl,kl->ij', bra_dvec.conj(), self.coeff),)
def rdm12(self, bradata: Optional['FqeData'] = None) -> numpy.ndarray:
    """
    Compute 1- and 2-particle RDMs for this wave function (transition RDMs
    when *bradata* is given).  The filling fraction decides which of the
    two specialized implementations is used.
    """
    norb = self.norb()
    nalpha = self.nalpha()
    nbeta = self.nbeta()
    cutoff = norb * self._low_thresh
    # At or above the threshold the generic half-filling code is used.
    if not (nalpha < cutoff and nbeta < cutoff):
        return self._rdm12_halffilling(bradata)
    # Low filling: pre-register the reduced-particle FCI graphs that the
    # specialized routine relies on.
    graphset = FciGraphSet(2, 2)
    graphset.append(self._core)
    if nalpha >= 2:
        graphset.append(FciGraph(nalpha - 2, nbeta, norb))
    if nalpha >= 1 and nbeta >= 1:
        graphset.append(FciGraph(nalpha - 1, nbeta - 1, norb))
    if nbeta >= 2:
        graphset.append(FciGraph(nalpha, nbeta - 2, norb))
    return self._rdm12_lowfilling(bradata)
def _rdm12_halffilling(self, bradata: Optional['FqeData'] = None
                       ) -> numpy.ndarray:
    """
    Generic (half-filling) computation of 1- and 2-particle RDMs.
    When *bradata* is given, transition RDMs are produced.
    """
    ket_dvec = self.calculate_dvec_spatial()
    if bradata is None:
        bra_dvec = ket_dvec
    else:
        bra_dvec = bradata.calculate_dvec_spatial()
    # NOTE(review): the 1-RDM contraction here does not conjugate bra_dvec
    # (unlike rdm1); preserved exactly as in the original implementation.
    rdm1 = numpy.einsum('jikl,kl->ij', bra_dvec, self.coeff)
    rdm2 = -numpy.einsum('jikl,mnkl->imjn', bra_dvec.conj(), ket_dvec)
    # Fold the one-particle part back onto the coincident-index positions.
    for p in range(self.norb()):
        rdm2[:, p, p, :] += rdm1
    return rdm1, rdm2
def _rdm12_lowfilling(self,
                      bradata: Optional['FqeData'] = None) -> numpy.ndarray:
    """
    Low-filling specialization of the code for Calculating 1- and 2-particle
    RDMs given a wave function. When bradata is given, it calculates
    transition RDMs.

    Same-spin two-body contributions are accumulated in a triangularly
    packed pair matrix (``outpack``); the mixed-spin contribution is kept
    as a full four-index array (``outunpack``).  The final loop unpacks
    both into the returned 2-RDM.
    """
    norb = self.norb()
    nalpha = self.nalpha()
    nbeta = self.nbeta()
    lena = self.lena()
    lenb = self.lenb()
    # Number of packed orbital pairs (i <= j): triangular storage size.
    nlt = norb * (norb + 1) // 2
    outpack = numpy.zeros((nlt, nlt), dtype=self.coeff.dtype)
    outunpack = numpy.zeros((norb, norb, norb, norb),
                            dtype=self.coeff.dtype)
    if nalpha - 2 >= 0:
        # Alpha-alpha block: mapping that removes two alpha electrons.
        alpha_map, _ = self._core.find_mapping(-2, 0)

        def compute_intermediate0(coeff):
            # Pack pair (i, j), i < j, at index i + j*(j+1)//2.
            tmp = numpy.zeros((nlt, int(binom(norb, nalpha - 2)), lenb),
                              dtype=self.coeff.dtype)
            for i in range(norb):
                for j in range(i + 1, norb):
                    for source, target, parity in alpha_map[(i, j)]:
                        tmp[i + j * (j + 1) //
                            2, target, :] += coeff[source, :] * parity
            return tmp

        inter = compute_intermediate0(self.coeff)
        inter2 = inter if bradata is None else compute_intermediate0(
            bradata.coeff)
        outpack += numpy.einsum('imn,kmn->ik', inter2.conj(), inter)
    if self.nalpha() - 1 >= 0 and self.nbeta() - 1 >= 0:
        # Mixed-spin block: remove one alpha and one beta electron.
        alpha_map, beta_map = self._core.find_mapping(-1, -1)

        def compute_intermediate1(coeff):
            tmp = numpy.zeros((norb, norb, int(binom(
                norb, nalpha - 1)), int(binom(norb, nbeta - 1))),
                              dtype=self.coeff.dtype)
            for i in range(norb):
                for j in range(norb):
                    for sourcea, targeta, paritya in alpha_map[(i,)]:
                        # Fixed sign factor (-1)**(nalpha - 1) -- presumably
                        # from anticommuting the beta operator past the
                        # remaining alpha operators; confirm against theory.
                        paritya *= (-1)**(nalpha - 1)
                        for sourceb, targetb, parityb in beta_map[(j,)]:
                            work = coeff[sourcea,
                                         sourceb] * paritya * parityb
                            tmp[i, j, targeta, targetb] += work
            return tmp

        inter = compute_intermediate1(self.coeff)
        inter2 = inter if bradata is None else compute_intermediate1(
            bradata.coeff)
        outunpack += numpy.einsum('ijmn,klmn->ijkl', inter2.conj(), inter)
    if self.nbeta() - 2 >= 0:
        # Beta-beta block: mapping that removes two beta electrons.
        _, beta_map = self._core.find_mapping(0, -2)

        def compute_intermediate2(coeff):
            tmp = numpy.zeros((nlt, lena, int(binom(norb, nbeta - 2))),
                              dtype=self.coeff.dtype)
            for i in range(norb):
                for j in range(i + 1, norb):
                    for source, target, parity in beta_map[(i, j)]:
                        tmp[i + j * (j + 1) //
                            2, :, target] += coeff[:, source] * parity
            return tmp

        inter = compute_intermediate2(self.coeff)
        inter2 = inter if bradata is None else compute_intermediate2(
            bradata.coeff)
        outpack += numpy.einsum('imn,kmn->ik', inter2.conj(), inter)
    # Unpack the packed same-spin blocks and combine with the mixed-spin
    # block; signs come from the (i, j) / (k, l) orderings.
    out = numpy.zeros_like(outunpack)
    for i in range(norb):
        for j in range(norb):
            ij = min(i, j) + max(i, j) * (max(i, j) + 1) // 2
            parityij = 1.0 if i < j else -1.0
            for k in range(norb):
                for l in range(norb):
                    parity = parityij * (1.0 if k < l else -1.0)
                    out[i, j, k,
                        l] -= outunpack[i, j, k, l] + outunpack[j, i, l, k]
                    mnkl, mxkl = min(k, l), max(k, l)
                    work = outpack[ij, mnkl + mxkl * (mxkl + 1) // 2]
                    out[i, j, k, l] -= work * parity
    return self.rdm1(bradata)[0], out
def rdm123(self,
bradata: Optional['FqeData'] = None,
dvec: 'Nparray' = None,
dvec2: 'Nparray' = None,
| |
# MINLP written by GAMS Convert at 04/21/18 13:52:41
#
# Equation counts
# Total E G L N X C B
# 1786 418 0 1368 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 1569 969 600 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 8090 4298 3792 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
# Machine-generated variable declarations (GAMS Convert) collapsed into loops.
# setattr(m, name, Var(...)) is equivalent to `m.<name> = Var(...)`: Pyomo's
# Block.__setattr__ registers the component on the model under that name, so
# every m.xN / m.bN below exists exactly as in the original expanded form.

# x1..x192: continuous, lower bound 1/288 ~= 0.003472, initialized at 10x the bound
for _i in range(1, 193):
    setattr(m, f'x{_i}',
            Var(within=Reals, bounds=(0.00347222222222222, None),
                initialize=0.0347222222222222))

# x193..x384: continuous, lower bound 0.12, initialized at 1.2
for _i in range(193, 385):
    setattr(m, f'x{_i}',
            Var(within=Reals, bounds=(0.12, None), initialize=1.2))

# x385..x392: continuous, nonnegative, initialized at ~0.0416668
for _i in range(385, 393):
    setattr(m, f'x{_i}',
            Var(within=Reals, bounds=(0, None),
                initialize=0.0416668402777778))

# b393..b416: binary, relaxed initial point 0.25
for _i in range(393, 417):
    setattr(m, f'b{_i}',
            Var(within=Binary, bounds=(0, 1), initialize=0.25))

# b417..b466: binary, relaxed initial point 0.03125
# NOTE(review): further b-variables with this same pattern appear to continue
# past this point in the file.
for _i in range(417, 467):
    setattr(m, f'b{_i}',
            Var(within=Binary, bounds=(0, 1), initialize=0.03125))
m.b467 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b468 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b469 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b470 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b471 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b472 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b473 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b474 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b475 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b476 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b477 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b478 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b479 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b480 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b481 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b482 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b483 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b484 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b485 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b486 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b487 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b488 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b489 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b490 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b491 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b492 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b493 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b494 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b495 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b496 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b497 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b498 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b499 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b500 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b501 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b502 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b503 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b504 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b505 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b506 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b507 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b508 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b509 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b510 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b511 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b512 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b513 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b514 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b515 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b516 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b517 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b518 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b519 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b520 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b521 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b522 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b523 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b524 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b525 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b526 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b527 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b528 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b529 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b530 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b531 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b532 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b533 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b534 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b535 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b536 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b537 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b538 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b539 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b540 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b541 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b542 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b543 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b544 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b545 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b546 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b547 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b548 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b549 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b550 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b551 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b552 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b553 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b554 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b555 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b556 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b557 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b558 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b559 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b560 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b561 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b562 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b563 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b564 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b565 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b566 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b567 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b568 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b569 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b570 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b571 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b572 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b573 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b574 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b575 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b576 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b577 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b578 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b579 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b580 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b581 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b582 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b583 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b584 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b585 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b586 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b587 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b588 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b589 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b590 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b591 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b592 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b593 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b594 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b595 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b596 | |
self.budget.costo_gasto_1 == None:
# return 0
return (self.budget.veces_gasto_1 or 0) * (self.budget.costo_gasto_1 or 0)
return 0
@property
def get_gasto_2(self):
    """Total of miscellaneous expense #2: occurrences times unit cost; 0 when not applicable."""
    if not self.has_gasto_2:
        return 0
    veces = self.budget.veces_gasto_2 or 0
    costo = self.budget.costo_gasto_2 or 0
    return veces * costo
@property
def get_costo_mes_inmac(self):
    """Monthly INMAC cost: accounting cost plus medical exams, vaccination, EPP, certification and misc expenses."""
    components = [
        self.get_costo_mes_contab,
        self.get_examen_medico_pre_ocupacional,
        self.get_examen_medico_post_ocupacional,
        self.budget.get_costo_vacunacion,
        self.get_epp,
        self.get_costo_certificacion,
        self.get_gasto_1,
        self.get_gasto_2,
    ]
    return sum(components)
@property
def get_coeficiente(self):
    """Relief coefficient: (work days + rest days) / work days, rounded to 2 decimals; 0 when work days is 0."""
    trabajo = self.relevo_trabajo
    if trabajo == 0:
        return 0
    return round((trabajo + self.relevo_descanso) / trabajo, 2)
@property
def get_costo_mes_con_relevo(self):
    """Monthly INMAC cost scaled by the relief coefficient."""
    base = self.get_costo_mes_inmac
    return base * self.get_coeficiente
@property
def get_catering(self):
    """Catering cost from the budget when this resource includes catering; otherwise 0."""
    return self.budget.get_catering if self.has_catering else 0
@property
def get_medicina(self):
    """Medical-service cost from the budget when this resource includes it; otherwise 0."""
    return self.budget.get_medicina if self.has_medicina else 0
@property
def get_costo_mes_final(self):
    """Final monthly cost: relief-adjusted cost plus catering and medical services."""
    total = self.get_costo_mes_con_relevo
    total += self.get_catering
    total += self.get_medicina
    return total
@property
def get_standby_hours(self):
    """Hours of this manpower consumed by budget tasks (task qty x efficiency x assigned qty, rounded per task)."""
    return sum(
        round(item.task.quantity * item.get_efficiency * item.quantity, 2)
        for item in self.manpowers_budget_manpower_task.all()
    )
@property
def get_standby_estimated_hours(self):
    """Estimated available hours: headcount x valued months x workdays/month x hours/day."""
    return (
        self.quantity
        * self.time_valorize
        * self.budget.workdays_per_month
        * self.budget.normal_working_hours
    )
@property
def get_standby_cost(self):
    """Cost of idle hours: positive gap between estimated and task-assigned hours, priced at unit cost."""
    gap = self.get_standby_estimated_hours - self.get_standby_hours
    return gap * self.get_cost_unit if gap > 0 else 0.0
@property
def get_overhead_cost(self):
    """Overhead: headcount x valued months x monthly business cost."""
    monthly = self.get_business_cost
    return self.quantity * self.time_valorize * monthly
@property
def get_standby_hydration(self):
    """Hydration provision for standby personnel.

    headcount x valued months x 0.175 (3.5 units per 20-day cycle) x workdays per month.
    """
    # Decimal("3.5") / Decimal(20) is exactly Decimal("0.175"); the previous
    # Decimal(3.5 / 20) embedded binary-float error (0.17499999999999999...),
    # which leaked tiny inaccuracies into every hydration total.
    ratio = Decimal("3.5") / Decimal(20)
    return (
        self.quantity
        * self.time_valorize
        * ratio
        * self.budget.workdays_per_month
    )
@property
def get_standby_epp(self):
    """EPP (protective equipment) cost for standby personnel, converted to USD for dollar budgets.

    Bug fix: the original conditional expression bound the *entire* product
    (`a * b * c / d if cond else 1`), so any budget not in dollars returned
    the bare constant 1 instead of quantity x months x EPP cost. The exchange
    rate is meant to apply only to the division, matching get_epp_total.
    """
    cost = self.quantity * self.time_valorize * self.get_epp
    if self.budget.currency == "D":  # Dólares: convert using the exchange rate
        cost = cost / self.budget.exchange_rate
    return cost
@property
def get_epp_total(self):
    """Unit EPP cost for this resource's EPP type, in the budget currency (0 when no match exists)."""
    matches = self.budget.epps_budget.filter(tipo_epp=self.tipo_epp)
    if matches.count() == 0:
        return 0
    first = matches[0]
    # Direct cost ("D") uses the one-off cost; indirect uses the monthly cost.
    costo = first.get_costo if self.type_cost == "D" else first.get_costo_mes
    if self.budget.currency == "D":  # Dólares
        return costo / self.budget.exchange_rate
    return costo
@property
def get_epp_final(self):
    """Total EPP cost: unit EPP cost times headcount (indirect: quantity x months; direct: relief headcount)."""
    if self.type_cost == "I":  # Indirecto
        factor = self.quantity * self.time_valorize
    else:
        factor = self.get_personas_relevo
    return self.get_epp_total * factor
@property
def get_cost_unit(self):
    """Hourly unit cost in the budget currency, optionally scaled by the manpower ratio."""
    budget = self.budget
    hours_per_month = budget.normal_working_hours * budget.workdays_per_month
    cost = self.get_costo_mes_final / hours_per_month
    if self.allows_ratio == 1:
        cost = cost * budget.ratio_manpower
    # Convert between soles ("S") and dollars ("D") when the resource and
    # budget currencies differ.
    if budget.currency == "D" and self.currency == "S":
        cost = cost / budget.exchange_rate
    elif budget.currency == "S" and self.currency == "D":
        cost = cost * budget.exchange_rate
    return cost
@property
def get_business_cost(self):
    """Monthly business cost, ratio-adjusted and expressed in USD for dollar budgets."""
    budget = self.budget
    cost = self.get_costo_mes_final
    if self.allows_ratio == 1:
        cost = cost * budget.ratio_manpower
    if budget.currency == "D":
        cost = cost / budget.exchange_rate
    return cost
# @property
# def get_epp_cost(self):
# cost = self.epp_cost
# exchange = self.budget.exchange_rate
# currency = self.budget.currency
# if currency == "D" and self.currency == "S":
# cost = cost / exchange
# elif currency == "S" and self.currency == "D":
# cost = cost * exchange
# return cost
@property
def get_resource_name(self):
    """Display name of the linked manpower resource."""
    resource = self.manpower
    return resource.name
@property
def get_resource_code(self):
    """Catalog code of the linked manpower resource."""
    resource = self.manpower
    return resource.code
@property
def get_manpower_display(self):
    """Label "code - name (unit)", substituting '--' when the code is empty."""
    code = self.manpower.code or '--'
    return "{0} - {1} ({2})".format(code, self.manpower.name, self.manpower.unit)
@property
def get_manpower_name(self):
    """Label "code - name (unit)" (no fallback for an empty code, unlike get_manpower_display)."""
    resource = self.manpower
    return "{0} - {1} ({2})".format(resource.code, resource.name, resource.unit)
@property
def get_resource_label(self):
    """Short label "code - name"."""
    resource = self.manpower
    return "{0} - {1}".format(resource.code, resource.name)
@property
def get_manpower_id(self):
    """Primary key of the linked manpower resource."""
    resource = self.manpower
    return resource.id
@reversion.register()
class MaterialBudget(ResourceBudgetBase):
    """
    Precios base del material de un presupuesto
    (Base prices of one material within a budget.)

    The unit cost combines the base price, transport to site and an optional
    loss ratio, converting between soles ("S") and dollars ("D") with the
    budget's exchange rate.
    """
    # Owning budget; deleting the budget removes its material rows.
    budget = models.ForeignKey(
        Budget,
        verbose_name=u'Presupuesto',
        related_name="materials_budget",
        on_delete=models.CASCADE
    )
    # Catalog material this row prices.
    material = models.ForeignKey(
        Material,
        verbose_name="material",
        related_name="materials_material_budget",
        blank=False,
        null=False,
        on_delete=models.CASCADE
    )
    # Material kind; one of constants.TYPE_MATERIAL.
    type_material = models.CharField(
        'tipo de material',
        max_length=1,
        blank=False,
        null=False,
        choices=constants.TYPE_MATERIAL,
        default=constants.TYPE_MATERIAL_DEFAULT
    )
    quantity = models.DecimalField(
        'cantidad',
        max_digits=15, decimal_places=6,
        blank=False,
        null=False,
        default=0
    )
    # Number of periods (months) to value.
    time_valorize = models.DecimalField(
        'tiempo a valorizar',
        max_digits=15, decimal_places=6,
        blank=False,
        null=False,
        default=0
    )
    # Amortization factor; get_overhead_cost treats 0/None as 1 (no amortization).
    amortization = models.DecimalField(
        'amortización',
        max_digits=15, decimal_places=6,
        blank=False,
        null=False,
        default=0
    )
    is_subcontract = models.BooleanField(default=False)
    # Transport distance; transport cost applies only when both distancia and
    # costo_unitario_transporte are set.
    distancia = models.DecimalField(
        'distancia',
        max_digits=8, decimal_places=3,
        blank=True,
        null=True,
    )
    costo_unitario_transporte = models.DecimalField(
        'costo unitario de transporte',
        max_digits=9, decimal_places=4,
        blank=True,
        null=True,
    )
    # Loss ratio, interpreted as a percentage in get_costo_perdida.
    ratio_perdida = models.DecimalField(
        'ratio de perdida',
        max_digits=5, decimal_places=2,
        blank=True,
        null=True,
        default=Decimal("1.00")
    )

    def __str__(self):
        return self.material.name

    class Meta:
        verbose_name = "material"
        verbose_name_plural = "materiales"
        # ordering = ("code", "name")
        # NOTE(review): the doubled parentheses make this a plain 2-tuple of
        # field names, i.e. one unique constraint on (budget, material);
        # Django accepts both spellings.
        unique_together = (("budget", "material"))

    @property
    def get_costo_transporte(self):
        """Transport cost: distance x unit transport cost (0 when either is unset)."""
        if self.distancia and self.costo_unitario_transporte:
            return self.distancia * self.costo_unitario_transporte
        return 0

    @property
    def get_costo_obra(self):
        """On-site cost: base price plus transport."""
        return self.price + self.get_costo_transporte

    @property
    def get_costo_perdida(self):
        """Loss cost: on-site cost times the loss ratio read as a percentage."""
        if self.ratio_perdida:
            return self.get_costo_obra * self.ratio_perdida / 100
        return 0

    @property
    def get_cost_unit(self):
        """Unit cost in the budget currency after the material ratio and FX conversion."""
        # NOTE(review): 'ratio' is assigned but the expression below reads
        # self.budget.ratio_material directly; the local is dead code.
        ratio = self.budget.ratio_material
        exchange = self.budget.exchange_rate
        currency = self.budget.currency
        cost = (self.get_costo_obra + self.get_costo_perdida) * self.budget.ratio_material
        # Convert when the resource currency differs from the budget currency.
        if currency == "D" and self.currency == "S":
            cost = cost / exchange
        elif currency == "S" and self.currency == "D":
            cost = cost * exchange
        return cost

    @property
    def get_overhead_cost(self):
        """Total cost: quantity x valued months x unit cost x amortization (0/None counts as 1)."""
        return (
            self.quantity *
            self.time_valorize *
            self.get_cost_unit *
            (self.amortization or 1)
        )

    @property
    def get_resource_name(self):
        """Material display name."""
        return self.material.name

    @property
    def get_resource_code(self):
        """Material catalog code."""
        return self.material.code

    @property
    def get_resource_label(self):
        """Label "code - name (unit)".

        NOTE(review): 'unit' is read from self (presumably ResourceBudgetBase),
        not from self.material — confirm this asymmetry is intended.
        """
        return "{0} - {1} ({2})".format(
            self.material.code,
            self.material.name,
            self.unit)

    @property
    def get_resource_id(self):
        """Primary key of the linked material."""
        return self.material.id
@reversion.register()
class SubcontractBudget(ResourceBudgetBase):
    """
    Precios base del subcontrato de un presupuesto
    (Base prices of one subcontract within a budget.)

    Mirrors MaterialBudget: unit cost = (on-site cost + loss cost) x ratio,
    with soles/dollars conversion via the budget exchange rate.
    """
    # Owning budget; deleting the budget removes its subcontract rows.
    budget = models.ForeignKey(
        Budget,
        verbose_name=u'Presupuesto',
        related_name="subcontracts_budget",
        on_delete=models.CASCADE
    )
    # Catalog subcontract this row prices.
    subcontract = models.ForeignKey(
        Subcontract,
        verbose_name="subcontrato",
        related_name="subcontracts_subcontract_budget",
        blank=False,
        null=False,
        on_delete=models.CASCADE
    )
    quantity = models.DecimalField(
        'cantidad',
        max_digits=15, decimal_places=6,
        blank=False,
        null=False,
        default=0
    )
    # Number of periods (months) to value.
    time_valorize = models.DecimalField(
        'tiempo a valorizar',
        max_digits=15, decimal_places=6,
        blank=False,
        null=False,
        default=0
    )
    # Amortization factor; get_overhead_cost treats 0/None as 1.
    amortization = models.DecimalField(
        'amortización',
        max_digits=15, decimal_places=6,
        blank=False,
        null=False,
        default=0
    )
    # Transport distance; cost applies only when both fields are set.
    distancia = models.DecimalField(
        'distancia',
        max_digits=8, decimal_places=3,
        blank=True,
        null=True,
    )
    costo_unitario_transporte = models.DecimalField(
        'costo unitario de transporte',
        max_digits=9, decimal_places=4,
        blank=True,
        null=True,
    )
    # Loss ratio, interpreted as a percentage in get_costo_perdida.
    ratio_perdida = models.DecimalField(
        'ratio de perdida',
        max_digits=5, decimal_places=2,
        blank=True,
        null=True,
        default=Decimal("1.00")
    )

    def __str__(self):
        return self.subcontract.name

    class Meta:
        verbose_name = "subcontrato"
        verbose_name_plural = "subcontratos"
        # ordering = ("code", "name")
        # NOTE(review): double parentheses — a single (budget, subcontract)
        # unique constraint, same as MaterialBudget.
        unique_together = (("budget", "subcontract"))

    @property
    def get_costo_transporte(self):
        """Transport cost: distance x unit transport cost (0 when either is unset)."""
        if self.distancia and self.costo_unitario_transporte:
            return self.distancia * self.costo_unitario_transporte
        return 0

    @property
    def get_costo_obra(self):
        """On-site cost: base price plus transport."""
        return self.price + self.get_costo_transporte

    @property
    def get_costo_perdida(self):
        """Loss cost: on-site cost times the loss ratio read as a percentage."""
        if self.ratio_perdida:
            return self.get_costo_obra * self.ratio_perdida / 100
        return 0

    @property
    def get_cost_unit(self):
        """Unit cost in the budget currency after the subcontract ratio and FX conversion."""
        # NOTE(review): 'ratio' is assigned but the expression below reads
        # self.budget.ratio_subcontract directly; the local is dead code.
        ratio = self.budget.ratio_subcontract
        exchange = self.budget.exchange_rate
        currency = self.budget.currency
        cost = (self.get_costo_obra + self.get_costo_perdida) * self.budget.ratio_subcontract
        # Convert when the resource currency differs from the budget currency.
        if currency == "D" and self.currency == "S":
            cost = cost / exchange
        elif currency == "S" and self.currency == "D":
            cost = cost * exchange
        return cost

    @property
    def get_overhead_cost(self):
        """Total cost: quantity x valued months x unit cost x amortization (0/None counts as 1)."""
        return (
            self.quantity *
            self.time_valorize *
            self.get_cost_unit *
            (self.amortization or 1)
        )

    @property
    def get_resource_name(self):
        """Subcontract display name."""
        return self.subcontract.name

    @property
    def get_resource_code(self):
        """Subcontract catalog code."""
        return self.subcontract.code

    @property
    def get_resource_label(self):
        """Label "code - name (unit)" built from the subcontract record."""
        return "{0} - {1} ({2})".format(
            self.subcontract.code,
            self.subcontract.name,
            self.subcontract.unit)

    @property
    def get_resource_id(self):
        """Primary key of the linked subcontract."""
        return self.subcontract.id
@reversion.register()
class EquipmentBudget(ResourceBudgetBase):
    """
    Precios base del equipo de un presupuesto
    (Base prices of one equipment item within a budget.)

    Builds an hourly unit cost out of rental, repair/replacement surcharge,
    fuel and lubricant consumption, with soles/dollars conversion.
    """
    # Owning budget; deleting the budget removes its equipment rows.
    budget = models.ForeignKey(
        Budget,
        verbose_name=u'Presupuesto',
        related_name="equipments_budget",
        on_delete=models.CASCADE
    )
    # Catalog equipment this row prices.
    equipment = models.ForeignKey(
        Equipment,
        verbose_name="equipo",
        related_name="equipments_equipment_budget",
        blank=False,
        null=False,
        on_delete=models.CASCADE
    )
    category = models.ForeignKey(
        CategoryEquipment,
        null=True,
        blank=True,
        on_delete=models.CASCADE
    )
    # Engine power; used by get_cons to estimate fuel consumption.
    potencia = models.DecimalField(
        'potencia del equipo',
        max_digits=6, decimal_places=2,
        blank=True,
        null=True,
    )
    quantity = models.DecimalField(
        'cantidad',
        max_digits=15, decimal_places=6,
        blank=False,
        null=False,
        default=0
    )
    # Number of periods (months) to value.
    time_valorize = models.DecimalField(
        'tiempo a valorizar',
        max_digits=15, decimal_places=6,
        blank=False,
        null=False,
        default=0
    )
    # Operating hours per workday (default: one 8-hour shift).
    hours_equipment_operation = models.DecimalField(
        'horas de operación del equipo',
        max_digits=15, decimal_places=6,
        blank=False,
        null=False,
        default=8.00
    )
    has_combustible = models.BooleanField(default=False)
    # Fuel type ('GS' = gasoline, anything else = diesel); see get_precio_combustible.
    tipo_combustible = models.CharField(
        'tipo de combustible',
        max_length=2,
        blank=True,
        null=True,
        choices=constants.COMBUSTIBLE,
    )

    def __str__(self):
        return self.equipment.name

    class Meta:
        verbose_name = "equipo"
        verbose_name_plural = "equipos"
        ordering = ("position",)
        unique_together = (("budget", "equipment"))

    @property
    def get_categoria_display(self):
        # NOTE(review): returns equipment.get_category_display without calling
        # it — presumably invoked by the consumer (e.g. a template auto-calls
        # callables); confirm.
        return self.equipment.get_category_display

    @property
    def get_costo_hs_alquiler(self):
        """Hourly rental cost after the equipment ratio."""
        return self.price*self.budget.ratio_equipment

    @property
    def get_horas_mes(self):
        """Operating hours per month: daily hours x workdays per month."""
        return self.hours_equipment_operation * self.budget.workdays_per_month

    @property
    def get_costo_mes_alquiler(self):
        """Monthly rental cost."""
        return self.get_costo_hs_alquiler * self.get_horas_mes

    @property
    def get_rep_resp(self):
        """Hourly repair/replacement surcharge: percentage of the rental cost."""
        return self.get_costo_hs_alquiler * self.budget.ratio_equipo_reparacion_reposicion / 100

    @property
    def get_costo_apu_hs_alq(self):
        """Hourly rental cost including the repair/replacement surcharge."""
        return self.get_costo_hs_alquiler + self.get_rep_resp

    @property
    def get_precio_combustible(self):
        """Fuel price for this equipment's fuel type (0 when no fuel type is set).

        NOTE(review): the price is multiplied by the exchange rate when the
        *resource* currency is soles — fuel prices look like they are stored
        in dollars on the budget; confirm against precio_gasolina/precio_gasoil.
        """
        combustible = 0
        if self.tipo_combustible:
            if self.tipo_combustible == 'GS':
                combustible = self.budget.precio_gasolina
            else:
                combustible = self.budget.precio_gasoil
            if self.currency == "S":
                combustible = combustible * self.budget.exchange_rate
        return combustible

    @property
    def get_cons(self):
        """Estimated hourly fuel consumption from engine power (0 without potencia)."""
        if self.potencia:
            return round(self.potencia * self.budget.ratio_consumo_equipos, 1)
        return 0

    @property
    def get_costo_combustible(self):
        """Hourly fuel cost: fuel price x estimated consumption."""
        return round(self.get_precio_combustible * self.get_cons, 2)

    @property
    def get_lubricante(self):
        """Hourly lubricant cost as a percentage of the fuel cost."""
        return round(self.get_costo_combustible * self.budget.ratio_lubricante / 100, 2)

    @property
    def get_cost_unit(self):
        """Hourly unit cost: rental + lubricant (+ fuel unless the client covers it), FX-converted."""
        # NOTE(review): 'ratio' is assigned but never used below; dead local.
        ratio = self.budget.ratio_equipment
        exchange = self.budget.exchange_rate
        currency = self.budget.currency
        cost = self.get_costo_apu_hs_alq + self.get_lubricante
        # Fuel is only charged here when the client does NOT assume it.
        if self.budget.is_cliente_asume_combustible == False:
            cost = cost + self.get_costo_combustible
        if currency == "D" and self.currency == "S":
            cost = cost / exchange
        elif currency == "S" and self.currency == "D":
            cost = cost * exchange
        return cost

    @property
    def get_quantity(self):
        """Total quantity of this equipment assigned across budget tasks."""
        quantity = 0
        for item in self.equipments_budget_equipment_task.all():
            quantity += item.quantity
        return quantity

    @property
    def get_standby_hours(self):
        """Hours consumed by tasks: task qty x efficiency x assigned qty, rounded per task."""
        quantity = 0
        for item in self.equipments_budget_equipment_task.all():
            quantity += round(
                item.task.quantity *
                item.get_efficiency * item.quantity, 2)
        return quantity
@property
| |
user.is_researcher and user.has_study_perms(
StudyPermission.DELETE_ALL_PREVIEW_DATA, study
)
test_func = user_can_delete_preview_data
def post(self, request, *args, **kwargs):
    """
    Post method on all responses view handles the 'delete all preview data' button.
    """
    study = self.get_object()
    # Note: delete all, not just consented!
    preview_responses = study.responses.filter(is_preview=True).prefetch_related(
        "videos", "responselog_set", "consent_rulings", "feedback"
    )
    # NOTE(review): deleting page-by-page instead of queryset.delete() keeps
    # memory bounded and ensures per-instance delete() logic (e.g. the
    # pre_delete S3 cleanup mentioned below) runs — confirm intent.
    paginator = Paginator(preview_responses, RESPONSE_PAGE_SIZE)
    for page_num in paginator.page_range:
        page_of_responses = paginator.page(page_num)
        for resp in page_of_responses:
            # response logs, consent rulings, feedback, videos will all be deleted
            # via cascades - videos will be removed from S3 also on pre_delete hook
            resp.delete()
    # Back to the "all responses" page for this study.
    return HttpResponseRedirect(
        reverse("exp:study-responses-all", kwargs={"pk": study.id})
    )
class StudyResponsesJSON(ResponseDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads all study responses in JSON format.
    """
    # Smaller pagination because individual responses may be large and we don't want the json representing 100
    # responses in memory
    paginate_by = 1

    def make_chunk(self, paginator, page_num, header_options):
        """Build the JSON-array fragment for one page of responses.

        The first chunk opens the array; every chunk joins its responses with
        ",\\n"; the last chunk closes the array while all others end with ",\\n",
        so concatenating all chunks yields one valid JSON array.
        """
        chunk = ""
        if page_num == 1:
            chunk = "[\n"
        chunk += ",\n".join(
            json.dumps(
                construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
                indent="\t",  # Use tab rather than spaces to make file smaller (ex. 60MB -> 25MB)
                default=str,
            )
            for resp in paginator.page(page_num)
        )
        # page_range is a range object, so [-1] is the last page number.
        if page_num == paginator.page_range[-1]:
            chunk += "\n]"
        else:
            chunk += ",\n"
        return chunk

    def render_to_response(self, context, **response_kwargs):
        """Stream responses as one JSON array, one page (= one response) per chunk."""
        paginator = context["paginator"]
        study = self.study
        header_options = set(self.request.GET.getlist("data_options"))
        filename = "{}_{}.json".format(
            study_name_for_files(study.name),
            "all-responses"
            + ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
        )
        # Generator expression keeps only one chunk in memory at a time.
        response = StreamingHttpResponse(
            (
                self.make_chunk(paginator, page_num, header_options)
                for page_num in paginator.page_range
            ),
            # NOTE(review): the registered media type is "application/json";
            # "text/json" works in practice but is non-standard.
            content_type="text/json",
        )
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponsesCSV(ResponseDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads a summary of all study responses in CSV format.
    """

    def render_to_response(self, context, **response_kwargs):
        """Flatten every response into one CSV row and return the file as a download."""
        study = self.study
        paginator = context["paginator"]
        rows = []
        seen_headers = set()
        # Walk every page of responses; each response becomes one flat row.
        for page_number in paginator.page_range:
            for resp in paginator.page(page_number):
                flat_row = flatten_dict(
                    {col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
                )
                # Track any columns this session introduces
                seen_headers |= flat_row.keys()
                rows.append(flat_row)
        header_options = set(self.request.GET.getlist("data_options"))
        header_list = get_response_headers(header_options, seen_headers)
        output, writer = csv_dict_output_and_writer(header_list)
        writer.writerows(rows)
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name),
            "all-responses"
            + ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
        )
        response = HttpResponse(output.getvalue(), content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponsesDictCSV(CanViewStudyResponsesMixin, View):
    """
    Hitting this URL downloads a data dictionary for the study response summary in CSV format. Does not depend on actual response data.
    """

    def build_summary_dict_csv(self, optional_headers_selected_ids):
        """
        Builds CSV file contents for data dictionary corresponding to the overview CSV
        """
        descriptions = {col.id: col.description for col in RESPONSE_COLUMNS}
        # One row per header that would appear in the overview CSV.
        rows = [
            {"column": header, "description": descriptions[header]}
            for header in get_response_headers(
                optional_headers_selected_ids, descriptions.keys()
            )
        ]
        output, writer = csv_dict_output_and_writer(["column", "description"])
        writer.writerows(rows)
        return output.getvalue()

    def get(self, request, *args, **kwargs):
        """Return the data dictionary as a CSV attachment."""
        selected_options = self.request.GET.getlist("data_options")
        filename = "{}_{}.csv".format(
            study_name_for_files(self.study.name), "all-responses-dict"
        )
        response = HttpResponse(
            self.build_summary_dict_csv(selected_options), content_type="text/csv"
        )
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyChildrenCSV(ResponseDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads a summary of all children who participated in CSV format.
    """

    def render_to_response(self, context, **response_kwargs):
        """Emit one CSV row per distinct child (first response seen wins)."""
        paginator = context["paginator"]
        study = self.study
        # Track children already emitted so each child appears once even with
        # multiple responses. A set gives O(1) membership tests; the previous
        # list made this loop quadratic in the number of responses.
        seen_child_ids = set()
        child_rows = []
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                row_data = flatten_dict(
                    {
                        col.id: col.extractor(resp)
                        for col in RESPONSE_COLUMNS
                        if col.id in CHILD_CSV_HEADERS
                    }
                )
                child_id = row_data["child__global_id"]
                if child_id not in seen_child_ids:
                    seen_child_ids.add(child_id)
                    child_rows.append(row_data)
        output, writer = csv_dict_output_and_writer(CHILD_CSV_HEADERS)
        writer.writerows(child_rows)
        cleaned_data = output.getvalue()
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name), "all-children-identifiable"
        )
        response = HttpResponse(cleaned_data, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyChildrenDictCSV(CanViewStudyResponsesMixin, View):
    """
    Hitting this URL downloads a data dictionary in CSV format for the summary of children who participated.
    Does not depend on actual response data.
    TODO: separate from response data mixin
    """

    def build_child_dict_csv(self):
        """
        Builds CSV file contents for data dictionary for overview of all child participants
        """
        # Only the columns that appear in the child overview CSV.
        rows = [
            {"column": col.id, "description": col.description}
            for col in RESPONSE_COLUMNS
            if col.id in CHILD_CSV_HEADERS
        ]
        output, writer = csv_dict_output_and_writer(["column", "description"])
        writer.writerows(rows)
        return output.getvalue()

    def get(self, request, *args, **kwargs):
        """Return the child data dictionary as a CSV attachment."""
        filename = "{}_{}.csv".format(
            study_name_for_files(self.study.name), "all-children-dict"
        )
        response = HttpResponse(self.build_child_dict_csv(), content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponsesFrameDataCSV(ResponseDownloadMixin, generic.list.ListView):
    """Hitting this URL downloads a ZIP file with frame data from one response per file in CSV format"""
    # TODO: with large files / many responses generation can take a while. Should generate asynchronously along
    # with the data dict.

    def render_to_response(self, context, **response_kwargs):
        """Zip one frame-data CSV per response and return it as a file download."""
        paginator = context["paginator"]
        study = self.study
        # External studies have no frame data; bounce back with an error banner.
        if study.study_type.is_external:
            messages.error(
                self.request, "Frame data is not available for External Studies."
            )
            return redirect(reverse("exp:study-responses-all", kwargs={"pk": study.pk}))
        zipped_file = io.BytesIO()  # in-memory buffer that holds the ZIP archive
        with zipfile.ZipFile(zipped_file, "w", zipfile.ZIP_DEFLATED) as zipped:
            for page_num in paginator.page_range:
                page_of_responses = paginator.page(page_num)
                for resp in page_of_responses:
                    data = build_single_response_framedata_csv(resp)
                    filename = "{}_{}_{}.csv".format(
                        study_name_for_files(study.name), resp.uuid, "frames"
                    )
                    zipped.writestr(filename, data)
        # Rewind so FileResponse streams from the start of the buffer.
        zipped_file.seek(0)
        response = FileResponse(
            zipped_file,
            as_attachment=True,
            filename="{}_framedata_per_session.zip".format(
                study_name_for_files(study.name)
            ),
        )
        return response
class StudyResponsesFrameDataDictCSV(ResponseDownloadMixin, View):
    """
    Hitting this URL queues creation of a template data dictionary for frame-level data in CSV format.
    The file is put on GCP and a link is emailed to the user.
    """

    def get(self, request, *args, **kwargs):
        """Queue asynchronous generation of the frame-data dictionary, then redirect back."""
        study = self.study
        if study.study_type.is_external:
            # External studies have no frame data to describe.
            messages.error(
                request, "Frame data dictionary is not available for external studies"
            )
        else:
            filename = "{}_{}_{}".format(
                study_name_for_files(study.name), study.uuid, "all-frames-dict"
            )
            # .delay() queues the task (presumably Celery): it builds the
            # dictionary, uploads it, and emails the user a link.
            build_framedata_dict.delay(filename, study.uuid, self.request.user.uuid)
            messages.success(
                request,
                f"A frame data dictionary for {study.name} is being generated. You will be emailed a link when it's completed.",
            )
        return HttpResponseRedirect(
            reverse("exp:study-responses-all", kwargs=self.kwargs)
        )
class StudyDemographics(
    CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView
):
    """
    StudyDemographics view shows participant demographic snapshots associated
    with each response to the study
    """

    template_name = "studies/study_demographics.html"
    queryset = Study.objects.all()

    def get_context_data(self, **kwargs):
        """
        Adds information for displaying how many and which types of responses are available.
        """
        context = super().get_context_data(**kwargs)
        # Count only responses this researcher is permitted to see.
        context["n_responses"] = (
            context["study"].responses_for_researcher(self.request.user).count()
        )
        # Flags the template uses to show/hide the download options.
        context["can_view_regular_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
        )
        context["can_view_preview_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
        )
        return context
class StudyDemographicsJSON(DemographicDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads all participant demographics in JSON format.
    """

    def render_to_response(self, context, **response_kwargs):
        """Return every demographic snapshot as one JSON array attachment."""
        study = self.study
        selected_options = self.request.GET.getlist("demo_options")
        paginator = context["paginator"]
        serialized = []
        for page_number in paginator.page_range:
            for resp in paginator.page(page_number):
                snapshot = construct_response_dictionary(
                    resp,
                    DEMOGRAPHIC_COLUMNS,
                    selected_options,
                    include_exp_data=False,
                )
                serialized.append(json.dumps(snapshot, indent="\t", default=str))
        cleaned_data = f"[ {', '.join(serialized)} ]"
        filename = "{}_{}.json".format(
            study_name_for_files(study.name), "all-demographic-snapshots"
        )
        response = HttpResponse(cleaned_data, content_type="text/json")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyDemographicsCSV(DemographicDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads all participant demographics in CSV format.
    """

    def render_to_response(self, context, **response_kwargs):
        """Return all demographic snapshots as a single CSV attachment."""
        study = self.study
        paginator = context["paginator"]
        selected_options = set(self.request.GET.getlist("demo_options"))
        headers_for_download = get_demographic_headers(selected_options)
        rows = []
        for page_number in paginator.page_range:
            for resp in paginator.page(page_number):
                rows.append(
                    {col.id: col.extractor(resp) for col in DEMOGRAPHIC_COLUMNS}
                )
        output, writer = csv_dict_output_and_writer(headers_for_download)
        writer.writerows(rows)
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name), "all-demographic-snapshots"
        )
        response = HttpResponse(output.getvalue(), content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyDemographicsDictCSV(DemographicDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads a data dictionary for participant demographics in CSV format.
    Does not depend on any actual data.
    """

    def render_to_response(self, context, **response_kwargs):
        """Return a column/description dictionary CSV for the selected headers."""
        selected_options = set(self.request.GET.getlist("demo_options"))
        headers_for_download = get_demographic_headers(selected_options)
        descriptions = [
            {"column": col.id, "description": col.description}
            for col in DEMOGRAPHIC_COLUMNS
            if col.id in headers_for_download
        ]
        output, writer = csv_dict_output_and_writer(["column", "description"])
        writer.writerows(descriptions)
        filename = "{}_{}.csv".format(
            study_name_for_files(self.study.name), "all-demographic-snapshots-dict"
        )
        response = HttpResponse(output.getvalue(), content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyCollisionCheck(ResponseDownloadMixin, View):
"""
Hitting this URL checks for collisions among all child and account hashed IDs, and returns a string describing
any collisions (empty string if none).
"""
def get(self, request, *args, **kwargs):
study = self.study
responses = (
study.consented_responses.order_by("id")
.select_related("child", "child__user", "study")
.values(
"uuid",
"child__uuid",
"child__user__uuid",
"study__uuid",
"study__salt",
"study__hash_digits",
)
)
child_dict = {}
account_dict = {}
collision_text = ""
# Note: could also just check number of unique global vs. hashed IDs in full dataset;
# only checking one-by-one for more informative output.
paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
participant_hashed_id = hash_participant_id(resp)
participant_global_id = resp["child__user__uuid"]
child_hashed_id = hash_child_id(resp)
child_global_id = resp["child__uuid"]
if participant_hashed_id in account_dict:
if participant_global_id != account_dict[participant_hashed_id]:
collision_text += "Participant hashed ID {} ({}, {})\n".format(
participant_hashed_id,
account_dict[participant_hashed_id],
participant_global_id,
)
else:
account_dict[participant_hashed_id] | |
= True,
name: Optional[str] = None,
dropout_in_single_layer: bool = False,
skip_conn: bool = False,
projsz: Optional[int] = None,
**kwargs,
):
"""Produce a stack of LSTMs with dropout performed on all but the last layer.
:param insz: The size of the input or `None`
:param hsz: The number of hidden units per LSTM
:param nlayers: The number of layers of LSTMs to stack
:param pdrop: The probability of dropping a unit value during dropout, defaults to 0
:param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
:param name: TF only! Provide a graph layer name
:param dropout_in_single_layer: TF only! If we have a single layer, should we dropout (defaults to `False`)
:param skip_conn: TF only! This parameter isnt currently supported in TF Keras implementation
:param projsz: TF only! This parameter isnt currently supported in TF Keras implementation
"""
super().__init__(name=name)
self.output_dim = hsz
self._requires_length = requires_length
self.rnns = []
for _ in range(nlayers - 1):
self.rnns.append(
tf.keras.layers.LSTM(
hsz,
return_sequences=True,
recurrent_dropout=pdrop if variational else 0.0,
dropout=pdrop if not variational else 0.0,
)
)
if nlayers == 1 and not dropout_in_single_layer and not variational:
pdrop = 0.0
self.rnns.append(
tf.keras.layers.LSTM(
hsz,
return_sequences=True,
return_state=True,
recurrent_dropout=pdrop if variational else 0.0,
dropout=pdrop if not variational else 0.0,
)
)
def output_fn(self, output, state):
return output, state
def call(self, inputs):
"""RNNs over input sequence of `[B, T, C]` and lengths `[B]`, output `[B, S, H]` where `S = max(lengths)`
:param inputs: A tuple of `(sequence, lengths)`, `sequence` shape `[B, T, C]`, lengths shape = `[B]`
:return: Output depends on the subclass handling
"""
inputs, lengths = tensor_and_lengths(inputs)
mask = tf.sequence_mask(lengths)
max_length = tf.reduce_max(lengths)
inputs = inputs[:, :max_length, :]
for rnn in self.rnns:
outputs = rnn(inputs, mask=mask)
inputs = outputs
rnnout, h, c = outputs
return self.output_fn(rnnout, (h, c))
    @property
    def requires_length(self) -> bool:
        # True when callers must supply sequence lengths alongside the input.
        return self._requires_length
class LSTMEncoderWithState(tf.keras.layers.Layer):
    """LSTM encoder producing the hidden state and the output, where the input doesnt require any padding

    Unlike the length-masked encoders, every layer here returns its state so
    the caller can carry hidden state across calls (e.g. language modeling).
    """

    def __init__(
        self,
        insz: Optional[int],
        hsz: int,
        nlayers: int = 1,
        pdrop: float = 0.0,
        variational: bool = False,
        name: Optional[str] = None,
        dropout_in_single_layer: bool = False,
        skip_conn: bool = False,
        projsz: Optional[int] = None,
        **kwargs,
    ):
        """Produce a stack of stateful LSTMs with dropout on all but the last layer.

        :param insz: The size of the input or `None` (kept for backend parity)
        :param hsz: The number of hidden units per LSTM
        :param nlayers: The number of layers of LSTMs to stack
        :param pdrop: The probability of dropping a unit value during dropout, defaults to 0
        :param variational: Apply variational (recurrent) dropout instead of input dropout
        :param name: TF only! Provide a graph layer name
        :param dropout_in_single_layer: TF only! If we have a single layer, should we dropout (defaults to `False`)
        :param skip_conn: TF only! This parameter isnt currently supported in TF Keras implementation
        :param projsz: TF only! This parameter isnt currently supported in TF Keras implementation
        """
        super().__init__(name=name)
        self._requires_length = False
        self.hsz = hsz
        self.rnns = []
        for _ in range(nlayers - 1):
            self.rnns.append(
                tf.keras.layers.LSTM(
                    hsz,
                    return_sequences=True,
                    return_state=True,
                    recurrent_dropout=pdrop if variational else 0.0,
                    dropout=pdrop if not variational else 0.0,
                )
            )
        if nlayers == 1 and not dropout_in_single_layer and not variational:
            # A lone layer gets no dropout unless explicitly requested.
            pdrop = 0.0
        self.rnns.append(
            tf.keras.layers.LSTM(
                hsz,
                return_sequences=True,
                return_state=True,
                recurrent_dropout=pdrop if variational else 0.0,
                dropout=pdrop if not variational else 0.0,
            )
        )
        self.requires_state = True

    @property
    def output_dim(self):
        # Hidden size doubles as the output feature dimension.
        return self.hsz

    def call(self, inputs):
        """The format of the output here is

        output: `[B, T, H]`
        hidden: `List[(h, c), (h, c), ...]`

        :param inputs: A tuple of `(sequence, per-layer initial states or None)`
        :return: A tuple of `(output, list of per-layer (h, c) states)`
        """
        inputs, hidden_state_input = inputs
        hidden_outputs = []
        initial_state = None
        for i, rnn in enumerate(self.rnns):
            if hidden_state_input is not None:
                hidden_state = hidden_state_input[i]
                initial_state = (hidden_state[0], hidden_state[1])
            outputs, h, c = rnn(inputs, initial_state=initial_state)
            hidden_outputs.append((h, c))
            inputs = outputs
        return outputs, hidden_outputs

    def zero_state(self, batchsz: int):
        """Zero state for LSTM with batch size given

        :param batchsz: The batch size
        """
        # One (h, c) pair of zeros per layer.  Previously the h tensor used
        # np.float32 and the c tensor tf.float32; both now consistently use
        # tf.float32.
        return [
            (
                tf.zeros((batchsz, self.hsz), dtype=tf.float32),
                tf.zeros((batchsz, self.hsz), dtype=tf.float32),
            )
            for _ in self.rnns
        ]
class LSTMEncoderSequence(LSTMEncoder):
    """LSTM encoder yielding only the transduced sequence.

    Consumes a `(tensor [B, T, C], lengths [B])` pair and emits an output
    sequence `[B, S, H]` with `S = max(lengths)`, which may be shorter than
    the input `T`.
    """

    def output_fn(self, output, state):
        """Discard the state and keep the sequence.

        :param output: The sequence
        :param state: The hidden state (ignored)
        :return: The sequence `[B, S, H]`
        """
        return output
class LSTMEncoderHidden(LSTMEncoder):
    """LSTM encoder that reduces to the final hidden vector.

    Consumes a `(tensor [B, T, C], lengths [B])` pair and returns the top
    layer's hidden unit tensor of shape `[B, H]`.
    """

    def output_fn(self, output, state):
        """Pull `h` out of the `(h, c)` state pair.

        :param output: The sequence (ignored)
        :param state: The `(h, c)` state tuple
        :return: hidden unit tensor of shape `[B, H]`
        """
        return state[0]
class LSTMEncoderHiddenContext(LSTMEncoder):
    """LSTM encoder that returns the complete final `(h, c)` state."""

    def output_fn(self, output, state):
        """Ignore the sequence and keep the last state.

        :param output: The sequence (ignored)
        :param state: The hidden state
        :return: The last hidden state `(h, c)`
        """
        return state
class GRUEncoder(tf.keras.layers.Layer):
    """GRU encoder to produce the transduced output sequence.

    Takes a `(tensor [B, T, C], lengths [B])` pair and produces an output
    sequence of shape `[B, S, H]` where `S = max(lengths)`.  The output
    sequence may be shorter than the input when `max(lengths) < T`.
    """

    def __init__(
        self,
        insz: Optional[int],
        hsz: int,
        nlayers: int = 1,
        pdrop: float = 0.0,
        variational: bool = False,
        requires_length: bool = True,
        name: Optional[str] = None,
        dropout_in_single_layer: bool = False,
        **kwargs,
    ):
        """Produce a stack of GRUs with dropout performed on all but the last layer.

        :param insz: An optional input size for parity with other layer backends. Can pass `None`
        :param hsz: The number of hidden units per GRU
        :param nlayers: The number of layers of GRUs to stack
        :param pdrop: The probability of dropping a unit value during dropout, defaults to 0
        :param variational: variational recurrence is on, defaults to `False`
        :param requires_length: Does the input require an input length (defaults to `True`)
        :param name: TF only! Put a name in the graph for this layer. Optional, defaults to `None`
        :param dropout_in_single_layer: TF only! If there is a single layer, should we do dropout, defaults to `False`
        """
        super().__init__(name=name)
        self._requires_length = requires_length
        self.output_dim = hsz
        # All layers but the last return sequences only; only one kind of
        # dropout is active depending on the `variational` flag.
        self.rnns = [
            tf.keras.layers.GRU(
                hsz,
                return_sequences=True,
                recurrent_dropout=pdrop if variational else 0.0,
                dropout=pdrop if not variational else 0.0,
            )
            for _ in range(nlayers - 1)
        ]
        if nlayers == 1 and not dropout_in_single_layer and not variational:
            # A lone layer gets no dropout unless explicitly requested.
            pdrop = 0.0
        self.rnns.append(
            tf.keras.layers.GRU(
                hsz,
                return_sequences=True,
                return_state=True,
                recurrent_dropout=pdrop if variational else 0.0,
                dropout=pdrop if not variational else 0.0,
            )
        )

    def output_fn(self, output, state):
        """Default transduction: pair the sequence with the final state."""
        return output, state

    def call(self, inputs):
        """RNNs over input sequence of `[B, T, C]` and lengths `[B]`, output `[B, S, H]` where `S = max(lengths)`

        :param inputs: A tuple of `(sequence, lengths)`, `sequence` shape `[B, T, C]`, lengths shape = `[B]`
        :return: Output depends on the subclass handling
        """
        sequence, lengths = tensor_and_lengths(inputs)
        mask = tf.sequence_mask(lengths)
        sequence = sequence[:, : tf.reduce_max(lengths), :]
        for layer in self.rnns:
            outputs = layer(sequence, mask=mask)
            sequence = outputs
        rnnout, h = outputs
        return self.output_fn(rnnout, h)

    @property
    def requires_length(self) -> bool:
        """Whether callers must supply lengths alongside the input."""
        return self._requires_length
class GRUEncoderAll(tf.keras.layers.Layer):
"""GRU encoder that passes along the full output and hidden states for each layer
Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]`
This returns a 2-tuple of outputs `[B, S, H]` where `S = max(lengths)`, for the output vector sequence,
and a hidden vector `[L, B, H]`
"""
def __init__(
self,
insz: Optional[int],
hsz: int,
nlayers: int = 1,
pdrop: float = 0.0,
variational: bool = False,
requires_length: bool = True,
name: Optional[str] = None,
dropout_in_single_layer=False,
**kwargs,
):
"""Produce a stack of GRUs with dropout performed on all but the last layer.
:param insz: The size of the input (or `None`)
:param hsz: The number of hidden units per GRU
:param nlayers: The number of layers of GRUs to stack
:param pdrop: The probability of dropping a unit value during dropout, defaults to 0
:param variational: TF only! apply variational dropout
:param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
:param name: TF only! A name to give the layer in the graph
:param dropout_in_single_layer: TF only! If its a single layer cell, should we do dropout? Default | |
in the comments.
//
// other parameters:
//
// h = hedron array data according to rev flag:
// yes reversed : not reversed
// 0 1 2 3 4 5 6 7 : 0 1 2 3 4 5 6 7
// len1 len3 atom1 atom3 a1 a2 a1-a2 a2-a3 len1 len3 atom1 atom3 a1 a3 a1-a2 a2-a3
//
// split: chop half of the hedron - to selectively print parts of a rotating
// bond to be glued together. top or bottom half selected by global caTop
// (C-alpha top) variable, undef by default so bottom half.
//
// supportSel: enable support structure inside rotatable bond to print in place.
// Please note the bond needs to be exactly parallel to the buildplate and the
// layerHeight global variable above needs to be set correctly for the
// structure to be correctly created by your slicer software.
//
*/
module hedron(h,rev=0,scal,split=0, supportSel) {
    newh = hFlip(h, rev); // make a consistent hedron array regardless of rev flag
    bondRad = bondRadius * scal;
    // Solid geometry first (union), then subtract the split cutter and
    // female-join cavities (difference).
    difference() {
        union(){
            if (h[7]) {
                // central atom at 0,0,0
                atom(h[4],scal);
            }
            if (newh[5] && newh[7] != FemaleJoinBond) { // not female join
                // comments for non-reversed case
                // atom 3 is len3 up on +z
                translate([0,0,newh[1]])
                difference() {
                    atom(newh[3],scal * (newh[7] == SkinnyBond ? 0.7 : 1)); // if skinny bond make atom (C-beta) same diameter as bond
                    if (newh[7] == HBond) { // make room for hbond magnet through atom - this branch not used for backbone N,O
                        translate([0,0,scal*hblen/2-magL-pClearance])
                            cylinder(h=magL+pClearance,r=magR+pClearance,$fn=8);
                    }
                }
            }
            if (newh[7]) {
                // atom 2 - atom 3 bond from origin up +z distance len3
                bond(newh[1], bondRad, scal, newh[7], h[4], ver=1, supportSel=supportSel);
            }
            rotate([0, h[1], 0]) { // rotate following elements by angle2 about Y
                if (newh[6]) {
                    bond(newh[0], bondRad, scal, newh[6], h[4], ver=1, supportSel=supportSel); // h[4] is center atom (atom 2)
                }
                if (newh[4] && newh[6] != FemaleJoinBond) { // if draw atom 2 and atom1-atom2 not joiner
                    translate([0,0,newh[0]]) {
                        difference() {
                            atom(newh[2],scal * (newh[6] == SkinnyBond ? 0.7 : 1)); // put atom1 sphere len1 away on Z
                            if (newh[6] == HBond) { // make room for hbond magnet through atom
                                translate([0,0,scal*hblen/2-magL-pClearance])
                                    cylinder(h=magL+pClearance,r=magR+pClearance,$fn=8);
                            }
                        }
                    }
                }
            }
        }
        if (split) {
            // top / bottom half cutter
            // Cube straddling both arms removes one half; a small rotated
            // cube adds/removes a registration notch depending on caTop.
            thick = 2*bondRadius * scal;
            Zdim = newh[0];
            Xdim = newh[1];
            cside = 7* defaultAtomRadius * atomScale * scal / 12 + (caTop ? pClearance : -pClearance);
            difference() {
                translate([-Xdim,((rev || caTop) ? 0 : -thick),-Zdim]) {
                    cube([2*Xdim,thick,2*Zdim]);
                }
                if (!caTop) {
                    rotate([0,(rev ? h[1] : 0),0])
                        rotate([45,0,0])
                            cube([cside, cside, cside],center=true);
                }
            }
            if (caTop) {
                //translate([tx+cside,0,tx+cside])
                rotate([0,(rev ? h[1] : 0),0])
                    rotate([45,0,0])
                        cube([cside, cside, cside], center=true);
            }
        }
        if (newh[7] == FemaleJoinBond) { // female join
            joiner(newh[1], scal, male=false, ver=1, supportSelect=supportSel);
        }
        if (newh[6] == FemaleJoinBond) { // female join
            rotate([0, h[1], 0]) { // rotate following elements by angle2 about Y
                joiner(newh[0], scal, male=false, ver=1, supportSelect=supportSel);
                translate([0,0,newh[0]])
                    atom(newh[2],scal+0.5,clearance); // clearance for atom against join outer cylinder
            }
        }
        if (newh[7] == FemaleJoinBond || newh[6] == FemaleJoinBond) { // female join both hedron arms
            translate([0,0,newh[1]]) atom(newh[3],scal+0.5,clearance); // clearance for atom against join outer cylinder
        }
    }
}
/*
//
// Hook to call custom routines for specific hedra.
//
// Residue is h[h_residue]
// Sequence position is h[h_seqpos]
//
*/
module hedronDispatch(h,rev=0,scal) {
    // default action is just to pass to hedron()
    // (supportSel = 1 when the global `support` flag is set)
    hedron(h, rev, scal, 0, (support ? 1 : 0));
/*
    // Some examples for special handling for specific hedra below:
    // note use of h_seqpos, h_residue, h_class for selecting hedra
    // bool flag caTop (for rotatable bond part) needs to be a global variable
    // so hedron() above can see it.
    caBase1 = false; // only make bottom of N_C-alpha_C hedron
    caBase2 = false; // same as caBase1 but for case of reversed hedron (for testing, should be identical to caBase1 result)
    amideOnly = false; // make only the first amide
    if (caTop) {
        // these examples select a specific sequence position (h[h_seqpos] == n)
        if (h[h_seqpos] == 1) {
            if (h[h_class] == "NCAC") {
                hedron(h, rev, scal, 1);
            } else if (h[h_class] == "CBCAC") {
                color("yellow") { // ca-cb
                    hedron(h, rev, scal);
                }
            }
        }
    } else if (caBase1) {
        if (h[h_seqpos] == 1 && (h[h_class] == "NCAC")) {
            hedron(h, rev, scal, true, (support ? 1 : 0));
        }
    } else if (caBase2) {
        if (h[h_seqpos] == 5 && (h[h_class] == "NCAC")) {
            hedron(h, rev, scal, true, (support ? 1 : 0));
        }
    } else if (amideOnly) {
        if (h[h_seqpos] == 1) {
            if (h[h_class] == "CACN") {
                color("darkgray") {
                    hedron(h, rev, scal);
                }
            } else if (h[h_class] == "CACO") {
                color("red") { // c=o
                    hedron(h, rev, scal);
                }
            } else if (h[h_class] == "CNCA") {
                color("cyan") { // h=n
                    hedron(h, rev, scal);
                }
            }
        } else if ((h[h_seqpos] == 2) && (h[h_class] == "HNCA")) {
            color("cyan") { // h=n
                hedron(h, rev, scal);
            }
        }
        // actions above select out only a single hedron
    } else {
        // actions below will process hedra all but handle selected ones differently
        if (h[h_class] == "NCAC") {
            if (h[h_seqpos] == 1) {
                if (! CCap && NCap) { // make split rotatable bond for terminal NH3
                    hedron(h, rev, scal, true, (support ? 1 : 0));
                }
            } else if (h[h_seqpos] == 5) { // make split rotatable bond for terminal COOH
                hedron(h, rev, scal, true, (support ? 2 : 0)); // note supportSel = 2
            } else {
                hedron(h, rev, scal, 0, (support ? 2 : 0));
            }
        } else if (h[h_class] == "CBCAC") {
            color("yellow") { // ca-cb -- color yellow in OpenSCAD renderer
                if (h[h_seqpos] == 1 ) { // don't make here for N-term
                } else if (h[h_seqpos] == 5 ) { // don't make here for C-term
                } else {
                    hedron(h, rev, scal); // otherwise do make here
                }
            }
        } else if (h[h_class] == "HNCA") {
            color("cyan") { // color h-n in OpenSCAD renderer
                if (h[h_seqpos] == 1) {
                    if (NCap) { // only make at N term if variable NCap is true
                        hedron(h, rev, scal, 0, (support ? 1 : 0));
                    }
                } else {
                    hedron(h, rev, scal, 0, (support ? 1 : 0));
                }
            }
        } else if (h[h_residue] == "P") {
            color("darkgray") // highlight Prolines in OpenSCAD renderer
                hedron(h, rev, scal);
        } else {
            echo("unrecognised hedron", h[h_class]);
            color("pink")
                hedron(h, rev, scal, 0, (support ? 1 : 0));
        }
    }
*/
}
/*
//
// Generate a hedron rotated to specific angle d
//
*/
module d2(d,hedra,scal)
{
    // Build hedron 2 of a dihedron, reversed and rotated to the dihedral
    // angle; transform steps are numbered innermost (1) to outermost (4).
    tz = (d[d_reversed] ? hedra[d[d_h2ndx]][2] : hedra[d[d_h2ndx]][0]); // get h2 len1 depending on reversed
    rotate(d[d_dangle1]) { // 4. rotate h2 to specified dihedral angle
        translate([0,0,tz]) { // 3. translate h2 h2:len1 up +z
            rotate([180, 0, 0]) { // 2. rotate h2r about X so h2:a3 in +z and h2:a1 in -z
                hedronDispatch(hedra[d[d_h2ndx]],(!d[d_reversed]),scal); // 1. reverse hedron 2 orientation = h2r
            }
        }
    }
}
/*
//
// Generate two hedra at specified dihedral angle d
//
*/
module dihedron(d,hedra,scal)
{
    // Render the (up to) two hedra of this dihedron; the d_h1new / d_h2new
    // flags suppress hedra already drawn by a previous dihedron.
    if (d[d_h1new])
        hedronDispatch(hedra[d[d_h1ndx]],d[d_reversed],scal); // reverse h1 if dihedral reversed
    if (d[d_h2new])
        d2(d,hedra,scal);
}
/*
//
// Generate a residue consisting of the set of dihedra in the parameter 'r',
// referring to hedra the table specified in the parameter 'hedra'.
//
*/
module residue(r,hedra, scal)
{
    // Place each dihedron of the residue using its precomputed transform.
    for (d = r) {
        multmatrix(d[d_dihedralTransform]) {
            dihedron(d, hedra, scal);
        }
    }
}
/*
//
// Generate a chain of residues, each positioned by a supplied
// rotation/translation matrix.
//
*/
module chain(protein)
{
    // Walk the chain data and render every residue at its own transform.
    chnD = protein[p_chainData];
    c = chnD[c_residues]; // per-residue placement info
    dihedra = chnD[c_dihedra]; // per-residue dihedra tables
    hedra = chnD[c_hedra]; // shared hedra definitions
    for (r = c) {
        multmatrix(r[r_resTransform]) {
            residue(dihedra[r[r_resNdx]],hedra, protein[p_proteinScale]);
        }
    }
}
/*
//
// OpenSCAD array indices to reference protein data - tied to BioPython code
//
*/
// protein base level
p_pdbid = 0;
p_proteinScale = 1;
p_chainData = 2;
// chain level data
c_chainID = 0;
c_dihedra = 1;
c_hedra = 2;
c_residues = 3;
// hedra definitions
// (layout matches the hedron array described in the hedron() header comment)
h_len1 = 0; // first bond length
h_angle2 = 1; // angle at central atom; used directly by rotate(), so degrees
h_len3 = 2; // second bond length
h_atom1class = 3;
h_atom2class = 4;
h_atom3class = 5;
// NOTE(review): state/bond-state value semantics defined by the generating
// BioPython code -- confirm against that side before changing.
h_atom1state = 6;
h_atom2state = 7;
h_atom3state = 8;
h_bond1state = 9;
h_bond2state = 10;
h_residue = 11;
h_seqpos = 12; // residue sequence position for first atom in hedra
h_class = 13;
// dihedra specifications for each | |
<filename>scripts/arcrest/ags/_networkservice.py
from __future__ import absolute_import
from __future__ import print_function
from .._abstract.abstract import BaseAGSServer
import json
########################################################################
class NetworkService(BaseAGSServer):
    """
    The network service resource represents a network analysis service
    published with ArcGIS Server. The resource provides information about
    the service such as the service description and the various network
    layers (route, closest facility, and service area layers) contained in
    the network analysis service.
    """
    # Connection settings plus lazily-populated caches: every public
    # property calls __init() on first access while its field is None.
    _url = None
    _proxy_url = None
    _proxy_port = None
    _securityHandler = None
    _json = None
    _json_dict = None
    _routeLayers = None
    _currentVersion = None
    _serviceDescription = None
    _serviceAreaLayers = None
    _closestFacilityLayers = None
    _serviceLimits = None
    #----------------------------------------------------------------------
    def __init__(self, url,
                 securityHandler=None,
                 proxy_url=None,
                 proxy_port=None,
                 initialize=False):
        """Constructor

        url - network service REST endpoint
        securityHandler - optional auth handler; its referer_url is adopted
        proxy_url / proxy_port - optional proxy settings
        initialize - when True, fetch service properties eagerly
        """
        self._url = url
        self._securityHandler = securityHandler
        if self._securityHandler is not None:
            self._referer_url = self._securityHandler.referer_url
        self._proxy_port = proxy_port
        self._proxy_url = proxy_url
        if initialize:
            self.__init()
    #----------------------------------------------------------------------
    def __init(self):
        """ initializes the properties """
        # Fetch the service JSON and map each top-level key onto the
        # matching private attribute (names derived from the public
        # properties via dir()); layer-name lists are wrapped in typed
        # layer objects instead of stored raw.
        params = {
            "f" : "json",
        }
        json_dict = self._get(self._url, params,
                             securityHandler=self._securityHandler,
                             proxy_url=self._proxy_url,
                             proxy_port=self._proxy_port)
        self._json_dict = json_dict
        self._json = json.dumps(self._json_dict)
        attributes = [attr for attr in dir(self)
                      if not attr.startswith('__') and \
                      not attr.startswith('_')]
        for k,v in json_dict.items():
            if k in attributes:
                if k == "routeLayers" and json_dict[k]:
                    self._routeLayers = []
                    for rl in v:
                        self._routeLayers.append(
                            RouteNetworkLayer(url=self._url + "/%s" % rl,
                                              securityHandler=self._securityHandler,
                                              proxy_url=self._proxy_url,
                                              proxy_port=self._proxy_port,
                                              initialize=False))
                elif k == "serviceAreaLayers" and json_dict[k]:
                    self._serviceAreaLayers = []
                    for sal in v:
                        self._serviceAreaLayers.append(
                            ServiceAreaNetworkLayer(url=self._url + "/%s" % sal,
                                                    securityHandler=self._securityHandler,
                                                    proxy_url=self._proxy_url,
                                                    proxy_port=self._proxy_port,
                                                    initialize=False))
                elif k == "closestFacilityLayers" and json_dict[k]:
                    self._closestFacilityLayers = []
                    for cf in v:
                        self._closestFacilityLayers.append(
                            ClosestFacilityNetworkLayer(url=self._url + "/%s" % cf,
                                                        securityHandler=self._securityHandler,
                                                        proxy_url=self._proxy_url,
                                                        proxy_port=self._proxy_port,
                                                        initialize=False))
                else:
                    # Empty layer lists also land here, storing the raw
                    # (falsy) value so the key is still recorded.
                    setattr(self, "_"+ k, v)
            else:
                print ("attribute %s is not implemented." % k)
    #----------------------------------------------------------------------
    def __str__(self):
        """returns object as string"""
        if self._json is None:
            self.__init()
        return self._json
    #----------------------------------------------------------------------
    def __iter__(self):
        """
        returns key/value pair
        """
        attributes = json.loads(str(self))
        for att in attributes.keys():
            yield [att, getattr(self, att)]
    #----------------------------------------------------------------------
    # All properties below are lazy: the backing field is fetched from the
    # server on first access.
    @property
    def currentVersion(self):
        if self._currentVersion is None:
            self.__init()
        return self._currentVersion
    #----------------------------------------------------------------------
    @property
    def serviceDescription(self):
        if self._serviceDescription is None:
            self.__init()
        return self._serviceDescription
    #----------------------------------------------------------------------
    @property
    def routeLayers(self):
        if self._routeLayers is None:
            self.__init()
        return self._routeLayers
    #----------------------------------------------------------------------
    @property
    def serviceAreaLayers(self):
        if self._serviceAreaLayers is None:
            self.__init()
        return self._serviceAreaLayers
    #----------------------------------------------------------------------
    @property
    def closestFacilityLayers(self):
        if self._closestFacilityLayers is None:
            self.__init()
        return self._closestFacilityLayers
    #----------------------------------------------------------------------
    @property
    def serviceLimits(self):
        if self._serviceLimits is None:
            self.__init()
        return self._serviceLimits
########################################################################
class NetworkLayer(BaseAGSServer):
    """
    The network layer resource represents a single network layer in
    a network analysis service published by ArcGIS Server. It provides basic
    information about the network layer such as its name, type, and network
    classes. Additionally, depending on the layer type, it provides different
    pieces of information.
    It is a base class for RouteNetworkLayer, ServiceAreaNetworkLayer, and
    ClosestFacilityNetworkLayer.
    """
    # Connection settings plus lazily-populated caches: every public
    # property calls __init() on first access while its field is None.
    _url = None
    _proxy_url = None
    _proxy_port = None
    _securityHandler = None
    _json = None
    _json_dict = None
    #common attrs for all Network Layer types
    _currentVersion = None
    _layerName = None
    _layerType = None
    _impedance = None
    _restrictions = None
    _snapTolerance = None
    _maxSnapTolerance = None
    _snapToleranceUnits = None
    _ignoreInvalidLocations = None
    _restrictUTurns = None
    _accumulateAttributeNames = None
    _attributeParameterValues = None
    _outputSpatialReference = None
    _useHierarchy = None
    _hierarchyAttributeName = None
    _hierarchyLevelCount = None
    _hierarchyMaxValues = None
    _hierarchyNumTransitions = None
    _networkClasses = None
    _networkDataset = None
    _hasM = None
    _hasZ = None
    _supportedTravelModes = None
    _serviceLimits = None
    #----------------------------------------------------------------------
    def __init__(self, url, securityHandler=None,
                 proxy_url=None, proxy_port=None,
                 initialize=False):
        """Constructor

        url - REST endpoint URL for the network layer
        securityHandler - optional auth handler; its referer_url is adopted
        proxy_url / proxy_port - optional proxy settings
        initialize - when True, fetch layer properties eagerly
        """
        self._url = url
        self._securityHandler = securityHandler
        if securityHandler is not None:
            self._referer_url = securityHandler.referer_url
        self._proxy_port = proxy_port
        self._proxy_url = proxy_url
        if initialize:
            self.__init()
    #----------------------------------------------------------------------
    def __init(self):
        """ initializes all the properties """
        params = {
            "f" : "json"
        }
        # Layer names may contain spaces (e.g. 'Service Area'); the REST
        # endpoint expects them URL-encoded as '+'.
        self._url = self._url.replace(' ','+')
        json_dict = self._get(url=self._url, param_dict=params,
                              securityHandler=self._securityHandler,
                              proxy_url=self._proxy_url,
                              proxy_port=self._proxy_port)
        # Map each JSON key onto its matching private attribute; the
        # attribute list is derived from the public property names.
        attributes = [attr for attr in dir(self)
                      if not attr.startswith('__') and \
                      not attr.startswith('_')]
        for k,v in json_dict.items():
            if k in attributes:
                setattr(self, "_"+ k, json_dict[k])
            else:
                print (k, " - attribute not implemented in NetworkLayer.")
        # NOTE: this method previously ended with `del k,v`, which raised
        # NameError whenever the service returned an empty JSON object.
    #----------------------------------------------------------------------
    # All properties below are lazy: the backing field is fetched from the
    # server on first access.
    @property
    def currentVersion(self):
        if self._currentVersion is None:
            self.__init()
        return self._currentVersion
    #----------------------------------------------------------------------
    @property
    def layerName(self):
        if self._layerName is None:
            self.__init()
        return self._layerName
    #----------------------------------------------------------------------
    @property
    def layerType(self):
        if self._layerType is None:
            self.__init()
        return self._layerType
    #----------------------------------------------------------------------
    @property
    def impedance(self):
        if self._impedance is None:
            self.__init()
        return self._impedance
    #----------------------------------------------------------------------
    @property
    def restrictions(self):
        if self._restrictions is None:
            self.__init()
        return self._restrictions
    #----------------------------------------------------------------------
    @property
    def snapTolerance(self):
        if self._snapTolerance is None:
            self.__init()
        return self._snapTolerance
    #----------------------------------------------------------------------
    @property
    def maxSnapTolerance(self):
        if self._maxSnapTolerance is None:
            self.__init()
        return self._maxSnapTolerance
    #----------------------------------------------------------------------
    @property
    def snapToleranceUnits(self):
        if self._snapToleranceUnits is None:
            self.__init()
        return self._snapToleranceUnits
    #----------------------------------------------------------------------
    @property
    def ignoreInvalidLocations(self):
        if self._ignoreInvalidLocations is None:
            self.__init()
        return self._ignoreInvalidLocations
    #----------------------------------------------------------------------
    @property
    def restrictUTurns(self):
        if self._restrictUTurns is None:
            self.__init()
        return self._restrictUTurns
    #----------------------------------------------------------------------
    @property
    def accumulateAttributeNames(self):
        if self._accumulateAttributeNames is None:
            self.__init()
        return self._accumulateAttributeNames
    #----------------------------------------------------------------------
    @property
    def attributeParameterValues(self):
        if self._attributeParameterValues is None:
            self.__init()
        return self._attributeParameterValues
    #----------------------------------------------------------------------
    @property
    def outputSpatialReference(self):
        if self._outputSpatialReference is None:
            self.__init()
        return self._outputSpatialReference
    #----------------------------------------------------------------------
    @property
    def useHierarchy(self):
        if self._useHierarchy is None:
            self.__init()
        return self._useHierarchy
    #----------------------------------------------------------------------
    @property
    def hierarchyAttributeName(self):
        if self._hierarchyAttributeName is None:
            self.__init()
        return self._hierarchyAttributeName
    #----------------------------------------------------------------------
    @property
    def hierarchyLevelCount(self):
        if self._hierarchyLevelCount is None:
            self.__init()
        return self._hierarchyLevelCount
    #----------------------------------------------------------------------
    @property
    def hierarchyMaxValues(self):
        if self._hierarchyMaxValues is None:
            self.__init()
        return self._hierarchyMaxValues
    #----------------------------------------------------------------------
    @property
    def hierarchyNumTransitions(self):
        if self._hierarchyNumTransitions is None:
            self.__init()
        return self._hierarchyNumTransitions
    #----------------------------------------------------------------------
    @property
    def networkClasses(self):
        if self._networkClasses is None:
            self.__init()
        return self._networkClasses
    #----------------------------------------------------------------------
    @property
    def networkDataset(self):
        if self._networkDataset is None:
            self.__init()
        return self._networkDataset
    #----------------------------------------------------------------------
    @property
    def hasM(self):
        if self._hasM is None:
            self.__init()
        return self._hasM
    #----------------------------------------------------------------------
    @property
    def hasZ(self):
        if self._hasZ is None:
            self.__init()
        return self._hasZ
    #----------------------------------------------------------------------
    @property
    def supportedTravelModes(self):
        if self._supportedTravelModes is None:
            self.__init()
        return self._supportedTravelModes
    #----------------------------------------------------------------------
    @property
    def serviceLimits(self):
        if self._serviceLimits is None:
            self.__init()
        return self._serviceLimits
########################################################################
class RouteNetworkLayer(NetworkLayer):
"""
The Route Network Layer which has common properties of Network Layer
as well as some attributes unique to Route Network Layer only.
"""
#specific to Route
_findBestSequence = None
_useStartTime = None
_startTime = None
_startTimeIsUTC = None
_useTimeWindows = None
_preserveFirstStop = None
_preserveLastStop = None
_outputLineType = None
_directionsLanguage = None
_directionsSupportedLanguages = None
_directionsStyleNames = None
_directionsLengthUnits = None
_directionsTimeAttribute = None
#----------------------------------------------------------------------
def __init__(self, url, securityHandler=None,
proxy_url=None, proxy_port=None,
initialize=False):
""" initializes all properties """
NetworkLayer.__init__(self,url)
#----------------------------------------------------------------------
def __init(self):
""" initializes all the properties """
params = {
"f" : "json"
}
json_dict = self._get(url=self._url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.items():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print( k, " - attribute not implemented in RouteNetworkLayer.")
del k,v
#----------------------------------------------------------------------
@property
def directionsTimeAttribute(self):
if self._directionsTimeAttribute is None:
self.__init()
return self._directionsTimeAttribute
#----------------------------------------------------------------------
@property
def directionsLengthUnits(self):
if self._directionsLengthUnits is None:
self.__init()
return self._directionsLengthUnits
#----------------------------------------------------------------------
@property
def outputLineType(self):
if self._outputLineType is None:
self.__init()
return self._outputLineType
#----------------------------------------------------------------------
@property
def directionsLanguage(self):
if self._directionsLanguage is None:
self.__init()
return self._directionsLanguage
#----------------------------------------------------------------------
@property
def directionsSupportedLanguages(self):
if self._directionsSupportedLanguages is None:
self.__init()
return self._directionsSupportedLanguages
#----------------------------------------------------------------------
@property
def directionsStyleNames(self):
if self._directionsStyleNames is None:
self.__init()
return self._directionsStyleNames
#----------------------------------------------------------------------
@property
def useStartTime(self):
if self._useStartTime is None:
self.__init()
return self._useStartTime
#----------------------------------------------------------------------
@property
def startTime(self):
if self._startTime is None:
self.__init()
return self._startTime
#----------------------------------------------------------------------
@property
def startTimeIsUTC(self):
if self._startTimeIsUTC is None:
self.__init()
return self._startTimeIsUTC
#----------------------------------------------------------------------
@property
def useTimeWindows(self):
if self._useTimeWindows is None:
self.__init()
return self._useTimeWindows
#----------------------------------------------------------------------
@property
def preserveFirstStop(self):
if self._preserveFirstStop is None:
self.__init()
return self._preserveFirstStop
#----------------------------------------------------------------------
@property
def preserveLastStop(self):
if self._preserveLastStop is None:
self.__init()
return self._preserveLastStop
#----------------------------------------------------------------------
@property
def findBestSequence(self):
if self._findBestSequence is None:
self.__init()
return self._findBestSequence
#----------------------------------------------------------------------
def solve(self,stops,
method="POST",
barriers=None,
polylineBarriers=None,
polygonBarriers=None,
travelMode=None,
attributeParameterValues=None,
returnDirections=None,
returnRoutes=True,
returnStops=False,
returnBarriers=False,
returnPolylineBarriers=True,
returnPolygonBarriers=True,
outSR=None,
ignoreInvalidLocations=True,
outputLines=None,
findBestSequence=False,
preserveFirstStop=True,
preserveLastStop=True,
useTimeWindows=False,
startTime=None,
startTimeIsUTC=False,
accumulateAttributeNames=None,
impedanceAttributeName=None,
restrictionAttributeNames=None,
restrictUTurns=None,
useHierarchy=True,
directionsLanguage=None,
directionsOutputType=None,
directionsStyleName=None,
directionsLengthUnits=None,
directionsTimeAttributeName=None,
outputGeometryPrecision=None,
outputGeometryPrecisionUnits=None,
returnZ=False
):
"""The solve operation is performed on a network layer resource.
The solve operation is supported on a network layer whose layerType
is esriNAServerRouteLayer. You can provide arguments to the solve
route operation as query parameters.
Inputs:
stops - The set of stops loaded as network locations during analysis.
Stops can be specified using a simple comma / semi-colon
based syntax or as a JSON structure. If stops are not
specified, preloaded | |
'refresh_frequency_mins': {'key': 'refreshFrequencyMins', 'type': 'float'},
'reboot_if_needed': {'key': 'rebootIfNeeded', 'type': 'bool'},
'configuration_mode_frequency_mins': {'key': 'configurationModeFrequencyMins', 'type': 'float'},
}
    def __init__(
        self,
        *,
        configuration_mode: Optional[Union[str, "ConfigurationMode"]] = None,
        allow_module_overwrite: Optional[bool] = None,
        action_after_reboot: Optional[Union[str, "ActionAfterReboot"]] = None,
        refresh_frequency_mins: Optional[float] = 30,
        reboot_if_needed: Optional[bool] = None,
        configuration_mode_frequency_mins: Optional[float] = 15,
        **kwargs
    ):
        """Keyword-only constructor for the DSC configuration settings.

        Defaults: ``refresh_frequency_mins`` is 30 and
        ``configuration_mode_frequency_mins`` is 15; all other settings are
        unset (``None``) unless supplied.
        """
        super(ConfigurationSetting, self).__init__(**kwargs)
        self.configuration_mode = configuration_mode
        self.allow_module_overwrite = allow_module_overwrite
        self.action_after_reboot = action_after_reboot
        self.refresh_frequency_mins = refresh_frequency_mins
        self.reboot_if_needed = reboot_if_needed
        self.configuration_mode_frequency_mins = configuration_mode_frequency_mins
class ErrorResponse(msrest.serialization.Model):
    """Error response of an operation failure.
    :param error:
    :type error: ~azure.mgmt.guestconfig.models.ErrorResponseError
    """
    # Attribute name -> REST wire key and msrest type.
    _attribute_map = {
        'error': {'key': 'error', 'type': 'ErrorResponseError'},
    }
    def __init__(
        self,
        *,
        error: Optional["ErrorResponseError"] = None,
        **kwargs
    ):
        """Keyword-only constructor; ``error`` carries the failure details, if any."""
        super(ErrorResponse, self).__init__(**kwargs)
        self.error = error
class ErrorResponseError(msrest.serialization.Model):
    """ErrorResponseError.
    :param code: Error code.
    :type code: str
    :param message: Detail error message indicating why the operation failed.
    :type message: str
    """
    # Attribute name -> REST wire key and msrest type.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        code: Optional[str] = None,
        message: Optional[str] = None,
        **kwargs
    ):
        """Keyword-only constructor; see the class docstring for parameters."""
        super(ErrorResponseError, self).__init__(**kwargs)
        self.code = code
        self.message = message
class Resource(msrest.serialization.Model):
    """The core properties of ARM resources.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: ARM resource id of the guest configuration assignment.
    :vartype id: str
    :param name: Name of the guest configuration assignment.
    :type name: str
    :param location: Region where the VM is located.
    :type location: str
    :ivar type: The type of the resource.
    :vartype type: str
    """
    # Fields the server populates; marked readonly for the serializer.
    _validation = {
        'id': {'readonly': True},
        'type': {'readonly': True},
    }
    # Attribute name -> REST wire key and msrest type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        name: Optional[str] = None,
        location: Optional[str] = None,
        **kwargs
    ):
        """Keyword-only constructor; ``id`` and ``type`` stay None until set by the server."""
        super(Resource, self).__init__(**kwargs)
        self.id = None
        self.name = name
        self.location = location
        self.type = None
class ProxyResource(Resource):
    """ARM proxy resource.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: ARM resource id of the guest configuration assignment.
    :vartype id: str
    :param name: Name of the guest configuration assignment.
    :type name: str
    :param location: Region where the VM is located.
    :type location: str
    :ivar type: The type of the resource.
    :vartype type: str
    """
    # Fields the server populates; marked readonly for the serializer.
    _validation = {
        'id': {'readonly': True},
        'type': {'readonly': True},
    }
    # Attribute name -> REST wire key and msrest type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        name: Optional[str] = None,
        location: Optional[str] = None,
        **kwargs
    ):
        """Keyword-only constructor; delegates everything to ``Resource``."""
        super(ProxyResource, self).__init__(name=name, location=location, **kwargs)
class GuestConfigurationAssignment(ProxyResource):
    """Guest configuration assignment is an association between a machine and guest configuration.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: ARM resource id of the guest configuration assignment.
    :vartype id: str
    :param name: Name of the guest configuration assignment.
    :type name: str
    :param location: Region where the VM is located.
    :type location: str
    :ivar type: The type of the resource.
    :vartype type: str
    :param properties: Properties of the Guest configuration assignment.
    :type properties: ~azure.mgmt.guestconfig.models.GuestConfigurationAssignmentProperties
    """
    # Fields the server populates; marked readonly for the serializer.
    _validation = {
        'id': {'readonly': True},
        'type': {'readonly': True},
    }
    # Attribute name -> REST wire key and msrest type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'GuestConfigurationAssignmentProperties'},
    }
    def __init__(
        self,
        *,
        name: Optional[str] = None,
        location: Optional[str] = None,
        properties: Optional["GuestConfigurationAssignmentProperties"] = None,
        **kwargs
    ):
        """Keyword-only constructor; see the class docstring for parameters."""
        super(GuestConfigurationAssignment, self).__init__(name=name, location=location, **kwargs)
        self.properties = properties
class GuestConfigurationAssignmentList(msrest.serialization.Model):
    """The response of the list guest configuration assignment operation.
    :param value: Result of the list guest configuration assignment operation.
    :type value: list[~azure.mgmt.guestconfig.models.GuestConfigurationAssignment]
    """
    # Attribute name -> REST wire key and msrest type.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[GuestConfigurationAssignment]'},
    }
    def __init__(
        self,
        *,
        value: Optional[List["GuestConfigurationAssignment"]] = None,
        **kwargs
    ):
        """Keyword-only constructor; ``value`` is the list of assignments, if any."""
        super(GuestConfigurationAssignmentList, self).__init__(**kwargs)
        self.value = value
class GuestConfigurationAssignmentProperties(msrest.serialization.Model):
    """Guest configuration assignment properties.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar target_resource_id: VM resource Id.
    :vartype target_resource_id: str
    :param guest_configuration: The guest configuration to assign.
    :type guest_configuration: ~azure.mgmt.guestconfig.models.GuestConfigurationNavigation
    :ivar compliance_status: A value indicating compliance status of the machine for the assigned
     guest configuration. Possible values include: "Compliant", "NonCompliant", "Pending".
    :vartype compliance_status: str or ~azure.mgmt.guestconfig.models.ComplianceStatus
    :ivar last_compliance_status_checked: Date and time when last compliance status was checked.
    :vartype last_compliance_status_checked: ~datetime.datetime
    :ivar latest_report_id: Id of the latest report for the guest configuration assignment.
    :vartype latest_report_id: str
    :param latest_assignment_report: Last reported guest configuration assignment report.
    :type latest_assignment_report: ~azure.mgmt.guestconfig.models.AssignmentReport
    :param context: The source which initiated the guest configuration assignment. Ex: Azure
     Policy.
    :type context: str
    :ivar assignment_hash: Combined hash of the configuration package and parameters.
    :vartype assignment_hash: str
    :ivar provisioning_state: The provisioning state, which only appears in the response. Possible
     values include: "Succeeded", "Failed", "Canceled", "Created".
    :vartype provisioning_state: str or ~azure.mgmt.guestconfig.models.ProvisioningState
    """
    # Fields the server populates; marked readonly for the serializer.
    _validation = {
        'target_resource_id': {'readonly': True},
        'compliance_status': {'readonly': True},
        'last_compliance_status_checked': {'readonly': True},
        'latest_report_id': {'readonly': True},
        'assignment_hash': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Attribute name -> REST wire key and msrest type.
    _attribute_map = {
        'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
        'guest_configuration': {'key': 'guestConfiguration', 'type': 'GuestConfigurationNavigation'},
        'compliance_status': {'key': 'complianceStatus', 'type': 'str'},
        'last_compliance_status_checked': {'key': 'lastComplianceStatusChecked', 'type': 'iso-8601'},
        'latest_report_id': {'key': 'latestReportId', 'type': 'str'},
        'latest_assignment_report': {'key': 'latestAssignmentReport', 'type': 'AssignmentReport'},
        'context': {'key': 'context', 'type': 'str'},
        'assignment_hash': {'key': 'assignmentHash', 'type': 'str'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        guest_configuration: Optional["GuestConfigurationNavigation"] = None,
        latest_assignment_report: Optional["AssignmentReport"] = None,
        context: Optional[str] = None,
        **kwargs
    ):
        """Keyword-only constructor; readonly fields start as None until the server fills them."""
        super(GuestConfigurationAssignmentProperties, self).__init__(**kwargs)
        self.target_resource_id = None
        self.guest_configuration = guest_configuration
        self.compliance_status = None
        self.last_compliance_status_checked = None
        self.latest_report_id = None
        self.latest_assignment_report = latest_assignment_report
        self.context = context
        self.assignment_hash = None
        self.provisioning_state = None
class GuestConfigurationAssignmentReport(msrest.serialization.Model):
    """Report for the guest configuration assignment. Report contains information such as compliance status, reason, and more.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: ARM resource id of the report for the guest configuration assignment.
    :vartype id: str
    :ivar name: GUID that identifies the guest configuration assignment report under a
     subscription, resource group.
    :vartype name: str
    :param properties: Properties of the guest configuration report.
    :type properties: ~azure.mgmt.guestconfig.models.GuestConfigurationAssignmentReportProperties
    """
    # Fields the server populates; marked readonly for the serializer.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
    }
    # Attribute name -> REST wire key and msrest type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'GuestConfigurationAssignmentReportProperties'},
    }
    def __init__(
        self,
        *,
        properties: Optional["GuestConfigurationAssignmentReportProperties"] = None,
        **kwargs
    ):
        """Keyword-only constructor; ``id`` and ``name`` stay None until set by the server."""
        super(GuestConfigurationAssignmentReport, self).__init__(**kwargs)
        self.id = None
        self.name = None
        self.properties = properties
class GuestConfigurationAssignmentReportList(msrest.serialization.Model):
    """List of guest configuration assignment reports.
    :param value: List of reports for the guest configuration. Report contains information such as
     compliance status, reason and more.
    :type value: list[~azure.mgmt.guestconfig.models.GuestConfigurationAssignmentReport]
    """
    # Attribute name -> REST wire key and msrest type.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[GuestConfigurationAssignmentReport]'},
    }
    def __init__(
        self,
        *,
        value: Optional[List["GuestConfigurationAssignmentReport"]] = None,
        **kwargs
    ):
        """Keyword-only constructor; ``value`` is the list of reports, if any."""
        super(GuestConfigurationAssignmentReportList, self).__init__(**kwargs)
        self.value = value
class GuestConfigurationAssignmentReportProperties(msrest.serialization.Model):
"""Report for the guest configuration assignment. Report contains information such as compliance status, reason, and more.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar compliance_status: A value indicating compliance status of the machine for the assigned
guest configuration. Possible values include: "Compliant", "NonCompliant", "Pending".
:vartype compliance_status: str or ~azure.mgmt.guestconfig.models.ComplianceStatus
:ivar report_id: GUID that identifies the guest configuration assignment report under a
subscription, resource group.
:vartype report_id: str
:param assignment: Configuration details of the guest configuration assignment.
:type assignment: ~azure.mgmt.guestconfig.models.AssignmentInfo
:param vm: Information about the VM.
:type vm: ~azure.mgmt.guestconfig.models.VMInfo
:ivar start_time: Start date and time of the guest configuration assignment compliance status
check.
:vartype start_time: ~datetime.datetime
:ivar end_time: End date and time of the guest configuration assignment compliance status
check.
:vartype end_time: ~datetime.datetime
:param details: Details of the assignment report.
:type details: ~azure.mgmt.guestconfig.models.AssignmentReportDetails
"""
_validation = {
'compliance_status': {'readonly': True},
'report_id': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
}
_attribute_map = {
'compliance_status': {'key': 'complianceStatus', 'type': 'str'},
'report_id': {'key': 'reportId', 'type': 'str'},
'assignment': {'key': 'assignment', 'type': 'AssignmentInfo'},
'vm': {'key': 'vm', 'type': 'VMInfo'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'details': {'key': 'details', 'type': 'AssignmentReportDetails'},
}
def __init__(
self,
*,
assignment: Optional["AssignmentInfo"] = None,
vm: Optional["VMInfo"] = None,
details: Optional["AssignmentReportDetails"] = None,
**kwargs
):
| |
# -*- coding: utf-8 -*-
"""
The :mod:`parsimony.algorithms.proximal` module contains several algorithms
that involve proximal operators.
Algorithms may not store states. I.e., if they are classes, do not keep
references to objects with state in the algorithm objects. It should be
possible to copy and share algorithms between e.g. estimators, and thus they
should not depend on any state.
Created on Mon Jun 2 15:42:13 2014
Copyright (c) 2013-2014, CEA/DSV/I2BM/Neurospin. All rights reserved.
@author: <NAME>, <NAME>, <NAME>
@email: <EMAIL>, <EMAIL>,
<EMAIL>
@license: BSD 3-clause.
"""
import numpy as np
import warnings
try:
from scipy.interpolate import PchipInterpolator as interp1
except ImportError:
from scipy.interpolate import interp1d as interp1
try:
from . import bases # Only works when imported as a package.
except (ValueError, SystemError):
import parsimony.algorithms.bases as bases # When run as a program.
import parsimony.utils as utils
import parsimony.utils.maths as maths
import parsimony.utils.consts as consts
from parsimony.algorithms.utils import Info
import parsimony.functions.properties as properties
# Public API of this module (controls ``from ... import *``).
__all__ = ["ISTA", "FISTA", "CONESTA", "StaticCONESTA",
           "ADMM",
           "DykstrasProjectionAlgorithm",
           "ParallelDykstrasProjectionAlgorithm"]
class ISTA(bases.ExplicitAlgorithm,
           bases.IterativeAlgorithm,
           bases.InformationAlgorithm):
    """The iterative shrinkage-thresholding algorithm.
    Parameters
    ----------
    eps : float
        Positive float. Tolerance for the stopping criterion.
    info : List or tuple of utils.consts.Info
        What, if any, extra run information should be stored. Default is an
        empty list, which means that no run information is computed nor
        returned.
    max_iter : int
        Non-negative integer. Maximum allowed number of iterations.
    min_iter : int
        Non-negative integer less than or equal to max_iter. Minimum number of
        iterations that must be performed. Default is 1.
    inexact_start_iteration : int, optional
        When ISTA is used repeatedly in some outer iteration procedure, it is
        useful to be able to set the actual iteration count from outside. This
        count is used when deriving ``inexact_eps``. Default is None, which
        means to use ``inexact_eps``, if given, or default inexact behaviour
        otherwise.
    inexact_eps : float, optional
        The precision used in the approximation of the proximal operator. This
        is only used/relevant if your penalties require the approximation of
        a projection or proximal operator. Default is None, which means to
        derive ``inexact_eps`` from ``inexact_start_iteration``, if given, or
        to use ``eps`` otherwise.
    inexact_max_iter : int, optional
        The number of iterations to allow in the inexact approximation of the
        projection or proximal operator. Default is None, which means to use
        ``max_iter``.
    callback: Callable
        A callable object that will be called at the end of each iteration with
        locals() as arguments.
    Examples
    --------
    >>> from parsimony.algorithms.proximal import ISTA
    >>> from parsimony.functions import LinearRegressionL1L2TV
    >>> import scipy.sparse as sparse
    >>> import numpy as np
    >>>
    >>> np.random.seed(42)
    >>> X = np.random.rand(100, 50)
    >>> y = np.random.rand(100, 1)
    >>> A = sparse.csr_matrix((50, 50))  # Unused here
    >>> function = LinearRegressionL1L2TV(X, y, 0.0, 0.0, 0.0,
    ...                                   A=A, mu=0.0)
    >>> ista = ISTA(max_iter=10000)
    >>> beta1 = ista.run(function, np.random.rand(50, 1))
    >>> beta2 = np.dot(np.linalg.pinv(X), y)
    >>> np.linalg.norm(beta1 - beta2)  # doctest: +ELLIPSIS
    0.00031215...
    >>>
    >>> np.random.seed(42)
    >>> X = np.random.rand(100, 50)
    >>> y = np.random.rand(100, 1)
    >>> A = sparse.csr_matrix((50, 50))  # Unused here
    >>> function = LinearRegressionL1L2TV(X, y, 0.1, 0.0, 0.0,
    ...                                   A=A, mu=0.0)
    >>> ista = ISTA(max_iter=10000)
    >>> beta1 = ista.run(function, np.random.rand(50, 1))
    >>> beta2 = np.dot(np.linalg.pinv(X), y)
    >>> np.linalg.norm(beta1 - beta2)  # doctest: +ELLIPSIS
    0.82723303...
    >>> int(np.linalg.norm(beta2.ravel(), 0))
    50
    >>> int(np.linalg.norm(beta1.ravel(), 0))
    7
    """
    INTERFACES = [properties.Function,
                  properties.Gradient,
                  properties.StepSize,
                  properties.ProximalOperator]
    INFO_PROVIDED = [Info.ok,
                     Info.num_iter,
                     Info.time,
                     Info.fvalue,  # <-- To be deprecated!
                     Info.func_val,
                     Info.smooth_func_val,
                     Info.converged]
    def __init__(self,
                 eps=consts.TOLERANCE,
                 info=None,
                 max_iter=20000,
                 min_iter=1,
                 inexact_start_iteration=None,
                 inexact_eps=None,
                 inexact_max_iter=None,
                 callback=None):
        # Bug fix: ``info`` previously defaulted to a mutable ``[]``, which
        # Python shares between all calls; use None as the sentinel instead.
        if info is None:
            info = []
        super(ISTA, self).__init__(info=info,
                                   max_iter=max_iter,
                                   min_iter=min_iter)
        self.eps = max(consts.FLOAT_EPSILON, float(eps))
        # None means "derive the inexact precision per iteration in run()".
        if inexact_eps is None:
            self.inexact_eps = inexact_eps
        else:
            self.inexact_eps = max(consts.FLOAT_EPSILON, float(inexact_eps))
        if inexact_start_iteration is None:
            self.inexact_start_iteration = inexact_start_iteration
        else:
            self.inexact_start_iteration = max(0, int(inexact_start_iteration))
        if inexact_max_iter is None:
            self.inexact_max_iter = self.max_iter
        else:
            self.inexact_max_iter = max(1, int(inexact_max_iter))
        self.callback = callback
    @bases.force_reset
    @bases.check_compatibility
    def run(self, function, beta):
        """Find the minimiser of the given function, starting at beta.
        Parameters
        ----------
        function : Function
            The function to minimise.
        beta : numpy.ndarray
            The start vector.
        """
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, False)
        betanew = betaold = beta
        if self.info_requested(Info.time):
            _t = []
        if self.info_requested(Info.fvalue) \
                or self.info_requested(Info.func_val):
            _f = []
        if self.info_requested(Info.smooth_func_val):
            _fmu = []
        if self.info_requested(Info.converged):
            self.info_set(Info.converged, False)
        # Bug fix: keep ``i`` defined even if max_iter < 1, so that
        # ``self.num_iter = i`` below cannot raise NameError.
        i = 0
        for i in range(1, self.max_iter + 1):
            if self.info_requested(Info.time):
                tm = utils.time_cpu()
            step = function.step(betanew)
            betaold = betanew
            # Precision for a possibly inexact proximal operator. The
            # 1 / k^(2 + eps) schedule tightens the approximation as the
            # iterations progress (presumably fast enough for convergence).
            if self.inexact_eps is not None:
                inexact_eps = self.inexact_eps
            else:
                if self.inexact_start_iteration is None:
                    inexact_eps = \
                        1.0 / (float(i) ** (2.0 + consts.FLOAT_EPSILON))
                else:
                    ii = self.inexact_start_iteration
                    inexact_eps = \
                        1.0 / (float(i + ii) ** (2.0 + consts.FLOAT_EPSILON))
            # Forward-backward step: gradient descent followed by prox.
            betanew = function.prox(betaold - step * function.grad(betaold),
                                    step,
                                    eps=inexact_eps,
                                    max_iter=self.inexact_max_iter)
            if self.info_requested(Info.time):
                _t.append(utils.time_cpu() - tm)
            if self.info_requested(Info.fvalue) \
                    or self.info_requested(Info.func_val):
                _f.append(function.f(betanew))
            if self.info_requested(Info.smooth_func_val):
                if hasattr(function, "fmu"):
                    _fmu.append(function.fmu(betanew))
            if self.callback is not None:
                self.callback(locals())
            # Stop when the scaled change in the iterate drops below eps,
            # but never before min_iter iterations have been performed.
            if (1.0 / step) * maths.norm(betanew - betaold) < self.eps \
                    and i >= self.min_iter:
                if self.info_requested(Info.converged):
                    self.info_set(Info.converged, True)
                break
        self.num_iter = i
        if self.info_requested(Info.num_iter):
            self.info_set(Info.num_iter, i)
        if self.info_requested(Info.time):
            self.info_set(Info.time, _t)
        if self.info_requested(Info.fvalue):
            self.info_set(Info.fvalue, _f)
        if self.info_requested(Info.func_val):
            self.info_set(Info.func_val, _f)
        if self.info_requested(Info.smooth_func_val):
            self.info_set(Info.smooth_func_val, _fmu)
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, True)
        return betanew
class FISTA(bases.ExplicitAlgorithm,
bases.IterativeAlgorithm,
bases.InformationAlgorithm):
"""The fast iterative shrinkage-thresholding algorithm.
Parameters
----------
eps : float
Must be positive. The tolerance for the stopping criterion.
use_gap : bool
If true, FISTA will use a dual gap, from the interface DualFunction, in
the stopping criterion as
if function.gap(beta) < eps:
break
Default is False, since the gap may be very expensive to compute.
info : List or tuple of utils.consts.Info
What, if any, extra run information should be stored. Default is an
empty list, which means that no run information is computed nor
returned.
max_iter : int
Non-negative integer. Maximum allowed number of iterations.
min_iter : int
Non-negative integer less than or equal to max_iter. Minimum number of
iterations that must be performed. Default is 1.
callback: Callable
A callable object that will be called at the end of each iteration with
locals() as arguments.
Example
-------
>>> from parsimony.algorithms.proximal import FISTA
>>> from parsimony.functions import LinearRegressionL1L2TV
>>> import scipy.sparse as sparse
>>> import numpy as np
>>>
>>> np.random.seed(42)
>>> X = np.random.rand(100, 50)
>>> y = np.random.rand(100, 1)
>>> A = sparse.csr_matrix((50, 50)) # Unused here
>>> function = LinearRegressionL1L2TV(X, y, 0.0, 0.0, 0.0,
... A=A, mu=0.0)
>>> fista = FISTA(max_iter=10000)
>>> beta1 = fista.run(function, np.random.rand(50, 1))
>>> beta2 = np.dot(np.linalg.pinv(X), y)
>>> np.linalg.norm(beta1 - beta2) # doctest: +ELLIPSIS
4.618281...e-06
>>>
>>> np.random.seed(42)
>>> X = np.random.rand(100, 50)
>>> y = np.random.rand(100, 1)
>>> A = sparse.csr_matrix((50, 50)) # Unused here
>>> function = LinearRegressionL1L2TV(X, y, 0.1, 0.0, 0.0,
... A=A, mu=0.0)
>>> fista = FISTA(max_iter=10000)
>>> beta1 = fista.run(function, np.random.rand(50, 1))
>>> beta2 = np.dot(np.linalg.pinv(X), y)
>>> np.linalg.norm(beta1 - beta2) # doctest: +ELLIPSIS
0.82723292...
>>> int(np.linalg.norm(beta2.ravel(), 0))
50
>>> int(np.linalg.norm(beta1.ravel(), 0))
7
"""
INTERFACES = [properties.Function,
properties.Gradient,
properties.StepSize,
properties.ProximalOperator]
INFO_PROVIDED = [Info.ok,
Info.num_iter,
Info.time,
Info.fvalue, # <-- To be deprecated!
Info.func_val,
Info.converged,
Info.gap,
Info.verbose]
def __init__(self, use_gap=False,
info=[], eps=consts.TOLERANCE, max_iter=10000, min_iter=1,
callback=None,
simulation=False,
return_best=False):
super(FISTA, self).__init__(info=info,
max_iter=int(max_iter),
min_iter=int(min_iter))
self.use_gap = bool(use_gap)
self.eps = max(consts.FLOAT_EPSILON, float(eps))
self.callback = callback
self.simulation = bool(simulation)
self.return_best = bool(return_best)
@bases.force_reset
@bases.check_compatibility
def run(self, function, beta):
"""Find the minimiser of the given function, starting at beta.
Parameters
----------
function : Function. The function to minimise.
beta : Numpy array. The start vector.
"""
if self.info_requested(Info.ok):
self.info_set(Info.ok, False)
z = betanew = betaold = beta
if self.info_requested(Info.time):
t_ = []
if self.info_requested(Info.fvalue) \
or self.info_requested(Info.func_val):
f_ = []
if self.info_requested(Info.converged):
self.info_set(Info.converged, False)
if self.info_requested(Info.gap):
gap_ = []
if self.return_best:
best_f = np.inf
best_beta = None
#print("########", max(self.min_iter, self.max_iter) + 1)
for i in range(1, max(self.min_iter, self.max_iter) + 1):
if self.info_requested(Info.time):
tm = utils.time_cpu()
z = betanew + ((i - 2.0) / (i + 1.0)) * (betanew - betaold)
step = function.step(z)
betaold = betanew
betanew = function.prox(z - step * function.grad(z),
step,
eps=1.0 / (float(i) ** (4.0 + consts.FLOAT_EPSILON)),
max_iter=self.max_iter)
if self.info_requested(Info.time):
t_.append(utils.time_cpu() - tm)
if self.info_requested(Info.fvalue) \
or self.info_requested(Info.func_val):
func_val = function.f(betanew)
f_.append(func_val)
if self.return_best and func_val < best_f:
best_f = func_val
best_beta = betanew
if self.callback is not None:
self.callback(locals())
| |
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
    """Search files (and folders) across the libraries the user can access.

    Query parameters:
        q               -- required search keyword
        page, per_page  -- pagination; per_page is capped at 100
        search_repo     -- 'all'/'mine'/'shared'/'group'/'public' or a repo id
        search_path     -- folder to restrict the search to (single-repo only)
        obj_type        -- 'dir' or 'file'
        search_ftypes   -- 'all' or 'custom' (combined with ftype/input_fexts)
        with_permission -- 'true' to include the caller's permission per hit
        time_from/time_to, size_from/size_to -- integer range filters

    Returns a Response with 'total', 'results' and 'has_more'.
    """
    if not HAS_FILE_SEARCH:
        error_msg = 'Search not supported.'
        return api_error(status.HTTP_404_NOT_FOUND, error_msg)

    # argument check
    keyword = request.GET.get('q', None)
    if not keyword:
        error_msg = 'q invalid.'
        return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

    # Pagination: silently fall back to the defaults on non-integer input.
    try:
        current_page = int(request.GET.get('page', '1'))
        per_page = int(request.GET.get('per_page', '10'))
        if per_page > 100:
            per_page = 100
    except ValueError:
        current_page = 1
        per_page = 10

    start = (current_page - 1) * per_page
    size = per_page
    if start < 0 or size < 0:
        error_msg = 'page or per_page invalid.'
        return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

    search_repo = request.GET.get('search_repo', 'all')  # val: scope or 'repo_id'
    search_repo = search_repo.lower()
    if not is_valid_repo_id_format(search_repo) and \
            search_repo not in ('all', 'mine', 'shared', 'group', 'public'):
        error_msg = 'search_repo invalid.'
        return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

    # search_path only makes sense when searching inside a single repo.
    search_path = request.GET.get('search_path', None)
    if search_path:
        search_path = normalize_dir_path(search_path)
        if not is_valid_repo_id_format(search_repo):
            error_msg = 'search_repo invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

        dir_id = seafile_api.get_dir_id_by_path(search_repo, search_path)
        if not dir_id:
            error_msg = 'Folder %s not found.' % search_path
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)

    obj_type = request.GET.get('obj_type', None)
    if obj_type:
        obj_type = obj_type.lower()
    if obj_type and obj_type not in ('dir', 'file'):
        error_msg = 'obj_type invalid.'
        return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

    search_ftypes = request.GET.get('search_ftypes', 'all')  # val: 'all' or 'custom'
    search_ftypes = search_ftypes.lower()
    if search_ftypes not in ('all', 'custom'):
        error_msg = 'search_ftypes invalid.'
        return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

    with_permission = request.GET.get('with_permission', 'false')
    with_permission = with_permission.lower()
    if with_permission not in ('true', 'false'):
        error_msg = 'with_permission invalid.'
        return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

    # Optional integer range filters -- presumably epoch seconds and bytes;
    # the search backend contract is not visible here (TODO confirm).
    time_from = request.GET.get('time_from', None)
    time_to = request.GET.get('time_to', None)
    if time_from is not None:
        try:
            time_from = int(time_from)
        except:
            error_msg = 'time_from invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
    if time_to is not None:
        try:
            time_to = int(time_to)
        except:
            error_msg = 'time_to invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

    size_from = request.GET.get('size_from', None)
    size_to = request.GET.get('size_to', None)
    if size_from is not None:
        try:
            size_from = int(size_from)
        except:
            error_msg = 'size_from invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
    if size_to is not None:
        try:
            size_to = int(size_to)
        except:
            error_msg = 'size_to invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

    time_range = (time_from, time_to)
    size_range = (size_from, size_to)

    # Build the file-extension whitelist for a custom file-type search:
    # predefined categories (ftype) plus extensions typed by the user.
    suffixes = None
    custom_ftypes = request.GET.getlist('ftype')  # types like 'Image', 'Video'... same in utils/file_types.py
    input_fileexts = request.GET.get('input_fexts', '')  # file extension input by the user
    if search_ftypes == 'custom':
        suffixes = []
        if len(custom_ftypes) > 0:
            for ftp in custom_ftypes:
                if ftp in SEARCH_FILEEXT:
                    for ext in SEARCH_FILEEXT[ftp]:
                        suffixes.append(ext)

        if input_fileexts:
            input_fexts = input_fileexts.split(',')
            for i_ext in input_fexts:
                i_ext = i_ext.strip()
                if i_ext:
                    suffixes.append(i_ext)

    username = request.user.username
    org_id = request.user.org.org_id if is_org_context(request) else None
    repo_id_map = {}
    # check resource and permission when searching in a single repo
    if is_valid_repo_id_format(search_repo):
        repo_id = search_repo
        repo = seafile_api.get_repo(repo_id)
        # resource check
        if not repo:
            error_msg = 'Library %s not found.' % repo_id
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)

        # permission check
        if not check_folder_permission(request, repo_id, '/'):
            error_msg = 'Permission denied.'
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)

        # For a virtual (sub-library) repo, index under its origin repo id.
        map_id = repo.origin_repo_id if repo.origin_repo_id else repo_id
        repo_id_map[map_id] = repo
        repo_type_map = {}
    else:
        shared_from = request.GET.get('shared_from', None)
        not_shared_from = request.GET.get('not_shared_from', None)
        repo_id_map, repo_type_map = get_search_repos_map(search_repo,
                username, org_id, shared_from, not_shared_from)

    obj_desc = {
        'obj_type': obj_type,
        'suffixes': suffixes,
        'time_range': time_range,
        'size_range': size_range
    }
    # search file
    try:
        results, total = search_files(repo_id_map, search_path, keyword, obj_desc, start, size, org_id)
    except Exception as e:
        logger.error(e)
        error_msg = 'Internal Server Error'
        return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)

    # Strip backend-internal fields and decorate each hit for the client.
    for e in results:
        e.pop('repo', None)
        e.pop('exists', None)
        e.pop('last_modified_by', None)
        e.pop('name_highlight', None)
        e.pop('score', None)

        repo_id = e['repo_id']

        if with_permission.lower() == 'true':
            permission = check_folder_permission(request, repo_id, '/')
            if not permission:
                # Drop hits the caller can no longer access.
                continue
            e['permission'] = permission

        # get repo type
        if repo_id in repo_type_map:
            e['repo_type'] = repo_type_map[repo_id]
        else:
            e['repo_type'] = ''

        # Thumbnails are only generated for image files.
        e['thumbnail_url'] = ''
        filetype, fileext = get_file_type_and_ext(e.get('name', ''))
        if filetype == IMAGE:
            thumbnail_url = reverse('api2-thumbnail',
                    args=[e.get('repo_id', '')],
                    request=request)
            params = '?p={}&size={}'.format(quote(e.get('fullpath', '').encode('utf-8')), 72)
            e['thumbnail_url'] = thumbnail_url + params

    has_more = True if total > current_page * per_page else False
    return Response({"total":total, "results":results, "has_more":has_more})
########## Repo related
def repo_download_info(request, repo_id, gen_sync_token=True):
    """Build the JSON payload a sync client needs to download a library.

    A repo sync token for the current user is generated unless
    gen_sync_token is False, in which case an empty token is returned.
    """
    repo = get_repo(repo_id)
    if not repo:
        return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')

    username = request.user.username
    # Only mint a sync token when the caller asked for one.
    token = seafile_api.generate_repo_token(repo_id, username) if gen_sync_token else ''

    encrypted = repo.encrypted

    # Populate repo.latest_modify before it is read below.
    calculate_repos_last_modify([repo])

    info_json = {
        'relay_id': '44e8f253849ad910dc142247227c8ece8ec0f971',
        'relay_addr': '127.0.0.1',
        'relay_port': '80',
        'email': username,
        'token': token,
        'repo_id': repo_id,
        'repo_name': repo.name,
        'repo_desc': repo.desc,
        'repo_size': repo.size,
        'repo_size_formatted': filesizeformat(repo.size),
        'mtime': repo.latest_modify,
        'mtime_relative': translate_seahub_time(repo.latest_modify),
        # Legacy wire format: 1 when encrypted, '' otherwise.
        'encrypted': 1 if encrypted else '',
        'enc_version': repo.enc_version,
        'salt': repo.salt if repo.enc_version >= 3 else '',
        'magic': repo.magic if encrypted else '',
        'random_key': repo.random_key if repo.random_key else '',
        'repo_version': repo.version,
        'head_commit_id': repo.head_cmmt_id,
        'permission': seafile_api.check_permission_by_path(repo_id, '/', username)
    }

    if is_pro_version() and ENABLE_STORAGE_CLASSES:
        info_json['storage_name'] = repo.storage_name

    return Response(info_json)
class Repos(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
# parse request params
filter_by = {
'mine': False,
'shared': False,
'group': False,
'org': False,
}
q = request.GET.get('nameContains', '')
rtype = request.GET.get('type', "")
if not rtype:
# set all to True, no filter applied
filter_by = filter_by.fromkeys(iter(filter_by.keys()), True)
for f in rtype.split(','):
f = f.strip()
filter_by[f] = True
email = request.user.username
owner_name = email2nickname(email)
owner_contact_email = email2contact_email(email)
# Use dict to reduce memcache fetch cost in large for-loop.
contact_email_dict = {}
nickname_dict = {}
repos_json = []
if filter_by['mine']:
if is_org_context(request):
org_id = request.user.org.org_id
owned_repos = seafile_api.get_org_owned_repo_list(org_id,
email, ret_corrupted=True)
else:
owned_repos = seafile_api.get_owned_repo_list(email,
ret_corrupted=True)
# Reduce memcache fetch ops.
modifiers_set = {x.last_modifier for x in owned_repos}
for e in modifiers_set:
if e not in contact_email_dict:
contact_email_dict[e] = email2contact_email(e)
if e not in nickname_dict:
nickname_dict[e] = email2nickname(e)
owned_repos.sort(key=lambda x: x.last_modify, reverse=True)
for r in owned_repos:
# do not return virtual repos
if r.is_virtual:
continue
if q and q.lower() not in r.name.lower():
continue
repo = {
"type": "repo",
"id": r.id,
"owner": email,
"owner_name": owner_name,
"owner_contact_email": owner_contact_email,
"name": r.name,
"mtime": r.last_modify,
"modifier_email": r.last_modifier,
"modifier_contact_email": contact_email_dict.get(r.last_modifier, ''),
"modifier_name": nickname_dict.get(r.last_modifier, ''),
"mtime_relative": translate_seahub_time(r.last_modify),
"size": r.size,
"size_formatted": filesizeformat(r.size),
"encrypted": r.encrypted,
"permission": 'rw', # Always have read-write permission to owned repo
"virtual": False,
"root": '',
"head_commit_id": r.head_cmmt_id,
"version": r.version,
"salt": r.salt if r.enc_version >= 3 else '',
}
if is_pro_version() and ENABLE_STORAGE_CLASSES:
repo['storage_name'] = r.storage_name
repo['storage_id'] = r.storage_id
repos_json.append(repo)
if filter_by['shared']:
if is_org_context(request):
org_id = request.user.org.org_id
shared_repos = seafile_api.get_org_share_in_repo_list(org_id,
email, -1, -1)
else:
shared_repos = seafile_api.get_share_in_repo_list(
email, -1, -1)
repos_with_admin_share_to = ExtraSharePermission.objects.\
get_repos_with_admin_permission(email)
# Reduce memcache fetch ops.
owners_set = {x.user for x in shared_repos}
modifiers_set = {x.last_modifier for x in shared_repos}
for e in owners_set | modifiers_set:
if e not in contact_email_dict:
contact_email_dict[e] = email2contact_email(e)
if e not in nickname_dict:
nickname_dict[e] = email2nickname(e)
shared_repos.sort(key=lambda x: x.last_modify, reverse=True)
for r in shared_repos:
if q and q.lower() not in r.name.lower():
continue
library_group_name = ''
if '@seafile_group' in r.user:
library_group_id = get_group_id_by_repo_owner(r.user)
library_group_name= group_id_to_name(library_group_id)
if parse_repo_perm(r.permission).can_download is False:
if not is_web_request(request):
continue
r.password_need = seafile_api.is_password_set(r.repo_id, email)
repo = {
"type": "srepo",
"id": r.repo_id,
"owner": r.user,
"owner_name": nickname_dict.get(r.user, ''),
"owner_contact_email": contact_email_dict.get(r.user, ''),
"name": r.repo_name,
"owner_nickname": nickname_dict.get(r.user, ''),
"mtime": r.last_modify,
"mtime_relative": translate_seahub_time(r.last_modify),
"modifier_email": r.last_modifier,
"modifier_contact_email": contact_email_dict.get(r.last_modifier, ''),
"modifier_name": nickname_dict.get(r.last_modifier, ''),
"size": r.size,
"size_formatted": filesizeformat(r.size),
"encrypted": r.encrypted,
"permission": r.permission,
"share_type": r.share_type,
"root": '',
"head_commit_id": r.head_cmmt_id,
"version": r.version,
"group_name": library_group_name,
"salt": r.salt if r.enc_version >= 3 else '',
}
if r.repo_id in repos_with_admin_share_to:
repo['is_admin'] = True
else:
repo['is_admin'] = False
repos_json.append(repo)
if filter_by['group']:
if is_org_context(request):
org_id = request.user.org.org_id
group_repos = seafile_api.get_org_group_repos_by_user(email,
org_id)
else:
group_repos = seafile_api.get_group_repos_by_user(email)
group_repos.sort(key=lambda x: x.last_modify, reverse=True)
# Reduce memcache fetch ops.
share_from_set = {x.user for x in group_repos}
modifiers_set = {x.last_modifier for x in group_repos}
for e in modifiers_set | share_from_set:
if e not in contact_email_dict:
contact_email_dict[e] = email2contact_email(e)
if e not in nickname_dict:
nickname_dict[e] = email2nickname(e)
for r in group_repos:
if q and q.lower() not in r.name.lower():
continue
if parse_repo_perm(r.permission).can_download is False:
if not is_web_request(request):
continue
repo = {
"type": "grepo",
"id": r.repo_id,
"name": r.repo_name,
"groupid": r.group_id,
"group_name": r.group_name,
"owner": r.group_name,
"mtime": r.last_modify,
"mtime_relative": translate_seahub_time(r.last_modify),
"modifier_email": r.last_modifier,
"modifier_name": nickname_dict.get(r.last_modifier, ''),
"modifier_contact_email": contact_email_dict.get(r.last_modifier, ''),
"size": r.size,
"encrypted": r.encrypted,
"permission": r.permission,
"root": '',
"head_commit_id": r.head_cmmt_id,
| |
from __future__ import annotations
import asyncio
import logging
import typing as t
from datetime import datetime
from random import sample
import discord
from discord.ext import commands, tasks
from griffinbot.constants import Bot, Emoji, MOD_ROLES, StaffRoles
log = logging.getLogger(__name__)
def num_to_emoji(x: int) -> str:
    """Convert an int board value to its emoji representation.

    Values -1 (bomb) through 20 map to dedicated emoji; anything outside
    that range falls back to the plain number followed by a space.
    """
    emoji_map = {
        -1: "💣",
        0: "🟦",
        1: "1️⃣",
        2: "2️⃣",
        3: "3️⃣",
        4: "4️⃣",
        5: "5️⃣",
        6: "6️⃣",
        7: "7️⃣",
        8: "8️⃣",
        9: "9️⃣",
        10: "🔟",
        11: "<:11:803632726509879346>",
        12: "<:12:803633006790049806>",
        13: "<:13:803633045742682173>",
        14: "<:14:803633082330644492>",
        15: "<:15:803633109945155664>",
        16: "<:16:803633136763142175>",
        17: "<:17:803633168640245790>",
        18: "<:18:803633195106172958>",
        19: "<:19:803633223913177089>",
        20: "<:20:803633257358163968>",
    }
    # BUGFIX: .get() also guards against out-of-range negatives (x < -1),
    # which previously raised KeyError; any unmapped value falls through
    # to the textual form, exactly as values > 20 always did.
    return emoji_map.get(x, f"{x} ")
class GameBoard:
    """Represents a Minesweeper game board.

    NOTE: despite their names, ``x_bombs`` and ``y_bombs`` are the board's
    width and height in tiles; ``num_bombs`` is the bomb count.
    """

    def __init__(self, x_bombs: int = 10, y_bombs: int = 10, num_bombs: int = 8):
        self.guesses = 0
        self.started = False          # bombs are placed on the first click
        self.x_bombs = x_bombs        # board width, in tiles
        self.y_bombs = y_bombs        # board height, in tiles
        self.bombs = num_bombs        # how many bombs to place
        self.gameover = False
        self.updated = datetime.now()
        self.dimensions = (x_bombs, y_bombs, num_bombs)
        # buttons[y][x] is the Tile at column x, row y.
        self.buttons = []
        for y_coord in range(y_bombs):
            row = []
            for x_coord in range(x_bombs):
                row.append(Tile(self, x_coord, y_coord))
            self.buttons.append(row)

    def __str__(self):
        return (
            f"{self.x_bombs} by {self.y_bombs} Minesweeper game, "
            + f"last updated {self.updated.strftime('%I:%M:%S %p on %m/%d/%Y')}"
        )

    def __repr__(self):
        return str(self)

    def start(self, x: int, y: int) -> None:
        """Start a new minesweeper game.

        Bombs are placed on random tiles -- never on the first-clicked
        tile (x, y) -- and each non-bomb neighbour's count is incremented.
        """
        button_numbers = {n for n in range(self.x_bombs * self.y_bombs)}
        # The first tile the player clicks can never be a bomb.
        button_numbers.remove(y * self.x_bombs + x)
        # BUGFIX: random.sample() no longer accepts a set (deprecated in
        # Python 3.9, removed in 3.11); sample from a sequence instead.
        bomb_numbers = set(sample(sorted(button_numbers), self.bombs))
        self.bombPositions = []
        for bomb_number in bomb_numbers:
            bomb_x = bomb_number % self.x_bombs
            bomb_y = bomb_number // self.x_bombs
            self.buttons[bomb_y][bomb_x].bomb()
            self.bombPositions.append((bomb_x, bomb_y))
        # Increment the adjacent-bomb count of every non-bomb neighbour.
        for bomb_x, bomb_y in self.bombPositions:
            for tile in self.buttons[bomb_y][bomb_x].get_adjacent():
                if not tile.isBomb:
                    tile.reveal_image_state += 1
        # Mark the game as started
        self.started = True

    def game_over(self) -> None:
        """Game over."""
        self.gameover = True

    def cleared(self) -> bool:
        """Check if the player has cleared the gameboard of mines."""
        for row in self.buttons:
            for tile in row:
                if (not tile.isBomb) and tile.covered:
                    return False
        return True

    def stale(self) -> bool:
        """Check if the game is stale (untouched for over 24 hours)."""
        # NOTE: .trace is a custom logging level -- presumably added by
        # the bot's logging setup; it is not part of stdlib logging.
        if (datetime.now() - self.updated).total_seconds() > 86400:
            log.trace("Stale")
            return True
        log.trace("Not stale")
        return False

    def update(self) -> None:
        """Update the game board to keep it from going stale."""
        self.updated = datetime.now()

    def to_covered_message(self) -> str:
        """Return the board as a covered (spoilers) message."""
        msg = ""
        for row in self.buttons:
            for tile in row:
                msg = msg + "||" + num_to_emoji(tile.reveal_image_state) + "||"
            # No trailing newline after the final row.
            if not row == self.buttons[-1]:
                msg = msg + "\n"
        return msg

    def to_message(self) -> str:
        """Return the board as an emoji message with row/column labels."""
        msg = ":blue_square:"
        # Column header row.
        x = 1
        while x <= len(self.buttons[0]):
            msg = msg + num_to_emoji(x)
            x += 1
        x = 1
        msg = msg + "\n"
        for row in self.buttons:
            # Row label, then the row's tiles.
            msg = msg + num_to_emoji(x)
            x += 1
            for tile in row:
                msg = msg + tile.to_emoji()
            if not row == self.buttons[-1]:
                msg = msg + "\n"
        return msg
class Tile:
    """A single tile on a Minesweeper game board."""

    # Emoji shown while the tile is still covered, keyed by tile_image_state.
    _COVERED_EMOJI = {
        0: "⬜",
        1: "🚩",
        2: "❓",
    }

    # Relative coordinates of the eight surrounding tiles.
    _NEIGHBOUR_OFFSETS = (
        (1, -1),
        (1, 0),
        (1, 1),
        (0, 1),
        (-1, 1),
        (-1, 0),
        (-1, -1),
        (0, -1),
    )

    def __init__(self, gameboard: GameBoard, x: int, y: int):
        self.covered = True   # still hidden from the player
        self.isBomb = False   # becomes True via bomb()
        self.x = x
        self.y = y
        # Shown while covered: 0 = blank, 1 = flag, 2 = question mark.
        self.tile_image_state = 0
        # Shown once revealed: adjacent-bomb count, or -1 for a bomb.
        self.reveal_image_state = 0
        self.gameboard = gameboard

    def __str__(self):
        kind = "Bomb" if self.isBomb else "Tile"
        return f"{kind} at ({self.x}, {self.y})"

    def __repr__(self):
        return str(self)

    def left_click(self) -> None:
        """Simulate a left click by the user."""
        board = self.gameboard
        if board.gameover:
            return  # the game is already over
        if self.tile_image_state != 0:
            return  # flagged / question-marked tiles cannot be opened
        if not board.started:
            # First click of the game: place the bombs, then open up.
            board.start(self.x, self.y)
            self.reveal()
        elif self.isBomb:
            board.game_over()
            return
        else:
            self.reveal()
        if board.cleared():
            board.game_over()

    def reveal(self) -> None:
        """Uncover this tile, flood-filling outwards from empty tiles."""
        self.covered = False
        if self.reveal_image_state != 0:
            return
        # A zero tile automatically opens its covered, unmarked neighbours.
        for neighbour in self.get_adjacent():
            if neighbour.covered and neighbour.tile_image_state == 0:
                neighbour.reveal()

    def right_click(self, image_state: int) -> None:
        """Set the covered marker (flag / question mark) on this tile."""
        if not self.gameboard.gameover:
            self.tile_image_state = image_state

    def bomb(self) -> None:
        """Turn this tile into a bomb."""
        self.isBomb = True
        self.reveal_image_state = -1

    def get_adjacent(self) -> list[Tile]:
        """Return the neighbouring tiles, clipped at the board edges."""
        board = self.gameboard
        neighbours = []
        for dx, dy in self._NEIGHBOUR_OFFSETS:
            nx = self.x + dx
            ny = self.y + dy
            if 0 <= nx < board.x_bombs and 0 <= ny < board.y_bombs:
                neighbours.append(board.buttons[ny][nx])
        return neighbours

    def to_emoji(self) -> str:
        """Return the emoji currently shown to the player for this tile."""
        if not self.covered:
            return num_to_emoji(self.reveal_image_state)
        if self.gameboard.gameover:
            # After the game ends, covered tiles expose outcome details.
            if self.isBomb:
                return ":bomb:"
            if self.tile_image_state == 1:
                return ":flag_black:"
        return self._COVERED_EMOJI[self.tile_image_state]
class Minesweeper(commands.Cog):
"""Minesweeper Game."""
def __init__(self, bot: commands.Bot):
    """Set up the cog's per-user game registry."""
    self.bot = bot
    # Active games keyed by str(author) -- at most one game per user.
    self._games = {}
    # Stale-game cleanup loop is currently disabled.
    # self.clear_stale_games.start()
def cog_unload(self) -> None:
    """Clean up while unloading the cog."""
    # Would stop the cleanup loop here, if it were enabled in __init__.
    # self.clear_stale_games.cancel()
    return super().cog_unload()
@tasks.loop(minutes=1.0)
async def clear_stale_games(self) -> None:
    """Drop games that have not been touched for over a day."""
    # Collect first, then delete: never mutate a dict while iterating it.
    stale_ids = [game_id for game_id, game in self._games.items() if game.stale()]
    for game_id in stale_ids:
        del self._games[game_id]
    count = len(stale_ids)
    log.debug(
        f"{count} stale Minesweeper game{'s' if count != 1 else ''} removed"
    )
@commands.group(invoke_without_command=True, name="minesweeper", aliases=("ms",))
async def minesweeper_group(self, ctx: commands.Context) -> None:
    """Commands for playing minesweeper."""
    # A bare `minesweeper` invocation just shows the subcommand help.
    await ctx.send_help(ctx.command)
@commands.has_any_role(*MOD_ROLES, StaffRoles.bot_team_role)
@minesweeper_group.command(name="list-games", aliases=("list", "ls", "l"))
async def list_games(self, ctx: commands.Context) -> None:
    """List all the games currently being played (staff only)."""
    await ctx.send(
        f"{len(self._games)} Game{'s' if len(self._games) != 1 else ''}:"
    )
    # One bullet per player.  NOTE(review): a very long game list could
    # exceed Discord's message length limit -- confirm this stays short.
    message = ""
    for user, game in self._games.items():
        message += f"- `{user}`: {game}\n"
    if message:
        await ctx.send(message)
@minesweeper_group.command(name="spoilers-game", aliases=("s-g", "sg"))
async def spoilers_game(
    self,
    ctx: commands.Context,
    dm: t.Optional[bool] = False,
    x_distance: int = 8,
    y_distance: int = 8,
    bombs: int = 10,
    solvable: bool = False,
) -> None:
    """Send a spoilers minesweeper board.

    If x- or y-distance are changed, but not bombs, bombs will be scaled
    to keep the same difficulty of the Minesweeper game.

    If you want to play a DM game with one row, you have to include the `dm`
    parameter in the bot command.
    """
    if solvable:
        await ctx.send(f"{Emoji.no} I am not smart enough for that.")
        return

    # ========
    #  Checks
    # ========
    # Clamp dimensions to at least one tile each.
    x_distance = max(x_distance, 1)
    y_distance = max(y_distance, 1)
    area = x_distance * y_distance
    # Keep people from making more bombs than possible: at least one tile
    # (the first click) must be safe.
    if bombs >= area:
        bombs = area - 1
    # Scale difficulty: preserve the default 10/64 bomb density when the
    # board size was changed but the bomb count was left at its default.
    if (x_distance != 8 or y_distance != 8) and bombs == 10:
        bombs = round(area * (10 / 64))

    # Destination channel: the invoking channel, or the author's DMs.
    dest = ctx.author if dm else ctx

    # Reject oversized boards *before* building the game (the original
    # built and clicked the board even when it was about to be rejected).
    if area > 99:
        await dest.send(
            f"{Emoji.warning} That Minesweeper game is too big. "
            + "Please try smaller dimensions."
        )
        return

    # ============
    #  Start game
    # ============
    log.trace(f"Message area: {area}")
    game = GameBoard(x_distance, y_distance, bombs)
    # Open the top-left tile so the board is not completely covered.
    game.buttons[0][0].left_click()

    # Build the embed once; only channel messages get author attribution.
    embed = discord.Embed(
        title="Spoilers Minesweeper",
        description=game.to_covered_message(),
        color=discord.Color.gold(),
        timestamp=datetime.now().astimezone(),
    )
    if not dm:
        embed.set_author(
            name=ctx.author.name,
            icon_url=ctx.author.avatar_url_as(static_format="png"),
        )
    await dest.send(embed=embed)
@minesweeper_group.command(name="new-game", aliases=("n-g", "ng", "n"))
async def new_game(
self,
ctx: commands.Context,
x_distance: int = 8,
y_distance: int = 8,
bombs: int = 10,
) -> None:
"""Make a new Minesweeper game.
If x- or y-distance are changed, but not bombs, bombs will be scaled
to keep the same difficulty of the Minesweeper game.
"""
log.info(f"{ctx.author} started a new Minesweeper game")
# ========
# Checks
# ========
if x_distance <= 0:
x_distance = 1
if y_distance <= 0:
y_distance = 1
# Keep people from making more bombs than possible
area = x_distance * y_distance
if bombs >= area:
bombs = area - 1
# Scale difficulty
if (x_distance != 8 or y_distance != 8) and bombs == 10:
bombs = round(area * (10 / 64))
# ============
# Start game
# ============
log.trace(f"X: {x_distance}, Y; {y_distance}, Bombs: {bombs}")
game = GameBoard(x_distance, y_distance, bombs)
if area <= 170:
log.trace(f"Message area: {area}")
self._games[str(ctx.message.author)] = game
await | |
descriptor as a floating point numpy array
self._mKPFlavor = "NONE" # The flavor of the keypoints as a string.
See Also:
ImageClass._getRawKeypoints(self,thresh=500.00,forceReset=False,flavor="SURF",highQuality=1)
ImageClass._getFLANNMatches(self,sd,td)
ImageClass.findKeypointMatch(self,template,quality=500.00,minDist=0.2,minMatch=0.4)
ImageClass.drawKeypointMatches(self,template,thresh=500.00,minDist=0.15,width=1)
"""
try:
import cv2
ver = cv2.__version__
new_version = 0
#For OpenCV versions till 2.4.0, cv2.__versions__ are of the form "$Rev: 4557 $"
if not ver.startswith('$Rev:'):
if int(ver.replace('.','0'))>=20400 :
new_version = 1
if int(ver.replace('.','0'))>=20402 :
new_version = 2
except:
logger.warning("Can't run Keypoints without OpenCV >= 2.3.0")
return
if( forceReset ):
self._mKeyPoints = None
self._mKPDescriptors = None
if( self._mKeyPoints is None or self._mKPFlavor != flavor ):
if ( new_version == 0):
if( flavor == "SURF" ):
surfer = cv2.SURF(thresh,_extended=highQuality,_upright=1)
self._mKeyPoints,self._mKPDescriptors = surfer.detect(self.getGrayNumpy(),None,False)
if( len(self._mKPDescriptors) == 0 ):
return None, None
if( highQuality == 1 ):
self._mKPDescriptors = self._mKPDescriptors.reshape((-1,128))
else:
self._mKPDescriptors = self._mKPDescriptors.reshape((-1,64))
self._mKPFlavor = "SURF"
del surfer
elif( flavor == "FAST" and not (int(ver.split(' ')[1])>=4557)) :
faster = cv2.FastFeatureDetector(threshold=int(thresh),nonmaxSuppression=True)
self._mKeyPoints = faster.detect(self.getGrayNumpy())
self._mKPDescriptors = None
self._mKPFlavor = "FAST"
del faster
#elif( flavor == "MSER"):
# mserer = cv2.MSER()
# self._mKeyPoints = mserer.detect(self.getGrayNumpy(),None)
# self._mKPDescriptors = None
# self._mKPFlavor = "MSER"
# del mserer
elif( flavor == "STAR"):
starer = cv2.StarDetector()
self._mKeyPoints = starer.detect(self.getGrayNumpy())
self._mKPDescriptors = None
self._mKPFlavor = "STAR"
del starer
elif( new_version == 2 and flavor in ["SURF", "FAST"] ):
if( flavor == "SURF" ):
surfer = cv2.SURF(hessianThreshold=thresh,extended=highQuality,upright=1)
#mask = self.getGrayNumpy()
#mask.fill(255)
self._mKeyPoints,self._mKPDescriptors = surfer.detect(self.getGrayNumpy(),None,useProvidedKeypoints = False)
if( len(self._mKPDescriptors) == 0 ):
return None, None
if( highQuality == 1 ):
self._mKPDescriptors = self._mKPDescriptors.reshape((-1,128))
else:
self._mKPDescriptors = self._mKPDescriptors.reshape((-1,64))
self._mKPFlavor = "SURF"
del surfer
elif( flavor == "FAST" ):
faster = cv2.FastFeatureDetector(threshold=int(thresh),nonmaxSuppression=True)
self._mKeyPoints = faster.detect(self.getGrayNumpy())
self._mKPDescriptors = None
self._mKPFlavor = "FAST"
del faster
elif( new_version >=1 and flavor in ["ORB", "SIFT", "SURF"] ):
FeatureDetector = cv2.FeatureDetector_create(flavor)
DescriptorExtractor = cv2.DescriptorExtractor_create(flavor)
self._mKeyPoints = FeatureDetector.detect(self.getGrayNumpy())
self._mKeyPoints,self._mKPDescriptors = DescriptorExtractor.compute(self.getGrayNumpy(),self._mKeyPoints)
if( len(self._mKPDescriptors) == 0 ):
return None, None
self._mKPFlavor = flavor
del FeatureDetector
elif( new_version >= 1 and flavor in ["FAST", "STAR", "MSER", "Dense"] ):
FeatureDetector = cv2.FeatureDetector_create(flavor)
self._mKeyPoints = FeatureDetector.detect(self.getGrayNumpy())
self._mKPDescriptors = None
self._mKPFlavor = flavor
del FeatureDetector
else:
logger.warning("ImageClass.Keypoints: I don't know the method you want to use")
return None, None
return self._mKeyPoints,self._mKPDescriptors
def _getFLANNMatches(self,sd,td):
    """
    Summary:
    This method does a fast local approximate nearest neighbors (FLANN) calculation between two sets
    of feature vectors. The result are two numpy arrays the first one is a list of indexes of the
    matches and the second one is the match distance value. For the match indices or idx, the index
    values correspond to the values of td, and the value in the array is the index in td.
    I.e. j = idx[i] is where td[i] matches sd[j].
    The second numpy array, at the index i is the match distance between td[i] and sd[j].
    Lower distances mean better matches.

    Parameters:
    sd - A numpy array of feature vectors of any size.
    td - A numpy array of feature vectors of any size, this vector is used for indexing
         and the result arrays will have a length matching this vector.

    Returns:
    Two numpy arrays, the first one, idx, is the idx of the matches of the vector td with sd.
    The second one, dist, is the distance value for the closest match.

    Example:
    >>> kpt,td = img1._getRawKeypoints() # t is template
    >>> kps,sd = img2._getRawKeypoints() # s is source
    >>> idx,dist = img1._getFLANNMatches(sd,td)
    >>> j = idx[42]
    >>> print kps[j] # matches kp 42
    >>> print dist[i] # the match quality.

    Notes:
    If you would prefer to work with the raw keypoints and descriptors each image keeps
    a local cache of the raw values. These are named:
    self._mKeyPoints # A tuple of keypoint objects
    See: http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_feature_detectors.html#keypoint-keypoint
    self._mKPDescriptors # The descriptor as a floating point numpy array
    self._mKPFlavor = "NONE" # The flavor of the keypoints as a string.

    See:
    ImageClass._getRawKeypoints(self,thresh=500.00,forceReset=False,flavor="SURF",highQuality=1)
    ImageClass._getFLANNMatches(self,sd,td)
    ImageClass.drawKeypointMatches(self,template,thresh=500.00,minDist=0.15,width=1)
    ImageClass.findKeypoints(self,min_quality=300.00,flavor="SURF",highQuality=False )
    ImageClass.findKeypointMatch(self,template,quality=500.00,minDist=0.2,minMatch=0.4)
    """
    try:
        import cv2
    # BUGFIX: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only a missing/broken OpenCV should be handled.
    except ImportError:
        logger.warning("Can't run FLANN Matches without OpenCV >= 2.3.0")
        return

    FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
    flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 4)
    flann = cv2.flann_Index(sd, flann_params)
    # knnSearch with k=1: the single nearest neighbour in sd for each td row.
    idx, dist = flann.knnSearch(td, 1, params = {}) # bug: need to provide empty dict
    del flann
    return idx,dist
def drawKeypointMatches(self,template,thresh=500.00,minDist=0.15,width=1):
    """
    **SUMMARY**

    Draw keypoints draws a side by side representation of two images, calculates
    keypoints for both images, determines the keypoint correspondences, and then draws
    the correspondences. This method is helpful for debugging keypoint calculations
    and also looks really cool :) . The parameters mirror the parameters used
    for findKeypointMatches to assist with debugging

    **PARAMETERS**

    * *template* - A template image.
    * *thresh* - The feature quality metric. This can be any value between about 300 and 500. Higher
      values should return fewer, but higher quality features.
    * *minDist* - The value below which the feature correspondence is considered a match. This
      is the distance between two feature vectors. Good values are between 0.05 and 0.3
    * *width* - The width of the drawn line.

    **RETURNS**

    A side by side image of the template and source image with each feature correspondence
    drawn in a different color.

    **EXAMPLE**

    >>> img = cam.getImage()
    >>> template = Image("myTemplate.png")
    >>> result = img.drawKeypointMatches(self,template,300.00,0.4):

    **NOTES**

    If you would prefer to work with the raw keypoints and descriptors each image keeps
    a local cache of the raw values. These are named:
    self._mKeyPoints # A tuple of keypoint objects
    See: http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_feature_detectors.html#keypoint-keypoint
    self._mKPDescriptors # The descriptor as a floating point numpy array
    self._mKPFlavor = "NONE" # The flavor of the keypoints as a string.

    **SEE ALSO**

    :py:meth:`drawKeypointMatches`
    :py:meth:`findKeypoints`
    :py:meth:`findKeypointMatch`
    """
    # NOTE(review): should be `template is None`; kept as-is in this
    # documentation-only pass.
    if template == None:
        return None

    # Render template and source side by side; matches are drawn across both.
    resultImg = template.sideBySide(self,scale=False)
    # Vertical offset to centre the template against the (possibly taller)
    # source.  NOTE(review): relies on Python 2 integer division.
    hdif = (self.height-template.height)/2
    skp,sd = self._getRawKeypoints(thresh)
    tkp,td = template._getRawKeypoints(thresh)
    if( td == None or sd == None ):
        logger.warning("We didn't get any descriptors. Image might be too uniform or blurry." )
        return resultImg
    template_points = float(td.shape[0])
    sample_points = float(sd.shape[0])
    # Loosen the distance threshold when the source has more keypoints
    # than the template.
    magic_ratio = 1.00
    if( sample_points > template_points ):
        magic_ratio = float(sd.shape[0])/float(td.shape[0])
    idx,dist = self._getFLANNMatches(sd,td) # match our keypoint descriptors
    p = dist[:,0]
    # Boolean mask of correspondences that pass the distance threshold.
    result = p*magic_ratio < minDist #, = np.where( p*magic_ratio < minDist )
    for i in range(0,len(idx)):
        if( result[i] ):
            # Keypoint .pt is (x, y); indices are swapped here -- presumably
            # to match SimpleCV's drawing convention.  TODO confirm.
            pt_a = (tkp[i].pt[1], tkp[i].pt[0]+hdif)
            pt_b = (skp[idx[i]].pt[1]+template.width,skp[idx[i]].pt[0])
            resultImg.drawLine(pt_a,pt_b,color=Color.getRandom(Color()),thickness=width)
    return resultImg
def findKeypointMatch(self,template,quality=500.00,minDist=0.2,minMatch=0.4):
"""
**SUMMARY**
findKeypointMatch allows you to match a template image with another image using
SURF keypoints. The method extracts keypoints from each image, uses the Fast Local
Approximate Nearest Neighbors algorithm to find correspondences between the feature
points, filters the correspondences based on quality, and then, attempts to calculate
a homography between the two images. This homography allows us to draw a matching
bounding box in the source image that corresponds to the template. This method allows
you to perform matchs the ordinarily fail when using the findTemplate method.
This method should be able to handle a reasonable changes in camera orientation and
illumination. Using a template that is close to the target image will yield much
better results.
.. Warning::
This method is only capable of finding one instance of the template in an image.
If more than one instance is visible the homography calculation and the method will
fail.
**PARAMETERS**
* *template* - A template image.
* *quality* - The feature quality metric. This can be any value between about 300 and 500. Higher
values should return fewer, but higher quality features.
* *minDist* - The value below which the feature correspondence is considered a match. This
is the distance between two feature vectors. Good values are between 0.05 and 0.3
* *minMatch* - The percentage of features which must have matches to proceed with homography | |
<filename>iFindFriendsMini.indigoPlugin/Contents/Server Plugin/plugin.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
FindFriendsMini
Authors: See (repo)
Logons on to icloud account and access friends information for creation of indigo Devices
Enormously based on FindiStuff by Chameleon and GhostXML by DaveL17
"""
# Stock imports
global MajorProblem
MajorProblem = 0
startingUp = False
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except Exception as e:
pass
#o.server.log(u'error in import locales')
import sys
import math
#import OpenSSL
import WazeRouteCalculator
import time as t
import json
try:
import indigo
except ImportError:
MajorProblem = 2 #1 to restart, 2 to disable
pass
try:
from pyicloud import PyiCloudService
#from pyicloud.exceptions import PyiCloudFailedLoginException
#import moduledoesntexisit
from pyicloud.exceptions import (
PyiCloudFailedLoginException,
PyiCloudAPIResponseException,
PyiCloudNoDevicesException,
PyiCloud2SARequiredException,
PyiCloudServiceNotActivatedException,
)
# try:
# from custompyicloud.custompyicloud import PyiCloudService
# #from pyicloud.exceptions import PyiCloudFailedLoginException
# #import moduledoesntexisit
# from custompyicloud.custompyicloud import (
# PyiCloudException,
# PyiCloudAPIResponseException,
# PyiCloudServiceNotActivatedException,
# PyiCloudFailedLoginException,
# PyiCloud2SARequiredException,
# PyiCloudNoStoredPasswordAvailableException,
# PyiCloudNoDevicesException
# )
except Exception as e:
MajorProblem =2
errortext = str(e)
indigo.server.log(u"{0:=^130}".format(""), isError=True)
indigo.server.log(u'Returned Error:'+unicode(e), isError=True)
indigo.server.log(u"{0:=^130}".format(""), isError=True)
indigo.server.log("-- FATAL ERROR - Cannot find pyicloud or cannot load pyicloud or dependency.", isError=True)
if 'pytz' in errortext:
indigo.server.log('Missing pytz package. Please Follow Below instructions. (Once only needed)', isError=True)
indigo.server.log(u"{0:=^130}".format(""), isError=True)
try:
#import pip
t.sleep(5)
indigo.server.log(u"{0:=^130}".format(""), isError=True)
indigo.server.log('Open Terminal Window and type.', isError=True)
indigo.server.log('sudo easy_install pip', isError=True)
indigo.server.log('& then. [Both followed by enter]', isError=True)
indigo.server.log('sudo pip install pytz', isError=True)
indigo.server.log(u"{0:=^130}".format(""), isError=True)
indigo.server.log('Plugin will restart in 3 minutes', isError=True)
#pip.main(['install', 'microcache'])
t.sleep(180)
indigo.server.log('Restarting Plugin...', isError=True)
indigo.server.log(u"{0:=^130}".format(""), isError=True)
t.sleep(2)
MajorProblem = 1
except Exception as b:
indigo.server.log(u'Major Problem. Please contact developer. Error:'+unicode(b), isError=True)
MajorProblem = 2
pass
if 'six' in errortext:
indigo.server.log('Missing six package. Please Follow Below instructions. (Once only needed)', isError=True)
indigo.server.log(u"{0:=^130}".format(""), isError=True)
try:
#import pip
t.sleep(5)
indigo.server.log(u"{0:=^130}".format(""), isError=True)
indigo.server.log('Open Terminal Window and type.', isError=True)
indigo.server.log('sudo easy_install pip', isError=True)
indigo.server.log('& then. [Both followed by enter]', isError=True)
indigo.server.log('sudo pip install six', isError=True)
indigo.server.log(u"{0:=^130}".format(""), isError=True)
indigo.server.log('Plugin will restart in 3 minutes', isError=True)
#pip.main(['install', 'microcache'])
t.sleep(180)
indigo.server.log('Restarting Plugin...', isError=True)
indigo.server.log(u"{0:=^130}".format(""), isError=True)
t.sleep(2)
MajorProblem = 1
except Exception as b:
indigo.server.log(u'Major Problem. Please contact developer. Error:'+unicode(b), isError=True)
MajorProblem = 2
pass
else:
indigo.server.log(u"{0:=^130}".format(""), isError=True)
indigo.server.log(u'Major Problem. Please contact developer. Error:' + unicode(e), isError=True)
MajorProblem = 2
pass
# Now the HTTP and Compatibility libraries
#indigo.server.log(u"{0:=^130}".format(""), isError=True)
try:
import requests
except:
indigo.server.log("Note: requests.py must be installed for this plugin to operate. Indigo 7 ONLY. See the forum",isError=True)
indigo.server.log(
"Alternatively - check the name of the plugin in the Plugins folder. Is is FindFriendsMini.pluginIndigo"
"or FindFriendsMini(1).pluginIndigo? Make sure that all FindFriendsMini files are deleted from Downloads"
"before downloading the latest versions", isError=True)
# Date and time libraries
import time
try:
import pydevd
except ImportError:
pass
# try:
# # from googlemaps import googlemaps
# # from googlemaps.exceptions import (
# # ApiError,
# # TransportError,
# # HTTPError,
# # Timeout
# # )
# except Exception as e:
# indigo.server.log(u"{0:=^130}".format(""), isError=True)
# indigo.server.log(u'Error Importing Googlemaps. Error:'+unicode(e), isError=True)
# indigo.server.log(u"{0:=^130}".format(""), isError=True)
import webbrowser
import os
import logging
import datetime
import glob
#from ghpu import GitHubPluginUpdater
# NOTE(review): `global` at module level is a no-op; accountOK is only declared, never bound here.
global accountOK
#global self.appleAPI
# Custom imports
#import iterateXML
# Package metadata.
__author__ = u"GlennNZ"
__build__ = u""
__copyright__ = u"There is no copyright for the code base."
__license__ = u"MIT"
__title__ = u"FindFriendsMini Plugin for Indigo Home Control"
__version__ = u"0.4.5"
# Establish default plugin prefs; create them if they don't already exist.
kDefaultPluginPrefs = {
    u'configMenuServerTimeout': "5",  # Server timeout limit.
    u'showDebugInfo': False,  # Verbose debug logging?
    u'showDebugLevel': "20",  # Low, Medium or High debug output.
    u'updaterEmail': "",  # Email to notify of plugin updates.
    u'updaterEmailsEnabled': False  # Notification of plugin updates wanted.
}
class Plugin(indigo.PluginBase):
def __init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs):
    """Initialise logging, load saved preferences and react to fatal import problems.

    Signature matches indigo.PluginBase.__init__.  When the module-level import
    checks set MajorProblem, the plugin either restarts itself (== 1) or logs a
    fatal message and sleeps so the user can disable it (== 2).
    """
    indigo.PluginBase.__init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs)
    self.startingUp = True
    # (removed dead, mistyped local `apleAPI = None`; the real attribute
    #  self.appleAPI is initialised below)
    self.pluginIsInitializing = True
    self.pluginIsShuttingDown = False
    self.prefsUpdated = False
    # Startup banner with environment details, useful in support logs.
    self.logger.info(u"")
    self.logger.info(u"{0:=^130}".format(" Initializing New Plugin Session "))
    self.logger.info(u"{0:<30} {1}".format("Plugin name:", pluginDisplayName))
    self.logger.info(u"{0:<30} {1}".format("Plugin version:", pluginVersion))
    self.logger.info(u"{0:<30} {1}".format("Plugin ID:", pluginId))
    self.logger.info(u"{0:<30} {1}".format("Indigo version:", indigo.server.version))
    self.logger.info(u"{0:<30} {1}".format("Python version:", sys.version.replace('\n', '')))
    self.logger.info(u"{0:<30} {1}".format("Python Directory:", sys.prefix.replace('\n', '')))
    self.logger.info(u"{0:<30} {1}".format("Major Problem equals: ", MajorProblem))
    self.logger.info(u"{0:=^130}".format(""))
    self.iprefDirectory = '{}/Preferences/Plugins/com.GlennNZ.indigoplugin.FindFriendsMini'.format(indigo.server.getInstallFolderPath())
    # File log format.
    pfmt = logging.Formatter('%(asctime)s.%(msecs)03d\t[%(levelname)8s] %(name)20s.%(funcName)-25s%(msg)s',
                             datefmt='%Y-%m-%d %H:%M:%S')
    self.plugin_file_handler.setFormatter(pfmt)
    try:
        self.logLevel = int(self.pluginPrefs[u"showDebugLevel"])
    except (KeyError, TypeError, ValueError):  # narrowed from bare except:
        self.logLevel = logging.INFO
    # Create a second log file dedicated to geofence data.
    try:
        self.newloggerhandler = logging.FileHandler(u"{0}/Logs/com.GlennNZ.indigoplugin.FindFriendsMini/FFM-GeofenceData.log".format(
            indigo.server.getInstallFolderPath()))
        formatter = logging.Formatter('%(asctime)s.%(msecs)03d\t[%(levelname)8s] %(name)20s.%(funcName)-25s%(msg)s',
                                      datefmt='%Y-%m-%d %H:%M:%S')
        self.newloggerhandler.setFormatter(formatter)
        self.newlogger = logging.getLogger('FindFriends-GeofenceData')
        self.newlogger.setLevel(logging.DEBUG)
        self.newlogger.addHandler(self.newloggerhandler)
    except Exception:  # best effort: the plugin still works without the extra log file
        self.logger.exception(u'Error in Debug New Log Setup')
    self.indigo_log_handler.setLevel(self.logLevel)
    self.logger.debug(u"logLevel = " + str(self.logLevel))
    self.triggers = {}
    self.appleAPI = None
    # Per-topic debug switches from saved preferences.
    self.debugicloud = self.pluginPrefs.get('debugicloud', False)
    self.debugLevel = int(self.pluginPrefs.get('showDebugLevel', 20))
    self.debugmaps = self.pluginPrefs.get('debugmaps', False)
    self.debuggeofence = self.pluginPrefs.get('debuggeofence', False)
    self.debugdistance = self.pluginPrefs.get('debugdistance', False)
    self.logFile = u"{0}/Logs/com.GlennNZ.indigoplugin.FindFriendsMini/plugin.log".format(indigo.server.getInstallFolderPath())
    if self.debuggeofence:
        # Mirror the startup banner into the geofence log.
        self.newlogger.info(u"")
        self.newlogger.info(u"{0:=^130}".format(" Initializing New Plugin Session "))
        self.newlogger.info(u"{0:<30} {1}".format("Plugin name:", pluginDisplayName))
        self.newlogger.info(u"{0:<30} {1}".format("Plugin version:", pluginVersion))
        self.newlogger.info(u"{0:<30} {1}".format("Plugin ID:", pluginId))
        self.newlogger.info(u"{0:<30} {1}".format("Indigo version:", indigo.server.version))
        self.newlogger.info(u"{0:<30} {1}".format("Python version:", sys.version.replace('\n', '')))
        self.newlogger.info(u"{0:<30} {1}".format("Python Directory:", sys.prefix.replace('\n', '')))
        self.newlogger.info(u"{0:<30} {1}".format("Major Problem equals: ", MajorProblem))
        self.newlogger.info(u"{0:=^130}".format(""))
    self.TwoFAverified = False
    self.configMenuTimeCheck = int(self.pluginPrefs.get('configMenuTimeCheck', "5"))
    #self.updater = indigoPluginUpdateChecker.updateChecker(self, "http://")
    self.updaterEmailsEnabled = self.pluginPrefs.get('updaterEmailsEnabled', False)
    self.updateFrequency = float(self.pluginPrefs.get('updateFrequency', "24")) * 60.0 * 60.0
    self.next_update_check = time.time()
    # Map rendering preferences.
    self.configVerticalMap = self.pluginPrefs.get('verticalMap', "600")
    self.useMaps = self.pluginPrefs.get('useMaps', False)
    self.mapType = self.pluginPrefs.get('mapType', "openstreetmap")
    if self.mapType == None:
        self.useMaps = False
    self.configHorizontalMap = self.pluginPrefs.get('horizontalMap', "600")
    self.configZoomMap = self.pluginPrefs.get('ZoomMap', "15")
    self.datetimeFormat = self.pluginPrefs.get('datetimeFormat', '%c')
    self.googleAPI = self.pluginPrefs.get('googleAPI', '')
    self.BingAPI = self.pluginPrefs.get('BingAPI', '')
    self.wazeRegion = self.pluginPrefs.get('wazeRegion', 'EU')
    self.wazeUnits = self.pluginPrefs.get('wazeUnits', 'km')
    self.deviceNeedsUpdated = ''
    self.openStore = self.pluginPrefs.get('openStore', False)
    self.requires2FA = False  ## If account requires another set to be done, will change from True - False
    self.requires2SA = False
    if MajorProblem > 0:
        plugin = indigo.server.getPlugin('com.GlennNZ.indigoplugin.FindFriendsMini')
        if MajorProblem == 1:
            self.logger.error(u'Major Problem: Restarting Plugin...')
            if plugin.isEnabled():
                plugin.restart(waitUntilDone=False)
                self.sleep(1)
        if MajorProblem == 2:
            self.logger.error(u"{0:=^130}".format(""))
            self.logger.error(u"{0:=^130}".format(""))
            self.logger.error(u'Major Problem: Please Disable Plugin. Now Sleeping. Please contact Developer.')
            self.logger.error(u"{0:=^130}".format(""))
            self.logger.error(u"{0:=^130}".format(""))
            if plugin.isEnabled():
                # Can't disable ourselves programmatically, so sleep (effectively) forever.
                #plugin.disable()
                self.sleep(86400)
    self.pluginIsInitializing = False
###
### Update ghpu Routines.
def pluginstoreUpdate(self):
    """Open this plugin's page in the Indigo plugin store."""
    self.browserOpen('http://www.indigodomo.com/pluginstore/139/')
#####
def __del__(self):
    """Log destruction and delegate clean-up to the Indigo base class."""
    self.logger.debug(u"__del__ method called.")
    indigo.PluginBase.__del__(self)
def closedPrefsConfigUi(self, valuesDict, userCancelled):
    """Apply preferences when the plugin config dialog closes.

    valuesDict -- values from the dialog; ignored when userCancelled is True.
    Always returns True so Indigo keeps the dialog state.
    """
    self.logger.debug(u"closedPrefsConfigUi() method called.")
    if userCancelled:
        self.logger.debug(u"User prefs dialog cancelled.")
    if not userCancelled:
        self.debug = valuesDict.get('showDebugInfo', False)
        self.debugLevel = int(valuesDict.get('showDebugLevel', "20"))
        self.debugicloud = valuesDict.get('debugicloud', False)
        self.debugmaps = valuesDict.get('debugmaps', False)
        self.debuggeofence = valuesDict.get('debuggeofence', False)
        self.debugdistance = valuesDict.get('debugdistance', False)
        # (removed a duplicate `self.datetimeFormat = valuesDict.get('datetimeFormat', '%c')`
        #  assignment — the same line appeared twice in this method)
        self.datetimeFormat = valuesDict.get('datetimeFormat', '%c')
        self.configVerticalMap = valuesDict.get('verticalMap', "600")
        self.useMaps = valuesDict.get('useMaps', False)
        # NOTE(review): reads pluginPrefs rather than valuesDict, unlike every
        # other pref here — confirm whether 'mapType' is a field of this dialog.
        self.mapType = self.pluginPrefs.get('mapType', "openstreetmap")
        self.configHorizontalMap = valuesDict.get('horizontalMap', "600")
        self.configZoomMap = valuesDict.get('ZoomMap', "15")
        self.googleAPI = valuesDict.get('googleAPI', '')
        self.BingAPI = valuesDict.get('BingAPI', '')
        self.openStore = valuesDict.get('openStore', False)
        self.updateFrequency = float(valuesDict.get('updateFrequency', "24")) * 60.0 * 60.0
        # If plugin config menu closed update the time for check. Will apply after first change.
        self.configMenuTimeCheck = int(valuesDict.get('configMenuTimeCheck', "5"))
        self.prefsUpdated = True
        try:
            self.logLevel = int(valuesDict[u"showDebugLevel"])
        except (KeyError, TypeError, ValueError):  # narrowed from bare except:
            self.logLevel = logging.INFO
        self.indigo_log_handler.setLevel(self.logLevel)
        self.logger.debug(u"logLevel = " + str(self.logLevel))
        self.logger.debug(u"User prefs saved.")
        self.logger.debug(u"Debugging on (Level: {0})".format(self.debugLevel))
    return True
def deviceStartComm(self, dev):
""" docstring placeholder """
self.logger.debug(u"deviceStartComm() method called.")
self.logger.debug(u'Starting FindFriendsMini device: '+unicode(dev.name)+' and dev.id:'+unicode(dev.id)+ ' and dev.type:'+unicode(dev.deviceTypeId))
# Update statelist in case any updates/changes
dev.stateListOrDisplayStateIdChanged()
if dev.deviceTypeId=='FindFriendsGeofence':
stateList = [
#{'key': 'friendsInRange', 'value': 0},
#{'key': 'lastArrivaltime', 'value': ''},
#{'key': 'lastDeptime', 'value': ''},
#{'key': 'lastArrivaltimestamp', 'value': ''},
#{'key': 'lastDeptimestamp', 'value': ''},
#{'key': 'minutessincelastArrival', 'value': 0},
#{'key': 'minutessincelastDep', 'value': 0},
#{'key': 'listFriends', 'value': ''},
{'key': 'deviceIsOnline', 'value': False, 'uiValue':'Waiting'}]
#self.logger.debug(unicode(stateList))
dev.updateStatesOnServer(stateList)
if dev.deviceTypeId == 'FindFriendsFriend':
stateList = [
{'key': 'id', 'value':''},
{'key': 'status', 'value': ''},
{'key': 'locationStatus', 'value': ''},
{'key': 'batteryStatus', 'value': ''},
{'key': 'locationTimestamp', 'value': ''},
{'key': 'timestamp', 'value': ''},
{'key': 'altitude', 'value': ''},
{'key': 'homeDistance', 'value': 0},
{'key': 'homeTime', 'value': 0},
{'key': 'otherDistance', 'value': 0},
{'key': 'otherTime', 'value': 0},
{'key': 'homeDistanceText', 'value': 'unknown'},
{'key': 'homeTimeText', 'value': 'unknown'},
{'key': 'otherDistanceText', 'value': 'unknown'},
{'key': 'otherTimeText', 'value': 'unknown'},
{'key': 'googleMapUrl', 'value': ''},
{'key': 'labels', 'value': ''},
{'key': 'longitude', 'value': 'unknown'},
{'key': 'horizontalAccuracy', 'value': ''},
{'key': 'address', 'value': ''},
{'key': 'latitude', 'value': 'unknown'},
{'key': 'mapUpdateNeeded', 'value': True}
]
#self.logger.debug(unicode(stateList))
dev.updateStatesOnServer(stateList)
elif dev.deviceTypeId=="myDevice":
stateList = [
{'key': 'id', 'value': ''},
{'key': 'status', 'value': ''},
{'key': 'batteryStatus', 'value': ''},
{'key': 'locationTimestamp', 'value': ''},
{'key': 'timestamp', 'value': ''},
{'key': 'altitude', 'value': ''},
{'key': 'homeDistance', 'value': 0},
{'key': 'homeTime', 'value': 0},
{'key': 'batteryCharge', 'value': 0},
{'key': 'otherDistance', 'value': 0},
{'key': 'otherTime', 'value': 0},
{'key': 'homeDistanceText', 'value': 'unknown'},
{'key': 'homeTimeText', 'value': 'unknown'},
{'key': 'otherDistanceText', 'value': 'unknown'},
{'key': 'otherTimeText', 'value': 'unknown'},
{'key': 'googleMapUrl', 'value': ''},
{'key': 'longitude', 'value': 'unknown'},
{'key': 'horizontalAccuracy', 'value': ''},
{'key': 'address', 'value': ''},
{'key': 'latitude', 'value': 'unknown'},
{'key': 'devSummary', 'value': 'Offline'},
{'key': 'mapUpdateNeeded', 'value': True},
]
#self.logger.debug(unicode(stateList))
dev.updateStatesOnServer(stateList)
self.prefsUpdated | |
from locust import HttpLocust, TaskSet, TaskSequence, between, constant
from bs4 import BeautifulSoup, SoupStrainer
from string import ascii_lowercase
import random
import gevent
import sys
import re
# TODO:
# Modify current_hunt request to only look at unsolved puzzles
# Fix no last_pk error with chat post (user and staff)
# ========== HELPTER FUNCTIONS/VARIABLES ==========
thread_list = []  # every poller greenlet ever spawned (see apply_poller)
kill_list = []  # greenlets flagged for termination; reaped in get_status()
num_bots = 900  # total simulated clients
num_staff = int(num_bots / 25)  # ~4% of bots act as staff accounts
num_users = num_bots - num_staff
user_ids = list(range(num_users))  # hunter account ids: 0 .. num_users-1
staff_ids = list(range(num_users, num_users + num_staff))  # staff account ids follow on
USER_PASSWORD = "password"  # shared base password for all test accounts
def get_status(greenlets):
    """Tally the run state of *greenlets* and return the counts as a dict.

    Side effect: any running greenlet found in the module-level kill_list is
    killed (blocking).  Keys: total, running, completed, successed, yet_to_run,
    failed.
    """
    counts = dict(total=0, running=0, completed=0,
                  successed=0, yet_to_run=0, failed=0)
    for greenlet in greenlets:
        counts['total'] += 1
        if greenlet:
            counts['running'] += 1
            if greenlet in kill_list:
                sys.stdout.write("Attempting to kill!")
                greenlet.kill(block=True)
        elif greenlet.ready():
            counts['completed'] += 1
            if greenlet.successful():
                counts['successed'] += 1
            else:
                counts['failed'] += 1
        else:
            counts['yet_to_run'] += 1
    # Sanity: the three live states partition the total; failures are the
    # completed runs that were not successful.
    assert counts['yet_to_run'] == counts['total'] - counts['completed'] - counts['running']
    assert counts['failed'] == counts['completed'] - counts['successed']
    return counts
def random_string(n):
    """Return a random string of *n* lowercase ASCII letters."""
    letters = [random.choice(ascii_lowercase) for _ in range(n)]
    return ''.join(letters)
def is_puzzle_link(link):
    """True when *link* is a puzzle URL; falsy input is passed straight through."""
    if not link:
        return link
    return "/puzzle/" in link
# Parse-time filter: restrict BeautifulSoup to tags whose href is a puzzle link.
only_puzzles = SoupStrainer(href=is_puzzle_link)
def is_hunt_link(link):
    """True when *link* is a hunt URL; falsy input is passed straight through."""
    if not link:
        return link
    return "/hunt/" in link
# Parse-time filter for hunt links, mirroring only_puzzles above.
only_hunts = SoupStrainer(href=is_hunt_link)
# Header that marks a request as XHR (Django's request.is_ajax() checks it).
ajax_headers = {'X-Requested-With': 'XMLHttpRequest'}
def set_ajax_args(l, attr, val):
    """Record *val* under key *attr* in this locust's per-user ajax-state cache."""
    cache = l.locust.ajax_args
    cache[attr] = val
def get_ajax_args(l, attr):
    """Return the value cached for *attr* in this locust's ajax-state cache."""
    cache = l.locust.ajax_args
    return cache[attr]
def better_get(l, url, **kwargs):
    """Thin wrapper over l.client.get — a single hook point for default options."""
    client = l.client
    return client.get(url, **kwargs)
def gen_from_list(in_list):
    """Yield the items of *in_list* in order, then repeat the last item forever.

    Used as a delay schedule for pollers: escalate through the listed delays,
    then hold steady at the final one (reset via Poller.reset_time_iter).
    Fixes the original, which never advanced `index` and therefore yielded the
    first element indefinitely.
    """
    index = 0
    length = len(in_list)
    while True:
        if index < length:
            yield in_list[index]
            index += 1  # advance — this was missing in the original
        else:
            yield in_list[-1]
def gevent_func(poller, l):
    """Greenlet body: run the poller's ajax function forever with its delay schedule.

    Sleeps between calls for whatever the poller's (infinite) delay iterator
    yields; exits cleanly when the greenlet is killed.
    """
    try:
        while True:
            poller.ajax_func(l)
            a = next(poller.time_iter)
            gevent.sleep(a)
    except gevent.GreenletExit:
        # Raised by greenlet.kill(); returning ends the greenlet gracefully.
        # sys.stdout.write("Got GreenletExit exception from %d" % l.locust.user_id)
        return
class Poller(object):
    """Pairs a background ajax function with its polling-delay schedule."""
    thread = None  # greenlet running gevent_func; set by apply_poller's on_start hook
    ajax_vars = None  # NOTE(review): appears unused anywhere in this file
    def __init__(self, ajax_func, delay_list):
        # ajax_func(l) is called repeatedly; delay_list gives the sleeps between calls.
        self.ajax_func = ajax_func
        self.delay_list = delay_list
        self.time_iter = gen_from_list(delay_list)
    def reset_time_iter(self):
        # Restart the delay schedule from the beginning (e.g. after a submission).
        self.time_iter = gen_from_list(self.delay_list)
def apply_poller(task_set, poller):
    """Wire *poller* into *task_set* via on_start/on_stop hooks; return the task_set.

    on_start spawns the polling greenlet (tracked in the global thread_list);
    on_stop flags it in kill_list and kills it.  A falsy *poller* leaves the
    task_set unmodified.
    """
    def poller_on_start(ts):
        # Spawn the background polling greenlet and register it for tracking.
        poller.thread = gevent.spawn(gevent_func, poller, ts)
        thread_list.append(poller.thread)
        ts.locust.poller = poller
        # sys.stdout.write("Started thread %d" % ts.locust.user_id)
        # sys.stdout.write(str(get_status(thread_list)))
        # sys.stdout.write("KILL:" + str(get_status(kill_list)))
    def poller_on_stop(ts):
        # Flag for termination (get_status also reaps from kill_list), then kill.
        kill_list.append(poller.thread)
        sys.stdout.write(str(len(kill_list)))
        poller.thread.kill(block=True)
        # sys.stdout.write("Ended thread %d" % ts.locust.user_id)
    if(poller):
        task_set.on_start = poller_on_start
        task_set.on_stop = poller_on_stop
    return task_set
def page_and_subpages(main_function, action_set, poller=None, time=None):
    """Build a TaskSequence: load the main page, run the sub-actions, then stop.

    *action_set* becomes the tasks of an inner TaskSet; *time*, when given, sets
    a constant wait time on it.  *poller*, when given, is attached to the inner
    TaskSet via apply_poller and forces a 1s wait on the outer sequence.
    """
    class ActionSet(TaskSet):
        tasks = action_set
        if(time):
            wait_time = constant(time)
    class ts(TaskSequence):
        tasks = [main_function, apply_poller(ActionSet, poller), stop]
        if(poller):
            wait_time = constant(1)
    return ts
def add_static(session, response, cache=True):
    """Fetch every static/media resource referenced by *response*, once per run.

    Scans both src= and href= attributes for URLs under /static or /media and
    requests each URL the first time it is seen (tracked in
    session.locust.static_urls).  Returns *response* so calls can be chained.
    NOTE(review): the `cache` parameter is unused — kept for interface
    compatibility; the source's mangled indentation made the fetch/cache
    nesting ambiguous — fetch-once matches the static_urls bookkeeping.
    """
    if response.text:
        resource_urls = set()
        soup = BeautifulSoup(response.text, "html.parser")
        # src= and href= both reference fetchable resources; scan them uniformly
        # (the original duplicated this loop for each attribute).
        for attribute in ("src", "href"):
            for tag in soup.find_all(**{attribute: True}):
                url = tag[attribute]
                if "/static" in url or "/media" in url:
                    resource_urls.add(url)
        # resource_urls is already a set — no need for the original's set() copy.
        for url in resource_urls:
            if url not in session.locust.static_urls:
                session.locust.static_urls.add(url)
                # Group requests under one stats name per resource kind.
                if "/media" in url:
                    session.client.get(url, name="Media File")
                else:
                    session.client.get(url, name="Static File")
    return response
def ensure_login(session, input_response, static=True):
    """Log the session in when *input_response* was redirected to a login page.

    Returns the post-login response, or *input_response* unchanged when no
    login was required.  With static=True the login pages' static assets are
    fetched too.
    """
    if(input_response.url and
       ("login-selection" in input_response.url or "/staff/login/" in input_response.url)):
        if "?next=" in input_response.url:
            # Preserve the ?next= redirect target through the login round-trip.
            next_param = input_response.url.split("?next=")[1]
            response = session.client.get("/accounts/login/?next=" + next_param)
            next_url = '/accounts/login/?next=' + next_param
        else:
            # BUG FIX: was `response = session, session.client.get(...)`, which
            # bound a (session, response) tuple and broke the calls below.
            response = session.client.get("/accounts/login/")
            next_url = '/accounts/login/'
        if static:
            add_static(session, input_response)
            add_static(session, response)
        session.client.headers['Referer'] = session.client.base_url
        store_CSRF(session, response)
        # NOTE(review): the password literal was redacted in the source; restored
        # from the module-level constant — confirm accounts use USER_PASSWORD + id.
        args = {"username": "test_user_" + str(session.locust.user_id),
                "password": USER_PASSWORD + str(session.locust.user_id)
                }
        response = store_CSRF(session, CSRF_post(session, next_url, args))
        if "/accounts/login/" in response.url or "/staff/login/" in response.url:
            # Still on a login page means the credentials were rejected.
            sys.stdout.write("login-failed")
        return response
    else:
        return input_response
def store_CSRF(session, response):
    """Extract the freshest CSRF token from *response* and cache it on the locust.

    Precedence (later wins): csrftoken cookie, then a `csrf_token = '...'`
    template variable, then a csrfmiddlewaretoken form field.  Returns
    *response* unchanged so calls can be chained (see url_all).
    """
    # sys.stdout.write("|STORED CSRF: " + response.url)
    if(response.cookies and 'csrftoken' in response.cookies):
        # Clear before set so a stale cookie cannot mask the new value.
        session.locust.client.cookies.set('csrftoken', None)
        session.locust.client.cookies.set('csrftoken', response.cookies['csrftoken'])
        session.locust.templateCSRF = session.locust.client.cookies['csrftoken']
        # sys.stdout.write("| COOKIE: " + session.locust.client.cookies['csrftoken'])
    search_results = re.search(r"csrf_token = '(.*?)';", response.text)
    if(search_results):
        session.locust.templateCSRF = search_results.group(1)
        # sys.stdout.write("| TEMPLATE: " + session.locust.templateCSRF)
    search_results = re.search(r"name='csrfmiddlewaretoken' value='(.*?)'", response.text)
    if(search_results):
        session.locust.templateCSRF = search_results.group(1)
        # sys.stdout.write("| TEMPLATE: " + session.locust.templateCSRF)
    return response
def CSRF_post(session, url, args):
    """POST *args* to *url* with the cached CSRF token (form field + header).

    Mutates *args* by inserting 'csrfmiddlewaretoken'.  On a 403 the full
    request/response state is dumped to help debug the CSRF handshake.
    Returns the response.
    """
    session.client.headers['Referer'] = session.client.base_url
    args['csrfmiddlewaretoken'] = session.locust.templateCSRF
    response = session.client.post(url, args,
                                   headers={"X-CSRFToken": session.locust.templateCSRF})
    if(response.status_code == 403):
        # Token rejected — log everything involved in the handshake.
        sys.stdout.write("|403 FAILURE: " + response.url)
        sys.stdout.write(str("| COOKIE: " + str(session.locust.client.cookies.items())))
        sys.stdout.write(str("| TEMPLATE: " + session.locust.templateCSRF))
        sys.stdout.write(str("| " + str(response.request.headers)))
        sys.stdout.write(str("| " + str(response.request.body)))
        sys.stdout.write(str("| " + str(response.text)))
    return response
def url_all(l, r):
    """Standard response pipeline: record CSRF, re-login if needed, pull static assets."""
    response = store_CSRF(l, r)
    response = ensure_login(l, response)
    return add_static(l, response)
def stop(l):
    """Run the task set's on_stop hook when it has one, then interrupt it."""
    try:
        hook = l.on_stop
    except AttributeError:
        pass
    else:
        hook()
    l.interrupt()
# ========== END HELPTER FUNCTIONS/VARIABLES ==========
# ========== HUNTER PAGE VIEW FUNCTIONS ==========
def index(l):
    """Load the site index page through the standard pipeline."""
    # Load index page
    url_all(l, better_get(l, "/"))
def current_hunt_main_page(l):
    """Load the current-hunt page and cache its puzzle ids on the locust object."""
    # Load page, get puzzles, set puzzles on locust object
    # Possibly separate by solved and unsolved
    response = url_all(l, better_get(l, "/hunt/current/"))
    puzzle_ids = []
    soup = BeautifulSoup(response.text, "html.parser", parse_only=only_puzzles)
    for puzzle_link in soup.find_all(href=True):
        # URL shape /puzzle/<id>/ — element 2 of the split is the id.
        puzzle_ids.append(puzzle_link['href'].split("/")[2])
    l.locust.puzzle_ids = puzzle_ids
def puzzle_main_page(l):
    """Open a random cached puzzle page and seed its ajax cursor ('last_date')."""
    # Pick puzzle from puzzles, go to page, possibly weight by solved/unsolved
    # Store current puzzle number in locust object
    # Get ajax number from page and store to locust object
    puzzle_id = random.choice(l.locust.puzzle_ids)
    l.locust.puzzle_id = puzzle_id
    response = url_all(l, better_get(l, "/puzzle/" + puzzle_id + "/"))
    search_results = re.search(r"last_date = '(.*)';", response.text)
    if(search_results):
        last_date = search_results.group(1)
    else:
        # Page did not embed the cursor; log it and fall back to empty
        # (puzzle_ajax refuses to poll with an empty last_date).
        sys.stdout.write("puzzle_main_page could not find ajax last date: %s" % str(response.text))
        sys.stdout.flush()
        last_date = ""
    set_ajax_args(l, "puzzle", {'last_date': last_date})
def puzzle_ajax(l):
    """Poll the current puzzle page's AJAX endpoint, advancing the 'last_date' cursor."""
    if get_ajax_args(l, "puzzle")['last_date'] == "":
        # Cursor was never seeded (page scrape failed) — skip this poll.
        print("Cowardly refusing to send empty ajax last date")
        return
    puzzle_id = l.locust.puzzle_id
    puzzle_url = "/puzzle/" + puzzle_id + "/"
    response = better_get(l, puzzle_url + "?last_date=" + get_ajax_args(l, "puzzle")['last_date'],
                          headers=ajax_headers, name=puzzle_url + " AJAX")
    try:
        set_ajax_args(l, "puzzle", {'last_date': response.json()["last_date"]})
    except (ValueError, KeyError):
        # Narrowed from a bare except: .json() raises ValueError on non-JSON,
        # the subscript raises KeyError when 'last_date' is missing.
        sys.stdout.write("puzzle_ajax could not find ajax: %s" % str(response.text))
        sys.stdout.flush()
def puzzle_pdf_link(l):
    """Download the PDF for the locust's current puzzle."""
    # Load pdf link for current puzzle number
    puzzle_id = l.locust.puzzle_id
    better_get(l, "/protected/puzzles/" + puzzle_id + ".pdf")
def puzzle_answer(l):
    """Submit an answer for the current puzzle; correct ~1 time in 9."""
    # Submit answer to current puzzle using POST with some correctness chance
    # 1 in 9 submissions is correct
    puzzle_id = l.locust.puzzle_id
    if(random.random() < (1.0 / 9.0)):
        answer = "answer" + puzzle_id
    else:
        answer = random_string(10)
    message_data = {"answer": answer}
    store_CSRF(l, CSRF_post(l, "/puzzle/" + puzzle_id + "/", message_data))
    # Restart the polling delay schedule after a submission.
    l.locust.poller.reset_time_iter()
def chat_main_page(l):
    """Load the chat page; cache the ajax cursor ('last_pk') and the team pk."""
    # Load main chat page and store ajax value in locust object
    response = url_all(l, better_get(l, "/chat/"))
    search_results = re.search(r"last_pk = (.*);", response.text)
    if(search_results):
        last_pk = search_results.group(1)
    else:
        sys.stdout.write("chat_main_page could not find ajax: %s" % str(response.text))
        sys.stdout.flush()
        last_pk = ""
    set_ajax_args(l, "chat", {'last_pk': last_pk})
    # The page also embeds the current team's pk, needed for posting messages.
    search_results = re.search(r"curr_team = (.*);", response.text)
    if(search_results):
        curr_team = search_results.group(1)
    else:
        curr_team = ""
    l.locust.team_pk = curr_team
def chat_ajax(l):
    """Poll the chat AJAX endpoint, advancing the cached 'last_pk' cursor."""
    response = better_get(l, "/chat/?last_pk=" + str(get_ajax_args(l, "chat")['last_pk']),
                          headers=ajax_headers, name="/chat/ AJAX")
    try:
        set_ajax_args(l, "chat", {'last_pk': response.json()["last_pk"]})
    except (ValueError, KeyError):
        # Narrowed from a bare except: .json() raises ValueError on non-JSON,
        # the subscript raises KeyError when 'last_pk' is missing.
        sys.stdout.write("chat_ajax could not find ajax: %s" % str(response.text))
        sys.stdout.flush()
def chat_new_message(l):
    """POST a random 40-char chat message as the locust's cached team."""
    # Make POST request to create a new chat message, store ajax value
    message_data = {
        "team_pk": int(l.locust.team_pk),
        "message": random_string(40),
        "is_response": False,
        "is_announcement": False
    }
    store_CSRF(l, CSRF_post(l, "/chat/", message_data))
def info_main_page(l):
    """Load the hunt-info page."""
    # Load info page
    url_all(l, better_get(l, "/hunt-info/"))
def registration_main_page(l):
    """Load the registration page."""
    # Load registration page
    url_all(l, better_get(l, "/registration/"))
def registration_update_info(l):
    """POST a random room-location update for the team."""
    # Update the teams room location
    registration_data = {
        "form type": "new_location",
        "team_location": random_string(10)
    }
    store_CSRF(l, CSRF_post(l, "/registration/", registration_data))
def resources(l):
    """Load the resources page."""
    # Load resources page
    url_all(l, better_get(l, "/info/extra/resources/"))
def previous_hunts_main_page(l):
    """Load the previous-hunts page and cache the available hunt ids."""
    # Load previous hunts page, store list of available hunts in locust object
    response = url_all(l, better_get(l, "/previous-hunts/"))
    hunt_ids = []
    soup = BeautifulSoup(response.text, "html.parser", parse_only=only_hunts)
    for hunt_link in soup.find_all(href=True):
        # URL shape /hunt/<id>/ — element 2 of the split is the id.
        hunt_ids.append(hunt_link['href'].split("/")[2])
    l.locust.hunt_ids = hunt_ids
def previous_hunt(l):
    """Load a random cached previous-hunt page."""
    # Load a random previous hunt page in the locust object
    hunt_id = random.choice(l.locust.hunt_ids)
    url_all(l, better_get(l, "/hunt/" + hunt_id))
def create_account(l):
    """Load the create-account page."""
    # Load the create account page
    url_all(l, better_get(l, "/accounts/create/"))
def contact(l):
    """Load the contact page."""
    # Load contact page
    url_all(l, better_get(l, "/contact-us/"))
def user_profile(l):
    """Load the user-profile page."""
    # Load user profile page
    url_all(l, better_get(l, "/user-profile/"))
# ========== END HUNTER PAGE VIEW FUNCTIONS ==========
# ========== STAFF PAGE VIEW FUNCTIONS ==========
def staff_chat_main_page(l):
    """Load the staff chat page; cache the ajax cursor and the visible team pks."""
    # Load main chat page and store ajax value in locust object
    response = url_all(l, better_get(l, "/staff/chat/"))
    search_results = re.search(r"last_pk = (.*);", response.text)
    if(search_results):
        last_pk = search_results.group(1)
    else:
        sys.stdout.write("staff_chat_main_page could not find ajax: %s" % str(response.text))
        sys.stdout.flush()
        last_pk = ""
    set_ajax_args(l, "staff_chat", {'last_pk': last_pk})
    # Teams are rendered with data-id='<pk>' attributes; collect them all.
    search_results = re.findall(r"data-id='(.*)' ", response.text)
    if(search_results):
        l.locust.staff_chat_teams = search_results
    else:
        l.locust.staff_chat_teams = None
def staff_chat_new_message(l):
    """POST a staff response to a random team; no-op when no teams are cached."""
    # Make POST request to create a new chat message, store ajax value
    if(l.locust.staff_chat_teams):
        message_data = {
            "team_pk": int(random.choice(l.locust.staff_chat_teams)),
            "message": random_string(40),
            "is_response": True,
            "is_announcement": False
        }
        store_CSRF(l, CSRF_post(l, "/staff/chat/", message_data))
def staff_chat_ajax(l):
# Make ajax request | |
<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import collections
import concurrent.futures
import contextlib
import functools
import json
import math
import os
import platform
import re
import sys
import time
import warnings
import numbers
import keyword
import numpy as np
import pyarrow as pa
import progressbar
import psutil
import six
import yaml
from .json import VaexJsonEncoder, VaexJsonDecoder
import vaex.file
is_frozen = getattr(sys, 'frozen', False)  # True when running from a frozen/bundled app
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# Short OS name used elsewhere; raises KeyError on an unsupported platform.
osname = dict(darwin="osx", linux="linux", windows="windows")[platform.system().lower()]
# $ export VAEX_DEV=1 to enable dev mode (skips slow tests)
devmode = os.environ.get('VAEX_DEV', '0') == '1'
class AttrDict(dict):
    """Dict whose items are also readable/writable as attributes.

    ``d.x`` is ``d['x']`` and ``d.x = v`` is ``d['x'] = v``.  A missing
    attribute now raises AttributeError (previously KeyError leaked out of
    ``__getattr__``, which broke ``hasattr``, ``copy.copy`` and pickling —
    the attribute protocol requires AttributeError).
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)
    def __setattr__(self, key, value):
        self[key] = value
def deprecated(reason):
    """Decorator factory: mark a function as deprecated for the given *reason*.

    Prepends a "Deprecated: ..." note to the function's docstring and emits a
    DeprecationWarning on every call, forwarding all arguments unchanged.
    """
    def decorator(func):
        doc = func.__doc__ or ""
        # Mutate the docstring first so functools.wraps copies the annotated one.
        func.__doc__ = "Deprecated: {}\n\n{}".format(reason, doc)
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            warnings.warn("Call to deprecated function {}: {}".format(func.__name__, reason),
                          category=DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator
def subdivide(length, parts=None, max_length=None):
    """Yields index tuple (i1, i2) that subdivide an array into parts of max_length.

    Either ``parts`` (number of chunks) or ``max_length`` (chunk size) must be
    given.  All yielded indices are plain ints.
    """
    if max_length is None:
        # Integer ceiling division: the old ``/`` produced a float on
        # Python 3, which leaked float indices out of this generator.
        max_length = (length + parts - 1) // parts
    i1 = 0
    while i1 < length:
        i2 = min(i1 + max_length, length)
        yield i1, i2
        i1 = i2
    assert i1 == length
def subdivide_mask(mask, parts=None, max_length=None, logical_length=None):
    """Yields index tuple (l1, l2, i1, i2) that subdivide an array into parts such that it contains max_length True values in mask

    l1 an l2 refer to the logical indices, similar as :func:`subdivide`, while i1, and i2 refer to the 'raw' indices, such that
    `np.sum(mask[i1:i2]) == logical_length` (except for the last element).
    """
    if logical_length is None:
        logical_length = np.asarray(mask).sum()
    raw_length = len(np.asarray(mask))
    if max_length is None:
        # Integer ceiling division: plain ``/`` yields a float on Python 3,
        # which would turn every logical start/end index into a float.
        max_length = (logical_length + parts - 1) // parts
    full_mask = mask
    logical_start = 0
    logical_end = min(logical_start + max_length, logical_length)
    # raw_offset(k) presumably returns the raw offset of the k-th set bit,
    # -1 when there are fewer set bits — TODO confirm against the mask API.
    raw_index = full_mask.raw_offset(1)
    assert raw_index != -1
    while logical_start < logical_length:
        # slice the mask from our offset till end
        mask = full_mask.view(raw_index, raw_length)
        # count how many raw elements we need to skip to get a logical chunk_size
        raw_offset = mask.raw_offset(logical_end - logical_start)
        assert raw_offset != -1
        next_raw_index = raw_index + raw_offset + 1
        yield logical_start, logical_end, raw_index, next_raw_index
        raw_index = next_raw_index
        logical_start = min(logical_start + max_length, logical_length)
        logical_end = min(logical_start + max_length, logical_length)
def submit_subdivide(thread_count, f, length, max_length):
    """Submit f(i1, i2) for every chunk of ``length`` to a fresh thread pool.

    Returns the list of futures, in chunk order.
    """
    pool = concurrent.futures.ThreadPoolExecutor(thread_count)
    return [pool.submit(f, start, stop)
            for start, stop in subdivide(length, max_length=max_length)]
def linspace_centers(start, stop, N):
    """Return the centers of N equal-width bins spanning [start, stop]."""
    bin_width = (stop - start) / float(N)
    return start + bin_width * (np.arange(N) + 0.5)
def multisum(a, axes):
    """nansum *a* over every axis in *axes* (axes numbered w.r.t. the original array)."""
    for n_removed, axis in enumerate(axes):
        # each earlier reduction shifts the remaining axis numbers down by one
        a = np.nansum(a, axis=axis - n_removed)
    return a
def disjoined(data):
    """Return the product of the 1d marginals of *data* (independence approximation)."""
    dim = len(data.shape)
    result = None
    for d in range(dim):
        # marginalize over every axis except d
        other_axes = [k for k in range(dim) if k != d]
        marginal = multisum(data, other_axes)
        # reshape to broadcast along axis d only
        shape = [1] * dim
        shape[d] = len(marginal)
        marginal = marginal.reshape(tuple(shape))
        result = marginal if result is None else result * marginal
    return result
def get_root_path():
    """Return the application root directory.

    When running frozen (pyinstaller/py2app) this is the executable's
    directory; otherwise the current working directory.
    """
    # Removed an unused local ``osname`` that shadowed the module-level global.
    if is_frozen:  # we are using pyinstaller or py2app
        return os.path.dirname(sys.argv[0])
    return os.path.abspath(".")
def os_open(document):
    """Open document by the default handler of the OS, could be a url opened by a browser, a text file by an editor etc"""
    system = platform.system().lower()
    if system == "darwin":
        os.system("open \"" + document + "\"")
    if system == "linux":
        os.system("xdg-open \"" + document + "\"&")
    if system == "windows":
        os.system("start \"" + document + "\"")
def filesize_format(value):
    """Format a byte count as a short human-readable string using binary units."""
    amount = float(value)
    for unit in ('bytes', 'KiB', 'MiB', 'GiB'):
        if amount < 1024.0:
            return "%3.1f%s" % (amount, unit)
        amount /= 1024.0
    # anything >= 1024 GiB is reported in TiB
    return "%3.1f%s" % (amount, 'TiB')
log_timer = True
class Timer(object):
    """Context manager that reports how long its body took to run.

    Output goes to ``logger.debug`` when a logger is supplied, otherwise to
    stdout; all output is suppressed when the module-level ``log_timer`` flag
    is false.
    """

    def __init__(self, name=None, logger=None):
        self.name = name
        self.logger = logger

    def __enter__(self):
        if log_timer:
            if self.logger:
                self.logger.debug("%s starting" % self.name)
            else:
                print('[%s starting]...' % self.name)
        self.tstart = time.time()

    def __exit__(self, exc_type, exc_value, tb):
        if log_timer:
            msg = "%s done, %ss elapsed" % (self.name, time.time() - self.tstart)
            if self.logger:
                self.logger.debug(msg)
            else:
                print(msg)
            if exc_type or exc_value or tb:
                print((exc_type, exc_value, tb))
        # never suppress exceptions raised in the body
        return False
def get_private_dir(subdir=None, *extra):
    """Return (and create if needed) vaex's per-user directory, optionally a subpath of it."""
    parts = [os.path.expanduser('~/.vaex')]
    if subdir:
        parts.append(subdir)
        parts.extend(extra)
    path = os.path.join(*parts)
    os.makedirs(path, exist_ok=True)
    return path
def make_list(sequence):
    """Return *sequence* as a plain Python list (ndarrays via .tolist())."""
    return sequence.tolist() if isinstance(sequence, np.ndarray) else list(sequence)
# from progressbar import AnimatedMarker, Bar, BouncingBar, Counter, ETA, \
# FileTransferSpeed, FormatLabel, Percentage, \
# ProgressBar, ReverseBar, RotatingMarker, \
# SimpleProgress, Timer, AdaptiveETA, AbsoluteETA, AdaptiveTransferSpeed
# from progressbar.widgets import TimeSensitiveWidgetBase, FormatWidgetMixin
class CpuUsage(progressbar.widgets.FormatWidgetMixin, progressbar.widgets.TimeSensitiveWidgetBase):
    """Progressbar widget showing the process' CPU usage percentage.

    Usage is (user + system CPU time) / wall time, both measured since the
    first time the widget is rendered.  Note: the module-level name
    ``progressbar`` is later rebound to a function, hence the body uses the
    ``progressbar_mod`` alias saved below the class.
    """
    def __init__(self, format='CPU Usage: %(cpu_usage)s%%', usage_format="% 5d"):
        super(CpuUsage, self).__init__(format=format)
        self.usage_format = usage_format
        # Baselines sampled on the first __call__ (None == not yet sampled).
        self.utime_0 = None
        self.stime_0 = None
        self.walltime_0 = None

    def __call__(self, progress, data):
        utime, stime, child_utime, child_stime, walltime = os.times()
        if self.utime_0 is None:
            self.utime_0 = utime
        if self.stime_0 is None:
            self.stime_0 = stime
        if self.walltime_0 is None:
            self.walltime_0 = walltime
        data["utime_0"] = self.utime_0
        data["stime_0"] = self.stime_0
        data["walltime_0"] = self.walltime_0
        # CPU time consumed vs wall-clock time elapsed since the baseline.
        delta_time = utime - self.utime_0 + stime - self.stime_0
        delta_walltime = walltime - self.walltime_0
        # print delta_time, delta_walltime, utime, self.utime_0, stime, self.stime_0
        if delta_walltime == 0:
            # No wall time elapsed yet; avoid a division by zero.
            data["cpu_usage"] = "---"
        else:
            cpu_usage = delta_time / (delta_walltime * 1.) * 100
            data["cpu_usage"] = self.usage_format % cpu_usage
        # utime0, stime0, child_utime0, child_stime0, walltime0 = os.times()
        return progressbar_mod.widgets.FormatWidgetMixin.__call__(self, progress, data)
progressbar_mod = progressbar
def _progressbar_progressbar2(type=None, name="processing", max_value=1):
    """Build and start a progressbar2 bar with percentage, ETA and CPU usage widgets."""
    bar = progressbar_mod.ProgressBar(
        widgets=[
            name,
            ': ', progressbar_mod.widgets.Percentage(),
            ' ', progressbar_mod.widgets.Bar(),
            ' ', progressbar_mod.widgets.ETA(),
            # ' ', progressbar_mod.widgets.AdaptiveETA(),
            ' ', CpuUsage(),
        ],
        max_value=max_value)
    bar.start()
    return bar
# FormatLabel('Processed: %(value)d lines (in: %(elapsed)s)')
def _progressbar_vaex(type=None, name="processing", max_value=1):
    """Create vaex's own console progress bar over the fixed fraction range 0..1."""
    import vaex.misc.progressbar as pb
    return pb.ProgressBar(0, 1)
def _progressbar_widget(type=None, name="processing", max_value=1):
    """Create vaex's notebook widget progress bar over the fixed fraction range 0..1."""
    import vaex.misc.progressbar as pb
    return pb.ProgressBarWidget(0, 1, name=name)
_progressbar_typemap = {}
_progressbar_typemap['progressbar2'] = _progressbar_progressbar2
_progressbar_typemap['vaex'] = _progressbar_vaex
_progressbar_typemap['widget'] = _progressbar_widget
def progressbar(type_name=None, title="processing", max_value=1):
    """Create a progress bar of the given type ('vaex' by default).

    Fix: ``max_value`` is now forwarded to the factory; it used to be
    silently dropped even though callers (e.g. progressbar_callable) pass it
    and all factories accept it.
    """
    type_name = type_name or 'vaex'
    return _progressbar_typemap[type_name](name=title, max_value=max_value)
def progressbar_widget():
    # Placeholder: widget-style bars are created via progressbar(type_name='widget').
    pass
class _progressbar(object):
    # Marker base class for the progress-callback wrappers below.
    pass
class _progressbar_wrapper(_progressbar):
    """Adapt a raw bar object to the callable progress protocol (fractions 0..1)."""

    def __init__(self, bar):
        self.bar = bar

    def __call__(self, fraction):
        bar = self.bar
        bar.update(fraction)
        if fraction == 1:
            # completion: close the underlying bar
            bar.finish()
        return True

    def status(self, name):
        self.bar.bla = name
class _progressbar_wrapper_sum(_progressbar):
    """Composite progress node.

    Its fraction is the mean of its children's fractions; updates are
    forwarded to an optional bar, an optional ``next`` callback and the
    parent node.  Returning False from any callback cancels the chain via
    ``oncancel``.
    """

    def __init__(self, children=None, next=None, bar=None, parent=None, name=None):
        self.next = next
        self.children = children or list()
        self.finished = False
        self.last_fraction = None
        self.fraction = 0
        self.bar = bar
        self.parent = parent
        self.name = name
        self.cancelled = False
        # Hook invoked when a callback in the chain returns False.
        self.oncancel = lambda: None

    def cancel(self):
        self.cancelled = True

    def __repr__(self):
        name = self.__class__.__module__ + "." + self.__class__.__name__
        return "<%s(name=%r)> instance at 0x%x" % (name, self.name, id(self))

    def add(self, name=None):
        # Create a child node; our fraction becomes the mean over all children.
        pb = _progressbar_wrapper_sum(parent=self, name=name)
        self.children.append(pb)
        return pb

    def add_task(self, task, name=None):
        # Attach a task: its progress signal drives the child node, and
        # cancelling the child cancels the task.
        pb = self.add(name)
        pb.oncancel = task.cancel
        task.signal_progress.connect(pb)
        if self.bar and hasattr(self.bar, 'add_child'):
            self.bar.add_child(pb, task, name)

    def __call__(self, fraction):
        if self.cancelled:
            return False
        # ignore fraction
        result = True
        if len(self.children) == 0:
            self.fraction = fraction
        else:
            self.fraction = sum([c.fraction for c in self.children]) / len(self.children)
        fraction = self.fraction
        if fraction != self.last_fraction:  # avoid too many calls
            if fraction == 1 and not self.finished:  # make sure we call finish only once
                self.finished = True
                if self.bar:
                    self.bar.finish()
            elif fraction != 1:
                if self.bar:
                    self.bar.update(fraction)
            if self.next:
                result = self.next(fraction)
            if self.parent:
                assert self in self.parent.children
                result = self.parent(None) in [None, True] and result  # fraction is not used anyway..
            if result is False:
                self.oncancel()
            self.last_fraction = fraction
        return result

    def status(self, name):
        pass
def progressbars(f=True, next=None, name=None):
    """Normalize *f* (wrapper/callable/bool/str/None) into a _progressbar_wrapper_sum."""
    if isinstance(f, _progressbar_wrapper_sum):
        return f
    if callable(f):
        # a plain callable acts as the 'next' callback, with no visible bar
        next = f
        f = False
    if f in [None, False]:
        return _progressbar_wrapper_sum(next=next, name=name)
    if f is True:
        return _progressbar_wrapper_sum(bar=progressbar(), next=next, name=name)
    if isinstance(f, six.string_types):
        # a string selects the bar type
        return _progressbar_wrapper_sum(bar=progressbar(f), next=next, name=name)
    return _progressbar_wrapper_sum(next=next, name=name)
def progressbar_callable(title="processing", max_value=1):
    """Return a callable progress wrapper around a freshly created bar."""
    return _progressbar_wrapper(progressbar(title=title, max_value=max_value))
def confirm_on_console(topic, msg):
    """Interactively ask the user to confirm *msg*; True for 'y', False for 'n'.

    Keeps prompting until one of the two answers is given.
    """
    print(topic)
    # raw_input only exists on Python 2; this file supports Python 3 as well,
    # where the builtin is called input.
    try:
        prompt = raw_input  # noqa: F821 (Python 2)
    except NameError:
        prompt = input  # Python 3
    while True:
        output = prompt(msg + ":[y/n]")
        if output.lower() == "y":
            return True
        if output.lower() == "n":
            return False
def yaml_dump(f, data):
    """Write *data* to stream *f* as human-readable, unicode-safe YAML."""
    yaml.safe_dump(data, f, allow_unicode=True, default_flow_style=False, encoding='utf-8')
def yaml_load(f):
    """Parse YAML from stream *f* with the safe loader (no arbitrary objects)."""
    return yaml.safe_load(f)
def write_json_or_yaml(file, data, fs_options=None):
    """Serialize *data* to *file* as JSON or YAML, chosen by file extension.

    Parameters
    ----------
    file : str or file-like
        Target path or open stream; '.json' is assumed when no path is known.
    data : object
        The serializable data to write.
    fs_options : dict, optional
        Filesystem options forwarded to vaex.file.

    Raises
    ------
    ValueError
        If the path extension is neither .json nor .yaml.
    """
    # Avoid the shared mutable default argument ({}) the old signature had.
    fs_options = fs_options or {}
    file, path = vaex.file.file_and_path(file, mode='w', fs_options=fs_options)
    try:
        if path:
            base, ext = os.path.splitext(path)
        else:
            ext = '.json'  # default
        if ext == ".json":
            json.dump(data, file, indent=2, cls=VaexJsonEncoder)
        elif ext == ".yaml":
            yaml_dump(file, data)
        else:
            raise ValueError("file should end in .json or .yaml (not %s)" % ext)
    finally:
        file.close()
def read_json_or_yaml(file, fs_options={}):
file, path = vaex.file.file_and_path(file, fs_options=fs_options)
try:
if path:
base, ext = os.path.splitext(path)
else:
ext = | |
#!/usr/bin/env python
####################################################################################################
# NAME
# <NAME> - contain graphical utility functions
#
# SYNOPSIS
# <NAME>
#
# AUTHOR
# Written by <NAME> (<EMAIL>).
#
# COPYRIGHT
# Copyright © 2013-2021 <NAME> <https://barras.io>.
# The MIT License (MIT) <https://opensource.org/licenses/MIT>.
####################################################################################################
import base64
import io
import itertools
import cv2
import matplotlib.cm as mcm
import matplotlib.colors as mcolors
import matplotlib.figure as mfigure
import matplotlib.ticker as mticker
import plotly.express as px
import plotly.graph_objs as go
import plotly.io as pio
import plotly.tools as ptools
from xhtml2pdf import pisa
import nutil.html as html
from nutil.stats import normal
from nutil.ts import *
####################################################################################################
# GUI CONSTANTS
####################################################################################################
__GUI_CONSTANTS___________________________________ = ''
# The default scale
DEFAULT_SCALE = 1 # the higher, the better quality
# The default width
DEFAULT_WIDTH = 660
# The default height
DEFAULT_HEIGHT = 933
# The default margin (left, right, bottom and top)
DEFAULT_MARGIN = dict(l=0, r=0, b=0, t=0) # the ratio of the margin to the width or height
# • GUI COLOR ######################################################################################
__GUI_COLOR_CONSTANTS_____________________________ = ''
TRANSPARENT = (0, 0, 0, 0)
##################################################
# The default colors
DEFAULT_COLORS = [
'#1F77B4', # muted blue
'#FF7F0E', # safety orange
'#2CA02C', # cooked asparagus green
'#D62728', # brick red
'#9467BD', # muted purple
'#8C564B', # chestnut brown
'#E377C2', # raspberry yogurt pink
'#7F7F7F', # middle gray
'#BCBD22', # curry yellow-green
'#17BECF' # blue-teal
]
# The default colors iterator
DEFAULT_COLORS_ITERATOR = itertools.cycle(DEFAULT_COLORS)
# The default background color
DEFAULT_BG_COLOR = TRANSPARENT
##################################################
RAINBOW_SCALE = mcm.get_cmap(name='rainbow')
RYG_SCALE = mcm.get_cmap(name='RdYlGn')
# • GUI FIGURE #####################################################################################
__GUI_FIGURE_CONSTANTS____________________________ = ''
# The default tick length
DEFAULT_TICK_LENGTH = 4
# The default tick direction
DEFAULT_TICK_DIRECTION = 'outside'
##################################################
MAP_PROJECTIONS = [
'equirectangular', 'mercator', 'orthographic', 'natural earth', 'kavrayskiy7', 'miller',
'robinson', 'eckert4', 'azimuthal equal area', 'azimuthal equidistant', 'conic equal area',
'conic conformal', 'conic equidistant', 'gnomonic', 'stereographic', 'mollweide', 'hammer',
'transverse mercator', 'albers usa', 'winkel tripel', 'aitoff', 'sinusoidal'
]
####################################################################################################
# GUI FUNCTIONS
####################################################################################################
# • GUI COLOR ######################################################################################
__GUI_COLOR_______________________________________ = ''
def get_alternate_colors(n, row_odd_color='white', row_even_color='lightgray'):
    """Return an alternating odd/even row color list covering at least n rows."""
    pair = [row_odd_color, row_even_color]
    return pair * ceil(n / 2)
def get_complementary_color(*args, r=0, g=0, b=0, alpha=1, scale=True):
    """Return the RGBA color complementary to the given color.

    Each channel c is mapped to min(r,g,b) + max(r,g,b) - c; alpha is kept.
    """
    red, green, blue, a = to_rgba(*args, r=r, g=g, b=b, alpha=alpha, scale=scale)
    lo_plus_hi = minimum(red, green, blue) + maximum(red, green, blue)
    return to_rgba_color([lo_plus_hi - c for c in (red, green, blue)], alpha=a,
                         scale=False)
def get_RYG(brightness='8'):
    """Return a red-to-green palette; '.' placeholders are filled with *brightness*."""
    templates = ('#E.0.0.', '#E..00.', '#E.E.0.', '#.0E.00', '#0...0.')
    return [t.replace('.', brightness) for t in templates]
##################################################
def to_color(value, alpha=1, color_scale=RYG_SCALE, normalize=False, scale=True):
    """Converts the specified value to a RGBA color using the specified alpha and color scale."""
    v = float(normal.cdf(value)) if normalize else value
    return to_rgba_color(color_scale(v), alpha=alpha, scale=scale)
def to_rgba(*args, r=0, g=0, b=0, alpha=1, scale=True):
    """Normalize a color (string, collection/tuple, or r/g/b/alpha keywords)
    into an (r, g, b, alpha) tuple.

    When ``scale`` is true and all channels are <= 1, channels are rescaled
    from the [0, 1] range to rounded 0-255 values.
    """
    if len(args) == 1:
        arg = args[0]
        if is_string(arg):
            if 'rgba' in arg:
                # 'rgba(...)' string: pull out the numeric components.
                arg = to_float(extract(arg, '[0-9\.]+'))
                if len(arg) == 3:
                    r, g, b = arg
                elif len(arg) == 4:
                    r, g, b, alpha = arg
            else:
                # Named/hex color; matplotlib resolves it to floats in [0, 1].
                r, g, b, alpha = mcolors.to_rgba(arg, alpha=alpha)
        elif is_collection(arg) or is_tuple(arg):
            if len(arg) == 3:
                r, g, b = arg
            elif len(arg) == 4:
                r, g, b, alpha = arg
    if scale and r <= 1 and g <= 1 and b <= 1:
        # NOTE(review): assumes all-channels <= 1 means a [0, 1] color; a true
        # 0-255 near-black color (every channel <= 1) would be rescaled too.
        r = round(r * 255)
        g = round(g * 255)
        b = round(b * 255)
    return r, g, b, alpha
def to_rgba_color(*args, r=0, g=0, b=0, alpha=1, scale=True):
    """Format the given color as an 'rgba(r, g, b, alpha)' string."""
    components = to_rgba(*args, r=r, g=g, b=b, alpha=alpha, scale=scale)
    return 'rgba' + par(collist(*components))
# • GUI FIGURE #####################################################################################
__GUI_FIGURE______________________________________ = ''
def get_label(data, show_date=False, show_name=True, transformation=None, yaxis=0):
    """Build a display label for *data*: '(yaxis) date-range name transformation'.

    Parts that are unavailable or disabled via the flags are left out.
    """
    if is_null(data):
        return ''
    # Only annotate the axis number for secondary axes (yaxis != 0).
    yaxis = '(' + str(yaxis) + ')' if yaxis != 0 else ''
    if show_date and is_time_series(data):
        # Derive a 'YYYY' or 'YYYY-YYYY' range from the first/last index entries.
        date_from = get_first(data.index)
        date_to = get_last(data.index)
        year_from = date_from.year if not is_null(date_from) else None
        year_to = date_to.year if not is_null(date_to) else None
        if is_any_null(year_from, year_to):
            date_range = ''
        elif year_from != year_to:
            date_range = collapse(year_from, '-', year_to)
        else:
            date_range = year_from
    else:
        date_range = ''
    if show_name:
        # For collections, use the first column/series name; otherwise the value itself.
        name = get_names(data)[0] if is_collection(data) else data
        name = str(name).title() if not is_null(name) else ''
    else:
        name = ''
    transformation = transformation.value.title() if not is_null(transformation) else ''
    return paste(yaxis, date_range, name, transformation)
##################################################
def create_figure(auto_size=True,
                  axis_color='black', axis_width=2,
                  bar_mode=None,
                  bg_color=DEFAULT_BG_COLOR,
                  grid_color='lightgray', grid_width=1,
                  label_color='black', label_size=None,
                  legend_bg_color=DEFAULT_BG_COLOR, legend_x=0.01, legend_y=0.99,
                  range_to_zero_x=False, range_to_zero_y=False, range_to_zero_y2=False,
                  show_grid_x=True, show_grid_y=True, show_grid_y2=True, show_spine=True,
                  show_title=True, show_zero_line=True,
                  tick_color='black', tick_direction=DEFAULT_TICK_DIRECTION,
                  tick_length=DEFAULT_TICK_LENGTH,
                  tick_number_x=None, tick_number_y=None, tick_number_y2=None,
                  tick_start_x=None, tick_start_y=None, tick_start_y2=None,
                  tick_step_x=None, tick_step_y=None, tick_step_y2=None,
                  tick_values_x=None, tick_values_y=None, tick_values_y2=None,
                  title=None, title_x=None, title_y=None, title_y2=None,
                  width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, margin=DEFAULT_MARGIN,
                  title=None, title_x=None, title_y=None, title_y2=None,
                  zero_line_color='darkgray', zero_line_width=2):
    """Create an empty plotly Figure and style it via update_layout.

    All keyword arguments are forwarded unchanged to :func:`update_layout`;
    see that function for their meaning (axes, grids, ticks, titles, legend,
    size and zero-line options).
    """
    fig = go.Figure()
    # Delegate all styling to update_layout so figures created here and
    # figures restyled later share one code path.
    update_layout(fig,
                  auto_size=auto_size,
                  axis_color=axis_color, axis_width=axis_width,
                  bar_mode=bar_mode,
                  bg_color=bg_color,
                  grid_color=grid_color, grid_width=grid_width,
                  label_color=label_color, label_size=label_size,
                  legend_bg_color=legend_bg_color, legend_x=legend_x, legend_y=legend_y,
                  range_to_zero_x=range_to_zero_x, range_to_zero_y=range_to_zero_y,
                  range_to_zero_y2=range_to_zero_y2,
                  show_grid_x=show_grid_x, show_grid_y=show_grid_y, show_grid_y2=show_grid_y2,
                  show_spine=show_spine, show_title=show_title, show_zero_line=show_zero_line,
                  tick_color=tick_color, tick_direction=tick_direction, tick_length=tick_length,
                  tick_number_x=tick_number_x, tick_number_y=tick_number_y,
                  tick_number_y2=tick_number_y2,
                  tick_start_x=tick_start_x, tick_start_y=tick_start_y, tick_start_y2=tick_start_y2,
                  tick_step_x=tick_step_x, tick_step_y=tick_step_y, tick_step_y2=tick_step_y2,
                  tick_values_x=tick_values_x, tick_values_y=tick_values_y,
                  tick_values_y2=tick_values_y2,
                  title=title, title_x=title_x, title_y=title_y, title_y2=title_y2,
                  width=width, height=height, margin=margin,
                  zero_line_color=zero_line_color, zero_line_width=zero_line_width)
    return fig
#########################
def create_choropleth_map(df, loc_col, label_col, loc_mode='ISO-3', label_name=None,
                          # Layout
                          title=None, dragmode=False, showframe=False,
                          colors=get_RYG(), range_color=None,
                          width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, margin=DEFAULT_MARGIN,
                          # Geos
                          lat=None, lon=None,
                          projection='miller',
                          range_mode='auto', lataxis_range=None, lonaxis_range=None,
                          resolution=50,
                          showcoastlines=True, coastlinecolor='Black',
                          showland=False, landcolor='LightGreen',
                          showocean=True, oceancolor='AliceBlue',
                          showlakes=False, lakecolor='Blue',
                          showrivers=False, rivercolor='Blue'):
    """Creates a choropleth map with the specified parameters."""
    # NOTE(review): the lat/lon parameters are accepted but hard-coded to None
    # in the px.choropleth call below — confirm whether they should be forwarded.
    fig = px.choropleth(data_frame=df,
                        lat=None, lon=None,
                        locations=loc_col, locationmode=loc_mode, projection=projection,
                        labels={label_col: label_name if not is_null(label_name) else label_col},
                        color=label_col, range_color=range_color,
                        color_continuous_scale=colors, color_discrete_sequence=colors)
    update_layout(fig, title=title, width=width, height=height, margin=margin)
    fig.update_layout(clickmode='event+select', dragmode=dragmode, hovermode='closest')
    if range_mode == 'auto':
        # Per-projection latitude windows that crop Antarctica/the far north
        # for a tighter default view.
        if is_null(projection) or projection in ['equirectangular', 'kavrayskiy7', 'sinusoidal']:
            lataxis_range = [-48, 63]
        elif projection == 'aitoff':
            lataxis_range = [-39, 63]
        elif projection == 'eckert4':
            lataxis_range = [-50, 59]
        elif projection == 'hammer':
            lataxis_range = [-42, 61]
        elif projection == 'mercator':
            lataxis_range = [-35, 71]
        elif projection == 'miller':
            lataxis_range = [-43, 68]
        elif projection == 'mollweide':
            lataxis_range = [-50, 61]
        elif projection == 'natural earth' or projection == 'robinson':
            lataxis_range = [-49, 62]
        elif projection == 'winkel tripel':
            lataxis_range = [-44, 64]
    fig.update_geos(
        lataxis_range=lataxis_range,
        lonaxis_range=lonaxis_range,
        resolution=resolution,
        showframe=showframe,
        showcoastlines=showcoastlines, coastlinecolor=coastlinecolor,
        showland=showland, landcolor=landcolor,
        showocean=showocean, oceancolor=oceancolor,
        showlakes=showlakes, lakecolor=lakecolor,
        showrivers=showrivers, rivercolor=rivercolor)
    return fig
def create_margin(x):
    """Creates a margin with the specified ratio to the width or height."""
    return {'l': x, 'r': x, 'b': x, 't': x}
##################################################
def draw(x, y=None, color=None, dash=None, fill='none', index=None, mode='lines', name=None,
         opacity=1, show_date=False, show_legend=True, show_name=True, size=4, width=2, yaxis=0):
    """Create a plotly Scatter trace.

    When *y* is omitted, *x* is taken as a frame/series: its index supplies
    the x values and its first column the y values.
    """
    if is_null(y):
        series = x
        x = series.index
        y = get_col(series)
    # Include the custom data in the hover text only when an index is given.
    if not is_null(index):
        hover_template = collapse('<b>%{customdata}</b><br />',
                                  '<b>x:</b> %{x}<br />',
                                  '<b>y:</b> %{y}')
    else:
        hover_template = collapse('<b>x:</b> %{x}<br />',
                                  '<b>y:</b> %{y}')
    if is_null(name):
        name = y if is_collection(y) else name
    # Only pass the styling relevant to the drawing mode.
    line = dict(color=color, dash=dash, width=width) if mode != 'markers' else None
    marker = dict(color=color, size=size) if mode != 'lines' else None
    return go.Scatter(x=x, y=y,
                      name=get_label(name, show_date=show_date, show_name=show_name, yaxis=yaxis),
                      customdata=index, hovertemplate=hover_template,
                      fill=fill,
                      mode=mode, line=line, marker=marker, opacity=opacity,
                      showlegend=show_legend,
                      yaxis='y' + str(1 if yaxis == 0 else yaxis))
def draw_ellipse(center, a, b, angle=0, color=None, dash=None, fill='none', index=None,
                 mode='lines', name=None, opacity=1, precision=100, show_date=False,
                 show_legend=True, show_name=True, size=4, width=2, yaxis=0):
    """Draw an ellipse (center, semi-axes a/b, rotated by *angle*) as a scatter trace."""
    xs, ys = create_ellipse(center, a, b, angle=angle, precision=precision)
    return draw(x=xs, y=ys, color=color, dash=dash, fill=fill, index=index, mode=mode, name=name,
                opacity=opacity, show_date=show_date, show_legend=show_legend, show_name=show_name,
                size=size, width=width, yaxis=yaxis)
#########################
def update_layout(fig,
                  auto_size=True,
                  axis_color='black', axis_width=2,
                  bar_mode=None,
                  bg_color=DEFAULT_BG_COLOR,
                  grid_color='lightgray', grid_width=1,
                  label_color='black', label_size=None,
                  legend_bg_color=DEFAULT_BG_COLOR, legend_x=0.01, legend_y=0.99,
                  range_to_zero_x=False, range_to_zero_y=False, range_to_zero_y2=False,
                  show_grid_x=True, show_grid_y=True, show_grid_y2=True, show_spine=True,
                  show_title=True, show_zero_line=True,
                  tick_color='black', tick_direction=DEFAULT_TICK_DIRECTION,
                  tick_length=DEFAULT_TICK_LENGTH,
                  tick_number_x=None, tick_number_y=None, tick_number_y2=None,
                  tick_start_x=None, tick_start_y=None, tick_start_y2=None,
                  tick_step_x=None, tick_step_y=None, tick_step_y2=None,
                  tick_values_x=None, tick_values_y=None, tick_values_y2=None,
                  title=None, title_x=None, title_y=None, title_y2=None,
                  width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, margin=DEFAULT_MARGIN,
                  zero_line_color='darkgray', zero_line_width=2):
    """Apply the full set of layout options to *fig* in four passes:
    plot (background/title/bar mode), axes, legend and size."""
    update_layout_plot(fig, auto_size=auto_size, bar_mode=bar_mode, bg_color=bg_color,
                       show_title=show_title, title=title)
    update_layout_axes(fig,
                       axis_color=axis_color, axis_width=axis_width,
                       grid_color=grid_color, grid_width=grid_width,
                       label_color=label_color, label_size=label_size,
                       range_to_zero_x=range_to_zero_x, range_to_zero_y=range_to_zero_y,
                       range_to_zero_y2=range_to_zero_y2,
                       show_grid_x=show_grid_x, show_grid_y=show_grid_y, show_grid_y2=show_grid_y2,
                       show_spine=show_spine, show_zero_line=show_zero_line,
                       tick_color=tick_color, tick_direction=tick_direction,
                       tick_length=tick_length,
                       tick_number_x=tick_number_x, tick_number_y=tick_number_y,
                       tick_number_y2=tick_number_y2,
                       tick_start_x=tick_start_x, tick_start_y=tick_start_y,
                       tick_start_y2=tick_start_y2,
                       tick_step_x=tick_step_x, tick_step_y=tick_step_y,
                       tick_step_y2=tick_step_y2,
                       tick_values_x=tick_values_x, tick_values_y=tick_values_y,
                       tick_values_y2=tick_values_y2,
                       title_x=title_x, title_y=title_y, title_y2=title_y2,
                       zero_line_color=zero_line_color, zero_line_width=zero_line_width)
    update_layout_legend(fig, bg_color=legend_bg_color, x=legend_x, y=legend_y)
    update_layout_size(fig, width=width, height=height, margin=margin)
def update_layout_plot(fig, auto_size=True, bar_mode=None, bg_color=DEFAULT_BG_COLOR,
                       show_title=True, title=None):
    """Apply background, title and bar-mode settings to a matplotlib or plotly figure."""
    if is_matplot(fig):
        for ax in fig.axes:
            if not is_null(bg_color):
                ax.set_facecolor(bg_color)
            # also clears the title when show_title is False
            if not is_null(title) or not show_title:
                ax.set_title(title if show_title else None)
    elif is_plotly(fig):
        bg_color = to_rgba_color(bg_color)
        if not is_null(title) or not show_title:
            fig.update_layout(title=html.b(title) if show_title else None)
        if not is_null(bar_mode):
            fig.update_layout(barmode=bar_mode)
        fig.update_layout(autosize=auto_size,
                          paper_bgcolor=bg_color,
                          plot_bgcolor=bg_color)
def update_layout_axes(fig,
axis_color='black', axis_width=2,
grid_color='lightgray', grid_width=1,
label_color='black', label_size=None,
range_to_zero_x=False, range_to_zero_y=False, range_to_zero_y2=False,
scale_ratio_y=None, scale_ratio_y2=None,
show_grid_x=True, show_grid_y=True, show_grid_y2=True, show_spine=True,
show_zero_line=True,
tick_color='black', tick_direction=DEFAULT_TICK_DIRECTION,
tick_length=DEFAULT_TICK_LENGTH,
tick_number_x=None, tick_number_y=None, tick_number_y2=None,
tick_start_x=None, tick_start_y=None, tick_start_y2=None,
tick_step_x=None, tick_step_y=None, tick_step_y2=None,
tick_values_x=None, tick_values_y=None, tick_values_y2=None,
title_x=None, title_y=None, title_y2=None,
zero_line_color='darkgray', zero_line_width=2):
if is_matplot(fig):
for ax in fig.axes:
# Set the titles
# - Horizontal axis
if not is_null(title_x):
ax.set_xlabel(title_x)
# - Vertical axis
if not is_null(title_y):
ax.set_ylabel(title_y)
# Set the spines
for _, spine in ax.spines.items():
spine.set_color(axis_color)
spine.set_linewidth(axis_width)
spine.set_visible(show_spine)
# Set the grids
# - Horizontal axis
if show_grid_x:
ax.grid(axis='x', b=True, color=grid_color, linestyle='-', linewidth=grid_width)
else:
ax.grid(axis='x', b=False)
# - Vertical axis
if show_grid_y:
ax.grid(axis='y', b=True, color=grid_color, linestyle='-', linewidth=grid_width)
else:
ax.grid(axis='y', b=False)
# Set the scale
if not is_null(scale_ratio_y):
ax.axes.set_aspect('equal')
# Set the ticks
ax.tick_params(color=tick_color,
direction='out' if tick_direction == 'outside' else 'in',
labelcolor=label_color, labelsize=label_size,
length=tick_length, width=grid_width)
# - Horizontal axis
if range_to_zero_x:
ax.set_xlim(left=0)
if not is_null(tick_number_x):
ax.xaxis.set_major_locator(mticker.MaxNLocator(tick_number_x))
elif not is_null(tick_start_x):
ax.xaxis.set_major_locator(mticker.IndexLocator(base=tick_step_x,
offset=tick_start_x))
elif not is_null(tick_step_x):
ax.xaxis.set_major_locator(mticker.MultipleLocator(base=tick_step_x))
elif not is_null(tick_values_x):
ax.set_xticks(tick_values_x)
# - Vertical axis
if range_to_zero_y:
ax.set_ylim(bottom=0)
if not is_null(tick_number_y):
ax.yaxis.set_major_locator(mticker.MaxNLocator(tick_number_y))
elif not is_null(tick_start_y):
ax.yaxis.set_major_locator(mticker.IndexLocator(base=tick_step_y,
offset=tick_start_y))
elif not is_null(tick_step_y):
ax.yaxis.set_major_locator(mticker.MultipleLocator(base=tick_step_y))
elif not is_null(tick_values_y):
ax.set_yticks(tick_values_y)
elif is_plotly(fig):
axis_color = to_rgba_color(axis_color)
grid_color = to_rgba_color(grid_color)
label_color = to_rgba_color(label_color)
tick_color = to_rgba_color(tick_color)
zero_line_color = to_rgba_color(zero_line_color)
# Set the titles
# - Horizontal axis
if not is_null(title_x):
fig.update_layout(
xaxis=dict(title=dict(text=title_x,
font_color=label_color, font_size=label_size)))
# - Vertical axis
if not is_null(title_y):
fig.update_layout(
yaxis=dict(title=dict(text=title_y,
font_color=label_color, font_size=label_size)))
# - Second vertical axis
if not is_null(title_y2):
fig.update_layout(
yaxis2=dict(title=dict(text=title_y2,
font_color=label_color, font_size=label_size)))
# Set the scale
if not is_null(scale_ratio_y):
fig.update_layout(yaxis=dict(scaleanchor='x', scaleratio=scale_ratio_y))
if not is_null(scale_ratio_y2):
fig.update_layout(yaxis=dict(scaleanchor='x', scaleratio=scale_ratio_y2))
fig.update_layout(
# - Horizontal axis
xaxis=dict(
# Set the spine
showline=show_spine, linecolor=axis_color, linewidth=axis_width,
# Set the grid
showgrid=show_grid_x, gridcolor=grid_color, gridwidth=grid_width,
# Set the range
rangemode='tozero' if range_to_zero_x else None,
# Set the ticks
tickmode='array' if not is_null(tick_values_x)
else 'linear' if not is_null(tick_start_x) or not is_null(tick_step_x)
else 'auto', nticks=tick_number_x, tick0=tick_start_x, dtick=tick_step_x,
tickvals=tick_values_x,
tickcolor=tick_color, ticks=tick_direction, ticklen=tick_length,
tickwidth=grid_width,
# Set the zero line
zeroline=show_zero_line, zerolinecolor=zero_line_color,
zerolinewidth=zero_line_width),
# - Vertical axis
yaxis=dict(
# Set the spine
showline=show_spine, linecolor=axis_color, linewidth=axis_width,
# Set the grid
showgrid=show_grid_y, gridcolor=grid_color, gridwidth=grid_width,
# Set the range
rangemode='tozero' if range_to_zero_y else None,
# Set the ticks
tickmode='array' if not is_null(tick_values_y)
else 'linear' if not is_null(tick_start_y) or not is_null(tick_step_y)
else 'auto', nticks=tick_number_y, tick0=tick_start_y, dtick=tick_step_y,
tickvals=tick_values_y,
tickcolor=tick_color, ticks=tick_direction, ticklen=tick_length,
tickwidth=grid_width,
# Set the zero line
zeroline=show_zero_line, zerolinecolor=zero_line_color,
zerolinewidth=zero_line_width),
# - Second vertical axis
yaxis2=dict(
# Set the spine
showline=show_spine, linecolor=axis_color, | |
<gh_stars>0
# ------------------------------------------------------------------------------------------------ #
# MIT License #
# #
# Copyright (c) 2020, Microsoft Corporation #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software #
# and associated documentation files (the "Software"), to deal in the Software without #
# restriction, including without limitation the rights to use, copy, modify, merge, publish, #
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all copies or #
# substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING #
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, #
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #
# ------------------------------------------------------------------------------------------------ #
import os
import time
import logging
from importlib import reload, import_module
from types import ModuleType
import jax.numpy as jnp
import numpy as onp
import lz4.frame
import cloudpickle as pickle
from PIL import Image
__all__ = (
'docstring',
'enable_logging',
'dump',
'dumps',
'load',
'loads',
'generate_gif',
'get_env_attr',
'getattr_safe',
'has_env_attr',
'is_policy',
'is_qfunction',
'is_reward_function',
'is_stochastic',
'is_transition_model',
'is_vfunction',
'pretty_repr',
'pretty_print',
'reload_recursive',
'render_episode',
)
def docstring(obj):
    r'''
    Decorator factory: copy ``obj.__doc__`` onto the decorated object.

    Parameters
    ----------
    obj : object
        The object whose docstring you wish to copy onto the wrapped object.

    Examples
    --------
    Instead of assigning ``g.__doc__ = f.__doc__`` by hand, write:

    >>> @docstring(f)
    ... def g(x):
    ...     return 13 - x
    ...
    '''
    def copy_doc(func):
        func.__doc__ = obj.__doc__
        return func
    return copy_doc
def enable_logging(name=None, level=logging.INFO, output_filepath=None, output_level=None):
    r"""
    Enable logging output.

    Configures the root logger via :func:`logging.basicConfig` and optionally
    attaches a :py:class:`FileHandler <logging.FileHandler>`.

    Parameters
    ----------
    name : str, optional
        Name of the process that is logging; it is baked into the log format
        as a prefix. May be anything you like.
    level : int, optional
        Logging level for the default :py:class:`StreamHandler
        <logging.StreamHandler>`; defaults to ``logging.INFO`` (20). Use
        ``logging.DEBUG`` for more verbose output.
    output_filepath : str, optional
        When provided, a :py:class:`FileHandler <logging.FileHandler>` writing
        to this path is added to the root logger (parent directories are
        created as needed).
    output_level : int, optional
        Logging level for the file handler; defaults to ``level``.
    """
    base_fmt = '%(name)s|%(levelname)s] %(message)s'
    fmt = ('[' if name is None else f'[{name}|') + base_fmt
    logging.basicConfig(level=level, format=fmt)
    if output_filepath is None:
        return
    # Make sure the log file's directory exists before opening the handler.
    os.makedirs(os.path.dirname(output_filepath) or '.', exist_ok=True)
    file_handler = logging.FileHandler(output_filepath)
    file_handler.setLevel(output_level if output_level is not None else level)
    logging.getLogger('').addHandler(file_handler)
def dump(obj, filepath):
    r"""
    Save an object to disk as an lz4-compressed cloudpickle file.

    Parameters
    ----------
    obj : object
        Any python object.
    filepath : str
        Where to store the instance; missing parent directories are created.

    Warning
    -------
    References between objects are only preserved when they are stored as
    part of a single object, for example:

    .. code:: python

        a = [13]
        b = {'a': a}          # b has a reference to a

        dump((a, b), 'ab.pkl.lz4')      # references preserved
        a_new, b_new = load('ab.pkl.lz4')

        dump(a, 'a.pkl.lz4')            # references NOT preserved
        dump(b, 'b.pkl.lz4')            # (a and b deserialize independently)

    Therefore, the safest way to create checkpoints is to store the entire
    state as a single object like a dict or a tuple.
    """
    parent_dir = os.path.dirname(filepath)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    payload = pickle.dumps(obj)
    with lz4.frame.open(filepath, 'wb') as fh:
        fh.write(payload)
def dumps(obj):
    r"""
    Serialize an object to an lz4-compressed pickle byte-string.

    Parameters
    ----------
    obj : object
        Any python object.

    Returns
    -------
    s : bytes
        An lz4-compressed pickle byte-string.

    Warning
    -------
    References between objects are only preserved when they are serialized as
    part of a single object:

    .. code:: python

        a = [13]
        b = {'a': a}          # b has a reference to a

        s = dumps((a, b))               # references preserved
        a_new, b_new = loads(s)

        s_a, s_b = dumps(a), dumps(b)   # references NOT preserved

    Therefore, the safest way to create checkpoints is to store the entire
    state as a single object like a dict or a tuple.
    """
    raw = pickle.dumps(obj)
    return lz4.frame.compress(raw)
def load(filepath):
    r"""
    Load an object from a file created by :func:`dump(obj, filepath) <dump>`.

    Parameters
    ----------
    filepath : str
        File to load.
    """
    with lz4.frame.open(filepath, 'rb') as fh:
        raw = fh.read()
    return pickle.loads(raw)
def loads(s):
    r"""
    Load an object from a byte-string created by :func:`dumps(obj) <dumps>`.

    Parameters
    ----------
    s : bytes
        An lz4-compressed pickle byte-string.
    """
    raw = lz4.frame.decompress(s)
    return pickle.loads(raw)
def _reload(module, reload_all, reloaded, logger):
    # Depth-first reload helper for `reload_recursive`: reloads referenced
    # (sub)modules before the module that imports them, tracking visited
    # module names in `reloaded` to avoid duplicate work and import cycles.
    if isinstance(module, ModuleType):
        module_name = module.__name__
    elif isinstance(module, str):
        # Accept a dotted module name and resolve it to the module object.
        module_name, module = module, import_module(module)
    else:
        raise TypeError(
            "'module' must be either a module or str; "
            f"got: {module.__class__.__name__}")
    # Recurse into every module-typed attribute that qualifies.
    for attr_name in dir(module):
        attr = getattr(module, attr_name)
        check = (
            # is it a module?
            isinstance(attr, ModuleType)
            # has it already been reloaded?
            and attr.__name__ not in reloaded
            # is it a proper submodule? (or just reload all)
            and (reload_all or attr.__name__.startswith(module_name))
        )
        if check:
            _reload(attr, reload_all, reloaded, logger)
    # Children are done; now reload this module and mark it visited.
    logger.debug(f"reloading module: {module_name}")
    reload(module)
    reloaded.add(module_name)
def reload_recursive(module, reload_external_modules=False):
    """
    Recursively reload a module, dependencies first.

    Parameters
    ----------
    module : ModuleType or str
        The module (or its dotted name) to reload.
    reload_external_modules : bool, optional
        Whether to also reload referenced modules that are not submodules of
        ``module``.
    """
    log = logging.getLogger('coax.utils.reload_recursive')
    _reload(module, reload_external_modules, set(), log)
def render_episode(env, policy=None, step_delay_ms=0):
    r"""
    Play a single episode, calling ``env.render()`` at every time step.

    Parameters
    ----------
    env : gym environment
        A gym environment (a ``TrainMonitor`` wrapper is stripped first).
    policy : callable, optional
        Maps a state to an action, ``a = policy(s)``. When omitted, random
        actions are sampled via ``a = env.action_space.sample()``.
    step_delay_ms : non-negative float
        Milliseconds to wait between consecutive time steps; useful to slow
        down the rendering.
    """
    from ..wrappers import TrainMonitor
    if isinstance(env, TrainMonitor):
        env = env.env  # unwrap to strip off TrainMonitor
    delay = step_delay_ms / 1e3
    s = env.reset()
    env.render()
    for _ in range(int(1e9)):  # effectively "until the episode ends"
        if policy is None:
            a = env.action_space.sample()
        else:
            a = policy(s)
        s_next, r, done, info = env.step(a)
        env.render()
        time.sleep(delay)
        if done:
            break
        s = s_next
    time.sleep(5 * delay)  # linger a little on the final frame
def has_env_attr(env, attr, max_depth=100):
    r"""
    Tell whether a potentially wrapped environment exposes a given attribute.

    Parameters
    ----------
    env : gym environment
        A potentially wrapped environment.
    attr : str
        The attribute name.
    max_depth : positive int, optional
        The maximum number of wrapper layers to traverse.
    """
    node = env
    for _ in range(max_depth):
        if hasattr(node, attr):
            return True
        if not hasattr(node, 'env'):
            return False
        node = node.env  # descend one wrapper layer
    return False
def get_env_attr(env, attr, default='__ERROR__', max_depth=100):
    r"""
    Get the given attribute from a potentially wrapped environment.

    Wrappers are traversed from the outside in and the search stops at the
    first match, so an identically named attribute on an inner wrapper may
    be shadowed; that situation is *not* resolved by this function.

    Parameters
    ----------
    env : gym environment
        A potentially wrapped environment.
    attr : str
        The attribute name.
    default : object, optional
        Returned when the attribute is absent; when left at its sentinel
        value an :class:`AttributeError` is raised instead.
    max_depth : positive int, optional
        The maximum number of wrapper layers to traverse.
    """
    node, depth = env, 0
    while depth < max_depth:
        if hasattr(node, attr):
            return getattr(node, attr)
        if not hasattr(node, 'env'):
            break
        node = node.env  # descend one wrapper layer
        depth += 1
    if default == '__ERROR__':
        raise AttributeError("env is missing attribute: {}".format(attr))
    return default
def generate_gif(env, filepath, policy=None, | |
# <gh_stars>0  -- scraper artifact carried over from the source corpus; not Python code
from skimage import measure
from skimage.segmentation import clear_border
from scipy.stats import skew
from scipy.stats import kurtosis as kurto
from scipy.stats import mode as mod
from scipy import stats
from operator import itemgetter
from bfio.bfio import BioReader
import argparse
import logging
import os
import fnmatch
import difflib
import bioformats
import math
import javabridge as jutil
import numpy as np
import pandas as pd
# Initialize the logger
# Console output via basicConfig; timestamps use a day-month-year short form.
# The module logs under the fixed name "main" at INFO level.
logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
                    datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger("main")
logger.setLevel(logging.INFO)
def list_file(img_directory):
    """Collect every .ome.tif file below a directory.

    Args:
        img_directory (str): Path to the directory containing the input images.

    Returns:
        list: Full paths of all ``*.ome.tif`` files found while walking the
        directory tree (in ``os.walk`` order).
    """
    matches = []
    for dirpath, _dirnames, files in os.walk(img_directory):
        for file_name in fnmatch.filter(files, '*.ome.tif'):
            matches.append(os.path.join(dirpath, file_name))
    return matches
def read(img_file):
    """Read a .ome.tif image using BioReader.

    Args:
        img_file (str): Path to a single .ome.tif file.

    Returns:
        tuple: ``(image, unit)`` — the squeezed array of the first channel and
        the physical-size unit embedded in the metadata (may be ``None`` when
        absent).
    """
    reader = BioReader(img_file)
    # Load only the first channel.
    first_channel = reader.read_image(C=[0])
    image = np.squeeze(first_channel)
    # Element 1 of physical_size_y() holds the embedded unit
    # (assumes the bfio (value, unit) return convention — TODO confirm).
    unit = reader.physical_size_y()[1]
    return image, unit
def box_border_search(label_image, boxsize=3):
    """Get perimeter pixels of object for calculating neighbors and feret diameter memory efficiently.
    Args:
        label_image (ndarray): Labeled image array.
        boxsize (int): Box size value.
    Returns:
        An array containing the perimeter pixels of the object n.
    """
    #Get image shape values
    height, width = label_image.shape
    #Get boxsize values
    floor_offset = math.floor(boxsize / 2)
    ceil_offset = math.ceil(boxsize / 2)
    #Create the integral image (summed-area table): each boxsize x boxsize
    #neighborhood sum can then be read with just four lookups.
    int_image = np.zeros((height + 1, width + 1))
    int_image[1:, 1:] = np.cumsum(np.cumsum(np.double(label_image), 0), 1)
    int_image_transpose = int_image.T
    int_image_int = int_image_transpose.astype(int)
    del int_image, int_image_transpose
    #Create indices for the original image
    height_sequence = height - (boxsize - 1)
    width_sequence = width - (boxsize - 1)
    width_boxsize = np.linspace(0, width - boxsize, height_sequence)
    height_boxsize = np.linspace(0, height - boxsize, width_sequence)
    columns, rows = np.meshgrid(width_boxsize, height_boxsize)
    columns_flat = columns.flatten(order = 'F')
    columns_reshape = columns_flat.reshape(-1, 1)
    rows_flat = rows.flatten(order = 'F')
    rows_reshape = rows_flat.reshape(-1, 1)
    #Upper left value
    upper_left = (height + 1) * columns_reshape + rows_reshape
    upper_left_int = upper_left.astype(int)
    #Upper right value
    upper_right = upper_left_int + (boxsize) * (height + 1)
    upper_right_int = upper_right.astype(int)
    #Lower left value
    lower_left = upper_left + boxsize
    lower_left_int = lower_left.astype(int)
    #Lower right value
    lower_right = upper_right_int + boxsize
    lower_right_int = lower_right.astype(int)
    del height_sequence, width_sequence, width_boxsize, height_boxsize, columns, columns_flat, rows, rows_flat, columns_reshape, rows_reshape, upper_right, lower_left, upper_left, lower_right
    #Get the sum of local neighborhood defined by boxSize
    #(classic four-corner evaluation of the integral image)
    int_image_flat = int_image_int.flatten(order = 'F')
    int_image_flat_transpose = int_image_flat.T
    neighborvals = (int_image_flat_transpose[upper_left_int]
                    + int_image_flat_transpose[lower_right_int]
                    - int_image_flat_transpose[upper_right_int]
                    - int_image_flat_transpose[lower_left_int])
    del lower_left_int, lower_right_int, upper_right_int, upper_left_int, int_image_flat_transpose, int_image_flat, int_image_int
    #Divide the pixel averages by the pixel value
    #NOTE(review): double_image can contain zeros (background), so this
    #division yields NaN/inf there; those entries never equal `thresh` below
    #and therefore get flagged — presumably intentional, but worth confirming.
    reshape_vals = np.reshape(neighborvals, (height - 2 * floor_offset, width - 2 * floor_offset))
    double_image = label_image[ceil_offset - 1: -floor_offset, ceil_offset - 1: -floor_offset]
    pix_mask = reshape_vals / double_image
    pad = np.pad(pix_mask, ((floor_offset, floor_offset), (floor_offset, floor_offset)), mode='constant')
    thresh = boxsize * boxsize
    del neighborvals, reshape_vals, ceil_offset, double_image, pix_mask, floor_offset
    #Get perimeter of the object
    pad_array = np.array(pad)
    pad_flat = pad_array.flatten(order = 'F')
    #Get perimeter indices: an interior pixel of an object has a full
    #neighborhood of the same label, giving ratio == boxsize*boxsize exactly.
    perimeter_indices = np.where(pad_flat != thresh)
    perimeter_indices_array = np.asarray(perimeter_indices)
    perimeter_indices_reshape = perimeter_indices_array.reshape(-1, 1)
    perimeter_zeros = np.zeros(label_image.shape)
    perimeter_int = perimeter_zeros.astype(int)
    perimeter_flat = perimeter_int.flatten(order = 'F')
    image_flat = label_image.flatten(order = 'F')
    #Calculate perimeter
    perimeter_flat[perimeter_indices_reshape] = image_flat[perimeter_indices_reshape]
    perimeter_reshape = perimeter_flat.reshape(height, width)
    perimeter_transpose = perimeter_reshape.T
    del pad_array, pad_flat, thresh, perimeter_indices, perimeter_indices_array, perimeter_zeros, perimeter_int, image_flat, perimeter_indices_reshape, perimeter_flat, perimeter_reshape
    return perimeter_transpose
def neighbors_find(lbl_img, boxsize, pixeldistance):
    """Calculate the number of objects within d pixels of object n.
    Args:
        lbl_image (ndarray): Labeled image array.
        boxsize (int): Box size value.
        pixeldistance (int): Pixel distance value.
    Returns:
        An array showing the number of neighbors touching the object for each object in labeled image.
    Note:
        Number_of_Neighbors = neighbors_find(label_image, boxsize, pixeldistance=None)
        Computes the number of objects within 5 pixels of each object.
    """
    #Get perimeter pixels
    #NOTE(review): boxsize is hard-coded to 3 here, so the `boxsize`
    #parameter of this function is effectively unused — confirm intended.
    obj_edges = box_border_search(lbl_img, boxsize=3)
    #Get the height and width of the labeled image
    height,width = obj_edges.shape
    #Generate number of samples for creating numeric sequence
    num_sequence = (2 * pixeldistance) + 1
    pixel_distance_range = np.linspace(-pixeldistance, pixeldistance, num_sequence)
    #Create a rectangular grid out of an array of pixel_distance_range and an array of pixel_distance_range1 values
    column_index, row_index = np.meshgrid(pixel_distance_range, pixel_distance_range)
    #Convert to single column vector
    column_index_transpose = column_index.T
    row_index_transpose = row_index.T
    column_index_reshape = column_index_transpose.reshape(-1, 1)
    row_index_reshape = row_index_transpose.reshape(-1, 1)
    column_index_int = column_index_reshape.astype(int)
    row_index_int = row_index_reshape.astype(int)
    del column_index_transpose, row_index_transpose, column_index_reshape, row_index_reshape, row_index, column_index, pixel_distance_range
    #Generate pixel neighborhood reference (linear offsets in column-major
    #order; the zero offset, i.e. the pixel itself, is removed)
    neighboroffsets = column_index_int * height + row_index_int
    neighboroffsets = neighboroffsets[neighboroffsets != 0]
    neighboroffsets = neighboroffsets.reshape(-1, 1)
    #Get inscribed image linear indices:
    width_sequence = width - (2 * pixeldistance)
    height_sequence = height - (2 * pixeldistance)
    columns_range = np.linspace(pixeldistance, width - pixeldistance - 1, width_sequence)
    rows_range = np.linspace(pixeldistance, height - pixeldistance - 1, height_sequence)
    columns, rows = np.meshgrid(columns_range, rows_range)
    columns_flat = columns.flatten(order = 'F')
    columns_reshape = columns_flat.reshape(-1, 1)
    rows_flat = rows.flatten(order = 'F')
    rows_reshape = rows_flat.reshape(-1, 1)
    linear_index = height * columns_reshape + rows_reshape
    linear_index_int = linear_index.astype(int)
    del columns_flat, rows, rows_flat, linear_index, columns_reshape, rows_reshape
    #Consider indices that contain objects
    image_flatten = obj_edges.flatten(order = 'F')
    mask = image_flatten[linear_index_int]>0
    linear_index_mask = linear_index_int[mask]
    linear_index_reshape = linear_index_mask.reshape(-1, 1)
    #Get indices of neighbor pixels
    neighbor_index = (neighboroffsets + linear_index_reshape.T)
    #Get values of neighbor pixels
    neighborvals = image_flatten[neighbor_index]
    del linear_index_int, mask, neighboroffsets, linear_index_reshape, neighbor_index
    #Sort pixels by object
    objnum = image_flatten[linear_index_mask]
    objnum_reshape = objnum.reshape(-1, 1)
    index = list(range(len(objnum_reshape)))
    index = np.asarray(index).reshape(objnum.shape)
    stack_index_objnum = np.column_stack((index, objnum))
    sort_index_objnum = sorted(stack_index_objnum, key = itemgetter(1))
    index_objnum_array = np.asarray(sort_index_objnum)
    index_split = index_objnum_array[:, 0]
    objnum_split = index_objnum_array[:, 1]
    index_reshape = np.asarray(index_split).reshape(-1, 1)
    objnum_reshape = np.asarray(objnum_split).reshape(-1, 1)
    del image_flatten, linear_index_mask, objnum, stack_index_objnum, sort_index_objnum, index_split, objnum_split,index
    #Find object index boundaries (1s mark where the sorted label changes)
    difference_objnum = np.diff(objnum_reshape, axis=0)
    stack_objnum = np.vstack((1, difference_objnum, 1))
    objbounds = np.where(stack_objnum)
    objbounds_array = np.asarray(objbounds)
    objbounds_split = objbounds_array[0, :]
    objbounds_reshape = objbounds_split.reshape(-1, 1)
    del objbounds_split, objnum_reshape, difference_objnum, stack_objnum, objbounds, objbounds_array
    objneighbors = []
    #Get border objects: for each object, collect the unique labels seen in
    #the neighborhoods of all of its perimeter pixels.
    for obj in range(len(objbounds_reshape) - 1):
        allvals = neighborvals[:, index_reshape[np.arange(objbounds_reshape[obj], objbounds_reshape[obj + 1])]]
        sortedvals = np.sort(allvals.ravel())
        sortedvals_reshape = sortedvals.reshape(-1, 1)
        difference_sortedvals = np.diff(sortedvals_reshape, axis=0)
        difference_sortedvals_flat = difference_sortedvals.flatten(order = 'C')
        difference_sortedvals_stack = np.hstack((1, difference_sortedvals_flat))
        uniqueindices = np.where(difference_sortedvals_stack)
        uniqueindices_array = np.asarray(uniqueindices)
        uniqueindices_transpose = uniqueindices_array.T
        obj_neighbor = sortedvals_reshape[uniqueindices_transpose]
        obj_neighbor_flat = obj_neighbor.flatten(order = 'C')
        objneighbors.append(obj_neighbor_flat)
        del obj_neighbor_flat, allvals, sortedvals, difference_sortedvals, difference_sortedvals_flat, difference_sortedvals_stack, uniqueindices, uniqueindices_array, uniqueindices_transpose, obj_neighbor
    objneighbors_array = np.asarray(objneighbors)
    del objbounds_reshape, neighborvals, index_reshape
    numneighbors = []
    #NOTE(review): this re-initialization is dead code — nothing appends to
    #`objneighbors` after this point.
    objneighbors = []
    #Get the number of neighbor objects and its label
    #(the -1 presumably discounts the object's own label from its
    #unique-neighbor list — confirm)
    for neigh in objneighbors_array:
        len_neighbor = len(neigh) - 1
        numneighbors.append(len_neighbor)
    numneighbors_arr = np.asarray(numneighbors)
    numneighbors_array = numneighbors_arr.reshape(-1, 1)
    return numneighbors_array
def feret_diameter(lbl_img, boxsize, thetastart, thetastop):
"""Calculate the maximum caliper diamter and minimum caliper diameter of an object at angle(1-180degrees).
Args:
lbl_image (ndarray): Labeled image array.
boxsize (int): Box size value.
thetastart (int): Angle start value by default it is 1.
thetastop (int): Angle stop value by default it is 180.
Returns:
An array with feret diameters of the corresponding objects at each of the angles in theta.
"""
counts_scalar_copy=None
#Convert to radians
theta = np.arange(thetastart, thetastop + 1)
theta = np.asarray(theta)
theta = np.radians(theta)
#Get perimeter of objects
obj_edges = box_border_search(lbl_img, boxsize=3)
#Get indices and label of all pixels
obj_edges_flat = obj_edges.flatten(order = 'F')
obj_edges_reshape = obj_edges_flat.reshape(-1, 1)
objnum = obj_edges_reshape[obj_edges_reshape != 0]
obj_edges_transpose = obj_edges.T
positionx = np.where(obj_edges_transpose)[0]
positionx_reshape = positionx.reshape(-1, 1)
positiony = np.where(obj_edges_transpose)[1]
positiony_reshape = positiony.reshape(-1, 1)
index = list(range(len(objnum)))
index = np.asarray(index).reshape(objnum.shape)
stack_index_objnum = np.column_stack((index, objnum))
del obj_edges_flat, obj_edges_reshape, objnum, index, obj_edges, positionx, obj_edges_transpose, positiony
#Sort pixels by label
sort_index_objnum = sorted(stack_index_objnum, key=itemgetter(1))
index_objnum_array = np.asarray(sort_index_objnum)
index_split = index_objnum_array[:, 0]
objnum_split = index_objnum_array[:, 1]
positionx_index = positionx_reshape[index_split]
positiony_index = positiony_reshape[index_split]
del positiony_reshape, index_split, stack_index_objnum, sort_index_objnum, index_objnum_array, positionx_reshape
#Get number of pixels for | |
<= 0)
# Machine-generated Pyomo constraint definitions (fragment; the surrounding
# model setup and the first/last statements of this block lie outside this
# view).  Every constraint has the linear form  -x_i + x_j - x_k <= 0  over
# consecutive index groups — do not edit by hand; regenerate from the model
# source instead.
m.c1309 = Constraint(expr= - m.x131 + m.x132 - m.x156 <= 0)
m.c1310 = Constraint(expr= - m.x131 + m.x133 - m.x157 <= 0)
m.c1311 = Constraint(expr= - m.x131 + m.x134 - m.x158 <= 0)
m.c1312 = Constraint(expr= - m.x131 + m.x135 - m.x159 <= 0)
m.c1313 = Constraint(expr= - m.x131 + m.x136 - m.x160 <= 0)
m.c1314 = Constraint(expr= - m.x131 + m.x137 - m.x161 <= 0)
m.c1315 = Constraint(expr= - m.x131 + m.x138 - m.x162 <= 0)
m.c1316 = Constraint(expr= - m.x132 + m.x133 - m.x163 <= 0)
m.c1317 = Constraint(expr= - m.x132 + m.x134 - m.x164 <= 0)
m.c1318 = Constraint(expr= - m.x132 + m.x135 - m.x165 <= 0)
m.c1319 = Constraint(expr= - m.x132 + m.x136 - m.x166 <= 0)
m.c1320 = Constraint(expr= - m.x132 + m.x137 - m.x167 <= 0)
m.c1321 = Constraint(expr= - m.x132 + m.x138 - m.x168 <= 0)
m.c1322 = Constraint(expr= - m.x133 + m.x134 - m.x169 <= 0)
m.c1323 = Constraint(expr= - m.x133 + m.x135 - m.x170 <= 0)
m.c1324 = Constraint(expr= - m.x133 + m.x136 - m.x171 <= 0)
m.c1325 = Constraint(expr= - m.x133 + m.x137 - m.x172 <= 0)
m.c1326 = Constraint(expr= - m.x133 + m.x138 - m.x173 <= 0)
m.c1327 = Constraint(expr= - m.x134 + m.x135 - m.x174 <= 0)
m.c1328 = Constraint(expr= - m.x134 + m.x136 - m.x175 <= 0)
m.c1329 = Constraint(expr= - m.x134 + m.x137 - m.x176 <= 0)
m.c1330 = Constraint(expr= - m.x134 + m.x138 - m.x177 <= 0)
m.c1331 = Constraint(expr= - m.x135 + m.x136 - m.x178 <= 0)
m.c1332 = Constraint(expr= - m.x135 + m.x137 - m.x179 <= 0)
m.c1333 = Constraint(expr= - m.x135 + m.x138 - m.x180 <= 0)
m.c1334 = Constraint(expr= - m.x136 + m.x137 - m.x181 <= 0)
m.c1335 = Constraint(expr= - m.x136 + m.x138 - m.x182 <= 0)
m.c1336 = Constraint(expr= - m.x137 + m.x138 - m.x183 <= 0)
m.c1337 = Constraint(expr= - m.x139 + m.x140 - m.x148 <= 0)
m.c1338 = Constraint(expr= - m.x139 + m.x141 - m.x149 <= 0)
m.c1339 = Constraint(expr= - m.x139 + m.x142 - m.x150 <= 0)
m.c1340 = Constraint(expr= - m.x139 + m.x143 - m.x151 <= 0)
m.c1341 = Constraint(expr= - m.x139 + m.x144 - m.x152 <= 0)
m.c1342 = Constraint(expr= - m.x139 + m.x145 - m.x153 <= 0)
m.c1343 = Constraint(expr= - m.x139 + m.x146 - m.x154 <= 0)
m.c1344 = Constraint(expr= - m.x139 + m.x147 - m.x155 <= 0)
m.c1345 = Constraint(expr= - m.x140 + m.x141 - m.x156 <= 0)
m.c1346 = Constraint(expr= - m.x140 + m.x142 - m.x157 <= 0)
m.c1347 = Constraint(expr= - m.x140 + m.x143 - m.x158 <= 0)
m.c1348 = Constraint(expr= - m.x140 + m.x144 - m.x159 <= 0)
m.c1349 = Constraint(expr= - m.x140 + m.x145 - m.x160 <= 0)
m.c1350 = Constraint(expr= - m.x140 + m.x146 - m.x161 <= 0)
m.c1351 = Constraint(expr= - m.x140 + m.x147 - m.x162 <= 0)
m.c1352 = Constraint(expr= - m.x141 + m.x142 - m.x163 <= 0)
m.c1353 = Constraint(expr= - m.x141 + m.x143 - m.x164 <= 0)
m.c1354 = Constraint(expr= - m.x141 + m.x144 - m.x165 <= 0)
m.c1355 = Constraint(expr= - m.x141 + m.x145 - m.x166 <= 0)
m.c1356 = Constraint(expr= - m.x141 + m.x146 - m.x167 <= 0)
m.c1357 = Constraint(expr= - m.x141 + m.x147 - m.x168 <= 0)
m.c1358 = Constraint(expr= - m.x142 + m.x143 - m.x169 <= 0)
m.c1359 = Constraint(expr= - m.x142 + m.x144 - m.x170 <= 0)
m.c1360 = Constraint(expr= - m.x142 + m.x145 - m.x171 <= 0)
m.c1361 = Constraint(expr= - m.x142 + m.x146 - m.x172 <= 0)
m.c1362 = Constraint(expr= - m.x142 + m.x147 - m.x173 <= 0)
m.c1363 = Constraint(expr= - m.x143 + m.x144 - m.x174 <= 0)
m.c1364 = Constraint(expr= - m.x143 + m.x145 - m.x175 <= 0)
m.c1365 = Constraint(expr= - m.x143 + m.x146 - m.x176 <= 0)
m.c1366 = Constraint(expr= - m.x143 + m.x147 - m.x177 <= 0)
m.c1367 = Constraint(expr= - m.x144 + m.x145 - m.x178 <= 0)
m.c1368 = Constraint(expr= - m.x144 + m.x146 - m.x179 <= 0)
m.c1369 = Constraint(expr= - m.x144 + m.x147 - m.x180 <= 0)
m.c1370 = Constraint(expr= - m.x145 + m.x146 - m.x181 <= 0)
m.c1371 = Constraint(expr= - m.x145 + m.x147 - m.x182 <= 0)
m.c1372 = Constraint(expr= - m.x146 + m.x147 - m.x183 <= 0)
m.c1373 = Constraint(expr= - m.x148 + m.x149 - m.x156 <= 0)
m.c1374 = Constraint(expr= - m.x148 + m.x150 - m.x157 <= 0)
m.c1375 = Constraint(expr= - m.x148 + m.x151 - m.x158 <= 0)
m.c1376 = Constraint(expr= - m.x148 + m.x152 - m.x159 <= 0)
m.c1377 = Constraint(expr= - m.x148 + m.x153 - m.x160 <= 0)
m.c1378 = Constraint(expr= - m.x148 + m.x154 - m.x161 <= 0)
m.c1379 = Constraint(expr= - m.x148 + m.x155 - m.x162 <= 0)
m.c1380 = Constraint(expr= - m.x149 + m.x150 - m.x163 <= 0)
m.c1381 = Constraint(expr= - m.x149 + m.x151 - m.x164 <= 0)
m.c1382 = Constraint(expr= - m.x149 + m.x152 - m.x165 <= 0)
m.c1383 = Constraint(expr= - m.x149 + m.x153 - m.x166 <= 0)
m.c1384 = Constraint(expr= - m.x149 + m.x154 - m.x167 <= 0)
m.c1385 = Constraint(expr= - m.x149 + m.x155 - m.x168 <= 0)
m.c1386 = Constraint(expr= - m.x150 + m.x151 - m.x169 <= 0)
m.c1387 = Constraint(expr= - m.x150 + m.x152 - m.x170 <= 0)
m.c1388 = Constraint(expr= - m.x150 + m.x153 - m.x171 <= 0)
m.c1389 = Constraint(expr= - m.x150 + m.x154 - m.x172 <= 0)
m.c1390 = Constraint(expr= - m.x150 + m.x155 - m.x173 <= 0)
m.c1391 = Constraint(expr= - m.x151 + m.x152 - m.x174 <= 0)
m.c1392 = Constraint(expr= - m.x151 + m.x153 - m.x175 <= 0)
m.c1393 = Constraint(expr= - m.x151 + m.x154 - m.x176 <= 0)
m.c1394 = Constraint(expr= - m.x151 + m.x155 - m.x177 <= 0)
m.c1395 = Constraint(expr= - m.x152 + m.x153 - m.x178 <= 0)
m.c1396 = Constraint(expr= - m.x152 + m.x154 - m.x179 <= 0)
m.c1397 = Constraint(expr= - m.x152 + m.x155 - m.x180 <= 0)
m.c1398 = Constraint(expr= - m.x153 + m.x154 - m.x181 <= 0)
m.c1399 = Constraint(expr= - m.x153 + m.x155 - m.x182 <= 0)
m.c1400 = Constraint(expr= - m.x154 + m.x155 - m.x183 <= 0)
m.c1401 = Constraint(expr= - m.x156 + m.x157 - m.x163 <= 0)
m.c1402 = Constraint(expr= - m.x156 + m.x158 - m.x164 <= 0)
m.c1403 = Constraint(expr= - m.x156 + m.x159 - m.x165 <= 0)
m.c1404 = Constraint(expr= - m.x156 + m.x160 - m.x166 <= 0)
m.c1405 = Constraint(expr= - m.x156 + m.x161 - m.x167 <= 0)
m.c1406 = Constraint(expr= - m.x156 + m.x162 - m.x168 <= 0)
m.c1407 = Constraint(expr= - m.x157 + m.x158 - m.x169 <= 0)
m.c1408 = Constraint(expr= - m.x157 + m.x159 - m.x170 <= 0)
m.c1409 = Constraint(expr= - m.x157 + m.x160 - m.x171 <= 0)
m.c1410 = Constraint(expr= - m.x157 + m.x161 - m.x172 <= 0)
m.c1411 = Constraint(expr= - m.x157 + m.x162 - m.x173 <= 0)
m.c1412 = Constraint(expr= - m.x158 + m.x159 - m.x174 <= 0)
m.c1413 = Constraint(expr= - m.x158 + m.x160 - m.x175 <= 0)
m.c1414 = Constraint(expr= - m.x158 + m.x161 - m.x176 <= 0)
m.c1415 = Constraint(expr= - m.x158 + m.x162 - m.x177 <= 0)
m.c1416 = Constraint(expr= - m.x159 + m.x160 - m.x178 <= 0)
m.c1417 = Constraint(expr= - m.x159 + m.x161 - m.x179 <= 0)
m.c1418 = Constraint(expr= - m.x159 + m.x162 - m.x180 <= 0)
m.c1419 = Constraint(expr= - m.x160 + m.x161 - m.x181 <= 0)
m.c1420 = Constraint(expr= - m.x160 + m.x162 - m.x182 <= 0)
m.c1421 = Constraint(expr= - m.x161 + m.x162 - m.x183 <= 0)
m.c1422 = Constraint(expr= - m.x163 + m.x164 - m.x169 <= 0)
m.c1423 = Constraint(expr= - m.x163 + m.x165 - m.x170 <= 0)
m.c1424 = Constraint(expr= - m.x163 + m.x166 - m.x171 <= 0)
m.c1425 = Constraint(expr= - m.x163 + m.x167 - m.x172 <= 0)
m.c1426 = Constraint(expr= - m.x163 + m.x168 - m.x173 <= 0)
m.c1427 = Constraint(expr= - m.x164 + m.x165 - m.x174 <= 0)
m.c1428 = Constraint(expr= - m.x164 + m.x166 - m.x175 <= 0)
m.c1429 = Constraint(expr= - m.x164 + m.x167 - m.x176 <= 0)
m.c1430 = Constraint(expr= - m.x164 + m.x168 - m.x177 <= 0)
m.c1431 = Constraint(expr= - m.x165 + m.x166 - m.x178 <= 0)
m.c1432 = Constraint(expr= - m.x165 + m.x167 - m.x179 <= 0)
m.c1433 = Constraint(expr= - m.x165 + m.x168 - m.x180 <= 0)
m.c1434 = Constraint(expr= - m.x166 + m.x167 - m.x181 <= 0)
m.c1435 = Constraint(expr= - m.x166 + m.x168 - m.x182 <= 0)
m.c1436 = Constraint(expr= - m.x167 + m.x168 - m.x183 | |
# <filename>syft/tensor.py<gh_stars>0  -- scraper artifact carried over from the source corpus; not Python code
import numpy as np
import syft.controller
class BaseTensor():
def arithmetic_operation(self, x, name, inline=False):
operation_cmd = name
if (type(x) == type(self)):
operation_cmd += "_elem"
parameter = x.id
else:
operation_cmd += "_scalar"
parameter = str(x)
if (inline):
operation_cmd += "_"
response = self.controller.send_json(
self.cmd(operation_cmd, [parameter])) # sends the command
if int(response) == self.id:
return self
else:
return self.__class__(data=int(response), data_is_pointer=True)
def __add__(self, x):
"""
Performs element-wise addition arithmetic between two tensors
Parameters
----------
x : BaseTensor (Subclass)
The Second tensor to perform addition with.
Returns
-------
BaseTensor (Subclass)
Output tensor
"""
return self.arithmetic_operation(x, "add", False)
def __iadd__(self, x):
"""
Performs in place element-wise addition arithmetic between two tensors
Parameters
----------
x : BaseTensor (Subclass)
The Second tensor to perform addition with.
Returns
-------
BaseTensor (Subclass)
Caller with values inplace
"""
return self.arithmetic_operation(x, "add", True)
def __truediv__(self, x):
"""
Performs division arithmetic between two tensors
Parameters
----------
x : BaseTensor (Subclass)
Second divident tensor
Returns
-------
BaseTensor (Subclass)
Output tensor
"""
return self.arithmetic_operation(x, "div", False)
def __itruediv__(self, x):
"""
Performs division arithmetic between two tensors inplace.
Parameters
----------
x : BaseTensor (Subclass)
Second divident tensor
Returns
-------
BaseTensor (Subclass)
Caller with values inplace
"""
return self.arithmetic_operation(x, "div", True)
def __pow__(self, x):
"""
Takes the power of each element in input ('self') with 'x' and
returns a tensor with the result.
Parameters
----------
x : BaseTensor (Subclass)
Exponent tensor
Returns
-------
BaseTensor (Subclass)
Output tensor
"""
return self.arithmetic_operation(x, "pow", False)
def __ipow__(self, x):
"""
Takes the power of each element in input ('self') with 'x' and
returns a tensor with the result inplace.
Parameters
----------
x : BaseTensor (Subclass)
Exponent tensor
Returns
-------
BaseTensor (Subclass)
Caller with values inplace
"""
return self.arithmetic_operation(x, "pow", True)
def pow(self, x):
"""
Takes the power of each element in input ('self') with 'x' and
returns a tensor with the result.
Parameters
----------
x : BaseTensor (Subclass)
Exponent tensor
Returns
-------
BaseTensor (Subclass)
Output tensor
"""
return self.arithmetic_operation(x, "pow", False)
def pow_(self, x):
"""
Takes the power of each element in input ('self') with 'x', inplace.
Parameters
----------
x : BaseTensor (Subclass)
Exponent tensor
Returns
-------
BaseTensor (Subclass)
Caller with values inplace
"""
return self.arithmetic_operation(x, "pow", True)
def __mod__(self, x):
"""
Performs Modulus arithmetic operation between two tensors.
Parameters
----------
x : BaseTensor (Subclass)
Dividend tensor
Returns
-------
BaseTensor (Subclass)
Output tensor
"""
return self.arithmetic_operation(x, "remainder", False)
def __imod__(self, x):
"""
Performs Modulus arithmetic operation between two tensors inplace.
Parameters
----------
x : BaseTensor (Subclass)
Dividend tensor
Returns
-------
BaseTensor (Subclass)
Caller with values inplace
"""
return self.arithmetic_operation(x, "remainder", True)
def __mul__(self, x):
"""
Performs Multiplication arithmetic operation between two tensors.
Parameters
----------
x : BaseTensor (Subclass)
Second tensor to be multiplied with.
Returns
-------
BaseTensor (Subclass)
Output tensor
"""
return self.arithmetic_operation(x, "mul", False)
def __imul__(self, x):
"""
Performs Multiplication arithmetic operation between two tensors inplace.
Parameters
----------
x : BaseTensor (Subclass)
Second tensor to be multiplied with.
Returns
-------
BaseTensor (Subclass)
Caller with values inplace
"""
return self.arithmetic_operation(x, "mul", True)
def __sub__(self, x):
"""
Performs element-wise substraction arithmetic between two tensors
Parameters
----------
x : BaseTensor (Subclass)
The Second tensor to perform addition with.
Returns
-------
BaseTensor (Subclass)
Output tensor
"""
return self.arithmetic_operation(x, "sub", False)
def __isub__(self, x):
"""
Performs element-wise substraction arithmetic between two tensors
Parameters
----------
x : BaseTensor (Subclass)
The Second tensor to perform addition with.
Returns
-------
BaseTensor (Subclass)
Caller with values inplace
"""
return self.arithmetic_operation(x, "sub", True)
    def remainder(self, divisor):
        """
        Computes the element-wise remainder of division (not inplace).
        Parameters
        ----------
        divisor : BaseTensor (Subclass)
            Tensor to divide 'self' by
        Returns
        -------
        BaseTensor (Subclass)
            Output tensor
        """
        return self.arithmetic_operation(divisor, "remainder")
    def remainder_(self, divisor):
        """
        Computes the element-wise remainder of division, inplace.
        Parameters
        ----------
        divisor : BaseTensor (Subclass)
            Tensor to divide 'self' by
        Returns
        -------
        BaseTensor (Subclass)
            Caller with values inplace
        """
        return self.arithmetic_operation(divisor, "remainder", True)
class IntTensor(BaseTensor):
    """Client-side proxy for an integer tensor held by the remote syft
    controller process.

    Methods serialize a command dict over the controller's JSON channel;
    the remote side owns the actual data, which this proxy references via
    ``self.id``.
    """

    def __init__(self, data, data_is_pointer=False):
        """Create a remote tensor from ``data``, or wrap an existing remote id.

        data: list or numpy array of values, OR (when data_is_pointer=True)
              the integer id of a tensor that already exists remotely.
        """
        self.controller = syft.controller
        if (data is not None and not data_is_pointer):
            if (type(data) == list):
                data = np.array(data)
            # NOTE(review): casting to 'float' looks copy-pasted from
            # FloatTensor -- 'int' seems intended for an IntTensor, but
            # changing it would alter the serialized payload; confirm first.
            data = data.astype('float')
            self.data = data
            self.id = int(self.controller.send_json({"objectType": "IntTensor",
                                                     "functionCall": "create",
                                                     "data": list(data.flatten()),
                                                     "shape": self.data.shape}))
        elif (data_is_pointer):
            self.id = int(data)

    def autograd(self, state):
        # Autograd is intentionally a no-op for integer tensors.
        "do nothing"

    def abs(self):
        """
        Returns absolute value of tensor as a new tensor
        Parameters
        ----------
        Returns
        -------
        IntTensor:
            Output tensor
        """
        return self.no_params_func("abs", return_response=True)

    def shape(self):
        """
        Returns the size of the self tensor as a List.
        Returns
        -------
        Iterable
            Output list
        """
        # The remote side returns e.g. "2,3," -- drop the trailing separator
        # before parsing the comma-separated dimensions.
        return list(np.fromstring(self.get("shape")[:-1], sep=",").astype('int'))

    def trace(self):
        """
        Returns a new tensor with the sum along diagonals of a 2D tensor.
        Returns
        -------
        IntTensor
            Output tensor
        """
        return self.no_params_func("trace", return_response=True)

    def __repr__(self, verbose=True):
        # e.g. "[syft.IntTensor:3 size:2x3]" appended after the data dump.
        tensor_str = str(self.to_numpy())
        type_str = ""
        for dim in self.shape():
            type_str += str(dim) + "x"
        type_str = type_str[:-1]
        desc = "[syft.IntTensor:"+str(self.id) + " size:" + type_str + "]" + "\n"
        return tensor_str + "\n" + desc

    def params_func(self, name, params, return_response=False, return_type='IntTensor'):
        """Send command ``name`` with ``params`` to the controller and, when
        requested, wrap the response in the appropriate tensor proxy."""
        # send the command
        res = self.controller.send_json(
            self.cmd(name, params=params))
        self.controller.log(res)
        if (return_response):
            if (return_type == 'IntTensor'):
                self.controller.log("IntTensor.__init__: {}".format(res))
                return IntTensor(data=int(res), data_is_pointer=True)
            elif(return_type == 'FloatTensor'):
                self.controller.log("IntTensor.__init__: {}".format(res))
                return FloatTensor(data=int(res), data_is_pointer=True)
            else:
                return res
        return self

    def no_params_func(self, name, return_response=False, return_type='IntTensor'):
        # Convenience wrapper for commands that take no parameters.
        return (self.params_func(name, [], return_response, return_type))

    def get(self, param_name="size", response_as_tensor=False, return_type='IntTensor'):
        # NOTE(review): response_as_tensor and return_type are accepted but
        # ignored -- the call always requests a raw string response.
        return self.params_func(name="get", params=[param_name], return_response=True,
                                return_type="string")

    def cmd(self, functionCall, params=None):
        """Build the JSON command dict targeting this tensor's remote id.

        Fixed: the mutable default argument ``params=[]`` is replaced by a
        ``None`` sentinel (same observable behavior, avoids the shared-list
        pitfall).
        """
        if params is None:
            params = []
        cmd = {
            'functionCall': functionCall,
            'objectType': 'IntTensor',
            'objectIndex': self.id,
            'tensorIndexParams': params}
        return cmd

    def gpu(self):
        """
        Returns a GPU copy of this storage if it's not already on the GPU
        Parameters
        ----------
        Returns
        -------
        IntTensor
            Output tensor
        """
        return self.no_params_func("gpu")

    def is_contiguous(self):
        # Always contiguous for this proxy type.
        return True

    def to_numpy(self):
        """Fetch the remote tensor's contents as a numpy int array."""
        if(self.is_contiguous()):
            res = self.controller.send_json({
                'functionCall': 'to_numpy',
                'objectType': 'IntTensor',
                'objectIndex': self.id
            })
            return np.fromstring(res, sep=' ').astype('int').reshape(self.shape())
        else:
            return " - non-contiguous - "
class FloatTensor(BaseTensor):
    def __init__(self, data, autograd=False, data_is_pointer=False, delete_after_use=False):
        # data -- list/ndarray of values, OR (when data_is_pointer=True) the
        #   integer id of a tensor that already exists in the remote controller.
        # autograd -- if True, gradient tracking is enabled via self.autograd(True).
        # delete_after_use -- stored flag; presumably consumed by
        #   delete_tensor() (called from __del__) -- confirm against that method.
        self.controller = syft.controller
        self.delete_after_use = delete_after_use
        if (data is not None and not data_is_pointer):
            if (type(data) == list):
                data = np.array(data)
            data = data.astype('float')
            self.data = data
            self.id = int(self.controller.send_json({"objectType": "FloatTensor",
                                                     "functionCall": "create",
                                                     "data": list(data.flatten()),
                                                     "shape": self.data.shape}))
            # self.controller.log("FloatTensor.__init__: {}".format(self.id))
        elif (data_is_pointer):
            self.id = int(data)
        if (autograd):
            self.autograd(True)
    def __del__(self):
        # Presumably releases the remote-side tensor when this proxy is
        # garbage-collected -- see delete_tensor() for the actual protocol.
        self.delete_tensor()
    def abs(self):
        """
        Returns element-wise absolute value of tensor as a new tensor.
        Parameters
        ----------
        Returns
        -------
        FloatTensor:
            Output tensor
        """
        return self.no_params_func("abs", return_response=True)
    def abs_(self):
        """
        Replaces tensor values with its absolute value, inplace.
        Parameters
        ----------
        Returns
        -------
        FloatTensor
            Caller with values inplace
        """
        return self.no_params_func("abs_")
    def acos(self):
        """
        Returns a new Tensor with the element-wise arccosine of the elements of input.
        Parameters
        ----------
        Returns
        -------
        FloatTensor
            Output tensor
        """
        return self.no_params_func("acos", return_response=True)
    def acos_(self):
        """
        Performs inplace element-wise arccosine operation of the elements of input.
        Parameters
        ----------
        Returns
        -------
        FloatTensor
            Caller with values inplace
        """
        return self.no_params_func("acos_")
    def addmm_(self, x, y):
        """
        Performs a matrix multiplication of the matrices 'x' and 'y'.
        The caller matrix 'self' is added to the final result inplace
        (i.e. self <- self + x @ y).
        Parameters
        ----------
        x : FloatTensor
            First tensor for multiplication
        y : FloatTensor
            Second tensor for multiplication
        Returns
        -------
        FloatTensor
            Caller with values inplace
        """
        return self.params_func("addmm_", [x.id, y.id])
    def addmm(self, x, y):
        """
        Performs a matrix multiplication of the matrices 'x' and 'y'.
        The caller matrix 'self' is added to the final result; 'self' itself
        is left untouched (the work happens on a copy).
        Parameters
        ----------
        x : FloatTensor
            First tensor for multiplication
        y : FloatTensor
            Second tensor for multiplication
        Returns
        -------
        copy : FloatTensor
            Output tensor
        """
        copy = self.copy()
        copy.params_func("addmm_", [x.id, y.id])
        return copy
    def addmv_(self, x, y):
        """
        Performs a matrix-vector product of the matrix x and the vector y.
        The vector tensor is added to the final result inplace.
        Parameters
        ----------
        x : FloatTensor
            tensor for multiplication
        y : FloatTensor
            Vector for Matrix-Vector Product
        Returns
        -------
        FloatTensor
            Caller with values inplace
        """
        return self.params_func("addmv_", [x.id, y.id])
    def addmv(self, x, y):
        """
        Performs a matrix-vector product of the matrix x and the vector y.
        The vector tensor is added to the final result; 'self' is left
        untouched (the work happens on a copy).
        Parameters
        ----------
        x : FloatTensor
            tensor for multiplication
        y : FloatTensor
            Vector for Matrix-Vector Product
        Returns
        -------
        copy : FloatTensor
            Output tensor
        """
        copy = self.copy()
        copy.params_func("addmv_", [x.id, y.id])
        return copy
def asin(self):
"""
Returns a new Tensor | |
import re
"""lc3-2000b.py: A definition of the LC3-2200b architecture."""
__author__ = "<NAME>"
# Define the name of the architecture
__name__ = 'LC3-2200b'
# Define overall architecture widths (in bits)
BIT_WIDTH = 32
# Define opcode widths (in bits)
OPCODE_WIDTH = 4
# Define register specifier widths (in bits)
REGISTER_WIDTH = 4
ALIASES = {
'.word' : 'fill',
'.fill' : 'fill',
'str' : 'STR',
'shf' : None,
'shfll' : 'shf',
'shfrl' : 'shf',
'shfra' : 'shf'
}
REGISTERS = {
'$zero' : 0,
'$at' : 1,
'$v0' : 2,
'$a0' : 3,
'$a1' : 4,
'$a2' : 5,
'$t0' : 6,
'$t1' : 7,
'$t2' : 8,
'$s0' : 9,
'$s1' : 10,
'$s2' : 11,
'$k0' : 12,
'$sp' : 13,
'$fp' : 14,
'$ra' : 15}
SYMBOL_TABLE = {}
VALID_PARAMS = {
'delay_slots' : int}
PARAMS = {
'delay_slots' : 1}
# Private Variables
OFFSET_SIZE = BIT_WIDTH - OPCODE_WIDTH - (REGISTER_WIDTH * 2)
assert(OFFSET_SIZE > 0) # Sanity check
UNUSED_SIZE = BIT_WIDTH - OPCODE_WIDTH - (REGISTER_WIDTH * 3)
assert(UNUSED_SIZE > 0) # Sanity check
SHF_IMM_SIZE = 5
SHF_UNUSED_SIZE = OFFSET_SIZE - SHF_IMM_SIZE - 2
assert(SHF_UNUSED_SIZE > 0) # Sanity check
RE_BLANK = re.compile(r'^\s*(!.*)?$')
RE_PARTS = re.compile(r'^\s*((?P<Label>\w+):)?\s*((?P<Opcode>\.?[\w]+)(?P<Operands>[^!]*))?(!.*)?')
def zero_extend(binary, target, pad_right=False):
    """Pad a binary string with '0's to `target` characters.

    Accepts an optional '0b' prefix (stripped). Pads on the left by default,
    on the right when pad_right is True. Strings already >= target length
    are returned unchanged (minus the prefix).
    """
    if binary.startswith('0b'):
        binary = binary[2:]
    padding = '0' * (target - len(binary))
    return binary + padding if pad_right else padding + binary
def sign_extend(binary, target):
    """Sign-extend a two's-complement binary string to `target` characters.

    Accepts an optional '0b' prefix (stripped). The fill bit is the leading
    bit of the value; single-character inputs are treated as non-negative
    (filled with '0').
    """
    stripped = binary[2:] if binary.startswith('0b') else binary
    fill = '0' if len(stripped) <= 1 else stripped[0]
    return stripped.rjust(target, fill)
def bin2hex(binary):
    """Convert a binary string to uppercase hex, one hex digit per 4 bits
    of input width (rounded up), zero-padded on the left."""
    hex_digits = (len(binary) + 3) // 4
    return format(int(binary, 2), '0{}X'.format(hex_digits))
def hex2bin(hexadecimal):
    """Convert a hex string (optionally '0x'-prefixed) to a binary string
    with no '0b' prefix and no leading zeros."""
    value = int(hexadecimal, 16)
    return bin(value)[2:]
def dec2bin(num, bits):
    """Compute the 2's complement binary of an int value, `bits` wide."""
    wrapped = num if num >= 0 else num + (1 << bits)
    return format(wrapped, '0{}b'.format(bits))
def parse_value(offset, size, pc=None, unsigned=False):
    """Encode an immediate/offset operand as a binary string of `size` bits.

    `offset` may be: a label (resolved PC-relative through SYMBOL_TABLE when
    `pc` is given), a '0x...' hex literal, a '0b...' binary literal, or a
    decimal integer / integer string.

    pc: current program counter; enables label resolution (target - pc - 1).
    unsigned: when True, value must fit [0, 2^size - 1]; otherwise the
        two's-complement range [-2^(size-1), 2^(size-1) - 1].

    Returns the binary string; raises RuntimeError on malformed or
    out-of-range operands.
    """
    bin_offset = None
    if type(offset) is str:
        if pc is not None and offset in SYMBOL_TABLE:
            # PC-relative distance; -1 because PC points past this instruction.
            offset = SYMBOL_TABLE[offset] - pc - 1
        elif offset.startswith('0x'):
            try:
                bin_offset = hex2bin(offset)
            except ValueError:
                raise RuntimeError("'{}' is not in a valid hexadecimal format.".format(offset))
            if len(bin_offset) > size:
                raise RuntimeError("'{}' is too large for {}.".format(offset, __name__))
            bin_offset = zero_extend(bin_offset, size)
        elif offset.startswith('0b'):
            try:
                # Fixed: int(offset) without a base cannot parse '0b...'
                # literals (it raised ValueError for every binary operand).
                # Parse explicitly as base 2 and strip the '0b' prefix so the
                # length check below counts value bits only.
                bin_offset = bin(int(offset, 2))[2:]
            except ValueError:
                raise RuntimeError("'{}' is not in a valid binary format.".format(offset))
            if len(bin_offset) > size:
                raise RuntimeError("'{}' is too large for {}.".format(offset, __name__))
            bin_offset = zero_extend(bin_offset, size)
    if bin_offset is None:
        try:
            offset = int(offset)
        except (TypeError, ValueError):
            if pc is not None:
                raise RuntimeError("'{}' cannot be resolved as a label or a value.".format(offset))
            else:
                raise RuntimeError("'{}' cannot be resolved as a value.".format(offset))
        if unsigned:
            bound = (2**size)
            # >= bound because range is [0, 2^n - 1]
            if offset < 0:
                raise RuntimeError("'{}' cannot be a negative value for {}.".format(offset, __name__))
            elif offset >= bound:
                raise RuntimeError("'{}' is too large (as a value) or too far away (as a label) for {}.".format(offset, __name__))
        else:
            bound = 2**(size - 1)
            if offset < -bound:
                raise RuntimeError("'{}' is too small (as a value) or too far away (as a label) for {}.".format(offset, __name__))
            elif offset >= bound:
                raise RuntimeError("'{}' is too large (as a value) or too far away (as a label) for {}.".format(offset, __name__))
        bin_offset = dec2bin(offset, size)
    return bin_offset
def generate_delay_slots(operands):
    # Emit PARAMS['delay_slots'] noop instructions (list repetition of the
    # single-noop list) to fill branch/jump delay slots.
    # NOTE(review): relies on a `noop` instruction class defined elsewhere
    # in this file.
    return noop.create(operands, None, 'noop')*PARAMS['delay_slots']
class Instruction:
    """
    This is the base class that all implementations of instructions must override.
    """
    @classmethod
    def opcode(cls):
        """Return the operation code for the given instruction as an integer."""
        raise NotImplementedError()
    def __init__(self, operands, pc, instruction):
        # Operands are parsed eagerly; subclasses supply parse_operands().
        # The raw operands/pc/mnemonic are kept (name-mangled) for reference.
        self.__operands = operands
        self.bin_operands = self.parse_operands(operands, pc, instruction)
        self.__pc = pc
        self.__instruction = instruction
    @classmethod
    def create(cls, operands, pc, instruction):
        """Generates a list of Instruction(s) for the given operands."""
        raise NotImplementedError()
    @classmethod
    def pc(cls, pc, **kwargs):
        """Return the new PC after assembling the given instruction"""
        # By default, return pc + 1
        return pc + 1
    @classmethod
    def parse_operands(cls, operands, pc, instruction):
        # Default: no operands -> empty bit string.
        return ''
    def binary(self):
        """Assemble the instruction into binary form.
        Returns a string representation of the binary instruction.
        """
        raise NotImplementedError()
    def hex(self):
        """Assemble the instruction into hexadecimal form.
        Returns a string representation of the hexadecimal instruction.
        """
        return bin2hex(self.binary())
class RInstruction(Instruction):
    """
    The base class for R-type instructions.
    Operand syntax: "$rx, $ry, $rz".
    """
    __RE_R = re.compile(r'^\s*(?P<RX>\$\w+?)\s*,\s*(?P<RY>\$\w+?)\s*,\s*(?P<RZ>\$\w+?)\s*$')
    @classmethod
    def create(cls, operands, pc, instruction):
        return [cls(operands, pc, instruction)]
    @classmethod
    def parse_operands(cls, operands, pc, instruction):
        # Define result
        result_list = []
        match = cls.__RE_R.match(operands)
        if match is None:
            raise RuntimeError("Operands '{}' are in an incorrect format.".format(operands.strip()))
        for op in (match.group('RX'), match.group('RY'), match.group('RZ')):
            if op in REGISTERS:
                # zero_extend also strips a '0b' prefix itself, so the [2:]
                # here is redundant but harmless.
                result_list.append(zero_extend(bin(REGISTERS[op])[2:], REGISTER_WIDTH))
            else:
                raise RuntimeError("Register identifier '{}' is not valid in {}.".format(op, __name__))
        # Insert unused bits
        # Field order becomes [rx, ry, unused, rz].
        result_list.insert(2, '0' * UNUSED_SIZE)
        return ''.join(result_list)
    def binary(self):
        return zero_extend(bin(self.opcode()), OPCODE_WIDTH) + self.bin_operands
class IInstruction(Instruction):
    """
    The base class for I-type instructions.
    Two operand syntaxes exist: "$rx, $ry, offset" (plain) and
    "$rx, offset($ry)" (offset style, selected by is_offset_style()).
    """
    __RE_I = re.compile(r'^\s*(?P<RX>\$\w+?)\s*,\s*(?P<RY>\$\w+?)\s*,\s*(?P<Offset>\S+?)\s*$')
    __RE_OFF = re.compile(r'^\s*(?P<RX>\$\w+?)\s*,\s*(?P<Offset>\S+?)\s*\((?P<RY>\$\w+?)\)\s*$')
    @classmethod
    def is_offset_style(cls):
        # Subclasses choose which operand syntax applies.
        raise NotImplementedError()
    @classmethod
    def parse_operands(cls, operands, pc, instruction):
        # Define result
        result_list = []
        match = cls.__RE_OFF.match(operands) if cls.is_offset_style() else cls.__RE_I.match(operands)
        if match is None:
            raise RuntimeError("Operands '{}' are in an incorrect format.".format(operands.strip()))
        for op in (match.group('RX'), match.group('RY')):
            if op in REGISTERS:
                result_list.append(zero_extend(bin(REGISTERS[op]), REGISTER_WIDTH))
            else:
                raise RuntimeError("Register identifier '{}' is not valid in {}.".format(op, __name__))
        result_list.append(parse_value(match.group('Offset'), OFFSET_SIZE, pc))
        return ''.join(result_list)
    def binary(self):
        return zero_extend(bin(self.opcode()), OPCODE_WIDTH) + self.bin_operands
class BRInstruction(IInstruction):
    """
    The base class for branch versions of I-type instructions.
    Branches use the plain operand syntax and are followed by delay-slot
    noops; pc() accounts for the extra slots.
    """
    @classmethod
    def is_offset_style(cls):
        return False
    @classmethod
    def create(cls, operands, pc, instruction):
        return [cls(operands, pc, instruction)] + generate_delay_slots(operands)
    @classmethod
    def pc(cls, pc, **kwargs):
        return pc + PARAMS['delay_slots'] + 1
    def binary(self):
        # Defensive right-padding to the full word width.
        padded_opcode = zero_extend(bin(self.opcode()), OPCODE_WIDTH)
        return zero_extend(padded_opcode + self.bin_operands, BIT_WIDTH, pad_right=True)
class add(RInstruction):
    # add $rx, $ry, $rz -- R-type, opcode 0.
    @classmethod
    def opcode(cls):
        return 0
class addi(IInstruction):
    # addi $rx, $ry, imm -- I-type, opcode 1.
    @classmethod
    def opcode(cls):
        return 1
    @classmethod
    def is_offset_style(cls):
        return False
    @classmethod
    def create(cls, operands, pc, instruction):
        # pc=None: the immediate is an absolute value; label resolution in
        # parse_value is disabled for addi.
        return [cls(operands, None, instruction)]
class nand(RInstruction):
    # nand $rx, $ry, $rz -- R-type, opcode 2.
    @classmethod
    def opcode(cls):
        return 2
class beq(BRInstruction):
    # beq $rx, $ry, label -- branch-if-equal, opcode 3 (with delay slots).
    @classmethod
    def opcode(cls):
        return 3
class jalr(Instruction):
    # jalr $at, $ra -- jump-and-link-register, opcode 4 (with delay slots).
    __RE_JALR = re.compile(r'^\s*(?P<AT>\$\w+?)\s*,\s*(?P<RA>\$\w+?)\s*$')
    @classmethod
    def opcode(cls):
        return 4
    @classmethod
    def create(cls, operands, pc, instruction):
        return [cls(operands, pc, instruction)] + generate_delay_slots(operands)
    @classmethod
    def pc(cls, pc, **kwargs):
        return pc + PARAMS['delay_slots'] + 1
    @classmethod
    def parse_operands(cls, operands, pc, instruction):
        # Define result
        result_list = []
        match = cls.__RE_JALR.match(operands)
        if match is None:
            raise RuntimeError("Operands '{}' are in an incorrect format.".format(operands.strip()))
        # Note: RA is emitted before AT, i.e. encoding order is the reverse
        # of the written operand order.
        for op in (match.group('RA'), match.group('AT')):
            if op in REGISTERS:
                result_list.append(zero_extend(bin(REGISTERS[op]), REGISTER_WIDTH))
            else:
                raise RuntimeError("Register identifier '{}' is not valid in {}.".format(op, __name__))
        return ''.join(result_list)
    def binary(self):
        padded_opcode = zero_extend(bin(self.opcode()), OPCODE_WIDTH)
        return zero_extend(padded_opcode + self.bin_operands, BIT_WIDTH, pad_right=True)
class ldr(IInstruction):
    # ldr $rx, offset($ry) -- load, opcode 5, offset-style operands.
    @classmethod
    def opcode(cls):
        return 5
    @classmethod
    def is_offset_style(cls):
        return True
    @classmethod
    def create(cls, operands, pc, instruction):
        # pc=None: the offset is an absolute value, not a PC-relative label.
        return [cls(operands, None, instruction)]
class lea(Instruction):
    """lea $rx, label -- load the PC-relative address of `label` into $rx
    (opcode 6). Encoding: opcode | rx | unused | offset."""
    __RE_LEA = re.compile(r'^\s*(?P<RX>\$\w+?)\s*,\s*(?P<Offset>\S+?)\s*$')
    @classmethod
    def opcode(cls):
        return 6
    @classmethod
    def create(cls, operands, pc, instruction):
        return [cls(operands, pc, instruction)]
    @classmethod
    def parse_operands(cls, operands, pc, instruction):
        match = cls.__RE_LEA.match(operands)
        if match is None:
            raise RuntimeError("Operands '{}' are in an incorrect format.".format(operands.strip()))
        result_list = []
        RX = match.group('RX')
        if RX in REGISTERS:
            result_list.append(zero_extend(bin(REGISTERS[RX]), REGISTER_WIDTH))
        else:
            # Fixed: the message referenced an undefined name `op`, which
            # raised NameError instead of the intended RuntimeError.
            raise RuntimeError("Register identifier '{}' is not valid in {}.".format(RX, __name__))
        result_list.append('0' * REGISTER_WIDTH) # Unused bits
        result_list.append(parse_value(match.group('Offset'), OFFSET_SIZE, pc))
        return ''.join(result_list)
    def binary(self):
        padded_opcode = zero_extend(bin(self.opcode()), OPCODE_WIDTH)
        return zero_extend(padded_opcode + self.bin_operands, BIT_WIDTH, pad_right=True)
class STR(IInstruction):
    # str $rx, offset($ry) -- store, opcode 7, offset-style operands.
    # Capitalized to avoid clashing with Python's builtin str; the 'str'
    # mnemonic is mapped to this class via ALIASES.
    @classmethod
    def opcode(cls):
        return 7
    @classmethod
    def is_offset_style(cls):
        return True
    @classmethod
    def create(cls, operands, pc, instruction):
        # pc=None: the offset is an absolute value, not a PC-relative label.
        return [cls(operands, None, instruction)]
class shf(Instruction):
    # shf{ll,rl,ra} $rx, $ry, imm -- shift, opcode 8.
    # Encoding: opcode | rx | ry | A | D | unused | 5-bit unsigned immediate,
    # where (A, D) distinguish the variants (see mapping below); presumably
    # A selects arithmetic and D the direction -- confirm against the ISA spec.
    __RE_SHF = re.compile(r'^\s*(?P<RX>\$\w+?)\s*,\s*(?P<RY>\$\w+?)\s*,\s*(?P<Offset>\S+?)\s*$')
    @classmethod
    def opcode(cls):
        return 8
    @classmethod
    def create(cls, operands, pc, instruction):
        return [cls(operands, pc, instruction)]
    @classmethod
    def parse_operands(cls, operands, pc, instruction):
        # Variant -> control bits: shfll=(0,0), shfrl=(0,1), shfra=(1,1).
        if instruction == 'shfll':
            A, D = '0', '0'
        elif instruction == 'shfrl':
            A, D = '0', '1'
        elif instruction == 'shfra':
            A, D = '1', '1'
        else:
            raise RuntimeError("'shf' instruction could not be assembled.")
        match = cls.__RE_SHF.match(operands)
        if match is None:
            raise RuntimeError("Operands '{}' are in an incorrect format.".format(operands.strip()))
        result_list = []
        for op in (match.group('RX'), match.group('RY')):
            if op in REGISTERS:
                result_list.append(zero_extend(bin(REGISTERS[op]), REGISTER_WIDTH))
            else:
                raise RuntimeError("Register identifier '{}' is not valid in {}.".format(op, __name__))
        result_list.append(A)
        result_list.append(D)
        result_list.append('0' * SHF_UNUSED_SIZE)
        # No pc passed: the shift amount is an unsigned literal, not a label.
        result_list.append(parse_value(match.group('Offset'), SHF_IMM_SIZE, unsigned=True))
        return ''.join(result_list)
    def binary(self):
        return zero_extend(bin(self.opcode()), OPCODE_WIDTH) + self.bin_operands
class bne(BRInstruction):
    # bne $rx, $ry, label -- branch-if-not-equal, opcode 9 (with delay slots).
    @classmethod
    def opcode(cls):
        return 9
class halt(Instruction):
@classmethod
def opcode(cls):
return 15
@classmethod
def create(cls, operands, | |
this gives extra space between molecules
allow_inversion: Whether or not to allow chiral molecules to be
inverted. If True, the final crystal may contain mirror images of
the original molecule. Unless the chemical properties of the mirror
image are known, it is highly recommended to keep this value False
orientations: Once a crystal with the same spacegroup and molecular
stoichiometry has been generated, you may pass its
valid_orientations attribute here to avoid repeating the
calculation, but this is not required
check_atomic_distances: If True, checks the inter-atomic distances
after each Wyckoff position is added. This requires slightly more
time, but vastly improves accuracy. For approximately spherical
molecules, or for large inter-molecular distances, this may be
turned off
"""
    def __init__(self, number, molecules, numMols, area, volume_factor, allow_inversion=False, orientations=None, check_atomic_distances=True):
        """Set up the Rod-group crystal description and immediately attempt
        generation via generate_crystal()."""
        #Necessary input
        self.number = number
        """The number (between 1 and 80) for the crystal's Rod group."""
        self.Msgs()
        """A list of warning messages to use during generation."""
        numMols = np.array(numMols) #must convert it to np.array
        self.factor = volume_factor
        """The supplied volume factor for the unit cell."""
        # NOTE(review): numMols0 and numMols below hold the same array even
        # though the docs distinguish primitive vs conventional cells -- verify.
        self.numMols0 = numMols
        """The number of molecules of each type in the primitive cell"""
        #Reorient the molecules along their principle axes
        oriented_molecules = []
        #Allow support for generating molecules from text via openbabel
        for i, mol in enumerate(molecules):
            if type(mol) == str:
                #Read strings into molecules, try collection first,
                #If string not in collection, use SMILES format
                try:
                    mo = molecule_collection[mol]
                except:
                    mo = ob_mol_from_string(mol)
                    mo = pmg_from_ob(mo)
                molecules[i] = mo
        for mol in molecules:
            pga = PointGroupAnalyzer(mol)
            mo = pga.symmetrize_molecule()['sym_mol']
            oriented_molecules.append(mo)
        self.molecules = oriented_molecules
        """A list of pymatgen.core.structure.Molecule objects, symmetrized and
        oriented along their symmetry axes."""
        self.area = area
        """the effective cross-sectional area, in Angstroms squared, of the
        unit cell."""
        # Periodic along one axis only (Rod-group periodicity convention).
        self.PBC = [3]
        self.boxes = []
        """A list of bounding boxes for each molecule. Used for estimating
        volume of the unit cell."""
        self.radii = []
        """A list of approximated radii for each molecule type. Used for
        checking inter-molecular distances."""
        for mol in self.molecules:
            self.boxes.append(get_box(reoriented_molecule(mol)[0]))
            # Molecular radius = farthest site from the origin, plus a
            # 1.0 Angstrom cushion.
            max_r = 0
            for site in mol:
                radius = math.sqrt( site.x**2 + site.y**2 + site.z**2 )
                if radius > max_r: max_r = radius
            self.radii.append(max_r+1.0)
        self.numMols = numMols
        """The number of each type of molecule in the CONVENTIONAL cell"""
        self.volume = estimate_volume_molecular(self.numMols, self.boxes, self.factor)
        """The volume of the generated unit cell"""
        self.wyckoffs = get_rod(self.number)
        """The Wyckoff positions for the crystal's Rod group."""
        self.wyckoffs_organized = get_rod(self.number, organized=True)
        """The Wyckoff positions for the crystal's Rod group. Sorted by
        multiplicity."""
        self.w_symm = get_rod_symmetry(self.number, molecular=True)
        """A list of site symmetry operations for the Wyckoff positions, obtained
        from get_wyckoff_symmetry."""
        self.wyckoff_generators = get_rod_generators(self.number)
        """A list of Rod Wyckoff generators (molecular=False)"""
        self.wyckoff_generators_m = get_rod_generators(self.number, molecular=True)
        """A list of Rod Wyckoff generators (molecular=True)"""
        self.check_atomic_distances = check_atomic_distances
        """Whether or not inter-atomic distances are checked at each step."""
        self.allow_inversion = allow_inversion
        """Whether or not to allow chiral molecules to be inverted."""
        #When generating multiple crystals of the same stoichiometry and sg,
        #allow the user to re-use the allowed orientations, to reduce time cost
        if orientations is None:
            self.get_orientations()
        else:
            self.valid_orientations = orientations
            """The valid orientations for each molecule and Wyckoff position.
            May be copied when generating a new molecular_crystal to save a
            small amount of time"""
        self.generate_crystal()
def Msgs(self):
self.Msg1 = 'Error: the stoichiometry is incompatible with the wyckoff sites choice'
self.Msg2 = 'Error: failed in the cycle of generating structures'
self.Msg3 = 'Warning: failed in the cycle of adding species'
self.Msg4 = 'Warning: failed in the cycle of choosing wyckoff sites'
self.Msg5 = 'Finishing: added the specie'
self.Msg6 = 'Finishing: added the whole structure'
    def get_orientations(self):
        """
        Calculates the valid orientations for each Molecule and Wyckoff
        position. Returns a list with 4 indices:
        index 1: the molecular prototype's index within self.molecules
        index 2: the Wyckoff position's 1st index (based on multiplicity)
        index 3: the WP's 2nd index (within the group of equal multiplicity)
        index 4: the index of the valid orientation for the molecule/WP pair
        For example, self.valid_orientations[i][j][k] would be a list of valid
        orientations for self.molecules[i], in the Wyckoff position
        self.wyckoffs_organized[j][k]
        """
        self.valid_orientations = []
        for mol in self.molecules:
            self.valid_orientations.append([])
            # wp_index walks the flat self.wyckoffs list in step with the
            # organized (grouped-by-multiplicity) nested iteration below.
            wp_index = -1
            for i, x in enumerate(self.wyckoffs_organized):
                self.valid_orientations[-1].append([])
                for j, wp in enumerate(x):
                    wp_index += 1
                    allowed = orientation_in_wyckoff_position(mol, self.wyckoffs, self.w_symm, wp_index, already_oriented=True, allow_inversion=self.allow_inversion)
                    # Incompatible molecule/WP pairs get an empty list so the
                    # nested structure keeps its shape.
                    if allowed is not False:
                        self.valid_orientations[-1][-1].append(allowed)
                    else:
                        self.valid_orientations[-1][-1].append([])
    def check_compatible(self):
        """
        Checks if the number of molecules is compatible with the Wyckoff
        positions. Considers the number of degrees of freedom for each Wyckoff
        position, and makes sure at least one valid combination of WP's exists.

        Returns False when incompatible, 0 when compatible but with no
        degrees of freedom, and True otherwise (callers distinguish all
        three values).
        """
        # Multiplicity of each organized WP group (first member of each group).
        N_site = [len(x[0]) for x in self.wyckoffs_organized]
        has_freedom = False
        #remove WP's with no freedom once they are filled
        removed_wyckoffs = []
        for i, numMol in enumerate(self.numMols):
            #Check that the number of molecules is a multiple of the smallest Wyckoff position
            if numMol % N_site[-1] > 0:
                return False
            else:
                #Check if smallest WP has at least one degree of freedom
                op = self.wyckoffs_organized[-1][-1][0]
                # NOTE(review): ndarray.all() yields a bool, so `!= 0.0`
                # tests "all rotation entries nonzero" -- this looks like it
                # was meant to be "any nonzero"; verify before changing.
                if op.rotation_matrix.all() != 0.0:
                    if self.valid_orientations[i][-1][-1] != []:
                        has_freedom = True
                else:
                    #Subtract from the number of ions beginning with the smallest Wyckoff positions
                    remaining = numMol
                    for j, x in enumerate(self.wyckoffs_organized):
                        for k, wp in enumerate(x):
                            while remaining >= len(wp) and wp not in removed_wyckoffs:
                                if self.valid_orientations[i][j][k] != []:
                                    #Check if WP has at least one degree of freedom
                                    op = wp[0]
                                    remaining -= len(wp)
                                    if np.allclose(op.rotation_matrix, np.zeros([3,3])):
                                        if (len(self.valid_orientations[i][j][k]) > 1 or
                                            self.valid_orientations[i][j][k][0].degrees > 0):
                                            #NOTE: degrees of freedom may be inaccurate for linear molecules
                                            has_freedom = True
                                        else:
                                            removed_wyckoffs.append(wp)
                                    else:
                                        has_freedom = True
                                else:
                                    removed_wyckoffs.append(wp)
                    if remaining != 0:
                        return False
        if has_freedom:
            return True
        else:
            #print("Warning: Wyckoff Positions have no degrees of freedom.")
            return 0
        # NOTE(review): unreachable -- both branches above return.
        return True
def generate_crystal(self, max1=max1, max2=max2, max3=max3):
"""
The main code to generate a random molecular crystal. If successful,
stores a pymatgen.core.structure object in self.struct and sets
self.valid to True. If unsuccessful, sets self.valid to False and
outputs an error message.
Args:
max1: the number of attempts for generating a lattice
max2: the number of attempts for a given lattice
max3: the number of attempts for a given Wyckoff position
"""
#Check the minimum number of degrees of freedom within the Wyckoff positions
degrees = self.check_compatible()
if degrees is False:
print(self.Msg1)
self.struct = None
self.valid = False
return
else:
if degrees == 0:
max1 = 10
max2 = 10
max3 = 10
#Calculate a minimum vector length for generating a lattice
#minvector = max(radius*2 for radius in self.radii)
all_lengths = []
for box in self.boxes:
lengths = []
for a, b in [[0,1],[2,3],[4,5]]:
lengths.append(abs(box[b]-box[a]))
all_lengths.append(min(lengths))
minvector = max(max(all_lengths), tol_m)
for cycle1 in range(max1):
#1, Generate a lattice
cell_para = generate_lattice_1D(self.number, self.volume, area=self.area, minvec=minvector)
if cell_para is None:
break
else:
cell_matrix = para2matrix(cell_para)
if abs(self.volume - np.linalg.det(cell_matrix)) > 1.0:
print('Error, volume is not equal to the estimated value: ', self.volume, ' -> ', np.linalg.det(cell_matrix))
print('cell_para: ', cell_para)
sys.exit(0)
molecular_coordinates_total = [] #to store the added molecular coordinates
molecular_sites_total = [] #to store the corresponding molecular specie
atomic_coordinates_total = [] #to store the added atomic coordinates
atomic_sites_total = [] #to store the corresponding atomic specie
wps_total = [] #to store corresponding Wyckoff position indices
points_total = [] #to store the generating x,y,z points
mol_generators_total = []
good_structure = False
for cycle2 in range(max2):
molecular_coordinates_tmp = deepcopy(molecular_coordinates_total)
molecular_sites_tmp = deepcopy(molecular_sites_total)
atomic_coordinates_tmp = deepcopy(atomic_coordinates_total)
atomic_sites_tmp = deepcopy(atomic_sites_total)
wps_tmp = deepcopy(wps_total)
points_tmp = deepcopy(points_total)
mol_generators_tmp = []
#Add molecules specie by specie
for numMol, mol in zip(self.numMols, self.molecules):
i = self.molecules.index(mol)
numMol_added = 0
#Now we start to add the specie to the wyckoff position
for cycle3 in range(max3):
#Choose a random Wyckoff position for given multiplicity: 2a, 2b, 2c
#NOTE: The molecular version return wyckoff indices, not ops
indices = choose_wyckoff_molecular(self.wyckoffs_organized, numMol-numMol_added, self.valid_orientations[i])
if indices is not False:
j, k = indices
if self.valid_orientations[i][j][k] == []:
print("Error: Failed | |
path_in, dst, excludes = []):
if not os.path.isdir(path_in):
self._abort('Zip source directory "%s" does not exist.' % path_in)
self._verbose_info('add directory "%s" to "%s"' % (path_in, dst))
savedir = os.getcwd()
# Get nice relative paths by temporarily switching directories.
os.chdir(path_in)
try:
for basedir, subdirs, filenames in os.walk('.'):
for filename in filenames:
file_path_in = os.path.join(basedir[2:], filename)
file_path_out = os.path.join(dst, basedir[2:], filename)
self.add_file(file_path_in, file_path_out)
finally:
os.chdir(savedir)
    def _verbose_info(self, msg):
        # Prefix verbose output with this zip's output path for context.
        verbose_info('%s: %s' % (self.output_path, msg))
    def _abort(self, *msgs):
        # Abort with a fatal error naming the zip file being written;
        # extra messages are forwarded as detail lines.
        abort('Fatal error writing zip file "%s".' % self.output_path, msgs)
#===============================================================================
def merge_java_options(*opts):
#===============================================================================
    """
    Merge redundant -X... java command line options. Keep others intact.
    Arguments can be lists or individual arguments. Returns the reduced list.
    """
    merged = []
    seen_x_symbols = set()
    for option in flatten(*opts):
        if option is None:
            continue
        # This is somewhat simplistic logic that might have unlikely failure scenarios.
        if not option.startswith('-X'):
            merged.append(option)
            continue
        # The symbol is the alphabetic characters of the option name.
        symbol = ''.join(c for c in option[2:] if c.isalpha())
        if symbol not in seen_x_symbols:
            seen_x_symbols.add(symbol)
            merged.append(option)
    return merged
#===============================================================================
def get_java_version(javaHome="java", verbose=False):
#===============================================================================
    """
    Assumes caller has already run "find_in_path(java)" so we know it can be checked.

    Runs "<javaHome> -version" piped through grep and matches known release
    strings. Returns the matched version string ('11.0', '1.8' or '1.7'),
    '' on no match or OS error, or -- when verbose is True -- the raw grep
    output (bytes, undecoded).
    """
    try:
        version = subprocess.Popen([javaHome, '-version'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # BRE alternation: matches lines containing "java " or "openjdk".
        grep = subprocess.Popen(['grep', 'java \|openjdk'], stdin=version.stdout, stdout=subprocess.PIPE)
        version.stdout.close()
        out, err = grep.communicate()
        version.wait()
        if verbose:
            return out
        # `version` is rebound here from the Popen object to the candidate string.
        for version in ('11.0', '1.8', '1.7'):
            if version in out.decode("utf-8"):
                return version
        return ""
    except (OSError):
        return ""
#===============================================================================
def is_pro_version(voltdb_jar):
#===============================================================================
    """
    Assumes caller has already run "find_in_path(jar)" so we know it can be checked.
    The jar is already validated as present before this is called.
    """
    try:
        zf = zipfile.ZipFile(voltdb_jar, 'r')
    except (IOError, OSError) as e:
        print('Error reading zip file "%s".' % voltdb_jar, e)
        return False
    try:
        # Pro builds are identified by the presence of the command-log class.
        return any(entry.filename == "org/voltdb/CommandLogImpl.class"
                   for entry in zf.infolist())
    except OSError:
        return False
    finally:
        zf.close()
#===============================================================================
def kwargs_merge_list(kwargs, name, *args):
#===============================================================================
    """
    Merge and flatten kwargs list with additional items.
    Mutates kwargs[name] in place (creates it when absent).
    """
    kwargs[name] = flatten_to_list(kwargs.get(name, None), *args)
#===============================================================================
def kwargs_merge_java_options(kwargs, name, *args):
#===============================================================================
    """
    Merge and flatten kwargs Java options list with additional options.
    Mutates kwargs[name] in place, de-duplicating -X options via
    merge_java_options().
    """
    kwargs[name] = merge_java_options(kwargs.get(name, None), *args)
#===============================================================================
def choose(prompt, *choices):
#===============================================================================
    """
    Prompt the user for multiple choice input. Keep prompting until a valid
    choice is received. Choice shortcuts require unique first letters. The user
    can either respond with a single letter or an entire word.
    Returns the first letter of the selected choice.
    """
    letters = set()
    choice_list = []
    for choice in choices:
        if not choice:
            abort('Empty choice passed to choose().')
        if choice[0] in letters:
            abort('Non-unique choices %s passed to choose().' % str(choices))
        letters.add(choice[0])
        # Render each choice as e.g. "[y]es" so the shortcut is visible.
        choice_list.append('[%s]%s' % (choice[0], choice[1:]))
    # Loop forever until a recognized letter or full word is entered.
    while True:
        sys.stdout.write('%s (%s) ' % (prompt, '/'.join(choice_list)))
        sys.stdout.flush()
        response = sys.stdin.readline().strip()
        if response in letters or response in choices:
            return response[0]
#===============================================================================
def dict_to_sorted_pairs(d):
#===============================================================================
    """
    Convert a dictionary to a list of (key, value) pairs sorted by key.
    """
    # Dict keys are unique, so sorting the keys alone fixes the pair order.
    return [(key, d[key]) for key in sorted(d.keys())]
#===============================================================================
def pluralize(s, count):
#===============================================================================
    """
    Return word with 's' appended if the count > 1.
    Note: counts of 0 and 1 both yield the singular form.
    """
    return '%ss' % s if count > 1 else s
#===============================================================================
def kwargs_extract(kwargs, defaults, remove = True, check_extras = False):
#===============================================================================
    """
    Extract and optionally remove valid keyword arguments and convert to an
    object with attributes. The defaults argument specifies both the list of
    valid keywords and their default values. Abort on any invalid keyword
    when check_extras is True.
    """
    class O(object):
        pass
    result = O()
    if check_extras:
        # Any key not declared in defaults is invalid.
        extras = sorted(set(kwargs.keys()).difference(set(defaults.keys())))
        if extras:
            abort('Bad keywords passed to kwargs_extract():', extras)
    for key in defaults:
        if key not in kwargs:
            value = defaults[key]
        elif remove:
            value = kwargs.pop(key)
        else:
            value = kwargs[key]
        setattr(result, key, value)
    return result
#===============================================================================
def kwargs_get(kwargs, name, remove = True, default = None):
#===============================================================================
    """Fetch a single keyword argument through kwargs_extract()."""
    extracted = kwargs_extract(kwargs, {name: default}, remove = remove, check_extras = False)
    return getattr(extracted, name)
#===============================================================================
def kwargs_get_string(kwargs, name, remove = True, default = None):
#===============================================================================
    """Fetch a keyword argument coerced to str; None passes through unchanged."""
    raw = kwargs_get(kwargs, name, remove = remove, default = default)
    return raw if raw is None else str(raw)
#===============================================================================
def kwargs_get_integer(kwargs, name, remove = True, default = None):
#===============================================================================
    """
    Fetch a keyword argument coerced to int; None passes through unchanged.
    Aborts with a diagnostic when the value cannot be converted.
    """
    raw = kwargs_get(kwargs, name, remove = remove, default = default)
    if raw is None:
        return None
    try:
        return int(raw)
    except (ValueError, TypeError):
        abort('Keyword argument "%s" must be an integer: %s' % (name, str(raw)))
#===============================================================================
def kwargs_get_boolean(kwargs, name, remove = True, default = None):
#===============================================================================
    """
    Fetch a keyword argument that must be a bool (or None when absent).
    Aborts with a diagnostic for any non-boolean value.
    """
    value = kwargs_get(kwargs, name, remove = remove, default = default)
    # Use isinstance() rather than "value == True or value == False": bool is
    # an int subclass, so 1 == True and 0 == False, and the old equality test
    # silently accepted integers despite the error message promising a boolean.
    if value is None or isinstance(value, bool):
        return value
    abort('Keyword argument "%s" must be a boolean value: %s' % (name, str(value)))
#===============================================================================
def kwargs_get_list(kwargs, name, remove = True, default = None):
#===============================================================================
    """
    Fetch a keyword argument flattened to a list.
    default=None replaces the former mutable [] default (a shared object that
    would leak state if any callee mutated it) and is substituted with a fresh
    empty list. flatten_to_list() is also given None for absent entries
    elsewhere in this module (see kwargs_merge_list), so behavior is unchanged.
    """
    if default is None:
        default = []
    return flatten_to_list(kwargs_get(kwargs, name, remove = remove, default = default))
#===============================================================================
def kwargs_set_defaults(kwargs, **defaults):
#===============================================================================
    """Fill in missing kwargs entries from the supplied defaults in place."""
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
#===============================================================================
def parse_hosts(host_string, min_hosts = None, max_hosts = None, default_port = None):
#===============================================================================
    """
    Split host string on commas, extract optional port for each and return list
    of host objects (each with .host and .port attributes; .port is an int or
    None). Check against minimum/maximum quantities if specified.
    We attempt to classify strings as one of:
        <ip4 address> | <ip4 address> : <port>
        <ip6_address> | [<ip6 address>] | [<ip6 address>] : <port>
        <host name> | <host name> : port
    Brackets are required around IPv6 addresses; this is consistent with
    VoltDB parsing. Also, don't forget about IPv4-mapped IPv6 addresses,
    formatted like [::ffff:127.0.0.1]:21212.
    Aborts on unrecognized syntax, bad port values, or count violations.
    """
    # Simple result record: one instance per parsed host.
    class Host(object):
        def __init__(self, host, port):
            self.host = host
            self.port = port
    hosts = []
    # approximate syntax only, sufficient only to discriminate
    opt_port = r'(:[0-9]+)?$'
    addr4 = re.compile(r'[0-9.]+' + opt_port)
    addr6 = re.compile(r'\[[0-9A-Fa-f:]+([0-9.]+)?\]' + opt_port)
    name = re.compile(r'[0-9A-Za-z.-]+' + opt_port)
    for host_port in host_string.split(','):
        # Two or more colons can only be IPv6; a dot suggests IPv4; otherwise
        # fall through to host-name syntax.
        if host_port.count(':') >= 2 and addr6.match(host_port):
            host, port = _split_ip6(host_port)
        elif '.' in host_port and addr4.match(host_port):
            host, port = _split_ip4(host_port)
        elif name.match(host_port):
            host, port = _split_name(host_port)
        else:
            abort('''Unrecognized address syntax "%s"
Use one of the following: (including the brackets)
   hostname       hostname:port
   ip4addr        ip4addr:port
   [ip6addr]      [ip6addr]:port''' % host_port)
        # Apply the caller's default port only when none was given explicitly.
        if port is None:
            port = default_port
        if port is not None:
            try:
                port = int(port)
            except ValueError:
                abort('Bad port value "%s" for host: %s' % (port, host_port))
        #print("# host_port=%s --> host=%s port=%s" % (host_port, host,port))
        hosts.append(Host(host, port))
    if min_hosts is not None and len(hosts) < min_hosts:
        abort('Too few hosts in host string "%s". The minimum is %d.'
              % (host_string, min_hosts))
    if max_hosts is not None and len(hosts) > max_hosts:
        abort('Too many hosts in host string "%s". The maximum is %d.'
              % (host_string, max_hosts))
    return hosts
def _split_ip4(host_port):
split_host = host_port.split(':')
if len(split_host) == 1:
return (host_port, None)
elif len(split_host) == 2:
return (split_host[0], split_host[1])
else:
abort('Bad HOST:PORT format "%s" - too many colons.' % host_port)
def _split_ip6(host_port):
addr = port = None
if host_port[0] == '[': # bracketed address is unambiguous
split_host = host_port[1:].split(']',1)
if len(split_host) != 2:
abort('Bad [HOST]:PORT format "%s" - missing bracket.' % host_port)
addr = split_host[0]
if split_host[1] != '':
if split_host[1][0] != ':':
abort('Bad [HOST]:PORT format "%s" - colon required.' % host_port)
port = split_host[1][1:]
else: # we must assume it's just an IPv6 address
addr = host_port
return (addr, port)
def _split_name(host_port):
    # Host-name syntax splits exactly like the IPv4 'ADDR[:PORT]' form.
    return _split_ip4(host_port)
#===============================================================================
def paragraph(*lines):
#===============================================================================
    """
    Strip leading and trailing whitespace and wrap text into a paragraph block.
    The arguments can include arbitrarily nested sequences.
    """
    pieces = [part
              for chunk in flatten_to_list(lines)
              for part in chunk.strip().split('\n')]
    return textwrap.fill('\n'.join(pieces))
#===============================================================================
class File(object):
#===============================================================================
"""
File reader/writer object that aborts on any error. Must explicitly call
close(). The main point is to standardize the error-handling.
"""
def __init__(self, path, mode = 'r', make_dirs=False):
if mode not in ('r', 'w'):
abort('Invalid file mode "%s".' % mode)
self.path = path
self.mode = mode
self.make_dirs = make_dirs
self.f = None
def open(self):
self.close()
if self.mode == 'w' and self.make_dirs:
dir = os.path.dirname(self.path)
if dir and not os.path.exists(dir):
try:
os.makedirs(dir)
except (IOError, OSError) as e:
self._abort('Unable to create directory "%s".' % dir)
self.f = self._open()
def read(self):
if self.mode != 'r':
self._abort('File is not open for reading in call to read().')
# Reading the entire file, so we can automatically open and close here.
if self.f is None:
f = self._open()
else:
f = self.f
try:
try:
return f.read()
except (IOError, OSError) as e:
self._abort('Read error.', e)
finally:
# Close | |
"""This file contains all the classes you must complete for this project.
You can use the test cases in agent_test.py to help during development, and
augment the test suite with your own test cases to further test your code.
You must test your agent's strength against a set of agents with known
relative strength using tournament.py and include the results in your report.
"""
import random
# Maps a player object to a stack (list) of that player's past legal-move
# counts; read here via peek_previous_moves() — presumably maintained by
# helper functions defined elsewhere in this file (TODO confirm).
previous_moves = {}  # map stack with # of player moves
class Timeout(Exception):
    """Subclass base exception for code clarity.

    Raised by the search methods when the turn timer is close to expiring;
    CustomPlayer.get_move() catches it to return the best move found so far.
    """
    pass
def score_1(game, player):  # 82.14%
    """
    Heuristics computing score using #player moves - k * #opponent moves
    with the weight k fixed at 7.
    :param game: game
    :param player: player
    :return: score
    """
    if game.is_winner(player) or game.is_loser(player):
        return game.utility(player)
    own_moves = game.get_legal_moves(player)
    opp_moves = game.get_legal_moves(game.get_opponent(player))
    # Tournament sweep of k (win rates): k=1 72.86%, k=2-4 79.29%,
    # k=5-6 80.71%, k=7 82.14% (best), k=8 73.57%, k=9 79.29%, k=10-11 77.86%.
    return float(len(own_moves) - 7 * len(opp_moves))
def score_2(game, player):  # 67.14%
    """
    Heuristics computing score as a difference between change of player moves
    and change of opponent moves since the previously recorded position.
    :param game: game
    :param player: player
    :return: score
    """
    if game.is_winner(player) or game.is_loser(player):
        return game.utility(player)
    opponent = game.get_opponent(player)
    own_now = len(game.get_legal_moves(player))
    opp_now = len(game.get_legal_moves(opponent))
    own_before = peek_previous_moves(player)
    opp_before = peek_previous_moves(opponent)
    # Without recorded history, assume mobility has halved since last turn.
    if own_before is None:
        own_before = 2 * own_now
    if opp_before is None:
        opp_before = 2 * opp_now
    return float((own_now - own_before) - (opp_before - opp_now))
def score_3(game, player):  # 57.14%
    """
    Heuristics computing score as a difference between the change in player
    moves and the ratio of previous-to-current opponent moves.
    :param game: game
    :param player: player
    :return: score
    """
    if game.is_winner(player) or game.is_loser(player):
        return game.utility(player)
    opponent = game.get_opponent(player)
    own_now = len(game.get_legal_moves(player))
    opp_now = len(game.get_legal_moves(opponent))
    own_before = peek_previous_moves(player)
    opp_before = peek_previous_moves(opponent)
    # Without recorded history, assume mobility has halved since last turn.
    if own_before is None:
        own_before = 2 * own_now
    if opp_before is None:
        opp_before = 2 * opp_now
    if own_now == 0:
        return float("-inf")
    if opp_now == 0:
        return float("inf")
    return float((own_now - own_before) - (opp_before / opp_now))
def score_4(game, player):  # 50.71%
    """
    Heuristics computing score as a ratio of change ratio of the number of
    player and opponent moves.
    :param game: game
    :param player: player
    :return: score
    """
    if game.is_winner(player) or game.is_loser(player):
        return game.utility(player)
    opponent = game.get_opponent(player)
    own_now = len(game.get_legal_moves(player))
    opp_now = len(game.get_legal_moves(opponent))
    own_before = peek_previous_moves(player)
    opp_before = peek_previous_moves(opponent)
    # Without recorded history, assume mobility has halved since last turn.
    if own_before is None:
        own_before = 2 * own_now
    if opp_before is None:
        opp_before = 2 * opp_now
    if own_now == 0:
        return float("-inf")
    if opp_now == 0:
        return float("inf")
    # NOTE(review): raises ZeroDivisionError when opp_before == 0; this hazard
    # is preserved from the original implementation.
    return float((own_now - own_before) / (opp_before / opp_now))
def score_5(game, player):  # 78.57%
    """
    Heuristics computing score based on the ratio of player and opponent moves
    plus the drop in the opponent's move count since the previous position.
    :param game: game
    :param player: player
    :return: score
    """
    if game.is_winner(player) or game.is_loser(player):
        return game.utility(player)
    opponent = game.get_opponent(player)
    own_now = len(game.get_legal_moves(player))
    opp_now = len(game.get_legal_moves(opponent))
    own_before = peek_previous_moves(player)
    opp_before = peek_previous_moves(opponent)
    # Without recorded history, assume mobility has halved since last turn.
    if own_before is None:
        own_before = 2 * own_now
    if opp_before is None:
        opp_before = 2 * opp_now
    if own_now == 0:
        return float("-inf")
    if opp_now == 0:
        return float("inf")
    opp_drop = opp_before - opp_now
    if own_now >= opp_now:
        return float((own_now / opp_now) + opp_drop)
    return float(-(opp_now / own_now) + opp_drop)
def custom_score(game, player):
    """Calculate the heuristic value of a game state from the point of view
    of the given player.
    Note: this function should be called from within a Player instance as
    `self.score()` -- you should not need to call this function directly.
    Parameters
    ----------
    game : `isolation.Board`
        An instance of `isolation.Board` encoding the current state of the
        game (e.g., player locations and blocked cells).
    player : object
        A player instance in the current game (i.e., an object corresponding to
        one of the player objects `game.__player_1__` or `game.__player_2__`.)
    Returns
    -------
    float
        The heuristic value of the current game state to the specified player.
    """
    # score_1 through score_4 (defined above) are alternative heuristics that
    # were benchmarked; score_5 is the active choice.
    return score_5(game, player)
class CustomPlayer:
"""Game-playing agent that chooses a move using your evaluation function
and a depth-limited minimax algorithm with alpha-beta pruning. You must
finish and test this player to make sure it properly uses minimax and
alpha-beta to return a good move before the search time limit expires.
Parameters
----------
search_depth : int (optional)
A strictly positive integer (i.e., 1, 2, 3,...) for the number of
layers in the game tree to explore for fixed-depth search. (i.e., a
depth of one (1) would only explore the immediate sucessors of the
current state.)
score_fn : callable (optional)
A function to use for heuristic evaluation of game states.
iterative : boolean (optional)
Flag indicating whether to perform fixed-depth search (False) or
iterative deepening search (True).
method : {'minimax', 'alphabeta'} (optional)
The name of the search method to use in get_move().
timeout : float (optional)
Time remaining (in milliseconds) when search is aborted. Should be a
positive value large enough to allow the function to return before the
timer expires.
"""
    def __init__(self, search_depth=3, score_fn=custom_score,
                 iterative=True, method='minimax', timeout=10.):
        # Maximum ply depth for fixed-depth search (starting point is ignored
        # when iterative deepening restarts from depth 1).
        self.search_depth = search_depth
        # True -> iterative deepening in get_move(); False -> one fixed-depth search.
        self.iterative = iterative
        # Heuristic evaluation callable: score(game, player) -> float.
        self.score = score_fn
        # Search algorithm selector: 'minimax' or 'alphabeta'.
        self.method = method
        # Callable returning remaining milliseconds; set each turn by get_move().
        self.time_left = None
        # Safety margin (ms) below which the search should raise Timeout.
        self.TIMER_THRESHOLD = timeout
def get_move(self, game, legal_moves, time_left):
"""Search for the best move from the available legal moves and return a
result before the time limit expires.
This function must perform iterative deepening if self.iterative=True,
and it must use the search method (minimax or alphabeta) corresponding
to the self.method value.
**********************************************************************
NOTE: If time_left < 0 when this function returns, the agent will
forfeit the game due to timeout. You must return _before_ the
timer reaches 0.
**********************************************************************
Parameters
----------
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game (e.g., player locations and blocked cells).
legal_moves : list<(int, int)>
A list containing legal moves. Moves are encoded as tuples of pairs
of ints defining the next (row, col) for the agent to occupy.
time_left : callable
A function that returns the number of milliseconds left in the
current turn. Returning with any less than 0 ms remaining forfeits
the game.
Returns
-------
(int, int)
Board coordinates corresponding to a legal move; may return
(-1, -1) if there are no available legal moves.
"""
self.time_left = time_left
# Perform any required initializations, including selecting an initial
# move from the game board (i.e., an opening book), or returning
# immediately if there are no legal moves
if len(legal_moves) == 0:
return -1, -1
move_number = random.randint(0, len(legal_moves) - 1)
initial_move = legal_moves[move_number]
best_move = None
depth = self.search_depth
if self.iterative:
depth = 1
method_fn = None
if self.method == 'minimax':
method_fn = self.minimax
if self.method == 'alphabeta':
method_fn = self.alphabeta
try:
# The search method call (alpha beta or minimax) should happen in
# here in order to avoid timeout. The try/except block will
# automatically catch the exception raised by the search method
# when the timer gets close to expiring
value, move = method_fn(game, depth, True)
best_value = value
best_move = move
while self.iterative:
depth += 1
value, move = method_fn(game, depth, True)
if value > best_value:
best_value, best_move = value, move
except Timeout:
# Handle any actions required at timeout, if necessary
pass
return best_move
def minimax(self, game, depth, maximizing_player=True):
"""Implement the minimax search algorithm as described in the lectures.
Parameters
----------
game : isolation.Board
An instance of the Isolation game `Board` class representing the
current game state
depth : int
Depth is an integer representing the maximum number of plies to
search in the game tree before aborting
maximizing_player | |
35954 # GL/glext.h:3252
# ------------------------------------------------------------------------------
# Machine-generated ctypes bindings mirroring GL/glext.h: extension enum
# constants, GL typedef aliases onto ctypes types, function linkages created
# via _link_function(), and PFN* function-pointer prototypes. The trailing
# comment on each line records the originating header line. Do not hand-edit
# the values; regenerate from the header instead.
# NOTE(review): _link_function and the ctypes names (c_uint, c_char,
# c_ptrdiff_t, CFUNCTYPE, POINTER, ...) are presumably bound earlier in this
# module — confirm before reusing this section elsewhere.
# ------------------------------------------------------------------------------
GL_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2_EXT = 35955 # GL/glext.h:3253
# NV_transform_feedback (GL/glext.h:3256)
GL_BACK_PRIMARY_COLOR_NV = 35959 # GL/glext.h:3257
GL_BACK_SECONDARY_COLOR_NV = 35960 # GL/glext.h:3258
GL_TEXTURE_COORD_NV = 35961 # GL/glext.h:3259
GL_CLIP_DISTANCE_NV = 35962 # GL/glext.h:3260
GL_VERTEX_ID_NV = 35963 # GL/glext.h:3261
GL_PRIMITIVE_ID_NV = 35964 # GL/glext.h:3262
GL_GENERIC_ATTRIB_NV = 35965 # GL/glext.h:3263
GL_TRANSFORM_FEEDBACK_ATTRIBS_NV = 35966 # GL/glext.h:3264
GL_TRANSFORM_FEEDBACK_BUFFER_MODE_NV = 35967 # GL/glext.h:3265
GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_NV = 35968 # GL/glext.h:3266
GL_ACTIVE_VARYINGS_NV = 35969 # GL/glext.h:3267
GL_ACTIVE_VARYING_MAX_LENGTH_NV = 35970 # GL/glext.h:3268
GL_TRANSFORM_FEEDBACK_VARYINGS_NV = 35971 # GL/glext.h:3269
GL_TRANSFORM_FEEDBACK_BUFFER_START_NV = 35972 # GL/glext.h:3270
GL_TRANSFORM_FEEDBACK_BUFFER_SIZE_NV = 35973 # GL/glext.h:3271
GL_TRANSFORM_FEEDBACK_RECORD_NV = 35974 # GL/glext.h:3272
GL_PRIMITIVES_GENERATED_NV = 35975 # GL/glext.h:3273
GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN_NV = 35976 # GL/glext.h:3274
GL_RASTERIZER_DISCARD_NV = 35977 # GL/glext.h:3275
GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_ATTRIBS_NV = 35978 # GL/glext.h:3276
GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS_NV = 35979 # GL/glext.h:3277
GL_INTERLEAVED_ATTRIBS_NV = 35980 # GL/glext.h:3278
GL_SEPARATE_ATTRIBS_NV = 35981 # GL/glext.h:3279
GL_TRANSFORM_FEEDBACK_BUFFER_NV = 35982 # GL/glext.h:3280
GL_TRANSFORM_FEEDBACK_BUFFER_BINDING_NV = 35983 # GL/glext.h:3281
# NV_geometry_program4 (GL/glext.h:3284)
GL_GEOMETRY_PROGRAM_NV = 35878 # GL/glext.h:3285
GL_MAX_PROGRAM_OUTPUT_VERTICES_NV = 35879 # GL/glext.h:3286
GL_MAX_PROGRAM_TOTAL_OUTPUT_COMPONENTS_NV = 35880 # GL/glext.h:3287
# NV_gpu_program4 (GL/glext.h:3290)
GL_MIN_PROGRAM_TEXEL_OFFSET_NV = 35076 # GL/glext.h:3291
GL_MAX_PROGRAM_TEXEL_OFFSET_NV = 35077 # GL/glext.h:3292
GL_PROGRAM_ATTRIB_COMPONENTS_NV = 35078 # GL/glext.h:3293
GL_PROGRAM_RESULT_COMPONENTS_NV = 35079 # GL/glext.h:3294
GL_MAX_PROGRAM_ATTRIB_COMPONENTS_NV = 35080 # GL/glext.h:3295
GL_MAX_PROGRAM_RESULT_COMPONENTS_NV = 35081 # GL/glext.h:3296
GL_MAX_PROGRAM_GENERIC_ATTRIBS_NV = 36261 # GL/glext.h:3297
GL_MAX_PROGRAM_GENERIC_RESULTS_NV = 36262 # GL/glext.h:3298
# NV_framebuffer_multisample_coverage (GL/glext.h:3301)
GL_RENDERBUFFER_COVERAGE_SAMPLES_NV = 36011 # GL/glext.h:3302
GL_RENDERBUFFER_COLOR_SAMPLES_NV = 36368 # GL/glext.h:3303
GL_MAX_RENDERBUFFER_COVERAGE_SAMPLES_NV = 36183 # GL/glext.h:3304
GL_MAX_RENDERBUFFER_COLOR_SAMPLES_NV = 36369 # GL/glext.h:3305
GL_MAX_MULTISAMPLE_COVERAGE_MODES_NV = 36370 # GL/glext.h:3306
GL_MULTISAMPLE_COVERAGE_MODES_NV = 36371 # GL/glext.h:3307
# EXT_framebuffer_multisample (GL/glext.h:3310)
GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_EXT = 36182 # GL/glext.h:3311
GL_MAX_SAMPLES_EXT = 36183 # GL/glext.h:3312
GL_RENDERBUFFER_SAMPLES_EXT = 36011 # GL/glext.h:3313
# EXT_framebuffer_blit (GL/glext.h:3316)
GL_READ_FRAMEBUFFER_EXT = 36008 # GL/glext.h:3317
GL_DRAW_FRAMEBUFFER_EXT = 36009 # GL/glext.h:3318
GL_DRAW_FRAMEBUFFER_BINDING_EXT = 36006 # GL/glext.h:3319
GL_READ_FRAMEBUFFER_BINDING_EXT = 36010 # GL/glext.h:3320
# EXT_texture_compression_rgtc (GL/glext.h:3323)
GL_COMPRESSED_RED_RGTC1_EXT = 36283 # GL/glext.h:3324
GL_COMPRESSED_SIGNED_RED_RGTC1_EXT = 36284 # GL/glext.h:3325
GL_COMPRESSED_RED_GREEN_RGTC2_EXT = 36285 # GL/glext.h:3326
GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT = 36286 # GL/glext.h:3327
# VERSION_2_0 (GL/glext.h:3333)
GLchar = c_char # GL/glext.h:3335
# VERSION_1_5 (GL/glext.h:3338)
GLintptr = c_ptrdiff_t # GL/glext.h:3340
GLsizeiptr = c_ptrdiff_t # GL/glext.h:3341
# ARB_vertex_buffer_object (GL/glext.h:3344)
GLintptrARB = c_ptrdiff_t # GL/glext.h:3346
GLsizeiptrARB = c_ptrdiff_t # GL/glext.h:3347
# ARB_shader_objects (GL/glext.h:3350)
GLcharARB = c_char # GL/glext.h:3352
GLhandleARB = c_uint # GL/glext.h:3353
# ARB_half_float_pixel (GL/glext.h:3357)
GLhalfARB = c_ushort # GL/glext.h:3358
# NV_half_float (GL/glext.h:3361)
GLhalfNV = c_ushort # GL/glext.h:3362
# EXT_timer_query (GL/glext.h:3365)
GLint64EXT = c_longlong # GL/glext.h:3366
GLuint64EXT = c_ulonglong # GL/glext.h:3367
# VERSION_1_2 (GL/glext.h:3370)
GL_VERSION_1_2 = 1 # GL/glext.h:3371
GLclampf = c_float # /usr/include/GL/gl.h:64
# GL/glext.h:3373
glBlendColor = _link_function('glBlendColor', None, [GLclampf, GLclampf, GLclampf, GLclampf], 'VERSION_1_2')
GLenum = c_uint # /usr/include/GL/gl.h:53
# GL/glext.h:3374
glBlendEquation = _link_function('glBlendEquation', None, [GLenum], 'VERSION_1_2')
GLuint = c_uint # /usr/include/GL/gl.h:62
GLsizei = c_int # /usr/include/GL/gl.h:59
GLvoid = None # /usr/include/GL/gl.h:67
# GL/glext.h:3375
glDrawRangeElements = _link_function('glDrawRangeElements', None, [GLenum, GLuint, GLuint, GLsizei, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3376
glColorTable = _link_function('glColorTable', None, [GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
GLfloat = c_float # /usr/include/GL/gl.h:63
# GL/glext.h:3377
glColorTableParameterfv = _link_function('glColorTableParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], 'VERSION_1_2')
GLint = c_int # /usr/include/GL/gl.h:58
# GL/glext.h:3378
glColorTableParameteriv = _link_function('glColorTableParameteriv', None, [GLenum, GLenum, POINTER(GLint)], 'VERSION_1_2')
# GL/glext.h:3379
glCopyColorTable = _link_function('glCopyColorTable', None, [GLenum, GLenum, GLint, GLint, GLsizei], 'VERSION_1_2')
# GL/glext.h:3380
glGetColorTable = _link_function('glGetColorTable', None, [GLenum, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3381
glGetColorTableParameterfv = _link_function('glGetColorTableParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], 'VERSION_1_2')
# GL/glext.h:3382
glGetColorTableParameteriv = _link_function('glGetColorTableParameteriv', None, [GLenum, GLenum, POINTER(GLint)], 'VERSION_1_2')
# GL/glext.h:3383
glColorSubTable = _link_function('glColorSubTable', None, [GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3384
glCopyColorSubTable = _link_function('glCopyColorSubTable', None, [GLenum, GLsizei, GLint, GLint, GLsizei], 'VERSION_1_2')
# GL/glext.h:3385
glConvolutionFilter1D = _link_function('glConvolutionFilter1D', None, [GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3386
glConvolutionFilter2D = _link_function('glConvolutionFilter2D', None, [GLenum, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3387
glConvolutionParameterf = _link_function('glConvolutionParameterf', None, [GLenum, GLenum, GLfloat], 'VERSION_1_2')
# GL/glext.h:3388
glConvolutionParameterfv = _link_function('glConvolutionParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], 'VERSION_1_2')
# GL/glext.h:3389
glConvolutionParameteri = _link_function('glConvolutionParameteri', None, [GLenum, GLenum, GLint], 'VERSION_1_2')
# GL/glext.h:3390
glConvolutionParameteriv = _link_function('glConvolutionParameteriv', None, [GLenum, GLenum, POINTER(GLint)], 'VERSION_1_2')
# GL/glext.h:3391
glCopyConvolutionFilter1D = _link_function('glCopyConvolutionFilter1D', None, [GLenum, GLenum, GLint, GLint, GLsizei], 'VERSION_1_2')
# GL/glext.h:3392
glCopyConvolutionFilter2D = _link_function('glCopyConvolutionFilter2D', None, [GLenum, GLenum, GLint, GLint, GLsizei, GLsizei], 'VERSION_1_2')
# GL/glext.h:3393
glGetConvolutionFilter = _link_function('glGetConvolutionFilter', None, [GLenum, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3394
glGetConvolutionParameterfv = _link_function('glGetConvolutionParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], 'VERSION_1_2')
# GL/glext.h:3395
glGetConvolutionParameteriv = _link_function('glGetConvolutionParameteriv', None, [GLenum, GLenum, POINTER(GLint)], 'VERSION_1_2')
# GL/glext.h:3396
glGetSeparableFilter = _link_function('glGetSeparableFilter', None, [GLenum, GLenum, GLenum, POINTER(GLvoid), POINTER(GLvoid), POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3397
glSeparableFilter2D = _link_function('glSeparableFilter2D', None, [GLenum, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid), POINTER(GLvoid)], 'VERSION_1_2')
GLboolean = c_ubyte # /usr/include/GL/gl.h:54
# GL/glext.h:3398
glGetHistogram = _link_function('glGetHistogram', None, [GLenum, GLboolean, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3399
glGetHistogramParameterfv = _link_function('glGetHistogramParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], 'VERSION_1_2')
# GL/glext.h:3400
glGetHistogramParameteriv = _link_function('glGetHistogramParameteriv', None, [GLenum, GLenum, POINTER(GLint)], 'VERSION_1_2')
# GL/glext.h:3401
glGetMinmax = _link_function('glGetMinmax', None, [GLenum, GLboolean, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3402
glGetMinmaxParameterfv = _link_function('glGetMinmaxParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], 'VERSION_1_2')
# GL/glext.h:3403
glGetMinmaxParameteriv = _link_function('glGetMinmaxParameteriv', None, [GLenum, GLenum, POINTER(GLint)], 'VERSION_1_2')
# GL/glext.h:3404
glHistogram = _link_function('glHistogram', None, [GLenum, GLsizei, GLenum, GLboolean], 'VERSION_1_2')
# GL/glext.h:3405
glMinmax = _link_function('glMinmax', None, [GLenum, GLenum, GLboolean], 'VERSION_1_2')
# GL/glext.h:3406
glResetHistogram = _link_function('glResetHistogram', None, [GLenum], 'VERSION_1_2')
# GL/glext.h:3407
glResetMinmax = _link_function('glResetMinmax', None, [GLenum], 'VERSION_1_2')
# GL/glext.h:3408
glTexImage3D = _link_function('glTexImage3D', None, [GLenum, GLint, GLint, GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3409
glTexSubImage3D = _link_function('glTexSubImage3D', None, [GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3410
glCopyTexSubImage3D = _link_function('glCopyTexSubImage3D', None, [GLenum, GLint, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei], 'VERSION_1_2')
PFNGLBLENDCOLORPROC = CFUNCTYPE(None, GLclampf, GLclampf, GLclampf, GLclampf) # GL/glext.h:3412
PFNGLBLENDEQUATIONPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:3413
PFNGLDRAWRANGEELEMENTSPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLsizei, GLenum, POINTER(GLvoid)) # GL/glext.h:3414
PFNGLCOLORTABLEPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3415
PFNGLCOLORTABLEPARAMETERFVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:3416
PFNGLCOLORTABLEPARAMETERIVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3417
PFNGLCOPYCOLORTABLEPROC = CFUNCTYPE(None, GLenum, GLenum, GLint, GLint, GLsizei) # GL/glext.h:3418
PFNGLGETCOLORTABLEPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3419
PFNGLGETCOLORTABLEPARAMETERFVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:3420
PFNGLGETCOLORTABLEPARAMETERIVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3421
PFNGLCOLORSUBTABLEPROC = CFUNCTYPE(None, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3422
PFNGLCOPYCOLORSUBTABLEPROC = CFUNCTYPE(None, GLenum, GLsizei, GLint, GLint, GLsizei) # GL/glext.h:3423
PFNGLCONVOLUTIONFILTER1DPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3424
PFNGLCONVOLUTIONFILTER2DPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3425
PFNGLCONVOLUTIONPARAMETERFPROC = CFUNCTYPE(None, GLenum, GLenum, GLfloat) # GL/glext.h:3426
PFNGLCONVOLUTIONPARAMETERFVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:3427
PFNGLCONVOLUTIONPARAMETERIPROC = CFUNCTYPE(None, GLenum, GLenum, GLint) # GL/glext.h:3428
PFNGLCONVOLUTIONPARAMETERIVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3429
PFNGLCOPYCONVOLUTIONFILTER1DPROC = CFUNCTYPE(None, GLenum, GLenum, GLint, GLint, GLsizei) # GL/glext.h:3430
PFNGLCOPYCONVOLUTIONFILTER2DPROC = CFUNCTYPE(None, GLenum, GLenum, GLint, GLint, GLsizei, GLsizei) # GL/glext.h:3431
PFNGLGETCONVOLUTIONFILTERPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3432
PFNGLGETCONVOLUTIONPARAMETERFVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:3433
PFNGLGETCONVOLUTIONPARAMETERIVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3434
PFNGLGETSEPARABLEFILTERPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, POINTER(GLvoid), POINTER(GLvoid), POINTER(GLvoid)) # GL/glext.h:3435
PFNGLSEPARABLEFILTER2DPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid), POINTER(GLvoid)) # GL/glext.h:3436
PFNGLGETHISTOGRAMPROC = CFUNCTYPE(None, GLenum, GLboolean, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3437
PFNGLGETHISTOGRAMPARAMETERFVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:3438
PFNGLGETHISTOGRAMPARAMETERIVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3439
PFNGLGETMINMAXPROC = CFUNCTYPE(None, GLenum, GLboolean, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3440
PFNGLGETMINMAXPARAMETERFVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:3441
PFNGLGETMINMAXPARAMETERIVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3442
PFNGLHISTOGRAMPROC = CFUNCTYPE(None, GLenum, GLsizei, GLenum, GLboolean) # GL/glext.h:3443
PFNGLMINMAXPROC = CFUNCTYPE(None, GLenum, GLenum, GLboolean) # GL/glext.h:3444
PFNGLRESETHISTOGRAMPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:3445
PFNGLRESETMINMAXPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:3446
PFNGLTEXIMAGE3DPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3447
PFNGLTEXSUBIMAGE3DPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3448
PFNGLCOPYTEXSUBIMAGE3DPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei) # GL/glext.h:3449
# VERSION_1_3 (GL/glext.h:3452)
GL_VERSION_1_3 = 1 # GL/glext.h:3453
# GL/glext.h:3455
glActiveTexture = _link_function('glActiveTexture', None, [GLenum], 'VERSION_1_3')
# GL/glext.h:3456
glClientActiveTexture = _link_function('glClientActiveTexture', None, [GLenum], 'VERSION_1_3')
GLdouble = c_double # /usr/include/GL/gl.h:65
# GL/glext.h:3457
glMultiTexCoord1d = _link_function('glMultiTexCoord1d', None, [GLenum, GLdouble], 'VERSION_1_3')
# GL/glext.h:3458
glMultiTexCoord1dv = _link_function('glMultiTexCoord1dv', None, [GLenum, POINTER(GLdouble)], 'VERSION_1_3')
# GL/glext.h:3459
glMultiTexCoord1f = _link_function('glMultiTexCoord1f', None, [GLenum, GLfloat], 'VERSION_1_3')
# GL/glext.h:3460
glMultiTexCoord1fv = _link_function('glMultiTexCoord1fv', None, [GLenum, POINTER(GLfloat)], 'VERSION_1_3')
# GL/glext.h:3461
glMultiTexCoord1i = _link_function('glMultiTexCoord1i', None, [GLenum, GLint], 'VERSION_1_3')
# GL/glext.h:3462
glMultiTexCoord1iv = _link_function('glMultiTexCoord1iv', None, [GLenum, POINTER(GLint)], 'VERSION_1_3')
GLshort = c_short # /usr/include/GL/gl.h:57
# GL/glext.h:3463
glMultiTexCoord1s = _link_function('glMultiTexCoord1s', None, [GLenum, GLshort], 'VERSION_1_3')
# GL/glext.h:3464
glMultiTexCoord1sv = _link_function('glMultiTexCoord1sv', None, [GLenum, POINTER(GLshort)], 'VERSION_1_3')
# GL/glext.h:3465
glMultiTexCoord2d = _link_function('glMultiTexCoord2d', None, [GLenum, GLdouble, GLdouble], 'VERSION_1_3')
# GL/glext.h:3466
glMultiTexCoord2dv = _link_function('glMultiTexCoord2dv', None, [GLenum, POINTER(GLdouble)], 'VERSION_1_3')
# GL/glext.h:3467
glMultiTexCoord2f = _link_function('glMultiTexCoord2f', None, [GLenum, GLfloat, GLfloat], 'VERSION_1_3')
# GL/glext.h:3468
glMultiTexCoord2fv = _link_function('glMultiTexCoord2fv', None, [GLenum, POINTER(GLfloat)], 'VERSION_1_3')
# GL/glext.h:3469
glMultiTexCoord2i = _link_function('glMultiTexCoord2i', None, [GLenum, GLint, GLint], 'VERSION_1_3')
# GL/glext.h:3470
glMultiTexCoord2iv = _link_function('glMultiTexCoord2iv', None, [GLenum, POINTER(GLint)], 'VERSION_1_3')
# GL/glext.h:3471
glMultiTexCoord2s = _link_function('glMultiTexCoord2s', None, [GLenum, GLshort, GLshort], 'VERSION_1_3')
# GL/glext.h:3472
glMultiTexCoord2sv = _link_function('glMultiTexCoord2sv', None, [GLenum, POINTER(GLshort)], 'VERSION_1_3')
# GL/glext.h:3473
glMultiTexCoord3d = _link_function('glMultiTexCoord3d', None, [GLenum, GLdouble, GLdouble, GLdouble], 'VERSION_1_3')
# GL/glext.h:3474
glMultiTexCoord3dv = _link_function('glMultiTexCoord3dv', None, [GLenum, POINTER(GLdouble)], 'VERSION_1_3')
# GL/glext.h:3475
glMultiTexCoord3f = _link_function('glMultiTexCoord3f', None, [GLenum, GLfloat, GLfloat, GLfloat], 'VERSION_1_3')
# GL/glext.h:3476
glMultiTexCoord3fv = _link_function('glMultiTexCoord3fv', None, [GLenum, POINTER(GLfloat)], 'VERSION_1_3')
# GL/glext.h:3477
glMultiTexCoord3i = _link_function('glMultiTexCoord3i', None, [GLenum, GLint, GLint, GLint], 'VERSION_1_3')
# GL/glext.h:3478
glMultiTexCoord3iv = _link_function('glMultiTexCoord3iv', None, [GLenum, POINTER(GLint)], 'VERSION_1_3')
# GL/glext.h:3479
glMultiTexCoord3s = _link_function('glMultiTexCoord3s', None, [GLenum, GLshort, GLshort, GLshort], 'VERSION_1_3')
# GL/glext.h:3480
glMultiTexCoord3sv = _link_function('glMultiTexCoord3sv', None, [GLenum, POINTER(GLshort)], 'VERSION_1_3')
# GL/glext.h:3481
glMultiTexCoord4d = _link_function('glMultiTexCoord4d', None, [GLenum, GLdouble, GLdouble, GLdouble, GLdouble], 'VERSION_1_3')
# GL/glext.h:3482
glMultiTexCoord4dv = _link_function('glMultiTexCoord4dv', None, [GLenum, POINTER(GLdouble)], 'VERSION_1_3')
# GL/glext.h:3483
glMultiTexCoord4f = _link_function('glMultiTexCoord4f', None, [GLenum, GLfloat, GLfloat, GLfloat, GLfloat], 'VERSION_1_3')
# GL/glext.h:3484
glMultiTexCoord4fv = _link_function('glMultiTexCoord4fv', None, [GLenum, POINTER(GLfloat)], 'VERSION_1_3')
# GL/glext.h:3485
glMultiTexCoord4i = _link_function('glMultiTexCoord4i', None, [GLenum, GLint, GLint, GLint, GLint], 'VERSION_1_3')
# GL/glext.h:3486
glMultiTexCoord4iv = _link_function('glMultiTexCoord4iv', None, [GLenum, POINTER(GLint)], 'VERSION_1_3')
# GL/glext.h:3487
glMultiTexCoord4s = _link_function('glMultiTexCoord4s', None, [GLenum, GLshort, GLshort, GLshort, GLshort], 'VERSION_1_3')
# GL/glext.h:3488
glMultiTexCoord4sv = | |
last_node = supernodes_paths[path_from_shrink[-1]]["supernode_edges"][res[-1]]
if last_node == node_pair[1]:
res.append(node_pair[1])
else:
res += supernodes_paths[path_from_shrink[-1]]["every_pair"][(last_node, node_pair[1])]
return res
source = node_pair[0]
target = node_pair[1]
source = find(supernodes, node_pair[0])
target = find(supernodes, node_pair[1])
cutoff = calculate_cutoff(shrink_graph, source, target, k=2)
paths = []
paths_nodes = []
paths_links_ids = []
visited = [source]
stack = [iter(shrink_graph[source])]
while stack:
children = stack[-1]
child = next(children, None)
if child is None:
stack.pop()
visited.pop()
elif len(visited) < cutoff:
if child == target:
new_path = visited + [target]
new_path = translate_path(new_path)
int_path = [int(n) for n in new_path]
links = construct_links_from_path(new_path, links_map)
links_ids = [(l.src, l.dst) for l in links]
paths.append(links)
paths_nodes.append(int_path)
paths_links_ids.append(links_ids)
elif child not in visited:
visited.append(child)
stack.append(iter(shrink_graph[child]))
else: # len(visited) == cutoff:
if child == target or target in children:
new_path = visited + [target]
new_path = translate_path(new_path)
int_path = [int(n) for n in new_path]
links = construct_links_from_path(new_path, links_map)
links_ids = [(l.src, l.dst) for l in links]
paths.append(links)
paths_nodes.append(int_path)
paths_links_ids.append(links_ids)
stack.pop()
visited.pop()
if len(paths) == ppp:
break
if source == target and node_pair[0] != node_pair[1]:
new_path = supernodes_paths[source]["every_pair"][(node_pair[0], node_pair[1])]
int_path = [int(n) for n in new_path]
links = construct_links_from_path(new_path, links_map)
links_ids = [(l.src, l.dst) for l in links]
paths.append(links)
paths_nodes.append(int_path)
paths_links_ids.append(links_ids)
key_pair = (int(node_pair[0]), int(node_pair[1]))
return {key_pair: (paths, paths_nodes, paths_links_ids)}
def _all_simple_paths(graph, links_map, ppp, node_pair):
    """Collect up to `ppp` simple paths between one node pair.

    graph     -- networkx graph to search
    links_map -- (src, dst) -> link object lookup table
    ppp       -- maximum number of paths to keep per pair
    node_pair -- (source, target) tuple

    Returns {(int(source), int(target)): (paths, paths_nodes, paths_links_ids)}
    where `paths` holds link objects, `paths_nodes` the node ids as ints and
    `paths_links_ids` the (src, dst) id tuples of each link.
    """
    import itertools  # local import: keeps the module import block untouched
    source = node_pair[0]
    target = node_pair[1]
    paths = []
    paths_nodes = []
    paths_links_ids = []
    # islice stops the lazy all_simple_paths generator after ppp paths,
    # replacing the former manual counter-and-break bookkeeping.
    limited = itertools.islice(
        nx.all_simple_paths(graph, source=source, target=target), ppp)
    for path in limited:
        int_path = [int(n) for n in path]
        links = construct_links_from_path(path, links_map)
        links_ids = [(l.src, l.dst) for l in links]
        paths.append(links)
        paths_nodes.append(int_path)
        paths_links_ids.append(links_ids)
    key_pair = (int(source), int(target))
    return {key_pair: (paths, paths_nodes, paths_links_ids)}
def _all_simple_paths_bfs(graph, links_map, ppp, node_pair):
    """Breadth-first enumeration of up to `ppp` simple paths for one pair.

    Paths are bounded by a depth cutoff derived from the shortest-path
    length; very small cutoffs are doubled so short topologies still
    yield alternatives.

    Returns {(int(source), int(target)): (paths, paths_nodes, paths_links_ids)}.
    """
    from collections import deque  # local import: keeps module imports untouched
    source = node_pair[0]
    target = node_pair[1]
    paths = []
    paths_nodes = []
    paths_links_ids = []
    cutoff = calculate_cutoff(graph, source, target, k=2)
    if cutoff < 4:
        cutoff *= 2
    queue = deque([(source, [source])])
    while queue:
        # Fix: list.pop(0) shifted the whole queue on every dequeue (O(n));
        # deque.popleft() is O(1) with identical FIFO behavior.
        current, path = queue.popleft()
        if path[-1] == target:
            int_path = [int(n) for n in path]
            links = construct_links_from_path(path, links_map)
            links_ids = [(l.src, l.dst) for l in links]
            paths.append(links)
            paths_nodes.append(int_path)
            paths_links_ids.append(links_ids)
        for neighbor in graph[current]:
            # Only extend simple (cycle-free) paths below the depth cutoff.
            if neighbor not in path and len(path) < cutoff:
                queue.append((neighbor, path + [neighbor]))
        if len(paths) == ppp:
            break
    key_pair = (int(source), int(target))
    return {key_pair: (paths, paths_nodes, paths_links_ids)}
def _all_simple_paths_dfs_trie(graph, links_map, ppp, node_pair):
    """Depth-first enumeration of simple paths, stored compactly in a PathTrie.

    Instead of materializing each path, found paths are inserted into a
    PathTrie keyed by the (source, target) pair; `paths` only counts them.
    Returns {(int(source), int(target)): trie}.
    """
    source = node_pair[0]
    target = node_pair[1]
    # Depth bound: twice the shortest-path length (see calculate_cutoff).
    cutoff = calculate_cutoff(graph, source, target, k=2)
    paths = []  # only used as a counter of found paths ('1' per path)
    paths_nodes = []
    paths_links_ids = []
    # Iterative DFS: `visited` is the current path, `stack` holds one
    # neighbor iterator per node on that path.
    visited = [source]
    stack = [iter(graph[source])]
    trie = PathTrie(int(source), int(target))
    while stack:
        children = stack[-1]
        child = next(children, None)
        if child is None:
            # Current node exhausted; backtrack.
            stack.pop()
            visited.pop()
        elif len(visited) < cutoff:
            if child == target:
                new_path = visited + [target]
                int_path = [int(n) for n in new_path]
                # links = construct_links_from_path(new_path, links_map)
                # links_ids = [(l.src, l.dst) for l in links]
                trie.add(int_path)
                paths.append('1')
                # paths_nodes.append(int_path)
                # paths_links_ids.append(links_ids)
            elif child not in visited:
                visited.append(child)
                stack.append(iter(graph[child]))
        else: # len(visited) == cutoff:
            # At the depth limit: record a path if the target is reachable
            # in one hop.  NOTE: `target in children` deliberately consumes
            # the remaining neighbors of this node -- backtracking follows
            # immediately, so the drained iterator is never reused.
            if child == target or target in children:
                new_path = visited + [target]
                int_path = [int(n) for n in new_path]
                # links = construct_links_from_path(new_path, links_map)
                # links_ids = [(l.src, l.dst) for l in links]
                trie.add(int_path)
                paths.append('1')
                # paths_nodes.append(int_path)
                # paths_links_ids.append(links_ids)
            stack.pop()
            visited.pop()
    # if len(paths) > 2000:
    # print 'XXX', source, target
    # paths = sorted(paths, key=lambda path: len(path))
    key_pair = (int(source), int(target))
    # return {key_pair: (paths, paths_nodes, paths_links_ids)}
    return {key_pair: trie}
def calculate_simple_paths_trie(graph, links_map, ppp, pool_size=4):
    """Build a PathTrie of simple paths for every ordered node pair.

    The per-pair DFS runs in a multiprocessing pool; merge() folds the
    per-pair single-entry dicts into one dict mapping (src, dst) int
    pairs to PathTrie objects.
    """
    p = Pool(pool_size)
    try:
        pairs = [(n1, n2) for n1 in graph.nodes() for n2 in graph.nodes() if n1 != n2]
        fn = partial(_all_simple_paths_dfs_trie, graph, links_map, ppp)
        simple_paths = merge(p.map(fn, pairs))
    finally:
        # Fix: the pool's worker processes leaked when the parallel map
        # raised; always release them.
        p.close()
    return simple_paths
def paths_ratio(graph, links_map, ppp, pool_size=4):
    """Diagnostic: report how many simple paths survive the DFS cutoff.

    For every ordered node pair, compares the number of paths actually
    kept (path_tuples[0]) against the total counted by the DFS
    (path_tuples[3]) and prints per-pair and aggregate ratios.
    Returns the merged per-pair results unchanged.
    """
    p = Pool(pool_size)
    pmap = p.map
    pairs = [(n1, n2) for n1 in graph.nodes() for n2 in graph.nodes() if n1 != n2]
    fn = partial(_all_simple_paths_dfs, graph, links_map, ppp)
    simple_paths = merge(pmap(fn, pairs))
    all_paths = 0.0
    ok_paths = 0.0
    count = 0.0
    summ = 0.0
    averages = []
    for path_key, path_tuples in simple_paths.iteritems():
        count += 1
        ok_paths += len(path_tuples[0])
        all_paths += path_tuples[3]
        # Per-pair ratio of kept paths to all enumerated paths.
        summ += len(path_tuples[0]) / float(path_tuples[3])
        print "--", path_key, len(path_tuples[0]) / float(path_tuples[3])
        averages.append(len(path_tuples[0]) / float(path_tuples[3]))
    # Aggregate ratio vs. mean of the per-pair ratios (these differ when
    # pairs have very different path counts).
    print ">>>>>>>>", ok_paths / all_paths
    print "Average of average", summ/ count
    print "MEAN", statistics.mean(averages)
    print "variance", statistics.variance(averages)
    p.close()
    return simple_paths
def calculate_simple_paths(graph, links_map, ppp, pool_size=4):
    """Compute up to `ppp` disjoint simple paths for every ordered node pair.

    The per-pair BFS runs in a multiprocessing pool; afterwards every
    link in every path is re-resolved through `links_map` so the caller
    receives this process's canonical link objects (worker processes
    return pickled copies).
    """
    p = Pool(pool_size)
    try:
        pairs = [(n1, n2) for n1 in graph.nodes() for n2 in graph.nodes() if n1 != n2]
        fn = partial(_all_simple_paths_bfs_disjoint, graph, links_map, ppp)
        simple_paths = merge(p.map(fn, pairs))
    finally:
        # Fix: the pool's worker processes leaked when the parallel map
        # raised; always release them.
        p.close()
    for path_key in simple_paths:
        path_tuples = simple_paths[path_key]
        # NB: index into path_tuples instead of unpacking -- some path
        # producers return 4-tuples whose extra element is dropped here.
        new_paths = [[links_map[(link.src, link.dst)] for link in path]
                     for path in path_tuples[0]]
        simple_paths[path_key] = (new_paths, path_tuples[1], path_tuples[2])
    return simple_paths
def calculate_simple_paths_regenerate(graph, links_map, ppp, pool_size=4, pairs=None):
    """Recompute disjoint simple paths for an explicit list of node pairs.

    Unlike calculate_simple_paths(), the caller supplies the pairs to
    refresh (e.g. after a topology change).  Fix: the former `pairs=[]`
    mutable default argument was shared across calls; None is the
    backward-compatible replacement.
    """
    if pairs is None:
        pairs = []
    p = Pool(pool_size)
    try:
        fn = partial(_all_simple_paths_bfs_disjoint, graph, links_map, ppp)
        simple_paths = merge(p.map(fn, pairs))
    finally:
        # Release worker processes even if the parallel map raises.
        p.close()
    for path_key in simple_paths:
        path_tuples = simple_paths[path_key]
        # Re-resolve links through links_map to get canonical objects.
        new_paths = [[links_map[(link.src, link.dst)] for link in path]
                     for path in path_tuples[0]]
        simple_paths[path_key] = (new_paths, path_tuples[1], path_tuples[2])
    return simple_paths
def calculate_simple_paths2(graph, links_map, ppp, pool_size=4):
    """Compute disjoint simple paths for all ordered node pairs in parallel.

    Uses the `_all_simple_paths_disjoint` strategy, then swaps every link
    in every returned path for the canonical object held in `links_map`.
    """
    pool = Pool(pool_size)
    node_pairs = [(a, b) for a in graph.nodes() for b in graph.nodes() if a != b]
    worker = partial(_all_simple_paths_disjoint, graph, links_map, ppp)
    simple_paths = merge(pool.map(worker, node_pairs))
    for key in simple_paths.keys():
        entry = simple_paths[key]
        # Index (don't unpack) -- producers may return tuples longer than 3.
        canonical = [[links_map[(l.src, l.dst)] for l in path] for path in entry[0]]
        simple_paths[key] = (canonical, entry[1], entry[2])
    pool.close()
    return simple_paths
def calculate_simple_paths3(graph, links_map, ppp, pool_size=4):
    """Compute simple paths for all ordered node pairs via the DFS strategy.

    Runs `_all_simple_paths_dfs` per pair in a multiprocessing pool, then
    replaces each path's links with the canonical objects from `links_map`.
    """
    pool = Pool(pool_size)
    node_pairs = [(a, b) for a in graph.nodes() for b in graph.nodes() if a != b]
    worker = partial(_all_simple_paths_dfs, graph, links_map, ppp)
    simple_paths = merge(pool.map(worker, node_pairs))
    for key in simple_paths.keys():
        entry = simple_paths[key]
        # Index (don't unpack) -- the DFS producer returns a 4-tuple whose
        # extra element is intentionally dropped here.
        canonical = [[links_map[(l.src, l.dst)] for l in path] for path in entry[0]]
        simple_paths[key] = (canonical, entry[1], entry[2])
    pool.close()
    return simple_paths
def calculate_simple_paths_shrink(graph, links_map, ppp, pool_size=4):
    """Compute simple paths on a contracted copy of the graph.

    Linear chains are contracted into supernodes first; the per-pair DFS
    then runs on the smaller shrink graph while results are expressed in
    terms of the original nodes.  Finally every link is replaced by the
    canonical object from `links_map`.
    """
    pool = Pool(pool_size)
    node_pairs = [(a, b) for a in graph.nodes() for b in graph.nodes() if a != b]
    supernodes, shrink_graph = contract_linear_links(graph)
    worker = partial(_all_simple_paths_dfs_super, shrink_graph, graph, supernodes, links_map, ppp)
    simple_paths = merge(pool.map(worker, node_pairs))
    for key in simple_paths.keys():
        entry = simple_paths[key]
        # Index (don't unpack) -- producers may return tuples longer than 3.
        canonical = [[links_map[(l.src, l.dst)] for l in path] for path in entry[0]]
        simple_paths[key] = (canonical, entry[1], entry[2])
    pool.close()
    return simple_paths
def add_path_to_graph(graph, path):
    """Add every consecutive node pair of `path` to `graph` as an edge.

    A path [a, b, c] contributes edges (a, b) and (b, c); an empty or
    single-node path adds nothing.  `graph` only needs an add_edge(u, v)
    method (e.g. a networkx graph).
    """
    # zip pairs each node with its successor, replacing manual i/j index
    # bookkeeping; zip stops at the shorter sequence, so the last node is
    # never paired past the end.
    for u, v in zip(path, path[1:]):
        graph.add_edge(u, v)
def calculate_cutoff(graph, src, dst, k):
    """Depth cutoff for path searches between src and dst.

    Returns k times the shortest-path length, capped at |V| - 1 (the
    longest possible simple path).  When no path exists (or the lookup
    fails for any reason) a large sentinel is returned so callers
    effectively search unbounded.
    """
    fallback = 100000
    try:
        shortest_path_len = nx.shortest_path_length(graph, src, dst)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  nx raises NetworkXNoPath when dst is
        # unreachable; any failure falls back to the sentinel as before.
        return fallback
    cutoff = int(k * shortest_path_len)
    return min(cutoff, len(graph.nodes()) - 1)
def all_simple_paths_dfs_compact(graph, k, node_pair):
    """DFS-enumerate simple paths and store them compactly as one DiGraph.

    Rather than keeping each path, every found path's edges are merged
    into a single nx.DiGraph, which acts as a compact union of all
    simple paths (bounded by the k-scaled cutoff).
    Returns {(int(source), int(target)): new_graph}.
    """
    source = node_pair[0]
    target = node_pair[1]
    cutoff = calculate_cutoff(graph, source, target, k)
    new_graph = nx.DiGraph()
    # Iterative DFS: `visited` is the current path, `stack` holds one
    # neighbor iterator per node on that path.
    visited = [source]
    stack = [iter(graph[source])]
    while stack:
        children = stack[-1]
        child = next(children, None)
        if child is None:
            # Current node exhausted; backtrack.
            stack.pop()
            visited.pop()
        elif len(visited) < cutoff:
            if child == target:
                new_path = visited + [target]
                add_path_to_graph(new_graph, new_path)
            elif child not in visited:
                visited.append(child)
                stack.append(iter(graph[child]))
        else: # len(visited) == cutoff:
            # At the depth limit: record a path if target is one hop away.
            # NOTE: `target in children` deliberately drains the remaining
            # neighbor iterator; backtracking follows immediately, so the
            # consumed iterator is never reused.
            if child == target or target in children:
                new_path = visited + [target]
                add_path_to_graph(new_graph, new_path)
            stack.pop()
            visited.pop()
    return {(int(source), int(target)): new_graph}
class PathTrieNode:
    def __init__(self, node_id, parent):
        """One trie node representing a graph node along stored paths.

        node_id -- graph node id this trie node stands for
        parent  -- parent PathTrieNode (presumably None at the root --
                   TODO confirm against PathTrie's constructor)
        """
        self.node_id = node_id
        self.parent = parent
        # child node_id -> PathTrieNode
        self.children = {}
        # set of link identifiers; populated elsewhere (not visible here)
        self.link_ids = set()
        # usage counter, incremented elsewhere (not visible here)
        self.counter = 0
# | |
<reponame>grassking100/optuna<gh_stars>0
import contextlib
import copy
import time
import lightgbm as lgb
import numpy as np
import tqdm
import optuna
from optuna.integration.lightgbm_tuner.alias import _handling_alias_metrics
from optuna.integration.lightgbm_tuner.alias import _handling_alias_parameters
from optuna import type_checking
if type_checking.TYPE_CHECKING:
from typing import Any # NOQA
from typing import Callable # NOQA
from typing import Dict # NOQA
from typing import Generator # NOQA
from typing import List # NOQA
from typing import Optional # NOQA
from typing import Tuple # NOQA
from typing import Union # NOQA
from optuna.distributions import BaseDistribution # NOQA
from optuna.structs import FrozenTrial # NOQA
from optuna.study import Study # NOQA
from optuna.trial import Trial # NOQA
# Everything lgb.train() accepts for its `valid_sets` argument.
VALID_SET_TYPE = Union[List[lgb.Dataset], Tuple[lgb.Dataset, ...], lgb.Dataset]
# EPS is used to ensure that a sampled parameter value is in pre-defined value range.
EPS = 1e-12
# Default value of tree_depth, used for upper bound of num_leaves
DEFAULT_TUNER_TREE_DEPTH = 8
# Default parameter values described in the official webpage.
DEFAULT_LIGHTGBM_PARAMETERS = {
    "lambda_l1": 0.0,
    "lambda_l2": 0.0,
    "num_leaves": 31,
    "feature_fraction": 1.0,
    "bagging_fraction": 1.0,
    "bagging_freq": 0,
    "min_child_samples": 20,
}
class _GridSamplerUniform1D(optuna.samplers.BaseSampler):
    """Sampler that walks once through a fixed 1-D grid for a single parameter.

    Each call to sample_relative() returns the next value of the grid, so
    running len(param_values) trials evaluates every grid point exactly once.
    """

    def __init__(self, param_name, param_values):
        # type: (str, Any) -> None
        self.param_name = param_name
        self.param_values = tuple(param_values)
        self.value_idx = 0

    def sample_relative(self, study, trial, search_space):
        # type: (Study, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, float]
        # todo (g-votte): Take care of distributed optimization.
        assert self.value_idx < len(self.param_values)
        next_value = self.param_values[self.value_idx]
        self.value_idx += 1
        return {self.param_name: next_value}

    def sample_independent(self, study, trial, param_name, param_distribution):
        # type: (Study, FrozenTrial, str, BaseDistribution) -> None
        # Every parameter must come from the grid; anything else is a bug.
        raise ValueError(
            "Suggest method is called for an invalid parameter: {}.".format(param_name)
        )

    def infer_relative_search_space(self, study, trial):
        # type: (Study, FrozenTrial) -> Dict[str, BaseDistribution]
        # An unbounded distribution: the actual values come from the grid.
        unbounded = optuna.distributions.UniformDistribution(-float("inf"), float("inf"))
        return {self.param_name: unbounded}
class _TimeKeeper(object):
def __init__(self):
# type: () -> None
self.time = time.time()
def elapsed_secs(self):
# type: () -> float
return time.time() - self.time
@contextlib.contextmanager
def _timer():
    # type: () -> Generator[_TimeKeeper, None, None]
    """Yield a _TimeKeeper started on entry; elapsed time is readable both inside and after the with-block."""
    timekeeper = _TimeKeeper()
    yield timekeeper
class BaseTuner(object):
    """Common helpers shared by the LightGBM tuner objects.

    Holds the LightGBM parameter dict (`lgbm_params`) and the keyword
    arguments destined for lgb.train() (`lgbm_kwargs`), and knows how to
    read a validation score back out of a trained booster.
    """

    def __init__(self, lgbm_params=None, lgbm_kwargs=None):
        # type: (Dict[str, Any], Dict[str, Any]) -> None
        # Handling alias metrics (normalizes e.g. "auc_mu"-style aliases in place).
        if lgbm_params is not None:
            _handling_alias_metrics(lgbm_params)

        self.lgbm_params = lgbm_params or {}
        self.lgbm_kwargs = lgbm_kwargs or {}

    def _get_booster_best_score(self, booster):
        # type: (lgb.Booster) -> float
        """Return `booster`'s best score for the configured metric on the
        last validation set.

        Raises NotImplementedError for metric/valid_sets shapes this
        simplified logic does not handle.
        """
        metric = self.lgbm_params.get("metric", "binary_logloss")

        # todo (smly): This implementation is different logic from the LightGBM's python bindings.
        # isinstance replaces the former exact type() checks; subclasses now
        # also match, which is strictly more permissive.
        if isinstance(metric, str):
            pass
        elif isinstance(metric, list):
            metric = metric[-1]
        elif isinstance(metric, set):
            # NOTE(review): set iteration order is arbitrary, so "the last
            # element" of a multi-metric set is not well defined here --
            # behavior preserved as-is.
            metric = list(metric)[-1]
        else:
            raise NotImplementedError

        valid_sets = self.lgbm_kwargs.get("valid_sets")  # type: Optional[VALID_SET_TYPE]

        # Resolve the name of the validation set whose score we report:
        # explicit valid_names wins, otherwise LightGBM's "valid_N" naming.
        if self.lgbm_kwargs.get("valid_names") is not None:
            if isinstance(self.lgbm_kwargs["valid_names"], str):
                valid_name = self.lgbm_kwargs["valid_names"]
            elif isinstance(self.lgbm_kwargs["valid_names"], (list, tuple)):
                valid_name = self.lgbm_kwargs["valid_names"][-1]
            else:
                raise NotImplementedError

        elif isinstance(valid_sets, lgb.Dataset):
            valid_name = "valid_0"

        elif isinstance(valid_sets, (list, tuple)) and len(valid_sets) > 0:
            valid_set_idx = len(valid_sets) - 1
            valid_name = "valid_{}".format(valid_set_idx)

        else:
            raise NotImplementedError

        metric = self._metric_with_eval_at(metric)
        val_score = booster.best_score[valid_name][metric]
        return val_score

    def _metric_with_eval_at(self, metric):
        # type: (str) -> str
        """Append the @k suffix LightGBM uses for ranking metrics (ndcg/map)."""
        if metric != "ndcg" and metric != "map":
            return metric

        eval_at = self.lgbm_params.get("eval_at")
        if eval_at is None:
            eval_at = self.lgbm_params.get("{}_at".format(metric))
        if eval_at is None:
            eval_at = self.lgbm_params.get("{}_eval_at".format(metric))
        if eval_at is None:
            # Set default value of LightGBM.
            # See https://lightgbm.readthedocs.io/en/latest/Parameters.html#eval_at.
            eval_at = [1, 2, 3, 4, 5]

        # Optuna can handle only a single metric. Choose first one.
        if isinstance(eval_at, (list, tuple)):
            return "{}@{}".format(metric, eval_at[0])
        if isinstance(eval_at, int):
            return "{}@{}".format(metric, eval_at)
        raise ValueError(
            "The value of eval_at is expected to be int or a list/tuple of int."
            "'{}' is specified.".format(eval_at)
        )

    def higher_is_better(self):
        # type: () -> bool
        """True when larger values of the configured metric are better."""
        metric_name = self.lgbm_params.get("metric", "binary_logloss")
        return metric_name.startswith(("auc", "ndcg", "map"))

    def compare_validation_metrics(self, val_score, best_score):
        # type: (float, float) -> bool
        """True when `val_score` improves on `best_score` for this metric's direction."""
        if self.higher_is_better():
            return val_score > best_score
        else:
            return val_score < best_score
class OptunaObjective(BaseTuner):
    """Objective for hyperparameter-tuning with Optuna.

    Each call samples values for `target_param_names`, trains one booster
    and returns its validation score; the best booster/score and a
    per-trial report are tracked on the instance.
    """

    def __init__(
        self,
        target_param_names,  # type: List[str]
        lgbm_params,  # type: Dict[str, Any]
        train_set,  # type: lgb.Dataset
        lgbm_kwargs,  # type: Dict[str, Any]
        best_score,  # type: float
        pbar=None,  # type: Optional[tqdm.tqdm]
    ):
        self.target_param_names = target_param_names
        self.pbar = pbar
        self.lgbm_params = lgbm_params
        self.lgbm_kwargs = lgbm_kwargs
        self.train_set = train_set

        self.report = []  # type: List[Dict[str, Any]]
        self.trial_count = 0
        self.best_score = best_score
        self.best_booster = None
        self.action = "tune_" + "_and_".join(self.target_param_names)

        self._check_target_names_supported()

    def _check_target_names_supported(self):
        # type: () -> None
        """Raise NotImplementedError for any unsupported target parameter name."""
        supported_param_names = [
            "lambda_l1",
            "lambda_l2",
            "num_leaves",
            "feature_fraction",
            "bagging_fraction",
            "bagging_freq",
            "min_child_samples",
        ]
        for target_param_name in self.target_param_names:
            if target_param_name not in supported_param_names:
                # Bug fix: the message previously lacked .format(), so the
                # offending parameter name was never interpolated (and
                # "tuning" was misspelled).
                raise NotImplementedError(
                    "Parameter `{}` is not supported for tuning.".format(target_param_name)
                )

    def __call__(self, trial):
        # type: (Trial) -> float
        """Sample targeted parameters, train a booster and return its val score."""
        pbar_fmt = "{}, val_score: {:.6f}"

        if self.pbar is not None:
            self.pbar.set_description(pbar_fmt.format(self.action, self.best_score))

        if "lambda_l1" in self.target_param_names:
            self.lgbm_params["lambda_l1"] = trial.suggest_loguniform("lambda_l1", 1e-8, 10.0)
        if "lambda_l2" in self.target_param_names:
            self.lgbm_params["lambda_l2"] = trial.suggest_loguniform("lambda_l2", 1e-8, 10.0)
        if "num_leaves" in self.target_param_names:
            # Upper bound: a full binary tree of depth `max_depth`.
            tree_depth = self.lgbm_params.get("max_depth", DEFAULT_TUNER_TREE_DEPTH)
            max_num_leaves = 2 ** tree_depth if tree_depth > 0 else 2 ** DEFAULT_TUNER_TREE_DEPTH
            self.lgbm_params["num_leaves"] = trial.suggest_int("num_leaves", 2, max_num_leaves)
        if "feature_fraction" in self.target_param_names:
            # `_GridSamplerUniform1D` is used for sampling feature_fraction value.
            # The value 1.0 for the hyperparameter is always sampled.
            param_value = min(trial.suggest_uniform("feature_fraction", 0.4, 1.0 + EPS), 1.0)
            self.lgbm_params["feature_fraction"] = param_value
        if "bagging_fraction" in self.target_param_names:
            # `TPESampler` is used for sampling bagging_fraction value.
            # The value 1.0 for the hyperparameter might by sampled.
            param_value = min(trial.suggest_uniform("bagging_fraction", 0.4, 1.0 + EPS), 1.0)
            self.lgbm_params["bagging_fraction"] = param_value
        if "bagging_freq" in self.target_param_names:
            self.lgbm_params["bagging_freq"] = trial.suggest_int("bagging_freq", 1, 7)
        if "min_child_samples" in self.target_param_names:
            # `_GridSamplerUniform1D` is used for sampling min_child_samples value.
            # The value 1.0 for the hyperparameter is always sampled.
            param_value = int(trial.suggest_uniform("min_child_samples", 5, 100 + EPS))
            self.lgbm_params["min_child_samples"] = param_value

        with _timer() as t:
            booster = lgb.train(self.lgbm_params, self.train_set, **self.lgbm_kwargs)

        val_score = self._get_booster_best_score(booster)
        elapsed_secs = t.elapsed_secs()
        average_iteration_time = elapsed_secs / booster.current_iteration()
        if self.compare_validation_metrics(val_score, self.best_score):
            self.best_score = val_score
            self.best_booster = booster

        if self.pbar is not None:
            self.pbar.set_description(pbar_fmt.format(self.action, self.best_score))
            self.pbar.update(1)

        self.report.append(
            dict(
                action=self.action,
                trial=self.trial_count,
                value=str(trial.params),
                val_score=val_score,
                elapsed_secs=elapsed_secs,
                average_iteration_time=average_iteration_time,
            )
        )

        self.trial_count += 1

        return val_score
class LightGBMTuner(BaseTuner):
"""Hyperparameter-tuning with Optuna for LightGBM."""
def __init__(
self,
params, # type: Dict[str, Any]
train_set, # type: lgb.Dataset
num_boost_round=1000, # type: int
valid_sets=None, # type: Optional[VALID_SET_TYPE]
valid_names=None, # type: Optional[Any]
fobj=None, # type: Optional[Callable[..., Any]]
feval=None, # type: Optional[Callable[..., Any]]
feature_name="auto", # type: str
categorical_feature="auto", # type: str
early_stopping_rounds=None, # type: Optional[int]
evals_result=None, # type: Optional[Dict[Any, Any]]
verbose_eval=True, # type: Optional[bool]
learning_rates=None, # type: Optional[List[float]]
keep_training_booster=False, # type: Optional[bool]
callbacks=None, # type: Optional[List[Callable[..., Any]]]
time_budget=None, # type: Optional[int]
sample_size=None, # type: Optional[int]
best_params=None, # type: Optional[Dict[str, Any]]
tuning_history=None, # type: Optional[List[Dict[str, Any]]]
verbosity=1, # type: Optional[int]
):
params = copy.deepcopy(params)
# Handling alias metrics.
_handling_alias_metrics(params)
args = [params, train_set]
kwargs = dict(
num_boost_round=num_boost_round,
valid_sets=valid_sets,
valid_names=valid_names,
fobj=fobj,
feval=feval,
feature_name=feature_name,
categorical_feature=categorical_feature,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result,
verbose_eval=verbose_eval,
learning_rates=learning_rates,
keep_training_booster=keep_training_booster,
callbacks=callbacks,
time_budget=time_budget,
verbosity=verbosity,
sample_size=sample_size,
) # type: Dict[str, Any]
self._parse_args(*args, **kwargs)
self.best_booster = None
self.best_score = -np.inf if self.higher_is_better() else np.inf
self.best_params = {} if best_params is None else best_params
self.tuning_history = [] if tuning_history is None else tuning_history
# Set default parameters as best.
self.best_params.update(DEFAULT_LIGHTGBM_PARAMETERS)
if valid_sets is None:
raise ValueError("`valid_sets` is required.")
def _get_params(self):
# type: () -> Dict[str, Any]
params = copy.deepcopy(self.lgbm_params)
params.update(self.best_params)
return params
def _parse_args(self, *args, **kwargs):
# type: (Any, Any) -> None
self.auto_options = {
option_name: kwargs.get(option_name)
for option_name in [
"time_budget",
"sample_size",
"best_params",
"tuning_history",
"verbosity",
]
}
# Split options.
for option_name in self.auto_options.keys():
if option_name in kwargs:
del kwargs[option_name]
self.lgbm_params = args[0]
self.train_set = args[1]
self.train_subset = None # Use for sampling.
self.lgbm_kwargs = kwargs
def run(self):
# type: () -> lgb.Booster
"""Perform the hyperparameter-tuning with given parameters.
Returns:
booster : Booster
The trained Booster model.
"""
# Surpress log messages.
if self.auto_options["verbosity"] == 0:
optuna.logging.disable_default_handler()
self.lgbm_params["verbose"] = -1
self.lgbm_params["seed"] = 111
self.lgbm_kwargs["verbose_eval"] = False
# Handling aliases.
_handling_alias_parameters(self.lgbm_params)
# Sampling.
self.sample_train_set()
# Tuning.
time_budget = self.auto_options["time_budget"]
self.start_time = time.time()
with _timer() as t:
self.tune_feature_fraction()
if time_budget is not None and time_budget < t.elapsed_secs():
return self.best_booster
self.tune_num_leaves()
if time_budget is not None and time_budget < t.elapsed_secs():
return self.best_booster
self.tune_bagging()
if time_budget is not None and time_budget < t.elapsed_secs():
return self.best_booster
self.tune_feature_fraction_stage2()
if time_budget is not None and time_budget < t.elapsed_secs():
return self.best_booster
self.tune_regularization_factors()
if time_budget is not None and time_budget < t.elapsed_secs():
return self.best_booster
self.tune_min_data_in_leaf()
if time_budget is not None and time_budget < t.elapsed_secs():
return self.best_booster
return self.best_booster
def sample_train_set(self):
# | |
<gh_stars>0
#! /usr/bin/env python2.7
#
# Migrate Picasa Web Album Archive to Smugmug
#
# Requires:
# Python 2.7
# gdata 2.0 python library
#
# <NAME> <EMAIL>
#
# based on:
# https://github.com/jackpal/picasawebuploader
# https://github.com/marekrei/smuploader
# http://nathanvangheem.com/news/moving-to-picasa-update
#
# excludes the following automatically created Google+ albums:
# Auto Backups, Photos from Postings, Profile Photos
# https://get.google.com/albumarchive/GOOGLEUSERID/albums/photos-from-posts
# https://get.google.com/albumarchive/GOOGLEUSERID/albums/profile-photos
# Debian Linux:
# apt-get install python-gdata
# apt-get install python-httplib2
# pip install google-api-python-client
# MacOS + MacPorts
# sudo port install py27-pip
# sudo -H /opt/local/bin/pip-2.7 uninstall atom
# sudo -H /opt/local/bin/pip-2.7 install keyring gdata google-auth-httplib2 google-api-python-client rauth parse
# You will be asked to open two URLs in a web browser.
# One to allow access to your Picasaweb account
# and one to allow access to your SmugMug account.
# Only if the --credfile parameter is used, the created API keys are stored on your local system.
# usage example:
# picasaweb2smugmug.py --gmail <EMAIL> --smgname smguser \
# --smgfolder import --credfile credentials.json --outdir backupdir
# Note, that the smugmuguser is the hostname part of the "site URL": https://SMUGMUGUSER.smugmug.com
#
# Required Parameters:
# --gmail Google email address to access PicasaWeb Archive
# --smgname SmugMug user name (site name) to access SmugMug Account
#
# Optional Parameters:
# --smgfolder Destination folder where albums should be created
# --credfile local storage file, will be created, to reuse authentication on next run
# --outdir if given, the Picasaweb photos are copied to this local directory
# --imgcmd image-processing command to be applied to each photo before uploading
#            it is expected to take 2 parameters: input-filename and output-filename
# --imgcmdsfx filename suffix to be appended to output-filename, default "_wm"
# --albnr only process given album number
# --dry dry run, don't write anything to the SmugMug account
import sys
if sys.version_info < (2,7):
sys.stderr.write("This script requires Python 2.7 or newer.\n")
sys.stderr.write("Current version: " + sys.version + "\n")
sys.stderr.flush()
sys.exit(1)
import argparse
import httplib2, urllib2
import string, unicodedata
import hashlib, re, json
import os, stat, keyring, time, shutil
from rauth import OAuth1Service, OAuth1Session
from urlparse import urlsplit, urlunsplit, parse_qsl
from urllib import urlencode
from datetime import datetime, timedelta
from getpass import getpass
from subprocess import call
# Google Data API and Google OAuth 2
import gdata.photos.service
from oauth2client.contrib.keyring_storage import Storage
from oauth2client.client import OAuth2WebServerFlow
# Google OAuth 2
# App_id, Client_id and Client_secret are permanently registered by the author.
# In case they are removed in the future, create your own OAuth 2 Client entry of type "other"
# on https://console.cloud.google.com/apis and replace them here.
GOOGLE_SCOPE = 'https://picasaweb.google.com/data/'
GOOGLE_REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
GOOGLE_APP_ID = "picasaweb2smugmug"
GOOGLE_CLIENT_ID = '31124284564-d69979j083npmao9gi5ikpamju7uijs1.apps.googleusercontent.com'
GOOGLE_CLIENT_SECRET = '<KEY>'
# Smugmug API v2
# APP_name, API_key and API_secret are permanently registered by the author.
# In case they are removed in the future create your own API key of type Application
# on https://api.smugmug.com/api/developer/apply
# and replace them here:
SMG_APP_NAME = "picasaweb2smugmug"
SMG_API_KEY = "<KEY>"
SMG_API_SECRET = "<KEY>"
SMG_OAUTH_ORIGIN = 'https://secure.smugmug.com'
SMG_REQUEST_TOKEN_URL = SMG_OAUTH_ORIGIN + '/services/oauth/1.0a/getRequestToken'
SMG_ACCESS_TOKEN_URL = SMG_OAUTH_ORIGIN + '/services/oauth/1.0a/getAccessToken'
SMG_AUTHORIZE_URL = SMG_OAUTH_ORIGIN + '/services/oauth/1.0a/authorize'
SMG_API_VERSION = 'v2'
SMG_API_BASE_URL = 'https://api.smugmug.com/api/' + SMG_API_VERSION
SMG_UPLOAD_URI = 'http://upload.smugmug.com/'
def gd_auth(storage):
    """Run the interactive Google OAuth 2 flow and persist the credentials.

    Prints an authorization URL, asks the user for the code Google shows,
    exchanges it for credentials, stores them in *storage* and returns them.
    """
    # (The original also constructed an unused PhotosService here; removed.)
    flow = OAuth2WebServerFlow(GOOGLE_CLIENT_ID, GOOGLE_CLIENT_SECRET, GOOGLE_SCOPE,
                               redirect_uri=GOOGLE_REDIRECT_URI)
    authorize_url = flow.step1_get_authorize_url()
    print('\nTo allow read access to your Picasaweb account open the following link in a web browser')
    print('and copy the authentication code shown afterwards:\n\n%s' % authorize_url)
    # getpass keeps the pasted code off the terminal echo.
    code = getpass('\nEnter authentication code: ').strip()
    credentials = flow.step2_exchange(code)
    storage.put(credentials)
    return credentials
def gd_login(storage, service_name, user_name):
    """Return an authorized gdata PhotosService client.

    Credentials are read from *storage*; if that fails or they are invalid,
    the interactive gd_auth() flow is (re)run.
    """
    try:
        credentials = storage.get()
    except Exception as e:
        print("cannot read Google OAuth credentials from local storage: " + str(e))
        # Probably file could not be found, so redo auth:
        credentials = gd_auth(storage)
    if credentials is None or credentials.invalid:
        # Stored credentials unusable -- redo interactive auth, then re-read
        # what gd_auth() just stored.
        credentials = gd_auth(storage)
        credentials = storage.get()
    http = httplib2.Http()
    http = credentials.authorize(http)  # wrap transport with OAuth signing
    gd_client = gdata.photos.service.PhotosService()
    gd_client.email = user_name
    gd_client.source = service_name
    # PhotosService predates OAuth 2, so the bearer token is injected as a
    # raw Authorization header instead of via the library.
    gd_client.additional_headers = {'Authorization' : 'Bearer %s' % credentials.access_token}
    gd_refresh(gd_client, storage)  # refresh immediately if close to expiry
    return gd_client
def gd_refresh(gd_client, storage):
    """Refresh the stored OAuth 2 token when it is about to expire and
    re-inject the (possibly new) bearer token into gd_client's headers."""
    creds = storage.get()
    http = httplib2.Http()
    expires_soon = (creds.token_expiry - datetime.utcnow()) < timedelta(minutes=5)
    if expires_soon:
        creds.refresh(http)
    gd_client.additional_headers = {'Authorization' : 'Bearer %s' % creds.access_token}
def smugmug_add_auth_params(auth_url, access=None, permissions=None):
    """Return auth_url with optional Access / Permissions query params added.

    When neither is given the URL is returned untouched.
    """
    if access is None and permissions is None:
        return auth_url
    scheme, netloc, path, query_str, fragment = urlsplit(auth_url)
    query = parse_qsl(query_str, True)
    for key, value in (('Access', access), ('Permissions', permissions)):
        if value is not None:
            query.append((key, value))
    return urlunsplit((scheme, netloc, path, urlencode(query, True), fragment))
def smugmug_get_token(service):
    """Interactively obtain a SmugMug OAuth 1 access token/secret pair."""
    req_token, req_secret = service.get_request_token(params={'oauth_callback': 'oob'})
    auth_url = smugmug_add_auth_params(service.get_authorize_url(req_token),
                                       access='Full', permissions='Add')
    print('\nTo allow write access to your Smugmug account')
    print('open the following URL in a web browser and copy the six-digit access code:\n\n%s' % auth_url)
    verifier = getpass('\nEnter the six-digit access code: ').strip()
    acc_token, acc_secret = service.get_access_token(req_token, req_secret,
                                                     params={'oauth_verifier': verifier})
    return { 'secret': acc_secret, 'token': acc_token }
def smugmug_login(smugmug_credfile):
    """Create an authenticated SmugMug OAuth 1 session.

    If *smugmug_credfile* is given, the cached token/secret are loaded from
    it; on a read or parse failure the interactive flow runs and the fresh
    token is written back (file restricted to owner read/write, since the
    token grants full account access).
    """
    service = OAuth1Service(
        name=SMG_APP_NAME,
        consumer_key=SMG_API_KEY,
        consumer_secret=SMG_API_SECRET,
        request_token_url=SMG_REQUEST_TOKEN_URL,
        access_token_url=SMG_ACCESS_TOKEN_URL,
        authorize_url=SMG_AUTHORIZE_URL,
        base_url=SMG_API_BASE_URL)
    if smugmug_credfile:
        try:
            with open(smugmug_credfile, 'r') as infile:
                smugmugToken = json.load(infile)
        # Was a bare `except:`; only a missing/unreadable file or corrupt
        # JSON should trigger re-authentication.
        except (IOError, OSError, ValueError):
            smugmugToken = smugmug_get_token(service)
            with open(smugmug_credfile, 'w') as outfile:
                os.chmod(smugmug_credfile, stat.S_IRUSR | stat.S_IWUSR)
                json.dump(smugmugToken, outfile)
    else:
        smugmugToken = smugmug_get_token(service)
    session = OAuth1Session(service.consumer_key, service.consumer_secret,
                            access_token=smugmugToken['token'],
                            access_token_secret=smugmugToken['secret'])
    return session
def smugmug_request_once(session, method, url, params=None, headers=None, files=None, data=None, header_auth=False):
    """Issue a single SmugMug API request and return the decoded JSON body.

    NOTE: if the response body is not valid JSON, the *data* argument is
    returned unchanged -- smugmug_request() relies on that to retry.
    """
    # Mutable default arguments ({}) were shared across calls; use None.
    params = {} if params is None else params
    headers = {} if headers is None else headers
    files = {} if files is None else files
    if debug:
        print('\nREQUEST:\nmethod='+method+'\nurl='+url+'\nparams='+str(params) +'\nheaders='+str(headers) + '\nheader_auth='+str(header_auth))
        if len(str(data)) < 300:
            print("data="+str(data))
    response = session.request(url=url,
                               params=params,
                               method=method,
                               headers=headers,
                               files=files,
                               data=data,
                               header_auth=header_auth)
    if debug:
        print('RESPONSE DATA:\n' + str(response.content)[:500] + (" ... " + str(response.content)[-500:] if len(str(response.content)) > 1000 else ""))
    try:
        data = json.loads(response.content)
    except Exception:
        pass
    return data
def smugmug_request(session, method, url, params=None, headers=None, files=None, data=None, header_auth=False, retries=1, sleep=5):
    """Call smugmug_request_once() with up to *retries* attempts.

    Returns the decoded response on success ('Code' 200/201 or stat 'ok');
    sleeps *sleep* seconds between attempts and exits the process once all
    retries are exhausted.
    """
    retry_count = retries
    while retry_count > 0:
        try:
            response = smugmug_request_once(session, method, url,
                                            params if params is not None else {},
                                            headers if headers is not None else {},
                                            files if files is not None else {},
                                            data, header_auth)
            if ('Code' in response and response['Code'] in [200, 201]) or ("stat" in response and response["stat"] in ["ok"]):
                return response
        # The original listed requests.* and httplib.IncompleteRead here, but
        # neither module is imported anywhere in this script, so the handler
        # itself raised NameError. Catch broadly and retry instead.
        except Exception:
            if debug:
                print(sys.exc_info()[0])
        if debug:
            print('Retrying (' + str(retry_count) + ')...')
        time.sleep(sleep)
        retry_count -= 1
    print('Error: Too many retries, giving up.')
    sys.exit(1)
def smugmug_create_nice_name(name):
    """Turn an arbitrary album/folder name into a URL-safe SmugMug name:
    whitespace-split words, stripped of non-alphanumerics, '-'-joined and
    title-cased."""
    words = name.strip().split()
    cleaned = [re.sub(r'[\W_]+', '', word) for word in words]
    return "-".join(cleaned).title()
def smugmug_get_folders(session, username):
    """Return [{Name, NodeID, UrlName}, ...] for the user's top-level folders."""
    url = SMG_API_BASE_URL + "/folder/user/"+username+"!folders"
    response = smugmug_request(session, 'GET', url,
                               headers={'Accept': 'application/json'}, header_auth=True)
    if 'Response' not in response or 'Folder' not in response['Response']:
        return []
    return [{"Name": f["Name"], "NodeID": f["NodeID"], "UrlName": f["UrlName"]}
            for f in response['Response']['Folder']]
def smugmug_get_folder_id(session, username, folder_name):
    """Return the UrlName of the folder called *folder_name*, or None."""
    return next((folder['UrlName']
                 for folder in smugmug_get_folders(session, username)
                 if folder['Name'] == folder_name),
                None)
def smugmug_create_folder(session, folder_name, username, parent = None, password = None):
    """Create an unlisted folder (optionally password protected, optionally
    under *parent*) and return its UrlName via a fresh folder listing."""
    data = {"UrlName": smugmug_create_nice_name(folder_name), "Name": folder_name, "Privacy": "Unlisted"}
    if password is not None:  # was `!= None`
        data['Password'] = password
    url = (SMG_API_BASE_URL + "/folder/user/" + username
           + ("/"+parent if parent is not None else "") + "!folders")
    response = smugmug_request(session, 'POST', url, data=json.dumps(data),
                               headers={'Accept': 'application/json', 'Content-Type': 'application/json'},
                               header_auth=True)
    if debug:
        # was a py2 `print` statement; single-arg print() works on 2 and 3
        print(json.dumps(response))
    return smugmug_get_folder_id(session, username, folder_name)
def smugmug_get_albums(session, username, parent = None):
    """Return [{Name, NiceName, AlbumKey}, ...] for the albums of *username*
    (optionally restricted to the folder *parent*)."""
    url = (SMG_API_BASE_URL + "/folder/user/" + username
           + ("/"+parent if parent != None else "") + "!albums")
    response = smugmug_request(session, 'GET', url,
                               headers={'Accept': 'application/json'}, header_auth=True)
    if 'Response' not in response or 'Album' not in response['Response']:
        return []
    return [{"Name": a["Name"], "NiceName": a["NiceName"], "AlbumKey": a["AlbumKey"]}
            for a in response['Response']['Album']]
def smugmug_get_album_id(session, username, name, parent = None):
    """Return the AlbumKey of the album called *name*, or None."""
    return next((album['AlbumKey']
                 for album in smugmug_get_albums(session, username, parent)
                 if album['Name'] == name),
                None)
def smugmug_create_album(session, username, album_name, password = None, parent = None, template_id = None, privacy = None):
    """Create an album and return the raw API response.

    With *template_id* the album is cloned from an album template via the
    node endpoint; otherwise it is created directly under the user's folder
    (optionally under *parent*).
    """
    data = {"NiceName": smugmug_create_nice_name(album_name), "Title": album_name.encode("utf-8"), "Privacy": privacy}
    if password is not None:  # was `!= None`
        data['Password'] = password
    if template_id is not None:
        data["AlbumTemplateUri"] = template_id
        data["FolderUri"] = "/api/v2/folder/user/"+username+("/"+parent if parent is not None else "")+"!albums"
        response = smugmug_request(session, 'POST',
                                   SMG_API_BASE_URL + "/node" + ("/"+parent if parent is not None else "")+"!albumfromalbumtemplate",
                                   data=json.dumps(data),
                                   headers={'Accept': 'application/json', 'Content-Type': 'application/json'},
                                   header_auth=True)
    else:
        response = smugmug_request(session, 'POST',
                                   SMG_API_BASE_URL + "/folder/user/" + username + ("/"+parent if parent is not None else "") + "!albums",
                                   data=json.dumps(data),
                                   headers={'Accept': 'application/json', 'Content-Type': 'application/json'},
                                   header_auth=True)
    if debug:
        # was a py2 `print` statement; single-arg print() works on 2 and 3
        print(json.dumps(response))
    return response
def smugmug_upload_image(session, album_id, local_filename, image_name, image_type, image_title, image_caption):
    """Upload one image file to the album *album_id* and return the response.

    The whole file is read into memory so its MD5 and length can be sent in
    the upload headers.
    """
    with open(local_filename, "rb") as imgfile:
        # (the explicit close() inside the with-block was redundant; removed)
        imgdata = imgfile.read()
    albumURI = "/api/v2/album/" + album_id
    response = smugmug_request(session, 'POST', SMG_UPLOAD_URI,
                               data=imgdata,
                               header_auth = True,
                               headers={'X-Smug-AlbumUri': albumURI,
                                        'X-Smug-Version':SMG_API_VERSION,
                                        'X-Smug-ResponseType':'JSON',
                                        'Content-MD5': hashlib.md5(imgdata).hexdigest(),
                                        'X-Smug-FileName':image_name,
                                        'Content-Length' : str(len(imgdata)),
                                        'Content-Type': image_type,
                                        'X-Smug-Title': image_title,
                                        'X-Smug-Caption': image_caption})
    # The original dropped the response on the floor; returning it is
    # backward compatible (existing callers ignore the return value).
    return response
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Migrate photos from PicasaWeb Archive to SmugMug')
parser.add_argument('--gmail', help='Google account email address', required=True)
parser.add_argument('--smgname', help='SmugMug username (sitename)', required=True)
parser.add_argument('--smgfolder', help='SmugMug destination folder name', required=False)
parser.add_argument('--credfile', help='Permanent credentials storage file', required=False)
parser.add_argument('--outdir', help='optional output directory for image export', required=False)
parser.add_argument('--imgcmd', help='optional command applied to each image file', required=False)
parser.add_argument('--imgcmdsfx', help='filename suffix appended to processed image', required=False)
parser.add_argument('--albnr', help='limit copy to album number', required=False)
parser.add_argument('--dry', | |
import random
import os
import re
import math
import sys
def set2char( F ) :
    """Return a copy of F (a list of lists of sets) where every innermost
    element has been converted to its string form."""
    return [[set(str(x) for x in s) for s in fs] for fs in F]
def primecover( s ) :
    """Wrap *s* in double quotes (for emitting string literals into the
    generated model file)."""
    return '"%s"' % s
def checkNumberColor( topo , S ) :
    """Count the connected components of the graph *topo* after removing the
    edges in *S*.

    topo -- iterable of edges, each edge an iterable of node names;
    S    -- collection of edges to delete before counting.
    Nodes are collected from *all* edges of topo, so endpoints of removed
    edges still count as isolated components (matching the original).
    """
    remaining = [ x for x in topo if x not in S ]
    # colour 0 == unvisited; colours 1..k are component ids
    color = {}
    for x in topo :
        for v in x :
            color[ v ] = 0

    def travel( start , c ) :
        # Iterative DFS: the original recursed per node and could exceed the
        # interpreter recursion limit on large topologies.
        stack = [ start ]
        while stack :
            v = stack.pop()
            color[ v ] = c
            for x in remaining :
                if v in x :
                    for u in x :
                        if color[ u ] == 0 :
                            stack.append( u )

    ncolor = 0
    for x in topo :
        for v in x :
            if color[v] == 0 :
                ncolor = ncolor + 1
                travel( v , ncolor )
    return ncolor
# --- module state shared by generate_logic / reading_data / write_basic ---
lstNode = []    # node names parsed from the .topo file
lstEdge = []    # directed edge tuples (name, src, dst, weight)
logicdeg = {}   # per-node degree of the generated logical topology
logictopo = []  # generated logical links as [src, dst] pairs (both directions)
capacity = {}   # per-physical-edge capacity (filled in write_basic)
shortest = {}   # (src, dst) -> edges on the shortest path -- filled elsewhere; TODO confirm
mody = {}       # per-edge modifier; sign selects +/-20% capacity tweak in write_basic
F1 = set()      # single-link failure sets (see reading_data)
F2 = set()      # dual-link failure sets
F3 = set()      # triple-link failure sets
F4 = set()      # quadruple-link failure sets
FNODE = set()   # per-node failure sets: all links incident to one node
def generate_logic( degree , genedge , ishalf ) :
    """Generate a random logical topology over lstNode into the globals
    logicdeg / logictopo.

    degree  -- if > 0, pair random under-degree nodes until every node has
               at least this degree;
    genedge -- otherwise, generate this many random undirected links
               (stored as both directed pairs);
    ishalf  -- in genedge mode, restrict endpoints to a random half of the
               nodes.

    Recursively regenerates itself if the verification step at the end
    rejects the result.
    """
    global logicdeg , logictopo , lstNode , lstEdge , shortest
    logicdeg = {}
    logictopo = []
    for v in lstNode :
        logicdeg[ v ] = 0
    random.seed()
    # GENERATE BY DEGREE
    if degree > 0 :
        while ( 1 ) :
            underdeg = [ v for v in lstNode if logicdeg[v] < degree ]
            if not len( underdeg ):
                break
            # take two random variables from underdeg
            v1 = random.choice( underdeg )
            v2 = random.choice( underdeg )
            if len( underdeg ) > 1 :
                while v1 == v2 :
                    v2 = random.choice( underdeg )
            else :
                # only one under-degree node left: pair it with any other node
                while v1 == v2 :
                    v2 = random.choice( lstNode )
            # update degree
            logicdeg[ v1 ] += 1
            logicdeg[ v2 ] += 1
            logictopo.append( [ v1 , v2 ] )
            logictopo.append( [ v2 , v1 ] )
    else :
        # GENERATE BY EDGE: pick a random half of the nodes first (used only
        # when ishalf is true)
        tmpNode = set()
        while (2 * len( tmpNode )) < len( lstNode ) :
            v = random.choice( lstNode )
            tmpNode.add( v )
        #print "- half node : " , tmpNode
        ge = 0
        while ge < genedge :
            if not ishalf :
                v1 = random.choice( lstNode )
                v2 = random.choice( lstNode )
            else :
                v1 = random.choice( list(tmpNode) )
                v2 = random.choice( list(tmpNode) )
            if (v1 != v2) :
                logictopo.append( [ v1 , v2 ] )
                logictopo.append( [ v2 , v1 ] )
                ge += 1
                # update degree
                logicdeg[ v1 ] += 1
                logicdeg[ v2 ] += 1
    # verify logic topology
    # NOTE(review): this rejects any node whose degree is exactly 2 and
    # regenerates everything -- confirm 2 is really the forbidden degree.
    needrerun = 0
    for v in lstNode :
        if logicdeg[ v ] == 2 :
            print "node " , v , " has degree " , logicdeg[ v ]
            needrerun = 1
    if needrerun:
        generate_logic( degree , genedge , ishalf )
#
# Reading topo information
#
def reading_data( basename ) :
    """Read ./topo/<basename>.topo and populate the module globals.

    Fills lstNode / lstEdge from the file and then builds the failure sets
    F1..F4 (link combinations around a common node whose removal leaves the
    graph in one component, per checkNumberColor) and FNODE (all links
    incident to each node). F1..F4 end up as shuffled lists.
    """
    global logicdeg , logictopo , lstNode , lstEdge , F1 , F2 , F3 , F4 , FNODE
    lstNode = []
    lstEdge = []
    print "READ TOPO : " , basename
    ffile = open( "./topo/" + basename + ".topo" )
    tag = 0
    edgename = 0
    # token separator: spaces, tabs, CR/LF
    separator = "[\ |\n|\t|\r]+"
    for line in ffile :
        # TAG line selects the link-record layout used below
        if ( re.match("TAG" , line ) ) :
            item = re.split( separator , line )
            tag = int(item[1])
        # node
        if ( re.match( "node" , line ) ) :
            item = re.split( separator , line )
            lstNode.append( item[1] )
        # link: tag 1 uses fields 3/5/7; tag 2 emits both directions (a/b)
        if ( re.match( "link" , line ) ) :
            item = re.split( separator , line )
            edgename += 1
            if tag == 1 :
                lstEdge.append( ( str(edgename) , item[3] , item[5] , item[7] ) )
            if tag == 2 :
                lstEdge.append( ( str(edgename) + 'a' , item[2] , item[3] , item[4] ) )
                lstEdge.append( ( str(edgename) + 'b' , item[3] , item[2] , item[4] ) )
    # reset failure sets
    F1 = set( )
    F2 = set( )
    F3 = set( )
    F4 = set( )
    FNODE = set ( )
    # get set of all undirect edge (frozenset of the two endpoints)
    lstUedge = set( )
    for edge in lstEdge :
        lstUedge.add( frozenset( ( edge[1] , edge[2] )) )
    # build single failure set: single links whose removal keeps 1 component
    for e in lstUedge :
        if checkNumberColor( lstUedge , set( [e] ) ) == 1 :
            F1.add( frozenset([e]) )
    # build single node failure set: every link touching the node
    for v in lstNode :
        tempnodeset = set()
        for e in lstUedge :
            if v in e :
                tempnodeset.add( e )
        #print "number color = " , checkNumberColor( lstUedge , tempnodeset)
        FNODE.add( frozenset( tempnodeset ) )
    # build higher order failure set: 2/3/4-link combinations incident to a
    # common node (frozensets de-duplicate permutations)
    for v in lstNode :
        temp = set()
        for e in lstUedge:
            if v in e :
                temp.add( e )
        # build dual
        for e1 in temp :
            for e2 in temp :
                if len( set( [e1,e2] ) ) == 2 :
                    if checkNumberColor( lstUedge , set( [e1,e2] ) ) == 1 :
                        F2.add( frozenset( [ e1 , e2 ] ) )
        # build third
        for e1 in temp :
            for e2 in temp :
                for e3 in temp :
                    if len( frozenset([e1,e2,e3]) )== 3 :
                        if checkNumberColor( lstUedge , frozenset( [e1,e2,e3] ) ) == 1 :
                            F3.add( frozenset( [ e1 , e2 , e3 ] ) )
        # build fourth
        for e1 in temp :
            for e2 in temp :
                for e3 in temp :
                    for e4 in temp :
                        if len( frozenset([ e1 , e2, e3 ,e4]) )== 4 :
                            if checkNumberColor( lstUedge , frozenset( [e1,e2,e3,e4] ) ) == 1 :
                                F4.add( frozenset( [ e1 , e2 , e3 , e4 ] ) )
    print "number of edges : " , len( lstEdge )
    print "number of nodes : " , len( lstNode )
    print "number of single failure : " , len( F1 )
    print "number of dual failure : " , len( F2 )
    print "number of third failure : " , len( F3 )
    print "number of fourth failure : " , len( F4 )
    # convert the failure sets to (shuffled) lists for downstream indexing
    F1 = [ x for x in F1 ]
    F2 = [ x for x in F2 ]
    F3 = [ x for x in F3 ]
    F4 = [ x for x in F4 ]
    random.shuffle( F1 )
    random.shuffle( F2 )
    random.shuffle( F3 )
    random.shuffle( F4 )
    print "------------------------------------"
    ffile.close()
#
# Writing basic information : node set + edge set + failure set
#
def write_basic( fnet , nloc , sce ) :
global logicdeg , logictopo , lstNode , lstEdge , shortest , mody , F1,F2,F3,F4 , FNODE
# write node set
fnet.write("nodeset = {\n" )
for nnode in lstNode :
fnet.write( primecover( nnode ) + ",\n" )
fnet.write("};\n")
# compute capacity
for e in lstEdge :
capacity[ e ] = 0
for x,y in logictopo[ 0 : nloc ] :
for e in shortest[ (x,y) ] :
capacity[ e ] += 1
# write edgeset
for e in lstEdge :
if mody[e] > 0 :
dv = 0.2 * capacity[e]
else :
dv = -0.2 * capacity[e]
newe = math.ceil( capacity[ e ] + dv )
if ( newe < 1 ) :
newe = 1
#print e , ":" , capacity[e ] , "=>" , newe
capacity[ e ] = newe
fnet.write("edgeset = | |
"""
Controls Baxter using any game pad / joystick via the logitech and Motion modules.
Designed for Logitech controllers, may work for other controllers too.
Converted by <NAME> Oct 2015 from a script written by <NAME>,
July 2015.
SETUP:
Before running this script, you must run the system state service and the
controller dispatcher.
CONTROLS:
Controls are modified when special control buttons are pressed to create combinations, noted below.
Always:
Right trigger -- switch between precision grip and power grip
No modifier (End effector velocity mode, ie translations):
Right stick x -- move end effector along Baxter's y axis (ie side to side)
Right stick y -- move end effector along Baxter's x axis (ie outward)
Left stick x -- gripper open and close
Left stick y -- move end effector along Baxter's z axis (ie vertically)
Left bumper (End effector rotational velocity mode):
Right stick x -- rotate end effector about its x axis
Right stick y -- rotate end effector about its y axis
Left stick x -- rotate end effector about its z axis
Right bumper (Joint velocity mode):
Right stick x -- rotate gripper
Right stick y -- bend wrist
Left stick x -- rotate elbow
Left stick y -- bend elbow
Buttons:
A is used to hold the position of the hand
B is used to change hand pershape
X is used to get the arm to initial position
Y is used to switch between arm and base
Dpad switch camera view
"""
# --- module-wide state shared between the task generator and the ROS
# --- callbacks below (mutated via `global` statements)
pick = None
workpls = None
J = [0, 0, 0, 0, 0, 0, 0]   # 7 entries -- presumably joint values; TODO confirm
xb = None
yb = None
zb = None
pickLimb = "Out"
holdingObj = False
hasMoved = {}               # marker id -> bool, populated in get()
marker_state = None         # latest MarkerArray snapshot (set by callback_state)
pickId = 9999               # 9999 acts as "no marker selected"
placeId = 9999
grabbing = False
grabAmount = 0              # gripper close percentage published in do_logic()
left_pose = None
z_height = 999
got_to_waypoint = False
command = ""                # latest /Command string
lastCommand = ""
savedx = 0.0                # hand position snapshot taken on entering "live"
savedy = 0.0
savedz = 0.0
offsetx = 0.0               # offsets from /Offsets
offsety = 0.0
offsetz = 0.0
rotx = None                 # rotation matrices -- multiplied in do_logic()
roty = None
rotz = None
placing = False
hand = None                 # left end-effector xyz (set by callback_pose)
hand_rot = None             # left end-effector orientation x,y,z (w dropped)
deathFromAbove = True
maxGripPercent = 90
grabInterrupt = "close"
ascend = False
prevzpick = 0
doneTimer = 0
gripperSpeed = 0.0
ICanceled = False           # set when the user sends the "cancel" command
zwaypoint = 0.0
ywaypoint = 0.0
# Import Modules
import os
import sys
import threading
import rospy
import asyncore
import subprocess
ebolabot_root = os.getenv("EBOLABOT_PATH", ".")
from Common.system_config import EbolabotSystemConfig
sys.path.append(ebolabot_root)
# for logitech module
sys.path.append(os.path.join(ebolabot_root, 'InputDevices/USBControllers'))
import gamepad
from task_generator import TaskGenerator
import time
import csv
from sspp.service import Service
from sspp.topic import MultiTopicListener
from OpenGL.GL import *
from klampt.vis import gldraw
from klampt.vis.glinterface import GLPluginInterface as GLPluginBase
from klampt.vis.glcommon import GLWidgetPlugin
from klampt.math import so3, se3, vectorops
import math
from std_msgs.msg import String, Int16, Float32MultiArray, Int8, Int64, Bool
from geometry_msgs.msg import Pose
# from baxter_pykdl import baxter_kinematics
import numpy as np
from UI.utils.gripper_controller import *
from TrinaPointAndClick.msg import Marker, MarkerArray
from baxter_core_msgs.msg import EndpointState
from tf.transformations import euler_from_quaternion, quaternion_from_euler
# imaging stuff
try:
from PIL import Image
except ImportError as err:
import Image
# set this -1 for view-centric control, looking at the face of the robot
viewToWorldScaleXY = 1
''' gripper Mode: power, precision '''
GripperMode = {'left': 'power', 'right': 'power'}
''' Hold Mode: free, hold '''
HoldMode = {'left': 'free', 'right': 'free'}
HoldPose = {'left': [1.0, 1.0, 1.0, 1.0], 'right': [1.0, 1.0, 1.0, 1.0]}
# 7 joint angles per arm; 'right' mirrors 'left' (signs flipped on
# joints 0, 2, 4 and 6)
TuckPose = {}
TuckPose['left'] = [-0.05897088559570313, -0.9675583808532715, -1.2034079267211915, 1.7132575355041506,
                    0.6776360122741699, 1.0166457660095216, 2.475]
TuckPose['right'] = [0.05897088559570313, -0.9675583808532715, 1.2034079267211915, 1.7132575355041506,
                     -0.6776360122741699, 1.0166457660095216, -2.475]
TuckStatus = {'left': False, 'right': False}
gamepad_switch = "/transcript"  # to switch camera view
M_limit = 0.04                  # threshold -- units/meaning unclear; TODO confirm
# (host, port) of the system state server, overridable via config
system_state_addr = EbolabotSystemConfig.getdefault_ip('state_server_computer_ip', ('localhost', 4568))
class MarkerTaskGenerator(TaskGenerator):
    def __init__(self):
        """Initialise controller state and register all ROS pub/sub endpoints.

        Background threads (service, gripper) are created lazily in start(),
        not here.
        """
        self.serviceThread = None
        self.gripperController = None
        self.j = None                 # gamepad handle, created in start()
        self.limb = 'left'
        self.controlSet = 'arm'
        self.lastState = {}           # previous gamepad/marker snapshot (see get())
        self.plugin = None
        # set initial values
        self.baseSensedVelocity = [0.0, 0.0, 0.0]
        self.baseCommandVelocity = [0.0, 0.0, 0.0]
        self.log = False              # NOTE(review): the CSV writer this flag
                                      # guards is commented out below
        # === joint control ===
        self.jointControlRatio = 0.4
        # == arm position ===
        self.ArmPosition = [[0.0] * 7, [0.0] * 7]
        self.robotEndEffectorPosition = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
        self.robotEndEffectorTransform = [se3.identity(), se3.identity()]
        self.gripperPosition = [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
        self.kin = {}
        # self.kin['left'] = baxter_kinematics('left')
        # self.kin['right'] = baxter_kinematics('right')
        self.last_driveVel = [0.0, 0.0, 0.0]
        self.near_singularity = False
        self.last_sigulatiry = False  # (sic) attribute name kept as-is
        # flag to mark the data
        self.flag = 0
        # open log file
        timestr = time.strftime("%Y%m%d-%H%M%S")
        """self.csvfile_gamepad = open('data/gamepad_log' + timestr + '.csv', 'wb')
        fieldnames = ['rstick', 'lstick', 'LB', 'RB', 'LT', 'RT', 'B', 'A', 'X', 'Y', 'Dpad_x', 'Dpad_y',
                      'jointAngles', 'gripperStatus', 'eePosition', 'eeTransformation',
                      'baseSensedVelocity', 'baseCommandVelocity', 'time', 'Flag']
        self.gamepad_csv = csv.DictWriter(self.csvfile_gamepad, fieldnames=fieldnames)
        self.gamepad_csv.writeheader()
        self.switch_pub = None"""
        # == Autonomous ==
        self.lastPick = "Out"
        self.isAuto = False
        # Inbound topics feed the module-level globals via these callbacks.
        rospy.Subscriber('Detect', String, callback_pos)
        rospy.Subscriber('/MarkerArray', MarkerArray, callback_state)
        rospy.Subscriber('/Offsets', Pose, callback_offsets)
        rospy.Subscriber('/PickID', Int64, callback_pick)
        rospy.Subscriber('/PlaceID', Int64, callback_place)
        rospy.Subscriber('/Command', String, callback_command)
        rospy.Subscriber('/GripperSpeed', Int64, callback_speed)
        rospy.Subscriber('/GripperClosePercent', Int64, callback_percent)
        rospy.Subscriber('/GripperState', String, callback_gripper)
        rospy.Subscriber('/robot/limb/left/endpoint_state', EndpointState, self.callback_pose)
        # rospy.Subscriber('/robot/limb/left/endpoint_state/pose', Pose, callback_pose)
        # Outbound: per-hand gripper commands and overall status.
        self.pub_l = rospy.Publisher('/left/UbirosGentle', Int8, queue_size=1)
        self.pub_r = rospy.Publisher('/right/UbirosGentle', Int8, queue_size=1)
        self.pub_state = rospy.Publisher('/CurrentStatus', String, queue_size=1)
def callback_pose(self, data):
global hand
global hand_rot
hand = [data.pose.position.x, data.pose.position.y, data.pose.position.z]
hand_rot = [data.pose.orientation.x, data.pose.orientation.y, data.pose.orientation.z]
def name(self):
return "A_Best_Point_and_Click_GUI"
def init(self, world):
assert self.j == None, "Init may only be called once"
self.world = world
# rospy.init_node("gamepad_node")
# Connect to controller
return True
def start(self):
global gamepad_switch
try:
self.j = gamepad.Gamepad()
except:
print
"Gamepad not found"
print
"Note: Pygame reports there are " + str(gamepad.Gamepad.numJoys()) + " joysticks"
return False
if self.serviceThread is None:
self.serviceThread = ServiceThread()
self.serviceThread.start()
if self.gripperController is None:
self.gripperController = GripperController()
self.gripperController.start()
# if not self.j: return False
self.limb = 'left'
self._status = 'ok'
# self.plugin = MyWidgetPlugin(self)
self.lastState = {}
# self.switch_pub = rospy.Publisher(gamepad_switch, String, queue_size=1)
return True
def status(self):
# if self.j:
# return 'ok'
# else:
# return 'error'
return 'ok'
def messages(self):
return ["Controlling " + self.limb]
def controlMode(self):
# if len(self.lastState) == 0: return 'None'
# if self.lastState['RB']:
# return 'Joint angles'
# elif self.lastState['LB']:
# return "Cartesian rotation"
# else:
# return "Cartesian position"
return "Cartesian position"
    def stop(self):
        """Tear down the background threads and release the gamepad."""
        if self.serviceThread:
            self.serviceThread.kill()
            print("Waiting for thread join...")
            self.serviceThread.join()  # block until the service loop exits
            print("Done")
            self.serviceThread = None
        if self.gripperController:
            self.gripperController.kill()
            print("gripper control thread killed")
            self.gripperController = None
        self._status = ''
        self.plugin = None
        self.j.quit()  # release the pygame joystick
def get(self):
global marker_state
j = self.j
j.updateState()
state = {}
resp = marker_state
if resp == None:
# print "No data"
return None
state['markers'] = {}
state['markers-vis'] = 0
for marker in resp:
if not marker.id_number in hasMoved:
hasMoved[marker.id_number] = False
state['markers'][marker.id_number] = marker
if marker.visible:
state['markers-vis'] += 1
if self.log:
self.gamepad_csv.writerow(state)
if len(self.lastState) > 0:
res = self.do_logic(self.lastState, state)
else:
res = None
self.lastState = state
return res
def do_logic(self, lastState, state):
global GripperMode
global HoldMode
global HoldPose
global pickLimb
global xb, yb, zb
global savedx, savedy, savedz
global holdingObj
global hasMoved
global grabbing
global grabAmount
global left_pose
global got_to_waypoint
global command, lastCommand
global pickId, placeId
global offsetx, offsety, offsetz
global placing
global deathFromAbove
global maxGripPercent
global ascend
global prevzpick
global doneTimer
global gripperSpeed
global ICanceled
global zwaypoint
global ywaypoint
if ICanceled:
print "The cancel button was hit"
# get robot state data
self.getRobotStatus()
robot_state = {'jointAngles': self.ArmPosition, 'gripperStatus': self.gripperPosition,
'eePosition': self.robotEndEffectorPosition, 'eeTransformation': self.robotEndEffectorTransform,
'baseSensedVelocity': self.baseSensedVelocity, 'baseCommandVelocity': self.baseCommandVelocity,
'time': int(round(time.time() * 1000)), 'Flag': self.flag}
if self.log:
print("definitely logging")
# self.gamepad_csv.writerow(robot_state)
# rstick = state['rstick']
# lstick = state['lstick']
gripPercent = grabAmount
# publish grip command to the correct hand
if (self.limb == 'right'):
self.pub_r.publish(gripPercent)
else:
self.pub_l.publish(gripPercent)
if command == "cancel":
print "cancel"
ICanceled = True
doneTimer = 0
grabbing = False
deathFromAbove = True
ascend = False
got_to_waypoint = False
placing = False
grabAmount = 0
if command == "home":
doneTimer = 0
TuckStatus[self.limb] = True
deathFromAbove = True
ascend = False
got_to_waypoint = False
placing = False
self.pub_state.publish("Done")
else:
TuckStatus[self.limb] = False
if command == "live":
print "Doing live adjustments"
rotxy = np.matmul(rotx, roty)
rotxyz = np.matmul(rotxy, rotz)
if not lastCommand == "live":
print "SAVING X Y and Z"
savedx = hand[0] + 0.06
savedy = hand[1] - 0.1
savedz = hand[2]
print savedx
print savedy
print savedz
pos_msg = {"type": "CartesianPose",
"limb": "left",
"position": [savedx + offsetx, savedy + offsety, savedz + offsetz],
"rotation": [rotxyz[2,0],rotxyz[2,1],rotxyz[2,2],-rotxyz[0,0],-rotxyz[0,1],-rotxyz[0,2],-rotxyz[1,0],-rotxyz[1,1],-rotxyz[1,2]],
# "rotation":[1,0,0,0,1,0,0,0,1],
# "rotation":[0,-1,0,1,0,0,0,0,1], #90 deg rotation about z axis
# "rotation":[1,0,0,0,0,-1,0,1,0], #90 deg rotation about x axis
# "rotation":[0,0,1,0,1,0,-1,0,0], #90 deg rotation about y axis
"speed": 1,
"maxJointDeviation": 0.5,
"safe": | |
plt.bar(ind[len(ScenSel_2015)+1 + len(ScenSel_2050) +1 ::], Stock_Region_2100_use_loss_a[m,ScenSel_2100], width, hatch = '//', color=MyColorCycle_10Reg[m//2,:],
label = Def_RegionsNames_agg[m//2], bottom = Stock_Region_2100_use_loss_a[0:m,ScenSel_2100].sum(axis=0), linewidth = 0.0)
else:
p1 = plt.bar(ind[len(ScenSel_2015)+1 + len(ScenSel_2050) +1 ::], Stock_Region_2100_use_loss_a[m,ScenSel_2100], width, color=MyColorCycle_10Reg[(m -1)//2,:],
label = Def_RegionsNames_agg[(m -1)//2], bottom = Stock_Region_2100_use_loss_a[0:m,ScenSel_2100].sum(axis=0), linewidth = 0.0)
# plot horizontal bar boundary
# Vertical black edges around every bar (xticks hold the bar centres; bars are 0.4 wide).
for xx in xticks:
    plt.plot([xx - 0.2, xx - 0.2], [0,1], color='k', linestyle='-', linewidth = Gen_Linewidth)
    plt.plot([xx + 0.2, xx + 0.2], [0,1], color='k', linestyle='-', linewidth = Gen_Linewidth)
# Horizontal separators between the 10 stacked region segments, per period group;
# the 0:2*yy slice sums the use+loss row pairs below region yy to get the boundary height.
for xx in range(0,len(ScenSel_2015)):
    for yy in range(0,10):
        plt.plot([xx/2, xx/2 + 0.4], [Stock_Region_2015_use_loss_a[0:2*yy,ScenSel_2015[xx]].sum(axis=0), Stock_Region_2015_use_loss_a[0:2*yy,ScenSel_2015[xx]].sum(axis=0)], color='k', linestyle='-', linewidth = Gen_Linewidth)
for xx in range(0,len(ScenSel_2050)):
    for yy in range(0,10):
        plt.plot([len(ScenSel_2015)/2 + 0.5 + xx/2, len(ScenSel_2015)/2 + 0.5 + xx/2 + 0.4], [Stock_Region_2050_use_loss_a[0:2*yy,ScenSel_2050[xx]].sum(axis=0), Stock_Region_2050_use_loss_a[0:2*yy,ScenSel_2050[xx]].sum(axis=0)], color='k', linestyle='-', linewidth = Gen_Linewidth)
for xx in range(0,len(ScenSel_2100)):
    for yy in range(0,10):
        plt.plot([len(ScenSel_2015)/2 + 0.5 + len(ScenSel_2050)/2 + 0.5 + xx/2, len(ScenSel_2015)/2 + 0.5 + len(ScenSel_2050)/2 + 0.5 + xx/2 + 0.4], [Stock_Region_2100_use_loss_a[0:2*yy,ScenSel_2100[xx]].sum(axis=0), Stock_Region_2100_use_loss_a[0:2*yy,ScenSel_2100[xx]].sum(axis=0)], color='k', linestyle='-', linewidth = Gen_Linewidth)
# add hatch to legend
ProxyHandlesList.insert(0,plt.Rectangle((0, 0), 1, 1, fc='w', hatch = '//')) # create proxy artist for legend
# NOTE(review): this mutates the shared region-name list in place — each plot
# section that runs prepends another 'Losses' entry; verify this is intended.
Def_RegionsNames_agg.insert(0,'Losses')
plt_lgd = plt.legend(reversed(ProxyHandlesList),reversed(Def_RegionsNames_agg),shadow = False, prop={'size':9.0},loc='upper left', bbox_to_anchor=(1.02, 1))
# Axis cosmetics: y axis shows shares as percent (data are normalized to 1.0).
axs[1].set_ylim([ 0, 1.10])
axs[1].set_xlim([ -0.1, ind.max() + 0.6])
axs[1].set_ylabel('Total steel in system, %', fontsize =15)
axs[1].set_yticks(np.arange(0,1.01,0.2))
axs[1].set_yticklabels(['0','20','40','60','80','100'], fontsize =15)
axs[1].set_xlabel('Scenario name', fontsize =15)
axs[1].set_xticks(xticks)
axs[1].set_xticklabels(xticklabels_flat, fontsize =10, rotation = 90)
# Period headers centred above the 2015 / 2050 / 2100 bar groups.
axs[1].text(0.25 * (len(ScenSel_2015)) - 0.25, 1.03, '2015', fontsize=10, fontweight='bold') #alternative fontweight: bold
axs[1].text(0.50 * (len(ScenSel_2015) +1) +0.25 * len(ScenSel_2050) - 0.25, 1.03, '2050', fontsize=10, fontweight='bold') #alternative fontweight: bold
axs[1].text(0.50 * (len(ScenSel_2015) +1 +len(ScenSel_2050) +1) + 0.25 * len(ScenSel_2100) - 0.25, 1.03, '2100', fontsize=10, fontweight='bold') #alternative fontweight: bold
plt.show()
fig.savefig(Path_Result + fig_name, dpi = 400,bbox_extra_artists=(plt_lgd,), bbox_inches='tight')
# include figure in logfile:
Mylog.info('<center><img src="'+ fig_name +'" width="857" height="600" alt="'+ fig_name +'"></center>')
Mylog.info('<font "size=+3"><center><b><i>Figure '+ str(Figurecounter) + ': '+ fig_name +'.</i></b></center></font><br>')
Figurecounter += 1
#%% Plot data for Germany
# Scenario indices (columns of the Stock_* arrays) selected per period.
# 2015 is shown once only ('All scenarios' share the same 2015 state).
ScenSel_2015 = [46]
ScenSel_2050 = [46,47,48,49,50,51,52,53,54,55,56,57,58,59]
ScenSel_2100 = [46,47,48,49,50,51,52,53,54,55,56,57,58,59]
fig_name = 'Steel_Stock_Sensitivity_Germany.png'
#%% Steel stock by product (top) and region (bottom)
# Bar centres: one slot per scenario (0.5 apart) plus a one-slot gap between periods.
ind = np.arange(0,(len(ScenSel_2015) +len(ScenSel_2050) +len(ScenSel_2100)+ 2)/2,0.5)
width = 0.4       # the width of the bars
xticks = np.concatenate((np.arange(0.2,len(ScenSel_2015)/2,0.5),np.arange(0.2 + len(ScenSel_2015)/2 + 0.5,0.2 + len(ScenSel_2015)/2 + 0.5 + len(ScenSel_2050)/2,0.5),np.arange(0.2 + len(ScenSel_2015)/2 + 0.5 + len(ScenSel_2050)/2 + 0.5,0.2 + len(ScenSel_2015)/2 + 0.5 + len(ScenSel_2050)/2 + 0.5 + len(ScenSel_2100)/2,0.5)), axis = 0)
xticklabels = []
xticklabels.append(['All scenarios'])#Alternative: xticklabels.append([ScenList_Names_Plot[i] for i in ScenSel_2015])
xticklabels.append([ScenList_Names_Plot[i] for i in ScenSel_2050])
xticklabels.append([ScenList_Names_Plot[i] for i in ScenSel_2100])
xticklabels_flat = [item for sublist in xticklabels for item in sublist]
# Empty-label template, reused below for the (mostly unlabelled) upper axis.
xticklabels_flat_2 = []
for m in range(0,len(xticklabels_flat)):
    xticklabels_flat_2.append('')
Gen_Linewidth = 0.5
# Start plotting
fig, axs = plt.subplots(2,1,figsize=(12,8))
axs = axs.ravel()
gs = plt.GridSpec(2, 1)
gs.update(hspace=0.2)#, wspace=0.4)
# create x lables for upper axis:
# Upper axis shows the circularity indicator Circ_2100 under the 2100 bars only.
xticklabels_flat_3 = xticklabels_flat_2.copy()
xticklabels_flat_3[len(ScenSel_2015) -1 + len(ScenSel_2050) -0] = 'Circ_2100:'
for m in range(0,len(ScenSel_2100)):
    xticklabels_flat_3[len(ScenSel_2015) -1 + len(ScenSel_2050) +1 +m] = str("%.2f" %Circ_2100[ScenSel_2100[m]])
# plot bars for product split first
axs[0] = plt.subplot(gs[0])#, sharey=True, sharex=True)
# NOTE(review): set_color_cycle was removed in matplotlib 2.x; presumably this
# script targets an older matplotlib — confirm, or migrate to set_prop_cycle.
axs[0].set_color_cycle(MyColorCycle_prod)
ProxyHandlesList = []   # For legend
# Stacked bars: 14 product groups, stacked via bottom = cumulative sum of lower rows.
for m in range(0,14):
    p1 = plt.bar(ind[0:len(ScenSel_2015)], Stock_Product_2015[m,ScenSel_2015], width, color=MyColorCycle_prod[m,:],
                 label = StockLabels[m], bottom = Stock_Product_2015[0:m,ScenSel_2015].sum(axis=0), linewidth = Gen_Linewidth)
    ProxyHandlesList.append(plt.Rectangle((0, 0), 1, 1, fc=MyColorCycle_prod[m,:], linewidth = Gen_Linewidth)) # create proxy artist for legend
    p1 = plt.bar(ind[len(ScenSel_2015)+1:len(ScenSel_2015)+1 + len(ScenSel_2050)], Stock_Product_2050[m,ScenSel_2050], width, color=MyColorCycle_prod[m,:],
                 bottom = Stock_Product_2050[0:m,ScenSel_2050].sum(axis=0), linewidth = Gen_Linewidth)
    p1 = plt.bar(ind[len(ScenSel_2015)+1 + len(ScenSel_2050) +1 ::], Stock_Product_2100[m,ScenSel_2100], width, color=MyColorCycle_prod[m,:],
                 bottom = Stock_Product_2100[0:m,ScenSel_2100].sum(axis=0), linewidth = Gen_Linewidth)
plt_lgd = plt.legend(reversed(ProxyHandlesList),reversed(StockLabels),shadow = False, prop={'size':8.0},loc='upper left', bbox_to_anchor=(1.02, 1))
axs[0].set_ylim([ 0, 1.10])
axs[0].set_xlim([ -0.1, ind.max() + 0.6])
axs[0].set_ylabel('Total steel in system, %', fontsize =15)
axs[0].set_yticks(np.arange(0,1.01,0.2))
axs[0].set_yticklabels(['0','20','40','60','80','100'], fontsize =15)
axs[0].set_xticks(xticks)
axs[0].set_xticklabels(xticklabels_flat_3, fontsize =8, rotation = 0)
# Period headers centred above each bar group.
axs[0].text(0.25 * (len(ScenSel_2015)) - 0.25, 1.03, '2015', fontsize=10, fontweight='bold') #alternative fontweight: bold
axs[0].text(0.50 * (len(ScenSel_2015) +1) +0.25 * len(ScenSel_2050) - 0.25, 1.03, '2050', fontsize=10, fontweight='bold') #alternative fontweight: bold
axs[0].text(0.50 * (len(ScenSel_2015) +1 +len(ScenSel_2050) +1) + 0.25 * len(ScenSel_2100) - 0.25, 1.03, '2100', fontsize=10, fontweight='bold') #alternative fontweight: bold
# plot bars for regional split afterwards
axs[1] = plt.subplot(gs[1])#, sharey=True, sharex=True)
axs[1].set_color_cycle(MyColorCycle_10Reg)
ProxyHandlesList = [] # For legend
# 10 regions x 2 rows each: even rows m (hatched '//') are the loss share,
# odd rows are the in-use share of the same region (colour index m//2).
for m in range(0,10*2):
    if m % 2 == 0:
        p1 = plt.bar(ind[0:len(ScenSel_2015)], Stock_Region_2015_use_loss_a[m,ScenSel_2015], width, hatch = '//', color=MyColorCycle_10Reg[m//2,:],
                     label = Def_RegionsNames_agg[m//2], bottom = Stock_Region_2015_use_loss_a[0:m,ScenSel_2015].sum(axis=0), linewidth = 0.0)
        ProxyHandlesList.append(plt.Rectangle((0, 0), 1, 1, fc=MyColorCycle_10Reg[m//2,:])) # create proxy artist for legend
    else:
        p1 = plt.bar(ind[0:len(ScenSel_2015)], Stock_Region_2015_use_loss_a[m,ScenSel_2015], width, color=MyColorCycle_10Reg[(m -1)//2,:],
                     label = Def_RegionsNames_agg[(m -1)//2], bottom = Stock_Region_2015_use_loss_a[0:m,ScenSel_2015].sum(axis=0), linewidth = 0.0)
    if m % 2 == 0:
        p1 = plt.bar(ind[len(ScenSel_2015)+1:len(ScenSel_2015)+1 + len(ScenSel_2050)], Stock_Region_2050_use_loss_a[m,ScenSel_2050], width, hatch = '//', color=MyColorCycle_10Reg[m//2,:],
                     label = Def_RegionsNames_agg[m//2], bottom = Stock_Region_2050_use_loss_a[0:m,ScenSel_2050].sum(axis=0), linewidth = 0.0)
    else:
        p1 = plt.bar(ind[len(ScenSel_2015)+1:len(ScenSel_2015)+1 + len(ScenSel_2050)], Stock_Region_2050_use_loss_a[m,ScenSel_2050], width, color=MyColorCycle_10Reg[(m -1)//2,:],
                     label = Def_RegionsNames_agg[(m -1)//2], bottom = Stock_Region_2050_use_loss_a[0:m,ScenSel_2050].sum(axis=0), linewidth = 0.0)
    if m % 2 == 0:
        p1 = plt.bar(ind[len(ScenSel_2015)+1 + len(ScenSel_2050) +1 ::], Stock_Region_2100_use_loss_a[m,ScenSel_2100], width, hatch = '//', color=MyColorCycle_10Reg[m//2,:],
                     label = Def_RegionsNames_agg[m//2], bottom = Stock_Region_2100_use_loss_a[0:m,ScenSel_2100].sum(axis=0), linewidth = 0.0)
    else:
        p1 = plt.bar(ind[len(ScenSel_2015)+1 + len(ScenSel_2050) +1 ::], Stock_Region_2100_use_loss_a[m,ScenSel_2100], width, color=MyColorCycle_10Reg[(m -1)//2,:],
                     label = Def_RegionsNames_agg[(m -1)//2], bottom = Stock_Region_2100_use_loss_a[0:m,ScenSel_2100].sum(axis=0), linewidth = 0.0)
# plot horizontal bar boundary
for xx in xticks:
    plt.plot([xx - 0.2, xx - 0.2], [0,1], color='k', linestyle='-', linewidth = Gen_Linewidth)
    plt.plot([xx + 0.2, xx + 0.2], [0,1], color='k', linestyle='-', linewidth = Gen_Linewidth)
# Horizontal separators between the 10 stacked region segments (see note above
# on the use/loss row pairing: slice 0:2*yy sums both rows of regions below yy).
for xx in range(0,len(ScenSel_2015)):
    for yy in range(0,10):
        plt.plot([xx/2, xx/2 + 0.4], [Stock_Region_2015_use_loss_a[0:2*yy,ScenSel_2015[xx]].sum(axis=0), Stock_Region_2015_use_loss_a[0:2*yy,ScenSel_2015[xx]].sum(axis=0)], color='k', linestyle='-', linewidth = Gen_Linewidth)
for xx in range(0,len(ScenSel_2050)):
    for yy in range(0,10):
        plt.plot([len(ScenSel_2015)/2 + 0.5 + xx/2, len(ScenSel_2015)/2 + 0.5 + xx/2 + 0.4], [Stock_Region_2050_use_loss_a[0:2*yy,ScenSel_2050[xx]].sum(axis=0), Stock_Region_2050_use_loss_a[0:2*yy,ScenSel_2050[xx]].sum(axis=0)], color='k', linestyle='-', linewidth = Gen_Linewidth)
for xx in range(0,len(ScenSel_2100)):
    for yy in range(0,10):
        plt.plot([len(ScenSel_2015)/2 + 0.5 + len(ScenSel_2050)/2 + 0.5 + xx/2, len(ScenSel_2015)/2 + 0.5 + len(ScenSel_2050)/2 + 0.5 + xx/2 + 0.4], [Stock_Region_2100_use_loss_a[0:2*yy,ScenSel_2100[xx]].sum(axis=0), Stock_Region_2100_use_loss_a[0:2*yy,ScenSel_2100[xx]].sum(axis=0)], color='k', linestyle='-', linewidth = Gen_Linewidth)
# add hatch to legend
ProxyHandlesList.insert(0,plt.Rectangle((0, 0), 1, 1, fc='w', hatch = '//')) # create proxy artist for legend
# NOTE(review): in-place mutation of the shared region-name list — every plot
# section prepends another 'Losses'; verify this is intended across sections.
Def_RegionsNames_agg.insert(0,'Losses')
plt_lgd = plt.legend(reversed(ProxyHandlesList),reversed(Def_RegionsNames_agg),shadow = False, prop={'size':9.0},loc='upper left', bbox_to_anchor=(1.02, 1))
axs[1].set_ylim([ 0, 1.10])
axs[1].set_xlim([ -0.1, ind.max() + 0.6])
axs[1].set_ylabel('Total steel in system, %', fontsize =15)
axs[1].set_yticks(np.arange(0,1.01,0.2))
axs[1].set_yticklabels(['0','20','40','60','80','100'], fontsize =15)
axs[1].set_xlabel('Scenario name', fontsize =15)
axs[1].set_xticks(xticks)
axs[1].set_xticklabels(xticklabels_flat, fontsize =10, rotation = 90)
axs[1].text(0.25 * (len(ScenSel_2015)) - 0.25, 1.03, '2015', fontsize=10, fontweight='bold') #alternative fontweight: bold
axs[1].text(0.50 * (len(ScenSel_2015) +1) +0.25 * len(ScenSel_2050) - 0.25, 1.03, '2050', fontsize=10, fontweight='bold') #alternative fontweight: bold
axs[1].text(0.50 * (len(ScenSel_2015) +1 +len(ScenSel_2050) +1) + 0.25 * len(ScenSel_2100) - 0.25, 1.03, '2100', fontsize=10, fontweight='bold') #alternative fontweight: bold
plt.show()
fig.savefig(Path_Result + fig_name, dpi = 400,bbox_extra_artists=(plt_lgd,), bbox_inches='tight')
# include figure in logfile:
Mylog.info('<center><img src="'+ fig_name +'" width="857" height="600" alt="'+ fig_name +'"></center>')
Mylog.info('<font "size=+3"><center><b><i>Figure '+ str(Figurecounter) + ': '+ fig_name +'.</i></b></center></font><br>')
Figurecounter += 1
ScenSel_2015 = [46]
ScenSel_2050 = [46,62,63,60,61,64,65,66,67,68]
ScenSel_2100 = [46,62,63,60,61,64,65,66,67,68]
fig_name = 'Steel_Stock_Development_Germany.png'
#%% Steel stock by product (top) and region (bottom)
ind = np.arange(0,(len(ScenSel_2015) +len(ScenSel_2050) +len(ScenSel_2100)+ 2)/2,0.5)
width = 0.4 # the width of the bars
xticks = np.concatenate((np.arange(0.2,len(ScenSel_2015)/2,0.5),np.arange(0.2 + len(ScenSel_2015)/2 + 0.5,0.2 + len(ScenSel_2015)/2 + 0.5 + len(ScenSel_2050)/2,0.5),np.arange(0.2 + len(ScenSel_2015)/2 + 0.5 + len(ScenSel_2050)/2 + 0.5,0.2 + len(ScenSel_2015)/2 + 0.5 + len(ScenSel_2050)/2 + 0.5 + len(ScenSel_2100)/2,0.5)), axis = 0)
xticklabels = []
xticklabels.append(['All scenarios'])#Alternative: xticklabels.append([ScenList_Names_Plot[i] for i in ScenSel_2015])
xticklabels.append([ScenList_Names_Plot[i] for i in ScenSel_2050])
xticklabels.append([ScenList_Names_Plot[i] for i in ScenSel_2100])
xticklabels_flat = [item for sublist in xticklabels for item in sublist]
Gen_Linewidth = 0.5
# Start plotting
fig, axs = plt.subplots(2,1,figsize=(12,8))
axs = axs.ravel()
gs = plt.GridSpec(2, 1)
gs.update(hspace=0.2)#, wspace=0.4)
# create x lables for upper axis:
xticklabels_flat_3 = xticklabels_flat_2.copy()
xticklabels_flat_3[len(ScenSel_2015) -1 + len(ScenSel_2050) -0] = 'Circ_2100:'
for m in range(0,len(ScenSel_2100)):
xticklabels_flat_3[len(ScenSel_2015) -1 + len(ScenSel_2050) +1 +m] = str("%.2f" %Circ_2100[ScenSel_2100[m]])
# plot bars for product split first
axs[0] = plt.subplot(gs[0])#, sharey=True, sharex=True)
axs[0].set_color_cycle(MyColorCycle_prod)
ProxyHandlesList = [] # For legend
for m in range(0,14):
p1 = plt.bar(ind[0:len(ScenSel_2015)], Stock_Product_2015[m,ScenSel_2015], width, color=MyColorCycle_prod[m,:],
label = StockLabels[m], bottom = Stock_Product_2015[0:m,ScenSel_2015].sum(axis=0), linewidth = Gen_Linewidth)
ProxyHandlesList.append(plt.Rectangle((0, 0), 1, 1, fc=MyColorCycle_prod[m,:], linewidth = Gen_Linewidth)) # create proxy artist for legend
p1 = plt.bar(ind[len(ScenSel_2015)+1:len(ScenSel_2015)+1 + len(ScenSel_2050)], Stock_Product_2050[m,ScenSel_2050], width, color=MyColorCycle_prod[m,:],
bottom = Stock_Product_2050[0:m,ScenSel_2050].sum(axis=0), linewidth = Gen_Linewidth)
p1 = plt.bar(ind[len(ScenSel_2015)+1 + len(ScenSel_2050) +1 ::], Stock_Product_2100[m,ScenSel_2100], width, color=MyColorCycle_prod[m,:],
bottom = Stock_Product_2100[0:m,ScenSel_2100].sum(axis=0), linewidth = Gen_Linewidth)
plt_lgd = plt.legend(reversed(ProxyHandlesList),reversed(StockLabels),shadow = False, prop={'size':8.0},loc='upper left', bbox_to_anchor=(1.02, 1))
axs[0].set_ylim([ 0, 1.10])
axs[0].set_xlim([ -0.1, ind.max() + 0.6])
axs[0].set_ylabel('Total steel in system, %', fontsize =15)
axs[0].set_yticks(np.arange(0,1.01,0.2))
axs[0].set_yticklabels(['0','20','40','60','80','100'], fontsize =15)
axs[0].set_xticks(xticks)
axs[0].set_xticklabels(xticklabels_flat_3, fontsize =10, rotation = 0)
axs[0].text(0.25 * (len(ScenSel_2015)) - 0.25, 1.03, '2015', fontsize=10, fontweight='bold') #alternative fontweight: bold
axs[0].text(0.50 * (len(ScenSel_2015) +1) +0.25 * len(ScenSel_2050) - 0.25, 1.03, '2050', fontsize=10, fontweight='bold') #alternative fontweight: bold
axs[0].text(0.50 * (len(ScenSel_2015) +1 +len(ScenSel_2050) +1) + 0.25 * len(ScenSel_2100) - 0.25, 1.03, '2100', fontsize=10, fontweight='bold') #alternative fontweight: | |
sorted.
# Also build new list with titles ordered same as in display table.
self.src_list = set() # All srcs covering canonical
titles = {} # All titles from all srcs covering canonical
self.scatter_by_src = {} # WRW 4 Apr 2022 - collecting data for scatter plot.
for row in data: # Put is dict indexed by title.
titles.setdefault( row['title'], [] ).append( row )
self.src_list.add( row['src'] )
page = self.fb.get_page_from_sheet( row[ 'sheet' ], row[ 'src' ], row[ 'local'] ) # Applies sheet offset to get page from sheet
self.scatter_by_src.setdefault( row['src'], [] ).append( { 'page' : page, 'sheet' : row['sheet'] } )
self.src_list = set( sorted( self.src_list ) ) # sorted() returns list, must convert back to set().
self.title_by_row = []
self.srcs_by_row = []
self.most_common_by_title = {}
# ------------------------------------------------
table_data = [] # Build entire table
for title in sorted( titles ):
data = self.inspect_data( title, self.src_list, titles[ title ] )
partial_coverage = False if data[ 'srcs' ] == self.src_list else True
self.most_common_by_title[ title ] = data[ 'most_common' ]
if( values[ 'index-diff-controls-1' ] or # Show all
values[ 'index-diff-controls-2' ] and data['same'] == '*' or # or show mismatches.
values[ 'index-diff-controls-3' ] and partial_coverage # or show partial coverage
):
self.title_by_row.append( title ) # WRW 1 Apr 2022 - Save for click on short title.
self.srcs_by_row.append( data[ 'srcs' ] )
table_row = [] # Build one row of table
table_row.append( data['short_title'] )
table_row.append( data['same'] )
res_by_src = data[ 'res_by_src' ]
for src in self.srcs: # Build up horizontal row of page->sheet for each src.
t = res_by_src[src] if src in res_by_src else ''
table_row.append( t )
table_data.append( table_row )
show_data = True
if show_data:
self.fb.safe_update( self.index_diff_table, table_data )
else:
self.fb.safe_update( self.index_diff_table, [] )
self.table_data = table_data
# -----------------------------------------------
return True
# ------------------------------------------
# Click in main index management table, show PDF at page.
# Identifying row & column of click is black magic from the innards of Tkinter.
# See example in Play/table-index.py, found it online.
# Left-click on populated table cell: Show the page.
# Left-click on title: Launch editor for entire line.
# Right-click on a populated table cell: Launch editor just for the index source
# for that cell, not whole line.
# --------------------------------------------------------------------------
if event == 'index-diff-table-Click' or event == 'index-diff-table-Right-Click':
if event == 'index-diff-table-Click':
click = 'Left'
else:
click = 'Right'
if not self.canonical:
self.sg.popup( f"\nPlease select a book from the canonical table\n",
title='Birdland Warning',
icon=BL_Icon,
line_width = 100,
keep_on_top = True,
)
return True
# -----------------------------------------------------
# Gather some data common to all actions here.
bind_event = self.index_diff_table.user_bind_event
col = self.index_diff_table.Widget.identify_column( bind_event.x )
row_i = self.index_diff_table.Widget.identify_row( bind_event.y )
row = self.index_diff_table.Widget.item( row_i )
col_num = int( col[1:])
src = self.srcs[ col_num -3 ] # -1 for title, -1 for M, and -1 since arrives one-based
local = self.fb.get_local_from_canonical_src( self.canonical, src )
# -----------------------------------------------------
# WRW 14 Apr 2022 - Lots of titles that should be the same differ in small ways. Need a quick way
# to select one and add all the rest to a map table for use on subsequent raw processing.
# It appears that this event fires before the clicked element is actually selected.
# Respond to Left Click so event doesn't propagate.
if( click == 'Left' and
col_num == 1 and
values[ 'index-diff-edit-titles' ] ): # Left click on title in editing mode.
return True
# -----------------------------------------------------
# Same logic copied below for event == 'index-diff-select-button'
if( click == 'Right' and
col_num == 1 and
values[ 'index-diff-edit-titles' ] ): # Right click on title in editing mode.
selected_rows = values[ "index-diff-table" ]
if len( selected_rows ) < 2: # Nothing selected
return True
titles_to_edit = [ self.title_by_row[ row ] for row in selected_rows ]
self.do_titles_to_edit_window( titles_to_edit )
self.index_diff_table.update( select_rows = [] )
return True
# -----------------------------------------------------
# WRW 1 Apr 2022 - As I start to actually use Birdland for cleaning up the raw indexes I was spending a lot of
# time just getting to the indexes. Add click on title to bring up editor for all srcs on line.
elif( click == 'Right' and
col_num == 1 and
not values[ 'index-diff-edit-titles' ] ): # Click on title not in editing mode.
if 'tags' not in row or len( row[ 'tags' ] ) == 0: # Click below filled rows
return True
row_num = row[ 'tags' ][0]
title = self.title_by_row[ row_num ]
srcs = self.srcs_by_row[ row_num ]
paths = []
line_number = None
for src in srcs:
source = self.fb.get_source_from_src( src )
raw_folder = self.conf.val( 'folder', source )
local = self.fb.get_local_from_canonical_src( self.canonical, src )
raw_file, tline = self.fb.get_raw_file( title, local, src )
if not line_number: # Fetch first line number
line_number = tline
if not raw_folder:
print( f"ERROR-DEV: Unexpected empty value for raw_folder for source {source}" )
sys.exit(1)
if not raw_file or not tline:
print( f"ERROR-DEV: Unexpected empty value for raw_file or tline for src {src} and title {title}" )
sys.exit(1)
paths.append( Path( raw_folder, 'Raw-Index', raw_file ) ) # WRW 7 May 2022 - build in 'Raw-Index' folder name.
editor = self.conf.val( 'raw_index_editor' )
ln_option = self.conf.val( 'raw_index_editor_line_num' )
if not editor:
t = f"No Text Editor for raw index given in config file"
self.conf.do_popup( t )
elif shutil.which( editor ):
# --------------------------
# Raw index may be compressed, e.g., Buffalo. Make a temp file to edit
epaths = [] # Paths to edit
cpaths = [] # Save to recompress
for path in paths:
if path.suffix == '.gz':
tfile = tempfile.mkstemp()
epath = tfile[1]
with gzip.open( path.as_posix(), 'rt' ) as ifd, open( epath, 'w' ) as ofd:
for line in ifd:
ofd.write( line )
epaths.append( epath )
cpaths.append( { 'epath' : epath, 'path' : path } )
else:
epaths.append( path.as_posix() )
# --------------------------
ln_option_full = ln_option.replace( '?', line_number ).split()
command = [ editor, *ln_option_full, *epaths ]
po = subprocess.Popen( command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL )
po.wait()
# --------------------------------------
for cpath in cpaths:
with open( cpath[ 'epath' ] ) as ifd, gzip.open( cpath[ 'path' ].as_posix(), 'wt' ) as ofd:
for line in ifd:
ofd.write( line )
os.remove( cpath[ 'epath' ] )
# --------------------------------------
else:
t = f"Text Editor for raw index in config file '{editor}' not found."
self.conf.do_popup( t )
return True
# -----------------------------------------------------
# Right-Click in main body of table. Edit one raw index source file.
elif click == 'Right':
if col_num < 3: # Ignore click in title or 'same' column
return True
if src not in self.src_list: # Ignore click in empty column
return True
if not row['values']: # WRW 11 Apr 2022
return True # Ignore right click in header
contents = row['values'][int(col[1:])-1]
if not contents:
return True # Ignore click in empty cell
row_num = row[ 'tags' ][0]
title = self.table_data[ row_num ][ 0 ]
source = self.fb.get_source_from_src( src )
raw_folder = self.conf.val( 'folder', source )
local = self.fb.get_local_from_canonical_src( self.canonical, src )
raw_file, line_number = self.fb.get_raw_file( title, local, src )
if not raw_folder:
print( f"ERROR-DEV: Unexpected empty value for raw_folder for source {source}" )
sys.exit(1)
if not raw_file or not line_number:
print( f"ERROR-DEV: Unexpected empty value for raw_file or line for src {src} and title {title}" )
sys.exit(1)
path = Path( raw_folder, 'Raw-Index', raw_file ) # WRW 7 May 2022 - build in 'Raw-Index' folder name.
editor = self.conf.val( 'raw_index_editor' )
ln_option = self.conf.val( 'raw_index_editor_line_num' )
if not editor:
t = f"No Text Editor for raw index given config file"
self.conf.do_popup( t )
elif shutil.which( editor ):
# Raw index may be compressed, e.g., Buffalo. Make a temp file to edit
if path.suffix == '.gz':
compressed = True
tfile = tempfile.mkstemp()
epath = tfile[1]
with gzip.open( path.as_posix(), 'rt' ) as ifd, open( epath, 'w' ) as ofd:
for line in ifd:
ofd.write( line )
else:
epath = path.as_posix()
compressed = False
| |
ids = data[2:]
elif thru_flag == 1:
assert len(data) == 4, data
#ids = [data[2], 'THRU', data[3]]
ids = list(range(data[2], data[3]+1))
else:
raise NotImplementedError('thru_flag=%s data=%s' % (thru_flag, data))
return cls(components, ids, comment=comment)
def cross_reference(self, model: BDF) -> None:
    """
    Resolve this card's node ids into node object references.

    Parameters
    ----------
    model : BDF()
        the BDF object used to look up the referenced nodes
    """
    required_by = ' which is required by %s' % self.type
    self.ids_ref = model.EmptyNodes(self.node_ids, msg=required_by)
def uncross_reference(self) -> None:
    """Drop the cross references, keeping only the plain node id list."""
    self.ids, self.ids_ref = self.node_ids, None
@property
def node_ids(self):
    """Grid-point ids referenced by this card; empty/undefined nodes allowed."""
    required_by = ' which is required by %s' % self.type
    return _node_ids(self, self.ids, allow_empty_nodes=True, msg=required_by)
def raw_fields(self):
    """Return the card as a flat field list (THRU-collapsed) for printing."""
    fields = [self.type, self.components]
    fields += collapse_thru(self.node_ids)
    return fields
def __repr__(self):
    """Render the card in 8-character small-field format, with its comment."""
    return self.comment + print_card_8(self.raw_fields())
class SuperABQSet1(Set):
    """
    Generic class that superelement SEBSET1/SEQSET1 cards inherit from.

    Defines degrees-of-freedom in the analysis set (a-set) for one
    superelement.

    +----------+------+-----+------+------+-----+-----+-----+-----+
    | SEBSET1  | SEID |  C  | ID1  | ID2  | ID3 | ID4 | ID5 | ID6 |
    +----------+------+-----+------+------+-----+-----+-----+-----+
    |          | ID7  | ID9 |      |      |     |     |     |     |
    +----------+------+-----+------+------+-----+-----+-----+-----+
    | SEBSET1  | SEID |  C  | ID1  | THRU | ID2 |     |     |     |
    +----------+------+-----+------+------+-----+-----+-----+-----+
    """
    type = 'SuperABQSet1'

    def __init__(self, seid, components, ids, comment=''):
        """
        Creates the set card.

        Parameters
        ----------
        seid : int
            superelement id
        components : str
            DOF component string (see comment below)
        ids : list[int]
            grid point ids; THRU ranges are expanded
        comment : str; default=''
            a comment for the card
        """
        Set.__init__(self)
        if comment:
            self.comment = comment
        self.seid = seid
        #: Component number. (Integer zero or blank for scalar points or any
        #: unique combination of the Integers 1 through 6 for grid points with
        #: no embedded blanks.)
        self.components = components
        #: Identifiers of grids points. (Integer > 0)
        self.ids = expand_thru(ids)
        #print('ids =', self.ids)
        assert None not in self.ids
        # filled by cross_reference(); None while uncross-referenced
        self.ids_ref = None

    @classmethod
    def add_card(cls, card, comment=''):
        """Adds a SEBSET1/SEQSET1 card from ``BDF.add_card(...)``."""
        seid = integer(card, 1, 'seid')
        components = fcomponents_or_blank(card, 2, 'components', 0)

        nfields = len(card)
        ids = []
        i = 1
        # Collect ID3..IDn; blank fields return a falsy value and are skipped,
        # so the running label counter i only advances on real entries.
        for ifield in range(3, nfields):
            idi = integer_string_or_blank(card, ifield, 'ID%i' % i)
            if idi:
                i += 1
                ids.append(idi)
        ids = expand_thru(ids)
        return cls(seid, components, ids, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """Adds the card from OP2 data; nids is presumably a numpy array
        (``.tolist()`` below) — verify against the OP2 reader."""
        seid, components, nids = data
        #assert None not in components, 'Type=%s components=%s' % (cls.type, components)
        assert None not in nids, 'Type=%s nids=%s' % (cls.type, nids)
        assert -1 not in nids, 'nids=%s' % (nids.tolist())
        assert 0 not in nids, 'nids=%s' % (nids.tolist())
        return cls(seid, components, nids, comment=comment)

    def cross_reference(self, model: BDF) -> None:
        """
        Cross links the card so referenced cards can be extracted directly

        Parameters
        ----------
        model : BDF()
            the BDF object
        """
        msg = ' which is required by %s seid=%s' % (self.type, self.seid)
        self.ids_ref = model.EmptyNodes(self.node_ids, msg=msg)

    def uncross_reference(self) -> None:
        """Removes cross-reference links, keeping only the plain id list."""
        self.ids = self.node_ids
        self.ids_ref = None

    @property
    def node_ids(self):
        # Resolves through ids_ref when cross-referenced; empty nodes allowed.
        msg = ' which is required by %s seid=%s' % (self.type, self.seid)
        return _node_ids(self, self.ids, allow_empty_nodes=True, msg=msg)

    def raw_fields(self):
        """gets the "raw" card without any processing as a list for printing"""
        list_fields = [self.type, self.seid, self.components] + collapse_thru(self.node_ids)
        return list_fields

    def __repr__(self):
        list_fields = self.raw_fields()
        return self.comment + print_card_8(list_fields)
class ASET1(ABQSet1):
    """
    Defines degrees-of-freedom in the analysis set (a-set).

    Small-field format::

        ASET1  C  ID1  ID2  ID3 ...        (continuation: ID8 ID9 ...)
        ASET1  C  ID1  THRU  ID2
    """
    type = 'ASET1'

    def __init__(self, components, ids, comment=''):
        """Construction is shared with the other ABQSet1 cards."""
        super().__init__(components, ids, comment)
class BSET1(ABQSet1):
    """Defines b-set degrees-of-freedom; a thin ABQSet1 card."""
    type = 'BSET1'

    def __init__(self, components, ids, comment=''):
        """Construction is shared with the other ABQSet1 cards."""
        super().__init__(components, ids, comment)
class CSET1(Set):
    """
    Defines analysis set (a-set) degrees-of-freedom to be fixed (b-set) during
    generalized dynamic reduction or component mode synthesis calculations.

    Small-field format::

        CSET1  C   ID1  ID2   ID3 ...      (continuation: ID8 ID9 ...)
        CSET1  C   ID1  THRU  ID2
        CSET1  ,,  ALL                     (all six components)
    """
    type = 'CSET1'

    def __init__(self, ids, components, comment=''):
        """
        Creates a CSET1 card.

        Parameters
        ----------
        ids : list[int/str]
            grid point ids; THRU ranges are expanded
        components : str
            DOF component string, e.g. '123456'
        comment : str; default=''
            a comment for the card
        """
        Set.__init__(self)
        if comment:
            self.comment = comment
        #: Identifiers of grids points. (Integer > 0)
        self.ids = expand_thru(ids)
        self.components = components

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a CSET1 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        # The 'ALL' shorthand (blank C, 'ALL' in field 2) selects all six DOFs.
        if integer_string_or_blank(card, 2, 'C') == 'ALL':
            components = '123456'
        else:
            components = parse_components(card, 1, 'components')
        ids = [
            integer_or_string(card, ifield, 'ID%i' % i)
            for i, ifield in enumerate(range(2, len(card)), start=1)
        ]
        return CSET1(ids, components, comment=comment)

    def raw_fields(self):
        """Return the card as a flat field list (THRU-collapsed) for printing."""
        return ['CSET1', self.components] + collapse_thru(self.ids)

    def __repr__(self):
        """Render the card in 8-character small-field format, with its comment."""
        return self.comment + print_card_8(self.raw_fields())
class QSET1(ABQSet1):
    """
    Defines generalized degrees-of-freedom (q-set) to be used for dynamic
    reduction or component mode synthesis.
    """
    type = 'QSET1'

    def __init__(self, components, ids, comment=''):
        """Creates a QSET1 card; delegates entirely to ABQSet1."""
        super().__init__(components, ids, comment)
class SET1(Set):
    """
    Defines a list of structural grid points or element identification
    numbers.
    +------+--------+--------+-----+------+-----+-----+------+-----+
    |  1   |   2    |   3    |  4  |  5   |  6  |  7  |  8   |  9  |
    +======+========+========+=====+======+=====+=====+======+=====+
    | SET1 |  SID   |  ID1   | ID2 | ID3  | ID4 | ID5 | ID6  | ID7 |
    +------+--------+--------+-----+------+-----+-----+------+-----+
    |      |  ID8   |  etc.  |     |      |     |     |      |     |
    +------+--------+--------+-----+------+-----+-----+------+-----+
    | SET1 |   3    |   31   | 62  |  93  | 124 | 16  |  17  | 18  |
    +------+--------+--------+-----+------+-----+-----+------+-----+
    |      |   19   |        |     |      |     |     |      |     |
    +------+--------+--------+-----+------+-----+-----+------+-----+
    | SET1 |   6    |   29   | 32  | THRU | 50  | 61  | THRU | 70  |
    +------+--------+--------+-----+------+-----+-----+------+-----+
    |      |   17   |   57   |     |      |     |     |      |     |
    +------+--------+--------+-----+------+-----+-----+------+-----+
    """
    type = 'SET1'
    def __init__(self, sid, ids, is_skin=False, comment=''):
        Set.__init__(self)
        if comment:
            self.comment = comment
        #: Unique identification number. (Integer > 0)
        self.sid = sid
        #: List of structural grid point or element identification numbers.
        #: (Integer > 0 or 'THRU'; for the 'THRU' option, ID1 < ID2 or 'SKIN';
        #: in field 3)
        self.ids = expand_thru(ids)
        self.clean_ids()
        # True when field 3 carried the literal 'SKIN'
        self.is_skin = is_skin
        # Set by cross_reference(); tells update() how to remap self.ids.
        self.xref_type = None
    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a SET1 card from ``BDF.add_card(...)``
        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        sid = integer(card, 1, 'sid')
        ids = fields(integer_or_string, card, 'ID', i=2, j=len(card))
        is_skin = False
        i = 0
        # An optional leading 'SKIN' string is consumed before the id list.
        if len(ids) > 0:
            if isinstance(ids[0], str) and ids[0] == 'SKIN':
                is_skin = True
                i += 1
        else:
            assert len(card) > 2, card
        return SET1(sid, ids[i:], is_skin=is_skin, comment=comment)
    def update(self, maps):
        """
        Remaps the set's ids/sid using renumbering maps.
        Parameters
        ----------
        maps : dict
            expects 'set' and (for xref_type == 'Node') 'node' id maps
        Only xref_type == 'Node' is implemented; anything else raises
        NotImplementedError after printing the card.
        """
        set_map = maps['set']
        if self.xref_type == 'Node':
            nid_map = maps['node']
            self.ids = [nid_map[nid] for nid in self.ids]
        else:
            print(self.print_card())
            raise NotImplementedError('%s.xref_type = %r' % (self.type, self.xref_type))
        self.sid = set_map[self.sid]
    def symmetric_difference(self, set1):
        # Ids present in exactly one of the two SET1 cards.
        ids1 = set(self.ids)
        ids2 = set(set1.ids)
        return ids1.symmetric_difference(ids2)
    def add_set(self, set1):
        # Merge another SET1's ids into this one (deduplicated/sorted).
        self.ids += set1.ids
        self.clean_ids()
    #def IsSkin(self):
        #return self.is_skin
    def raw_fields(self):
        """gets the "raw" card without any processing as a list for printing"""
        skin = []
        if self.is_skin:
            skin = ['SKIN']
        return ['SET1', self.sid] + skin + self.ids
    def cross_reference(self, model, xref_type):
        """
        Cross links the card so referenced cards can be extracted directly
        Parameters
        ----------
        model : BDF()
            the BDF object
        xref_type : str
            {'Node'}
            SPLINEx, ACMODL, PANEL, AECOMP, XYOUTPUT
            - nodes
              - SPLINEx (all nodes must exist)
              - PANEL (all nodes must exist)
              - XYOUTPUT (missing nodes ignored)
              - AECOMP
              - ACMODL (optional)
            - elements
              - ACMODL (optional)
        """
        # Only records what kind of entity the ids reference; the actual
        # lookup happens lazily (see update()).
        self.xref_type = xref_type
    def write_card(self, size: int=8, is_double: bool=False) -> str:
        skin = []
        if self.is_skin:
            skin = ['SKIN']
        # checked in NX 2014 / MSC 2005.1
        return self.comment + print_card_8(['SET1', self.sid] + skin + self.ids)
class SET3(Set):
"""
Defines a list of grids, elements | |
import json
import re
from collections import defaultdict
import opml
import structlog
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.core.paginator import EmptyPage, InvalidPage, Paginator
from django.core.urlresolvers import reverse, reverse_lazy
from django.db import transaction
from django.shortcuts import get_object_or_404, redirect, render
from django.template import loader
from django.template.defaultfilters import slugify
from django.utils.html import format_html
from django.utils.translation import ugettext as _, ungettext
from django.views import generic
from elasticsearch.exceptions import ConflictError, RequestError
from .forms import (ActionForm, CategoryForm, FeedForm, OPMLImportForm,
ReadForm, SubscriptionFormSet, UndoReadForm, user_lock)
from .models import Category, UniqueFeed
from .tasks import read_later
from .. import es
from ..decorators import login_required
from ..tasks import enqueue
"""
Each view displays a list of entries, with a level of filtering:
- home: all entries
- category: entries in a specific category
- feed: entries for a specific feed
- item: a single entry
Entries are paginated.
"""
logger = structlog.get_logger(__name__)
MEDIA_RE = re.compile(
r'.*<(img|audio|video|iframe|object|embed|script|source)\s+.*',
re.UNICODE | re.DOTALL)
class Keyboard(generic.TemplateView):
    """Static page documenting the available keyboard shortcuts."""
    template_name = 'feeds/keyboard.html'
keyboard = Keyboard.as_view()
def paginate(object_list, page=1, nb_items=25, force_count=None):
    """
    Simple generic paginator for all the ``Entry`` lists.

    ``page``: 1-based page number; out-of-range pages fall back to the
    last page instead of raising.

    ``force_count``: when given, short-circuits the (potentially expensive)
    count by monkey-patching ``object_list.count``.

    Returns ``(page_object, total_count)``.
    """
    if force_count is not None:
        # NOTE(review): the patched function keeps a positional parameter so
        # it works whether it is invoked bound or unbound -- confirm against
        # the object_list implementations used by callers.
        def count(x):
            return force_count
        object_list.count = count
    paginator = Paginator(object_list, nb_items)
    try:
        paginated = paginator.page(page)
    except (EmptyPage, InvalidPage):
        paginated = paginator.page(paginator.num_pages)
    # Use the public ``count`` API rather than the private ``_count``
    # attribute, which does not exist on newer Django Paginator versions.
    return paginated, paginator.count
@login_required
def entries_list(request, page=1, mode=None, category=None, feed=None,
                 starred=False):
    """
    Displays a paginated list of entries.

    ``page``: the page number

    ``mode``: filters the list to display all / unread / starred items

    ``category``: (slug) if set, will filter the entries of this category

    ``feed``: (object_id) if set, will filter the entries of this feed

    Note: only set category OR feed. Not both at the same time.
    """
    page = int(page)
    user = request.user
    es_entries = es.manager.user(request.user).defer(
        'content', 'guid', 'tags', 'read_later_url',
        'author', 'broadcast', 'link', 'starred',
    ).query_aggregate('all_unread', read=False)
    if mode == 'unread':
        es_entries = es_entries.filter(read=False)
    elif mode == 'stars':
        es_entries = es_entries.filter(
            starred=True).query_aggregate('all_starred', starred=True)
    search = request.GET.get('q', '')
    if search:
        es_entries = es_entries.filter(query=search)
    if category is not None:
        category = get_object_or_404(user.categories, slug=category)
        all_url = reverse('feeds:category', args=[category.slug])
        unread_url = reverse('feeds:category', args=[category.slug, "unread"])
        stars_url = reverse('feeds:category', args=[category.slug, "stars"])
        es_entries = es_entries.filter(category=category.pk).query_aggregate(
            'all', category=category.pk).query_aggregate(
            'unread', category=category.pk, read=False)
    if feed is not None:
        feed = get_object_or_404(user.feeds.select_related('category'),
                                 pk=feed)
        all_url = reverse('feeds:feed', args=[feed.pk])
        unread_url = reverse('feeds:feed', args=[feed.pk, "unread"])
        stars_url = reverse('feeds:feed', args=[feed.pk, "stars"])
        category = feed.category
        es_entries = es_entries.filter(feed=feed.pk).query_aggregate(
            'all', feed=feed.pk).query_aggregate(
            'unread', feed=feed.pk, read=False)
    if starred is True:
        es_entries = es_entries.filter(starred=True).query_aggregate(
            'all', starred=True).query_aggregate(
            'unread', starred=True, read=False)
        all_url = reverse('feeds:entries', args=['stars'])
        unread_url = None
        stars_url = None
    if feed is None and category is None and starred is not True:
        all_url = reverse('feeds:entries')
        unread_url = reverse('feeds:entries', args=['unread'])
        stars_url = reverse('feeds:entries', args=['stars'])
        es_entries = es_entries.query_aggregate('all').query_aggregate(
            'unread', read=False)
    if user.oldest_first:
        es_entries = es_entries.order_by('timestamp', 'id')
    if request.method == 'POST':
        if request.POST['action'] in (ReadForm.READ_ALL, ReadForm.READ_PAGE):
            pages_only = request.POST['action'] == ReadForm.READ_PAGE
            form = ReadForm(es_entries, feed, category, user,
                            pages_only=pages_only, data=request.POST)
            if form.is_valid():
                pks = form.save()
                undo_form = loader.render_to_string('feeds/undo_read.html', {
                    'form': UndoReadForm(initial={
                        'pks': json.dumps(pks, separators=(',', ':'))}),
                    'action': request.get_full_path(),
                }, request=request)
                message = ungettext(
                    '1 entry has been marked as read.',
                    '%(value)s entries have been marked as read.',
                    len(pks)) % {'value': len(pks)}
                messages.success(request,
                                 format_html(u"{0} {1}", message, undo_form))
        elif request.POST['action'] == 'undo-read':
            form = UndoReadForm(user, data=request.POST)
            if form.is_valid():
                count = form.save()
                messages.success(
                    request, ungettext(
                        '1 entry has been marked as unread.',
                        '%(value)s entries have been marked as unread.',
                        count) % {'value': count})
        # Always redirect after a POST (PRG pattern).
        if mode == 'unread':
            return redirect(unread_url)
        elif mode == 'stars':
            return redirect(stars_url)
        else:
            return redirect(all_url)
    try:
        entries = es_entries.fetch(page=page,
                                   per_page=user.entries_per_page,
                                   annotate=user)
    except RequestError as e:
        if 'No mapping found' not in e.error:  # index is empty
            raise
        entries = []
        user._unread_count = unread_count = total_count = 0
        # Fix: num_pages was previously undefined on this path, causing a
        # NameError when building the pagination dict below.
        num_pages = 1
    else:
        aggs = entries['aggregations']
        entries = entries['hits']
        unread_count = aggs['entries']['unread']['doc_count']
        total_count = aggs['entries']['all']['doc_count']
        user._unread_count = aggs['entries']['all_unread']['doc_count']
        if mode == 'unread':
            card = unread_count
        elif mode == 'stars':
            card = aggs['entries']['all_starred']['doc_count']
        else:
            card = total_count
        num_pages = card // user.entries_per_page
        if card % user.entries_per_page:
            num_pages += 1
    # Hand-rolled pagination dict: the template only needs these keys.
    entries = {
        'object_list': entries,
        'paginator': {
            'num_pages': num_pages,
        },
        'has_previous': page > 1,
        'has_next': page < num_pages,
        'previous_page_number': page - 1,
        'next_page_number': page + 1,
        'number': page,
    }
    request.session['back_url'] = request.get_full_path()
    # base_url is a variable that helps the paginator a lot. The drawback is
    # that the paginator can't use reversed URLs.
    if mode == 'unread':
        base_url = unread_url
    elif mode == 'stars':
        base_url = stars_url
    else:
        base_url = all_url
    context = {
        'category': category,
        'feed': feed,
        'entries': entries,
        'mode': mode,
        'unread_count': unread_count,
        'total_count': total_count,
        'all_url': all_url,
        'unread_url': unread_url,
        'stars_url': stars_url,
        'base_url': base_url,
        'stars': starred,
        # Fix: was aggs['entries']['unread']['doc_count'], which is the same
        # value as unread_count but crashed (NameError) when the index was
        # empty and aggs was never assigned.
        'all_unread': unread_count,
        'entries_template': 'feeds/entries_include.html',
        'search': search,
        'search_form': True,
    }
    if unread_count:
        context['read_all_form'] = ReadForm()
        context['read_page_form'] = ReadForm(pages_only=True, initial={
            'action': ReadForm.READ_PAGE,
            'pages': json.dumps([int(page)]),
        })
        context['action'] = request.get_full_path()
    if (
        len(entries['object_list']) == 0 and
        request.user.feeds.count() == 0
    ):
        context['noob'] = True
    if request.is_ajax():
        template_name = context['entries_template']
    else:
        template_name = 'feeds/entries_list.html'
    return render(request, template_name, context)
class SuccessMixin(object):
    """Flashes a success message after a valid form submission."""
    success_message = None

    def get_success_message(self):
        """Hook: override to build a dynamic message; None disables it."""
        return self.success_message

    def form_valid(self, form):
        response = super().form_valid(form)
        message = self.get_success_message()
        if message is not None:
            messages.success(self.request, message)
        return response
class CategoryMixin(SuccessMixin):
    """Shared configuration for the category create/update/delete views."""
    form_class = CategoryForm
    success_url = reverse_lazy('feeds:manage')

    def get_object(self):
        # Restrict lookups to the current user's own categories.
        return get_object_or_404(self.request.user.categories,
                                 slug=self.kwargs['slug'])

    def get_form_kwargs(self):
        # The form needs the user to scope validation.
        return dict(super().get_form_kwargs(), user=self.request.user)
class AddCategory(CategoryMixin, generic.CreateView):
    """Create a new category for the current user."""
    template_name = 'feeds/category_form.html'
add_category = login_required(AddCategory.as_view())
class EditCategory(CategoryMixin, generic.UpdateView):
    """Update an existing category."""
    template_name = 'feeds/edit_category.html'

    def get_success_message(self):
        template = _('%(category)s has been successfully '
                     'updated')
        return template % {'category': self.object}
edit_category = login_required(EditCategory.as_view())
class DeleteCategory(CategoryMixin, generic.DeleteView):
    """Confirm and delete a category, then purge its entries via
    ``user.delete_category_entries`` (presumably search-index cleanup --
    verify against the user model)."""
    success_url = reverse_lazy('feeds:manage')
    @transaction.atomic
    def delete(self, request, *args, **kwargs):
        # Capture pk/name before delete(): the row is gone afterwards.
        self.object = self.get_object()
        pk = self.object.pk
        name = self.object.name
        self.object.delete()
        request.user.delete_category_entries(pk)
        messages.success(
            self.request,
            _('%(category)s has been successfully deleted') % {
                'category': name})
        success_url = self.get_success_url()
        return redirect(success_url)
    def get_context_data(self, **kwargs):
        # Count this category's indexed entries so the confirmation page can
        # show what is about to be removed.
        entry_count = es.client.count(
            index=es.user_alias(self.request.user.pk),
            doc_type='entries',
            body={
                'query': {
                    'filtered': {
                        'filter': {'term': {'category': self.object.pk}},
                    },
                },
            },
        )['count']
        kwargs.update({
            'entry_count': entry_count,
            'feed_count': self.object.feeds.count(),
        })
        return super().get_context_data(**kwargs)
delete_category = login_required(DeleteCategory.as_view())
class FeedMixin(SuccessMixin):
    """Shared configuration for the feed create/update/delete views."""
    form_class = FeedForm
    success_url = reverse_lazy('feeds:manage')

    def get_object(self):
        # Restrict lookups to the current user's own feeds.
        return get_object_or_404(self.request.user.feeds,
                                 pk=self.kwargs['feed'])

    def get_form_kwargs(self):
        # The form needs the user to scope validation.
        return dict(super().get_form_kwargs(), user=self.request.user)
class AddFeed(FeedMixin, generic.CreateView):
    """Subscribe the current user to a new feed."""
    template_name = 'feeds/feed_form.html'

    def get_success_message(self):
        return _('%(feed)s has been successfully '
                 'added') % {'feed': self.object.name}

    def get_initial(self):
        # Allow pre-filling via query string (?feed=<url>&name=<title>),
        # used e.g. by bookmarklets.
        initial = super().get_initial()
        for param, field in (('feed', 'url'), ('name', 'name')):
            if param in self.request.GET:
                initial[field] = self.request.GET[param]
        return initial
add_feed = login_required(AddFeed.as_view())
class EditFeed(FeedMixin, generic.UpdateView):
    """Update an existing feed subscription."""
    template_name = 'feeds/edit_feed.html'

    def get_success_message(self):
        template = _('%(feed)s has been successfully '
                     'updated')
        return template % {'feed': self.object.name}
edit_feed = login_required(EditFeed.as_view())
class DeleteFeed(FeedMixin, generic.DeleteView):
    """Confirm and delete a feed, then purge its entries via
    ``user.delete_feed_entries`` (presumably search-index cleanup --
    verify against the user model)."""
    def get_context_data(self, **kwargs):
        # Count this feed's indexed entries so the confirmation page can
        # show what is about to be removed.
        entry_count = es.client.count(
            index=es.user_alias(self.request.user.pk),
            doc_type='entries',
            body={
                'query': {
                    'filtered': {
                        'filter': {'term': {'feed': self.object.pk}},
                    },
                },
            },
        )['count']
        kwargs['entry_count'] = entry_count
        return super().get_context_data(**kwargs)
    @transaction.atomic
    def delete(self, request, *args, **kwargs):
        # Capture pk/name before delete(): the row is gone afterwards.
        self.object = self.get_object()
        pk = self.object.pk
        name = self.object.name
        self.object.delete()
        request.user.delete_feed_entries(pk)
        messages.success(
            request,
            _('%(feed)s has been successfully deleted') % {
                'feed': name})
        success_url = self.get_success_url()
        return redirect(success_url)
delete_feed = login_required(DeleteFeed.as_view())
@login_required
def item(request, entry_id):
entry = es.entry(request.user, entry_id)
if not entry.read:
try:
entry.update(read=True)
except ConflictError:
# Double click // two operations at a time. Entry has already
# been marked as read.
pass
back_url = request.session.get('back_url',
default=entry.feed.get_absolute_url())
# Depending on the list used to access to this page, we try to find in an
# intelligent way which is the previous and the next item in the list.
# This way the user has nice 'previous' and 'next' buttons that are
# dynamically changed
mode = None
bits = back_url.split('/')
# FIXME: The kw thing currently doesn't work with paginated content.
kw = {'user': request.user}
if bits[1] == 'unread':
# only unread
kw['read'] = False
mode = 'unread'
elif bits[1] == 'stars':
mode = 'stars'
kw['starred'] = True
elif bits[1] == 'feed':
# Entries in self.feed
kw = {'feed': entry.feed}
elif bits[1] == 'category':
# Entries in self.feed.category
category_slug = bits[2]
category = Category.objects.get(slug=category_slug, user=request.user)
kw = {'feed__category': category}
if len(bits) > 3:
if bits[3] == 'unread':
kw['read'] = False
mode = 'unread'
elif bits[3] == 'stars':
kw['starred'] = True
# The previous is actually the next by date, and vice versa
es_entries = es.manager.user(request.user).exclude(id=entry.pk)
if 'feed' in kw:
es_entries = es_entries.filter(feed=kw['feed'].pk)
if 'read' in kw:
es_entries = es_entries.filter(read=kw['read'])
if 'feed__category' in kw:
es_entries = es_entries.filter(category=kw['feed__category'].pk)
if 'starred' in kw:
es_entries = es_entries.filter(starred=kw['starred'])
previous = es_entries.filter(timestamp__gte=entry.date).order_by(
'timestamp', 'id').fetch(per_page=1)
previous = previous['hits'][0] if previous['hits'] else None
if previous is not None:
if previous.date == entry.date:
previous = es_entries.filter(
timestamp__gte=entry.date).filter(
id__gt=entry.pk
).order_by('timestamp', | |
# <gh_stars>0  (dataset-extraction artifact; commented out so the file parses)
import sys, os
import math
import collections
import re
import multiprocessing
import time
import contextlib
import json
import tqdm
# tqdm progress bar wrapped in contextlib.closing, so "with ctqdm(...)"
# closes the bar on exit.
def ctqdm(*args, **kwargs): return contextlib.closing(tqdm.tqdm(*args, **kwargs))
import nltk
import numpy as np
import tensorflow as tf
import pandas as pd
import sentencepiece as spm
GLOVE_PATH = "../input/embeddings/glove.840B.300d/glove.840B.300d.txt"
PARAGRAM_PATH = "../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt"
MAX_SEQ_LEN = 400
USE_CHARACTER = False
USE_REPLACE_TOKEN = False
USE_POS = False
USE_HOMEBREW = False
USE_SENTENCE_PIECE = False
SAVE = True
assert not USE_REPLACE_TOKEN or not USE_SENTENCE_PIECE
# preload
if USE_POS:
nltk.pos_tag(["this", "is", "test"])
nltk.stem.WordNetLemmatizer().lemmatize("test")
#----------------------------------------------------------------------------
print("load csv", end="...", flush=True)
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
print("done.")
#/---------------------------------------------------------------------------
#----------------------------------------------------------------------------
NUM_KEYS = [str(i) for i in range(10)] + ["="]
RE_SINGLE_NUM = re.compile("[0-9]")
MATH_TOKEN = "MATHTOKEN"
FWORD_TOKEN = "FWORDTOKEN"
ONLY_STAR_TOKEN = "ONLYSTARTOKEN"
class SuffixCounter:
    """Hands out single-character suffixes 'A', 'B', 'C', ... in order."""

    def __init__(self):
        # Number of suffixes issued so far.
        self.counter = 0

    def new(self):
        # NOTE(review): beyond 26 calls this walks past 'Z' into
        # punctuation characters -- callers appear to tolerate that.
        letter = chr(ord("A") + self.counter)
        self.counter += 1
        return letter
def preprocess_sent(sent):
    """
    Normalize a raw question string before tokenization.

    Replaces whitespace oddities, splits on '?' boundaries, and swaps
    '*'-containing words for placeholder tokens (ONLY_STAR/MATH/FWORD,
    each tagged with a unique one-letter suffix) so the tokenizer cannot
    mangle them. Returns ``(preprocessed_sent, token_map)`` where
    ``token_map`` maps each placeholder back to the original word.
    """
    suffix = SuffixCounter()
    token_map = dict()
    # irreversible transformation
    sent = sent.replace("\xa0", " ")
    sent = sent.replace("\u200b", " ")
    sent = sent.strip()
    # [0-9] => 9
    #sent = RE_SINGLE_NUM.sub("9", sent)
    # [math]-[math] => MATH
    # split
    words = sent.split()
    # split each word on '?' while keeping the '?' as its own token
    new_words = []
    for word in words:
        if "?" not in word:
            new_words.append(word)
        else:
            new_words.extend([subword for subword in re.split(r"(\?)", word) if len(subword) > 0])
            """
            q_splits = word.split("?")
            new_words.append(q_splits[0])
            for q_split in q_splits[1:]:
                new_words.append("?")
                new_words.append(q_split)
            """
    words = new_words
    # replace '*'-containing words with placeholder tokens
    new_words = []
    for word in words:
        if "*" in word:
            if len(set(word)) == 1: # only "*"
                after = ONLY_STAR_TOKEN + suffix.new()
                new_words.append(after)
                token_map[after] = word
            else:
                if any(num_key in word for num_key in NUM_KEYS):
                    # digits + '*' => treated as a math expression
                    after = MATH_TOKEN + suffix.new()
                    new_words.append(after)
                    token_map[after] = word
                elif "*" not in word[1:-1]:
                    # stars only at the edges: peel them off separately
                    if word[0] == "*":
                        after = ONLY_STAR_TOKEN + suffix.new()
                        new_words.append(after)
                        token_map[after] = "*"
                        word = word[1:]
                    if word[-1] == "*":
                        new_words.append(word[:-1])
                        after = ONLY_STAR_TOKEN + suffix.new()
                        new_words.append(after)
                        token_map[after] = "*"
                    else:
                        new_words.append(word)
                else:
                    # interior stars => censored profanity
                    after = FWORD_TOKEN + suffix.new()
                    new_words.append(after)
                    token_map[after] = word
        else: # "*" not in word
            new_words.append(word)
    preprocessed_sent = " ".join(new_words)
    # debug aid: flag sentences with an unusually high placeholder count
    if suffix.counter > 20: print(sent)
    return preprocessed_sent, token_map
def postprocess_sent(tokenized, token_map):
    """
    Clean up a tokenized sentence and restore the placeholder tokens.

    Steps, in order:
      1. split hyphenated tokens (kept intact when they contain "9",
         i.e. masked digits such as ranges)
      2. split off trailing "'s" / "n't" / "'t"; otherwise strip apostrophes
      3. in tokens containing "9", mask letters: A-Z -> "X", a-z -> "x"
      4. substitute the placeholders recorded in ``token_map`` back in
      5. truncate to MAX_SEQ_LEN; optionally POS-tag and lemmatize

    Returns ``(tokens, token_map)``, or when USE_POS is set,
    ``(tokens, pos_tags, lemmas, token_map)``.
    """
    # --- hyphen splitting (keep '-' as separate tokens) ---
    new_tokenized = []
    for token in tokenized:
        if "-" not in token:
            new_tokenized.append(token)
        else:
            if token == "-":
                new_tokenized.append(token)
            elif "9" not in token:
                new_tokenized.extend([subword for subword in re.split("(-)", token) if len(subword) > 0])
            else:
                # tokens with masked digits (e.g. "9-9" ranges) stay intact
                new_tokenized.append(token)
    tokenized = new_tokenized
    # --- apostrophe handling ---
    new_tokenized = []
    for token in tokenized:
        if token[-2:].lower() == "'s":
            if len(token) > 2:
                new_tokenized.append(token[:-2])
            new_tokenized.append("'s")
        elif token[-3:].lower() == "n't":
            if len(token) > 3:
                new_tokenized.append(token[:-3])
            new_tokenized.append("n't")
        elif token[-2:].lower() == "'t":
            if len(token) > 2:
                new_tokenized.append(token[:-2])
            new_tokenized.append("'t")
        else:
            if len(set(token)) == 1:
                # runs of a single character (e.g. "'''") stay untouched
                new_tokenized.append(token)
            else:
                new_tokenized.append(token.replace("'", ""))
    tokenized = new_tokenized
    # --- letter masking in digit-bearing tokens ---
    new_tokenized = []
    for token in tokenized:
        if "9" not in token:
            new_tokenized.append(token)
        else:
            # BUGFIX: the patterns used to be the literal strings "a-z" /
            # "A-Z" (which almost never match); use character classes so
            # letters are actually masked as the comments intended.
            new_tokenized.append(re.sub("[a-z]", "x", re.sub("[A-Z]", "X", token)))
    tokenized = new_tokenized
    # --- restore placeholder tokens ---
    if USE_REPLACE_TOKEN:
        # keep the generic placeholder, dropping the unique one-letter suffix
        for after_token in token_map:
            idx = next(filter(lambda x: after_token == x[1], enumerate(tokenized)))[0]
            tokenized[idx] = after_token[:-1]
    else:
        # substitute the original word back in
        for after_token, before_token in token_map.items():
            idx = next(filter(lambda x: after_token == x[1], enumerate(tokenized)))[0]
            tokenized[idx] = before_token
    if MAX_SEQ_LEN > 0:
        tokenized = tokenized[:MAX_SEQ_LEN]
    if not USE_POS:
        return tokenized, token_map
    # --- optional POS tagging / lemmatization ---
    word_and_pos_tags = nltk.pos_tag(tokenized)
    _, pos_tags = zip(*word_and_pos_tags)
    lemmatizer = nltk.stem.WordNetLemmatizer()
    wordnet_pos = lambda e: ('a' if e[0].lower() == 'j' else e[0].lower()) if e[0].lower() in ['n', 'r', 'v'] else 'n'
    lemmatize = lambda x: lemmatizer.lemmatize(x[0].lower(), wordnet_pos(x[1]))
    lemmatized = [lemmatize(w) for w in word_and_pos_tags]
    return tokenized, pos_tags, lemmatized, token_map
def tokenize(sent):
    """Preprocess, tokenize with nltk's TweetTokenizer, then postprocess."""
    preprocessed, token_map = preprocess_sent(sent)
    tokens = nltk.tokenize.TweetTokenizer().tokenize(preprocessed)
    return postprocess_sent(tokens, token_map)
print("tokenize", end="...", flush=True)
s = time.time()
with multiprocessing.Pool(8) as pool:
if not USE_POS:
all_train_sents, train_token_map = zip(*pool.map(tokenize, train_df.question_text))
else:
all_train_sents, all_train_pos_tags, all_train_lemmas, train_token_map = zip(*pool.map(tokenize, train_df.question_text))
with multiprocessing.Pool(8) as pool:
if not USE_POS:
test_sents, test_token_map = zip(*pool.map(tokenize, test_df.question_text))
else:
test_sents, test_pos_tags, test_lemmas, test_token_map = zip(*pool.map(tokenize, test_df.question_text))
print("done.", time.time() - s)
#/---------------------------------------------------------------------------
print("build vocab", end="...", flush=True)
train_vocab_counter = collections.Counter([word for sent in all_train_sents for word in sent])
test_only_vocab = {word for sent in test_sents for word in sent} - set(train_vocab_counter)
word_to_id = {word:id_+1 for id_,word in enumerate(sorted(set(train_vocab_counter) | test_only_vocab))}
word_to_id["$$UNK$$"] = 0
id_to_word = [word for word,id_ in sorted(word_to_id.items(), key=lambda x:x[1])]
print("done.", flush=True)
def load_embedding(fname, word_to_id, train_vocab_counter, logarithm=True, do_gc=False, paragram=False):
    """
    Load pretrained word vectors for the vocabulary in ``word_to_id``.

    Words absent from the embedding file get a fallback vector: the
    frequency-weighted average of all pretrained in-vocabulary vectors
    (log-weighted when ``logarithm`` is True). ``paragram`` switches to
    latin-1 decoding and lowercased lookups for the paragram file format.

    Returns ``(outs, oov)`` where ``outs`` is a (vocab_size, dim) float32
    matrix indexed by word id and ``oov`` is the set of words (plus the
    seed words below) not found in the file.
    """
    if paragram:
        pretrainable_vocab = {word.lower() for word in word_to_id}
    else:
        pretrainable_vocab = set(word_to_id)
    # seed words used below to build vectors for the placeholder tokens
    pretrainable_vocab.update(["*", "fuck", "shit"])
    word_to_vec = dict()
    with open(fname, encoding="latin-1" if paragram else "utf-8") as f:
        for line in f:
            line = line.rstrip()
            if len(line) == 0: continue
            word, *vec = line.split(" ")
            if word in pretrainable_vocab:
                vec = np.array([float(v) for v in vec], dtype=np.float32)
                word_to_vec[word] = vec
    # training words with a pretrained vector, and their frequencies
    pretrained_train_words = [word for word in train_vocab_counter if (word.lower() if paragram else word) in word_to_vec]
    distribution = [train_vocab_counter[word] for word in pretrained_train_words]
    if logarithm:
        distribution = np.log(distribution)
    # frequency-weighted mean vector used for unknown words
    unk_vec = np.average([word_to_vec[word.lower() if paragram else word] for word in pretrained_train_words], axis=0, weights=distribution)
    outs = np.tile(unk_vec[np.newaxis,:], [len(word_to_id), 1])
    for word, id_ in word_to_id.items():
        if paragram:
            word = word.lower()
        if word in word_to_vec:
            outs[id_] = word_to_vec[word]
    # hand-crafted vectors for the placeholder tokens
    if USE_REPLACE_TOKEN:
        if ONLY_STAR_TOKEN in word_to_id:
            outs[word_to_id[ONLY_STAR_TOKEN]] = word_to_vec["*"]
        if FWORD_TOKEN in word_to_id:
            outs[word_to_id[FWORD_TOKEN]] = np.mean([word_to_vec["fuck"], word_to_vec["shit"]], axis=0)
        if MATH_TOKEN in word_to_id:
            outs[word_to_id[MATH_TOKEN]] = word_to_vec["*"]
    oov = pretrainable_vocab - set(word_to_vec)
    if do_gc:
        # free the large intermediates before returning (memory-tight kernel)
        del pretrainable_vocab, word_to_vec, pretrained_train_words, distribution, unk_vec
        import gc
        print("gc:", gc.collect())
    return outs, oov
print("load glove", end="...", flush=True)
s = time.time()
glove_emb, glove_oov = load_embedding(GLOVE_PATH, word_to_id, train_vocab_counter, logarithm=True)
e = time.time()
print("done.", e-s, flush=True)
print("load paragram", end="...", flush=True)
s = time.time()
paragram_emb, paragram_oov = load_embedding(PARAGRAM_PATH, word_to_id, train_vocab_counter, logarithm=True, paragram=True)
e = time.time()
print("done.", e-s, flush=True)
# character
if USE_CHARACTER:
train_char_counter = collections.Counter()
for word, count in train_vocab_counter.items():
sub_counter = collections.Counter(word * count)
train_char_counter.update(sub_counter)
MIN_CHAR_FREQUENCY = 1000
char_to_id = {char:i+3 for i,char in enumerate(sorted([char for char,count in train_char_counter.items() if count >= MIN_CHAR_FREQUENCY]))}
char_to_id["$$PAD$$"] = 0
char_to_id["$$CENTER$$"] = 1
char_to_id["$$UNK$$"] = 2
unk_char_id = char_to_id["$$UNK$$"]
id_to_char = [char for char,id_ in sorted(char_to_id.items(), key=lambda x:x[1])]
MAX_WORD_LEN = 13
    def func_word_to_chars(word):
        # Map a word to (char-id list padded to MAX_WORD_LEN, true length).
        if len(word) <= MAX_WORD_LEN:
            return ([char_to_id.get(char, unk_char_id) for char in word] + [0] * (MAX_WORD_LEN-len(word)), len(word))
        else:
            # Too-long words keep their head and tail; the elided middle is
            # marked with one or two $$CENTER$$ ids (two when MAX_WORD_LEN
            # is even, to keep the halves balanced).
            center = [char_to_id["$$CENTER$$"]]
            if MAX_WORD_LEN % 2 == 0:
                l = MAX_WORD_LEN // 2 - 1
                center = center * 2
            else:
                l = MAX_WORD_LEN // 2
            return ([char_to_id.get(char, unk_char_id) for char in word[:l]] + center + [char_to_id.get(char, unk_char_id) for char in word[-l:]], MAX_WORD_LEN)
word_to_chars = {word:func_word_to_chars(word) for word in sorted(set(train_vocab_counter) | test_only_vocab)}
# homebrew
if USE_HOMEBREW:
print("homebrew")
dim_homebrew = 50 # 66.5% (default) 1epoch目で65.91、2epoch目で66.53
glove_window = 15
glove_iter = 15
glove_min = 5
glove_lower = False
#dim_homebrew = 300 # 66.5% 学習が早い。66.32->66.49。word-simはぱっと見変わらないがロスは小さい
#dim_homebrew, glove_iter = 300, 50 # 66.4% 66.40->66.16
#dim_homebrew = 150 # 66.6% 66.04->66.58
#glove_window = 7 # 66.4% 65.51->66.41。word-simは強く関連してそうなものだけ残って変な単語が減る。
#glove_window = 11 # 66.4% 65.54->66.35
#glove_iter = 50 # 66.4% 65.69->66.36 it15とword-simはスコア含めほぼ変わらないように見える。ロスは1割ほど落ちた。(iter15=0.040320, iter50=0.036944)
#glove_min = 50 # 66.2% 悪い。ねばる。64.98->66.20->66.21。word-simはぱっと見変わらない。よく見るとレアワードでちゃんと変わってるかも。
#glove_lower = True # 66.7% 65.61->66.68
#dim_homebrew, glove_lower = 300, True # 66.18->66.69
homebrew_word_to_id = {"<unk>":0}
homebrew_id_to_word = ["<unk>"]
homebrew_new_id = 1
homebrew_init_emb = []
with open("../homebrew/glove-homebrew{}.{}d.win{}-it{}-min{}.txt".format((".lower" if glove_lower else ""), dim_homebrew, glove_window, glove_iter, glove_min)) as f:
for line in f:
line = line.strip()
if len(line) == 0: continue
word, *vec = line.split(" ")
assert len(vec) == dim_homebrew
vec = np.array([float(v) for v in vec], dtype=np.float32)
if word == "<unk>":
homebrew_init_emb = [vec] + homebrew_init_emb
continue
homebrew_word_to_id[word] = homebrew_new_id
homebrew_new_id += 1
homebrew_id_to_word.append(word)
homebrew_init_emb.append(vec)
homebrew_init_emb = np.stack(homebrew_init_emb, axis=0)
if USE_POS:
pos_tag_set = {pos_tag for sents in [all_train_pos_tags, test_pos_tags] for sent in sents for pos_tag in sent}
#id_to_pos_tag = ["$$UNK$$"] + list(pos_tag_set)
id_to_pos_tag = list(pos_tag_set)
pos_tag_to_id = {t:i for i,t in enumerate(id_to_pos_tag)}
all_train_pos_tags = [[pos_tag_to_id[pos_tag] for pos_tag in sent] for sent in all_train_pos_tags]
test_pos_tags = [[pos_tag_to_id[pos_tag] for pos_tag in sent] for sent in test_pos_tags]
if USE_SENTENCE_PIECE:
with open("sentences.txt", "w") as f:
for sents in [all_train_sents, test_sents]:
for words in sents:
print(" ".join(words), file=f)
SP_VOCAB_SIZE = 2048
spm.SentencePieceTrainer.Train('--input=sentences.txt --model_prefix=sp{vocab} --vocab_size={vocab} --character_coverage=0.9995'.format(vocab=SP_VOCAB_SIZE))
sp = spm.SentencePieceProcessor()
sp.Load('sp{}.model'.format(SP_VOCAB_SIZE))
with ctqdm(sorted(set(train_vocab_counter) | test_only_vocab), desc="build sp map") as vocab:
word_to_sp = {word:sp.EncodeAsIds(word) for word in vocab}
def to_instance(idx, sent, label, pos=None, lemma=None):
outs = dict()
outs["word"] = [word_to_id.get(word, 0) for word in sent]
outs["sequence_length"] = | |
from openpyxl import load_workbook
from itertools import islice
from collections import OrderedDict
import json
import uuid
import random
import jsonpickle
import copy
import os
import sys
rules_filename = "deckfight2.xlsx"
cards_filename = "cards.json"
report_folder_path="reports"
log_reports_folder_path = "log_reports"
players_folder_path='players_info'
# keywords for crating the report
# region
keyword="keyword"
beginning_of_round_report_keyword="beginning_of_round_report"
card_played_keyword="card_played"
phase_started_keyword="phase_started"
phase_ended_keyword="phase_ended"
debuff_applied_keyword="debuff_applied"
card_reducer_debuff_effect_activated_keyword="card_reducer_debuff_effect_activated"
card_reducer_debuff_effectiveness_keyword="card_reducer_debuff_effectiveness"
special_debuff_effect_activated_keyword = "special_debuff_effect_activated"
debuff_card_count_reduced_keyword="debuff_card_count_reduced"
boost_applied_keyword="boost_applied"
combo_found_keyword="combo_found"
combo_boosted_keyword="combo_boosted"
boost_card_count_reduced_keyword="boost_card_count_reduced"
effect_applied_keyword="effect_applied"
defense_effect_activated_keyword="defense_effect_activated"
defense_played_keyword="defense_played"
defense_boosted_keyword="defense_boosted"
defense_applied_keyword="defense_applied"
attack_effect_activated_keyword="attack_effect_activated"
damage_dealt_keyword="damage_deal"
pierce_applied_keyword="pierce_applied"
crit_boosted_keyword="crit_boosted"
attack_played_keyword="attack_played"
attack_boosted_keyword="attack_boosted"
attack_applied_keyword="attack_applied"
attack_evaded_keyword = "attack_evaded"
end_of_game_report_keyword="end_of_game_report"
end_of_series_report_keyword="end_of_series_report"
# endregion
series_report = []
#TODO keep track of damages dealt
damages_dealt={}
total_healths={}
crit_damage_ranges={0:0,1:0,2:3,3:3,4:3,5:4,6:4,7:4,8:5,9:5,10:5,11:6,12:6,13:6,14:6,15:6}
class Player:
    """
    A fighter built from a character card plus its combo and playing-card
    pools.

    All state lives in ``self.params``: stats are decoded from the card's
    DNA string (taken from the layer image file name), and ``combos`` /
    ``playing_cards`` are filtered from the global pools by the player's
    combo group and character type.
    """
    def __init__(self, card_data, layers, all_combos, all_playing_cards):
        self.params = {
            "combo_group": 0,
            "character_type": 0,
            "crit": 0
        }
        self.data = card_data
        # DNA is encoded in the layer image file name: "<path>/<dna>.<ext>"
        dna = (card_data["layer_image"].split("/")[-1]).split(".")[0]
        self.params["dna"] = dna
        self.params["card_id"] = card_data["id"]
        # Each pair of DNA digits, prefixed with its layer number
        # (0=health, 1=evasion, 2=combo group, 3=type, 4=crit), keys into
        # the layers table.
        # (Removed leftover debug branch printing "goccha" for dna[0:2] ==
        # '04'; its second comparison '004' could never match a 2-char slice.)
        self.params["health"] = int(layers[("0" + dna[0:2])]["value"])
        self.params["deck_limit"] = 20
        self.params["evasion"] = int(layers[("1" + dna[2:4])]["value"])
        self.params["combo_group"] = int(layers[("2" + dna[4:6])]["value"])
        self.params["character_type"] = int(layers[("3" + dna[6:8])]["value"])
        self.params["crit"] = int(layers[("4" + dna[8:10])]["value"])
        # Keep only the combos / playing cards usable by this character.
        self.params["combos"] = [combo for combo in all_combos
                                 if combo["type"] == self.params["combo_group"]]
        self.params["playing_cards"] = [card for card in all_playing_cards
                                        if card["character_type"] == self.params["character_type"]]
        self.params["deck"] = []
        self.params["deck_cost"] = 0
    def generate_random_deck(self):
        """Fill the deck with deck_limit random cards and return it."""
        deck = []
        deck_cost = 0
        for card_counter in range(0, self.params["deck_limit"]):
            random_card = random.choice(self.params["playing_cards"])
            # Deep copy so identical picks are independent objects
            # (details in documentation).
            card = copy.deepcopy(random_card)
            card["unique_test"] = card_counter
            deck_cost += random_card["cost"]
            deck.append(card)
        self.params["deck"] = deck
        self.params["deck_cost"] = deck_cost
        print("Generated random deck for player:", self.params["card_id"], "deck size:", len(deck), "deck cost:", deck_cost)
        return deck
    def validate_and_assign_deck(self, deck_submitted):
        """
        Validate a submitted deck and assign it to the player.

        Rules: every card must match the player's character_type, no card id
        may be used more than 3 times, total cost <= 200 and deck size <=
        deck_limit. On any violation the deck is cleared and [] is returned;
        on success the deck is stored and None is returned (historical API).
        """
        deck_cost = 0
        deck = []
        # card id -> times used. Was a fixed list of 100 counters, which
        # raised IndexError for any card id >= 100.
        card_counters = {}
        for card in deck_submitted:
            if card["character_type"] != self.params["character_type"]:
                self.params["deck"] = []
                print(self.params["card_id"])
                print("Invalid card in deck. The card character_type of:", card["character_type"],
                      "does not match player character_type of", self.params["character_type"])
                return []
            elif card_counters.get(card["id"], 0) >= 3:
                self.params["deck"] = []
                print(self.params["card_id"])
                print("Invalid card in deck. The card ", card["id"],
                      "was used more than 3 times")
                return []
            else:
                card_counters[card["id"]] = card_counters.get(card["id"], 0) + 1
                deck.append(copy.deepcopy(card))
                deck_cost += card["cost"]
        if deck_cost > 200:
            self.params["deck"] = []
            print(self.params["card_id"])
            print("invalid deck, deck total cost to high:", deck_cost, "... the maximum allowed is 200")
            return []
        if len(deck) > self.params["deck_limit"]:
            self.params["deck"] = []
            print(self.params["card_id"])
            print("invalid deck, deck has more cards than allowed:", len(deck), "... the maximum allowed is",
                  self.params["deck_limit"])
            return []
        print(self.params["card_id"])
        print("Deck added to the player,deck cost:", deck_cost, "and deck size:", len(deck))
        self.params["deck_cost"] = deck_cost
        self.params["deck"] = deck
    def character(self):
        """Return the player's parameter dict."""
        return self.params
class BattlingPlayerObject:
    """Mutable in-battle snapshot built from a Player's parameters."""

    def __init__(self, player):
        stats = player.params
        # fixed characteristics, taken once from the underlying character card
        self.player_combos = stats["combos"]
        self.player_dna = stats["dna"]
        self.player_max_health = stats["health"]
        self.evasion = stats["evasion"]
        self.id = stats["card_id"]
        # battle state that changes from turn to turn
        self.player_deck = stats["deck"]
        self.player_health = stats["health"]
        self.player_shield = 0
        self.player_combo_string = ""
        self.active_boosts = []
        self.combo_effects = []
        self.debuffs = []
        self.player_crit_chance = stats["crit"]
        self.player_current_deck_size = len(self.player_deck)
class Debuff:
    """Wrapper around a neutralizer card, splitting it into optional
    special and card-value-reducer sub-debuffs with shared count/timing."""

    def __init__(self, neutralizer_card):
        self.neutralizer_card = neutralizer_card
        self.special_debuff = None
        self.card_value_reducer_debuff = None
        self.uuid = str(uuid.uuid4())
        # defaults: single-use, immediately active
        self.card_count = 1
        self.card_timing = 0
        self.evaluate_neutralizer_card(neutralizer_card)
        if "card_timing" in neutralizer_card:
            self.card_timing = int(neutralizer_card["card_timing"])
        if "card_count" in neutralizer_card:
            self.card_count = int(neutralizer_card["card_count"])

    def reduce_card_count(self):
        """Consume one use of this debuff and of its sub-debuffs."""
        self.card_count -= 1
        if self.special_debuff is not None:
            self.special_debuff.reduce_card_count()
        # BUG FIX: the original tested the non-existent attribute
        # ``self.card_value_reducer`` and raised AttributeError here.
        if self.card_value_reducer_debuff is not None:
            self.card_value_reducer_debuff.reduce_card_count()

    def reduce_card_timing(self):
        """Advance the activation timer of this debuff and its sub-debuffs."""
        self.card_timing -= 1
        if self.special_debuff is not None:
            self.special_debuff.reduce_card_timer()
        if self.card_value_reducer_debuff is not None:
            self.card_value_reducer_debuff.reduce_card_timer()

    def evaluate_neutralizer_card(self, neutralizer_card):
        """Create the sub-debuff objects for the sections present on the card."""
        if "special" in neutralizer_card:
            self.special_debuff = SpecialDebuff(neutralizer_card, self.uuid)
        # BUG FIX: the original wrote ("attack" or "shield" or "life" or
        # "crit") in neutralizer_card, which evaluates to just
        # "attack" in neutralizer_card; each key must be tested individually.
        if any(key in neutralizer_card for key in ("attack", "shield", "life", "crit")):
            self.card_value_reducer_debuff = CardValueReducerDebuff(neutralizer_card, self.uuid)
class SpecialDebuff:
    """Counter/timer state for the 'special' section of a neutralizer card."""

    def __init__(self, neutralizer_card, uuid):
        self.neutralizer_card = neutralizer_card
        self.special_description = neutralizer_card["special"]
        self.uuid = uuid
        # default to a single-use, immediately-active debuff unless the card
        # overrides count/timing
        self.card_count = int(neutralizer_card.get("card_count", 1))
        self.card_timing = int(neutralizer_card.get("card_timing", 0))

    def reduce_card_timer(self):
        self.card_timing -= 1

    def reduce_card_count(self):
        self.card_count -= 1
class CardValueReducerDebuff:
    """Value-reducing section (attack/life/shield/crit) of a neutralizer card."""

    def __init__(self, neutralizer_card, uuid):
        self.neutralizer_card = neutralizer_card
        self.uuid = uuid
        self.card_count = int(neutralizer_card.get("card_count", 1))
        self.card_timing = int(neutralizer_card.get("card_timing", 0))
        # When several sections are present the later one wins; the iteration
        # order matches the original evaluation order attack -> life ->
        # shield -> crit.
        for section in ("attack", "life", "shield", "crit"):
            if section in neutralizer_card:
                self.action = neutralizer_card[section]["action"]
                self.amount = neutralizer_card[section]["amount"]

    def reduce_card_timer(self):
        self.card_timing -= 1

    def reduce_card_count(self):
        self.card_count -= 1
class ComboEffect:
def __init__(self, combo_card):
self.combo_card = combo_card
self.attack_combo_effect = None
self.shield_combo_effect = None
self.life_combo_effect = None
self.evaluate_combo_card(self.combo_card)
def evaluate_combo_card(self, combo_card):
if "attack" in combo_card:
self.attack_combo_effect = AttackComboEffect(combo_card)
if "shield" in combo_card:
self.shield_combo_effect = ShieldComboEffect(combo_card)
if "life" in combo_card:
self.life_combo_effect = LifeComboEffect(combo_card)
class AttackComboEffect:
    """Attack portion of a combo card; only '+' actions carry a damage range."""

    def __init__(self, combo_card):
        section = combo_card["attack"]
        self.action_type = section["action"]
        self.min_amount = None
        self.max_amount = None
        if self.action_type == "+":
            # '+' combos roll between "amount" (min) and "extra" (max)
            self.min_amount = section["amount"]
            self.max_amount = section["extra"]
class ShieldComboEffect:
    """Shield portion of a combo card; only '+' actions carry a value range."""

    def __init__(self, combo_card):
        section = combo_card["shield"]
        self.action_type = section["action"]
        self.min_amount = None
        self.max_amount = None
        if self.action_type == "+":
            # '+' combos roll between "amount" (min) and "extra" (max)
            self.min_amount = section["amount"]
            self.max_amount = section["extra"]
class LifeComboEffect:
    """Life (heal) portion of a combo card; only '+' actions carry a range."""

    def __init__(self, combo_card):
        section = combo_card["life"]
        self.action_type = section["action"]
        self.min_amount = None
        self.max_amount = None
        if self.action_type == "+":
            # '+' combos roll between "amount" (min) and "extra" (max)
            self.min_amount = section["amount"]
            self.max_amount = section["extra"]
class Boost:
def __init__(self, boost_card):
self.boost_card = boost_card
self.unique_id = uuid.uuid4()
self.attack_boost = None
self.shield_boost = None
self.life_boost = None
self.crit_boost = None
self.special_boost = None
self.combo_boost = None
self.evaluate_boost_card_information(self.boost_card)
def evaluate_boost_card_information(self, boost_card):
if boost_card["target_type"] == "combo":
self.combo_boost = ComboBoost(boost_card, self.unique_id)
elif "special" in boost_card:
self.special_boost = SpecialBoost(boost_card, self.unique_id)
else:
if "shield" in boost_card:
self.shield_boost = ShieldBoost(boost_card, self.unique_id)
if "life" in boost_card:
self.life_boost = LifeBoost(boost_card, self.unique_id)
if "attack" in boost_card:
self.attack_boost = AttackBoost(boost_card, self.unique_id)
if "crit" in boost_card:
self.crit_boost = CritBoost(boost_card, self.unique_id)
class ComboBoost:
    """Boost that applies to a combo; the last present section wins."""

    def __init__(self, boost_card, id):
        self.boost_card = boost_card
        self.unique_id = id
        self.target_type = boost_card["target_type"]
        self.card_timing = int(boost_card["card_timing"])
        self.card_count = int(boost_card["card_count"])
        # Later sections overwrite earlier ones; the iteration order matches
        # the original evaluation order attack -> crit -> shield -> life.
        for section in ("attack", "crit", "shield", "life"):
            if section in boost_card:
                self.action_type = boost_card[section]["action"]
                self.amount = int(boost_card[section]["amount"])
class AttackBoost:
    """Attack section of a boost card.

    Reads action/amount (and optional "extra") from the card's "attack"
    section plus the shared targeting and count/timing fields.
    """

    def __init__(self, boost_card, id):
        self.boost_card = boost_card
        self.unique_id = id
        self.target_type = boost_card["target_type"]
        # BUG FIX: the original assigned the literal list ["target_opp"]
        # instead of reading the card field, unlike every sibling boost class.
        self.target_opp = boost_card["target_opp"]
        self.action_type = boost_card["attack"]["action"]
        self.amount = int(boost_card["attack"]["amount"])
        self.extra = None
        if "extra" in boost_card["attack"]:
            self.extra = int(boost_card["attack"]["extra"])
        self.card_timing = int(boost_card["card_timing"])
        self.card_count = int(boost_card["card_count"])
class LifeBoost:
    """Life (heal) section of a boost card."""

    def __init__(self, boost_card, id):
        section = boost_card["life"]
        self.boost_card = boost_card
        self.unique_id = id
        self.target_type = boost_card["target_type"]
        self.target_opp = boost_card["target_opp"]
        self.action_type = section["action"]
        self.amount = int(section["amount"])
        # "extra" is an optional upper bound for ranged heals
        self.extra = int(section["extra"]) if "extra" in section else None
        self.card_timing = int(boost_card["card_timing"])
        self.card_count = int(boost_card["card_count"])
class ShieldBoost:
    """Shield section of a boost card."""

    def __init__(self, boost_card, id):
        self.boost_card = boost_card
        self.unique_id = id
        self.target_type = boost_card["target_type"]
        self.target_opp = boost_card["target_opp"]
        self.action_type = boost_card["shield"]["action"]
        self.amount = int(boost_card["shield"]["amount"])
        self.extra = None
        self.card_timing = int(boost_card["card_timing"])
        self.card_count = int(boost_card["card_count"])
        if "extra" in boost_card["shield"]:
            # BUG FIX: the original read boost_card["shield"]["amount"] here,
            # so ``extra`` silently duplicated ``amount``; every sibling boost
            # class reads the "extra" field.
            self.extra = int(boost_card["shield"]["extra"])
class CritBoost:
    """Crit-chance section of a boost card."""

    def __init__(self, boost_card, id):
        section = boost_card["crit"]
        self.boost_card = boost_card
        self.unique_id = id
        self.target_type = boost_card["target_type"]
        self.target_opp = boost_card["target_opp"]
        self.action_type = section["action"]
        self.amount = int(section["amount"])
        self.card_timing = int(boost_card["card_timing"])
        self.card_count = int(boost_card["card_count"])
class SpecialBoost:
    """Special-effect section of a boost card; carries the raw special text."""

    def __init__(self, boost_card, id):
        self.boost_card = boost_card
        self.unique_id = id
        for attr in ("target_type", "target_opp", "special"):
            setattr(self, attr, boost_card[attr])
        self.card_timing = int(boost_card["card_timing"])
        self.card_count = int(boost_card["card_count"])
#############################
def count_rows(worksheet):
    """Count leading worksheet rows whose first cell is non-empty.

    The first row whose column-A cell is blank marks the end of the data
    region; rows after it are ignored.
    """
    n = 0
    for cells in worksheet.iter_rows():
        if cells[0].value is None:
            break
        n += 1
    return n
def fetch_values(sheet, columns):
    """Read worksheet rows into a list of OrderedDicts.

    ``columns`` maps column name -> declared type; only the value "int"
    triggers coercion/validation, every other declared type passes the raw
    cell value through. Rows are read from row 2 (skipping the header) up to
    the first row whose first cell is empty (see count_rows).
    """
    records = []
    row_count = count_rows(sheet)
    for row in islice(sheet.values, 1, row_count):
        # Renamed from ``dict`` — the original shadowed the builtin.
        record = OrderedDict()
        for value, column in zip(row, columns.keys()):
            # all numbers are float by default in the sheet
            if columns[column] == "int":
                if value is not None and not isinstance(value, int):
                    # BUG FIX (message only): the original print claimed the
                    # value "was generated as 0", but the code stores None.
                    print(
                        f"Invalid int value: ({value}), type: ({type(value)}), column: ({column}) was replaced with None..."
                    )
                    value = None
                record[column] = int(value) if value is not None else value
            else:
                record[column] = value
        records.append(record)
    return records
def load_layers(sheet):
    """Load the layer lookup table from a worksheet.

    Returns a dict mapping str(seq) + str(code) (e.g. seq=1, code="04" ->
    "104") to the layer record, so stats can be looked up by layer group
    index plus DNA digit pair.
    """
    columns = {
        "seq": "str",
        "code": "str",
        "Type": "str",
        "value": "str"
    }
    layers = {}
    for layer in fetch_values(sheet, columns):
        # NOTE(review): the original also ran ``c = int(c)`` when the code was
        # already an int — a no-op removed here; str() yields the same key
        # either way.
        key = str(int(layer["seq"])) + str(layer["code"])
        layers[key] = layer
    return layers
def load_cards(sheet):
columns = {
"id": "str",
"name": "str",
"character_type": "str",
"card_type": "str",
"combo_sign": "str",
"attack_action": "str",
"attack_amount": "str",
"attack_extra": "str",
"shield_action": "str",
"shield_amount": "str",
"shield_extra": "str",
"life_action": "str",
"life_amount": "str",
"life_extra": "str",
"crit_action": "str",
"crit_amount": "str",
"crit_extra": "str",
"special": "str",
"target_opp": "str",
"target_type": "str",
"target_subtype": "str",
"card_timing": "str",
"card_count": "str",
"cost": "str"
}
cards_data = fetch_values(sheet, columns)
cards = []
for cd in cards_data:
card = {}
if cd["attack_action"] is not None:
card["attack"] = {
"action": cd["attack_action"],
"amount": int(cd["attack_amount"]),
}
if cd["attack_extra"] is not None:
card["attack"]["extra"] = int(cd["attack_extra"])
if cd["shield_action"] is not None:
card["shield"] = {
"action": cd["shield_action"],
"amount": int(cd["shield_amount"]),
}
if cd["shield_extra"] is not None:
card["shield"]["extra"] = int(cd["shield_extra"])
if cd["life_action"] is not None:
card["life"] = {
"action": cd["life_action"],
"amount": int(cd["life_amount"]),
}
if cd["life_extra"] is not None:
card["life"]["extra"] = int(cd["life_extra"])
if cd["crit_action"] is not None:
card["crit"] = {
"action": cd["crit_action"],
"amount": int(cd["crit_amount"]),
}
if cd["crit_extra"] is not None:
| |
from aacharts.aatool.AAColor import AAColor
from aacharts.aatool.AAGradientColor import AAGradientColor
from aacharts.aachartcreator.AASeriesElement import AASeriesElement
from aacharts.aachartcreator.AAChartModel import AAChartModel, AAChartSymbolStyleType, AAChartSymbolType, AAChartType
from aacharts.aatool.AAGradientColor import AAGradientColor
from aacharts.aachartcreator.AASeriesElement import AASeriesElement
from aacharts.aachartcreator.AAChartModel import *
from aacharts.aaoptionsmodel.AAMarker import AAMarker
from aacharts.aaoptionsmodel.AADataElement import AADataElement
from aacharts.aaoptionsmodel.AADataLabels import AADataLabels
from aacharts.aaoptionsmodel.AACrosshair import AACrosshair
from aacharts.aaoptionsmodel.AAStates import AAStates, AAHover, AAHalo, AAInactive, AASelect
from aacharts.aaoptionsmodel.AALegend import AAItemStyle
from aacharts.aaoptionsmodel.AASeries import AAEvents, AAPoint, AAPointEvents
from aacharts.aaoptionsmodel.AALang import AALang
from aacharts.aatool.AAGradientColor import AALinearGradientDirection
from aacharts.aatool.AAJSArrayConverter import AAJSArrayConverter
from aacharts.aaoptionsmodel.AAPlotOptions import AAColumn
import random
from string import Template
class JSFuncOptionsComposer:
    @staticmethod
    def customAreaChartTooltipStyleWithSimpleFormatString():
        """Area chart of recent gold prices whose tooltip is produced by a
        custom JavaScript formatter returning a simple HTML string."""
        aaChartModel = (AAChartModel()
            .chartTypeSet(AAChartType.area)  # chart type
            .titleSet("近三个月金价起伏周期图")  # chart main title
            .subtitleSet("金价Set(元/克)")  # chart subtitle
            .markerSymbolStyleSet(AAChartSymbolStyleType.borderBlank)  # marker style: blank outer border
            .dataLabelsEnabledSet(False)
            .categoriesSet([
                "10-01", "10-02", "10-03", "10-04", "10-05", "10-06", "10-07", "10-08", "10-09", "10-10", "10-11",
                "10-12", "10-13", "10-14", "10-15", "10-16", "10-17", "10-18", "10-19", "10-20", "10-21", "10-22",
                "10-23", "10-24", "10-25", "10-26", "10-27", "10-28", "10-29", "10-30", "10-31", "11-01", "11-02",
                "11-03", "11-04", "11-05", "11-06", "11-07", "11-08", "11-09", "11-10", "11-11", "11-12", "11-13",
                "11-14", "11-15", "11-16", "11-17", "11-18", "11-19", "11-20", "11-21", "11-22", "11-23", "11-24",
                "11-25", "11-26", "11-27", "11-28", "11-29", "11-30", "12-01", "12-02", "12-03", "12-04", "12-05",
                "12-06", "12-07", "12-08", "12-09", "12-10", "12-11", "12-12", "12-13", "12-14", "12-15", "12-16",
                "12-17", "12-18", "12-19", "12-20", "12-21", "12-22", "12-23", "12-24", "12-25", "12-26", "12-27",
                "12-28", "12-29", "12-30"
            ])
            .seriesSet([
                AASeriesElement()
                .nameSet("2020")
                .lineWidthSet(3)
                .colorSet("#FFD700")  # pure gold color
                .fillOpacitySet(0.5)
                .dataSet([
                    1.51, 6.70, 0.94, 1.44, 1.60, 1.63, 1.56, 1.91, 2.45, 3.87, 3.24, 4.90, 4.61, 4.10,
                    4.17, 3.85, 4.17, 3.46, 3.46, 3.55, 3.50, 4.13, 2.58, 2.28, 1.51, 12.7, 0.94, 1.44,
                    18.6, 1.63, 1.56, 1.91, 2.45, 3.87, 3.24, 4.90, 4.61, 4.10, 4.17, 3.85, 4.17, 3.46,
                    3.46, 3.55, 3.50, 4.13, 2.58, 2.28, 1.33, 4.68, 1.31, 1.10, 13.9, 1.10, 1.16, 1.67,
                    2.64, 2.86, 3.00, 3.21, 4.14, 4.07, 3.68, 3.11, 3.41, 3.25, 3.32, 3.07, 3.92, 3.05,
                    2.18, 3.24, 3.23, 3.15, 2.90, 1.81, 2.11, 2.43, 5.59, 3.09, 4.09, 6.14, 5.33, 6.05,
                    5.71, 6.22, 6.56, 4.75, 5.27, 6.02, 5.48
                ])
            ]))
        aaOptions = aaChartModel.aa_toAAOptions()
        (aaOptions.tooltip
            .useHTMLSet(True)
            .formatterSet("""
        function () {
        return ' 🌕 🌖 🌗 🌘 🌑 🌒 🌓 🌔 <br/> '
        + ' Support JavaScript Function Just Right Now !!! <br/> '
        + ' The Gold Price For <b>2020 '
        + this.x
        + ' </b> Is <b> '
        + this.y
        + ' </b> Dollars ';
        }
        """)
            .valueDecimalsSet(2)  # number of decimal places shown for tooltip values
            .backgroundColorSet(AAColor.black)
            .borderColorSet(AAColor.black)
            .styleSet(AAStyle.colorSize("#FFD700", 12)))
        return aaOptions
    @staticmethod
    def customAreaChartTooltipStyleWithDifferentUnitSuffix():
        """Stacked areaspline chart whose JS tooltip formatter appends a
        different unit suffix (只/棵) to each series value, and whose legend
        click events are disabled."""
        aaChartModel = (AAChartModel()
            .chartTypeSet(AAChartType.areaspline)  # chart type
            .titleSet("2014 ~ 2020 汪星人生存指数")  # chart main title
            .subtitleSet("数据来源:www.无任何可靠依据.<EMAIL>")  # chart subtitle
            .markerSymbolStyleSet(AAChartSymbolStyleType.innerBlank)
            .colorsThemeSet([
                AAGradientColor.oceanBlue,
                AAGradientColor.sanguine,
            ])
            .dataLabelsEnabledSet(False)
            .stackingSet(AAChartStackingType.normal)
            .seriesSet([
                AASeriesElement()
                .nameSet("🐶狗子")
                .lineWidthSet(5.0)
                .dataSet([0.45, 0.43, 0.50, 0.55, 0.58, 0.62, 0.83, 0.39, 0.56, 0.67, 0.50, 0.34, 0.50, 0.67, 0.58, 0.29, 0.46, 0.23, 0.47, 0.46, 0.38, 0.56, 0.48, 0.36])
                ,
                AASeriesElement()
                .nameSet("🌲树木")
                .lineWidthSet(5.0)
                .dataSet([0.38, 0.31, 0.32, 0.32, 0.64, 0.66, 0.86, 0.47, 0.52, 0.75, 0.52, 0.56, 0.54, 0.60, 0.46, 0.63, 0.54, 0.51, 0.58, 0.64, 0.60, 0.45, 0.36, 0.67])
                ,
            ]))
        aaOptions = aaChartModel.aa_toAAOptions()
        (aaOptions.tooltip
            .useHTMLSet(True)
            .enabledSet(True)
            .formatterSet("""
        function () {
            var s = '第' + '<b>' + this.x + '</b>' + '年' + '<br/>';
            let colorDot1 = '<span style=' + 'color:#1e90ff; font-size:13px' + '>◉</span> ';
            let colorDot2 = '<span style=' + 'color:#ef476f; font-size:13px' + '>◉</span> ';
            let s1 = colorDot1 + this.points[0].series.name + ': ' + this.points[0].y + '只' + '<br/>';
            let s2 = colorDot2 + this.points[1].series.name + ': ' + this.points[1].y + '棵';
            s += s1 + s2;
            return s;
        }
        """))
        # disable legend item click events (clicking a legend entry normally
        # toggles the series; returning false suppresses that)
        aaOptions.plotOptions.series.events = (
            AAEvents()
            .legendItemClickSet("""
        function() {
            return false;
        }
        """))
        return aaOptions
    @staticmethod
    def customAreaChartTooltipStyleWithColorfulHtmlLabels():
        """Stacked areaspline chart whose JS tooltip builds one colored HTML
        line per series, using each point's series color for its dot."""
        aaChartModel = (AAChartModel()
            .chartTypeSet(AAChartType.areaspline)  # chart type
            .markerSymbolStyleSet(AAChartSymbolStyleType.borderBlank)  # marker style: blank outer border
            .dataLabelsEnabledSet(False)
            .colorsThemeSet(["#04d69f","#1e90ff","#ef476f","#ffd066",])
            .stackingSet(AAChartStackingType.normal)
            .markerRadiusSet(0)
            .seriesSet([
                AASeriesElement()
                .nameSet("Tokyo Hot")
                .lineWidthSet(5.0)
                .fillOpacitySet(0.4)
                .dataSet([0.45, 0.43, 0.50, 0.55, 0.58, 0.62, 0.83, 0.39, 0.56, 0.67, 0.50, 0.34, 0.50, 0.67, 0.58, 0.29, 0.46, 0.23, 0.47, 0.46, 0.38, 0.56, 0.48, 0.36])
                ,
                AASeriesElement()
                .nameSet("Berlin Hot")
                .lineWidthSet(5.0)
                .fillOpacitySet(0.4)
                .dataSet([0.38, 0.31, 0.32, 0.32, 0.64, 0.66, 0.86, 0.47, 0.52, 0.75, 0.52, 0.56, 0.54, 0.60, 0.46, 0.63, 0.54, 0.51, 0.58, 0.64, 0.60, 0.45, 0.36, 0.67])
                ,
                AASeriesElement()
                .nameSet("New York Hot")
                .lineWidthSet(5.0)
                .fillOpacitySet(0.4)
                .dataSet([0.46, 0.32, 0.53, 0.58, 0.86, 0.68, 0.85, 0.73, 0.69, 0.71, 0.91, 0.74, 0.60, 0.50, 0.39, 0.67, 0.55, 0.49, 0.65, 0.45, 0.64, 0.47, 0.63, 0.64])
                ,
                AASeriesElement()
                .nameSet("London Hot")
                .lineWidthSet(5.0)
                .fillOpacitySet(0.4)
                .dataSet([0.60, 0.51, 0.52, 0.53, 0.64, 0.84, 0.65, 0.68, 0.63, 0.47, 0.72, 0.60, 0.65, 0.74, 0.66, 0.65, 0.71, 0.59, 0.65, 0.77, 0.52, 0.53, 0.58, 0.53])
                ,
            ]))
        aaOptions = aaChartModel.aa_toAAOptions()
        (aaOptions.tooltip
            .useHTMLSet(True)
            .formatterSet("""
        function () {
            let wholeContentStr ='<span style=' + 'color:lightGray; font-size:13px' + '>◉ Time: ' + this.x + ' year</span><br/>';
            let length = this.points.length;
            for (let i = 0; i < length; i++) {
                let thisPoint = this.points[i];
                let yValue = thisPoint.y;
                if (yValue != 0) {
                    let spanStyleStartStr = '<span style=' + 'color:'+ thisPoint.color + '; font-size:13px' + '>◉ ';
                    let spanStyleEndStr = '</span> <br/>';
                    wholeContentStr += spanStyleStartStr + thisPoint.series.name + ': ' + thisPoint.y + '℃' + spanStyleEndStr;
                }
            }
            return wholeContentStr;
        }
        """)
            .backgroundColorSet("#050505")
            .borderColorSet("#050505"))
        return aaOptions
    @staticmethod
    def customLineChartTooltipStyleWhenValueBeZeroDoNotShow():
        """Line chart whose JS tooltip formatter skips series whose value at
        the hovered category is zero."""
        aaChartModel = (AAChartModel()
            .chartTypeSet(AAChartType.line)  # chart type
            .markerSymbolStyleSet(AAChartSymbolStyleType.borderBlank)  # marker style: blank outer border
            .dataLabelsEnabledSet(False)
            .categoriesSet(["临床一期","临床二期","临床三期"])
            .seriesSet([
                AASeriesElement()
                .nameSet("上市")
                .dataSet([0,0,7])
                ,
                AASeriesElement()
                .nameSet("中止")
                .dataSet([4,5,1])
                ,
                AASeriesElement()
                .nameSet("无进展")
                .dataSet([2,0,1])
                ,
                AASeriesElement()
                .nameSet("进行中")
                .dataSet([3,5,2])
                ,
            ]))
        aaOptions = aaChartModel.aa_toAAOptions()
        (aaOptions.tooltip
            .useHTMLSet(True)
            .formatterSet("""
        function () {
            let wholeContentStr = this.points[0].x + '<br/>';
            let length = this.points.length;
            for (let i = 0; i < length; i++) {
                let thisPoint = this.points[i];
                let yValue = thisPoint.y;
                if (yValue != 0) {
                    let prefixStr = '<span style=' + 'color:'+ thisPoint.color + '; font-size:13px' + '>◉ ';
                    wholeContentStr += prefixStr + thisPoint.series.name + ': ' + yValue + '<br/>';
                }
            }
            return wholeContentStr;
        }
        """))
        return aaOptions
@staticmethod
def customBoxplotTooltipContent():
aaChartModel = (AAChartModel()
.chartTypeSet(AAChartType.boxplot)
.titleSet("BOXPLOT CHART")
.subtitleSet("virtual data")
.yAxisTitleSet("℃")
.yAxisVisibleSet(True)
.seriesSet([
AASeriesElement()
.nameSet("Observed Data")
.colorSet("#ef476f")
.fillColorSet(AAGradientColor.deepSea)
.dataSet([
[760, 801, 848, 895, 965],
[733, 853, 939, 980, 1080],
[714, 762, 817, 870, 918],
[724, 802, 806, 871, 950],
[834, 836, 864, 882, 910]
])
,
]))
pointFormatStr = (
"◉</span> <b> {series.name}</b><br/>"
+ "最大值: {point.high}<br/>"
+ "Q2: {point.q3}<br/>"
+ "中位数: {point.median}<br/>"
+ "Q1: {point.q1}<br/>"
+ "最小值: {point.low}<br/>"
)
aaOptions = aaChartModel.aa_toAAOptions()
(aaOptions.tooltip
.useHTMLSet(True)
.headerFormatSet("<em>实验号码: point.key</em><br/>")
.pointFormatSet(pointFormatStr)
.valueDecimalsSet(2)#设置取值精确到小数点后几位#设置取值精确到小数点后几位
.backgroundColorSet(AAColor.black)
.borderColorSet(AAColor.black)
.styleSet(AAStyle.colorSize("#1e90ff", 12)))
return aaOptions
    @staticmethod
    def customYAxisLabels():
        """Line chart whose y-axis tick labels are mapped by a JS formatter
        from numeric ranges to rating strings (Excellent, Very Good, ...)."""
        aaChartModel = (AAChartModel()
            .chartTypeSet(AAChartType.line)  # chart type
            .markerSymbolStyleSet(AAChartSymbolStyleType.borderBlank)  # marker style: blank outer border
            .dataLabelsEnabledSet(False)
            .colorsThemeSet(["#04d69f","#1e90ff","#ef476f","#ffd066",])
            .stackingSet(AAChartStackingType.normal)
            .markerRadiusSet(8)
            .seriesSet([
                AASeriesElement()
                .nameSet("Scores")
                .lineWidthSet(5.0)
                .fillOpacitySet(0.4)
                .dataSet([29.9, 71.5, 106.4, 129.2, 144.0, 176.0, 135.6, 148.5, 216.4, 194.1, 95.6, 54.4])
                ,
            ]))
        aaYAxisLabels = (
            AALabels()
            .formatterSet("""
        function () {
            let yValue = this.value;
            if (yValue >= 200) {
                return 'Excellent';
            } else if (yValue >= 150 && yValue < 200) {
                return 'Very Good';
            } else if (yValue >= 100 && yValue < 150) {
                return 'Good';
            } else if (yValue >= 50 && yValue < 100) {
                return 'Not Bad';
            } else {
                return 'Just So So';
            }
        }
        """))
        aaOptions = aaChartModel.aa_toAAOptions()
        aaOptions.yAxis.labelsSet(aaYAxisLabels)
        return aaOptions
    @staticmethod
    def customYAxisLabels2():
        """Line chart with fixed y-axis tick positions whose labels are
        rendered as percentages by a JS formatter (0..10000 -> 0%..100%)."""
        aaChartModel = (AAChartModel()
            .chartTypeSet(AAChartType.line)  # chart type
            .markerSymbolStyleSet(AAChartSymbolStyleType.borderBlank)  # marker style: blank outer border
            .dataLabelsEnabledSet(False)
            .colorsThemeSet(["#04d69f","#1e90ff","#ef476f","#ffd066",])
            .stackingSet(AAChartStackingType.normal)
            .markerRadiusSet(8)
            .seriesSet([
                AASeriesElement()
                .nameSet("Tokyo Hot")
                .lineWidthSet(5.0)
                .fillOpacitySet(0.4)
                .dataSet([229.9, 771.5, 1106.4, 1129.2, 6644.0, 1176.0, 8835.6, 148.5, 8816.4, 6694.1, 7795.6, 9954.4])
            ]))
        aaYAxisLabels = (
            AALabels()
            .styleSet(AAStyle.colorSizeWeight(AAColor.gray, 10, AAChartFontWeightType.bold))
            .formatterSet("""
        function () {
            let yValue = this.value;
            if (yValue == 0) {
                return '0';
            } else if (yValue == 2500) {
                return '25%';
            } else if (yValue == 5000) {
                return '50%';
            } else if (yValue == 7500) {
                return '75%';
            } else if (yValue == 10000) {
                return '100%';
            }
        }
        """))
        aaOptions = aaChartModel.aa_toAAOptions()
        (aaOptions.yAxis
            .oppositeSet(True)
            .tickWidthSet(2)
            .lineWidthSet(1.5)  # y-axis line width
            .lineColorSet(AAColor.lightGray)  # y-axis line color
            .gridLineWidthSet(0)  # y-axis grid line width (hidden)
            .tickPositionsSet([0,2500,5000,7500,10000])  # only these ticks get labels from the formatter above
            .labelsSet(aaYAxisLabels))
        return aaOptions
    @staticmethod
    def customStackedAndGroupedColumnChartTooltip():
        """Stacked-and-grouped column chart (male/female stacks) whose JS
        tooltip shows the hovered series value and the stack total."""
        aaChartModel = (AAChartModel()
            .titleSet("Total fruit consumtion, grouped by gender")
            .subtitleSet("stacked and grouped")
            .yAxisTitleSet("Number of fruits")
            .chartTypeSet(AAChartType.column)
            .legendEnabledSet(False)  # hide the legend (the tappable dots at the bottom)
            .stackingSet(AAChartStackingType.normal)
            .categoriesSet(["Apples", "Oranges", "Pears","Grapes","Bananas",])
            .dataLabelsEnabledSet(True)
            .seriesSet([
                AASeriesElement()
                .nameSet("John")
                .dataSet([5,3,4,7,2,])
                .stackSet("male")
                ,
                AASeriesElement()
                .nameSet("Joe")
                .dataSet([3,4,4,2,5,])
                .stackSet("male")
                ,
                AASeriesElement()
                .nameSet("Jane")
                .dataSet([2,5,6,2,1,])
                .stackSet("female")
                ,
                AASeriesElement()
                .nameSet("Janet")
                .dataSet([3,0,4, 4,3,])
                .stackSet("female")
                ,
            ]))
        # Custom tooltip style: customize the floating tooltip content
        aaOptions = aaChartModel.aa_toAAOptions()
        (aaOptions.tooltip
            .sharedSet(False)
            .formatterSet("""
        function () {
            return '<b>'
            + this.x
            + '</b><br/>'
            + this.series.name
            + ': '
            + this.y
            + '<br/>'
            + 'Total: '
            + this.point.stackTotal;
        }
        """))
        return aaOptions
@staticmethod
def customDoubleXAxesChart():
gradientColorDic1 = (AAGradientColor.linearGradient1(
AALinearGradientDirection.toTop,
"#7052f4",
"#00b0ff"#颜色字符串设置支持十六进制类型和 rgba 类型
))
gradientColorDic2 = (AAGradientColor.linearGradient1(
AALinearGradientDirection.toTop,
"#EF71FF",
"#4740C8"#颜色字符串设置支持十六进制类型和 rgba 类型
))
aaChart = (AAChart()
.typeSet(AAChartType.bar))
aaTitle = (AATitle()
.textSet("2015 年德国人口金字塔")
.styleSet(AAStyle()
.colorSet(AAColor.black)
.fontSizeSet(12.0)))
aaCategories = [
"0-4", "5-9", "10-14", "15-19",
"20-24", "25-29", "30-34", "35-39", "40-44",
"45-49", "50-54", "55-59", "60-64", "65-69",
"70-74", "75-79", | |
# Repository: nooneisperfect/ReadYourMAPFile -- file: TileHandling.py
# -*- coding: utf-8 -*-
# python imports
import glob
import os
import re
import bisect
from threading import RLock
# numpy imports
import numpy as np
import numpy
import numpy.linalg as linalg
# local imports
from smlogging import *
from mapcoord import UTMZoneForward, UTMZoneReverse, SwissReverse, SwissForward
from SatmapFormat import load_smt
from PIL import Image
def mapCoord2deg(x, y, zone, scale, code):
    """Convert map tile coordinates to [longitude, latitude] in degrees.

    code 326 selects the UTM projection (zone-based), code 217 the Swiss
    grid; any other coordinate code is unsupported. x and y may be scalars
    or numpy arrays.
    """
    km_factor = 1000. / scale
    if code == 326:
        easting = x * km_factor
        northing = y * km_factor
        return [np.degrees(v) for v in UTMZoneReverse(easting, northing, zone, FN=328.)]
    elif code == 217:
        deg = [np.degrees(v) for v in SwissReverse(x * km_factor, y * km_factor)]
        # heuristic offset correction, unclear why this is necessary (kept
        # verbatim from the original implementation)
        deg[0] = deg[0] - 0.000933
        deg[1] = deg[1] - 0.001408
        return deg
    else:
        assert False
def deg2mapCoord(lmb, phi, zone, scale, code):
    """Convert longitude/latitude in degrees to map tile coordinates.

    Inverse of mapCoord2deg; code 326 = UTM (zone-based), code 217 = Swiss
    grid, anything else is unsupported.
    """
    unit_factor = scale / 1000.
    if code == 326:
        E, N = UTMZoneForward(np.radians(lmb), np.radians(phi), zone)
        return [E * unit_factor, N * unit_factor]
    elif code == 217:
        # heuristic offset correction, mirror of the one in mapCoord2deg
        X, Y = SwissForward(np.radians(lmb + 0.000933), np.radians(phi + 0.001408))
        return [X * unit_factor, Y * unit_factor]
    else:
        assert False
class FastTileCalculator:
    """Precomputes, for every tile of a regular grid, the 6 coefficients of
    an affine transform from tile-local pixel coordinates to degrees.

    The grid spans [minx, maxx] x [miny, maxy] with tile size w x h; zone,
    scale and code select the projection (see mapCoord2deg).
    """

    def __init__(self, minx, maxx, w, miny, maxy, h, zone, scale, code):
        log(INFO, "Setting up fast tile calculator")
        self.minx = minx
        self.w = w
        self.miny = miny
        self.h = h
        # grid of tile corner coordinates; one extra row/column so every tile
        # has all four corners available
        self.y, self.x = np.mgrid[miny:maxy + h * 1.1:h, minx:maxx + w * 1.1:w]
        self.P_deg = mapCoord2deg(self.x, self.y, zone, scale, code)
        # Fit one affine transform per tile via pseudo-inverse. The corners
        # are inset by o pixels so neighboring tiles overlap slightly (2*o),
        # which hides small errors of the affine approximation.
        o = 0.1
        A = np.array([[o, o, 1, 0, 0, 0],
                      [0, 0, 0, o, o, 1],
                      [w - o, o, 1, 0, 0, 0],
                      [0, 0, 0, w - o, o, 1],
                      [w - o, h - o, 1, 0, 0, 0],
                      [0, 0, 0, w - o, h - o, 1],
                      [o, h - o, 1, 0, 0, 0],
                      [0, 0, 0, o, h - o, 1]])
        Ai = linalg.pinv(A)
        # right-hand side: the four corner coordinates (degrees) of every tile
        b = np.array([self.P_deg[0][:-1, :-1], self.P_deg[1][:-1, :-1],
                      self.P_deg[0][:-1, 1:], self.P_deg[1][:-1, 1:],
                      self.P_deg[0][1:, 1:], self.P_deg[1][1:, 1:],
                      self.P_deg[0][1:, :-1], self.P_deg[1][1:, :-1],
                      ])
        br = np.reshape(b, (b.shape[0], b.shape[1] * b.shape[2]))
        # M[:, iy, ix] holds the 6 affine coefficients of tile (ix, iy)
        self.M = np.reshape(np.dot(Ai, br), (6, b.shape[1], b.shape[2]))
        log(INFO, "Setting up fast tile calculator done [P_degx = %s P_degy = %s]" % (str(self.P_deg[0][0:2, 0:2]), str(self.P_deg[1][0:2, 0:2])))

    def calc(self, x, y):
        """Return the 6 affine coefficients for the tile containing (x, y)."""
        # BUG FIX: the original used true division ("/"), which yields floats
        # under Python 3; numpy rejects float indices, so every lookup raised
        # IndexError. Floor-divide and convert to int instead.
        ix = int((x - self.minx) // self.w)
        iy = int((y - self.miny) // self.h)
        try:
            return self.M[:, iy, ix]
        except IndexError:
            print(x, y, ix, iy, self.minx, self.w, self.miny, self.h, self.M.shape)
            raise
class Tile:
    """One map tile (w x h map units) plus its geographic bounding box."""

    def __init__(self, tc, smt, openfct, x, y, zone, scale, code, w, h, fastTileCalculator):
        self.tc = tc
        self.smt = smt
        self.openfct = openfct
        self.x_mc = x
        self.y_mc = y
        self.zone = zone
        self.scale = scale
        self.code = code
        self.w = w
        self.h = h
        # convert two opposite corners to degrees; min/max make the bounding
        # box independent of axis orientation
        lon_a, lat_a = mapCoord2deg(x, y, zone, scale, code)
        lon_b, lat_b = mapCoord2deg(x + w, y + h, zone, scale, code)
        self.long_min, self.long_max = min(lon_a, lon_b), max(lon_a, lon_b)
        self.lat_min, self.lat_max = min(lat_a, lat_b), max(lat_a, lat_b)
        log( INFO, "Tile(%s %s %s %s %s %s %s %.5f %.5f %.5f %.5f" % (x, y, w, h, zone, scale, code, self.lat_min, self.lat_max, self.long_min, self.long_max))

    def load(self):
        """Fetch this tile's decoded image through the shared tile cache."""
        return self.tc.load_tile(self.smt, self.openfct)
class TileCache:
    """LRU cache of decoded tile images, protected by a reentrant lock."""
    # maximum number of decoded tiles kept in memory at once
    cacheSize = 400
    def __init__(self):
        self.cache = {}  # smt file name -> decoded image
        self.cache_access = []  # access order, least recently used first
        self.mutex = RLock()  # reentrant: load_tile recurses while holding it
    def load_tile(self, smt, openfct):
        """Return the (possibly cached) decoded image for an .SMT tile file.

        ``openfct`` opens the tile file; decoding retries up to 10 times on
        OSError before substituting a blank 400x400 image.
        """
        with self.mutex:
            def imgConstruct(f):
                # retry transient OSErrors during decode; after 10 failures
                # fall back to a blank tile instead of raising
                cnt = 0
                while cnt < 10:
                    try:
                        res = Image.open(f)
                        # tiles are stored bottom-up; flip into image orientation
                        return res.transpose(Image.FLIP_TOP_BOTTOM)
                    except OSError:
                        cnt += 1
                return Image.new('RGB', (400, 400))
            if smt in self.cache:
                # cache hit: move the key to the most-recently-used end
                self.cache_access.remove(smt)
                self.cache_access.append(smt)
                return self.cache[smt]
            if len(self.cache_access) > self.cacheSize:
                # evict the least recently used entry
                pop = self.cache_access[0]
                self.cache_access = self.cache_access[1:]
                del self.cache[pop]
            self.cache_access.append(smt)
            self.cache[smt] = load_smt(smt, openfct, imgConstruct)
            assert set(self.cache_access) == set(self.cache.keys()), str(set(self.cache.keys())) + "<->" + str(set(self.cache_access))
            # recurse to return via the cache-hit path (mutex is reentrant)
            return self.load_tile(smt, openfct)
# Module-level singleton: one shared tile image cache for the whole process.
tileCache = TileCache()
class TileContainer:
tileH = 400
tileW = 400
cacheSize = 40
# filename decoding:
# AAAABBBCCDDD[+-]EFFF[+-]GHHH[+-]JKKKKKKKLLLLLLL.SMT
# with
# AAAA is a map indicator (probably)
# BBB is the coordinate code
# CC is the zone in UTM or otherwise part of coordinate code
# DDD mantissa of scale
# E exponent of scale
# FFF unused (maybe mantissa of offset?)
# G unused (maybe exponent of offset?)
# HHH unused
# J unused
# KKKKKKK first element of coordinate
# LLLLLLL second element ofo coordinate
tilere = re.compile("[0-9]{4}([0-9]{3})([0-9]{2})([0-9]{3})([+-][0-9])[0-9]{3}[+-][0-9]{4}[+-][0-9]([0-9]{7})([0-9]{7}).SMT")
# AAAA BBB CC DDD E FFF GHHH J KKKKKKK LLLLLLL
    def __init__(self, smtglob, progress):
        """Load all tiles matching *smtglob* and build sorted extent indexes.

        :param smtglob: either a filesystem glob pattern (str), or a tuple
            ``(entries, x_extend, y_extend)`` where entries are
            ``(name, archive)`` pairs — presumably zip archives, since the
            archive exposes ``open`` and ``filename`` (TODO confirm).
        :param progress: progress reporter with an ``incValue`` method.
        """
        ftc = None
        if type(smtglob) == type(""):
            # Plain glob pattern: open tiles directly from the filesystem.
            smts = [(x, lambda f: open(f, "rb")) for x in glob.glob(smtglob)]
            self.name = smtglob
            ftcArgs = None
        else:
            # Archive form: (entries, x extents, y extents).
            xExtend = smtglob[1]
            yExtend = smtglob[2]
            # z=x[1] binds the archive per entry, avoiding the late-binding
            # closure pitfall inside the comprehension.
            smts = [(x[0], lambda f, z=x[1]: z.open(f, "r")) for x in smtglob[0]]
            ftcArgs = [min(xExtend), max(xExtend), self.tileW, min(yExtend), max(yExtend), self.tileH]
            self.name = None
        self.tiles = []
        log(INFO, "Number of tiles", len(smts))
        # Per-scale bounding box accumulated over all tiles of that scale.
        self.extends = {}
        #if len(smts) > 1000:
        #    smts = smts[:1000]
        for smt, openfct in smts:
            log(DEBUG, "Creating Tile for ", smt)
            # Decode coordinate code, zone, scale mantissa/exponent and the
            # tile coordinates from the SMT file name (see the class-level
            # filename description above).
            code, zone, scale_man, scale_exp, x, y = list(map(int,
                                    self.tilere.search(smt).groups()))
            if code in [326, 217]:
                scale = scale_man*(10**scale_exp)
                # Build the FastTileCalculator once, from the first valid tile.
                if not ftcArgs is None and ftc is None:
                    ftcArgs.extend([zone, scale, code])
                    ftc = FastTileCalculator(*ftcArgs)
                tile = Tile(self, smt, openfct, x, y, zone, scale, code, self.tileW, self.tileH, ftc)
                self.tiles.append(tile)
                # Grow the per-scale bounding box with this tile's extents.
                if not tile.scale in self.extends:
                    self.extends[tile.scale] = dict(lat_min=tile.lat_min, lat_max=tile.lat_max, long_min=tile.long_min, long_max=tile.long_max)
                else:
                    self.extends[tile.scale]['lat_min'] = min(self.extends[tile.scale]['lat_min'], tile.lat_min)
                    self.extends[tile.scale]['lat_max'] = max(self.extends[tile.scale]['lat_max'], tile.lat_max)
                    self.extends[tile.scale]['long_min'] = min(self.extends[tile.scale]['long_min'], tile.long_min)
                    self.extends[tile.scale]['long_max'] = max(self.extends[tile.scale]['long_max'], tile.long_max)
                progress.incValue(1)
            else:
                log(ERROR, "unknown code", code, "in smt file", smt)
        if self.name is None and len(self.tiles) > 0:
            # Derive a display name from the first archive entry and scale.
            zfname = smtglob[0][0][1].filename
            smtname = smtglob[0][0][0]
            self.name = os.path.split(zfname)[0] + "/%s@scale=%03d.%02d" % (
                smtname[:4],
                int(self.tiles[0].scale),
                int(self.tiles[0].scale * 100) % 100)
        # Index tiles by each bounding edge so find_tiles_colliding can use
        # binary search to prune candidates.
        self.idx_sorted_lat_min = sorted(range(len(self.tiles)), key=lambda x: self.tiles[x].lat_min)
        self.sorted_lat_min = [self.tiles[i].lat_min for i in self.idx_sorted_lat_min]
        self.idx_sorted_lat_max = sorted(range(len(self.tiles)), key=lambda x: self.tiles[x].lat_max)
        self.sorted_lat_max = [self.tiles[i].lat_max for i in self.idx_sorted_lat_max]
        self.idx_sorted_long_min = sorted(range(len(self.tiles)), key=lambda x: self.tiles[x].long_min)
        self.sorted_long_min = [self.tiles[i].long_min for i in self.idx_sorted_long_min]
        self.idx_sorted_long_max = sorted(range(len(self.tiles)), key=lambda x: self.tiles[x].long_max)
        self.sorted_long_max = [self.tiles[i].long_max for i in self.idx_sorted_long_max]
        # Indexes of tiles returned at least once by find_tiles_colliding.
        self.visited_tiles = set()
def populate(self, scene):
res = scene.createItemGroup(self.tiles)
if len(self.tiles) > 0:
res.setZValue(self.tiles[0].scale)
return res
    def load_tile(self, smt, openfct):
        """Fetch the tile image for *smt* through the shared module-level cache."""
        return tileCache.load_tile(smt, openfct)
def find_tiles_colliding(self, p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon, extend = False, debug=False):
toRender = []
def collide2d(x11, x12, y11, y12, x21, x22, y21, y22):
def collide1d(p11, p12, p21, p22):
if max(p11, p12) < min(p21, p22) or max(p21, p22) < min(p11, p12):
return False
else:
return True
if collide1d(x11, x12, x21, x22) and collide1d(y11, y12, y21, y22):
return True
else:
return False
minLat = min(p1_lat, p2_lat, p3_lat, p4_lat)
maxLat = max(p1_lat, p2_lat, p3_lat, p4_lat)
minLong = min(p1_lon, p2_lon, p3_lon, p4_lon)
maxLong = max(p1_lon, p2_lon, p3_lon, p4_lon)
if extend:
# extend the region by 30% in each direction. Needed because of some black pieces in image
extLat = (maxLat - minLat)*.3
minLat -= extLat
maxLat += extLat
extLong = (maxLong - minLong)*.3
minLong -= extLong
maxLong += extLong
if debug:
print("Extending lat: %f long: %f" % (extLat, extLong))
i = bisect.bisect_right(self.sorted_lat_min, maxLat)
# everything right to this cannot collide
idx1 = set(self.idx_sorted_lat_min[:i])
i = bisect.bisect_left(self.sorted_lat_max, minLat)
# everything left to this cannot collide
idx2 = set(self.idx_sorted_lat_max[i:])
i = bisect.bisect_right(self.sorted_long_min, maxLong)
# everything right to this cannot collide
idx3 = set(self.idx_sorted_long_min[:i])
i = bisect.bisect_left(self.sorted_long_max, minLong)
# everything left to this cannot collide
idx4 = set(self.idx_sorted_long_max[i:])
idx = idx1.intersection(idx2).intersection(idx3).intersection(idx4)
if debug:
print("Number of tiles:", len(idx))
for i in idx:
t = self.tiles[i]
if collide2d(t.lat_min, t.lat_max, t.long_min, t.long_max, minLat, maxLat, minLong, maxLong):
toRender.append(t)
self.visited_tiles.add(i)
else:
if debug:
print("Ignoring tile %d because not colliding." % i)
return toRender
def render_tile(self, p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon, targetSize, debug=False):
#res = Image.new("RGB", (targetSize, targetSize), "red")
#return res
toRender = self.find_tiles_colliding(p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon, extend=True, debug=debug)
if len(toRender) == 0:
return None
assert len(toRender) <= 32
#print ("num_tiles = ", len(toRender))
minx = min([tt.x_mc for tt in toRender])
miny = min([tt.y_mc for tt in toRender])
maxx = max([tt.x_mc+tt.w for tt in toRender])
maxy = max([tt.y_mc+tt.h for tt in toRender])
| |
# -*- coding: utf-8; -*-
#
# @file models.py
# @brief coll-gate application models.
# @author <NAME> (INRA UMR1095)
# @date 2016-09-01
# @copyright Copyright (c) 2016 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details
import logging
import re
import uuid as uuid
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import SuspiciousOperation
from django.db import models, transaction, IntegrityError, connection
from django.db.models import Q
from django.dispatch import receiver
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from igdectk.common.models import ChoiceEnum, IntegerChoice, StringChoice
from igdectk.module.manager import module_manager
from messenger.commands import COMMAND_CACHE_INVALIDATION
logger = logging.getLogger('collgate')
class Profile(models.Model):
    """
    Additional information about a user.

    One Profile row exists per Django ``User`` (one-to-one).
    """

    # related user model; the profile is deleted together with the user
    user = models.OneToOneField(User, on_delete=models.CASCADE)

    # newly created profile are in pending state
    pending = models.BooleanField(default=True)

    # information about the organisation where the user is located
    organisation = models.CharField(max_length=127)

    # comment from an administrator about this user profile
    admin_status = models.CharField(max_length=512)

    # user saved settings as JSON object, serialized as text
    settings = models.TextField(default="{}")
class Settings(models.Model):
    """
    Global setting table.

    Simple string key/value storage for application-wide parameters.
    """

    # parameter name
    param_name = models.CharField(max_length=127)

    # parameter value, stored as a string
    value = models.CharField(max_length=1024)
class InterfaceLanguages(ChoiceEnum):
    """
    Static enumeration of the languages available for the user interface.
    """

    EN = StringChoice('en', _('English'))
    FR = StringChoice('fr', _('French'))
class Language(models.Model):
    """
    Defines the list of configured languages for data (not UI).
    """

    # JSON-schema validator for a language code (e.g. "fr" or "fr_FR")
    CODE_VALIDATOR = {"type": "string", "minLength": 2, "maxLength": 5, "pattern": "^[a-zA-Z]{2}([_-][a-zA-Z]{2})*$"}

    # JSON-schema validator for a mandatory label
    LABEL_VALIDATOR = {"type": "string", "minLength": 1, "maxLength": 128, "pattern": r"^[^\s]+(\s+[^\s]+)*$"}

    # JSON-schema validator for an optional label
    LABEL_VALIDATOR_OPTIONAL = {
        "type": "string", "minLength": 1, "maxLength": 128, "pattern": r"^[^\s]+(\s+[^\s]+)*$", "required": False}

    # language code
    code = models.CharField(max_length=5, null=False, blank=False)

    # Label of the language, i18nized as a JSON dict with the language code
    # as key and the localized label as string value.
    # Bug fix: use the callable ``dict`` instead of a shared mutable ``{}``
    # literal, so every instance gets its own fresh dictionary.
    label = JSONField(default=dict)

    def get_label(self):
        """
        Get the label for this language in the current regional, or an
        empty string when no translation exists.
        """
        lang = translation.get_language()
        return self.label.get(lang, "")

    def set_label(self, lang, label):
        """
        Set the label for a specific language.
        :param str lang: language code string
        :param str label: Localized label
        :note Model instance save() is not called.
        """
        self.label[lang] = label

    def on_client_cache_update(self):
        """Invalidate every client-side 'languages' cache entry."""
        return [{
            'category': 'main',
            'name': "languages:*",
            'values': None
        }]

    def on_server_cache_update(self):
        """Invalidate every server-side 'languages' cache entry."""
        return [{
            'category': 'main',
            'name': "languages:*",
            'values': None
        }]
class EntityStatus(ChoiceEnum):
    """
    Status of an entity (pending, valid, archived, removed).
    """

    PENDING = IntegerChoice(0, _('Pending'))
    VALID = IntegerChoice(1, _('Valid'))
    ARCHIVED = IntegerChoice(2, _('Archived'))
    REMOVED = IntegerChoice(3, _('Removed'))
class EntityManager(models.Manager):
    """
    Entity manager overriding.
    """

    def get_by_uuid(self, entity_uuid):
        """
        Get an entity by its UUID.

        :param entity_uuid: UUID value to look up
        :return: The matching entity instance
        :raises DoesNotExist: if no entity carries this UUID (Django ``get`` semantics)
        """
        return self.get(uuid=entity_uuid)
class Entity(models.Model):
    """
    Base model for any object that must support audit, history, or any other modular features.

    Provides a content-type back-reference, a status lifecycle
    (pending -> valid -> archived/removed), creation/modification
    timestamps and a unique UUID per row.
    """

    # simple name pattern with alphanumeric characters plus _ and - with at least a length of 3
    NAME_RE = re.compile(r'^[a-zA-Z0-9_-]{3,}$', re.IGNORECASE)

    # content type natural key pattern : <application_name>.<model_name>
    CONTENT_TYPE_RE = re.compile(r'^[a-z]{3,}\.[a-z]{3,}$')

    # default name validator (raw strings avoid invalid escape sequences)
    NAME_VALIDATOR = {"type": "string", "minLength": 3, "maxLength": 32, "pattern": r"^[a-zA-Z0-9\-\_]+$"}

    # default name validator optional
    NAME_VALIDATOR_OPTIONAL = {
        "type": "string", "minLength": 3, "maxLength": 32, "pattern": r"^[a-zA-Z0-9\-\_]+$", "required": False}

    # language type validator (blank or fr or fr_FR...)
    # Bug fix: the schema key was misspelled "type:" (trailing colon), which
    # silently disabled the type constraint of this validator.
    LANGUAGE_VALIDATOR = {
        "type": "string", "minLength": 0, "maxLength": 5, "pattern": r"^([a-zA-Z-]{2,5}){0,1}$", "blank": True}

    # content type validator
    CONTENT_TYPE_VALIDATOR = {"type": "string", "minLength": 3, "maxLength": 64, "pattern": r"^[a-z]{3,}\.[a-z]{3,}$"}

    # permission string validator
    PERMISSION_VALIDATOR = {"type": "string", "minLength": 3, "maxLength": 64, "pattern": r"^\S+[a-z-_]+\S+$"}

    # entity status validator
    ENTITY_STATUS_VALIDATOR = {"type": "integer", "minimum": 0, "maximum": 3}

    # entity status validator, optional variant
    ENTITY_STATUS_VALIDATOR_OPTIONAL = {"type": "integer", "minimum": 0, "maximum": 3, "required": False}

    # label validator
    LABEL_VALIDATOR = {"type": "string", "minLength": 1, "maxLength": 128, "pattern": r"^[^\s]+(\s+[^\s]+)*$"}

    # label validator optional
    LABEL_VALIDATOR_OPTIONAL = {
        "type": "string", "minLength": 1, "maxLength": 128, "pattern": r"^[^\s]+(\s+[^\s]+)*$", "required": False}

    # content type of the entity
    content_type = models.ForeignKey(ContentType, editable=False, on_delete=models.PROTECT)

    # status of the entity (defaults to VALID)
    entity_status = models.IntegerField(
        null=False, blank=False, choices=EntityStatus.choices(), default=EntityStatus.VALID.value)

    # insert date
    created_date = models.DateTimeField(auto_now_add=True)

    # last update date
    modified_date = models.DateTimeField(auto_now=True)

    # unique object identifier
    uuid = models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, unique=True)

    objects = EntityManager()

    class Meta:
        abstract = True

    def _get_content_type(self):
        """Return the ContentType matching the concrete model of this instance."""
        return ContentType.objects.get_for_model(type(self))

    def save(self, *args, **kwargs):
        """
        Save is overridden to auto-defines the content_type if necessary
        """
        if not self.content_type_id:
            self.content_type = self._get_content_type()
        super(Entity, self).save(*args, **kwargs)

    def cast(self):
        """Return this entity as an instance of its concrete (leaf) model."""
        return self.content_type.get_object_for_this_type(pk=self.pk)

    def update_field(self, field_name):
        """
        Update the updated fields with a single or a list of field names.
        :param field_name: String or tuple or list of field names
        """
        if not hasattr(self, 'updated_fields'):
            self.updated_fields = []
        if isinstance(field_name, str):
            if field_name not in self.updated_fields:
                self.updated_fields.append(field_name)
        elif isinstance(field_name, (list, tuple)):
            # keep order, skip names already recorded
            self.updated_fields += [name for name in field_name if name not in self.updated_fields]

    def update_descriptors(self, descriptors):
        """
        Setup the diff of changed descriptors.
        :param descriptors: Dict of descriptors.
        """
        self.updated_descriptors = descriptors

    @classmethod
    def is_name_valid(cls, name):
        """
        Check whether or not the name respect a certain convention [a-zA-Z0-9_-]{3,}.
        """
        if name is None or not isinstance(name, str):
            return False
        return Entity.NAME_RE.match(name) is not None

    @classmethod
    def is_content_type_valid(cls, name):
        """
        Check whether or not the name of the content type respect the format.
        """
        if name is None or not isinstance(name, str):
            return False
        return Entity.CONTENT_TYPE_RE.match(name) is not None

    @classmethod
    def get_by_content_type_and_id(cls, app_label, model, id):
        """
        Get an entity by its content type (app_label, model) and its id.
        """
        content_type = ContentType.objects.get_by_natural_key(app_label, model)
        return content_type.get_object_for_this_type(id=id)

    def remove_entity(self):
        """
        Set the entity status as removed and save.
        """
        self.entity_status = EntityStatus.REMOVED.value
        self.save()

    def validate_entity(self):
        """
        Set the entity status as active and save.
        """
        self.entity_status = EntityStatus.VALID.value
        self.save()

    def hide_entity(self):
        """
        Set the entity status as archived (hidden) and save.
        """
        # Bug fix: EntityStatus defines no HIDDEN member, so the previous
        # EntityStatus.HIDDEN.value raised AttributeError at runtime;
        # ARCHIVED is the member representing hidden entities.
        self.entity_status = EntityStatus.ARCHIVED.value
        self.save()

    def set_status(self, entity_status):
        """
        Change the status of the entity in a possible way, otherwise raise an exception
        :param entity_status: New status of the entity (upgrade, not downgrade)
        :note The instance is modified but not saved; the caller must save().
        """
        if entity_status == self.entity_status:
            return
        if entity_status == EntityStatus.PENDING.value and self.entity_status >= EntityStatus.VALID.value:
            raise SuspiciousOperation(_("It is not allowed to change the status of an entity from valid to pending"))
        if self.entity_status == EntityStatus.REMOVED.value:
            raise SuspiciousOperation(_("It is not allowed to change the status of a removed entity"))
        if self.entity_status == EntityStatus.ARCHIVED.value:
            raise SuspiciousOperation(_("It is not allowed to change the status of an archived entity"))
        self.entity_status = entity_status

    def natural_name(self):
        """
        Return the most natural name for defining the specialised entity. By default return the uuid as name.
        :return: A string name
        """
        # NOTE(review): this returns the UUID object itself, not str(uuid);
        # callers appear to rely on its string conversion — confirm before changing.
        return self.uuid

    def details(self):
        """
        Return the details field for the specialized entity. By default return an empty dict.
        :return: A dict of details
        """
        return {}

    @classmethod
    def make_search_by_name(cls, term):
        """
        Return a query object for the most common name related to the model.
        :param term: String term to search for
        :return: A query object
        """
        return Q(uuid__startswith=term)
class EventMessage(models.Model):
    """
    Displayable and managed event message. No history, no audit, simple creation/deletion, no edition.
    """

    # author of the message; kept (set to NULL) when the user is deleted
    author = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)

    # creation date
    created_date = models.DateTimeField(auto_now_add=True)

    # message in a JSON text field with an object where key are language code and value are message in locale
    message = models.CharField(max_length=4096)
class EntitySynonymType(models.Model):
"""
Type of a synonym for a concrete entity model.
"""
# name pattern
NAME_VALIDATOR = {"type": "string", "minLength": 3, "maxLength": 128, "pattern": "^[a-zA-Z0-9\-\_]+$"}
# name pattern
NAME_VALIDATOR_OPTIONAL = {"type": "string", "minLength": 3, "maxLength": 128, "pattern": "^[a-zA-Z0-9\-\_]+$",
"required": False}
# label validator
LABEL_VALIDATOR = {"type": "string", "minLength": 1, "maxLength": 128, "pattern": r"^[^\s]+(\s+[^\s]+)*$"}
# label validator optional
LABEL_VALIDATOR_OPTIONAL = {
"type": "string", "minLength": 1, "maxLength": 128, "pattern": r"^[^\s]+(\s+[^\s]+)*$", "required": False}
# content type validator
TARGET_MODEL_VALIDATOR = {"type": "string", "minLength": 3, "maxLength": 64, "pattern": r"^[a-z]{3,}\.[a-z]{3,}$"}
    # synonym display name (defaults for each entity are 'code', 'primary' and 'alternate_name')
name = models.CharField(max_length=128, db_index=True)
# unique means the name of the synonym is unique for the couple (target, synonym_type)
unique = models.BooleanField(default=False)
# If false, only a single synonym of this type is allowed per instance of target entity.
# For example, false for a code, true for an alternate name.
multiple_entry = models.BooleanField(default=False)
# if true the language code is necessary to the synonym. in practice uses false for type like codes
has_language | |
<gh_stars>0
#!/usr/bin/env python
#
# Natural Language Toolkit: TGrep search
#
# Copyright (C) 2001-2021 NLTK Project
# Author: <NAME> <<EMAIL>>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
============================================
TGrep search implementation for NLTK trees
============================================
This module supports TGrep2 syntax for matching parts of NLTK Trees.
Note that many tgrep operators require the tree passed to be a
``ParentedTree``.
External links:
- `Tgrep tutorial <http://www.stanford.edu/dept/linguistics/corpora/cas-tut-tgrep.html>`_
- `Tgrep2 manual <http://tedlab.mit.edu/~dr/Tgrep2/tgrep2.pdf>`_
- `Tgrep2 source <http://tedlab.mit.edu/~dr/Tgrep2/>`_
Usage
=====
>>> from nltk.tree import ParentedTree
>>> from nltk.tgrep import tgrep_nodes, tgrep_positions
>>> tree = ParentedTree.fromstring('(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))')
>>> list(tgrep_nodes('NN', [tree]))
[[ParentedTree('NN', ['dog']), ParentedTree('NN', ['cat'])]]
>>> list(tgrep_positions('NN', [tree]))
[[(0, 2), (2, 1)]]
>>> list(tgrep_nodes('DT', [tree]))
[[ParentedTree('DT', ['the']), ParentedTree('DT', ['a'])]]
>>> list(tgrep_nodes('DT $ JJ', [tree]))
[[ParentedTree('DT', ['the'])]]
This implementation adds syntax to select nodes based on their NLTK
tree position. This syntax is ``N`` plus a Python tuple representing
the tree position. For instance, ``N()``, ``N(0,)``, ``N(0,0)`` are
valid node selectors. Example:
>>> tree = ParentedTree.fromstring('(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))')
>>> tree[0,0]
ParentedTree('DT', ['the'])
>>> tree[0,0].treeposition()
(0, 0)
>>> list(tgrep_nodes('N(0,0)', [tree]))
[[ParentedTree('DT', ['the'])]]
Caveats:
========
- Link modifiers: "?" and "=" are not implemented.
- Tgrep compatibility: Using "@" for "!", "{" for "<", "}" for ">" are
not implemented.
- The "=" and "~" links are not implemented.
Known Issues:
=============
- There are some issues with link relations involving leaf nodes
(which are represented as bare strings in NLTK trees). For
instance, consider the tree::
(S (A x))
The search string ``* !>> S`` should select all nodes which are not
dominated in some way by an ``S`` node (i.e., all nodes which are
not descendants of an ``S``). Clearly, in this tree, the only node
which fulfills this criterion is the top node (since it is not
dominated by anything). However, the code here will find both the
top node and the leaf node ``x``. This is because we cannot recover
the parent of the leaf, since it is stored as a bare string.
A possible workaround, when performing this kind of search, would be
to filter out all leaf nodes.
Implementation notes
====================
This implementation is (somewhat awkwardly) based on lambda functions
which are predicates on a node. A predicate is a function which is
either True or False; using a predicate function, we can identify sets
of nodes with particular properties. A predicate function, could, for
instance, return True only if a particular node has a label matching a
particular regular expression, and has a daughter node which has no
sisters. Because tgrep2 search strings can do things statefully (such
as substituting in macros, and binding nodes with node labels), the
actual predicate function is declared with three arguments::
pred = lambda n, m, l: return True # some logic here
``n``
is a node in a tree; this argument must always be given
``m``
contains a dictionary, mapping macro names onto predicate functions
``l``
is a dictionary to map node labels onto nodes in the tree
``m`` and ``l`` are declared to default to ``None``, and so need not be
specified in a call to a predicate. Predicates which call other
predicates must always pass the value of these arguments on. The
top-level predicate (constructed by ``_tgrep_exprs_action``) binds the
macro definitions to ``m`` and initialises ``l`` to an empty dictionary.
"""
import functools
import re
try:
import pyparsing
except ImportError:
print("Warning: nltk.tgrep will not work without the `pyparsing` package")
print("installed.")
import nltk.tree
class TgrepException(Exception):
    """Tgrep exception type, raised e.g. for use of an undefined macro."""

    pass
def ancestors(node):
    """
    Returns the list of all nodes dominating the given tree node.
    This method will not work with leaf nodes, since there is no way
    to recover the parent.
    """
    chain = []
    try:
        parent = node.parent()
    except AttributeError:
        # leaves are bare strings and have no parent() method
        return chain
    while parent:
        chain.append(parent)
        parent = parent.parent()
    return chain
def unique_ancestors(node):
    """
    Returns the list of all nodes dominating the given node, where
    there is only a single path of descent.
    """
    chain = []
    try:
        parent = node.parent()
    except AttributeError:
        # leaves are bare strings and have no parent() method
        return chain
    # stop as soon as an ancestor has more than one child
    while parent and len(parent) == 1:
        chain.append(parent)
        parent = parent.parent()
    return chain
def _descendants(node):
"""
Returns the list of all nodes which are descended from the given
tree node in some way.
"""
try:
treepos = node.treepositions()
except AttributeError:
return []
return [node[x] for x in treepos[1:]]
def _leftmost_descendants(node):
"""
Returns the set of all nodes descended in some way through
left branches from this node.
"""
try:
treepos = node.treepositions()
except AttributeError:
return []
return [node[x] for x in treepos[1:] if all(y == 0 for y in x)]
def _rightmost_descendants(node):
"""
Returns the set of all nodes descended in some way through
right branches from this node.
"""
try:
rightmost_leaf = max(node.treepositions())
except AttributeError:
return []
return [node[rightmost_leaf[:i]] for i in range(1, len(rightmost_leaf) + 1)]
def _istree(obj):
    """Predicate to check whether `obj` is a nltk.tree.Tree."""
    # leaves in NLTK trees are bare strings, so this also distinguishes
    # internal nodes from leaves
    return isinstance(obj, nltk.tree.Tree)
def _unique_descendants(node):
    """
    Returns the list of all nodes descended from the given node, where
    there is only a single path of descent.
    """
    chain = []
    cursor = node
    # follow only-children downwards until a leaf or a branching node
    while cursor and _istree(cursor) and len(cursor) == 1:
        cursor = cursor[0]
        chain.append(cursor)
    return chain
def _before(node):
"""
Returns the set of all nodes that are before the given node.
"""
try:
pos = node.treeposition()
tree = node.root()
except AttributeError:
return []
return [tree[x] for x in tree.treepositions() if x[: len(pos)] < pos[: len(x)]]
def _immediately_before(node):
    """
    Returns the set of all nodes that are immediately before the given
    node.
    Tree node A immediately precedes node B if the last terminal
    symbol (word) produced by A immediately precedes the first
    terminal symbol produced by B.
    """
    try:
        pos = node.treeposition()
        tree = node.root()
    except AttributeError:
        # leaves are bare strings: neither position nor root is recoverable
        return []
    # go "upwards" from pos until there is a place we can go to the left
    idx = len(pos) - 1
    while 0 <= idx and pos[idx] == 0:
        idx -= 1
    if idx < 0:
        # node lies on the leftmost branch: nothing precedes it
        return []
    # step one sibling to the left at the found depth
    pos = list(pos[: idx + 1])
    pos[-1] -= 1
    before = tree[pos]
    # the preceding sibling plus its entire rightmost branch end
    # immediately before *node*
    return [before] + _rightmost_descendants(before)
def _after(node):
"""
Returns the set of all nodes that are after the given node.
"""
try:
pos = node.treeposition()
tree = node.root()
except AttributeError:
return []
return [tree[x] for x in tree.treepositions() if x[: len(pos)] > pos[: len(x)]]
def _immediately_after(node):
    """
    Returns the set of all nodes that are immediately after the given
    node.
    Tree node A immediately follows node B if the first terminal
    symbol (word) produced by A immediately follows the last
    terminal symbol produced by B.
    """
    try:
        pos = node.treeposition()
        tree = node.root()
        current = node.parent()
    except AttributeError:
        # leaves are bare strings: position, root and parent are unrecoverable
        return []
    # go "upwards" from pos until there is a place we can go to the
    # right
    idx = len(pos) - 1
    while 0 <= idx and pos[idx] == len(current) - 1:
        idx -= 1
        current = current.parent()
    if idx < 0:
        # node lies on the rightmost branch: nothing follows it
        return []
    # step one sibling to the right at the found depth
    pos = list(pos[: idx + 1])
    pos[-1] += 1
    after = tree[pos]
    # the following sibling plus its entire leftmost branch start
    # immediately after *node*
    return [after] + _leftmost_descendants(after)
def _tgrep_node_literal_value(node):
    """
    Gets the string value of a given parse tree node, for comparison
    using the tgrep node literal predicates.
    """
    if _istree(node):
        return node.label()
    return str(node)
def _tgrep_macro_use_action(_s, _l, tokens):
"""
Builds a lambda function which looks up the macro name used.
"""
assert len(tokens) == 1
assert tokens[0][0] == "@"
macro_name = tokens[0][1:]
def macro_use(n, m=None, l=None):
if m is None or macro_name not in m:
raise TgrepException("macro {0} not defined".format(macro_name))
return m[macro_name](n, m, l)
return macro_use
def _tgrep_node_action(_s, _l, tokens):
"""
Builds a lambda function representing a predicate on a tree node
depending on the name of its node.
"""
if tokens[0] == "'":
# strip initial apostrophe (tgrep2 print command)
tokens = tokens[1:]
if len(tokens) > 1:
# disjunctive definition of a node name
assert list(set(tokens[1::2])) == ["|"]
# recursively call self to interpret each node name definition
tokens = [_tgrep_node_action(None, None, [node]) for node in tokens[::2]]
# capture tokens and return the disjunction
return (lambda t: lambda n, m=None, l=None: any(f(n, m, l) for f in t))(tokens)
else:
if hasattr(tokens[0], | |
<reponame>tjone270/Quake-Live<gh_stars>10-100
# This file is part of the Quake Live server implementation by TomTec Solutions. Do not copy or redistribute or link to this file without the emailed consent of <NAME> (<EMAIL>).
# custom_votes.py - a minqlx plugin to enable the ability to have custom vote functionality in-game.
# This plugin is released to everyone, for any purpose. It comes with no warranty, no guarantee it works, it's released AS IS.
# You can modify everything, except for lines 1-4 and the !tomtec_versions code. They're there to indicate I whacked this together originally. Please make it better :D
"""
The following cvars are used on this plugin:
qlx_rulesetLocked: Is used to prevent '/cv ruleset' votes. Default: 0
qlx_disablePlayerRemoval: Prevents non-privileged players from using '/cv kick' or '/cv tempban'. Default: 0
qlx_disableCvarVoting: Prevents anyone from calling a CVAR vote. Default: 0
qlx_cvarVotePermissionRequired: Required permission level to call a CVAR vote. Default: 3
"""
#
# List of custom votes this plugin provides: http://tomtecsolutions.com.au/thepurgery/index.php?title=Special_votes
#
import minqlx
class custom_votes(minqlx.Plugin):
    def __init__(self):
        """Register hooks, commands and default cvars for the plugin."""
        self.add_hook("vote_called", self.handle_vote_called)
        self.add_hook("player_loaded", self.player_loaded)
        self.add_command("tomtec_versions", self.cmd_showversion)
        # permission level 5 is required for both admin commands below
        self.add_command("excessiveweaps", self.cmd_excessive_weaps, 5, usage="on/off")
        self.add_command("ruleset", self.cmd_ruleset, 5, usage="pql/vql")
        # seed default cvar values only when not already configured
        self.set_cvar_once("qlx_rulesetLocked", "0")
        self.set_cvar_once("qlx_excessive", "0")
        self.set_cvar_once("qlx_disablePlayerRemoval", "0")
        self.set_cvar_once("qlx_disableCvarVoting", "0")
        self.set_cvar_once("qlx_cvarVotePermissionRequired", "3")
        self.plugin_version = "2.3"
def player_loaded(self, player):
if (self.get_cvar("qlx_excessive", bool)):
player.tell("Excessive weapons are ^2enabled^7. To disable them, ^2/cv excessive off^7.")
def cmd_ruleset(self, player, msg, channel):
if len(msg) < 2:
return minqlx.RET_USAGE
if msg[1].lower() == "pql":
minqlx.set_cvar("pmove_airControl", "1")
minqlx.set_cvar("pmove_rampJump", "1")
minqlx.set_cvar("weapon_reload_rg", "1200")
minqlx.set_cvar("pmove_weaponRaiseTime", "10")
minqlx.set_cvar("pmove_weaponDropTime", "10")
minqlx.set_cvar("g_damage_lg", "7")
minqlx.set_cvar("dmflags", "60")
if self.game.type_short == "ca":
minqlx.set_cvar("g_startingHealth", "200")
minqlx.set_cvar("g_startingArmor", "200")
minqlx.console_command("map_restart")
self.msg("PQL ruleset is now set.")
if msg[1].lower() == "vql":
minqlx.set_cvar("pmove_airControl", "0")
minqlx.set_cvar("pmove_rampJump", "0")
minqlx.set_cvar("weapon_reload_rg", "1500")
minqlx.set_cvar("pmove_weaponRaiseTime", "200")
minqlx.set_cvar("pmove_weaponDropTime", "200")
minqlx.set_cvar("g_damage_lg", "6")
if self.game.type_short == "ca":
minqlx.set_cvar("dmflags", "28")
else:
minqlx.console_command("reset dmflags")
minqlx.console_command("reset g_startingHealth")
minqlx.console_command("reset g_startingArmor")
minqlx.console_command("map_restart")
self.msg("VQL ruleset is now set.")
def cmd_excessive_weaps(self, player, msg, channel):
if len(msg) < 2:
return minqlx.RET_USAGE
if msg[1] == "on":
minqlx.set_cvar("weapon_reload_sg", "200")
minqlx.set_cvar("weapon_reload_rl", "200")
minqlx.set_cvar("weapon_reload_rg", "50")
minqlx.set_cvar("weapon_reload_prox", "200")
minqlx.set_cvar("weapon_reload_pg", "40")
minqlx.set_cvar("weapon_reload_ng", "800")
minqlx.set_cvar("weapon_reload_mg", "40")
minqlx.set_cvar("weapon_reload_hmg", "40")
minqlx.set_cvar("weapon_reload_gl", "200")
minqlx.set_cvar("weapon_reload_gauntlet", "100")
minqlx.set_cvar("weapon_reload_cg", "30")
minqlx.set_cvar("weapon_reload_bfg", "75")
minqlx.set_cvar("qlx_excessive", "1")
self.msg("Excessive weapons are enabled.")
if msg[1] == "off":
minqlx.console_command("reset weapon_reload_sg")
minqlx.console_command("reset weapon_reload_rl")
if (minqlx.get_cvar("pmove_airControl")) == "1":
minqlx.set_cvar("weapon_reload_rg", "1200")
else:
minqlx.console_command("reset weapon_reload_rg")
minqlx.console_command("reset weapon_reload_prox")
minqlx.console_command("reset weapon_reload_pg")
minqlx.console_command("reset weapon_reload_ng")
minqlx.console_command("reset weapon_reload_mg")
minqlx.console_command("reset weapon_reload_hmg")
minqlx.console_command("reset weapon_reload_gl")
minqlx.console_command("reset weapon_reload_gauntlet")
minqlx.console_command("reset weapon_reload_cg")
minqlx.console_command("reset weapon_reload_bfg")
minqlx.set_cvar("qlx_excessive", "0")
self.msg("Excessive weapons are disabled.")
def handle_vote_called(self, caller, vote, args):
if not (self.get_cvar("g_allowSpecVote", bool)) and caller.team == "spectator":
if caller.privileges == None:
caller.tell("You are not allowed to call a vote as spectator.")
return minqlx.RET_STOP_ALL
if vote.lower() == "infiniteammo":
# enables the '/cv infiniteammo [on/off]' command
if args.lower() == "off":
self.callvote("set g_infiniteAmmo 0", "infinite ammo: off")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
elif args.lower() == "on":
self.callvote("set g_infiniteAmmo 1", "infinite ammo: on")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
else:
caller.tell("^2/cv infiniteammo [on/off]^7 is the usage for this callvote command.")
return minqlx.RET_STOP_ALL
if vote.lower() == "freecam":
# enables the '/cv freecam [on/off]' command
if args.lower() == "off":
self.callvote("set g_teamSpecFreeCam 0", "team spectator free-cam: off")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
elif args.lower() == "on":
self.callvote("set g_teamSpecFreeCam 1", "team spectator free-cam: on")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
else:
caller.tell("^2/cv freecam [on/off]^7 is the usage for this callvote command.")
return minqlx.RET_STOP_ALL
if vote.lower() == "floordamage":
# enables the '/cv floordamage [on/off]' command
if args.lower() == "off":
self.callvote("set g_forceDmgThroughSurface 0", "damage through floor: off")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
elif args.lower() == "on":
self.callvote("set g_forceDmgThroughSurface 1", "damage through floor: on")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
else:
caller.tell("^2/cv floordamage [on/off]^7 is the usage for this callvote command.")
return minqlx.RET_STOP_ALL
if vote.lower() == "alltalk":
# enables the '/cv alltalk [on/off]' command
if args.lower() == "off":
self.callvote("set g_allTalk 0", "voice comm between teams: off")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
elif args.lower() == "on":
self.callvote("set g_allTalk 1", "voice comm between teams: on")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
else:
caller.tell("^2/cv alltalk [on/off]^7 is the usage for this callvote command.")
return minqlx.RET_STOP_ALL
if vote.lower() == "allready":
# enables the '/cv allready' command
if self.game.state == "warmup":
self.callvote("allready", "begin game immediately")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
else:
caller.tell("You can't vote to begin the game when the game is already on.")
return minqlx.RET_STOP_ALL
if vote.lower() == "ruleset":
# enables the '/cv ruleset [pql/vql]' command
if (minqlx.get_cvar("qlx_rulesetLocked")) == "1":
caller.tell("Voting to change the ruleset is disabled on ruleset-locked servers.")
return minqlx.RET_STOP_ALL
if args.lower() == "pql":
self.callvote("qlx !ruleset pql", "ruleset: pql")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
elif args.lower() == "vql":
self.callvote("qlx !ruleset vql", "ruleset: vql")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
else:
caller.tell("^2/cv ruleset [pql/vql]^7 is the usage for this callvote command.")
return minqlx.RET_STOP_ALL
if vote.lower() == "abort":
# enables the '/cv abort' command
if self.game.state != "warmup":
self.callvote("abort", "abort the game", 30)
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
else:
caller.tell("You can't vote to abort the game when the game isn't in progress.")
return minqlx.RET_STOP_ALL
if vote.lower() == "chatsounds":
# enables the '/cv chatsounds [on/off]' command
if args.lower() == "off":
self.callvote("qlx !unload fun", "chat-activated sounds: off")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
elif args.lower() == "on":
self.callvote("qlx !load fun", "chat-activated sounds: on")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
else:
caller.tell("^2/cv chatsounds [on/off]^7 is the usage for this callvote command.")
return minqlx.RET_STOP_ALL
if vote.lower() in ("silence", "mute"):
# enables the '/cv silence <id>' command
try:
player_name = self.player(int(args)).clean_name
player_id = self.player(int(args)).id
except:
caller.tell("^1Invalid ID.^7 Use a client ID from the ^2/players^7 command.")
return minqlx.RET_STOP_ALL
if self.get_cvar("qlx_serverExemptFromModeration") == "1":
caller.tell("This server has the serverExemptFromModeration flag set, and therefore, silencing is disabled.")
return minqlx.RET_STOP_ALL
self.callvote("qlx !silence {} 10 minutes You were call-voted silent for 10 minutes.; mute {}".format(player_id, player_id), "silence {} for 10 minutes".format(player_name))
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
if vote.lower() == "tempban":
# enables the '/cv tempban <id>' command
if self.get_cvar("qlx_disablePlayerRemoval", bool):
# if player removal cvar is set, do not permit '/cv tempban'
if caller.privileges == None:
caller.tell("Voting to tempban is disabled in this server.")
caller.tell("^2/cv spec <id>^7 and ^2/cv silence <id>^7 exist as substitutes to kicking/tempbanning.")
return minqlx.RET_STOP_ALL
try:
player_name = self.player(int(args)).clean_name
player_id = self.player(int(args)).id
except:
caller.tell("^1Invalid ID.^7 Use a client ID from the ^2/players^7 command.")
return minqlx.RET_STOP_ALL
if self.player(int(args)).privileges != None:
caller.tell("The player specified is an admin, a mod or banned, and cannot be tempbanned.")
return minqlx.RET_STOP_ALL
self.callvote("tempban {}".format(player_id), "^1ban {} until the map changes^3".format(player_name))
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
if vote.lower() == "spec":
# enables the '/cv spec <id>' command
try:
player_name = self.player(int(args)).clean_name
player_id = self.player(int(args)).id
except:
caller.tell("^1Invalid ID.^7 Use a client ID from the ^2/players^7 command.")
return minqlx.RET_STOP_ALL
if self.player(int(args)).team == "spectator":
caller.tell("That player is already in the spectators.")
return minqlx.RET_STOP_ALL
self.callvote("put {} spec".format(player_id), "move {} to the spectators".format(player_name))
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
if vote.lower() == "excessive":
# enables the '/cv excessive [on/off]' command
if args.lower() == "off":
self.callvote("qlx !excessiveweaps off", "excessive weapons: off")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
elif args.lower() == "on":
self.callvote("qlx !excessiveweaps on", "excessive weapons: on")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
else:
caller.tell("^2/cv excessive [on/off]^7 is the usage for this callvote command.")
return minqlx.RET_STOP_ALL
if vote.lower() in ("kick", "clientkick"):
# if player removal cvar is set, do not permit '/cv kick' or '/cv clientkick'
if self.get_cvar("qlx_disablePlayerRemoval", bool):
if caller.privileges == None:
caller.tell("Voting to kick/clientkick is disabled in this server.")
caller.tell("^2/cv spec <id>^7 and ^2/cv silence <id>^7 exist as substitutes to kicking.")
return minqlx.RET_STOP_ALL
if vote.lower() == "lock":
# enables the '/cv lock <team>' command
if len(args) <= 1:
self.callvote("lock", "lock all teams")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
else:
if args.lower() == "blue":
self.callvote("lock blue", "lock the ^4blue^3 team")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
elif args.lower() == "red":
self.callvote("lock red", "lock the ^1red^3 team")
self.msg("{}^7 called a vote.".format(caller.name))
return minqlx.RET_STOP_ALL
else:
caller.tell("^2/cv lock^7 or ^2/cv lock <blue/red>^7 is the usage for this callvote command.")
return minqlx.RET_STOP_ALL
if vote.lower() == "unlock":
# enables the '/cv unlock <team>' command
if len(args) <= 1:
self.callvote("unlock", "unlock all teams")
self.msg("{}^7 | |
# encoding: utf-8
# module System.Text calls itself Text
# from mscorlib,Version=4.0.0.0,Culture=neutral,PublicKeyToken=b77a5c561934e089,System,Version=4.0.0.0,Culture=neutral,PublicKeyToken=b77a5c561934e089
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class Encoding(object,ICloneable):
""" Represents a character encoding. """
    def Clone(self):
        """
        Clone(self: Encoding) -> object

        When overridden in a derived class, creates a shallow copy of the
        current System.Text.Encoding object.

        Returns: A copy of the current System.Text.Encoding object.
        """
        # Auto-generated stub (IronPython metadata); no Python implementation.
        pass
    @staticmethod
    def Convert(srcEncoding,dstEncoding,bytes,index=None,count=None):
        """
        Convert(srcEncoding: Encoding, dstEncoding: Encoding, bytes: Array[Byte], index: int, count: int) -> Array[Byte]

        Converts a range of bytes in a byte array from one encoding to another.

        srcEncoding: The encoding of the source array, bytes.
        dstEncoding: The encoding of the output array.
        bytes: The array of bytes to convert.
        index: The index of the first element of bytes to convert.
        count: The number of bytes to convert.
        Returns: An array of type System.Byte containing the result of
            converting a range of bytes in bytes from srcEncoding to
            dstEncoding.

        Convert(srcEncoding: Encoding, dstEncoding: Encoding, bytes: Array[Byte]) -> Array[Byte]

        Converts an entire byte array from one encoding to another.

        srcEncoding: The encoding format of bytes.
        dstEncoding: The target encoding format.
        bytes: The bytes to convert.
        Returns: An array of type System.Byte containing the results of
            converting bytes from srcEncoding to dstEncoding.
        """
        # Auto-generated stub (IronPython metadata); no Python implementation.
        pass
    def Equals(self,value):
        """
        Equals(self: Encoding, value: object) -> bool

        Determines whether the specified System.Object is equal to the
        current instance.

        value: The System.Object to compare with the current instance.
        Returns: true if value is an instance of System.Text.Encoding and is
            equal to the current instance; otherwise, false.
        """
        # Auto-generated stub (IronPython metadata); no Python implementation.
        pass
    def GetByteCount(self,*__args):
        """
        GetByteCount(self: Encoding, chars: Array[Char], index: int, count: int) -> int

        When overridden in a derived class, calculates the number of bytes
        produced by encoding a set of characters from the specified character
        array.

        chars: The character array containing the set of characters to encode.
        index: The index of the first character to encode.
        count: The number of characters to encode.
        Returns: The number of bytes produced by encoding the specified
            characters.

        GetByteCount(self: Encoding, chars: Char*, count: int) -> int

        When overridden in a derived class, calculates the number of bytes
        produced by encoding a set of characters starting at the specified
        character pointer.

        chars: A pointer to the first character to encode.
        count: The number of characters to encode.
        Returns: The number of bytes produced by encoding the specified
            characters.

        GetByteCount(self: Encoding, chars: Array[Char]) -> int

        When overridden in a derived class, calculates the number of bytes
        produced by encoding all the characters in the specified character
        array.

        chars: The character array containing the characters to encode.
        Returns: The number of bytes produced by encoding all the characters
            in the specified character array.

        GetByteCount(self: Encoding, s: str) -> int

        When overridden in a derived class, calculates the number of bytes
        produced by encoding the characters in the specified string.

        s: The string containing the set of characters to encode.
        Returns: The number of bytes produced by encoding the specified
            characters.
        """
        # Auto-generated stub (IronPython metadata); no Python implementation.
        pass
    def GetBytes(self,*__args):
        """
        GetBytes(self: Encoding, s: str) -> Array[Byte]

        When overridden in a derived class, encodes all the characters in the
        specified string into a sequence of bytes.

        s: The string containing the characters to encode.
        Returns: A byte array containing the results of encoding the
            specified set of characters.

        GetBytes(self: Encoding, s: str, charIndex: int, charCount: int, bytes: Array[Byte], byteIndex: int) -> int

        When overridden in a derived class, encodes a set of characters from
        the specified string into the specified byte array.

        s: The string containing the set of characters to encode.
        charIndex: The index of the first character to encode.
        charCount: The number of characters to encode.
        bytes: The byte array to contain the resulting sequence of bytes.
        byteIndex: The index at which to start writing the resulting sequence
            of bytes.
        Returns: The actual number of bytes written into bytes.

        GetBytes(self: Encoding, chars: Char*, charCount: int, bytes: Byte*, byteCount: int) -> int

        When overridden in a derived class, encodes a set of characters
        starting at the specified character pointer into a sequence of bytes
        that are stored starting at the specified byte pointer.

        chars: A pointer to the first character to encode.
        charCount: The number of characters to encode.
        bytes: A pointer to the location at which to start writing the
            resulting sequence of bytes.
        byteCount: The maximum number of bytes to write.
        Returns: The actual number of bytes written at the location indicated
            by the bytes parameter.

        GetBytes(self: Encoding, chars: Array[Char]) -> Array[Byte]

        When overridden in a derived class, encodes all the characters in the
        specified character array into a sequence of bytes.

        chars: The character array containing the characters to encode.
        Returns: A byte array containing the results of encoding the
            specified set of characters.

        GetBytes(self: Encoding, chars: Array[Char], index: int, count: int) -> Array[Byte]

        When overridden in a derived class, encodes a set of characters from
        the specified character array into a sequence of bytes.

        chars: The character array containing the set of characters to encode.
        index: The index of the first character to encode.
        count: The number of characters to encode.
        Returns: A byte array containing the results of encoding the
            specified set of characters.

        GetBytes(self: Encoding, chars: Array[Char], charIndex: int, charCount: int, bytes: Array[Byte], byteIndex: int) -> int

        When overridden in a derived class, encodes a set of characters from
        the specified character array into the specified byte array.

        chars: The character array containing the set of characters to encode.
        charIndex: The index of the first character to encode.
        charCount: The number of characters to encode.
        bytes: The byte array to contain the resulting sequence of bytes.
        byteIndex: The index at which to start writing the resulting sequence
            of bytes.
        Returns: The actual number of bytes written into bytes.
        """
        # Auto-generated stub (IronPython metadata); no Python implementation.
        pass
    def GetCharCount(self,bytes,*__args):
        """
        GetCharCount(self: Encoding, bytes: Byte*, count: int) -> int

        When overridden in a derived class, calculates the number of
        characters produced by decoding a sequence of bytes starting at the
        specified byte pointer.

        bytes: A pointer to the first byte to decode.
        count: The number of bytes to decode.
        Returns: The number of characters produced by decoding the specified
            sequence of bytes.

        GetCharCount(self: Encoding, bytes: Array[Byte], index: int, count: int) -> int

        When overridden in a derived class, calculates the number of
        characters produced by decoding a sequence of bytes from the
        specified byte array.

        bytes: The byte array containing the sequence of bytes to decode.
        index: The index of the first byte to decode.
        count: The number of bytes to decode.
        Returns: The number of characters produced by decoding the specified
            sequence of bytes.

        GetCharCount(self: Encoding, bytes: Array[Byte]) -> int

        When overridden in a derived class, calculates the number of
        characters produced by decoding all the bytes in the specified byte
        array.

        bytes: The byte array containing the sequence of bytes to decode.
        Returns: The number of characters produced by decoding the specified
            sequence of bytes.
        """
        # Auto-generated stub (IronPython metadata); no Python implementation.
        pass
def GetChars(self,bytes,*__args):
"""
GetChars(self: Encoding,bytes: Array[Byte],byteIndex: int,byteCount: int,chars: Array[Char],charIndex: int) -> int
When overridden in a derived class,decodes a sequence of bytes from the
specified byte array into the specified character array.
bytes: The byte array containing the sequence of bytes to decode.
byteIndex: The index of the first byte to decode.
byteCount: The number of bytes to decode.
chars: The character array to contain the resulting set of characters.
charIndex: The index at which to start writing the resulting set of characters.
Returns: The actual number of characters written into chars.
GetChars(self: Encoding,bytes: Byte*,byteCount: int,chars: Char*,charCount: int) -> int
When overridden in a derived class,decodes a sequence of bytes starting at the
specified byte pointer into a set of | |
None
    def __init__(self, TariffCode=None, Question=None, gds_collector_=None, **kwargs_):
        # Generated (generateDS) member-wise constructor.
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.TariffCode = TariffCode
        self.TariffCode_nsprefix_ = None
        # Question is a repeated element; default to a fresh list per instance.
        if Question is None:
            self.Question = []
        else:
            self.Question = Question
        self.Question_nsprefix_ = None
    def factory(*args_, **kwargs_):
        """Create a ProductAnswerType (or a registered subclass) instance."""
        if CurrentSubclassModule_ is not None:
            # Allow an external module to substitute its own subclass.
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ProductAnswerType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ProductAnswerType.subclass:
            return ProductAnswerType.subclass(*args_, **kwargs_)
        else:
            return ProductAnswerType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        # Namespace prefix captured from the parsed element, if any.
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        # Override the namespace prefix used when exporting this element.
        self.ns_prefix_ = ns_prefix
    def get_TariffCode(self):
        # Accessor for the optional <TariffCode> text child.
        return self.TariffCode
    def set_TariffCode(self, TariffCode):
        # Mutator for the optional <TariffCode> text child.
        self.TariffCode = TariffCode
    def get_Question(self):
        # Accessor for the repeated <Question> children (a list).
        return self.Question
    def set_Question(self, Question):
        # Replace the whole list of <Question> children.
        self.Question = Question
    def add_Question(self, value):
        # Append one <Question> child.
        self.Question.append(value)
    def insert_Question_at(self, index, value):
        # Insert a <Question> child at the given position.
        self.Question.insert(index, value)
    def replace_Question_at(self, index, value):
        # Replace the <Question> child at the given position.
        self.Question[index] = value
def hasContent_(self):
if (
self.TariffCode is not None or
self.Question
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ProductAnswerType', pretty_print=True):
        """Serialize this element (and its children) as XML to outfile."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ProductAnswerType')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Use the tag name captured during parsing when available.
        if self.original_tagname_ is not None and name_ == 'ProductAnswerType':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ProductAnswerType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ProductAnswerType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ProductAnswerType'):
        # This element declares no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ProductAnswerType', fromsubclass_=False, pretty_print=True):
        """Serialize the child elements (<TariffCode>, <Question>*)."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.TariffCode is not None:
            namespaceprefix_ = self.TariffCode_nsprefix_ + ':' if (UseCapturedNS_ and self.TariffCode_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTariffCode>%s</%sTariffCode>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.TariffCode), input_name='TariffCode')), namespaceprefix_ , eol_))
        for Question_ in self.Question:
            namespaceprefix_ = self.Question_nsprefix_ + ':' if (UseCapturedNS_ and self.Question_nsprefix_) else ''
            Question_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Question', pretty_print=pretty_print)
    def build(self, node, gds_collector_=None):
        """Populate this object from an ElementTree node; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace URI from the child tag.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # This element declares no XML attributes.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        """Dispatch one parsed child node into the matching member."""
        if nodeName_ == 'TariffCode':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'TariffCode')
            value_ = self.gds_validate_string(value_, node, 'TariffCode')
            self.TariffCode = value_
            self.TariffCode_nsprefix_ = child_.prefix
        elif nodeName_ == 'Question':
            # Repeated element: build and append an AnswerType child.
            obj_ = AnswerType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.Question.append(obj_)
            obj_.original_tagname_ = 'Question'
# end class ProductAnswerType
class ProductsChargesType(GeneratedsSuper):
    """Generated (generateDS) binding for the ProductsCharges XML element.

    Holds a repeated <Product> child (list of ProductEstimateType) and an
    optional <ProductsSubTotal> text child.
    """
    __hash__ = GeneratedsSuper.__hash__
    subclass = None
    superclass = None
    def __init__(self, Product=None, ProductsSubTotal=None, gds_collector_=None, **kwargs_):
        # Generated member-wise constructor.
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        # Product is a repeated element; default to a fresh list per instance.
        if Product is None:
            self.Product = []
        else:
            self.Product = Product
        self.Product_nsprefix_ = None
        self.ProductsSubTotal = ProductsSubTotal
        self.ProductsSubTotal_nsprefix_ = None
    def factory(*args_, **kwargs_):
        """Create a ProductsChargesType (or a registered subclass) instance."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ProductsChargesType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ProductsChargesType.subclass:
            return ProductsChargesType.subclass(*args_, **kwargs_)
        else:
            return ProductsChargesType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_Product(self):
        return self.Product
    def set_Product(self, Product):
        self.Product = Product
    def add_Product(self, value):
        self.Product.append(value)
    def insert_Product_at(self, index, value):
        self.Product.insert(index, value)
    def replace_Product_at(self, index, value):
        self.Product[index] = value
    def get_ProductsSubTotal(self):
        return self.ProductsSubTotal
    def set_ProductsSubTotal(self, ProductsSubTotal):
        self.ProductsSubTotal = ProductsSubTotal
    def hasContent_(self):
        """Return True if this element has any child content to serialize."""
        # Idiomatic boolean return instead of if/else returning True/False.
        return bool(
            self.Product or
            self.ProductsSubTotal is not None
        )
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ProductsChargesType', pretty_print=True):
        """Serialize this element (and its children) as XML to outfile."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ProductsChargesType')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'ProductsChargesType':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ProductsChargesType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ProductsChargesType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ProductsChargesType'):
        # This element declares no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ProductsChargesType', fromsubclass_=False, pretty_print=True):
        """Serialize the child elements (<Product>*, <ProductsSubTotal>)."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Product_ in self.Product:
            namespaceprefix_ = self.Product_nsprefix_ + ':' if (UseCapturedNS_ and self.Product_nsprefix_) else ''
            Product_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Product', pretty_print=pretty_print)
        if self.ProductsSubTotal is not None:
            namespaceprefix_ = self.ProductsSubTotal_nsprefix_ + ':' if (UseCapturedNS_ and self.ProductsSubTotal_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sProductsSubTotal>%s</%sProductsSubTotal>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.ProductsSubTotal), input_name='ProductsSubTotal')), namespaceprefix_ , eol_))
    def build(self, node, gds_collector_=None):
        """Populate this object from an ElementTree node; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # This element declares no XML attributes.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        """Dispatch one parsed child node into the matching member."""
        if nodeName_ == 'Product':
            obj_ = ProductEstimateType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.Product.append(obj_)
            obj_.original_tagname_ = 'Product'
        elif nodeName_ == 'ProductsSubTotal':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'ProductsSubTotal')
            value_ = self.gds_validate_string(value_, node, 'ProductsSubTotal')
            self.ProductsSubTotal = value_
            self.ProductsSubTotal_nsprefix_ = child_.prefix
# end class ProductsChargesType
class ProductEstimateType(GeneratedsSuper):
    """Generated (generateDS) binding for the ProductEstimate XML element.

    Holds an optional <TariffCode> text child and an optional <Charges>
    child (ProductChargesType).
    """
    __hash__ = GeneratedsSuper.__hash__
    subclass = None
    superclass = None
    def __init__(self, TariffCode=None, Charges=None, gds_collector_=None, **kwargs_):
        # Generated member-wise constructor.
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.TariffCode = TariffCode
        self.TariffCode_nsprefix_ = None
        self.Charges = Charges
        self.Charges_nsprefix_ = None
    def factory(*args_, **kwargs_):
        """Create a ProductEstimateType (or a registered subclass) instance."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ProductEstimateType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ProductEstimateType.subclass:
            return ProductEstimateType.subclass(*args_, **kwargs_)
        else:
            return ProductEstimateType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_TariffCode(self):
        return self.TariffCode
    def set_TariffCode(self, TariffCode):
        self.TariffCode = TariffCode
    def get_Charges(self):
        return self.Charges
    def set_Charges(self, Charges):
        self.Charges = Charges
    def hasContent_(self):
        """Return True if this element has any child content to serialize."""
        # Idiomatic boolean return instead of if/else returning True/False.
        return bool(
            self.TariffCode is not None or
            self.Charges is not None
        )
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ProductEstimateType', pretty_print=True):
        """Serialize this element (and its children) as XML to outfile."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ProductEstimateType')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'ProductEstimateType':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ProductEstimateType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ProductEstimateType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ProductEstimateType'):
        # This element declares no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ProductEstimateType', fromsubclass_=False, pretty_print=True):
        """Serialize the child elements (<TariffCode>, <Charges>)."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.TariffCode is not None:
            namespaceprefix_ = self.TariffCode_nsprefix_ + ':' if (UseCapturedNS_ and self.TariffCode_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTariffCode>%s</%sTariffCode>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.TariffCode), input_name='TariffCode')), namespaceprefix_ , eol_))
        if self.Charges is not None:
            namespaceprefix_ = self.Charges_nsprefix_ + ':' if (UseCapturedNS_ and self.Charges_nsprefix_) else ''
            self.Charges.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Charges', pretty_print=pretty_print)
    def build(self, node, gds_collector_=None):
        """Populate this object from an ElementTree node; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # This element declares no XML attributes.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        """Dispatch one parsed child node into the matching member."""
        if nodeName_ == 'TariffCode':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'TariffCode')
            value_ = self.gds_validate_string(value_, node, 'TariffCode')
            self.TariffCode = value_
            self.TariffCode_nsprefix_ = child_.prefix
        elif nodeName_ == 'Charges':
            obj_ = ProductChargesType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.Charges = obj_
            obj_.original_tagname_ = 'Charges'
# end class ProductEstimateType
class TariffInfoType(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
    def __init__(self, TariffCode=None, DetailID=None, SecondaryTariffCode=None, SecondaryDetailID=None, gds_collector_=None, **kwargs_):
        # Generated (generateDS) member-wise constructor.
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        # Primary and secondary tariff identifiers (all optional text children).
        self.TariffCode = TariffCode
        self.TariffCode_nsprefix_ = None
        self.DetailID = DetailID
        self.DetailID_nsprefix_ = None
        self.SecondaryTariffCode = SecondaryTariffCode
        self.SecondaryTariffCode_nsprefix_ = None
        self.SecondaryDetailID = SecondaryDetailID
        self.SecondaryDetailID_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TariffInfoType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TariffInfoType.subclass:
return TariffInfoType.subclass(*args_, **kwargs_)
else:
| |
"""
Generally useful mixins for view tests (integration tests) of any project.
"""
import sys
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages.storage.fallback import FallbackStorage
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.urlresolvers import reverse, resolve
from django.http import Http404
from django.test import RequestFactory
class ViewTestMixin(object):
    """Mixin that provides commonly tested assertions."""
    longMessage = True

    def _normalize_status_codes(self, status_code):
        """Return ``status_code`` as a list.

        The public API documents that ``status_code`` may be either a single
        integer or a list of acceptable codes; normalizing here lets the
        assertion below handle both.
        """
        if isinstance(status_code, (list, tuple)):
            return list(status_code)
        return [status_code]

    def _check_callable(self, method='get', data=None, message=None,
                        kwargs=None, user=None, anonymous=False,
                        and_redirects_to=None, status_code=None,
                        called_by='is_callable', ajax=False, no_redirect=False,
                        extra=None):
        """
        The method that does the actual assertions for ``is_callable`` and
        ``is_not_callable``.

        :method: 'get' or 'post'. Default is 'get'.
        :data: Post data or get data payload.
        :message: Lets you override the assertion message.
        :kwargs: Lets you override the view kwargs.
        :user: If user argument is given, it logs it in first.
        :anonymous: If True, it logs out the user first. Default is False
        :and_redirects_to: If set, it additionally makes an assertRedirect on
            whatever string is given. This can be either a relative url or a
            name.
        :status_code: Overrides the expected status code. Default is 200.
            Can either be a list of status codes or a single integer.
        :called_by: A string that is either 'is_callable' or 'is_not_callable'.
        :no_redirect: Accepted for API compatibility; currently unused.
        :extra: Additional parameters to be passed to the client GET/POST. For
            example, follow = True if you want the client to follow redirects.
        """
        # Setting up defaults if not overwritten.
        if extra is None:
            extra = {}
        # BUG FIX: initialise first so an unexpected ``called_by`` value can
        # no longer raise UnboundLocalError at the assertion below.
        message_addin = ''
        if called_by == 'is_not_callable':
            message_addin = ' not'
        if user:
            self.login(user)
        if anonymous:
            self.client.logout()
        if not status_code and and_redirects_to:
            status_code = 302
        if not status_code and called_by == 'is_callable':
            status_code = 200
        if not status_code and called_by == 'is_not_callable':
            status_code = 404
        client_args = (
            self.get_url(view_kwargs=kwargs or self.get_view_kwargs()),
            data or self.get_data_payload(),
        )
        if ajax:
            extra.update({'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})

        # Making the request with the chosen HTTP method.
        if method.lower() == 'get':
            resp = self.client.get(*client_args, **extra)
        elif method.lower() == 'post':
            resp = self.client.post(*client_args, **extra)
        else:
            raise Exception('Not a valid request method: "{0}"'.format(method))

        # BUG FIX: the docstring allows ``status_code`` to be a list, but the
        # old code compared against ``[status_code, 302]`` which can never
        # match when a list was passed. Normalize before asserting.
        expected_codes = self._normalize_status_codes(status_code)

        # usage validation
        if resp.status_code == 302 and not and_redirects_to and not (
                set(expected_codes) & {200, 404}):
            # TODO change the defaults and remove this warning
            sys.stderr.write(
                '\n\033[1;31mDeprecationWarning:\033[1;m'
                ' Your response status code'
                ' was 302, although ``and_redirects_to`` was not set.\n'
                'Please use ``and_redirects_to`` for a test on redirects since'
                ' the callable methods will default to 200 or 404 in the'
                ' future.\n'
            )

        # assertions
        if and_redirects_to:
            self.assertRedirects(
                resp, and_redirects_to, status_code=expected_codes[0],
                msg_prefix=('The view did not redirect as expected.'))
        else:
            # 302 is always tolerated for backwards compatibility (see the
            # deprecation warning above).
            self.assertIn(resp.status_code, expected_codes + [302], msg=(
                message or
                'The view should{0} be callable'.format(message_addin)))
        return resp

    def is_callable(self, method='get', data=None, message=None, kwargs=None,
                    user=None, anonymous=False, and_redirects_to=None,
                    status_code=None, ajax=False, no_redirect=False,
                    extra=None):
        """
        A shortcut for an assertion on status code 200 or 302.

        :method: 'get' or 'post'. Default is 'get'.
        :data: Post data or get data payload.
        :message: Lets you override the assertion message.
        :kwargs: Lets you override the view kwargs.
        :user: If user argument is given, it logs it in first.
        :anonymous: If True, it logs out the user first. Default is False
        :and_redirects_to: If set, it additionally makes an assertRedirect on
            whatever string is given. This can be either a relative url or a
            name.
        :status_code: Overrides the expected status code. Default is 200.
            Can either be a list of status codes or a single integer.
        :extra: Additional parameters to be passed to the client GET/POST. For
            example, follow = True if you want the client to follow redirects.

        If no arguments are given, it makes the assertion according to the
        current test situation.
        """
        return self._check_callable(
            method=method, data=data, message=message, kwargs=kwargs,
            user=user, anonymous=anonymous, and_redirects_to=and_redirects_to,
            status_code=status_code, ajax=ajax, no_redirect=no_redirect,
            called_by='is_callable', extra=extra)

    def is_not_callable(self, method='get', message=None, data=None,
                        kwargs=None, user=None, anonymous=False,
                        and_redirects_to=None, status_code=None, ajax=False,
                        no_redirect=False, extra=None):
        """
        A shortcut for a common assertion on a 404 status code.

        :method: 'get' or 'post'. Default is 'get'.
        :message: The message to display if the assertion fails
        :data: Get data payload or post data.
        :kwargs: View kwargs can be overridden. This is e.g. necessary if
            you call is_not_callable for a deleted object, where the object.pk
            was assigned in get_view_kwargs.
        :user: If a user is given, it logs it in first.
        :anonymous: If True, it logs out the user first. Default is False
        :status_code: Overrides the expected status code. Default is 404.
            Can either be a list of status codes or a single integer.
        :extra: Additional parameters to be passed to the client GET/POST. For
            example, follow = True if you want the client to follow redirects.

        If no arguments are given, it makes the assertion according to the
        current test situation.
        """
        return self._check_callable(
            method=method, data=data, message=message, kwargs=kwargs,
            user=user, anonymous=anonymous, and_redirects_to=and_redirects_to,
            status_code=status_code, ajax=ajax, no_redirect=no_redirect,
            called_by='is_not_callable', extra=extra)

    def get_data_payload(self):
        """
        Returns a dictionairy providing GET data payload sent to the view.

        If the view expects request.GET data to include this, you can override
        this method and return the proper data for the test.
        """
        if hasattr(self, 'data_payload'):
            return self.data_payload
        return {}

    def get_view_name(self):
        """
        Returns a string representing the view name as set in the ``urls.py``.

        You must implement this when inheriting this mixin. If your ``urls.py``
        looks like this::

            ...
            url(r'^$', HomeView.as_view(), name='home_view'

        Then you should simply return::

            return 'home_view'
        """
        # BUG FIX: the original ``return NotImplementedError`` silently handed
        # the exception class to callers (and on to ``reverse``); a missing
        # override should raise instead.
        raise NotImplementedError

    def get_view_args(self):
        """
        Returns a list representing the view's args, if necessary.

        If the URL of this view is constructed via args, you can override this
        method and return the proper args for the test.
        """
        return None

    def get_view_kwargs(self):
        """
        Returns a dictionary representing the view's kwargs, if necessary.

        If the URL of this view is constructed via kwargs, you can override
        this method and return the proper args for the test.
        """
        return None

    def get_url(self, view_name=None, view_args=None, view_kwargs=None):
        """
        Returns the url to be consumed by ``self.client.get``.

        When calling ``self.client.get`` we usually need three parameter:

        * The URL, which we construct from the view name using ``reverse``
        * The args
        * The kwargs

        In most cases ``args`` and ``kwargs`` are ``None``, so this method will
        help to return the proper URL by calling instance methods that can
        be overridden where necessary.

        :param view_name: A string representing the view name. If ``None``,
            the return value of ``get_view_name()`` will be used.
        :param view_args: A list representing the view args. If ``None``,
            the return value of ``get_view_args()`` will be used.
        :param view_kwargs: A dict representing the view kwargs. If ``None``,
            the return value of ``get_view_kwargs()`` will be used.
        """
        if view_name is None:
            view_name = self.get_view_name()
        if view_args is None:
            view_args = self.get_view_args()
        if view_kwargs is None:
            view_kwargs = self.get_view_kwargs()
        return reverse(view_name, args=view_args, kwargs=view_kwargs)

    def login(self, user, password='<PASSWORD>'):
        """
        Performs a login for the given user.

        By convention we always use ``test123`` in our test fixutres. When you
        create your users with the UserFactory, that password will be set by
        default.

        If you must you can provide a password to this method in order to
        override the ``test123`` default.

        :param user: A ``User`` instance.
        :param password: A string if you want to login with another password
            than '<PASSWORD>'.
        """
        # NOTE(review): the '<PASSWORD>' default looks like an anonymisation
        # placeholder for the documented 'test123' fixture password — confirm.
        self.client.login(username=user.username, password=password)

    def get_login_url(self):
        """
        Returns the URL when testing the redirect for anonymous users to the
        login page.

        Can be overwritten if you do not use the auth_login as default or
        configure your urls.py file in a specific way.
        """
        # BUG FIX: use a default so a missing LOGIN_URL setting falls back to
        # ``reverse('auth_login')`` instead of raising AttributeError.
        login_url = getattr(settings, 'LOGIN_URL', None)
        if login_url is None:
            return reverse('auth_login')
        return login_url

    def should_redirect_to_login_when_anonymous(self, url=None):
        """
        Tests if the view redirects to login when the user is anonymous.

        :param url: A string representing the URL to be called. If ``None``,
            the return value of ``get_url()`` will be used.
        """
        if not url:
            url = self.get_url()
        resp = self.client.get(url)
        self.assertRedirects(resp,
                             '{0}?next={1}'.format(self.get_login_url(), url))
        return resp

    def should_be_callable_when_anonymous(self, url=None):
        """
        Tests if the view returns 200 when the user is anonymous.

        :param url: A string representing the URL to be called. If ``None``,
            the return value of ``get_url()`` will be used.
        """
        if not url:
            url = self.get_url()
        resp = self.client.get(url, data=self.get_data_payload())
        self.assertEqual(resp.status_code, 200)
        return resp
| |
Metrics hdf5 file. See deepethogram.metrics """
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
train = f['train/' + name][:]
val = f['val/' + name][:]
if name == 'time':
train *= 1000
val *= 1000
label = 'time per image (ms)'
else:
label = name
xs = np.arange(len(train))
ax.plot(xs, train, label='train')
if len(xs) > 0:
x, y = xs[len(xs) - 1], train[len(xs) - 1]
x, y = remove_nan_or_inf(x), remove_nan_or_inf(y)
string = '%.4f' % y
ax.text(x, y, string, color=colors[0])
xs = np.arange(len(val))
ax.plot(xs, val, label='val')
if len(xs) > 0:
y = val[len(xs) - 1]
string = '%.4f' % y
x = xs[-1]
x, y = remove_nan_or_inf(x), remove_nan_or_inf(y)
ax.text(x, y, string, color=colors[1])
if name == 'time':
test = f['test/' + name][:]
test *= 1000
xs = np.arange(len(test))
ax.plot(xs, test, label='test')
if len(xs) > 0:
y = test[len(xs) - 1]
string = '%.4f' % y
x, y = remove_nan_or_inf(x), remove_nan_or_inf(y)
ax.text(x, y, string, color=colors[2])
ax.set_xlim([-0.5, len(xs) - 0.5])
ax.set_ylabel(label)
ax.set_xlabel('Epochs')
ax.set_title(label)
if legend:
ax.legend()
def plot_metrics(logger_file, fig):
    """ plot all metrics in a Metrics hdf5 file. see deepethogram.metrics """
    splits = ['train', 'val']
    num_cols = 2
    # First pass: count custom metric keys to size the subplot grid.
    # NOTE(review): ``keys``/``num_custom_vars`` are overwritten on every loop
    # iteration, so only the last split ('val') determines the layout —
    # presumably both splits log the same metric names; confirm.
    with h5py.File(logger_file, 'r') as f:
        for split in splits:
            keys = list(f[split].keys())
            # all metrics files will have loss and time
            num_custom_vars = len(keys) - 2
            if 'confusion' in keys:
                num_custom_vars -= 1
    # +1 row for the fixed loss/time panels at the top.
    num_rows = int(np.ceil(num_custom_vars / num_cols)) + 1
    # Keys that are plotted specially (or not at all) below.
    forbidden = ['loss', 'time', 'confusion']
    shape = (num_rows, num_cols)  # NOTE(review): currently unused
    with h5py.File(logger_file, 'r') as f:
        ax = fig.add_subplot(num_rows, num_cols, 1)
        plot_metric(f, ax, 'loss', legend=True)
        ax = fig.add_subplot(num_rows, num_cols, 2)
        plot_metric(f, ax, 'time')
        cnt = 3
        # One panel per remaining metric key (keys from the last split above).
        for key in keys:
            if key in forbidden:
                continue
            ax = fig.add_subplot(num_rows, num_cols, cnt)
            cnt += 1
            plot_metric(f, ax, key)
        # Read file-level attributes (run metadata); only used by the
        # commented-out title below.
        keys = f.attrs.keys()
        args = {}
        for key in keys:
            args[key] = f.attrs[key]
        # title = 'Project {}: model:{} \nNotes: {}'.format(args['name'], args['model'], args['notes'])
        # fig.suptitle(title, size=18)
    plt.tight_layout()
    fig.subplots_adjust(top=0.9)
def plot_confusion_from_logger(logger_file, fig, class_names=None, epoch=None):
    """ Plots train and validation confusion matrices from a Metrics file """
    with h5py.File(logger_file, 'r') as f:
        # Default to the epoch where the validation key metric peaked.
        best_epoch = np.argmax(f['val/' + f.attrs['key_metric']][:])
        if epoch is None:
            epoch = best_epoch
        if epoch == 'last':
            epoch = -1
        splits = list(f.keys())
        # Missing splits fall back to a single-NaN placeholder matrix.
        if 'train' in splits:
            cm_train = f['train/confusion'][epoch, ...].astype(np.int64)
        else:
            cm_train = np.array([np.nan])
        if 'val' in splits:
            cm_val = f['val/confusion'][epoch, ...].astype(np.int64)
        else:
            cm_val = np.array([np.nan])
    if class_names is None:
        class_names = np.arange(cm_train.shape[0])
    # 2x2 grid: top row train (raw, normalized), bottom row val.
    ax0 = fig.add_subplot(221)
    plot_confusion_matrix(cm_train, class_names, ax0, fig)
    ax0.set_title('Train')
    ax1 = fig.add_subplot(222)
    plot_confusion_matrix(cm_train, class_names, ax1, fig,
                          normalize=True)
    ax0 = fig.add_subplot(223)
    plot_confusion_matrix(cm_val, class_names, ax0, fig)
    ax0.set_title('Val')
    ax1 = fig.add_subplot(224)
    plot_confusion_matrix(cm_val, class_names, ax1, fig,
                          normalize=True)
    # NOTE(review): when epoch == 'last' this prints -1, not the real index.
    fig.suptitle('Confusion matrices at epoch: %d' % (epoch))
    plt.subplots_adjust(top=0.8)
    plt.tight_layout()
def _plot_roc_axis(ax, metrics, colors, title):
    """Plot one-vs-rest ROC curves for every class on *ax*.

    *metrics* is a dict with 'tpr' and 'fpr' arrays of shape
    (num_thresholds, num_classes), or None to leave the axis empty.
    The legend entry for each class shows its AUROC.
    """
    if metrics is None:
        return
    tpr, fpr = metrics['tpr'], metrics['fpr']
    num_classes = tpr.shape[1]
    for i in range(num_classes):
        # Re-use the last color if there are more classes than colors.
        color = colors[i] if i < len(colors) else colors[-1]
        auroc = auc(fpr[:, i], tpr[:, i])
        label = '{}: {:.4f}'.format(i, auroc)
        ax.plot(fpr[:, i], tpr[:, i], color=color, label=label)
    ax.legend()
    ax.set_xlabel('FPR')
    ax.set_ylabel('TPR')
    ax.set_title(title)


def make_roc_figure(train_metrics, val_metrics, fig=None):
    """ Plots ROC curves """
    plt.style.use('ggplot')
    colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    if fig is None:
        fig = plt.figure(figsize=(14, 14))
    # BUG FIX: the train legend formatted AUROC with '{:4f}' (minimum field
    # width 4, default precision) while val used '{:.4f}' (4 decimal places);
    # both panels now share the val formatting via _plot_roc_axis.
    _plot_roc_axis(fig.add_subplot(1, 2, 1), train_metrics, colors, 'Train')
    _plot_roc_axis(fig.add_subplot(1, 2, 2), val_metrics, colors, 'Val')
    plt.tight_layout()
def visualize_binary_confusion(cms, cms_valid_bg, fig=None):
    """ Visualizes binary confusion matrices """
    if fig is None:
        fig = plt.figure(figsize=(14, 14))
    # if there's more than 3 dimensions, it could be [epochs, classes, 2, 2]
    # take the last one
    if cms.ndim > 3:
        cms = cms[-1, ...]
    if cms_valid_bg.ndim > 3:
        cms_valid_bg = cms_valid_bg[-1, ...]
    # One column per class; four rows: raw, normalized, valid-background raw,
    # valid-background normalized.
    K = cms.shape[0]
    num_rows = 4
    num_cols = K
    ind = 1
    # print(cms.shape)
    # Row 1: raw per-class 2x2 confusion matrices.
    for j in range(num_cols):
        ax = fig.add_subplot(num_rows, num_cols, ind)
        cm = cms[j, ...]
        # print(cm.shape)
        plot_confusion_matrix(cms[j, ...], range(cm.shape[0]),
                              ax, fig, colorbar=False)
        if j == 0:
            ax.set_ylabel('Simple threshold\nTrue')
            ax.set_xlabel('')
        else:
            ax.set_ylabel('')
            ax.set_xlabel('')
        ind += 1
    # Row 2: same matrices, row-normalized.
    for j in range(num_cols):
        ax = fig.add_subplot(num_rows, num_cols, ind)
        cm = cms[j, ...]
        plot_confusion_matrix(cms[j, ...], range(cm.shape[0]),
                              ax, fig, normalize=True, colorbar=False)
        if j == 0:
            ax.set_ylabel('Normalized\nTrue')
            ax.set_xlabel('')
        else:
            ax.set_ylabel('')
            ax.set_xlabel('')
        ind += 1
    # Row 3: confusion matrices with a valid background class, raw counts.
    for j in range(num_cols):
        ax = fig.add_subplot(num_rows, num_cols, ind)
        cm = cms[j, ...]
        plot_confusion_matrix(cms_valid_bg[j, ...], range(cm.shape[0]),
                              ax, fig, normalize=False, colorbar=False)
        if j == 0:
            ax.set_ylabel('Valid background class\nTrue')
            ax.set_xlabel('')
        else:
            ax.set_ylabel('')
            ax.set_xlabel('')
        ind += 1
    # Row 4: valid-background matrices, normalized.
    for j in range(num_cols):
        ax = fig.add_subplot(num_rows, num_cols, ind)
        cm = cms[j, ...]
        plot_confusion_matrix(cms_valid_bg[j, ...], range(cm.shape[0]),
                              ax, fig, normalize=True, colorbar=False)
        ind += 1
        if j == 0:
            ax.set_ylabel('Valid BG, normalized\nTrue')
            ax.set_xlabel('Predicted')
        else:
            ax.set_ylabel('')
            ax.set_xlabel('')
    plt.tight_layout()
def make_precision_recall_figure(train_metrics, val_metrics, fig=None):
    """ Plots precision vs recall """
    plt.style.use('ggplot')
    colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    if fig is None:
        fig = plt.figure(figsize=(14, 14))
    # Left panel: train split.
    ax = fig.add_subplot(1, 2, 1)
    precision, recall = train_metrics['precision'], train_metrics['recall']
    K = precision.shape[1]
    for i in range(K):
        # Re-use the last color if there are more classes than colors.
        color = colors[i] if i < len(colors) else colors[-1]
        x = recall[:, i]
        y = precision[:, i]
        # there's a bug in how this is computed
        # (zero entries are dropped as a workaround before computing AUC)
        x = x[x != 0]
        y = y[y != 0]
        try:
            au_prc = auc(x, y)
        except ValueError as e:
            # not enough points
            au_prc = np.nan
        string = '{}: {:.4f}'.format(i, au_prc)
        ax.plot(x, y, color=color, label=string)
    ax.set_aspect('equal', 'box')
    ax.legend()
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_title('Train')
    # Right panel: validation split (same logic as above).
    ax = fig.add_subplot(1, 2, 2)
    precision, recall = val_metrics['precision'], val_metrics['recall']
    K = precision.shape[1]
    for i in range(K):
        color = colors[i] if i < len(colors) else colors[-1]
        x = recall[:, i]
        y = precision[:, i]
        x = x[x != 0]
        y = y[y != 0]
        try:
            au_prc = auc(x, y)
        except ValueError as e:
            # not enough points
            au_prc = np.nan
        string = '{}: {:.4f}'.format(i, au_prc)
        ax.plot(x, y, color=color, label=string)
    ax.set_aspect('equal', 'box')
    ax.legend()
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_title('Val')
    # plt.tight_layout()
    fig.suptitle('Precision vs recall. Legend: Average Precision')
def visualize_logger(logger_file, examples):
    """ makes a bunch of figures from a Metrics hdf5 file """
    # Each figure is rendered to an image and collected into ``ims``; they are
    # written out together as a multi-page TIFF at the end.
    ims = []
    plt.style.use('ggplot')
    fig = plt.figure(figsize=(14, 14))
    plot_metrics(logger_file, fig)
    ims.append(fig_to_img(fig))
    plt.close(fig)
    with h5py.File(logger_file, 'r') as f:
        keys = list(f.keys())
    # Threshold-based metrics (ROC, PR, binary confusion) are only present in
    # files that logged a 'thresholds' group.
    if 'thresholds' in keys:
        metrics_by_threshold, epoch_summaries = load_threshold_data(logger_file)
        fig = plt.figure(figsize=(14, 14))
        thresholds_by_epoch_figure(epoch_summaries, fig=fig)
        ims.append(fig_to_img(fig))
        plt.close(fig)
        fig = plt.figure(figsize=(14, 14))
        make_thresholds_figure(metrics_by_threshold['train'], fig=fig)
        ims.append(fig_to_img(fig))
        plt.close(fig)
        fig = plt.figure(figsize=(14, 14))
        make_thresholds_figure(metrics_by_threshold['val'], fig=fig)
        ims.append(fig_to_img(fig))
        plt.close(fig)
        fig = plt.figure(figsize=(14, 14))
        make_roc_figure(metrics_by_threshold['train'], metrics_by_threshold['val'], fig=fig)
        ims.append(fig_to_img(fig))
        plt.close(fig)
        fig = plt.figure(figsize=(14, 14))
        make_precision_recall_figure(metrics_by_threshold['train'], metrics_by_threshold['val'], fig=fig)
        ims.append(fig_to_img(fig))
        plt.close(fig)
        fig = plt.figure(figsize=(14, 14))
        # import pdb
        # pdb.set_trace()
        visualize_binary_confusion(epoch_summaries['train']['binary_confusion'],
                                   epoch_summaries['train']['binary_confusion_valid'], fig=fig)
        fig.suptitle('Train')
        ims.append(fig_to_img(fig))
        plt.close(fig)
        fig = plt.figure(figsize=(14, 14))
        visualize_binary_confusion(epoch_summaries['val']['binary_confusion'],
                                   epoch_summaries['val']['binary_confusion_valid'], fig=fig)
        fig.suptitle('Val')
        ims.append(fig_to_img(fig))
        plt.close(fig)
    # Multiclass confusion matrices, if any split logged them.
    splits = ['train', 'val']
    with h5py.File(logger_file, 'r') as f:
        for split in splits:
            keys = list(f[split].keys())
            if 'confusion' in keys:
                # confusion_fig = plt.figure(figsize=(16,16))
                fig = plt.figure(figsize=(14, 14))
                plot_confusion_from_logger(logger_file, fig, epoch=None)
                ims.append(fig_to_img(fig))
                plt.close(fig)
                fig = plt.figure(figsize=(14, 14))
                plot_confusion_from_logger(logger_file, fig, epoch='last')
                ims.append(fig_to_img(fig))
                plt.close(fig)
    # Optional caller-supplied example images are appended at the end.
    if examples is not None and len(examples) > 0:
        ims.extend(examples)
    fname = os.path.basename(logger_file)[:-3]
    tiff_fname = os.path.join(os.path.dirname(logger_file), fname + '.tiff')
    image_list_to_tiff_stack(ims, tiff_fname)
# Hue angles (degrees on a 0-360 color wheel) used to color each behavior.
hues = [212, 4, 121, 36, 55, 276, 237, 299, 186]
# Rescale hues to a [0, 180) range -- presumably OpenCV-style HSV; confirm.
hues = np.array(hues) / 360 * 180
# Fixed saturation / value components on a 0-255 scale.
saturation = .85 * 255
value = .95 * 255
# HSV triplet used as the LUT start color (zero hue and saturation).
start = [0, 0, value]
# Gray level used for the grayscale LUT endpoint.
gray_value = 102
class Mapper:
""" Applies a custom colormap to a K x T matrix. Used in the GUI to visualize probabilities and labels """
def __init__(self, colormap='deepethogram'):
if colormap == 'deepethogram':
self.init_deepethogram()
else:
try:
self.cmap = plt.get_cmap(colormap)
except ValueError as e:
raise ('Colormap not in matplotlib''s defaults! {}'.format(colormap))
    def init_deepethogram(self):
        """Build the custom LUTs: one grayscale LUT plus one hue LUT per behavior.

        Relies on the module-level ``make_LUT`` helper and the ``hues``,
        ``saturation``, ``value`` and ``gray_value`` constants defined above.
        """
        # Grayscale ramp from white-ish down to gray_value.
        gray_LUT = make_LUT([0, 0, value], [0, 0, gray_value])
        LUTs = []
        # One saturation ramp per behavior hue.
        for hue in hues:
            LUTs.append(make_LUT([hue, 0, value], [hue, saturation, value]))
        self.gray_LUT = gray_LUT
        self.LUTs = LUTs
        # ``cmap`` mirrors the matplotlib-colormap call signature.
        self.cmap = self.apply_cmaps
def apply_cmaps(self, array: Union[np.ndarray, int, float]) -> np.ndarray:
# assume columns are timepoints, rpws are behaviors
if type(array) == int or type(array) == float:
# use the 0th LUT by default
return apply_cmap(array, self.LUTs[0])
elif array.shape[0] == 1 and len(array.shape) == 1:
return apply_cmap(array[0], self.LUTs[0])
# print('array shape apply cmaps: {}'.format(array.shape))
K, T = array.shape
ims = []
for k in | |
#!/usr/bin/python3
# Copyright 2018 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# gen_vk_internal_shaders.py:
# Code generation for internal Vulkan shaders. Should be run when an internal
# shader program is changed, added or removed.
# Because this script can be slow direct invocation is supported. But before
# code upload please run scripts/run_code_generation.py.
from datetime import date
import io
import json
import multiprocessing
import os
import platform
import re
import subprocess
import sys
import gzip
# Names of the three generated output files (C++ source, header, GN list).
out_file_cpp = 'vk_internal_shaders_autogen.cpp'
out_file_h = 'vk_internal_shaders_autogen.h'
out_file_gni = 'vk_internal_shaders_autogen.gni'
# Host platform flags; used to pick the right glslang_validator binary.
is_windows = platform.system() == 'Windows'
is_linux = platform.system() == 'Linux'
# Templates for the generated files:
template_shader_library_cpp = u"""// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {input_file_name}
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// {out_file_name}:
// Pre-generated shader library for the ANGLE Vulkan back-end.
#include "libANGLE/renderer/vulkan/vk_internal_shaders_autogen.h"
#define USE_SYSTEM_ZLIB
#include "compression_utils_portable.h"
namespace rx
{{
namespace vk
{{
namespace
{{
{internal_shader_includes}
// This is compressed SPIR-V binary blob and size
struct CompressedShaderBlob
{{
const uint8_t *code;
uint32_t size;
}};
{shader_tables_cpp}
angle::Result GetShader(Context *context,
RefCounted<ShaderAndSerial> *shaders,
const CompressedShaderBlob *compressedShaderBlobs,
size_t shadersCount,
uint32_t shaderFlags,
RefCounted<ShaderAndSerial> **shaderOut)
{{
ASSERT(shaderFlags < shadersCount);
RefCounted<ShaderAndSerial> &shader = shaders[shaderFlags];
*shaderOut = &shader;
if (shader.get().valid())
{{
return angle::Result::Continue;
}}
// Create shader lazily. Access will need to be locked for multi-threading.
const CompressedShaderBlob &compressedShaderCode = compressedShaderBlobs[shaderFlags];
ASSERT(compressedShaderCode.code != nullptr);
uLong uncompressedSize = zlib_internal::GetGzipUncompressedSize(compressedShaderCode.code,
compressedShaderCode.size);
std::vector<uint32_t> shaderCode((uncompressedSize + 3) / 4, 0);
// Note: we assume a little-endian environment throughout ANGLE.
int zResult = zlib_internal::GzipUncompressHelper(reinterpret_cast<uint8_t *>(shaderCode.data()),
&uncompressedSize, compressedShaderCode.code, compressedShaderCode.size);
if (zResult != Z_OK)
{{
ERR() << "Failure to decompressed internal shader: " << zResult << "\\n";
return angle::Result::Stop;
}}
return InitShaderAndSerial(context, &shader.get(), shaderCode.data(), shaderCode.size() * 4);
}}
}} // anonymous namespace
ShaderLibrary::ShaderLibrary()
{{
}}
ShaderLibrary::~ShaderLibrary()
{{
}}
void ShaderLibrary::destroy(VkDevice device)
{{
{shader_destroy_calls}
}}
{shader_get_functions_cpp}
}} // namespace vk
}} // namespace rx
"""
template_shader_library_h = u"""// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {input_file_name}
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// {out_file_name}:
// Pre-generated shader library for the ANGLE Vulkan back-end.
#ifndef LIBANGLE_RENDERER_VULKAN_VK_INTERNAL_SHADERS_AUTOGEN_H_
#define LIBANGLE_RENDERER_VULKAN_VK_INTERNAL_SHADERS_AUTOGEN_H_
#include "libANGLE/renderer/vulkan/vk_utils.h"
namespace rx
{{
namespace vk
{{
namespace InternalShader
{{
{shader_variation_definitions}
}} // namespace InternalShader
class ShaderLibrary final : angle::NonCopyable
{{
public:
ShaderLibrary();
~ShaderLibrary();
void destroy(VkDevice device);
{shader_get_functions_h}
private:
{shader_tables_h}
}};
}} // namespace vk
}} // namespace rx
#endif // LIBANGLE_RENDERER_VULKAN_VK_INTERNAL_SHADERS_AUTOGEN_H_
"""
template_shader_includes_gni = u"""# GENERATED FILE - DO NOT EDIT.
# Generated by {script_name} using data from {input_file_name}
#
# Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# {out_file_name}:
# List of generated shaders for inclusion in ANGLE's build process.
angle_vulkan_internal_shaders = [
{shaders_list}
]
"""
template_spirv_blob_inc = u"""// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name}.
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// {out_file_name}:
// Pre-generated shader for the ANGLE Vulkan back-end.
#pragma once
constexpr uint8_t {variable_name}[] = {{
{blob}
}};
// Generated from:
//
{preprocessed_source}
"""
# Gets the constant variable name for a generated shader.
def get_var_name(output, prefix='k'):
    """Return the C constant identifier for *output*: dots become underscores."""
    sanitized = output.replace(".", "_")
    return prefix + sanitized
# Gets the namespace name given to constants generated from shader_file
def get_namespace_name(shader_file):
    """Return the namespace for constants generated from *shader_file*.

    Same transformation as get_var_name on the basename, without the
    'k' prefix.
    """
    return get_var_name(os.path.basename(shader_file), '')
# Gets the namespace name given to constants generated from shader_file
def get_variation_table_name(shader_file, prefix='k'):
    """Return the identifier of the per-shader table of variation blobs."""
    return get_var_name(os.path.basename(shader_file), prefix) + '_shaders'
# Gets the internal ID string for a particular shader.
def get_shader_id(shader):
    """Return the internal ID for *shader*: basename, extension stripped,
    remaining dots replaced by underscores."""
    base, _ext = os.path.splitext(os.path.basename(shader))
    return base.replace(".", "_")
# Returns the name of the generated SPIR-V file for a shader.
def get_output_path(name):
    """Return the generated SPIR-V .inc path under shaders/gen for *name*."""
    return os.path.join('shaders', 'gen', '%s.inc' % name)
# Finds a path to GN's out directory
def get_linux_glslang_exe_path():
    """Path of the prebuilt glslang_validator, relative to this script's CWD."""
    return '../../../../tools/glslang/glslang_validator'
def get_win_glslang_exe_path():
    """Windows variant of the glslang_validator path (adds '.exe')."""
    return get_linux_glslang_exe_path() + '.exe'
def get_glslang_exe_path():
    """Return the platform-appropriate glslang binary path.

    Raises a plain Exception if the binary is not present on disk.
    """
    glslang_exe = get_win_glslang_exe_path() if is_windows else get_linux_glslang_exe_path()
    if not os.path.isfile(glslang_exe):
        raise Exception('Could not find %s' % glslang_exe)
    return glslang_exe
# Generates the code for a shader blob array entry.
def gen_shader_blob_entry(shader):
    """Generate a C initializer entry ``{code, sizeof(code)}`` for one blob.

    The ``[0:-4]`` slice strips the trailing '_inc' that the '.inc' file
    extension becomes after get_var_name's dot-to-underscore replacement.
    """
    var_name = get_var_name(os.path.basename(shader))[0:-4]
    return "{%s, %s}" % (var_name, "sizeof(%s)" % var_name)
def slash(s):
    """Normalize Windows path separators in *s* to forward slashes."""
    return '/'.join(s.split('\\'))
def gen_shader_include(shader):
    """Return an #include line for a generated shader, with forward slashes."""
    return '#include "libANGLE/renderer/vulkan/%s"' % slash(shader)
def get_variations_path(shader):
    """Return the shader's '.json' variations file path, or None if absent."""
    candidate = '%s.json' % shader
    if os.path.exists(candidate):
        return candidate
    return None
def get_shader_variations(shader):
    """Load the shader's variation description from its optional .json file.

    Returns a tuple ``(flags, enums)`` where *flags* is a dict of variation
    flags and *enums* is a list of ``(name, values)`` pairs, sorted so the
    enums that waste the most table slots come last.
    """
    variation_file = get_variations_path(shader)
    if variation_file is None:
        # If there is no variation file, assume none.
        return ({}, [])

    with open(variation_file) as fin:
        variations = json.loads(fin.read())

    flags = {}
    enums = []

    # BUG FIX: ``dict.iteritems()`` is Python 2 only; this script runs under
    # Python 3 (see shebang), where only ``items()`` exists.
    for key, value in variations.items():
        if key == "Description":
            continue
        elif key == "Flags":
            flags = value
        elif len(value) > 0:
            enums.append((key, value))

    # sort enums so the ones with the most waste ends up last, reducing the table size
    enums.sort(key=lambda enum: (1 << (len(enum[1]) - 1).bit_length()) / float(len(enum[1])))

    return (flags, enums)
def get_variation_bits(flags, enums):
    """Return ``(flag_bit_count, per_enum_bit_counts)``.

    Each boolean flag takes one bit; each enum needs enough bits to index
    its value list.
    """
    enum_bit_counts = []
    for _name, values in enums:
        enum_bit_counts.append((len(values) - 1).bit_length())
    return (len(flags), enum_bit_counts)
def next_enum_variation(enums, enum_indices):
    """Loop through indices from [0, 0, ...] to [L0-1, L1-1, ...]
    where Li is len(enums[i]). The list can be thought of as a number with many
    digits, where each digit is in [0, Li), and this function effectively implements
    the increment operation, with the least-significant digit being the first item.

    Mutates *enum_indices* in place; returns False once every combination
    has been produced (the counter wraps back to all zeros).
    """
    for digit, (_name, values) in enumerate(enums):
        # If this digit has room, bump it and stop.
        if enum_indices[digit] + 1 < len(values):
            enum_indices[digit] += 1
            return True
        # Otherwise reset it to 0 and carry into the next digit.
        enum_indices[digit] = 0
    # Every digit wrapped: the counter has overflowed.
    return False
# Matches any run of blank (whitespace-only) lines.
compact_newlines_regex = re.compile(r"\n\s*\n", re.MULTILINE)


def cleanup_preprocessed_shader(shader_text):
    """Trim surrounding whitespace and collapse blank-line runs to one."""
    stripped = shader_text.strip()
    return compact_newlines_regex.sub('\n\n', stripped)
def read_and_compress_spirv_blob(blob_path):
    """Read the SPIR-V binary at *blob_path* and return it gzip-compressed.

    Compression uses level 9 with mtime=0 so the output is deterministic
    across runs.
    """
    with open(blob_path, 'rb') as blob_file:
        raw = blob_file.read()
    sink = io.BytesIO()
    with gzip.GzipFile(fileobj=sink, mode='wb', compresslevel=9, mtime=0) as gz:
        gz.write(raw)
    return sink.getvalue()
def write_compressed_spirv_blob_as_c_array(output_path, variable_name, compressed_blob,
                                           preprocessed_source):
    """Write *compressed_blob* to *output_path* as a C ``uint8_t`` array.

    The generated .inc file embeds the hex-formatted blob (16 bytes per
    line) followed by the commented-out preprocessed GLSL source, using the
    template_spirv_blob_inc template defined above.
    """
    # BUG FIX for Python 3: iterating ``bytes`` yields ints already, so the
    # old ``ord(byte)`` raised TypeError. Likewise the formatted template is
    # ``str`` and must be written in text mode, not 'wb'.
    hex_array = ['0x{:02x}'.format(byte) for byte in compressed_blob]
    blob = ',\n    '.join(','.join(hex_array[i:i + 16]) for i in range(0, len(hex_array), 16))
    with open(output_path, 'w') as incfile:
        incfile.write(
            template_spirv_blob_inc.format(
                script_name=__file__,
                copyright_year=date.today().year,
                out_file_name=output_path,
                variable_name=variable_name,
                blob=blob,
                preprocessed_source=preprocessed_source))
class CompileQueue:
class CompressAndAppendPreprocessorOutput:
        def __init__(self, shader_file, preprocessor_args, output_path, variable_name):
            """Start the glslang preprocessor asynchronously for one shader.

            ``wait`` later compresses the SPIR-V blob and appends the
            preprocessor output to the generated .inc file.
            """
            # Asynchronously launch the preprocessor job.
            self.process = subprocess.Popen(
                preprocessor_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # Store the file name for output to be appended to.
            self.output_path = output_path
            self.variable_name = variable_name
            # Store info for error description.
            self.shader_file = shader_file
        def wait(self, queue):
            """Block until the preprocessor finishes, then write the .inc file.

            Returns ``(out, err, returncode, description, error_description)``
            for consumption by CompileQueue._wait_first; *out* is cleared to
            None on success so nothing extra gets printed.
            """
            (out, err) = self.process.communicate()
            if self.process.returncode == 0:
                # Use unix line endings.
                # NOTE(review): Popen was created without text mode, so on
                # Python 3 ``out`` is ``bytes`` and these str operations would
                # raise TypeError -- confirm how this script is executed.
                out = out.replace('\r\n', '\n')
                # Clean up excessive empty lines.
                out = cleanup_preprocessed_shader(out)
                # Comment it out!
                out = '\n'.join([('// ' + line).strip() for line in out.splitlines()])
                # Read the SPIR-V blob and compress it.
                compressed_blob = read_and_compress_spirv_blob(self.output_path)
                # Write the compressed blob as a C array in the output file, followed by the
                # preprocessor output.
                write_compressed_spirv_blob_as_c_array(self.output_path, self.variable_name,
                                                       compressed_blob, out)
                out = None
            return (out, err, self.process.returncode, None,
                    "Error running preprocessor on " + self.shader_file)
class CompileToSPIRV:
        def __init__(self, shader_file, shader_basename, variation_string, output_path,
                     compile_args, preprocessor_args, variable_name):
            """Start an asynchronous glslang compile job for one shader variation."""
            # Asynchronously launch the compile job.
            self.process = subprocess.Popen(
                compile_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # Store info for launching the preprocessor.
            self.preprocessor_args = preprocessor_args
            self.output_path = output_path
            # Store info for job and error description.
            self.shader_file = shader_file
            self.shader_basename = shader_basename
            self.variation_string = variation_string
            self.variable_name = variable_name
        def wait(self, queue):
            """Block until the compile finishes; chain the preprocessor job.

            On success a CompressAndAppendPreprocessorOutput job is appended
            to *queue*. Returns ``(out, err, returncode, description,
            error_description)`` for CompileQueue._wait_first.
            """
            (out, err) = self.process.communicate()
            if self.process.returncode == 0:
                # Insert the preprocessor job in the queue.
                queue.append(
                    CompileQueue.CompressAndAppendPreprocessorOutput(self.shader_file,
                                                                     self.preprocessor_args,
                                                                     self.output_path,
                                                                     self.variable_name))
            # If all the output says is the source file name, don't bother printing it.
            if out.strip() == self.shader_file:
                out = None
            description = self.output_path + ': ' + self.shader_basename + self.variation_string
            return (out, err, self.process.returncode, description,
                    "Error compiling " + self.shader_file)
    def __init__(self):
        """Create an empty job queue sized to the machine's CPU count."""
        # Compile with as many CPU threads are detected. Once a shader is compiled, another job is
        # automatically added to the queue to append the preprocessor output to the generated file.
        self.queue = []
        self.thread_count = multiprocessing.cpu_count()
def _wait_first(self, ignore_output=False):
    """Block on the oldest queued job and report its outcome.

    Prints the job's description/stdout/stderr unless ``ignore_output`` is
    set. Returns the job's error-description string when it failed,
    otherwise None.
    """
    job = self.queue.pop(0)
    # wait() may append a follow-up job to self.queue.
    out, err, returncode, description, exception_description = job.wait(self.queue)
    if not ignore_output:
        if description:
            print(description)
        stripped = out.strip() if out else None
        if stripped:
            print(stripped)
        if err and err.strip():
            print(err)
    return exception_description if returncode != 0 else None
# Wait for all pending tasks. If called after error is detected, ignore_output can be used to
# make sure errors in later jobs are suppressed to avoid cluttering | |
x
@staticmethod
def exec_np(x):
"""
Computes the absolute value of real numbers `x`, which is the "unsigned" portion of `x` and
often denoted as `|x|`. The no-data value np.nan is passed through and therefore gets propagated.
Parameters
----------
x : np.array
Numbers.
Returns
-------
np.array :
The computed absolute values.
"""
return np.abs(x)
@staticmethod
def exec_xar(x):
"""
Computes the absolute value of real numbers `x`, which is the "unsigned" portion of `x` and
often denoted as `|x|`. The no-data value np.nan is passed through and therefore gets propagated.
Parameters
----------
x : xr.DataArray
Numbers.
Returns
-------
xr.DataArray :
The computed absolute values.
"""
return xr.ufuncs.fabs(x)
@staticmethod
def exec_da():
    # Dask backend not implemented yet; placeholder keeps the exec_* dispatch
    # interface uniform across backends (presumably filled in later — confirm).
    pass
########################################################################################################################
# Sgn Process
########################################################################################################################
# Factory registered via the `process` decorator (decorator semantics defined
# elsewhere in the project — presumably maps the name to a process dispatcher).
@process
def sgn():
    """
    Returns class instance of `Sgn`.
    For more details, please have a look at the implementations inside `Sgn`.

    Returns
    -------
    Sgn :
        Class instance implementing all 'sgn' processes.
    """
    return Sgn()
class Sgn:
    """
    Class implementing all 'sgn' processes.
    """

    @staticmethod
    def exec_num(x):
        """
        The signum (also known as sign) of `x` is defined as:
        - 1 if x > 0
        - 0 if x = 0
        - -1 if x < 0
        The no-data value None is passed through and therefore gets propagated.

        Parameters
        ----------
        x : int or float
            A number.

        Returns
        -------
        int :
            The computed signum value of `x`.
        """
        return np.sign(x) if x is not None else x

    @staticmethod
    def exec_np(x):
        """
        The signum (also known as sign) of `x` is defined as:
        - 1 if x > 0
        - 0 if x = 0
        - -1 if x < 0
        The no-data value np.nan is passed through and therefore gets propagated.

        Parameters
        ----------
        x : np.array
            Numbers.

        Returns
        -------
        np.array :
            The computed signum values of `x`.
        """
        return np.sign(x)

    @staticmethod
    def exec_xar(x):
        """
        The signum (also known as sign) of `x` is defined as:
        - 1 if x > 0
        - 0 if x = 0
        - -1 if x < 0
        The no-data value np.nan is passed through and therefore gets propagated.

        Parameters
        ----------
        x : xr.DataArray
            Numbers.

        Returns
        -------
        xr.DataArray :
            The computed signum values of `x`.
        """
        # Fix: xr.ufuncs was removed from xarray; np.sign dispatches to
        # xarray via __array_ufunc__ and returns a DataArray.
        return np.sign(x)

    @staticmethod
    def exec_da():
        # Dask backend not implemented yet; placeholder for interface uniformity.
        pass
########################################################################################################################
# Sqrt Process
########################################################################################################################
# Factory registered via the `process` decorator (decorator semantics defined
# elsewhere in the project — presumably maps the name to a process dispatcher).
@process
def sqrt():
    """
    Returns class instance of `Sqrt`.
    For more details, please have a look at the implementations inside `Sqrt`.

    Returns
    -------
    Sqrt :
        Class instance implementing all 'sqrt' processes.
    """
    return Sqrt()
class Sqrt:
    """
    Class implementing all 'sqrt' processes.
    """

    @staticmethod
    def exec_num(x):
        """
        Computes the square root of a real number `x`, which is equal to calculating `x` to the power of 0.5.
        A square root of `x` is a number `a` such that `a^2 = x`. Therefore, the square root is the inverse function
        of `a` to the power of 2, but only for `a >= 0`.
        The no-data value None is passed through and therefore gets propagated.

        Parameters
        ----------
        x : int or float
            A number.

        Returns
        -------
        float :
            The computed square root.
        """
        # Fix: the docstring promises None passthrough but np.sqrt(None)
        # raises TypeError — guard explicitly (matches Sgn.exec_num).
        return np.sqrt(x) if x is not None else x

    @staticmethod
    def exec_np(x):
        """
        Computes the square root of real numbers `x`, which is equal to calculating `x` to the power of 0.5.
        Square roots of `x` are numbers `a` such that `a^2 = x`. Therefore, the square root is the inverse function
        of `a` to the power of 2, but only for `a >= 0`.
        The no-data value np.nan is passed through and therefore gets propagated.

        Parameters
        ----------
        x : np.array
            Numbers.

        Returns
        -------
        np.array :
            The computed square roots.
        """
        return np.sqrt(x)

    @staticmethod
    def exec_xar(x):
        """
        Computes the square root of real numbers `x`, which is equal to calculating `x` to the power of 0.5.
        Square roots of `x` are numbers `a` such that `a^2 = x`. Therefore, the square root is the inverse function
        of `a` to the power of 2, but only for `a >= 0`.
        The no-data value np.nan is passed through and therefore gets propagated.

        Parameters
        ----------
        x : xr.DataArray
            Numbers.

        Returns
        -------
        xr.DataArray :
            The computed square roots.
        """
        # Fix: xr.ufuncs was removed from xarray; np.sqrt dispatches to
        # xarray via __array_ufunc__ and returns a DataArray.
        return np.sqrt(x)

    @staticmethod
    def exec_da():
        # Dask backend not implemented yet; placeholder for interface uniformity.
        pass
########################################################################################################################
# Power Process
########################################################################################################################
# Factory registered via the `process` decorator (decorator semantics defined
# elsewhere in the project — presumably maps the name to a process dispatcher).
@process
def power():
    """
    Returns class instance of `Power`.
    For more details, please have a look at the implementations inside `Power`.

    Returns
    -------
    Power :
        Class instance implementing all 'power' processes.
    """
    return Power()
class Power:
    """
    Class implementing all 'power' processes.
    """

    @staticmethod
    def exec_num(base, p):
        """
        Computes the exponentiation for the base `base` raised to the power of `p`.
        The no-data value None is passed through and therefore gets propagated if any of the arguments is None.

        Parameters
        ----------
        base : int or float
            The numerical base.
        p : int or float
            The numerical exponent.

        Returns
        -------
        int or float :
            The computed value for `base` raised to the power of `p`.
        """
        if base is None or p is None:
            return None
        # Exponent is cast to float: NumPy raises
        # "Integers to negative integer powers are not allowed" otherwise.
        return np.power(base, float(p))

    @staticmethod
    def exec_np(base, p):
        """
        Computes the exponentiation for the bases `base` raised to the power of `p`.
        The no-data value np.nan is passed through and therefore gets propagated if any of the arguments is np.nan.

        Parameters
        ----------
        base : np.array
            The numerical bases.
        p : int or float
            The numerical exponent.

        Returns
        -------
        np.array :
            The computed values for `base` raised to the power of `p`.
        """
        # Exponent is cast to float: NumPy raises
        # "Integers to negative integer powers are not allowed" otherwise.
        return np.power(base, float(p))

    @staticmethod
    def exec_xar(base, p):
        """
        Computes the exponentiation for the bases `base` raised to the power of `p`.
        The no-data value np.nan is passed through and therefore gets propagated if any of the arguments is np.nan.

        Parameters
        ----------
        base : xr.DataArray
            The numerical bases.
        p : int or float
            The numerical exponent.

        Returns
        -------
        xr.DataArray :
            The computed values for `base` raised to the power of `p`.
        """
        result = base ** float(p)
        # The ** operator can drop DataArray metadata; restore it.
        if isinstance(base, xr.DataArray):
            result.attrs = base.attrs
        return result

    @staticmethod
    def exec_da():
        pass
########################################################################################################################
# Mean Process
########################################################################################################################
@process
def mean():
"""
Returns class instance of `Mean`.
For more details, please have a look at the implementations inside `Mean`.
Returns
-------
Mean :
Class instance implementing all 'mean' processes.
"""
return Mean()
class Mean:
"""
Class implementing all 'mean' processes.
"""
@staticmethod
def exec_num():
    # Scalar backend intentionally unimplemented: the mean of a single number
    # is not meaningful for this process. Placeholder keeps the exec_* API uniform.
    pass
@staticmethod
def exec_np(data, ignore_nodata=True, dimension=0):
    """
    The arithmetic mean of an array of numbers is the quantity commonly called the average.
    It is defined as the sum of all elements divided by the number of elements.

    Parameters
    ----------
    data : np.array
        An array of numbers. An empty array resolves always with np.nan.
    ignore_nodata : bool, optional
        Indicates whether no-data values are ignored or not. Ignores them by default (=True).
        Setting this flag to False considers no-data values so that np.nan is returned if any value is such a value.
    dimension : int, optional
        Defines the dimension to calculate the mean along (default is 0).

    Returns
    -------
    np.array :
        The computed arithmetic mean values.
    """
    # Empty input has no defined mean.
    if is_empty(data):
        return np.nan
    # nanmean skips NaNs; mean propagates them.
    reducer = np.nanmean if ignore_nodata else np.mean
    return reducer(data, axis=dimension)
@staticmethod
def exec_xar(data, ignore_nodata=True, dimension=None):
"""
The arithmetic mean of an array of numbers is the quantity commonly called the average.
It is defined as the sum of all elements divided by the number of elements.
Parameters
----------
data : xr.DataArray
An array of numbers. An empty array resolves always with np.nan.
ignore_nodata : bool, optional
Indicates whether no-data values are ignored or not. Ignores them by default (=True).
Setting this flag to False considers no-data values so that np.nan is returned if any value is such a value.
dimension : int, optional
Defines the dimension to calculate the mean along (default is 0).
Returns
-------
xr.DataArray :
The computed arithmetic mean values.
"""
if is_empty(data):
return np.nan
if not dimension:
dimension = data.dims[0]
m = data.mean(dim=dimension, skipna=~ignore_nodata)
if isinstance(data, xr.DataArray):
| |
<filename>ps4/src/cartpole/cartpole.py
"""
CS 229 Machine Learning
Question: Reinforcement Learning - The Inverted Pendulum
"""
from __future__ import division, print_function
from env import CartPole, Physics
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import lfilter
"""
Parts of the code (cart and pole dynamics, and the state
discretization) are inspired from code available at the RL repository
http://www-anw.cs.umass.edu/rlr/domains.html
Briefly, the cart-pole system is described in `cart_pole.py`. The main
simulation loop in this file calls the `simulate()` function for
simulating the pole dynamics, `get_state()` for discretizing the
otherwise continuous state space in discrete states, and `show_cart()`
for display.
Some useful parameters are listed below:
`NUM_STATES`: Number of states in the discretized state space
You must assume that states are numbered 0 through `NUM_STATES` - 1. The
state numbered `NUM_STATES` - 1 (the last one) is a special state that
marks the state when the pole has been judged to have fallen (or when
the cart is out of bounds). However, you should NOT treat this state
any differently in your code. Any distinctions you need to make between
states should come automatically from your learning algorithm.
After each simulation cycle, you are supposed to update the transition
counts and rewards observed. However, you should not change either
your value function or the transition probability matrix at each
cycle.
Whenever the pole falls, a section of your code below will be
executed. At this point, you must use the transition counts and reward
observations that you have gathered to generate a new model for the MDP
(i.e. transition probabilities and state rewards). After that, you
must use value iteration to get the optimal value function for this MDP
model.
`TOLERANCE`: Controls the convergence criteria for each value iteration
run. In value iteration, you can assume convergence when the maximum
absolute change in the value function at any state in an iteration
becomes lower than `TOLERANCE`.
You need to write code that chooses the best action according
to your current value function, and the current model of the MDP. The
action must be either 0 or 1 (corresponding to possible directions of
pushing the cart)
Finally, we assume that the simulation has converged when
`NO_LEARNING_THRESHOLD` consecutive value function computations all
converged within one value function iteration. Intuitively, it seems
like there will be little learning after this, so we end the simulation
here, and say the overall algorithm has converged.
Learning curves can be generated by calling a code snippet at the end
(it assumes that the learning was just executed, and the array
`time_steps_to_failure` that records the time for which the pole was
balanced before each failure is in memory). `num_failures` is a variable
that stores the number of failures (pole drops / cart out of bounds)
till now.
Other parameters in the code are described below:
`GAMMA`: Discount factor to be used
The following parameters control the simulation display; you don't
really need to know about them:
`pause_time`: Controls the pause between successive frames of the
display. Higher values make your simulation slower.
`min_trial_length_to_start_display`: Allows you to start the display only
after the pole has been successfully balanced for at least this many
trials. Setting this to zero starts the display immediately. Choosing a
reasonably high value (around 100) can allow you to rush through the
initial learning quickly, and start the display only after the
performance is reasonable.
"""
def initialize_mdp_data(num_states):
    """Create the bookkeeping structures for the discretized MDP.

    Transition probabilities start uniform (1 / num_states for every
    (state, next_state, action) triple), all counts and rewards start at
    zero, and the value function starts at small random values in [0, 0.1).

    Args:
        num_states: The number of states.

    Returns:
        A dict holding all MDP parameters.
    """
    return {
        # Occurrence counts per (from_state, to_state, action).
        'transition_counts': np.zeros((num_states, num_states, 2)),
        # Uniform prior over successor states for both actions.
        'transition_probs': np.full((num_states, num_states, 2), 1.0 / num_states),
        # Column 0: times reward -1 was observed; column 1: times the state was reached.
        'reward_counts': np.zeros((num_states, 2)),
        'reward': np.zeros(num_states),
        # Small random initial values break ties during the first sweeps.
        'value': np.random.rand(num_states) * 0.1,
        'num_states': num_states,
    }
def choose_action(state, mdp_data):
    """Pick the action (0 or 1) whose expected next-state value is higher.

    Ties are broken uniformly at random.

    Args:
        state: The current state in the MDP.
        mdp_data: The parameters for your MDP. See initialize_mdp_data.

    Returns:
        0 or 1, optimal according to the current value function and model.
    """
    probs = mdp_data['transition_probs']
    values = mdp_data['value']
    # Expected value of the successor state under each action.
    expected_0 = float(np.dot(probs[state, :, 0], values))
    expected_1 = float(np.dot(probs[state, :, 1], values))
    if expected_0 == expected_1:
        return np.random.randint(2)
    return 0 if expected_0 > expected_1 else 1
def update_mdp_transition_counts_reward_counts(mdp_data, state, action, new_state, reward):
    """Record one observed transition and its reward in mdp_data (in place).

    Increments the (state, new_state, action) transition count, the visit
    count of `new_state` (reward_counts[:, 1]) and — when the observed
    reward was -1 — its negative-reward count (reward_counts[:, 0]).
    The model parameters themselves are re-estimated later.

    Args:
        mdp_data: The parameters of your MDP. See initialize_mdp_data.
        state: The state that was observed at the start.
        action: The action you performed.
        new_state: The state after your action.
        reward: The reward after your action (corresponding to new_state).

    Returns:
        Nothing.
    """
    mdp_data['transition_counts'][state, new_state, action] += 1
    counts = mdp_data['reward_counts']
    counts[new_state, 1] += 1
    if reward == -1:
        counts[new_state, 0] += 1
def update_mdp_transition_probs_reward(mdp_data):
    """Re-estimate transition probabilities and state rewards from counts.

    State/action pairs never tried keep their previous (initially uniform)
    distribution, and states never visited keep reward 0 — only observed
    components are overwritten. Mutates mdp_data in place.

    Args:
        mdp_data: The data for your MDP. See initialize_mdp_data.

    Returns:
        Nothing.
    """
    counts = mdp_data['transition_counts']
    probs = mdp_data['transition_probs']
    reward_counts = mdp_data['reward_counts']
    rewards = mdp_data['reward']
    for s in range(mdp_data['num_states']):
        for a in (0, 1):
            total = np.sum(counts[s, :, a])
            if total > 0:
                # Maximum-likelihood estimate for this (state, action) pair.
                probs[s, :, a] = counts[s, :, a] / total
        visits = reward_counts[s, 1]
        if visits > 0:
            # Rewards are -1 or 0, so the estimate is minus the fraction of -1s.
            rewards[s] = -reward_counts[s, 0] / visits
def update_mdp_value(mdp_data, tolerance, gamma):
    """Run one sweep of value iteration on the current MDP model.

    For every state, the greedy action is chosen against the OLD value
    function and the Bellman backup reward[s] + gamma * E[V(s')] is stored.
    mdp_data['value'] is replaced in place.

    Args:
        mdp_data: The data for your MDP. See initialize_mdp_data.
        tolerance: The tolerance to use for the convergence criterion.
        gamma: Your discount factor.

    Returns:
        True if the maximum absolute change across states in this sweep
        was below `tolerance` (converged in one iteration), else False.
    """
    old_value = mdp_data['value']
    probs = mdp_data['transition_probs']
    rewards = mdp_data['reward']
    n = mdp_data['num_states']
    updated = np.zeros(old_value.shape)
    for s in range(n):
        # choose_action still sees the old value function here.
        a = choose_action(s, mdp_data)
        expected = 0
        for nxt in range(n):
            expected += probs[s, nxt, a] * old_value[nxt]
        updated[s] = rewards[s] + gamma * expected
    mdp_data['value'] = updated
    return bool(np.max(np.abs(updated - old_value)) < tolerance)
def main(plot=True):
# Seed the randomness of the simulation so this outputs the same thing each time
np.random.seed(1)
# Simulation parameters
pause_time = 0.0001
min_trial_length_to_start_display = 100
display_started = min_trial_length_to_start_display == 0
NUM_STATES = 163
GAMMA = 0.995
TOLERANCE = 0.01
NO_LEARNING_THRESHOLD = 20
# Time cycle of | |
from MedTAG_sket_dock_App.utils import *
from psycopg2.extensions import register_adapter, AsIs
def addapt_numpy_float64(numpy_float64):
    # psycopg2 adapter: render numpy.float64 values verbatim in SQL statements.
    return AsIs(numpy_float64)
def addapt_numpy_int64(numpy_int64):
    # psycopg2 adapter: render numpy.int64 values verbatim in SQL statements.
    return AsIs(numpy_int64)
# Register the adapters so psycopg2 can serialize NumPy scalars that end up
# in query parameters.
register_adapter(numpy.float64, addapt_numpy_float64)
register_adapter(numpy.int64, addapt_numpy_int64)
from collections import defaultdict
from MedTAG_sket_dock_App.utils_pubmed import *
import os.path
import owlready2
"""This .py file includes the methods needed to configure MedTAG and update its configuration"""
def process_ontology(workpath, disease):
    """Query the ExaMode ontology for the concepts associated with a disease.

    Loads the examode.owl ontology shipped with SKET and runs a SPARQL query
    returning, for each concept linked to `disease`, its IRI, its English
    label and (optionally) the English label of its semantic area.

    Args:
        workpath: Base directory that contains the sket/ checkout.
        disease: English rdfs:label of the disease (e.g. 'colon carcinoma').

    Returns:
        The raw rdflib query result. Each row is an
        (iri, iri_label, semantic_area_label) triple whose items expose
        .toPython(); semantic_area_label may be None when the OPTIONAL
        clause did not match.
    """
    ontology_path = os.path.join(workpath, 'sket/sket/ont_proc/ontology/examode.owl')
    ontology = owlready2.get_ontology(ontology_path).load()
    sparql = "PREFIX exa: <https://w3id.org/examode/ontology/> " \
             "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> " \
             "select ?iri ?iri_label ?semantic_area_label where { " \
             "?iri rdfs:label ?iri_label ; exa:AssociatedDisease ?disease . " \
             "filter (langMatches( lang(?iri_label), 'en')). " \
             "?disease rdfs:label '" + disease + "'@en . " \
             "OPTIONAL {?iri exa:hasSemanticArea ?semantic_area . " \
             "?semantic_area rdfs:label ?semantic_area_label . " \
             "filter (langMatches( lang(?semantic_area_label), 'en')).} " \
             "} " \
             "limit 1000"
    # get ontology graph as in rdflib
    ontology_graph = ontology.world.as_rdflib_graph()
    # issue sparql query
    r = ontology_graph.query(query_object=sparql)
    # Fix: a defaultdict copy of the rows used to be built here but was never
    # returned or read anywhere — the dead code has been removed; callers
    # consume the raw query result directly.
    return r
def configure_concepts(cursor, load_concepts, author):
    """Configure ontology concepts when a NEW CONFIGURATION is performed.

    For each requested use case, maps the use case to its ExaMode disease,
    queries the ontology, purges stale manual annotations (admin only) and
    (re)inserts the concepts together with their semantic areas.

    Args:
        cursor: Open database cursor.
        load_concepts: Iterable of use-case names ('colon', 'uterine cervix', 'lung').
        author: 'admin' (manual + automatic) or 'robot' (automatic only).
            NOTE(review): any other value leaves `to_add` unbound and would
            raise at its first use — confirm callers only pass these two.
    """
    if author == 'admin':
        to_add = 'Manual and Automatic'
    elif author == 'robot':
        to_add = 'Automatic'
    for usecase in load_concepts:
        # Map the use case to the disease label used by the ontology.
        disease = ''
        if usecase.lower() == 'colon':
            disease = 'colon carcinoma'
        elif usecase.lower() == 'uterine cervix':
            disease = 'cervical cancer'
        elif usecase.lower() == 'lung':
            disease = 'lung cancer'
        if disease != '':
            workpath = os.path.dirname(os.path.abspath(__file__))  # Returns the Path your .py file is in
            r = process_ontology(workpath, disease)
            # Accumulators for the concepts of this use case.
            belong_to = []
            concept_has_uc = []
            conc = []
            with transaction.atomic():
                # Concepts of this use case currently flagged as (partly) manual.
                cursor.execute("SELECT c.concept_url FROM concept AS c inner join concept_has_uc as ch on ch.concept_url = c.concept_url where annotation_mode in %s and ch.name = %s", [tuple(['Manual', 'Manual and Automatic']), usecase])
                ans = cursor.fetchall()
                if len(ans) > 0:
                    concepts = []
                    for el in ans:
                        concepts.append(el[0])
                    if author == 'admin':
                        # Drop every human annotation referencing the old manual concepts.
                        cursor.execute("DELETE FROM ground_truth_log_file WHERE gt_type = %s and (id_report,language) in (SELECT id_report, language FROM contains WHERE concept_url in %s AND ns_id = %s)", ['concepts', tuple(concepts), 'Human'])
                        cursor.execute("DELETE FROM ground_truth_log_file WHERE gt_type = %s and (id_report,language) in (SELECT id_report, language FROM linked WHERE concept_url in %s AND ns_id = %s)", ['concept-mention', tuple(concepts), 'Human'])
                        cursor.execute("DELETE FROM contains WHERE concept_url in %s AND ns_id = %s", [tuple(concepts), 'Human'])
                        cursor.execute("DELETE FROM linked WHERE concept_url in %s AND ns_id = %s", [tuple(concepts), 'Human'])
                        cursor.execute("DELETE FROM belong_to WHERE concept_url in %s ", [tuple(concepts)])
                        cursor.execute("DELETE FROM concept_has_uc WHERE concept_url in %s", [tuple(concepts)])
                        cursor.execute("DELETE FROM concept WHERE concept_url in %s", [tuple(concepts)])
                # Automatic-only concepts get promoted when an admin reconfigures.
                cursor.execute(
                    "SELECT c.concept_url FROM concept AS c inner join concept_has_uc as ch on ch.concept_url = c.concept_url where annotation_mode = %s and ch.name = %s",
                    ['Automatic', usecase])
                ans_auto = cursor.fetchall()
                if len(ans_auto) > 0:
                    concepts = []
                    for el in ans_auto:
                        concepts.append(el[0])
                    if author == 'admin':
                        cursor.execute("UPDATE concept SET annotation_mode = %s WHERE annotation_mode = %s",
                                       ['Manual and Automatic', 'Automatic'])
                # Insert/refresh every (concept, label, semantic area) row from the ontology.
                for e in r:
                    if (e[0] is not None and e[1] is not None and e[2] is not None):
                        concept = e[0].toPython()
                        # print(e[2].toPython())
                        # NOTE(review): these compare the full IRI against bare
                        # names — verify the query can actually return them.
                        if concept == 'SevereColonDysplasia':
                            concept = 'https://w3id.org/examode/ontology/SevereColonDysplasia'
                        elif concept == 'uterusNOS':
                            concept = 'https://w3id.org/examode/ontology/UterusNOS'
                        # Create the semantic area on first sight.
                        cursor.execute("SELECT * FROM semantic_area WHERE name = %s", [e[2].toPython()])
                        ans = cursor.fetchall()
                        if len(ans) == 0:
                            cursor.execute("INSERT INTO semantic_area VALUES(%s)", [e[2].toPython()])
                        belong_to.append((concept, e[2].toPython()))
                        concept_has_uc.append((concept, usecase))
                        conc.append((concept, e[1].toPython(), None))
                        # Insert the concept, or upgrade its annotation mode if it exists.
                        cursor.execute('SELECT * FROM concept WHERE concept_url = %s', (concept,))
                        ans = cursor.fetchall()
                        if len(ans) == 0:
                            query = ("INSERT INTO concept (concept_url,name,annotation_mode) VALUES(%s,%s,%s);")
                            values = (concept, e[1].toPython(), to_add)
                            cursor.execute(query, values)
                        # elif author == 'admin':
                        else:
                            cursor.execute("UPDATE concept SET annotation_mode = %s WHERE concept_url = %s",
                                           ['Manual and Automatic', e[0].toPython()])
                        # Link concept to its semantic area (idempotent).
                        cursor.execute('SELECT * FROM belong_to WHERE concept_url = %s AND name=%s', [concept, e[2].toPython()])
                        if len(cursor.fetchall()) == 0:
                            query1 = ("INSERT INTO belong_to (name, concept_url) VALUES(%s,%s);")
                            values1 = (e[2].toPython(), concept)
                            cursor.execute(query1, values1)
                        # Link concept to its use case (idempotent).
                        cursor.execute('SELECT * FROM concept_has_uc WHERE concept_url = %s AND name=%s', [concept, usecase.lower()])
                        if len(cursor.fetchall()) == 0:
                            query2 = ("INSERT INTO concept_has_uc (concept_url,name) VALUES(%s,%s);")
                            values2 = (concept, usecase.lower())
                            cursor.execute(query2, values2)
def configure_labels(cursor, load_labels):
    """Configure annotation labels when a NEW CONFIGURATION is performed.

    Loads the use cases, labels and semantic areas from the bundled
    examode_db_population.json file and inserts them. Labels of use cases
    listed in `load_labels` are flagged 'Manual and Automatic', all others
    'Automatic'.

    Args:
        cursor: Open database cursor.
        load_labels: Collection of use-case names whose labels should be
            manually annotatable.
    """
    with transaction.atomic():
        workpath = os.path.dirname(os.path.abspath(__file__))  # Returns the Path your .py file is in
        with open(os.path.join(workpath, 'automatic_annotation/db_examode_data/examode_db_population.json'),
                  'r') as outfile:
            data = json.load(outfile)
        usecases = data['labels'].keys()
        areas = data['semantic area']
        # Insert every semantic area (one single-element tuple per row).
        ar_tup = []
        for el in areas:
            ar_tup.append((el,))
        cursor.executemany("INSERT INTO semantic_area VALUES (%s)", ar_tup)
        # Build (label, sequence number, use case, mode) rows; `seq` is a
        # global running counter across all use cases.
        ar_tup = []
        ar_tup_label = []
        seq = 0
        for el in usecases:
            to_add = 'Automatic'
            if el in load_labels:
                to_add = 'Manual and Automatic'
            ar_tup.append(el.lower())
            for label in data['labels'][el]:
                seq = seq + 1
                ar_tup_label.append((label, seq, el.lower(), to_add,))
        # Insert use cases that are not present yet (idempotent).
        for el in ar_tup:
            cursor.execute('SELECT * FROM use_case WHERE name = %s', (str(el).lower(),))
            ans = cursor.fetchall()
            if len(ans) == 0:
                cursor.execute('INSERT INTO use_case VALUES(%s)', (str(el).lower(),))
        cursor.executemany("INSERT INTO annotation_label (label,seq_number,name,annotation_mode) VALUES (%s,%s,%s,%s)", ar_tup_label)
def configure_update_labels(cursor, load_labels):
    """Configure labels when an existing configuration is updated.

    Promotes the JSON-defined labels of the requested use cases to
    'Manual and Automatic'; if any purely 'Manual' labels remain for a use
    case, the related human ground truth and the manual labels are purged.

    Args:
        cursor: Open database cursor.
        load_labels: Use-case names to update, or None for no label update.
    """
    with transaction.atomic():
        workpath = os.path.dirname(os.path.abspath(__file__))  # Returns the Path your .py file is in
        with open(os.path.join(workpath, 'automatic_annotation/db_examode_data/examode_db_population.json'),
                  'r') as outfile:
            data = json.load(outfile)
        up = False
        if load_labels is not None:
            for el in load_labels:
                for label in data['labels'][el]:
                    # NOTE(review): seq_number < 21 looks like a magic cutoff — confirm intent.
                    cursor.execute("UPDATE annotation_label SET annotation_mode = %s WHERE label = %s AND name = %s AND seq_number < %s", ['Manual and Automatic', label, str(el).lower(), int(21)])
                a = AnnotationLabel.objects.filter(name=str(el).lower(), annotation_mode='Manual')
                if a.count() > 0:
                    up = True
        if up:
            # Leftover manual-only labels: drop the human label ground truth too.
            cursor.execute("DELETE FROM ground_truth_log_file WHERE gt_type = %s AND ns_id = %s ", ['labels', 'Human'])
            cursor.execute("DELETE FROM associate WHERE ns_id = %s", ['Human'])
            cursor.execute("DELETE FROM annotation_label WHERE annotation_mode = %s", ['Manual'])
def check_file(reports,pubmedfiles, labels, concepts, jsonDisp, jsonAnn, username, password,load_concepts,load_labels):
"""This method checks whether the inserted files complies with the requirements"""
json_resp = {}
json_keys = []
usecases_list = []
json_resp['general_message'] = ''
json_resp['username_message'] = ''
json_resp['report_message'] = ''
json_resp['pubmed_message'] = ''
json_resp['concept_message'] = ''
json_resp['label_message'] = ''
json_resp['fields_message'] = ''
json_resp['keys'] = json_keys
if load_labels is not None:
load_labels = ''.join(load_labels)
load_labels = load_labels.split(',')
load_labels = list(set(load_labels))
else:
load_labels = ''
if load_concepts is not None:
load_concepts = ''.join(load_concepts)
load_concepts = load_concepts.split(',')
load_concepts = list(set(load_concepts))
else:
load_concepts = ''
load_labels = [x.lower() for x in load_labels]
load_concepts = [x.lower() for x in load_concepts]
# Error if the user has not inserted enough files
if len(concepts) == 0 and load_concepts is None and load_labels is None and len(labels) == 0 and len(jsonAnn) == 0 and len(reports) > 0:
json_resp[
'general_message'] = 'ERROR - You must provide at least one file between labels and concepts or at least one field to annotate.'
elif len(reports) == 0 and len(pubmedfiles) == 0:
json_resp['general_message'] = 'ERROR - You must provide a file with one or more reports or one or more pubmed files.'
elif len(pubmedfiles) > 0 and len(concepts) == 0 and load_concepts is None and load_labels is None and len(labels) == 0 and len(jsonAnn) == 0:
json_resp['general_message'] = 'PUBMED - only mentions allowed.'
try:
try:
cursor = connection.cursor()
cursor.execute('SELECT * FROM public.user WHERE username = %s', (str(username),))
ans = cursor.fetchall()
# Error on username and password: duplicated username or missing
if len(ans) > 0 or username == 'Test':
json_resp['username_message'] = 'USERNAME - The username you selected is already taken. Choose another one.'
if (username == ''):
json_resp['username_message'] = 'USERNAME - Please, provide a username.'
if password == '' and username == '':
json_resp['username_message'] = 'USERNAME - Please, provide a username and a password.'
except (Exception, psycopg2.Error) as e:
print(e)
json_resp[
'username_message'] = 'An error occurred handling the username and the password. Please, insert them again.'
pass
else:
if json_resp['username_message'] == '':
json_resp['username_message'] = 'Ok'
# This is necessary to collect the fields to annotate and display
fields = []
fields_to_ann = []
jsondisp = ''.join(jsonDisp)
jsonann = ''.join(jsonAnn)
jsondisp = jsondisp.split(',')
jsonann = jsonann.split(',')
for el in jsondisp:
fields.append(el)
for el in jsonann:
if len(el) > 0:
fields_to_ann.append(el)
# Error if 0 report files are added
if len(reports) == 0 and len(pubmedfiles) == 0:
json_resp['report_message'] = 'REPORTS FILES - You must provide at least one file containing reports or at least one file containing PubMED IDs before checking'
json_resp['pubmed_message'] = 'PUBMED FILES - You must provide at least one file containing reports or at | |
<filename>models/ deeplabv3_plus_xception.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 21 15:16:18 2021
@author: Administrator
"""
from base import BaseModel
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import torch.utils.model_zoo as model_zoo
from utils.helpers import initialize_weights,set_trainable
from itertools import chain
'''
'xception_65.pth'URL:https://github.com/zhangtianlun12/deeplabv3-/releases/download/v0.1/xception_65.pth
'''
'''
-> ResNet BackBone
'''
class ResNet(nn.Module):
    """ResNet backbone for DeepLab v3+.

    Wraps a torchvision ResNet and, for output_stride 8, converts the
    strided convolutions of layer3/layer4 to atrous (dilated) ones so the
    feature map keeps a higher resolution.

    Args:
        in_channels: Number of input image channels.
        output_stride: Ratio of input resolution to final feature-map
            resolution; must be 8 or 16.
        backbone: torchvision model name (e.g. 'resnet101').
        pretrained: Load pretrained weights; the stem is rebuilt from
            scratch when weights are absent or in_channels != 3.
    """

    def __init__(self, in_channels=3, output_stride=16, backbone='resnet101', pretrained=True):
        super(ResNet, self).__init__()
        # Fix: an unsupported output_stride used to fall through and crash
        # later with NameError on s3/s4/d3/d4 — fail fast instead.
        if output_stride == 16:
            s3, s4, d3, d4 = (2, 1, 1, 2)
        elif output_stride == 8:
            s3, s4, d3, d4 = (1, 1, 2, 4)
        else:
            raise ValueError("output_stride must be 8 or 16, got %r" % (output_stride,))
        model = getattr(models, backbone)(pretrained)
        if not pretrained or in_channels != 3:
            # Rebuild the stem when pretrained weights cannot be reused as-is.
            self.layer0 = nn.Sequential(
                nn.Conv2d(in_channels, 64, 7, stride=2, padding=3, bias=False),
                nn.BatchNorm2d(64),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            )
            initialize_weights(self.layer0)
        else:
            self.layer0 = nn.Sequential(*list(model.children())[:4])
        self.layer1 = model.layer1
        self.layer2 = model.layer2
        self.layer3 = model.layer3
        self.layer4 = model.layer4
        if output_stride == 8:
            # Replace strides with dilations in the last two stages.
            # resnet18/34 use BasicBlock, where the stride lives in conv1.
            for n, m in self.layer3.named_modules():
                if 'conv1' in n and (backbone == 'resnet34' or backbone == 'resnet18'):
                    m.dilation, m.padding, m.stride = (d3, d3), (d3, d3), (s3, s3)
                elif 'conv2' in n:
                    m.dilation, m.padding, m.stride = (d3, d3), (d3, d3), (s3, s3)
                elif 'downsample.0' in n:
                    m.stride = (s3, s3)
            for n, m in self.layer4.named_modules():
                if 'conv1' in n and (backbone == 'resnet34' or backbone == 'resnet18'):
                    m.dilation, m.padding, m.stride = (d4, d4), (d4, d4), (s4, s4)
                elif 'conv2' in n:
                    m.dilation, m.padding, m.stride = (d4, d4), (d4, d4), (s4, s4)
                elif 'downsample.0' in n:
                    m.stride = (s4, s4)

    def forward(self, x):
        x = self.layer0(x)
        x = self.layer1(x)
        # Stride-4 features feed the decoder's skip connection.
        low_level_features = x
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x, low_level_features
"""
Created on Fri Sep 13 19:04:23 2019
@author: shirhe-lyh
Implementation of Xception model.
Xception: Deep Learning with Depthwise Separable Convolutions, F. Chollet,
arxiv:1610.02357 (https://arxiv.org/abs/1610.02357).
Official tensorflow implementation:
https://github.com/tensorflow/models/blob/master/research/deeplab/core/xception.py
"""
import collections
import os
import torch
# Per-unit dilation multipliers for an Xception block; presumably the
# DeepLab "multi-grid" scheme -- TODO confirm against the TF reference.
_DEFAULT_MULTI_GRID = [1, 1, 1]
# The cap for torch.clamp
_CLIP_CAP = 6
# Batch-norm settings shared by every BatchNorm2d in this model.
_BATCH_NORM_PARAMS = {
    'eps': 0.001,
    'momentum': 0.9997,
    'affine': True,
}
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
    """A named tuple describing an Xception block.

    Its parts are:
        scope: The scope of the block.
        unit_fn: The Xception unit function which takes as input a tensor and
            returns another tensor with the output of the Xception unit.
        args: A list of length equal to the number of units in the block. The
            list contains one dictionary for each unit in the block to serve
            as keyword arguments to unit_fn.
    """
def fixed_padding(inputs, kernel_size, rate=1):
    """Pad a tensor so a following 'VALID' conv behaves like 'SAME'.

    The amount of padding depends only on the kernel size and dilation
    rate, never on the input's spatial size.

    Args:
        inputs: A tensor of size [batch, height_in, width_in, channels].
        kernel_size: Positive integer kernel size of the conv2d or
            max_pool2d operation that follows.
        rate: An integer, rate for atrous convolution.

    Returns:
        The zero-padded tensor; unchanged when kernel_size == 1.
    """
    effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
    total_pad = effective_kernel - 1
    front = total_pad // 2
    back = total_pad - front
    return torch.nn.functional.pad(inputs, pad=(front, back, front, back))
class Conv2dSame(torch.nn.Module):
    """Strided 2-D convolution emulating TensorFlow's 'SAME' padding."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, rate=1):
        """Constructor.

        For stride == 1 the symmetric padding computed by Conv2d is exact,
        so padding is delegated to the convolution itself. For stride > 1
        the input is explicitly zero-padded in forward() and the
        convolution runs with 'VALID' (no) padding.

        Args:
            in_channels: An integer, the number of input filters.
            out_channels: An integer, the number of output filters.
            kernel_size: An integer with the kernel_size of the filters.
            stride: An integer, the output stride.
            rate: An integer, rate for atrous convolution.
        """
        super(Conv2dSame, self).__init__()
        self._kernel_size = kernel_size
        self._rate = rate
        self._without_padding = stride == 1
        conv_kwargs = dict(kernel_size=kernel_size, dilation=rate, bias=False)
        if self._without_padding:
            # Here we assume floor(padding) == padding.
            conv_kwargs['stride'] = 1
            conv_kwargs['padding'] = (kernel_size - 1) * rate // 2
        else:
            conv_kwargs['stride'] = stride
        self._conv = torch.nn.Conv2d(in_channels, out_channels, **conv_kwargs)
        self._batch_norm = torch.nn.BatchNorm2d(out_channels,
                                                **_BATCH_NORM_PARAMS)
        self._relu = torch.nn.ReLU(inplace=True)

    def forward(self, x):
        """Apply (optional explicit pad) -> conv -> batch norm -> ReLU.

        Args:
            x: A 4-D tensor with shape [batch, height_in, width_in, channels].

        Returns:
            A 4-D tensor of size [batch, height_out, width_out, channels]
            with the convolution output.
        """
        if not self._without_padding:
            x = fixed_padding(x, self._kernel_size, self._rate)
        return self._relu(self._batch_norm(self._conv(x)))
class SeparableConv2dSame(torch.nn.Module):
    """Strided 2-D separable convolution with 'SAME' padding."""

    def __init__(self, in_channels, out_channels, kernel_size,
                 depth_multiplier, stride, rate, use_explicit_padding=True,
                 activation_fn=None, regularize_depthwise=False, **kwargs):
        """Constructor.

        When stride > 1 and use_explicit_padding is True, the input is
        explicitly zero-padded in forward() and the depthwise convolution
        runs without padding ('VALID'); otherwise the depthwise
        convolution pads symmetrically itself ('SAME').

        Args:
            in_channels: An integer, the number of input filters.
            out_channels: An integer, the number of output filters.
            kernel_size: An integer with the kernel_size of the filters.
            depth_multiplier: Number of depthwise output channels per input
                channel; the depthwise stage emits
                in_channels * depth_multiplier channels in total.
            stride: An integer, the output stride.
            rate: An integer, rate for atrous convolution.
            use_explicit_padding: If True, pad explicitly so the model stays
                fully compatible with the open source TensorFlow version;
                otherwise use the native PyTorch symmetric padding.
            activation_fn: Optional activation applied after each stage.
            regularize_depthwise: Whether to apply L2-norm regularization on
                the depthwise weights (not used in this body; kept for
                interface compatibility).
            **kwargs: Additional keyword arguments for torch.nn.Conv2d.
        """
        super(SeparableConv2dSame, self).__init__()
        self._kernel_size = kernel_size
        self._rate = rate
        self._without_padding = stride == 1 or not use_explicit_padding
        depthwise_channels = in_channels * depth_multiplier
        if self._without_padding:
            # Separable convolution for padding 'SAME'.
            # Here we assume floor(padding) == padding.
            self._conv_depthwise = torch.nn.Conv2d(
                in_channels, depthwise_channels, kernel_size=kernel_size,
                stride=stride, dilation=rate, groups=in_channels,
                padding=(kernel_size - 1) * rate // 2, bias=False, **kwargs)
        else:
            # Separable convolution for padding 'VALID'; forward() pads first.
            self._conv_depthwise = torch.nn.Conv2d(
                in_channels, depthwise_channels, kernel_size=kernel_size,
                stride=stride, dilation=rate, groups=in_channels,
                bias=False, **kwargs)
        self._batch_norm_depthwise = torch.nn.BatchNorm2d(
            depthwise_channels, **_BATCH_NORM_PARAMS)
        self._conv_pointwise = torch.nn.Conv2d(
            depthwise_channels, out_channels, kernel_size=1, stride=1,
            bias=False, **kwargs)
        self._batch_norm_pointwise = torch.nn.BatchNorm2d(
            out_channels, **_BATCH_NORM_PARAMS)
        self._activation_fn = activation_fn

    def forward(self, x):
        """Depthwise conv -> BN [-> act] -> pointwise conv -> BN [-> act].

        Args:
            x: A 4-D tensor with shape [batch, height_in, width_in, channels].

        Returns:
            A 4-D tensor of size [batch, height_out, width_out, channels]
            with the convolution output.
        """
        if not self._without_padding:
            x = fixed_padding(x, self._kernel_size, self._rate)
        x = self._batch_norm_depthwise(self._conv_depthwise(x))
        if self._activation_fn is not None:
            x = self._activation_fn(x)
        x = self._batch_norm_pointwise(self._conv_pointwise(x))
        if self._activation_fn is not None:
            x = self._activation_fn(x)
        return x
class XceptionModule(torch.nn.Module):
"""An Xception module.
The output of one Xception module is equal to the sum of `residual` and
`shortcut`, where `residual` is the feature computed by three seperable
convolution. The `shortcut` is the feature computed by 1x1 convolution
with or without striding. In some cases, the `shortcut` path could be a
simple identity function or none (i.e, no shortcut).
"""
def __init__(self, in_channels, depth_list, skip_connection_type, stride,
unit_rate_list, rate=1, activation_fn_in_separable_conv=False,
regularize_depthwise=False, use_bounded_activation=False,
use_explicit_padding=True):
"""Constructor.
Args:
in_channels: An integer, the number of input filters.
depth_list: A list of three integers specifying the depth values
of one Xception module.
skip_connection_type: Skip connection type for the residual path.
Only supports 'conv', 'sum', or 'none'.
stride: The block unit's stride. Detemines the amount of
downsampling of the units output compared to its input.
unit_rate_list: A list of three integers, determining the unit
rate for each separable convolution in the Xception module.
rate: An integer, rate for atrous convolution.
activation_fn_in_separable_conv: Includes activation function in
the seperable convolution or not.
regularize_depthwise: Whether or not apply L2-norm regularization
on the depthwise convolution weights.
use_bounded_activation: Whether or not to use bounded activations.
Bounded activations better lend themselves to quantized
inference.
use_explicit_padding: If True, use explicit padding to make the
model fully compatible with the open source version, otherwise
use the nattive Pytorch 'SAME' padding.
Raises:
ValueError: If depth_list and unit_rate_list do not contain three
integers, or if stride != 1 for the third seperable convolution
operation in the residual path, or unsupported skip connection
type.
"""
super(XceptionModule, self).__init__()
if len(depth_list) != 3:
raise ValueError('Expect three elements in `depth_list`.')
if len(unit_rate_list) != 3:
raise ValueError('Expect three elements in `unit_rate_list`.')
if skip_connection_type not in ['conv', 'sum', 'none']:
raise ValueError('Unsupported skip | |
"parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_create_tensorboard_time_series_field_headers_async():
    """Routing header ``parent`` is propagated as gRPC metadata (async client)."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_create_tensorboard_time_series), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()
        )
        await client.batch_create_tensorboard_time_series(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_batch_create_tensorboard_time_series_flattened():
    """Flattened keyword arguments populate the request fields."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_create_tensorboard_time_series), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = (
            tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.batch_create_tensorboard_time_series(
            parent="parent_value",
            requests=[
                tensorboard_service.CreateTensorboardTimeSeriesRequest(
                    parent="parent_value"
                )
            ],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].requests
        mock_val = [
            tensorboard_service.CreateTensorboardTimeSeriesRequest(
                parent="parent_value"
            )
        ]
        assert arg == mock_val
def test_batch_create_tensorboard_time_series_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.batch_create_tensorboard_time_series(
            tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(),
            parent="parent_value",
            requests=[
                tensorboard_service.CreateTensorboardTimeSeriesRequest(
                    parent="parent_value"
                )
            ],
        )
@pytest.mark.asyncio
async def test_batch_create_tensorboard_time_series_flattened_async():
    """Flattened keyword arguments populate the request fields (async client)."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_create_tensorboard_time_series), "__call__"
    ) as call:
        # Designate an appropriate return value for the call. The async
        # transport must return an awaitable, so wrap the response in a
        # fake unary-unary call. (A redundant plain assignment that was
        # immediately overwritten has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.batch_create_tensorboard_time_series(
            parent="parent_value",
            requests=[
                tensorboard_service.CreateTensorboardTimeSeriesRequest(
                    parent="parent_value"
                )
            ],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].requests
        mock_val = [
            tensorboard_service.CreateTensorboardTimeSeriesRequest(
                parent="parent_value"
            )
        ]
        assert arg == mock_val
@pytest.mark.asyncio
async def test_batch_create_tensorboard_time_series_flattened_error_async():
    """Request object plus flattened fields raises ValueError (async client)."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.batch_create_tensorboard_time_series(
            tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(),
            parent="parent_value",
            requests=[
                tensorboard_service.CreateTensorboardTimeSeriesRequest(
                    parent="parent_value"
                )
            ],
        )
def test_create_tensorboard_time_series(
    transport: str = "grpc",
    request_type=tensorboard_service.CreateTensorboardTimeSeriesRequest,
):
    """CreateTensorboardTimeSeries returns the faked response unchanged."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tensorboard_time_series), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries(
            name="name_value",
            display_name="display_name_value",
            description="description_value",
            value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
            etag="etag_value",
            plugin_name="plugin_name_value",
            plugin_data=b"plugin_data_blob",
        )
        response = client.create_tensorboard_time_series(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
    assert (
        response.value_type
        == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR
    )
    assert response.etag == "etag_value"
    assert response.plugin_name == "plugin_name_value"
    assert response.plugin_data == b"plugin_data_blob"
def test_create_tensorboard_time_series_from_dict():
    """The request may also be supplied as a plain dict."""
    test_create_tensorboard_time_series(request_type=dict)
def test_create_tensorboard_time_series_empty_call():
    """Calling with no arguments sends a default empty request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tensorboard_time_series), "__call__"
    ) as call:
        client.create_tensorboard_time_series()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest()
@pytest.mark.asyncio
async def test_create_tensorboard_time_series_async(
    transport: str = "grpc_asyncio",
    request_type=tensorboard_service.CreateTensorboardTimeSeriesRequest,
):
    """Async CreateTensorboardTimeSeries returns the faked response unchanged."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tensorboard_time_series), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_tensorboard_time_series.TensorboardTimeSeries(
                name="name_value",
                display_name="display_name_value",
                description="description_value",
                value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
                etag="etag_value",
                plugin_name="plugin_name_value",
                plugin_data=b"plugin_data_blob",
            )
        )
        response = await client.create_tensorboard_time_series(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
    assert (
        response.value_type
        == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR
    )
    assert response.etag == "etag_value"
    assert response.plugin_name == "plugin_name_value"
    assert response.plugin_data == b"plugin_data_blob"
@pytest.mark.asyncio
async def test_create_tensorboard_time_series_async_from_dict():
    """The async request may also be supplied as a plain dict."""
    await test_create_tensorboard_time_series_async(request_type=dict)
def test_create_tensorboard_time_series_field_headers():
    """Routing header ``parent`` is propagated as gRPC metadata."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.CreateTensorboardTimeSeriesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tensorboard_time_series), "__call__"
    ) as call:
        call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries()
        client.create_tensorboard_time_series(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_tensorboard_time_series_field_headers_async():
    """Routing header ``parent`` is propagated as gRPC metadata (async client)."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.CreateTensorboardTimeSeriesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tensorboard_time_series), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_tensorboard_time_series.TensorboardTimeSeries()
        )
        await client.create_tensorboard_time_series(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_tensorboard_time_series_flattened():
    """Flattened keyword arguments populate the request fields."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tensorboard_time_series), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_tensorboard_time_series(
            parent="parent_value",
            tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(
                name="name_value"
            ),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].tensorboard_time_series
        mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name="name_value")
        assert arg == mock_val
def test_create_tensorboard_time_series_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_tensorboard_time_series(
            tensorboard_service.CreateTensorboardTimeSeriesRequest(),
            parent="parent_value",
            tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(
                name="name_value"
            ),
        )
@pytest.mark.asyncio
async def test_create_tensorboard_time_series_flattened_async():
    """Flattened keyword arguments populate the request fields (async client)."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tensorboard_time_series), "__call__"
    ) as call:
        # Designate an appropriate return value for the call. The async
        # transport must return an awaitable, so wrap the response in a
        # fake unary-unary call. (A redundant plain assignment that was
        # immediately overwritten has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_tensorboard_time_series.TensorboardTimeSeries()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_tensorboard_time_series(
            parent="parent_value",
            tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(
                name="name_value"
            ),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].tensorboard_time_series
        mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_tensorboard_time_series_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with | |
mngr.remove_directory_locks()
Including data from other runs
==============================
If a field has been observed over more than one run, the manager will
need to be made aware of the pre-existing data to make combined
datacubes. Note that this is only necessary for the final data
reduction, so observers do not need to worry about this.
To combine the data, first create a manager for each run (you may
already have done this):
>>> mngr = sami.manager.Manager('2014_04_24-2014_05_04')
>>> mngr_old = sami.manager.Manager('2014_05_23-2014_06_01')
Then create the link:
>>> mngr.link_manager(mngr_old)
Now `mngr` will include files from `mngr_old` when necessary, i.e. for
these steps:
>>> mngr.measure_offsets()
>>> mngr.cube()
>>> mngr.scale_cubes()
>>> mngr.bin_cubes()
For all previous steps the two managers still act independently, so
you need to follow through up to scale_frames() for each manager
individually.
Other functions
===============
The other functions defined probably aren't useful to you.
"""
# Task list provides the list of standard reduction tasks in the necessary
# order. This is used by `reduce_all`, and also by each reduction step to provide instructions on the next step to run.
task_list = (
('reduce_bias', True),
('combine_bias', False),
('reduce_dark', True),
('combine_dark', False),
('reduce_lflat', True),
('combine_lflat', False),
('make_tlm', True),
('reduce_arc', True),
('reduce_fflat', True),
('reduce_sky', True),
('reduce_object', True),
('derive_transfer_function', True),
('combine_transfer_function', True),
('flux_calibrate', True),
('telluric_correct', True),
('fluxcal_secondary',True),
('scale_frames', True),
('measure_offsets', True),
('cube', True),
('scale_cubes', True),
('bin_cubes', True),
('record_dust', True),
('bin_aperture_spectra', True),
('gzip_cubes', True)
)
    def __init__(self, root, copy_files=False, move_files=False, fast=False,
                 gratlpmm=GRATLPMM, n_cpu=1,demo_data_source='demo',
                 use_twilight_tlm_blue=False, use_twilight_flat_blue=False,
                 improve_blue_wavecorr=False, telluric_correct_primary=False, debug=False):
        """Create a manager for the reduction run rooted at `root`.

        Parameters
        ----------
        root : str
            Top-level directory for this run; raw data is inspected under
            ``root/raw``.
        copy_files, move_files : bool
            Whether files found during the initial inspection are copied
            or moved into the manager's directory structure.
        fast : bool
            Select the 'fast' set of idx files instead of 'slow'.
        gratlpmm : dict
            Mapping of grating name to lines-per-mm, written into headers.
        n_cpu : int
            Number of processes used by `self.map` / `inspect_root`.
        demo_data_source : str
            Not used in this constructor body -- presumably selects a demo
            dataset elsewhere; TODO confirm.
        use_twilight_tlm_blue : bool
            Allow twilight frames for tramline maps / profile maps.
        use_twilight_flat_blue : bool
            Allow twilight frames for fibre flat fielding.
        improve_blue_wavecorr : bool
            Apply the additional twilight-based wavelength calibration step.
        telluric_correct_primary : bool
            Telluric-correct primary standards before flux calibration.
        debug : bool
            Start with DEBUG-level logging enabled.
        """
        if fast:
            self.speed = 'fast'
        else:
            self.speed = 'slow'
        self.idx_files = IDX_FILES[self.speed]
        # define the internal flag that allows twilights to be used for
        # making tramline maps:
        self.use_twilight_tlm_blue = use_twilight_tlm_blue
        # define the internal flag that allows twilights to be used for
        # fibre flat fielding:
        self.use_twilight_flat_blue = use_twilight_flat_blue
        # define the internal flag that specifies the improved twlight wavelength
        # calibration step should be applied
        self.improve_blue_wavecorr = improve_blue_wavecorr
        # Internal flag to set telluric correction for primary standards
        self.telluric_correct_primary = telluric_correct_primary
        self.gratlpmm = gratlpmm
        self.n_cpu = n_cpu
        self.root = root
        self.abs_root = os.path.abspath(root)
        self.tmp_dir = os.path.join(self.abs_root, 'tmp')
        # Match objects within 1'
        if ASTROPY_VERSION[0] == 0 and ASTROPY_VERSION[1] == 2:
            self.matching_radius = coord.AngularSeparation(
                0.0, 0.0, 0.0, 1.0, units.arcmin)
        else:
            self.matching_radius = coord.Angle('0:1:0 degrees')
        self.file_list = []
        self.extra_list = []
        self.dark_exposure_str_list = []
        self.dark_exposure_list = []
        self.linked_managers = []
        self.cwd = os.getcwd()
        if 'IMP_SCRATCH' in os.environ:
            self.imp_scratch = os.environ['IMP_SCRATCH']
        else:
            self.imp_scratch = None
        self.scratch_dir = None
        # Minimum exposure times (seconds) for various calibration uses.
        self.min_exposure_for_throughput = 900.0
        self.min_exposure_for_sky_wave = 900.0
        self.min_exposure_for_5577pca = 599.0
        self.aat_username = None
        self.aat_password = None
        self.inspect_root(copy_files, move_files)
        if self.find_directory_locks():
            print('Warning: directory locks in place!')
            print('If this is because you killed a crashed manager, clean them')
            print('up using mngr.remove_directory_locks()')
        if use_twilight_tlm_blue:
            print('Using twilight frames to derive TLM and profile map')
        else:
            print('NOT using twilight frames to derive TLM and profile map')
        if use_twilight_flat_blue:
            print('Using twilight frames for fibre flat field')
        else:
            print('NOT using twilight frames for fibre flat field')
        if improve_blue_wavecorr:
            print('Applying additional twilight-based wavelength calibration step')
        else:
            print('NOT applying additional twilight-based wavelength calibration step')
        if telluric_correct_primary:
            print('Applying telluric correction to primary standard stars before flux calibration')
            print('WARNING: Only do this if the reference spectra for the primary standards have good telluric correction')
        else:
            print('NOT applying telluric correction to primary standard stars')
        # Initialise _debug first so the `debug` property setter can
        # compare against it.
        self._debug = False
        self.debug = debug
    @property
    def debug(self):
        """bool: Whether DEBUG-level logging is enabled for this manager."""
        return self._debug
    @debug.setter
    def debug(self, value):
        # Changing the flag raises/lowers the level of both the local
        # logger and the tdfdr module's logger.
        if not isinstance(value, bool):
            raise ValueError("debug must be set to a boolean value.")
        if not value == self._debug:
            if value:
                log.setLevel(slogging.DEBUG)
                tdfdr.log.setLevel(slogging.DEBUG)
            else:
                log.setLevel(slogging.WARNING)
                tdfdr.log.setLevel(slogging.WARNING)
            self._debug = value
def next_step(self, step, print_message=False):
task_name_list = list(map(lambda x: x[0], self.task_list))
current_index = task_name_list.index(step)
if current_index + 1 < len(task_name_list):
next_step = task_name_list[current_index + 1]
else:
# nothing left
next_step = None
if print_message:
print("'{}' step complete. Next step is '{}'".format(step, next_step))
return next_step
def __repr__(self):
return "SAMIManagerInstance at {}".format(self.root)
def map(self, function, input_list):
"""Map inputs to a function, using built-in map or multiprocessing."""
if not input_list:
# input_list is empty. I expected the map functions to deal with
# this issue, but in one case it hung on aatmacb, so let's be
# absolutely sure to avoid the issue
print('empty input_list, returning...')
return []
# if asyncio.iscoroutinefunction(function):
#
# result_list = []
#
# # loop = asyncio.new_event_loop()
# loop = asyncio.get_event_loop()
# # Break up the overall job into chunks that are n_cpu in size:
# for i in range(0, len(input_list), self.n_cpu):
# print("{} jobs total, running {} to {} in parallel".format(len(input_list), i, min(i+self.n_cpu, len(input_list))))
# # Create an awaitable object which can be used as a future.
# # This is the job that will be run in parallel.
# @asyncio.coroutine
# def job():
# tasks = [function(item) for item in input_list[i:i+self.n_cpu]]
# # for completed in asyncio.as_completed(tasks): # print in the order they finish
# # await completed
# # # print(completed.result())
# sub_results = yield from asyncio.gather(*tasks, loop=loop)
# result_list.extend(sub_results)
#
# loop.run_until_complete(job())
# # loop.close()
#
# return np.array(result_list)
#
# else:
# Fall back to using multiprocessing for non-coroutine functions
if self.n_cpu == 1:
result_list = list(map(function, input_list))
else:
pool = multiprocessing.Pool(self.n_cpu)
result_list = pool.map(function, input_list, chunksize=1)
pool.close()
pool.join()
return result_list
def inspect_root(self, copy_files, move_files, trust_header=True):
"""Add details of existing files to internal lists."""
files_to_add = []
for dirname, subdirname_list, filename_list in os.walk(os.path.join(self.abs_root, "raw")):
for filename in filename_list:
if self.file_filter(filename):
full_path = os.path.join(dirname, filename)
files_to_add.append(full_path)
assert len(set(files_to_add)) == len(files_to_add), "Some files would be duplicated on manager startup."
if self.n_cpu == 1:
fits_list = list(map(FITSFile, files_to_add))
else:
pool = multiprocessing.Pool(self.n_cpu)
fits_list = pool.map(FITSFile, files_to_add, chunksize=20)
pool.close()
pool.join()
for fits in fits_list:
self.import_file(fits,
trust_header=trust_header,
copy_files=copy_files,
move_files=move_files)
def file_filter(self, filename):
"""Return True if the file should be added."""
# Match filenames of the form 01jan10001.fits
return (re.match(r'[0-3][0-9]'
r'(jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)'
r'[1-2][0-9]{4}\.(fit|fits|FIT|FITS)$',
filename)
and (self.fits_file(filename) is None))
    def import_file(self, source,
                    trust_header=True, copy_files=True, move_files=False):
        """Add details of a file to the manager.

        `source` may be a path or an existing FITSFile instance. Copies
        of files and unrecognised NDF_CLASS values are skipped. Depending
        on `copy_files`/`move_files`, the raw file is copied or moved
        into the manager's directory tree before being recorded in
        `self.file_list`.
        """
        if not isinstance(source, FITSFile):
            # source_path = os.path.join(dirname, filename)
            # Initialize an instance of the FITSFile:
            filename = os.path.basename(source)
            fits = FITSFile(source)
        else:
            filename = source.filename
            fits = source
        if fits.copy:
            # print 'this is a copy, do not import:',dirname,filename
            # This is a copy of a file, don't add it to the list
            return
        if fits.ndf_class not in [
                'BIAS', 'DARK', 'LFLAT', 'MFFFF', 'MFARC', 'MFSKY',
                'MFOBJECT']:
            print('Unrecognised NDF_CLASS for {}: {}'.format(
                filename, fits.ndf_class))
            print('Skipping this file')
            return
        if fits.ndf_class == 'DARK':
            # Track the distinct dark exposure times seen so far.
            if fits.exposure_str not in self.dark_exposure_str_list:
                self.dark_exposure_str_list.append(fits.exposure_str)
                self.dark_exposure_list.append(fits.exposure)
        self.set_raw_path(fits)
        if os.path.abspath(fits.source_path) != os.path.abspath(fits.raw_path):
            if copy_files:
                print('Copying file:', filename)
                self.update_copy(fits.source_path, fits.raw_path)
            if move_files:
                print('Moving file: ', filename)
                self.move(fits.source_path, fits.raw_path)
            if not copy_files and not move_files:
                print('Warning! Adding', filename, 'in unexpected location')
                fits.raw_path = fits.source_path
        else:
            print('Adding file: ', filename)
        self.set_name(fits, trust_header=trust_header)
        fits.set_check_data()
        self.set_reduced_path(fits)
        if not fits.do_not_use:
            fits.make_reduced_link()
        if fits.grating in self.gratlpmm:
            try:
                fits.add_header_item('GRATLPMM', self.gratlpmm[fits.grating])
            except IOError:
                pass
        if fits.grating not in self.idx_files:
            # Without an idx file we would have no way to reduce this file
            self.disable_files([fits])
        self.file_list.append(fits)
        return
def set_raw_path(self, fits):
        """Derive and store ``raw_dir`` and ``raw_path`` on *fits*.

        Calibration classes (BIAS/DARK/LFLAT) get their own subtree under
        'raw'; every other class is filed by date and CCD.  DARK frames
        additionally include the exposure time in the path.
        """
        ndf_class = fits.ndf_class
        if ndf_class == 'BIAS':
            parts = ('bias', fits.ccd, fits.date)
        elif ndf_class == 'DARK':
            parts = ('dark', fits.ccd, fits.exposure_str, fits.date)
        elif ndf_class == 'LFLAT':
            parts = ('lflat', fits.ccd, fits.date)
        else:
            parts = (fits.date, fits.ccd)
        fits.raw_dir = os.path.join(self.abs_root, 'raw', *parts)
        fits.raw_path = os.path.join(fits.raw_dir, fits.filename)
        return
def update_copy(self, source_path, dest_path):
        """Copy ``source_path`` to ``dest_path`` unless an up-to-date copy exists.

        The destination directory is created on demand.  When the
        destination already exists and is at least as new as the source,
        nothing is copied (metadata is preserved via copy2).
        """
        target_dir = os.path.dirname(dest_path)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        elif (os.path.exists(dest_path) and
              os.path.getmtime(source_path) <= os.path.getmtime(dest_path)):
            # Destination is current; skip the copy.
            return
        shutil.copy2(source_path, dest_path)
        return
def move(self, source_path, dest_path):
        """Move ``source_path`` to ``dest_path``, creating the destination
        directory first if it does not exist yet."""
        target_dir = os.path.dirname(dest_path)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        shutil.move(source_path, dest_path)
        return
def move_reduced_files(self, filename_root, old_reduced_dir, reduced_dir):
"""Move all reduced files connected to the given root."""
for filename in os.listdir(old_reduced_dir):
if filename.startswith(filename_root):
self.move(os.path.join(old_reduced_dir, filename),
os.path.join(reduced_dir, filename))
# If there is nothing useful left in the old directory, delete it.
if not self.check_reduced_dir_contents(old_reduced_dir):
# There's nothing useful in | |
[18, 35, 87, 10],
[20, 4, 82, 47, 65],
[19, 1, 23, 75, 3, 34],
[88, 2, 77, 73, 7, 63, 67],
[99, 65, 4, 28, 06, 16, 70, 92],
[41, 41, 26, 56, 83, 40, 80, 70, 33],
[41, 48, 72, 33, 47, 32, 37, 16, 94, 29],
[53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14],
[70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57],
[91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48],
[63, 66, 04, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31],
[4, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 4, 23] ]
def euler_18(i, j):
    """Return the maximum top-to-bottom path sum in the triangle
    ``matrix_18`` starting at row ``i``, column ``j`` (Project Euler 18).

    Plain exponential recursion is adequate for the 15-row triangle; add
    memoisation before using this on larger triangles.

    Fix: the module-level call used the Python-2-only ``print x``
    statement, which is a SyntaxError on Python 3.
    """
    if i >= len(matrix_18):
        # Walked past the last row: nothing more to add.
        return 0
    here = matrix_18[i][j]
    # Best of the two children directly below.
    return here + max(euler_18(i + 1, j), euler_18(i + 1, j + 1))
print(euler_18(0, 0))
def mainE19(min, max):
    """Count Sundays falling on the first of a month in years [min, max]
    (Project Euler 19).

    ``min`` must be 1900 (1 Jan = Monday -> 1) or 1901 (Tuesday -> 2);
    the weekday counter then runs continuously across the whole range.
    Weekdays are 1 (Monday) .. 7 (Sunday).

    Fixes: Python-2-only ``print`` statements made the file unusable on
    Python 3, and any other start year hit an UnboundLocalError on
    ``dayOfTheWeek`` -- that now raises a clear ValueError.  The
    duplicated leap/regular loops are merged by picking the day list once.
    """
    countSunday = 0
    if min == 1900:
        dayOfTheWeek = 1
    elif min == 1901:
        dayOfTheWeek = 2
    else:
        raise ValueError('start year must be 1900 or 1901')
    months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    # Day-of-month sequences for a regular and a leap year.
    year = []
    leapyear = []
    for m in months:
        for i in range(1, m + 1):
            year.append(i)
            leapyear.append(i)
        if m == 28:
            leapyear.append(29)
    for y in range(min, max + 1):
        if (y % 4 == 0 and y % 100 != 0) or (y % 100 == 0 and y % 400 == 0):
            day_list = leapyear
        else:
            day_list = year
        month = 0
        for dayInYear in range(0, len(day_list)):
            if day_list[dayInYear] == 1:
                month += 1
            if dayOfTheWeek == 7:
                # It is a Sunday; count it if it is also the 1st.
                if day_list[dayInYear] == 1:
                    countSunday += 1
                    print(str(dayInYear) + ", " + str(month) + "/1/" + str(y))
                dayOfTheWeek = 1
            else:
                dayOfTheWeek += 1
    return countSunday
print(mainE19(1901, 2000))
def mainE20(upper):
    """Return the sum of the decimal digits of ``upper``! (Project Euler 20).

    Fix: the module-level call used the Python-2-only ``print`` statement.
    """
    prod = 1
    for i in range(2, upper + 1):
        prod *= i
    # Digit sum of the factorial.
    return sum(int(digit) for digit in str(prod))
print(mainE20(100))
def euler_21(upper):
    """Return the sum of all amicable numbers up to ``upper``
    (Project Euler 21).  Relies on the sibling ``sum_divisors``.

    ``amicable`` is a "still to examine" mask: once a number and its
    divisor sum have been inspected, both are marked so they are not
    revisited.

    NOTE(review): marking ``amicable[sum_divs] = False`` even when no
    pair was found assumes d(m) = n implies n was fully checked, which is
    not obviously true for every n -- verify against a brute-force run.
    """
    amicable = [True]*(upper+1)
    amicable[0] = False
    amicable_number = []
    ix = 1
    while ix <= upper:
        if amicable[ix]:
            sum_divs = sum_divisors(ix)
            # Amicable pair: d(n) != n and d(d(n)) == n.
            if sum_divs != ix:
                if sum_divisors(sum_divs) == ix:
                    amicable_number.append(ix)
                    if sum_divs <= upper:
                        # Record the partner too (only when in range).
                        amicable_number.append(sum_divs)
            amicable[ix] = False
            if sum_divs <= upper:
                amicable[sum_divs] = False
        ix += 1
    print(amicable_number)
    return sum(amicable_number)
def sum_divisors(num):
    """Return the sum of the proper divisors of ``num`` (num excluded).

    Bug fix: the original loop condition was ``i < num // 2``, which
    skipped the largest proper divisor num/2 (e.g. 110 for 220) and
    therefore broke amicable-pair detection in ``euler_21``.  The local
    accumulator is also renamed so it no longer shadows builtin ``sum``.
    """
    total = 1  # 1 divides everything (num >= 2); num itself is excluded.
    i = 2
    while i <= num // 2:
        if num % i == 0:
            total += i
        i += 1
    return total
print(euler_21(2900))
# Reference values for Project Euler 21 with upper = 10000:
# pairs [220, 284, 1184, 1210, 2620, 2924, 5020, 5564, 6232, 6368],
# total 31626.  These assume a proper-divisor sum that includes num//2.
def mainE25(digits):
    """Return the index of the first Fibonacci number with at least
    ``digits`` decimal digits (Project Euler 25); F(1) = F(2) = 1.

    Fix: the module-level call used the Python-2-only ``print`` statement.
    """
    a = 1
    b = 1
    count = 2
    while len(str(b)) < digits:
        # Advance one Fibonacci step.
        a, b = b, a + b
        count += 1
    return count
print(mainE25(1000))
def euler_28(size):
    """Sum the numbers on both diagonals of a ``size`` x ``size``
    clockwise number spiral starting with 1 in the centre
    (Project Euler 28; expected 669171001 for size 1001).

    The spiral is materialised in ``matrix28`` and walked cell by cell.
    ``size`` is assumed to be odd so that 1 sits in the exact centre.
    """
    matrix28 = [[0 for i in range (size)] for j in range(size)]
    i = len(matrix28)//2
    j = int(i)
    lenmx = len(matrix28)
    sumX = 0
    current = 1
    direction = "right"
    matrix28[i][j] = int(current)
    sumX += current
    while current < (lenmx**2):
        # Step in the current direction, then turn as soon as the first
        # cell of the following side is still empty (classic spiral walk).
        if direction == "right":
            if i+1 < lenmx:
                i += 1
                current += 1
                matrix28[i][j] = int(current)
            if j+1 < lenmx:
                if matrix28[i][j+1] == 0:
                    direction = "down"
        elif direction == "down":
            if j+1 < lenmx:
                j += 1
                current += 1
                matrix28[i][j] = int(current)
            if i-1 >= 0:
                if matrix28[i-1][j] == 0:
                    direction = "left"
        elif direction == "left":
            if i-1 >= 0:
                i -= 1
                current += 1
                matrix28[i][j] = int(current)
            if j-1 >= 0:
                if matrix28[i][j-1] == 0:
                    direction = "up"
        elif direction == "up":
            if j-1 >= 0:
                j -= 1
                current += 1
                matrix28[i][j] = int(current)
            if i+1 < lenmx:
                if matrix28[i+1][j] == 0:
                    direction = "right"
        # Accumulate whenever the freshly placed value lies on a diagonal.
        if i == j:
            sumX += current
        elif (lenmx - j - 1) == i:
            sumX += current
    return sumX
    #print matrix28
    #sumA = sum(matrix28[i][i] for i in range(0, lenmx))
    #sumB = sum(matrix28[lenmx-1-i][i] for i in range(0, lenmx))
    #return (sumA + sumB - 1)
print(euler_28(1001))
# 669171001
def euler_29(a, b):
    """Count distinct values of i**j for 2 <= i <= a and 2 <= j <= b
    (Project Euler 29; for a = b = 100 the answer is 9183).

    Fixes: ``xrange`` is a NameError on Python 3 -> ``range``; list
    membership testing was O(n) per candidate -> a ``set`` provides O(1)
    lookups while ``res`` keeps the original insertion order so the
    printed output is unchanged.
    """
    seen = set()
    res = []
    for i in range(2, a + 1):
        for j in range(2, b + 1):
            num = i ** j
            if num not in seen:
                seen.add(num)
                res.append(num)
    print(res)
    return len(res)
print(euler_29(100, 100))
#9183
def mainE30(upper):
    """Sum the numbers below ``upper`` that equal the sum of the fifth
    powers of their digits (Project Euler 30).

    Bug fixes: the function was defined as ``main30`` but invoked as
    ``mainE30`` (NameError) -- it is renamed to match the call and the
    other mainE* helpers, with ``main30`` kept as an alias; the Python-2
    ``print`` statement is replaced; the accumulator is a sum, not a
    product, and is named accordingly.
    """
    fifths = []
    for num in range(2, upper):
        digit_power_sum = sum(int(ch) ** 5 for ch in str(num))
        if digit_power_sum == num:
            fifths.append(num)
    print(fifths)
    return sum(fifths)
main30 = mainE30  # backward-compatible alias for the original name
print(mainE30(20000))
def mainE31(amount):
    """Count the ways to make ``amount`` pence from the British coins
    1, 2, 5, 10, 20, 50, 100 and 200 (Project Euler 31; 73682 for 200).

    Bug fix: the range bounds used true division (``/``), which yields a
    float on Python 3 and makes every ``range`` call raise TypeError;
    floor division (``//``) restores the Python 2 behaviour.  Each level
    breaks as soon as the amount is matched exactly, so every coin
    combination is counted exactly once (the innermost c1 loop always
    tops up the remainder with pennies).
    """
    cases = 0
    for c200 in range(0, amount // 200 + 1):
        current = 200 * c200
        if current == amount:
            cases += 1
            break
        for c100 in range(0, (amount - c200 * 200) // 100 + 1):
            current = 100 * c100 + 200 * c200
            if current == amount:
                cases += 1
                break
            for c50 in range(0, (amount - c200 * 200 - c100 * 100) // 50 + 1):
                current = 50 * c50 + 100 * c100 + 200 * c200
                if current == amount:
                    cases += 1
                    break
                for c20 in range(0, (amount - c200 * 200 - c100 * 100 - c50 * 50) // 20 + 1):
                    current = 20 * c20 + 50 * c50 + 100 * c100 + 200 * c200
                    if current == amount:
                        cases += 1
                        break
                    for c10 in range(0, (amount - c200 * 200 - c100 * 100 - c50 * 50 - c20 * 20) // 10 + 1):
                        current = 10 * c10 + 20 * c20 + 50 * c50 + 100 * c100 + 200 * c200
                        if current == amount:
                            cases += 1
                            break
                        for c5 in range(0, (amount - c200 * 200 - c100 * 100 - c50 * 50 - c20 * 20 - c10 * 10) // 5 + 1):
                            current = 5 * c5 + 10 * c10 + 20 * c20 + 50 * c50 + 100 * c100 + 200 * c200
                            if current == amount:
                                cases += 1
                                break
                            for c2 in range(0, (amount - c200 * 200 - c100 * 100 - c50 * 50 - c20 * 20 - c10 * 10 - c5 * 5) // 2 + 1):
                                current = 2 * c2 + 5 * c5 + 10 * c10 + 20 * c20 + 50 * c50 + 100 * c100 + 200 * c200
                                if current == amount:
                                    cases += 1
                                    break
                                for c1 in range(0, (amount - c200 * 200 - c100 * 100 - c50 * 50 - c20 * 20 - c10 * 10 - c5 * 5 - c2 * 2) + 1):
                                    current = c1 + 2 * c2 + 5 * c5 + 10 * c10 + 20 * c20 + 50 * c50 + 100 * c100 + 200 * c200
                                    if current == amount:
                                        cases += 1
                                        break
    return cases
# Project Euler 31: ways to make two pounds; expected answer 73682.
print(mainE31(200))
def mainE34(upper):
    """Sum the numbers in [3, upper] that equal the sum of the factorials
    of their digits (Project Euler 34; curious numbers are 145 and 40585).

    Bug fixes: the factorial table started with 0! = 0, but 0! = 1, so
    every number containing the digit 0 (notably 40585) was missed; the
    accumulator no longer shadows builtin ``sum``; the Python-2 ``print``
    statement is replaced.
    """
    fact = [1, 1, 2]  # fact[d] == d!; note 0! == 1.
    for i in range(3, 10):
        fact.append(fact[i - 1] * i)
    res = 0
    resList = []
    for number in range(3, upper + 1):
        digit_fact_sum = 0
        for digit in str(number):
            digit_fact_sum += fact[int(digit)]
        if digit_fact_sum == number:
            resList.append(number)
            res += number
    print(resList)
    return res
# Python-2 print statements replaced with the print() function.
print(mainE34(2000))
print(mainE34(1000000))
def euler_34(upper=2540162):
    """Yield the numbers in [3, upper) that equal the sum of the
    factorials of their digits (generator form of Project Euler 34).

    Bug fixes: the factorial table started with 0! = 0 instead of 1, and
    the digit loop used ``i %= 10`` (which never advances past the last
    digit) instead of ``i //= 10``.  The search bound is now a parameter;
    its default 2540162 = 7 * 9! + 2 keeps the original behaviour.
    """
    factorials = [1, 1]  # factorials[d] == d!; 0! == 1.
    for i in range(2, 10):
        factorials.append(factorials[i - 1] * i)
    number = 3
    while number < upper:
        sumF = 0
        i = number
        while i > 0:
            sumF += factorials[i % 10]
            if sumF > number:
                break  # already too large; skip the remaining digits
            i //= 10
        if sumF == number:
            yield number
        number += 1
# Full run over the default search bound.  The published Euler 34 answer
# is 145 + 40585 = 40730; NOTE(review): the generator above appears to
# tabulate 0! as 0 and to use "i %= 10" -- verify before trusting output.
print(sum(euler_34()))
def euler_list_34(upper=2540162):
    """List-comprehension form of Project Euler 34: sum the numbers in
    [3, upper] equal to the sum of the factorials of their digits.

    Bug fixes: the comprehension referenced an undefined name
    ``factorial`` (NameError; the table is ``factorials``) and the table
    started with 0! = 0 instead of 1.  The bound is now a parameter with
    the original value as default.
    """
    factorials = [1, 1]  # factorials[d] == d!; 0! == 1.
    for i in range(2, 10):
        factorials.append(factorials[i - 1] * i)
    return sum([num for num in range(3, upper + 1)
                if num == sum([factorials[int(ch)] for ch in str(num)])])
def rotations_35(number):
    """Return every digit rotation of *number*, the number itself last.

    If any rotation would lose a digit to a leading zero, return []
    instead -- such numbers cannot be circular primes (Euler 35).
    """
    digits = str(number)
    rotated = digits[1:] + digits[0]
    rotations = []
    for _ in range(len(digits)):
        value = int(rotated)
        if len(str(value)) != len(digits):
            # Leading zero collapsed the rotation: disqualify the number.
            return []
        rotations.append(value)
        rotated = rotated[1:] + rotated[0]
    return rotations
def euler_35_erathostenes(upper):
# Find Primes through Sieve
#numbers = [True]*(upper+1)
numbers = [i % 2 == 1 for i in range(upper+1)] # Only odd numbers are Primes
numbers[0]=False
numbers[1]=False
numbers[2]=True # Excepting 2
p = 3
while p <= upper:
if numbers[p]:
f = int(p)
n = f*p
while n <= upper:
numbers[n] = False
f += 1
n = f*p
p += 2
#circular = [2]
i = 3
count = 1
while i <= upper:
if numbers[i]:
| |
_shtools.SHVectorToCilm(self.coeffs[:, itaper])
if normalization == 'schmidt':
for l in range(self.lwin + 1):
coeffs[:, l, :l+1] *= _np.sqrt(2.0 * l + 1.0)
elif normalization == 'ortho':
coeffs *= _np.sqrt(4.0 * _np.pi)
if csphase == -1:
for m in range(self.lwin + 1):
if m % 2 == 1:
coeffs[:, :, m] = - coeffs[:, :, m]
return coeffs
def rotate(self, clat, clon, coord_degrees=True, dj_matrix=None,
           nwinrot=None):
        """
        Rotate the spherical-cap windows centered on the North pole to clat
        and clon, and save the spherical harmonic coefficients in the
        attribute coeffs.
        Usage
        -----
        x.rotate(clat, clon [coord_degrees, dj_matrix, nwinrot])
        Parameters
        ----------
        clat, clon : float
            Latitude and longitude of the center of the rotated spherical-cap
            localization windows (default in degrees).
        coord_degrees : bool, optional, default = True
            True if clat and clon are in degrees.
        dj_matrix : ndarray, optional, default = None
            The djpi2 rotation matrix computed by a call to djpi2.
        nwinrot : int, optional, default = (lwin+1)**2
            The number of best concentrated windows to rotate, where lwin is
            the spherical harmonic bandwidth of the localization windows.
        Description
        -----------
        This function will take the spherical-cap localization windows
        centered at the North pole (and saved in the attributes tapers and
        orders), rotate each function to the coordinate (clat, clon), and save
        the spherical harmonic coefficients in the attribute coeffs. Each
        column of coeffs contains a single window, and the coefficients are
        ordered according to the convention in SHCilmToVector.
        """
        self.coeffs = _np.zeros(((self.lwin + 1)**2, self.nwin))
        self.clat = clat
        self.clon = clon
        self.coord_degrees = coord_degrees
        if nwinrot is not None:
            self.nwinrot = nwinrot
        else:
            self.nwinrot = self.nwin
        # Euler angles for the rotation (always stored in radians).
        if self.coord_degrees:
            angles = _np.radians(_np.array([0., -(90. - clat), -clon]))
        else:
            angles = _np.array([0., -(_np.pi/2. - clat), -clon])
        if dj_matrix is None:
            if self.dj_matrix is None:
                # Compute and cache the rotation matrix on first use.
                self.dj_matrix = _shtools.djpi2(self.lwin + 1)
                dj_matrix = self.dj_matrix
            else:
                dj_matrix = self.dj_matrix
        if ((coord_degrees is True and clat == 90. and clon == 0.) or
                (coord_degrees is False and clat == _np.pi/2. and clon == 0.)):
            # Target is the North pole: no rotation needed, just repack
            # each taper into vector form.
            for i in range(self.nwinrot):
                coeffs = self._taper2coeffs(i)
                self.coeffs[:, i] = _shtools.SHCilmToVector(coeffs)
        else:
            coeffs = _shtools.SHRotateTapers(self.tapers, self.orders,
                                            self.nwinrot, angles, dj_matrix)
            self.coeffs = coeffs
def _coupling_matrix(self, lmax, nwin=None, weights=None):
        """Return the coupling matrix of the first nwin tapers.

        ``nwin`` and ``weights`` default to the instance attributes.

        Bug fix: the weighted branch previously passed ``self.weights``
        to SHMTCouplingMatrix, silently ignoring an explicitly supplied
        ``weights`` argument; it now forwards the resolved local value.
        """
        if nwin is None:
            nwin = self.nwin
        if weights is None:
            weights = self.weights
        if weights is None:
            return _shtools.SHMTCouplingMatrix(lmax, self.tapers**2, k=nwin)
        else:
            return _shtools.SHMTCouplingMatrix(lmax, self.tapers**2, k=nwin,
                                               taper_wt=weights)
def _multitaper_spectrum(self, clm, k, convention='power', unit='per_l',
                         clat=None, clon=None, coord_degrees=True,
                         lmax=None, taper_wt=None):
        """
        Return the multitaper spectrum estimate and standard error for an
        input SHCoeffs class instance.

        The first k windows are rotated to (clat, clon) unless
        coefficients for those coordinates are already cached, then the
        (optionally weighted) estimate is computed with
        SHMultiTaperMaskSE and converted to the requested ``unit``
        ('per_l' or 'per_lm') and ``convention`` ('power' or 'energy').
        """
        if lmax is None:
            lmax = clm.lmax
        if (clat is not None and clon is not None and clat == self.clat and
            clon == self.clon and coord_degrees is self.coord_degrees and
                k <= self.nwinrot):
            # use the already stored coeffs
            pass
        elif (clat is None and clon is None) and \
                (self.clat is not None and self.clon is not None and
                 k <= self.nwinrot):
            # use the already stored coeffs
            pass
        else:
            if clat is None:
                clat = self.clat
            if clon is None:
                clon = self.clon
            if (clat is None and clon is not None) or \
                    (clat is not None and clon is None):
                raise ValueError('clat and clon must both be input. ' +
                                 'clat = {:s}, clon = {:s}'
                                 .format(repr(clat), repr(clon)))
            if clat is None and clon is None:
                # No coordinates anywhere: default to the North pole.
                self.rotate(clat=90., clon=0., coord_degrees=True, nwinrot=k)
            else:
                self.rotate(clat=clat, clon=clon, coord_degrees=coord_degrees,
                            nwinrot=k)
        sh = clm.to_array(normalization='4pi', csphase=1, lmax=lmax)
        if taper_wt is None:
            mtse, sd = _shtools.SHMultiTaperMaskSE(sh, self.coeffs,
                                                   lmax=lmax, k=k)
        else:
            mtse, sd = _shtools.SHMultiTaperMaskSE(sh, self.coeffs, lmax=lmax,
                                                   k=k, taper_wt=taper_wt)
        if (unit == 'per_l'):
            pass
        elif (unit == 'per_lm'):
            # Divide by the number of coefficients per degree, 2l + 1.
            degree_l = _np.arange(len(mtse))
            mtse /= (2.0 * degree_l + 1.0)
            sd /= (2.0 * degree_l + 1.0)
        else:
            raise ValueError(
                "unit must be 'per_l' or 'per_lm'." +
                "Input value was {:s}".format(repr(unit)))
        if (convention == 'power'):
            return mtse, sd
        elif (convention == 'energy'):
            return mtse * 4.0 * _np.pi, sd * 4.0 * _np.pi
        else:
            raise ValueError(
                "convention must be 'power' or 'energy'." +
                "Input value was {:s}".format(repr(convention)))
def _multitaper_cross_spectrum(self, clm, slm, k, convention='power',
                               unit='per_l', clat=None, clon=None,
                               coord_degrees=True, lmax=None,
                               taper_wt=None):
        """
        Return the multitaper cross-spectrum estimate and standard error for
        two input SHCoeffs class instances.

        Mirrors _multitaper_spectrum, but expands both inputs (truncated
        to the smaller lmax) and estimates the cross-spectrum with
        SHMultiTaperMaskCSE.
        """
        if lmax is None:
            lmax = min(clm.lmax, slm.lmax)
        if (clat is not None and clon is not None and clat == self.clat and
            clon == self.clon and coord_degrees is self.coord_degrees and
                k <= self.nwinrot):
            # use the already stored coeffs
            pass
        elif (clat is None and clon is None) and \
                (self.clat is not None and self.clon is not None and
                 k <= self.nwinrot):
            # use the already stored coeffs
            pass
        else:
            if clat is None:
                clat = self.clat
            if clon is None:
                clon = self.clon
            if (clat is None and clon is not None) or \
                    (clat is not None and clon is None):
                raise ValueError('clat and clon must both be input. ' +
                                 'clat = {:s}, clon = {:s}'
                                 .format(repr(clat), repr(clon)))
            if clat is None and clon is None:
                # No coordinates anywhere: default to the North pole.
                self.rotate(clat=90., clon=0., coord_degrees=True, nwinrot=k)
            else:
                self.rotate(clat=clat, clon=clon, coord_degrees=coord_degrees,
                            nwinrot=k)
        sh1 = clm.to_array(normalization='4pi', csphase=1, lmax=lmax)
        sh2 = slm.to_array(normalization='4pi', csphase=1, lmax=lmax)
        if taper_wt is None:
            mtse, sd = _shtools.SHMultiTaperMaskCSE(sh1, sh2, self.coeffs,
                                                    lmax1=lmax, lmax2=lmax,
                                                    k=k)
        else:
            mtse, sd = _shtools.SHMultiTaperMaskCSE(sh1, sh2, self.coeffs,
                                                    lmax1=lmax, lmax2=lmax,
                                                    k=k, taper_wt=taper_wt)
        if (unit == 'per_l'):
            pass
        elif (unit == 'per_lm'):
            # Divide by the number of coefficients per degree, 2l + 1.
            degree_l = _np.arange(len(mtse))
            mtse /= (2.0 * degree_l + 1.0)
            sd /= (2.0 * degree_l + 1.0)
        else:
            raise ValueError(
                "unit must be 'per_l' or 'per_lm'." +
                "Input value was {:s}".format(repr(unit)))
        if (convention == 'power'):
            return mtse, sd
        elif (convention == 'energy'):
            return mtse * 4.0 * _np.pi, sd * 4.0 * _np.pi
        else:
            raise ValueError(
                "convention must be 'power' or 'energy'." +
                "Input value was {:s}".format(repr(convention)))
def _biased_spectrum(self, spectrum, k, convention='power', unit='per_l',
                     **kwargs):
        """
        Calculate the multitaper (cross-) spectrum expectation of a function
        localized by spherical cap windows.

        ``spectrum`` is the global input spectrum, ``k`` the number of
        windows; extra keyword arguments are forwarded to SHBiasK.
        """
        # The equation is not modified if the in- and out- spectra are power
        # or energy. However, the convention can not be l2norm, which depends
        # upon the normalization of the coefficients.
        if (convention != 'power' and convention != 'energy'):
            raise ValueError(
                "convention must be 'power' or 'energy'." +
                "Input value was {:s}".format(repr(convention)))
        if (unit == 'per_l'):
            outspectrum = _shtools.SHBiasK(self.tapers, spectrum, k=k,
                                           **kwargs)
        elif (unit == 'per_lm'):
            # Convert per-lm to per-l, apply the bias, then convert back.
            degree_l = _np.arange(len(spectrum))
            temp = spectrum * (2.0 * degree_l + 1.0)
            outspectrum = _shtools.SHBiasK(self.tapers, temp, k=k,
                                           **kwargs)
            outspectrum /= (2.0 * degree_l + 1.0)
        else:
            raise ValueError(
                "unit must be 'per_l' or 'per_lm'." +
                "Input value was {:s}".format(repr(unit)))
        return outspectrum
def _info(self):
        """Print the instance's repr, i.e. a quick configuration summary."""
        print('{0!r}'.format(self))
def __repr__(self):
        """Return a human-readable summary of the window configuration."""
        pieces = ['kind = {:s}\n'.format(repr(self.kind))]
        if self.theta_degrees:
            pieces.append('theta = {:f} degrees\n'.format(self.theta))
        else:
            # NB: reproduces the historical output, which carries no
            # trailing newline in the radians case.
            pieces.append('theta = {:f} radians'.format(self.theta))
        pieces.append('lwin = {:d}\n'
                      'nwin = {:d}\n'
                      'nwinrot = {:s}\n'
                      'shannon = {:e}\n'
                      'area (radians) = {:e}\n'
                      .format(self.lwin, self.nwin, repr(self.nwinrot),
                              self.shannon, self.area))
        if self.clat is not None:
            unit = 'degrees' if self.coord_degrees else 'radians'
            pieces.append('clat = {:f} {:s}\n'.format(self.clat, unit))
        else:
            pieces.append('clat is not specified\n')
        if self.clon is not None:
            unit = 'degrees' if self.coord_degrees else 'radians'
            pieces.append('clon = {:f} {:s}\n'.format(self.clon, unit))
        else:
            pieces.append('clon is not specified\n')
        if self.dj_matrix is not None:
            pieces.append('dj_matrix is stored\n')
        else:
            pieces.append('dj_matrix is not stored\n')
        if self.weights is None:
            pieces.append('Taper weights are not set')
        else:
            pieces.append('Taper weights are set')
        return ''.join(pieces)
class SHWindowMask(SHWindow):
"""
Class for localization windows concentrated within a specified mask and
for a given spherical harmonic bandwidth.
"""
    @staticmethod
    def istype(kind):
        """Return True if ``kind`` names this window class ('mask')."""
        return kind == 'mask'
    def __init__(self, tapers, eigenvalues, weights, area, copy=True):
        # Each column of ``tapers`` holds one localization window as an
        # (lwin+1)**2 vector of spherical-harmonic coefficients, so the
        # bandwidth follows from the row count.
        self.kind = 'mask'
        self.lwin = _np.sqrt(tapers.shape[0]).astype(int) - 1
        self.nwin = tapers.shape[1]
        if copy:
            # Defensive copies so the caller's arrays stay untouched.
            self.weights = weights
            self.tapers = _np.copy(tapers)
            self.eigenvalues = _np.copy(eigenvalues)
        else:
            self.weights = weights
            self.tapers = tapers
            self.eigenvalues = eigenvalues
        self.area = area
| |
label smoothing
"""
return losses.binary_crossentropy(y_true*0.9, y_pred)
if self.discriminator_train_model is None:
if self.gpus > 1:
self.discriminator_train_model = multi_gpu_model(self.discriminator(), gpus=self.gpus)
else:
self.discriminator_train_model = self.discriminator()
# set trainable flag and recompile
self.generator().trainable = False
self.classifier().trainable = False
self.discriminator().trainable = True
self.lcnn.extractor().trainable = False
self.discriminator_train_model.compile(optimizer=Adam(lr=0.0001), loss=loss_disc, metrics=['binary_accuracy'])
callbacks = []
callbacks.append(TensorBoard(log_dir=out_dir+'logs/discriminator/'))
callbacks.append(self.SaveWeightsCallback(target_models=[self.discriminator()], out_dir=out_dir+'weights/', period=out_period))
history = self.discriminator_train_model.fit_generator(train_gen, steps_per_epoch=steps_per_epoch,
epochs=epochs+self.disc_current_epochs, callbacks=callbacks,
workers=0, validation_data=valid_gen, validation_steps=100,
shuffle=False, initial_epoch=self.disc_current_epochs)
self.disc_current_epochs += epochs
return history
    def generate(self, inputs):
        """
        generate frontal image.

        Runs the generator network on ``inputs`` and rescales every image
        output from [0, 1] floats to the uint8 range; the fc2 feature
        output of the network is discarded.

        NOTE(review): ``self.generator`` is called as a method below, so
        the ``self.generator is None`` guard can never be true -- it
        presumably should test an internal attribute; confirm that
        ``_init_generator`` is reachable.
        """
        if self.generator is None:
            self._init_generator()
        img128, img64, img32, fc2, front_leye_img, front_reye_img, front_nose_img, front_mouth_img\
            = self.generator().predict(inputs)
        # Scale [0, 1] float images up to uint8 [0, 255].
        img128 = (img128*np.iinfo(np.uint8).max).astype(np.uint8)
        img64 = (img64*np.iinfo(np.uint8).max).astype(np.uint8)
        img32 = (img32*np.iinfo(np.uint8).max).astype(np.uint8)
        front_leye_img = (front_leye_img*np.iinfo(np.uint8).max).astype(np.uint8)
        front_reye_img = (front_reye_img*np.iinfo(np.uint8).max).astype(np.uint8)
        front_nose_img = (front_nose_img*np.iinfo(np.uint8).max).astype(np.uint8)
        front_mouth_img = (front_mouth_img*np.iinfo(np.uint8).max).astype(np.uint8)
        return img128, img64, img32, front_leye_img, front_reye_img, front_nose_img, front_mouth_img
    def rotate_parts(self, inputs):
        """
        generate rotated part images.

        Runs the parts-rotator network and returns the four rotated part
        images (left eye, right eye, nose, mouth) rescaled from [0, 1]
        floats to uint8; the interleaved secondary outputs are discarded.
        """
        out_leyes, _, out_reyes, _, out_noses, _, out_mouthes, _ = self.parts_rotator().predict(inputs)
        out_leyes = (out_leyes*np.iinfo(np.uint8).max).astype(np.uint8)
        out_reyes = (out_reyes*np.iinfo(np.uint8).max).astype(np.uint8)
        out_noses = (out_noses*np.iinfo(np.uint8).max).astype(np.uint8)
        out_mouthes = (out_mouthes*np.iinfo(np.uint8).max).astype(np.uint8)
        return out_leyes, out_reyes, out_noses, out_mouthes
    def discriminate(self, frontal_img):
        """
        discriminate frontal image.
        Returns: discriminated score map, converted to uint8 and resized
        back to the input image's height and width.

        NOTE(review): as in ``generate``, ``self.discriminator`` is a
        method, so the ``is None`` guard looks unreachable -- verify.
        The print below appears to be leftover debug output.
        """
        if self.discriminator is None:
            self._init_discriminator()
        # predict() needs a batch axis; take the single result back out.
        out_img = self.discriminator().predict(frontal_img[np.newaxis, ...])[0]
        out_img = (out_img*np.iinfo(np.uint8).max).astype(np.uint8)
        print('out_img',out_img)
        out_img = cv2.resize(out_img, (frontal_img.shape[1], frontal_img.shape[0]))
        return out_img
class LightCNN():
    class SaveWeightsCallback(Callback):
        """Keras callback that saves the weights of ``target_models`` every
        ``period`` epochs, encoding epoch, lr, loss and val_acc in the name.

        NOTE(review): the save path concatenates ``out_dir`` with
        'weights/lcnn_finetune/...' -- confirm the expected out_dir layout.
        """
        def __init__(self, target_models, out_dir, period):
            self.target_models = target_models
            self.out_dir = out_dir
            self.period = period
        def on_epoch_end(self, epoch, logs):
            # ``epoch`` is 0-based; save after every `period`-th completed epoch.
            if (epoch + 1) % self.period == 0:
                for target_model in self.target_models:
                    target_model.save_weights(self.out_dir + 'weights/lcnn_finetune/epoch{epoch:04d}_lr{lr:.5f}_loss{loss:.3f}_valacc{val_acc:.3f}.hdf5'.format(epoch=epoch + 1, lr=K.get_value(self.model.optimizer.lr), loss=logs['loss'], val_acc=logs['val_acc']), overwrite=True)
def __init__(self, classes=None, extractor_type='29v2', extractor_weights=None, classifier_weights=None, in_size_hw=(128, 128)):
"""
initialize light cnn network with given weights file. if weights file is None, the weights are initialized by default initializer.
Args:
classes (int): number of output classes. required when training or using classifier. not required when using only exractor.
extractor_type (str): string of network type. must be one of the following strings "29v2", "29", "9".
extractor_weights (str): trained extractor weights file path. it is used to resume training. not required when train from scratch.
classifier_weights (str): trained classifier weights file path. it is used to resume training. not required when training from scratch or only using extractor.
in_size_hw (tuple): height and width of input image.
"""
self.in_size_hw = in_size_hw
self.num_classes = classes
self.extractor_weights = extractor_weights
self.classifier_weights = classifier_weights
self._extractor = None
self._classifier = None
# if extractor_weights is not None, attempt to resume current epoch number from file name.
if self.extractor_weights is not None:
try:
self.current_epochs = int(re.match(r'.+[_h]([0-9]+)\.hdf5', self.extractor_weights).groups()[0])
except:
print('trained epochs was not found in extractor_weights_file name. use 0 as current_epochs.')
self.current_epochs = 0
else:
self.current_epochs = 0
self.extractor_type = extractor_type
    def extractor(self):
        """
        getter for singleton extractor.

        Builds the network on first access according to
        ``self.extractor_type`` ('29v2', '29' or '9') and, if given,
        loads ``self.extractor_weights``.

        NOTE(review): an unrecognised extractor_type leaves
        ``self._extractor`` as None (and load_weights would fail on it);
        confirm callers validate the type beforehand.
        """
        if self._extractor is None:
            if self.extractor_type == '29v2':
                self._extractor = self.build_extractor_29layers_v2(name='extract29v2', block=self._res_block, layers=[1, 2, 3, 4])
            elif self.extractor_type == '29':
                self._extractor = self.build_extractor_29layers(name='extract29', block=self._res_block, layers=[1, 2, 3, 4])
            elif self.extractor_type == '9':
                self._extractor = self.build_extractor_9layers(name='extract9')
            if self.extractor_weights is not None:
                self._extractor.load_weights(self.extractor_weights)
        return self._extractor
    def classifier(self):
        """
        getter for singleton classifier.

        Builds the classification head on first access and, if given,
        loads ``self.classifier_weights``.
        """
        if self._classifier is None:
            self._classifier = self.build_classifier(name='classify')
            if self.classifier_weights is not None:
                self._classifier.load_weights(self.classifier_weights)
        return self._classifier
    def _mfm(self, X, name, out_channels, kernel_size=3, strides=1, dense=False):
        """
        private func for creating mfm layer.

        Max-Feature-Map: produce 2*out_channels features (Dense when
        ``dense`` else Conv2D), split them into two halves along the last
        axis and keep the element-wise maximum.
        Todo:
            * maybe more natural if implemented as custom layer like the comment out code at the bottom of this file.
        """
        if dense:
            X = Dense(out_channels*2, name = name + '_dense1', kernel_regularizer=regularizers.l2(0.0005))(X)
        else:
            X = Conv2D(out_channels*2, name = name + '_conv2d1', kernel_size=kernel_size, kernel_regularizer=regularizers.l2(0.0005), strides=strides, padding='same')(X)
        # Split the doubled channels and take the element-wise maximum.
        X = Maximum()([Lambda(lambda x, c: x[..., :c], arguments={'c':out_channels})(X), Lambda(lambda x, c: x[..., c:], arguments={'c':out_channels})(X)])
        return X
def _group(self, X, name, in_channels, out_channels, kernel_size, strides):
X = self._mfm(X, name = name + '_mfm1', out_channels=in_channels, kernel_size=1, strides=1, dense=False)
X = self._mfm(X, name = name + '_mfm2', out_channels=out_channels, kernel_size=kernel_size, strides=strides)
return X
def _res_block(self, X, name, out_channels):
"""
private func for creating residual block with mfm layers.
"""
X_shortcut = X
X = self._mfm(X, name = name + '_mfm1', out_channels=out_channels, kernel_size=3, strides=1)
X = self._mfm(X, name = name + '_mfm2', out_channels=out_channels, kernel_size=3, strides=1)
X = Add()([X, X_shortcut])
return X
def _make_layer(self, X, name, block, num_blocks, out_channels):
"""
private func for creating multiple blocks. block is usualy res_block.
"""
for i in range(0, num_blocks):
X = block(X, name = name + '_block{}'.format(i), out_channels=out_channels)
return X
    def build_extractor_9layers(self, name):
        """Build the 9-layer LightCNN extractor.

        Maps a single-channel image of ``self.in_size_hw`` to a 256-d
        feature vector plus the final pooled feature map.

        NOTE(review): ``feat_vec`` is taken from ``Flatten()(X)`` before
        the final pooling while the 29-layer variants flatten the pooled
        map -- confirm this asymmetry is intentional.
        """
        in_img = Input(shape=(*self.in_size_hw, 1))
        X = self._mfm(in_img, name = name + '_mfm1', out_channels=48, kernel_size=5, strides=1)
        X = MaxPooling2D(pool_size=2, padding='same')(X)
        X = self._group(X, name = name + '_group1', in_channels=48, out_channels=96, kernel_size=3, strides=1)
        X = MaxPooling2D(pool_size=2, padding='same')(X)
        X = self._group(X, name = name + '_group2', in_channels=96, out_channels=192, kernel_size=3, strides=1)
        X = MaxPooling2D(pool_size=2, padding='same')(X)
        X = self._group(X, name = name + '_group3', in_channels=192, out_channels=128, kernel_size=3, strides=1)
        X = self._group(X, name = name + '_group4', in_channels=128, out_channels=128, kernel_size=3, strides=1)
        feat_map = MaxPooling2D(pool_size=2, padding='same')(X)
        feat_vec = Dense(256, name = name + '_dense1', kernel_regularizer=regularizers.l2(0.0005))(Flatten()(X))
        ret_extractor = CloudableModel(inputs=in_img, outputs=[feat_vec, feat_map], name=name)
        #ret_extractor.summary()
        return ret_extractor
    def build_extractor_29layers(self, name, block, layers):
        """Build the 29-layer LightCNN extractor.

        ``block`` is the residual block factory (usually _res_block) and
        ``layers`` gives the number of blocks per stage.  Returns a model
        mapping a single-channel image to a 256-d mfm feature vector plus
        the final pooled feature map.
        """
        in_img = Input(shape=(*self.in_size_hw, 1))
        X = self._mfm(in_img, name = name + '_mfm1', out_channels=48, kernel_size=5, strides=1)
        X = MaxPooling2D(pool_size=2, padding='same')(X)
        X = self._make_layer(X, name = name + '_layers1', block=block, num_blocks=layers[0], out_channels=48)
        X = self._group(X, name = name + '_group1', in_channels=48, out_channels=96, kernel_size=3, strides=1)
        X = MaxPooling2D(pool_size=2, padding='same')(X)
        X = self._make_layer(X, name = name + '_layers2', block=block, num_blocks=layers[1], out_channels=96)
        X = self._group(X, name = name + '_group2', in_channels=96, out_channels=192, kernel_size=3, strides=1)
        X = MaxPooling2D(pool_size=2, padding='same')(X)
        X = self._make_layer(X, name = name + '_layers3', block=block, num_blocks=layers[2], out_channels=192)
        X = self._group(X, name = name + '_group3', in_channels=192, out_channels=128, kernel_size=3, strides=1)
        X = self._make_layer(X, name = name + '_layers4', block=block, num_blocks=layers[3], out_channels=128)
        X = self._group(X, name = name + '_group4', in_channels=128, out_channels=128, kernel_size=3, strides=1)
        feat_map = MaxPooling2D(pool_size=2, padding='same')(X)
        feat_vec = self._mfm(Flatten()(feat_map), name = name + '_mfm2', out_channels=256, dense=True)
        ret_extractor = CloudableModel(inputs=in_img, outputs=[feat_vec, feat_map], name=name)
        #ret_extractor.summary()
        return ret_extractor
    def build_extractor_29layers_v2(self, name, block, layers):
        """Build the 29-layer v2 LightCNN extractor.

        Same topology as build_extractor_29layers, but every pooling step
        averages MaxPooling2D with AveragePooling2D, and the feature
        vector is a plain Dense(256) of the flattened pooled map.
        """
        in_img = Input(shape=(*self.in_size_hw, 1))
        X = self._mfm(in_img, name = name + '_mfm1', out_channels=48, kernel_size=5, strides=1)
        X = Average()([MaxPooling2D(pool_size=2, padding='same')(X), AveragePooling2D(pool_size=2, padding='same')(X)])
        X = self._make_layer(X, name = name + '_layers1', block=block, num_blocks=layers[0], out_channels=48)
        X = self._group(X, name = name + '_group1', in_channels=48, out_channels=96, kernel_size=3, strides=1)
        X = Average()([MaxPooling2D(pool_size=2, padding='same')(X), AveragePooling2D(pool_size=2, padding='same')(X)])
        X = self._make_layer(X, name = name + '_layers2', block=block, num_blocks=layers[1], out_channels=96)
        X = self._group(X, name = name + '_group2', in_channels=96, out_channels=192, kernel_size=3, strides=1)
        X = Average()([MaxPooling2D(pool_size=2, padding='same')(X), AveragePooling2D(pool_size=2, padding='same')(X)])
        X = self._make_layer(X, name = name + '_layers3', block=block, num_blocks=layers[2], out_channels=192)
        X = self._group(X, name = name + '_group3', in_channels=192, out_channels=128, kernel_size=3, strides=1)
        X = self._make_layer(X, name = name + '_layers4', block=block, num_blocks=layers[3], out_channels=128)
        X = self._group(X, name = name + '_group4', in_channels=128, out_channels=128, kernel_size=3, strides=1)
        feat_map = Average()([MaxPooling2D(pool_size=2, padding='same')(X), AveragePooling2D(pool_size=2, padding='same')(X)])
        feat_vec = Dense(256, name = name + '_dense1', kernel_regularizer=regularizers.l2(0.0005))(Flatten()(feat_map))
        ret_extractor = CloudableModel(inputs=in_img, outputs=[feat_vec, feat_map], name=name)
        #ret_extractor.summary()
        return ret_extractor
    def build_classifier(self, name):
        """Build the softmax classification head: 256-d feature in,
        ``self.num_classes`` probabilities out.

        NOTE(review): the dropout rate is fixed at 0.7 here; confirm
        whether the training-time ``classifier_dropout`` option is meant
        to feed into this builder.
        """
        in_feat = Input(shape=(256,))
        X = Dropout(0.7)(in_feat)
        X = Dense(500, activation='relu', name = name + '_dense1', kernel_regularizer=regularizers.l2(0.005))(X)
        X = Dropout(0.7)(X)
        clas = Dense(self.num_classes, activation='softmax', name = name + '_dense2', use_bias=False , kernel_regularizer=regularizers.l2(0.005))(X)
        ret_classifier = CloudableModel(inputs=in_feat, outputs=clas, name=name)
        #ret_classifier.summary()
        return ret_classifier
def train(self, train_gen, valid_gen=None, optimizer=SGD(lr=0.001, momentum=0.9, decay=0.00004, nesterov=True),
classifier_dropout=0.7, steps_per_epoch=100, validation_steps=100,
epochs=1, out_dir='../out/', out_period=1, fix_extractor=False):
"""
train extractor and classifier.
Args:
train_gen (generator): train data generator provided by celeb_gen.
valid_gen (generator): valid data generator provided by celeb_gen.
optimizer (Optimizer): keras optimizer used to train.
classifier_dropout (float): dropout ratio for training classifier.
steps_per_epoch (int): steps for each epoch.
validation_steps (int): steps for validation on the end of each epoch.
epochs (int): epochs to train.
out_prefix (str): prefix str for output weights file.
out_period (int): interval epochs | |
issuing the following command::
matplotlib.pyplot.style.use('default')
Raises
------
aspecd.exceptions.MissingSaverError
Raised when no saver is provided when trying to save
"""
    def __init__(self):
        # Name defaults always to the full class name, don't change!
        self.name = aspecd.utils.full_class_name(self)
        # User-settable options evaluated during plot().
        self.parameters = {
            'show_legend': False,
            'show_zero_lines': True
        }
        self.properties = PlotProperties()
        self.description = 'Abstract plotting step'
        self.figure = None  # matplotlib figure; created in plot()
        self.axes = None    # matplotlib axes; created in plot()
        self.filename = ''  # target file for saving, if any
        self.caption = Caption()
        self.legend = None  # presumably set while plotting -- see plot()
        self.style = ''     # name of a matplotlib style sheet; '' = none
    @property
    def fig(self):
        """Short hand for :attr:`figure`."""
        return self.figure
    @property
    def ax(self):  # pylint: disable=invalid-name
        """Short hand for :attr:`axes`."""
        return self.axes
    def plot(self):
        """Perform the actual plotting.

        The actual plotting should be implemented within the private
        method :meth:`_create_plot`.  The fixed sequence here is: apply
        the style sheet, create figure and axes (unless pre-assigned),
        draw, apply the configured properties, then legend and zero
        lines.
        """
        self._set_style()
        self._create_figure_and_axes()
        self._create_plot()
        self.properties.apply(plotter=self)
        self._set_legend()
        self._add_zero_lines()
    # noinspection PyUnusedLocal
    @staticmethod
    def applicable(dataset):  # pylint: disable=unused-argument
        """Check whether plot is applicable to the given dataset.

        Returns `True` by default and needs to be implemented in classes
        inheriting from Plotter according to their needs.

        A typical example would be a 2D plot applied to a 1D dataset that will
        most probably not be possible/sensible.

        Returns
        -------
        applicable : :class:`bool`
            `True` if successful, `False` otherwise.
        """
        return True
def _set_style(self):
if self.style:
if self.style not in plt.style.available + ['default', 'xkcd']:
message = 'Cannot find matplotlib style "{style}".'.format(
style=self.style)
raise aspecd.exceptions.StyleNotFoundError(message=message)
if self.style == 'xkcd':
plt.xkcd()
else:
plt.style.use(self.style)
def _create_figure_and_axes(self):
"""Create figure and axes and assign to attributes.
Figure and axes will only be created upon calling the method
:meth:`plot`. If you need to change the way figure and axes are
created, override this method.
.. note::
Figure and axes will only be created if both are not existing
already. Therefore, if you like to use a plotter to plot to an
existing axis, set its figure and axes properties before calling
the :meth:`plot` method.
If you do so, make sure to set *both*, figure and axes
properties, as failing to set a valid figure property will cause
matplotlib to throw exceptions.
In any case, figure and axes need to be assigned to the
:attr:`figure` and :attr:`axes` properties of the plotter class.
"""
if not self.figure and not self.axes:
mpl.interactive(False) # Mac OS X: prevent plot window from opening
self.figure, self.axes = plt.subplots()
    def _create_plot(self):
        """Perform the actual plotting of the data of the dataset(s).

        The implementation of the actual plotting goes in here in all
        classes inheriting from Plotter. This method is automatically
        called by :meth:`plot` after some background checks.

        The reference to the figure object is stored in :attr:`figure`. By
        default, the backend is set to non-interactive, and to actually
        display the figure, you would need to call :meth:`show` on the
        figure object stored in :attr:`figure`.

        Plotting should be done using a method of the
        :class:`matplotlib.axes.Axes` class accessible via the :attr:`axes`
        attribute of the plotter.
        """
        # Intentionally a no-op: concrete plotters override this method.
def save(self, saver=None):
"""Save the plot to a file.
The actual saving is postponed to an object of class
:class:`aspecd.plotting.Saver` that is submitted as parameter.
Parameters
----------
saver : `aspecd.plotting.Saver`
Saver handling the actual saving of the plot
Returns
-------
saver : `aspecd.plotting.Saver`
Saver used to save the plot
Raises
------
aspecd.exceptions.MissingSaverError
Raised if no Saver is provided as parameter.
"""
if not saver:
raise aspecd.exceptions.MissingSaverError
saver.save(self)
self.filename = saver.filename
return saver
# @staticmethod
def _create_axis_label_string(self, axis):
"""Create axis label conforming to conventions used in science
Here, the quantity is set in italics, and the unit in upright font,
with a slash separating both, quantity and unit. In case the
quantity contains spaces, these will be escaped thus that they are
contained in the final string (using the math mode of matplotlib).
.. note::
It might be worth discussing whether a proper axis label
conforming to scientific conventions sets the symbol in italics,
but not the quantity (name) as such. Therefore, a full label might
look like this: "magnetic field, B_0 / mT" with the term
"magnetic field" set in upright font, and only the symbol,
here $B_0$, in italics. For this, a property for the symbol has
been added to the axis class.
This method is called automatically and indirectly by :meth:`plot`.
If you ever need to change the appearance of your axes labels,
override this method in a child class.
Returns
-------
label: :class:`str`
label for the axis
"""
label = ''
if axis.quantity:
if self.style == 'xkcd':
label = axis.quantity
else:
label = '$' + axis.quantity.replace(' ', '\\ ') + '$'
if axis.unit:
label += ' / ' + axis.unit
return label
def _set_legend(self):
if self.parameters['show_legend']:
self.legend = self.axes.legend(**self.properties.legend.to_dict())
def _add_zero_lines(self):
if self.parameters['show_zero_lines']:
if isinstance(self.axes, list):
for axes in self.axes:
if axes.get_ylim()[0] <= 0 <= axes.get_ylim()[1]:
axes.axhline(**self.properties.zero_lines.to_dict(),
zorder=1)
if axes.get_xlim()[0] <= 0 <= axes.get_xlim()[1]:
axes.axvline(**self.properties.zero_lines.to_dict(),
zorder=1)
else:
if self.axes.get_ylim()[0] <= 0 <= self.axes.get_ylim()[1]:
self.axes.axhline(**self.properties.zero_lines.to_dict(),
zorder=1)
if self.axes.get_xlim()[0] <= 0 <= self.axes.get_xlim()[1]:
self.axes.axvline(**self.properties.zero_lines.to_dict(),
zorder=1)
class SinglePlotter(Plotter):
"""Base class for plots of single datasets.
Each class actually plotting data of a dataset should inherit from this
class. Furthermore, all parameters, implicit and explicit, necessary to
perform the plot, should eventually be stored in the property
:attr:`parameters` (currently a dictionary).
There are two concrete classes available for conveniently performing
plots of single datasets:
* :class:`aspecd.plotting.SinglePlotter1D`
1D plots, such as line, scatter, log, semilog
* :class:`aspecd.plotting.SinglePlotter2D`
2D plots, such as contour, image
To perform the plot, call the :meth:`plot` method of the dataset the plot
should be performed for, and provide a reference to the actual plotter
object to it.
Further things that need to be changed upon inheriting from this class
are the string stored in :attr:`description`, being basically a one-liner.
The actual implementation of the plotting is done in the private method
:meth:`_create_plot` that in turn gets called by :meth:`plot`
which is called by the :meth:`aspecd.dataset.Dataset.plot` method of the
dataset object.
Attributes
----------
properties : :class:`aspecd.plotting.SinglePlotProperties`
Properties of the plot, defining its appearance
dataset : :class:`aspecd.dataset.Dataset`
Dataset the plotting should be done for
drawing : :class:`matplotlib.artist.Artist`
Actual graphical representation of the data
Raises
------
aspecd.exceptions.MissingDatasetError
Raised when no dataset exists to act on
aspecd.exceptions.NotApplicableToDatasetError
Raised when processing step is not applicable to dataset
"""
    def __init__(self):
        super().__init__()
        # Replace generic plot properties with single-dataset-specific ones
        self.properties = SinglePlotProperties()
        # Dataset to plot; assigned in :meth:`plot` or set by the caller
        self.dataset = None
        # Matplotlib artist representing the data; set by concrete subclasses
        self.drawing = None
        self.description = 'Abstract plotting step for single dataset'
    # pylint: disable=arguments-differ
    def plot(self, dataset=None, from_dataset=False):
        """Perform the actual plotting on the given dataset.

        If no dataset is set as property in the object, the method will
        raise a respective exception. The Dataset object :meth:`plot` method
        always assigns its dataset as the respective dataset attribute of
        the plotter class.

        The actual plotting should be implemented within the non-public
        method :meth:`_create_plot`. Besides that, the applicability of the
        plotting to the given dataset will be checked automatically. These
        checks should be implemented in the method :meth:`applicable`.

        Note that the axis labels are added automatically. If you ever need
        to change the handling or appearance of your axis labels, you may
        want to override the corresponding methods :meth:`_set_axes_labels`
        and :meth:`_create_axis_label_string`, respectively.

        Parameters
        ----------
        dataset : :class:`aspecd.dataset.Dataset`
            dataset to perform plot for

        from_dataset : `boolean`
            whether we are called from within a dataset

            Defaults to "False" and shall never be set manually.

        Returns
        -------
        dataset : :class:`aspecd.dataset.Dataset`
            dataset plot has been performed for

        Raises
        ------
        aspecd.exceptions.NotApplicableToDatasetError
            Raised when plotting is not applicable to dataset
        aspecd.exceptions.MissingDatasetError
            Raised when no dataset exists to act on
        """
        self._assign_dataset(dataset)
        # Either round-trips through the dataset (so the plot is recorded
        # there) or performs the actual plotting -- see _call_from_dataset.
        self._call_from_dataset(from_dataset)
        return self.dataset
def create_history_record(self):
"""
Create history record to be added to the dataset.
Usually, this method gets called from within the
:meth:`aspecd.dataset.Dataset.plot` method of the
:class:`aspecd.dataset.Dataset` class and ensures the history of
each plotting step to get written properly.
Returns
-------
history_record : :class:`aspecd.history.PlotHistoryRecord`
history record for plotting step
"""
history_record = \
aspecd.history.PlotHistoryRecord(package=self.dataset.package_name)
history_record.plot = aspecd.history.SinglePlotRecord(plotter=self)
history_record.plot.preprocessing = copy.deepcopy(
self.dataset.history)
return history_record
def _assign_dataset(self, dataset):
if not dataset:
if not self.dataset:
raise aspecd.exceptions.MissingDatasetError
else:
self.dataset = dataset
    def _call_from_dataset(self, from_dataset):
        # When invoked directly, delegate to the dataset so the plot gets
        # recorded there; the dataset calls back with from_dataset=True and
        # the actual plotting then happens in the else branch below.
        if not from_dataset:
            self.dataset.plot(self)
        else:
            # Order matters: check applicability before plotting, label and
            # style only after the plot exists.
            self._check_applicability()
            super().plot()
            self._set_axes_labels()
            self.properties.apply(plotter=self)
| |
<filename>bespin_api_v2/tests_api.py<gh_stars>0
import json
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from data.tests_api import UserLogin
from data.models import Workflow, WorkflowVersion, WorkflowConfiguration, JobStrategy, ShareGroup, JobFlavor, \
JobSettings, CloudSettingsOpenStack, VMProject, JobFileStageGroup, DDSUserCredential, DDSEndpoint, Job, \
JobRuntimeK8s, LandoConnection, JobRuntimeStepK8s, EmailMessage, EmailTemplate, WorkflowVersionToolDetails
from data.tests_models import create_vm_job_settings
from bespin_api_v2.jobtemplate import STRING_VALUE_PLACEHOLDER, INT_VALUE_PLACEHOLDER, \
REQUIRED_ERROR_MESSAGE, PLACEHOLDER_ERROR_MESSAGE
from mock import patch, Mock
class AdminCreateListRetrieveMixin(object):
    """
    Many of our Admin models are CreateListRetrieveModelViewSet subclasses, therefore
    most of the API tests follow the same pattern. This base class provides test for the standard behaviors.

    Subclasses must set :attr:`BASE_NAME` and :attr:`MODEL_CLS` and implement
    :meth:`create_model_object`, :meth:`build_post_data` and
    :meth:`check_single_response`.
    """
    # Override these variables and methods in implementation
    BASE_NAME = None  # Name of the base_view from urls, e.g. 'v2-workflowversiontooldetails'
    MODEL_CLS = None  # Name of the model class

    def create_model_object(self):
        """Create and return a model instance used by the list/retrieve tests."""
        # Bug fix: ``raise NotImplemented(...)`` raised a TypeError, because
        # NotImplemented is a constant (for rich comparisons), not an exception
        # class. NotImplementedError is the correct exception to raise here.
        raise NotImplementedError('Override create_model_object to use this base class')

    def build_post_data(self):
        """Return the dict of POST data used by the create test."""
        raise NotImplementedError('Override build_post_data to use this base class')

    def check_single_response(self, model_object, response_data):
        """Assert that *response_data* correctly serializes *model_object*."""
        raise NotImplementedError('Override check_single_response to use this base class')

    # May override
    def check_list_response(self, model_object, response_data):
        self.assertEqual(len(response_data), 1, 'Should have one item as one item was created')
        self.check_single_response(model_object, response_data[0])

    # Do not override
    def list_url(self):
        """URL of the list endpoint derived from BASE_NAME."""
        return reverse('{}-list'.format(self.BASE_NAME))

    def object_url(self, pk):
        """URL of the detail endpoint for primary key *pk*."""
        return '{}{}/'.format(self.list_url(), pk)

    def get_model_object(self, pk):
        """Fetch the model instance with primary key *pk* from the database."""
        return self.MODEL_CLS.objects.get(pk=pk)

    def test_list_fails_unauthenticated(self):
        self.user_login.become_unauthorized()
        url = self.list_url()
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_list_fails_not_admin_user(self):
        self.user_login.become_normal_user()
        url = self.list_url()
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_list_with_admin_user(self):
        model_object = self.create_model_object()
        self.user_login.become_admin_user()
        url = self.list_url()
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.check_list_response(model_object, response.data)

    def test_retrieve_with_admin_user(self):
        model_object = self.create_model_object()
        self.user_login.become_admin_user()
        url = self.object_url(model_object.id)
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.check_single_response(model_object, response.data)

    def test_create_with_admin_user(self):
        self.user_login.become_admin_user()
        url = self.list_url()
        response = self.client.post(url, format='json', data=self.build_post_data())
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        model_object = self.get_model_object(response.data['id'])
        self.check_single_response(model_object, response.data)

    def test_put_fails_with_admin_user(self):
        self.user_login.become_admin_user()
        url = self.object_url('placeholder-id')
        response = self.client.put(url, format='json', data={})
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_delete_fails_with_admin_user(self):
        self.user_login.become_admin_user()
        url = self.object_url('placeholder-id')
        response = self.client.delete(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class AdminWorkflowViewSetTestCase(APITestCase, AdminCreateListRetrieveMixin):
    """Exercise the admin workflow endpoint via the shared CRUD mixin."""

    BASE_NAME = 'v2-admin_workflow'
    MODEL_CLS = Workflow

    def setUp(self):
        self.user_login = UserLogin(self.client)

    def test_list_url(self):
        self.assertEqual('/api/v2/admin/workflows/', self.list_url())

    def test_object_url(self):
        self.assertEqual('/api/v2/admin/workflows/3/', self.object_url(3))

    def create_model_object(self):
        # One workflow fixture is enough for the mixin's list/retrieve tests
        return Workflow.objects.create(name='Exome Seq', tag='exomeseq')

    def check_single_response(self, model_object, response_data):
        self.assertEqual(model_object.id, response_data['id'])
        self.assertEqual('exomeseq', response_data['tag'])

    def build_post_data(self):
        return {'name': 'Exome Seq', 'tag': 'exomeseq'}
class AdminWorkflowVersionViewSetTestCase(APITestCase, AdminCreateListRetrieveMixin):
    """Tests for the admin workflow-versions endpoint (standard CRUD via mixin
    plus version-change-log, ordering and tool-details behavior)."""

    BASE_NAME = 'v2-admin_workflowversion'
    MODEL_CLS = WorkflowVersion

    def setUp(self):
        self.user_login = UserLogin(self.client)
        self.workflow = Workflow.objects.create(name='Exome Seq', tag='exomeseq')
        self.version_change_log = 'https://github.com/bespin-workflows/exomeseq-gatk3/blob/release-4.1/CHANGELOG.md'

    def test_list_url(self):
        self.assertEqual(self.list_url(), '/api/v2/admin/workflow-versions/')

    def test_object_url(self):
        self.assertEqual(self.object_url(3), '/api/v2/admin/workflow-versions/3/')

    def create_model_object(self):
        # Fixture consumed by the mixin's list/retrieve tests
        model_object = WorkflowVersion.objects.create(
            workflow=self.workflow,
            description='v1 exomeseq',
            version='1.0.1',
            version_info_url=self.version_change_log,
            url='https://someurl.com',
            fields=[{"name":"threads", "class": "int"}],
        )
        return model_object

    def check_single_response(self, model_object, response_data):
        self.assertEqual(response_data['id'], model_object.id)
        self.assertEqual(response_data['workflow'], self.workflow.id)
        self.assertEqual(response_data['description'], 'v1 exomeseq')
        self.assertEqual(response_data['version'], '1.0.1')
        self.assertEqual(response_data['url'], 'https://someurl.com')
        self.assertEqual(response_data['fields'], [{"name": "threads", "class": "int"}])

    def build_post_data(self):
        return {
            'workflow': self.workflow.id,
            'description': 'v1 exomeseq',
            'version': '1.0.1',
            'url': 'https://someurl.com',
            'fields': [{"name": "threads", "class": "int"}],
        }

    # Additional tests
    def test_create_with_version_change_log(self):
        # Verifies version_info_url is persisted and enable_ui defaults to False
        self.user_login.become_admin_user()
        url = reverse('v2-admin_workflowversion-list')
        response = self.client.post(url, format='json', data={
            'workflow': self.workflow.id,
            'description': 'v1 exomseq',
            'version': '2.0.1',
            'url': 'https://someurl.com',
            'version_info_url': 'https://someurl.com/changelog',
            'fields': [{"name": "threads", "class": "int"}],
        })
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['description'], 'v1 exomseq')
        self.assertEqual(response.data['enable_ui'], False)
        workflow_versions = WorkflowVersion.objects.all()
        self.assertEqual(len(workflow_versions), 1)
        self.assertEqual(workflow_versions[0].version, '2.0.1')
        self.assertEqual(workflow_versions[0].version_info_url, 'https://someurl.com/changelog')
        self.assertEqual(workflow_versions[0].fields, [{"name": "threads", "class": "int"}])

    def test_sorted_by_workflow_and_version(self):
        # Listing orders by workflow, then semantic-ish version ("1" < "1.3.1" < "2.2.2-dev")
        wf1 = Workflow.objects.create(name='workflow1', tag='one')
        wfv_1 = WorkflowVersion.objects.create(workflow=wf1, version="1", url='', fields=[])
        wfv_2_2_2_dev = WorkflowVersion.objects.create(workflow=wf1, version="2.2.2-dev", url='', fields=[])
        wfv_1_3_1 = WorkflowVersion.objects.create(workflow=wf1, version="1.3.1", url='', fields=[])
        wf2 = Workflow.objects.create(name='workflow2', tag='two')
        wfv_5 = WorkflowVersion.objects.create(workflow=wf2, version="5", url='', fields=[])
        self.user_login.become_admin_user()
        url = reverse('v2-admin_workflowversion-list')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 4)
        workflow_versions_ary = [(item['workflow'], item['version']) for item in response.data]
        self.assertEqual(workflow_versions_ary, [
            (wf1.id, '1'),
            (wf1.id, '1.3.1'),
            (wf1.id, '2.2.2-dev'),
            (wf2.id, '5'),
        ])

    def test_includes_tool_details(self):
        # Detail serialization exposes the related tool-details primary key
        workflow_version = self.create_model_object()
        details = WorkflowVersionToolDetails.objects.create(
            workflow_version=workflow_version,
            details=[{'k':'v'}]
        )
        self.user_login.become_admin_user()
        url = self.object_url(workflow_version.id)
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['tool_details'], details.pk)
class AdminWorkflowConfigurationViewSetTestCase(APITestCase, AdminCreateListRetrieveMixin):
    """Tests for the admin workflow-configurations endpoint via the CRUD mixin."""

    BASE_NAME = 'v2-admin_workflowconfiguration'
    MODEL_CLS = WorkflowConfiguration

    def setUp(self):
        self.user_login = UserLogin(self.client)
        self.workflow = Workflow.objects.create(name='Exome Seq', tag='exomeseq')
        # NOTE(review): self.workflow_version and vm_project are never referenced
        # by the tests below -- possibly leftover fixtures; confirm before removing
        # (creating them does insert database rows).
        self.workflow_version = WorkflowVersion.objects.create(
            workflow=self.workflow,
            description='v1 exomeseq',
            version='1',
            url='',
            fields=[{"name": "threads", "class": "int"}]
        )
        job_flavor = JobFlavor.objects.create(name='large')
        vm_project = VMProject.objects.create()
        lando_connection = LandoConnection.objects.create(
            cluster_type=LandoConnection.K8S_TYPE,
            host='somehost',
            username='user1',
            password='<PASSWORD>',
            queue_name='lando'
        )
        job_settings = JobSettings.objects.create(lando_connection=lando_connection, job_runtime_k8s=JobRuntimeK8s.objects.create())
        self.job_strategy = JobStrategy.objects.create(name='default', job_flavor=job_flavor, job_settings=job_settings)
        self.share_group = ShareGroup.objects.create()

    def test_list_url(self):
        self.assertEqual(self.list_url(), '/api/v2/admin/workflow-configurations/')

    def test_object_url(self):
        self.assertEqual(self.object_url(3), '/api/v2/admin/workflow-configurations/3/')

    def create_model_object(self):
        # Fixture consumed by the mixin's list/retrieve tests
        model_object = WorkflowConfiguration.objects.create(
            tag='b37xGen',
            workflow=self.workflow,
            system_job_order={"A":"B"},
            default_job_strategy=self.job_strategy,
            share_group=self.share_group,
        )
        return model_object

    def check_single_response(self, model_object, response_data):
        self.assertEqual(response_data['id'], model_object.id)
        self.assertEqual(response_data['tag'], 'b37xGen')
        self.assertEqual(response_data['workflow'], self.workflow.id)
        self.assertEqual(response_data['system_job_order'], {"A": "B"})
        self.assertEqual(response_data['default_job_strategy'], self.job_strategy.id)
        self.assertEqual(response_data['share_group'], self.share_group.id)

    def build_post_data(self):
        return {
            'workflow': self.workflow.id,
            'tag': 'b37xGen',
            'system_job_order': {"A": "B"},
            'default_job_strategy': self.job_strategy.id,
            'share_group': self.share_group.id,
        }
class AdminWorkflowVersionToolDetailsViewSetTestCase(APITestCase, AdminCreateListRetrieveMixin):
    """Tests for the admin workflow-version-tool-details endpoint via the CRUD mixin."""

    BASE_NAME = 'v2-workflowversiontooldetails'
    MODEL_CLS = WorkflowVersionToolDetails

    def setUp(self):
        self.user_login = UserLogin(self.client)
        self.workflow = Workflow.objects.create(name='Test Workflow', tag='test')
        self.workflow_version = WorkflowVersion.objects.create(
            workflow=self.workflow,
            description='Test vABC',
            version='vABC',
            url='https://example.org/test.zip',
            fields=[{'name': 'size', 'type': 'int'},]
        )
        # Arbitrary JSON payload round-tripped through the API
        self.details = [{'k1': 'v1'}, {'k2': 'v2'}]

    def test_list_url(self):
        self.assertEqual(self.list_url(), '/api/v2/admin/workflow-version-tool-details/')

    def test_object_url(self):
        self.assertEqual(self.object_url(3), '/api/v2/admin/workflow-version-tool-details/3/')

    def create_model_object(self):
        # Fixture consumed by the mixin's list/retrieve tests
        model_object = WorkflowVersionToolDetails.objects.create(
            workflow_version=self.workflow_version,
            details=self.details
        )
        return model_object

    def check_single_response(self, model_object, response_data):
        self.assertEqual(response_data['id'], model_object.id)
        self.assertEqual(response_data['workflow_version'], self.workflow_version.id)
        self.assertEqual(response_data['details'], self.details)

    def build_post_data(self):
        return {
            'workflow_version': self.workflow_version.id,
            'details': self.details
        }
class JobStrategyViewSetTestCase(APITestCase):
    """Tests for the read-only job-strategies endpoint: listing, name
    filtering, retrieval, and rejection of write methods."""

    def setUp(self):
        self.user_login = UserLogin(self.client)
        self.job_flavor = JobFlavor.objects.create(name='large')
        self.job_settings = create_vm_job_settings()

    def test_list_fails_unauthenticated(self):
        self.user_login.become_unauthorized()
        url = reverse('v2-jobstrategies-list')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_list_normal_user(self):
        self.job_strategy = JobStrategy.objects.create(name='default', job_flavor=self.job_flavor,
                                                       job_settings=self.job_settings)
        self.user_login.become_normal_user()
        url = reverse('v2-jobstrategies-list')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['id'], self.job_strategy.id)
        self.assertEqual(response.data[0]['name'], 'default')
        self.assertEqual(response.data[0]['job_flavor']['name'], 'large')
        self.assertEqual(response.data[0]['job_settings'], self.job_settings.id)

    def test_list_filtering(self):
        JobStrategy.objects.create(name='default', job_flavor=self.job_flavor, job_settings=self.job_settings)
        JobStrategy.objects.create(name='better', job_flavor=self.job_flavor, job_settings=self.job_settings)
        self.user_login.become_normal_user()
        url = reverse('v2-jobstrategies-list')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 2)
        # Idiom: set comprehension / set literal instead of set([...]) (flake8 C403)
        self.assertEqual({item['name'] for item in response.data}, {'default', 'better'})
        # Filtering by ?name= should narrow the list to the matching strategy
        url = reverse('v2-jobstrategies-list') + "?name=better"
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual({item['name'] for item in response.data}, {'better'})

    def test_retrieve_with_normal_user(self):
        self.job_strategy = JobStrategy.objects.create(name='default', job_flavor=self.job_flavor,
                                                       job_settings=self.job_settings)
        self.user_login.become_normal_user()
        url = reverse('v2-jobstrategies-list') + str(self.job_strategy.id) + '/'
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['id'], self.job_strategy.id)
        self.assertEqual(response.data['name'], 'default')
        self.assertEqual(response.data['job_flavor']['id'], self.job_flavor.id)
        self.assertEqual(response.data['job_settings'], self.job_settings.id)

    def test_post_fails_with_normal_user(self):
        self.user_login.become_normal_user()
        url = reverse('v2-jobstrategies-list') + '1/'
        response = self.client.post(url, format='json', data={})
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_put_fails_with_normal_user(self):
        self.user_login.become_normal_user()
        url = reverse('v2-jobstrategies-list') + '1/'
        response = self.client.put(url, format='json', data={})
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_delete_fails_with_normal_user(self):
        self.user_login.become_normal_user()
        url = reverse('v2-jobstrategies-list') + '1/'
        response = self.client.delete(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class WorkflowConfigurationViewSetTestCase(APITestCase):
    """Tests for the read-only workflow-configurations endpoint: listing,
    filtering by workflow tag and configuration tag, retrieval, and
    rejection of write methods.

    Fix: ``test_create_with_admin_user`` was defined twice with identical
    bodies; the second definition silently shadowed the first, so only one
    copy ever ran. The duplicate has been removed.
    """

    def setUp(self):
        self.user_login = UserLogin(self.client)
        self.workflow = Workflow.objects.create(name='Exome Seq', tag='exomeseq')
        self.workflow2 = Workflow.objects.create(name='Microbiome', tag='microbiome')
        self.workflow_version = WorkflowVersion.objects.create(
            workflow=self.workflow,
            description='v1 exomeseq',
            version='1',
            url='',
            fields=[{"name":"threads", "type": "int"},{"name":"items", "type": "int"}],
        )
        self.workflow_version2 = WorkflowVersion.objects.create(
            workflow=self.workflow,
            description='v2 exomeseq',
            version='2',
            url='',
            fields=[{"name":"threads", "type": "int"}],
        )
        job_flavor = JobFlavor.objects.create(name='large')
        job_settings = create_vm_job_settings()
        self.job_strategy = JobStrategy.objects.create(name='default', job_flavor=job_flavor, job_settings=job_settings)
        self.share_group = ShareGroup.objects.create()
        self.endpoint = DDSEndpoint.objects.create(name='DukeDS', agent_key='secret',
                                                   api_root='https://someserver.com/api')

    def test_list_fails_unauthenticated(self):
        self.user_login.become_unauthorized()
        url = reverse('v2-workflowconfigurations-list')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_list_normal_user(self):
        workflow_configuration = WorkflowConfiguration.objects.create(
            tag='b37xGen',
            workflow=self.workflow,
            system_job_order={"A": "B"},
            default_job_strategy=self.job_strategy,
            share_group=self.share_group,
        )
        self.user_login.become_normal_user()
        url = reverse('v2-workflowconfigurations-list')
        response = self.client.get(url, format='json')
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['id'], workflow_configuration.id)
        self.assertEqual(response.data[0]['tag'], 'b37xGen')
        self.assertEqual(response.data[0]['workflow'], self.workflow.id)
        self.assertEqual(response.data[0]['system_job_order'], {"A": "B"})
        self.assertEqual(response.data[0]['default_job_strategy'], self.job_strategy.id)
        self.assertEqual(response.data[0]['share_group'], self.share_group.id)

    def test_list_normal_user_with_workflow_tag_filtering(self):
        # Both configurations exist; filtering by workflow tag returns only one.
        # The locals are kept for the database side effect of .create().
        workflow_configuration1 = WorkflowConfiguration.objects.create(
            tag='b37xGen',
            workflow=self.workflow,
            system_job_order={"A": "B"},
            default_job_strategy=self.job_strategy,
            share_group=self.share_group,
        )
        workflow_configuration2 = WorkflowConfiguration.objects.create(
            tag='b37other',
            workflow=self.workflow2,
            system_job_order={"A": "C"},
            default_job_strategy=self.job_strategy,
            share_group=self.share_group,
        )
        self.user_login.become_normal_user()
        url = reverse('v2-workflowconfigurations-list')
        response = self.client.get(url, format='json')
        self.assertEqual(len(response.data), 2)
        url = reverse('v2-workflowconfigurations-list') + "?workflow__tag=microbiome"
        response = self.client.get(url, format='json')
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['tag'], 'b37other')

    def test_list_normal_user_with_tag_filtering(self):
        workflow_configuration1 = WorkflowConfiguration.objects.create(
            tag='b37xGen',
            workflow=self.workflow,
            system_job_order={"A": "B"},
            default_job_strategy=self.job_strategy,
            share_group=self.share_group,
        )
        workflow_configuration2 = WorkflowConfiguration.objects.create(
            tag='b37other',
            workflow=self.workflow2,
            system_job_order={"A": "C"},
            default_job_strategy=self.job_strategy,
            share_group=self.share_group,
        )
        self.user_login.become_normal_user()
        url = reverse('v2-workflowconfigurations-list')
        response = self.client.get(url, format='json')
        self.assertEqual(len(response.data), 2)
        url = reverse('v2-workflowconfigurations-list') + "?tag=b37other"
        response = self.client.get(url, format='json')
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['tag'], 'b37other')

    def test_retrieve_normal_user(self):
        workflow_configuration = WorkflowConfiguration.objects.create(
            tag='b37xGen',
            workflow=self.workflow,
            system_job_order={"items": 4},
            default_job_strategy=self.job_strategy,
            share_group=self.share_group,
        )
        self.user_login.become_normal_user()
        url = reverse('v2-workflowconfigurations-list') + str(workflow_configuration.id) + '/'
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['id'], workflow_configuration.id)
        self.assertEqual(response.data['tag'], 'b37xGen')
        self.assertEqual(response.data['workflow'], self.workflow.id)
        self.assertEqual(response.data['system_job_order'], {"items": 4})
        self.assertEqual(response.data['default_job_strategy'], self.job_strategy.id)
        self.assertEqual(response.data['share_group'], self.share_group.id)

    def test_create_with_admin_user(self):
        # Endpoint is read-only: POST is rejected even for admins
        self.user_login.become_admin_user()
        url = reverse('v2-workflowconfigurations-list')
        response = self.client.post(url, format='json', data={})
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_put_fails_with_normal_user(self):
        self.user_login.become_normal_user()
        url = reverse('v2-workflowconfigurations-list') + '1/'
        response = self.client.put(url, format='json', data={})
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_delete_fails_with_admin_user(self):
        # NOTE(review): despite the name, this logs in as a *normal* user;
        # behavior kept as-is, but the name looks like a copy/paste slip.
        self.user_login.become_normal_user()
        url = reverse('v2-workflowconfigurations-list') + '1/'
        response = self.client.delete(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class JobTemplatesViewSetTestCase(APITestCase):
    def setUp(self):
        """Create workflow, strategy and configuration fixtures for job-template tests."""
        self.user_login = UserLogin(self.client)
        self.workflow = Workflow.objects.create(name='Exome Seq', tag='exomeseq')
        # NOTE(review): self.workflow2 appears unused by the visible tests -- confirm
        self.workflow2 = Workflow.objects.create(name='Microbiome', tag='microbiome')
        self.workflow_version = WorkflowVersion.objects.create(
            workflow=self.workflow,
            description='v1 exomeseq',
            version='v1',
            url='',
            fields=[{"name": "threads", "type": "int"}, {"name": "items", "type": "string"}],
        )
        job_flavor = JobFlavor.objects.create(name='large')
        job_settings = create_vm_job_settings()
        self.job_strategy = JobStrategy.objects.create(name='default', job_flavor=job_flavor, job_settings=job_settings)
        self.share_group = ShareGroup.objects.create()
        self.endpoint = DDSEndpoint.objects.create(name='DukeDS', agent_key='secret',
                                                   api_root='https://someserver.com/api')
        # Created for its database side effect; the local name is not read again
        workflow_configuration1 = WorkflowConfiguration.objects.create(
            tag='b37xGen',
            workflow=self.workflow,
            system_job_order={"A": "B"},
            default_job_strategy=self.job_strategy,
            share_group=self.share_group,
        )
def test_init(self):
user = self.user_login.become_normal_user()
DDSUserCredential.objects.create(endpoint=self.endpoint, user=user, token='secret1', dds_id='1')
stage_group = JobFileStageGroup.objects.create(user=user)
url = reverse('v2-jobtemplate_init')
response = | |
# coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.16.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from gitea_api.api_client import ApiClient
class AdminApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when none is injected
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
def admin_adopt_repository(self, owner, repo, **kwargs): # noqa: E501
"""Adopt unadopted files as a repository # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.admin_adopt_repository(owner, repo, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner: owner of the repo (required)
:param str repo: name of the repo (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.admin_adopt_repository_with_http_info(owner, repo, **kwargs) # noqa: E501
else:
(data) = self.admin_adopt_repository_with_http_info(owner, repo, **kwargs) # noqa: E501
return data
    def admin_adopt_repository_with_http_info(self, owner, repo, **kwargs):  # noqa: E501
        """Adopt unadopted files as a repository  # noqa: E501

        POST /admin/unadopted/{owner}/{repo} with no body; validates the two
        required path parameters, then delegates to api_client.call_api.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.admin_adopt_repository_with_http_info(owner, repo, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str owner: owner of the repo (required)
        :param str repo: name of the repo (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['owner', 'repo']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots the current local namespace (self, owner, repo,
        # kwargs, all_params); kwargs entries are merged in below after being
        # checked against the whitelist.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method admin_adopt_repository" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'owner' is set
        if ('owner' not in params or
                params['owner'] is None):
            raise ValueError("Missing the required parameter `owner` when calling `admin_adopt_repository`")  # noqa: E501
        # verify the required parameter 'repo' is set
        if ('repo' not in params or
                params['repo'] is None):
            raise ValueError("Missing the required parameter `repo` when calling `admin_adopt_repository`")  # noqa: E501
        collection_formats = {}
        # Both values are substituted into the URL path template below.
        path_params = {}
        if 'owner' in params:
            path_params['owner'] = params['owner']  # noqa: E501
        if 'repo' in params:
            path_params['repo'] = params['repo']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # No request body for this endpoint.
        body_params = None
        # Authentication setting
        auth_settings = ['AccessToken', 'AuthorizationHeaderToken', 'BasicAuth', 'SudoHeader', 'SudoParam', 'TOTPHeader', 'Token']  # noqa: E501
        return self.api_client.call_api(
            '/admin/unadopted/{owner}/{repo}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def admin_create_org(self, body, username, **kwargs): # noqa: E501
"""Create an organization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.admin_create_org(body, username, async_req=True)
>>> result = thread.get()
:param async_req bool
:param CreateOrgOption body: (required)
:param str username: username of the user that will own the created organization (required)
:return: Organization
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.admin_create_org_with_http_info(body, username, **kwargs) # noqa: E501
else:
(data) = self.admin_create_org_with_http_info(body, username, **kwargs) # noqa: E501
return data
    def admin_create_org_with_http_info(self, body, username, **kwargs):  # noqa: E501
        """Create an organization  # noqa: E501

        POST /admin/users/{username}/orgs with a JSON body; validates the two
        required parameters, then delegates to api_client.call_api.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.admin_create_org_with_http_info(body, username, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param CreateOrgOption body: (required)
        :param str username: username of the user that will own the created organization (required)
        :return: Organization
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['body', 'username']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots the current local namespace; kwargs entries are
        # merged in below after being checked against the whitelist.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method admin_create_org" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `admin_create_org`")  # noqa: E501
        # verify the required parameter 'username' is set
        if ('username' not in params or
                params['username'] is None):
            raise ValueError("Missing the required parameter `username` when calling `admin_create_org`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'username' in params:
            path_params['username'] = params['username']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The CreateOrgOption payload is sent as the JSON request body.
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['AccessToken', 'AuthorizationHeaderToken', 'BasicAuth', 'SudoHeader', 'SudoParam', 'TOTPHeader', 'Token']  # noqa: E501
        return self.api_client.call_api(
            '/admin/users/{username}/orgs', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Organization',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def admin_create_public_key(self, username, **kwargs): # noqa: E501
"""Add a public key on behalf of a user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.admin_create_public_key(username, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str username: username of the user (required)
:param CreateKeyOption body:
:return: PublicKey
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.admin_create_public_key_with_http_info(username, **kwargs) # noqa: E501
else:
(data) = self.admin_create_public_key_with_http_info(username, **kwargs) # noqa: E501
return data
    def admin_create_public_key_with_http_info(self, username, **kwargs):  # noqa: E501
        """Add a public key on behalf of a user  # noqa: E501

        POST /admin/users/{username}/keys with an optional JSON body; validates
        the required username, then delegates to api_client.call_api.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.admin_create_public_key_with_http_info(username, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str username: username of the user (required)
        :param CreateKeyOption body:
        :return: PublicKey
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['username', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots the current local namespace; kwargs entries are
        # merged in below after being checked against the whitelist.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method admin_create_public_key" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'username' is set
        if ('username' not in params or
                params['username'] is None):
            raise ValueError("Missing the required parameter `username` when calling `admin_create_public_key`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'username' in params:
            path_params['username'] = params['username']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # Optional CreateKeyOption payload, sent as the JSON request body.
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['AccessToken', 'AuthorizationHeaderToken', 'BasicAuth', 'SudoHeader', 'SudoParam', 'TOTPHeader', 'Token']  # noqa: E501
        return self.api_client.call_api(
            '/admin/users/{username}/keys', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PublicKey',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def admin_create_repo(self, body, username, **kwargs): # noqa: E501
"""Create a repository on behalf of a user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.admin_create_repo(body, username, async_req=True)
>>> result = thread.get()
:param async_req bool
:param CreateRepoOption body: (required)
:param str username: username of the user. This user will own the created repository (required)
:return: Repository
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.admin_create_repo_with_http_info(body, username, **kwargs) # noqa: E501
else:
(data) = self.admin_create_repo_with_http_info(body, username, **kwargs) # noqa: E501
return data
def admin_create_repo_with_http_info(self, body, username, **kwargs): # noqa: E501
"""Create a repository on behalf of a user # noqa: E501
This method makes a synchronous HTTP request by default. To | |
import psycopg2
import psycopg2.extras
import requests
from osm_handler import get_outer_way
class DBHandler():
def __init__(self, dsn):
pg_host = 'localhost'
pg_port = 5432
pg_user = 'postgres'
pg_pass = '<PASSWORD>'
pg_db = 'osm_test'
# Extent of Large Building Footprints dataset
self.bbox = '25.23561, -80.87864, 25.97467, -80.11845'
# Downtown MIA
self.bbox = '25.770098, -80.200582,25.780107,-80.185132'
if dsn is not None:
self.conn = psycopg2.connect(dsn)
else:
self.conn = psycopg2.connect(
host=pg_host,
port=pg_port,
user=pg_user,
password=pg_pass,
dbname=pg_db)
try:
psycopg2.extras.register_hstore(self.conn)
except:
print 'Could not register hstore. Are you running it for the first time (no hstore data in DB). You should be OK next time though.'
self.cursor = self.conn.cursor()
    def close_db_conn(self):
        """Close the database connection (the cursor becomes unusable with it)."""
        self.conn.close()
def setup_db(self):
create_extension_sql = '''
CREATE EXTENSION IF NOT EXISTS postgis;
CREATE EXTENSION IF NOT EXISTS hstore;
'''
create_building_table_sql = '''
CREATE TABLE IF NOT EXISTS osm_buildings (
id bigint,
type varchar,
tags hstore,
constraint pk_building_id_type primary key (id, type)
);
-- Use generic GEOMETRY type so we can store nodes and ways together
ALTER TABLE osm_buildings drop column IF EXISTS geom;
SELECT AddGeometryColumn('osm_buildings', 'geom', 4326, 'GEOMETRY', 2);
'''
create_highway_railway_table_sql = '''
CREATE TABLE IF NOT EXISTS osm_highway_railway (
id bigint,
type varchar,
tags hstore,
constraint pk_road_id_type primary key (id, type)
);
-- Use generic GEOMETRY type so we can store nodes and ways together
ALTER TABLE osm_highway_railway DROP COLUMN IF EXISTS geom;
SELECT AddGeometryColumn('osm_highway_railway', 'geom', 4326, 'GEOMETRY', 2);
'''
create_address_table_sql = '''
CREATE TABLE IF NOT EXISTS osm_addresses (
id bigint,
type varchar,
tags hstore,
constraint pk_address_id_type primary key (id, type)
);
ALTER TABLE osm_addresses DROP COLUMN IF EXISTS geom;
SELECT AddGeometryColumn('osm_addresses', 'geom', 4326, 'GEOMETRY', 2);
'''
populate_geom_sql = 'select Populate_Geometry_Columns();'
self.cursor.execute(create_extension_sql)
self.cursor.execute(create_building_table_sql)
self.cursor.execute(create_address_table_sql)
self.cursor.execute(create_highway_railway_table_sql)
self.cursor.execute(populate_geom_sql)
self.conn.commit()
    def create_index(self):
        """Create spatial (GiST) indexes on the OSM staging and county tables.

        NOTE(review): address_county_index_sql and building_county_index_sql
        are built below but never executed -- confirm whether they were meant
        to run (dedupe_address creates a similar address index itself).
        """
        building_index_sql = 'CREATE INDEX osm_building_geom_idx ON osm_buildings USING GIST (geom);'
        address_index_sql = 'CREATE INDEX osm_address_geom_idx ON osm_addresses USING GIST (geom);'
        highway_index_sql = 'CREATE INDEX osm_highway_railway_geom_idx ON osm_highway_railway USING GIST (geom);'
        address_county_index_sql = 'CREATE INDEX address_geom_idx ON address_with_condo USING GIST (geom);'
        building_county_index_sql = 'CREATE INDEX building_geom_idx ON building_footprint_2d USING GIST (geom)'
        building_county_merc_index_sql = 'create index building_merc_idx on building_footprint_2d using gist (geom_merc)'
        addr_county_merc_index_sql = 'create index addr_merc_idx on address_with_condo using gist (geom_merc)'
        self.cursor.execute(building_index_sql)
        self.cursor.execute(address_index_sql)
        self.cursor.execute(highway_index_sql)
        self.cursor.execute(building_county_merc_index_sql)
        self.cursor.execute(addr_county_merc_index_sql)
        self.conn.commit()
def update_stats(self):
old_isolation_level = self.conn.isolation_level
self.conn.set_isolation_level(0)
self.cursor.execute('VACUUM ANALYZE')
self.conn.set_isolation_level(old_isolation_level)
self.conn.commit()
def dedupe_address(self):
dedupe_tmp_sql = '''CREATE TABLE address_tmp AS
SELECT DISTINCT ON (geom) * FROM address_with_condo;
'''
dedupe_drop_old_sql = "drop table address_with_condo;"
dedupe_rename_sql = "alter table address_tmp rename to address_with_condo;"
dedupe_populate_geom_sql = 'select Populate_Geometry_Columns();'
dedupe_index_sql = 'create index address_county_geom_idx on address_with_condo using gist (geom);'
self.cursor.execute(dedupe_tmp_sql)
self.cursor.execute(dedupe_drop_old_sql)
self.cursor.execute(dedupe_rename_sql)
self.conn.commit()
self.cursor.execute(dedupe_populate_geom_sql)
self.cursor.execute(dedupe_index_sql)
self.conn.commit()
#self.cursor.execute('vacuum analyze;')
    def insert_osm(self, data, table):
        """Insert Overpass-style OSM elements from *data* into *table*.

        Nodes become POINTs; ways become LINESTRINGs; relations become either
        a bbox POLYGON (more than one outer member) or a LINESTRING built from
        the single outer way; 'multipolygon' elements always use their bbox.
        Abandoned railways/highways are skipped. Commits once at the end.

        NOTE(review): assumes every element carries a 'tags' dict -- confirm
        against the Overpass query used upstream.
        """
        i = 0
        sql_pre = 'INSERT INTO %s ' % table
        sql = sql_pre + '(id, type, tags, geom) VALUES (%s, %s, %s, ST_SetSRID(ST_GeomFromText(%s), 4326));'
        for el in data['elements']:
            if 'railway' in el['tags'].keys():
                if el['tags']['railway'] == 'abandoned':
                    continue
            if 'highway' in el['tags'].keys():
                if el['tags']['highway'] == 'abandoned':
                    continue
            # Progress report every 10k elements (skipped rows don't count).
            if i % 10000 == 0:
                print '%s: %s/%s' % (table, i, len(data['elements']))
            i += 1
            # print building
            # print el['type'], el['id']
            if el['type'] == 'node':
                self.cursor.execute(sql, (el['id'], el['type'], el['tags'], 'POINT (' + str(el['lon']) + ' ' + str(el['lat']) + ')'))
            # Upload them as Linestring
            if el['type'] == 'way':
                geom = 'LINESTRING ('
                try:
                    geom += self.build_wkt_coord_list(el['geometry'])
                    geom += ')'
                    self.cursor.execute(sql, (el['id'], el['type'], el['tags'], geom))
                except KeyError:
                    # Ways without a 'geometry' key are silently skipped.
                    continue
            # Safe to assume relations are polygons but let's stick to Linestrings. Use only outer as we're interested in spatial overlaps.
            if el['type'] == 'relation':
                geom = 'LINESTRING('
                membercnt = 0
                for member in el['members']:
                    if member['role'] == 'outer':
                        membercnt += 1
                if membercnt > 1:
                    # Several outer rings: fall back to the element's bbox.
                    # it's already been returned if there's no bounds... passing
                    try:
                        bounds = el['bounds']
                    except KeyError:
                        continue
                    lower_left = str(bounds['minlon']) + ' ' + str(bounds['minlat'])
                    lower_right = str(bounds['maxlon']) + ' ' + str(bounds['minlat'])
                    upper_right = str(bounds['maxlon']) + ' ' + str(bounds['maxlat'])
                    upper_left = str(bounds['minlon']) + ' ' + str(bounds['maxlat'])
                    geom = 'POLYGON((' + lower_left + ',' + lower_right + ',' + upper_right + ',' + upper_left + ',' + lower_left + '))'
                else:
                    # Single outer way: fetch its geometry via the OSM API helper.
                    print el['id'], el['type']
                    for member in el['members']:
                        if member['role'] == 'outer':
                            geom += self.build_wkt_coord_list(get_outer_way(member['ref'])['geometry'])
                    geom += ')'
                if geom == 'LINESTRING()':
                    print 'geometry error with %s id: %s. geom: %s' % (el['type'], str(el['id']), geom)
                    continue
                self.cursor.execute(sql, (el['id'], el['type'], el['tags'], geom))
            # Upload bounds if it's a multipolygon
            if el['type'] == 'multipolygon':
                bounds = el['bounds']
                lower_left = str(bounds['minlon']) + ' ' + str(bounds['minlat'])
                lower_right = str(bounds['maxlon']) + ' ' + str(bounds['minlat'])
                upper_right = str(bounds['maxlon']) + ' ' + str(bounds['maxlat'])
                upper_left = str(bounds['minlon']) + ' ' + str(bounds['maxlat'])
                geom = 'POLYGON((' + lower_left + ',' + lower_right + ',' + upper_right + ',' + upper_left + ',' + lower_left + '))'
                self.cursor.execute(sql, (el['id'], el['type'], el['tags'], geom))
        self.conn.commit()
def build_wkt_coord_list(self, geometry):
i = 0
coord_list = ''
for node in geometry:
if i > 0:
coord_list += ', '
coord_list += str(node['lon']) + ' ' + str(node['lat'])
i += 1
return coord_list
def convert_to_poly(self):
self.cursor.execute("update osm_addresses set geom = st_buildarea(geom) where st_geometrytype(geom) != 'ST_Point'")
self.cursor.execute("update osm_buildings set geom = st_buildarea(geom) where st_geometrytype(geom) != 'ST_Point'")
self.conn.commit()
    def simplify_buildings(self):
        """Simplify non-point building geometries with a 0.3 m tolerance."""
        # Simplify geometry using 0.3m tolerance (get rid of unnecessary nodes).
        # Reprojects to EPSG:3857 so the tolerance is in metres, then back to 4326.
        self.cursor.execute("update osm_buildings set geom = st_transform(st_simplify(st_transform(geom, 3857), 0.3), 4326) where st_geometrytype(geom) != 'ST_Point'")
        self.conn.commit()
def add_fields_input_data(self):
self.cursor.execute('alter table address_with_condo drop column if exists in_osm')
self.cursor.execute('alter table address_with_condo add column in_osm boolean')
self.cursor.execute('update address_with_condo set in_osm = false')
self.cursor.execute('alter table building_footprint_2d drop column if exists in_osm')
self.cursor.execute('alter table building_footprint_2d add column in_osm boolean')
self.cursor.execute('update building_footprint_2d set in_osm = false')
self.cursor.execute("select addgeometrycolumn('building_footprint_2d', 'geom_merc', 3857, 'multipolygon', 2);")
self.cursor.execute("select addgeometrycolumn('address_with_condo', 'geom_merc', 3857, 'point', 2);")
self.cursor.execute('''alter table building_footprint_2d
add column addr_assigned boolean,
add column zip int,
add column mailing_mu varchar,
add column hse_num int,
add column pre_dir varchar,
add column suf_dir varchar,
add column st_type varchar,
add column st_name varchar,
add column road_intersect boolean;
update building_footprint_2d set addr_assigned = false;
alter table address_with_condo add column assigned_to_bldg boolean;
update address_with_condo set assigned_to_bldg = false;
''')
self.conn.commit()
self.cursor.execute('update building_footprint_2d set geom_merc = st_transform(geom, 3857);')
self.cursor.execute('update address_with_condo set geom_merc = st_transform(geom, 3857);')
self.conn.commit()
    def check_exitsing_addresses(self):
        """Flag county addresses already present in OSM (sets in_osm = true).

        Pass 1: address point inside an OSM address polygon with a matching
        house number. Pass 2: within 20 m (geography distance) of any OSM
        address with a matching house number.

        NOTE(review): method name has a typo ("exitsing"); kept as-is because
        external callers may depend on it.
        """
        self.cursor.execute('''
        update address_with_condo county
        set in_osm = True
        from
            (select * from osm_addresses where st_geometrytype(geom) = 'ST_Polygon') osm
        where
            st_within(county.geom, osm.geom) and county.hse_num::varchar = (osm.tags->'addr:housenumber')::varchar
        ''')
        self.conn.commit()
        self.cursor.execute('''
        update address_with_condo county
        set in_osm = True
        from osm_addresses osm
        where
            st_dwithin(county.geom::geography, osm.geom::geography, 20) and county.hse_num::varchar = (osm.tags->'addr:housenumber')::varchar
        ''')
        self.conn.commit()
    def check_existing_buildings(self):
        """Flag county building footprints that intersect any OSM building (sets in_osm = true)."""
        self.cursor.execute('''
        update building_footprint_2d county
        set in_osm = True
        from osm_buildings osm
        where
            st_intersects(county.geom, osm.geom)
        ''')
        self.conn.commit()
def assign_address(self):
old_isolation_level = self.conn.isolation_level
self.conn.set_isolation_level(0)
self.cursor.execute('''
update building_footprint_2d bldg
set
zip = x.zip,
mailing_mu = x.mailing_mu,
hse_num = x.hse_num,
st_name = x.st_name,
st_type = x.st_type,
pre_dir = x.pre_dir,
suf_dir = x.suf_dir,
addr_assigned = true
from (
select bldg.objectid as bldgid, addr.objectid as addrid, addr.* from
building_footprint_2d as bldg
left join (select b.building_id, b.ids[1] from
(select building.objectid as building_id, array_agg(a.objectid) as ids , count(a.objectid) as cnt
from
building_footprint_2d building,
address_with_condo a
where
st_within(a.geom, building.geom) and building.in_osm = false
group by building_id, building.geom) b
where b.cnt = 1) pairs on bldg.objectid = pairs.building_id
left join address_with_condo addr on pairs.ids = addr.objectid
where bldg.objectid = pairs.building_id
) x
where bldg.objectid = x.bldgid;
''')
self.conn.commit()
self.cursor.execute('''update building_footprint_2d bldg
set
zip = x.zip,
mailing_mu = x.mailing_mu,
hse_num = x.hse_num,
st_name = x.st_name,
st_type = x.st_type,
pre_dir = x.pre_dir,
suf_dir = x.suf_dir,
addr_assigned = true
from (
select bldg.objectid as bldgid, addr.objectid as addrid, addr.* from
building_footprint_2d as bldg
left join (select b.building_id, b.ids[1] from
(select building.objectid as building_id, array_agg(a.objectid) as ids , count(a.objectid) as cnt
from
building_footprint_2d building,
address_with_condo a
where
st_dwithin(a.geom_merc, building.geom_merc, 5) and building.in_osm = false and building.addr_assigned = false
group by building_id, building.geom) b
where b.cnt = 1) pairs on bldg.objectid = pairs.building_id
left join address_with_condo addr on pairs.ids = addr.objectid
where bldg.objectid = pairs.building_id
) x
where bldg.objectid = x.bldgid;
''')
self.conn.commit()
self.cursor.execute('''
update address_with_condo | |
import struct
import logging
SYNC1=0xb5  # first sync byte of every UBX frame
SYNC2=0x62  # second sync byte of every UBX frame
# UBX message class identifiers (the class byte of the two-byte message ID).
CLASS = {
    "NAV" : 0x01,
    "RXM" : 0x02,
    "INF" : 0x04,
    "ACK" : 0x05,
    "CFG" : 0x06,
    "UPD" : 0x09,
    "MON" : 0x0a,
    "AID" : 0x0b,
    "TIM" : 0x0d,
    "USR" : 0x40,
    "ESF" : 0x10,
    "MGA" : 0x13
}
# Message name -> (class, id) byte pair for UBX (0x01..0x40), NMEA (0xf0/0xf1)
# and RTCM (0xf5) messages. Fix: the original listed "AID-REQ" twice with the
# same value; the redundant duplicate dict key has been removed.
CLIDPAIR = {
    "ACK-ACK" : (0x05, 0x01),
    "ACK-NACK" : (0x05, 0x00),
    "AID-ALM" : (0x0b, 0x30),
    "AID-DATA" : (0x0b, 0x10),
    "AID-EPH" : (0x0b, 0x31),
    "AID-HUI" : (0x0b, 0x02),
    "AID-INI" : (0x0b, 0x01),
    "AID-REQ" : (0x0b, 0x00),
    "AID-UNKNOWN-0x32" : (0x0b, 0x32),
    "AID-UNKNOWN-0x50" : (0x0b, 0x50),
    "CFG-ANT" : (0x06, 0x13),
    "CFG-CFG" : (0x06, 0x09),
    "CFG-DAT" : (0x06, 0x06),
    "CFG-EKF" : (0x06, 0x12),
    "CFG-FXN" : (0x06, 0x0e),
    "CFG-GNSS" : (0x06, 0x3e),
    "CFG-HNR" : (0x06, 0x5c),
    "CFG-INF" : (0x06, 0x02),
    "CFG-LIC" : (0x06, 0x80),
    "CFG-MSG" : (0x06, 0x01),
    "CFG-NAV2" : (0x06, 0x1a),
    "CFG-NMEA" : (0x06, 0x17),
    "CFG-PMS" : (0x06, 0x86),
    "CFG-PRT" : (0x06, 0x00),
    "CFG-RATE" : (0x06, 0x08),
    "CFG-RST" : (0x06, 0x04),
    "CFG-RXM" : (0x06, 0x11),
    "CFG-SBAS" : (0x06, 0x16),
    "CFG-TM" : (0x06, 0x10),
    "CFG-TM2" : (0x06, 0x19),
    "CFG-TMODE" : (0x06, 0x1d),
    "CFG-TP" : (0x06, 0x07),
    "CFG-USB" : (0x06, 0x1b),
    "ESF-INS" : (0x10, 0x15),
    "ESF-MEAS" : (0x10, 0x02),
    "ESF-RAW" : (0x10, 0x03),
    "ESF-STATUS" : (0x10, 0x10),
    "HNR-PVT" : (0x28, 0x00),
    "INF-DEBUG" : (0x04, 0x04),
    "INF-ERROR" : (0x04, 0x00),
    "INF-NOTICE" : (0x04, 0x02),
    "INF-TEST" : (0x04, 0x03),
    "INF-USER" : (0x04, 0x07),
    "INF-WARNING" : (0x04, 0x01),
    "LOG-FINDTIME" : (0x21, 0x0e),
    "LOG-RETRIEVEPOS" : (0x21, 0x0b),
    "LOG-RETRIEVEPOSEXTRA" : (0x21, 0x0f),
    "LOG-RETRIEVESTRING" : (0x21, 0x0d),
    "MGA-ACK" : (0x13, 0x60),
    "MGA-DBD" : (0x13, 0x80),
    "MGA-FLASH" : (0x13, 0x21),
    "MGA-GPS-EPH" : (0x13, 0x00),
    "MGA-GPS-ALM" : (0x13, 0x00),
    "MGA-GLO-EPH" : (0x13, 0x06),
    "MGA-GLO-ALM" : (0x13, 0x06),
    "MGA-INI-TIME_UTC" : (0x13, 0x40),
    "MON-EXCEPT" : (0x0a, 0x05),
    "MON-HW" : (0x0a, 0x09),
    "MON-IO" : (0x0a, 0x02),
    "MON-IPC" : (0x0a, 0x03),
    "MON-MSGPP" : (0x0a, 0x06),
    "MON-RXBUF" : (0x0a, 0x07),
    "MON-SCHD" : (0x0a, 0x01),
    "MON-SMGR" : (0x0a, 0x2e),
    "MON-TXBUF" : (0x0a, 0x08),
    "MON-USB" : (0x0a, 0x0a),
    "MON-VER" : (0x0a, 0x04),
    "NAV-ATT" : (0x01, 0x05),
    "NAV-CLOCK" : (0x01, 0x22),
    "NAV-DGPS" : (0x01, 0x31),
    "NAV-DOP" : (0x01, 0x04),
    "NAV-EKFSTATUS" : (0x01, 0x40),
    "NAV-HPPOSECEF" : (0x01, 0x13),
    "NAV-HPPOSLLH" : (0x01, 0x14),
    "NAV-POSECEF" : (0x01, 0x01),
    "NAV-POSLLH" : (0x01, 0x02),
    "NAV-POSUTM" : (0x01, 0x08),
    "NAV-PVT" : (0x01, 0x07),
    "NAV-RELPOSNED" : (0x01, 0x3c),
    "NAV-SBAS" : (0x01, 0x32),
    "NAV-SOL" : (0x01, 0x06),
    "NAV-STATUS" : (0x01, 0x03),
    "NAV-SVIN" : (0x01, 0x3b),
    "NAV-SVINFO" : (0x01, 0x30),
    "NAV-TIMEGPS" : (0x01, 0x20),
    "NAV-TIMEUTC" : (0x01, 0x21),
    "NAV-VELECEF" : (0x01, 0x11),
    "NAV-VELNED" : (0x01, 0x12),
    "RXM-ALM" : (0x02, 0x30),
    "RXM-EPH" : (0x02, 0x31),
    "RXM-POSREQ" : (0x02, 0x40),
    "RXM-PMREQ" : (0x02, 0x41),
    "RXM-RAW" : (0x02, 0x10),
    "RXM-RTCM" : (0x02, 0x32),
    "RXM-SFRB" : (0x02, 0x11),
    "RXM-SVSI" : (0x02, 0x20),
    "SEC-SIGN" : (0x27, 0x01),
    "SEC-UNIQID" : (0x27, 0x03),
    "TIM-DOSC" : (0x0d, 0x11),
    "TIM-FCHG" : (0x0d, 0x16),
    "TIM-SMEAS" : (0x0d, 0x13),
    "TIM-SVIN" : (0x0d, 0x04),
    "TIM-TM" : (0x0d, 0x02),
    "TIM-TM2" : (0x0d, 0x03),
    "TIM-TOS" : (0x0d, 0x12),
    "TIM-TP" : (0x0d, 0x01),
    "TIM-VCOCAL" : (0x0d, 0x15),
    "UPD-DOWNL" : (0x09, 0x01),
    "UPD-EXEC" : (0x09, 0x03),
    "UPD-MEMCPY" : (0x09, 0x04),
    "UPD-SOS" : (0x09, 0x14),
    "UPD-UPLOAD" : (0x09, 0x02),
    "NMEA-DTM" : (0xf0, 0x0a),
    "NMEA-GBS" : (0xf0, 0x09),
    "NMEA-GGA" : (0xf0, 0x00),
    "NMEA-GLL" : (0xf0, 0x01),
    "NMEA-GNS" : (0xf0, 0x0d),
    "NMEA-GRS" : (0xf0, 0x06),
    "NMEA-GSA" : (0xf0, 0x02),
    "NMEA-GST" : (0xf0, 0x07),
    "NMEA-GSV" : (0xf0, 0x03),
    "NMEA-RMC" : (0xf0, 0x04),
    "NMEA-VTG" : (0xf0, 0x05),
    "NMEA-ZDA" : (0xf0, 0x08),
    "NMEAP-UNKNOWN-0x01" : (0xf1, 0x01),
    "NMEAP-TIME" : (0xf1, 0x04),
    "NMEAP-CONFIG" : (0xf1, 0x41),
    "RTCM-BEIMSM4" : (0xf5, 0x7c),
    "RTCM-BEIMSM7" : (0xf5, 0x7f),
    "RTCM-GPSMSM4" : (0xf5, 0x4a),
    "RTCM-GPSMSM7" : (0xf5, 0x4d),
    "RTCM-GLOCODE" : (0xf5, 0xe6),
    "RTCM-GLOMSM4" : (0xf5, 0x54),
    "RTCM-GLOMSM7" : (0xf5, 0x57),
    "RTCM-REFSTATIONARP" : (0xf5, 0x05),
    "RTCM-REFSTATIONPVT" : (0xf5, 0xfe),
}
# Reverse lookup: (class, id) -> message name. Names sharing one pair (e.g.
# MGA-GPS-EPH / MGA-GPS-ALM) collapse to whichever occurs last in CLIDPAIR's
# iteration order, exactly as the original dict([[v, k] ...]) build did.
CLIDPAIR_INV = {clid: name for name, clid in CLIDPAIR.items()}
# MSGFMT - Describes the format of each message.
# The key tuple contains the name of the message and the size in bytes.
# If the size is None, then this is a variable length message.
#
# The value of each dictionary item is in one of two formats. Both are lists.
#
# In the first format, the first element of the list is a string describing the
# struct format (see the Python struct documentation). The second element is a
# list of strings with field names for each of the elements of the struct.
#
# The second format is used if there is a header section followed by repeated
# sections. In this format, the first element is the number of bytes for the
# header section, followed by the struct format, then the field names.
#
# The fourth element is where the description of the repeated section starts. As
# with the header section, this begins with the number of bytes for each
# section. It is not the sum of the section sizes. So if each repeated section
# is 12 bytes, this value is 12. Finally, the last (sixth) element is the list
# of field names.
MSGFMT = {
("NAV-POSECEF", 20) :
["<IiiiI", ["ITOW", "ECEF_X", "ECEF_Y", "ECEF_Z", "Pacc"]],
("NAV-POSLLH", 28) :
["<IiiiiII", ["ITOW", "LON", "LAT", "HEIGHT", "HMSL", "Hacc", "Vacc"]],
("NAV-POSUTM", 18) :
["<Iiiibb", ["ITOW", "EAST", "NORTH", "ALT", "ZONE", "HEM"]],
("NAV-DOP", 18) :
["<IHHHHHHH", ["ITOW", "GDOP", "PDOP", "TDOP", "VDOP", "HDOP", "NDOP", "EDOP"]],
("NAV-STATUS", 16) :
["<IBBBxII", ["ITOW", "GPSfix", "Flags", "DiffS", "TTFF", "MSSS"]],
("NAV-SOL", 52) :
["<IihBBiiiIiiiIHxBxxxx", ["ITOW", "Frac", "week", "GPSFix", "Flags", "ECEF_X", "ECEF_Y", "ECEF_Z", "Pacc",
"ECEFVX", "ECEFVY", "ECEFVZ", "SAcc", "PDOP", "numSV"]],
("NAV-VELECEF", 20) :
["<IiiiI", ["ITOW", "ECEFVX", "ECEFVY", "ECEFVZ", "SAcc"]],
("NAV-VELNED", 36) :
["<IiiiIIiII", ["ITOW", "VEL_N", "VEL_E", "VEL_D", "Speed", "GSpeed", "Heading", "SAcc", "CAcc"]],
("NAV-TIMEGPS", 16) :
["<IihbBI", ["ITOW", "Frac", "week", "LeapS", "Valid", "TAcc"]],
("NAV-TIMEUTC", 20) :
["<IIiHBBBBBB", ["ITOW", "TAcc", "Nano", "Year", "Month", "Day", "Hour", "Min", "Sec", "Valid"]],
("NAV-CLOCK", 20) :
["<IiiII", ["ITOW", "CLKB", "CLKD", "TAcc", "FAcc"]],
("NAV-SVINFO", None) :
[8, "<IBxxx", ["ITOW", "NCH"], 12, "<BBBbBbhi", ["chn", "SVID", "Flags", "QI", "CNO", "Elev", "Azim", "PRRes"]],
("NAV-DGPS", None) :
[16, "<IihhBBxx", ["ITOW", "AGE", "BASEID", "BASEHLTH", "NCH", "STATUS"], 12, "<BBHff", ["SVID", "Flags", "AGECH", "PRC", "PRRC"]],
("NAV-SBAS", None) :
[12, "<IBBbBBxxx", ["ITOW", "GEO", "MODE", "SYS", "SERVICE", "CNT"], 12, "<BBBBBxhxxh", ["SVID", "FLAGS", "UDRE", "SYSn", "SERVICEn", "PRC", "IC"]],
("NAV-EKFSTATUS", 36) : # no response to query
["<iiIhbbiiihhhbB", ["pulses", "period", "gyromean", "temp", "dir", "calib", "pulse", "gbias", "gscale", "accps", "accgb", "accgs", "used", "res"]],
("NAV-ATT", 32) :
["<IBxxxiiiIII", ["ITOW", "Version", "Roll", "Pitch", "Heading", "AccRoll", "AccPitch", "AccHeading"]],
("NAV-PVT", 92) :
["<IHBBBBBBIiBBBBiiiiIIiiiiiIIHxxxxxxihH", ["ITOW", "Year", "Month", "Day", "Hour", "Min", "Sec", "Valid", "TAcc", "Nano", "FixType", "Flags", "Flags2", "NumSV", "LON", "LAT", "HEIGHT", "HMSL", "Hacc", "Vacc", "VEL_N", "VEL_E", "VEL_D", "GSpeed", "HeadMot", "SAcc", "HeadAcc", "PDOP", "HeadVeh", "MagDec", "MagAcc"]],
# ('RXM-RAW', [{'Week': 1575, 'ITOW': 475184470, 'NSV': 0}])
("RXM-RAW", None) :
[8, "<ihBx", ["ITOW", "Week", "NSV"], 24, "<ddfBbbB", ["CPMes", "PRMes", "DOMes", "SV", "MesQI", "CNO", "LLI"]],
("RXM-SVSI", None) :
[8, "<ihBB", ["ITOW", "Week", "NumVis", "NumSv"], 6, "<BBhbB", ["SVID", "SVFlag", "Azim", "Elev", "Age"]],
("RXM-SFRB", 42) :
["<BBiiiiiiiiii", ["CHN", "SVID", "DWRD0", "DWRD1", "DWRD2", "DWRD3", "DWRD4", "DWRD5", "DWRD6", "DWRD7", "DWRD8", "DWRD9"]],
("RXM-ALM", 1) :
["<B", ["SVID"]],
("RXM-ALM", 8) :
["<II", ["SVID", "WEEK"]],
("RXM-ALM", 40) :
["<" + "I"*10, ["SVID", "WEEK", "DWRD0", "DWRD1", "DWRD2", "DWRD3", "DWRD4", "DWRD5", "DWRD6", "DWRD7"]],
("RXM-EPH", 1) :
["<B", ["SVID"]],
("RXM-EPH", 8) :
["<II", ["SVID", "HOW"]],
("RXM-EPH", 104) :
["<" + "I"*26, ["SVID", "HOW", "SF1D0", "SF1D1", "SF1D2", "SF1D3", "SF1D4",
"SF1D5", "SF1D6", "SF1D7", "SF2D0", "SF2D1", "SF2D2", "SF2D3", "SF2D4",
"SF2D5", "SF2D6", "SF2D7", "SF3D0", "SF3D1", "SF3D2", "SF3D3", "SF3D4", "SF3D5", "SF3D6", "SF3D7"]],
("RXM-PMREQ", 8) :
["<II", ["duration", "flags"]],
("RXM-PMREQ", 16) :
["<BxxxIII", ["version", "duration", "flags", "wakeupSources"]],
("INF-ERROR", None) :
[0, "", [], 1, "c", ["Char"]],
("INF-WARNING", None) :
[0, "", [], 1, "c", ["Char"]],
("INF-NOTICE", None) :
[0, "", [], 1, "c", ["Char"]],
("INF-TEST", None) :
[0, "", [], 1, "c", ["Char"]],
("INF-DEBUG", None) :
[0, "", [], 1, "c", ["Char"]],
("INF-USER", None) :
[0, "", [], 1, "c", ["Char"]],
("ACK-ACK", 2) :
["<BB", ["ClsID", "MsgID"]],
("ACK-NACK", 2) :
["<BB", ["ClsID", "MsgID"]],
("CFG-GNSS", None) :
[4, "<BBBB", ['msgVer', 'numTrkChHw', 'numTrkChUse', 'numConfigBlocks'], 8, "<BBBBL", ['gnssId', 'resTrkCh', 'maxTrkCh', 'reserved1', 'flags']],
("CFG-PRT", 1) :
["<B", ["PortID"]],
("CFG-PRT", None) | |
import os
import os.path as op
from glob import glob
import tensorflow as tf
import shutil
import json
import copy
from timeit import default_timer as timer
import utils.util_funcs as uf
import utils.util_class as uc
from tfrecords.example_maker import ExampleMaker
from tfrecords.tfr_util import Serializer, inspect_properties
from utils.util_class import MyExceptionToCatch
class TfrecordMakerBase:
    """Convert a source dataset into sharded tfrecord files.

    The base class drives the whole conversion loop (drives -> frames ->
    serialized examples -> shards) while subclasses provide the
    dataset-specific parts: listing drives, opening per-drive writers,
    and final clean-up.
    """
    def __init__(self, dataset, split, srcpath, tfrpath, shard_size, stereo, shwc_shape):
        self.dataset = dataset
        self.split = split
        self.srcpath = srcpath
        self.tfrpath = tfrpath              # final root path of tfrecords of this dataset
        self.tfrpath__ = tfrpath + "__"     # temporary root path while writing
        self.tfr_drive_path = ""            # path to write "current" tfrecords
        self.shwc_shape = shwc_shape
        self.shard_size = shard_size        # max number of examples in a shard
        self.shard_count = 0                # number of shards written in this drive
        self.example_count_in_shard = 0     # number of examples in this shard
        self.example_count_in_drive = 0     # number of examples in this drive
        self.total_example_count = 0        # number of examples generated in this session
        self.drive_paths = self.list_drive_paths(srcpath, split)
        self.data_keys = self.get_dataset_keys(stereo)
        self.example_maker = self.get_example_maker(dataset, split, shwc_shape, self.data_keys)
        self.serialize_example = Serializer()
        self.writer = None
        self.pm = uc.PathManager([""])
        self.error_count = 0

    def list_drive_paths(self, srcpath, split):
        """Return the list of drives (paths or ids) for this dataset/split."""
        raise NotImplementedError()

    def get_dataset_keys(self, stereo):
        """Return example keys to extract; stereo adds right-camera keys."""
        keys = ["image", "intrinsic", "depth_gt", "pose_gt"]
        if stereo:
            keys += ["image_R", "intrinsic_R", "depth_gt_R", "pose_gt_R", "stereo_T_LR"]
        return keys

    def get_example_maker(self, dataset, split, shwc_shape, data_keys):
        return ExampleMaker(dataset, split, shwc_shape, data_keys)

    def make(self, frame_per_drive=0, total_frame_limit=0):
        """Convert every drive of this dataset into tfrecords.

        frame_per_drive / total_frame_limit cap the number of examples
        per drive / in total; 0 means unlimited.
        """
        print("\n\n========== Start a new dataset:", op.basename(self.tfrpath))
        num_drives = len(self.drive_paths)
        with uc.PathManager([self.tfrpath__], closer_func=self.on_exit) as pm:
            self.pm = pm
            for di, drive_path in enumerate(self.drive_paths):
                if self.init_drive_tfrecord(di):
                    continue  # drive already converted -> skip
                if (total_frame_limit > 0) and (self.total_example_count >= total_frame_limit):
                    break
                print("\n==== Start a new drive:", drive_path)
                # create data reader in example maker
                self.example_maker.init_reader(drive_path)
                loop_range = self.example_maker.get_range()
                num_frames = self.example_maker.num_frames()
                drive_example = dict()
                for ii, index in enumerate(loop_range):
                    time1 = timer()
                    if (frame_per_drive > 0) and (self.example_count_in_drive >= frame_per_drive):
                        break
                    if (total_frame_limit > 0) and (self.total_example_count >= total_frame_limit):
                        break
                    try:
                        example = self.example_maker.get_example(index)
                        drive_example = self.verify_example(drive_example, example)
                    except StopIteration as si:         # raised from xxx_reader._get_frame()
                        print("\n[StopIteration] stop this drive", si)
                        break
                    except MyExceptionToCatch as ve:    # raised from xxx_reader._get_frame()
                        uf.print_progress_status(f"==[making TFR] Exception frame: {ii}/{num_frames}, {ve}")
                        continue
                    example_serial = self.serialize_example(example)
                    self.write_tfrecord(example_serial, di)
                    uf.print_progress_status(f"==[making TFR] drives: {di}/{num_drives} | "
                                             f"index,count: {ii}/{self.example_count_in_drive}/{num_frames} | "
                                             f"total count: {self.total_example_count} | "
                                             f"shard({self.shard_count}): {self.example_count_in_shard}/{self.shard_size} | "
                                             f"time: {timer() - time1:1.4f}")
                print("")
                self.write_tfrecord_config(drive_example)
            pm.set_ok()
        self.wrap_up()

    def init_drive_tfrecord(self, drive_index=0):
        """Prepare output for one drive; return True to skip this drive."""
        raise NotImplementedError()

    def verify_example(self, drive_example, example):
        """Validate *example* against the first example of the drive.

        The first valid example becomes the drive's reference; later
        examples must contain the same keys with the same shapes.
        Raises MyExceptionToCatch on recoverable problems; after ten of
        them the assertion aborts the whole conversion.
        """
        if (not example) or ("image" not in example):
            raise MyExceptionToCatch("[verify_example] EMPTY example")
        if not drive_example:
            drive_example = copy.deepcopy(example)
            print("[verify_example] Set drive_example:", list(drive_example.keys()))
            return drive_example
        for key in drive_example:
            if key not in example:
                print(f"[verify_example] (WARNING) error count: {self.error_count}, {key} is not in example")
                self.error_count += 1
                assert self.error_count < 10
                raise MyExceptionToCatch(f"{key} is not in example")
            if drive_example[key].shape != example[key].shape:
                # use .shape in the message too: .get_shape() does not exist
                # on numpy arrays and would raise inside the warning itself
                print(f"[verify_example] (WARNING) error count: {self.error_count}, "
                      f"different shape of {key}: {drive_example[key].shape} != {example[key].shape}")
                self.error_count += 1
                assert self.error_count < 10
                # fixed: message previously claimed the key was missing
                raise MyExceptionToCatch(f"different shape of {key}")
        return drive_example

    def write_tfrecord(self, example_serial, drive_index):
        """Write one serialized example; roll over to a new shard when full."""
        self.writer.write(example_serial)
        self.example_count_in_shard += 1
        self.example_count_in_drive += 1
        self.total_example_count += 1
        # reset and create a new tfrecord file once the shard reached its
        # maximum size (>=, so a shard never exceeds shard_size examples)
        if self.example_count_in_shard >= self.shard_size:
            self.shard_count += 1
            self.example_count_in_shard = 0
            self.open_new_writer(drive_index)

    def open_new_writer(self, drive_index):
        raise NotImplementedError()

    def write_tfrecord_config(self, example):
        """Save the drive's example properties and counts to tfr_config.txt."""
        if self.example_count_in_drive == 0:
            return
        if ('image' not in example) or (example['image'] is None):
            return
        config = inspect_properties(example)
        config["length"] = self.example_count_in_drive
        config["imshape"] = self.shwc_shape
        print("## save config", config)
        with open(op.join(self.tfr_drive_path, "tfr_config.txt"), "w") as fr:
            json.dump(config, fr)

    def on_exit(self):
        """Close the current writer; used as PathManager's closer callback."""
        if self.writer:
            self.writer.close()
            self.writer = None

    def wrap_up(self):
        """Finalize the dataset (e.g. rename the temporary directory)."""
        raise NotImplementedError()
# ============================================================================
# TfrecordMakers that write all shards directly into the tfrpath directory
# (no per-drive subdirectories)
class TfrecordMakerSingleDir(TfrecordMakerBase):
    """TfrecordMaker that writes all shards into a single output directory.

    One config file is written for the whole dataset, so "length" records
    the total example count rather than the per-drive count.
    """
    def __init__(self, dataset, split, srcpath, tfrpath, shard_size, stereo, shwc_shape):
        super().__init__(dataset, split, srcpath, tfrpath, shard_size, stereo, shwc_shape)

    def list_drive_paths(self, srcpath, split):
        raise NotImplementedError()

    def init_drive_tfrecord(self, drive_index=0):
        """All drives share one directory; a writer is opened only once."""
        outpath = self.tfrpath__
        print("[init_drive_tfrecord] outpath:", outpath)
        # change path to check date integrity
        self.pm.reopen([outpath], closer_func=self.on_exit)
        self.tfr_drive_path = outpath
        self.example_count_in_drive = 0
        if drive_index == 0:
            self.open_new_writer(drive_index)
        return False

    def open_new_writer(self, drive_index):
        outfile = f"{self.tfr_drive_path}/shard_{self.shard_count:03d}.tfrecord"
        print("\n==== Open a new tfrecord:", op.basename(outfile))
        self.writer = tf.io.TFRecordWriter(outfile)

    def write_tfrecord_config(self, example):
        """Save dataset-wide example properties and total count."""
        if self.example_count_in_drive == 0:
            return
        # guard against an empty/invalid last example (same check as the
        # base class) so inspect_properties() is never fed bad input
        if ('image' not in example) or (example['image'] is None):
            return
        config = inspect_properties(example)
        config["length"] = self.total_example_count
        config["imshape"] = self.shwc_shape
        print("## save config", config)
        with open(op.join(self.tfr_drive_path, "tfr_config.txt"), "w") as fr:
            json.dump(config, fr)

    def wrap_up(self):
        # promote the temporary directory to the final tfrecord path
        os.rename(self.tfrpath__, self.tfrpath)
# For the KITTI raw dataset only: tfrecords are generated from pre-extracted files
class KittiRawTfrecordMaker(TfrecordMakerSingleDir):
    """Tfrecord maker for KITTI raw; drives come from a bundled scene list."""
    def __init__(self, dataset, split, srcpath, tfrpath, shard_size, stereo, shwc_shape):
        super().__init__(dataset, split, srcpath, tfrpath, shard_size, stereo, shwc_shape)

    def get_example_maker(self, dataset, split, shwc_shape, data_keys):
        # the KITTI reader needs the source path to locate extracted files
        return ExampleMaker(dataset, split, shwc_shape, data_keys, self.srcpath)

    def list_drive_paths(self, srcpath, split):
        """Read the (date, drive) tuples from the packaged scene-list file."""
        split_ = "train" if split == "train" else "test"
        resource_dir = op.join(op.dirname(op.abspath(__file__)), "resources")
        filename = op.join(resource_dir, f"kitti_raw_{split_}_scenes.txt")
        with open(filename, "r") as f:
            lines = sorted(f.readlines())
        drives = [tuple(line.strip("\n").split()) for line in lines]
        print("[list_drive_paths] drive list:", drives[:5])
        return drives
# For the KITTI odometry dataset only: tfrecords are generated from pre-extracted files
class KittiOdomTfrecordMaker(TfrecordMakerSingleDir):
    """Tfrecord maker for KITTI odometry; drives are two-digit sequence ids."""
    def __init__(self, dataset, split, srcpath, tfrpath, shard_size, stereo, shwc_shape):
        super().__init__(dataset, split, srcpath, tfrpath, shard_size, stereo, shwc_shape)

    def get_example_maker(self, dataset, split, shwc_shape, data_keys):
        return ExampleMaker(dataset, split, shwc_shape, data_keys, self.srcpath)

    def list_drive_paths(self, srcpath, split):
        """Return sequence ids like "00"; "09"/"10" are held out for testing."""
        if split != "train":
            return ["09", "10"]
        # sequence "12" is excluded because its color distribution differs
        # drastically between the left and right cameras
        train_ids = list(range(0, 9)) + list(range(11, 22))
        return [f"{i:02d}" for i in train_ids if i != 12]
class DrivingStereoTfrecordMaker(TfrecordMakerSingleDir):
    """Tfrecord maker for the DrivingStereo dataset (one zip per drive)."""
    def __init__(self, dataset, split, srcpath, tfrpath, shard_size, stereo, shwc_shape):
        super().__init__(dataset, split, srcpath, tfrpath, shard_size, stereo, shwc_shape)

    def list_drive_paths(self, srcpath, split):
        # drive_path like : .../driving_stereo/train-left-image/2018-07-16-15-18-53.zip
        subdir = f"{'train' if split == 'train' else 'test'}-left-image"
        return sorted(glob(op.join(srcpath, subdir, "*.zip")))
class WaymoTfrecordMaker(TfrecordMakerBase):
    """Tfrecord maker for Waymo; each drive gets its own output directory."""
    def __init__(self, dataset, split, srcpath, tfrpath, shard_size, stereo, shwc_shape):
        super().__init__(dataset, split, srcpath, tfrpath, shard_size, stereo, shwc_shape)

    def list_drive_paths(self, srcpath, split):
        return sorted(glob(op.join(srcpath, "training_*")))

    def init_drive_tfrecord(self, drive_index=0):
        """Create the per-drive output directory; True means skip the drive."""
        outpath = f"{self.tfrpath__}/drive_{drive_index:03d}"
        print("[init_drive_tfrecord] outpath:", outpath)
        if op.isdir(outpath):
            print(f"[init_drive_tfrecord] {op.basename(outpath)} exists. move onto the next")
            return True
        # change path to check date integrity
        self.pm.reopen([outpath], closer_func=self.on_exit)
        self.tfr_drive_path = outpath
        self._reset_drive_counters()
        self.open_new_writer(drive_index)
        return False

    def _reset_drive_counters(self):
        # a new drive starts with fresh shard/example counters
        self.shard_count = 0
        self.example_count_in_shard = 0
        self.example_count_in_drive = 0

    def open_new_writer(self, drive_index):
        outfile = f"{self.tfr_drive_path}/drive_{drive_index:03d}_shard_{self.shard_count:03d}.tfrecord"
        self.writer = tf.io.TFRecordWriter(outfile)

    def wrap_up(self):
        move_tfrecord_and_merge_configs(self.tfrpath__, self.tfrpath)
import zipfile
class CityscapesTfrecordMaker(TfrecordMakerBase):
    """Tfrecord maker for Cityscapes, reading frames directly from zip archives.

    The dataset variant ("extra" or "sequence") is encoded in the dataset
    name after a double underscore, e.g. "cityscapes__sequence".
    """
    def __init__(self, dataset, split, srcpath, tfrpath, shard_size, stereo, shwc_shape):
        # dataset is like "cityscapes__sequence"; the suffix selects the zip set
        self.zip_suffix = dataset.split("__")[1]
        self.zip_files = self.open_zip_files(srcpath, self.zip_suffix)
        super().__init__(dataset, split, srcpath, tfrpath, shard_size, stereo, shwc_shape)
        self.city = ""  # city currently being converted (set per drive)
        print(f"[CityscapesTfrecordMaker] zip_suffix={self.zip_suffix}")

    def open_zip_files(self, srcpath, zip_suffix):
        """Open the left/right/camera(/disparity) archives for the variant.

        Returns a dict of open zipfile.ZipFile handles keyed by content type.
        """
        zip_files = dict()
        if zip_suffix == "extra":
            basic_name = op.join(srcpath, "leftImg8bit_trainextra.zip")
        elif zip_suffix == "sequence":
            basic_name = op.join(srcpath, "leftImg8bit_sequence_trainvaltest.zip")
        else:
            assert 0, f"Wrong zip suffix: {zip_suffix}"
        zip_files["leftImg"] = zipfile.ZipFile(basic_name, "r")
        zip_files["rightImg"] = zipfile.ZipFile(basic_name.replace("/leftImg8bit_", "/rightImg8bit_"), "r")
        if zip_suffix == "extra":
            zip_files["camera"] = zipfile.ZipFile(basic_name.replace("/leftImg8bit_", "/camera_"), "r")
        elif zip_suffix == "sequence":
            # NOTE(review): the camera archive name drops the "sequence" part
            # while the disparity archive keeps it — looks intentional but
            # verify against the actual Cityscapes archive file names
            zip_files["camera"] = zipfile.ZipFile(basic_name.replace("/leftImg8bit_sequence_", "/camera_"), "r")
            zip_files["disparity"] = zipfile.ZipFile(basic_name.replace("/leftImg8bit_", "/disparity_"), "r")
        return zip_files

    def get_example_maker(self, dataset, split, shwc_shape, data_keys):
        # the example maker reads frames straight out of the open zip files
        return ExampleMaker(dataset, split, shwc_shape, data_keys, self.zip_files)

    def list_drive_paths(self, srcpath, split):
        """Derive the unique per-city drive prefixes from the left-image zip."""
        filelist = self.zip_files["leftImg"].namelist()
        filelist = [file for file in filelist if file.endswith(".png")]
        filelist.sort()
        # drive path example: /leftImg8bit_sequence/train/aachen/aachen
        drive_paths = ["_".join(file.split("_")[:-3]) for file in filelist]
        drive_paths = list(set(drive_paths))
        drive_paths.sort()
        return drive_paths

    def init_drive_tfrecord(self, drive_index=0):
        """Prepare output for one city; return True to skip an existing one."""
        city = self.drive_paths[drive_index].split("/")[-1]
        # example: cityscapes__/sequence_aachen
        outpath = op.join(self.tfrpath__, f"{self.zip_suffix}_{city}")
        print("[init_drive_tfrecord] outpath:", outpath)
        if op.isdir(outpath):
            print(f"[init_drive_tfrecord] {op.basename(outpath)} exists. move onto the next")
            return True
        # change path to check date integrity
        self.pm.reopen([outpath], closer_func=self.on_exit)
        self.tfr_drive_path = outpath
        self.city = city
        self.shard_count = 0
        self.example_count_in_shard = 0
        self.example_count_in_drive = 0
        self.open_new_writer(drive_index)
        return False

    def open_new_writer(self, drive_index):
        outfile = f"{self.tfr_drive_path}/{self.zip_suffix}_{self.city}_shard_{self.shard_count:03d}.tfrecord"
        self.writer = tf.io.TFRecordWriter(outfile)

    def wrap_up(self):
        # TODO WARNING!! sequence MUST be created after extra!
        # merging only happens for the "sequence" run, which assumes the
        # "extra" variant has already written into the same temp directory
        if self.zip_suffix == "sequence":
            move_tfrecord_and_merge_configs(self.tfrpath__, self.tfrpath)
class A2D2TfrecordMaker(TfrecordMakerBase):
def __init__(self, dataset, split, srcpath, tfrpath, shard_size, stereo, shwc_shape):
super().__init__(dataset, split, srcpath, tfrpath, shard_size, stereo, shwc_shape)
def list_drive_paths(self, srcpath, split):
drive_paths = glob(self.srcpath + "/*_camera_frontleft.zip")
return drive_paths
def get_example_maker(self, dataset, split, shwc_shape, data_keys):
return ExampleMaker(dataset, split, shwc_shape, data_keys)
def init_drive_tfrecord(self, drive_index=0):
drivetime = op.basename(self.drive_paths[drive_index]).split("-")[1].split("_")[0]
# example: "20180810150607" from "camera_lidar-20180810150607_camera_frontleft.zip"
outpath = op.join(self.tfrpath__, drivetime)
print("[init_drive_tfrecord] outpath:", outpath)
if op.isdir(outpath):
print(f"[init_drive_tfrecord] {op.basename(outpath)} exists. move onto the next")
return True
# change path to check date integrity
self.pm.reopen([outpath], closer_func=self.on_exit)
self.tfr_drive_path = outpath
self.shard_count = 0
self.example_count_in_shard = 0
self.example_count_in_drive = 0
self.open_new_writer(drive_index)
return False
def open_new_writer(self, drive_index):
| |
data = self.get_data("checked_out_acs.json")
result = json.loads(data)
fulfill_data = self.api.parse_fulfill_result(result['result'])
eq_(fulfill_data[0], """http://afs.enkilibrary.org/fulfillment/URLLink.acsm?action=enterloan&ordersource=Califa&orderid=ACS4-9243146841581187248119581&resid=urn%3Auuid%3Ad5f54da9-8177-43de-a53d-ef521bc113b4&gbauthdate=Wed%2C+23+Aug+2017+19%3A42%3A35+%2B0000&dateval=1503517355&rights=%24lat%231505331755%24&gblver=4&auth=<PASSWORD>""")
eq_(fulfill_data[1], 'epub')
def test_fulfill_success(self):
# Test the fulfill() method.
patron = self._patron()
patron.authorization_identifier = "123"
pool = self._licensepool(None)
data = self.get_data("checked_out_acs.json")
self.api.queue_response(200, content=data)
fulfillment = self.api.fulfill(patron, "pin", pool, "internal format")
# An appropriate request to the "getSELink" endpoint was made.,
[method, url, headers, data, params, kwargs] = self.api.requests.pop()
eq_("get", method)
eq_(self.api.base_url + "UserAPI", url)
eq_("getSELink", params['method'])
eq_("123", params['username'])
eq_("pin", params['password'])
# In particular, the Enki library ID associated with the
# patron's library was used as the 'lib' parameter.
eq_("c", params['lib'])
# A FulfillmentInfo for the loan was returned.
assert isinstance(fulfillment, FulfillmentInfo)
eq_(fulfillment.identifier, pool.identifier.identifier)
eq_(fulfillment.collection_id, pool.collection.id)
eq_(DeliveryMechanism.ADOBE_DRM, fulfillment.content_type)
assert fulfillment.content_link.startswith(
"http://afs.enkilibrary.org/fulfillment/URLLink.acsm"
)
eq_(fulfillment.content_expires,
datetime.datetime(2017, 9, 13, 19, 42, 35, 0))
def test_patron_activity(self):
data = self.get_data("patron_response.json")
self.api.queue_response(200, content=data)
patron = self._patron()
patron.authorization_identifier = "123"
[loan] = self.api.patron_activity(patron, 'pin')
# An appropriate Enki API call was issued.
[method, url, headers, data, params, kwargs] = self.api.requests.pop()
eq_("get", method)
eq_(self.api.base_url + "UserAPI", url)
eq_("getSEPatronData", params['method'])
eq_("123", params['username'])
eq_("pin", params['password'])
# In particular, the Enki library ID associated with the
# patron's library was used as the 'lib' parameter.
eq_("c", params['lib'])
# The result is a single LoanInfo.
assert isinstance(loan, LoanInfo)
eq_(Identifier.ENKI_ID, loan.identifier_type)
eq_(DataSource.ENKI, loan.data_source_name)
eq_("231", loan.identifier)
eq_(self.collection, loan.collection(self._db))
eq_(datetime.datetime(2017, 8, 15, 14, 56, 51), loan.start_date)
eq_(datetime.datetime(2017, 9, 5, 14, 56, 51), loan.end_date)
def test_patron_activity_failure(self):
patron = self._patron()
self.api.queue_response(404, "No such patron")
collect = lambda: list(self.api.patron_activity(patron, 'pin'))
assert_raises(PatronNotFoundOnRemote, collect)
msg = dict(result=dict(message="Login unsuccessful."))
self.api.queue_response(200, content=json.dumps(msg))
assert_raises(AuthorizationFailedException, collect)
msg = dict(result=dict(message="Some other error."))
self.api.queue_response(200, content=json.dumps(msg))
assert_raises(CirculationException, collect)
class TestBibliographicParser(BaseEnkiTest):
    """Tests for turning Enki API book blobs into Metadata objects."""

    def test_process_all(self):
        # process_all() should split a result list and hand each book
        # to extract_bibliographic().
        class Mock(BibliographicParser):
            inputs = []
            def extract_bibliographic(self, element):
                self.inputs.append(element)
        parser = Mock()
        def consume(*args):
            """Consume a generator's output."""
            list(parser.process_all(*args))
        # First try various inputs that run successfully but don't
        # extract any data.
        consume("{}")
        eq_([], parser.inputs)
        consume(dict(result=dict()))
        eq_([], parser.inputs)
        consume(dict(result=dict(titles=[])))
        eq_([], parser.inputs)
        # Now try a list of books that is split up and each book
        # processed separately.
        data = self.get_data("get_update_titles.json")
        consume(data)
        eq_(6, len(parser.inputs))

    def test_extract_bibliographic(self):
        """Test the ability to turn an individual book data blob
        into a Metadata.
        """
        data = json.loads(self.get_data("get_item_french_title.json"))
        parser = BibliographicParser()
        m = parser.extract_bibliographic(data['result'])
        assert isinstance(m, Metadata)
        eq_(u'Le But est le Seul Choix', m.title)
        eq_(u'fre', m.language)
        eq_(u'Law of Time Press', m.publisher)
        # Two identifiers, Enki and ISBN, with Enki being primary.
        enki, isbn = sorted(m.identifiers, key=lambda x: x.type)
        eq_(Identifier.ENKI_ID, enki.type)
        eq_("21135", enki.identifier)
        eq_(enki, m.primary_identifier)
        eq_(Identifier.ISBN, isbn.type)
        eq_("9780988432727", isbn.identifier)
        # One contributor
        [contributor] = m.contributors
        eq_("Hoffmeister, David", contributor.sort_name)
        eq_([Contributor.AUTHOR_ROLE], contributor.roles)
        # Two links -- full-sized image and description.
        image, description = sorted(m.links, key=lambda x: x.rel)
        eq_(Hyperlink.IMAGE, image.rel)
        eq_("https://enkilibrary.org/bookcover.php?id=21135&isbn=9780988432727&category=EMedia&size=large", image.href)
        eq_(Hyperlink.DESCRIPTION, description.rel)
        eq_("text/html", description.media_type)
        assert description.content.startswith("<NAME> ré")
        # The full-sized image has a thumbnail.
        eq_(Hyperlink.THUMBNAIL_IMAGE, image.thumbnail.rel)
        eq_("http://thumbnail/", image.thumbnail.href)
        # Four subjects.
        subjects = sorted(m.subjects, key=lambda x: x.identifier)
        # All subjects are classified as tags, rather than BISAC, due
        # to inconsistencies in the data presentation.
        for i in subjects:
            eq_(i.type, Subject.TAG)
            eq_(None, i.name)
        eq_([u'BODY MIND SPIRIT Spirituality General',
             u'BODY, MIND & SPIRIT / Spirituality / General.',
             u'Spirituality',
             u'Spirituality.'],
            [x.identifier for x in subjects]
        )
        # We also have information about the current availability.
        circulation = m.circulation
        assert isinstance(circulation, CirculationData)
        eq_(1, circulation.licenses_owned)
        eq_(1, circulation.licenses_available)
        eq_(0, circulation.licenses_reserved)
        eq_(0, circulation.patrons_in_hold_queue)
        # The book is available as an ACS-encrypted EPUB.
        [format] = circulation.formats
        eq_(Representation.EPUB_MEDIA_TYPE, format.content_type)
        eq_(DeliveryMechanism.ADOBE_DRM, format.drm_scheme)

    def test_extract_bibliographic_pdf(self):
        """Test the ability to distinguish between PDF and EPUB results"""
        data = json.loads(self.get_data("pdf_document_entry.json"))
        parser = BibliographicParser()
        m = parser.extract_bibliographic(data['result'])
        assert isinstance(m, Metadata)
        # The book is available as a non-ACS PDF.
        circulation = m.circulation
        assert isinstance(circulation, CirculationData)
        [format] = circulation.formats
        eq_(Representation.PDF_MEDIA_TYPE, format.content_type)
        eq_(DeliveryMechanism.NO_DRM, format.drm_scheme)
class TestEnkiImport(BaseEnkiTest):
def test_import_instantiation(self):
"""Test that EnkiImport can be instantiated"""
importer = EnkiImport(self._db, self.collection, api_class=self.api)
eq_(self.api, importer.api)
eq_(self.collection, importer.collection)
    def test_run_once(self):
        """run_once() chooses full vs. incremental import from the timestamp."""
        dummy_value = object()
        class Mock(EnkiImport):
            incremental_import_called_with = dummy_value
            def full_import(self):
                self.full_import_called = True
                return 10
            def incremental_import(self, since):
                self.incremental_import_called_with = since
                return 4, 7
        importer = Mock(self._db, self.collection, api_class=self.api)
        # If the incoming TimestampData makes it look like the process
        # has never successfully completed, full_import() is called.
        progress = TimestampData(start=None)
        importer.run_once(progress)
        eq_(True, importer.full_import_called)
        eq_("New or modified titles: 10. Titles with circulation changes: 0.",
            progress.achievements)
        # It doesn't call incremental_import().
        eq_(dummy_value, importer.incremental_import_called_with)
        # If run_once() is called with a TimestampData that indicates
        # an earlier successful run, a time OVERLAP before the
        # previous completion time is passed into incremental_import()
        importer.full_import_called = False
        a_while_ago = datetime.datetime(2011, 1, 1)
        even_earlier = a_while_ago - datetime.timedelta(days=100)
        timestamp = TimestampData(start=even_earlier, finish=a_while_ago)
        new_timestamp = importer.run_once(timestamp)
        passed_in = importer.incremental_import_called_with
        expect = a_while_ago - importer.OVERLAP
        # allow a couple of seconds of slop in the wall-clock comparison
        assert abs((passed_in-expect).total_seconds()) < 2
        # full_import was not called.
        eq_(False, importer.full_import_called)
        # The proposed new TimestampData covers the entire timespan
        # from the 'expect' period to now.
        eq_(expect, new_timestamp.start)
        now = datetime.datetime.utcnow()
        assert (now - new_timestamp.finish).total_seconds() < 2
        eq_("New or modified titles: 4. Titles with circulation changes: 7.",
            new_timestamp.achievements)
def test_full_import(self):
"""full_import calls get_all_titles over and over again until
it returns nothing, and processes every book it receives.
"""
class MockAPI(object):
def __init__(self, pages):
"""Act like an Enki API with predefined pages of results."""
self.pages = pages
self.get_all_titles_called_with = []
def get_all_titles(self, strt, qty):
self.get_all_titles_called_with.append((strt, qty))
if self.pages:
return self.pages.pop(0)
return []
class Mock(EnkiImport):
processed = []
def process_book(self, data):
self.processed.append(data)
# Simulate an Enki site with two pages of results.
pages = [[1,2], [3]]
api = MockAPI(pages)
# Do the 'import'.
importer = Mock(self._db, self.collection, api_class=api)
importer.full_import()
# get_all_titles was called three times, once for the first two
# pages and a third time to verify that there are no more results.
eq_([(0, 10), (10, 10), (20, 10)],
api.get_all_titles_called_with)
# Every item on every 'page' of results was processed.
eq_([1,2,3], importer.processed)
def test_incremental_import(self):
"""incremental_import calls process_book() on the output of
EnkiAPI.updated_titles(), and then calls update_circulation().
"""
class MockAPI(object):
def updated_titles(self, since):
self.updated_titles_called_with = since
yield 1
yield 2
class Mock(EnkiImport):
processed = []
def process_book(self, data):
self.processed.append(data)
def update_circulation(self, since):
self.update_circulation_called_with = since
api = MockAPI()
importer = Mock(self._db, self.collection, api_class=api)
since = object()
importer.incremental_import(since)
# The 'since' value was passed into both methods.
eq_(since, api.updated_titles_called_with)
eq_(since, importer.update_circulation_called_with)
# The two items yielded by updated_titles() were run
# through process_book().
eq_([1,2], importer.processed)
    def test_update_circulation(self):
        """update_circulation() slices the elapsed time into two-hour chunks."""
        # update_circulation() makes two-hour slices out of time
        # between the previous run and now, and passes each slice into
        # _update_circulation, keeping track of the total number of
        # circulation events encountered.
        class Mock(EnkiImport):
            def __init__(self, *args, **kwargs):
                super(Mock, self).__init__(*args, **kwargs)
                self._update_circulation_called_with = []
                self.sizes = [1,2]
            def _update_circulation(self, start, end):
                # Pretend that one circulation event was discovered
                # during the given time span.
                self._update_circulation_called_with.append((start, end))
                return self.sizes.pop()
        # Call update_circulation() on a time three hours in the
        # past. It will return a count of 3 -- the sum of the return
        # values from our mocked _update_circulation().
        now = datetime.datetime.utcnow()
        one_hour_ago = now - datetime.timedelta(hours=1)
        three_hours_ago = now - datetime.timedelta(hours=3)
        monitor = Mock(self._db, self.collection, api_class=MockEnkiAPI)
        eq_(3, monitor.update_circulation(three_hours_ago))
        # slice_timespan() sliced up the timeline into two-hour
        # chunks. It yielded up two chunks: "three hours ago" to "one
        # hour ago" and "one hour ago" to "now".
        #
        # _update_circulation() was called on each chunk, and in each
        # case the return value was an item popped from monitor.sizes.
        chunk1, chunk2 = monitor._update_circulation_called_with
        eq_((three_hours_ago, one_hour_ago), chunk1)
        eq_(one_hour_ago, chunk2[0])
        # the second chunk ends at "now" (within wall-clock slop)
        assert (chunk2[1] - now).total_seconds() < 2
        # our mocked 'sizes' list is now empty.
        eq_([], monitor.sizes)
def test__update_circulation(self):
# Here's information about a book we didn't know about before.
circ_data = {"result":{"records":1,"recentactivity":[{"historyid":"3738","id":"34278","recordId":"econtentRecord34278","time":"2018-06-26 10:08:23","action":"Checked Out","isbn":"9781618856050","availability":{"accessType":"acs","totalCopies":"1","availableCopies":0,"onHold":0}}]}}
# Because the book is unknown, update_circulation will do a follow-up
# call to api.get_item to get bibliographic information.
bib_data = {"result":{"id":"34278","recordId":"econtentRecord34278","isbn":"9781618856050","title":"A book","availability":{"accessType":"acs","totalCopies":"1","availableCopies":0,"onHold":0}}}
api = MockEnkiAPI(self._db)
api.queue_response(200, content=json.dumps(circ_data))
api.queue_response(200, content=json.dumps(bib_data))
from core.mock_analytics_provider import MockAnalyticsProvider
analytics = MockAnalyticsProvider()
monitor = EnkiImport(self._db, self.collection, api_class=api,
analytics=analytics)
end = datetime.datetime.utcnow()
# Ask for circulation events from one hour in 1970.
start = datetime.datetime(1970, 1, 1, 0, 0, 0)
end = datetime.datetime(1970, 1, 1, 1, 0, 0)
monitor._update_circulation(start, | |
# ----------------------------------------------------------------------------
# SX Tools - Maya vertex painting toolkit
# (c) 2017-2019 <NAME> / Secret Exit Ltd.
# Released under MIT license
# ----------------------------------------------------------------------------
import maya.cmds
import maya.mel as mel
import sxglobals
class UI(object):
def __init__(self):
self.history = False
self.multiShapes = False
return None
    def __del__(self):
        # Log on garbage collection so tool shutdown is visible in the script editor.
        print('SX Tools: Exiting UI')
def calculateDivision(self):
paneHeight = maya.cmds.workspaceControl(sxglobals.dockID, query=True, height=True)
if sxglobals.settings.frames['paneDivision'] == 0:
layerHeight = (
((sxglobals.settings.project['LayerCount'] +
sxglobals.settings.project['ChannelCount']) *
sxglobals.settings.tools['lineHeight'] + 170) *
sxglobals.settings.tools['displayScale'])
else:
layerHeight = maya.cmds.layout('topCanvas', query=True, height=True)
division = int(float(layerHeight) / float(paneHeight) * 100)
if division > 100:
division = 100
sxglobals.settings.frames['paneDivision'] = division
    def historyUI(self):
        """Draw a warning banner when construction history is detected.

        The button deletes existing history on the selected objects,
        turns construction history off globally, and refreshes the UI.
        """
        maya.cmds.frameLayout(
            'historyWarningFrame',
            parent='topCanvas',
            label='WARNING: Construction history detected!',
            backgroundColor=(0.35, 0.1, 0),
            width=250,
            marginWidth=10,
            marginHeight=5)
        maya.cmds.button(
            'disableHistoryButton',
            parent='historyWarningFrame',
            label='Delete and Disable History',
            # command string is evaluated by Maya in the global namespace
            command=(
                'maya.cmds.delete(sxtools.sxglobals.settings.objectArray, ch=True)\n'
                'maya.cmds.constructionHistory(toggle=False)\n'
                'sxtools.sxglobals.core.updateSXTools()'))
    def multiShapesUI(self):
        """Draw a warning banner when an object carries multiple shape nodes.

        The button deletes the extra shape nodes and refreshes the UI.
        """
        maya.cmds.frameLayout(
            'shapeWarningFrame',
            parent='topCanvas',
            label='WARNING: Multiple shapes in one object!',
            backgroundColor=(0.6, 0.3, 0),
            width=250,
            marginWidth=10,
            marginHeight=5)
        maya.cmds.button(
            'disableShapesButton',
            parent='shapeWarningFrame',
            label='Delete Extra Shapes',
            # command string is evaluated by Maya in the global namespace
            command=(
                'maya.cmds.delete('
                'sxtools.sxglobals.settings.multiShapeArray, shape=True)\n'
                'sxtools.sxglobals.core.updateSXTools()'))
def setupProjectUI(self):
maya.cmds.frameLayout(
'emptyFrame',
label='No mesh objects selected',
parent='topCanvas',
width=250,
marginWidth=10,
marginHeight=5)
maya.cmds.frameLayout(
'prefsFrame',
parent='topCanvas',
width=250,
label='Tool Preferences',
marginWidth=5,
marginHeight=5,
collapsable=True,
collapse=sxglobals.settings.frames['prefsCollapse'],
collapseCommand=(
"sxtools.sxglobals.settings.frames['prefsCollapse']=True"),
expandCommand=(
"sxtools.sxglobals.settings.frames['prefsCollapse']=False"))
if 'dockPosition' in sxglobals.settings.project:
dockId = sxglobals.settings.project['DockPosition']
else:
dockId = 1
maya.cmds.radioButtonGrp(
'dockPrefsButtons',
parent='prefsFrame',
vertical=True,
labelArray2=['Dock Left', 'Dock Right'],
select=dockId,
numberOfRadioButtons=2,
onCommand1=(
"sxtools.sxglobals.settings.project['dockPosition'] = 1\n"
"maya.cmds.workspaceControl('SXToolsUI', edit=True,"
" dockToControl=('Outliner', 'right'))"),
onCommand2=(
"sxtools.sxglobals.settings.project['dockPosition'] = 2\n"
"maya.cmds.workspaceControl('SXToolsUI', edit=True,"
" dockToControl=('AttributeEditor', 'left'))"))
maya.cmds.checkBox(
'matchSubdivisionToggle',
label='Accurate crease preview',
value=sxglobals.settings.tools['matchSubdivision'],
ann=(
'Match crease values with subdivision level in the viewport.\n'
'This incurs a performance penalty on every selection.'),
onCommand='sxtools.sxglobals.settings.tools["matchSubdivision"]=True',
offCommand='sxtools.sxglobals.settings.tools["matchSubdivision"]=False')
maya.cmds.checkBox(
'historyToggle',
label='Construction History Enabled',
value=maya.cmds.constructionHistory(query=True, toggle=True),
ann='It is strongly recommended to DISABLE HISTORY when using SX Tools.',
onCommand='maya.cmds.constructionHistory(toggle=True)',
offCommand='maya.cmds.constructionHistory(toggle=False)')
maya.cmds.button(
'resetButton',
label='Reset SX Tools',
ann='Clear all optionVars',
parent='prefsFrame',
height=30,
width=100,
command=('sxtools.sxglobals.core.resetSXTools()'))
maya.cmds.frameLayout(
'setupFrame',
parent='topCanvas',
width=250,
label='Project Setup',
marginWidth=5,
marginHeight=5,
collapsable=True,
collapse=sxglobals.settings.frames['setupCollapse'],
collapseCommand=(
"sxtools.sxglobals.settings.frames['setupCollapse']=True"),
expandCommand=(
"sxtools.sxglobals.settings.frames['setupCollapse']=False"),
borderVisible=False)
maya.cmds.columnLayout(
'prefsColumn',
parent='setupFrame',
rowSpacing=5,
adjustableColumn=True)
maya.cmds.button(
label='Select Settings File',
parent='prefsColumn',
statusBarMessage='Shift-click button to reload settings from file',
command=(
'sxtools.sxglobals.settings.setFile(0)\n'
'sxtools.sxglobals.core.updateSXTools()'))
if maya.cmds.optionVar(exists='SXToolsSettingsFile') and len(
str(maya.cmds.optionVar(query='SXToolsSettingsFile'))) > 0:
maya.cmds.text(
label='Current settings location:')
maya.cmds.text(
label=maya.cmds.optionVar(query='SXToolsSettingsFile'),
ww=True)
else:
maya.cmds.text(
label='WARNING: Settings file location not set!',
backgroundColor=(0.35, 0.1, 0),
ww=True)
maya.cmds.text(label=' ')
maya.cmds.rowColumnLayout(
'refLayerRowColumns',
parent='setupFrame',
numberOfColumns=3,
columnWidth=((1, 90), (2, 60), (3, 80)),
columnAttach=[(1, 'left', 0), (2, 'left', 0), (3, 'left', 0)],
rowSpacing=(1, 0))
maya.cmds.text(label=' ')
maya.cmds.text(label='Count')
maya.cmds.text(label='Mask Export')
# Max layers 10. Going higher causes instability on GPU compositing.
maya.cmds.text(label='Color layers:')
maya.cmds.intField(
'layerCount',
value=10,
minValue=1,
maxValue=10,
step=1,
changeCommand=(
"sxtools.sxglobals.ui.refreshLayerDisplayNameList()\n"
"maya.cmds.setFocus('MayaWindow')"))
if 'LayerCount' in sxglobals.settings.project:
maya.cmds.intField(
'layerCount',
edit=True,
value=sxglobals.settings.project['LayerCount'])
#maya.cmds.textField('maskExport', text='U1')
maya.cmds.textField(
'maskExport',
text=sxglobals.settings.refLayerData['layer1'][2])
maya.cmds.text(label=' ')
maya.cmds.text(label=' ')
maya.cmds.text(label=' ')
maya.cmds.text(label='Channel')
maya.cmds.text(label='Enabled')
maya.cmds.text(label='Export UV')
maya.cmds.text('occlusionLabel', label='Occlusion:')
maya.cmds.checkBox('occlusion', label='', value=True)
maya.cmds.textField(
'occlusionExport',
text=sxglobals.settings.refLayerData['occlusion'][2])
maya.cmds.text('metallicLabel', label='Metallic:')
maya.cmds.checkBox('metallic', label='', value=True)
maya.cmds.textField(
'metallicExport',
text=sxglobals.settings.refLayerData['metallic'][2])
maya.cmds.text('smoothnessLabel', label='Smoothness:')
maya.cmds.checkBox('smoothness', label='', value=True)
maya.cmds.textField(
'smoothnessExport',
text=sxglobals.settings.refLayerData['smoothness'][2])
maya.cmds.text('transmissionLabel', label='Transmission:')
maya.cmds.checkBox('transmission', label='', value=True)
maya.cmds.textField(
'transmissionExport',
text=sxglobals.settings.refLayerData['transmission'][2])
maya.cmds.text('emissionLabel', label='Emission:')
maya.cmds.checkBox('emission', label='', value=True)
maya.cmds.textField(
'emissionExport',
text=sxglobals.settings.refLayerData['emission'][2])
maya.cmds.text('alphaOverlay1Label', label='Overlay1 (A):')
maya.cmds.textField('alphaOverlay1', text='layer8')
maya.cmds.textField('alphaOverlay1Export', text='U4')
maya.cmds.text('alphaOverlay2Label', label='Overlay2 (A):')
maya.cmds.textField('alphaOverlay2', text='layer9')
maya.cmds.textField('alphaOverlay2Export', text='V4')
maya.cmds.text('overlayLabel', label='Overlay (RGBA):')
maya.cmds.textField('overlay', text='layer10')
maya.cmds.textField('overlayExport', text='UV5,UV6')
if 'LayerData' in sxglobals.settings.project:
maya.cmds.checkBox(
'occlusion',
edit=True,
value=bool(sxglobals.settings.project['LayerData']['occlusion'][5]))
maya.cmds.checkBox(
'metallic',
edit=True,
value=bool(sxglobals.settings.project['LayerData']['metallic'][5]))
maya.cmds.checkBox(
'smoothness',
edit=True,
value=bool(sxglobals.settings.project['LayerData']['smoothness'][5]))
maya.cmds.checkBox(
'transmission',
edit=True,
value=bool(sxglobals.settings.project['LayerData']['transmission'][5]))
maya.cmds.checkBox(
'emission',
edit=True,
value=bool(sxglobals.settings.project['LayerData']['emission'][5]))
maya.cmds.textField(
'maskExport',
edit=True,
text=(sxglobals.settings.project['LayerData']['layer1'][2]))
maya.cmds.textField(
'occlusionExport',
edit=True,
text=(sxglobals.settings.project['LayerData']['occlusion'][2]))
maya.cmds.textField(
'metallicExport',
edit=True,
text=(sxglobals.settings.project['LayerData']['metallic'][2]))
maya.cmds.textField(
'smoothnessExport',
edit=True,
text=(sxglobals.settings.project['LayerData']['smoothness'][2]))
maya.cmds.textField(
'transmissionExport',
edit=True,
text=(sxglobals.settings.project['LayerData']['transmission'][2]))
maya.cmds.textField(
'emissionExport',
edit=True,
text=(sxglobals.settings.project['LayerData']['emission'][2]))
alpha1 = None
alpha2 = None
alpha1Export = None
alpha2Export = None
overlay = None
overlayExport = None
for key, value in sxglobals.settings.project['LayerData'].iteritems():
if value[3] == 1:
alpha1 = key
alpha1Export = value[2]
if value[3] == 2:
alpha2 = key
alpha2Export = value[2]
if value[4]:
overlay = key
overlayExport = ', '.join(value[2])
maya.cmds.textField(
'alphaOverlay1',
edit=True,
text=alpha1)
maya.cmds.textField(
'alphaOverlay2',
edit=True,
text=alpha2)
maya.cmds.textField(
'alphaOverlay1Export',
edit=True,
text=alpha1Export)
maya.cmds.textField(
'alphaOverlay2Export',
edit=True,
text=alpha2Export)
maya.cmds.textField(
'overlay',
edit=True,
text=overlay)
maya.cmds.textField(
'overlayExport',
edit=True,
text=overlayExport)
maya.cmds.rowColumnLayout(
'numlayerFrames',
parent='setupFrame',
numberOfColumns=2,
columnWidth=((1, 160), (2, 70)),
columnAttach=[(1, 'left', 0), (2, 'left', 0)],
rowSpacing=(1, 0))
maya.cmds.text(label='Export Process Options')
maya.cmds.text(label=' ')
maya.cmds.text(label='Number of masks:')
maya.cmds.intField(
'numMasks',
minValue=0,
maxValue=10,
value=7,
step=1,
enterCommand=("maya.cmds.setFocus('MayaWindow')"))
if 'MaskCount' in sxglobals.settings.project:
maya.cmds.intField(
'numMasks',
edit=True,
value=sxglobals.settings.project['MaskCount'])
maya.cmds.text(label='Alpha-to-mask limit:')
maya.cmds.floatField(
'exportTolerance',
value=1.0,
minValue=0,
maxValue=1,
precision=1,
enterCommand=("maya.cmds.setFocus('MayaWindow')"))
if 'AlphaTolerance' in sxglobals.settings.project:
maya.cmds.floatField(
'exportTolerance',
edit=True,
value=sxglobals.settings.project['AlphaTolerance'])
maya.cmds.text(label='Export preview grid spacing:')
maya.cmds.intField(
'exportOffset',
value=5,
minValue=0,
step=1,
enterCommand=("maya.cmds.setFocus('MayaWindow')"))
if 'ExportOffset' in sxglobals.settings.project:
maya.cmds.intField(
'exportOffset',
edit=True,
value=sxglobals.settings.project['ExportOffset'])
maya.cmds.text(label='Use "_paletted" export suffix:')
maya.cmds.checkBox(
'suffixCheck',
label='',
value=False,
changeCommand=(
"sxtools.sxglobals.settings.project['ExportSuffix'] = ("
"maya.cmds.checkBox('suffixCheck', query=True, value=True))"))
if 'ExportSuffix' in sxglobals.settings.project:
maya.cmds.checkBox(
'suffixCheck',
edit=True,
value=sxglobals.settings.project['ExportSuffix'])
maya.cmds.text(label='')
maya.cmds.text(label='')
for i in xrange(10):
layerName = sxglobals.settings.refLayerData[sxglobals.settings.refArray[i]][6]
labelID = 'display'+str(i+1)
labelText = sxglobals.settings.refArray[i] + ' display name:'
fieldLabel = sxglobals.settings.refArray[i] + 'Display'
if (('LayerData' in sxglobals.settings.project) and
(layerName in sxglobals.settings.project['LayerData'].keys())):
layerName = sxglobals.settings.project['LayerData'][layerName][6]
maya.cmds.text(labelID, label=labelText)
maya.cmds.textField(fieldLabel, text=layerName)
maya.cmds.columnLayout(
'reflayerFrame',
parent='setupFrame',
rowSpacing=5,
adjustableColumn=True)
maya.cmds.text(label=' ', parent='reflayerFrame')
if maya.cmds.optionVar(exists='SXToolsSettingsFile') and len(
str(maya.cmds.optionVar(query='SXToolsSettingsFile'))) > 0:
maya.cmds.text(
label='(Shift-click below to apply built-in defaults)',
parent='reflayerFrame')
maya.cmds.button(
label='Apply Project Settings',
parent='reflayerFrame',
statusBarMessage=(
'Shift-click button to use the built-in default settings'),
command=(
"sxtools.sxglobals.settings.createPreferences()\n"
"sxtools.sxglobals.settings.setPreferences()\n"
"sxtools.sxglobals.settings.saveFile(0)\n"
"sxtools.sxglobals.settings.frames['setupCollapse']=True\n"
"sxtools.sxglobals.core.updateSXTools()"))
self.refreshLayerDisplayNameList()
maya.cmds.workspaceControl(
sxglobals.dockID, edit=True, resizeHeight=5, resizeWidth=250)
def refreshLayerDisplayNameList(self):
    """Sync the ten layer display-name text fields with the layer count:
    fields for active layers are enabled and populated, the rest greyed out."""
    for idx in xrange(10):
        refName = sxglobals.settings.refArray[idx]
        fieldName = refName + 'Display'
        if idx >= maya.cmds.intField('layerCount', query=True, value=True):
            # Beyond the active layer count: just disable the field.
            maya.cmds.textField(fieldName, edit=True, enable=False)
            continue
        # Prefer the project's saved display name; fall back to the
        # built-in reference data when the project has none.
        if ('LayerData' in sxglobals.settings.project and
                refName in sxglobals.settings.project['LayerData']):
            displayName = sxglobals.settings.project['LayerData'][refName][6]
        else:
            displayName = sxglobals.settings.refLayerData[refName][6]
        maya.cmds.textField(
            fieldName,
            edit=True,
            enable=True,
            text=displayName)
def exportObjectsUI(self):
    """Build the 'export objects' view: material preview mode selectors,
    export path selection and the FBX export button for the meshes
    currently in sxglobals.settings.objectArray."""
    maya.cmds.frameLayout(
        'exportObjFrame',
        label=str(len(sxglobals.settings.objectArray)) + ' export objects selected',
        parent='topCanvas',
        width=250,
        marginWidth=10,
        marginHeight=2)
    maya.cmds.button(
        label='Select and show all export meshes',
        command='sxtools.sxglobals.export.viewExported()')
    # Flip display layers so the source meshes are visible again.
    maya.cmds.button(
        label='Hide exported, show source meshes',
        command=(
            "maya.cmds.setAttr('exportsLayer.visibility', 0)\n"
            "maya.cmds.setAttr('skinMeshLayer.visibility', 0)\n"
            "maya.cmds.setAttr('assetsLayer.visibility', 1)\n"
            "maya.cmds.editDisplayLayerGlobals(cdl='assetsLayer')\n"
            "maya.cmds.delete(maya.cmds.createDisplayLayer(empty=True))\n"
            "sxtools.sxglobals.settings.tools['compositeEnabled']=True\n"
            "maya.cmds.select(clear=True)"))
    maya.cmds.text(label='Preview export object data:')
    # Three radio groups share one collection (shareCollection) so only a
    # single preview mode can be active across all twelve buttons.
    maya.cmds.radioButtonGrp(
        'exportShadingButtons1',
        parent='exportObjFrame',
        vertical=True,
        columnWidth4=(80, 80, 80, 80),
        columnAttach4=('left', 'left', 'left', 'left'),
        labelArray4=['Composite', 'Albedo', 'Layer Masks', 'Occlusion'],
        numberOfRadioButtons=4,
        onCommand1=("sxtools.sxglobals.export.viewExportedMaterial()"),
        onCommand2=("sxtools.sxglobals.export.viewExportedMaterial()"),
        onCommand3=("sxtools.sxglobals.export.viewExportedMaterial()"),
        onCommand4=("sxtools.sxglobals.export.viewExportedMaterial()"))
    maya.cmds.radioButtonGrp(
        'exportShadingButtons2',
        parent='exportObjFrame',
        vertical=True,
        shareCollection='exportShadingButtons1',
        columnWidth4=(80, 80, 80, 80),
        columnAttach4=('left', 'left', 'left', 'left'),
        labelArray4=['Metallic', 'Smoothness', 'Transmission', 'Emission'],
        numberOfRadioButtons=4,
        onCommand1=("sxtools.sxglobals.export.viewExportedMaterial()"),
        onCommand2=("sxtools.sxglobals.export.viewExportedMaterial()"),
        onCommand3=("sxtools.sxglobals.export.viewExportedMaterial()"),
        onCommand4=("sxtools.sxglobals.export.viewExportedMaterial()"))
    maya.cmds.radioButtonGrp(
        'exportShadingButtons3',
        parent='exportObjFrame',
        vertical=True,
        shareCollection='exportShadingButtons1',
        columnWidth4=(80, 80, 80, 80),
        columnAttach4=('left', 'left', 'left', 'left'),
        labelArray4=['Alpha Overlay 1', 'Alpha Overlay 2', 'Overlay', 'Sub-Meshes'],
        numberOfRadioButtons=4,
        onCommand1=("sxtools.sxglobals.export.viewExportedMaterial()"),
        onCommand2=("sxtools.sxglobals.export.viewExportedMaterial()"),
        onCommand3=("sxtools.sxglobals.export.viewExportedMaterial()"),
        onCommand4=("sxtools.sxglobals.export.viewExportedMaterial()"))
    # If any export object has subMeshes set, preselect the 'Sub-Meshes'
    # preview; otherwise (for-else: loop finished without break) default
    # to 'Composite'.
    for obj in sxglobals.settings.objectArray:
        if maya.cmds.getAttr(str(obj) + '.subMeshes'):
            maya.cmds.radioButtonGrp(
                'exportShadingButtons3',
                edit=True,
                select=4)
            break
    else:
        maya.cmds.radioButtonGrp(
            'exportShadingButtons1',
            edit=True,
            select=1)
    sxglobals.export.viewExportedMaterial()
    maya.cmds.button(
        label='Choose Export Path',
        width=120,
        command=(
            "sxtools.sxglobals.export.setExportPath()\n"
            "sxtools.sxglobals.core.updateSXTools()"))
    # Only show the FBX export button when a non-empty export path exists.
    if (('SXToolsExportPath' in sxglobals.settings.project) and
       (len(sxglobals.settings.project['SXToolsExportPath']) == 0)):
        maya.cmds.text(label='No export folder selected!')
    elif 'SXToolsExportPath' in sxglobals.settings.project:
        exportPathText = (
            'Export Path: ' + sxglobals.settings.project['SXToolsExportPath'])
        maya.cmds.text(label=exportPathText, ww=True)
        maya.cmds.button(
            label='Write FBX Files',
            width=120,
            command=(
                "sxtools.sxglobals.export.exportObjects("
                "sxtools.sxglobals.settings.project['SXToolsExportPath'])"))
    else:
        maya.cmds.text(label='No export folder selected!')
    maya.cmds.setParent('exportObjFrame')
    maya.cmds.setParent('topCanvas')
    # Keep the dock control at its compact size after rebuilding contents.
    maya.cmds.workspaceControl(
        sxglobals.dockID, edit=True, resizeHeight=5, resizeWidth=250)
def emptyObjectsUI(self):
    """Build the view shown when selected objects have no SX color layers,
    offering a button to add the project's default color sets."""
    # NOTE(review): index [1] of verifyObjectLayers' result is presumably
    # the list of objects missing layers -- confirm against its definition.
    sxglobals.settings.patchArray = sxglobals.layers.verifyObjectLayers(
        sxglobals.settings.shapeArray)[1]
    patchLabel = 'Objects with no layers: ' + str(len(sxglobals.settings.patchArray))
    maya.cmds.frameLayout(
        'patchFrame',
        label=patchLabel,
        parent='topCanvas',
        width=250,
        marginWidth=10,
        marginHeight=5)
    maya.cmds.text(
        label=("Click on empty to view project defaults.\n"), align='left')
    # Only offer the patch button when the SX shader network exists.
    if maya.cmds.objExists('SXShader'):
        maya.cmds.text(
            label=(
                "Add project layers to selected objects\n"
                "by pressing the button below.\n"),
            align="left")
        maya.cmds.button(
            label='Add missing color sets',
            command=(
                'sxtools.sxglobals.layers.patchLayers('
                'sxtools.sxglobals.settings.patchArray)\n'
                'sxtools.sxglobals.core.updateSXTools()'))
    maya.cmds.setParent('patchFrame')
    maya.cmds.setParent('topCanvas')
    # Keep the dock control at its compact size after rebuilding contents.
    maya.cmds.workspaceControl(
        sxglobals.dockID, edit=True, resizeHeight=5, resizeWidth=250)
def mismatchingObjectsUI(self):
    """Build the view shown when selected objects carry nonstandard color
    layers, with manual repair instructions and helper buttons."""
    # NOTE(review): index [1] of verifyObjectLayers' result is presumably
    # the list of objects with mismatching layers -- confirm.
    sxglobals.settings.patchArray = sxglobals.layers.verifyObjectLayers(
        sxglobals.settings.shapeArray)[1]
    patchLabel = 'Objects with nonstandard layers: ' + str(
        len(sxglobals.settings.patchArray))
    maya.cmds.frameLayout(
        'patchFrame',
        label=patchLabel,
        parent='topCanvas',
        width=250,
        marginWidth=10,
        marginHeight=5)
    maya.cmds.text(
        label=(
            "To fix color layers:\n"
            "1. Open Color Set Editor\n"
            "2. Delete any redundant color sets\n"
            "3. Rename any needed color sets\n"
            "    using reference names\n"
            "4. DELETE HISTORY on selected objects\n"
            "5. Press 'Add Missing Color Sets' button\n\n"
            "Reference names:\nlayer1-nn, occlusion, metallic,\n"
            "smoothness, transmission, emission, composite"
        ),
        align="left")
    maya.cmds.button(
        label='Color Set Editor',
        command="maya.mel.eval('colorSetEditor;')")
    # Patching requires the project layer configuration to be present.
    if 'LayerData' in sxglobals.settings.project:
        maya.cmds.button(
            label='Add missing color sets',
            command=(
                'sxtools.sxglobals.layers.patchLayers('
                'sxtools.sxglobals.settings.patchArray)\n'
                'sxtools.sxglobals.core.updateSXTools()'))
    maya.cmds.setParent('patchFrame')
    maya.cmds.setParent('topCanvas')
    # Keep the dock control at its compact size after rebuilding contents.
    maya.cmds.workspaceControl(
        sxglobals.dockID, edit=True, resizeHeight=5, resizeWidth=250)
def skinMeshUI(self):
    """Build the informational view shown when a skinning mesh (a mesh
    with the _skinned suffix) is selected; it only displays guidance text."""
    maya.cmds.frameLayout(
        'patchFrame',
        label='Skinning Mesh Selected',
        parent='topCanvas',
        width=250,
        marginWidth=10,
        marginHeight=0)
    maya.cmds.text(
        parent='patchFrame',
        label=(
            "Create skeletons and edit skin weights on meshes with _skinned suffix.\n\n"
            "Blend shapes are also supported.\n\n"
            "Select non-skinned meshes in the Outliner."),
        align='left',
        ww=True)
    maya.cmds.setParent('patchFrame')
    maya.cmds.setParent('topCanvas')
    # Keep the dock control at its compact size after rebuilding contents.
    maya.cmds.workspaceControl(
        sxglobals.dockID, edit=True, resizeHeight=5, resizeWidth=250)
def layerViewUI(self):
maya.cmds.frameLayout(
'layerFrame',
parent='topCanvas',
width=250,
marginWidth=5,
marginHeight=2)
maya.cmds.radioButtonGrp(
'shadingButtons',
parent='layerFrame',
columnWidth3=(80, 80, 80),
columnAttach3=('left', 'left', 'left'),
labelArray3=['Final', 'Debug', 'Alpha'],
select=1,
numberOfRadioButtons=3,
onCommand1=(
"sxtools.sxglobals.tools.setShadingMode(0)"),
onCommand2=(
"sxtools.sxglobals.tools.setShadingMode(1)"),
onCommand3=(
"sxtools.sxglobals.tools.setShadingMode(2)"))
sxglobals.tools.verifyShadingMode()
maya.cmds.rowColumnLayout(
'layerListRowColumns',
parent='layerFrame',
numberOfColumns=3,
columnWidth=((1, 20), (2, 190), (3, 20)),
columnSpacing=([1, 0], [2, 5], [3, 5]),
rowSpacing=(1, 5))
maya.cmds.columnLayout(
'layerSetButtonsLeft',
parent='layerListRowColumns',
rowSpacing=15,
adjustableColumn=True)
maya.cmds.text(
parent='layerSetButtonsLeft',
label='')
maya.cmds.button(
'deleteLayerSetButton',
parent='layerSetButtonsLeft',
label='-',
ann='Delete current Layer Set\nShift-click to delete all other Layer Sets',
height=15,
enable=False,
command=(
| |
import os
import numpy as np
import math
from GPy.util import datasets as dat
class vertex:
    """A tree node with a display name, a numeric id and index-based links.

    parents/children hold integer indices into the owning tree's vertex
    list; meta is a free-form dictionary of per-vertex data.
    """
    def __init__(self, name, id, parents=None, children=None, meta=None):
        # Bug fix: the original signature used mutable defaults
        # (parents=[], children=[], meta={}), so every vertex created
        # without explicit arguments shared the SAME list/dict objects --
        # appending to one vertex's children silently mutated all others.
        self.name = name
        self.id = id
        self.parents = [] if parents is None else parents
        self.children = [] if children is None else children
        self.meta = {} if meta is None else meta

    def __str__(self):
        return self.name + '(' + str(self.id) + ').'
class tree:
    """A general tree of `vertex` objects stored flat in `self.vertices`.
    Parent/child links are integer indices into that list, so reordering
    the list requires fixing up every index (see swap_vertices)."""
    def __init__(self):
        # Start with a single root vertex at index 0.
        self.vertices = []
        self.vertices.append(vertex(name='root', id=0))
    def __str__(self):
        # Render the whole tree starting from the root vertex.
        index = self.find_root()
        return self.branch_str(index)
    def branch_str(self, index, indent=''):
        # Recursively pretty-print the subtree rooted at `index`,
        # growing the indent by two spaces per level.
        out = indent + str(self.vertices[index]) + '\n'
        for child in self.vertices[index].children:
            out+=self.branch_str(child, indent+'  ')
        return out
    def find_children(self):
        """Take a tree and set the children according to the parents.
        Takes a tree structure which lists the parents of each vertex
        and computes the children for each vertex and places them in."""
        # Clear all child lists first so stale links never survive.
        for i in range(len(self.vertices)):
            self.vertices[i].children = []
        for i in range(len(self.vertices)):
            for parent in self.vertices[i].parents:
                if i not in self.vertices[parent].children:
                    self.vertices[parent].children.append(i)
    def find_parents(self):
        """Take a tree and set the parents according to the children
        Takes a tree structure which lists the children of each vertex
        and computes the parents for each vertex and places them in."""
        # Clear all parent lists first so stale links never survive.
        for i in range(len(self.vertices)):
            self.vertices[i].parents = []
        for i in range(len(self.vertices)):
            for child in self.vertices[i].children:
                if i not in self.vertices[child].parents:
                    self.vertices[child].parents.append(i)
    def find_root(self):
        """Finds the index of the root node of the tree."""
        # Rebuild parent links, then walk upward until a vertex with no
        # parents is reached.
        self.find_parents()
        index = 0
        while len(self.vertices[index].parents)>0:
            index = self.vertices[index].parents[0]
        return index
    def get_index_by_id(self, id):
        """Give the index associated with a given vertex id."""
        for i in range(len(self.vertices)):
            if self.vertices[i].id == id:
                return i
        raise ValueError('Reverse look up of id failed.')
    def get_index_by_name(self, name):
        """Give the index associated with a given vertex name."""
        for i in range(len(self.vertices)):
            if self.vertices[i].name == name:
                return i
        raise ValueError('Reverse look up of name failed.')
    def order_vertices(self):
        """Order vertices in the graph such that parents always have a lower index than children."""
        # Repeatedly sweep the vertex list, swapping any vertex that sits
        # after one of its children; `ordered` only stays True once a full
        # sweep completes without a swap.
        ordered = False
        while ordered == False:
            for i in range(len(self.vertices)):
                ordered = True
                for parent in self.vertices[i].parents:
                    if parent>i:
                        ordered = False
                        self.swap_vertices(i, parent)
    def swap_vertices(self, i, j):
        """
        Swap two vertices in the tree structure array.
        swap_vertex swaps the location of two vertices in a tree structure array.
        :param tree: the tree for which two vertices are to be swapped.
        :param i: the index of the first vertex to be swapped.
        :param j: the index of the second vertex to be swapped.
        :rval tree: the tree structure with the two vertex locations swapped.
        """
        store_vertex_i = self.vertices[i]
        store_vertex_j = self.vertices[j]
        self.vertices[j] = store_vertex_i
        self.vertices[i] = store_vertex_j
        # Every stored index referring to i or j must be updated too.
        # The -1 acts as a temporary sentinel so that i -> j and j -> i
        # can be exchanged within the same list without clobbering.
        for k in range(len(self.vertices)):
            for swap_list in [self.vertices[k].children, self.vertices[k].parents]:
                if i in swap_list:
                    swap_list[swap_list.index(i)] = -1
                if j in swap_list:
                    swap_list[swap_list.index(j)] = i
                if -1 in swap_list:
                    swap_list[swap_list.index(-1)] = j
def rotation_matrix(xangle, yangle, zangle, order='zxy', degrees=False):
    """
    Compute a combined rotation matrix for rotations about the x, y and z axes.

    :param xangle: rotation about the x-axis.
    :param yangle: rotation about the y-axis.
    :param zangle: rotation about the z-axis.
    :param order: the order in which the per-axis rotations are applied.
    :param degrees: if True, the angles are given in degrees, not radians.
    """
    if degrees:
        xangle = math.radians(xangle)
        yangle = math.radians(yangle)
        zangle = math.radians(zangle)
    c1, s1 = math.cos(xangle), math.sin(xangle)
    c2, s2 = math.cos(yangle), math.sin(yangle)
    c3, s3 = math.cos(zangle), math.sin(zangle)
    # See http://en.wikipedia.org/wiki/Rotation_matrix for background.
    if order == 'zxy':
        # Closed-form product for the common z, then x, then y ordering.
        return np.array([[c2*c3 - s1*s2*s3, c2*s3 + s1*s2*c3, -s2*c1],
                         [-c1*s3,           c1*c3,            s1],
                         [s2*c3 + c2*s1*s3, s2*s3 - c2*s1*c3, c2*c1]])
    # General case: left-multiply the per-axis matrices in the given order.
    # Characters outside 'xyz' are silently skipped, as before.
    axis_mats = {
        'x': np.array([[1, 0, 0], [0, c1, s1], [0, -s1, c1]]),
        'y': np.array([[c2, 0, -s2], [0, 1, 0], [s2, 0, c2]]),
        'z': np.array([[c3, s3, 0], [-s3, c3, 0], [0, 0, 1]]),
    }
    rot_mat = np.eye(3)
    for axis in order:
        if axis in axis_mats:
            rot_mat = np.dot(axis_mats[axis], rot_mat)
    return rot_mat
# Motion capture data routines.
class skeleton(tree):
    """A tree of joints whose per-vertex meta carries motion channel data."""
    def __init__(self):
        tree.__init__(self)

    def connection_matrix(self):
        """Return a boolean matrix where entry (i, j) is True when vertex i
        lists vertex j among its children."""
        count = len(self.vertices)
        connection = np.zeros((count, count), dtype=bool)
        for i, vert in enumerate(self.vertices):
            for child in vert.children:
                connection[i, child] = True
        return connection

    def to_xyz(self, channels):
        # Concrete skeleton formats (e.g. acclaim) must supply this.
        raise NotImplementedError("this needs to be implemented to use the skeleton class")

    def finalize(self):
        """After loading in a skeleton ensure parents are correct, vertex orders are correct and rotation matrices are correct."""
        self.find_parents()
        self.order_vertices()
        self.set_rotation_matrices()

    def smooth_angle_channels(self, channels):
        """Remove discontinuities in angle channels so that they don't cause artifacts in algorithms that rely on the smoothness of the functions."""
        for vert in self.vertices:
            # NOTE(review): `if col` skips a rotation channel stored in
            # column 0 as well as empty entries -- confirm intent.
            for col in vert.meta['rot_ind']:
                if col:
                    for k in range(1, channels.shape[0]):
                        step = channels[k, col] - channels[k-1, col]
                        # Shift all later frames by a full turn whenever the
                        # wrapped difference is smaller than the raw one.
                        if abs(step + 360.) < abs(step):
                            channels[k:, col] = channels[k:, col] + 360.
                        elif abs(step - 360.) < abs(step):
                            channels[k:, col] = channels[k:, col] - 360.
# class bvh_skeleton(skeleton):
# def __init__(self):
# skeleton.__init__(self)
# def to_xyz(self, channels):
class acclaim_skeleton(skeleton):
def __init__(self, file_name=None):
    """Create an (optionally file-initialized) acclaim-format skeleton.

    :param file_name: optional path of an ASF skeleton file to load.
    """
    skeleton.__init__(self)
    self.documentation = []
    self.angle = 'deg'
    self.length = 1.0
    self.mass = 1.0
    self.type = 'acclaim'
    # Replace the generic root with one carrying the full acclaim metadata.
    root_meta = {'orientation': [],
                 'axis': [0., 0., 0.],
                 'axis_order': [],
                 'C': np.eye(3),
                 'Cinv': np.eye(3),
                 'channels': [],
                 'bodymass': [],
                 'confmass': [],
                 'order': [],
                 'rot_ind': [],
                 'pos_ind': [],
                 'limits': [],
                 'xyz': np.array([0., 0., 0.]),
                 'rot': np.eye(3)}
    self.vertices[0] = vertex(name='root', id=0, parents=[0], children=[],
                              meta=root_meta)
    if file_name:
        self.load_skel(file_name)
def to_xyz(self, channels):
    """Convert one frame of channel values into per-vertex xyz positions.

    :param channels: flat sequence of channel values for a single frame,
        indexed via each vertex's meta['rot_ind'] / meta['pos_ind'].
    :returns: numpy array with one xyz row per vertex.
    """
    # Root rotation: start from the base orientation and add whichever
    # rotation channels this frame supplies (-1 means "no channel").
    rot_val = list(self.vertices[0].meta['orientation'])
    for i in range(len(self.vertices[0].meta['rot_ind'])):
        rind = self.vertices[0].meta['rot_ind'][i]
        if rind != -1:
            rot_val[i] += channels[rind]
    self.vertices[0].meta['rot'] = rotation_matrix(rot_val[0],
                                                   rot_val[1],
                                                   rot_val[2],
                                                   self.vertices[0].meta['axis_order'],
                                                   degrees=True)
    # vertex based store of the xyz location
    # Root position: base offset plus any translation channels.
    self.vertices[0].meta['xyz'] = list(self.vertices[0].meta['offset'])
    for i in range(len(self.vertices[0].meta['pos_ind'])):
        pind = self.vertices[0].meta['pos_ind'][i]
        if pind != -1:
            self.vertices[0].meta['xyz'][i] += channels[pind]
    # Recursively position every descendant relative to its parent
    # (get_child_xyz reads the parent 'rot'/'xyz' just written above).
    for i in range(len(self.vertices[0].children)):
        ind = self.vertices[0].children[i]
        self.get_child_xyz(ind, channels)
    # NOTE: the loop variable shadows the module-level `vertex` class.
    xyz = []
    for vertex in self.vertices:
        xyz.append(vertex.meta['xyz'])
    return np.array(xyz)
def get_child_xyz(self, ind, channels):
    """Recursively compute world-space xyz for vertex `ind` and its subtree.

    Assumes the parent's meta['rot'] and meta['xyz'] are already up to
    date (to_xyz prepares the root before recursing).

    :param ind: index of the vertex to position.
    :param channels: flat sequence of channel values for the frame.
    """
    parent = self.vertices[ind].parents[0]
    children = self.vertices[ind].children
    # Gather this bone's rotation channels for the frame (-1 = no channel).
    rot_val = np.zeros(3)
    for j in range(len(self.vertices[ind].meta['rot_ind'])):
        rind = self.vertices[ind].meta['rot_ind'][j]
        if rind != -1:
            rot_val[j] = channels[rind]
        else:
            rot_val[j] = 0
    # Degrees-of-freedom rotation expressed in the bone's local axis frame.
    tdof = rotation_matrix(rot_val[0], rot_val[1], rot_val[2],
                           self.vertices[ind].meta['order'],
                           degrees=True)
    # Change of basis into (torient) and out of (torient_inv) the bone's
    # axis frame; the inverse uses negated angles in reversed order.
    torient = rotation_matrix(self.vertices[ind].meta['axis'][0],
                              self.vertices[ind].meta['axis'][1],
                              self.vertices[ind].meta['axis'][2],
                              self.vertices[ind].meta['axis_order'],
                              degrees=True)
    torient_inv = rotation_matrix(-self.vertices[ind].meta['axis'][0],
                                  -self.vertices[ind].meta['axis'][1],
                                  -self.vertices[ind].meta['axis'][2],
                                  self.vertices[ind].meta['axis_order'][::-1],
                                  degrees=True)
    # Compose with the parent's accumulated rotation, then offset from the
    # parent's position along the rotated bone offset.
    self.vertices[ind].meta['rot'] = np.dot(np.dot(np.dot(torient_inv,tdof),torient),self.vertices[parent].meta['rot'])
    self.vertices[ind].meta['xyz'] = self.vertices[parent].meta['xyz'] + np.dot(self.vertices[ind].meta['offset'],self.vertices[ind].meta['rot'])
    for i in range(len(children)):
        cind = children[i]
        self.get_child_xyz(cind, channels)
def load_channels(self, file_name):
    """Load motion channel data from a file.

    :param file_name: path of the channel (motion) file to read.
    :returns: the channels parsed by ``read_channels``.
    """
    # Context manager replaces the manual open/close pair: the original
    # leaked the file handle if read_channels raised.
    with open(file_name, 'r') as fid:
        return self.read_channels(fid)
def save_channels(self, file_name, channels):
    """Write motion channel data to a file.

    :param file_name: path to write to.
    :param channels: the channel data to serialize.
    """
    # NOTE(review): 'writ_channels' looks like a typo for 'write_channels',
    # but the helper is defined elsewhere in this module -- keep the name.
    # The explicit fid.close() was removed: the 'with' block already
    # closes the file.
    with open(file_name, 'w') as fid:
        self.writ_channels(fid, channels)
def load_skel(self, file_name):
    """
    Loads an ASF file into a skeleton structure.

    :param file_name: The file name to load in.
    """
    # Context manager guarantees the handle is closed even when parsing
    # fails; the original leaked it on exception.
    with open(file_name, 'r') as fid:
        self.read_skel(fid)
    self.name = file_name
def read_bonedata(self, fid):
    """Read bone data from an acclaim skeleton file stream.

    Parses the :bonedata section of an ASF file, appending one vertex per
    begin/end bone record, and returns the first line of the next section
    (a line starting with ':').
    """
    bone_count = 0
    lin = self.read_line(fid)
    while lin[0]!=':':
        parts = lin.split()
        if parts[0] == 'begin':
            # Start a new bone record with empty/identity defaults.
            bone_count += 1
            # NOTE(review): np.NaN was removed in NumPy 2.0 (use np.nan);
            # left unchanged here.
            self.vertices.append(vertex(name = '', id=np.NaN,
                                        meta={'name': [],
                                              'id': [],
                                              'offset': [],
                                              'orientation': [],
                                              'axis': [0., 0., 0.],
                                              'axis_order': [],
                                              'C': np.eye(3),
                                              'Cinv': np.eye(3),
                                              'channels': [],
                                              'bodymass': [],
                                              'confmass': [],
                                              'order': [],
                                              'rot_ind': [],
                                              'pos_ind': [],
                                              'limits': [],
                                              'xyz': np.array([0., 0., 0.]),
                                              'rot': np.eye(3)}))
            lin = self.read_line(fid)
        elif parts[0]=='id':
            self.vertices[bone_count].id = int(parts[1])
            lin = self.read_line(fid)
            self.vertices[bone_count].children = []
        elif parts[0]=='name':
            self.vertices[bone_count].name = parts[1]
            lin = self.read_line(fid)
        elif parts[0]=='direction':
            # Unit direction of the bone; combined with 'length' at 'end'.
            direction = np.array([float(parts[1]), float(parts[2]), float(parts[3])])
            lin = self.read_line(fid)
        elif parts[0]=='length':
            lgth = float(parts[1])
            lin = self.read_line(fid)
        elif parts[0]=='axis':
            self.vertices[bone_count].meta['axis'] = np.array([float(parts[1]),
                                                               float(parts[2]),
                                                               float(parts[3])])
            # order is reversed compared to bvh
            self.vertices[bone_count].meta['axis_order'] = parts[-1][::-1].lower()
            lin = self.read_line(fid)
        elif parts[0]=='dof':
            # Map the declared degrees of freedom onto channel names; the
            # rotation axes are also collected into 'order'.
            order = []
            for i in range(1, len(parts)):
                if parts[i]== 'rx':
                    chan = 'Xrotation'
                    order.append('x')
                elif parts[i] =='ry':
                    chan = 'Yrotation'
                    order.append('y')
                elif parts[i] == 'rz':
                    chan = 'Zrotation'
                    order.append('z')
                elif parts[i] == 'tx':
                    chan = 'Xposition'
                elif parts[i] == 'ty':
                    chan = 'Yposition'
                elif parts[i] == 'tz':
                    chan = 'Zposition'
                elif parts[i] == 'l':
                    chan = 'length'
                self.vertices[bone_count].meta['channels'].append(chan)
            # order is reversed compared to bvh
            self.vertices[bone_count].meta['order'] = order[::-1]
            lin = self.read_line(fid)
        elif parts[0]=='limits':
            # First limit pair sits on the 'limits' line itself; further
            # pairs follow one per line until 'end'. Brackets are stripped
            # via the [1:] / [:-1] slices.
            self.vertices[bone_count].meta['limits'] = [[float(parts[1][1:]), float(parts[2][:-1])]]
            lin = self.read_line(fid)
            while lin !='end':
                parts = lin.split()
                self.vertices[bone_count].meta['limits'].append([float(parts[0][1:]), float(parts[1][:-1])])
                lin = self.read_line(fid)
            self.vertices[bone_count].meta['limits'] = np.array(self.vertices[bone_count].meta['limits'])
        elif parts[0]=='end':
            # 'direction' and 'lgth' were set by earlier branches of this
            # same record; file order guarantees they exist here.
            self.vertices[bone_count].meta['offset'] = direction*lgth
            lin = self.read_line(fid)
    return lin
def read_channels(self, fid):
"""Read channels from an acclaim file."""
bones = [[] for | |
!= 0.0]
cx = np.linspace(zmin, zmax, options['pdf']['numpart'])
cy = np.sum(np.abs([a.pdf(cx/x)/x for x in bx]) * by, 0)
return PDF(cx, cy)
def _ndiv(self, b):
    "Divide this PDF by a nonzero scalar, returning a new PDF."
    if b == 0:
        raise ValueError("Cannot divide a PDF by 0.")
    # A scalar divisor only rescales the support; densities are unchanged.
    # NOTE(review): a negative divisor reverses the x ordering -- presumably
    # PDF() tolerates or callers avoid that; confirm.
    scaled_support = self.x / b
    return PDF(scaled_support, self.y)
def __rdiv__(self, b):
    "Divide the scalar b by this PDF, returning a new PDF (Python 2 hook)."
    # A support crossing zero would make b/x unbounded.
    if self.x[0] * self.x[-1] <= 0:
        raise ValueError("Cannot divide by PDFs that include 0")
    if b == 0:
        raise ValueError("Dividing 0 by a PDF does not return a PDF")
    # b/x is monotonic on a support excluding 0, so the new support's
    # endpoints come from the transformed original endpoints.
    ends = [b / self.x[0], b / self.x[-1]]
    zmin, zmax = np.min(ends), np.max(ends)
    grid = np.linspace(zmin, zmax, options['pdf']['numpart'])
    # Change of variables z = b/x with Jacobian factor 1/z**2.
    return PDF(grid, self.pdf(b / grid) / grid**2)
def __truediv__(self, b):
    # Python 3 division operator; delegates to the Python 2-style __div__.
    return self.__div__(b)
def __rtruediv__(self, b):
    # Python 3 reflected division; delegates to the Python 2-style __rdiv__.
    return self.__rdiv__(b)
def __div__(self, b):
    "Divide two PDFs, returning a new PDF"
    # Scalar divisors are delegated to the simple support-rescaling case.
    if isinstance(b, int) or isinstance(b, float):
        return self._ndiv(b)
    # Python 2 compatibility: `long` exists only there, and thanks to
    # short-circuiting is never evaluated under Python 3.
    if sys.version[0] == "2" and isinstance(b, long):
        return self._ndiv(b)
    if b.x[0]*b.x[-1] <= 0:
        raise ValueError("Cannot divide by PDFs that include 0")
    a = self
    # The ratio's support is bounded by the endpoint combinations a.x/b.x.
    extremes = np.outer([a.x[0], a.x[-1]], [1.0/b.x[0], 1.0/b.x[-1]])
    zmin, zmax = np.min(extremes), np.max(extremes)
    bx = b.x
    by = b.y.reshape(-1, 1)
    nsamp = options['pdf']['numpart']
    cx = np.linspace(zmin, zmax, nsamp)
    # Density of z = a/b: sum over b's sample points of a.pdf(z*x)*x*b(x).
    # NOTE(review): uses x rather than abs(x); divisors with negative
    # support may need abs -- confirm.
    cy = np.sum([a.pdf(x * cx)*x for x in bx] * by, 0)
    return PDF(cx, cy)
@property
def mode(self):
    """
    Find the mode of the PDF. The mode is the x value at which pdf(x)
    is at its maximum. It is the peak of the PDF.
    """
    # Degenerate single-point PDF: that point is the mode.
    if len(self.x) == 1:
        return self.x[0]
    # Linear scan keeping the FIRST maximum (strict > preserves ties).
    best_x = None
    best_y = None
    for xv, yv in zip(self.x, self.y):
        if best_y is None or yv > best_y:
            best_x = xv
            best_y = yv
    return best_x
def __str__(self):
    "Compact one-line summary: support range, mean, deviation and mode."
    return "PDF [%.3g - %.3g] mean=%.3g dev=%.3g mode=%.3g" % (
        self.x[0], self.x[-1], self.mean, self.dev, self.mode)
def plot(self, color='', fig=False):
    """
    Plot a PDF.

    :param color: Optional color for the plot; defaults to green ('g').
    :type color: String.
    :param fig: Create a new matplotlib figure to hold the plot.
    :type fig: Boolean.
    :returns: A list of lines that were added.
    """
    if fig:
        plt.figure()
    # The original duplicated the three plot calls in both branches,
    # differing only in the color; collapse to one path ('g' default).
    c = color if color else 'g'
    # Vertical lines mark the edges of the PDF's support.
    plt.plot([self.x[0], self.x[0]], [0, self.y[0]], color=c)
    plt.plot([self.x[-1], self.x[-1]], [0, self.y[-1]], color=c)
    return plt.plot(self.x, self.y, color=c)
# ipython pretty print method
def _repr_pretty_(self, p, cycle):
    "Render the PDF inline in IPython: draw the plot and print the summary."
    if cycle:
        # Recursive repr: emit nothing, matching the original behavior.
        return
    self.plot()
    p.text(str(self))
def _get_range(sfunc, min, max):
    """Compute a finite [min, max] support for a scipy frozen distribution,
    truncating infinite tails at quantiles set by options['pdf']['range'].

    :param sfunc: a frozen scipy.stats distribution (must provide ppf()).
    :param min: caller-supplied lower bound, or None.
    :param max: caller-supplied upper bound, or None.
    :returns: (min, max) tuple of finite bounds.
    """
    # -np.inf / np.inf instead of np.NINF / np.PINF: those aliases were
    # removed in NumPy 2.0.
    num_tails = int(sfunc.ppf(0) == -np.inf) + int(sfunc.ppf(1) == np.inf)
    _range = options['pdf']['range']
    if num_tails:
        # Renamed from 'range' to avoid shadowing the builtin.
        if num_tails == 2:
            # Split the excluded probability mass evenly across both tails.
            quantiles = [(1.0 - _range)/2, (1.0 + _range)/2]
        else:
            quantiles = [1.0 - _range, _range]
    mmin = sfunc.ppf(0)
    if mmin == -np.inf:
        mmin = sfunc.ppf(quantiles[0])
    mmax = sfunc.ppf(1)
    if mmax == np.inf:
        mmax = sfunc.ppf(quantiles[1])
    # Respect caller-supplied bounds, but never widen beyond the support.
    if min is not None:
        min = builtins.max(min, mmin)
    else:
        min = mmin
    if max is not None:
        max = builtins.min(max, mmax)
    else:
        max = mmax
    return min, max
def ExponPDF(rate):
    """
    Creates Exponential Probability Density Function.

    :param rate: The rate parameter for the distribution. Must be > 0.
    :returns: A PDF object

    See http://en.wikipedia.org/wiki/Exponential_distribution
    """
    if rate <= 0:
        raise ValueError("Rate must be greater than 0.")
    # scipy parameterizes the exponential by scale = 1/rate.
    frozen = scipy.stats.expon(loc=0, scale=1.0/rate)
    lo, hi = _get_range(frozen, None, None)
    grid = np.linspace(lo, hi, options['pdf']['numpart'])
    return PDF(grid, frozen.pdf(grid))
def RayleighPDF(scale):
    """
    Creates Rayleigh Probability Density Function.

    :param scale: The scale. Must be > 0.
    :returns: A PDF object

    See http://en.wikipedia.org/wiki/Rayleigh_distribution
    """
    if scale <= 0:
        raise ValueError("Scale must be greater than 0.")
    frozen = scipy.stats.rayleigh(loc=0, scale=scale)
    lo, hi = _get_range(frozen, None, None)
    grid = np.linspace(lo, hi, options['pdf']['numpart'])
    return PDF(grid, frozen.pdf(grid))
def WeibullPDF(shape, scale):
    """
    Creates Weibull Probability Density Function.

    :param shape: The shape. Must be > 0.
    :param scale: The scale. Must be > 0.
    :returns: A PDF object

    See http://en.wikipedia.org/wiki/Weibull_distribution
    """
    if shape <= 0 or scale <= 0:
        raise ValueError("Shape and Scale must be greater than 0.")
    sfunc = scipy.stats.exponweib(1, shape, scale=scale)
    nsamp = options['pdf']['numpart']
    mmin = None
    # For shape < 1 the density diverges at 0; start the grid just above.
    # np.isposinf replaces the np.PINF alias removed in NumPy 2.0.
    if np.isposinf(sfunc.pdf(0)):
        mmin = .01
    min, max = _get_range(sfunc, mmin, None)
    x = np.linspace(min, max, nsamp)
    return PDF(x, sfunc.pdf(x))
def NormalPDF(mean, dev, min=None, max=None):
    """
    Creates a normal (gaussian) Probability Density Function.

    :param mean: The mean.
    :param dev: The standard deviation.
    :param min: A minimum value for the PDF (default None).
    :param max: A maximum value for the PDF (default None).
    :returns: A PDF object

    For the normal distribution, you must specify **mean** and **dev**.

    :Example:

    >>> n = NormalPDF(10,1)
    >>> n = NormalPDF(mean=10, dev=1)
    >>> n = NormalPDF(mean=10, dev=1, min=10)
    """
    if dev <= 0:
        raise ValueError("Deviation must be positive.")
    # First determine the effective (possibly truncated) support.
    base = scipy.stats.norm(loc=mean, scale=dev)
    min, max = _get_range(base, min, max)
    dev = float(dev)
    # truncnorm takes its bounds in standard-deviation units about the mean.
    lower = (min - mean) / dev
    upper = (max - mean) / dev
    trunc = scipy.stats.truncnorm(lower, upper, loc=mean, scale=dev)
    grid = np.linspace(min, max, options['pdf']['numpart'])
    return PDF(grid, trunc.pdf(grid))
def NetPDF(addr):
    """
    Retrieves a PDF from a remote address.

    :param addr: URI. PDF must be stored in JSON format
    :returns: A PDF object
    :raises ValueError: if the fetched object is not a PDF.

    :Example:

    >>> u = NetPDF('http://foo.com/myproject/parameters/density')
    """
    from jpickle import NetObj
    p = NetObj(addr)
    if not isinstance(p, PDF):
        # ValueError instead of the bare Exception originally raised; it is
        # a subclass, so existing 'except Exception' handlers still match.
        raise ValueError('Link is not a PDF')
    return p
def UniformPDF(min=None, max=None, mean=None):
    """
    Creates a uniform Probability Density Function.

    :param min: The minimum value
    :param max: The maximum value
    :param mean: The mean value
    :returns: A PDF object

    For the uniform distribution, you must specify two of (min, max, and mean).
    The third parameter will be calculated automatically.

    :Example:

    >>> u = UniformPDF(10,20)
    >>> u = UniformPDF(min=10, max=20)
    >>> u = UniformPDF(min=10, mean=15)
    """

    def _bad_args(mean_mismatch=False):
        # Centralized argument errors so both failure modes live in one place.
        if mean_mismatch:
            raise ValueError("mean must be (min+max)/2. Try specifying just min and max.")
        raise ValueError("For uniform distribution, you must specify two of (min, max, and mean).")

    # When all three are given they must agree with each other.
    all_given = min is not None and max is not None and mean is not None
    if all_given and not np.allclose(mean, (min + max) / 2.0, atol=1e-6):
        _bad_args(True)

    # Derive whichever parameter is missing from the other two.
    if mean is None:
        if max is None or min is None:
            _bad_args()
        mean = (max + min) / 2.0
    if max is None:
        if mean is None or min is None:
            _bad_args()
        max = mean + (mean - min)
    if min is None:
        min = mean - (max - mean)

    if min > max:
        raise ValueError("min must not be > mean or max!")
    return PDF([min, max], [1, 1])
def TrianglePDF(min, mode, max):
    """
    Creates a triangle Probability Density Function.

    See http://en.wikipedia.org/wiki/Triangular_distribution

    :param min: The minimum value
    :param mode: The mode
    :param max: The maximum value
    :returns: A PDF object

    You can enter the parameters in any order. They will be sorted so that
    the mode is the middle value.
    """
    # Sort so the density peaks at the middle of the three inputs.
    lo, mid, hi = np.sort([min, mode, max])
    return PDF([lo, mid, hi], [0, 1, 0])
def JeffreysPDF(min, max):
    """
    Creates a Jeffreys Probability Density Function.

    The Jeffreys prior for a scale parameter is proportional to 1/x on
    [min, max], which requires a strictly positive, non-degenerate support.

    :param min: The minimum value. Must be > 0.
    :param max: The maximum value. Must be > min.
    :returns: A PDF object
    :raises ValueError: unless 0 < min < max.
    """
    min = float(min)
    max = float(max)
    # Guard the domain: log(max/min) and the 1/x weights are undefined (or
    # produce division by zero / nan) for non-positive or degenerate bounds.
    if min <= 0 or max <= min:
        raise ValueError("JeffreysPDF requires 0 < min < max.")
    norm = np.log(max / min)
    return PDF([min, max], [1.0 / (min * norm), 1.0 / (max * norm)])
def ExperimentalPDF(data, min=None, max=None, fit=False, bw=None, nbins=0, prior=None, error=None, force=False):
"""
Create an experimental PDF.
An experimental PDF is derived from the results of an experiment or
measurement of some parameter. It has actual data attached to it.
That data is then used to create a PDF by one of three different methods.
The PDF can built by binning the data and linearly
interpolating, using a Gaussian KDE, or using Bayesian Inference.
:param data: Our quantity of interest.
:type data: Array of scalars
:param nbins: Number of bins (used if fit is false). Default is
2*IQR/n^(1/3) where IQR is the interquartile range
of the data.
:type nbins: int
:param fit: Use Gaussian KDE (default=False)
:type fit: True or "Gaussian"
:param bw: Bandwidth for Gaussian KDE (default=None)
:type bw: string or float. String must be 'scott' or 'silverman'
:param prior: Prior PDF to use for Bayesian Inference.
[default=None (uninformative)]
:type prior: PDF
:param error: Error in the data. For example, the measurement error.
Required for Bayesian.
:type error: PDF. Typically a NormalPDF with a mean of 0.
"""
data = np.array(data).astype(np.float64)
if not force and min is not None and min > np.min(data):
| |
is not None:
self.WebpAdapter = WebpAdapter()
self.WebpAdapter._deserialize(params.get("WebpAdapter"))
if params.get("TpgAdapter") is not None:
self.TpgAdapter = TpgAdapter()
self.TpgAdapter._deserialize(params.get("TpgAdapter"))
if params.get("GuetzliAdapter") is not None:
self.GuetzliAdapter = GuetzliAdapter()
self.GuetzliAdapter._deserialize(params.get("GuetzliAdapter"))
self.RequestId = params.get("RequestId")
class DescribeIpStatusRequest(AbstractModel):
    """Request parameters for DescribeIpStatus."""

    def __init__(self):
        """
        :param Domain: Accelerated domain name.
        :type Domain: str
        :param Layer: Node type:
               edge: edge nodes
               last: intermediate (origin-pull layer) nodes
               Defaults to edge-node information when left empty.
        :type Layer: str
        :param Area: Region to query:
               mainland: nodes inside mainland China
               overseas: nodes outside mainland China
               global: all nodes
        :type Area: str
        """
        self.Domain = None
        self.Layer = None
        self.Area = None

    def _deserialize(self, params):
        # Every field is a plain scalar; copy each straight from the payload.
        for name in ("Domain", "Layer", "Area"):
            setattr(self, name, params.get(name))
class DescribeIpStatusResponse(AbstractModel):
    """Response parameters for DescribeIpStatus."""

    def __init__(self):
        """
        :param Ips: List of nodes.
        :type Ips: list of IpStatus
        :param TotalCount: Total number of nodes.
        :type TotalCount: int
        :param RequestId: Unique request ID, returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.Ips = None
        self.TotalCount = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_ips = params.get("Ips")
        if raw_ips is not None:
            # Each entry is itself a serialized IpStatus model.
            self.Ips = []
            for entry in raw_ips:
                node = IpStatus()
                node._deserialize(entry)
                self.Ips.append(node)
        self.TotalCount = params.get("TotalCount")
        self.RequestId = params.get("RequestId")
class DescribeIpVisitRequest(AbstractModel):
    """Request parameters for DescribeIpVisit."""

    def __init__(self):
        """
        :param StartTime: Query start time, e.g. 2018-09-04 10:40:10; results at or after this time are returned.
               The time is rounded down to the chosen granularity, e.g. with 5-minute granularity
               2018-09-04 10:40:10 yields a first data point at 2018-09-04 10:40:00.
        :type StartTime: str
        :param EndTime: Query end time, e.g. 2018-09-04 10:40:10; results at or before this time are returned.
               The time is rounded down to the chosen granularity, e.g. with 5-minute granularity
               2018-09-04 10:40:10 yields a last data point at 2018-09-04 10:40:00.
        :type EndTime: str
        :param Domains: List of domains to query; at most 30 accelerated domains per call.
        :type Domains: list of str
        :param Project: Project ID to query; see https://console.cloud.tencent.com/project
               Used when no domains are given; explicit domains take precedence.
        :type Project: int
        :param Interval: Time granularity:
               5min: 5-minute granularity, for query windows within 24 hours (default)
               day: daily granularity, for query windows longer than 1 day
        :type Interval: str
        """
        self.StartTime = None
        self.EndTime = None
        self.Domains = None
        self.Project = None
        self.Interval = None

    def _deserialize(self, params):
        # Every field is a plain scalar or scalar list; copy them verbatim.
        for name in ("StartTime", "EndTime", "Domains", "Project", "Interval"):
            setattr(self, name, params.get(name))
class DescribeIpVisitResponse(AbstractModel):
    """Response parameters for DescribeIpVisit."""

    def __init__(self):
        """
        :param Interval: Time granularity of the statistics: 5min (5 minutes) or day (1 day).
        :type Interval: str
        :param Data: Per-resource data details.
        :type Data: list of ResourceData
        :param RequestId: Unique request ID, returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.Interval = None
        self.Data = None
        self.RequestId = None

    def _deserialize(self, params):
        self.Interval = params.get("Interval")
        raw_data = params.get("Data")
        if raw_data is not None:
            # Each entry is a serialized ResourceData model.
            self.Data = []
            for entry in raw_data:
                record = ResourceData()
                record._deserialize(entry)
                self.Data.append(record)
        self.RequestId = params.get("RequestId")
class DescribeMapInfoRequest(AbstractModel):
    """Request parameters for DescribeMapInfo."""

    def __init__(self):
        """
        :param Name: Mapping category to query:
               isp: ISP mapping
               district: province (inside China) or country/region (outside China) mapping
        :type Name: str
        """
        self.Name = None

    def _deserialize(self, params):
        # Single scalar field.
        self.Name = params.get("Name")
class DescribeMapInfoResponse(AbstractModel):
    """Response parameters for DescribeMapInfo."""

    def __init__(self):
        """
        :param MapInfoList: Array of mappings.
        :type MapInfoList: list of MapInfo
        :param ServerRegionRelation: Mapping between server-side region ids and sub-region ids.
               Note: may be null, meaning no valid value was obtained.
        :type ServerRegionRelation: list of RegionMapRelation
        :param ClientRegionRelation: Mapping between client-side region ids and sub-region ids.
               Note: may be null, meaning no valid value was obtained.
        :type ClientRegionRelation: list of RegionMapRelation
        :param RequestId: Unique request ID, returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.MapInfoList = None
        self.ServerRegionRelation = None
        self.ClientRegionRelation = None
        self.RequestId = None

    def _deserialize(self, params):
        def fill(attr, cls):
            # Deserialize a list-of-model field only when it is present,
            # leaving the attribute untouched otherwise.
            raw = params.get(attr)
            if raw is not None:
                items = []
                for entry in raw:
                    obj = cls()
                    obj._deserialize(entry)
                    items.append(obj)
                setattr(self, attr, items)

        fill("MapInfoList", MapInfo)
        fill("ServerRegionRelation", RegionMapRelation)
        fill("ClientRegionRelation", RegionMapRelation)
        self.RequestId = params.get("RequestId")
class DescribeOriginDataRequest(AbstractModel):
    """Request parameters for DescribeOriginData."""

    def __init__(self):
        """
        :param StartTime: Query start time, e.g. 2018-09-04 10:40:00; results at or after this time are returned.
               The time is rounded down to the chosen granularity, e.g. with 1-hour granularity
               2018-09-04 10:40:00 yields a first data point at 2018-09-04 10:00:00.
               The span between start and end time must be 90 days or less.
        :type StartTime: str
        :param EndTime: Query end time, e.g. 2018-09-04 10:40:00; results at or before this time are returned.
               The time is rounded down to the chosen granularity, e.g. with 1-hour granularity
               2018-09-04 10:40:00 yields a last data point at 2018-09-04 10:00:00.
               The span between start and end time must be 90 days or less.
        :type EndTime: str
        :param Metric: Metric to query:
               flux: origin-pull traffic, in bytes
               bandwidth: origin-pull bandwidth, in bps
               request: number of origin-pull requests
               failRequest: number of failed origin-pull requests
               failRate: origin-pull failure rate, in %
               statusCode: origin-pull status codes, aggregated 2xx/3xx/4xx/5xx data
               2xx: aggregated 2xx data plus each individual status code starting with 2
               3xx: aggregated 3xx data plus each individual status code starting with 3
               4xx: aggregated 4xx data plus each individual status code starting with 4
               5xx: aggregated 5xx data plus each individual status code starting with 5
               A specific status code can also be queried; the result is empty if it never occurred.
        :type Metric: str
        :param Domains: List of domains to query; at most 30 accelerated domains per call.
        :type Domains: list of str
        :param Project: Project ID to query; see https://console.cloud.tencent.com/project
               Used when no domains are given (still limited to 30 domains); explicit domains take precedence.
        :type Project: int
        :param Interval: Time granularity:
               min: 1-minute granularity, query window within 24 hours (not supported outside mainland China)
               5min: 5-minute granularity, query window within 31 days
               hour: 1-hour granularity, query window within 31 days
               day: daily granularity, query window longer than 31 days
        :type Interval: str
        :param Detail: With multiple Domains, false (default) returns aggregated data;
               true returns per-domain detail (not supported for the statusCode metric).
        :type Detail: bool
        :param Area: Service region; defaults to CDN data inside mainland China.
               mainland: query CDN data inside mainland China
               overseas: query CDN data outside mainland China
        :type Area: str
        """
        self.StartTime = None
        self.EndTime = None
        self.Metric = None
        self.Domains = None
        self.Project = None
        self.Interval = None
        self.Detail = None
        self.Area = None

    def _deserialize(self, params):
        # Every field is a plain scalar or scalar list; copy them verbatim.
        for name in ("StartTime", "EndTime", "Metric", "Domains",
                     "Project", "Interval", "Detail", "Area"):
            setattr(self, name, params.get(name))
class DescribeOriginDataResponse(AbstractModel):
    """Response parameters for DescribeOriginData."""

    def __init__(self):
        """
        :param Interval: Time granularity of the statistics: min, 5min, hour or day
               (1 minute, 5 minutes, 1 hour, 1 day respectively).
        :type Interval: str
        :param Data: Per-resource origin-pull data details.
        :type Data: list of ResourceOriginData
        :param RequestId: Unique request ID, returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.Interval = None
        self.Data = None
        self.RequestId = None

    def _deserialize(self, params):
        self.Interval = params.get("Interval")
        raw_data = params.get("Data")
        if raw_data is not None:
            # Each entry is a serialized ResourceOriginData model.
            self.Data = []
            for entry in raw_data:
                record = ResourceOriginData()
                record._deserialize(entry)
                self.Data.append(record)
        self.RequestId = params.get("RequestId")
class DescribePayTypeRequest(AbstractModel):
    """Request parameters for DescribePayType."""

    def __init__(self):
        """
        :param Area: Service region to query:
               mainland: billing inside mainland China
               overseas: billing outside mainland China
               Defaults to mainland when left empty.
        :type Area: str
        """
        self.Area = None

    def _deserialize(self, params):
        # Single scalar field.
        self.Area = params.get("Area")
class DescribePayTypeResponse(AbstractModel):
    """Response parameters for DescribePayType."""

    def __init__(self):
        """
        :param PayType: Billing type:
               flux: billed by traffic
               bandwidth: billed by bandwidth
               When the daily billing mode is being switched, this is the mode taking effect
               the next day if any usage occurred today, otherwise the mode already in effect.
        :type PayType: str
        :param BillingCycle: Billing cycle:
               day: daily settlement
               month: monthly settlement
        :type BillingCycle: str
        :param StatType: Billing method:
               monthMax: monthly average of daily peaks, monthly settlement
               day95: daily 95th-percentile bandwidth, monthly settlement
               month95: monthly 95th-percentile bandwidth, monthly settlement
               sum: total traffic, daily or monthly settlement
               max: peak bandwidth, daily settlement
        :type StatType: str
        :param RegionType: Billing type outside mainland China:
               all: unified billing across regions
               multiple: per-region billing
        :type RegionType: str
        :param CurrentPayType: Billing type currently in effect:
               flux: billed by traffic
               bandwidth: billed by bandwidth
        :type CurrentPayType: str
        :param RequestId: Unique request ID, returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.PayType = None
        self.BillingCycle = None
        self.StatType = None
        self.RegionType = None
        self.CurrentPayType = None
        self.RequestId = None

    def _deserialize(self, params):
        # Every field is a plain scalar; copy each straight from the payload.
        for name in ("PayType", "BillingCycle", "StatType",
                     "RegionType", "CurrentPayType", "RequestId"):
            setattr(self, name, params.get(name))
class DescribePurgeQuotaRequest(AbstractModel):
    """Request parameters for DescribePurgeQuota.

    This API takes no input parameters, so the model defines no fields.
    """
class DescribePurgeQuotaResponse(AbstractModel):
    """Response parameters for DescribePurgeQuota."""

    def __init__(self):
        """
        :param UrlPurge: URL purge usage and quota.
        :type UrlPurge: list of Quota
        :param PathPurge: Directory purge usage and quota.
        :type PathPurge: list of Quota
        :param RequestId: Unique request ID, returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.UrlPurge = None
        self.PathPurge = None
        self.RequestId = None

    def _deserialize(self, params):
        def fill(attr):
            # Deserialize a list-of-Quota field only when it is present,
            # leaving the attribute untouched otherwise.
            raw = params.get(attr)
            if raw is not None:
                items = []
                for entry in raw:
                    quota = Quota()
                    quota._deserialize(entry)
                    items.append(quota)
                setattr(self, attr, items)

        fill("UrlPurge")
        fill("PathPurge")
        self.RequestId = params.get("RequestId")
class DescribePurgeTasksRequest(AbstractModel):
    """Request parameters for DescribePurgeTasks."""

    def __init__(self):
        """
        :param PurgeType: Purge type to query:
               url: URL purge records
               path: directory purge records
        :type PurgeType: str
        :param StartTime: Start time when querying by time range, e.g. 2018-08-08 00:00:00.
        :type StartTime: str
        :param EndTime: End time when querying by time range, e.g. 2018-08-08 23:59:59.
        :type EndTime: str
        :param TaskId: Task ID when querying by task; either TaskId or StartTime must be supplied.
        :type TaskId: str
        :param Offset: Pagination offset; defaults to 0.
        :type Offset: int
        :param Limit: Pagination page size; defaults to 20.
        :type Limit: int
        :param Keyword: Filter by domain name, or by a full URL beginning with http(s)://.
        :type Keyword: str
        :param Status: Filter by task status:
               fail: purge failed
               done: purge succeeded
               process: purge in progress
        :type Status: str
        :param Area: Filter by purge region:
               mainland: inside mainland China
               overseas: outside mainland China
               global: everywhere
        :type Area: str
        """
        self.PurgeType = None
        self.StartTime = None
        self.EndTime = None
        self.TaskId = None
        self.Offset = None
        self.Limit = None
        self.Keyword = None
        self.Status = None
        self.Area = None

    def _deserialize(self, params):
        # Every field is a plain scalar; copy each straight from the payload.
        for name in ("PurgeType", "StartTime", "EndTime", "TaskId", "Offset",
                     "Limit", "Keyword", "Status", "Area"):
            setattr(self, name, params.get(name))
class DescribePurgeTasksResponse(AbstractModel):
    """Response parameters for DescribePurgeTasks."""

    def __init__(self):
        """
        :param PurgeLogs: Detailed purge records.
               Note: may be null, meaning no valid value was obtained.
        :type PurgeLogs: list of PurgeTask
        :param TotalCount: Total number of tasks, used for pagination.
               Note: may be null, meaning no valid value was obtained.
        :type TotalCount: int
        :param RequestId: Unique request ID, returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.PurgeLogs = None
        self.TotalCount = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_logs = params.get("PurgeLogs")
        if raw_logs is not None:
            # Each entry is a serialized PurgeTask model.
            self.PurgeLogs = []
            for entry in raw_logs:
                task = PurgeTask()
                task._deserialize(entry)
                self.PurgeLogs.append(task)
        self.TotalCount = params.get("TotalCount")
        self.RequestId = params.get("RequestId")
class DescribePushQuotaRequest(AbstractModel):
    """Request parameters for DescribePushQuota.

    This API takes no input parameters, so the model defines no fields.
    """
class DescribePushQuotaResponse(AbstractModel):
    """Response parameters for DescribePushQuota."""

    def __init__(self):
        """
        :param UrlPush: URL prefetch usage and quota.
        :type UrlPush: list of Quota
        :param RequestId: Unique request ID, returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.UrlPush = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_push = params.get("UrlPush")
        if raw_push is not None:
            # Each entry is a serialized Quota model.
            self.UrlPush = []
            for entry in raw_push:
                quota = Quota()
                quota._deserialize(entry)
                self.UrlPush.append(quota)
        self.RequestId = params.get("RequestId")
class DescribePushTasksRequest(AbstractModel):
    """Request parameters for DescribePushTasks."""

    def __init__(self):
        """
        :param StartTime: Start time, e.g. 2018-08-08 00:00:00.
        :type StartTime: str
        :param EndTime: End time, e.g. 2018-08-08 23:59:59.
        :type EndTime: str
        :param TaskId: Task ID when querying by task; either TaskId or StartTime must be supplied.
        :type TaskId: str
        :param Keyword: Query keyword: a domain name or a full URL beginning with http(s)://.
        :type Keyword: str
        :param Offset: Pagination offset; defaults to 0.
        :type Offset: int
        :param Limit: Pagination page size; defaults to 20.
        :type Limit: int
        :param Area: Filter prefetch records by region:
               mainland: inside mainland China
               overseas: outside mainland China
               global: everywhere
        :type Area: str
        :param Status: Filter by task status:
               fail: prefetch failed
               done: prefetch succeeded
               process: prefetch in progress
        :type Status: str
        """
        self.StartTime = None
        self.EndTime = None
        self.TaskId = None
        self.Keyword = None
        self.Offset = None
        self.Limit = None
        self.Area = None
        self.Status = None

    def _deserialize(self, params):
        # Every field is a plain scalar; copy each straight from the payload.
        for name in ("StartTime", "EndTime", "TaskId", "Keyword",
                     "Offset", "Limit", "Area", "Status"):
            setattr(self, name, params.get(name))
class DescribePushTasksResponse(AbstractModel):
"""DescribePushTasks返回参数结构体
"""
def __init__(self):
"""
:param PushLogs: 预热历史记录
注意:此字段可能返回 null,表示取不到有效值。
:type PushLogs: list of PushTask
| |
<gh_stars>0
from __future__ import print_function, division
import torch
from tqdm.autonotebook import tqdm
import copy
import os
from torch.optim.lr_scheduler import _LRScheduler
import matplotlib.pyplot as plt
class LRFinder(object):
    """
    Learning-rate range test.

    Trains the model for a number of iterations while the learning rate is
    increased from the optimizer's configured value towards ``end_lr``,
    recording loss and accuracy per learning rate so a good working rate can
    be read off the resulting curve with :meth:`plot`.

    Input:
        model        : DNN model
        optimizer    : optimizer where the defined learning rate is assumed to be the lower boundary of the range test
        criterion    : loss function
        device       : represents the device on which the computation will take place
        memory_cache : if True, 'state_dict' of the model and optimizer will be cached in memory; otherwise saved to files under 'cache_dir'
    """

    def __init__(self, model, optimizer, criterion, device, memory_cache=True, cache_dir=None):
        self.model = model
        self.optimizer = optimizer
        self.criterion = criterion
        self.best_loss = None
        self.memory_cache = memory_cache
        self.cache_dir = cache_dir
        self.history = {"lr": [], "Loss": [], "Acc": []}
        # BUGFIX: determine the model's device *before* it may be needed as
        # the fallback below.  The original read self.model_device prior to
        # assigning it, raising AttributeError whenever `device` was falsy.
        self.model_device = next(self.model.parameters()).device
        if device:
            self.device = device
        else:
            self.device = self.model_device
        # Save the original state of the model and optimizer so reset() can
        # restore them after the (destructive) range test.
        self.state_cacher = StateCacher(memory_cache, cache_dir=cache_dir)
        self.state_cacher.store("model", self.model.state_dict())
        self.state_cacher.store("optimizer", self.optimizer.state_dict())

    def reset(self):
        """Restores the model and optimizer to their initial states."""
        self.model.load_state_dict(self.state_cacher.retrieve("model"))
        self.optimizer.load_state_dict(self.state_cacher.retrieve("optimizer"))
        self.model.to(self.model_device)

    def range_test(self, trainloader, testloader=None, start_lr=None, end_lr=2, num_iter=100, step_mode="linear", smooth_f=0.05, diverge_th=5, accumulation_steps=1):
        """
        Run the learning-rate range test.

        Input:
            trainloader : training set data loader
            testloader  : test set data loader; when given, loss/accuracy per
                          iteration are measured on it instead of the training batch
            start_lr    : starting learning rate for the range test
                          (default=None, uses the learning rate from the optimizer)
            end_lr      : the last learning rate up to which the range test is done (default=2)
            num_iter    : number of iterations over which the test occurs (default=100)
            step_mode   : learning-rate policy, either "linear" or "exp" (default="linear")
            smooth_f    : loss smoothing factor in [0, 1) (default=0.05)
            diverge_th  : the test is stopped when the loss surpasses
                          diverge_th * best_loss (default=5)
            accumulation_steps : steps for gradient accumulation
        """
        # Reset the results of any previous run.
        self.history = {"lr": [], "Loss": [], "Acc": []}
        self.best_loss = None
        # Move model to device.
        self.model.to(self.device)
        if start_lr:
            self._set_learning_rate(start_lr)
        # Initialize the learning-rate policy; "linear" or "exp" only.
        if step_mode.lower() == "linear":
            lr_schedule = LinearLR(self.optimizer, end_lr, num_iter)
        elif step_mode.lower() == 'exp':
            lr_schedule = ExponentialLR(self.optimizer, end_lr, num_iter)
        else:
            raise ValueError("Learning rate policy should be either linear or exp. Received {} as the LR policy".format(step_mode))
        if smooth_f < 0 or smooth_f >= 1:
            raise ValueError("smooth_f is outside the range [0, 1)")
        # Iterator that transparently restarts the loader when exhausted.
        iter_wrapper = DataLoaderIterWrapper(trainloader)
        # Train (and optionally validate) one batch per learning rate.
        for iteration in tqdm(range(num_iter)):
            accuracy, loss = self._train_batch(iter_wrapper, accumulation_steps)
            if testloader:
                accuracy, loss = self._validate(testloader)
            # Advance the learning rate and record the value just used.
            lr_schedule.step()
            self.history["lr"].append(lr_schedule.get_lr()[0])
            # Track the best (exponentially smoothed) loss seen so far.
            if iteration == 0:
                self.best_loss = loss
            else:
                if smooth_f > 0:
                    loss = smooth_f * loss + (1 - smooth_f) * self.history["Loss"][-1]
                if loss < self.best_loss:
                    self.best_loss = loss
            self.history["Loss"].append(loss)
            self.history["Acc"].append(accuracy)
            # Stop early once the loss has clearly diverged.
            if loss > diverge_th * self.best_loss:
                print("Stopping early, the loss has diverged")
                break
        print("Learning rate search finished. See the graph with {finder_name}.plot()")

    def _set_learning_rate(self, new_lrs):
        """Set per-parameter-group learning rates; a scalar is applied to every group."""
        if not isinstance(new_lrs, list):
            # BUGFIX: replicate the scalar into a list.  The original computed
            # `new_lrs * len(param_groups)` on the scalar itself (arithmetic,
            # not list replication), after which len(new_lrs) raised TypeError.
            new_lrs = [new_lrs] * len(self.optimizer.param_groups)
        if len(new_lrs) != len(self.optimizer.param_groups):
            raise ValueError("Length of new LRs are not equal to number of parameter groups in the optimizer")
        for param_group, new_lr in zip(self.optimizer.param_groups, new_lrs):
            param_group["lr"] = new_lr

    def _train_batch(self, iter_wrapper, accumulation_steps):
        """Train on `accumulation_steps` batches with one optimizer step;
        return (accuracy, loss) accumulated over those batches."""
        total_loss = None
        train_accuracy = 0
        correct = 0
        total = 0
        self.model.train()
        self.optimizer.zero_grad()
        for i in range(accumulation_steps):
            inputs, labels = iter_wrapper.get_batch()
            inputs = inputs.to(self.device)
            labels = labels.to(self.device)
            # Forward pass.
            outputs = self.model(inputs)
            loss = self.criterion(outputs, labels)
            # Average the loss across accumulation steps so gradients match a
            # single large batch.
            loss /= accumulation_steps
            # Backward pass (gradients accumulate across the inner loop).
            loss.backward()
            pred = outputs.argmax(dim=1, keepdim=True)
            correct += pred.eq(labels.view_as(pred)).sum().item()
            total += len(inputs)
            if total_loss is None:
                total_loss = loss
                train_accuracy = (100 * correct) / total
            else:
                total_loss += loss
                train_accuracy += (100 * correct) / total
        self.optimizer.step()
        return train_accuracy, total_loss.item()

    def _validate(self, dataloader):
        """Evaluate the model on `dataloader`; return (accuracy, mean loss)."""
        correct = 0
        total = 0
        epoch_test_loss = 0.0
        epoch_test_accuracy = 0
        # eval mode + no_grad to disable dropout/batchnorm updates and autograd.
        self.model.eval()
        with torch.no_grad():
            for inputs, labels in dataloader:
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)
                outputs = self.model(inputs)
                if isinstance(inputs, tuple) or isinstance(inputs, list):
                    batch_size = inputs[0].size(0)
                else:
                    batch_size = inputs.size(0)
                epoch_test_loss += self.criterion(outputs, labels).item()
                _, predicted = torch.max(outputs.data, 1)
                total += batch_size
                correct += (predicted == labels).sum().item()
            epoch_test_accuracy = (100 * correct / total)
            epoch_test_loss /= len(dataloader)
        return epoch_test_accuracy, epoch_test_loss

    def plot(self, skip_start=10, skip_end=5, log_lr=True, show_lr=None, ax=None):
        """
        Plot the learning rate range test.

        skip_start : number of batches to trim from the start (default=10)
        skip_end   : number of batches to trim from the end (default=5)
        log_lr     : plot the learning-rate axis in log scale, linear otherwise (default=True)
        show_lr    : add a vertical line to visualize a learning rate (default=None)
        ax         : matplotlib axes pair to draw into; a new figure is created when None
        """
        lrs = self.history["lr"]
        loss = self.history["Loss"]
        acc = self.history["Acc"]
        # Trim the noisy edges of the recorded curves.
        if skip_end == 0:
            lrs = lrs[skip_start:]
            loss = loss[skip_start:]
            acc = acc[skip_start:]
        else:
            lrs = lrs[skip_start:-skip_end]
            loss = loss[skip_start:-skip_end]
            acc = acc[skip_start:-skip_end]
        # Create figure and axes only when the caller did not supply them.
        fig = None
        if ax is None:
            fig, ax = plt.subplots(1, 2, figsize=(15, 7.5))
        # Plot loss and accuracy against learning rate.
        ax[0].plot(lrs, loss)
        ax[1].plot(lrs, acc)
        if log_lr:
            ax[0].set_xscale("log")
            ax[1].set_xscale("log")
        ax[0].set_title("Loss vs Learning rate")
        ax[0].set_xlabel("Learning rate")
        ax[0].set_ylabel("Loss")
        ax[1].set_title("Accuracy vs Learning rate")
        ax[1].set_xlabel("Learning rate")
        ax[1].set_ylabel("Accuracy")
        if show_lr:
            ax[0].axvline(x=show_lr, color="red")
            ax[1].axvline(x=show_lr, color="red")
        # Only show the figure when we created it ourselves.
        if fig is not None:
            plt.show()
        return ax
# Setup linear schedule for Learning rate
class LinearLR(_LRScheduler):
    """
    Linearly interpolates the learning rate between two boundaries over a
    given number of iterations.

    Input:
        optimizer  : optimizer for the model
        end_lr     : final learning rate
        num_iter   : number of iterations over which the test occurs
        last_epoch : index of the final epoch
    """

    def __init__(self, optimizer, end_lr, num_iter, last_epoch=-1):
        self.end_lr = end_lr
        self.num_iter = num_iter
        super(LinearLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # At progress r in [0, 1] the rate is base + r * (end - base),
        # i.e. a straight line from each base LR towards end_lr.
        progress = (self.last_epoch + 1) / self.num_iter
        return [lr0 + progress * (self.end_lr - lr0) for lr0 in self.base_lrs]
# Setup exponential schedule for learning rate
class ExponentialLR(_LRScheduler):
    """Exponentially increases the learning rate between two boundaries over
    a number of iterations.

    Input:
        optimizer  : optimizer for the model
        end_lr     : final learning rate
        num_iter   : number of iterations over which the test occurs
        last_epoch : index of the final epoch
    """

    def __init__(self, optimizer, end_lr, num_iter, last_epoch=-1):
        self.end_lr = end_lr
        self.num_iter = num_iter
        super(ExponentialLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # Geometric interpolation: at progress r the rate is
        # base * (end/base)**r, i.e. log-linear from base LR to end_lr.
        progress = (self.last_epoch + 1) / self.num_iter
        return [lr0 * (self.end_lr / lr0) ** progress for lr0 in self.base_lrs]
# Wrapper to iterate dataloader and provide an option to reset when StopIteration is called.
class DataLoaderIterWrapper(object):
    """
    Wraps a data loader so batches can be pulled one at a time via
    :meth:`get_batch`; by default the underlying iterator is transparently
    restarted when it is exhausted (auto_reset=True), otherwise StopIteration
    propagates to the caller.
    """

    def __init__(self, data_loader, auto_reset=True):
        self.data_loader = data_loader
        self.auto_reset = auto_reset
        self._iterator = iter(data_loader)

    def __next__(self):
        """Return the next (inputs, labels) pair, restarting the iterator if needed."""
        try:
            batch = next(self._iterator)
        except StopIteration:
            if not self.auto_reset:
                raise
            self._iterator = iter(self.data_loader)
            batch = next(self._iterator)
        # CONSISTENCY FIX: tolerate batches carrying extra elements on both
        # the normal and the restarted path.  The original unpacked strictly
        # into (inputs, labels) on the first path but used `*_` after a
        # reset, so loaders yielding >2-tuples failed only until the first
        # wrap-around.
        inputs, labels, *_ = batch
        return inputs, labels

    def get_batch(self):
        return next(self)
class StateCacher(object):
def __init__(self, in_memory, cache_dir=None):
self.in_memory = in_memory
self.cache_dir = cache_dir
if self.cache_dir is None:
import tempfile
self.cache_dir = tempfile.gettempdir()
else:
if not os.path.isdir(self.cache_dir):
raise ValueError("Given `cache_dir` is not a valid directory.")
self.cached = {}
def store(self, key, state_dict):
if self.in_memory:
self.cached.update({key: copy.deepcopy(state_dict)})
else:
fn = os.path.join(self.cache_dir, "state_{}_{}.pt".format(key, id(self)))
self.cached.update({key: fn})
torch.save(state_dict, fn)
def retrieve(self, key):
if key not in self.cached:
raise KeyError("Target {} was not cached.".format(key))
if self.in_memory:
return self.cached.get(key)
else:
fn = self.cached.get(key)
if not os.path.exists(fn):
raise RuntimeError(
"Failed | |
4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
    """Base class for ShuffleSplit and StratifiedShuffleSplit"""

    def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None):
        self.n = n
        self.n_iter = n_iter
        self.test_size = test_size
        self.train_size = train_size
        self.random_state = random_state
        # Resolve the split sizes eagerly so invalid parameters fail at
        # construction time rather than during iteration.
        self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
                                                            train_size)

    def __iter__(self):
        """Yield (train_indices, test_indices) pairs from the subclass generator."""
        for split in self._iter_indices():
            yield split

    @abstractmethod
    def _iter_indices(self):
        """Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
    """Random permutation cross-validation iterator.

    .. deprecated:: 0.18
        This module will be removed in 0.20.
        Use :class:`sklearn.model_selection.ShuffleSplit` instead.

    Yields indices to split data into training and test sets.

    Note: contrary to other cross-validation strategies, random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n : int
        Total number of elements in the dataset.

    n_iter : int (default 10)
        Number of re-shuffling & splitting iterations.

    test_size : float (default 0.1), int, or None
        Fraction (float between 0.0 and 1.0) or absolute number (int) of
        test samples. If None, set to the complement of the train size.

    train_size : float, int, or None (default is None)
        Fraction (float between 0.0 and 1.0) or absolute number (int) of
        train samples. If None, set to the complement of the test size.

    random_state : int, RandomState instance or None, optional (default None)
        Seed or generator controlling the shuffling; None uses the global
        `np.random` state.
    """

    def _iter_indices(self):
        """Draw one fresh permutation per iteration and slice it into the
        test indices (first n_test entries) and train indices (next n_train)."""
        rng = check_random_state(self.random_state)
        for _ in range(self.n_iter):
            order = rng.permutation(self.n)
            test_idx = order[:self.n_test]
            train_idx = order[self.n_test:self.n_test + self.n_train]
            yield train_idx, test_idx

    def __repr__(self):
        # train_size is deliberately absent, matching the historical format.
        return ('%s(%d, n_iter=%d, test_size=%s, '
                'random_state=%s)' % (
                    self.__class__.__name__,
                    self.n,
                    self.n_iter,
                    str(self.test_size),
                    self.random_state,
                ))

    def __len__(self):
        """One split per shuffling iteration."""
        return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
It is the mostly likely outcome of drawing n_draws many
samples from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = rng.choice(inds, size=add_now, replace=False)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.StratifiedShuffleSplit` instead.
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If | |
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2020 Intel Corporation
"""This script runs test cases with O-DU and O-RU"""
import logging
import sys
import argparse
import os
from itertools import dropwhile
from datetime import datetime
import json
import socket
# Number of resource blocks (RBs) per symbol, indexed by
# [numerology][bandwidth option]; a 0 entry marks an unsupported combination.
N_LTE_NUM_RBS_PER_SYM_F1 = [
    # 5MHz 10MHz 15MHz 20 MHz
    [25, 50, 75, 100]  # LTE Numerology 0 (15KHz)
]
N_NUM_RBS_PER_SYM_F1 = [
    # 5MHz 10MHz 15MHz 20 MHz 25 MHz 30 MHz 40 MHz 50MHz 60 MHz 70 MHz 80 MHz
    # 90 MHz 100 MHz
    [25, 52, 79, 106, 133, 160, 216, 270, 0, 0, 0, 0, 0],  # Numerology 0 (15KHz)
    [11, 24, 38, 51, 65, 78, 106, 133, 162, 0, 217, 245, 273],  # Numerology 1 (30KHz)
    [0, 11, 18, 24, 31, 38, 51, 65, 79, 0, 107, 121, 135]  # Numerology 2 (60KHz)
]
N_NUM_RBS_PER_SYM_F2 = [
    # 50Mhz 100MHz 200MHz 400MHz
    [66, 132, 264, 0],  # Numerology 2 (60KHz)
    [32, 66, 132, 264]  # Numerology 3 (120KHz)
]
# Maps a bandwidth in MHz (as a string) to its column in the tables above.
N_RCH_BW_OPTIONS_KEYS = ['5', '10', '15', '20', '25', '30', '40', '50', '60', '70', '80', '90',
                         '100', '200', '400']
N_RCH_BW_OPTIONS_VALUES = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
N_RCH_BW_OPTIONS = dict(zip(N_RCH_BW_OPTIONS_KEYS, N_RCH_BW_OPTIONS_VALUES))
# Same mapping, restricted to the FR2 table (numerologies 2 and 3).
N_RCH_BW_OPTIONS_KEYS_MU2AND3 = ['50', '100', '200', '400']
N_RCH_BW_OPTIONS_VALUES_MU2AND3 = [0, 1, 2, 3]
N_RCH_BW_OPTIONS_MU2AND3 = dict(zip(N_RCH_BW_OPTIONS_KEYS_MU2AND3, N_RCH_BW_OPTIONS_VALUES_MU2AND3))
# Human-readable labels used in log/print output.
DIC_DIR = dict({0:'DL', 1:'UL'})
DIC_XU = dict({0:'o-du', 1:'o-ru'})
DIC_RAN_TECH = dict({0:'5g_nr', 1:'lte'})
def init_logger(console_level, logfile_level):
    """Configure the root logger with a file sink and a console sink.

    :param console_level: logging level for the console StreamHandler
    :param logfile_level: logging level for the "runtests.log" file sink
    """
    # File sink: basicConfig attaches a FileHandler ("runtests.log",
    # truncated on every run) to the root logger.
    logging.basicConfig(
        filename="runtests.log",
        filemode='w',
        format="%(asctime)s: %(levelname)s: %(message)s",
        level=logfile_level)
    # Console sink with its own (usually less verbose) level and format.
    console = logging.StreamHandler()
    console.setLevel(console_level)
    console.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
    logging.getLogger().addHandler(console)
def parse_args(args):
    """Configure the argument parser and parse the command line.

    :param args: list of command-line tokens (e.g. sys.argv[1:])
    :return: argparse.Namespace with fields rantech, category, numerology,
        bandwidth, testcase and verbose
    """
    # Parser configuration
    parser = argparse.ArgumentParser(description=
                                     "Run test cases: category numerology bandwidth test_num")
    # FIX: help text previously misspelled "Tehcnology".
    parser.add_argument("--ran", type=int, default=0, help=
                        "Radio Access Technology 0 (5G NR) or 1 (LTE)",
                        metavar="ran", dest="rantech")
    parser.add_argument("--cat", type=int, default=0, help="Category: 0 (A) or 1 (B)",
                        metavar="cat", dest="category")
    parser.add_argument("--m_u", type=int, default=0, help="numerology [0,1,3]",
                        metavar="num", dest="numerology")
    parser.add_argument("--b_w", type=int, default=20, help="bandwidth [5,10,20,100]",
                        metavar="b_w", dest="bandwidth")
    parser.add_argument("--testcase", type=int, default=0, help="test case number",
                        metavar="testcase", dest="testcase")
    parser.add_argument("--verbose", type=int, default=0, help="enable verbose output",
                        metavar="verbose", dest="verbose")
    # Parse arguments
    options = parser.parse_args(args)
    logging.debug("Options: ran=%d category=%d num=%d bw=%d testcase=%d",
                  options.rantech, options.category, options.numerology, options.bandwidth,
                  options.testcase)
    return options
def is_comment(line):
    """Return True when *line* is a '#' comment line.

    Used with itertools.dropwhile to skip the comment header at the top
    of a config file.
    """
    return line[:1] == '#'
class GetOutOfLoops(Exception):
    """Control-flow exception used to break out of the deeply nested
    comparison loops in compare_results() in a single jump."""
def get_re_map(nrb, direction):
    """Return the list of resource-element (RE) line indices to compare.

    The PRB layout is taken from module globals populated by the parsed
    test config (``nPrbElemDl``/``prb_elem_dl<i>`` for DL,
    ``nPrbElemUl``/``prb_elem_ul<i>`` for UL); each ``prb_elem_*`` entry is
    indexable with [0] = RB start and [1] = RB size (12 REs per RB).
    When no PRB elements are configured, the full map ``range(nrb*12)`` is
    returned.

    :param nrb: total number of resource blocks for this direction
    :param direction: 0 for DL, 1 for UL (any other value yields the full map)
    :return: list of RE indices
    """
    prb_map = []
    n_prb_elm = 0  # FIX: initialise so unknown directions fall back to the full map
    if direction == 0:
        count_var, elem_prefix = 'nPrbElemDl', 'prb_elem_dl'
    elif direction == 1:
        count_var, elem_prefix = 'nPrbElemUl', 'prb_elem_ul'
    else:
        count_var, elem_prefix = None, None
    if count_var is not None and count_var in globals():
        # FIX: previously assigned the variable *name* (a string) instead of
        # its value, which made range(n_prb_elm) raise TypeError.
        n_prb_elm = globals()[count_var]
        for i in range(n_prb_elm):
            elm = elem_prefix + str(i)
            if elm in globals():
                elem = list(globals()[elm])
                rb_start, rb_size = elem[0], elem[1]
                prb_map += list(range(rb_start * 12, (rb_start + rb_size) * 12))
    if n_prb_elm == 0:
        prb_map = list(range(0, nrb * 12))
    return prb_map
def compare_results(rantech, cat, m_u, xran_path, direction, context): #pylint: disable=too-many-arguments, too-many-locals, too-many-branches, too-many-statements
    """Compare played (reference) and captured (test) IQ logs per antenna.

    :param rantech: 0 for 5G NR, 1 for LTE
    :param cat: O-RAN category, 0 (A) or 1 (B)
    :param m_u: numerology
    :param xran_path: root of the tree holding the results/ directory
    :param direction: 0 compares DL (o-ru rx vs o-du play),
                      1 compares UL (o-du rx vs o-ru play)
    :param context: dict of config values produced by parse_dat_file()
    :return: 0 when all compared IQ samples match, -1 otherwise
    """
    res = 0
    re_map = []
    # Resolve RBs-per-symbol for each direction from the bandwidth tables.
    if rantech == 1:
        if m_u == 0:
            n_dirb = N_NUM_RBS_PER_SYM_F1[m_u][N_RCH_BW_OPTIONS.get(str(context["nDLBandwidth"]))]
            n_uirb = N_NUM_RBS_PER_SYM_F1[m_u][N_RCH_BW_OPTIONS.get(str(context["nULBandwidth"]))]
        else:
            print("Incorrect arguments\n") #pylint: disable=superfluous-parens
            res = -1
            return res
    elif rantech == 0:
        if m_u < 3:
            n_dirb = N_NUM_RBS_PER_SYM_F1[m_u][N_RCH_BW_OPTIONS.get(str(context["nDLBandwidth"]))]
            n_uirb = N_NUM_RBS_PER_SYM_F1[m_u][N_RCH_BW_OPTIONS.get(str(context["nULBandwidth"]))]
        elif (m_u >= 2) & (m_u <= 3):
            # NOTE(review): m_u == 2 is already caught by "m_u < 3" above, so
            # this branch effectively handles m_u == 3 only — confirm intended.
            n_dirb = N_NUM_RBS_PER_SYM_F2[m_u - 2][N_RCH_BW_OPTIONS_MU2AND3.get(
                str(context["nDLBandwidth"]))]
            n_uirb = N_NUM_RBS_PER_SYM_F2[m_u - 2][N_RCH_BW_OPTIONS_MU2AND3.get(
                str(context["nULBandwidth"]))]
            print(n_dirb, n_uirb)
        else:
            print("Incorrect arguments\n") #pylint: disable=superfluous-parens
            res = -1
            return res
    # FIX: previously assigned the literal string 'compression', so the
    # "comp == 1" branch below was unreachable and compressed captures were
    # compared bit-exactly; use the configured value instead.
    if "compression" in context:
        comp = context["compression"]
    else:
        comp = 0
    print("compare results: {} [compression {}]\n".format(DIC_DIR.get(direction), comp)) #pylint: disable=superfluous-parens
    #if cat == 1:
    #    print("WARNING: Skip checking IQs and BF Weights for CAT B for now\n");
    #    return res
    # Build the per-slot symbol-direction table when the config is TDD.
    if context["nFrameDuplexType"] == 1:
        slot_config = []
        for i in range(context["nTddPeriod"]):
            if i > 9:
                raise Exception('i should not exceed nTddPeriod {}. The value of i was: {}'
                                .format(context["nTddPeriod"], i))
            slot_config.insert(i, context["sslot_config" + str(i)])
    try: #pylint: disable=too-many-nested-blocks
        if (direction == 1) & (cat == 1): #UL
            flow_id = context["ccNum"]*context["antNumUL"]
        else:
            flow_id = context["ccNum"]*context["antNum"]
        if direction == 0:
            re_map = get_re_map(n_dirb, direction)
        elif direction == 1:
            re_map = get_re_map(n_uirb, direction)
        else:
            raise Exception('Direction is not supported {}'.format(direction))
        for i in range(0, flow_id):
            # Pick the captured (tst) and played (ref) log files per antenna.
            if direction == 0:
                # DL
                nrb = n_dirb
                file_tst = xran_path+"/results/"+"o-ru-rx_log_ant"+str(i)+".txt"
                file_ref = xran_path+"/results/"+"o-du-play_ant"+str(i)+".txt"
            elif direction == 1:
                # UL
                nrb = n_uirb
                file_tst = xran_path+"/results/"+"o-du-rx_log_ant"+str(i)+".txt"
                file_ref = xran_path+"/results/"+"o-ru-play_ant"+str(i)+".txt"
            else:
                raise Exception('Direction is not supported {}'.format(direction))
            print("test result :", file_tst)
            print("test reference:", file_ref)
            if os.path.exists(file_tst):
                try:
                    file_tst = open(file_tst, 'r')
                except OSError:
                    print("Could not open/read file:", file_tst)
                    sys.exit()
            else:
                print(file_tst, "doesn't exist")
                res = -1
                return res
            if os.path.exists(file_ref):
                try:
                    file_ref = open(file_ref, 'r')
                except OSError:
                    print("Could not open/read file:", file_ref)
                    sys.exit()
            else:
                # FIX: previously reported file_tst here instead of file_ref.
                print(file_ref, "doesn't exist")
                res = -1
                return res
            tst = file_tst.readlines()
            ref = file_ref.readlines()
            print(len(tst)) #pylint: disable=superfluous-parens
            print(len(ref)) #pylint: disable=superfluous-parens
            file_tst.close()
            file_ref.close()
            print(context["numSlots"]) #pylint: disable=superfluous-parens
            for slot_idx in range(0, context["numSlots"]):
                for sym_idx in range(0, 14):
                    if context["nFrameDuplexType"] == 1:
                        # Skip symbols scheduled for the other direction (TDD).
                        sym_dir = slot_config[slot_idx%context["nTddPeriod"]][sym_idx]
                        if direction == 0 and sym_dir != 0:
                            continue
                        if direction == 1 and sym_dir != 1:
                            continue
                    for line_idx in re_map:
                        # One log line per RE: slot * (nrb*12*14) + symbol * (nrb*12) + RE.
                        offset = (slot_idx*nrb*12*14) + sym_idx*nrb*12 + line_idx
                        try:
                            line_tst = tst[offset].rstrip()
                        except IndexError:
                            res = -1
                            print("FAIL:", "IndexError on tst: ant:[", i, "]:",
                                  offset, slot_idx, sym_idx, line_idx, len(tst))
                            raise GetOutOfLoops
                        try:
                            line_ref = ref[offset].rstrip()
                        except IndexError:
                            res = -1
                            print("FAIL:", "IndexError on ref: ant:[", i, "]:",
                                  offset, slot_idx, sym_idx, line_idx, len(ref))
                            raise GetOutOfLoops
                        if comp == 1:
                            # discard LSB bits as BFP compression is not "bit exact"
                            tst_i_value = int(line_tst.split(" ")[0]) & 0xFF80
                            tst_q_value = int(line_tst.split(" ")[1]) & 0xFF80
                            ref_i_value = int(line_ref.split(" ")[0]) & 0xFF80
                            ref_q_value = int(line_ref.split(" ")[1]) & 0xFF80
                            if (tst_i_value != ref_i_value) or (tst_q_value != ref_q_value):
                                print("FAIL:", "ant:[", i, "]:", offset, slot_idx, sym_idx,
                                      line_idx, ":", "tst: ", tst_i_value, " ", tst_q_value, " ",
                                      "ref: ", ref_i_value, " ", ref_q_value, " ")
                                res = -1
                                raise GetOutOfLoops
                        else:
                            if line_ref != line_tst:
                                print("FAIL:", "ant:[", i, "]:", offset, slot_idx, sym_idx,
                                      line_idx, ":", "tst:", line_tst, "ref:", line_ref)
                                res = -1
                                raise GetOutOfLoops
    except GetOutOfLoops:
        return res
    return res
def parse_dat_file(test_cfg):
    """Parse a usecase config file into a dict of its settings.

    Comment lines at the top are skipped, trailing '#' comments stripped,
    and each remaining line is executed as Python; the resulting local
    namespace (name -> value) is returned.

    NOTE(review): this exec's config content directly — the config files
    must be trusted input. The ':' -> ',' and './' -> quote rewrites below
    presumably adapt the config syntax to executable Python; verify against
    actual config files before changing them.

    :param test_cfg: sequence whose first element is the config file path
    :return: dict of names assigned by the executed config lines
    """
    logging.info("parse config files %s\n", test_cfg[0])
    line_list = list()
    sep = '#'
    with open(test_cfg[0], 'r') as f_h:
        # dropwhile skips only the leading run of '#' comment lines.
        for curline in dropwhile(is_comment, f_h):
            # Strip trailing comments and whitespace; keep non-empty lines.
            my_line = curline.rstrip().split(sep, 1)[0].strip()
            if my_line:
                line_list.append(my_line)
    global_env = {}
    local_env = {}
    for line in line_list:
        exe_line = line.replace(":", ",")
        if exe_line.find("/") > 0:
            # Quote relative paths so they exec as string literals.
            exe_line = exe_line.replace('./', "'")
            exe_line = exe_line+"'"
        code = compile(str(exe_line), '<string>', 'exec')
        exec(code, global_env, local_env) #pylint: disable=exec-used
    return local_env
def run_tcase(rantech, cat, m_u, b_w, tcase, xran_path): #pylint: disable=too-many-arguments
""" method for runing test cases"""
if rantech == 1: #LTE
if cat == 1:
test_config = xran_path+"/app/usecase/lte_b/mu{0:d}_{1:d}mhz".format(m_u, b_w)
elif cat == 0:
test_config = xran_path+"/app/usecase/lte_a/mu{0:d}_{1:d}mhz".format(m_u, b_w)
else:
print("Incorrect cat arguments\n") #pylint: disable=superfluous-parens
return -1
elif rantech == 0: #5G NR
if cat | |
of the given type"""
self.add_edges([edge], edgetype)
    def add_edge_table(self, etab:Mapping[ET,List[int]]) -> None:
        """Takes a dictionary mapping (source,target) --> (#edges, #h-edges) specifying that
        #edges regular edges must be added between source and target and $h-edges Hadamard edges.
        The method selectively adds or removes edges to produce that ZX diagram which would
        result from adding (#edges, #h-edges), and then removing all parallel edges using Hopf/spider laws.

        An edge already present in the graph counts towards the requested
        totals. Scalar corrections (powers/phases) are accumulated on
        ``self.scalar`` as parallel edges are eliminated. Raises ValueError
        for parallel-edge configurations that have no supported rewrite
        (boundaries, H-box/H-box, and some H-box combinations)."""
        add: Dict[EdgeType.Type,List[ET]] = {EdgeType.SIMPLE: [], EdgeType.HADAMARD: []} # list of edges and h-edges to add
        new_type: Optional[EdgeType.Type]
        remove: List = [] # list of edges to remove
        for e,(n1,n2) in etab.items():
            v1,v2 = self.edge_st(e)
            t1 = self.type(v1)
            t2 = self.type(v2)
            conn_type = self.edge_type(e)
            # Fold any existing connection into the requested counts.
            if conn_type == EdgeType.SIMPLE: n1 += 1 #and add to the relevant edge count
            elif conn_type == EdgeType.HADAMARD: n2 += 1
            if n1 + n2 <= 1: # We first deal with simple edges
                if n1 == 1: new_type = EdgeType.SIMPLE
                elif n2 == 1: new_type = EdgeType.HADAMARD
                else: new_type = None
            # Hence, all the other cases have some kind of parallel edge
            elif t1 == VertexType.BOUNDARY or t2 == VertexType.BOUNDARY:
                raise ValueError("Parallel edges to a boundary edge are not supported")
            elif t1 == t2 and vertex_is_zx(t1): #types are ZX & equal,
                n1 = bool(n1) #so normal edges fuse
                pairs, n2 = divmod(n2,2) #while hadamard edges go modulo 2
                self.scalar.add_power(-2*pairs)
                if n1 != 0 and n2 != 0: #reduction rule for when both edges appear
                    new_type = EdgeType.SIMPLE
                    self.add_to_phase(v1, 1)
                    self.scalar.add_power(-1)
                elif n1 != 0: new_type = EdgeType.SIMPLE
                elif n2 != 0: new_type = EdgeType.HADAMARD
                else: new_type = None
            elif t1 != t2 and vertex_is_zx(t1) and vertex_is_zx(t2): #types are ZX & different
                pairs, n1 = divmod(n1,2) #so normal edges go modulo 2
                n2 = bool(n2) #while hadamard edges fuse
                self.scalar.add_power(-2*pairs)
                if n1 != 0 and n2 != 0: #reduction rule for when both edges appear
                    new_type = EdgeType.HADAMARD
                    self.add_to_phase(v1, 1)
                    self.scalar.add_power(-1)
                elif n1 != 0: new_type = EdgeType.SIMPLE
                elif n2 != 0: new_type = EdgeType.HADAMARD
                else: new_type = None
            elif t1 == VertexType.H_BOX or t2 == VertexType.H_BOX:
                # TODO: Check scalar accuracy
                if t1 != VertexType.H_BOX: # Ensure that the first vertex is an H-box
                    v1,v2 = v2,v1
                    t1,t2 = t2,t1
                if t2 == VertexType.H_BOX: # They are both H-boxes
                    raise ValueError("Parallel edges between H-boxes are not supported")
                elif t2 == VertexType.Z: # Z & H-box
                    n1 = bool(n1) # parallel regular edges collapse to single wire
                    if n2 > 1: raise ValueError("Parallel H-edges between H-box and Z-spider are not supported")
                    #if n2 and (n2-1) % 2 == 1: # parallel H-edges also collapse, but each extra one adds a pi phase
                    #    self.add_to_phase(v2, 1)
                    #n2 = bool(n2)
                    if n1 and n2:
                        # There is no simple way to deal with a parallel H-edge and regular edge
                        # So we simply add a 2-ary H-box to the graph
                        r1,r2 = self.row(v1), self.row(v2)
                        q1,q2 = self.qubit(v1), self.qubit(v2)
                        w = self.add_vertex(VertexType.H_BOX,(q1+q2)/2,(r1+r2)/2-0.5)
                        add[EdgeType.SIMPLE].extend([self.edge(v1,w),self.edge(v2,w)])
                        new_type = EdgeType.SIMPLE
                    elif n1: new_type = EdgeType.SIMPLE
                    elif n2: new_type = EdgeType.HADAMARD
                    else: new_type = None
                elif t2 == VertexType.X: # X & H-box
                    n2 = bool(n2) # parallel H-edges collapse to single wire
                    if n1 > 1: raise ValueError("Parallel edges between H-box and X-spider are not supported")
                    #if (n1-1) % 2 == 1: # parallel regular edges also collapse, but each extra one adds a pi phase
                    #    self.add_to_phase(v2, 1)
                    #n1 = bool(n1)
                    if n1 and n2:
                        # There is no simple way to deal with a parallel H-edge and regular edge
                        # So we simply add a 2-ary H-box to the graph
                        r1,r2 = self.row(v1), self.row(v2)
                        q1,q2 = self.qubit(v1), self.qubit(v2)
                        w = self.add_vertex(VertexType.H_BOX,(q1+q2)/2,(r1+r2)/2-0.5)
                        add[EdgeType.SIMPLE].extend([self.edge(v1,w),self.edge(v2,w)])
                        new_type = EdgeType.SIMPLE
                    elif n1: new_type = EdgeType.SIMPLE
                    elif n2: new_type = EdgeType.HADAMARD
                    else: new_type = None
                else:
                    raise ValueError("Unhandled parallel edges between nodes of type (%s,%s)" % (t1,t2))
            else:
                raise ValueError("Unhandled parallel edges between nodes of type (%s,%s)" % (t1,t2))
            # Reconcile the computed new_type with the existing connection.
            if new_type: # The vertices should be connected, so update the graph
                if not conn_type: #new edge added
                    add[new_type].append(self.edge(v1,v2))
                elif conn_type != new_type: #type of edge has changed
                    self.set_edge_type(self.edge(v1,v2), new_type)
            elif conn_type: #They were connected, but not anymore, so update the graph
                remove.append(self.edge(v1,v2))
        self.remove_edges(remove)
        self.add_edges(add[EdgeType.SIMPLE],EdgeType.SIMPLE)
        self.add_edges(add[EdgeType.HADAMARD],EdgeType.HADAMARD)
def add_edge_smart(self, e: ET, edgetype: EdgeType.Type):
"""Like add_edge, but does the right thing if there is an existing edge."""
self.add_edge_table({e : [1,0] if edgetype == EdgeType.SIMPLE else [0,1]})
    def set_phase_master(self, m: 'simplify.Simplifier') -> None:
        """Points towards an instance of the class :class:`~pyzx.simplify.Simplifier`.
        Used for phase teleportation.

        The stored simplifier is consulted in :meth:`fuse_phases` so fused
        phases are reported back for phase-teleportation bookkeeping."""
        self.phase_master = m
def update_phase_index(self, old:VT, new:VT) -> None:
"""When a phase is moved from a vertex to another vertex,
we need to tell the phase_teleportation algorithm that this has happened.
This function does that. Used in some of the rules in `simplify`."""
if not self.track_phases: return
i = self.phase_index[old]
self.phase_index[old] = self.phase_index[new]
self.phase_index[new] = i
def fuse_phases(self, p1: VT, p2: VT) -> None:
if p1 not in self.phase_index or p2 not in self.phase_index:
return
if self.phase_master is not None:
self.phase_master.fuse_phases(self.phase_index[p1],self.phase_index[p2])
self.phase_index[p2] = self.phase_index[p1]
def phase_negate(self, v: VT) -> None:
if v not in self.phase_index: return
index = self.phase_index[v]
mult = self.phase_mult[index]
if mult == 1: self.phase_mult[index] = -1
else: self.phase_mult[index] = 1
#self.phase_mult[index] = -1*mult
def vertex_from_phase_index(self, i: int) -> VT:
return list(self.phase_index.keys())[list(self.phase_index.values()).index(i)]
    def remove_vertices(self, vertices: Iterable[VT]) -> None:
        """Removes the list of vertices from the graph.

        Abstract: concrete graph backends must override this."""
        raise NotImplementedError("Not implemented on backend " + type(self).backend)
def remove_vertex(self, vertex: VT) -> None:
"""Removes the given vertex from the graph."""
self.remove_vertices([vertex])
    def remove_isolated_vertices(self) -> None:
        """Deletes all vertices and vertex pairs that are not connected to any other vertex.

        Degree-0 vertices are removed outright; degree-1 vertices whose sole
        neighbor has no other neighbors are removed as a pair. In both cases
        the contribution of the removed spiders is folded into ``self.scalar``
        so the diagram's semantics are preserved. Raises TypeError on an
        isolated boundary vertex (an ill-typed ZX-diagram)."""
        rem: List[VT] = []
        for v in self.vertices():
            d = self.vertex_degree(v)
            if d == 0:
                rem.append(v)
                ty = self.type(v)
                if ty == VertexType.BOUNDARY:
                    raise TypeError("Diagram is not a well-typed ZX-diagram: contains isolated boundary vertex.")
                elif ty == VertexType.H_BOX:
                    self.scalar.add_phase(self.phase(v))
                else: self.scalar.add_node(self.phase(v))
            if d == 1: # It has a unique neighbor
                if v in rem: continue # Already taken care of
                if self.type(v) == VertexType.BOUNDARY: continue # Ignore in/outputs
                w = list(self.neighbors(v))[0]
                if len(list(self.neighbors(w))) > 1: continue # But this neighbor has other neighbors
                if self.type(w) == VertexType.BOUNDARY: continue # It's a state/effect
                # At this point w and v are only connected to each other
                rem.append(v)
                rem.append(w)
                et = self.edge_type(self.edge(v,w))
                t1 = self.type(v)
                t2 = self.type(w)
                if t1 == VertexType.H_BOX: t1 = VertexType.Z # 1-ary H-box is just a Z spider
                if t2 == VertexType.H_BOX: t2 = VertexType.Z
                # Fold the pair's scalar value: which rule applies depends on
                # whether the spider types and the edge type "match up".
                if t1==t2:
                    if et == EdgeType.SIMPLE:
                        self.scalar.add_node(self.phase(v)+self.phase(w))
                    else:
                        self.scalar.add_spider_pair(self.phase(v), self.phase(w))
                else:
                    if et == EdgeType.SIMPLE:
                        self.scalar.add_spider_pair(self.phase(v), self.phase(w))
                    else:
                        self.scalar.add_node(self.phase(v)+self.phase(w))
        self.remove_vertices(rem)
    def remove_edges(self, edges: List[ET]) -> None:
        """Removes the list of edges from the graph.

        Abstract: concrete graph backends must override this."""
        raise NotImplementedError("Not implemented on backend " + type(self).backend)
    def remove_edge(self, edge: ET) -> None:
        """Removes the given edge from the graph.

        Convenience wrapper delegating to :meth:`remove_edges`."""
        self.remove_edges([edge])
    def num_vertices(self) -> int:
        """Returns the amount of vertices in the graph.

        Abstract: concrete graph backends must override this."""
        raise NotImplementedError("Not implemented on backend " + type(self).backend)
    def num_edges(self) -> int:
        """Returns the amount of edges in the graph.

        Abstract: concrete graph backends must override this."""
        raise NotImplementedError("Not implemented on backend " + type(self).backend)
    def vertices(self) -> Sequence[VT]:
        """Iterator over all the vertices.

        Abstract: concrete graph backends must override this."""
        raise NotImplementedError("Not implemented on backend " + type(self).backend)
    def edges(self) -> Sequence[ET]:
        """Iterator that returns all the edges. Output type depends on implementation in backend.

        Abstract: concrete graph backends must override this."""
        raise NotImplementedError("Not implemented on backend " + type(self).backend)
def vertex_set(self) -> Set[VT]:
"""Returns the vertices of the graph as a Python set.
Should be overloaded if the backend supplies a cheaper version than this."""
return set(self.vertices())
def edge_set(self) -> Set[ET]:
"""Returns the edges of the graph as a Python set.
Should be overloaded if the backend supplies a cheaper version than this."""
return set(self.edges())
    def edge(self, s:VT, t:VT) -> ET:
        """Returns the edge object with the given source/target.

        Abstract: concrete graph backends must override this."""
        raise NotImplementedError("Not implemented on backend " + type(self).backend)
def edge_st(self, edge: ET) -> Tuple[VT, VT]:
"""Returns a tuple of source/target of the | |
# Copyright: (c) 2018, <NAME> (@jborean93) <<EMAIL>>
# MIT License (see LICENSE or https://opensource.org/licenses/MIT)
import logging
import uuid
from pypsrp.complex_objects import Color, Coordinates, ObjectMeta, Size
log = logging.getLogger(__name__)
class PSHost(object):
def __init__(self, current_culture, current_ui_culture, debugger_enabled,
name, private_data, ui, version):
"""
Defines the properties and facilities provided by an application
hosting a RunspacePool.
https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshost
This is a basic implementation some methods being noop or not
implemented.
:param current_culture: pypsrp.complex_objects.CultureInfo, the host's
culture
:param current_ui_culture: pypsrp.complex_objects.CultureInfo, the
host's UI culture
:param debugger_enabled: This property enables and disables the host
debugger if debugging is supported
:param name: Gets the hosting application identification in some user-
friendly fashion.
:param private_data: Used to allow the host to pass private data
through a Runspace to cmdlets inside that Runspace
:param ui: The hosts implementation of PSHostUserInterface. Should be
None if the host that does not want to support user interaction
:param version: The version of the hosting application
"""
self.ui = ui
self.debugger_enabled = debugger_enabled
self.private_data = private_data
self.rc = None
self.name = name
self.version = version
self.instance_id = uuid.uuid4()
self.current_culture = current_culture
self.current_ui_culture = current_ui_culture
def run_method(self, method_identifier, args, runspace, pipeline=None):
"""
Run a host call method requested by the server and return the response
from this method to send back to the server.
https://msdn.microsoft.com/en-us/library/dd306624.aspx
Each method will have access to the current runspace and pipeline (if
applicable) during the method call as well as any args sent from the
server.
:param method_identifier: pypsrp.complex_objects.HostMethodIdentifier
in the host call message.
:param args: The list of arguments for the host call function.
:param runspace: The runspace the host call relates to
:param pipeline: The pipeline (if any) that the call relates to
:return: The response (if any) to send back to the server
"""
response = None
if method_identifier.value < 11:
func = getattr(self, str(method_identifier))
response = func(runspace, pipeline, *args)
elif method_identifier.value < 27:
func = getattr(self.ui, str(method_identifier))
response = func(runspace, pipeline, *args)
elif method_identifier.value < 52:
func = getattr(self.ui.raw_ui, str(method_identifier))
response = func(runspace, pipeline, *args)
else:
log.warning("Received unexpected/unsupported host method "
"identifier: %d" % method_identifier.value)
return response
# Start of Host Methods, the names of these functions are important as
# they line up to the names defined by MS and are sent in the host call
# messages
def GetName(self, runspace, pipeline):
"""
MI: 1
SHOULD return a string identifying the hosting application in a user
friendly way.
https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshost.name
:param runspace: The runspace the host call relates to
:param pipeline: The pipeline (if any) that the call relates to
:return: String of the user-friendly name of the hosting application
"""
return self.name
def GetVersion(self, runspace, pipeline):
"""
MI: 2
SHOULD return the version number of the hosting application.
https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshost.version
:param runspace: The runspace the host call relates to
:param pipeline: The pipeline (if any) that the call relates to
:return: Version number of the hosting application
"""
meta = ObjectMeta("Version")
value = runspace.serialize(self.version, meta)
return value
def GetInstanceId(self, runspace, pipeline):
"""
MI: 3
SHOULD return a GUID that uniquely identifies the hosting application.
https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshost.instanceid
:param runspace: The runspace the host call relates to
:param pipeline: The pipeline (if any) that the call relates to
:return: GUID of the hosting application
"""
return self.instance_id
def GetCurrentCulture(self, runspace, pipeline):
"""
MI: 4
SHOULD return the host's culture.
https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshost.currentculture
:param runspace: The runspace the host call relates to
:param pipeline: The pipeline (if any) that the call relates to
:return: pypsrp.complex_objects.CultureInfo of the host's culture
"""
return self.current_culture
def GetCurrentUICulture(self, runspace, pipeline):
"""
MI: 5
MUST return the host's UI culture.
https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshost.currentuiculture
:param runspace: The runspace the host call relates to
:param pipeline: The pipeline (if any) that the call relates to
:return: pypsrp.complex_objects.CultureInfo of the host's UI culture
"""
return self.current_ui_culture
def SetShouldExit(self, runspace, pipeline, exit_code):
"""
MI: 6
SHOULD shut down the hosting application and close the current
runspace. The default implementation just sets the rc on the host
object and doesn't shutdown the runspace.
https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshost.setshouldexit
:param runspace: The runspace the host call relates to
:param pipeline: The pipeline (if any) that the call relates to
:param exit_code: The exit code accompanying the exit keyword.
Typically after exiting a runspace, a host will also terminate
"""
self.rc = exit_code
def EnterNestedPrompt(self, runspace, pipeline):
"""
MI: 7
SHOULD interrupt the current pipeline and start a nested pipeline.
https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshost.enternestedprompt
:param runspace: The runspace the host call relates to
:param pipeline: The pipeline (if any) that the call relates to
"""
raise NotImplementedError()
def ExitNestedPrompt(self, runspace, pipeline):
"""
MI: 8
SHOULD stop the nested pipeline and resume the current pipeline.
https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshost.exitnestedprompt
:param runspace: The runspace the host call relates to
:param pipeline: The pipeline (if any) that the call relates to
"""
raise NotImplementedError()
def NotifyBeginApplication(self, runspace, pipeline):
    """
    MI: 9
    Called by an application to indicate that it is executing a command
    line application.
    https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshost.notifybeginapplication

    This base host takes no action on this notification.

    :param runspace: The runspace the host call relates to
    :param pipeline: The pipeline (if any) that the call relates to
    """
    return None
def NotifyEndApplication(self, runspace, pipeline):
    """
    MI: 10
    Called by an application to indicate that it has finished executing a
    command line application.
    https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshost.notifyendapplication

    This base host takes no action on this notification.

    :param runspace: The runspace the host call relates to
    :param pipeline: The pipeline (if any) that the call relates to
    """
    return None
class PSHostUserInterface(object):
def __init__(self, raw_ui=None):
    """
    Defines the properties and facilities provided by a hosting application
    deriving from PSHost that offers dialog-oriented and line-oriented
    interactive features.
    https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshostuserinterface

    A minimal implementation: several methods are no-ops or raise
    NotImplementedError.

    :param raw_ui: Implementation of PSHostRawUserInterface, set to None
        if there is no raw user interface
    """
    self.raw_ui = raw_ui
    # Capture buffers used only by the default Write*/WriteLine*
    # implementations; real host UIs may ignore them entirely.
    self.stdout = []
    self.stderr = []
def ReadLine(self, runspace, pipeline):
    """
    MI: 11
    SHOULD read a line of characters from a user.
    https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshostuserinterface.readline

    Interactive input is not available in this base UI; override to
    support it.

    :param runspace: The runspace the host call relates to
    :param pipeline: The pipeline (if any) that the call relates to
    :return: A string of characters to return to the read line call
    """
    raise NotImplementedError()
def ReadLineAsSecureString(self, runspace, pipeline):
    """
    MI: 12
    SHOULD read a line of characters from a user, with the user input not
    echoed.
    https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshostuserinterface.readlineassecurestring

    Because the return value is meant to be a SecureString, the user must
    either have called or will call runspace.exchange_keys() in this
    implementation so that the serializer can create the string.

    Secure interactive input is not available in this base UI; override
    to support it.

    :param runspace: The runspace the host call relates to
    :param pipeline: The pipeline (if any) that the call relates to
    :return: The characters types by the user in an encrypted form
    """
    raise NotImplementedError()
def Write1(self, runspace, pipeline, value):
    """
    MI: 13
    SHOULD write specified characters on the hosting application.
    https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshostuserinterface.write

    The default behaviour is simply to collect the text in the stdout
    buffer of this UI object.

    :param runspace: The runspace the host call relates to
    :param pipeline: The pipeline (if any) that the call relates to
    :param value: The string of characters to be written
    """
    buffer = self.stdout
    buffer.append(value)
def Write2(self, runspace, pipeline, foreground_color, background_color,
           value):
    """
    MI: 14
    SHOULD write the specified characters with the specified foreground and
    background color on the hosting application.
    https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshostuserinterface.write

    This implementation just adds this result to the stdout list and
    ignores the colors, create your own method implementation if you wish
    to utilise this correctly

    :param runspace: The runspace the host call relates to
    :param pipeline: The pipeline (if any) that the call relates to
    :param foreground_color: The int value of pypsrp.complex_objects.Color
        of the foreground color to display the text with
    :param background_color: The int value of pypsrp.complex_objects.Color
        of the background color to display the text with
    :param value: The string of characters to be written
    """
    # Colors are intentionally discarded by this default implementation.
    buffer = self.stdout
    buffer.append(value)
def WriteLine1(self, runspace, pipeline):
"""
MI: 15
SHOULD write a carriage return on the hosting application.
https://docs.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshostuserinterface.writeline
:param runspace: The runspace the host call relates to
:param pipeline: The pipeline (if any) that the call relates to
| |
"""Define tests for the Flux LED/Magic Home config flow."""
from __future__ import annotations
from unittest.mock import patch
import pytest
from homeassistant import config_entries
from homeassistant.components import dhcp
from homeassistant.components.flux_led.const import (
CONF_CUSTOM_EFFECT_COLORS,
CONF_CUSTOM_EFFECT_SPEED_PCT,
CONF_CUSTOM_EFFECT_TRANSITION,
CONF_MINOR_VERSION,
CONF_MODEL,
CONF_MODEL_DESCRIPTION,
CONF_MODEL_INFO,
CONF_MODEL_NUM,
CONF_REMOTE_ACCESS_ENABLED,
CONF_REMOTE_ACCESS_HOST,
CONF_REMOTE_ACCESS_PORT,
DOMAIN,
TRANSITION_JUMP,
TRANSITION_STROBE,
)
from homeassistant.const import CONF_DEVICE, CONF_HOST
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import RESULT_TYPE_ABORT, RESULT_TYPE_FORM
from . import (
DEFAULT_ENTRY_TITLE,
DHCP_DISCOVERY,
FLUX_DISCOVERY,
FLUX_DISCOVERY_PARTIAL,
IP_ADDRESS,
MAC_ADDRESS,
MAC_ADDRESS_ONE_OFF,
MODEL,
MODEL_DESCRIPTION,
MODEL_NUM,
MODULE,
_patch_discovery,
_patch_wifibulb,
)
from tests.common import MockConfigEntry
# A MAC address that matches no discovered test device (negative-path tests).
MAC_ADDRESS_DIFFERENT = "ff:bb:ff:dd:ee:ff"
async def test_discovery(hass: HomeAssistant):
    """Test setting up discovery."""
    with _patch_discovery(), _patch_wifibulb():
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        await hass.async_block_till_done()
        assert result["type"] == "form"
        assert result["step_id"] == "user"
        assert not result["errors"]
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()
        assert result2["type"] == "form"
        assert result2["step_id"] == "pick_device"
        assert not result2["errors"]

        # test we can try again
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        assert result["type"] == "form"
        assert result["step_id"] == "user"
        assert not result["errors"]
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()
        assert result2["type"] == "form"
        assert result2["step_id"] == "pick_device"
        assert not result2["errors"]

    with _patch_discovery(), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup", return_value=True
    ) as mock_setup, patch(
        f"{MODULE}.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_DEVICE: MAC_ADDRESS},
        )
        await hass.async_block_till_done()

    assert result3["type"] == "create_entry"
    assert result3["title"] == DEFAULT_ENTRY_TITLE
    # NOTE(review): the original literal listed CONF_MINOR_VERSION twice
    # (as 4 and as 0x04, the same value); the duplicate key was removed.
    assert result3["data"] == {
        CONF_MINOR_VERSION: 4,
        CONF_HOST: IP_ADDRESS,
        CONF_MODEL: MODEL,
        CONF_MODEL_NUM: MODEL_NUM,
        CONF_MODEL_INFO: MODEL,
        CONF_MODEL_DESCRIPTION: MODEL_DESCRIPTION,
        CONF_REMOTE_ACCESS_ENABLED: True,
        CONF_REMOTE_ACCESS_HOST: "the.cloud",
        CONF_REMOTE_ACCESS_PORT: 8816,
    }
    mock_setup.assert_called_once()
    mock_setup_entry.assert_called_once()

    # ignore configured devices
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert not result["errors"]

    with _patch_discovery(), _patch_wifibulb():
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()

    assert result2["type"] == "abort"
    assert result2["reason"] == "no_devices_found"
async def test_discovery_legacy(hass: HomeAssistant):
    """Test setting up discovery with a legacy device."""
    with _patch_discovery(device=FLUX_DISCOVERY_PARTIAL), _patch_wifibulb():
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        await hass.async_block_till_done()
        assert result["type"] == "form"
        assert result["step_id"] == "user"
        assert not result["errors"]
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()
        assert result2["type"] == "form"
        assert result2["step_id"] == "pick_device"
        assert not result2["errors"]

        # test we can try again
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        assert result["type"] == "form"
        assert result["step_id"] == "user"
        assert not result["errors"]
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()
        assert result2["type"] == "form"
        assert result2["step_id"] == "pick_device"
        assert not result2["errors"]

    with _patch_discovery(), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup", return_value=True
    ) as mock_setup, patch(
        f"{MODULE}.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_DEVICE: MAC_ADDRESS},
        )
        await hass.async_block_till_done()

    assert result3["type"] == "create_entry"
    assert result3["title"] == DEFAULT_ENTRY_TITLE
    # NOTE(review): the original literal listed CONF_MINOR_VERSION twice
    # (as 4 and as 0x04, the same value); the duplicate key was removed.
    assert result3["data"] == {
        CONF_MINOR_VERSION: 4,
        CONF_HOST: IP_ADDRESS,
        CONF_MODEL: MODEL,
        CONF_MODEL_NUM: MODEL_NUM,
        CONF_MODEL_INFO: MODEL,
        CONF_MODEL_DESCRIPTION: MODEL_DESCRIPTION,
        CONF_REMOTE_ACCESS_ENABLED: True,
        CONF_REMOTE_ACCESS_HOST: "the.cloud",
        CONF_REMOTE_ACCESS_PORT: 8816,
    }
    mock_setup.assert_called_once()
    mock_setup_entry.assert_called_once()

    # ignore configured devices
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert not result["errors"]

    with _patch_discovery(), _patch_wifibulb():
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()

    assert result2["type"] == "abort"
    assert result2["reason"] == "no_devices_found"
async def test_discovery_with_existing_device_present(hass: HomeAssistant):
    """Test setting up discovery."""
    config_entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_HOST: "127.0.0.2"}, unique_id="dd:dd:dd:dd:dd:dd"
    )
    config_entry.add_to_hass(hass)
    with _patch_discovery(), _patch_wifibulb(no_device=True):
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()

    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert not result["errors"]

    with _patch_discovery(), _patch_wifibulb():
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()
    assert result2["type"] == "form"
    assert result2["step_id"] == "pick_device"
    assert not result2["errors"]

    # Now abort and make sure we can start over
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert not result["errors"]

    with _patch_discovery(), _patch_wifibulb():
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()
    assert result2["type"] == "form"
    assert result2["step_id"] == "pick_device"
    assert not result2["errors"]

    with _patch_discovery(), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_DEVICE: MAC_ADDRESS}
        )
        assert result3["type"] == "create_entry"
        assert result3["title"] == DEFAULT_ENTRY_TITLE
        # NOTE(review): the original literal listed CONF_MINOR_VERSION twice
        # (as 4 and as 0x04, the same value); the duplicate key was removed.
        assert result3["data"] == {
            CONF_MINOR_VERSION: 4,
            CONF_HOST: IP_ADDRESS,
            CONF_MODEL: MODEL,
            CONF_MODEL_NUM: MODEL_NUM,
            CONF_MODEL_INFO: MODEL,
            CONF_MODEL_DESCRIPTION: MODEL_DESCRIPTION,
            CONF_REMOTE_ACCESS_ENABLED: True,
            CONF_REMOTE_ACCESS_HOST: "the.cloud",
            CONF_REMOTE_ACCESS_PORT: 8816,
        }
        await hass.async_block_till_done()

    mock_setup_entry.assert_called_once()

    # ignore configured devices
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert not result["errors"]

    with _patch_discovery(), _patch_wifibulb():
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()

    assert result2["type"] == "abort"
    assert result2["reason"] == "no_devices_found"
async def test_discovery_no_device(hass: HomeAssistant):
    """Test discovery without device."""
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    with _patch_discovery(no_device=True), _patch_wifibulb():
        outcome = await hass.config_entries.flow.async_configure(
            flow["flow_id"], {}
        )
        await hass.async_block_till_done()
    # With nothing discoverable the flow must abort.
    assert outcome["type"] == "abort"
    assert outcome["reason"] == "no_devices_found"
async def test_manual_working_discovery(hass: HomeAssistant):
    """Test manually setup."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert not result["errors"]

    # Cannot connect (timeout)
    with _patch_discovery(no_device=True), _patch_wifibulb(no_device=True):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_HOST: IP_ADDRESS}
        )
        await hass.async_block_till_done()
    assert result2["type"] == "form"
    assert result2["step_id"] == "user"
    assert result2["errors"] == {"base": "cannot_connect"}

    # Success
    with _patch_discovery(), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup", return_value=True
    ), patch(f"{MODULE}.async_setup_entry", return_value=True):
        result4 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_HOST: IP_ADDRESS}
        )
        await hass.async_block_till_done()
    assert result4["type"] == "create_entry"
    assert result4["title"] == DEFAULT_ENTRY_TITLE
    # NOTE(review): the original literal listed CONF_MINOR_VERSION twice
    # (as 4 and as 0x04, the same value); the duplicate key was removed.
    assert result4["data"] == {
        CONF_MINOR_VERSION: 4,
        CONF_HOST: IP_ADDRESS,
        CONF_MODEL: MODEL,
        CONF_MODEL_NUM: MODEL_NUM,
        CONF_MODEL_INFO: MODEL,
        CONF_MODEL_DESCRIPTION: MODEL_DESCRIPTION,
        CONF_REMOTE_ACCESS_ENABLED: True,
        CONF_REMOTE_ACCESS_HOST: "the.cloud",
        CONF_REMOTE_ACCESS_PORT: 8816,
    }

    # Duplicate
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    with _patch_discovery(no_device=True), _patch_wifibulb(no_device=True):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_HOST: IP_ADDRESS}
        )
        await hass.async_block_till_done()
    assert result2["type"] == "abort"
    assert result2["reason"] == "already_configured"
async def test_manual_no_discovery_data(hass: HomeAssistant):
    """Test manually setup without discovery data."""
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert flow["type"] == "form"
    assert flow["step_id"] == "user"
    assert not flow["errors"]

    with _patch_discovery(no_device=True), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup", return_value=True
    ), patch(f"{MODULE}.async_setup_entry", return_value=True):
        flow = await hass.config_entries.flow.async_configure(
            flow["flow_id"], {CONF_HOST: IP_ADDRESS}
        )
        await hass.async_block_till_done()

    # Without UDP discovery data only the TCP-probed fields are stored.
    expected_data = {
        CONF_HOST: IP_ADDRESS,
        CONF_MODEL_NUM: MODEL_NUM,
        CONF_MODEL_DESCRIPTION: MODEL_DESCRIPTION,
    }
    assert flow["type"] == "create_entry"
    assert flow["data"] == expected_data
async def test_discovered_by_discovery_and_dhcp(hass):
    """Test we get the form with discovery and abort for dhcp source when we get both."""
    with _patch_discovery(), _patch_wifibulb():
        discovery_result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_INTEGRATION_DISCOVERY},
            data=FLUX_DISCOVERY,
        )
        await hass.async_block_till_done()
    assert discovery_result["type"] == RESULT_TYPE_FORM
    assert discovery_result["errors"] is None

    # A DHCP discovery for the same device must not start a second flow.
    with _patch_discovery(), _patch_wifibulb():
        dhcp_result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DHCP},
            data=DHCP_DISCOVERY,
        )
        await hass.async_block_till_done()
    assert dhcp_result["type"] == RESULT_TYPE_ABORT
    assert dhcp_result["reason"] == "already_in_progress"

    # Same IP with a different MAC is still the same in-progress flow.
    with _patch_discovery(), _patch_wifibulb():
        other_mac_result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DHCP},
            data=dhcp.DhcpServiceInfo(
                hostname="any",
                ip=IP_ADDRESS,
                macaddress="00:00:00:00:00:00",
            ),
        )
        await hass.async_block_till_done()
    assert other_mac_result["type"] == RESULT_TYPE_ABORT
    assert other_mac_result["reason"] == "already_in_progress"
async def test_discovered_by_discovery(hass):
    """Test we can setup when discovered from discovery."""
    with _patch_discovery(), _patch_wifibulb():
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_INTEGRATION_DISCOVERY},
            data=FLUX_DISCOVERY,
        )
        await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] is None

    with _patch_discovery(), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup", return_value=True
    ) as mock_async_setup, patch(
        f"{MODULE}.async_setup_entry", return_value=True
    ) as mock_async_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()

    assert result2["type"] == "create_entry"
    # NOTE(review): the original literal listed CONF_MINOR_VERSION twice
    # (as 4 and as 0x04, the same value); the duplicate key was removed.
    assert result2["data"] == {
        CONF_MINOR_VERSION: 4,
        CONF_HOST: IP_ADDRESS,
        CONF_MODEL: MODEL,
        CONF_MODEL_NUM: MODEL_NUM,
        CONF_MODEL_INFO: MODEL,
        CONF_MODEL_DESCRIPTION: MODEL_DESCRIPTION,
        CONF_REMOTE_ACCESS_ENABLED: True,
        CONF_REMOTE_ACCESS_HOST: "the.cloud",
        CONF_REMOTE_ACCESS_PORT: 8816,
    }
    assert mock_async_setup.called
    assert mock_async_setup_entry.called
async def test_discovered_by_dhcp_udp_responds(hass):
    """Test we can setup when discovered from dhcp but with udp response."""
    with _patch_discovery(), _patch_wifibulb():
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_DHCP}, data=DHCP_DISCOVERY
        )
        await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] is None

    with _patch_discovery(), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup", return_value=True
    ) as mock_async_setup, patch(
        f"{MODULE}.async_setup_entry", return_value=True
    ) as mock_async_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()

    assert result2["type"] == "create_entry"
    # NOTE(review): the original literal listed CONF_MINOR_VERSION twice
    # (as 4 and as 0x04, the same value); the duplicate key was removed.
    assert result2["data"] == {
        CONF_MINOR_VERSION: 4,
        CONF_HOST: IP_ADDRESS,
        CONF_MODEL: MODEL,
        CONF_MODEL_NUM: MODEL_NUM,
        CONF_MODEL_INFO: MODEL,
        CONF_MODEL_DESCRIPTION: MODEL_DESCRIPTION,
        CONF_REMOTE_ACCESS_ENABLED: True,
        CONF_REMOTE_ACCESS_HOST: "the.cloud",
        CONF_REMOTE_ACCESS_PORT: 8816,
    }
    assert mock_async_setup.called
    assert mock_async_setup_entry.called
async def test_discovered_by_dhcp_no_udp_response(hass):
    """Test we can setup when discovered from dhcp but no udp response."""
    with _patch_discovery(no_device=True), _patch_wifibulb():
        form_result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_DHCP}, data=DHCP_DISCOVERY
        )
        await hass.async_block_till_done()
    assert form_result["type"] == RESULT_TYPE_FORM
    assert form_result["errors"] is None

    with _patch_discovery(no_device=True), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup", return_value=True
    ) as setup_mock, patch(
        f"{MODULE}.async_setup_entry", return_value=True
    ) as setup_entry_mock:
        entry_result = await hass.config_entries.flow.async_configure(
            form_result["flow_id"], {}
        )
        await hass.async_block_till_done()

    # Only the TCP-probed fields are stored when UDP discovery fails.
    assert entry_result["type"] == "create_entry"
    assert entry_result["data"] == {
        CONF_HOST: IP_ADDRESS,
        CONF_MODEL_NUM: MODEL_NUM,
        CONF_MODEL_DESCRIPTION: MODEL_DESCRIPTION,
    }
    assert setup_mock.called
    assert setup_entry_mock.called
async def test_discovered_by_dhcp_partial_udp_response_fallback_tcp(hass):
"""Test we can setup when discovered from dhcp but part of the udp response is missing."""
with _patch_discovery(no_device=True), _patch_wifibulb():
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_DHCP}, data=DHCP_DISCOVERY
)
await hass.async_block_till_done()
assert result["type"] | |
self.input_ = DocumentInfo()
self.flag_ = []
if contents is not None: self.MergeFromString(contents)
# Accessors for the required `input` DocumentInfo field (wire tag 1).
def input(self): return self.input_
def mutable_input(self): self.has_input_ = 1; return self.input_
def clear_input(self):self.has_input_ = 0; self.input_.Clear()
def has_input(self): return self.has_input_
# Accessors for the required `output_mime_type` string field (wire tag 2).
def output_mime_type(self): return self.output_mime_type_
def set_output_mime_type(self, x):
  self.has_output_mime_type_ = 1
  self.output_mime_type_ = x
def clear_output_mime_type(self):
  if self.has_output_mime_type_:
    self.has_output_mime_type_ = 0
    self.output_mime_type_ = ""
def has_output_mime_type(self): return self.has_output_mime_type_
# Accessors for the repeated `flag` field of ConversionInput_AuxData
# elements (wire tag 3).
def flag_size(self): return len(self.flag_)
def flag_list(self): return self.flag_
def flag(self, i):
  return self.flag_[i]
def mutable_flag(self, i):
  return self.flag_[i]
def add_flag(self):
  # Appends a fresh, empty element and returns it for the caller to fill in.
  x = ConversionInput_AuxData()
  self.flag_.append(x)
  return x
def clear_flag(self):
  self.flag_ = []
def MergeFrom(self, x):
  # Merges every set field of message `x` into this message; repeated
  # `flag` elements are deep-copied and appended.
  assert x is not self
  if (x.has_input()): self.mutable_input().MergeFrom(x.input())
  if (x.has_output_mime_type()): self.set_output_mime_type(x.output_mime_type())
  for i in xrange(x.flag_size()): self.add_flag().CopyFrom(x.flag(i))
def Equals(self, x):
  # Field-by-field equality check; returns 1/0 (generated-code convention).
  if x is self: return 1
  if self.has_input_ != x.has_input_: return 0
  if self.has_input_ and self.input_ != x.input_: return 0
  if self.has_output_mime_type_ != x.has_output_mime_type_: return 0
  if self.has_output_mime_type_ and self.output_mime_type_ != x.output_mime_type_: return 0
  if len(self.flag_) != len(x.flag_): return 0
  for e1, e2 in zip(self.flag_, x.flag_):
    if e1 != e2: return 0
  return 1
def IsInitialized(self, debug_strs=None):
  # Returns 1 iff all required fields (input, output_mime_type) are set
  # and every submessage is itself initialized.  When `debug_strs` is a
  # list, a human-readable reason is appended for each missing field.
  initialized = 1
  if (not self.has_input_):
    initialized = 0
    if debug_strs is not None:
      debug_strs.append('Required field: input not set.')
  elif not self.input_.IsInitialized(debug_strs): initialized = 0
  if (not self.has_output_mime_type_):
    initialized = 0
    if debug_strs is not None:
      debug_strs.append('Required field: output_mime_type not set.')
  for p in self.flag_:
    if not p.IsInitialized(debug_strs): initialized=0
  return initialized
def ByteSize(self):
  # Serialized size assuming all required fields are set; the trailing
  # "+ 2" accounts for the one-byte tags of the two required fields.
  n = 0
  n += self.lengthString(self.input_.ByteSize())
  n += self.lengthString(len(self.output_mime_type_))
  n += 1 * len(self.flag_)
  for i in xrange(len(self.flag_)): n += self.lengthString(self.flag_[i].ByteSize())
  return n + 2
def ByteSizePartial(self):
  # Like ByteSize, but only counts fields that are actually set.
  n = 0
  if (self.has_input_):
    n += 1
    n += self.lengthString(self.input_.ByteSizePartial())
  if (self.has_output_mime_type_):
    n += 1
    n += self.lengthString(len(self.output_mime_type_))
  n += 1 * len(self.flag_)
  for i in xrange(len(self.flag_)): n += self.lengthString(self.flag_[i].ByteSizePartial())
  return n
def Clear(self):
  # Resets every field to its default/unset state.
  self.clear_input()
  self.clear_output_mime_type()
  self.clear_flag()
def OutputUnchecked(self, out):
  # Serializes assuming IsInitialized(); tag bytes 10/18/26 are fields
  # 1, 2 and 3 with wire type 2 (length-delimited).
  out.putVarInt32(10)
  out.putVarInt32(self.input_.ByteSize())
  self.input_.OutputUnchecked(out)
  out.putVarInt32(18)
  out.putPrefixedString(self.output_mime_type_)
  for i in xrange(len(self.flag_)):
    out.putVarInt32(26)
    out.putVarInt32(self.flag_[i].ByteSize())
    self.flag_[i].OutputUnchecked(out)
def OutputPartial(self, out):
  # Serializes only the fields that are set (no required-field check).
  if (self.has_input_):
    out.putVarInt32(10)
    out.putVarInt32(self.input_.ByteSizePartial())
    self.input_.OutputPartial(out)
  if (self.has_output_mime_type_):
    out.putVarInt32(18)
    out.putPrefixedString(self.output_mime_type_)
  for i in xrange(len(self.flag_)):
    out.putVarInt32(26)
    out.putVarInt32(self.flag_[i].ByteSizePartial())
    self.flag_[i].OutputPartial(out)
def TryMerge(self, d):
  # Decodes fields from decoder `d`, merging into this message and
  # skipping any unknown tags.
  while d.avail() > 0:
    tt = d.getVarInt32()
    if tt == 10:
      length = d.getVarInt32()
      tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
      d.skip(length)
      self.mutable_input().TryMerge(tmp)
      continue
    if tt == 18:
      self.set_output_mime_type(d.getPrefixedString())
      continue
    if tt == 26:
      length = d.getVarInt32()
      tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
      d.skip(length)
      self.add_flag().TryMerge(tmp)
      continue
    # Tag 0 is invalid and signals a corrupt stream.
    if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
    d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
  # Renders the message in protobuf text format; `printElemNumber`
  # annotates repeated elements with their index.
  res=""
  if self.has_input_:
    res+=prefix+"input <\n"
    res+=self.input_.__str__(prefix + "  ", printElemNumber)
    res+=prefix+">\n"
  if self.has_output_mime_type_: res+=prefix+("output_mime_type: %s\n" % self.DebugFormatString(self.output_mime_type_))
  cnt=0
  for e in self.flag_:
    elm=""
    if printElemNumber: elm="(%d)" % cnt
    res+=prefix+("flag%s <\n" % elm)
    res+=e.__str__(prefix + "  ", printElemNumber)
    res+=prefix+">\n"
    cnt+=1
  return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
  # Expands a sparse {tag: value} dict into a dense tuple indexed by tag.
  return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

# Field-number constants and per-tag metadata tables used by the
# generic ProtocolBuffer machinery.
kinput = 1
koutput_mime_type = 2
kflag = 3

_TEXT = _BuildTagLookupTable({
  0: "ErrorCode",
  1: "input",
  2: "output_mime_type",
  3: "flag",
}, 3)

_TYPES = _BuildTagLookupTable({
  0: ProtocolBuffer.Encoder.NUMERIC,
  1: ProtocolBuffer.Encoder.STRING,
  2: ProtocolBuffer.Encoder.STRING,
  3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)

_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.ConversionInput'
class ConversionOutput(ProtocolBuffer.ProtocolMessage):
  # Generated protocol-message class: the result of a single conversion.
  # Fields: required int32 error_code (tag 1); optional DocumentInfo
  # output (tag 2), constructed lazily under a lock.
  has_error_code_ = 0
  error_code_ = 0
  has_output_ = 0
  output_ = None

  def __init__(self, contents=None):
    # Lock guarding lazy construction of the optional `output` submessage.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  # Accessors for the required `error_code` field (tag 1).
  def error_code(self): return self.error_code_

  def set_error_code(self, x):
    self.has_error_code_ = 1
    self.error_code_ = x

  def clear_error_code(self):
    if self.has_error_code_:
      self.has_error_code_ = 0
      self.error_code_ = 0

  def has_error_code(self): return self.has_error_code_

  def output(self):
    # Lazily creates the submessage; the check is repeated under the
    # lock so concurrent callers construct it at most once.
    if self.output_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.output_ is None: self.output_ = DocumentInfo()
      finally:
        self.lazy_init_lock_.release()
    return self.output_

  def mutable_output(self): self.has_output_ = 1; return self.output()

  def clear_output(self):
    # NOTE: clears in place without taking lazy_init_lock_.
    if self.has_output_:
      self.has_output_ = 0;
      if self.output_ is not None: self.output_.Clear()

  def has_output(self): return self.has_output_

  def MergeFrom(self, x):
    # Merges every set field of message `x` into this message.
    assert x is not self
    if (x.has_error_code()): self.set_error_code(x.error_code())
    if (x.has_output()): self.mutable_output().MergeFrom(x.output())

  def Equals(self, x):
    # Field-by-field equality; returns 1/0 (generated-code convention).
    if x is self: return 1
    if self.has_error_code_ != x.has_error_code_: return 0
    if self.has_error_code_ and self.error_code_ != x.error_code_: return 0
    if self.has_output_ != x.has_output_: return 0
    if self.has_output_ and self.output_ != x.output_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # 1 iff required `error_code` is set and any present submessage is
    # itself initialized; reasons are appended to `debug_strs` if given.
    initialized = 1
    if (not self.has_error_code_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: error_code not set.')
    if (self.has_output_ and not self.output_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    # Serialized size assuming required fields are set ("+ 1" is the
    # one-byte tag of the required error_code field).
    n = 0
    n += self.lengthVarInt64(self.error_code_)
    if (self.has_output_): n += 1 + self.lengthString(self.output_.ByteSize())
    return n + 1

  def ByteSizePartial(self):
    # Like ByteSize but only counts fields that are actually set.
    n = 0
    if (self.has_error_code_):
      n += 1
      n += self.lengthVarInt64(self.error_code_)
    if (self.has_output_): n += 1 + self.lengthString(self.output_.ByteSizePartial())
    return n

  def Clear(self):
    # Resets every field to its default/unset state.
    self.clear_error_code()
    self.clear_output()

  def OutputUnchecked(self, out):
    # Serializes assuming IsInitialized(); tag 8 = field 1 varint,
    # tag 18 = field 2 length-delimited.
    out.putVarInt32(8)
    out.putVarInt32(self.error_code_)
    if (self.has_output_):
      out.putVarInt32(18)
      out.putVarInt32(self.output_.ByteSize())
      self.output_.OutputUnchecked(out)

  def OutputPartial(self, out):
    # Serializes only the fields that are set.
    if (self.has_error_code_):
      out.putVarInt32(8)
      out.putVarInt32(self.error_code_)
    if (self.has_output_):
      out.putVarInt32(18)
      out.putVarInt32(self.output_.ByteSizePartial())
      self.output_.OutputPartial(out)

  def TryMerge(self, d):
    # Decodes fields from decoder `d`, skipping unknown tags.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.set_error_code(d.getVarInt32())
        continue
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_output().TryMerge(tmp)
        continue
      # Tag 0 is invalid and signals a corrupt stream.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    # Renders the message in protobuf text format.
    res=""
    if self.has_error_code_: res+=prefix+("error_code: %s\n" % self.DebugFormatInt32(self.error_code_))
    if self.has_output_:
      res+=prefix+"output <\n"
      res+=self.output_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Expands a sparse {tag: value} dict into a dense tuple indexed by tag.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field-number constants and per-tag metadata tables used by the
  # generic ProtocolBuffer machinery.
  kerror_code = 1
  koutput = 2

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "error_code",
    2: "output",
  }, 2)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.ConversionOutput'
class ConversionRequest(ProtocolBuffer.ProtocolMessage):
  # Generated protocol-message class: a batch request consisting of a
  # repeated `conversion` field of ConversionInput elements (tag 1).

  def __init__(self, contents=None):
    self.conversion_ = []
    if contents is not None: self.MergeFromString(contents)

  # Accessors for the repeated `conversion` field.
  def conversion_size(self): return len(self.conversion_)
  def conversion_list(self): return self.conversion_

  def conversion(self, i):
    return self.conversion_[i]

  def mutable_conversion(self, i):
    return self.conversion_[i]

  def add_conversion(self):
    # Appends a fresh, empty element and returns it for the caller to fill in.
    x = ConversionInput()
    self.conversion_.append(x)
    return x

  def clear_conversion(self):
    self.conversion_ = []

  def MergeFrom(self, x):
    # Deep-copies and appends every element of `x`'s conversion list.
    assert x is not self
    for i in xrange(x.conversion_size()): self.add_conversion().CopyFrom(x.conversion(i))

  def Equals(self, x):
    # Element-wise equality; returns 1/0 (generated-code convention).
    if x is self: return 1
    if len(self.conversion_) != len(x.conversion_): return 0
    for e1, e2 in zip(self.conversion_, x.conversion_):
      if e1 != e2: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # 1 iff every contained element is initialized.
    initialized = 1
    for p in self.conversion_:
      if not p.IsInitialized(debug_strs): initialized=0
    return initialized

  def ByteSize(self):
    # One tag byte plus a length-prefixed payload per element.
    n = 0
    n += 1 * len(self.conversion_)
    for i in xrange(len(self.conversion_)): n += self.lengthString(self.conversion_[i].ByteSize())
    return n

  def ByteSizePartial(self):
    n = 0
    n += 1 * len(self.conversion_)
    for i in xrange(len(self.conversion_)): n += self.lengthString(self.conversion_[i].ByteSizePartial())
    return n

  def Clear(self):
    self.clear_conversion()

  def OutputUnchecked(self, out):
    # Tag 10 = field 1, wire type 2 (length-delimited).
    for i in xrange(len(self.conversion_)):
      out.putVarInt32(10)
      out.putVarInt32(self.conversion_[i].ByteSize())
      self.conversion_[i].OutputUnchecked(out)

  def OutputPartial(self, out):
    for i in xrange(len(self.conversion_)):
      out.putVarInt32(10)
      out.putVarInt32(self.conversion_[i].ByteSizePartial())
      self.conversion_[i].OutputPartial(out)

  def TryMerge(self, d):
    # Decodes fields from decoder `d`, skipping unknown tags.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_conversion().TryMerge(tmp)
        continue
      # Tag 0 is invalid and signals a corrupt stream.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    # Renders the message in protobuf text format.
    res=""
    cnt=0
    for e in self.conversion_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("conversion%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Expands a sparse {tag: value} dict into a dense tuple indexed by tag.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field-number constants and per-tag metadata tables used by the
  # generic ProtocolBuffer machinery.
  kconversion = 1

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "conversion",
  }, 1)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.ConversionRequest'
class ConversionResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
  self.result_ = []
  if contents is not None: self.MergeFromString(contents)

# Accessors for the repeated `result` field of ConversionOutput
# elements (wire tag 1).
def result_size(self): return len(self.result_)
def result_list(self): return self.result_
def result(self, i):
  return self.result_[i]
def mutable_result(self, i):
  return self.result_[i]
def add_result(self):
  # Appends a fresh, empty element and returns it for the caller to fill in.
  x = ConversionOutput()
  self.result_.append(x)
  return x
def clear_result(self):
  self.result_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.result_size()): self.add_result().CopyFrom(x.result(i))
def Equals(self, x):
if x is self: return 1
if len(self.result_) != len(x.result_): return 0
for e1, e2 in zip(self.result_, x.result_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.result_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.result_)
for i in xrange(len(self.result_)): n += self.lengthString(self.result_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.result_)
for i in xrange(len(self.result_)): n += self.lengthString(self.result_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_result()
def OutputUnchecked(self, out):
for i in xrange(len(self.result_)):
out.putVarInt32(10)
out.putVarInt32(self.result_[i].ByteSize())
self.result_[i].OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.result_)):
out.putVarInt32(10)
out.putVarInt32(self.result_[i].ByteSizePartial())
self.result_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt | |
<gh_stars>0
import os
import logging
import socket
import sys
import yaml
from urllib.request import urlopen
from urllib.request import urlretrieve
from log_config import log_setup
from helper import create_dir, check_path, get_ip, get_network_device_mac, \
set_values, validate_cidr, validate_ip, \
validate_network_cidr, validate_port, validate_url, \
check_user_input_if_integer
class InventoryFile:
def __init__(self, inventory_dict = {}):
self.inventory_dict = inventory_dict
self.software_dir = ''
self.input_choice = ''
self.ocp43_client_base_url = 'https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest-4.3'
self.ocp43_rhcos_base_url = 'https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/latest/4.3.0'
self.ocp_urls = {'openshift_client': '{}/openshift-client-linux.tar.gz'.format(self.ocp43_client_base_url),
'openshift_installer': '{}/openshift-install-linux.tar.gz'.format(self.ocp43_client_base_url),
'initramfs': '{}/rhcos-4.3.0-x86_64-installer-initramfs.img'.format(self.ocp43_rhcos_base_url),
'kernel_file': '{}/rhcos-4.3.0-x86_64-installer-kernel'.format(self.ocp43_rhcos_base_url),
'uefi_file': '{}/rhcos-4.3.0-x86_64-metal.raw.gz'.format(self.ocp43_rhcos_base_url)}
self.task_inputs = """
1: 'download ocp 4.3 software',
2: 'bootstrap node details',
3: 'master node details',
4: 'worker node details',
5: 'network setup',
6: 'disk info',
7: 'bind dns',
8: 'http webserver',
9: 'dhcp',
10: 'ignition config',
11: 'print inventory',
12: 'generate inventory file',
13: 'Exit'
"""
def clear_screen(self):
"""
performs clean screen
"""
os.system('clear')
def set_keys(self):
"""
sets the initial keys for the inventory file
"""
self.inventory_dict['csah'] = {'hosts': '{}'.format(socket.getfqdn()), 'vars': {}}
def generate_inputs_menu(self):
"""
generates a menu of tasks for user input for each task
"""
self.clear_screen()
self.input_choice = ''
valid_choices = range(1,14)
while self.input_choice not in valid_choices:
logging.info('{}'.format(self.task_inputs))
try:
self.input_choice = int(input('task choice for necessary inputs: '))
if self.input_choice not in valid_choices:
logging.warn('Invalid choice. Valid choice is an integer from 1-13')
except ValueError:
logging.error('Strings not a valid choice')
logging.info('user choice is {}'.format(self.input_choice))
self.get_user_inputs_for_task()
def get_user_inputs_for_task(self):
"""
performs tasks based on user input
"""
if self.input_choice == 13:
sys.exit()
elif self.input_choice == 1:
self.get_software_download_dir()
self.get_software()
elif self.input_choice == 2:
self.get_bootstrap_node()
elif self.input_choice == 3:
self.get_master_nodes()
elif self.input_choice == 4:
self.get_worker_nodes()
elif self.input_choice == 5:
self.set_bond_network_details()
elif self.input_choice == 6:
self.get_disk_name()
elif self.input_choice == 7:
self.get_dns_details()
elif self.input_choice == 8:
self.get_http_details()
elif self.input_choice == 9:
self.dhcp_lease_times()
elif self.input_choice == 10:
self.get_ignition_details()
elif self.input_choice == 11:
self.display_inventory()
elif self.input_choice == 12:
self.yaml_inventory()
sys.exit()
self.generate_inputs_menu()
def get_software_download_dir(self):
"""
get software download directory to download OCP 4.3 software bits
"""
self.clear_screen()
default = '/home/ansible/files'
self.software_dir = input('provide complete path of directory to download OCP 4.3 software bits\n'
'default [/home/ansible/files]: ')
self.software_dir = set_values(self.software_dir, default)
dest_path_exist = check_path(self.software_dir, isdir=True)
if dest_path_exist:
logging.info('directory {} already exists'.format(self.software_dir))
else:
logging.info('Creating directory {}'.format(self.software_dir))
create_dir(self.software_dir)
self.inventory_dict['csah']['vars']['software_src'] = self.software_dir
def get_software(self):
"""
performs OCP 4.3 software bits download from the base urls
specified in the class __init__
"""
logging.info('downloading OCP 4.3 software bits into {}'.format(self.software_dir))
for url_key in self.ocp_urls.keys():
url = self.ocp_urls[url_key]
dest_name = url.split('/')[-1]
dest_path = self.software_dir + '/' + dest_name
dest_path_exist = check_path(dest_path, isfile=True)
url_check = ''
if dest_path_exist:
logging.info('file {} already exists in {}'.format(dest_name, self.software_dir))
self.inventory_dict['csah']['vars'][url_key] = dest_name
else:
url_check = validate_url(url)
if url_check == '':
logging.error('file {} in {} is not available'.format(dest_name, url_key))
self.inventory_dict['csah']['vars'][url_key] = ''
if url_check != '' and url_check.code == 200:
logging.info('downloading {}'.format(dest_name))
urlretrieve('{}'.format(url),'{}/{}'.format(self.software_dir, dest_name))
self.inventory_dict['csah']['vars'][url_key] = dest_name
def get_bootstrap_node(self):
"""
get details about bootstrap node
"""
self.clear_screen()
default = 'bootstrap'
bootstrap_name = input('enter the bootstrap node name\n'
'default [bootstrap]: ')
bootstrap_name = set_values(bootstrap_name, default)
bootstrap_ip = get_ip(node_name=bootstrap_name, ip_type='os')
bootstrap_ip = validate_ip(bootstrap_ip)
bootstrap_mac = get_network_device_mac(node_name=bootstrap_name, ip_type='idrac')
logging.info('adding bootstrap_node values as name: {} ip: {} mac: {}'.format(bootstrap_name, bootstrap_ip,
bootstrap_mac))
self.inventory_dict['csah']['vars']['bootstrap_node'] = [{'name': '{}'.format(bootstrap_name),
'ip': '{}'.format(bootstrap_ip),
'mac': '{}'.format(bootstrap_mac)}]
def get_master_nodes(self):
"""
get details about master node
"""
default = 3
master_nodes_count = input('enter number of master nodes\n'
'default [3]: ')
master_nodes_count = set_values(master_nodes_count, default, check='integer')
master_keys = ['name','ip','mac']
self.inventory_dict['csah']['vars']['master_nodes'] = []
for num in range(master_nodes_count):
master_values = []
default = 'etcd-{}'.format(num)
master_name = input('enter the master {} node name \n'
'default [{}]: '.format(num, default))
master_name = set_values(master_name, default)
master_ip = get_ip(node_name=master_name, ip_type='os')
master_mac = get_network_device_mac(node_name=master_name, ip_type='idrac')
master_values.append(master_name)
master_values.append(master_ip)
master_values.append(master_mac)
master_node_dict_pairs = dict(zip(master_keys, master_values))
logging.info('adding {} values as name: {} ip: {} mac: {}'.format(master_name, master_name,
master_ip, master_mac))
self.inventory_dict['csah']['vars']['master_nodes'].append(master_node_dict_pairs)
self.clear_screen()
self.inventory_dict['csah']['vars']['number_of_masters'] = master_nodes_count
def get_worker_nodes(self):
"""
get details about worker node
"""
worker_nodes_count = input('enter number of worker nodes\n'
'default [2]: ')
default = 2
worker_nodes_count = set_values(worker_nodes_count, default, check='integer')
worker_keys = ['name','ip','mac']
self.inventory_dict['csah']['vars']['worker_nodes'] = []
for num in range(worker_nodes_count):
worker_values = []
default = 'worker-{}'.format(num)
worker_name = input('enter the worker {} node name\n'
'default [{}]: '.format(num, default))
worker_name = set_values(worker_name, default)
worker_ip = get_ip(node_name=worker_name, ip_type='os')
worker_mac = get_network_device_mac(node_name=worker_name, ip_type='idrac')
worker_values.append(worker_name)
worker_values.append(worker_ip)
worker_values.append(worker_mac)
worker_node_dict_pairs = dict(zip(worker_keys, worker_values))
logging.info('adding {} values as name: {} ip: {} mac: {}'.format(worker_name, worker_name,
worker_ip, worker_mac))
self.inventory_dict['csah']['vars']['worker_nodes'].append(worker_node_dict_pairs)
self.clear_screen()
self.inventory_dict['csah']['vars']['number_of_workers'] = worker_nodes_count
def dhcp_lease_times(self):
"""
get dhcp lease times
"""
default_lease_time = input('enter a default lease time for dhcp\n'
'default [800]: ')
default = 800
default_lease_time = set_values(default_lease_time, default, check='integer')
max_lease_time = input('enter max lease time for dhcp\n'
'default [7200]: ')
default = 7200
max_lease_time = set_values(max_lease_time, default, check='integer')
logging.info('adding default_lease_time: {} max_lease_time: {}'.format(default_lease_time,
max_lease_time))
self.inventory_dict['csah']['vars']['default_lease_time'] = default_lease_time
self.inventory_dict['csah']['vars']['max_lease_time'] = max_lease_time
def set_bond_network_details(self):
"""
get bond details and user interfaces used for bond
"""
self.clear_screen()
default = 'bond0'
name = input('enter bond name\n'
'default [bond0]: ')
name = set_values(name, default)
interfaces = input('enter bond interfaces seperated by \',\'\n'
'default [ens2f0,ens2f1]: ')
default = 'ens2f0,ens2f1'
interfaces = set_values(interfaces, default)
default = 'mode=active-backup,miimon=100,primary=ens2f0'
options = input('enter bond options \n'
'default [mode=active-backup,miimon=100,primary=ens2f0]: ')
options = set_values(options, default)
logging.info('adding bond_name: {} interfaces: {} bond_options: {}'.format(name, interfaces, options))
self.inventory_dict['csah']['vars']['bond_name'] = name
self.inventory_dict['csah']['vars']['bond_interfaces'] = interfaces
self.inventory_dict['csah']['vars']['bond_options'] = options
def get_dns_details(self):
"""
get zone config file and cluster name used by DNS
"""
self.clear_screen()
zone_file = input('specify zone file \n'
'default [/var/named/ocp.zones]: ')
default = '/var/named/ocp.zones'
zone_file = set_values(zone_file, default)
cluster_name = input('specify cluster name \n'
'default [ocp]: ')
default = 'ocp'
cluster_name = set_values(cluster_name, default)
logging.info('adding zone_file: {} cluster: {}'.format(zone_file, cluster_name))
self.inventory_dict['csah']['vars']['default_zone_file'] = zone_file
self.inventory_dict['csah']['vars']['cluster'] = cluster_name
def get_http_details(self):
"""
get http details and directories names created under /var/www/html
"""
self.clear_screen()
port = input('enter http port \n'
'default [8080]: ')
default = 8080
port = set_values(port, default)
port = validate_port(port)
ignition_dir = input('specify dir where ignition files will be placed \n'
'directory will be created under /var/www/html \n'
'default [ignition]: ')
default = 'ignition'
ignition_dir = set_values(ignition_dir, default)
ocp_version = input('specify the version of ocp \n'
'default [4.3]: ')
default = 4.3
ocp_version = set_values(ocp_version, default)
logging.info('adding http_port: {} http_ignition: {} version: {}'.format(port, ignition_dir, ocp_version))
self.inventory_dict['csah']['vars']['http_port'] = int(port)
self.inventory_dict['csah']['vars']['os'] = 'rhcos'
self.inventory_dict['csah']['vars']['http_ignition'] = ignition_dir
self.inventory_dict['csah']['vars']['version'] = ocp_version
    def get_disk_name(self):
        """
        disknames used for each node type.

        Prompts for the install device of the master, bootstrap and worker
        nodes (default nvme0n1) and stores each in the inventory vars.
        """
        self.clear_screen()
        default = 'nvme0n1'
        logging.info('ensure disknames are absolutely available. Otherwise OpenShift install fails')
        master_install_device = input('specify the master device that will be installed\n'
                                      'default [nvme0n1]: ')
        master_install_device = set_values(master_install_device, default)
        bootstrap_install_device = input('specify the bootstrap device that will be installed\n'
                                         'default [nvme0n1]: ')
        bootstrap_install_device = set_values(bootstrap_install_device, default)
        worker_install_device = input('specify the worker device that will be installed\n'
                                      'default [nvme0n1]: ')
        worker_install_device = set_values(worker_install_device, default)
        # NOTE(review): the backslash continues the string literal, so the
        # logged message embeds the continuation line's leading spaces.
        logging.info('adding master_install_device: {} bootstrap_install_device: {}\
                     worker_install_device: {}'.format(master_install_device, bootstrap_install_device,
                                                       worker_install_device))
        self.inventory_dict['csah']['vars']['master_install_device'] = master_install_device
        self.inventory_dict['csah']['vars']['bootstrap_install_device'] = bootstrap_install_device
        self.inventory_dict['csah']['vars']['worker_install_device'] = worker_install_device
def set_haproxy(self):
"""
sets default values for haproxy
"""
logging.info('currently only haproxy is supported for load balancing')
self.inventory_dict['csah']['vars']['proxy'] = 'haproxy'
self.inventory_dict['csah']['vars']['haproxy_conf'] = '/etc/haproxy/haproxy.cfg'
self.inventory_dict['csah']['vars']['master_ports'] = [{'port': 6443, 'description': 'apiserver'},
{'port': 22623 , 'description': 'configserver'}]
self.inventory_dict['csah']['vars']['worker_ports'] = [{'port': 80, 'description': 'http'},
{'port': 443, 'description': 'https'}]
def get_ignition_details(self):
"""
get details from users used for install-config.yaml file
"""
self.clear_screen()
default = 'core'
install_user = input('enter the user used to install openshift\n'
'DONOT CHANGE THIS VALUE\n'
'default [core]: ')
install_user = set_values(install_user, default)
default = 'openshift'
install_dir = input('enter the directory where openshift installs\n'
'directory will be created under /home/core\n'
'default [openshift]: ')
install_dir = set_values(install_dir, default)
default = '10.128.0.0/14'
pod_network_cidr = input('enter the pod network cidr\n'
'default [10.128.0.0/14]: ')
pod_network_cidr = set_values(pod_network_cidr, default)
logging.info('pod network cidr: {}'.format(pod_network_cidr))
pod_network_cidr = validate_network_cidr(pod_network_cidr)
default = 23
host_prefix = input('specify cidr notation for number of ips in each node: \n'
'cidr number should be an integer and less than 32\n'
'default [23]: ')
host_prefix = set_values(host_prefix, default)
host_prefix = validate_cidr(host_prefix)
default = '172.30.0.0/16'
service_network_cidr = input('specify the service network cidr\n'
'default [172.30.0.0/16]: ')
service_network_cidr = set_values(service_network_cidr, default)
service_network_cidr = validate_network_cidr(service_network_cidr)
logging.info('adding install_user: {} install_dir: {} cluster_network_cidr: {}\
host_prefix: {} service_network_cidr: {}'.format(install_user, install_dir,
pod_network_cidr, host_prefix,
service_network_cidr))
self.inventory_dict['csah']['vars']['install_user'] = install_user
| |
<gh_stars>1-10
import multiprocessing
import os
from collections import Callable
from platform import system
from time import time
from socket import gethostname
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec, rcParams
from matplotlib.ticker import MaxNLocator
from matplotlib.figure import Figure
from bokeh.plotting import gridplot, output_file, show
import bokeh.plotting.figure as bokeh_figure
from bokeh.models import Span
from scipy.stats import truncnorm
from common.document_wrapper import DocumentWrapper, niceprint
## Importing my code
from termcolor import colored
from common.files import pickle_dump
from common.mathematics import nCr
from common.config import load_config
from common.document_wrapper import DocumentWrapper
import warnings
# Load the project configuration once at import time; keep only the tmp dir.
_loaded_config = load_config()
# results_dir = _loaded_config["results"]
tmp_dir = _loaded_config["tmp"]
del _loaded_config
# Shared text wrapper used when formatting long plot titles.
wrapper = DocumentWrapper(width=75)
def maximize_plot():
    """Best-effort attempt to maximise the current matplotlib figure window.

    Backend-specific: uses Tk's ``state('zoomed')`` on Windows, otherwise
    resizes the window to its maximum size.  Failures (e.g. headless or
    non-window backends) are deliberately ignored.
    """
    try:
        manager = plt.get_current_fig_manager()
        if "wind" in system().lower():
            manager.window.state('zoomed')
        else:
            manager.resize(*manager.window.maxsize())
    except Exception:
        # BUGFIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; keep best-effort behaviour but let those propagate.
        pass
class HastingsResults:
""" Class to represent Metropolis Hastings results. """
def __init__(self, params, theta_init, accepted, rejected, observations_count: int, observations_samples_count: int,
mh_sampling_iterations: int, eps=0, sd=0.15, burn_in=0.25, as_scatter=False, pretitle="", title="", bins=20, last_iter=0,
timeout=0, time_it_took=0, true_point=False, opt_point=False, parameter_intervals=None):
"""
Args:
params (list of strings): parameter names
accepted (np.array): accepted points with iteration index
rejected (np.array): rejected points with iteration index
observations_count (int): total number of observations
observations_samples_count (int): sample size from the observations
mh_sampling_iterations (int): number of iterations/steps of walker in param space
eps (number): very small value used as probability of non-feasible values in prior
sd (float): variation of walker in parameter space
burn_in (number): fraction or count of how many samples will be trimmed from beginning
pretitle (string): title to be put in front of title
title (string): title of the plot
bins (int): number of segments in the heatmap plot (used only for 2D param space)
true_point (point): The true value in the parameter space - only to be shown on the plot
opt_point (point): Point obtained by the optimisation method - only to be shown on the plot
parameter_intervals (list of pairs): Pairs of domains of respective parameter
"""
## Inside variables
self.params = params
self.theta_init = theta_init
## Results
self.accepted = accepted
self.rejected = rejected
## Results setting
self.observations_count = observations_count
self.observations_samples_count = observations_samples_count
self.mh_sampling_iterations = mh_sampling_iterations
self.eps = eps
self.sd = sd
self.burn_in = burn_in
# try: ## backward compatibility
# self.not_burn_in = not_burn_in
# except AttributeError as exx:
# if "'HastingsResults' object has no attribute 'not_burn_in'" in exx:
# self.not_burn_in = show
self.title = title
self.pretitle = pretitle
self.bins = bins
self.as_scatter = as_scatter
self.last_iter = last_iter
self.timeout = timeout
self.time_it_took = time_it_took
self.parameter_intervals = parameter_intervals
## SET THE TRUE POINT
if true_point:
if len(true_point) is len(self.params):
self.true_point = true_point
else:
raise Exception(f"The dimension of the given true point ({len(true_point)}) does not match")
else:
self.true_point = False
## SET THE OPTIMISED POINT
if opt_point:
if len(opt_point) is len(self.params):
self.opt_point = opt_point
else:
raise Exception(f"The dimension of the given optimised point ({len(opt_point)}) does not match")
else:
self.opt_point = False
def get_burn_in(self):
""" Returns fraction of the burned-in part. """
if 0 < self.burn_in < 1:
return self.burn_in
elif len(self.accepted):
return min(1, self.burn_in / len(self.accepted))
else:
return None
def merge_acc_and_rej(self):
""" Returns both, accepted and rejected samples, in a single list. """
spam = np.empty([len(self.accepted) + len(self.rejected), len(self.params) + 1])
acc_index = 0
rej_index = 0
for i in range(len(spam)):
if acc_index < len(self.accepted):
if int(self.accepted[acc_index][-1]) == i:
spam[i] = np.append(self.accepted[acc_index][:-1], True)
acc_index = acc_index + 1
else:
spam[i] = np.append(self.rejected[rej_index][:-1], False)
rej_index = rej_index + 1
else:
spam[i] = np.append(self.rejected[rej_index][:-1], False)
rej_index = rej_index + 1
return spam
# spam = np.empty([len(self.accepted) + len(self.rejected), len(self.params) + 1])
# acc_index = 0
# rej_index = 0
# for i in range(len(self.accepted)):
# if int(self.accepted[acc_index][-1]) == i:
# spam[i] = np.append(self.accepted[acc_index][:-1], True)
# acc_index = acc_index + 1
# else:
# spam[i] = np.append(self.rejected[rej_index][:-1], False)
# rej_index = rej_index + 1
# for i in range(acc_index + rej_index, len(spam)):
# spam[i] = np.append(self.rejected[rej_index][:-1], False)
# rej_index = rej_index + 1
# return spam
# spam = []
# acc_index = 0
# rej_index = 0
# for i in range(len(self.accepted)):
# if int(self.accepted[acc_index][-1]) == i:
# spam.append(np.append(self.accepted[acc_index][:-1], True))
# acc_index = acc_index + 1
# else:
# spam.append(np.append(self.rejected[rej_index][:-1], False))
# rej_index = rej_index + 1
# for i in range(rej_index, len(self.rejected)):
# spam.append(np.append(self.rejected[rej_index][:-1], False))
# return spam
def keep_index(self, burn_in=False):
""" Translates burn-in into index which should be kept. """
if not burn_in:
burn_in = self.burn_in
if 0 < burn_in < 1:
keep_index = int(burn_in * self.accepted.shape[0]) + 1
else:
keep_index = int(burn_in) + 1
burn_in = round(burn_in / self.accepted.shape[0], 2)
return keep_index, burn_in
def get_not_burn_in(self):
""" Returns fraction of not burned-in part. """
if self.get_burn_in() is not None:
return 1 - self.get_burn_in()
else:
return None
def get_all_accepted(self):
""" Returns the list of ALL accepted point. """
return self.accepted
def get_accepted(self):
""" Return the list of TRIMMED accepted points. """
keep_index, burn_in = self.keep_index(self.burn_in)
return self.accepted[keep_index:]
def set_accepted(self, accepted):
""" Sets the accepted points ."""
self.accepted = accepted
def set_rejected(self, rejected):
""" Sets rejected points. """
self.rejected = rejected
def set_burn_in(self, burn_in):
""" Sets burn-in period. """
self.burn_in = burn_in
def set_bins(self, bins):
""" Sets bins, used in the plots. """
self.bins = bins
def set_as_scatter(self, as_scatter):
""" Sets as_scatter, used in plots"""
self.as_scatter = as_scatter
def set_true_point(self, true_point):
""" Sets true point """
self.true_point = true_point
def set_opt_point(self, opt_point):
""" Sets optimised point """
self.opt_point = opt_point
def get_acc_as_a_list(self):
""" Returns accepted points in a list. """
return self.accepted.tolist()
def get_rej_as_a_list(self):
""" Returns rejected points in a list. """
return self.rejected.tolist()
def fix_missing_parameter_domains(self):
""" Backward compatibility fix for missing attribute of parameter domains/intervals """
self.parameter_intervals = []
for param_index, param in enumerate(self.params):
curr_min = float("inf")
curr_max = float("-inf")
# try:
# spam = self.accepted + self.rejected
# except TypeError as err:
# if self.accepted is None:
# spam = self.rejected
# elif self.rejected is None:
# spam = self.accepted
# else:
# raise err
for point in self.accepted:
point = point[param_index]
if point < curr_min:
curr_min = point
if point > curr_max:
curr_max = point
for point in self.rejected:
point = point[param_index]
if point < curr_min:
curr_min = point
if point > curr_max:
curr_max = point
self.parameter_intervals.append([curr_min, curr_max])
def show_mh_heatmap(self, where=False, bins=False, burn_in=None, as_scatter=False, debug=False, show_true_point=False, show_opt_point=False):
""" Visualises the result of Metropolis Hastings as a heatmap
Args:
where (tuple/list): output matplotlib sources to output created figure
bins (int): number of segments in the plot (used only for heatmap - 2D param space)
burn_in (number or None): discards the fraction/number of accepted points, None - use class value (to trim burn-in period)
as_scatter (bool): Sets the plot to scatter plot even for 2D output
debug (bool): if True extensive print will be used
show_true_point (bool): if True, true point will be shown
show_opt_point (bool): if True, optimised point will be shown
@author: xhajnal
@edit: denis
"""
# import matplotlib as mpl
# mpl.rcParams.update(mpl.rcParamsDefault)
# plt.style.use('default')
if self.accepted.size == 0:
raise Exception("Set of accepted points is empty!")
## Backwards compatibility
if len(self.params) == len(self.accepted[0]):
print("old data type")
indices = np.linspace(1, len(self.accepted), num=len(self.accepted))
indices = np.array(list(map(lambda x: [x], indices)))
self.accepted = np.hstack((self.accepted, indices))
# for index, item in enumerate(self.accepted):
# self.accepted[index] = self.accepted[index]
if debug:
print("burn-in", burn_in)
print("self.accepted", self.accepted)
if burn_in is None:
try: ## Backward compatibility
burn_in = self.burn_in
except AttributeError as exx:
if "'HastingsResults' object has no attribute 'burn_in'" in str(exx):
try:
burn_in = (100 - self.not_burn_in)/100 ## Backward compatibility
self.burn_in = burn_in
except:
pass
try:
if 0 <= self.show <= 1: ## Backward compatibility
burn_in = 1 - self.show ## Backward compatibility
elif 0 <= self.show <= 100: ## Backward compatibility
burn_in = 1 - self.show/100 ## Backward compatibility
elif self.show < 0: ## Backward compatibility
burn_in = len(self.accepted) + self.show ## Backward compatibility
else:
burn_in = len(self.accepted) - self.show ## Backward compatibility
self.burn_in = burn_in
except:
pass
try: ## backward compatibility
if self.mh_sampling_iterations < 0:
pass
except AttributeError as exx:
self.mh_sampling_iterations = self.MH_sampling_iterations ## Backward compatibility
if burn_in < 0:
raise Exception("MH - wrong burn-in setting.")
if burn_in > len(self.accepted):
raise Exception("MH - Burn-in values | |
import sys
import time
import traceback
import pygame.mixer
from pygame.locals import *
import bullet
import enemyplan
import myplan
import supply
from enemyplan import *
# Initialise pygame and its audio mixer.
pygame.init()
pygame.mixer.init()
# Clock used to cap the frame rate in the main loop.
clock = pygame.time.Clock()
# Window size (width x height, pixels).
size = width, height = 512, 900
screen = pygame.display.set_mode(size)
# Window title.
pygame.display.set_caption('TylerXixi ------------现代飞机大战------------')
# Load images.
# Background image.
background = pygame.image.load('image\\background\\1.jpg').convert_alpha()
# Game window icon.
ico = pygame.image.load('image\\ICO\\J20.ico')
pygame.display.set_icon(ico)
# Images shown while the game is paused.
people_pause_image = pygame.image.load('image\\Pause\\j20_gaitubao_656x464.png').convert_alpha()
font_image = pygame.image.load('image\\Pause\\font22.png').convert_alpha()
# Load game audio.
# Main background music.
pygame.mixer.music.load('Wav\\back music\\backmusci.mp3')
pygame.mixer.music.set_volume(0.03)
# Background music once level 4 is reached.
level4_backmusic = pygame.mixer.Sound('Wav\\back music\\4级BGM.mp3')
level4_backmusic.set_volume(0.1)
# Background music once level 6 is reached.
level6_backmusic = pygame.mixer.Sound("Wav\\back music\\6级BGM.mp3")
level6_backmusic.set_volume(0.2)
# Explosion sounds, by enemy size.
supperbig_sound = pygame.mixer.Sound('Wav\\sound\\supperbig_sound.ogg')
supperbig_sound.set_volume(0.03)
big_sound = pygame.mixer.Sound('Wav\\sound\\big_sound.ogg')
big_sound.set_volume(0.1)
mid_sound = pygame.mixer.Sound('Wav\\sound\\mid_sound.ogg')
mid_sound.set_volume(0.03)
small_sound = pygame.mixer.Sound('Wav\\sound\\small_sound.ogg')
small_sound.set_volume(0.03)
# Voice clips and event sounds.
appear_bigplan = pygame.mixer.Sound('Wav\\sound\\大飞机来咯-1_2.ogg')
appear_bigplan.set_volume(0.05)
die_Myplan = pygame.mixer.Sound('Wav\\sound\\哦豁-.ogg')
die_Myplan.set_volume(0.05)
GameOver_plan = pygame.mixer.Sound('Wav\\sound\\游戏结束-.ogg')
GameOver_plan.set_volume(0.05)
boom_sound = pygame.mixer.Sound('Wav\\sound\\核弹.ogg')
boom_sound.set_volume(0.5)
supply_appear = pygame.mixer.Sound('Wav\\sound\\补给箱已经-.ogg')
supply_appear.set_volume(0.05)
get_bomb = pygame.mixer.Sound('Wav\\sound\\获得核弹.ogg')
get_bomb.set_volume(0.05)
too_much_bomb = pygame.mixer.Sound('Wav\\sound\\最大炸弹量.ogg')
too_much_bomb.set_volume(0.05)
too_much_life = pygame.mixer.Sound('Wav\\sound\\最大生命值.ogg')
too_much_life.set_volume(0.05)
get_bullet = pygame.mixer.Sound('Wav\\sound\\获得子弹.ogg')
get_bullet.set_volume(0.05)
get_life = pygame.mixer.Sound('Wav\\sound\\获得生命.ogg')
get_life.set_volume(0.05)
launch_bullet = pygame.mixer.Sound('Wav\\sound\\普通子弹发射.ogg')
launch_bullet.set_volume(0.01)
# Level-up voice clips.
level2_sound = pygame.mixer.Sound('Wav\\sound\\二级.ogg')
level2_sound.set_volume(0.1)
level3_sound = pygame.mixer.Sound('Wav\\sound\\三级_1_1.ogg')
level3_sound.set_volume(0.1)
level4_sound = pygame.mixer.Sound('Wav\\sound\\四级_1_1.ogg')
level4_sound.set_volume(0.1)
level5_sound = pygame.mixer.Sound('Wav\\sound\\五级_1_1.ogg')
level5_sound.set_volume(0.1)
level6_sound = pygame.mixer.Sound('Wav\\sound\\六级_1_1.ogg')
level6_sound.set_volume(0.1)
# Health-bar colour definitions (RGB).
BLACK = (0, 0, 0)
RED = (224, 30, 30)
GREEN = (24, 220, 24)
WHITE = (255, 255, 255)
Font_color = (191, 239, 255)
# Spawn small enemy planes.
def add_small_enemy(group1, group2, num):
    """Create `num` SmallPlan enemies and add each to group1 and group2."""
    for _ in range(num):
        enemy = enemyplan.SmallPlan(size)
        group1.add(enemy)
        group2.add(enemy)
# Spawn medium enemy planes.
def add_mid_enemy(group1, group2, num):
    """Create `num` MidPlan enemies and add each to group1 and group2."""
    for _ in range(num):
        enemy = enemyplan.MidPlan(size)
        group1.add(enemy)
        group2.add(enemy)
# Spawn large enemy planes.
def add_big_enemy(group1, group2, num):
    """Create `num` BigPlan enemies and add each to group1 and group2."""
    for _ in range(num):
        enemy = enemyplan.BigPlan(size)
        group1.add(enemy)
        group2.add(enemy)
# Speed-up helper applied to a sprite group when the player levels up.
def inc_speed(target, inc):
    """Add `inc` to the `speed` attribute of every element of `target`."""
    for sprite in target:
        sprite.speed += inc
def main():
# 整除对象
delay = 100
# 等级设置
level = 1
# 分数设置
score = 0
# 分数字体设置
score_font = pygame.font.Font('Font\\HYZhuZiMuTouRenW.ttf', 40)
# 控制图片状态
switch_image = True
# 循环状态
running = True
# 控制暂停状态
pause = False
# 控制声音状态
voice_pause = False
# 控制文件打开次数
recorded = False
# 导入暂停图片
pause_nor_image = pygame.image.load('image\\Pause\\not pause_white.png').convert_alpha()
pause_pressd_image = pygame.image.load('image\\Pause\\not pause_gray.png').convert_alpha()
resumer_nor_image = pygame.image.load('image\\Pause\\resumer_white.png').convert_alpha()
resumer_pressd_image = pygame.image.load('image\\Pause\\resumer_gray.png').convert_alpha()
# 导入声音图片
voice_image_blue = pygame.image.load('image\\voice\\voice (1)_gaitubao_66x66.png').convert_alpha()
voice_image_green = pygame.image.load('image\\voice\\voice (2)_gaitubao_66x66.png').convert_alpha()
pause_voice_image_blue = pygame.image.load('image\\voice\\pause_voice (1)_gaitubao_66x66.png').convert_alpha()
pause_voice_image_green = pygame.image.load('image\\voice\\pause_voice (2)_gaitubao_66x66.png').convert_alpha()
# 导入结束、重开、GameOver、logo图片
end_image = pygame.image.load('image\\restart\\G2.png').convert_alpha()
end_rect = end_image.get_rect()
again_image = pygame.image.load("image\\restart\\重新开始.png").convert_alpha()
again_rect = again_image.get_rect()
gameover_image = pygame.image.load("image\\restart\\结束游戏.png").convert_alpha()
gameover_rect = gameover_image.get_rect()
logo_image = pygame.image.load('image\\restart\\ico.png').convert_alpha()
logo_font = pygame.image.load('image\\restart\\LogoFont.png').convert_alpha()
# 结束字体
gameover_font = pygame.font.Font("Font\\华文圆体 REGULAR.TTF", 48)
# 导入复活图片
reset_life = pygame.image.load('image\\life\\Rese_life.png').convert_alpha()
reset_font_image = pygame.image.load('image\\life\\reset_font_life.png').convert_alpha()
# 导入生命UI图标
life_image = pygame.image.load('image\\boom\\LIFE.png').convert_alpha()
# 获取生命图标矩形位置
life_rect = life_image.get_rect()
# 设置生命剩余字体
life_font = pygame.font.Font('Font\\华文圆体 REGULAR.TTF', 45)
# 生命数量
life_num = 3
# 导入炸弹UI图标
boom_image = pygame.image.load('image\\boom\\BOOM.png').convert_alpha()
# 获取炸弹的矩形位置
boom_rect = boom_image.get_rect()
# 设置炸弹剩余字体
boom_font = pygame.font.Font('Font\\华文圆体 REGULAR.TTF', 45)
# 设置炸弹的数量
boom_num = 3
# 实例化补给包
# 子弹补给
bullet_supply = supply.BulletSupply(size)
# 核弹补给
bomb_supply = supply.BombSupply(size)
# 生命补给
life_supply = supply.LifeSupply(size)
# 设置每40秒放发任意一个补给包
supply_timer = USEREVENT
pygame.time.set_timer(supply_timer, 40 * 1000)
# 设置超级子弹的发射时间
double_bullet_timer = USEREVENT + 1
# 设置无敌时间
invincible_timer = USEREVENT + 2
# 标准是否使用超级子弹
is_double_bullet = False
# 获取pause图片的矩形
paused_rect = pause_pressd_image.get_rect()
# 初始化图片的位置
paused_rect.left, paused_rect.top = width - paused_rect.width - 5, 5
# 默认显示图标
paused_image = pause_nor_image
# 获取声音矩形图像
voice_rect = voice_image_blue.get_rect()
# 初始化图像位置
voice_rect.left, voice_rect.top = width - voice_rect.width - 5, 75
# 默认显示图
voice_image = voice_image_blue
#
# 播放背景音乐:
pygame.mixer.music.play(-1)
# 生成我方飞机
me = myplan.MyPlan(size)
# 生成普通子弹 设置添加子弹的列表
bullet1 = []
# 添加图片索引
bullet1_index = 0
# 添加子弹数量
bullet1_nums = 7
# 将子弹迭代拿出并添加到列表
for i in range(bullet1_nums):
bullet1.append(bullet.Bullet1(me.rect.midtop))
# 生成超级子弹
# 设置添加子弹的列表
bullet2 = []
# 添加图片索引
bullet2_index = 0
# 添加子弹数量
bullet2_nums = 12
# 将子弹迭代拿出并添加到列表
for i in range(bullet2_nums//2):
bullet2.append(bullet.Bullet2((me.rect.centerx-55, me.rect.centery)))
bullet2.append(bullet.Bullet2((me.rect.centerx+20, me.rect.centery)))
# 生成敌方飞机
enemys = pygame.sprite.Group()
# 生成小型敌机
small_enemy = pygame.sprite.Group()
add_small_enemy(small_enemy, enemys, 14)
# 生成中型飞机
mid_enemy = pygame.sprite.Group()
add_mid_enemy(mid_enemy, enemys, 6)
# 生成大型飞机
big_enemy = pygame.sprite.Group()
add_big_enemy(big_enemy, enemys, 2)
# 中弹图片索引
small_destory_index = 0
mid_destory_index = 0
big_destory_index = 0
me_destory_index = 0
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == MOUSEBUTTONDOWN:
if event.button == 1 and paused_rect.collidepoint(event.pos):
pause = not pause
# 暂停游戏所有音效
if pause:
pygame.time.set_timer(supply_timer, 0)
pygame.mixer.music.pause()
pygame.mixer.pause()
else:
pygame.time.set_timer(supply_timer, 40*1000)
pygame.mixer.music.unpause()
pygame.mixer.unpause()
elif event.type == MOUSEMOTION:
if paused_rect.collidepoint(event.pos):
if pause:
paused_image = resumer_pressd_image
else:
paused_image = pause_pressd_image
else:
if pause:
paused_image = resumer_nor_image
else:
paused_image = pause_nor_image
# 声音控制
elif event.type == MOUSEBUTTONDOWN:
if event.button == 1 and voice_rect.collidepoint(event.pos):
voice_pause = not voice_pause
elif event.type == MOUSEMOTION:
if voice_rect.collidepoint(event.pos):
if voice_pause:
voice_image = pause_pressd_image
else:
voice_image = voice_image_green
else:
if voice_pause:
voice_image = pause_voice_image_blue
else:
voice_image = voice_image_blue
elif event.type == KEYDOWN:
if event.key == K_SPACE:
if boom_num:
# 炸弹减一
boom_num -= 1
# 播放炸弹音效
boom_sound.play()
for each in enemys:
# 如果敌机在底部的上面将全部毁灭
if each.rect.bottom > 0:
each.active = False
# 判断补给箱是否触发
elif event.type == supply_timer:
supply_appear.play()
main_value = randint(0, 20)
if main_value % 3 == 0:
bomb_supply.reset()
if main_value % 3 == 1:
life_supply.reset()
if main_value % 3 == 2:
bullet_supply.reset()
elif event.type == double_bullet_timer:
is_double_bullet = False
pygame.time.set_timer(double_bullet_timer, 0)
elif event.type == invincible_timer:
me.invincible = False
pygame.time.set_timer(invincible_timer, 0)
# 等级难度提升
# 二级难度
if level == 1 and score > 30000:
level = 2
level2_sound.play()
# 增加小型敌机3 中型2, 大型1
add_small_enemy(small_enemy, enemys, 3)
add_mid_enemy(mid_enemy, enemys, 2)
add_big_enemy(big_enemy, enemys, 1)
# 增加小型敌机速度
inc_speed(small_enemy, 1)
# 三级难度
elif level == 2 and score > 100000:
level = 3
level3_sound.play()
# 增加小型敌机4, 中型3, 大型2
add_small_enemy(small_enemy, enemys, 4)
add_mid_enemy(mid_enemy, enemys, 3)
add_big_enemy(big_enemy, enemys, 2)
# 增加小型, 中型敌机速度
inc_speed(small_enemy, 1)
# 四级难度
elif level == 3 and score > 300000:
level = 4
level4_sound.play()
pygame.mixer.music.pause()
level4_backmusic.play(-1)
pygame.time.set_timer(supply_timer, 30 * 1000)
# 增加小型敌机6, 中型5, 大型3
add_small_enemy(small_enemy, enemys, 6)
add_mid_enemy(mid_enemy, enemys, 5)
add_big_enemy(big_enemy, enemys, 3)
# 增加小型, 中型敌机速度
inc_speed(small_enemy, 1)
# 五级难度
elif level == 4 and score > 600000:
level = 5
level5_sound.play()
# 增加小型敌机8, 中型7, 大型4
add_small_enemy(small_enemy, enemys, 8)
add_mid_enemy(mid_enemy, enemys, 7)
add_big_enemy(big_enemy, enemys, 4)
# 增加小型, 中型敌机速度
inc_speed(small_enemy, 2)
# 六级难度
elif level == 5 and score > 1000000:
level = 6
level6_sound.play()
level4_backmusic.stop()
level6_backmusic.play(-1)
# 增加小型敌机10, 中型9, 大型6
add_small_enemy(small_enemy, enemys, 10)
add_mid_enemy(mid_enemy, enemys, 9)
add_big_enemy(big_enemy, enemys, 6)
# 增加小型, 中型敌机速度
inc_speed(small_enemy, 2)
inc_speed(mid_enemy, 1)
# 绘制游戏背景
screen.blit(people_pause_image, (0, 250))
screen.blit(font_image, (0, 550))
if life_num and not pause:
# 检测用户键盘操作
key_button = pygame.key.get_pressed()
if key_button[K_w] or key_button[K_UP]:
me.moveup()
if key_button[K_s] or key_button[K_DOWN]:
me.movedown()
if key_button[K_a] or key_button[K_LEFT]:
me.moveleft()
if key_button[K_d] or key_button[K_RIGHT]:
me.moveright()
screen.blit(background, (0, 0))
# 绘制核弹并检测是否获得
if bomb_supply.active:
bomb_supply.move()
screen.blit(bomb_supply.image, bomb_supply.rect)
# 检测是否获得
if pygame.sprite.collide_mask(bomb_supply, me):
get_bomb.play()
if boom_num < 3:
boom_num += 1
if boom_num >= 3:
too_much_bomb.play()
bomb_supply.active = False
# 绘制生命并检测是否获得
if life_supply.active:
life_supply.move()
screen.blit(life_supply.image, life_supply.rect)
# 检测是否获得
if pygame.sprite.collide_mask(life_supply, me):
get_bomb.play()
if life_num < 3:
life_num += 1
if life_num >= 3:
too_much_life.play()
life_supply.active = False
# 绘制超级子弹
if bullet_supply.active:
bullet_supply.move()
screen.blit(bullet_supply.image, bullet_supply.rect)
# 检测是否获得
if pygame.sprite.collide_mask(bullet_supply, me):
get_bullet.play()
# 发射超级子弹
is_double_bullet = True
pygame.time.set_timer(double_bullet_timer, 20 * 1000)
bullet_supply.active = False
# 发射子弹 delay % 10 就是限制子弹为10帧/s
if not (delay % 10):
launch_bullet.play()
if is_double_bullet:
bullets = bullet2
bullets[bullet2_index].reset((me.rect.centerx-55, me.rect.centery))
bullets[bullet2_index+1].reset((me.rect.centerx+20, me.rect.centery))
bullet2_index = (bullet2_index + 2) % bullet2_nums
else:
bullets = bullet1
bullets[bullet1_index].reset((me.rect.centerx-2.5, me.rect.centery))
bullet1_index = (bullet1_index + 1) % bullet1_nums
# 检测子弹击中敌人
for b in bullets:
if b.active:
b.move()
screen.blit(b.image, b.rect)
enemy_hit = pygame.sprite.spritecollide(b, enemys, False, pygame.sprite.collide_mask)
if enemy_hit:
b.active = False
for e in enemy_hit:
if e in big_enemy or e in mid_enemy:
e.hit = True
e.energy -= 1
if e.energy == 0:
e.active = False
else:
e.active = False
# 绘制大型敌机
for each in big_enemy:
if each.active:
# 初速度
each.move()
if each.hit:
screen.blit(each.image_hit, each.rect)
each.hit = False
else:
screen.blit(each.image, each.rect)
# 绘制大型敌机血量底槽
pygame.draw.line(screen, BLACK, (each.rect.left, each.rect.top - 5),
(each.rect.right, each.rect.top - 5), 4)
# 绘制大型飞机击中时血量
# 计算当时的血量
energy_count = each.energy / enemyplan.BigPlan.energy
# 如果血量大于百分之二十绘制绿色 否则绘制红色
if energy_count > 0.2:
energy_color = GREEN
else:
energy_color = RED
# 绘制敌机被击中的当时血量
pygame.draw.line(screen, energy_color, (each.rect.left, each.rect.top - 5),
(each.rect.left + each.rect.width * energy_count,
each.rect.top - 5), 4)
# 添加出场音效
if each.rect.bottom == -100:
appear_bigplan.play()
else:
# 飞机毁灭播放
if not (delay % 3):
if big_destory_index == 0:
big_sound.play()
screen.blit(each.destory_images[big_destory_index], each.rect)
big_destory_index = (big_destory_index + 1) % 9
if big_destory_index == 0:
score += 13140
each.reset()
# 绘制中型敌机
for each in mid_enemy:
if each.active:
# 初速度
each.move()
if each.hit:
screen.blit(each.image_hit, each.rect)
each.hit = False
else:
screen.blit(each.image, each.rect)
# 绘制中型敌机血量底槽
pygame.draw.line(screen, BLACK, (each.rect.left, each.rect.top - 5),
(each.rect.right, each.rect.top - 5), 3)
# 绘制中型飞机击中时血量
# 计算当时的血量
energy_count = each.energy / enemyplan.MidPlan.energy
# 如果血量大于百分之二十绘制绿色 否则绘制红色
if energy_count > 0.2:
energy_color = GREEN
else:
energy_color = RED
# 绘制敌机被击中的当时血量
pygame.draw.line(screen, energy_color, (each.rect.left, each.rect.top - 5),
(each.rect.left + each.rect.width * energy_count,
each.rect.top - 5), 3)
else:
if not (delay | |
XPVector(
tensor, modes[0]
) # TODO: check if we can output modes as a list in _mode_aware_matmul
elif self.isVector and other.isMatrix:
tensor, modes = other.T._mode_aware_matmul(self)
return XPVector(tensor, modes[0])
else: # self.isVector and other.isVector:
return self._mode_aware_vecvec(other) # NOTE: this is a scalar, not an XPTensor
    def _mode_aware_matmul(
        self, other: Union[XPMatrix, XPVector]
    ) -> Tuple[Tensor, Tuple[List[int], List[int]]]:
        r"""Performs matrix multiplication only on the necessary modes and
        takes care of keeping only the modes that are needed, in case of mismatch.

        Returns the raw result tensor together with its ``(outmodes, inmodes)``
        mode labels, both sorted when the general path is taken.

        NOTE: ``math`` below is the project's backend module (wrappers like
        ``tensordot``/``gather``/``block``), not the stdlib ``math`` module.

        See documentation for a visual explanation with blocks. #TODO: add link to figure
        """
        # Fast path: mode labels match exactly (same labels, same order), so the
        # full tensors can be contracted without any gather/reorder bookkeeping.
        if list(self.inmodes) == list(other.outmodes):  # NOTE: they match including the ordering
            prod = math.tensordot(
                self.tensor, other.tensor, ((1, 3), (0, 2)) if other.isMatrix else ((1, 3), (0, 1))
            )
            return math.transpose(prod, (0, 2, 1, 3) if other.isMatrix else (0, 1)), (
                self.outmodes,
                other.inmodes,
            )
        # General path: split modes into the contracted set (shared between
        # self.inmodes and other.outmodes) and the leftover, uncontracted ones.
        contracted = [i for i in self.inmodes if i in other.outmodes]
        uncontracted_self = [i for i in self.inmodes if i not in contracted]
        uncontracted_other = [o for o in other.outmodes if o not in contracted]
        # Uncontracted modes must not collide with the surviving out/in modes,
        # otherwise the block assembly below would double-count entries.
        if not (
            set(self.outmodes).isdisjoint(uncontracted_other)
            and set(other.inmodes).isdisjoint(uncontracted_self)
        ):
            raise ValueError("Invalid modes")
        bulk = None  # product over the contracted modes (None if nothing contracts)
        copied_rows = None  # rows of `other` passed through when self is like_1
        copied_cols = None  # columns of `self` passed through when other is like_1
        if len(contracted) > 0:
            # Gather only the slices of each tensor that take part in the contraction.
            subtensor1 = math.gather(
                self.tensor, [self.inmodes.index(m) for m in contracted], axis=1
            )
            subtensor2 = math.gather(
                other.tensor, [other.outmodes.index(m) for m in contracted], axis=0
            )
            if other.isMatrix:
                bulk = math.tensordot(subtensor1, subtensor2, ((1, 3), (0, 2)))
                bulk = math.transpose(bulk, (0, 2, 1, 3))
            else:
                bulk = math.tensordot(subtensor1, subtensor2, ((1, 3), (0, 1)))
        # A like_1 tensor acts as the identity on modes it does not hold, so the
        # other operand's blocks on those modes are copied through unchanged.
        if self.like_1 and len(uncontracted_other) > 0:
            copied_rows = math.gather(
                other.tensor, [other.outmodes.index(m) for m in uncontracted_other], axis=0
            )
        if other.like_1 and len(uncontracted_self) > 0:
            copied_cols = math.gather(
                self.tensor, [self.inmodes.index(m) for m in uncontracted_self], axis=1
            )
        # Assemble the final block tensor from bulk / copied_rows / copied_cols.
        if copied_rows is not None and copied_cols is not None:
            if bulk is None:
                bulk = math.zeros(
                    (copied_cols.shape[0], copied_rows.shape[1], 2, 2), dtype=copied_cols.dtype
                )
            empty = math.zeros(
                (copied_rows.shape[0], copied_cols.shape[1], 2, 2), dtype=copied_cols.dtype
            )
            final = math.block([[copied_cols, bulk], [empty, copied_rows]], axes=[0, 1])
        elif copied_cols is None and copied_rows is not None:
            if bulk is None:
                final = copied_rows
            else:
                final = math.block([[bulk], [copied_rows]], axes=[0, 1])
        elif copied_rows is None and copied_cols is not None:
            if bulk is None:
                final = copied_cols
            else:
                final = math.block([[copied_cols, bulk]], axes=[0, 1])
        else:  # copied_rows and copied_cols are both None
            final = bulk  # NOTE: could be None
        # Determine which mode labels survive on the output (row) side ...
        outmodes = self.outmodes + uncontracted_other
        if other.like_0 and len(contracted) == 0:
            outmodes = uncontracted_other
        if self.like_0:
            outmodes = [m for m in outmodes if m in self.outmodes]
        # ... and on the input (column) side.
        inmodes = uncontracted_self + other.inmodes
        if self.like_0 and len(contracted) == 0:
            inmodes = uncontracted_self
        if other.like_0:
            inmodes = [m for m in inmodes if m in other.inmodes]
        if final is not None:
            # Reorder rows/columns so the result is sorted by mode label,
            # matching the sorted mode lists returned below.
            final = math.gather(final, [outmodes.index(o) for o in sorted(outmodes)], axis=0)
            if other.isMatrix:
                final = math.gather(final, [inmodes.index(i) for i in sorted(inmodes)], axis=1)
        return final, (sorted(outmodes), sorted(inmodes))
    def _mode_aware_vecvec(self, other: XPVector) -> Scalar:
        """Inner product of two XP vectors restricted to the modes they share.

        Modes present in only one of the vectors contribute nothing, since the
        other vector is implicitly zero there (like_0 semantics).
        """
        # Fast path: identical mode labels in identical order.
        if list(self.outmodes) == list(other.outmodes):
            return math.sum(self.tensor * other.tensor)
        common = list(
            set(self.outmodes) & set(other.outmodes)
        )  # only the common modes (the others are like 0)
        # NOTE(review): `common` holds mode *labels*, yet it indexes the tensors
        # directly below. That is only correct when labels coincide with tensor
        # row positions (e.g. modes == [0..N-1]) — confirm against callers.
        return math.sum(self.tensor[common] * other.tensor[common])
    def __add__(self, other: Union[XPMatrix, XPVector]) -> Union[XPMatrix, XPVector]:
        r"""Mode-aware addition of two XP tensors.

        Handles null (``tensor is None``) operands via their like_0/like_1
        semantics, adds in place when the mode lists match exactly, and
        otherwise scatter-adds both operands into a tensor over the union of
        their modes.

        WARNING: several branches mutate ``self`` or ``other`` in place and
        return the mutated operand rather than a fresh tensor.

        Raises:
            TypeError: if ``other`` is not an XPMatrix/XPVector.
            ValueError: for vector+matrix, coherence+diagonal, or unsupported
                like_1 combinations.
        """
        if not isinstance(other, (XPMatrix, XPVector)):
            raise TypeError(
                f"unsupported operand type(s) for +: '{self.__class__.__qualname__}' and '{other.__class__.__qualname__}'"
            )
        if self.isVector != other.isVector:
            raise ValueError("Cannot add a vector and a matrix")
        if self.isCoherence != other.isCoherence:
            raise ValueError("Cannot add a coherence block and a diagonal block")
        if self.tensor is None and other.tensor is None:  # both are None
            if self.like_1 and other.like_1:
                raise ValueError("Cannot add two like_1 null tensors yet")  # because 1+1 = 2
            if self.isMatrix and other.isMatrix:
                # 0+0 = 0 (like_0) unless one side is like_1, then 1+0 = 1.
                return XPMatrix(like_0=self.like_0 and other.like_0)
            else:
                return XPVector()
        if self.tensor is None:  # only self is None
            if self.like_0:
                return other  # 0 + other = other
            elif (
                self.like_1
            ):  # other must be a matrix because self is like_1, so it must be a matrix and we can't add a vector to a matrix
                # Add the 2x2 identity onto every diagonal (mode, mode) block of
                # `other` — mutates other.tensor in place.
                indices = [
                    [i, i] for i in range(other.num_modes)
                ]  # TODO: check if this is always correct
                updates = math.tile(
                    math.expand_dims(math.eye(2, dtype=other.dtype), 0), (other.num_modes, 1, 1)
                )
                other.tensor = math.update_add_tensor(other.tensor, indices, updates)
                return other
        if other.tensor is None:  # only other is None
            # Delegate to the symmetric case above (addition commutes here).
            return other + self
        # now neither is None
        modes_match = list(self.outmodes) == list(other.outmodes) and list(self.inmodes) == list(
            other.inmodes
        )
        if modes_match:
            # Same modes, same order: plain elementwise add (mutates self).
            self.tensor = self.tensor + other.tensor
            return self
        if not modes_match and self.like_1 and other.like_1:
            raise ValueError("Cannot add two like_1 tensors on different modes yet")
        # Result lives on the union of both operands' modes, sorted by label.
        outmodes = sorted(set(self.outmodes).union(other.outmodes))
        inmodes = sorted(set(self.inmodes).union(other.inmodes))
        self_contains_other = set(self.outmodes).issuperset(other.outmodes) and set(
            self.inmodes
        ).issuperset(other.inmodes)
        other_contains_self = set(other.outmodes).issuperset(self.outmodes) and set(
            other.inmodes
        ).issuperset(self.inmodes)
        # Reuse the larger operand's tensor as the accumulator when one operand's
        # modes contain the other's; otherwise start from zeros and add both.
        if self_contains_other:
            to_update = self.tensor
            to_add = [other]
        elif other_contains_self:
            to_update = other.tensor
            to_add = [self]
        else:  # need to add both to a new empty tensor
            to_update = math.zeros(
                (len(outmodes), len(inmodes), 2, 2) if self.isMatrix else (len(outmodes), 2),
                dtype=self.tensor.dtype,
            )
            to_add = [self, other]
        for t in to_add:
            # Map t's mode labels to row/column positions in the accumulator.
            outmodes_indices = [outmodes.index(o) for o in t.outmodes]
            inmodes_indices = [inmodes.index(i) for i in t.inmodes]
            if (
                t.isMatrix
            ):  # e.g. outmodes of to_update are [self]+[other_new] = (e.g.) [9,1,2]+[0,20]
                indices = [[o, i] for o in outmodes_indices for i in inmodes_indices]
            else:
                indices = [[o] for o in outmodes_indices]
            to_update = math.update_add_tensor(
                to_update,
                indices,
                math.reshape(t.modes_first(), (-1, 2, 2) if self.isMatrix else (-1, 2)),
            )
        if self.isMatrix and other.isMatrix:
            return XPMatrix(
                to_update,
                like_0=self.like_0 and other.like_0,
                like_1=self.like_1 or other.like_1,
                modes=(outmodes, inmodes),
            )
        else:
            return XPVector(to_update, outmodes)
def __sub__(self, other: Union[XPMatrix, XPVector]) -> Optional[XPTensor]:
return self + (-1) * other
def __truediv__(self, other: Scalar) -> Optional[XPTensor]:
return (1 / other) * self
def __getitem__(self, modes: Union[int, slice, List[int], Tuple]) -> Union[XPMatrix, XPVector]:
r"""Returns modes or subsets of modes from the XPTensor or coherences between modes using an
intuitive notation.
We handle mode indices and we get the corresponding tensor indices handled correctly.
Examples:
.. code::
T[N] ~ self.tensor[N,:,:,:]
T[M,N] = the coherence between the modes M and N
T[:,N] ~ self.tensor[:,N,:,:]
T[[1,2,3],:] ~ self.tensor[[1,2,3],:,:,:] # i.e. the block with outmodes [1,2,3] and all inmodes
T[[1,2,3],[4,5]] ~ self.tensor[[1,2,3],[4,5],:,:] # i.e. the block with outmodes [1,2,3] and inmodes [4,5]
"""
if self.isVector:
if isinstance(modes, int):
_modes = [modes]
elif isinstance(modes, list) and all(isinstance(m, int) for m in modes):
_modes = modes
elif modes == slice(None, None, None):
_modes = self.outmodes
else:
raise ValueError(f"Usage: V[1], V[[1,2,3]] or V[:]")
rows = [self.outmodes.index(m) for m in modes]
return XPVector(math.gather(self.tensor, rows, axis=0), modes)
else:
_modes = [None, None]
if isinstance(modes, int):
_modes = ([modes], slice(None, None, None))
elif isinstance(modes, list) and all(isinstance(m, int) for m in modes):
_modes = (modes, slice(None, None, None))
elif modes == slice(None, None, None):
_modes = (slice(None, None, None), slice(None, None, None))
elif isinstance(modes, tuple) and len(modes) == 2:
for i, M in enumerate(modes):
if isinstance(M, int):
_modes[i] = [M]
elif isinstance(M, list):
_modes[i] = M
elif M == slice(None, None, None):
_modes[i] = self.modes[i]
else:
raise ValueError(
f"Invalid modes: {M} from {modes} (tensor has modes {self.modes})"
)
else:
raise ValueError(f"Invalid modes: {modes} (tensor has modes {self.modes})")
rows = [self.outmodes.index(m) for m in _modes[0]]
columns = [self.inmodes.index(m) for m in _modes[1]]
subtensor = math.gather(self.tensor, rows, axis=0)
subtensor = math.gather(subtensor, columns, axis=1)
return XPMatrix(
subtensor,
like_1=_modes[0] == _modes[1] if self.like_1 else False,
modes=tuple(_modes),
)
class XPMatrix(XPTensor):
r"""A convenience class for a matrix in the XPTensor format.
# TODO: write docstring
"""
def __init__(
self,
tensor: Tensor = None,
like_0: bool = None,
like_1: bool = None,
modes: Tuple[List[int], List[int]] = ([], []),
):
if like_0 is None and like_1 is None:
raise ValueError("At least one of like_0 or like_1 must be set")
if like_0 == like_1:
raise ValueError(f"like_0 and like_1 can't both be {like_0}")
if not (
isinstance(modes, tuple) and len(modes) == 2 and all(type(m) == list for m in modes)
):
raise ValueError("modes should be a tuple containing two lists (outmodes and inmodes)")
if len(modes[0]) == 0 and len(modes[1]) == 0 and tensor is not None:
if (
tensor.shape[0] != |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.