# NOTE: stray dataset-table header (not Python); converted to a comment so the file parses.
'''
Classes and methods for the pseudocluster sampling method
'''
## pseudocluster.py
## author: <NAME>
## email: <EMAIL>
from __future__ import division, absolute_import, print_function
import rasterio
import numpy as np
import math
import warnings
from collections import OrderedDict
try:
from multiprocessing import Pool, Process
has_multiprocessing = True
except ImportError:
warnings.warn('Multiprocessing not available. Install multiprocessing module to enable it.')
has_multiprocessing = False
from .image import Image
from .indices import calc_indices, _allowed_indices
from ._pseudocluster import _pseudocluster1, _pseudocluster2
def _in2d(x, vals):
'''
Wrapper for numpy.in1d().reshape(). See https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html for more info.
'''
test = np.in1d(x.ravel(), vals).reshape(x.shape)
return test
def _bi(x, water_values, land_values):
'''
Balance Index (BI)
(see compute_samples() method for definition)
'''
bi = 1 - ( abs(np.sum(_in2d(x, water_values)) - np.sum(_in2d(x, land_values))) / (np.sum(_in2d(x, water_values)) + np.sum(_in2d(x, land_values))) )
return bi
class _PseudoclusterConfig(object):
    def __init__(self, indices = None, exclude_bands = None):
        '''
        Configuration class for Pseudocluster Class

        NOTE(review): currently an unused stub -- the constructor accepts the
        index/band options but does not store or validate them. Confirm intent
        before wiring it into Pseudocluster.
        '''
        pass
# Default spectral band names per product: the generic Landsat/HLS 6-band set,
# the HLS S10 (10 m Sentinel-2) subset, and the HLS S20 (20 m Sentinel-2) set.
_bands = ['B', 'G', 'R', 'NIR', 'SWIR1', 'SWIR2']
_S10bands = ['B', 'G', 'R', 'NIR']
_S20bands = ['B', 'G', 'R', 'RE1', 'RE2', 'RE3', 'NIR', 'SWIR1', 'SWIR2']
class Pseudocluster(Image):
    '''
    Pseudocluster algorithm class

    Args:
        - Dataset: object of class Landsat or HLS (must already be read and
          have its mask set)
        - blockshape: shape of sample blocks as a tuple (height, width)
        - indices: indices to be computed (appended to the sensor's default bands)
        - exclude_bands: original bands to be excluded from the model
        - copy: create a copy of the image bands and mask in memory?
    '''
    def __init__(self, Dataset, blockshape, indices = None, exclude_bands = None, copy = False):
        # Choose the default band list for the input product.
        if Dataset.dataset == 'S10':
            default_bands = _S10bands
            # S10 lacks the SWIR bands, so only NDVI/NDWI can be derived.
            if indices and not all([i in ['NDVI', 'NDWI'] for i in indices]):
                raise ValueError("HLS-S10 only supports NDVI and NDWI as indices.")
        elif Dataset.dataset == 'S20':
            default_bands = _S20bands
        else:
            default_bands = _bands
        if indices:
            if not isinstance(indices, list):
                indices = [indices]
            for i in indices:
                if not i in _allowed_indices:
                    raise ValueError("{0} is not an allowed index.".format(i))
            indices = default_bands + [i for i in indices]
        else:
            indices = default_bands
        if exclude_bands:
            if not isinstance(exclude_bands, list):
                exclude_bands = [exclude_bands]
            self.index_names = [i for i in indices if not i in exclude_bands]
        else:
            self.index_names = indices
        # Per-index data holders; populated by compute_indices().
        self.indices = OrderedDict()
        self.aggregated_indices = OrderedDict()
        for i in self.index_names:
            self.indices[i] = None
            self.aggregated_indices[i] = None
        # these will be filled in later
        self.index_filenames = OrderedDict()
        Image.__init__(self)
        self.set_date(Dataset.date)
        if not Dataset.opened:
            raise ValueError("Dataset must be read first.")
        elif Dataset.mask is None:
            raise ValueError("Dataset mask must be set first.")
        else:
            self.opened = True
        if copy:
            self.bands = Dataset.bands.copy()
            self.mask = Dataset.mask.copy()
        else:
            self.bands = Dataset.bands
            self.mask = Dataset.mask
        # Mirror the source dataset's metadata.
        self.sceneid = Dataset.sceneid
        self.bandnames = Dataset.bandnames
        self.dtype = Dataset.dtype
        self.nodatavalue = Dataset.nodatavalue  # BUG FIX: was assigned twice
        self.profile = Dataset.profile
        self.height = Dataset.height
        self.width = Dataset.width
        self.filepath = Dataset.filepath
        # BUG FIX: was "if not len(blockshape) is 2" -- identity comparison
        # against an int literal is unreliable; compare by value instead.
        if len(blockshape) != 2:
            raise ValueError("blockshape must be of length 2")
        self.shape = blockshape
        # BUG FIX: the number of block columns depends on the block *width*
        # (shape[1]) and the number of block rows on the block *height*
        # (shape[0]); the two were swapped, which only worked for square blocks.
        self.cols = int(self.width / self.shape[1])
        self.rows = int(self.height / self.shape[0])
        self.tops = [top for top in range(0, self.shape[0] * self.rows, self.shape[0])]
        self.lefts = [left for left in range(0, self.shape[1] * self.cols, self.shape[1])]
        # Sampling state; set later by set_water_mask() / compute_samples().
        self.water_mask = None
        self.water_values = None
        self.land_values = None
        self.bi = None
        self.weights = None
        self.nsamples = None
        self.copy = copy
    def set_water_mask(self, water_mask, water_values = [1], land_values = [0]):
        '''
        Set the water map and indicate the values representing land and (pure) water
        '''
        self.water_mask = water_mask
        self.water_values = water_values
        self.land_values = land_values
    def block_extent(self, block_index):
        '''
        Return the extent of a given block

        block_index: zero-indexed tuple of length two (row_index, col_index)
        Value: (top, bottom, left, right)
        '''
        # BUG FIX: indices are zero-based, so an index *equal* to rows/cols is
        # already out of range (the old ">" test let it through and the
        # self.tops[i] lookup below raised IndexError instead).
        if (block_index[0] >= self.rows) or (block_index[1] >= self.cols):
            raise ValueError("block_index must be within rows and cols.")
        i, j = block_index
        top = self.tops[i]
        bottom = self.tops[i] + self.shape[0]
        left = self.lefts[j]
        right = self.lefts[j] + self.shape[1]
        return top, bottom, left, right
    def compute_indices(self):
        # wrapper for self.compute_index: only derive the indices that are not
        # already raw bands, then expose every model input through self.indices
        self.compute_index([i for i in self.index_names if i not in self.bandnames])
        for i in self.index_names:
            self.indices[i] = self.bands[i]
    def crop_array(self, x, block_index):
        '''
        Same as crop_bands, but on any np.array (2-D, or 3-D band-first)
        '''
        if len(x.shape) == 2:
            dims = x.shape
        elif len(x.shape) == 3:
            dims = (x.shape[1], x.shape[2])
        else:
            raise ValueError("Array must have 2 or 3 dimensions")
        # The array must cover the full image extent.
        assert dims[0] == self.height
        assert dims[1] == self.width
        top, bottom, left, right = self.block_extent(block_index)
        if len(x.shape) == 3:  # TODO: force 3-D array
            z = x[:, top:bottom, left:right]
        else:
            z = x[top:bottom, left:right]
        return z
    def crop_indices(self, block_index):
        '''
        Wrapper for crop_array(), but applied only to indices
        '''
        z = np.stack([j for i, j in self.indices.items()])
        return self.crop_array(z, block_index)
    def compute_samples(self, d, N, minsample = 0):
        '''
        Computes sample weights (w) and # samples (n) based on the balance index (BI), dampening factor (d) and total sample size (N)

        .. math::
            BI = 1 - \\frac{|N_{w}-N_{l}|}{N_{w}+N_{l}}
            w_{ij} = \\frac{BI_{ij}+d}{\\sum_{ij}{(BI_{ij}+d)}};
            d \\ge 0
            n_{ij} = w_{ij}N

        Args:
            d: dampening factor
            N: total number of samples
            minsample: minimum sample required in a block for it to be included (otherwise a numpy.nan is assigned to weights array)
        '''
        if self.water_mask is None:
            raise ValueError("water_mask must be set first.")
        self.bi = np.zeros((self.rows, self.cols), dtype = np.float32)
        for i in range(self.rows):
            for j in range(self.cols):
                wc = self.crop_array(self.water_mask, (i, j))
                if np.sum(_in2d(wc, self.water_values + self.land_values)) >= minsample:
                    self.bi[i, j] = _bi(wc, self.water_values, self.land_values)
                else:
                    self.bi[i, j] = np.nan
        self.weights = (self.bi + d) / np.nansum(self.bi + d)
        # BUG FIX: casting NaN weights straight to uint32 is undefined
        # behaviour; zero out the NaN counts *before* the integer cast.
        counts = np.round(N * self.weights)
        counts[np.isnan(counts)] = 0
        self.nsamples = counts.astype(np.uint32)
    def get_pseudoclusters(self, cl, ncpus=1, distribution = None):
        '''
        (wrapper for _runpseudocluster)

        Args:
            cl: number of pixels per pseudocluster
            ncpus: number of cpus (for parallel processing)
            distribution: impose a distribution on the SWF sampled from each block (experimental and disabled by default). Either None (disabled) or "uniform", with plans to add other distibutions later.

        Returns:
            A tuple of (1) a numpy.ndarray of the covariates and (2) a 1-D array of the response variable
        '''
        if self.nsamples is None:
            raise ValueError("# of samples per block must be computed first.")
        block_data = []
        z = self.stack_bands()
        for i in range(self.rows):
            for j in range(self.cols):
                block_data.append({
                    'x': self.crop_array(z, (i, j)),
                    'wm': self.crop_array(self.water_mask, (i, j)),
                    'N': self.nsamples[i, j],
                    'cl': cl,
                    'distribution': distribution
                })
        if has_multiprocessing and (ncpus > 1):
            # BUG FIX: create the pool *before* the try block so the finally
            # clause can never reference an unbound name if Pool() itself raises.
            p = Pool(ncpus)
            try:
                samples = p.map(_runpseudocluster, block_data)
            finally:
                p.close()
                p.join()
        else:
            samples = [_runpseudocluster(t) for t in block_data]
        if all(s is None for s in samples):
            return None, None
        else:
            # Last row holds the response variable; the rest are covariates.
            samples = np.concatenate([s for s in samples if s is not None], axis = 1)
            return samples[:-1], samples[-1]
    # some other utility methods not currently used in algorithm...
    def count_obs(self, block_index):
        '''
        TODO:
        Returns a count of the valid (non-NA) pixels available in block (i, j)
        Uses the first band in Image as a reference

        Args:
            - block_index: (i, j) --> (row, column)
        '''
        pass
def samples_to_dict(samples, band_names = ('B', 'G', 'R', 'NIR', 'SWIR1', 'SWIR2')):
    '''
    Converts the numpy.ndarray from get_pseudoclusters() above to an
    OrderedDict so indices can be computed from it (see
    waffls.indices.calc_indices()).

    Args:
        - samples: a 2-D array of shape (nbands, nsamples). If nbands >
          len(band_names), only the first len(band_names) bands are taken. It
          is recommended just to use the 6 original spectral bands, and then
          compute the indices from these.
        - band_names: ordered band labels. The default is now a tuple rather
          than a list to avoid the shared mutable-default-argument pitfall
          (backward compatible: the argument is only iterated).

    Returns:
        OrderedDict mapping band name -> array of shape (1, nsamples)
    '''
    samples_dict = OrderedDict()
    for i, b in enumerate(band_names):
        # Keep a leading singleton axis so each band reads as one "row" of samples.
        samples_dict[b] = samples[i].reshape((1, samples[i].shape[0]))
    return samples_dict
def _runpseudocluster(kwargs):
'''
Wrapper for pseudocluster function
'''
if kwargs['distribution'] in ["uniform", "Uniform"]:
_pseudocluster = _pseudocluster1
else:
_pseudocluster = _pseudocluster2
N = kwargs['N']
if N > 0:
x = kwargs['x']
nbands = x.shape[0]
npixels = x.shape[1] * x.shape[2]
x = x.reshape([nbands, npixels])
wm = kwargs['wm'].flatten()
cl = kwargs['cl']
try:
ps = _pseudocluster(x, wm, N, cl)
except:
ps = None
# TODO: find some way to flag these errors
# ie. in which blocks are these errors being thrown?
# and are biases being introduced by skipping over | |
# repo: dchaplinsky/pep.org.ua (source marker converted to a comment so the file parses)
# coding: utf-8
from __future__ import unicode_literals
from itertools import chain
from copy import deepcopy
from urlparse import urlparse
import datetime
from collections import defaultdict, OrderedDict
from django.db import models
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_noop as _
from django.utils.translation import ugettext_lazy, activate, get_language
from django.forms.models import model_to_dict
from django.conf import settings
from django.db.models.functions import Coalesce
from django.db.models import Q, Value, Max
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.core.exceptions import ValidationError
from django.contrib.contenttypes.fields import GenericRelation
from cacheops import cached
from translitua import translitua
import select2.fields
import select2.models
from dateutil.parser import parse as dt_parse
from core.fields import RedactorField
from core.model.base import AbstractNode
from core.model.translations import Ua2EnDictionary
from core.utils import (
render_date,
lookup_term,
parse_fullname,
translate_into,
ceil_date,
)
from core.templatetags.jinja_filters import convert_curr
from core.model.declarations import Declaration
from core.model.supplementaries import Document
from core.model.connections import Person2Person, Person2Company, Person2Country
# to_*_dict methods are used to convert two main entities that we have, Person
# and Company into document indexable by ElasticSearch.
# Links between Persons, Person and Company, Companies, Person and Country,
# Company and Country is also converted to subdocuments and attached to
# Person/Company documents. Because Person and Company needs different
# subdocuments, Person2Company has two different methods, to_person_dict and
# to_company_dict. For the same reason Person2Person and Company2Company has
# to_dict/to_dict_reverse because same link provides info to both persons.
class Person(models.Model, AbstractNode):
    """A (potentially) politically exposed person (PEP) and their profile data."""

    # Why the PEP status was terminated; codes are referenced by the
    # `terminated` / `died` properties below.
    _reasons_of_termination = (
        (1, _("Помер")),
        (2, _("Звільнився/склав повноваження")),
        (3, _("Пов'язана особа або член сім'ї - ПЕП помер")),
        (4, _("Пов'язана особа або член сім'ї - ПЕП припинив бути ПЕПом")),
        (5, _("Зміни у законодавстві що визначає статус ПЕПа")),
        (6, _("Зміни форми власності юр. особи посада в котрій давала статус ПЕПа")),
    )
    # Category of PEP: national/foreign/international official, related
    # person, or family member.
    _types_of_officials = (
        (1, _("Національний публічний діяч")),
        (2, _("Іноземний публічний діяч")),
        (3, _("Діяч, що виконуює значні функції в міжнародній організації")),
        (4, _("Пов'язана особа")),
        (5, _("Член сім'ї")),
    )
    # --- identity -----------------------------------------------------------
    last_name = models.CharField("Прізвище", max_length=40)
    first_name = models.CharField("Ім'я", max_length=40)
    patronymic = models.CharField("По батькові", max_length=40, blank=True)
    publish = models.BooleanField("Опублікувати", default=True)
    is_pep = models.BooleanField("Є PEPом", default=True)
    imported = models.BooleanField("Був імпортований з гугл-таблиці", default=False)
    photo = models.ImageField("Світлина", blank=True, upload_to="images")
    dob = models.DateField("Дата народження", blank=True, null=True)
    # Precision of `dob`: 0 = exact date, 1 = year+month, 2 = year only.
    dob_details = models.IntegerField(
        "Дата народження: точність",
        choices=((0, "Точна дата"), (1, "Рік та місяць"), (2, "Тільки рік")),
        default=0,
    )
    city_of_birth = models.CharField("Місто народження", max_length=100, blank=True)
    # --- relations ----------------------------------------------------------
    related_countries = models.ManyToManyField(
        "Country",
        verbose_name="Пов'язані країни",
        through="Person2Country",
        related_name="people",
    )
    # Free-form (rich-text) reputation sections.
    reputation_assets = RedactorField("Статки", blank=True)
    reputation_sanctions = RedactorField("Наявність санкцій", blank=True)
    reputation_crimes = RedactorField("Кримінальні провадження", blank=True)
    reputation_manhunt = RedactorField("Перебування у розшуку", blank=True)
    reputation_convictions = RedactorField("Наявність судимості", blank=True)
    related_persons = select2.fields.ManyToManyField(
        "self",
        through="Person2Person",
        symmetrical=False,
        ajax=True,
        search_field=(lambda q: Q(last_name__icontains=q) | Q(first_name__icontains=q)),
    )
    related_companies = models.ManyToManyField("Company", through="Person2Company")
    # --- wiki / naming ------------------------------------------------------
    wiki = RedactorField("Вікі-стаття", blank=True)
    wiki_draft = RedactorField("Чернетка вікі-статті", blank=True)
    wiki_url = models.URLField("Посилання на вікі", blank=True, max_length=1023)
    names = models.TextField("Варіанти написання імені", blank=True)
    also_known_as = models.TextField("Інші імена", blank=True)
    type_of_official = models.IntegerField(
        "Тип ПЕП", choices=_types_of_officials, blank=True, null=True
    )
    risk_category = models.CharField(
        "Рівень ризику",
        choices=(
            ("danger", _("Неприйнятно високий")),
            ("high", _("Високий")),
            ("medium", _("Середній")),
            ("low", _("Низький")),
        ),
        max_length=6,
        default="low",
    )
    title = models.CharField(max_length=255, blank=True)
    description = models.TextField(blank=True)
    hash = models.CharField("Хеш", max_length=40, blank=True)
    # --- termination of PEP status -----------------------------------------
    reason_of_termination = models.IntegerField(
        "Причина припинення статусу ПЕП",
        choices=_reasons_of_termination,
        blank=True,
        null=True,
    )
    termination_date = models.DateField(
        "Дата припинення статусу ПЕП",
        blank=True,
        null=True,
        help_text="Вказується реальна дата зміни без врахування 3 років (реальна дата звільнення, тощо)",
    )
    # Same precision scheme as dob_details.
    termination_date_details = models.IntegerField(
        "Дата припинення статусу ПЕП: точність",
        choices=((0, "Точна дата"), (1, "Рік та місяць"), (2, "Тільки рік")),
        default=0,
    )
    # --- bookkeeping --------------------------------------------------------
    last_change = models.DateTimeField(
        "Дата останньої зміни сторінки профіля", blank=True, null=True
    )
    last_editor = models.ForeignKey(
        User,
        on_delete=models.SET_NULL,
        verbose_name="Автор останньої зміни сторінки профілю",
        blank=True,
        null=True,
    )
    _last_modified = models.DateTimeField("Остання зміна", null=True, blank=True)
    # Tax number / passport data from public sources, each with the source
    # document it was obtained from.
    inn = models.CharField(
        _("ІПН з публічних джерел"), max_length=40, null=True, blank=True
    )
    inn_source = models.ForeignKey(
        "core.Document",
        verbose_name="Документ з котрого було отримано ІПН",
        default=None,
        blank=True,
        null=True,
        related_name="inns",
    )
    passport = models.CharField(
        _("Паспортні дані з публічних джерел"), max_length=40, null=True, blank=True
    )
    passport_source = models.ForeignKey(
        "core.Document",
        verbose_name="Документ з котрого було отримано ІПН",
        default=None,
        blank=True,
        null=True,
        related_name="passports",
    )
    proofs = GenericRelation(
        "RelationshipProof", verbose_name="Посилання, соціальні мережі та документи"
    )
@staticmethod
def autocomplete_search_fields():
return ("id__iexact", "last_name__icontains", "first_name__icontains")
def __unicode__(self):
return "%s %s %s" % (self.last_name, self.first_name, self.patronymic)
    @property
    def date_of_birth(self):
        """Human-readable date of birth honouring the stored precision (dob_details)."""
        return render_date(self.dob, self.dob_details)
    @property
    def termination_date_human(self):
        """Human-readable PEP-status termination date honouring the stored precision."""
        return render_date(self.termination_date, self.termination_date_details)
@property
def terminated(self):
# (1, _("Помер")),
# (2, _("Звільнився/склав повноваження")),
# (3, _("Пов'язана особа або член сім'ї - ПЕП помер")),
# (4, _("Пов'язана особа або член сім'ї - ПЕП припинив бути ПЕПом")),
# (5, _("Зміни у законодавстві що визначає статус ПЕПа")),
# (6, _("Зміни форми власності юр. особи посада в котрій давала статус ПЕПа")),
if self.reason_of_termination in [1, 3]:
return True
if (
self.reason_of_termination in [2, 4, 5, 6]
and self.termination_date is not None
):
if (
ceil_date(self.termination_date, self.termination_date_details)
+ datetime.timedelta(days=3 * 365)
<= datetime.date.today()
):
return True
return False
    @property
    def died(self):
        """True if the PEP status ended because the person died (reason code 1)."""
        return self.reason_of_termination == 1
    def _last_workplace(self):
        """Queryset of the person's most relevant employment link(s).

        Looking for a most recent appointment that has at least one date set.
        It'll work in the following two cases:
        Case 1: date_finished=null, date_established is the most recent one,
            i.e. person got appointed and still holds the office;
        Case 2: date_finished is the most recent one and date_established is
            the most recent one or null, i.e. person got appointed and then
            resigned.
        Tricky part: null values in dates are getting on top of the list when
        you are sorting in decreasing order. So without the exclude clause this
        query would return the positions without both dates on the top of the
        list.
        """
        qs = (
            self.person2company_set.order_by(
                "-is_employee", "-date_finished", "-date_established"
            )
            .exclude(date_finished__isnull=True, date_established__isnull=True)  # AND!
            .exclude(relationship_type_uk="Клієнт банку")
            .prefetch_related("to_company")
            # .only() keeps the rows light: just the company names/ids and the
            # relationship fields the last_workplace* properties render.
            .only(
                "to_company__short_name_uk",
                "to_company__name_uk",
                "to_company__short_name_en",
                "to_company__name_en",
                "to_company__id",
                "relationship_type_uk",
                "relationship_type_en",
                "date_finished",
                "date_finished_details",
                "from_person_id",
                "id",
            )
        )
        if qs:
            return qs
        # If nothing is found we are going to return the position that
        # has finished date set to null or the most recent one.
        # In contrast with the previous query it'll also return those positions
        # where date_finished and date_established == null.
        qs = (
            self.person2company_set.order_by("-is_employee", "-date_finished")
            .prefetch_related("to_company")
            .exclude(relationship_type_uk="Клієнт банку")
            .only(
                "to_company__short_name_uk",
                "to_company__name_uk",
                "to_company__short_name_en",
                "to_company__name_en",
                "to_company__id",
                "relationship_type_uk",
                "relationship_type_en",
                "date_finished",
                "date_finished_details",
                "from_person_id",
                "id",
            )
        )
        return qs
@property
def day_of_dismissal(self):
dday = self._last_workplace().filter(is_employee=True).first()
if dday:
return render_date(dday.date_finished, dday.date_finished_details)
else:
return False
    def _last_workplace_from_declaration(self):
        """Fallback workplace: latest confirmed asset declaration (max one row).

        Candidate declarations ("Кандидата на посаду") are excluded because they
        do not prove actual employment; NACP declarations are preferred.
        """
        return (
            Declaration.objects.filter(person=self, confirmed="a")
            .exclude(doc_type="Кандидата на посаду")
            .order_by("-nacp_declaration", "-year")
            .only(
                "year", "office_en", "position_en", "office_uk", "position_uk", "url"
            )[:1]
        )
@property
def last_workplace(self):
qs = self._last_workplace()
if qs:
l = qs[0]
return {
"company": l.to_company.short_name_uk or l.to_company.name_uk,
"company_id": l.to_company.pk,
"position": l.relationship_type_uk,
}
else:
qs = self._last_workplace_from_declaration()
if qs:
d = qs[0]
return {
"company": d.office_uk,
"company_id": None,
"position": d.position_uk,
}
return ""
# Fuuugly hack
@property
def last_workplace_en(self):
qs = self._last_workplace()
if qs:
l = qs[0]
return {
"company": l.to_company.short_name_en or l.to_company.name_en,
"company_id": l.to_company.pk,
"position": l.relationship_type_en,
}
else:
qs = self._last_workplace_from_declaration()
if qs:
d = qs[0]
return {
"company": d.office_en,
"company_id": None,
"position": d.position_en,
}
return ""
# Fuuugly hack
@property
def translated_last_workplace(self):
# Add caching
qs = self._last_workplace()
if qs:
l = qs[0]
return {
"company": l.to_company.short_name or l.to_company.name,
"company_id": l.to_company.pk,
"position": l.relationship_type,
}
else:
qs = self._last_workplace_from_declaration()
if qs:
d = qs[0]
return {"company": d.office, "company_id": None, "position": d.position}
return ""
    @property
    def workplaces(self):
        """Employment history sorted by start date, newest first.

        Coalesce works by taking the first non-null value. So we give it
        a date far before any non-null values of last_active. Then records
        with a null date_established naturally sort behind records with a
        real one.
        djangoproject.com/en/1.8/ref/models/database-functions/#coalesce
        """
        the_past = datetime.datetime.now() - datetime.timedelta(days=10 * 365)
        timeline = (
            self.person2company_set.prefetch_related(
                "to_company", "proofs", "proofs__proof_document"
            )
            .filter(is_employee=True)
            .annotate(
                fixed_date_established=Coalesce("date_established", Value(the_past))
            )
            .order_by("-fixed_date_established")
        )
        return timeline
    @property
    def assets(self):
        """Non-employment company links that indicate ownership or control.

        The relationship types below (founders, beneficial/nominal owners,
        directors, controllers, etc.) are the ones treated as "assets".
        """
        return self.person2company_set.prefetch_related(
            "to_company", "proofs", "proofs__proof_document"
        ).filter(
            is_employee=False,
            relationship_type_uk__in=(
                "Член центрального статутного органу",
                "Повірений у справах",
                "Засновник/учасник",
                "Колишній засновник/учасник",
                "Бенефіціарний власник",
                "Номінальний власник",
                "Номінальний директор",
                "Фінансові зв'язки",
                "Секретар",
                "Керуючий",
                "Контролер",
            ),
        )
    @property
    def all_related_companies(self):
        """Group the person's non-employment company links for display.

        Returns {"banks": bank-client links, "rest": everything else,
        "all": the combined list}.
        """
        companies = (
            self.person2company_set.prefetch_related(
                "to_company", "proofs", "proofs__proof_document"
            )
            .filter(is_employee=False)
            .order_by("-pk")
        )
        banks = []
        rest = []
        all_connections = []
        for c in companies:
            if c.relationship_type_uk == "Клієнт банку":
                banks.append(c)
            else:
                rest.append(c)
            # NOTE(review): source indentation was ambiguous here -- this
            # assumes "all" includes bank links too (matching its name);
            # confirm against the original file.
            all_connections.append(c)
        return {"banks": banks, "rest": rest, "all": all_connections}
@property
def all_related_persons(self):
related_persons = [
(i.to_relationship_type, i.from_relationship_type, deepcopy(i.to_person), i)
for i in self.to_persons.prefetch_related(
"to_person", "proofs", "proofs__proof_document"
).defer(
"to_person__reputation_assets",
"to_person__reputation_sanctions",
"to_person__reputation_crimes",
"to_person__reputation_manhunt",
"to_person__reputation_convictions",
"to_person__wiki",
"to_person__names",
"to_person__hash",
)
] + [
(
i.from_relationship_type,
i.to_relationship_type,
deepcopy(i.from_person),
i,
)
for i in | |
import datetime
from datetime import date
from datetime import datetime
import requests
import pymysql
import json
import math
import skfuzzy as fuzz
from skfuzzy import control as ctrl
import numpy as np
import matplotlib
from apscheduler.schedulers.blocking import BlockingScheduler
import config
''' Developed by <NAME>
This script is used inside the irrigationRecommendation container and does the following:
1. Get weather parameters and sent it to the referenceEvapotranspiration Entity
2. Calculate Reference evaotranspiration for the city of São Bernardo do Campo, São Paulo, Brazil
3. Calculate the irrigation recommendation based on FAO's Crop Evapotranspiration
4. Calculate the irrigation recommendation based on a Fuzzy Inference System developed by <NAME>
'''
def get_daily_info():
    """
    Collect today's weather readings and push max/min/avg temperature,
    relative humidity and wind speed to the referenceEvapotranspiration
    entity in Orion.

    The WeatherCurrent table stores rows in attrName/attrValue (EAV) form,
    so every statistic is a conditional aggregate over today's rows.
    """
    # One SQL template instead of nine copy-pasted statements (the originals
    # also carried wrong copy-pasted comments, e.g. "minimum" above AVG).
    sql_template = (
        'SELECT {func}(CASE attrName WHEN "{attr}" THEN attrValue END) '
        'FROM `WeatherCurrent_WeatherCurrent` WHERE date(recvTime) = curdate()'
    )
    # (payload attribute name, SQL aggregate, weather attribute) triples.
    metrics = [
        ("dailyTmax", "MAX", "temperature"),
        ("dailyTmin", "MIN", "temperature"),
        ("dailyTmed", "AVG", "temperature"),
        ("dailyRhmax", "MAX", "humidity"),
        ("dailyRhmin", "MIN", "humidity"),
        ("dailyRhmed", "AVG", "humidity"),
        ("dailyVmax", "MAX", "windSpeed"),
        ("dailyVmin", "MIN", "windSpeed"),
        ("dailyVmed", "AVG", "windSpeed"),
    ]
    connection = pymysql.connect(host='db-mysql', user='root', password='<PASSWORD>', db='lab')
    try:
        cursor = connection.cursor()
        attrs = {"dateObserved": {"value": datetime.now().isoformat()}}
        for name, func, attr in metrics:
            cursor.execute(sql_template.format(func=func, attr=attr))
            row = cursor.fetchone()
            attrs[name] = {"value": float(row[0])}
    finally:
        # BUG FIX: the connection was never closed before (resource leak).
        connection.close()
    payload = json.dumps(attrs)
    print(f'Sending payload to Orion: {payload}')
    url = "http://orion:1026/v2/entities/urn:ngsi-ld:referenceEvapotranspiration:1/attrs"
    headers = {
        'Content-Type': 'application/json',
        'fiware-service': 'lab',
        'fiware-servicepath': '/'
    }
    try:
        response = requests.request("PATCH", url, headers=headers, data = payload)
        print(response.text.encode('utf8'))
    except requests.exceptions.RequestException as e:
        print(e)
def evapotranspiration():
    ''' Calculates the reference evapotranspiration (FAO-56 Penman-Monteith)
    based on the referenceEvapotranspiration entity plus sunrise/sunset hours.
    1. Gets data from the reference Evapotranspiration entity.
    2. Gets sunrise and sunset hours from the OpenWeather API.
    3. Calculates the reference evapotranspiration.
    4. Sends the daily evapotranspiration to the reference Evapotranspiration entity.
    '''
    # 1. Daily aggregates previously pushed by get_daily_info().
    url = "http://orion:1026/v2/entities/urn:ngsi-ld:referenceEvapotranspiration:1/attrs?options=keyValues"
    payload = {}
    headers = {
        'fiware-service': 'lab',
        'fiware-servicepath': '/'
    }
    response = requests.request("GET", url, headers=headers, data = payload).json()
    # Day of year (1..365); used for the solar-geometry terms below.
    day_corr = date.today().toordinal() - date(date.today().year,1,1).toordinal()
    tmax = response['dailyTmax']
    tmin = response['dailyTmin']
    tmed = response['dailyTmed']
    rhmax = response['dailyRhmax']
    v_med = response['dailyVmed']
    alt_v = 2  # height (m) above ground at which wind speed is measured
    rhmed =response['dailyRhmed']
    # 2. Sunrise/sunset from OpenWeather; n = daylight duration in hours.
    # NOTE(review): the API key is hardcoded in the URL -- move it to config.
    url = "http://api.openweathermap.org/data/2.5/weather?id=3449344&appid=1b43995d45e76484eac79c54b28ad885&units=metric"
    payload = {}
    headers= {}
    response = requests.request("GET", url, headers=headers, data = payload)
    r = response.json()
    n = datetime.fromtimestamp(r['sys']['sunset']) - datetime.fromtimestamp(r['sys']['sunrise'])
    n = n.total_seconds()/3600
    # 3. FAO-56 calculation. Inputs:
    # tmax is maximum temperature for the day
    # tmin is the minimum temperature for the day
    # rhmax is the maximum relative humidity for the day
    # n is the actual duration of sunshine [hour]
    # v_med is the average wind velocity Km/h
    # alt_v is the altitude above ground at which wind speed is collected (e.g. a weather station)
    # rhmed is the average relative humidity for the day
    # day_corr is the current date in the range 1 to 365
    elev = 801 # Elevation from sea level. Used the city of São Bernardo- São Paulo - Brazil. Change if needed.
    p = 92.183188 # Atmospheric Pressure use eq 101.3*math.pow((293-0.0065*elev)/293,5.26). Used the city of São Bernardo- São Paulo - Brazil. Change if needed.
    phi = -0.414081215084 # latitude in radians. Used the city of São Bernardo- São Paulo - Brazil. Change if needed.
    y = 0.665*math.pow(10,-3)*p # y is the psychrometric constant
    dr = 1 + 0.033*math.cos((2*math.pi*day_corr)/365) # Dr is Relative Distance Earth-Sun
    delt = 0.409 * math.sin(((2*math.pi*day_corr)/365)-1.39) # Delt is solar declination
    e0_tmax = 0.6108*math.pow(math.e,((17.27*tmax)/(tmax+237.3))) # eo_tmax is saturation vapor pressure for the max air temperature
    e0_tmin = 0.6108*math.pow(math.e,((17.27*tmin)/(tmin+237.3))) # eo_tmin is saturation vapor pressure for the min air temperature
    es = (e0_tmax + e0_tmin)/2 # es is the mean saturation vapor pressure
    D = (4098*(0.6108*math.pow(math.e,((17.27*tmed)/(tmed+237.3)))))/math.pow((tmed+237.3),2) # D is Slope Vapor Pressure Curve
    ea = es*rhmed/100 # ea is actual vapor pressure considering an average relative humidity
    ws = math.acos(-math.tan(phi)*math.tan(delt)) # Ws is sunset hour angle
    ra = 37.5860314*dr*((ws*math.sin(phi)*math.sin(delt)) + (math.cos(phi)*math.cos(delt)*math.sin(ws))) # Ra is Extraterrestrial Radiation
    rs = (0.25 + (0.5 * (n/(7.6394 * ws))))*ra #*0.408 Rs is solar radiation
    rns = 0.77*rs # rns is Net Shortwave Radiation
    rso = (0.75*ra) # Rso is Clear Sky Solar Radiation
    f_rs_rso = rs/rso
    # Clamp the rs/rso ratio at 1 (clear-sky upper bound).
    if f_rs_rso > 1:
        f_rs_rso = 1
    rnl = (4.903*math.pow(10,-9)) * ((math.pow((tmax+273.16),4) + math.pow((tmin+273.16),4))/2) * (0.34+(-0.14*math.sqrt(ea))) * ((1.35*(f_rs_rso))-0.35) # Rnl is Net Long Wave Radiation
    r_n = rns - rnl # Rn is Net Radiation
    g = 0  # soil heat flux assumed negligible at daily scale
    uz = v_med*1000/3600 # uz is Wind Speed measured at Z height in m/s
    u2 = uz*(4.87/(math.log(67.8*alt_v - 5.42))) # u2 is wind speed at 2m above ground
    # NOTE(review): the /0.85 factor is presumably an efficiency correction on
    # top of the standard FAO-56 equation -- confirm its origin.
    et_o = ((0.408*D*(r_n-g)+y*(900/(tmed+273))*u2*(es-ea))/(D+y*(1+0.34*u2)))/0.85 # Calculate daily evapotranspiration based on the values before
    # 4. Push the result back to Orion.
    payload = json.dumps({
        "evapotranspiration": {
            "value": et_o
        }
    })
    print(f'Sending payload to Orion: {payload}')
    url = "http://orion:1026/v2/entities/urn:ngsi-ld:referenceEvapotranspiration:1/attrs"
    headers = {
        'Content-Type': 'application/json',
        'fiware-service': 'lab',
        'fiware-servicepath': '/'
    }
    try:
        response = requests.request("PATCH", url, headers=headers, data = payload)
        print(response.text.encode('utf8'))
    except requests.exceptions.RequestException as e: # This is the correct syntax
        print(e)
def fao_recommendation():
''' Calculate Fao's irrigation recommendation for a crop of Pepper and send to management zone 1
1. Get daily reference evapotranspiration (Eto) from referenceEvappotranspiration entity
2. Get days after seeding (Das) from cropInstance evapotranspiration
3. Get crop Koeficiente (kc) from Crop type entity
4. Send irrigation recommendation based on Eto * Kc
'''
# 1. Get daily reference evapotranspiration (Eto) from referenceEvappotranspiration entity
url = "http://orion:1026/v2/entities/urn:ngsi-ld:referenceEvapotranspiration:1/attrs?options=keyValues"
payload = {}
headers = {
'fiware-service': 'lab',
'fiware-servicepath': '/'
}
response = requests.request("GET", url, headers=headers, data = payload).json()
eto = response['evapotranspiration']
# 2. Get days after seeding (Das) from cropInstance evapotranspiration
url = "http://orion:1026/v2/entities/urn:ngsi-ld:CropInstance:Pepper/attrs?options=keyValues"
payload = {}
headers = {
'fiware-service': 'lab',
'fiware-servicepath': '/'
}
response = requests.request("GET", url, headers=headers, data = payload).json()
das = date.today().toordinal() - datetime.strptime(response['SeedingDay'], '%Y-%m-%dT%H:%M:%S.%fZ').toordinal()
# 3. Get crop Koeficiente (kc) from Crop type entity
url = "http://orion:1026/v2/entities/urn:ngsi-ld:CropType:Pepper/attrs?options=keyValues"
payload = {}
headers = {
'fiware-service': 'lab',
'fiware-servicepath': '/'
}
r = requests.request("GET", url, headers=headers, data = payload).json()
kc = 0
IniDays = float(r['stageIniDays'])
DevDays = float(r['stageDevDays'])
MidDays = float(r['stageMidDays'])
LateDays = float(r['stageLateDays'])
IniKc = float(r['stageIniKc'])
MidKc = float(r['stageMidKc'])
LateKc = float(r['stageLateKc'])
if das <= IniDays:
kc = IniKc
elif das > IniDays and das <= IniDays + DevDays:
kc = IniKc + ((MidKc-IniKc) * (das - IniDays)) / (DevDays)
elif das > IniDays + DevDays and das <= IniDays + DevDays + MidDays:
kc = MidKc
elif das > IniDays + DevDays + MidDays and das <= IniDays + DevDays + MidDays + LateDays:
kc = LateKc + ((MidKc - LateKc) * (IniDays + DevDays + MidDays + LateDays - das)) / (LateDays)
else:
print('error')
# 4. Send irrigation recommendation based on Eto * Kc
payload = json.dumps({
| |
<filename>pvfactors/geometry/pvarray.py
"""Module containing PV array classes, which will use PV rows and ground
geometries."""
import numpy as np
from pvfactors.config import X_ORIGIN_PVROWS, VIEW_DICT, DISTANCE_TOLERANCE
from pvfactors.geometry.base import \
_get_solar_2d_vectors, BasePVArray, _get_rotation_from_tilt_azimuth
from pvfactors.geometry.timeseries import TsPVRow, TsGround
from shapely.geometry import Point
from pvfactors import PVFactorsError
class OrderedPVArray(BasePVArray):
"""An ordered PV array has a flat horizontal ground, and pv rows which
are all at the same height, with the same surface tilt and azimuth angles,
and also all equally spaced. These simplifications allow faster and easier
calculations. In the ordered PV array, the list of PV rows must be
ordered from left to right (along the x-axis) in the 2D geometry."""
y_ground = 0. # ground will be at height = 0 by default
def __init__(self, axis_azimuth=None, gcr=None, pvrow_height=None,
n_pvrows=None, pvrow_width=None, param_names=None,
cut=None):
"""Initialize ordered PV array.
List of PV rows will be ordered from left to right.
Parameters
----------
axis_azimuth : float, optional
Azimuth angle of rotation axis [deg] (Default = None)
gcr : float, optional
Ground coverage ratio (Default = None)
pvrow_height : float, optional
Unique height of all PV rows in [m] (Default = None)
n_pvrows : int, optional
Number of PV rows in the PV array (Default = None)
pvrow_width : float, optional
Width of the PV rows in the 2D plane in [m] (Default = None)
param_names : list of str, optional
List of surface parameter names for the PV surfaces
(Default = None)
cut : dict, optional
Nested dictionary that tells if some PV row sides need to be
discretized, and how (Default = None).
Example: {1: {'front': 5}}, will create 5 segments on the front
side of the PV row with index 1
"""
# Initialize base parameters: common to all sorts of PV arrays
super(OrderedPVArray, self).__init__(axis_azimuth=axis_azimuth)
# These are the invariant parameters of the PV array
self.gcr = gcr
self.height = pvrow_height
self.distance = (pvrow_width / gcr
if (pvrow_width is not None) and (gcr is not None)
else None)
self.width = pvrow_width
self.n_pvrows = n_pvrows
self.param_names = [] if param_names is None else param_names
self.cut = {} if cut is None else cut
# These attributes will be updated at fitting time
self.solar_2d_vectors = None
self.ts_pvrows = None
self.ts_ground = None
self.n_states = None
self.has_direct_shading = None
self.rotation_vec = None
self.shaded_length_front = None
self.shaded_length_back = None
# These attributes will be transformed at each iteration
self.pvrows = None
self.ground = None
self.front_neighbors = None
self.back_neighbors = None
self.edge_points = None
self.is_flat = None
@classmethod
def init_from_dict(cls, pvarray_params, param_names=None):
"""Instantiate ordered PV array from dictionary of parameters
Parameters
----------
pvarray_params : dict
The parameters defining the PV array
param_names : list of str, optional
List of parameter names to pass to surfaces (Default = None)
Returns
-------
OrderedPVArray
Initialized Ordered PV Array
"""
return cls(axis_azimuth=pvarray_params['axis_azimuth'],
gcr=pvarray_params['gcr'],
pvrow_height=pvarray_params['pvrow_height'],
n_pvrows=pvarray_params['n_pvrows'],
pvrow_width=pvarray_params['pvrow_width'],
cut=pvarray_params.get('cut', {}),
param_names=param_names)
@classmethod
def fit_from_dict_of_scalars(cls, pvarray_params, param_names=None):
"""Instantiate, and fit ordered PV array using dictionary
of scalar inputs.
Parameters
----------
pvarray_params : dict
The parameters used for instantiation, fitting, and transformation
param_names : list of str, optional
List of parameter names to pass to surfaces (Default = None)
Returns
-------
OrderedPVArray
Initialized, and fitted Ordered PV Array
"""
# Create pv array
pvarray = cls.init_from_dict(pvarray_params,
param_names=param_names)
# Fit pv array to scalar values
solar_zenith = np.array([pvarray_params['solar_zenith']])
solar_azimuth = np.array([pvarray_params['solar_azimuth']])
surface_tilt = np.array([pvarray_params['surface_tilt']])
surface_azimuth = np.array([pvarray_params['surface_azimuth']])
pvarray.fit(solar_zenith, solar_azimuth,
surface_tilt, surface_azimuth)
return pvarray
@classmethod
def transform_from_dict_of_scalars(cls, pvarray_params, param_names=None):
"""Instantiate, fit and transform ordered PV array using dictionary
of scalar inputs.
Parameters
----------
pvarray_params : dict
The parameters used for instantiation, fitting, and transformation
param_names : list of str, optional
List of parameter names to pass to surfaces (Default = None)
Returns
-------
OrderedPVArray
Initialized, fitted, and transformed Ordered PV Array
"""
# Create pv array
pvarray = cls.fit_from_dict_of_scalars(pvarray_params,
param_names=param_names)
# Transform pv array to first index (since scalar values were passed)
pvarray.transform(0)
return pvarray
def fit(self, solar_zenith, solar_azimuth, surface_tilt, surface_azimuth):
"""Fit the ordered PV array to the list of solar and surface angles.
All intermediate PV array results necessary to build the geometries
will be calculated here using vectorization as much as possible.
Intemediate results include: PV row coordinates for all timestamps,
ground element coordinates for all timestamps, cases of direct
shading, ...
Parameters
----------
solar_zenith : array-like or float
Solar zenith angles [deg]
solar_azimuth : array-like or float
Solar azimuth angles [deg]
surface_tilt : array-like or float
Surface tilt angles, from 0 to 180 [deg]
surface_azimuth : array-like or float
Surface azimuth angles [deg]
"""
self.n_states = len(solar_zenith)
# Calculate rotation angles
rotation_vec = _get_rotation_from_tilt_azimuth(
surface_azimuth, self.axis_azimuth, surface_tilt)
# Save rotation vector
self.rotation_vec = rotation_vec
# Calculate the solar 2D vectors for all timestamps
self.solar_2d_vectors = _get_solar_2d_vectors(
solar_zenith, solar_azimuth, self.axis_azimuth)
# Calculate the angle made by 2D sun vector and x-axis
alpha_vec = np.arctan2(self.solar_2d_vectors[1],
self.solar_2d_vectors[0])
# Calculate the coordinates of all PV rows for all timestamps
self._calculate_pvrow_elements_coords(alpha_vec, rotation_vec)
# Calculate ground elements coordinates for all timestamps
self.ts_ground = TsGround.from_ts_pvrows_and_angles(
self.ts_pvrows, alpha_vec, rotation_vec, y_ground=self.y_ground,
flag_overlap=self.has_direct_shading,
param_names=self.param_names)
# Save surface rotation angles
self.rotation_vec = rotation_vec
def transform(self, idx):
"""
Transform the ordered PV array for the given index.
This means actually building the PV Row and Ground geometries. Note
that the list of PV rows will be ordered from left to right in the
geometry (along the x-axis), and indexed from 0 to n_pvrows - 1.
This can only be run after the ``fit()`` method.
Object attributes like ``pvrows`` and ``ground`` will be updated each
time this method is run.
Parameters
----------
idx : int
Index for which to build the simulation.
"""
if idx < self.n_states:
self.is_flat = self.rotation_vec[idx] == 0
# Create PV row geometries
self.pvrows = [ts_pvrow.at(idx) for ts_pvrow in self.ts_pvrows]
# Create ground geometry with its shadows and cut points
self.ground = self.ts_ground.at(idx)
self.edge_points = [Point(coord.at(idx))
for coord in self.ts_ground.cut_point_coords]
# Build lists of pv row neighbors, used to calculate view matrix
self.front_neighbors, self.back_neighbors = \
self._get_neighbors(self.rotation_vec[idx])
else:
msg = "Step index {} is out of range: [0 to {}]".format(
idx, self.n_states - 1)
raise PVFactorsError(msg)
    def _calculate_pvrow_elements_coords(self, alpha_vec, rotation_vec):
        """Calculate PV row coordinate elements in a vectorized way, such as
        PV row boundary coordinates and shaded lengths.

        Parameters
        ----------
        alpha_vec : array-like or float
            Angle made by 2d solar vector and x-axis [rad]
        rotation_vec : array-like or float
            Rotation angle of the PV rows [deg]
        """
        # Initialize timeseries pv rows
        self.ts_pvrows = []
        # Calculate interrow direct shading lengths
        # (sets self.shaded_length_front / self.shaded_length_back)
        self._calculate_interrow_shading(alpha_vec, rotation_vec)
        # Row centers are equally spaced along x, all at the same height
        xy_centers = [(X_ORIGIN_PVROWS + idx * self.distance,
                       self.height + self.y_ground)
                      for idx in range(self.n_pvrows)]
        # Boolean (per timestamp): rows tilted toward the left when rotation > 0
        tilted_to_left = rotation_vec > 0.
        for idx_pvrow, xy_center in enumerate(xy_centers):
            # A special treatment needs to be applied to shaded lengths for
            # the PV rows at the edge of the PV array: an edge row can only
            # be shaded from the side where it has a neighbor
            if idx_pvrow == 0:
                # the leftmost row doesn't have left neighbors, so it gets no
                # front shading when tilted left, no back shading otherwise
                shaded_length_front = np.where(tilted_to_left, 0.,
                                               self.shaded_length_front)
                shaded_length_back = np.where(tilted_to_left,
                                              self.shaded_length_back, 0.)
            elif idx_pvrow == (self.n_pvrows - 1):
                # the rightmost row doesn't have right neighbors
                # (note: comment previously and incorrectly said "does have")
                shaded_length_front = np.where(tilted_to_left,
                                               self.shaded_length_front, 0.)
                shaded_length_back = np.where(tilted_to_left, 0.,
                                              self.shaded_length_back)
            else:
                # interior rows: use calculated shaded lengths directly
                shaded_length_front = self.shaded_length_front
                shaded_length_back = self.shaded_length_back
            # Create timeseries PV rows and add it to the list
            self.ts_pvrows.append(TsPVRow.from_raw_inputs(
                xy_center, self.width, rotation_vec,
                self.cut.get(idx_pvrow, {}), shaded_length_front,
                shaded_length_back, index=idx_pvrow,
                param_names=self.param_names))
def _calculate_interrow_shading(self, alpha_vec, rotation_vec):
"""Calculate the shaded length on front and back side of PV rows when
direct shading happens, and in a vectorized way.
Parameters
----------
alpha_vec : array-like or float
Angle made by 2d solar vector and x-axis [rad]
rotation_vec : array-like or float
Rotation angle of the PV rows [deg]
"""
if self.n_pvrows > 1:
# Calculate intermediate values for direct shading
alpha_vec_deg = np.rad2deg(alpha_vec)
theta_t = 90. - rotation_vec
theta_t_rad = np.deg2rad(theta_t)
beta = theta_t + alpha_vec_deg
beta_rad = np.deg2rad(beta)
delta = self.distance * (
np.sin(theta_t_rad) - np.cos(theta_t_rad) * np.tan(beta_rad))
| |
<filename>ASur.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#************************************************************************
# --- Copyright (c) INRS 2016
# --- Institut National de la Recherche Scientifique (INRS)
# --- Copyright (c) <NAME> 2018
# ---
# --- Licensed under the Apache License, Version 2.0 (the "License");
# --- you may not use this file except in compliance with the License.
# --- You may obtain a copy of the License at
# ---
# --- http://www.apache.org/licenses/LICENSE-2.0
# ---
# --- Unless required by applicable law or agreed to in writing, software
# --- distributed under the License is distributed on an "AS IS" BASIS,
# --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# --- See the License for the specific language governing permissions and
# --- limitations under the License.
#************************************************************************
"""
Modèle de temps d'arrivée de surverses
"""
import datetime
import enum
import logging
import os
import optparse
import pytz
import sys
import traceback
try:
import addLogLevel
addLogLevel.addLoggingLevel('DUMP', logging.DEBUG + 5)
addLogLevel.addLoggingLevel('TRACE', logging.DEBUG - 5)
except AttributeError:
pass
import wx
import wx.adv as wx_adv
import wx.lib.wordwrap as wx_ww
import wx.aui as wx_AUI
import wx.html as wx_html
# Resolve the supplementary import path holding the H2D2 tool scripts.
# When running as a PyInstaller bundle, sys.frozen is set and the payload
# is unpacked under sys._MEIPASS; otherwise fall back to env variables.
if getattr(sys, 'frozen', False):
    supPath = sys._MEIPASS
else:
    try:
        # Developer setup: tools live under the INRS_DEV source tree
        supPath = os.path.join( os.environ['INRS_DEV'], 'H2D2-tools', 'script' )
    except KeyError:
        try:
            # Installed setup: explicit tools path in INRS_H2D2_TOOLS
            supPath = os.path.normpath( os.environ['INRS_H2D2_TOOLS'] )
        except KeyError:
            # Placeholder string; isdir() below will fail and raise
            supPath = 'Neither INRS_DEV nor INRS_H2D2_TOOLS defined'
if os.path.isdir(supPath):
    if supPath not in sys.path:
        sys.path.append(supPath)
else:
    raise RuntimeError('Supplementary import path not found: "%s"' % supPath)
from __about__ import __author__, __version__, __copyright__
from ASGlobalParameters import ASGlobalParameters
from ASPanelScenario import ASPanelScenario
from ASPanelPlot import ASPanelPlot
from ASPanelPath import ASPanelPath
from ASPathParameters import ASPathParameters, CLR_SRC, ELL_STL
from ASTranslator import translator
from ASEvents import ASEVT_MESSAGE, ASEVT_MOTION, ASEVT_BUTTON
from ASConst import DATE_MIN, DATE_MAX, LOCAL_TZ
import ASDlgLogger
import ASDlgParamGlobal
import ASDlgParamPath
import ASDlgTides
import ASModel
#--- Help provider for contextual help (broken!!)
# provider = wx.SimpleHelpProvider()
# wx.HelpProvider.Set(provider)
#--- Application state machines
# GlbStates: global application lifecycle (no data yet / data loaded)
GlbStates = enum.Enum('GlbStates', ('started', 'data_loaded'))
# BtnStates: toolbar button configuration (idle, active, pan tool, zoom tool)
BtnStates = enum.Enum('BtnStates', ('off', 'on', 'pan', 'zoom'))
# GlbModes: feature level chosen at startup
GlbModes = enum.Enum('GlbModes', ('standard', 'expert', 'debug'))
# Icon directory: inside the PyInstaller bundle when frozen, else next to
# this source file
if getattr(sys, 'frozen', False):
    ICON_ROOT = os.path.join(sys._MEIPASS, 'bitmaps')
else:
    ICON_ROOT = os.path.join(os.path.dirname(__file__), 'bitmaps')
licTxt = """
ASur Version %s
%s
Sous licence Apache, Version 2.0 (la "Licence") ;
vous ne pouvez pas utiliser ce fichier, sauf conformément avec la licence.
Vous pouvez obtenir une copie de la Licence sur
http://www.apache.org/licenses/LICENSE-2.0
Sauf si requis par la loi en vigueur ou par accord écrit, le logiciel distribué sous la licence est distribué "TEL QUEL", SANS GARANTIE NI CONDITION DE QUELQUE NATURE QUE CE SOIT, implicite ou explicite.
Consultez la Licence pour connaître la terminologie spécifique régissant les autorisations et les limites prévues par la licence.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
""" % (__version__, __copyright__)
appName = "ASur-2"
appTitle = "Arrivée d'une SURverse"
class ASur(wx.Frame):
CLC_DELTAS = 300
CLC_DELTAT = datetime.timedelta(seconds=CLC_DELTAS)
# ID_MDL = [ wx.Window.NewControlId() for i in range(9)]
TIMER_ID_MSG = 1000
    def __init__(self, *args, **kwds):
        """Build the main ASur frame: logging, panels, menus, toolbar,
        event bindings, and the menu/button state tables.

        Accepts the usual wx.Frame arguments plus the extra keyword
        ``appMode`` (a GlbModes value, default GlbModes.standard), which is
        popped from kwds before they are forwarded to wx.Frame.
        """
        self.appMode = kwds.pop("appMode", GlbModes.standard)
        # --- Logging: stream handler with timestamped format
        #self.logHndlr = CTTextCtrlHandler.CTTextCtrlHandler(self.txt_log)
        self.logHndlr = logging.StreamHandler()
        FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        self.logHndlr.setFormatter( logging.Formatter(FORMAT) )
        self.LOGGER = logging.getLogger("INRS.ASur") # .frame")
        self.LOGGER.addHandler(self.logHndlr)
        self.LOGGER.setLevel(logging.INFO)
        self.LOGGER.info('Start')
        wx.Frame.__init__(self, *args, **kwds)
        # --- Main widgets: AUI notebook holding the three display panels
        self.nbk_dspl = wx_AUI.AuiNotebook (self, wx.ID_ANY)
        self.pnl_pnts = ASPanelScenario (self.nbk_dspl, wx.ID_ANY)
        self.pnl_asur = ASPanelPlot (self.nbk_dspl, wx.ID_ANY)
        self.pnl_slin = ASPanelPath (self.nbk_dspl, wx.ID_ANY)
        # Lazily-created dialogs
        self.dlgHelp = None
        self.dlgParamPath = None # ASDlgParamPath.ASDlgParamPath(None)
        self.statusbar = self.CreateStatusBar(2)
        # Timer used to clear transient status-bar messages
        self.tmrMsg = wx.Timer(self, ASur.TIMER_ID_MSG)
        # Persistent configs: recent-file history and user parameters
        self.histCfg = wx.Config('ASur - File history', style=wx.CONFIG_USE_LOCAL_FILE)
        self.prmsCfg = wx.Config('ASur - Parameters', style=wx.CONFIG_USE_LOCAL_FILE)
        self.__create_menu_bar()
        self.__create_tool_bar()
        self.__set_properties()
        self.__do_layout()
        # --- Event processing
        self.Bind(wx.EVT_MENU, self.on_mnu_file_open, self.mnu_file_open)
        self.Bind(wx.EVT_MENU, self.on_mnu_file_add, self.mnu_file_add)
        self.Bind(wx.EVT_MENU_RANGE,self.on_mnu_file_hist, id=wx.ID_FILE1, id2=wx.ID_FILE9)
        # TODO: the IDs must be regenerated on every call
        # self.Bind(wx.EVT_MENU_RANGE,self.on_mnu_file_xone, id=self.ID_MDL[0], id2=self.ID_MDL[-1])
        self.Bind(wx.EVT_MENU, self.on_mnu_file_close, self.mnu_file_close)
        self.Bind(wx.EVT_MENU, self.on_mnu_file_quit, self.mnu_file_quit)
        self.Bind(wx.EVT_MENU, self.on_mnu_parm_maree, self.mnu_parm_maree)
        self.Bind(wx.EVT_MENU, self.on_mnu_parm_path, self.mnu_parm_path)
        self.Bind(wx.EVT_MENU, self.on_mnu_parm_glbx, self.mnu_parm_glbx)
        # mnu_help_reload only exists in debug mode (see __create_menu_bar)
        if self.appMode == GlbModes.debug:
            self.Bind(wx.EVT_MENU, self.on_mnu_help_reload,self.mnu_help_reload)
        self.Bind(wx.EVT_MENU, self.on_mnu_help_log, self.mnu_help_log)
        self.Bind(wx.EVT_MENU, self.on_mnu_help_help, self.mnu_help_help)
        self.Bind(wx.EVT_MENU, self.on_mnu_help_info, self.mnu_help_info)
        self.Bind(wx.EVT_MENU, self.on_mnu_help_about, self.mnu_help_about)
        self.Bind(wx.EVT_BUTTON, self.on_btn_apply, self.btn_apply)
        self.Bind(wx.EVT_TOOL, self.on_btn_rst, self.btn_rst)
        self.Bind(wx.EVT_TOOL, self.on_btn_bck, self.btn_bck)
        self.Bind(wx.EVT_TOOL, self.on_btn_fwd, self.btn_fwd)
        self.Bind(wx.EVT_TOOL, self.on_btn_pan, self.btn_pan)
        self.Bind(wx.EVT_TOOL, self.on_btn_zsl, self.btn_zsl)
        self.Bind(wx_AUI.EVT_AUINOTEBOOK_PAGE_CHANGED, self.on_page_change, self.nbk_dspl)
        self.Bind(wx.EVT_TIMER, self.cb_panel_message_clear, self.tmrMsg)
        self.Bind(ASEVT_MESSAGE, self.cb_panel_message)
        self.Bind(ASEVT_MOTION, self.cb_panel_motion)
        #self.Bind(ASEVT_BUTTON, self.on_btn_parm_path)
        # --- State tables: for each state, ([items to disable], [items to enable])
        self.mnu_states = {
            GlbStates.started : (
                [self.mnu_file, # off
                 self.mnu_parm,
                 self.mnu_help],
                [self.mnu_file, # on
                 self.mnu_parm_path,
                 self.mnu_parm_glbx,
                 self.mnu_help]),
            GlbStates.data_loaded : (
                [], # off
                [self.mnu_file, # on
                 self.mnu_parm,
                 self.mnu_help])
            }
        self.btn_states = {
            BtnStates.off : (
                [self.toolbar], # off
                [self.btn_apply]), # on
            BtnStates.on : (
                [], # off
                [self.toolbar]), # on
            BtnStates.pan : (
                [self.toolbar], # off
                [self.btn_rst,
                 self.btn_bck,
                 self.btn_fwd,
                 self.btn_pan,
                 self.btn_apply]), # on
            BtnStates.zoom : (
                [self.toolbar], # off
                [self.btn_rst,
                 self.btn_bck,
                 self.btn_fwd,
                 self.btn_zsl,
                 self.btn_apply]), # on
            }
        self.mnuState = None
        self.btnState = None
        self.__set_state(GlbStates.started, BtnStates.off)
        # --- Initialize data attributes
        self.dirname = ''       # current data directory
        self.bbModels = []      # loaded data models
        self.bbCycles = []      # tide cycles known to the loaded models
        self.__initConfig()
    def __initConfig(self):
        """Load persisted global and path parameters and push them to the
        trajectory panel (background map, shoreline, drawing parameters)."""
        gPrm = self.__getGlobalParameters()
        try:
            # Load UI translations; a missing file is simply ignored
            translator.loadFromFile(gPrm.fileTrnsl)
        except FileNotFoundError:
            pass
        pPrm = self.__getPathParameters()
        self.pnl_slin.setBackground(gPrm.projBbox, gPrm.fileBgnd, gPrm.fileShore)
        self.pnl_slin.setParameters(pPrm)
def __set_properties(self):
# --- Main title
self.SetTitle(appTitle)
self.SetSize((800, 600))
self.nbk_dspl.SetSelection(0)
# --- Status bar
self.statusbar.SetStatusWidths([-1,-1])
statusbar_fields = ["Status", "Position"]
for i, f in enumerate(statusbar_fields):
self.statusbar.SetStatusText(f, i)
def __do_layout(self):
szr_main = wx.BoxSizer(wx.HORIZONTAL)
self.nbk_dspl.AddPage(self.pnl_pnts, "Surverses")
self.nbk_dspl.AddPage(self.pnl_asur, "Graphes")
self.nbk_dspl.AddPage(self.pnl_slin, "Trajectoires")
szr_main.Add(self.nbk_dspl, 1, wx.EXPAND, 0)
self.SetSizer(szr_main)
self.Layout()
wx.CallAfter(self.nbk_dspl.SendSizeEvent)
    def __create_menu_bar(self):
        """Build the full menu bar: File (with recent-file history and the
        per-dataset close submenu), Parameters, and Help menus.

        Menu item attributes created here (mnu_file_open, mnu_parm_path,
        mnu_help_about, ...) are bound to handlers in __init__.
        """
        # --- File history (recent directories), persisted in histCfg
        self.history= wx.FileHistory(5)
        self.history.Load(self.histCfg)
        self.hist_mnu = wx.Menu()
        self.history.UseMenu(self.hist_mnu)
        self.history.AddFilesToMenu()
        # --- Submenu listing the currently loaded datasets
        # (filled later by __fillModelMenu)
        self.bbmdl_mnu = wx.Menu()
        # --- Set up menus
        self.menubar = wx.MenuBar()
        self.mnu_file = wx.Menu()
        self.mnu_file_open = wx.MenuItem(self.mnu_file, wx.ID_ANY, 'Ouvrir...\tCTRL+O', 'Sélectionner le répertoire des données - Ferme toutes les données chargées', wx.ITEM_NORMAL)
        self.mnu_file.Append(self.mnu_file_open)
        self.mnu_file_add = wx.MenuItem(self.mnu_file, wx.ID_ANY, 'Ajouter...\tCTRL+P', 'Ajouter un répertoire des données', wx.ITEM_NORMAL)
        self.mnu_file.Append(self.mnu_file_add)
        self.mnu_file.Append(wx.ID_ANY, 'Ajouter un répertoire récent\tCtrl+R', self.hist_mnu)
        self.mnu_file.AppendSeparator()
        self.mnu_file.Append(wx.ID_ANY, 'Fermer un jeu de données\tCtrl+W', self.bbmdl_mnu)
        self.mnu_file_close = wx.MenuItem(self.mnu_file, wx.ID_ANY, 'Fermer tout', 'Fermer tous les jeux de données', wx.ITEM_NORMAL)
        self.mnu_file.Append(self.mnu_file_close)
        self.mnu_file.AppendSeparator()
        self.mnu_file_quit = wx.MenuItem(self.mnu_file, wx.ID_ANY, 'Quitter\tCTRL+Q', "Quitter l'application", wx.ITEM_NORMAL)
        self.mnu_file.Append(self.mnu_file_quit)
        self.menubar.Append(self.mnu_file, 'Fichier')
        self.mnu_parm = wx.Menu()
        self.mnu_parm_maree = wx.MenuItem(self.mnu_parm, wx.ID_ANY, 'Marées...\tCTRL+M', 'Sélectionner les marées prise en compte dans le calcul', wx.ITEM_NORMAL)
        self.mnu_parm.Append(self.mnu_parm_maree)
        self.mnu_parm_path = wx.MenuItem(self.mnu_parm, wx.ID_ANY, 'Panaches...\tCTRL+T', 'Paramètres de tracé des panaches et ellipses', wx.ITEM_NORMAL)
        self.mnu_parm.Append(self.mnu_parm_path)
        self.mnu_parm_glbx = wx.MenuItem(self.mnu_parm, wx.ID_ANY, 'Globaux...', 'Paramètre globaux', wx.ITEM_NORMAL)
        self.mnu_parm.Append(self.mnu_parm_glbx)
        self.menubar.Append(self.mnu_parm, 'Paramètres')
        self.mnu_help = wx.Menu()
        # Debug-only entries; the matching Bind in __init__ is also guarded
        if self.appMode == GlbModes.debug:
            self.mnu_help_reload = wx.MenuItem(self.mnu_help, wx.ID_ANY, 'Reload module...', '', wx.ITEM_NORMAL)
            self.mnu_help.Append(self.mnu_help_reload)
        self.mnu_help_log = wx.MenuItem(self.mnu_help, wx.ID_ANY, 'Log level...', '', wx.ITEM_NORMAL)
        self.mnu_help.Append(self.mnu_help_log)
        self.mnu_help.AppendSeparator()
        self.mnu_help_help = wx.MenuItem(self.mnu_help, wx.ID_ANY, 'Aide...\tF1', '', wx.ITEM_NORMAL)
        self.mnu_help.Append(self.mnu_help_help)
        self.mnu_help_info = wx.MenuItem(self.mnu_help, wx.ID_ANY, 'Info...', '', wx.ITEM_NORMAL)
        self.mnu_help.Append(self.mnu_help_info)
        self.mnu_help_about = wx.MenuItem(self.mnu_help, wx.ID_ANY, 'À propos...', '', wx.ITEM_NORMAL)
        self.mnu_help.Append(self.mnu_help_about)
        self.menubar.Append(self.mnu_help, 'Aide')
        self.SetMenuBar(self.menubar)
def __create_tool_bar(self):
rst_bmp = wx.Bitmap(os.path.join(ICON_ROOT, 'mActionZoomFullExtent.png'), wx.BITMAP_TYPE_ANY)
bck_bmp = wx.Bitmap(os.path.join(ICON_ROOT, 'mActionMoveBackFeature.png'), wx.BITMAP_TYPE_ANY)
fwd_bmp = wx.Bitmap(os.path.join(ICON_ROOT, 'mActionMoveFeature.png'), wx.BITMAP_TYPE_ANY)
zsl_bmp = wx.Bitmap(os.path.join(ICON_ROOT, 'mActionZoomToSelected.png'), wx.BITMAP_TYPE_ANY)
pan_bmp = wx.Bitmap(os.path.join(ICON_ROOT, 'mActionPan.png'), wx.BITMAP_TYPE_ANY)
nil_bmp = wx.NullBitmap
tsize = (16,16)
self.toolbar = wx.ToolBar(self)
self.toolbar.SetToolBitmapSize(tsize)
self.btn_rst = self.toolbar.AddTool(wx.ID_ANY, "Home", rst_bmp, nil_bmp, shortHelp="Home", longHelp="Long help for 'Home'")
self.btn_bck = self.toolbar.AddTool(wx.ID_ANY, "Move backward", bck_bmp, nil_bmp, shortHelp="Move backward", longHelp="Long help for 'Forward'")
self.btn_fwd = self.toolbar.AddTool(wx.ID_ANY, "Move forward", fwd_bmp, nil_bmp, shortHelp="Move forward", longHelp="Long help for 'Backward'")
self.btn_pan = self.toolbar.AddTool(wx.ID_ANY, "Pan", pan_bmp, nil_bmp, shortHelp="Pan", longHelp="Long help for 'Pan'")
self.btn_zsl = self.toolbar.AddTool(wx.ID_ANY, "Zoom", zsl_bmp, nil_bmp, shortHelp="Zoom to selection", longHelp="Long help for 'Zoom'")
self.toolbar.AddStretchableSpace()
self.toolbar.Realize()
self.btn_apply = wx.Button(self.toolbar, wx.ID_APPLY, 'Affiche')
self.toolbar.AddControl(self.btn_apply)
self.SetToolBar(self.toolbar)
def __set_mnu_state(self, status):
for it in self.mnu_states[status][0]:
if isinstance(it, wx.Menu):
for m in it.GetMenuItems(): m.Enable(False)
else:
it.Enable(False)
for it in self.mnu_states[status][1]:
if isinstance(it, wx.Menu):
for m in it.GetMenuItems(): m.Enable(True)
else:
it.Enable(True)
self.mnuState = status
def __set_btn_state(self, status):
for it in self.btn_states[status][0]:
if isinstance(it, wx.ToolBar):
for i in range(it.GetToolsCount()):
id = it.GetToolByPos(i).GetId()
self.toolbar.EnableTool(id, False)
else:
id = it.GetId()
self.toolbar.EnableTool(id, False)
for it in self.btn_states[status][1]:
if isinstance(it, wx.ToolBar):
for i in range(it.GetToolsCount()):
id = it.GetToolByPos(i).GetId()
self.toolbar.EnableTool(id, True)
else:
id = it.GetId()
self.toolbar.EnableTool(id, True)
self.btnState = status
def __set_state(self, glb, btn):
# self.LOGGER.info('Global state: %s' % glb)
self.__set_mnu_state(glb)
self.__set_btn_state(btn)
    def __fillModelMenu(self):
        """Rebuild the 'close one dataset' submenu from self.bbModels and
        re-bind the menu-range handler to the freshly generated IDs."""
        # Clear previous entries
        # NOTE(review): this deletes while iterating GetMenuItems(); appears
        # to rely on wx returning a snapshot list -- confirm on all platforms
        for item in self.bbmdl_mnu.GetMenuItems():
            self.bbmdl_mnu.Delete(item)
        id_mdl = []
        for bbModel in self.bbModels:
            # One entry per loaded dataset, labelled by its directory name
            fpath = bbModel.getDataDir()
            label = os.path.basename(fpath)
            id = wx.Window.NewControlId()
            self.bbmdl_mnu.Append(id, label, helpString=fpath)
            id_mdl.append(id)
        if id_mdl:
            # Bind the whole contiguous ID range to a single handler
            self.Bind(wx.EVT_MENU_RANGE,self.on_mnu_file_xone, id=id_mdl[0], id2=id_mdl[-1])
def __fillPoints(self):
addTides = self.appMode is GlbModes.expert
self.pnl_pnts.fillTree(self.bbModels, self.bbCycles, addTides)
def __getCycles(self, bbModel):
"""
Returns all know cycles contained in the data
"""
uniquer = set()
for pnt in bbModel.getPointNames():
for tide in bbModel.getPointTideNames(pnt):
uniquer.add(tide)
return [ item for item in sorted(uniquer) ]
def __getAllCycles(self):
| |
<reponame>marcelm/dnaio
import os
import shutil
import subprocess
import sys
from io import BytesIO
from tempfile import mkdtemp
from textwrap import dedent
from pytest import raises, mark
import dnaio
from dnaio import (
FileFormatError, FastaFormatError, FastqFormatError,
FastaReader, FastqReader, InterleavedPairedEndReader,
FastaWriter, FastqWriter, InterleavedPairedEndWriter,
TwoFilePairedEndReader,
)
from dnaio import record_names_match, Sequence
from dnaio.writers import FileWriter
from dnaio.readers import BinaryFileReader
# Shared fixtures mirroring the on-disk files tests/data/simple.fast{q,a}
# Expected records for tests/data/simple.fastq (name, sequence, qualities)
simple_fastq = [
    Sequence("first_sequence", "SEQUENCE1", ":6;;8<=:<"),
    Sequence("second_sequence", "SEQUENCE2", "83<??:(61")
]
# FASTA counterparts: same names/sequences, no quality values
simple_fasta = [Sequence(x.name, x.sequence, None) for x in simple_fastq]
# Minimal two-record FASTQ payload used by in-memory tests
tiny_fastq = b'@r1\nACG\n+\nHHH\n@r2\nT\n+\n#\n'
class TestSequence:
    """Unit tests for the Sequence record type."""

    def test_too_many_qualities(self):
        # A quality string longer than the sequence must be rejected
        with raises(ValueError):
            Sequence(name="name", sequence="ACGT", qualities="#####")

    def test_fastq_bytes(self):
        record = Sequence("name", "ACGT", "====")
        assert record.fastq_bytes() == b"@name\nACGT\n+\n====\n"

    def test_fastq_bytes_two_headers(self):
        record = Sequence("name", "ACGT", "====")
        expected = b"@name\nACGT\n+name\n====\n"
        assert record.fastq_bytes_two_headers() == expected
class TestFastaReader:
    """Tests for FastaReader: file and BytesIO input, comments, format
    errors, linebreak handling, and context-manager behavior."""

    def test_file(self):
        with FastaReader("tests/data/simple.fasta") as f:
            reads = list(f)
        assert reads == simple_fasta

    def test_bytesio(self):
        fasta = BytesIO(b">first_sequence\nSEQUENCE1\n>second_sequence\nSEQUENCE2\n")
        reads = list(FastaReader(fasta))
        assert reads == simple_fasta

    def test_with_comments(self):
        # Leading '#' comment lines must be skipped
        fasta = BytesIO(dedent(
            """
            # a comment
            # another one
            >first_sequence
            SEQUENCE1
            >second_sequence
            SEQUENCE2
            """).encode())
        reads = list(FastaReader(fasta))
        assert reads == simple_fasta

    def test_wrong_format(self):
        # A non-comment, non-header line before the first record is an error
        fasta = BytesIO(dedent(
            """# a comment
            # another one
            unexpected
            >first_sequence
            SEQUENCE1
            >second_sequence
            SEQUENCE2
            """).encode())
        with raises(FastaFormatError) as info:
            list(FastaReader(fasta))
        # Error is reported on the offending line (0-based)
        assert info.value.line == 2

    def test_fastareader_keeplinebreaks(self):
        # keep_linebreaks=True preserves '\n' inside multi-line sequences
        with FastaReader("tests/data/simple.fasta", keep_linebreaks=True) as f:
            reads = list(f)
        assert reads[0] == simple_fasta[0]
        assert reads[1].sequence == 'SEQUEN\nCE2'

    def test_context_manager(self):
        filename = "tests/data/simple.fasta"
        with open(filename, 'rb') as f:
            assert not f.closed
            _ = list(dnaio.open(f))
            assert not f.closed
        # Leaving the outer 'with' closes the underlying file
        assert f.closed
        with FastaReader(filename) as sr:
            tmp_sr = sr
            assert not sr._file.closed
            _ = list(sr)
            assert not sr._file.closed
        # After exit, the reader drops its file reference
        assert tmp_sr._file is None
        # Open it a second time
        with FastaReader(filename):
            pass
class TestFastqReader:
def test_fastqreader(self):
with FastqReader("tests/data/simple.fastq") as f:
reads = list(f)
assert reads == simple_fastq
@mark.parametrize("buffer_size", [1, 2, 3, 5, 7, 10, 20])
def test_fastqreader_buffersize(self, buffer_size):
with FastqReader("tests/data/simple.fastq", buffer_size=buffer_size) as f:
reads = list(f)
assert reads == simple_fastq
def test_fastqreader_buffersize_too_small(self):
with raises(ValueError) as e:
with FastqReader("tests/data/simple.fastq", buffer_size=0) as f:
_ = list(f) # pragma: no cover
assert "buffer size too small" in e.value.args[0]
def test_fastqreader_dos(self):
# DOS line breaks
with open('tests/data/dos.fastq', 'rb') as f:
assert b'\r\n' in f.read()
with FastqReader("tests/data/dos.fastq") as f:
dos_reads = list(f)
with FastqReader("tests/data/small.fastq") as f:
unix_reads = list(f)
assert dos_reads == unix_reads
def test_fastq_wrongformat(self):
with raises(FastqFormatError) as info:
with FastqReader("tests/data/withplus.fastq") as f:
list(f) # pragma: no cover
assert info.value.line == 2
def test_empty_fastq(self):
with FastqReader(BytesIO(b'')) as fq:
assert list(fq) == []
@mark.parametrize('s,line', [
(b'@', 0),
(b'@r', 0),
(b'@r1', 0),
(b'@r1\n', 1),
(b'@r1\nA', 1),
(b'@r1\nAC', 1),
(b'@r1\nACG', 1),
(b'@r1\nACG\n', 2),
(b'@r1\nACG\n+', 2),
(b'@r1\nACG\n+\n', 3),
(b'@r1\nACG\n+\nH', 3),
(b'@r1\nACG\n+\nHH', 3),
(b'@r1\nACG\n+\nHHH\n@', 4),
(b'@r1\nACG\n+\nHHH\n@r', 4),
(b'@r1\nACG\n+\nHHH\n@r2', 4),
(b'@r1\nACG\n+\nHHH\n@r2\n', 5),
(b'@r1\nACG\n+\nHHH\n@r2\nT', 5),
(b'@r1\nACG\n+\nHHH\n@r2\nT\n', 6),
(b'@r1\nACG\n+\nHHH\n@r2\nT\n+', 6),
(b'@r1\nACG\n+\nHHH\n@r2\nT\n+\n', 7),
])
def test_fastq_incomplete(self, s, line):
fastq = BytesIO(s)
with raises(FastqFormatError) as info:
with FastqReader(fastq) as fq:
list(fq)
assert info.value.line == line
def test_half_record_line_numbers(self):
fastq = BytesIO(b'@r\nACG\n+\nHH\n')
# Choose the buffer size such that only parts of the record fit
# We want to ensure that the line number is reset properly
# after the record has been half-parsed
buffer_size = len('@r\nACG\n+\n')
with raises(FastqFormatError) as info:
with FastqReader(fastq, buffer_size=buffer_size) as fq:
list(fq) # pragma: no cover
assert 'Length of sequence and qualities differ' in info.value.message
assert info.value.line == 3
@mark.parametrize('s,line', [
    (b'@r1\nACG\n+\nH#HH\n@r2\nT\n+\nH\n', 3),
    (b'@r1\nACG\n+\n#H\n@r2\nT\n+\nH\n', 3),
    (b'@r1\nACG\n+\nHHH\n@r2\nT\n+\nHH\n', 7),
    (b'@r1\nACG\n+\nHHH\n@r2\nT\n+\n\n', 7),
])
def test_differing_lengths(self, s, line):
    """Sequence/quality length mismatches are reported at the right line."""
    with raises(FastqFormatError) as exc_info:
        with FastqReader(BytesIO(s)) as reader:
            list(reader)
    assert exc_info.value.line == line
def test_missing_final_newline(self):
    """A record with no trailing newline is still accepted (current policy)."""
    fastq = BytesIO(b'@r1\nA\n+\nH')
    with dnaio.open(fastq) as reader:
        assert list(reader) == [Sequence('r1', 'A', 'H')]
def test_not_opened_as_binary(self):
    """Text-mode file objects are rejected; binary mode is required."""
    with open('tests/data/simple.fastq', 'rt') as handle:
        with raises(ValueError):
            list(dnaio.open(handle))
def test_context_manager(self):
    """Readers close files they opened themselves, but not caller-owned handles."""
    filename = "tests/data/simple.fastq"
    with open(filename, 'rb') as f:
        assert not f.closed
        _ = list(dnaio.open(f))
        # dnaio must not close a file object it did not open itself
        assert not f.closed
    # ...the enclosing with-statement does close it
    assert f.closed

    with FastqReader(filename) as sr:
        tmp_sr = sr
        assert not sr._file.closed
        _ = list(sr)
        assert not sr._file.closed
    # Exiting the with-block closes and detaches the underlying file object
    assert tmp_sr._file is None
def test_two_header_detection(self):
    """two_headers is True only when the '+' line repeats the record header."""
    with FastqReader(BytesIO(b'@r1\nACG\n+r1\nHHH\n@r2\nT\n+r2\n#\n')) as reader:
        assert reader.two_headers
        list(reader)
    with FastqReader(BytesIO(b'@r1\nACG\n+\nHHH\n@r2\nT\n+r2\n#\n')) as reader:
        assert not reader.two_headers
        list(reader)
def test_second_header_not_equal(self):
    """A '+' line whose text differs from the header is a format error."""
    with raises(FastqFormatError) as exc_info:
        with FastqReader(BytesIO(b'@r1\nACG\n+xy\n')) as reader:
            list(reader)  # pragma: no cover
    assert "Sequence descriptions don't match" in exc_info.value.message
class TestOpen:
    """Tests for dnaio.open format autodetection and writer selection."""

    def setup(self):
        self._tmpdir = mkdtemp()

    def teardown(self):
        shutil.rmtree(self._tmpdir)

    def test_sequence_reader(self):
        """Formats are detected from file names, file objects and raw buffers."""
        with dnaio.open("tests/data/simple.fastq") as reader:
            assert list(reader) == simple_fastq
        with dnaio.open("tests/data/simple.fasta") as reader:
            assert list(reader) == simple_fasta
        with open("tests/data/simple.fastq", 'rb') as handle:
            assert list(dnaio.open(handle)) == simple_fastq
        # Use BytesIO so no name attribute is available for detection
        for path, expected in (
                ("tests/data/simple.fastq", simple_fastq),
                ("tests/data/simple.fasta", simple_fasta)):
            with open(path, 'rb') as handle:
                bio = BytesIO(handle.read())
            assert list(dnaio.open(bio)) == expected

    def test_autodetect_fasta_format(self, tmpdir):
        path = str(tmpdir.join('tmp.fasta'))
        with dnaio.open(path, mode='w') as writer:
            assert isinstance(writer, FastaWriter)
            for record in simple_fastq:
                writer.write(record)
        with dnaio.open(path) as reader:
            assert list(reader) == simple_fasta

    def test_write_qualities_to_fasta(self):
        path = os.path.join(self._tmpdir, 'tmp.fasta')
        # qualities=True must still produce a FASTA writer for a .fasta path
        with dnaio.open(path, mode='w', qualities=True) as writer:
            assert isinstance(writer, FastaWriter)
            for record in simple_fastq:
                writer.write(record)
        with dnaio.open(path) as reader:
            assert list(reader) == simple_fasta

    def test_autodetect_fastq_format(self):
        path = os.path.join(self._tmpdir, 'tmp.fastq')
        with dnaio.open(path, mode='w') as writer:
            assert isinstance(writer, FastqWriter)
            for record in simple_fastq:
                writer.write(record)
        with dnaio.open(path) as reader:
            assert list(reader) == simple_fastq

    def test_autodetect_fastq_weird_name(self):
        path = os.path.join(self._tmpdir, 'tmp.fastq.gz')
        with dnaio.open(path, mode='w') as writer:
            assert isinstance(writer, FastqWriter)
            for record in simple_fastq:
                writer.write(record)
        # Content-based detection must still work with an uninformative name
        weird_path = os.path.join(self._tmpdir, 'tmp.weird.gz')
        os.rename(path, weird_path)
        with dnaio.open(weird_path) as reader:
            assert list(reader) == simple_fastq

    def test_fastq_qualities_missing(self):
        path = os.path.join(self._tmpdir, 'tmp.fastq')
        # FASTQ output without qualities is contradictory and must fail
        with raises(ValueError):
            with dnaio.open(path, mode='w', qualities=False):
                pass
class TestInterleavedReader:
    """Tests for reading interleaved paired-end FASTQ."""

    def test(self):
        expected = [
            (
                Sequence('read1/1 some text', 'TTATTTGTCTCCAGC', '##HHHHHHHHHHHHH'),
                Sequence('read1/2 other text', 'GCTGGAGACAAATAA', 'HHHHHHHHHHHHHHH'),
            ),
            (
                Sequence('read3/1', 'CCAACTTGATATTAATAACA', 'HHHHHHHHHHHHHHHHHHHH'),
                Sequence('read3/2', 'TGTTATTAATATCAAGTTGG', '#HHHHHHHHHHHHHHHHHHH'),
            ),
        ]
        # The dedicated reader class and dnaio.open(interleaved=True) must agree
        with InterleavedPairedEndReader("tests/data/interleaved.fastq") as reader:
            assert list(reader) == expected
        with dnaio.open("tests/data/interleaved.fastq", interleaved=True) as reader:
            assert list(reader) == expected

    def test_missing_partner(self):
        stream = BytesIO(b'@r1\nACG\n+\nHHH\n')
        with raises(FileFormatError) as exc_info:
            with InterleavedPairedEndReader(stream) as reader:
                list(reader)
        assert "Interleaved input file incomplete" in exc_info.value.message

    def test_incorrectly_paired(self):
        stream = BytesIO(b'@r1/1\nACG\n+\nHHH\n@wrong_name\nTTT\n+\nHHH\n')
        with raises(FileFormatError) as exc_info:
            with InterleavedPairedEndReader(stream) as reader:
                list(reader)
        assert "Reads are improperly paired" in exc_info.value.message
class TestFastaWriter:
    """Tests for FastaWriter output formatting."""

    def setup(self):
        self._tmpdir = mkdtemp()
        self.path = os.path.join(self._tmpdir, 'tmp.fasta')

    def teardown(self):
        shutil.rmtree(self._tmpdir)

    def _written(self):
        # Helper: read back whatever the writer produced on disk.
        with open(self.path) as handle:
            return handle.read()

    def test(self):
        with FastaWriter(self.path) as writer:
            writer.write("name", "CCATA")
            writer.write("name2", "HELLO")
        assert writer._file.closed
        assert self._written() == '>name\nCCATA\n>name2\nHELLO\n'

    def test_linelength(self):
        with FastaWriter(self.path, line_length=3) as writer:
            writer.write("r1", "ACG")
            writer.write("r2", "CCAT")
            writer.write("r3", "TACCAG")
        assert writer._file.closed
        assert self._written() == '>r1\nACG\n>r2\nCCA\nT\n>r3\nTAC\nCAG\n'

    def test_write_sequence_object(self):
        with FastaWriter(self.path) as writer:
            writer.write(Sequence("name", "CCATA"))
            writer.write(Sequence("name2", "HELLO"))
        assert writer._file.closed
        assert self._written() == '>name\nCCATA\n>name2\nHELLO\n'

    def test_write_to_file_like_object(self):
        bio = BytesIO()
        with FastaWriter(bio) as writer:
            writer.write(Sequence("name", "CCATA"))
            writer.write(Sequence("name2", "HELLO"))
        assert bio.getvalue() == b'>name\nCCATA\n>name2\nHELLO\n'
        # Caller-provided streams must remain open after the writer exits
        assert not bio.closed
        assert not writer._file.closed

    def test_write_zero_length_sequence(self):
        bio = BytesIO()
        with FastaWriter(bio) as writer:
            writer.write(Sequence("name", ""))
        assert bio.getvalue() == b'>name\n\n', '{!r}'.format(bio.getvalue())
class TestFastqWriter:
    """Tests for FastqWriter output formatting."""

    def setup(self):
        self._tmpdir = mkdtemp()
        self.path = os.path.join(self._tmpdir, 'tmp.fastq')

    def teardown(self):
        shutil.rmtree(self._tmpdir)

    def test(self):
        with FastqWriter(self.path) as writer:
            writer.writeseq("name", "CCATA", "!#!#!")
            writer.writeseq("name2", "HELLO", "&&&!&&")
        assert writer._file.closed
        with open(self.path) as handle:
            assert handle.read() == '@name\nCCATA\n+\n!#!#!\n@name2\nHELLO\n+\n&&&!&&\n'

    def test_twoheaders(self):
        # two_headers=True repeats the record name on the '+' line
        with FastqWriter(self.path, two_headers=True) as writer:
            writer.write(Sequence("name", "CCATA", "!#!#!"))
            writer.write(Sequence("name2", "HELLO", "&&&!&"))
        assert writer._file.closed
        with open(self.path) as handle:
            assert handle.read() == '@name\nCCATA\n+name\n!#!#!\n@name2\nHELLO\n+name2\n&&&!&\n'

    def test_write_to_file_like_object(self):
        bio = BytesIO()
        with FastqWriter(bio) as writer:
            writer.writeseq("name", "CCATA", "!#!#!")
            writer.writeseq("name2", "HELLO", "&&&!&&")
        assert bio.getvalue() == b'@name\nCCATA\n+\n!#!#!\n@name2\nHELLO\n+\n&&&!&&\n'
class TestInterleavedWriter:
    """Tests for writing interleaved paired-end FASTQ."""

    def test(self):
        pairs = [
            (
                Sequence('A/1 comment', 'TTA', '##H'),
                Sequence('A/2 comment', 'GCT', 'HH#'),
            ),
            (
                Sequence('B/1', 'CC', 'HH'),
                Sequence('B/2', 'TG', '#H'),
            ),
        ]
        bio = BytesIO()
        with InterleavedPairedEndWriter(bio) as writer:
            for read1, read2 in pairs:
                writer.write(read1, read2)
        # Records must alternate R1/R2 in input order
        assert bio.getvalue() == (
            b'@A/1 comment\nTTA\n+\n##H\n'
            b'@A/2 comment\nGCT\n+\nHH#\n'
            b'@B/1\nCC\n+\nHH\n'
            b'@B/2\nTG\n+\n#H\n'
        )
class TestPairedSequenceReader:
def test_read(self):
    """A two-file paired reader yields tuples of records read in lockstep."""
    first = BytesIO(b'@r1\nACG\n+\nHHH\n')
    second = BytesIO(b'@r2\nGTT\n+\n858\n')
    with TwoFilePairedEndReader(first, second) as reader:
        assert list(reader) == [
            (Sequence("r1", "ACG", "HHH"), Sequence("r2", "GTT", "858")),
        ]
def test_record_names_match(self):
    """Names match on the first whitespace-separated token; comments are ignored."""
    matching_pairs = [
        ('abc', 'abc'),
        ('abc def', 'abc'),
        ('abc def', 'abc ghi'),
        ('abc', 'abc ghi'),
    ]
    for name1, name2 in matching_pairs:
        assert record_names_match(name1, name2)
    assert not record_names_match('abc', 'xyz')
def test_record_names_match_with_ignored_trailing_12(self):
match = record_names_match
assert match('abc/1', 'abc/2')
assert match('abc.1', 'abc.2')
assert match('abc1', 'abc2')
assert match('abc2', 'abc1')
assert match('abc1 def', 'abc1 ghi')
assert match('abc1 | |
q.W[m]['prodT'] = np.dot(q.W[m]['mean'].T, q.W[m]['mean']) + self.d[m]*q.W[m]['cov']
else:
print ('Cov W' + str(m) + ' is not invertible, not updated')
del w_cov
def update_b(self, m: int) -> None:
    """Updates the variable b.

    Closed-form mean-field update of the per-view bias term b for view
    ``m``: posterior covariance, mean, and second moment <b^T b>.
    NOTE(review): presumably the SSHIBA variational update — confirm
    against the model derivation.

    Parameters
    ----------
    __m: int.
        This value indicates which of the input views is updated.
    """
    q = self.q_dist
    # Isotropic posterior covariance: (1 + N * E[tau_m])^-1 * I_d
    q.b[m]['cov'] = (1 + self.n_max * q.tau_mean(m))**(-1) * np.eye(self.d[m])
    # Mean: E[tau_m] * sum_n (x_n - Z W^T) projected through the covariance
    q.b[m]['mean'] = q.tau_mean(m) * np.dot(np.sum(np.subtract(self.X[m]['mean'], np.dot(q.Z['mean'], q.W[m]['mean'].T)), axis=0)[np.newaxis,:], q.b[m]['cov'])
    # E[b^T b] = ||mean||^2 + d * var
    q.b[m]['prodT'] = np.sum(q.b[m]['mean']**2) + self.d[m]*q.b[m]['cov'][0,0] #mean of a noncentral chi-squared distribution
def update_alpha(self, m: int) -> None:
    """Updates the variable alpha.

    Gamma-posterior update of the ARD precisions alpha of view ``m``.
    Both parameters are stored divided by d[m], matching the
    normalisation used by the other update methods in this class.

    Parameters
    ----------
    __m: int.
        This value indicates which of the input views is updated.
    """
    q = self.q_dist
    q.alpha[m]['a'] = (self.hyper.alpha_a[m] + 0.5 * self.d[m])/(self.d[m])
    if self.sparse[m]:
        # Sparse views use the gamma-weighted second moment of W
        prod = q.W[m]['prodTgamma']
    else:
        # Dense views use the diagonal of <W^T W>
        prod = np.diag(q.W[m]['prodT'])
    q.alpha[m]['b'] = (self.hyper.alpha_b[m] + 0.5 * prod)/(self.d[m])
def update_tau(self, m: int) -> None:
    """Updates the variable tau.

    Gamma-posterior update of the noise precision tau of view ``m``.
    The 'b' parameter accumulates the expected squared reconstruction
    error <||X - Z W^T - b||^2> expanded into its trace terms; both
    parameters are normalised by d[m] * n_max.

    Parameters
    ----------
    __m: int.
        This value indicates which of the input views is updated.
    """
    q = self.q_dist
    q.tau[m]['a'] = (self.hyper.tau_a[m] + 0.5 * self.d[m]*self.n_max)/(self.d[m]*self.n_max)
    q.tau[m]['b'] = (self.hyper.tau_b[m] + 0.5 *(np.trace(self.X[m]['prodT']) + np.trace(np.dot(q.W[m]['prodT'],q.Z['prodT']))
                                                 - 2 * np.trace(np.linalg.multi_dot([q.W[m]['mean'], q.Z['mean'].T,self.X[m]['mean']]))
                                                 + 2 * np.sum(np.linalg.multi_dot([q.Z['mean'], q.W[m]['mean'].T,q.b[m]['mean'].T]))
                                                 - 2 *np.sum(np.dot(self.X[m]['mean'],q.b[m]['mean'].T))
                                                 + self.n_max * q.b[m]['prodT'] ))/(self.d[m]*self.n_max)
def update_gamma(self, m: int) -> None:
    """Updates the variable gamma.

    Gamma-posterior update of the sparsity precisions gamma of view
    ``m`` (used by sparse views); parameters are normalised by the
    number of latent factors Kc.

    Parameters
    ----------
    __m: int.
        This value indicates which of the input views is updated.
    """
    q = self.q_dist
    q.gamma[m]['a'] = (self.hyper.gamma_a[m] + 0.5 * q.Kc)/q.Kc
    q.gamma[m]['b'] = (self.hyper.gamma_b[m] + 0.5 *q.W[m]['prodTalpha'])/q.Kc
def update_xs(self,m): #Semisupervised
    """Update the posterior over the unobserved data X* of view `m`.

    Used in the semisupervised setting: the predictive mean is the
    model reconstruction Z W^T + b, with isotropic covariance given by
    the inverse expected noise precision.

    Parameters
    ----------
    __m: int.
        This value indicates which of the input views is updated.
    """
    q = self.q_dist
    # Isotropic covariance: E[tau_m]^-1 * I_d
    q.XS[m]['cov'] = (q.tau_mean(m)**(-1)*np.eye(self.d[m])).astype(float)
    # Predictive mean of the reconstruction for this view
    q.XS[m]['mean'] = np.dot(q.Z['mean'], q.W[m]['mean'].T) + q.b[m]['mean']
def update_t(self, m: int) -> None:
    """Updates the variable t*.

    Bernoulli predictive for the unobserved labels t* of view ``m``
    (semisupervised case): mean is sigmoid(E[x]); variance is
    sigma(x)*(1-sigma(x)) computed as exp(x - 2*log(1+e^x)) to stay in
    log-space for numerical stability.

    Parameters
    ----------
    __m: int.
        This value indicates which of the input views is updated.
    """
    q = self.q_dist
    # mean: Bernoulli probability via the logistic sigmoid of E[x]
    q.tS[m]['mean'] = self.sigmoid(self.X[m]['mean'])
    # cov: e^(x - 2 log(1+e^x)) == sigma(x) * (1 - sigma(x))
    q.tS[m]['cov'] = np.exp(np.subtract(self.X[m]['mean'], 2*np.log((1 + np.exp(self.X[m]['mean'])))))
    # sum(log(det(X))): accumulated log-determinant of the diagonal covariances
    q.tS[m]['sumlogdet'] = np.sum(np.log(q.tS[m]['cov']))
def update_x(self, m: int) -> None:
    """Updates the variable X.

    Variational update of the latent X for a multilabel (Bernoulli)
    view. NOTE(review): `lambda_func(xi)` is presumably the Jaakkola
    bound's lambda(xi) term — confirm against its definition elsewhere
    in this class.

    Parameters
    ----------
    __m: int.
        This value indicates which of the input views is updated.
    """
    q = self.q_dist
    # Diagonal covariance: (E[tau] + 2*lambda(xi))^-1, elementwise
    self.X[m]['cov'] = (q.tau_mean(m) + 2*self.lambda_func(q.xi[m]))**(-1)
    # Mean combines the label evidence (t - 1/2) with the reconstruction
    self.X[m]['mean'] = (self.t[m]['mean'] - 0.5 + q.tau_mean(m)*(np.dot(q.Z['mean'], q.W[m]['mean'].T) + q.b[m]['mean'])) * self.X[m]['cov']
    # Second moment <X^T X> = mean^T mean + diag(sum of per-sample variances)
    self.X[m]['prodT'] = np.dot(self.X[m]['mean'].T, self.X[m]['mean']) + np.diag(np.sum(self.X[m]['cov'],axis=0))
    self.X[m]['sumlogdet'] = np.sum(np.log(self.X[m]['cov']))
def update_xi(self,m): #Multilabel
    """Update the variational tightness parameter xi of a multilabel view.

    xi = sqrt(Var[x] + E[x]^2), i.e. the square root of the second
    moment of each latent entry.

    Parameters
    ----------
    __m: int.
        This value indicates which of the input views is updated.
    """
    q = self.q_dist
    X_view = self.X[m]
    q.xi[m] = np.sqrt(X_view['cov'] + np.square(X_view['mean']))
def expectation_aprx(self, a, b, c = [None], n_samples = 100, n = None):
    """Monte Carlo approximation of the multiclass probit expectation.

    Approximates E_u[ prod_j Phi(u + a - b_j) ] over u ~ N(0, 1),
    optionally weighted by the Gaussian pdf N(u; c - a, 1), averaging
    over `n_samples` draws.

    Parameters
    ----------
    __a: float.
        Mean value of the wanted class.
    __b: float.
        Mean value of the not wanted classes.
    __c: float, (default [None])
        In case there is a pdf in the expectation, this parameter is the one
        used for the mean. N(c - a, 1).
    __n_samples: int, (default 100).
        Number of Monte Carlo draws.
    __n: int, (default None).
        Length of each draw; defaults to self.n_max.
    """
    # NOTE: `c = [None]` kept as default for backward compatibility; it is
    # never mutated, so the mutable-default pitfall does not bite here.
    if n is None:  # FIX: identity comparison (was `n == None`)
        n = self.n_max
    exp = 0
    for _ in range(n_samples):  # FIX: range() instead of np.arange() for a plain loop
        u = np.random.normal(0.0, 1.0, n)
        prod = 1
        # Product of normal cdfs over all non-target classes
        for j in np.arange(np.shape(b)[1]):
            prod = prod * norm.cdf(u + a - b[:,j], 0.0, 1.0)
        if None not in c:
            exp += norm.pdf(u, c - a, 1)*prod
        else:
            exp += prod
    return exp/n_samples
def update_xcat(self, m: int) -> None:
    """Updates the variable X.

    Update of the latent utilities X for a categorical (multiclass)
    view, using the sampled approximation in expectation_aprx.
    NOTE(review): presumably a multinomial-probit truncated-Gaussian
    update — confirm against the model derivation.

    Parameters
    ----------
    __m: int.
        This value indicates which of the input views is updated.
    """
    q = self.q_dist
    # Noiseless reconstruction of the utilities for every class
    y = np.dot(q.Z['mean'],q.W[m]['mean'].T) + q.b[m]['mean']
    set_classes = np.unique(self.t[m]['mean']).astype(int)
    t_b = label_binarize(self.t[m]['mean'], classes=set_classes).astype(bool)
    # label_binarize collapses a binary problem to one column; expand back
    if t_b.shape[1] == 1:
        t_b = np.hstack((~t_b, t_b))
    y_i = y[t_b]                                   # utility of the true class per sample
    y_j = y[~t_b].reshape(self.n_max,self.d[m]-1)  # utilities of the other classes
    #Aproximation of the expectation
    #Mean value for Xnj / j!=i
    exp_j = np.zeros((self.n_max,self.d[m]-1))
    for j in np.arange(self.d[m]-1):
        # expectation over the remaining classes, weighted by pdf at y_j[:,j]
        exp_j[:,j] = self.expectation_aprx(y_i, y_j[:,np.arange(self.d[m]-1)!=j], c = y_j[:,j])
    # mean for the non-target entries (1e-10 guards the division)
    self.X[m]['mean'][~t_b] = (y_j - (exp_j.T/self.expectation_aprx(y_i, y_j) + 1e-10).T).flatten()
    # target entries absorb the residual so row sums are preserved
    self.X[m]['mean'][t_b] = y_i + np.sum(y_j - self.X[m]['mean'][~t_b].reshape(self.n_max,self.d[m]-1),axis=1)
    self.X[m]['prodT'] = np.dot(self.X[m]['mean'].T, self.X[m]['mean'])
def update_tc(self,m): #Semisupervised categorical
    """Update the predicted class probabilities t* of categorical view `m`.

    Column i receives the approximate probability that class i attains
    the largest latent utility.
    """
    q = self.q_dist
    classes = np.arange(self.d[m])
    for i in classes:
        q.tc[m][:,i] = self.expectation_aprx(
            self.X[m]['mean'][:, classes == i].flatten(),
            self.X[m]['mean'][:, classes != i])
def predict(self, m_in, m_out, *args):
"""Apply the model learned in the training process to new data.
This function uses the variables of the specified views to predict
the output view.
Parameters
----------
__X: dict, ('data', 'method', 'sparse').
Dictionary with the information of the input views. Where 'data'
stores the matrix with the data. These matrices have size n_samples
and can have different number of features. If one view has a number
of samples smaller than the rest, these values are infered assuming
it is a semisupervised scheme. This dictionary can be built using
the function "struct_data".
__m_in: list.
This value indicates which of the views are used as input.
__m_out: list.
This value indicates which of the input views is used as output.
"""
# =============================================================================
# Hay que modificarlo para que pueda predecir todo lo que quieras a la vez.
# Para ello hay que definir un m_vec = [0,1,0,0,1] indicando qué vistas
# son para calcular la predicción y cuáles para ser predichas.
# =============================================================================
q = self.q_dist
if type(args[0]) == dict:
n_pred = np.shape(args[0]['data'])[0]
else:
n_pred = np.shape(args[0][0]['data'])[0]
aux = np.eye(q.Kc)
for m in m_in:
aux += q.tau_mean(m)*np.dot(q.W[m]['mean'].T,q.W[m]['mean'])
Z_cov = self.myInverse(aux)
if not np.any(np.isnan(Z_cov)):
self.Z_mean = np.zeros((n_pred,q.Kc))
for (m,arg) in enumerate(args):
if not (arg['SV'] is None) and not(arg['data'].shape[1] == arg['SV'].shape[0]):
V = copy.deepcopy(arg['SV'])
X = copy.deepcopy(arg['data'])
k = copy.deepcopy(arg['kernel'])
sig = copy.deepcopy(arg['sig'])
center = copy.deepcopy(arg['center'])
#Feature selection
#Lineal Kernel
if k == 'linear':
arg['data'] = np.dot(X, V.T)
#RBF Kernel
elif k == 'rbf':
if sig == 'auto':
self.sparse_K[m] = SparseELBO(X, V, self.sparse_fs[m])
arg['data'], _ = self.sparse_K[m].get_params()[0]
else:
arg['data'], sig = self.rbf_kernel_sig(X, V, sig = sig)
if center:
arg['data'] = self.center_K(arg['data'])
if type(arg) == dict:
if arg['method'] == 'cat': #categorical
arg['data'] = label_binarize(arg['data'], classes = np.arange(self.d[m_in[m]]))
self.Z_mean += np.dot(arg['data'] - q.b[m]['mean'],q.W[m_in[m]]['mean']) * q.tau_mean(m_in[m])
else:
for (m,x) in enumerate(arg):
if x['method'] == 'cat': #categorical
x['data'] = label_binarize(x['data'], classes = np.arange(self.d[m_in[m]]))
self.Z_mean += np.dot(x['data'] - q.b[m]['mean'],q.W[m_in[m]]['mean']) * q.tau_mean(m_in[m])
self.Z_mean = np.dot(self.Z_mean,Z_cov)
else:
print ('Cov Z is not invertible')
#Regression
if self.method[m_out] == 'reg':
#Expectation X
| |
:param async_req bool
:param str fields:
:param int size:
:param int offset:
:param str filter:
:return: UnmonitoredDevicePaginationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['fields', 'size', 'offset', 'filter'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_unmonitored_device_list" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/unmonitoreddevices', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UnmonitoredDevicePaginationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_update_reason_list_by_data_source_id(self, id, **kwargs):  # noqa: E501
    """get update history for a datasource  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_update_reason_list_by_data_source_id(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param str fields:
    :param int size:
    :param int offset:
    :param str filter:
    :return: DataSourceUpdateReasonsPaginationResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always ask the _with_http_info variant for just the data payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous mode: return the request thread immediately.
        return self.get_update_reason_list_by_data_source_id_with_http_info(id, **kwargs)  # noqa: E501
    # Synchronous mode: unwrap and return the deserialized response.
    (data) = self.get_update_reason_list_by_data_source_id_with_http_info(id, **kwargs)  # noqa: E501
    return data
def get_update_reason_list_by_data_source_id_with_http_info(self, id, **kwargs):  # noqa: E501
    """get update history for a datasource  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_update_reason_list_by_data_source_id_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param str fields:
    :param int size:
    :param int offset:
    :param str filter:
    :return: DataSourceUpdateReasonsPaginationResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['id', 'fields', 'size', 'offset', 'filter']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject any keyword argument the endpoint does not understand.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_update_reason_list_by_data_source_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_update_reason_list_by_data_source_id`")  # noqa: E501
    # FIX: raw strings for the regex and the message — '\d' inside a plain
    # string literal is an invalid escape sequence (DeprecationWarning,
    # SyntaxError in future Python versions).
    if 'id' in params and not re.search(r'\d+', params['id'] if type(params['id']) is str else str(params['id'])):  # noqa: E501
        raise ValueError(r"Invalid value for parameter `id` when calling `get_update_reason_list_by_data_source_id`, must conform to the pattern `/\d+/`")  # noqa: E501
    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []
    if 'fields' in params:
        query_params.append(('fields', params['fields']))  # noqa: E501
    if 'size' in params:
        query_params.append(('size', params['size']))  # noqa: E501
    if 'offset' in params:
        query_params.append(('offset', params['offset']))  # noqa: E501
    if 'filter' in params:
        query_params.append(('filter', params['filter']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['LMv1']  # noqa: E501

    return self.api_client.call_api(
        '/setting/datasources/{id}/updatereasons', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='DataSourceUpdateReasonsPaginationResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_website_alert_list_by_website_id(self, id, **kwargs):  # noqa: E501
    """get alerts for a website  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_website_alert_list_by_website_id(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param bool need_message:
    :param str custom_columns:
    :param str fields:
    :param int size:
    :param int offset:
    :param str filter:
    :return: AlertPaginationResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always ask the _with_http_info variant for just the data payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous mode: return the request thread immediately.
        return self.get_website_alert_list_by_website_id_with_http_info(id, **kwargs)  # noqa: E501
    # Synchronous mode: unwrap and return the deserialized response.
    (data) = self.get_website_alert_list_by_website_id_with_http_info(id, **kwargs)  # noqa: E501
    return data
def get_website_alert_list_by_website_id_with_http_info(self, id, **kwargs):  # noqa: E501
    """get alerts for a website  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_website_alert_list_by_website_id_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param bool need_message:
    :param str custom_columns:
    :param str fields:
    :param int size:
    :param int offset:
    :param str filter:
    :return: AlertPaginationResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['id', 'need_message', 'custom_columns', 'fields', 'size', 'offset', 'filter']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject any keyword argument the endpoint does not understand.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_website_alert_list_by_website_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_website_alert_list_by_website_id`")  # noqa: E501
    # FIX: raw strings for the regex and the message — '\d' inside a plain
    # string literal is an invalid escape sequence (DeprecationWarning,
    # SyntaxError in future Python versions).
    if 'id' in params and not re.search(r'\d+', params['id'] if type(params['id']) is str else str(params['id'])):  # noqa: E501
        raise ValueError(r"Invalid value for parameter `id` when calling `get_website_alert_list_by_website_id`, must conform to the pattern `/\d+/`")  # noqa: E501
    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []
    if 'need_message' in params:
        query_params.append(('needMessage', params['need_message']))  # noqa: E501
    if 'custom_columns' in params:
        query_params.append(('customColumns', params['custom_columns']))  # noqa: E501
    if 'fields' in params:
        query_params.append(('fields', params['fields']))  # noqa: E501
    if 'size' in params:
        query_params.append(('size', params['size']))  # noqa: E501
    if 'offset' in params:
        query_params.append(('offset', params['offset']))  # noqa: E501
    if 'filter' in params:
        query_params.append(('filter', params['filter']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['LMv1']  # noqa: E501

    return self.api_client.call_api(
        '/website/websites/{id}/alerts', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='AlertPaginationResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_website_by_id(self, id, **kwargs):  # noqa: E501
    """get website by id  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_website_by_id(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param str format:
    :return: Website
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always ask the _with_http_info variant for just the data payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous mode: return the request thread immediately.
        return self.get_website_by_id_with_http_info(id, **kwargs)  # noqa: E501
    # Synchronous mode: unwrap and return the deserialized response.
    (data) = self.get_website_by_id_with_http_info(id, **kwargs)  # noqa: E501
    return data
def get_website_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""get website by id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_website_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param str format:
:return: Website
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'format'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_website_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_website_by_id`") # noqa: E501
if 'id' in params and not re.search('\d+', params['id'] if type(params['id']) is str else str(params['id'])): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `get_website_by_id`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'format' in params:
| |
<filename>lambda_proxy/proxy.py<gh_stars>10-100
"""Translate request from AWS api-gateway.
Freely adapted from https://github.com/aws/chalice
"""
from typing import Any, Callable, Dict, List, Optional, Tuple, Sequence, Union
import inspect
import os
import re
import sys
import json
import zlib
import base64
import logging
import warnings
from functools import wraps
from lambda_proxy import templates
params_expr = re.compile(r"(<[^>]*>)")
proxy_pattern = re.compile(r"/{(?P<name>.+)\+}$")
param_pattern = re.compile(
r"^<((?P<type>[a-zA-Z0-9_]+)(\((?P<pattern>.+)\))?\:)?(?P<name>[a-zA-Z0-9_]+)>$"
)
regex_pattern = re.compile(
r"^<(?P<type>regex)\((?P<pattern>.+)\):(?P<name>[a-zA-Z0-9_]+)>$"
)
def _path_to_regex(path: str) -> str:
path = f"^{path}$" # full match
path = re.sub(r"<[a-zA-Z0-9_]+>", r"([a-zA-Z0-9_]+)", path)
path = re.sub(r"<string\:[a-zA-Z0-9_]+>", r"([a-zA-Z0-9_]+)", path)
path = re.sub(r"<int\:[a-zA-Z0-9_]+>", r"([0-9]+)", path)
path = re.sub(r"<float\:[a-zA-Z0-9_]+>", "([+-]?[0-9]+.[0-9]+)", path)
path = re.sub(
r"<uuid\:[a-zA-Z0-9_]+>",
"([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})",
path,
)
for regexParam in re.findall(r"(<regex[^>]*>)", path):
matches = regex_pattern.search(regexParam)
expr = matches.groupdict()["pattern"]
path = path.replace(regexParam, f"({expr})")
return path
def _path_to_openapi(path: str) -> str:
    """Rewrite an annotated route path into OpenAPI '{name}' placeholders."""
    # Regex placeholders first: collapse "<regex(pattern):name>" to "<regex:name>"
    for regex_param in re.findall(r"(<regex[^>]*>)", path):
        name = regex_pattern.search(regex_param).groupdict()["name"]
        path = path.replace(regex_param, f"<regex:{name}>")
    # Then strip any "type:" prefix and swap angle brackets for braces
    path = re.sub(r"<([a-zA-Z0-9_]+\:)?", "{", path)
    return re.sub(r">", "}", path)
def _converters(value: str, pathArg: str) -> Any:
    """Cast a path value according to the type annotation in `pathArg`."""
    match = param_pattern.match(pathArg)
    if not match:
        return value
    arg_type = match.groupdict()["type"]
    if arg_type == "int":
        return int(value)
    if arg_type == "float":
        return float(value)
    # "string", "uuid", regex and untyped parameters pass through unchanged
    return value
class RouteEntry(object):
    """Decode request path.

    Holds one registered route: its endpoint callable, matching regex,
    OpenAPI path, HTTP methods and response options.
    """

    def __init__(
        self,
        endpoint: Callable,
        path: str,
        methods: Optional[List] = None,
        cors: bool = False,
        token: bool = False,
        payload_compression_method: str = "",
        binary_b64encode: bool = False,
        ttl=None,
        cache_control=None,
        description: Optional[str] = None,
        tag: Optional[Tuple] = None,
    ) -> None:
        """Initialize route object.

        FIX: `methods` previously defaulted to a shared mutable list
        (["GET"]); mutating one route's methods would have changed the
        default for every subsequently created route. A None sentinel
        preserves the behavior without sharing state.
        """
        self.endpoint = endpoint
        self.path = path
        self.route_regex = _path_to_regex(path)
        self.openapi_path = _path_to_openapi(self.path)
        self.methods = ["GET"] if methods is None else methods
        self.cors = cors
        self.token = token
        self.compression = payload_compression_method
        self.b64encode = binary_b64encode
        self.ttl = ttl
        self.cache_control = cache_control
        # Fall back to the endpoint's docstring when no description is given
        self.description = description or self.endpoint.__doc__
        self.tag = tag
        if self.compression and self.compression not in ["gzip", "zlib", "deflate"]:
            raise ValueError(
                f"'{payload_compression_method}' is not a supported compression"
            )

    def __eq__(self, other) -> bool:
        """Check for equality."""
        return self.__dict__ == other.__dict__

    def _get_path_args(self) -> Sequence[Any]:
        """Return the parsed <...> placeholder descriptors of the route path."""
        route_args = [i.group() for i in params_expr.finditer(self.path)]
        args = [param_pattern.match(arg).groupdict() for arg in route_args]
        return args
def _get_apigw_stage(event: Dict) -> str:
"""Return API Gateway stage name."""
header = event.get("headers", {})
host = header.get("x-forwarded-host", header.get("host", ""))
if ".execute-api." in host and ".amazonaws.com" in host:
stage = event["requestContext"].get("stage", "")
return stage
return ""
def _get_request_path(event: Dict) -> Optional[str]:
"""Return API call path."""
resource_proxy = proxy_pattern.search(event.get("resource", "/"))
if resource_proxy:
proxy_path = event["pathParameters"].get(resource_proxy["name"])
return f"/{proxy_path}"
return event.get("path")
class ApigwPath(object):
    """Parse path of API Call."""

    def __init__(self, event: Dict):
        """Initialize API Gateway Path Info object."""
        self.version = event.get("version")
        self.apigw_stage = _get_apigw_stage(event)
        self.path = _get_request_path(event)
        self.api_prefix = proxy_pattern.sub("", event.get("resource", "")).rstrip("/")
        if self.apigw_stage or not self.path:
            # Behind a stage (or without a resolvable path) there is no
            # custom-domain base-path mapping to strip.
            self.path_mapping = ""
        else:
            raw_path = event.get("path", "")
            self.path_mapping = raw_path.replace(self.api_prefix + self.path, "")

    @property
    def prefix(self):
        """Return the API prefix."""
        if self.apigw_stage and self.apigw_stage != "$default":
            return f"/{self.apigw_stage}" + self.api_prefix
        if self.path_mapping:
            return self.path_mapping + self.api_prefix
        return self.api_prefix
class API(object):
"""API."""
FORMAT_STRING = "[%(name)s] - [%(levelname)s] - %(message)s"
def __init__(
self,
name: str,
version: str = "0.0.1",
description: str = None,
add_docs: bool = True,
configure_logs: bool = True,
debug: bool = False,
https: bool = True,
) -> None:
"""Initialize API object."""
self.name: str = name
self.description: Optional[str] = description
self.version: str = version
self.routes: List[RouteEntry] = []
self.context: Dict = {}
self.event: Dict = {}
self.request_path: ApigwPath
self.debug: bool = debug
self.https: bool = https
self.log = logging.getLogger(self.name)
if configure_logs:
self._configure_logging()
if add_docs:
self.setup_docs()
@property
def host(self) -> str:
"""Construct api gateway endpoint url."""
host = self.event["headers"].get(
"x-forwarded-host", self.event["headers"].get("host", "")
)
path_info = self.request_path
if path_info.apigw_stage and not path_info.apigw_stage == "$default":
host_suffix = f"/{path_info.apigw_stage}"
else:
host_suffix = path_info.path_mapping
scheme = "https" if self.https else "http"
return f"{scheme}://{host}{host_suffix}"
def _get_parameters(self, route: RouteEntry) -> List[Dict]:
argspath_schema = {
"default": {"type": "string"},
"string": {"type": "string"},
"str": {"type": "string"},
"regex": {"type": "string", "pattern": ""},
"uuid": {"type": "string", "format": "uuid"},
"int": {"type": "integer"},
"float": {"type": "number", "format": "float"},
}
args_in_path = route._get_path_args()
endpoint_args = inspect.signature(route.endpoint).parameters
endpoint_args_names = list(endpoint_args.keys())
parameters: List[Dict] = []
for arg in args_in_path:
annotation = endpoint_args[arg["name"]]
endpoint_args_names.remove(arg["name"])
parameter = {
"name": arg["name"],
"in": "path",
"schema": {"type": "string"},
}
if arg["type"] is not None:
parameter["schema"] = argspath_schema[arg["type"]]
if arg["type"] == "regex":
parameter["schema"]["pattern"] = f"^{arg['pattern']}$"
if annotation.default is not inspect.Parameter.empty:
parameter["schema"]["default"] = annotation.default
else:
parameter["required"] = True
parameters.append(parameter)
for name, arg in endpoint_args.items():
if name not in endpoint_args_names:
continue
parameter = {"name": name, "in": "query", "schema": {}}
if arg.default is not inspect.Parameter.empty:
parameter["schema"]["default"] = arg.default
elif arg.kind == inspect.Parameter.VAR_KEYWORD:
parameter["schema"]["format"] = "dict"
else:
parameter["schema"]["format"] = "string"
parameter["required"] = True
parameters.append(parameter)
return parameters
def _get_openapi(
self, openapi_version: str = "3.0.2", openapi_prefix: str = ""
) -> Dict:
"""Get OpenAPI documentation."""
info = {"title": self.name, "version": self.version}
if self.description:
info["description"] = self.description
output = {"openapi": openapi_version, "info": info}
security_schemes = {
"access_token": {
"type": "apiKey",
"description": "Simple token authentification",
"in": "query",
"name": "access_token",
}
}
components: Dict[str, Dict] = {}
paths: Dict[str, Dict] = {}
for route in self.routes:
path: Dict[str, Dict] = {}
default_operation: Dict[str, Any] = {}
if route.tag:
default_operation["tags"] = route.tag
if route.description:
default_operation["description"] = route.description
if route.token:
components.setdefault("securitySchemes", {}).update(security_schemes)
default_operation["security"] = [{"access_token": []}]
parameters = self._get_parameters(route)
if parameters:
default_operation["parameters"] = parameters
default_operation["responses"] = {
400: {"description": "Not found"},
500: {"description": "Internal error"},
}
for method in route.methods:
operation = default_operation.copy()
operation["operationId"] = route.openapi_path
if method in ["PUT", "POST", "DELETE", "PATCH"]:
operation["requestBody"] = {
"description": "Body",
"content": {"*/*": {}},
"required": operation["parameters"][0].get("required", "False"),
}
operation["parameters"] = operation["parameters"][1:]
path[method.lower()] = operation
paths.setdefault(openapi_prefix + route.openapi_path, {}).update(path)
if components:
output["components"] = components
output["paths"] = paths
return output
def _configure_logging(self) -> None:
if self._already_configured(self.log):
return
handler = logging.StreamHandler(sys.stdout)
# Timestamp is handled by lambda itself so the
# default FORMAT_STRING doesn't need to include it.
formatter = logging.Formatter(self.FORMAT_STRING)
handler.setFormatter(formatter)
self.log.propagate = False
if self.debug:
level = logging.DEBUG
else:
level = logging.ERROR
self.log.setLevel(level)
self.log.addHandler(handler)
def _already_configured(self, log) -> bool:
if not log.handlers:
return False
for handler in log.handlers:
if isinstance(handler, logging.StreamHandler):
if handler.stream == sys.stdout:
return True
return False
def _add_route(self, path: str, endpoint: Callable, **kwargs) -> None:
methods = kwargs.pop("methods", ["GET"])
cors = kwargs.pop("cors", False)
token = kwargs.pop("token", "")
payload_compression = kwargs.pop("payload_compression_method", "")
binary_encode = kwargs.pop("binary_b64encode", False)
ttl = kwargs.pop("ttl", None)
cache_control = kwargs.pop("cache_control", None)
description = kwargs.pop("description", None)
tag = kwargs.pop("tag", None)
if ttl:
warnings.warn(
"ttl will be deprecated in 6.0.0, please use 'cache-control'",
DeprecationWarning,
stacklevel=2,
)
if kwargs:
raise TypeError(
"TypeError: route() got unexpected keyword "
"arguments: %s" % ", ".join(list(kwargs))
)
for method in methods:
if self._checkroute(path, method):
raise ValueError(
'Duplicate route detected: "{}"\n'
"URL paths must be unique.".format(path)
)
route = RouteEntry(
endpoint,
path,
methods,
cors,
token,
payload_compression,
binary_encode,
ttl,
cache_control,
description,
tag,
)
self.routes.append(route)
def _checkroute(self, path: str, method: str) -> bool:
for route in self.routes:
if method in route.methods and path == route.path:
return True
return False
def _url_matching(self, url: str, method: str) -> Optional[RouteEntry]:
for route in self.routes:
expr = re.compile(route.route_regex)
if method in route.methods and expr.match(url):
return route
return None
def _get_matching_args(self, route: RouteEntry, url: str) -> Dict:
route_expr = re.compile(route.route_regex)
route_args = [i.group() for i in params_expr.finditer(route.path)]
url_args = route_expr.match(url).groups()
names = [param_pattern.match(arg).groupdict()["name"] for arg in route_args]
args = [
_converters(u, route_args[id])
for id, u in enumerate(url_args)
if u != route_args[id]
]
return dict(zip(names, args))
def _validate_token(self, token: str = None) -> bool:
env_token = os.environ.get("TOKEN")
if not token or not env_token:
return False
if token == env_token:
return True
return False
def route(self, path: str, **kwargs) -> Callable:
"""Register route."""
def _register_view(endpoint):
self._add_route(path, endpoint, **kwargs)
return endpoint
return _register_view
def get(self, path: str, **kwargs) -> Callable:
"""Register GET route."""
kwargs.update(dict(methods=["GET"]))
def _register_view(endpoint):
self._add_route(path, endpoint, **kwargs)
return endpoint
return _register_view
def post(self, path: str, **kwargs) -> Callable:
"""Register POST route."""
kwargs.update(dict(methods=["POST"]))
def _register_view(endpoint):
self._add_route(path, endpoint, **kwargs)
return endpoint
return _register_view
def pass_context(self, f: Callable) -> Callable:
"""Decorator: pass the API Gateway context to the function."""
@wraps(f)
def new_func(*args, **kwargs) -> Callable:
return f(self.context, *args, **kwargs)
return new_func
def pass_event(self, f: Callable) -> Callable:
"""Decorator: pass the API Gateway event to the function."""
@wraps(f)
def new_func(*args, | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import json
from abstractextensionhandler import AbstractExtensionHandler
from ..util import extensionutils, cartridgeagentutils
class DefaultExtensionHandler(AbstractExtensionHandler):
"""
Default implementation of the AbstractExtensionHandler
"""
log = None
    def __init__(self):
        """Set up logging, the WK-member cache, and the agent configuration.

        NOTE(review): LogFactory and CartridgeAgentConfiguration are not
        imported in this view of the file — confirm the module imports them.
        """
        self.log = LogFactory().get_log(__name__)
        # Well-known (WK) members seen so far; used to detect WK IP changes.
        self.wk_members = []
        self.cartridge_agent_config = CartridgeAgentConfiguration()
def on_instance_started_event(self):
try:
self.log.debug("Processing instance started event...")
if self.cartridge_agent_config.is_multitenant:
artifact_source = "%r/repository/deployment/server/" % self.cartridge_agent_config.app_path
artifact_dest = cartridgeagentconstants.SUPERTENANT_TEMP_PATH
extensionutils.execute_copy_artifact_extension(artifact_source, artifact_dest)
env_params = {}
extensionutils.execute_instance_started_extension(env_params)
except:
self.log.exception("Error processing instance started event")
    def on_instance_activated_event(self):
        """Delegate to the instance-activated extension script."""
        extensionutils.execute_instance_activated_extension()
    def on_artifact_updated_event(self, artifacts_updated_event):
        """Check out updated artifacts and run the artifacts-updated extension.

        Only acts when the event's cluster id matches this agent's payload
        cluster id. May also publish instance-activated and schedule a
        periodic artifact-update task when enabled in configuration.

        NOTE(review): RepositoryInformation, agentgithandler,
        cartridgeagentpublisher, cartridgeagentconstants and
        ParameterNotFoundException are not imported in this view — confirm
        the module-level imports.
        """
        self.log.info("Artifact update event received: [tenant] %r [cluster] %r [status] %r" %
                      (artifacts_updated_event.tenant_id, artifacts_updated_event.cluster_id,
                       artifacts_updated_event.status))
        cluster_id_event = str(artifacts_updated_event.cluster_id).strip()
        cluster_id_payload = self.cartridge_agent_config.cluster_id
        repo_url = str(artifacts_updated_event.repo_url).strip()
        if (repo_url != "") and (cluster_id_payload is not None) and (cluster_id_payload == cluster_id_event):
            local_repo_path = self.cartridge_agent_config.app_path
            secret = self.cartridge_agent_config.cartridge_key
            #repoPassword = https://mb_ip:9<PASSWORD>/stratosmetadataservice/app_id/alias/repoPassword
            # NOTE(review): the call below was redacted at source ("<PASSWORD>")
            # and is not valid Python as-is — presumably a password-decrypt
            # helper from cartridgeagentutils; restore before use.
            repo_password = <PASSWORD>(artifacts_updated_event.repo_password, secret)
            repo_username = artifacts_updated_event.repo_username
            tenant_id = artifacts_updated_event.tenant_id
            is_multitenant = self.cartridge_agent_config.is_multitenant
            commit_enabled = artifacts_updated_event.commit_enabled
            self.log.info("Executing git checkout")
            # create repo object
            repo_info = RepositoryInformation(repo_url, repo_username, repo_password, local_repo_path, tenant_id,
                                              is_multitenant, commit_enabled)
            # checkout code
            subscribe_run, repo_context = agentgithandler.AgentGitHandler.checkout(repo_info)
            # repo_context = checkout_result["repo_context"]
            # execute artifact updated extension
            env_params = {"STRATOS_ARTIFACT_UPDATED_CLUSTER_ID": artifacts_updated_event.cluster_id,
                          "STRATOS_ARTIFACT_UPDATED_TENANT_ID": artifacts_updated_event.tenant_id,
                          "STRATOS_ARTIFACT_UPDATED_REPO_URL": artifacts_updated_event.repo_url,
                          "STRATOS_ARTIFACT_UPDATED_REPO_PASSWORD": artifacts_updated_event.repo_password,
                          "STRATOS_ARTIFACT_UPDATED_REPO_USERNAME": artifacts_updated_event.repo_username,
                          "STRATOS_ARTIFACT_UPDATED_STATUS": artifacts_updated_event.status}
            extensionutils.execute_artifacts_updated_extension(env_params)
            if subscribe_run:
                # publish instanceActivated
                cartridgeagentpublisher.publish_instance_activated_event()
            update_artifacts = self.cartridge_agent_config.read_property(cartridgeagentconstants.ENABLE_ARTIFACT_UPDATE, False)
            update_artifacts = True if str(update_artifacts).strip().lower() == "true" else False
            if update_artifacts:
                auto_commit = self.cartridge_agent_config.is_commits_enabled
                auto_checkout = self.cartridge_agent_config.is_checkout_enabled
                try:
                    update_interval = int(
                        self.cartridge_agent_config.read_property(cartridgeagentconstants.ARTIFACT_UPDATE_INTERVAL))
                except ParameterNotFoundException:
                    self.log.exception("Invalid artifact sync interval specified ")
                    update_interval = 10
                except ValueError:
                    self.log.exception("Invalid artifact sync interval specified ")
                    update_interval = 10
                self.log.info("Artifact updating task enabled, update interval: %r seconds" % update_interval)
                self.log.info("Auto Commit is turned %r " % ("on" if auto_commit else "off"))
                self.log.info("Auto Checkout is turned %r " % ("on" if auto_checkout else "off"))
                agentgithandler.AgentGitHandler.schedule_artifact_update_scheduled_task(
                    repo_info,
                    auto_checkout,
                    auto_commit,
                    update_interval)
    def on_artifact_update_scheduler_event(self, tenant_id):
        """Run the artifacts-updated extension for a scheduled (timer) update."""
        env_params = {"STRATOS_ARTIFACT_UPDATED_TENANT_ID": str(tenant_id), "STRATOS_ARTIFACT_UPDATED_SCHEDULER": str(True)}
        extensionutils.execute_artifacts_updated_extension(env_params)
    def on_instance_cleanup_cluster_event(self, instance_cleanup_cluster_event):
        """Trigger cleanup on cluster-level cleanup events.

        NOTE(review): self.cleanup() is defined outside this view — confirm.
        """
        self.cleanup()
    def on_instance_cleanup_member_event(self, instance_cleanup_member_event):
        """Trigger cleanup on member-level cleanup events.

        NOTE(review): self.cleanup() is defined outside this view — confirm.
        """
        self.cleanup()
def on_member_activated_event(self, member_activated_event):
self.log.info("Member activated event received: [service] %r [cluster] %r [member] %r"
% (member_activated_event.service_name, member_activated_event.cluster_id, member_activated_event.member_id))
topology_consistent = extensionutils.check_topology_consistency(
member_activated_event.service_name,
member_activated_event.cluster_id,
member_activated_event.member_id)
if not topology_consistent:
self.log.error("Topology is inconsistent...failed to execute member activated event")
return
topology = TopologyContext.get_topology()
service = topology.get_service(member_activated_event.service_name)
cluster = service.get_cluster(member_activated_event.cluster_id)
member = cluster.get_member(member_activated_event.member_id)
lb_cluster_id = member.lb_cluster_id
if extensionutils.is_relevant_member_event(member_activated_event.service_name,
member_activated_event.cluster_id, lb_cluster_id):
env_params = {"STRATOS_MEMBER_ACTIVATED_MEMBER_IP": str(member_activated_event.member_ip),
"STRATOS_MEMBER_ACTIVATED_MEMBER_ID": str(member_activated_event.member_id),
"STRATOS_MEMBER_ACTIVATED_CLUSTER_ID": str(member_activated_event.cluster_id),
"STRATOS_MEMBER_ACTIVATED_LB_CLUSTER_ID": str(lb_cluster_id),
"STRATOS_MEMBER_ACTIVATED_NETWORK_PARTITION_ID": str(member_activated_event.network_partition_id),
"STRATOS_MEMBER_ACTIVATED_SERVICE_NAME": str(member_activated_event.service_name)}
ports = member_activated_event.port_map.values()
ports_str = ""
for port in ports:
ports_str += port.protocol + "," + str(port.value) + "," + str(port.proxy) + "|"
env_params["STRATOS_MEMBER_ACTIVATED_PORTS"] = ports_str
env_params["STRATOS_MEMBER_ACTIVATED_MEMBER_LIST_JSON"] = json.dumps(cluster.member_list_json)
member_ips = extensionutils.get_lb_member_ip(lb_cluster_id)
if member_ips is not None and len(member_ips) > 1:
env_params["STRATOS_MEMBER_ACTIVATED_LB_IP"] = str(member_ips[0])
env_params["STRATOS_MEMBER_ACTIVATED_LB_PUBLIC_IP"] = str(member_ips[1])
env_params["STRATOS_TOPOLOGY_JSON"] = json.dumps(topology.json_str)
extensionutils.add_properties(service.properties, env_params, "MEMBER_ACTIVATED_SERVICE_PROPERTY")
extensionutils.add_properties(cluster.properties, env_params, "MEMBER_ACTIVATED_CLUSTER_PROPERTY")
extensionutils.add_properties(member.properties, env_params, "MEMBER_ACTIVATED_MEMBER_PROPERTY")
clustered = self.cartridge_agent_config.is_clustered
if member.properties is not None and cartridgeagentconstants.CLUSTERING_PRIMARY_KEY in member.properties \
and member.properties[cartridgeagentconstants.CLUSTERING_PRIMARY_KEY] == "true" \
and clustered is not None and clustered:
self.log.debug(" If WK member is re-spawned, update axis2.xml ")
has_wk_ip_changed = True
for wk_member in self.wk_members:
if wk_member.member_ip == member_activated_event.member_ip:
has_wk_ip_changed = False
self.log.debug(" hasWKIpChanged %r" + has_wk_ip_changed)
min_count = int(self.cartridge_agent_config.min_count)
is_wk_member_grp_ready = self.is_wk_member_group_ready(env_params, min_count)
self.log.debug("MinCount: %r" % min_count)
self.log.debug("is_wk_member_grp_ready : %r" % is_wk_member_grp_ready)
if has_wk_ip_changed and is_wk_member_grp_ready:
self.log.debug("Setting env var STRATOS_UPDATE_WK_IP to true")
env_params["STRATOS_UPDATE_WK_IP"] = "true"
self.log.debug("Setting env var STRATOS_CLUSTERING to %r" % clustered)
env_params["STRATOS_CLUSTERING"] = str(clustered)
env_params["STRATOS_WK_MEMBER_COUNT"] = str(self.cartridge_agent_config.min_count)
extensionutils.execute_member_activated_extension(env_params)
else:
self.log.debug("Member activated event is not relevant...skipping agent extension")
    def on_complete_topology_event(self, complete_topology_event):
        """Mark the agent config initialized and run the complete-topology
        extension once the payload matches the received topology."""
        self.log.debug("Complete topology event received")
        service_name_in_payload = self.cartridge_agent_config.service_name
        cluster_id_in_payload = self.cartridge_agent_config.cluster_id
        member_id_in_payload = self.cartridge_agent_config.member_id
        consistant = extensionutils.check_topology_consistency(
            service_name_in_payload,
            cluster_id_in_payload,
            member_id_in_payload)
        if not consistant:
            return
        else:
            self.cartridge_agent_config.initialized = True
        topology = complete_topology_event.get_topology()
        service = topology.get_service(service_name_in_payload)
        cluster = service.get_cluster(cluster_id_in_payload)
        env_params = {"STRATOS_TOPOLOGY_JSON": json.dumps(topology.json_str), "STRATOS_MEMBER_LIST_JSON": json.dumps(cluster.member_list_json)}
        extensionutils.execute_complete_topology_extension(env_params)
    def on_instance_spawned_event(self, instance_spawned_event):
        """Mark the agent config initialized when the payload is consistent
        with the topology; no extension script is executed here."""
        self.log.debug("Instance Spawned event received")
        service_name_in_payload = self.cartridge_agent_config.service_name
        cluster_id_in_payload = self.cartridge_agent_config.cluster_id
        member_id_in_payload = self.cartridge_agent_config.member_id
        consistant = extensionutils.check_topology_consistency(
            service_name_in_payload,
            cluster_id_in_payload,
            member_id_in_payload)
        if not consistant:
            return
        else:
            self.cartridge_agent_config.initialized = True
    def on_complete_tenant_event(self, complete_tenant_event):
        """Run the complete-tenant extension with the tenant list as JSON."""
        self.log.debug("Complete tenant event received")
        tenant_list_json = complete_tenant_event.tenant_list_json
        self.log.debug("Complete tenants:" + json.dumps(tenant_list_json))
        env_params = {"STRATOS_TENANT_LIST_JSON": json.dumps(tenant_list_json)}
        extensionutils.execute_complete_tenant_extension(env_params)
    def on_member_terminated_event(self, member_terminated_event):
        """Run the member-terminated extension when the event concerns this
        member's cluster, LB cluster or service group."""
        self.log.info("Member terminated event received: [service] " + member_terminated_event.service_name +
                      " [cluster] " + member_terminated_event.cluster_id
                      + " [member] " + member_terminated_event.member_id)
        topology_consistent = extensionutils.check_topology_consistency(
            member_terminated_event.service_name,
            member_terminated_event.cluster_id,
            member_terminated_event.member_id
        )
        if not topology_consistent:
            self.log.error("Topology is inconsistent...failed to execute member terminated event")
            return
        topology = TopologyContext.get_topology()
        service = topology.get_service(member_terminated_event.service_name)
        cluster = service.get_cluster(member_terminated_event.cluster_id)
        terminated_member = cluster.get_member(member_terminated_event.member_id)
        lb_cluster_id = cluster.get_member(member_terminated_event.member_id).lb_cluster_id
        # check whether terminated member is within the same cluster, LB cluster or service group
        if extensionutils.is_relevant_member_event(
                member_terminated_event.service_name,
                member_terminated_event.cluster_id,
                lb_cluster_id):
            env_params = {"STRATOS_MEMBER_TERMINATED_MEMBER_IP": terminated_member.member_ip,
                          "STRATOS_MEMBER_TERMINATED_MEMBER_ID": member_terminated_event.member_id,
                          "STRATOS_MEMBER_TERMINATED_CLUSTER_ID": member_terminated_event.cluster_id,
                          "STRATOS_MEMBER_TERMINATED_LB_CLUSTER_ID": lb_cluster_id,
                          "STRATOS_MEMBER_TERMINATED_NETWORK_PARTITION_ID": member_terminated_event.network_partition_id,
                          "STRATOS_MEMBER_TERMINATED_SERVICE_NAME": member_terminated_event.service_name,
                          "STRATOS_MEMBER_TERMINATED_MEMBER_LIST_JSON": json.dumps(cluster.member_list_json),
                          "STRATOS_TOPOLOGY_JSON": json.dumps(topology.json_str)}
            member_ips = extensionutils.get_lb_member_ip(lb_cluster_id)
            if member_ips is not None and len(member_ips) > 1:
                env_params["STRATOS_MEMBER_TERMINATED_LB_IP"] = member_ips[0]
                env_params["STRATOS_MEMBER_TERMINATED_LB_PUBLIC_IP"] = member_ips[1]
            extensionutils.add_properties(service.properties, env_params, "MEMBER_TERMINATED_SERVICE_PROPERTY")
            extensionutils.add_properties(cluster.properties, env_params, "MEMBER_TERMINATED_CLUSTER_PROPERTY")
            extensionutils.add_properties(terminated_member.properties, env_params, "MEMBER_TERMINATED_MEMBER_PROPERTY")
            extensionutils.execute_member_terminated_extension(env_params)
        else:
            self.log.debug("Member terminated event is not relevant...skipping agent extension")
    def on_member_suspended_event(self, member_suspended_event):
        """Run the member-suspended extension when the event concerns this
        member's cluster, LB cluster or service group."""
        self.log.info("Member suspended event received: [service] " + member_suspended_event.service_name +
                      " [cluster] " + member_suspended_event.cluster_id + " [member] " + member_suspended_event.member_id)
        topology_consistent = extensionutils.check_topology_consistency(
            member_suspended_event.service_name,
            member_suspended_event.cluster_id,
            member_suspended_event.member_id
        )
        if not topology_consistent:
            self.log.error("Topology is inconsistent...failed to execute member suspended event")
            return
        topology = TopologyContext.get_topology()
        service = topology.get_service(member_suspended_event.service_name)
        cluster = service.get_cluster(member_suspended_event.cluster_id)
        suspended_member = cluster.get_member(member_suspended_event.member_id)
        lb_cluster_id = cluster.get_member(member_suspended_event.member_id).lb_cluster_id
        # check whether suspended member is within the same cluster, LB cluster or service group
        if extensionutils.is_relevant_member_event(
                member_suspended_event.service_name,
                member_suspended_event.cluster_id,
                lb_cluster_id):
            env_params = {"STRATOS_MEMBER_SUSPENDED_MEMBER_IP": member_suspended_event.member_ip,
                          "STRATOS_MEMBER_SUSPENDED_MEMBER_ID": member_suspended_event.member_id,
                          "STRATOS_MEMBER_SUSPENDED_CLUSTER_ID": member_suspended_event.cluster_id,
                          "STRATOS_MEMBER_SUSPENDED_LB_CLUSTER_ID": lb_cluster_id,
                          "STRATOS_MEMBER_SUSPENDED_NETWORK_PARTITION_ID": member_suspended_event.network_partition_id,
                          "STRATOS_MEMBER_SUSPENDED_SERVICE_NAME": member_suspended_event.service_name,
                          "STRATOS_MEMBER_SUSPENDED_MEMBER_LIST_JSON": json.dumps(cluster.member_list_json),
                          "STRATOS_TOPOLOGY_JSON": json.dumps(topology.json_str)}
            member_ips = extensionutils.get_lb_member_ip(lb_cluster_id)
            if member_ips is not None and len(member_ips) > 1:
                env_params["STRATOS_MEMBER_SUSPENDED_LB_IP"] = member_ips[0]
                env_params["STRATOS_MEMBER_SUSPENDED_LB_PUBLIC_IP"] = member_ips[1]
            extensionutils.add_properties(service.properties, env_params, "MEMBER_SUSPENDED_SERVICE_PROPERTY")
            extensionutils.add_properties(cluster.properties, env_params, "MEMBER_SUSPENDED_CLUSTER_PROPERTY")
            extensionutils.add_properties(suspended_member.properties, env_params, "MEMBER_SUSPENDED_MEMBER_PROPERTY")
            extensionutils.execute_member_suspended_extension(env_params)
        else:
            self.log.debug("Member suspended event is not relevant...skipping agent extension")
    def on_member_started_event(self, member_started_event):
        """Run the member-started extension when the event concerns this
        member's cluster, LB cluster or service group."""
        self.log.info("Member started event received: [service] " + member_started_event.service_name +
                      " [cluster] " + member_started_event.cluster_id + " [member] " + member_started_event.member_id)
        topology_consistent = extensionutils.check_topology_consistency(
            member_started_event.service_name,
            member_started_event.cluster_id,
            member_started_event.member_id
        )
        if not topology_consistent:
            self.log.error("Topology is inconsistent...failed to execute member started event")
            return
        topology = TopologyContext.get_topology()
        service = topology.get_service(member_started_event.service_name)
        cluster = service.get_cluster(member_started_event.cluster_id)
        started_member = cluster.get_member(member_started_event.member_id)
        lb_cluster_id = cluster.get_member(member_started_event.member_id).lb_cluster_id
        # check whether started member is within the same cluster, LB cluster or service group
        if extensionutils.is_relevant_member_event(
                member_started_event.service_name,
                member_started_event.cluster_id,
                lb_cluster_id):
            env_params = {"STRATOS_MEMBER_STARTED_MEMBER_IP": started_member.member_ip,
                          "STRATOS_MEMBER_STARTED_MEMBER_ID": member_started_event.member_id,
                          "STRATOS_MEMBER_STARTED_CLUSTER_ID": member_started_event.cluster_id,
                          "STRATOS_MEMBER_STARTED_LB_CLUSTER_ID": lb_cluster_id,
                          "STRATOS_MEMBER_STARTED_NETWORK_PARTITION_ID": member_started_event.network_partition_id,
                          "STRATOS_MEMBER_STARTED_SERVICE_NAME": member_started_event.service_name,
                          "STRATOS_MEMBER_STARTED_MEMBER_LIST_JSON": json.dumps(cluster.member_list_json),
                          "STRATOS_TOPOLOGY_JSON": json.dumps(topology.json_str)}
            member_ips = extensionutils.get_lb_member_ip(lb_cluster_id)
            if member_ips is not None and len(member_ips) > 1:
                env_params["STRATOS_MEMBER_STARTED_LB_IP"] = member_ips[0]
                env_params["STRATOS_MEMBER_STARTED_LB_PUBLIC_IP"] = member_ips[1]
            extensionutils.add_properties(service.properties, env_params, "MEMBER_STARTED_SERVICE_PROPERTY")
            extensionutils.add_properties(cluster.properties, env_params, "MEMBER_STARTED_CLUSTER_PROPERTY")
            extensionutils.add_properties(started_member.properties, env_params, "MEMBER_STARTED_MEMBER_PROPERTY")
            extensionutils.execute_member_started_extension(env_params)
        else:
            self.log.debug("Member started event is not relevant...skipping agent extension")
    def start_server_extension(self):
        """Wait for the complete topology, then run the start-servers
        extension script; failures are logged, never raised."""
        # wait until complete topology message is received to get LB IP
        extensionutils.wait_for_complete_topology()
        self.log.info("[start server extension] complete topology event received")
        service_name_in_payload = self.cartridge_agent_config.service_name
        cluster_id_in_payload = self.cartridge_agent_config.cluster_id
        member_id_in_payload = self.cartridge_agent_config.member_id
        topology_consistant = extensionutils.check_topology_consistency(service_name_in_payload, cluster_id_in_payload, member_id_in_payload)
        try:
            if not topology_consistant:
                self.log.error("Topology is inconsistent...failed to execute start server event")
                return
            topology = TopologyContext.get_topology()
            service = topology.get_service(service_name_in_payload)
            cluster = service.get_cluster(cluster_id_in_payload)
            # store environment variable parameters to be passed to extension shell script
            env_params = {}
            # if clustering is enabled wait until all well known members have started
            clustering_enabled = self.cartridge_agent_config.is_clustered
            if clustering_enabled:
                env_params["STRATOS_CLUSTERING"] = "true"
                env_params["STRATOS_WK_MEMBER_COUNT"] = self.cartridge_agent_config.min_count
                env_params["STRATOS_PRIMARY"] = "true" if self.cartridge_agent_config.is_primary else "false"
                self.wait_for_wk_members(env_params)
                self.log.info("All well known members have started! Resuming start server extension...")
            env_params["STRATOS_TOPOLOGY_JSON"] = json.dumps(topology.json_str)
            env_params["STRATOS_MEMBER_LIST_JSON"] = json.dumps(cluster.member_list_json)
            extensionutils.execute_start_servers_extension(env_params)
        except:
            self.log.exception("Error processing start servers event")
    def volume_mount_extension(self, persistence_mappings_payload):
        """Delegate volume mounting to the volume-mount extension script."""
        extensionutils.execute_volume_mount_extension(persistence_mappings_payload)
def on_subscription_domain_added_event(self, subscription_domain_added_event):
tenant_domain = self.find_tenant_domain(subscription_domain_added_event.tenant_id)
self.log.info(
"Subscription domain added event received: [tenant-id] " + subscription_domain_added_event.tenant_id +
" [tenant-domain] " + tenant_domain + " [domain-name] " + subscription_domain_added_event.domain_name +
" | |
sure this doesn't happen again!
#dict_astrom_after = {}
#for i in xrange(len(results[0])): dict_astrom_after[keys[i]] = results[0][i]
#stuck_test=dict_astrom_after==dict_astrom_before
#print 'get_astrom_run_sextract| dict_astrom_after==dict_astrom_before=',stuck_test
#if stuck_test:
# raise Exception('This test is true: dict_astrom_after==dict_astrom_before\nWhich means were stuck in a recursive loop again! For some reason "pasted_cat" is staying null every time! Fix this!')
### used to run construct_correction here before, will have to run this in some other function later on
print 'done: PPRUN=',PPRUN
print "get_astrom_run_sextract| DONE with func"
except:
ns.update(locals())
raise
#adam-fragments_removed# get_astrom_run_sextract-end
''' search for images of a specific object in the data directory '''
def gather_exposures(OBJNAME,filters=None): #main #step1_add_database
 '''Search the data directory for images of OBJNAME and enter all exposures into the database.

 inputs: OBJNAME, filters=None (None selects the default Subaru filter list below)
 returns: exposures -- dict mapping exposure root name -> {'images': [paths], 'keywords': {header keywords}}
 purpose: search for images of a specific object in the data directory and enter all exposures into the database
 calls: initialize,save_exposure
 called_by: '''
 print "gather_exposures| START the func. inputs:", ' OBJNAME=',OBJNAME , ' filters=',filters
 ''' setting Corrected to false here since I don't think there is any superflat correction '''
 # Corrected toggles between superflat-corrected frames ("...I.fits") and raw frames.
 # NOTE(review): `pattern` computed here is never used; the Corrected flag is re-tested below.
 Corrected =False
 if Corrected: pattern = 'I.fits'
 else: pattern = ''
 if not filters:
  filters = ['B','W-J-B','W-J-V','W-C-RC','W-C-IC','I','W-S-Z+']
 for filter_name in filters:
  # Per-filter search parameters (paths, temp dir, config dirs).
  search_params = initialize(filter_name,OBJNAME)
  searchstr = "/%(path)s/%(FILTER)s/SCIENCE/*.fits" % search_params
  print "gather_exposures| ",'searching here:', searchstr
  files = glob(searchstr)
  ''' filter_name out corrected or not corrected files '''
  # Keep only corrected (or only uncorrected) frames, matching the Corrected flag.
  if Corrected:
   files = filter(lambda x:string.find(x,'I.fits')!=-1,files)
  elif not Corrected:
   files = filter(lambda x:string.find(x,'I.fits')==-1,files)
  print "gather_exposures| ",'files=',files
  files.sort()
  # NOTE(review): exposures is re-initialized per filter, so the final return
  # only holds the last filter's exposures -- confirm intentional.
  exposures = {}
  db2,c = connect_except()
  for file in files:
   print "gather_exposures| ",'file=', file
   # Skip derived products (wcs solutions and subtracted frames).
   if string.find(file,'wcs') == -1 and string.find(file,'.sub.fits') == -1:
    res = re.split('_',re.split('/',file)[-1])
    exp_name = res[0]
    if not exposures.has_key(exp_name): exposures[exp_name] = {'images':[],'keywords':{}}
    exposures[exp_name]['images'].append(file) # exp_name is the root of the image name
    print "gather_exposures| ",'exposures[exp_name]["keywords"]=',exposures[exp_name]["keywords"]
    if len(exposures[exp_name]['keywords'].keys()) == 0: #not exposures[exp_name]['keywords'].has_key('ROTATION'): #if exposure does not have keywords yet, then get them
     exposures[exp_name]['keywords']['FILTER'] = filter_name
     exposures[exp_name]['keywords']['file'] = file
     # The path component containing the filter name encodes the observation date.
     res2 = re.split('/',file)
     for r in res2:
      if string.find(r,filter_name) != -1:
       print "gather_exposures| r=",r
       exposures[exp_name]['keywords']['date'] = r.replace(filter_name + '_','')
       exposures[exp_name]['keywords']['fil_directory'] = r
       search_params['fil_directory'] = r
     kws = utilities.get_header_kw(file,['CRVAL1','CRVAL2','ROTATION','OBJECT','GABODSID','CONFIG','EXPTIME','AIRMASS','INSTRUM','PPRUN','BADCCD']) # return KEY/NA if not SUBARU
     ''' figure out a way to break into SKYFLAT, DOMEFLAT '''
     # Dump the full FITS header with dfits and grep it for the flat type.
     ppid = str(os.getppid())
     command_dfits = progs_path['p_dfits']+' ' + file + ' > ' + search_params['TEMPDIR'] + '/header'
     ooo=utilities.run(command_dfits)
     if ooo!=0: raise Exception("the line utilities.run(command_dfits) failed\ncommand_dfits="+command_dfits)
     # NOTE(review): `file` (the loop variable) is rebound to the header text here.
     file = open('' + search_params['TEMPDIR'] + '/header','r').read()
     if string.find(file,'SKYFLAT') != -1: exposures[exp_name]['keywords']['FLAT_TYPE'] = 'SKYFLAT'
     elif string.find(file,'DOMEFLAT') != -1: exposures[exp_name]['keywords']['FLAT_TYPE'] = 'DOMEFLAT'
     #print "gather_exposures| ",file, exposures[exp_name]['keywords']['FLAT_TYPE']
     file = open('' + search_params['TEMPDIR'] + '/header','r').readlines()
     # Extract the flat SET number and illumination smoothing scale from the
     # "Flat frame:" history line, if present.
     for line in file:
      if string.find(line,'Flat frame:') != -1 and string.find(line,'illum') != -1:
       res = re.split('SET',line)
       if len(res) > 1:
        res = re.split('_',res[1])
        set = res[0]
        exposures[exp_name]['keywords']['FLAT_SET'] = set
       res = re.split('illum',line)
       res = re.split('\.',res[1])
       smooth = res[0]
       exposures[exp_name]['keywords']['SMOOTH'] = smooth
       break
     for kw in kws.keys():
      exposures[exp_name]['keywords'][kw] = kws[kw]
     if Corrected:
      exposures[exp_name]['keywords']['SUPA'] = exp_name+'I'
     if not Corrected:
      exposures[exp_name]['keywords']['SUPA'] = exp_name
     exposures[exp_name]['keywords']['OBJNAME'] = OBJNAME
     exposures[exp_name]['keywords']['CORRECTED'] = str(Corrected)
    print "gather_exposures| ",'exposures[exp_name]["keywords"]=',exposures[exp_name]["keywords"]
    # Persist this exposure's keyword set into the illumination database.
    save_exposure(exposures[exp_name]['keywords'])
 print "gather_exposures| DONE with func"
 return exposures
def initialize(FILTER,OBJNAME): #simple #database
 '''Build the per-object/per-filter path dictionary and export program paths to the environment.

 inputs: FILTER,OBJNAME
 returns: search_params={'path':data_path, 'OBJNAME':OBJNAME, 'FILTER':FILTER, 'PHOTCONF':PHOTCONF, 'DATACONF':progs_path['dataconf'], 'TEMPDIR':tmpdir}
 purpose: returns a dictionary of paths to data/temp files/config files/ etc. corresponding to this OBJECT and FILTER
 calls:
 called_by: sextract,sextract,length_swarp,gather_exposures,find_seeing,fix_radec,fix_radec,RefCat_coverage'''
 print "initialize| START the func. inputs:",' FILTER=',FILTER , ' OBJNAME=',OBJNAME
 # Export all configured program paths so spawned tools can find them via the environment.
 for key in progs_path.keys():
  os.environ[key] = str(progs_path[key])
 ppid = str(os.getppid())
 PHOTCONF = os.environ['bonn'] + '/photconf/'
 if not os.path.isdir(PHOTCONF): raise Exception("The PHOTCONF directory %s isn't there" % (PHOTCONF))
 # Create the temp directory on first use (shelling out to mkdir).
 if not os.path.isdir(tmpdir):os.system('mkdir ' + tmpdir)
 search_params = {'path':data_path, 'OBJNAME':OBJNAME, 'FILTER':FILTER, 'PHOTCONF':PHOTCONF, 'DATACONF':progs_path['dataconf'], 'TEMPDIR':tmpdir}
 print "initialize| DONE with func"
 return search_params
def save_exposure(dict_save,SUPA=None,FLAT_TYPE=None): #simple #database
 '''Insert/update one exposure's keyword set in the illumination database.

 inputs: dict_save,SUPA=None,FLAT_TYPE=None
 returns:
 purpose: add (key,value) pairs in input dict_save to the illumination_db under (SUPA,FLAT_TYPE), if (SUPA,FLAT_TYPE) does not exist, then add it to the table
 calls: connect_except
 called_by: sextract,sextract,length_swarp,gather_exposures,find_seeing,fix_radec,fix_radec,fix_radec,RefCat_coverage'''
 print 'save_exposure| START the func. inputs: dict_save=',dict_save , ' SUPA=',SUPA , ' FLAT_TYPE=',FLAT_TYPE
 if SUPA != None and FLAT_TYPE != None:
  dict_save['SUPA'] = SUPA
  dict_save['FLAT_TYPE'] = FLAT_TYPE
 db2,c = connect_except()
 #command = "CREATE TABLE IF NOT EXISTS "+illum_db+" ( id MEDIUMINT NOT NULL AUTO_INCREMENT, PRIMARY KEY (id))"
 #c.execute("DROP TABLE IF EXISTS "+illum_db+"")
 #c.execute(command)
 floatvars = {}
 stringvars = {}
 #copy array but exclude lists
 # Heuristic type sniffing: any letter-like character marks a value as a string.
 # 'E' is excluded so exponent notation (e.g. 1E5) still counts as a float.
 # NOTE(review): '-' is in `letters`, so negative numbers are classified as
 # strings -- confirm this is intentional.
 letters = string.ascii_lowercase + string.ascii_uppercase.replace('E','') + '_' + '-' + ','
 for ele in dict_save.keys():
  type = 'float'
  for l in letters:
   if string.find(str(dict_save[ele]),l) != -1:
    type = 'string'
  if type == 'float':
   floatvars[ele] = str(float(dict_save[ele]))
  elif type == 'string':
   stringvars[ele] = dict_save[ele]
 # make database if it doesn't exist
 print 'save_exposure| ','floatvars=', floatvars
 print 'save_exposure| ','stringvars=', stringvars
 # Add any missing columns; the bare except swallows the error MySQL raises
 # when the column already exists.
 for column in stringvars:
  try:
   command = 'ALTER TABLE '+illum_db+' ADD ' + column + ' varchar(240)'
   c.execute(command)
  except: nope = 1
 for column in floatvars:
  try:
   command = 'ALTER TABLE '+illum_db+' ADD ' + column + ' float(30)'
   c.execute(command)
  except: nope = 1
 # insert new observation
 # NOTE(review): all SQL below is built by string concatenation -- vulnerable to
 # SQL injection / breakage on quotes; parameterized queries would be safer.
 SUPA = dict_save['SUPA']
 flat = dict_save['FLAT_TYPE']
 c.execute("SELECT SUPA from "+illum_db+" where SUPA = '" + SUPA + "' and flat_type = '" + flat + "'")
 results = c.fetchall()
 print 'save_exposure| results=',results
 if len(results) > 0:
  print 'save_exposure| ','already added'
 else:
  command = "INSERT INTO "+illum_db+" (SUPA,FLAT_TYPE) VALUES ('" + dict_save['SUPA'] + "','" + dict_save['FLAT_TYPE'] + "')"
  print 'save_exposure| command=',command
  c.execute(command)
 # Build the SET clause for a single UPDATE covering every sniffed column.
 vals = ''
 for key in stringvars.keys():
  print 'save_exposure| key=',key , ' stringvars[key]=',stringvars[key]
  vals += ' ' + key + "='" + str(stringvars[key]) + "',"
 for key in floatvars.keys():
  print 'save_exposure| key=',key , ' floatvars[key]=',floatvars[key]
  vals += ' ' + key + "='" + floatvars[key] + "',"
 vals = vals[:-1]
 command = "UPDATE "+illum_db+" set " + vals + " WHERE SUPA='" + dict_save['SUPA'] + "' AND FLAT_TYPE='" + dict_save['FLAT_TYPE'] + "'"
 print 'save_exposure| command=',command
 c.execute(command)
 print 'save_exposure| vals=',vals
 #names = reduce(lambda x,y: x + ',' + y, [x for x in floatvars.keys()])
 #values = reduce(lambda x,y: str(x) + ',' + str(y), [floatvars[x] for x in floatvars.keys()])
 #names += ',' + reduce(lambda x,y: x + ',' + y, [x for x in stringvars.keys()])
 #values += ',' + reduce(lambda x,y: x + ',' + y, ["'" + str(stringvars[x]) + "'" for x in stringvars.keys()])
 #command = "INSERT INTO "+illum_db+" (" + names + ") VALUES (" + values + ")"
 #os.system(command)
 db2.close()
 print "save_exposure| DONE with func"
def connect_except(): #simple #database
 '''Connect to the MySQL database, retrying with a random backoff on failure.

 inputs:
 returns: db2,c -- the open connection and a cursor on it
 purpose: connect to the SQL database
 calls:
 called_by: get_astrom_run_sextract,save_exposure,get_files,save_fit,get_fits'''
 #print 'connect_except| START the func'
 notConnect = True
 tried = 0
 while notConnect:
  tried += 1
  try:
   db2 = MySQLdb.connect(db=mysqldb_params["db"], user=mysqldb_params["user"], passwd=mysqldb_params["passwd"], host=mysqldb_params["host"])
   c = db2.cursor()
   notConnect = False
  except:
   # traceback.print_exc returns None, so this print also emits "None" after the trace.
   print 'connect_except| traceback.print_exc(file=sys.stdout)=',traceback.print_exc(file=sys.stdout)
   # Random 10-30s wait to avoid many workers hammering the server in lockstep.
   randwait = int(random.random()*30)
   if randwait < 10: randwait=10
   print 'connect_except| rand wait!', randwait
   time.sleep(randwait)
   # Hard-exit the worker process after too many failed attempts.
   if tried > 15:
    print 'connect_except| too many failures'
    os._exit(0)
 #print "connect_except| DONE with func"
 return db2,c
def get_files_CALIB(OBJNAME,PPRUN,FILTER,single=True): #simple #database
 '''Find the CALIB exposures matching this PPRUN/OBJNAME.

 inputs: OBJNAME,PPRUN,FILTER,single=True (single selects only chip _10O frames)
 returns: calib_files -- list of matching CALIB file paths (possibly empty)
 purpose: find the full set of 3sec/30sec exposures for all keys in illumination_db corresponding to this PPRUN/OBJNAME'''
 db2,c = connect_except()
 keys=describe_db(c,[illum_db])
 # NOTE(review): SQL built via string concatenation -- injection-prone.
 command = "SELECT * from "+illum_db+" where PPRUN='" + PPRUN + "'"+" and OBJNAME='" + OBJNAME + "'"
 c.execute(command)
 results = c.fetchall()
 # Map column names onto the first result row.
 dict_files = {}
 for i in xrange(len(results[0])): dict_files[keys[i]] = results[0][i]
 file_pat = dict_files['file']
 # Derive a glob pattern from the stored filename by wildcarding the chip number.
 res = re.split('_\d+O',file_pat)
 if single:
  pattern=res[0][:-6]+ '*_10O' + res[1]
 else:
  pattern=res[0][:-6]+ '*O' + res[1]
 # Switch from the science-filter directory to the PPRUN CALIB directory.
 calib_pat=pattern.replace(FILTER,PPRUN+"_CALIB")
 print 'get_files_CALIB| calib_pat=',calib_pat
 calib_files=glob(calib_pat)
 if not calib_files:
  print 'get_files_CALIB| no image files found', calib_pat
 else:
  print 'get_files_CALIB| found %s CALIB files' % (len(calib_files))
 db2.close()
 return calib_files
''' find full set of files corresponding to all '''
def get_files(SUPA,FLAT_TYPE=None): #simple #database
'''inputs: SUPA,FLAT_TYPE=None
returns: dict_files
purpose: find the full set of files for all keys in illumination_db corresponding to this SUPA###
calls: connect_except
called_by: sextract,sextract,sextract,length_swarp,find_seeing,fix_radec,fix_radec,match_OBJNAME,match_OBJNAME,RefCat_coverage,RefCat_coverage,linear_fit'''
#print 'get_files| START the func. inputs: SUPA=',SUPA , ' FLAT_TYPE=',FLAT_TYPE
db2,c = connect_except()
keys=describe_db(c,[illum_db])
command = "SELECT * from "+illum_db+" where SUPA='" + SUPA + "'" # AND FLAT_TYPE='" + FLAT_TYPE + "'"
c.execute(command)
results = c.fetchall()
dict_files = {}
for i in xrange(len(results[0])): dict_files[keys[i]] = results[0][i]
file_pat = dict_files['file']
#print 'get_files| searching for files for all chips in the exposure', file_pat
res = re.split('_\d+O',file_pat)
pattern = res[0] + '_*O' + res[1]
files = glob(pattern)
dict_files['files'] = files
if not files:
print 'get_files| no image | |
yoffset, width, height, format, type, pixels):
pass
# Auto-generated OpenGL entry-point stubs: the @params decorator records the
# C-level parameter names; the bodies are intentionally empty placeholders.
@params(api='gl', prms=['program', 'programInterface', 'name'])
def glGetProgramResourceLocation(program, programInterface, name):
    pass
@params(api='gl', prms=['index', 'v'])
def glViewportIndexedfv(index, v):
    pass
@params(api='gl', prms=['mode', 'count', 'type', 'indices', 'basevertex'])
def glDrawElementsBaseVertex(mode, count, type, indices, basevertex):
    pass
@params(api='gl', prms=['target', 'internalformat', 'offset', 'size', 'format', 'type', 'data'])
def glClearBufferSubData(target, internalformat, offset, size, format, type, data):
    pass
@params(api='gl', prms=['target', 'levels', 'internalformat', 'width'])
def glTexStorage1D(target, levels, internalformat, width):
    pass
@params(api='gl', prms=['program', 'location', 'params'])
def glGetUniformiv(program, location, params):
    pass
@params(api='gl', prms=['target', 'buffer'])
def glBindBuffer(target, buffer):
    pass
@params(api='gl', prms=['location', 'v0', 'v1', 'v2', 'v3'])
def glUniform4ui(location, v0, v1, v2, v3):
    pass
@params(api='gl', prms=['coord', 'pname', 'param'])
def glTexGenf(coord, pname, param):
    pass
@params(api='gl', prms=['coord', 'pname', 'param'])
def glTexGend(coord, pname, param):
    pass
@params(api='gl', prms=['coord', 'pname', 'param'])
def glTexGeni(coord, pname, param):
    pass
@params(api='gl', prms=['index', 'left', 'bottom', 'width', 'height'])
def glScissorIndexed(index, left, bottom, width, height):
    pass
@params(api='gl', prms=['v'])
def glRasterPos4dv(v):
    pass
@params(api='gl', prms=['v'])
def glRasterPos2dv(v):
    pass
@params(api='gl', prms=['v'])
def glTexCoord2iv(v):
    pass
@params(api='gl', prms=['type', 'count', 'conststrings'])
def glCreateShaderProgramv(type, count, conststrings):
    pass
@params(api='gl', prms=['id', 'pname', 'params'])
def glGetQueryObjectiv(id, pname, params):
    pass
@params(api='gl', prms=['x', 'y'])
def glVertex2s(x, y):
    pass
@params(api='gl', prms=['target'])
def glGenerateMipmap(target):
    pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'yoffset', 'width', 'height', 'format', 'imageSize', 'data'])
def glCompressedTextureSubImage2D(texture, level, xoffset, yoffset, width, height, format, imageSize, data):
    pass
@params(api='gl', prms=['list', 'range'])
def glDeleteLists(list, range):
    pass
@params(api='gl', prms=['pname', 'param'])
def glPointParameteri(pname, param):
    pass
@params(api='gl', prms=['v'])
def glColor4iv(v):
    pass
@params(api='gl', prms=['target'])
def glUnmapBuffer(target):
    pass
@params(api='gl', prms=['pname', 'param'])
def glPointParameterf(pname, param):
    pass
@params(api='gl', prms=['s', 't'])
def glTexCoord2s(s, t):
    pass
@params(api='gl', prms=['v'])
def glTexCoord4dv(v):
    pass
@params(api='gl', prms=['v'])
def glNormal3dv(v):
    pass
@params(api='gl', prms=[])
def glReleaseShaderCompiler():
    pass
@params(api='gl', prms=['v'])
def glTexCoord1dv(v):
    pass
@params(api='gl', prms=['x', 'y', 'width', 'height', 'format', 'type', 'pixels'])
def glReadPixels(x, y, width, height, format, type, pixels):
    pass
@params(api='gl', prms=['renderbuffer', 'samples', 'internalformat', 'width', 'height'])
def glNamedRenderbufferStorageMultisample(renderbuffer, samples, internalformat, width, height):
    pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttribI3iv(index, v):
    pass
@params(api='gl', prms=['mode'])
def glShadeModel(mode):
    pass
@params(api='gl', prms=['un', 'u1', 'u2'])
def glMapGrid1d(un, u1, u2):
    pass
@params(api='gl', prms=['framebuffer', 'numAttachments', 'attachments'])
def glInvalidateNamedFramebufferData(framebuffer, numAttachments, attachments):
    pass
@params(api='gl', prms=['v1', 'v2'])
def glRectiv(v1, v2):
    pass
@params(api='gl', prms=['type', 'color'])
def glColorP4ui(type, color):
    pass
@params(api='gl', prms=['pipeline', 'stages', 'program'])
def glUseProgramStages(pipeline, stages, program):
    pass
@params(api='gl', prms=['v'])
def glRasterPos3dv(v):
    pass
@params(api='gl', prms=['src'])
def glReadBuffer(src):
    pass
@params(api='gl', prms=['v'])
def glColor4ubv(v):
    pass
@params(api='gl', prms=['target', 'offset', 'size', 'data'])
def glGetBufferSubData(target, offset, size, data):
    pass
@params(api='gl', prms=['index', 'pname', 'params'])
def glGetVertexAttribLdv(index, pname, params):
    pass
@params(api='gl', prms=['program', 'location', 'bufSize', 'params'])
def glGetnUniformuiv(program, location, bufSize, params):
    pass
@params(api='gl', prms=['n', 'buffers'])
def glGenBuffers(n, buffers):
    pass
@params(api='gl', prms=['framebuffer', 'buffer', 'drawbuffer', 'value'])
def glClearNamedFramebufferiv(framebuffer, buffer, drawbuffer, value):
    pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttribI2iv(index, v):
    pass
@params(api='gl', prms=['framebuffer'])
def glIsFramebuffer(framebuffer):
    pass
@params(api='gl', prms=['type', 'coords'])
def glTexCoordP4uiv(type, coords):
    pass
@params(api='gl', prms=['coord', 'pname', 'params'])
def glTexGendv(coord, pname, params):
    pass
# Auto-generated OpenGL entry-point stubs (continued); bodies intentionally empty.
@params(api='gl', prms=['type', 'value'])
def glVertexP2uiv(type, value):
    pass
@params(api='gl', prms=['s', 't'])
def glTexCoord2d(s, t):
    pass
@params(api='gl', prms=['target', 'pname', 'params'])
def glGetBufferParameteri64v(target, pname, params):
    pass
@params(api='gl', prms=['program', 'location', 'count', 'value'])
def glProgramUniform4dv(program, location, count, value):
    pass
@params(api='gl', prms=['s', 't'])
def glTexCoord2f(s, t):
    pass
@params(api='gl', prms=['target', 'v'])
def glMultiTexCoord3fv(target, v):
    pass
@params(api='gl', prms=['n', 'renderbuffers'])
def glCreateRenderbuffers(n, renderbuffers):
    pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib4Nusv(index, v):
    pass
@params(api='gl', prms=['func'])
def glDepthFunc(func):
    pass
@params(api='gl', prms=['sampler', 'pname', 'param'])
def glSamplerParameterf(sampler, pname, param):
    pass
@params(api='gl', prms=['buf', 'src', 'dst'])
def glBlendFunci(buf, src, dst):
    pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib3dv(index, v):
    pass
@params(api='gl', prms=['target', 'size', 'data', 'flags'])
def glBufferStorage(target, size, data, flags):
    pass
@params(api='gl', prms=['target', 'index', 'data'])
def glGetFloati_v(target, index, data):
    pass
@params(api='gl', prms=['program', 'name'])
def glGetUniformLocation(program, name):
    pass
@params(api='gl', prms=['framebuffer', 'n', 'bufs'])
def glNamedFramebufferDrawBuffers(framebuffer, n, bufs):
    pass
@params(api='gl', prms=['location', 'count', 'value'])
def glUniform4fv(location, count, value):
    pass
@params(api='gl', prms=['index', 'type', 'normalized', 'value'])
def glVertexAttribP4uiv(index, type, normalized, value):
    pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'yoffset', 'zoffset', 'width', 'height', 'depth', 'format', 'imageSize', 'data'])
def glCompressedTextureSubImage3D(texture, level, xoffset, yoffset, zoffset, width, height, depth, format, imageSize, data):
    pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib4Nbv(index, v):
    pass
@params(api='gl', prms=[])
def glEndConditionalRender():
    pass
@params(api='gl', prms=['array'])
def glEnableClientState(array):
    pass
@params(api='gl', prms=['target', 'v'])
def glMultiTexCoord2sv(target, v):
    pass
@params(api='gl', prms=['program', 'location', 'count', 'value'])
def glProgramUniform2uiv(program, location, count, value):
    pass
@params(api='gl', prms=['id', 'pname', 'params'])
def glGetQueryObjectuiv(id, pname, params):
    pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib4iv(index, v):
    pass
@params(api='gl', prms=['program', 'location', 'count', 'value'])
def glProgramUniform1uiv(program, location, count, value):
    pass
@params(api='gl', prms=['target', 'attachment', 'texture', 'level'])
def glFramebufferTexture(target, attachment, texture, level):
    pass
@params(api='gl', prms=['coord', 'pname', 'params'])
def glGetTexGendv(coord, pname, params):
    pass
@params(api='gl', prms=['v'])
def glColor3usv(v):
    pass
@params(api='gl', prms=['program', 'location', 'count', 'transpose', 'value'])
def glProgramUniformMatrix2x4fv(program, location, count, transpose, value):
    pass
@params(api='gl', prms=['program', 'location', 'count', 'value'])
def glProgramUniform2dv(program, location, count, value):
    pass
@params(api='gl', prms=['v'])
def glRasterPos2sv(v):
    pass
@params(api='gl', prms=['v'])
def glTexCoord1sv(v):
    pass
@params(api='gl', prms=['x', 'y'])
def glVertex2i(x, y):
    pass
@params(api='gl', prms=['pname', 'data'])
def glGetFloatv(pname, data):
    pass
@params(api='gl', prms=['x', 'y', 'z'])
def glWindowPos3f(x, y, z):
    pass
@params(api='gl', prms=['type', 'color'])
def glSecondaryColorP3uiv(type, color):
    pass
@params(api='gl', prms=['pname', 'data'])
def glGetIntegerv(pname, data):
    pass
@params(api='gl', prms=['program', 'location', 'count', 'transpose', 'value'])
def glProgramUniformMatrix3dv(program, location, count, transpose, value):
    pass
@params(api='gl', prms=['id'])
def glIsQuery(id):
    pass
@params(api='gl', prms=['target', 'level', 'internalformat', 'width', 'height', 'border', 'format', 'type', 'pixels'])
def glTexImage2D(target, level, internalformat, width, height, border, format, type, pixels):
    pass
@params(api='gl', prms=['width', 'height', 'format', 'type', 'pixels'])
def glDrawPixels(width, height, format, type, pixels):
    pass
@params(api='gl', prms=['m'])
def glMultMatrixd(m):
    pass
@params(api='gl', prms=['m'])
def glMultMatrixf(m):
    pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib4Nubv(index, v):
    pass
@params(api='gl', prms=['v'])
def glColor4usv(v):
    pass
@params(api='gl', prms=['un', 'u1', 'u2'])
def glMapGrid1f(un, u1, u2):
    pass
@params(api='gl', prms=['mask'])
def glPolygonStipple(mask):
    pass
@params(api='gl', prms=['format', 'stride', 'pointer'])
def glInterleavedArrays(format, stride, pointer):
    pass
@params(api='gl', prms=['program', 'shadertype', 'name'])
def glGetSubroutineUniformLocation(program, shadertype, name):
    pass
@params(api='gl', prms=['target', 'pname', 'params'])
def glGetFramebufferParameteriv(target, pname, params):
    pass
@params(api='gl', prms=['map', 'mapsize', 'values'])
def glPixelMapusv(map, mapsize, values):
    pass
# Auto-generated OpenGL entry-point stubs (continued); bodies intentionally empty.
@params(api='gl', prms=['sampler', 'pname', 'params'])
def glGetSamplerParameteriv(sampler, pname, params):
    pass
@params(api='gl', prms=['readTarget', 'writeTarget', 'readOffset', 'writeOffset', 'size'])
def glCopyBufferSubData(readTarget, writeTarget, readOffset, writeOffset, size):
    pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttribI1uiv(index, v):
    pass
@params(api='gl', prms=['v'])
def glColor3fv(v):
    pass
@params(api='gl', prms=['program', 'index', 'bufSize', 'length', 'size', 'type', 'name'])
def glGetActiveUniform(program, index, bufSize, length, size, type, name):
    pass
@params(api='gl', prms=['framebuffer', 'attachment', 'texture', 'level', 'layer'])
def glNamedFramebufferTextureLayer(framebuffer, attachment, texture, level, layer):
    pass
@params(api='gl', prms=['index', 'x', 'y', 'z'])
def glVertexAttribL3d(index, x, y, z):
    pass
@params(api='gl', prms=['v'])
def glTexCoord3sv(v):
    pass
@params(api='gl', prms=['value'])
def glMinSampleShading(value):
    pass
@params(api='gl', prms=['v'])
def glVertex2fv(v):
    pass
@params(api='gl', prms=['target', 'attachment', 'renderbuffertarget', 'renderbuffer'])
def glFramebufferRenderbuffer(target, attachment, renderbuffertarget, renderbuffer):
    pass
@params(api='gl', prms=['target', 'index', 'data'])
def glGetDoublei_v(target, index, data):
    pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib1sv(index, v):
    pass
@params(api='gl', prms=['unit', 'sampler'])
def glBindSampler(unit, sampler):
    pass
@params(api='gl', prms=['width'])
def glLineWidth(width):
    pass
@params(api='gl', prms=['target', 'index', 'data'])
def glGetIntegeri_v(target, index, data):
    pass
@params(api='gl', prms=['program', 'index', 'bufSize', 'length', 'size', 'type', 'name'])
def glGetTransformFeedbackVarying(program, index, bufSize, length, size, type, name):
    pass
@params(api='gl', prms=['v'])
def glWindowPos2iv(v):
    pass
@params(api='gl', prms=['pname', 'params'])
def glFogiv(pname, params):
    pass
@params(api='gl', prms=['pname', 'params'])
def glLightModeliv(pname, params):
    pass
@params(api='gl', prms=['n', 'f'])
def glDepthRangef(n, f):
    pass
@params(api='gl', prms=['target', 'index'])
def glEnablei(target, index):
    pass
@params(api='gl', prms=['u'])
def glEvalCoord1fv(u):
    pass
@params(api='gl', prms=['maskNumber', 'mask'])
def glSampleMaski(maskNumber, mask):
    pass
@params(api='gl', prms=['location', 'count', 'transpose', 'value'])
def glUniformMatrix3x2fv(location, count, transpose, value):
    pass
@params(api='gl', prms=['target', 'internalformat', 'pname', 'bufSize', 'params'])
def glGetInternalformativ(target, internalformat, pname, bufSize, params):
    pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib2dv(index, v):
    pass
@params(api='gl', prms=['flag'])
def glEdgeFlag(flag):
    pass
@params(api='gl', prms=['program', 'location', 'v0'])
def glProgramUniform1ui(program, location, v0):
    pass
@params(api='gl', prms=['x', 'y', 'z'])
def glVertex3d(x, y, z):
    pass
@params(api='gl', prms=['x', 'y', 'z'])
def glVertex3f(x, y, z):
    pass
@params(api='gl', prms=['x', 'y', 'z'])
def glVertex3s(x, y, z):
    pass
@params(api='gl', prms=['type', 'coords'])
def glTexCoordP2ui(type, coords):
    pass
@params(api='gl', prms=['index', 'r', 'g', 'b', 'a'])
def glColorMaski(index, r, g, b, a):
    pass
@params(api='gl', prms=['readBuffer', 'writeBuffer', 'readOffset', 'writeOffset', 'size'])
def glCopyNamedBufferSubData(readBuffer, writeBuffer, readOffset, writeOffset, size):
    pass
@params(api='gl', prms=['target', 'levels', 'internalformat', 'width', 'height', 'depth'])
def glTexStorage3D(target, levels, internalformat, width, height, depth):
    pass
@params(api='gl', prms=['texture', 'pname', 'param'])
def glTextureParameteriv(texture, pname, param):
    pass
@params(api='gl', prms=['location', 'count', 'transpose', 'value'])
def glUniformMatrix3x4fv(location, count, transpose, value):
    pass
@params(api='gl', prms=['type', 'stride', 'pointer'])
def glNormalPointer(type, stride, pointer):
    pass
@params(api='gl', prms=['framebuffer', 'attachment', 'texture', 'level'])
def glNamedFramebufferTexture(framebuffer, attachment, texture, level):
    pass
@params(api='gl', prms=['token'])
def glPassThrough(token):
    pass
@params(api='gl', prms=['type', 'color'])
def glSecondaryColorP3ui(type, color):
    pass
@params(api='gl', prms=['program', 'location', 'count', 'transpose', 'value'])
def glProgramUniformMatrix4x3fv(program, location, count, transpose, value):
    pass
@params(api='gl', prms=['mode'])
def glBegin(mode):
    pass
@params(api='gl', prms=['u'])
def glEvalCoord2dv(u):
    pass
@params(api='gl', prms=['v'])
def glColor3ubv(v):
    pass
@params(api='gl', prms=['type', 'value'])
def glVertexP3ui(type, value):
    pass
@params(api='gl', prms=['light', 'pname', 'params'])
def glLightfv(light, pname, params):
    pass
@params(api='gl', prms=['program', 'uniformIndex', 'bufSize', 'length', 'uniformName'])
def glGetActiveUniformName(program, uniformIndex, bufSize, length, uniformName):
    pass
@params(api='gl', prms=['target', 'attachment', 'pname', 'params'])
def glGetFramebufferAttachmentParameteriv(target, attachment, pname, params):
    pass
@params(api='gl', prms=['target', 's', 't'])
def glMultiTexCoord2f(target, s, t):
    pass
@params(api='gl', prms=['framebuffer', 'buf'])
def glNamedFramebufferDrawBuffer(framebuffer, buf):
    pass
@params(api='gl', prms=['target', 'pname', 'params'])
def glTexParameteriv(target, pname, params):
    pass
@params(api='gl', prms=['vaobj', 'bindingindex', 'buffer', 'offset', 'stride'])
def glVertexArrayVertexBuffer(vaobj, bindingindex, buffer, offset, stride):
    pass
# Auto-generated OpenGL entry-point stubs (continued); bodies intentionally empty.
@params(api='gl', prms=['target', 'level', 'format', 'type', 'pixels'])
def glGetTexImage(target, level, format, type, pixels):
    pass
@params(api='gl', prms=['xfb', 'index', 'buffer'])
def glTransformFeedbackBufferBase(xfb, index, buffer):
    pass
@params(api='gl', prms=['c'])
def glIndexsv(c):
    pass
@params(api='gl', prms=['type', 'coords'])
def glTexCoordP3uiv(type, coords):
    pass
@params(api='gl', prms=['width', 'height', 'xorig', 'yorig', 'xmove', 'ymove', 'bitmap'])
def glBitmap(width, height, xorig, yorig, xmove, ymove, bitmap):
    pass
@params(api='gl', prms=['buffer', 'offset', 'size', 'data'])
def glGetNamedBufferSubData(buffer, offset, size, data):
    pass
@params(api='gl', prms=['program', 'location', 'count', 'value'])
def glProgramUniform2iv(program, location, count, value):
    pass
@params(api='gl', prms=['target', 'pname', 'params'])
def glGetQueryiv(target, pname, params):
    pass
@params(api='gl', prms=['xfb', 'pname', 'param'])
def glGetTransformFeedbackiv(xfb, pname, param):
    pass
@params(api='gl', prms=['s', 't', 'r', 'q'])
def glTexCoord4i(s, t, r, q):
    pass
@params(api='gl', prms=['identifier', 'name', 'length', 'label'])
def glObjectLabel(identifier, name, length, label):
    pass
@params(api='gl', prms=['pname', 'params'])
def glPointParameteriv(pname, params):
    pass
@params(api='gl', prms=['v'])
def glNormal3fv(v):
    pass
@params(api='gl', prms=['v'])
def glTexCoord1fv(v):
    pass
@params(api='gl', prms=['target', 'v'])
def glMultiTexCoord1dv(target, v):
    pass
@params(api='gl', prms=['v'])
def glTexCoord3fv(v):
    pass
@params(api='gl', prms=['texture', 'type', 'coords'])
def glMultiTexCoordP3uiv(texture, type, coords):
    pass
@params(api='gl', prms=['index', 'type', 'normalized', 'value'])
def glVertexAttribP3ui(index, type, normalized, value):
    pass
@params(api='gl', prms=['near', 'far'])
def glDepthRange(near, far):
    pass
@params(api='gl', prms=['buf'])
def glDrawBuffer(buf):
    pass
@params(api='gl', prms=['map', 'bufSize', 'values'])
def glGetnPixelMapusv(map, bufSize, values):
    pass
@params(api='gl', prms=['v'])
def glRasterPos3fv(v):
    pass
@params(api='gl', prms=['buffer', 'drawbuffer', 'value'])
def glClearBufferuiv(buffer, drawbuffer, value):
    pass
@params(api='gl', prms=['target', 'internalformat', 'pname', 'bufSize', 'params'])
def glGetInternalformati64v(target, internalformat, pname, bufSize, params):
    pass
@params(api='gl', prms=['c'])
def glClearIndex(c):
    pass
@params(api='gl', prms=['index', 'size', 'type', 'stride', 'pointer'])
def glVertexAttribIPointer(index, size, type, stride, pointer):
    pass
@params(api='gl', prms=[])
def glFlush():
    pass
@params(api='gl', prms=['mode', 'count', 'type', 'indices', 'instancecount', 'basevertex', 'baseinstance'])
def glDrawElementsInstancedBaseVertexBaseInstance(mode, count, type, indices, instancecount, basevertex, baseinstance):
    pass
@params(api='gl', prms=['target', 'level', 'pname', 'params'])
def glGetTexLevelParameteriv(target, level, pname, params):
    pass
@params(api='gl', prms=['n', 'textures', 'priorities'])
def glPrioritizeTextures(n, textures, priorities):
    pass
@params(api='gl', prms=['size', 'buffer'])
def glSelectBuffer(size, buffer):
    pass
@params(api='gl', prms=['target', 'samples', 'internalformat', 'width', 'height', 'depth', 'fixedsamplelocations'])
def glTexStorage3DMultisample(target, samples, internalformat, width, height, depth, fixedsamplelocations):
    pass
@params(api='gl', prms=['target', 'clamp'])
def glClampColor(target, clamp):
    pass
@params(api='gl', prms=['s'])
def glClearStencil(s):
    pass
@params(api='gl', prms=['type', 'coords'])
def glTexCoordP1uiv(type, coords):
    pass
@params(api='gl', prms=['texture'])
def glIsTexture(texture):
    pass
@params(api='gl', prms=['x', 'y'])
def glVertex2f(x, y):
    pass
@params(api='gl', prms=['x', 'y'])
def glVertex2d(x, y):
    pass
@params(api='gl', prms=['target', 'index', 'id'])
def glBeginQueryIndexed(target, index, id):
    pass
@params(api='gl', prms=['factor', 'units'])
def glPolygonOffset(factor, units):
    pass
@params(api='gl', prms=['target', 'level', 'internalformat', 'width', 'height', 'depth', 'border', 'format', 'type', 'pixels'])
def glTexImage3D(target, level, internalformat, width, height, depth, border, format, type, pixels):
    pass
@params(api='gl', prms=['program', 'pname', 'params'])
def glGetProgramiv(program, pname, params):
    pass
@params(api='gl', prms=['program', 'location', 'count', 'value'])
def glProgramUniform4fv(program, location, count, value):
    pass
@params(api='gl', prms=['target', 'offset', 'length'])
def glFlushMappedBufferRange(target, offset, length):
    pass
@params(api='gl', prms=['target', 'levels', 'internalformat', 'width', 'height'])
def glTexStorage2D(target, levels, internalformat, width, height):
    pass
@params(api='gl', prms=['n', 'ids'])
def glGenQueries(n, ids):
    pass
@params(api='gl', prms=['map', 'values'])
def glGetPixelMapfv(map, values):
    pass
@params(api='gl', prms=['target', 'level', 'xoffset', 'yoffset', 'zoffset', 'width', 'height', 'depth', 'format', 'type', 'pixels'])
def glTexSubImage3D(target, level, xoffset, yoffset, zoffset, width, height, depth, format, type, pixels):
    pass
@params(api='gl', prms=['count', 'samplers'])
def glDeleteSamplers(count, samplers):
    pass
@params(api='gl', prms=['texture', 'pname', 'params'])
def glGetTextureParameterfv(texture, pname, params):
    pass
@params(api='gl', prms=['mode'])
def glMatrixMode(mode):
    pass
@params(api='gl', prms=['first', 'count', 'textures'])
def glBindTextures(first, count, textures):
    pass
@params(api='gl', prms=['pname', 'data'])
def glGetDoublev(pname, data):
    pass
@params(api='gl', prms=['index', 'x'])
def glVertexAttrib1d(index, x):
    pass
@params(api='gl', prms=['location', 'count', 'value'])
def glUniform4dv(location, count, value):
    pass
@params(api='gl', prms=['program', 'location', 'count', 'value'])
def glProgramUniform3dv(program, location, count, value):
    pass
@params(api='gl', prms=['buffer'])
def glInvalidateBufferData(buffer):
    pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'width', 'format', 'imageSize', 'data'])
def glCompressedTextureSubImage1D(texture, level, xoffset, width, format, imageSize, data):
    pass
@params(api='gl', prms=['texture', 'level', 'format', 'type', 'data'])
def glClearTexImage(texture, level, format, type, data):
    pass
@params(api='gl', prms=['location', 'count', 'value'])
def glUniform3fv(location, count, value):
    pass
@params(api='gl', prms=['texture', 'type', 'coords'])
def glMultiTexCoordP1ui(texture, type, coords):
    pass
@params(api='gl', prms=['xfb', 'pname', 'index', 'param'])
def | |
from typing import Dict
from code.front_end.our_ast import *
import itertools as itl
from collections import OrderedDict
class SemanticAnalysisError(Exception):
    """Exception raised when semantic analysis detects problems.

    Carries a list of human-readable problem descriptions in ``wrongs``;
    ``__show__()`` prints them all (debug helper, not an automatic dunder).
    """

    def __init__(self, wrongs=None):
        # BUG FIX: the original used a mutable default list and called a
        # bare `super()` (which only builds the proxy object and never runs
        # Exception.__init__, so str(exc) was empty).
        if wrongs is None:
            wrongs = ["Se encontraron errores semanticos"]
        super().__init__(wrongs)
        self.wrongs = wrongs

    def __show__(self):
        # Print every recorded problem, one per line.
        print("SemanticAnalysisError:")
        for bad in self.wrongs:
            print(bad)
class TypesGraph():
#Clase que va a facilitarme todo el manejo de la jerarquia de clases que detecto el parser. Ademas chequea algunos errores de tipo herencia y redefiniciones de metodos y demas.
#Tiene un LCA necesario para saber si una clase se conforma con otra necesario para el chequeo de tipos
    def __init__(self, programnode):
        """Build and validate the class-inheritance graph found by the parser."""
        self.program_node = programnode
        # Maps class name -> Classbook node of the graph.
        self.types_nodes = OrderedDict()
        # Built-in types that may NOT be inherited from (String, Int, Bool).
        # Object and IO *can* be inherited from, so they are not listed here.
        # Filled by insert_built_in_types().
        self.built_in_types = {}
        # Memoization cache for lowest-common-ancestor queries.
        self.lca = {}
        self.insert_built_in_types()
        self.built()
        self.generate_beg_end_times()
    def __show__(self):
        """Debug helper: print every classbook, the non-inheritable built-in
        names, and the memoized LCA answers."""
        print("Listando las caracteristicas del TypeGraph,vamos a escribir sus classbooks correspondientes")
        for name in self.types_nodes:
            self.types_nodes[name].__show__()
        print("Tiene length de los tipos built_in que no se puede heredar = " + str(len(self.built_in_types)) + " ,vamos a listar los nombres de estos classbooks:")
        for name in self.built_in_types:
            print("Uno de los classbooks built_in que no se puede heredar tiene nombre = " + str(name))
        print("Vamos a mostrar la lista de memoization de LCA ya guardados:")
        for tuplex in self.lca:
            print(str(tuplex[0]) + " : " + str(tuplex[1]) + " -> " + str(self.lca[tuplex]))
def generate_beg_end_times(self):
#Metodo que va a agragarle a todos los classbooks los tiempos de descubrimiento y salida, esto lo usamos para saber en O(1) si A hereda de B.
#Esto pasa si y solo si (A.begin <= B.end) and (A.begin >= B.begin), esto pues el DFS funciona visitando primero a mis
count = 1
def dfs(book):
nonlocal count
book.begin = count
count +=1
for son in book.sons:
dfs(son)
book.end = count
count +=1
dfs(self.types_nodes["Object"])
    def built(self):
        """Build the inheritance graph and validate it.

        Detects: class redefinitions, inheriting from an undeclared class,
        inheritance cycles, inheritance from a non-inheritable built-in, and
        a missing Main class / Main.main method.  Raises
        SemanticAnalysisError on the first problem found.
        """
        # Register every user-declared class; a name collision (including
        # with a built-in already registered) is a redefinition error.
        for clas in self.program_node.classes:
            if clas.name in self.types_nodes:
                mistake = "Existen dos definiciones de la clase " + str(clas.name) + " que no es permitido, tener en cuenta que existen clases built_in en Cool que no se pueden redefinir."
                raise SemanticAnalysisError([mistake])
            else:
                self.types_nodes[clas.name] = Classbook(clas)
        # Wire every Classbook to its parent Classbook.  The parent is looked
        # up via classNode.parent (the parent's *name* string), because the
        # parent's Classbook reference may not be initialized yet.
        for name in self.types_nodes:
            current = self.types_nodes[name]
            if current.name != "Object" and (not current.classNode.parent in self.types_nodes):
                # Parent name not declared anywhere in the module.
                mistake = "La clase " + str(current.name) + " esta heredando de la clase padre " + str(current.classNode.parent) + " que no esta definida."
                raise SemanticAnalysisError([mistake])
            if current.name != "Object":
                self.types_nodes[current.classNode.parent].add_child(current)
        # BFS from Object: checks each class's method redefinitions, assigns
        # depths, and detects inheritance cycles (a node seen twice, or nodes
        # unreachable from Object, imply a cycle).
        visited = {}
        visited["Object"] = True
        self.types_nodes["Object"].deph = 0
        pointer = 0
        cola = [self.types_nodes["Object"]]
        while pointer < len(cola):
            current = cola[pointer]
            current.check_redefinitions()
            pointer += 1
            for son in current.sons:
                if son.name in visited:
                    mistake = "La herencia de la clase " + str(son.name) + " que hereda de " + str(current.name) + " genera un ciclo de herencia no permitido"
                    raise SemanticAnalysisError([mistake])
                else:
                    visited[son.name] = True
                    cola.append(son)
                    son.deph = current.deph + 1
        if pointer != len(self.types_nodes):
            # Some classes were never reached from Object -> there is a cycle.
            mistake = " Existe un ciclo de herencia no permitido debido a que la clase Object no es ancestro de todas las clases"
            raise SemanticAnalysisError([mistake])
        # String/Int/Bool may not be inherited from.
        for name in self.built_in_types:
            if name != "Object" and len(self.types_nodes[name].sons) != 0:
                mistake = "Se esta heredando de la clase built_in " + str(name) + " y esto no esta permitido"
                raise SemanticAnalysisError([mistake])
        # The program entry point is Main.main(): the class must exist, main
        # must be declared on Main itself (an inherited main does not count),
        # and it must take no parameters.
        if not "Main" in self.types_nodes:
            mistake = "No existe una clase Main lo cual no esta permitido dado que para correr el codigo del programa se corre Main.main()"
            raise SemanticAnalysisError([mistake])
        if self.types_nodes["Main"].give_method("main") == False:
            mistake = "La clase Main no tiene un metodo main lo que no es permitido, recordad que para correr el codigo Cool se hace Main.main(), tampoco es valido que tenga en metodo main heredado, tiene que tener la definicion del metodo en su declaracion "
            raise SemanticAnalysisError([mistake])
        if len(self.types_nodes["Main"].give_method("main")) > 1:
            # NOTE(review): this presumably enforces "main takes no
            # parameters" -- confirm give_method's return shape.
            mistake = "El metodo main de la clase Main no puede tomar parametros"
            raise SemanticAnalysisError([mistake])
    def insert_built_in_types(self):
        """Register Cool's built-in classes (signatures only).

        Only method/attribute signatures are stored -- enough for type
        checking; the bodies (None) are supplied by the runtime.
        """
        # Object: root of the hierarchy.
        Object_class = ClassNode(name = "Object",parent = None,attrs = [] ,methods =
            [MethodNode(name = "abort",params=[],return_type="Object",body = None)  # abort(): Object
            ,MethodNode(name = "copy", params = [], return_type= "SELF_TYPE", body = None),  # copy(): SELF_TYPE (same dynamic type as the receiver)
            MethodNode(name = "type_name", params = [], return_type = "String",body = None)
            ] )
        IO_class = ClassNode(name= "IO", parent = "Object", attrs= [],methods =
            [MethodNode(name = "in_int", params=[],return_type="Int",body = None),
            MethodNode(name = "in_string", params=[],return_type="String", body = None),
            MethodNode(name = "out_int", params=[ParamNode("x","Int")], return_type= "SELF_TYPE",body = None),
            MethodNode(name = "out_string", params=[ParamNode("x", "String")], return_type="SELF_TYPE", body = None)])
        Int_class = ClassNode(name ="Int", parent= "Object",attrs= [],methods=[] )
        String_class = ClassNode(name = "String", parent = "Object", attrs=[],methods=[
            MethodNode(name = "length", params= [], return_type="Int", body = None),
            MethodNode(name = "concat", params = [ParamNode("s","String")], return_type="String", body = None),
            MethodNode(name = "substr", params = [ParamNode("i","Int"),ParamNode("l","Int")], return_type="String", body = None)])
        Bool_class = ClassNode(name ="Bool", parent="Object", attrs=[],methods=[])
        # Only String, Int and Bool are recorded as non-inheritable;
        # Object and IO may legally be inherited from.
        self.built_in_types["String"] = True
        self.built_in_types["Int"] = True
        self.built_in_types["Bool"] = True
        # Register the built-in classbooks in the graph.
        self.types_nodes["String"] = Classbook(String_class)
        self.types_nodes["Int"] = Classbook(Int_class)
        self.types_nodes["Bool"] = Classbook(Bool_class)
        self.types_nodes["Object"] = Classbook(Object_class)
        self.types_nodes["IO"] = Classbook(IO_class)
def lowest_common_ancestor(self,classA,classB):
#metodo que recibe dos clases y ddevuelve el LCA de estos.
if isinstance(classA,ClassNode) and isinstance(classA,ClassNode):
nameA = classA.name
nameB = classB.name
want_classes = True
else:
nameA = classA
| |
# Repository: s911415/CheckSum-Tool
# The MIT License
#
# Copyright (c) 2008 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# SetVersions.py
# A Python script to set various component's version numbers in the project.
import ConfigParser
import getopt
import os
import shutil
import sys
# The version of this SetVersions script itself (not of the product whose
# version numbers it stamps into the project files).
script_version = 0.9
def get_product_version(filename):
    '''Get product version used in archive paths etc.

    Reads option "version" from the [Executable] section of *filename* and
    strips at most one surrounding quotation mark from each end.  Returns
    an empty string when the option is missing.
    TODO: add new section. At the moment this gets executable version.
    '''
    config = ConfigParser.ConfigParser()
    # BUG FIX: the original readfp(open(filename)) leaked the file handle;
    # read_file() is the modern spelling of the removed readfp().
    with open(filename) as fp:
        if hasattr(config, 'read_file'):
            config.read_file(fp)
        else:
            config.readfp(fp)
    # First try RC file, then define-macro
    version = ''
    if config.has_section('Executable') and \
       config.has_option('Executable', 'version'):
        version = config.get('Executable', 'version')
    else:
        # print() with a single argument behaves identically on Python 2.
        print('Executable version not found, using default.')
    # Remove at most one quotation mark from each end.
    if len(version) > 0:
        if version[0] == '"':
            version = version[1:]
        if version[len(version) - 1] == '"':
            version = version[:len(version) - 1]
    return version
def process_NSIS(filename, config, sect):
    '''Process NSIS section in the ini file.

    Reads version/path/description options from *sect* and stamps the
    version into the referenced NSIS script.  Returns True on success.
    '''
    if config.has_option(sect, 'version') and \
       config.has_option(sect, 'path') and \
       config.has_option(sect, 'description'):
        ver = config.get(sect, 'version')
        # Renamed local: `file` shadowed a builtin.
        path = config.get(sect, 'path')
        desc = config.get(sect, 'description')
    else:
        print('ERROR: NSIS section does not have all required options!')
        return False
    # print() with one argument behaves identically on Python 2.
    print('%s : %s' % (sect, desc))
    print(' File: ' + path)
    print(' Version: ' + ver)
    # The script path is relative to the INI file's directory.
    inidir = os.path.dirname(filename)
    nsisfile = os.path.join(inidir, path)
    ret = set_NSIS_ver(nsisfile, ver)
    return ret
def set_NSIS_ver(file, version):
    '''Set version into NSIS installer file.

    Rewrites the "!define PRODUCT_VERSION" line, replacing the quoted value
    (quotes included) with *version*, via a .bak temp file.  Returns True on
    success, False when either file cannot be opened.
    '''
    outfile = file + '.bak'
    try:
        fread = open(file, 'r')
    except IOError as err:
        # `except IOError as err` works on Python 2.6+ and 3.
        print('Cannot open file ' + file + ' for reading')
        print('Error: ' + str(err.strerror))
        return False
    try:
        fwrite = open(outfile, 'w')
    except IOError as err:
        # BUG FIX: the original printed the undefined name `infile` here.
        print('Cannot open file ' + outfile + ' for writing')
        print('Error: ' + str(err.strerror))
        fread.close()
        return False
    # Replace PRODUCT_VERSION macro value with new value
    for line in fread:
        if line.startswith('!define PRODUCT_VERSION'):
            ind = line.find('\"')
            ind2 = line.rfind('\"')
            if ind != -1 and ind2 != -1:
                line = line[:ind] + version + line[ind2 + 1:]
        fwrite.write(line)
    fread.close()
    fwrite.close()
    # Atomically (on the same filesystem) replace the original.
    shutil.move(outfile, file)
    return True
def process_AssemblyCs(filename, config, sect):
    '''Process C# AssemblyInfo section in the ini file.

    Reads version/path/description options from *sect* and stamps the
    version into the referenced AssemblyInfo file.  Returns True on success.
    '''
    if config.has_option(sect, 'version') and \
       config.has_option(sect, 'path') and \
       config.has_option(sect, 'description'):
        ver = config.get(sect, 'version')
        # Renamed local: `file` shadowed a builtin.
        path = config.get(sect, 'path')
        desc = config.get(sect, 'description')
    else:
        print('ERROR: Assembly info section does not have all required options!')
        return False
    print('%s : %s' % (sect, desc))
    print(' File: ' + path)
    print(' Version: ' + ver)
    # The file path is relative to the INI file's directory.
    inidir = os.path.dirname(filename)
    asmfile = os.path.join(inidir, path)
    ret = set_CSAssembly_ver(asmfile, ver)
    return ret
def set_CSAssembly_ver(file, version):
    '''Set version into C# Assembly Info file.

    Rewrites the "[assembly: AssemblyVersion...]" line via a .bak temp
    file.  Returns True on success, False when either file cannot be opened.
    '''
    outfile = file + '.bak'
    try:
        fread = open(file, 'r')
    except IOError as err:
        print('Cannot open file ' + file + ' for reading')
        print('Error: ' + str(err.strerror))
        return False
    try:
        fwrite = open(outfile, 'w')
    except IOError as err:
        # BUG FIX: the original printed the undefined name `infile` here.
        print('Cannot open file ' + outfile + ' for writing')
        print('Error: ' + str(err.strerror))
        fread.close()
        return False
    # Replace AssemblyVersion value with new value
    for line in fread:
        if line.startswith('[assembly: AssemblyVersion'):
            # BUG FIX: the original discarded the return value, so the
            # version was never actually written (cf. set_WinRC_ver, which
            # assigns the result back to `line`).
            line = replace_ver_in_quotes(line, version)
        fwrite.write(line)
    fread.close()
    fwrite.close()
    shutil.move(outfile, file)
    return True
def process_WinRC(filename, config, sect):
    '''Process Windows RC file section in the ini file.

    Reads version/path/description options from *sect* and stamps the
    version into the referenced RC file.  Returns True on success.
    '''
    if config.has_option(sect, 'version') and \
       config.has_option(sect, 'path') and \
       config.has_option(sect, 'description'):
        ver = config.get(sect, 'version')
        # Renamed local: `file` shadowed a builtin.
        path = config.get(sect, 'path')
        desc = config.get(sect, 'description')
    else:
        print('ERROR: RC file section does not have all required options!')
        return False
    print('%s : %s' % (sect, desc))
    print(' File: ' + path)
    print(' Version: ' + ver)
    # The RC file path is relative to the INI file's directory.
    inidir = os.path.dirname(filename)
    rcfile = os.path.join(inidir, path)
    ret = set_WinRC_ver(rcfile, ver)
    return ret
def set_WinRC_ver(file, version):
    '''Set version into Windows RC file. Currently we set both fileversion and
    productversion to same number.
    TODO: Allow separate file- and product -versions.
    '''
    outfile = file + '.bak'
    try:
        fread = open(file, 'r')
    except IOError as err:
        print('Cannot open file ' + file + ' for reading')
        print('Error: ' + str(err.strerror))
        return False
    try:
        fwrite = open(outfile, 'w')
    except IOError as err:
        # BUG FIX: the original printed the undefined name `infile` here.
        print('Cannot open file ' + outfile + ' for writing')
        print('Error: ' + str(err.strerror))
        fread.close()
        return False
    # Build the NUL-terminated variant used for the quoted VALUE strings.
    # (NOTE(review): the old comment also promised dot->comma replacement,
    # which the code never did.)
    verInd = version.rfind('\"')
    if verInd != -1:
        versionNul = version[:verInd] + '\\0' + version[verInd:]
    else:
        # BUG FIX: with an unquoted version the original left versionNul
        # unbound, raising NameError on the VALUE lines below.
        versionNul = version + '\\0'
    # Replace version defines with new value
    ret = True
    for line in fread:
        if line.startswith(' FILEVERSION'):
            line = replace_rc_ver_at_end(line, version)
        if line.startswith(' PRODUCTVERSION'):
            line = replace_rc_ver_at_end(line, version)
        if line.find('VALUE \"FileVersion\"') != -1:
            line = replace_ver_in_quotes(line, versionNul)
        if line.find('VALUE \"ProductVersion\"') != -1:
            line = replace_ver_in_quotes(line, versionNul)
        fwrite.write(line)
    fread.close()
    fwrite.close()
    shutil.move(outfile, file)
    return ret
def process_InnoSetup(filename, config, sect):
    '''Process Innosetup script file section in the ini file.

    Requires version/path/description/macro options in *sect*; stamps the
    version into the named #define macro.  Returns True on success.
    '''
    if config.has_option(sect, 'version') and \
       config.has_option(sect, 'path') and \
       config.has_option(sect, 'description') and \
       config.has_option(sect, 'macro'):
        ver = config.get(sect, 'version')
        # Renamed local: `file` shadowed a builtin.
        path = config.get(sect, 'path')
        desc = config.get(sect, 'description')
        macro = config.get(sect, 'macro')
    else:
        print('ERROR: InnoSetup section does not have all required options!')
        return False
    print('%s : %s' % (sect, desc))
    print(' File: ' + path)
    print(' Version: ' + ver)
    if len(macro):
        print(' Macro: ' + macro)
    else:
        print(' ERROR: You must set macro name in the INI file.')
        return False
    # The script path is relative to the INI file's directory.
    inidir = os.path.dirname(filename)
    innofile = os.path.join(inidir, path)
    ret = set_InnoSetup_ver(innofile, ver, macro)
    return ret
def set_InnoSetup_ver(file, version, macro):
    '''Set version into InnoSetup script. If the macro setting was found
    from the INI file replace only that macro's value. Currently the script
    supports only replacing initial macro value.
    TODO: support other means (not just macro) to set version.
    '''
    outfile = file + '.bak'
    try:
        fread = open(file, 'r')
    except IOError as err:
        print('Cannot open file ' + file + ' for reading')
        print('Error: ' + str(err.strerror))
        return False
    try:
        fwrite = open(outfile, 'w')
    except IOError as err:
        # BUG FIX: the original printed the undefined name `infile` here.
        print('Cannot open file ' + outfile + ' for writing')
        print('Error: ' + str(err.strerror))
        fread.close()
        return False
    # Replace version macro value with new value; everything after the
    # macro name on that line is discarded.
    macroline = '#define ' + macro
    for line in fread:
        if line.startswith(macroline):
            line = line[:len(macroline)] + ' ' + version + '\n'
        fwrite.write(line)
    fread.close()
    fwrite.close()
    shutil.move(outfile, file)
    return True
def process_CDefine(filename, config, sect):
'''Read version number information for setting it into C/C++ #defines.'''
if config.has_option(sect, 'version') and \
config.has_option(sect, 'path') and \
config.has_option(sect, 'description'):
ver = config.get(sect, 'version')
file = config.get(sect, 'path')
desc = config.get(sect, 'description')
else:
print 'ERROR: C Define section does not have all required options!'
return False
# Macro names to use
if config.has_option(sect, 'define-major'):
major = config.get(sect, 'define-major')
if config.has_option(sect, 'define-minor'):
minor = config.get(sect, 'define-minor')
if config.has_option(sect, 'define-subrelease'):
subrel = config.get(sect, 'define-subrelease')
if config.has_option(sect, 'define-buildnumber'):
buildnum = config.get(sect, 'define-buildnumber')
if len(major) == 0 and len(minor) == 0 and len(subrel) == 0:
print ' ERROR: You must set at least one of major/minor/subrelease version numbers.'
return False
print '%s : %s' % (sect, desc)
print ' File: ' | |
list_properties = [
'name',
'state',
'time',
'checkpointOf',
'fileSystemSize',
'baseline',
'writeable',
'readOnly',
]
self._copy_properties(item[1], check_point, list_properties)
check_point['id'] = item[1].get('checkpoint', '')
break
if check_point['id'] == '':
status = constants.STATUS_ERROR
return status, check_point
    def list_storage_pool(self):
        """Query all storage pools; returns (status, list-of-pool-dicts).

        STATUS_ERROR is returned when the query yields no pools.
        """
        pools = []
        request = self._build_query_package(
            self.elt_maker.StoragePoolQueryParams()
        )
        status, msg, result = self._send_request(request)
        if constants.STATUS_OK != status:
            return status, msg
        # Fold every StoragePool element of the response into a plain dict.
        for item in result:
            if item[0] == 'StoragePool':
                pool = {
                    'name': '',
                    'id': '',
                    "diskType": '',
                    'used_size': '',
                    'total_size': '',
                    'movers_id': [],
                    'virtualProvisioning': '',
                    'dataServicePolicies': '',
                    'greedy': '',
                    'isBackendPool': '',
                }
                # Properties copied verbatim from the response element.
                list_properties = [
                    'name',
                    'diskType',
                    'virtualProvisioning',
                    'dataServicePolicies',
                    'greedy',
                    'isBackendPool',
                ]
                self._copy_properties(item[1], pool, list_properties)
                # Remaining fields use different source keys.
                pool['id'] = item[1].get('pool', '')
                pool['used_size'] = item[1].get('usedSize', '')
                pool['total_size'] = item[1].get('autoSize', '')
                # 'movers' is a whitespace-separated id list.
                if 'movers' in item[1].keys():
                    pool['movers_id'] = item[1]['movers'].split()
                pools.append(pool)
        if not pools:
            status = constants.STATUS_ERROR
        return status, pools
def get_mover_ref_by_name(self, name):
mover = {
'name': '',
'id': '',
}
request = self._build_query_package(
self.elt_maker.MoverQueryParams(
self.elt_maker.AspectSelection(movers='true')
)
)
status, msg, result = self._send_request(request)
if constants.STATUS_ERROR == status:
return status, msg
for item in result:
if (item[0] == 'Mover' and item[1]['name'] == name
and 'mover' in item[1].keys()):
mover['id'] = item[1]['mover']
mover['name'] = name
break
if mover['id'] == '':
status = constants.STATUS_NOT_FOUND
return status, mover
    def get_mover_by_id(self, mover_id):
        """Query one mover with all aspects; returns (status, mover-dict).

        The dict aggregates the mover's identity, status, non-internal
        interfaces, logical network devices and DNS domains.
        """
        mover = {
            'name': '',
            'id': '',
            'role': '',
            'Status': '',
            'uptime': '',
            'version': '',
            'interfaces': [],
            'devices': [],
            'dns_domain': [],
        }
        request = self._build_query_package(
            self.elt_maker.MoverQueryParams(
                self.elt_maker.AspectSelection(
                    moverDeduplicationSettings='true',
                    moverDnsDomains='true',
                    moverInterfaces='true',
                    moverNetworkDevices='true',
                    moverNisDomains='true',
                    moverRoutes='true',
                    movers='true',
                    moverStatuses='true'
                ),
                mover=mover_id
            )
        )
        status, msg, result = self._send_request(request)
        if constants.STATUS_OK != status:
            return status, msg
        # The response interleaves several element types; fold each into
        # the aggregate dict.
        for item in result:
            if item[0] == 'Mover':
                mover['name'] = item[1].get('name', '')
                mover['id'] = item[1].get('mover', '')
                mover['role'] = item[1].get('role', '')
            elif item[0] == 'MoverStatus':
                self._copy_properties(item[1],
                                      mover,
                                      ['Status', 'uptime', 'version'])
            elif item[0] == 'MoverInterface':
                interface = {
                    'name': '',
                    'device': '',
                    'ipVersion': '',
                    'netMask': '',
                    'vlan_id': '',
                    'ipAddress': '',
                }
                list_properties = [
                    'name',
                    'device',
                    'ipVersion',
                    'netMask',
                    'ipAddress',
                ]
                self._copy_properties(item[1], interface, list_properties)
                interface['vlan_id'] = item[1].get('vlanid', '')
                # Keep only interfaces on non-internal devices that
                # actually carry an IP address.
                if (self._is_not_internal_device(interface['device'])
                        and 'ipAddress' in item[1].keys()):
                    mover['interfaces'].append(interface)
            elif item[0] == 'LogicalNetworkDevice':
                logical_network_device = {
                    'speed': '',
                    'interfaces': '',
                    'type': '',
                    'name': '',
                }
                self._copy_properties(item[1],
                                      logical_network_device,
                                      ['speed', 'interfaces', 'type', 'name'])
                mover['devices'].append(logical_network_device)
            elif item[0] == 'MoverDnsDomain':
                dns_domain = {
                    'name': '',
                    'servers': '',
                    'protocol': '',
                }
                self._copy_properties(item[1],
                                      dns_domain,
                                      dns_domain.keys())
                mover['dns_domain'].append(dns_domain)
        # No 'Mover' element in the response -> the lookup failed.
        if mover['id'] == '':
            status = constants.STATUS_ERROR
        return status, mover
def extend_file_system(self, fs_id, pool_id, new_size):
request = self._build_task_package(
self.elt_maker.ExtendFileSystem(
self.elt_maker.StoragePool(
pool=pool_id,
size=six.text_type(new_size)
),
fileSystem=fs_id,
)
)
status, msg, result = self._send_request(request)
return status, msg
def create_vdm(self, name, host_mover_id):
request = self._build_task_package(
self.elt_maker.NewVdm(mover=host_mover_id, name=name)
)
status, msg, result = self._send_request(request)
return status, msg
def delete_vdm(self, vdm_id):
request = self._build_task_package(
self.elt_maker.DeleteVdm(vdm=vdm_id)
)
status, msg, result = self._send_request(request)
return status, msg
def get_vdm_by_name(self, name):
vdm = {
"name": '',
"id": '',
"state": '',
'host_mover_id': '',
'interfaces': [],
}
request = self._build_query_package(self.elt_maker.VdmQueryParams())
status, msg, result = self._send_request(request)
if constants.STATUS_OK != status:
return status, msg
for item in result:
if item[0] == 'Vdm' and item[1]['name'] == name:
vdm['name'] = item[1].get('name', '')
vdm['host_mover_id'] = item[1].get('mover', '')
vdm['interfaces'] = item[1].get('Interfaces', '')
vdm['state'] = item[1].get('state', '')
vdm['id'] = item[1].get('vdm', '')
break
if vdm['id'] == '':
status = constants.STATUS_NOT_FOUND
return status, vdm
def create_mover_interface(self, name, device_name, ip_addr, mover_id,
net_mask='255.255.255.0', vlan_id=None):
vlan_id = vlan_id if vlan_id else '-1'
interface = {
'name': name,
'device_name': device_name,
'ip': ip_addr,
'mover_id': mover_id,
'net_mask': net_mask,
'vlan_id': vlan_id,
}
request = self._build_task_package(
self.elt_maker.NewMoverInterface(
device=device_name,
ipAddress=six.text_type(ip_addr),
mover=mover_id,
name=name,
netMask=net_mask,
vlanid=six.text_type(vlan_id)
)
)
status, msg, result = self._send_request(request)
if constants.STATUS_OK != status:
return status, msg
return status, interface
def delete_mover_interface(self, ip_addr, mover_id):
request = self._build_task_package(
self.elt_maker.DeleteMoverInterface(
ipAddress=six.text_type(ip_addr),
mover=mover_id
)
)
status, msg, result = self._send_request(request)
return status, msg
    def create_cifs_server(self, args):
        """Create a W2K CIFS server on a VDM and join it to the domain.

        Returns (STATUS_OK, server-dict) on success; (STATUS_ERROR, msg)
        when creation or the domain join fails.
        """
        computer_name = args['compName']
        netbios_name = args['netbios']
        mover_id = args['mover_id']
        domain_name = args['domain']
        interfaces = args['interface']
        alias_names = args['alias']
        user_name = args['admin_username']
        # NOTE(review): '<PASSWORD>' looks like a redaction placeholder left
        # by source anonymization -- restore the real args key before use.
        password = args['<PASSWORD>']
        alias_name_list = [self.elt_maker.li(alias) for alias in alias_names]
        request = self._build_task_package(
            self.elt_maker.NewW2KCifsServer(
                self.elt_maker.MoverOrVdm(mover=mover_id, moverIdIsVdm='true'),
                self.elt_maker.Aliases(*alias_name_list),
                self.elt_maker.JoinDomain(userName=user_name,
                                          password=password),
                compName=computer_name,
                domain=domain_name,
                interfaces=interfaces,
                name=netbios_name
            )
        )
        status, msg, result = self._send_request(request)
        if constants.STATUS_OK == status:
            # The task can report OK while the domain join still failed;
            # inspect the message codes to distinguish.
            if (constants.MSG_JOIN_DOMAIN_FAILED
                    in self._get_message_codes(msg)):
                # Domain Join Failed
                return constants.STATUS_ERROR, msg
            else:
                cifs_server = {
                    'compName': computer_name,
                    'netbios_name': netbios_name,
                    'mover_id': mover_id,
                    'domain_name': domain_name,
                    'interfaces': interfaces,
                    'alias_names': alias_names,
                }
                return status, cifs_server
        else:
            return status, msg
    def modify_cifs_server(self, args):
        """Join or unjoin an existing CIFS server to/from its domain."""
        mover_id = args['mover_id']
        name = args['name']
        join_domain = args['join_domain']
        user_name = args['admin_username']
        # NOTE(review): '<PASSWORD>' looks like a redaction placeholder left
        # by source anonymization -- restore the real args key before use.
        password = args['<PASSWORD>']
        # The target defaults to a VDM unless the caller says otherwise.
        is_vdm = args['is_vdm'] if 'is_vdm' in args.keys() else 'true'
        request = self._build_task_package(
            self.elt_maker.ModifyW2KCifsServer(
                self.elt_maker.DomainSetting(
                    joinDomain=join_domain,
                    password=password,
                    userName=user_name,
                ),
                mover=mover_id,
                moverIdIsVdm=is_vdm,
                name=name
            )
        )
        status, msg, result = self._send_request(request)
        return status, msg
def delete_cifs_server(self, server_name, mover_id, is_vdm='true'):
request = self._build_task_package(
self.elt_maker.DeleteCifsServer(
mover=mover_id,
moverIdIsVdm=is_vdm,
name=server_name
)
)
status, msg, result = self._send_request(request)
return status, msg
    def get_cifs_servers(self, mover_id, is_vdm=True):
        """List CIFS servers hosted on a mover or VDM.

        Returns (status, list-of-server-dicts); STATUS_NOT_FOUND when the
        query returns none.
        """
        cifs_servers = []
        request = self._build_query_package(
            self.elt_maker.CifsServerQueryParams(
                self.elt_maker.MoverOrVdm(
                    mover=mover_id,
                    moverIdIsVdm='true' if is_vdm else 'false'
                )
            )
        )
        status, msg, result = self._send_request(request)
        if constants.STATUS_OK != status:
            return status, msg
        for item in result:
            if item[0] == 'CifsServer':
                server = {
                    'name': '',
                    'interfaces': '',
                    'type': '',
                    'compName': '',
                    "mover_id": '',
                    'moverIdIsVdm': '',
                    'domain': '',
                    'domainJoined': '',
                }
                # Properties copied verbatim from the response element.
                list_properties = [
                    'name',
                    'type',
                    'compName',
                    'moverIdIsVdm',
                    'domain',
                    'domainJoined',
                ]
                self._copy_properties(item[1], server, list_properties)
                # 'interfaces' arrives comma-separated; expose it as a list.
                if 'interfaces' in item[1].keys():
                    server['interfaces'] = item[1]['interfaces'].split(',')
                server['mover_id'] = item[1].get('mover', '')
                cifs_servers.append(server)
        if len(cifs_servers) == 0:
            status = constants.STATUS_NOT_FOUND
        return status, cifs_servers
def create_dns_domain(self, mover_id, name, servers, protocol='udp'):
request = self._build_task_package(
self.elt_maker.NewMoverDnsDomain(
mover=mover_id,
name=name,
servers=servers,
protocol=protocol
)
)
status, msg, result = self._send_request(request)
return status, msg
def delete_dns_domain(self, mover_id, name):
request = self._build_task_package(
self.elt_maker.DeleteMoverDnsDomain(
mover=mover_id,
name=name
)
)
status, msg, result = self._send_request(request)
return status, msg
class SSHConnector(object):
    """Thin SSH command runner against the storage Control Station."""

    def __init__(self, configuration):
        super(SSHConnector, self).__init__()
        self.storage_ip = configuration.emc_nas_server
        self.user_name = configuration.emc_nas_login
        # NOTE(review): "<PASSWORD>" is a redaction placeholder left by
        # source anonymization (this line is not valid Python as-is) --
        # restore the real configuration option before use.
        self.pass_word = <PASSWORD>
        # Pool of reusable SSH connections on port 22.
        self.sshpool = utils.SSHPool(self.storage_ip,
                                     22,
                                     None,
                                     self.user_name,
                                     password=self.pass_word)

    def run_ssh(self, cmd, attempts=1):
        """Run *cmd* (string or arg list) over SSH; returns (stdout, stderr).

        NOTE(review): the retry loop never breaks after a successful
        attempt, so the command is re-executed `attempts` times, and
        `stdout`/`stderr` are unbound (NameError at return) if every
        attempt raises before they are assigned -- confirm intent.
        """
        try:
            if not isinstance(cmd, str):
                # Join list arguments into a single command line.
                cmd = map(str, cmd)
                command = ' '.join(cmd)
            else:
                command = cmd
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        stdin_stream, stdout_stream, stderr_stream = (
                            ssh.exec_command(command))
                        stdout = stdout_stream.read()
                        stderr = stderr_stream.read()
                        stdin_stream.close()
                    except Exception as e:
                        # Back off a random 0.2-5.0 s before the next try.
                        LOG.debug(e)
                        greenthread.sleep(random.randint(20, 500) / 100.0)
        except Exception:
            LOG.error(_LE("Error running SSH command: %s"), command)
        return stdout, stderr
@vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit,
debug_only=True)
class NASCommandHelper(object):
    def __init__(self, configuration):
        """Wrap an SSH connection to the Control Station for NAS CLI commands."""
        super(NASCommandHelper, self).__init__()
        self._conn = SSHConnector(configuration)
        # Add more patterns for ssh retry.
        # Output matching SSH_DEFAULT_RETRY_PATTERN triggers a retry via
        # EMCVnxLockRequiredException.
        self.retry_patterns = [
            (constants.SSH_DEFAULT_RETRY_PATTERN,
             manila.exception.EMCVnxLockRequiredException())]
    def get_interconnect_id(self, src, dest):
        """Return the Data Mover interconnect id between *src* and *dest*.

        Parses `nas_cel -interconnect -l` output; returns None when no
        matching interconnect is listed.
        """
        # Expected column header of the command output.
        header = [
            'id',
            'name',
            'source_server',
            'destination_system',
            'destination_server',
        ]
        conn_id = None
        command_nas_cel = [
            'env', 'NAS_DB=/nas', '/nas/bin/nas_cel',
            '-interconnect', '-l',
        ]
        out, err = self._conn.run_ssh(command_nas_cel)
        lines = out.strip().split('\n')
        for line in lines:
            if line.strip().split() == header:
                LOG.info(_LI('Found the header of the command '
                             '/nas/bin/nas_cel -interconnect -l'))
            else:
                # NOTE(review): assumes every non-header line has at least
                # five whitespace-separated columns -- confirm blank lines
                # cannot occur here.
                interconn = line.strip().split()
                if interconn[2] == src and interconn[4] == dest:
                    conn_id = interconn[0]
        return conn_id
    def create_fs_from_ckpt(self, fs_name, mover_name,
                            source_ckpt, source_fs,
                            dest_pool_name, connect_id):
        """Create file system *fs_name* from a checkpoint via nas_copy.

        Sequence: create an equally-sized file system, mount it read-only,
        full-copy the checkpoint over the interconnect, delete any leftover
        checkpoints on the new file system, then remount it read-write.
        Returns (status, msg).
        """
        status = constants.STATUS_OK
        msg = ''
        # 1. Create the destination file system, same size as the source.
        create_fs_cmd = [
            'env', 'NAS_DB=/nas', '/nas/bin/nas_fs',
            '-name', fs_name,
            '-type', 'uxfs',
            '-create',
            'samesize=' + source_fs,
            'pool=' + '"' + dest_pool_name + '"',
            'storage=SINGLE',
            'worm=off',
            '-thin', 'no',
            '-option', 'slice=y',
        ]
        self._execute_cmd(create_fs_cmd)
        # 2. Mount it read-only for the copy.
        ro_mount_cmd = [
            'env', 'NAS_DB=/nas', '/nas/bin/server_mount', mover_name,
            '-option', 'ro',
            fs_name,
            '/' + fs_name,
        ]
        self._execute_cmd(ro_mount_cmd)
        # 3. Full-copy the source checkpoint onto the new file system.
        session_name = fs_name + ':' + '"' + dest_pool_name + '"'
        copy_ckpt_cmd = [
            'env', 'NAS_DB=/nas', '/nas/bin/nas_copy',
            '-name', session_name,
            '-source -ckpt', source_ckpt,
            '-destination -fs', fs_name,
            '-interconnect',
            'id=%s' % connect_id,
            '-overwrite_destination',
            '-full_copy',
        ]
        out, err = self._execute_cmd(copy_ckpt_cmd)
        if err.strip().lower() != constants.STATUS_OK:
            # When there is an error happen during nas_copy, we need
            # continue to delete the checkpoint of the target file system
            # if it exists.
            status = constants.STATUS_ERROR
            msg = "nas_copy failed. Reason %s" % out
        # 4. Unmount and delete any checkpoints left on the new file
        #    system (listed in the "ckpts = ..." line of nas_fs -info).
        query_fs_cmd = [
            'env', 'NAS_DB=/nas', '/nas/bin/nas_fs',
            '-info', fs_name,
        ]
        out, err = self._execute_cmd(query_fs_cmd)
        re_ckpts = r'ckpts\s*=\s*(.*)\s*'
        m = re.search(re_ckpts, out)
        if m is not None:
            ckpts = m.group(1)
            for ckpt in re.split(',', ckpts):
                umount_ckpt_cmd = [
                    'env', 'NAS_DB=/nas',
                    '/nas/bin/server_umount', mover_name,
                    '-perm', ckpt,
                ]
                self._execute_cmd(umount_ckpt_cmd)
                delete_ckpt_cmd = [
                    'env', 'NAS_DB=/nas', '/nas/bin/nas_fs',
                    '-delete', ckpt,
                    '-Force',
                ]
                self._execute_cmd(delete_ckpt_cmd)
        # 5. Remount the new file system read-write for normal use.
        rw_mount_cmd = [
            'env', 'NAS_DB=/nas', '/nas/bin/server_mount', mover_name,
            '-option', 'rw',
            fs_name,
            '/' + fs_name,
        ]
        self._execute_cmd(rw_mount_cmd)
        return status, msg
def enable_nfs_service(self, vdm_name, if_name):
command_attach_if_on_vdm = [
'env', 'NAS_DB=/nas', '/nas/bin/nas_server',
'-vdm', vdm_name,
'-attach', if_name,
| |
<filename>ARQ/arq_tools.py
#!/usr/bin/python
# Script: .py
# Author: <NAME>'
"""
Description:
ARQ: Assessment and Comparison of RNA-seq Quantification Methods
Usage:
Source:
Reference:
"""
import sys
import os
from arq_debug import printDebug
###############################################################################
## make config for main dictionaries
###############################################################################
def getMainDictionaries(dict_type, debug):
    """Return one of the ARQ master configuration structures.

    :param dict_type: which structure to build:
        'input_parameters' -> dict of every accepted config-file key (empty values)
        'possible_jobs'    -> dict of job name -> comma separated dependency list
        'quant_align'      -> list of all QUANTIFIER_ALIGNER combinations
        anything else      -> empty dict
    :param debug: when True, print the collected debug trace via printDebug.
    :returns: the requested dict (or list for 'quant_align').
    """
    # initialize debug
    debug_list = []
    format = '{:<17}'
    if debug: debug_list.append(['def: getMainDictionaries():'])
    if debug: debug_list.append(['-'])
    if debug: debug_list.append(['done'])
    # initialize return dictionary
    return_dict = {}
    # initialize dictionary with all possible tool parameters
    # -> only these parameters will be accepted in the config file
    # -> bash_script <-(1)- % bash_config <-(2)- % input_parameters
    ''' TODO: save ALL possible variables and update empty checks in code '''
    if dict_type == 'input_parameters':
        return_dict = { # input options
            'proj_dir_prefix':'', 'data_set_name':'', 'fasta_gen_file':'', 'fasta_trns_file':'',
            'gtf_index_file':'', 'fastq1_links':'', 'fastq2_links':'', 'arq_job_list':'',
            'quant_comb_list':'', 'quant_name_list':'',
            # tool paths
            'tool_dir_star':'', 'tool_dir_tophat':'', 'tool_dir_hisat':'', 'tool_dir_bowtie':'',
            'tool_dir_htseq':'', 'tool_dir_featc':'', 'tool_dir_eqpqm':'', 'tool_dir_toprec':'',
            'tool_dir_flxcp':'', 'tool_dir_kllst':'', 'tool_dir_cuffl':'', 'tool_dir_btseq':'',
            'tool_dir_rsem':'',
            # tool parameters: alignments
            'num_processors':'', 'star_overhang':'', 'min_intron_len':'', 'max_intron_len':'',
            'max_multi_hits':'', 'mate_dist_opt':'', 'max_mismatches':'', 'star_max_edit_r':'',
            'star_gen_load':'', 'index_base_name':'', 'bwtrs_in_opt':'', 'bwtrs_in_strand':'',
            'bwtrs_rsem_opt':'',
            # tool parameters: quantifications
            'quant_feat_type':'', 'quant_feat_id':'', 'htseq_in_strand':'', 'htseq_mode_opt':'',
            'featc_strand_1':'', 'featc_strand_2':'', 'featc_overlap':'', 'featc_pair_val':'',
            'eqpqm_in_strand':'', 'eqpqm_count_opt':'', 'eqpqm_unambig':'', 'eqpqm_no_weight':'',
            'cuffl_lib_type':'', 'cuffl_no_update':'', 'cuffl_quiet_mode':'', 'flxcp_tool_call':'',
            'flxcp_tmp_dir':'', 'btseq_parse_call':'', 'btseq_estim_call':'', 'btseq_count_call':'',
            'btseq_prob_out':'', 'btseq_prob_in':'', 'btseq_info_file':'', 'btseq_out_count':'',
            'btseq_prob_folder':'', 'btseq_thet_file':'', 'btseq_seed_opt':'', 'btseq_verb_opt':'',
            'btseq_strand_opt':'', 'btseq_out_type':'', 'kallst_seed_opt':'', 'kallst_out_frmt':'',
            # management options
            'save_bash_call':'', 'remove_fastq':'', 'rem_comb_align':'', 'single_tool_job':'',
            'seq_style_fq':'', 'only_make_job':'', 'user_set_over':'', 'user_set_skip':'',
            'user_set_new':'', 'ignore_qs_error':'', 'sleep_time_qs':'', 'qs_mem_free':'',
            'qs_run_time':''}
    # initialize dictionary with all possible jobs and dependencies as comma separated list
    # -> only these job request will be accepted in the config file
    ''' TODO: make dynamic and make possible to add jobs via config file '''
    if dict_type == 'possible_jobs':
        return_dict = { 'STAR_index':'', 'STAR_align':'STAR_index',
                        'TOPHAT_index':'BOWTIE_index', 'TOPHAT_align':'TOPHAT_index',
                        'HISAT_index':'', 'HISAT_align':'HISAT_index',
                        'BOWTIE_index':'', 'HTSEQ_quant':'',
                        'EQPQM_index':'', 'EQPQM_quant':'EQPQM_index',
                        'FLXCP_index':'', 'FLXCP_quant':'FLXCP_index',
                        'KLLST_index':'', 'KLLST_quant':'KLLST_index',
                        'BWTRS_index':'', 'BWTRS_align':'BWTRS_index',
                        'RSEM_index':'', 'RSEM_quant':'RSEM_index',
                        'BTSEQ_quant':'', 'BRSEM_align':'BWTRS_index',
                        'CUFFL_quant':'', 'FEATC_quant':'',
                        'AFREE_align':''}
    # input_parameters that will be initialized automatically
    # placeholder: *A* = aligner name      *a* = align
    #              *Q* = quantifier name   *q* = quant
    #              t c p = temp cycling parameter (parameter will iterate through all fastq_name/*A*_dir/etc.)
    #
    # parameter              description                              complete directory path
    # -------------------------------------------------------------------------------------------------------------------------------------------
    # <p_name_suffix>        command line project name                -
    # <proj_dir_path>        main project path                        <proj_dir_prefix>/<p_name_suffix>
    # <index_dir_path>       path to index directory                  <proj_dir_path>/indexes/<data_set_name>
    # <align_dir_path>       path to alignment directory              <proj_dir_path>/alignments/<data_set_name>
    # <quant_dir_path>       path to quantification directory         <proj_dir_path>/quantifications/<data_set_name>
    # <comb_dir_path>        path to combined alignments directory    <proj_dir_path>/combinations/<data_set_name>
    # <*A*_index_dir>        path to aligner index directory          <index_dir_path>/*A*_index
    # <*A*_align_dir>        path to aligner alignment directory      <align_dir_path>/*A*_align
    # <*A*_*Q*_quant_dir>    path to aligner-quantifier directory     <quant_dir_path>/*A*_*Q*_quant
    # <align_out_prefix>     t c p for alignment names                -
    # <quant_out_prefix>     t c p for quantification names           -
    # <align_out_dir>        t c p for fastq alignments               <align_dir_path/*A*_align/<align_out_prefix>
    # <quant_out_dir>        t c p for combi quant                    <quant_dir_path/*Q*_quant/<align_out_prefix>
    # <fastq1_file>          t c p for fastq1 files                   -
    # <fastq2_file>          t c p for fastq2 files                   -
    # <ALIGNER_align_dir>    t c p for specific alignment dir         <align_dir_path/*A*_align>']
    # <alignment_prefix>     t c p for specific alignment first fq name -
    # <combi_align_list>     list of all alignment.bam files for quant <ALIGNER_align_dir>/<align_out_prefix>1.bam <ALIGNER_align_dir>/<align_out_prefix>2.bam ...
    # initialize list with all possible alignment and quantification combinations
    if dict_type == 'quant_align':
        return_dict = [ 'HTSEQ_STAR', 'HTSEQ_TOPHAT', 'HTSEQ_HISAT', 'FEATC_STAR', 'FEATC_TOPHAT', 'FEATC_HISAT',
                        'EQPQM_STAR', 'EQPQM_TOPHAT', 'EQPQM_HISAT', 'CUFFL_STAR', 'CUFFL_TOPHAT', 'CUFFL_HISAT',
                        'FLXCP_STAR', 'FLXCP_TOPHAT', 'FLXCP_HISAT', 'BTSEQ_BWTRS', 'RSEM_BRSEM', 'KLLST_AFREE']
    # print debug (was missing: debug_list was collected but never emitted,
    # unlike the sibling getIndexConfig/getAlignConfig functions)
    if debug: printDebug(debug_list, format)
    # return dict
    return return_dict
###############################################################################
## make config for index jobs
###############################################################################
def getIndexConfig(job_config_data, debug):
    """Register the bash command templates for every supported index job.

    Each entry maps a job name (e.g. 'STAR_index') to a dict of %-style
    command fragments:
      - 'script_order'    : sequence in which fragments are assembled
      - 'script_options'  : optional fragments appended only when configured
      - 'bash_pre_script' : shell command(s) run before the tool call
      - 'bash_seq_script' : shell command(s) run after the tool call
    The remaining keys hold the fragment templates themselves; they are
    filled in later via %-interpolation against the resolved parameters.

    :param job_config_data: dict extended in place with the index-job templates.
    :param debug: when True, print the collected debug trace.
    :returns: the extended job_config_data dict.
    """
    # initialize debug
    debug_list = []
    format = '{:<17}'
    if debug: debug_list.append(['def: getIndexConfig():'])
    if debug: debug_list.append(['-'])
    if debug: debug_list.append(['done'])
    ## initialize known index data
    # initialize STAR index
    job_config_data['STAR_index'] = { 'script_order'      :['tool_dir_star', 'options', 'num_processors', 'index_base_name', 'fasta_gen_file', 'gtf_index_file', 'star_overhang'],
                                      'script_options'    :[],
                                      'tool_dir_star'     :'%(tool_dir_star)sSTAR --runMode genomeGenerate',
                                      'num_processors'    :'--runThreadN %(num_processors)s',
                                      'index_base_name'   :'--genomeDir %(STAR_index_dir)s',
                                      'fasta_gen_file'    :'--genomeFastaFiles %(fasta_gen_file)s',
                                      'gtf_index_file'    :'--sjdbGTFfile %(gtf_index_file)s',
                                      'star_overhang'     :'--sjdbOverhang %(star_overhang)s',
                                      'bash_pre_script'   :'cd %(STAR_index_dir)s',
                                      'bash_seq_script'   :''}
    # initialize TOPHAT index
    job_config_data['TOPHAT_index'] = { 'script_order'      :['tool_dir_tophat', 'options', 'gtf_index_file', 'index_base_name', 'bowtie_index_base'],
                                        'script_options'    :[],
                                        'tool_dir_tophat'   :'%(tool_dir_tophat)stophat2',
                                        'bowtie_index_base' :'%(BOWTIE_index_dir)s/%(index_base_name)s',
                                        'gtf_index_file'    :'-G %(gtf_index_file)s',
                                        'index_base_name'   :'--transcriptome-index %(TOPHAT_index_dir)s/%(index_base_name)s',
                                        'bash_pre_script'   :'cd %(TOPHAT_index_dir)s',
                                        'bash_seq_script'   :''}
    # initialize HISAT index (pre-script also extracts splice sites from the GTF)
    job_config_data['HISAT_index'] = { 'script_order'      :['tool_dir_hisat', 'options', 'fasta_gen_file', 'index_base_name'],
                                       'script_options'    :[],
                                       'tool_dir_hisat'    :'%(tool_dir_hisat)shisat-build',
                                       'index_base_name'   :'%(HISAT_index_dir)s/%(index_base_name)s',
                                       'fasta_gen_file'    :'-f %(fasta_gen_file)s',
                                       'bash_pre_script'   :'cd %(HISAT_index_dir)s && '
                                                           +'python %(tool_dir_hisat)s/extract_splice_sites.py %(gtf_index_file)s > %(HISAT_index_dir)s/%(index_base_name)s.splice',
                                       'bash_seq_script'   :''}
    # initialize BOWTIE index (genome; fasta is copied next to the index)
    job_config_data['BOWTIE_index'] = { 'script_order'      :['tool_dir_bowtie', 'options', 'fasta_gen_file', 'index_base_name'],
                                        'script_options'    :[],
                                        'tool_dir_bowtie'   :'%(tool_dir_bowtie)sbowtie2-build',
                                        'index_base_name'   :'%(BOWTIE_index_dir)s/%(index_base_name)s',
                                        'fasta_gen_file'    :'-f %(fasta_gen_file)s',
                                        'bash_pre_script'   :'cd %(BOWTIE_index_dir)s && '
                                                            +'cp %(fasta_gen_file)s %(BOWTIE_index_dir)s/%(index_base_name)s.fa',
                                        'bash_seq_script'   :''}
    # initialize BWTRS index (bowtie2 over the transcriptome fasta)
    job_config_data['BWTRS_index'] = { 'script_order'      :['tool_dir_bowtie', 'options', 'fasta_trns_file', 'index_base_name'],
                                       'script_options'    :[],
                                       'tool_dir_bowtie'   :'%(tool_dir_bowtie)sbowtie2-build',
                                       'index_base_name'   :'%(BWTRS_index_dir)s/%(index_base_name)s',
                                       'fasta_trns_file'   :'-f %(fasta_trns_file)s',
                                       'bash_pre_script'   :'cd %(BWTRS_index_dir)s && '
                                                           +'cp %(fasta_trns_file)s %(BWTRS_index_dir)s/%(index_base_name)s.fa',
                                       'bash_seq_script'   :''}
    # initialize EQP-QM index
    job_config_data['EQPQM_index'] = { 'script_order'      :['tool_dir_eqpqm', 'options', 'gtf_index_file', 'index_base_name'],
                                       'script_options'    :[],
                                       'tool_dir_eqpqm'    :'%(tool_dir_eqpqm)seqp-setup.sh',
                                       'gtf_index_file'    :'%(gtf_index_file)s',
                                       'index_base_name'   :'%(EQPQM_index_dir)s',
                                       'bash_pre_script'   :'cd %(EQPQM_index_dir)s',
                                       'bash_seq_script'   :''}
    # initialize fluxCapacitor index (sorts the GTF)
    job_config_data['FLXCP_index'] = { 'script_order'      :['tool_dir_flxcp', 'options', 'flxcp_tool_call', 'gtf_index_file', 'index_base_name'],
                                       'script_options'    :[],
                                       'tool_dir_flxcp'    :'%(tool_dir_flxcp)sflux-capacitor',
                                       'gtf_index_file'    :'-i %(gtf_index_file)s',
                                       'index_base_name'   :'-o %(FLXCP_index_dir)s/%(index_base_name)s.gtf',
                                       'flxcp_tool_call'   :'-t sortGTF --force',
                                       'bash_pre_script'   :'cd %(FLXCP_index_dir)s',
                                       'bash_seq_script'   :''}
    # initialize rsem index
    job_config_data['RSEM_index'] = { 'script_order'      :['tool_dir_rsem', 'options', 'fasta_trns_file', 'index_base_name'],
                                      'script_options'    :[],
                                      'tool_dir_rsem'     :'%(tool_dir_rsem)srsem-prepare-reference',
                                      'fasta_trns_file'   :'%(fasta_trns_file)s',
                                      'index_base_name'   :'%(RSEM_index_dir)s/%(index_base_name)s',
                                      'bash_pre_script'   :'cd %(RSEM_index_dir)s',
                                      'bash_seq_script'   :''}
    # initialize kallisto index
    job_config_data['KLLST_index'] = { 'script_order'      :['tool_dir_kllst', 'options', 'index_base_name', 'fasta_trns_file'],
                                       'script_options'    :[],
                                       'tool_dir_kllst'    :'%(tool_dir_kllst)skallisto index',
                                       'index_base_name'   :'-i %(KLLST_index_dir)s/%(index_base_name)s.idx',
                                       'fasta_trns_file'   :'%(fasta_trns_file)s',
                                       'bash_pre_script'   :'cd %(KLLST_index_dir)s',
                                       'bash_seq_script'   :''}
    # # initialize iReckon index
    # job_config_data['IRECK_index'] = { 'script_order'      :['tool_dir_savant', 'options', 'gtf_index_file', 'index_base_name'],
    #                                    'script_options'    :[],
    #                                    'tool_dir_savant'   :'%(tool_dir_savant)sFormatTool.sh',
    #                                    'gtf_index_file'    :'%(gtf_index_file)s',
    #                                    'index_base_name'   :'%(IRECK_index_dir)s/%(index_base_name)s.tbx',
    #                                    'bash_pre_script'   :'',
    #                                    'bash_seq_script'   :''}
    # # initialize eXpress index -> use bedtools to generate multi line fasta file from genome fasta and gtf file
    # job_config_data['XPRSS_index'] = { 'script_order'      :['tool_dir_savant', 'options', 'gtf_index_file', 'index_base_name'],
    #                                    'script_options'    :[],
    #                                    'tool_dir_savant'   :'%(tool_dir_savant)sFormatTool.sh',
    #                                    'gtf_index_file'    :'%(gtf_index_file)s',
    #                                    'index_base_name'   :'%(IRECK_index_dir)s/%(index_base_name)s.tbx',
    #                                    'bash_pre_script'   :'',
    #                                    'bash_seq_script'   :''}
    # print debug
    if debug: printDebug(debug_list, format)
    # return job config
    return job_config_data
###############################################################################
## make config for alignment jobs
###############################################################################
def getAlignConfig(job_config_data, debug):
# initialize debug
debug_list = []
format = '{:<17}'
if debug: debug_list.append(['def: getAlignConfig():'])
if debug: debug_list.append(['-'])
if debug: debug_list.append(['done'])
## initialize known alignment data
# initialize STAR alignment
job_config_data['STAR_align'] = { 'script_order' :['tool_dir_star', 'options', 'keep_input_order', 'num_processors', 'align_out_option', 'index_base_name', 'fastq1_file', 'fastq2_file'],
'script_options' :['star_gen_load', 'min_intron_len', 'max_intron_len', 'max_multi_hits', 'mate_dist_opt', 'max_mismatches', 'star_max_edit_r'],
'tool_dir_star' :'%(tool_dir_star)sSTAR',
'num_processors' :'--runThreadN %(num_processors)s',
'align_out_option' :'--outFileNamePrefix %(align_out_dir)s/%(align_out_prefix)s',
'index_base_name' :'--genomeDir %(STAR_index_dir)s',
'fastq1_file' :'--readFilesIn %(fastq1_file)s',
'fastq2_file' :'%(fastq2_file)s',
'star_gen_load' :'--genomeLoad %(star_gen_load)s',
'keep_input_order' :'--outSAMorder PairedKeepInputOrder',
'min_intron_len' :'--alignIntronMin %(min_intron_len)s',
'max_intron_len' :'--alignIntronMax %(max_intron_len)s',
'max_multi_hits' :'--outFilterMultimapNmax %(max_multi_hits)s', # discard when max multihits
'mate_dist_opt' :'--alignMatesGapMax %(mate_dist_opt)s', # mate gap max length
'max_mismatches' :'--outFilterMismatchNmax %(max_mismatches)s',
'star_max_edit_r' :'--outFilterMismatchNoverLmax %(star_max_edit_r)s',
'bash_pre_script' :'cd %(align_out_dir)s',
'bash_seq_script' :'samtools view -Shb %(align_out_dir)s/%(align_out_prefix)sAligned.out.sam | samtools sort -n -m 20000000000 - %(STAR_align_dir)s/%(align_out_prefix)s'}
# initialize TOPHAT alignment
job_config_data['TOPHAT_align'] = { 'script_order' :['tool_dir_tophat', 'options', 'micro_exon_search', 'keep_input_order', 'num_processors', 'align_out_option', 'index_base_name', 'bowtie_index_base', 'fastq1_file', 'fastq2_file'],
'script_options' :['min_intron_len', 'max_intron_len', 'max_multi_hits', 'mate_dist_opt', 'max_mismatches'],
'tool_dir_tophat' :'%(tool_dir_tophat)stophat2',
'num_processors' :'--num-threads %(num_processors)s',
'align_out_option' :'--output-dir %(align_out_dir)s',
'index_base_name' :'--transcriptome-index %(TOPHAT_index_dir)s/%(index_base_name)s',
'bowtie_index_base' :'%(BOWTIE_index_dir)s/%(index_base_name)s',
'fastq1_file' :'%(fastq1_file)s',
'fastq2_file' :'%(fastq2_file)s',
'micro_exon_search' :'--microexon-search',
'keep_input_order' :'--keep-fasta-order',
'min_intron_len' :'--min-intron-length %(min_intron_len)s',
'max_intron_len' :'--max-intron-length %(max_intron_len)s',
'max_multi_hits' :'--max-multihits %(max_multi_hits)s', # save this many multihits
'mate_dist_opt' :'--mate-inner-dist %(mate_dist_opt)s', # mate gap medium length
'max_mismatches' :'--read-mismatches %(max_mismatches)s '
+'--read-gap-length %(max_mismatches)s '
+'--read-edit-dist %(max_mismatches)s', # both need to be less or equal to --read-edit-dist
'bash_pre_script' :'cd %(align_out_dir)s',
'bash_seq_script' :'if [ \"%(tophat_merge_opt)s\" = "TRUE" ]; '
+'then '
+ 'python %(tool_dir_toprec)stophat-recondition.py -q %(align_out_dir)s && '
+ 'samtools view -H %(align_out_dir)s/unmapped_fixup.bam > %(align_out_dir)s/unmapped_fixup-header.sam && | |
(tmp7465 if depth3 else tmp6988)
tmp7472 = (tmp6954 if depth7 else tmp6976)
tmp7471 = (tmp7472 if depth6 else tmp6988)
tmp7470 = (tmp7471 if depth5 else tmp6988)
tmp7469 = (tmp7470 if depth4 else tmp6988)
tmp7468 = (tmp7469 if depth3 else tmp6988)
tmp7463 = (tmp7464 if depth2 else tmp7468)
tmp7478 = (tmp6954 if depth7 else tmp7002)
tmp7477 = (tmp7478 if depth6 else tmp6988)
tmp7476 = (tmp7477 if depth5 else tmp6988)
tmp7475 = (tmp7476 if depth4 else tmp6988)
tmp7474 = (tmp7475 if depth3 else tmp6988)
tmp7483 = (tmp6954 if depth7 else tmp7008)
tmp7482 = (tmp7483 if depth6 else tmp6988)
tmp7481 = (tmp7482 if depth5 else tmp6988)
tmp7480 = (tmp7481 if depth4 else tmp6988)
tmp7479 = (tmp7480 if depth3 else tmp6988)
tmp7473 = (tmp7474 if depth2 else tmp7479)
tmp7462 = (tmp7463 if depth1 else tmp7473)
tmp7443 = (tmp7444 if s0 else tmp7462)
tmp7490 = (tmp7015 if depth6 else tmp7014)
tmp7489 = (tmp7490 if depth5 else tmp7014)
tmp7488 = (tmp7489 if depth4 else tmp7014)
tmp7487 = (tmp7488 if depth3 else tmp7417)
tmp7495 = (tmp7015 if depth7 else tmp7027)
tmp7494 = (tmp7495 if depth6 else tmp7014)
tmp7493 = (tmp7494 if depth5 else tmp7014)
tmp7492 = (tmp7493 if depth4 else tmp7014)
tmp7491 = (tmp7492 if depth3 else tmp7014)
tmp7486 = (tmp7487 if depth2 else tmp7491)
tmp7497 = (tmp7488 if depth3 else tmp7014)
tmp7502 = (tmp7015 if depth7 else tmp7037)
tmp7501 = (tmp7502 if depth6 else tmp7014)
tmp7500 = (tmp7501 if depth5 else tmp7014)
tmp7499 = (tmp7500 if depth4 else tmp7014)
tmp7498 = (tmp7499 if depth3 else tmp7014)
tmp7496 = (tmp7497 if depth2 else tmp7498)
tmp7485 = (tmp7486 if depth1 else tmp7496)
tmp7508 = (tmp6958 if depth6 else tmp7042)
tmp7507 = (tmp7508 if depth5 else tmp7042)
tmp7506 = (tmp7507 if depth4 else tmp7042)
tmp7505 = (tmp7506 if depth3 else tmp7427)
tmp7509 = (tmp7506 if depth3 else tmp7042)
tmp7504 = (tmp7505 if depth2 else tmp7509)
tmp7511 = (tmp7506 if depth3 else tmp7433)
tmp7516 = (tmp6958 if depth7 else tmp7062)
tmp7515 = (tmp7516 if depth6 else tmp7042)
tmp7514 = (tmp7515 if depth5 else tmp7042)
tmp7513 = (tmp7514 if depth4 else tmp7042)
tmp7512 = (tmp7513 if depth3 else tmp7042)
tmp7510 = (tmp7511 if depth2 else tmp7512)
tmp7503 = (tmp7504 if depth1 else tmp7510)
tmp7484 = (tmp7485 if s0 else tmp7503)
tmp7442 = (tmp7443 if s1 else tmp7484)
tmp7524 = (tmp6953 if depth6 else tmp6952)
tmp7523 = (tmp7524 if depth5 else tmp6952)
tmp7522 = (tmp7523 if depth4 else tmp6952)
tmp7521 = (tmp7522 if depth3 else tmp7447)
tmp7525 = (tmp7522 if depth3 else tmp7452)
tmp7520 = (tmp7521 if depth2 else tmp7525)
tmp7527 = (tmp7522 if depth3 else tmp7458)
tmp7528 = (tmp7522 if depth3 else tmp6952)
tmp7526 = (tmp7527 if depth2 else tmp7528)
tmp7519 = (tmp7520 if depth1 else tmp7526)
tmp7531 = (tmp7465 if depth3 else tmp7469)
tmp7530 = (tmp7465 if depth2 else tmp7531)
tmp7533 = (tmp7465 if depth3 else tmp7475)
tmp7534 = (tmp7465 if depth3 else tmp7480)
tmp7532 = (tmp7533 if depth2 else tmp7534)
tmp7529 = (tmp7530 if depth1 else tmp7532)
tmp7518 = (tmp7519 if s0 else tmp7529)
tmp7539 = (tmp7489 if depth4 else tmp7418)
tmp7538 = (tmp7539 if depth3 else tmp7488)
tmp7540 = (tmp7488 if depth3 else tmp7492)
tmp7537 = (tmp7538 if depth2 else tmp7540)
tmp7542 = (tmp7488 if depth3 else tmp7499)
tmp7541 = (tmp7488 if depth2 else tmp7542)
tmp7536 = (tmp7537 if depth1 else tmp7541)
tmp7546 = (tmp7507 if depth4 else tmp7428)
tmp7545 = (tmp7546 if depth3 else tmp7506)
tmp7544 = (tmp7545 if depth2 else tmp7506)
tmp7549 = (tmp7507 if depth4 else tmp7434)
tmp7548 = (tmp7549 if depth3 else tmp7506)
tmp7550 = (tmp7506 if depth3 else tmp7513)
tmp7547 = (tmp7548 if depth2 else tmp7550)
tmp7543 = (tmp7544 if depth1 else tmp7547)
tmp7535 = (tmp7536 if s0 else tmp7543)
tmp7517 = (tmp7518 if s1 else tmp7535)
tmp7441 = (tmp7442 if s2 else tmp7517)
tmp7557 = (tmp7523 if depth4 else tmp7448)
tmp7556 = (tmp7557 if depth3 else tmp7522)
tmp7559 = (tmp7523 if depth4 else tmp7453)
tmp7558 = (tmp7559 if depth3 else tmp7522)
tmp7555 = (tmp7556 if depth2 else tmp7558)
tmp7562 = (tmp7523 if depth4 else tmp7459)
tmp7561 = (tmp7562 if depth3 else tmp7522)
tmp7560 = (tmp7561 if depth2 else tmp7522)
tmp7554 = (tmp7555 if depth1 else tmp7560)
tmp7565 = (tmp7466 if depth3 else tmp7465)
tmp7567 = (tmp7466 if depth4 else tmp7470)
tmp7566 = (tmp7567 if depth3 else tmp7465)
tmp7564 = (tmp7565 if depth2 else tmp7566)
tmp7570 = (tmp7466 if depth4 else tmp7476)
tmp7569 = (tmp7570 if depth3 else tmp7465)
tmp7572 = (tmp7466 if depth4 else tmp7481)
tmp7571 = (tmp7572 if depth3 else tmp7465)
tmp7568 = (tmp7569 if depth2 else tmp7571)
tmp7563 = (tmp7564 if depth1 else tmp7568)
tmp7553 = (tmp7554 if s0 else tmp7563)
tmp7576 = (tmp7489 if depth3 else tmp7539)
tmp7578 = (tmp7489 if depth4 else tmp7493)
tmp7577 = (tmp7578 if depth3 else tmp7488)
tmp7575 = (tmp7576 if depth2 else tmp7577)
tmp7580 = (tmp7489 if depth3 else tmp7488)
tmp7582 = (tmp7489 if depth4 else tmp7500)
tmp7581 = (tmp7582 if depth3 else tmp7488)
tmp7579 = (tmp7580 if depth2 else tmp7581)
tmp7574 = (tmp7575 if depth1 else tmp7579)
tmp7585 = (tmp7507 if depth3 else tmp7546)
tmp7586 = (tmp7507 if depth3 else tmp7506)
tmp7584 = (tmp7585 if depth2 else tmp7586)
tmp7588 = (tmp7507 if depth3 else tmp7549)
tmp7590 = (tmp7507 if depth4 else tmp7514)
tmp7589 = (tmp7590 if depth3 else tmp7506)
tmp7587 = (tmp7588 if depth2 else tmp7589)
tmp7583 = (tmp7584 if depth1 else tmp7587)
tmp7573 = (tmp7574 if s0 else tmp7583)
tmp7552 = (tmp7553 if s1 else tmp7573)
tmp7595 = (tmp7523 if depth3 else tmp7557)
tmp7596 = (tmp7523 if depth3 else tmp7559)
tmp7594 = (tmp7595 if depth2 else tmp7596)
tmp7598 = (tmp7523 if depth3 else tmp7562)
tmp7599 = (tmp7523 if depth3 else tmp7522)
tmp7597 = (tmp7598 if depth2 else tmp7599)
tmp7593 = (tmp7594 if depth1 else tmp7597)
tmp7602 = (tmp7466 if depth3 else tmp7567)
tmp7601 = (tmp7466 if depth2 else tmp7602)
tmp7604 = (tmp7466 if depth3 else tmp7570)
tmp7605 = (tmp7466 if depth3 else tmp7572)
tmp7603 = (tmp7604 if depth2 else tmp7605)
tmp7600 = (tmp7601 if depth1 else tmp7603)
tmp7592 = (tmp7593 if s0 else tmp7600)
tmp7611 = (tmp7490 if depth5 else tmp7419)
tmp7610 = (tmp7611 if depth4 else tmp7489)
tmp7609 = (tmp7610 if depth3 else tmp7489)
tmp7612 = (tmp7489 if depth3 else tmp7578)
tmp7608 = (tmp7609 if depth2 else tmp7612)
tmp7614 = (tmp7489 if depth3 else tmp7582)
tmp7613 = (tmp7489 if depth2 else tmp7614)
tmp7607 = (tmp7608 if depth1 else tmp7613)
tmp7619 = (tmp7508 if depth5 else tmp7429)
tmp7618 = (tmp7619 if depth4 else tmp7507)
tmp7617 = (tmp7618 if depth3 else tmp7507)
tmp7616 = (tmp7617 if depth2 else tmp7507)
tmp7623 = (tmp7508 if depth5 else tmp7435)
tmp7622 = (tmp7623 if depth4 else tmp7507)
tmp7621 = (tmp7622 if depth3 else tmp7507)
tmp7624 = (tmp7507 if depth3 else tmp7590)
tmp7620 = (tmp7621 if depth2 else tmp7624)
tmp7615 = (tmp7616 if depth1 else tmp7620)
tmp7606 = (tmp7607 if s0 else tmp7615)
tmp7591 = (tmp7592 if s1 else tmp7606)
tmp7551 = (tmp7552 if s2 else tmp7591)
tmp7440 = (tmp7441 if s3 else tmp7551)
tmp7633 = (tmp7524 if depth5 else tmp7449)
tmp7632 = (tmp7633 if depth4 else tmp7523)
tmp7631 = (tmp7632 if depth3 else tmp7523)
tmp7636 = (tmp7524 if depth5 else tmp7454)
tmp7635 = (tmp7636 if depth4 else tmp7523)
tmp7634 = (tmp7635 if depth3 else tmp7523)
tmp7630 = (tmp7631 if depth2 else tmp7634)
tmp7640 = (tmp7524 if depth5 else tmp7460)
tmp7639 = (tmp7640 if depth4 else tmp7523)
tmp7638 = (tmp7639 if depth3 else tmp7523)
tmp7637 = (tmp7638 if depth2 else tmp7523)
tmp7629 = (tmp7630 if depth1 else tmp7637)
tmp7644 = (tmp7467 if depth4 else tmp7466)
tmp7643 = (tmp7644 if depth3 else tmp7466)
tmp7647 = (tmp7467 if depth5 else tmp7471)
tmp7646 = (tmp7647 if depth4 else tmp7466)
tmp7645 = (tmp7646 if depth3 else tmp7466)
tmp7642 = (tmp7643 if depth2 else tmp7645)
tmp7651 = (tmp7467 if depth5 else tmp7477)
tmp7650 | |
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(hbox, 0, wx.ALL | wx.EXPAND, 7)
self.vbox = vbox
self.SetSizer(vbox)
self.Layout()
ok.SetDefault()
def set_component(self, c, border=7, line=False):
    """Place *c* as the dialog's main content, above the OK/Cancel row.

    :param c: window or sizer to insert; expands to fill the dialog.
    :param border: border width (pixels) around the component.
    :param line: when True, draw a horizontal separator under the component.
    """
    self.vbox.Insert(0, c, 1, wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border)
    if line:
        self.vbox.Insert(1, wx.StaticLine(self), 0, wx.EXPAND | wx.LEFT | wx.RIGHT, border)
    self.Layout()
@property
def ButtonSizer(self):
    # Sizer containing the OK/Cancel buttons; presumably created as
    # self._button_sizer in __init__ (not visible here) -- TODO confirm.
    return self._button_sizer
okcancel_class.__name__ = name
return okcancel_class
# Concrete OK/Cancel containers: the same button-row behavior hosted in a
# wx.Dialog or a wx.Frame, respectively.
OKCancelDialog = make_okcancel('OKCancelDialog', wx.Dialog)
OKCancelFrame = make_okcancel('OKCancelFrame', wx.Frame)
class Link(wx.HyperlinkCtrl):
    """Hyperlink whose color stays fixed: hover and visited states look
    exactly like the normal state."""

    def __init__(self, parent, label, url):
        wx.HyperlinkCtrl.__init__(self, parent, -1, label, url)
        base_colour = self.NormalColour
        self.HoverColour = base_colour
        self.VisitedColour = base_colour
class NonModalDialogMixin(object):
    """Mixin that shows a dialog non-modally and reports the outcome
    (OK pressed or not) through a one-shot callback."""

    def ShowWithCallback(self, cb=None):
        # Stash the callback, listen for any button press, then display.
        self.cb = cb
        self.Bind(wx.EVT_BUTTON, self.on_button)
        self.Show()
        self.Raise()

    def on_button(self, e):
        pressed_ok = e.Id == wx.ID_OK
        self.Hide()
        # Take the callback exactly once, clearing it before invocation.
        callback, self.cb = self.cb, None
        if callback is not None:
            import util
            with util.traceguard:
                callback(pressed_ok)
        self.Destroy()
class SimpleMessageDialog(OKCancelDialog, NonModalDialogMixin):
    """OK/Cancel dialog that shows an icon, a message, and optionally a link."""

    def __init__(self, parent, title, message,
                 icon, ok_caption='', cancel_caption='',
                 style=None,
                 link=None,
                 wrap=None):
        """
        :param parent: parent window.
        :param title: dialog title.
        :param message: text displayed next to the icon.
        :param icon: bitmap painted at the top-left of the message panel and
            used as the frame icon; may be None.
        :param ok_caption: caption for the OK button.
        :param cancel_caption: caption for the Cancel button.
        :param style: window style; defaults to self.dialog_style
            (defined on the base class -- not visible here).
        :param link: optional (label, url) pair shown as a hyperlink in the
            button row.
        :param wrap: wrap width in pixels for the message text, or None.
        """
        if style is None:
            style = self.dialog_style
        if link is not None:
            # Replace the ExtraButtons hook so the base class receives a
            # hyperlink for the button row; the hook also creates the
            # message panel, matching the default hook below.
            def ExtraButtons():
                self._panel = wx.Panel(self)
                return Link(self, link[0], link[1])
            self.ExtraButtons = ExtraButtons
        OKCancelDialog.__init__(self, parent, title=title,
                                ok_caption=ok_caption, cancel_caption=cancel_caption,
                                style=style)
        self.icon = icon
        if icon is not None:
            self.SetFrameIcon(self.icon)
        # Lay out the message on the white panel; the 60px spacer leaves a
        # gutter on the left where OnPanelPaint draws the icon.
        p = self._panel
        p.Bind(wx.EVT_PAINT, self.OnPanelPaint)
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        p.SetBackgroundColour(wx.WHITE)
        static_text = wx.StaticText(p, -1, message)
        sizer.AddSpacer((60, 20))
        sizer.Add(static_text, 1, wx.EXPAND)
        main_sizer.Add(sizer, 1, wx.EXPAND | wx.ALL, 10)
        main_sizer.Add((5,5))
        p.SetSizer(main_sizer)
        self.set_component(p, border=0)
        if wrap is not None:
            static_text.Wrap(wrap)
        self.Fit()

    def OnPanelPaint(self, e):
        # Manually paint the icon onto the message panel.
        dc = wx.PaintDC(self._panel)
        icon = self.icon
        if icon is not None:
            dc.DrawBitmap(icon, 20, 14, True)

    def ExtraButtons(self):
        # Default hook (no link given): only create the message panel.
        # Called by the base class while it builds the button row.
        self._panel = wx.Panel(self)
class UpgradeDialog(SimpleMessageDialog):
    """Message dialog the user cannot dismiss: no close box, and Escape
    is disabled."""

    # Strip the close box from the inherited dialog style.
    dialog_style = SimpleMessageDialog.dialog_style & ~wx.CLOSE_BOX

    def __init__(self, *args, **kwargs):
        super(UpgradeDialog, self).__init__(*args, **kwargs)
        # Escape no longer closes the dialog.
        self.SetEscapeId(wx.ID_NONE)

    @classmethod
    def show_dialog(cls, parent, title, message, success=None):
        # Defer creation to the GUI event loop.
        wx.CallAfter(cls.do_show_dialog, parent, title, message, success)

    @classmethod
    def do_show_dialog(cls, parent, title, message, success=None):
        dlg = cls(parent, title=title, message=message)
        dlg.ShowWithCallback(success)
# Prefer the fast native FindTopLevelWindow from the cgui extension; fall
# back to a pure Python walk up the parent chain when it is unavailable.
try:
    from cgui import FindTopLevelWindow
except ImportError:
    print >> sys.stderr, "WARNING: using slow FindTopLevelWindow"
    def FindTopLevelWindow(window):
        # Recurse upward until a top-level window (frame/dialog) is reached.
        return window if window.TopLevel else FindTopLevelWindow(window.Parent)
# wx.Window.GetNormalRect : return the non maximized dimensions of a window
try:
    from cgui import GetNormalRect
except ImportError:
    def GetNormalRect(win):
        # Fallback: without the native helper, the current rect is the best
        # approximation available.
        return win.Rect
# Graft the helpers onto wx's window class so every window exposes them.
wx.WindowClass.NormalRect = property(GetNormalRect)
wx.WindowClass.GetNormalRect = new.instancemethod(GetNormalRect, None, wx.WindowClass)
# window.Top resolves to the enclosing top-level frame/dialog.
wx.WindowClass.Top = property(FindTopLevelWindow)
def edit_list(parent=None, obj_list=None, title="Editing List"):
    """Edit a list of values in a modal dialog with one comma separated
    text field.

    Returns (ok_pressed, new_list). Element type is inferred from the
    first original element, or from the entered text (int if all chunks
    are digits, else str) when the original list was empty.
    """
    if not isinstance(obj_list, list):
        obj_list = []
    dialog = OKCancelDialog(wx.GetTopLevelParent(parent), title=title)
    elem_type = type(obj_list[0]) if len(obj_list) else None
    field = wx.TextCtrl(dialog, value=','.join(str(item) for item in obj_list))
    dialog.set_component(field)
    field.MinSize = (300, -1)
    dialog.Fit()
    field.SetFocus()
    field.SetInsertionPointEnd()
    accepted = dialog.ShowModal() == wx.ID_OK
    if elem_type is None:
        all_digits = all(chunk.isdigit() for chunk in field.Value.split(','))
        elem_type = int if all_digits else str
    if len(field.Value):
        values = [elem_type(chunk.strip()) for chunk in field.Value.split(',')]
    else:
        values = []
    return accepted, values
# Editable-list dialog: prefer the wx.gizmos EditableListBox UI; without
# the gizmos module fall back to the plain comma separated text dialog.
try:
    import wx.gizmos as gizmos
except ImportError:
    # NOTE(review): the default obj_list is a shared mutable list literal;
    # harmless here because neither variant mutates it, but worth confirming.
    def edit_string_list(parent=None, obj_list=['one', 'two', 'three'], title="Editing List"):
        log.critical('no wx.gizmos')
        return edit_list(parent, obj_list, title)
else:
    def edit_string_list(parent=None, obj_list=['one', 'two', 'three'], title="Editing List"):
        # Show the elements one per line in an editable listbox; returns
        # (ok_pressed, new_list) with elements coerced back to the type of
        # the first original element.
        diag = OKCancelDialog(wx.GetTopLevelParent(parent), title=title)
        t = type(obj_list[0])
        elb = gizmos.EditableListBox(diag, -1, title)
        elb.SetStrings([unicode(elem) for elem in obj_list])
        diag.set_component(elb)
        return diag.ShowModal() == wx.ID_OK, [t(s) for s in elb.GetStrings()]
from wx import Color, ColourDatabase, NamedColor
from binascii import unhexlify
from types import NoneType
def get_wxColor(c):
    """Coerce *c* into a wx.Color.

    Accepts: None or an existing Color (returned unchanged), a hex string
    like "0xabcdef", a known color name, an integer packed as 0xAARGGBB
    (alpha 0 treated as fully opaque), or anything int()-convertible.
    Raises ValueError for everything else.
    """
    if isinstance(c, (NoneType, Color)):
        # Already a color (or explicitly no color): hand it back untouched.
        return c

    if isinstance(c, basestring):
        if c[0:2].lower() == color_prefix.lower():
            # a hex string like "0xabcdef" -> opaque RGB
            r, g, b = struct.unpack("BBB", unhexlify(c[2:8]))
            return Color(r, g, b, 255)
        if ColourDatabase().Find(c).Ok():
            # a color name
            return NamedColor(c)
    else:
        # Not a string: maybe it converts to an integer.
        try:
            c = int(c)
        except ValueError:
            pass

    if isinstance(c, int):
        # packed integer: high byte is alpha (0 -> fully opaque)
        return Color((c >> 16) & 0xff, (c >> 8) & 0xff, c & 0xff, (c >> 24) or 255)

    raise ValueError('error: %r is not a valid color' % c)
colorfor = get_wxColor
LOCAL_SETTINGS_FILE = 'digsbylocal.ini'
class MyConfigParser(ConfigParser):
def save(self):
import util
with util.traceguard:
parent = local_settings_path().parent
if not parent.isdir():
parent.makedirs()
lsp = local_settings_path()
try:
with open(lsp, 'w') as f:
self.write(f)
except Exception, e:
log.error('Error saving file %r. Error was: %r', lsp, e)
def iteritems(self, section):
return ((k, self.value_transform(v)) for k, v in ConfigParser.items(self, section))
def items(self, section):
return list(self.iteritems())
def _interpolate(self, section, option, rawval, vars):
try:
value = ConfigParser._interpolate(self, section, option, rawval, vars)
except TypeError:
value = rawval
return value
def value_transform(self, v):
import util
return {'none': None,
'true': True,
'false': False}.get(util.try_this(lambda: v.lower(), None), v)
def local_settings_path():
    """Full path of the per-user local settings INI file."""
    import stdpaths
    base_dir = stdpaths.userlocaldata
    return base_dir / LOCAL_SETTINGS_FILE
_global_ini_parser = None
def local_settings():
global _global_ini_parser
if _global_ini_parser is None:
_global_ini_parser = MyConfigParser()
lsp = local_settings_path()
try:
_global_ini_parser.read(lsp)
except Exception, e:
log.error('There was an error loading file %r. The error was %r.', lsp, e)
return _global_ini_parser
def getDisplayHashString():
    '''
    Build a string that uniquely identifies the current monitor layout
    (position and resolution of every attached display).

    Used by save/loadWindowPos below: remote desktop or unplugging a
    monitor changes the layout, and a window position saved on one layout
    may be off-screen on another, so positions are keyed per layout.
    '''
    monitor_descs = ('<(%s, %s): %sx%s>' % tuple(m.Geometry) for m in Monitor.All())
    return '{%s}' % ', '.join(monitor_descs)
def saveWindowPos(win, uniqueId=""):
    '''
    Saves a window's position to the config file.

    The section name comes from windowId(), so geometry is stored per
    window name, per user, and per monitor configuration.
    '''
    cfg = local_settings()
    section = windowId(win.Name, uniqueId)
    if not cfg.has_section(section):
        cfg.add_section(section)
    if wxMSW:
        # Windows: store the native window placement blob as JSON.
        placement = GetWindowPlacement(win)
        # on win7, if a window is Aero Snapped, GetWindowPlacement will return
        # it's "unsnapped" size. we want to save the size of the window as it
        # is now, though--so grab the size from win.Rect and use that.
        if cgui.isWin7OrHigher() and not win.IsMaximized() and not win.IsIconized():
            placement_set_size(placement, win.Rect.Size)
        cfg.set(section, 'placement', json.dumps(placement))
    else:
        # Other platforms: store x/y/w/h and the maximized flag individually.
        rect = GetNormalRect(win)
        sz, p = rect.GetSize(), rect.GetPosition()
        for k, v in [("x", p.x),
                     ("y", p.y),
                     ("w", sz.width),
                     ("h", sz.height),
                     ('maximized', win.IsMaximized())]:
            cfg.set(section, k, str(v))
    cfg.save()
# Fallback default sizes (width, height) in pixels for well-known windows.
defSizes = {
    'Buddy List': (280, 600),
    'IM Window': (420, 330),
}
def windowId(windowName, uniqueId):
from common import profile
username = getattr(profile, 'username', None)
if not username:
username = getattr(wx.FindWindowByName('Digsby Login Window'), 'username', '_')
return ' '.join([windowName, uniqueId, username, getDisplayHashString()])
def placement_set_size(placement, size):
    """Overwrite the width/height stored in a window placement dict,
    keeping its top-left corner fixed.

    :param placement: placement dict with an 'rcNormalPosition'
        [left, top, right, bottom] entry; modified in place.
    :param size: object with .width and .height attributes.
    """
    left, top = placement['rcNormalPosition'][:2]
    placement['rcNormalPosition'] = [left, top, left + size.width, top + size.height]
def preLoadWindowPos(windowName, uniqueId="", position_only = False, defaultPos = None, defaultSize = None):
    '''
    Look up a window's saved geometry before the window object exists.

    :param windowName: wx window Name used as part of the config key.
    :param uniqueId: extra discriminator for multiple windows of one class.
    :param position_only: restore position but keep the given default size.
    :param defaultPos: default position, or the string 'center' to center
        the window on its monitor.
    :param defaultSize: default size when nothing is saved.
    :returns: (wininfo, placement) where wininfo is a dict with 'style',
        'size' and 'pos' suitable for a frame constructor, and placement
        is the saved native window placement (Windows only) or None.
    '''
    # save based on classname, and any unique identifier that is specified
    section = windowId(windowName, uniqueId)
    if defaultPos is not None:
        doCenter = defaultPos == 'center'
        hasDefPos = not doCenter
    else:
        hasDefPos = False
        doCenter = False
    size = defaultSize if defaultSize is not None else wx.DefaultSize#(450, 400)
    pos = defaultPos if hasDefPos else wx.DefaultPosition
    style = 0
    try:
        cfg = local_settings()
        hassection = cfg.has_section(section)
    except Exception:
        print_exc()
        hassection = False
    placement = None
    if wxMSW:
        # Windows stores a native placement blob; deserialize it if present.
        if hassection:
            import util
            with util.traceguard:
                placement = json.loads(cfg.get(section, 'placement'))
                if position_only:
                    placement_set_size(placement, size)
    if hassection and not position_only:
        try:
            size = Size(cfg.getint(section, "w"), cfg.getint(section, "h"))
        except Exception:
            pass
            #TODO: this isn't expected to work anymore with IM windows, needs to
            #be removed once everything else is moved to use SetPalcement
            #print_exc()
    if doCenter:
        # Center within the client area of the monitor containing the rect.
        mon = Monitor.GetFromRect(wx.RectPS(wx.Point(0, 0), size)) #@UndefinedVariable
        pos = wx.Point(*mon.ClientArea.CenterPoint(size))
    if hassection:
        try:
            pos = Point(cfg.getint(section, "x"), cfg.getint(section, "y"))
        except Exception:
            pass
            #TODO: this isn't expected to work anymore with IM windows, needs to
            #be removed once everything else is moved to use SetPalcement
            #print_exc()
    import util
    # Restore the maximized flag as a wx.MAXIMIZE style bit.
    max = util.try_this(lambda: cfg.getboolean(section, "maximized"), False) if hassection else False
    if max:
        style |= wx.MAXIMIZE
    return dict(style = style, size = size, pos = pos), placement
def loadWindowPos(win, uniqueId="", position_only = False, defaultPos = None, defaultSize = None):
    '''
    Restores a window's saved geometry from the default config file.

    On MSW a saved win32 placement is applied verbatim; otherwise the
    stored position (and size, unless position_only) is applied.
    '''
    wininfo, placement = preLoadWindowPos(win.Name, uniqueId, position_only,
                                          defaultPos, defaultSize or win.Size)

    if placement is not None:
        SetWindowPlacement(win, placement)
    elif position_only:
        win.Position = wininfo['pos']
    else:
        win.SetRect(wx.RectPS(wininfo['pos'], wininfo['size']))

    win.EnsureInScreen()
def persist_window_pos(frame, close_method=None, unique_id="", position_only = False, defaultPos = None, defaultSize = None, nostack = False):
    '''
    Makes a frame remember its geometry: call from the frame's constructor.

    Restores the saved position immediately and saves it again whenever
    the frame is closed. close_method, when given, replaces the default
    close handling (e.Skip(True)).
    '''
    def _on_close(e):
        saveWindowPos(frame, unique_id)
        if close_method is not None:
            close_method(e)
        else:
            e.Skip(True)

    frame.Bind(wx.EVT_CLOSE, _on_close)
    loadWindowPos(frame, unique_id, position_only, defaultPos = defaultPos, defaultSize = defaultSize)

    if nostack:
        frame.EnsureNotStacked()
def TransparentBitmap(size):
    '''
    Returns a fully transparent wx bitmap, clamped to at least 1x1 pixels.
    '''
    width = max(1, size[0])
    height = max(1, size[1])
    return wx.TransparentBitmap(width, height)
def toscreen(bmap, x, y):
    '''Draws a bitmap directly onto the screen at (x, y).'''
    dc = wx.ScreenDC()
    dc.DrawBitmap(bmap, x, y)
def bbind(window, **evts):
    '''
    Shortcut for binding wxEvents.

    Instead of:
        self.Bind(wx.EVT_PAINT, self.on_paint)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.on_paint_background)
        self.Bind(wx.EVT_SET_FOCUS, self.on_focus)
    Use this:
        self.BBind(PAINT = self.on_paint,
                   ERASE_BACKGROUND = self.on_paint_background,
                   SET_FOCUS = self.on_focus)
    '''
    bind = window.Bind
    # items() instead of iteritems(): identical behavior for this small
    # kwargs dict, and it keeps the helper portable to Python 3.
    for k, v in evts.items():
        bind(getattr(wx, 'EVT_' + k), v)

wx.WindowClass.BBind = bbind
def EnsureInScreen(win, mon=None, client_area=True):
| |
np.array(y_true)
y_probas = np.array(y_probas)
classes = np.unique(y_true)
if len(classes) != 2:
raise ValueError('Cannot calculate KS statistic for data with '
'{} category/ies'.format(len(classes)))
probas = y_probas
# Compute KS Statistic curves
thresholds, pct1, pct2, ks_statistic, \
max_distance_at, classes = binary_ks_curve(y_true,
probas[:, 1].ravel())
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
ax.plot(thresholds, pct1, lw=3, label='Class {}'.format(classes[0]))
ax.plot(thresholds, pct2, lw=3, label='Class {}'.format(classes[1]))
idx = np.where(thresholds == max_distance_at)[0][0]
ax.axvline(max_distance_at, *sorted([pct1[idx], pct2[idx]]),
label='KS Statistic: {:.3f} at {:.3f}'.format(ks_statistic,
max_distance_at),
linestyle=':', lw=3, color='black')
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.0])
ax.set_xlabel('Threshold', fontsize=text_fontsize)
ax.set_ylabel('Percentage below threshold', fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc='lower right', fontsize=text_fontsize)
return ax
def plot_precision_recall_curve(y_true, y_probas,
                                title='Precision-Recall Curve',
                                curves=('micro', 'each_class'), ax=None,
                                figsize=None, cmap='nipy_spectral',
                                title_fontsize="large",
                                text_fontsize="medium"):
    """Generates the Precision Recall Curve from labels and probabilities
    Args:
        y_true (array-like, shape (n_samples)):
            Ground truth (correct) target values.
        y_probas (array-like, shape (n_samples, n_classes)):
            Prediction probabilities for each class returned by a classifier.
        title (string, optional): Title of the generated plot. Defaults to
            "Precision-Recall curve".
        curves (array-like): A listing of which curves should be plotted on the
            resulting plot. Defaults to `("micro", "each_class")`
            i.e. "micro" for micro-averaged curve
        ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
            plot the curve. If None, the plot is drawn on a new set of axes.
        figsize (2-tuple, optional): Tuple denoting figure size of the plot
            e.g. (6, 6). Defaults to ``None``.
        cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
            Colormap used for plotting the projection. View Matplotlib Colormap
            documentation for available options.
            https://matplotlib.org/users/colormaps.html
        title_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "large".
        text_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "medium".
    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.
    Example:
        >>> import scikitplot as skplt
        >>> nb = GaussianNB()
        >>> nb.fit(X_train, y_train)
        >>> y_probas = nb.predict_proba(X_test)
        >>> skplt.metrics.plot_precision_recall_curve(y_test, y_probas)
        <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
        >>> plt.show()
    .. image:: _static/examples/plot_precision_recall_curve.png
       :align: center
       :alt: Precision Recall Curve
    """
    y_true = np.array(y_true)
    y_probas = np.array(y_probas)
    classes = np.unique(y_true)
    probas = y_probas
    # Validate ``curves`` before any expensive curve computation.
    if 'micro' not in curves and 'each_class' not in curves:
        raise ValueError('Invalid argument for curves as it '
                         'only takes "micro" or "each_class"')
    # Compute Precision-Recall curve and area for each class
    precision = dict()
    recall = dict()
    average_precision = dict()
    for i in range(len(classes)):
        precision[i], recall[i], _ = precision_recall_curve(
            y_true, probas[:, i], pos_label=classes[i])
    # Binarize labels (one indicator column per class) so average precision
    # and the micro-average below can be computed. For the binary case,
    # label_binarize yields a single column, so stack its complement to get
    # the expected two-column layout matching ``probas``.
    y_true = label_binarize(y_true, classes=classes)
    if len(classes) == 2:
        y_true = np.hstack((1 - y_true, y_true))
    for i in range(len(classes)):
        average_precision[i] = average_precision_score(y_true[:, i],
                                                       probas[:, i])
    # Compute micro-average ROC curve and ROC area
    # NOTE(review): the per-class dicts above are keyed by int, so this
    # collision-avoidance loop never fires in practice; kept as-is.
    micro_key = 'micro'
    i = 0
    while micro_key in precision:
        i += 1
        micro_key += str(i)
    precision[micro_key], recall[micro_key], _ = precision_recall_curve(
        y_true.ravel(), probas.ravel())
    average_precision[micro_key] = average_precision_score(y_true, probas,
                                                           average='micro')
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)
    ax.set_title(title, fontsize=title_fontsize)
    if 'each_class' in curves:
        # One curve per class, colored by evenly spacing the colormap.
        for i in range(len(classes)):
            color = plt.cm.get_cmap(cmap)(float(i) / len(classes))
            ax.plot(recall[i], precision[i], lw=2,
                    label='Precision-recall curve of class {0} '
                          '(area = {1:0.3f})'.format(classes[i],
                                                     average_precision[i]),
                    color=color)
    if 'micro' in curves:
        ax.plot(recall[micro_key], precision[micro_key],
                label='micro-average Precision-recall curve '
                      '(area = {0:0.3f})'.format(average_precision[micro_key]),
                color='navy', linestyle=':', linewidth=4)
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.05])
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.tick_params(labelsize=text_fontsize)
    ax.legend(loc='best', fontsize=text_fontsize)
    return ax
def plot_silhouette(X, cluster_labels, title='Silhouette Analysis',
                    metric='euclidean', copy=True, ax=None, figsize=None,
                    cmap='nipy_spectral', title_fontsize="large",
                    text_fontsize="medium"):
    """Plots a silhouette analysis of the provided clustering.

    Args:
        X (array-like, shape (n_samples, n_features)): Data that was
            clustered.
        cluster_labels (array-like, shape (n_samples,)): Cluster label for
            each sample.
        title (string, optional): Plot title. Defaults to
            "Silhouette Analysis".
        metric (string or callable, optional): Distance metric, as accepted
            by sklearn.metrics.pairwise.pairwise_distances. Use
            "precomputed" when X is already a distance matrix.
        copy (boolean, optional): Kept for API compatibility.
        ax (:class:`matplotlib.axes.Axes`, optional): Axes to draw on. A new
            figure is created when None.
        figsize (2-tuple, optional): Figure size, e.g. (6, 6).
        cmap (string or :class:`matplotlib.colors.Colormap` instance,
            optional): Colormap used to color each cluster band.
        title_fontsize (string or int, optional): Matplotlib-style fontsize
            for the title. Defaults to "large".
        text_fontsize (string or int, optional): Matplotlib-style fontsize
            for labels/legend. Defaults to "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.
    """
    labels = np.asarray(cluster_labels)
    encoder = LabelEncoder()
    encoded_labels = encoder.fit_transform(labels)
    n_clusters = len(np.unique(labels))

    overall_score = silhouette_score(X, labels, metric=metric)
    sample_scores = silhouette_samples(X, labels, metric=metric)

    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    ax.set_title(title, fontsize=title_fontsize)
    ax.set_xlim([-0.1, 1])
    # Leave a 10-unit vertical gap between consecutive cluster bands.
    ax.set_ylim([0, len(X) + (n_clusters + 1) * 10 + 10])
    ax.set_xlabel('Silhouette coefficient values', fontsize=text_fontsize)
    ax.set_ylabel('Cluster label', fontsize=text_fontsize)

    band_start = 10
    for cluster_idx in range(n_clusters):
        cluster_scores = sample_scores[encoded_labels == cluster_idx]
        cluster_scores.sort()
        band_end = band_start + cluster_scores.shape[0]

        shade = plt.cm.get_cmap(cmap)(float(cluster_idx) / n_clusters)
        ax.fill_betweenx(np.arange(band_start, band_end), 0, cluster_scores,
                         facecolor=shade, edgecolor=shade, alpha=0.7)
        ax.text(-0.05, band_start + 0.5 * cluster_scores.shape[0],
                str(encoder.classes_[cluster_idx]), fontsize=text_fontsize)
        band_start = band_end + 10

    ax.axvline(x=overall_score, color="red", linestyle="--",
               label='Silhouette score: {0:0.3f}'.format(overall_score))

    ax.set_yticks([])  # cluster names are drawn as text, so hide the ticks
    ax.set_xticks(np.arange(-0.1, 1.0, 0.2))
    ax.tick_params(labelsize=text_fontsize)
    ax.legend(loc='best', fontsize=text_fontsize)

    return ax
def plot_calibration_curve(y_true, probas_list, clf_names=None, n_bins=10,
title='Calibration plots (Reliability Curves)',
ax=None, figsize=None, cmap='nipy_spectral',
title_fontsize="large", text_fontsize="medium"):
"""Plots calibration curves for a set of classifier probability estimates.
Plotting the calibration curves of a classifier is useful for determining
whether or not you can interpret their predicted probabilities directly as
as confidence level. For instance, a well-calibrated binary classifier
should classify the samples such that for samples to which it gave a score
of 0.8, around 80% should actually be from the positive class.
This function currently only works for binary classification.
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
probas_list (list of array-like, shape (n_samples, 2) or (n_samples,)):
A list containing the outputs of binary classifiers'
:func:`predict_proba` method or :func:`decision_function` method.
clf_names (list of str, optional): A list of strings, where each string
refers to the name of the classifier that produced the
corresponding probability estimates in `probas_list`. If ``None``,
the names "Classifier 1", "Classifier 2", etc. will be used.
n_bins (int, optional): Number of bins. A bigger number requires more
data.
title (string, optional): Title of the generated plot. Defaults to
"Calibration plots (Reliabilirt Curves)"
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
:class:`matplotlib.axes.Axes`: The axes on which the plot was drawn.
Example:
>>> import scikitplot as skplt
>>> rf = RandomForestClassifier()
>>> lr = LogisticRegression()
>>> nb = GaussianNB()
>>> svm = LinearSVC()
>>> rf_probas = rf.fit(X_train, y_train).predict_proba(X_test)
>>> lr_probas = lr.fit(X_train, y_train).predict_proba(X_test)
>>> nb_probas = nb.fit(X_train, y_train).predict_proba(X_test)
>>> svm_scores = svm.fit(X_train, y_train).decision_function(X_test)
>>> probas_list = [rf_probas, lr_probas, nb_probas, svm_scores]
>>> clf_names = ['Random Forest', 'Logistic Regression',
... 'Gaussian Naive Bayes', 'Support Vector Machine']
>>> skplt.metrics.plot_calibration_curve(y_test,
... probas_list,
... clf_names)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_calibration_curve.png
:align: center
:alt: Calibration Curves
"""
y_true = np.asarray(y_true)
if not isinstance(probas_list, list):
raise ValueError('`probas_list` does not contain a list.')
classes = np.unique(y_true)
if len(classes) > 2:
| |
#
# Copyright 2020 Nebulon, Inc.
# All Rights Reserved.
#
# DISCLAIMER: THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO
# EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
# OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
from .graphqlclient import GraphQLParam, NebMixin
from .common import PageInput, read_value
from .filters import UuidFilter, IntFilter
from .sorting import SortDirection
from .tokens import TokenResponse
class LunSort:
    """A sort object for LUNs

    Supports ordering LUN queries by common properties. Exactly one
    property may be set per sort object.
    """

    def __init__(self, lun_id: SortDirection = None):
        """Constructs a new sort object for LUNs

        :param lun_id: Sort direction for the ``lun_id`` property of a LUN
        :type lun_id: SortDirection, optional
        """
        self.__direction = lun_id

    @property
    def lun_id(self) -> SortDirection:
        """Sort direction for the ``lun_id`` property of a LUN"""
        return self.__direction

    @property
    def as_dict(self):
        return {"lunID": self.lun_id}
class LunFilter:
    """A filter object to filter LUNs.

    Allows filtering for specific LUNs. Only one property may be specified
    per filter; combine several filters with ``and_filter``/``or_filter``.
    """

    def __init__(self, uuid: UuidFilter = None, lun_id: IntFilter = None,
                 and_filter=None, or_filter=None):
        """Constructs a new filter object

        Only one property may be specified. If filtering on multiple
        properties is needed, use ``and_filter`` and ``or_filter`` to
        concatenate multiple filters.

        :param uuid: Filter based on LUN unique identifiers
        :type uuid: UuidFilter, optional
        :param lun_id: Filter based on the ``lun_id`` property of a LUN
        :type lun_id: IntFilter, optional
        :param and_filter: Concatenate another filter with a logical AND
        :type and_filter: LunFilter, optional
        :param or_filter: Concatenate another filter with a logical OR
        :type or_filter: LunFilter, optional
        """
        self.__uuid = uuid
        self.__lun_id = lun_id
        self.__and_filter = and_filter
        self.__or_filter = or_filter

    @property
    def uuid(self) -> UuidFilter:
        """Filter based on LUN unique identifier"""
        return self.__uuid

    @property
    def lun_id(self) -> IntFilter:
        """Filter based on LUN ID"""
        return self.__lun_id

    @property
    def and_filter(self):
        """Allows concatenation of multiple filters via logical AND"""
        return self.__and_filter

    @property
    def or_filter(self):
        """Allows concatenation of multiple filters via logical OR"""
        return self.__or_filter

    @property
    def as_dict(self):
        return {
            "uuid": self.uuid,
            "lunID": self.lun_id,
            "and": self.and_filter,
            "or": self.or_filter,
        }
class CreateLunInput:
    """An input object to create a LUN for a volume

    A LUN is an instance of a volume export that makes a volume accessible
    to a host.
    """

    def __init__(self, volume_uuid: str, lun_id: int = None,
                 host_uuids: [str] = None, spu_serials: [str] = None,
                 local: bool = None):
        """Constructs a new input object to create a LUN for a volume

        At least one host must be specified via ``host_uuids`` or
        ``spu_serials`` - either one option must be specified but not
        both. If ``local`` is ``True``, the volume is exported with ALUA,
        otherwise with ALUA turned off.

        :param volume_uuid: The unique identifier of the volume that shall
            be made available to a host
        :type volume_uuid: str
        :param lun_id: An optional LUN ID to export volumes with a
            specific ID
        :type lun_id: int
        :param host_uuids: List of host UUIDs the volume shall be exported
            to. Mandatory if ``spu_serials`` is not provided.
        :type host_uuids: [str], optional
        :param spu_serials: List of SPU serials the volume shall be
            exported from. Mandatory if ``host_uuids`` is not provided.
        :type spu_serials: [str], optional
        :param local: If ``True``, export the volume with ALUA turned on
        :type local: bool, optional
        """
        self.__volume_uuid = volume_uuid
        self.__host_uuids = host_uuids
        self.__spu_serials = spu_serials
        self.__lun_id = lun_id
        self.__local = local

    @property
    def volume_uuid(self) -> str:
        """The identifier of the volume that shall be exported to a host"""
        return self.__volume_uuid

    @property
    def host_uuids(self) -> [str]:
        """The hosts to which a volume shall be exported to"""
        return self.__host_uuids

    @property
    def spu_serials(self) -> [str]:
        """The SPUs from which a volume shall be exported from"""
        return self.__spu_serials

    @property
    def lun_id(self) -> int:
        """An optional LUN ID to assign to the volume export"""
        return self.__lun_id

    @property
    def local(self) -> bool:
        """If ``True``, volumes will be exported with ALUA turned on"""
        return self.__local

    @property
    def as_dict(self):
        return {
            "volumeUUID": self.volume_uuid,
            "hostUUIDs": self.host_uuids,
            "spuSerials": self.spu_serials,
            "lunID": self.lun_id,
            "local": self.local,
        }
class BatchDeleteLunInput:
    """An input object to delete multiple LUNs at once"""

    def __init__(
            self,
            volume_uuid: str,
            lun_uuids: [str] = None,
            host_uuids: [str] = None
    ):
        """Constructs a new input object to delete multiple LUNs

        Either ``lun_uuids`` or ``host_uuids`` must be specified, but not both.

        :param volume_uuid: The unique identifier of the volume from which the
            LUNs shall be deleted
        :type volume_uuid: str
        :param lun_uuids: The list of LUN identifiers that shall be deleted. If
            ``host_uuids`` is not specified this parameter is mandatory
        :type lun_uuids: [str], optional
        :param host_uuids: The list of host identifiers from which the LUNs
            shall be deleted. If ``lun_uuids`` is not specified this parameter
            is mandatory
        :type host_uuids: [str], optional
        """
        self.__volume_uuid = volume_uuid
        self.__host_uuids = host_uuids
        self.__lun_uuids = lun_uuids

    @property
    def volume_uuid(self) -> str:
        """Identifier of the volume from which LUNs shall be deleted"""
        return self.__volume_uuid

    @property
    def host_uuids(self) -> [str]:
        """List of host identifiers from which LUNs shall be deleted"""
        # BUG FIX: previously returned ``self.host_uuids``, which re-entered
        # this property and recursed forever (RecursionError). Return the
        # private attribute instead, matching the sibling properties.
        return self.__host_uuids

    @property
    def lun_uuids(self) -> [str]:
        """List of LUN identifiers that shall be deleted"""
        return self.__lun_uuids

    @property
    def as_dict(self):
        result = dict()
        result["lunUUIDs"] = self.lun_uuids
        result["volumeUUID"] = self.volume_uuid
        result["hostUUIDs"] = self.host_uuids
        return result
class Lun:
    """A LUN / an export of a volume to a host

    A LUN is an instance of a volume export that makes a volume accessible
    to a host.
    """

    def __init__(self, response: dict):
        """Constructs a new LUN object from a nebulon ON API response

        The provided dict is checked against the currently implemented
        schema of the SDK.

        :param response: The JSON response from the server
        :type response: dict
        :raises ValueError: An error if illegal data is returned from the
            server
        """
        self.__uuid = read_value("uuid", response, str, True)
        self.__spu_serial = read_value("spu.serial", response, str, True)
        self.__lun_id = read_value("lunID", response, int, True)
        self.__volume_uuid = read_value("volume.uuid", response, str, True)

    @property
    def uuid(self) -> str:
        """The unique identifier of the LUN"""
        return self.__uuid

    @property
    def spu_serial(self) -> str:
        """The SPU serial where the LUN is exported from"""
        return self.__spu_serial

    @property
    def lun_id(self) -> int:
        """The LUN ID of the volume export"""
        return self.__lun_id

    @property
    def volume_uuid(self) -> str:
        """The unique identifier of the volume that is exported"""
        return self.__volume_uuid

    @staticmethod
    def fields():
        return [
            "uuid",
            "spu{serial}",
            "lunID",
            "volume{uuid}",
        ]
class LunList:
"""Paginated LUN list object
Contains a list of LUN objects and information for pagination. By default a
single page includes a maximum of `100` items unless specified otherwise
in the paginated query.
Consumers should always check for the property ``more`` as per default
the server does not return the full list of alerts but only one page.
"""
def __init__(
self,
response: dict
):
"""Constructs a new LUN list object
This constructor expects a dict() object from the nebulon ON API. It
will check the returned data against the currently implemented schema
of the SDK.
:param response: The JSON response from the server
:type response: dict
:raises ValueError: An error if | |
:type HighlightsInfo: list of HighlightsInfomation
:param Message: 状态描述
:type Message: str
:param Status: 任务状态
:type Status: str
"""
self.HighlightsInfo = None
self.Message = None
self.Status = None
    def _deserialize(self, params):
        # Populates this response object from the raw API ``params`` dict.
        # "HighlightsInfo" is a list of nested objects, so each entry is
        # deserialized into its own HighlightsInfomation instance; the flat
        # "Message"/"Status" fields are copied verbatim (None when absent).
        if params.get("HighlightsInfo") is not None:
            self.HighlightsInfo = []
            for item in params.get("HighlightsInfo"):
                obj = HighlightsInfomation()
                obj._deserialize(item)
                self.HighlightsInfo.append(obj)
        self.Message = params.get("Message")
        self.Status = params.get("Status")
class StatInfo(AbstractModel):
    """Occurrence statistics for a single vocabulary word."""

    def __init__(self):
        """
        :param Keyword: Word from the vocabulary library
        :type Keyword: str
        :param Value: Total number of times the word occurs in the audio
        :type Value: int
        """
        for attr in ("Keyword", "Value"):
            setattr(self, attr, None)

    def _deserialize(self, params):
        """Populates fields from the raw API ``params`` dict."""
        for attr in ("Keyword", "Value"):
            setattr(self, attr, params.get(attr))
class StudentBodyMovementResult(AbstractModel):
    """Student body-movement detection result."""

    def __init__(self):
        """
        :param Confidence: Confidence (deprecated)
        :type Confidence: float
        :param HandupConfidence: Confidence of the hand-raise result
        :type HandupConfidence: float
        :param HandupStatus: Hand-raise result: "handup" or "nothandup"
        :type HandupStatus: str
        :param Height: Height of the detection box
        :type Height: int
        :param Left: Left coordinate of the detection box
        :type Left: int
        :param Movements: Movement recognition result (deprecated)
        :type Movements: str
        :param StandConfidence: Confidence of the standing result
        :type StandConfidence: float
        :param StandStatus: Standing result: "stand" or "sit"
        :type StandStatus: str
        :param Top: Top coordinate of the detection box
        :type Top: int
        :param Width: Width of the detection box
        :type Width: int
        """
        for attr in ("Confidence", "HandupConfidence", "HandupStatus",
                     "Height", "Left", "Movements", "StandConfidence",
                     "StandStatus", "Top", "Width"):
            setattr(self, attr, None)

    def _deserialize(self, params):
        """Populates fields from the raw API ``params`` dict."""
        for attr in ("Confidence", "HandupConfidence", "HandupStatus",
                     "Height", "Left", "Movements", "StandConfidence",
                     "StandStatus", "Top", "Width"):
            setattr(self, attr, params.get(attr))
class SubmitAudioTaskRequest(AbstractModel):
    """SubmitAudioTask request parameters structure."""

    def __init__(self):
        """
        :param Lang: Language of the audio source; 0 = English (default),
            1 = Chinese
        :type Lang: int
        :param Url: Audio URL; required when submitting by URL
        :type Url: str
        :param VoiceEncodeType: Voice encoding type; 1 = pcm
        :type VoiceEncodeType: int
        :param VoiceFileType: Voice file type; 1 = raw, 2 = wav, 3 = mp3,
            10 = video (the three audio formats currently only support
            16k sample rate, 16 bit)
        :type VoiceFileType: int
        :param Functions: Feature switches controlling which analyses are
            run and returned
        :type Functions: :class:`tencentcloud.tci.v20190318.models.Function`
        :param FileType: Video file type; defaults to VOD, use "live_url"
            for live streams
        :type FileType: str
        :param MuteThreshold: Silence threshold in seconds; when silence
            detection is enabled, silence longer than this is reported as
            a mute segment. Defaults to 3s.
        :type MuteThreshold: int
        :param VocabLibNameList: Names of vocabulary libraries used for
            word-usage analysis
        :type VocabLibNameList: list of str
        """
        for attr in ("Lang", "Url", "VoiceEncodeType", "VoiceFileType",
                     "Functions", "FileType", "MuteThreshold",
                     "VocabLibNameList"):
            setattr(self, attr, None)

    def _deserialize(self, params):
        """Populates fields from the raw API ``params`` dict."""
        for attr in ("Lang", "Url", "VoiceEncodeType", "VoiceFileType"):
            setattr(self, attr, params.get(attr))
        functions_data = params.get("Functions")
        if functions_data is not None:
            self.Functions = Function()
            self.Functions._deserialize(functions_data)
        for attr in ("FileType", "MuteThreshold", "VocabLibNameList"):
            setattr(self, attr, params.get(attr))
class SubmitAudioTaskResponse(AbstractModel):
    """SubmitAudioTask response structure."""

    def __init__(self):
        """
        :param JobId: Job identifier returned for URL-mode submissions;
            use it to query results for this URL later
        :type JobId: int
        :param RequestId: Unique request ID, returned with every request;
            provide it when reporting issues
        :type RequestId: str
        """
        for attr in ("JobId", "RequestId"):
            setattr(self, attr, None)

    def _deserialize(self, params):
        """Populates fields from the raw API ``params`` dict."""
        for attr in ("JobId", "RequestId"):
            setattr(self, attr, params.get(attr))
class SubmitCheckAttendanceTaskPlusRequest(AbstractModel):
    """SubmitCheckAttendanceTaskPlus request parameters structure."""

    def __init__(self):
        """
        :param FileContent: Input data
        :type FileContent: list of str
        :param FileType: Video stream type; "vod_url" for VOD URLs,
            "live_url" for live URLs. Defaults to "vod_url".
        :type FileType: str
        :param LibraryIds: List of face library IDs
        :type LibraryIds: list of str
        :param AttendanceThreshold: Attendance threshold; defaults to 0.92
        :type AttendanceThreshold: float
        :param EnableStranger: Whether to enable stranger mode, which also
            returns statistics for faces not in the registered libraries.
            Disabled by default.
        :type EnableStranger: bool
        :param EndTime: Attendance end time (seconds into the video);
            defaults to 900. For live streams this is an absolute
            timestamp in seconds, defaulting to now + 12 hours.
        :type EndTime: int
        :param NoticeUrl: Notification callback URL; must accept POST with
            application/json
        :type NoticeUrl: str
        :param StartTime: Attendance start time (seconds into the video);
            defaults to 0. For live streams this is an absolute timestamp
            in seconds, defaulting to now.
        :type StartTime: int
        :param Threshold: Recognition threshold; defaults to 0.8
        :type Threshold: float
        """
        for attr in ("FileContent", "FileType", "LibraryIds",
                     "AttendanceThreshold", "EnableStranger", "EndTime",
                     "NoticeUrl", "StartTime", "Threshold"):
            setattr(self, attr, None)

    def _deserialize(self, params):
        """Populates fields from the raw API ``params`` dict."""
        for attr in ("FileContent", "FileType", "LibraryIds",
                     "AttendanceThreshold", "EnableStranger", "EndTime",
                     "NoticeUrl", "StartTime", "Threshold"):
            setattr(self, attr, params.get(attr))
class SubmitCheckAttendanceTaskPlusResponse(AbstractModel):
    """SubmitCheckAttendanceTaskPlus response structure."""

    def __init__(self):
        """
        :param JobId: Task identifier
        :type JobId: int
        :param NotRegisteredSet: IDs of people that are not registered
        :type NotRegisteredSet: str
        :param RequestId: Unique request ID, returned with every request;
            provide it when reporting issues
        :type RequestId: str
        """
        for attr in ("JobId", "NotRegisteredSet", "RequestId"):
            setattr(self, attr, None)

    def _deserialize(self, params):
        """Populates fields from the raw API ``params`` dict."""
        for attr in ("JobId", "NotRegisteredSet", "RequestId"):
            setattr(self, attr, params.get(attr))
class SubmitCheckAttendanceTaskRequest(AbstractModel):
    """SubmitCheckAttendanceTask request parameters structure."""

    def __init__(self):
        """
        :param FileContent: Input data
        :type FileContent: str
        :param FileType: Video stream type; "vod_url" for VOD URLs,
            "live_url" for live URLs. Defaults to "vod_url".
        :type FileType: str
        :param LibraryIds: List of face library IDs
        :type LibraryIds: list of str
        :param AttendanceThreshold: Attendance threshold; defaults to 0.92
        :type AttendanceThreshold: float
        :param EnableStranger: Whether to enable stranger mode, which also
            returns statistics for faces not in the registered libraries.
            Disabled by default.
        :type EnableStranger: bool
        :param EndTime: Attendance end time (seconds into the video);
            defaults to 900. For live streams this is an absolute
            timestamp in seconds, defaulting to now + 12 hours.
        :type EndTime: int
        :param NoticeUrl: Notification callback URL; must accept POST with
            application/json
        :type NoticeUrl: str
        :param StartTime: Attendance start time (seconds into the video);
            defaults to 0. For live streams this is an absolute timestamp
            in seconds, defaulting to now.
        :type StartTime: int
        :param Threshold: Recognition threshold; defaults to 0.8
        :type Threshold: float
        """
        for attr in ("FileContent", "FileType", "LibraryIds",
                     "AttendanceThreshold", "EnableStranger", "EndTime",
                     "NoticeUrl", "StartTime", "Threshold"):
            setattr(self, attr, None)

    def _deserialize(self, params):
        """Populates fields from the raw API ``params`` dict."""
        for attr in ("FileContent", "FileType", "LibraryIds",
                     "AttendanceThreshold", "EnableStranger", "EndTime",
                     "NoticeUrl", "StartTime", "Threshold"):
            setattr(self, attr, params.get(attr))
class SubmitCheckAttendanceTaskResponse(AbstractModel):
    """SubmitCheckAttendanceTask response structure."""

    def __init__(self):
        """
        :param JobId: Task identifier
        :type JobId: int
        :param NotRegisteredSet: IDs of people that are not registered
        :type NotRegisteredSet: list of str
        :param RequestId: Unique request ID, returned with every request;
            provide it when reporting issues
        :type RequestId: str
        """
        for attr in ("JobId", "NotRegisteredSet", "RequestId"):
            setattr(self, attr, None)

    def _deserialize(self, params):
        """Populates fields from the raw API ``params`` dict."""
        for attr in ("JobId", "NotRegisteredSet", "RequestId"):
            setattr(self, attr, params.get(attr))
class SubmitConversationTaskRequest(AbstractModel):
    """SubmitConversationTask request parameters structure."""

    def __init__(self):
        """
        :param Lang: Language of the audio source; 0 = English (default),
            1 = Chinese
        :type Lang: int
        :param StudentUrl: Student audio stream URL
        :type StudentUrl: str
        :param TeacherUrl: Teacher audio stream URL
        :type TeacherUrl: str
        :param VoiceEncodeType: Voice encoding type; 1 = pcm
        :type VoiceEncodeType: int
        :param VoiceFileType: Voice file type; 1 = raw, 2 = wav, 3 = mp3
            (the three formats currently only support 16k sample rate,
            16 bit)
        :type VoiceFileType: int
        :param Functions: Feature switches controlling which analyses are
            run and returned
        :type Functions: :class:`tencentcloud.tci.v20190318.models.Function`
        :param VocabLibNameList: Names of vocabulary libraries used for
            word-usage analysis
        :type VocabLibNameList: list of str
        """
        for attr in ("Lang", "StudentUrl", "TeacherUrl", "VoiceEncodeType",
                     "VoiceFileType", "Functions", "VocabLibNameList"):
            setattr(self, attr, None)

    def _deserialize(self, params):
        """Populates fields from the raw API ``params`` dict."""
        for attr in ("Lang", "StudentUrl", "TeacherUrl", "VoiceEncodeType",
                     "VoiceFileType"):
            setattr(self, attr, params.get(attr))
        functions_data = params.get("Functions")
        if functions_data is not None:
            self.Functions = Function()
            self.Functions._deserialize(functions_data)
        self.VocabLibNameList = params.get("VocabLibNameList")
class SubmitConversationTaskResponse(AbstractModel):
    """SubmitConversationTask response structure."""

    def __init__(self):
        """
        :param JobId: Job ID named when querying results. For URL-based
            submissions a JobId is returned and is used in subsequent result
            queries for that URL.
        :type JobId: int
        :param RequestId: Unique request ID, returned with every request.
            Provide it when reporting an issue with this request.
        :type RequestId: str
        """
        self.JobId = None
        self.RequestId = None

    def _deserialize(self, params):
        self.JobId = params.get("JobId")
        self.RequestId = params.get("RequestId")
class SubmitDoubleVideoHighlightsRequest(AbstractModel):
    """SubmitDoubleVideoHighlights request structure."""

    def __init__(self):
        """
        :param FileContent: Student video URL.
        :type FileContent: str
        :param LibIds: Face libraries to search; faces not in a library do not
            take part in the highlights. Currently only a single library may
            be supplied.
        :type LibIds: list of str
        :param Functions: Detailed feature switch configuration.
        :type Functions: :class:`tencentcloud.tci.v20190318.models.DoubleVideoFunction`
        :param PersonInfoList: List of person information to match.
        :type PersonInfoList: list of PersonInfo
        :param FrameInterval: Frame-sampling interval for video processing, in
            milliseconds. Recommended to leave empty.
        :type FrameInterval: int
        :param PersonIds: Legacy list of person IDs to match.
        :type PersonIds: list of str
        :param SimThreshold: Similarity threshold for face retrieval, default
            0.89. Recommended to leave empty.
        :type SimThreshold: float
        :param TeacherFileContent: Teacher video URL.
        :type TeacherFileContent: str
        """
        self.FileContent = None
        self.LibIds = None
        self.Functions = None
        self.PersonInfoList = None
        self.FrameInterval = None
        self.PersonIds = None
        self.SimThreshold = None
        self.TeacherFileContent = None

    def _deserialize(self, params):
        self.FileContent = params.get("FileContent")
        self.LibIds = params.get("LibIds")
        # Nested model object: deserialize recursively when present.
        if params.get("Functions") is not None:
            self.Functions = DoubleVideoFunction()
            self.Functions._deserialize(params.get("Functions"))
        # List of nested model objects: build one PersonInfo per entry.
        if params.get("PersonInfoList") is not None:
            self.PersonInfoList = []
            for item in params.get("PersonInfoList"):
                obj = PersonInfo()
                obj._deserialize(item)
                self.PersonInfoList.append(obj)
        self.FrameInterval = params.get("FrameInterval")
        self.PersonIds = params.get("PersonIds")
        self.SimThreshold = params.get("SimThreshold")
        self.TeacherFileContent = params.get("TeacherFileContent")
class SubmitDoubleVideoHighlightsResponse(AbstractModel):
    """SubmitDoubleVideoHighlights response structure."""

    def __init__(self):
        """
        :param JobId: Video-splitting task ID, uniquely identifying the task.
        :type JobId: int
        :param NotRegistered: List of unregistered person IDs. If present, the
            evaluation hit a problem: the input PersonId list contained IDs
            that are not in the library.
        :type NotRegistered: list of str
        :param RequestId: Unique request ID, returned with every request.
            Provide it when reporting an issue with this request.
        :type RequestId: str
        """
        self.JobId = None
        self.NotRegistered = None
        self.RequestId = None

    def _deserialize(self, params):
        self.JobId = params.get("JobId")
        self.NotRegistered = params.get("NotRegistered")
        self.RequestId = params.get("RequestId")
class SubmitFullBodyClassTaskRequest(AbstractModel):
    """SubmitFullBodyClassTask request structure."""

    def __init__(self):
        """
        :param FileContent: Content of the object to analyze; the expected
            format depends on the FileType parameter.
        :type FileContent: str
        :param FileType: Type of the object to analyze. picture_url: image
            URL; vod_url: video URL; live_url: live-stream URL; picture:
            BASE64-encoded binary image data.
        :type FileType: str
        :param Lang: Language of the audio source; 0 (default) for English,
            1 for Chinese.
        :type Lang: int
        :param LibrarySet: Person libraries to query; may contain the library
            holding the teacher's registration photo.
        :type LibrarySet: list of str
        :param MaxVideoDuration: Evaluation duration in seconds. VOD default:
            2 hours (when the length cannot be probed) or the full video;
            live default: 10 minutes or until the stream ends early.
        :type MaxVideoDuration: int
        :param VocabLibNameList: Names of keyword vocabulary libraries used to
            evaluate the teacher's use of those keywords while teaching.
        :type VocabLibNameList: list of str
        :param VoiceEncodeType: Voice encoding type; 1: pcm. Required when
            FileType is vod_url or live_url.
        :type VoiceEncodeType: int
        :param VoiceFileType: Voice file type; 10: video (only 16 kHz sample
            rate, 16 bit audio is currently supported). Required when FileType
            is vod_url or live_url.
        :type VoiceFileType: int
        """
        self.FileContent = None
        self.FileType = None
        self.Lang = None
        self.LibrarySet = None
        self.MaxVideoDuration = None
        self.VocabLibNameList = None
        self.VoiceEncodeType = None
        self.VoiceFileType = None

    def _deserialize(self, params):
        self.FileContent = params.get("FileContent")
        self.FileType = params.get("FileType")
        self.Lang = params.get("Lang")
        self.LibrarySet = params.get("LibrarySet")
        self.MaxVideoDuration = params.get("MaxVideoDuration")
        self.VocabLibNameList = params.get("VocabLibNameList")
        self.VoiceEncodeType = params.get("VoiceEncodeType")
        self.VoiceFileType = params.get("VoiceFileType")
class SubmitFullBodyClassTaskResponse(AbstractModel):
    """SubmitFullBodyClassTask response structure."""

    def __init__(self):
        """
        :param ImageResults: Results returned directly by the image task,
            including: FaceAttr, FaceExpression, FaceIdentify, FaceInfo,
            FacePose, TeacherBodyMovement, TimeInfo.
        :type ImageResults: list of ImageTaskResult
        :param TaskId: Task ID.
        :type TaskId: int
        :param RequestId: Unique request ID, returned with every request.
            Provide it when reporting an issue with this request.
        :type RequestId: str
        """
        self.ImageResults = None
        self.TaskId = None
        self.RequestId = None

    def _deserialize(self, params):
        # List of nested model objects: build one ImageTaskResult per entry.
        if params.get("ImageResults") is not None:
            self.ImageResults = []
            for item in params.get("ImageResults"):
                obj = ImageTaskResult()
                obj._deserialize(item)
                self.ImageResults.append(obj)
        self.TaskId = params.get("TaskId")
        self.RequestId = params.get("RequestId")
class SubmitHighlightsRequest(AbstractModel):
"""SubmitHighlights请求参数结构体
"""
def __init__(self):
"""
:param Functions: 表情配置开关项。
:type Functions: :class:`tencentcloud.tci.v20190318.models.HLFunction`
:param FileContent: 视频url。
:type FileContent: str
:param FileType: 视频类型及来源,目前只支持点播类型:"vod_url"。
:type FileType: str
:param LibIds: 需要检索的人脸合集库,不在库中的人脸将不参与精彩集锦。
:type LibIds: list of str
:param FrameInterval: 视频处理的抽帧间隔,单位毫秒。建议留空。
:type FrameInterval: int
:param KeywordsLanguage: 关键词语言类型,0为英文,1为中文。
:type KeywordsLanguage: int
:param KeywordsStrings: 关键词数组,当且仅当Funtions中的EnableKeywordWonderfulTime为true时有意义,匹配相应的关键字。
:type KeywordsStrings: list of str
:param MaxVideoDuration: | |
= kwargs.pop('port_priority')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_designated_bridge_id(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/designated-bridge-id,
    set the leaf text from kwargs['designated_bridge_id'] (required), and pass
    the element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    designated_bridge_id = ET.SubElement(port, "designated-bridge-id")
    designated_bridge_id.text = kwargs.pop('designated_bridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_port_hello_time(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/port-hello-time,
    set the leaf text from kwargs['port_hello_time'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    port_hello_time = ET.SubElement(port, "port-hello-time")
    port_hello_time.text = kwargs.pop('port_hello_time')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_forward_transitions_count(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/forward-transitions-count,
    set the leaf text from kwargs['forward_transitions_count'] (required), and
    pass the element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    forward_transitions_count = ET.SubElement(port, "forward-transitions-count")
    forward_transitions_count.text = kwargs.pop('forward_transitions_count')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_received_stp_type(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/received-stp-type,
    set the leaf text from kwargs['received_stp_type'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    received_stp_type = ET.SubElement(port, "received-stp-type")
    received_stp_type.text = kwargs.pop('received_stp_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_transmitted_stp_type(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/transmitted-stp-type,
    set the leaf text from kwargs['transmitted_stp_type'] (required), and pass
    the element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    transmitted_stp_type = ET.SubElement(port, "transmitted-stp-type")
    transmitted_stp_type.text = kwargs.pop('transmitted_stp_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_edge_port(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/edge-port,
    set the leaf text from kwargs['edge_port'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    edge_port = ET.SubElement(port, "edge-port")
    edge_port.text = kwargs.pop('edge_port')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_auto_edge(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/auto-edge,
    set the leaf text from kwargs['auto_edge'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    auto_edge = ET.SubElement(port, "auto-edge")
    auto_edge.text = kwargs.pop('auto_edge')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_admin_edge(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/admin-edge,
    set the leaf text from kwargs['admin_edge'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    admin_edge = ET.SubElement(port, "admin-edge")
    admin_edge.text = kwargs.pop('admin_edge')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_edge_delay(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/edge-delay,
    set the leaf text from kwargs['edge_delay'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    edge_delay = ET.SubElement(port, "edge-delay")
    edge_delay.text = kwargs.pop('edge_delay')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_configured_root_guard(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/configured-root-guard,
    set the leaf text from kwargs['configured_root_guard'] (required), and pass
    the element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    configured_root_guard = ET.SubElement(port, "configured-root-guard")
    configured_root_guard.text = kwargs.pop('configured_root_guard')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_oper_root_guard(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/oper-root-guard,
    set the leaf text from kwargs['oper_root_guard'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    oper_root_guard = ET.SubElement(port, "oper-root-guard")
    oper_root_guard.text = kwargs.pop('oper_root_guard')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_boundary_port(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/boundary-port,
    set the leaf text from kwargs['boundary_port'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    boundary_port = ET.SubElement(port, "boundary-port")
    boundary_port.text = kwargs.pop('boundary_port')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_oper_bpdu_guard(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/oper-bpdu-guard,
    set the leaf text from kwargs['oper_bpdu_guard'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    oper_bpdu_guard = ET.SubElement(port, "oper-bpdu-guard")
    oper_bpdu_guard.text = kwargs.pop('oper_bpdu_guard')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_oper_bpdu_filter(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/oper-bpdu-filter,
    set the leaf text from kwargs['oper_bpdu_filter'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    oper_bpdu_filter = ET.SubElement(port, "oper-bpdu-filter")
    oper_bpdu_filter.text = kwargs.pop('oper_bpdu_filter')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_link_type(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/link-type,
    set the leaf text from kwargs['link_type'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    link_type = ET.SubElement(port, "link-type")
    link_type.text = kwargs.pop('link_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_rx_bpdu_count(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/rx-bpdu-count,
    set the leaf text from kwargs['rx_bpdu_count'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    rx_bpdu_count = ET.SubElement(port, "rx-bpdu-count")
    rx_bpdu_count.text = kwargs.pop('rx_bpdu_count')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_stp_stp_port_tx_bpdu_count(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/stp/stp/port/tx-bpdu-count,
    set the leaf text from kwargs['tx_bpdu_count'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    stp = ET.SubElement(spanning_tree_mode, "stp")
    stp = ET.SubElement(stp, "stp")
    port = ET.SubElement(stp, "port")
    tx_bpdu_count = ET.SubElement(port, "tx-bpdu-count")
    tx_bpdu_count.text = kwargs.pop('tx_bpdu_count')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_root_bridge_priority(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/rstp/rstp/root-bridge/priority,
    set the leaf text from kwargs['priority'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    rstp = ET.SubElement(spanning_tree_mode, "rstp")
    rstp = ET.SubElement(rstp, "rstp")
    root_bridge = ET.SubElement(rstp, "root-bridge")
    priority = ET.SubElement(root_bridge, "priority")
    priority.text = kwargs.pop('priority')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_root_bridge_bridge_id(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/rstp/rstp/root-bridge/bridge-id,
    set the leaf text from kwargs['bridge_id'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    rstp = ET.SubElement(spanning_tree_mode, "rstp")
    rstp = ET.SubElement(rstp, "rstp")
    root_bridge = ET.SubElement(rstp, "root-bridge")
    bridge_id = ET.SubElement(root_bridge, "bridge-id")
    bridge_id.text = kwargs.pop('bridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_root_bridge_hello_time(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/rstp/rstp/root-bridge/hello-time,
    set the leaf text from kwargs['hello_time'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    rstp = ET.SubElement(spanning_tree_mode, "rstp")
    rstp = ET.SubElement(rstp, "rstp")
    root_bridge = ET.SubElement(rstp, "root-bridge")
    hello_time = ET.SubElement(root_bridge, "hello-time")
    hello_time.text = kwargs.pop('hello_time')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_root_bridge_max_age(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/rstp/rstp/root-bridge/max-age,
    set the leaf text from kwargs['max_age'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    rstp = ET.SubElement(spanning_tree_mode, "rstp")
    rstp = ET.SubElement(rstp, "rstp")
    root_bridge = ET.SubElement(rstp, "root-bridge")
    max_age = ET.SubElement(root_bridge, "max-age")
    max_age.text = kwargs.pop('max_age')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_root_bridge_forward_delay(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/rstp/rstp/root-bridge/forward-delay,
    set the leaf text from kwargs['forward_delay'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    rstp = ET.SubElement(spanning_tree_mode, "rstp")
    rstp = ET.SubElement(rstp, "rstp")
    root_bridge = ET.SubElement(rstp, "root-bridge")
    forward_delay = ET.SubElement(root_bridge, "forward-delay")
    forward_delay.text = kwargs.pop('forward_delay')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_bridge_priority(self, **kwargs):
    """Auto Generated Code

    Build a <get_stp_brief_info> element addressing
    output/spanning-tree-info/spanning-tree-mode/rstp/rstp/bridge/priority,
    set the leaf text from kwargs['priority'] (required), and pass the
    element to kwargs['callback'] (default self._callback).
    """
    config = ET.Element("config")
    get_stp_brief_info = ET.Element("get_stp_brief_info")
    # NOTE(review): rebinding `config` discards the <config> wrapper built above.
    config = get_stp_brief_info
    output = ET.SubElement(get_stp_brief_info, "output")
    spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
    spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
    rstp = ET.SubElement(spanning_tree_mode, "rstp")
    rstp = ET.SubElement(rstp, "rstp")
    bridge = ET.SubElement(rstp, "bridge")
    priority = ET.SubElement(bridge, "priority")
    priority.text = kwargs.pop('priority')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_bridge_bridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rstp | |
<reponame>dudaspm/LDA_Bias_Data<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# # Workshop Notebook
# ## Notebook Introduction
# ### How to Use this Notebook
# ### References
# I know it is tradition to have the references at the end of books, but when you are standing on the shoulders of giants, you thank them first.
# ```{bibliography}
# ```
# ### Thank you!
#
# Also, a huge *thank you* to <NAME> (https://github.com/adamlavely) for developing some of the initial notebooks!
# ## Introduction to JupyterLab
# ### Where am I? (JupyterLab Notebook)
# Jupyter is a powerful suite of tools that allows us to do many things.
#
# Jupyter is capable of running **Ju**lia, **Pyt**hon and **R**, as well as some other things.
#
# ### Cells
# Each box is called a cell.
# #### Two types of Cells
# ##### Text
# Text Cells allow you to add text (via Markdown), which includes tables, images, links, bullet lists, numbered lists, LaTeX, blockquote, among other things.
# ###### Table
#
# ```markdown
# | This | is |
# |------|------|
# | a | table|
# ```
#
# | This | is |
# |------|------|
# | a | table|
# ###### Image
# ```markdown
# 
# ```
#
# 
#
#
#
#
# ###### Link
# ```markdown
# [Attribution](https://www.metmuseum.org/art/collection/search/436535)
# ```
# <NAME> / Public domain
# The Metropolitan Museum of Art, New York - Purchase, The Annenberg Foundation Gift, 1993 -
# [Attribution](https://www.metmuseum.org/art/collection/search/436535)
# ###### Bullet List
# ```markdown
# * I am a
# * bullet
# * list
# ```
# * I am a
# * bullet
# * list
#
#
# ###### Numbered List
# ```markdown
# 1. I am a
# 1. numbered
# 1. list
# ```
# 1. I am a
# 1. numbered
# 1. list
# ###### LaTeX
# ```markdown
# $$e=mc^2$$
# ```
#
#
# $$e=mc^2$$
# ###### Blockquotes
# ```markdown
# > This is a blockquote.
# ```
# > This is a blockquote.
# ##### Code
# Cells can be run using the Run button ► or selecting one of the run options under the Run menu.
#
# Try this out! You can change what is in the cell and rerun the same cell, which is useful for debugging.
# In[1]:
2 + 2
# ### Your turn!
# In a new cell, figure out what **5315 + 5618** is.
# In[2]:
## remove and type out 5315 + 5618
## then hit the play button
# ## Introduction to Python
# In this section, I wanted to introduce a few basic concepts and give an outline of this section.
# ### Comments in Python
# In Python, we can create comments in the code itself. Considering we can use markdown language (as you see here 😁), we won't use this too much in this notebook. Though, here is an example.
#
# Basically, you use the... umm... hashtag? Number sign? Pound sign?
#
# This thing -> #
# In[3]:
# I am a comment in Python
# Here is 2 + 2
2 + 2
# As you can see, these are not "computed" using Python.
# We are just comments for the person looking at this.
# Or... you!
# ### Print Function
# We will being using...
#
# ```python
# print()
# ```
#
# ...several times in this notebook.
#
# *print()* is a function to print out strings, variables, numbers, functions, etc.
#
# Let's use the classic example.
# In[4]:
print( "hello, world!" )
# OR
# In[5]:
print("hello, world!")
# *print()* can do some fun things as well. As in, giving it more than one thing to print with commas between them. This will print both things with spaces.
# In[6]:
print( "hello,", "world!" )
# ### Help Function
# The...
#
# ```python
# help()
# ```
#
# ... function is exactly what it is. It is a function to 🌟 help 🌟 you understand the basic usage of another function.
# In[7]:
help(print)
# ### Resources
# Highly suggest looking for answers using [StackOverflow](https://stackoverflow.com/help/searching)
# ### Common Errors
# One of the most common errors in Python is the dreaded
#
# ```python
# 2 + 2
# 3 + 3
#
# File "<ipython-input-1-0dcc020fd5cb>", line 2
# 3 + 3
# ^
# IndentationError: unexpected indent
# ```
#
# Why does this occur? Well, because Python uses spacing or tabs to distinguish where things like loops, functions, and if/else statements start and end. So, if you add an extra space or tab at the beginning of the statement, you will see this message. If you do, check your spacing.
# ```{note}
# Python can get weird with this issue, as you can, technically, start code at any indentation level as long as you are consistent. The next cell shows an example of this... oddity.
#
# ```
# In[8]:
2+2
3+3
# ### Your turn!
# ## Learning about Variables
# When we are developing our idea, we sometimes need to use values multiple times or change the value based on our code. This concept is where variables become very helpful. Let's look at an example.
#
# In this example, we are adding a few numbers together. In this instance, if all we care about is getting the result (similar to a calculator). Then variables are not needed.
# In[9]:
5 + 3 + 16
# But let's look at an example where we need to get the circumference of a circle using multiple radii. The equation for the circumference of a circle is: $C = 2 \pi r$
# Let's say the radius is 5
# In[10]:
2 * 3.14159265359 * 5
# OK, how about radius 10 and 11 and 4 and ...
# Well, in this example, we might not want to rewrite 3.14159265359 over and over. So, in this case, we want to create a variable for this, and we will call it pi.
# In[11]:
pi = 3.14159265359
# Now, every time we reference the variable called **pi** it will refer to the number **3.14159265359**
#
# Let's try those radii again (10, 11, 4)
# In[12]:
2 * pi * 10
# In[13]:
2 * pi * 11
# In[14]:
2 * pi * 4
# By the way, if you happen to get an error:
# ```javascript
# NameError: name 'pi' is not defined
# ```
# Make sure you go to the cell that has
# ```python
# pi = 3.14159265359
# ```
# and run this cell *first* then try the other calculations.
# ### Type of Variables
# There are multiple types of variables. The most common (and the ones we will talk about) are:
#
# * Integers (whole numbers)
# * Float (Floating points or numbers with a decimal)
# * Text
# * Lists
# * Dictionaries
#
# The nice thing about Python is that we do **not** need to specify (or declare) which type we are using. Python will figure this out for us!
#
# BUT FIRST, a quick detour...
#
# We need to talk about Camel Casing.
# #### Camel Case
# <img src="https://upload.wikimedia.org/wikipedia/commons/c/c8/CamelCase_new.svg" alt="camel case" width="100" style="float:right"/>
# Variable names must be one continuous string of letters/numbers. So, let's say we wanted to create a variable called "number of kittens." Instead of calling this variable <em>number of kittens</em>, I would call it <em>numberOfKittens</em>. Why the capitalization? Because it makes it easier to separate the words in the name. As in, <em>numberofkittens</em> vs. <em>numberOfKittens</em>. We have a fun name for this: camel case.
# <cite>File:CamelCase new.svg. (2020, April 15). Wikimedia Commons, the free media repository. Retrieved 15:25, June 3, 2020 from https://commons.wikimedia.org/w/index.php?title=File:CamelCase_new.svg&oldid=411544943.</cite>
# #### Integers or int
# As mentioned, integers are whole numbers. Let's create an example. How about we use our numberOfKittens. We will then set this value to 0. As in, we have 0 kittens.
# In[15]:
numberOfKittens = 0
# One thing we might want to do is to have Python tell us what **type** this variable is. Well, Python has a function for this called
#
# ```python
# type()
# ```
# In[16]:
type( numberOfKittens )
# So this checks out, we made an int, and it is showing us we have an int.
#
# Now, once we have a variable, it is not static. We can change the value as much as we need to. Running the next cell will continually add 10 to our original variable.
#
# Try | |
<reponame>ryanfarr01/blue
"""Define the Group class."""
from __future__ import division
from collections import Iterable, Counter, OrderedDict, defaultdict
from itertools import product, chain
import warnings
from six import iteritems, string_types, itervalues
from six.moves import range
import numpy as np
from openmdao.approximation_schemes.complex_step import ComplexStep
from openmdao.approximation_schemes.finite_difference import FiniteDifference
from openmdao.core.system import System
from openmdao.jacobians.assembled_jacobian import SUBJAC_META_DEFAULTS
from openmdao.proc_allocators.proc_allocator import ProcAllocationError
from openmdao.recorders.recording_iteration_stack import Recording
from openmdao.solvers.nonlinear.nonlinear_runonce import NonLinearRunOnce
from openmdao.solvers.linear.linear_runonce import LinearRunOnce
from openmdao.utils.array_utils import convert_neg
from openmdao.utils.general_utils import warn_deprecation
from openmdao.utils.units import is_compatible
class Group(System):
"""
Class used to group systems together; instantiate or inherit.
"""
    def __init__(self, **kwargs):
        """
        Initialize this Group; default both solvers to single-pass 'run once'.

        NOTE(review): an earlier summary here claimed block Gauss--Seidel
        defaults, but the code below assigns NonLinearRunOnce/LinearRunOnce.

        Parameters
        ----------
        **kwargs : dict
            dict of arguments available here and in all descendants of this
            Group.
        """
        super(Group, self).__init__(**kwargs)

        # TODO: we cannot set the solvers with property setters at the moment
        # because our lint check thinks that we are defining new attributes
        # called nonlinear_solver and linear_solver without documenting them.
        if not self._nonlinear_solver:
            self._nonlinear_solver = NonLinearRunOnce()
        if not self._linear_solver:
            self._linear_solver = LinearRunOnce()
    def setup(self):
        """
        Add subsystems to this group.

        Override this in a subclass to build the model; the base
        implementation is intentionally a no-op.

        Available attributes:
            name
            pathname
            comm
            metadata
        """
        pass
def _setup_procs(self, pathname, comm):
"""
Distribute processors and assign pathnames.
Parameters
----------
pathname : str
Global name of the system, including the path.
comm : MPI.Comm or <FakeComm>
MPI communicator object.
"""
self.pathname = pathname
self.comm = comm
self._subsystems_proc_range = subsystems_proc_range = []
self._subsystems_allprocs = []
self._manual_connections = {}
self._design_vars = {}
self._responses = {}
self._static_mode = False
self._subsystems_allprocs.extend(self._static_subsystems_allprocs)
self._manual_connections.update(self._static_manual_connections)
self._design_vars.update(self._static_design_vars)
self._responses.update(self._static_responses)
self.setup()
self._static_mode = True
req_procs = [s.get_req_procs() for s in self._subsystems_allprocs]
# Call the load balancing algorithm
try:
sub_inds, sub_comm, sub_proc_range = self._mpi_proc_allocator(req_procs, comm)
except ProcAllocationError as err:
raise RuntimeError("subsystem %s requested %d processes "
"but got %d" %
(self._subsystems_allprocs[err.sub_idx].pathname,
err.requested, err.remaining))
# Define local subsystems
self._subsystems_myproc = [self._subsystems_allprocs[ind]
for ind in sub_inds]
self._subsystems_myproc_inds = sub_inds
# Compute _subsystems_proc_range
for subsys in self._subsystems_myproc:
subsystems_proc_range.append(sub_proc_range)
# Perform recursion
for subsys in self._subsystems_myproc:
if self.pathname is not '':
sub_pathname = '.'.join((self.pathname, subsys.name))
else:
sub_pathname = subsys.name
subsys._setup_procs(sub_pathname, sub_comm)
    def _setup_vars(self, recurse=True):
        """
        Call setup in components and count variables, total and by var_set.

        Populates self._num_var and self._num_var_byset in place.

        Parameters
        ----------
        recurse : bool
            Whether to call this method in subsystems.
        """
        super(Group, self)._setup_vars()
        num_var = self._num_var
        num_var_byset = self._num_var_byset

        # Recursion
        if recurse:
            for subsys in self._subsystems_myproc:
                subsys._setup_vars(recurse)

        # Compute num_var, num_var_byset, at least locally
        for type_ in ['input', 'output']:
            num_var[type_] = np.sum(
                [subsys._num_var[type_] for subsys in self._subsystems_myproc])

            for subsys in self._subsystems_myproc:
                for set_name, num in iteritems(subsys._num_var_byset[type_]):
                    if set_name in num_var_byset[type_]:
                        num_var_byset[type_][set_name] += num
                    else:
                        num_var_byset[type_][set_name] = num

        # If running in parallel, allgather
        if self.comm.size > 1:
            # Perform a single allgather
            # Only rank 0 of the first local subsystem's comm contributes real
            # counts; other ranks send zeros/empties so the summed gather does
            # not count the same subsystem more than once.
            if self._subsystems_myproc[0].comm.rank == 0:
                raw = (num_var, num_var_byset)
            else:
                raw = ({'input': 0, 'output': 0}, {'input': {}, 'output': {}})
            gathered = self.comm.allgather(raw)

            # Empty the dictionaries
            for type_ in ['input', 'output']:
                num_var[type_] = 0
                num_var_byset[type_] = {}

            # Process the gathered data and update the dictionaries
            for myproc_num_var, myproc_num_var_byset in gathered:
                for type_ in ['input', 'output']:
                    num_var[type_] += myproc_num_var[type_]
                    for set_name, num in iteritems(myproc_num_var_byset[type_]):
                        if set_name in num_var_byset[type_]:
                            num_var_byset[type_][set_name] += num
                        else:
                            num_var_byset[type_][set_name] = num
    def _setup_var_index_ranges(self, set2iset, recurse=True):
        """
        Compute the division of variables by subsystem and pass down the set_name-to-iset maps.

        Parameters
        ----------
        set2iset : {'input': dict, 'output': dict}
            Dictionary mapping the var_set name to the var_set index.
        recurse : bool
            Whether to call this method in subsystems.
        """
        super(Group, self)._setup_var_index_ranges(set2iset)
        subsystems_var_range = self._subsystems_var_range
        subsystems_var_range_byset = self._subsystems_var_range_byset

        nsub_allprocs = len(self._subsystems_allprocs)

        # Here, we count the number of variables (total and by varset) in each subsystem.
        # We do this so that we can compute the offset when we recurse into each subsystem.
        allprocs_counters = {
            type_: np.zeros(nsub_allprocs, int) for type_ in ['input', 'output']}
        allprocs_counters_byset = {
            type_: np.zeros((nsub_allprocs, len(set2iset[type_])), int)
            for type_ in ['input', 'output']}

        # First compute these on one processor for each subsystem
        for type_ in ['input', 'output']:
            for subsys, isub in zip(self._subsystems_myproc, self._subsystems_myproc_inds):
                # Only sub-rank 0 records counts, so the summing allgather
                # below does not count a multi-proc subsystem repeatedly.
                if subsys.comm.rank == 0:
                    allprocs_counters[type_][isub] = subsys._num_var[type_]
                    for set_name in subsys._num_var_byset[type_]:
                        iset = set2iset[type_][set_name]
                        allprocs_counters_byset[type_][isub, iset] = \
                            subsys._num_var_byset[type_][set_name]

        # If running in parallel, allgather
        if self.comm.size > 1:
            raw = (allprocs_counters, allprocs_counters_byset)
            gathered = self.comm.allgather(raw)
            allprocs_counters = {
                type_: np.zeros(nsub_allprocs, int) for type_ in ['input', 'output']}
            allprocs_counters_byset = {
                type_: np.zeros((nsub_allprocs, len(set2iset[type_])), int)
                for type_ in ['input', 'output']}
            for myproc_counters, myproc_counters_byset in gathered:
                for type_ in ['input', 'output']:
                    allprocs_counters[type_] += myproc_counters[type_]
                    allprocs_counters_byset[type_] += myproc_counters_byset[type_]

        # Compute _subsystems_var_range, _subsystems_var_range_byset
        for type_ in ['input', 'output']:
            subsystems_var_range[type_] = []
            subsystems_var_range_byset[type_] = {set_name: [] for set_name in set2iset[type_]}

        for subsys, isub in zip(self._subsystems_myproc, self._subsystems_myproc_inds):
            for type_ in ['input', 'output']:
                # Prefix sums over the global counters give each subsystem's
                # (start, end) window in the parent's variable numbering.
                subsystems_var_range[type_].append((
                    np.sum(allprocs_counters[type_][:isub]),
                    np.sum(allprocs_counters[type_][:isub + 1])))
                for set_name in set2iset[type_]:
                    iset = set2iset[type_][set_name]
                    subsystems_var_range_byset[type_][set_name].append((
                        np.sum(allprocs_counters_byset[type_][:isub, iset]),
                        np.sum(allprocs_counters_byset[type_][:isub + 1, iset])))

        # Recursion
        if recurse:
            for subsys in self._subsystems_myproc:
                subsys._setup_var_index_ranges(set2iset, recurse)
    def _setup_var_data(self, recurse=True):
        """
        Compute the list of abs var names, abs/prom name maps, and metadata dictionaries.

        Parameters
        ----------
        recurse : bool
            Whether to call this method in subsystems.
        """
        super(Group, self)._setup_var_data()
        allprocs_abs_names = self._var_allprocs_abs_names
        abs_names = self._var_abs_names
        allprocs_prom2abs_list = self._var_allprocs_prom2abs_list
        abs2prom = self._var_abs2prom
        allprocs_abs2meta = self._var_allprocs_abs2meta
        abs2meta = self._var_abs2meta

        # Recursion
        if recurse:
            for subsys in self._subsystems_myproc:
                subsys._setup_var_data(recurse)

        for subsys in self._subsystems_myproc:
            # var_maps presumably translates the subsystem's promoted names
            # into this group's promoted names -- confirm in System._get_maps.
            var_maps = subsys._get_maps(subsys._var_allprocs_prom2abs_list)

            for type_ in ['input', 'output']:

                # Assemble abs_names and allprocs_abs_names
                allprocs_abs_names[type_].extend(subsys._var_allprocs_abs_names[type_])
                abs_names[type_].extend(subsys._var_abs_names[type_])

                # Assemble allprocs_abs2meta and abs2meta
                allprocs_abs2meta[type_].update(subsys._var_allprocs_abs2meta[type_])
                abs2meta[type_].update(subsys._var_abs2meta[type_])

                # Assemble abs2prom
                for abs_name in subsys._var_abs_names[type_]:
                    sub_prom_name = subsys._var_abs2prom[type_][abs_name]
                    prom_name = var_maps[type_][sub_prom_name]
                    abs2prom[type_][abs_name] = prom_name

                # Assemble allprocs_prom2abs_list
                sub_allprocs_prom2abs_list_t = subsys._var_allprocs_prom2abs_list[type_]
                for sub_prom_name in sub_allprocs_prom2abs_list_t:
                    prom_name = var_maps[type_][sub_prom_name]
                    for abs_name in sub_allprocs_prom2abs_list_t[sub_prom_name]:
                        if prom_name not in allprocs_prom2abs_list[type_]:
                            allprocs_prom2abs_list[type_][prom_name] = [abs_name]
                        else:
                            allprocs_prom2abs_list[type_][prom_name].append(abs_name)

        # Two outputs promoted to the same name would be ambiguous: reject.
        for prom_name, abs_list in iteritems(allprocs_prom2abs_list['output']):
            if len(abs_list) > 1:
                raise RuntimeError("Output name '%s' refers to "
                                   "multiple outputs: %s." %
                                   (prom_name, sorted(abs_list)))

        # If running in parallel, allgather
        if self.comm.size > 1:
            # Only rank 0 of the first local subsystem's comm sends real data;
            # other ranks send empty structures to avoid duplicate entries.
            if self._subsystems_myproc[0].comm.rank == 0:
                raw = (allprocs_abs_names, allprocs_prom2abs_list, allprocs_abs2meta)
            else:
                raw = (
                    {'input': [], 'output': []},
                    {'input': {}, 'output': {}},
                    {'input': {}, 'output': {}})
            gathered = self.comm.allgather(raw)

            # Rebuild the allprocs structures from the gathered pieces.
            for type_ in ['input', 'output']:
                allprocs_abs_names[type_] = []
                allprocs_prom2abs_list[type_] = {}

            for myproc_abs_names, myproc_prom2abs_list, myproc_abs2meta in gathered:
                for type_ in ['input', 'output']:

                    # Assemble in parallel allprocs_abs_names
                    allprocs_abs_names[type_].extend(myproc_abs_names[type_])

                    # Assemble in parallel allprocs_abs2meta
                    allprocs_abs2meta[type_].update(myproc_abs2meta[type_])

                    # Assemble in parallel allprocs_prom2abs_list
                    for prom_name, abs_names_list in iteritems(myproc_prom2abs_list[type_]):
                        if prom_name not in allprocs_prom2abs_list[type_]:
                            allprocs_prom2abs_list[type_][prom_name] = abs_names_list
                        else:
                            allprocs_prom2abs_list[type_][prom_name].extend(abs_names_list)
    def _setup_var_sizes(self, recurse=True):
        """
        Compute the arrays of local variable sizes for all variables/procs on this system.

        Parameters
        ----------
        recurse : bool
            Whether to call this method in subsystems.
        """
        super(Group, self)._setup_var_sizes()
        sizes = self._var_sizes
        sizes_byset = self._var_sizes_byset

        iproc = self.comm.rank
        nproc = self.comm.size

        set2iset = self._var_set2iset
        subsystems_proc_range = self._subsystems_proc_range
        subsystems_var_range = self._subsystems_var_range
        subsystems_var_range_byset = self._subsystems_var_range_byset

        # Recursion
        if recurse:
            for subsys in self._subsystems_myproc:
                subsys._setup_var_sizes(recurse)

        # Compute _var_sizes
        for type_ in ['input', 'output']:
            # One row per processor, one column per variable.
            sizes[type_] = np.zeros((nproc, self._num_var[type_]), int)
            for set_name in set2iset[type_]:
                sizes_byset[type_][set_name] = np.zeros(
                    (nproc, self._num_var_byset[type_][set_name]), int)

            for ind, subsys in enumerate(self._subsystems_myproc):
                # Paste each subsystem's size table into its (proc, var) window.
                proc_slice = slice(*subsystems_proc_range[ind])
                var_slice = slice(*subsystems_var_range[type_][ind])
                sizes[type_][proc_slice, var_slice] = subsys._var_sizes[type_]

                for set_name in set2iset[type_]:
                    var_slice = slice(*subsystems_var_range_byset[type_][set_name][ind])
                    sizes_byset[type_][set_name][proc_slice, var_slice] = \
                        subsys._var_sizes_byset[type_][set_name]

        # If parallel, all gather
        if self.comm.size > 1:
            for type_ in ['input', 'output']:
                # Each rank fills its own row locally; exchange rows so every
                # rank ends up with the full table.
                self.comm.Allgather(sizes[type_][iproc, :], sizes[type_])
                for set_name in set2iset[type_]:
                    self.comm.Allgather(
                        sizes_byset[type_][set_name][iproc, :], sizes_byset[type_][set_name])

        self._setup_global_shapes()
def _setup_global_connections(self, recurse=True, conns=None):
"""
Compute dict of all connections between this system's inputs and outputs.
The connections come from 4 sources:
1. Implicit connections owned by the current system
2. Explicit connections declared by the current system
3. Explicit connections declared by parent systems
4. Implicit / explicit from subsystems
Parameters
----------
recurse : bool
Whether to call this method in subsystems.
conns : dict
Dictionary of connections passed down from parent group.
"""
super(Group, self)._setup_global_connections()
global_abs_in2out = self._conn_global_abs_in2out
allprocs_prom2abs_list_in = self._var_allprocs_prom2abs_list['input']
allprocs_prom2abs_list_out = self._var_allprocs_prom2abs_list['output']
abs2meta_in = self._var_abs2meta['input']
pathname = self.pathname
abs_in2out = {}
if pathname == '':
path_len = 0
nparts = 0
else:
path_len = len(pathname) + 1
nparts = len(pathname.split('.'))
new_conns = defaultdict(dict)
if conns is not None:
for abs_in, abs_out in iteritems(conns):
inparts = abs_in.split('.')
outparts = abs_out.split('.')
if inparts[:nparts] == | |
<reponame>account-login/arggen
import argparse
import enum
from functools import partial
import json
import re
import os
import sys
from typing import Set, Sequence, Tuple, Dict, List
__version__ = '0.0.1.dev0'
class ArgError(Exception):
    """Raised for any invalid option specification or argument-parsing error."""
class ArgType(enum.Enum):
    """How an option consumes tokens; values are identity-only sentinels."""

    BOOL = object()   # presence/absence flag
    COUNT = object()  # repeatable; occurrences are counted
    ONE = object()    # takes exactly one value
    REST = object()   # collects the remaining positional arguments
class ValueType(enum.Enum):
    """Payload type of an option value; values are identity-only sentinels."""

    STRING = object()
    INT = object()
    # TODO: float
    BOOL = object()
def make_func(arg_type: ArgType):
    """Build a DSL helper that tags its call arguments with *arg_type*.

    The returned callable packs its inputs into an
    (arg_type, option-strings, params) tuple for later processing.
    """
    def func(*args: str, **kwargs):
        return arg_type, args, kwargs
    return func
# Declarative DSL entry points: each returns (ArgType, options, params).
flag = make_func(ArgType.BOOL)    # boolean switch, e.g. flag('-v', '--verbose')
count = make_func(ArgType.COUNT)  # occurrence counter
arg = make_func(ArgType.ONE)      # option taking a single value
rest = make_func(ArgType.REST)    # trailing positional list
# Shape of one entry produced by the helpers above.
UserArgInfo = Tuple[ArgType, Sequence[str], Dict]
class ArgInfo:
    """Normalized description of a single command-line option.

    Value semantics: equality and hashing are based on to_tuple(), so
    *options* should be hashable (e.g. a tuple) when instances are hashed.
    """

    def __init__(
        self, *,
        name: 'str', options: 'Sequence[str]', arg_type: 'ArgType',
        value_type: 'ValueType', default
    ):
        self.name = name            # python-side identifier for the option
        self.options = options      # raw option strings, e.g. ('-v', '--verbose')
        self.arg_type = arg_type
        self.value_type = value_type
        self.default = default      # None means "required" for ONE options

    def to_tuple(self):
        """Return the identity tuple used by __eq__/__hash__/__repr__."""
        return self.name, self.options, self.arg_type, self.value_type, self.default

    def __repr__(self):
        return "<ArgInfo name=%s options=%s arg_type=%s value_type=%s default=%s>" % self.to_tuple()

    def __hash__(self):
        return hash(self.to_tuple())

    def __eq__(self, other):
        # BUG FIX: comparing with a non-ArgInfo used to raise AttributeError
        # (no .to_tuple()); return NotImplemented so Python falls back to its
        # default comparison instead of crashing.
        if not isinstance(other, ArgInfo):
            return NotImplemented
        return self.to_tuple() == other.to_tuple()

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
def get_option_name(options: Sequence[str], param: Dict) -> str:
    """Pick the python-side name for an option.

    Priority: an explicit 'name' param, then the bare positional string,
    then the (single) long option converted to snake_case. Raises ArgError
    if more than one long option is present.
    """
    explicit = param.get('name')
    if explicit is not None:
        return explicit
    if is_position_option(options):
        return options[0]
    derived = None
    for opt in options:
        if opt.startswith('--'):
            if derived is not None:
                raise ArgError('multiple long option')
            derived = long_option_to_name(opt)
    return derived
def verify_option_string(argname: str):
    """Validate one option string.

    A short option ('-x') passes immediately; anything else must be a
    well-formed long option (long_option_to_name raises ArgError if not).
    """
    if not re.fullmatch(r'-[a-zA-Z0-9]', argname):
        long_option_to_name(argname)
def long_option_to_name(argname: str):
    """Convert a '--long-option' string into a snake_case identifier.

    Raises ArgError for a missing '--' prefix, empty body, non-alphanumeric
    words, or a leading word that does not start with a letter.
    """
    if not (argname.startswith('--') and len(argname) > 2):
        raise ArgError('bad long arg %s' % (argname,))
    words = argname[2:].split('-')
    assert words
    for wd in words:
        if not re.fullmatch('[a-zA-Z0-9]+', wd):
            raise ArgError('bad word %s' % (wd,))
    if not re.fullmatch('[a-zA-Z]', words[0][0]):
        raise ArgError('bad leading word %s' % (words[0],))
    return '_'.join(words)
def is_position_option(options):
    """Return True if *options* is a single bare identifier (a positional arg).

    FIX: the previous version returned a Match object, False, or None
    depending on the branch; a predicate should return a plain bool.
    Truthiness is unchanged for existing callers.
    """
    return len(options) == 1 and re.fullmatch('[a-zA-Z][a-zA-Z0-9_]*', options[0]) is not None
def get_value_type_and_default(name: str, arg_type: ArgType, param: Dict):
    """Resolve (value_type, default) for option *name*; raise ArgError on bad params."""
    if arg_type == ArgType.BOOL:
        value_type = ValueType.BOOL
        default = False
    elif arg_type == ArgType.COUNT:
        value_type = ValueType.INT
        # NOTE(review): this get() can only ever yield None, because an
        # explicit 'default' for COUNT is rejected below.
        default = param.get('default', None)
    elif arg_type == ArgType.ONE:
        value_type = param.get('type', ValueType.STRING)
        default = param.get('default', None)
    elif arg_type == ArgType.REST:
        value_type = param.get('type', ValueType.STRING)
        if value_type not in (ValueType.STRING, ValueType.INT):
            raise ArgError('only string & int are allowed in rest option')
        default = None
    else:
        assert False, 'unreachable'

    # Post-validation: BOOL/COUNT/REST may not carry 'default';
    # BOOL/COUNT may not carry 'type'.
    if arg_type in (ArgType.BOOL, ArgType.COUNT, ArgType.REST):
        if 'default' in param:
            raise ArgError('"default" param not allowed in %s' % (name,))
    if arg_type in (ArgType.BOOL, ArgType.COUNT):
        if 'type' in param:
            raise ArgError('"type" param not allowed in %s' % (name,))
    return value_type, default
def process_config(conf: Sequence[UserArgInfo]):
    """Validate the user's option spec and expand it into a list of ArgInfo.

    Raises ArgError for duplicate option strings or names, malformed option
    strings, multiple REST entries, illegal 'default'/'type' params, or a
    non-trailing defaulted positional argument.
    """
    has_rest = False
    options_set = set()  # type: Set[str]
    name_set = set()  # type: Set[str]
    arginfo_list = []  # type: List[ArgInfo]
    for arg_type, options, param in conf:
        # Reject any option string seen twice across the whole config.
        for opt in options:
            if opt in options_set:
                raise ArgError('duplicated option %s' % (opt,))
            options_set.add(opt)

        # Positional args are bare identifiers; everything else must be a
        # valid '-x' / '--long' option string.
        if not is_position_option(options):
            for opt in options:
                verify_option_string(opt)

        name = get_option_name(options, param)
        if name in name_set:
            raise ArgError('duplicated option name %s' % (name,))
        name_set.add(name)

        if arg_type == ArgType.REST:
            if has_rest:
                raise ArgError('multiple rest arg_type')
            has_rest = True

        value_type, default = get_value_type_and_default(name, arg_type, param)
        ai = ArgInfo(
            name=name, options=options,
            arg_type=arg_type, value_type=value_type, default=default,
        )
        arginfo_list.append(ai)

    # check the default argument of positional args:
    # once one positional has a default, all later ones (except REST) must too.
    default_pos_met = False
    for info in arginfo_list:
        if is_position_option(info.options):
            if not default_pos_met:
                if info.default is not None:
                    default_pos_met = True
            elif info.default is None and info.arg_type != ArgType.REST:
                raise ArgError(
                    'default value of positional args only allowed in trailing args, '
                    f'first error: {info.name}'
                )

    return arginfo_list
# begin source generation utils
class BadSourceStructure(Exception):
    """Raised when the source tree being rendered is structurally invalid."""
class Label:
    """A line rendered one indent level shallower than its siblings."""

    def __init__(self, text):
        self.text = text
class BaseNode:
    """Common base for source-tree nodes: a child list plus a parent link."""

    def __init__(self):
        self.children = []
        self.parent = None

    def add_child(self, child):
        """Append *child* and return self so calls can be chained."""
        self.children.append(child)
        return self

    def indent(self, level: int):
        """Return the whitespace prefix for indentation *level*."""
        return '    ' * level

    def __eq__(self, other):
        # Same concrete class and same children; other classes compare False.
        if self.__class__ is not other.__class__:
            return False
        return self.children == other.children

    def __ne__(self, other):
        return not (self == other)
class Body(BaseNode):
    """Node whose children render as indented lines (or nested nodes)."""

    def to_source_body(self, level: int):
        """Yield rendered source lines for all children at *level*."""
        for child in self.children:
            if isinstance(child, str):
                if child:
                    yield self.indent(level) + child
                else:
                    yield ''  # empty line
            elif isinstance(child, Label):
                # Labels render one indent level shallower than the body.
                yield self.indent(level - 1) + child.text
            else:
                # Nested node: delegate to its own renderer.
                yield from child.to_source(level)
class Root(Body):
    """Top-level node: renders its children with no surrounding wrapper."""

    def to_source(self, level: int):
        for line in self.to_source_body(level):
            yield line
class Block(Body):
    """A braced block: ``head {`` ... ``}``, optionally ending with ';'."""

    def __init__(self, head: str, trailing_semicolon=False):
        super().__init__()
        self.head = head
        self.trailing_semicolon = trailing_semicolon

    def to_source(self, level: int):
        yield self.indent(level) + self.head + ' {'
        yield from self.to_source_body(level + 1)
        closer = '};' if self.trailing_semicolon else '}'
        yield self.indent(level) + closer

    def __eq__(self, other):
        return super().__eq__(other) and self.head == other.head
class Condition(BaseNode):
    """Renders children (If, ElseIf..., optional trailing Else) as one
    ``if / else if / else`` chain sharing brace lines."""

    def to_source(self, level: int):
        if not self.children:
            raise BadSourceStructure('Condition node has no children')
        if len(self.children) == 1 and isinstance(self.children[0], Else):
            # allow dangling else: rendered bare, with no if/braces
            yield from self.children[0].to_source_body(level)
            return

        # First child must open the chain with `if`.
        if not isinstance(self.children[0], If):
            raise BadSourceStructure('expect if, got %r' % (self.children[0],))
        yield self.indent(level) + 'if (%s) {' % self.children[0].cond
        yield from self.children[0].to_source_body(level + 1)

        # Middle children must all be ElseIf; the last child is handled
        # separately below because it may be either ElseIf or Else.
        for elseif in self.children[1:-1]:
            if not isinstance(elseif, ElseIf):
                raise BadSourceStructure(f'expect elseif, got {elseif!r}')

            yield self.indent(level) + '} else if (%s) {' % (elseif.cond,)
            yield from elseif.to_source_body(level + 1)

        if len(self.children) > 1:
            last = self.children[-1]
            if isinstance(last, ElseIf):
                yield self.indent(level) + '} else if (%s) {' % (last.cond,)
                yield from last.to_source_body(level + 1)
            elif isinstance(last, Else):
                yield self.indent(level) + '} else {'
                yield from last.to_source_body(level + 1)
            else:
                raise BadSourceStructure(f'expect elseif|else, got {last!r}')

        yield self.indent(level) + '}'
class If(Block):
    """An ``if (cond)`` block; keeps *cond* so Condition can re-render it."""

    def __init__(self, cond: str):
        head = f'if ({cond})'
        super().__init__(head)
        self.cond = cond

    def __eq__(self, other):
        if not super().__eq__(other):
            return False
        return self.cond == other.cond
class ElseIf(Body):
    """An ``else if (cond)`` branch body, rendered by its parent Condition."""

    def __init__(self, cond: str):
        super().__init__()
        self.cond = cond

    def __eq__(self, other):
        if not super().__eq__(other):
            return False
        return self.cond == other.cond
class Else(Body):
    """A final ``else`` branch body, rendered by its parent Condition."""
# noinspection PyPep8Naming
class Context:
    """Builder for a source tree; ``with`` blocks track the current parent."""

    def __init__(self):
        self.root = Root()
        self.cur = self.root  # node that new children attach to

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Leaving a `with ctx.BLOCK(...)`-style block pops back to the parent.
        self.cur = self.cur.parent

    def add_child(self, child):
        """Attach *child* (line or node) to the current node."""
        self.cur.add_child(child)

    def add_cur(self, child):
        """Attach *child* and descend into it; pair with a ``with`` statement."""
        self.add_child(child)
        child.parent = self.cur
        self.cur = child
        return self

    def BLOCK(self, head: str, trailing_semiconlon=False):
        # NOTE(review): 'trailing_semiconlon' is misspelled, but it is the
        # public keyword callers already use (see struct_gen), so it stays.
        return self.add_cur(Block(head, trailing_semicolon=trailing_semiconlon))

    def CONDITION(self):
        return self.add_cur(Condition())

    def IF(self, cond: str):
        return self.add_cur(If(cond))

    def ELSEIF(self, cond: str):
        return self.add_cur(ElseIf(cond))

    def MATCH(self, cond: str):
        # First branch in a Condition becomes `if`, later ones `else if`.
        if len(self.cur.children) == 0:
            return self.IF(cond)
        else:
            return self.ELSEIF(cond)

    def ELSE(self):
        return self.add_cur(Else())
def collect_node(gen) -> Root:
    """Run generator-style builder *gen* on a fresh Context; return its root."""
    context = Context()
    for statement in gen(context):
        context.add_child(statement)
    return context.root
# end source generation utils
# begin xxx_gen
def repr_c_string(string: str):
    """Render *string* as a double-quoted C/C++ literal.

    JSON string escaping is a valid subset of C string escaping, so
    json.dumps does the quoting and escaping for us.
    """
    return json.dumps(string)
# Map DSL value types to the C++ types emitted into generated structs.
value_type_to_cxx_type = {
    ValueType.STRING: 'std::string',
    ValueType.INT: 'int',
    ValueType.BOOL: 'bool',
}
def struct_gen(ctx: Context, struct_name: str, argsinfo: Sequence[ArgInfo]):
    """Yield the C++ struct declaration holding one field per option."""
    with ctx.BLOCK(f'struct {struct_name}', trailing_semiconlon=True):
        for info in sorted(argsinfo, key=lambda ai: ai.name):  # sort by name
            cxx_type = value_type_to_cxx_type[info.value_type]
            yield f'// options: {info.options}, arg_type: {info.arg_type}'
            if info.default is None:
                # No default: REST options become vectors, others bare fields.
                if info.arg_type == ArgType.REST:
                    yield f'std::vector<{cxx_type}> {info.name};'
                else:
                    yield f'{cxx_type} {info.name};'
            else:
                assert info.arg_type != ArgType.REST
                # Translate the python default into a C++ initializer.
                default = info.default
                if info.value_type == ValueType.STRING:
                    default = repr_c_string(default)
                elif info.value_type == ValueType.BOOL:
                    default = 'true' if default else 'false'
                yield f'{cxx_type} {info.name} = {default};'
        yield ''
        yield 'std::string to_string() const;'
        yield f'bool operator==(const {struct_name} &rhs) const;'
        yield f'bool operator!=(const {struct_name} &rhs) const;'
        yield f'static {struct_name} parse_args(const std::vector<std::string> &args);'
        yield f'static {struct_name} parse_argv(int argc, const char *const argv[]);'
def accecpt_rest_gen(ctx: Context, info: ArgInfo):
    """Yield C++ appending one parsed token to a REST option's vector.

    NOTE: the 'accecpt' typo in the name is kept so existing callers
    keep working.
    """
    if info.value_type == ValueType.STRING:
        yield f'ans.{info.name}.emplace_back(piece);'
    elif info.value_type == ValueType.INT:
        # FIXME: atol
        # BUG FIX: the emitted statement was missing its trailing semicolon,
        # so the generated C++ did not compile for INT rest options.
        yield f'ans.{info.name}.emplace_back(atol(piece.data()));'
    else:
        assert False, 'unreachable'
def accept_arg_gen(ctx: Context, info: ArgInfo, source: str):
    """Yield the C++ statement assigning *source* to the option's field."""
    if info.value_type == ValueType.STRING:
        yield f'ans.{info.name} = {source};'
        return
    if info.value_type == ValueType.INT:
        # FIXME: atol
        yield f'ans.{info.name} = atol({source});'
        return
    assert False, 'unreachable'
def accept_arg_gen_with_default_check(ctx: Context, info: ArgInfo, source: str):
    """Like accept_arg_gen, but also records that a required option was seen."""
    for line in accept_arg_gen(ctx, info, source):
        yield line
    if info.default is None:
        # No default means the option is required; flag its presence.
        yield f'has_{info.name} = true;'
def use_next_arg_gen(ctx: Context, info: ArgInfo):
    """Yield C++ consuming the *next* argv token as this option's value.

    Emits a guard that rejects a missing value or one starting with '-'.
    """
    yield 'i++;'
    with ctx.CONDITION():
        with ctx.IF("i == args.size() || args[i][0] == '-'"):
            yield 'throw ArgError("no value for " + piece);'
        with ctx.ELSE():
            yield from accept_arg_gen_with_default_check(ctx, info, 'args[i].data()')
def use_this_arg_gen(ctx: Context, info: ArgInfo, offset: str):
    """Yield C++ reading the value embedded in the current token at *offset*."""
    yield from accept_arg_gen_with_default_check(ctx, info, f'piece.data() + {offset}')
def classify_options(options: Sequence[str]):
    """Partition option strings into (short, long) lists, preserving order."""
    shorts, longs = [], []
    for opt in options:
        if opt.startswith('--'):
            longs.append(opt)
        else:
            # Anything not long must still be an option string.
            assert opt.startswith('-')
            shorts.append(opt)
    return shorts, longs
def classify_to(options: Sequence[str], short_list: List[str], long_long: List[str]):
    """Split *options* by kind and append onto the two accumulator lists."""
    shorts, longs = classify_options(options)
    short_list += shorts
    long_long += longs
def parse_args_method_gen(ctx: Context, struct_name: str, argsinfo: Sequence[ArgInfo]):
option_to_arginfo = dict() # type: Dict[str, ArgInfo]
short_flags = []
short_args = []
short_count = []
long_flags = []
long_args = []
long_count = []
position_args = []
rest_arg = None
required_options = []
for info in argsinfo:
for opt in info.options:
option_to_arginfo[opt] = info
if info.arg_type == ArgType.BOOL:
classify_to(info.options, short_flags, long_flags)
elif info.arg_type == ArgType.COUNT:
classify_to(info.options, short_count, long_count)
elif info.arg_type == ArgType.ONE:
if is_position_option(info.options):
position_args.extend(info.options)
else:
classify_to(info.options, short_args, long_args)
if info.default is None:
required_options.append(info.name)
elif | |
<gh_stars>10-100
# #!/usr/bin/env python
# -*- coding: utf-8 -*-
# # Electrum - lightweight Bitcoin client
# # Copyright (C) 2014 <NAME>
# #
# # This program is free software: you can redistribute it and/or modify
# # it under the terms of the GNU General Public License as published by
# # the Free Software Foundation, either version 3 of the License, or
# # (at your option) any later version.
# #
# # This program is distributed in the hope that it will be useful,
# # but WITHOUT ANY WARRANTY; without even the implied warranty of
# # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# # GNU General Public License for more details.
# #
# # You should have received a copy of the GNU General Public License
# # along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# import hashlib
# import hmac
# import math
# import os
# import pkgutil
# import string
# import unicodedata
# import logging
# import ecdsa
# import pbkdf2
#
# from uwallet import version
# from uwallet.unet import is_new_seed
#
# log = logging.getLogger(__name__)
#
# # http://www.asahi-net.or.jp/~ax2s-kmtn/ref/unicode/e_asia.html
# CJK_INTERVALS = [
# (0x4E00, 0x9FFF, 'CJK Unified Ideographs'),
# (0x3400, 0x4DBF, 'CJK Unified Ideographs Extension A'),
# (0x20000, 0x2A6DF, 'CJK Unified Ideographs Extension B'),
# (0x2A700, 0x2B73F, 'CJK Unified Ideographs Extension C'),
# (0x2B740, 0x2B81F, 'CJK Unified Ideographs Extension D'),
# (0xF900, 0xFAFF, 'CJK Compatibility Ideographs'),
# (0x2F800, 0x2FA1D, 'CJK Compatibility Ideographs Supplement'),
# (0x3190, 0x319F, 'Kanbun'),
# (0x2E80, 0x2EFF, 'CJK Radicals Supplement'),
# (0x2F00, 0x2FDF, 'CJK Radicals'),
# (0x31C0, 0x31EF, 'CJK Strokes'),
# (0x2FF0, 0x2FFF, 'Ideographic Description Characters'),
# (0xE0100, 0xE01EF, 'Variation Selectors Supplement'),
# (0x3100, 0x312F, 'Bopomofo'),
# (0x31A0, 0x31BF, 'Bopomofo Extended'),
# (0xFF00, 0xFFEF, 'Halfwidth and Fullwidth Forms'),
# (0x3040, 0x309F, 'Hiragana'),
# (0x30A0, 0x30FF, 'Katakana'),
# (0x31F0, 0x31FF, 'Katakana Phonetic Extensions'),
# (0x1B000, 0x1B0FF, 'Kana Supplement'),
# (0xAC00, 0xD7AF, 'Hangul Syllables'),
# (0x1100, 0x11FF, 'Hangul Jamo'),
# (0xA960, 0xA97F, 'Hangul Jamo Extended A'),
# (0xD7B0, 0xD7FF, 'Hangul Jamo Extended B'),
# (0x3130, 0x318F, 'Hangul Compatibility Jamo'),
# (0xA4D0, 0xA4FF, 'Lisu'),
# (0x16F00, 0x16F9F, 'Miao'),
# (0xA000, 0xA48F, 'Yi Syllables'),
# (0xA490, 0xA4CF, 'Yi Radicals'),
# ]
#
#
# def is_CJK(c):
# n = ord(c)
# for imin, imax, name in CJK_INTERVALS:
# if imin <= n <= imax:
# return True
# return False
#
#
# def prepare_seed(seed):
# # normalize
# seed = unicodedata.normalize('NFKD', unicode(seed))
# # lower
# seed = seed.lower()
# # remove accents
# seed = u''.join([c for c in seed if not unicodedata.combining(c)])
# # normalize whitespaces
# seed = u' '.join(seed.split())
# # remove whitespaces between CJK
# seed = u''.join([seed[i] for i in range(len(seed)) if not (seed[i] in string.whitespace
# and is_CJK(seed[i - 1])
# and is_CJK(seed[i + 1]))])
# return seed
#
#
# filenames = {
# 'en': 'english.txt',
# 'es': 'spanish.txt',
# 'ja': 'japanese.txt',
# 'pt': 'portuguese.txt',
# 'zh': 'chinese_simplified.txt'
# }
#
#
# def normalize_text(seed):
# # normalize
# seed = unicodedata.normalize('NFKD', unicode(seed))
# # lower
# seed = seed.lower()
# # remove accents
# seed = u''.join([c for c in seed if not unicodedata.combining(c)])
# # normalize whitespaces
# seed = u' '.join(seed.split())
# # remove whitespaces between CJK
# seed = u''.join([seed[i] for i in range(len(seed)) if
# not (seed[i] in string.whitespace and is_CJK(seed[i - 1]) and is_CJK(seed[i + 1]))])
# return seed
#
# class Mnemonic(object):
# # Seed derivation no longer follows BIP39
# # Mnemonic phrase uses a hash based checksum, instead of a wordlist-dependent checksum
#
# def __init__(self, lang=None):
# lang = lang or "en"
# filename = filenames.get(lang[0:2], 'english.txt')
# s = pkgutil.get_data('uwallet', os.path.join('wordlist', filename))
# s = unicodedata.normalize('NFKD', s.decode('utf8'))
# lines = s.split('\n')
# self.wordlist = []
# for line in lines:
# line = line.split('#')[0]
# line = line.strip(' \r')
# assert ' ' not in line
# if line:
# self.wordlist.append(line)
# log.info("wordlist has %d words", len(self.wordlist))
#
# @classmethod
# def mnemonic_to_seed(cls, mnemonic, passphrase):
# PBKDF2_ROUNDS = 2048
# mnemonic = prepare_seed(mnemonic)
# return pbkdf2.PBKDF2(mnemonic, 'uwallet' + passphrase, iterations=PBKDF2_ROUNDS,
# macmodule=hmac, digestmodule=hashlib.sha512).read(64)
#
# def mnemonic_encode(self, i):
# n = len(self.wordlist)
# words = []
# while i:
# x = i % n
# i = i / n
# words.append(self.wordlist[x])
# return ' '.join(words)
#
# def mnemonic_decode(self, seed):
# n = len(self.wordlist)
# words = seed.split()
# i = 0
# while words:
# w = words.pop()
# k = self.wordlist.index(w)
# i = i * n + k
# return i
#
# def check_seed(self, seed, custom_entropy):
# assert is_new_seed(seed)
# i = self.mnemonic_decode(seed)
# return i % custom_entropy == 0
#
# def make_seed(self, num_bits=128, prefix=version.SEED_PREFIX, custom_entropy=1):
# n = int(math.ceil(math.log(custom_entropy, 2)))
# # bits of entropy used by the prefix
# k = len(prefix) * 4
# # we add at least 16 bits
# n_added = max(16, k + num_bits - n)
# log.info("make_seed %s adding %d bits", prefix, n_added)
# my_entropy = ecdsa.util.randrange(pow(2, n_added))
# nonce = 0
# while True:
# nonce += 1
# i = custom_entropy * (my_entropy + nonce)
# seed = self.mnemonic_encode(i)
# assert i == self.mnemonic_decode(seed)
# if is_new_seed(seed, prefix):
# break
# log.info('%d words', len(seed.split()))
# return seed
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import hmac
import math
import hashlib
import unicodedata
import string
import ecdsa
import pbkdf2
from util import print_error
from bitcoin import is_old_seed, is_new_seed
import version
import i18n
# http://www.asahi-net.or.jp/~ax2s-kmtn/ref/unicode/e_asia.html
# (inclusive start code point, inclusive end code point, block name) ranges
# scanned by is_CJK() below.
CJK_INTERVALS = [
    (0x4E00, 0x9FFF, 'CJK Unified Ideographs'),
    (0x3400, 0x4DBF, 'CJK Unified Ideographs Extension A'),
    (0x20000, 0x2A6DF, 'CJK Unified Ideographs Extension B'),
    (0x2A700, 0x2B73F, 'CJK Unified Ideographs Extension C'),
    (0x2B740, 0x2B81F, 'CJK Unified Ideographs Extension D'),
    (0xF900, 0xFAFF, 'CJK Compatibility Ideographs'),
    (0x2F800, 0x2FA1D, 'CJK Compatibility Ideographs Supplement'),
    (0x3190, 0x319F , 'Kanbun'),
    (0x2E80, 0x2EFF, 'CJK Radicals Supplement'),
    (0x2F00, 0x2FDF, 'CJK Radicals'),
    (0x31C0, 0x31EF, 'CJK Strokes'),
    (0x2FF0, 0x2FFF, 'Ideographic Description Characters'),
    (0xE0100, 0xE01EF, 'Variation Selectors Supplement'),
    (0x3100, 0x312F, 'Bopomofo'),
    (0x31A0, 0x31BF, 'Bopomofo Extended'),
    (0xFF00, 0xFFEF, 'Halfwidth and Fullwidth Forms'),
    (0x3040, 0x309F, 'Hiragana'),
    (0x30A0, 0x30FF, 'Katakana'),
    (0x31F0, 0x31FF, 'Katakana Phonetic Extensions'),
    (0x1B000, 0x1B0FF, 'Kana Supplement'),
    (0xAC00, 0xD7AF, 'Hangul Syllables'),
    (0x1100, 0x11FF, 'Hangul Jamo'),
    (0xA960, 0xA97F, 'Hangul Jamo Extended A'),
    (0xD7B0, 0xD7FF, 'Hangul Jamo Extended B'),
    (0x3130, 0x318F, 'Hangul Compatibility Jamo'),
    (0xA4D0, 0xA4FF, 'Lisu'),
    (0x16F00, 0x16F9F, 'Miao'),
    (0xA000, 0xA48F, 'Yi Syllables'),
    (0xA490, 0xA4CF, 'Yi Radicals'),
]
def is_CJK(c):
    """Return True when character *c* falls in any known CJK Unicode block."""
    code_point = ord(c)
    return any(lo <= code_point <= hi for lo, hi, _name in CJK_INTERVALS)
def normalize_text(seed):
    """Canonicalize a seed phrase: NFKD-normalize, lowercase, strip accents,
    collapse runs of whitespace, and remove whitespace sandwiched between two
    CJK characters.

    NOTE(review): relies on the Python-2-only unicode() builtin.
    """
    # normalize
    seed = unicodedata.normalize('NFKD', unicode(seed))
    # lower
    seed = seed.lower()
    # remove accents
    seed = u''.join([c for c in seed if not unicodedata.combining(c)])
    # normalize whitespaces
    seed = u' '.join(seed.split())
    # remove whitespaces between CJK
    # (indexing seed[i-1]/seed[i+1] is safe here because leading/trailing
    # whitespace was already removed by the split/join above)
    seed = u''.join([seed[i] for i in range(len(seed)) if not (seed[i] in string.whitespace and is_CJK(seed[i-1]) and is_CJK(seed[i+1]))])
    return seed
# Two-letter language code -> bundled wordlist file.  Mnemonic.__init__
# falls back to English for unknown codes.
filenames = {
    'en':'english.txt',
    'es':'spanish.txt',
    'ja':'japanese.txt',
    'pt':'portuguese.txt',
    'zh':'chinese_simplified.txt'
}
class Mnemonic(object):
# Seed derivation no longer follows BIP39
# Mnemonic phrase uses a hash based checksum, instead of a wordlist-dependent checksum
def __init__(self, lang=None):
lang = lang or 'en'
print_error('language', lang)
filename = filenames.get(lang[0:2], 'english.txt')
path = os.path.join(os.path.dirname(__file__), 'wordlist', filename)
s = open(path,'r').read().strip()
s = unicodedata.normalize('NFKD', s.decode('utf8'))
lines = s.split('\n')
self.wordlist = []
for line in lines:
line = line.split('#')[0]
line = line.strip(' \r')
assert ' ' not in line
if line:
self.wordlist.append(line)
print_error("wordlist has %d words"%len(self.wordlist))
@classmethod
def mnemonic_to_seed(self, mnemonic, passphrase):
PBKDF2_ROUNDS = 2048
mnemonic = normalize_text(mnemonic)
passphrase = normalize_text(passphrase)
return pbkdf2.PBKDF2(mnemonic, 'uwallet' + passphrase, iterations = PBKDF2_ROUNDS, macmodule = hmac, digestmodule = hashlib.sha512).read(64)
def mnemonic_encode(self, i):
n = len(self.wordlist)
words = []
while i:
x = i%n
i = i/n
words.append(self.wordlist[x])
return ' '.join(words)
def get_suggestions(self, prefix):
for w in self.wordlist:
if w.startswith(prefix):
yield w
def mnemonic_decode(self, seed):
n = len(self.wordlist)
words = seed.split()
i = 0
while words:
w = words.pop()
k = self.wordlist.index(w)
i = i*n + k
return i
def make_seed(self, num_bits=128, prefix=version.SEED_PREFIX):
# increase num_bits in | |
# vol_std = np.zeros((img_batch_size, num_passes))
ax = torch.distributions.Normal(torch.tensor(0.0).to(device=torch.device("cuda:0")),
torch.tensor(1.0).to(device=torch.device("cuda:0")))
for fpass in range(num_passes):
# if uncertainty_flag and dropout_level != 0.0:
# print('You must have hetero. + DO on!')
# noise_array = torch.normal(mean=0.0, std=1.0, size=logits_shape, device=torch.device('cuda:0'))
# noise_array = torch.normal(mean=0.0, std=1.0, size=logits_shape, device=torch.device('cuda:0'))
# stochastic_output = logits + sigma * noise_array # * ax.sample(logits_shape)
stochastic_output = logits + sigma * ax.sample(logits_shape)
# else:
# stochastic_output = logits + sigma * noise_array[..., fpass]
# temp_vol = torch.softmax(stochastic_output, dim=1)
# temp_vol = temp_vol[:, 0, ...]
# vol_std[:, fpass] = temp_vol.view(4, -1).sum(1).detach().cpu().numpy()
# The dimension is the class dimension!!
exponent_B = torch.log(torch.sum(torch.exp(stochastic_output), dim=1, keepdim=True))
# print(f'Exponent B shape is {exponent_B.shape}')
inner_logits = exponent_B - stochastic_output
soft_inner_logits = labels * inner_logits
total_loss += torch.exp(soft_inner_logits) #.detach() #.item()
del exponent_B, inner_logits, soft_inner_logits #,noise_array
# gc.collect()
# See probability distributions: torch.distributions
# Just expand and do at once: Casting
# Call backward in loop
mean_loss = total_loss / num_passes
actual_loss = torch.mean(torch.log(mean_loss))
# batch_std = np.std(vol_std, axis=1)
batch_std = np.array([0] * img_batch_size)
return actual_loss, batch_std
def corrected_paper_stochastic_loss_wip(logits, sigma, labels, num_passes):
    """WIP per-sample stochastic (heteroscedastic) loss.

    For each image in the batch, draws *num_passes* Gaussian perturbations of
    the logits scaled by *sigma*, computes the attenuated cross-entropy-style
    loss, calls .backward() and steps the (module-global) optimizer.

    Returns (last actual_loss, zero batch_std placeholder).

    NOTE(review): depends on a global `optimizer` and hard-codes cuda:0.
    BUG FIX: the original executed `del actual_loss` and then
    `return actual_loss`, which raised NameError on every call.
    """
    img_batch_size = logits.shape[0]
    logits_shape = list(logits[0, ...].shape)
    logits_shape.append(num_passes)
    ax = torch.distributions.Normal(torch.tensor(0.0).to(device=torch.device("cuda:0")),
                                    torch.tensor(1.0).to(device=torch.device("cuda:0")))
    actual_loss = None
    for unc_batch in range(img_batch_size):
        print(torch.cuda.memory_allocated())
        # Expand along a trailing "passes" dimension so all passes are drawn at once.
        expanded_logits = logits[unc_batch, ..., None].repeat((1, 1, 1, 1, num_passes))
        expanded_sigma = sigma[unc_batch, ..., None].repeat((1, 1, 1, 1, num_passes))
        expanded_labels = labels[unc_batch, ..., None].repeat((1, 1, 1, 1, num_passes))
        print(torch.cuda.memory_allocated())
        stochastic_output = expanded_logits + expanded_sigma * ax.sample(logits_shape)
        del expanded_sigma, expanded_logits
        # log-sum-exp over the class dimension (dim=-2 after the expansion)
        exponent_B = torch.log(torch.sum(torch.exp(stochastic_output), dim=-2, keepdim=True))
        inner_logits = exponent_B - stochastic_output
        soft_inner_logits = expanded_labels * inner_logits
        # Sum across number of passes
        total_loss = torch.sum(torch.exp(soft_inner_logits), dim=-1)
        del exponent_B, inner_logits, soft_inner_logits, expanded_labels
        mean_loss = total_loss / num_passes
        actual_loss = torch.mean(torch.log(mean_loss))
        actual_loss.backward()
        optimizer.step()
    batch_std = np.array([0] * img_batch_size)
    return actual_loss, batch_std
def save_img(image, affine, filename):
    """Write *image* (array) with the given *affine* to *filename* as NIfTI."""
    nib.save(nib.Nifti1Image(image, affine), filename)
# Check if SAVE_PATH is empty
file_list = os.listdir(path=SAVE_PATH)
num_files = len(file_list)
# Hyper-parameter loading: General parameters so doesn't matter which model file is loaded exactly
if LOAD and num_files > 0:
    # Resume from the most recently modified checkpoint in SAVE_PATH.
    model_files = glob.glob(os.path.join(SAVE_PATH, '*.pth'))
    for some_model_file in model_files:
        print(some_model_file)
    latest_model_file = max(model_files, key=os.path.getmtime)
    checkpoint = torch.load(latest_model_file, map_location=torch.device('cuda:0'))
    print(f'Loading {latest_model_file}!')
    loaded_epoch = checkpoint['epoch']
    loss = checkpoint['loss']
    running_iter = checkpoint['running_iter']
    EPOCHS = 91
    # Memory related variables
    batch_size = checkpoint['batch_size']
    queue_length = batch_size
    patch_size = checkpoint['patch_size']
    samples_per_volume = 1
else:
    # Fresh run: loaded_epoch == -1 means "no epochs completed yet".
    running_iter = 0
    loaded_epoch = -1
    EPOCHS = 91
    # Memory related variables
    patch_size = arguments.patch_size
    batch_size = 4
    queue_length = batch_size
    samples_per_volume = 1
# Validation
validation_interval = 3  # run validation every N epochs
if uncertainty_flag:
    num_loss_passes = arguments.num_unc_passes
    print(f'The number of loss passes will be {num_loss_passes}')
# Stratification
training_modes = ['standard', 'stratification', 'kld', 'inference']
training_mode = arguments.experiment_mode
print(f'The training mode is {training_mode}')
stratification_epsilon = config['stratification_epsilon']
print(f'The stratification epsilon is {stratification_epsilon}')
# Some necessary variables
dataset_csv = arguments.csv_label
img_dir = arguments.images_dir  # '/nfs/home/pedro/COVID/Data/KCH_CXR_JPG'
label_dir = arguments.labels_dir  # '/nfs/home/pedro/COVID/Labels/KCH_CXR_JPG.csv'
print(img_dir)
print(label_dir)
val_batch_size = 4
# Read csv + add directory to filenames
df = pd.read_csv(dataset_csv)
df['Label_Filename'] = df['Filename']
df['Filename'] = img_dir + '/' + df['Filename'].astype(str)
df['Label_Filename'] = label_dir + '/' + 'Label_' + df['Label_Filename'].astype(str)
num_folds = 1  # df.fold.nunique()
# OOD csv
# NOTE(review): hard-coded out-of-distribution dataset paths; assumes these
# NFS mounts exist on the training host -- confirm before reuse.
OOD_df = pd.read_csv('/nfs/home/pedro/PhysicsPyTorch/OOD_physics_csv_folds_limited.csv')
OOD_img_dir = '/nfs/project/pborges/SS_LowTD_MPRAGE_OOD_All_subjects/Restricted_30'
OOD_label_dir = '/nfs/project/pborges/Labels_LowTD_MPRAGE_OOD_All_subjects/Restricted_30'
OOD_df['Label_Filename'] = OOD_df['Filename'].str.replace('SS_', '')
OOD_df['Filename'] = OOD_img_dir + '/' + OOD_df['Filename'].astype(str)
# Label pointless, really, but load anyway
OOD_df['Label_Filename'] = OOD_label_dir + '/' + 'Label_' + OOD_df['Label_Filename'].astype(str)
# print(OOD_df)
# Image generation code is hiding under this comment
# On demand image generation
def mprage(T1, PD, TI, TD, tau, Gs=1):
    """Synthesize MPRAGE signal from tissue parameters.

    T1: longitudinal relaxation time; PD: proton density; TI: inversion
    time; TD: delay time; tau: readout duration; Gs: scanner gain.
    """
    recovery = 2 * np.exp(-TI / T1) / (1 + np.exp(-(TI + TD + tau) / T1))
    return Gs * PD * (1 - recovery)
# Transforms
# porchio pipelines for train/val/inference.  A random MPRAGE or SPGR
# transform synthesizes contrast on the fly, followed by Z-score whitening.
# MPRAGE inference applies whitening only; SPGR inference also re-synthesizes.
if arguments.generation_type == 'MPRAGE':
    training_transform = porchio.Compose([
        # porchio.RescaleIntensity((0, 1)),  # so that there are no negative values for RandomMotion
        # porchio.RandomMotion(),
        # porchio.HistogramStandardization({MRI: landmarks}),
        porchio.RandomMPRAGE(TI=(0.6, 1.2), p=1),
        # porchio.RandomBiasField(coefficients=0.2),  # Bias field coeffs: Default 0.5 may be a bit too high!
        porchio.ZNormalization(masking_method=None),  # This is whitening
        # porchio.RandomNoise(std=(0, 0.1)),
        # porchio.ToCanonical(),
        # porchio.Resample((4, 4, 4)),
        # porchio.CropOrPad((48, 60, 48)),
        # porchio.RandomFlip(axes=(0,)),
        # porchio.OneOf({
        #     porchio.RandomAffine(): 0.8,
        #     porchio.RandomElasticDeformation(): 0.2,}),
    ])
    validation_transform = porchio.Compose([
        porchio.RandomMPRAGE(TI=(0.6, 1.2), p=1),
        porchio.ZNormalization(masking_method=None),
        # porchio.ToCanonical(),
        # porchio.Resample((4, 4, 4)),
        # porchio.CropOrPad((48, 60, 48)),
    ])
    inference_transform = porchio.Compose([
        porchio.ZNormalization(masking_method=None),
        # porchio.ToCanonical(),
        # porchio.Resample((4, 4, 4)),
        # porchio.CropOrPad((48, 60, 48)),
    ])
elif arguments.generation_type == 'SPGR':
    training_transform = porchio.Compose([
        # porchio.RescaleIntensity((0, 1)),  # so that there are no negative values for RandomMotion
        # porchio.RandomMotion(),
        # porchio.HistogramStandardization({MRI: landmarks}),
        porchio.RandomSPGR(TR=(0.005, 2.0),
                           TE=(0.005, 0.1),
                           FA=(5.0, 90.0),
                           p=1),
        # porchio.RandomBiasField(coefficients=0.2),  # Bias field coeffs: Default 0.5 may be a bit too high!
        porchio.ZNormalization(masking_method=None),  # This is whitening
        # porchio.RandomNoise(std=(0, 0.1)),
        # porchio.ToCanonical(),
        # porchio.Resample((4, 4, 4)),
        # porchio.CropOrPad((48, 60, 48)),
        # porchio.RandomFlip(axes=(0,)),
        # porchio.OneOf({
        #     porchio.RandomAffine(): 0.8,
        #     porchio.RandomElasticDeformation(): 0.2,}),
    ])
    validation_transform = porchio.Compose([
        porchio.RandomSPGR(TR=(0.005, 2.0),
                           TE=(0.005, 0.1),
                           FA=(5.0, 90.0),
                           p=1),
        porchio.ZNormalization(masking_method=None),
        # porchio.ToCanonical(),
        # porchio.Resample((4, 4, 4)),
        # porchio.CropOrPad((48, 60, 48)),
    ])
    inference_transform = porchio.Compose([
        porchio.RandomSPGR(TR=(0.005, 2.0),
                           TE=(0.005, 0.1),
                           FA=(5.0, 90.0),
                           p=1),
        porchio.ZNormalization(masking_method=None),
        # porchio.ToCanonical(),
        # porchio.Resample((4, 4, 4)),
        # porchio.CropOrPad((48, 60, 48)),
    ])
# CUDA variables
use_cuda = torch.cuda.is_available()
print('Using cuda', use_cuda)
if use_cuda and torch.cuda.device_count() > 1:
    print('Using', torch.cuda.device_count(), 'GPUs!')
stacked_cv = False
# OOD on by default!
OOD_flag = True
if not stacked_cv:
    # Fold 5 is reserved for inference; draw it from the OOD csv when the
    # OOD flag is set, otherwise from the main dataframe.
    inf_fold = 5
    if not OOD_flag:
        inf_df = df[df.fold == inf_fold]
    else:
        inf_df = OOD_df[OOD_df.fold == inf_fold]
    inf_df.reset_index(drop=True, inplace=True)
# Loader for inference
inference_set = BespokeDataset(inf_df, inference_transform, patch_size=(181, 217, 181), batch_seed=1,
                               queue_length=batch_size)
inf_loader = DataLoader(inference_set, batch_size=1, shuffle=False)
# print(f'The inference set is {inference_set}')
# print(inference_set[0])
# For aggregation
overall_val_names = []
overall_val_metric = []
overall_gm_volumes = []
overall_gm_volumes2 = []
# If pretrained then initial model file will NOT match those created here: Therefore need to account for this
# Because won't be able to extract epoch and/ or fold from the name
if LOAD and num_files > 0:
    pretrained_checker = 'fold' in os.path.basename(latest_model_file)
# Find out fold and epoch
if LOAD and num_files > 0 and pretrained_checker:
    basename = os.path.basename(latest_model_file)
    # Checkpoint names follow model_epoch_<E>_fold_<F>.pth or
    # best_model_epoch_<E>_fold_<F>.pth; the index of the epoch/fold tokens
    # shifts by one when the 'best_' prefix is present.
    if 'best' in basename:
        latest_epoch = int(os.path.splitext(basename)[0].split('_')[3])
        latest_fold = int(os.path.splitext(basename)[0].split('_')[5])
    else:
        latest_epoch = int(os.path.splitext(basename)[0].split('_')[2])
        latest_fold = int(os.path.splitext(basename)[0].split('_')[4])
    print(f'The latest epoch is {latest_epoch}, the loaded epoch is {loaded_epoch}')
    assert latest_epoch == loaded_epoch
else:
    # No usable checkpoint: start at fold 0 with no completed epochs.
    latest_epoch = -1
    latest_fold = 0
print(f'\nStarted {training_mode}-ing!')
loop_switch = True
for fold in range(latest_fold, num_folds):
while loop_switch:
print('\nFOLD', fold)
# Pre-loading sequence
model = nnUNet(1, 2, physics_flag=physics_flag, physics_input=physics_input_size[physics_experiment_type],
physics_output=40, uncertainty_flag=uncertainty_flag, dropout_level=dropout_level)
model = nn.DataParallel(model)
# optimizer = RangerLars(model.parameters())
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.9)
# Early Stopping
early_stopping = pytorchtools.EarlyStopping(patience=9, verbose=True)
# Running lists
running_val_names = []
running_val_metric = []
running_gm_volumes = []
running_gm_volumes2 = []
# Specific fold writer
writer = SummaryWriter(log_dir=os.path.join(log_dir, f'fold_{fold}'))
if LOAD and num_files > 0 and training_mode != 'inference':
# Get model file specific to fold
loaded_model_file = f'model_epoch_{loaded_epoch}_fold_{fold}.pth'
checkpoint = torch.load(os.path.join(SAVE_PATH, loaded_model_file), map_location=torch.device('cuda:0'))
# Main model variables
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# Get the validation entries from previous folds!
running_val_names = checkpoint['running_val_names']
running_val_metric = checkpoint['running_val_metric']
running_gm_volumes = checkpoint['running_gm_volumes']
try:
running_gm_volumes2 = checkpoint['running_gm_volumes2']
except:
print('Missing running volumes2')
overall_val_names = checkpoint['overall_val_names']
overall_val_metric = checkpoint['overall_val_metric']
overall_gm_volumes = checkpoint['overall_gm_volumes']
try:
overall_gm_volumes2 = checkpoint['overall_gm_volumes2']
except:
print('Missing overall volumes2')
# Ensure that no more loading is done for future folds
LOAD = False
elif LOAD and num_files > 0 and training_mode == 'inference':
# Get model file specific to fold
# try:
# best_model_file = glob.glob(os.path.join(SAVE_PATH, f'best_model_epoch_*_fold_{fold}.pth'))
# except:
best_model_file = f'model_epoch_{loaded_epoch}_fold_{fold}.pth'
print(f'Loading checkpoint for model: {best_model_file}')
checkpoint = torch.load(os.path.join(SAVE_PATH, best_model_file), map_location=torch.device('cuda:0'))
# Main model variables
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# Get the validation entries from previous folds!
running_val_names = checkpoint['running_val_names']
running_val_metric = checkpoint['running_val_metric']
running_gm_volumes = checkpoint['running_gm_volumes']
try:
running_gm_volumes2 = checkpoint['running_gm_volumes2']
except:
print('Missing running volumes2')
overall_val_names = checkpoint['overall_val_names']
overall_val_metric = checkpoint['overall_val_metric']
overall_gm_volumes = checkpoint['overall_gm_volumes']
try:
overall_gm_volumes2 = checkpoint['overall_gm_volumes2']
except:
print('Missing overall volumes2')
if stacked_cv: # Pretty much never use this one
# Train / Val/ Inf split
val_fold = fold
inf_fold = num_folds - fold - 1
excluded_folds = [val_fold, inf_fold]
train_df = df[~df.fold.isin(excluded_folds)]
# if not OOD_flag:
val_df = df[df.fold == val_fold]
inf_df = df[df.fold == inf_fold]
# else:
# val_df = OOD_df[OOD_df.fold == val_fold]
# inf_df = OOD_df[OOD_df.fold == inf_fold]
train_df.reset_index(drop=True, inplace=True)
val_df.reset_index(drop=True, inplace=True)
inf_df.reset_index(drop=True, inplace=True)
else:
# Train / Val split
val_fold = fold
excluded_folds = [val_fold]
train_df = | |
# pyvision3/image.py
"""
Created on Dec 18, 2015
@author: <NAME>
This module defines the pyvision image class, which wraps
the image data (array) in a class with convenient methods.
One very useful aspect of the image class is that annotations
(such as bounding boxes that might be drawn around detection regions)
are kept on a separate layer, as opposed to drawing directly
onto the image array itself.
"""
# The following prevents a bunch of pylint no-member errors
# with the cv2 module.
# pylint: disable=E1101
import cv2
import numpy as np
import io
try:
import matplotlib.pyplot as plot
except ImportError:
print("Error importing matplotlib.")
print("Matplotlib integration will not work")
try:
import shapely.geometry as sg
except ImportError:
print("Error importing shapely.")
print("Shapely is required for annotating shapes (polygons) on images.")
print("Shapely is also used to determine if a crop is in bounds, etc.")
from .pv_exceptions import OutOfBoundsError, ImageAnnotationError
from .geometry import in_bounds, integer_bounds, Rect
class Image(object):
"""
A pyvision3 Image object contains the image data, an
annotations layer, and many convenient methods.
Supports 1 channel and 3 channel images.
"""
def __init__(self, source, *args, desc="Pyvision Image", **kwargs):
"""
The constructor wraps a cv2.imread(...) function,
passing in the args and kwargs appropriately, and
then allocating data for the annotations layer.
Parameters
----------
desc: string
Provide a short description of this image, that will be used
by default in window titles and other functions
source: string, file object, or cv2 image array
If string, this is the full path to the image file to load.
If file object, this is an open file handle from which to load
the image.
If ndarray, then we assume this is a cv2 image array which we will
just wrap.
args: variable
Other args will be passed through to cv2.imread, the first arg
should be the image source, like a file name. See the cv2 docs
on imread for more details.
kwargs: variable
Keyword args will be passed through to cv2.imread
Examples
--------
#loading from files
img = pv3.Image('mypic.jpg')
img2 = pv3.Image('mypic.jpg', cv2.IMREAD_GRAYSCALE)
#loading from a file handle...this example is silly, but
# this capability is good for example when loading from a file streamed
# over a network connection, or S3, etc.
with open('somepath/somefile.png', 'rb') as infile:
img3 = pv3.Image(infile)
#Wrapping of a numpy/cv2 ndarray
img4 = pv3.Image( np.zeros( (480,640), dtype='uint8' ) )
"""
self.desc = desc
# metadata dictionary can be used to pass arbitrary info with the image
self.metadata = {}
if isinstance(source, np.ndarray):
self.data = source
self.metadata["source"] = "np.ndarray"
elif type(source) == str:
self.data = cv2.imread(source, *args, **kwargs)
self.metadata["source"] = "file"
self.metadata["filename"] = source
else:
# assume a file object
buf = source.read()
x = np.fromstring(buf, dtype="uint8")
self.data = cv2.imdecode(x, cv2.IMREAD_UNCHANGED)
self.metadata["source"] = "file object or buffer"
self.height, self.width = self.data.shape[0:2]
self.size = (self.width, self.height)
self.nchannels = self.data.shape[2] if len(self.data.shape) == 3 else 1
# Annotation data is a separate BGR image array.
self.annotation_data = (
self.data.copy()
if self.nchannels == 3
else cv2.cvtColor(self.data, cv2.COLOR_GRAY2BGR)
)
def __str__(self):
txt = "Pyvision3 Image: {}".format(self.desc)
txt += "\nWidth: {}, Height: {}, Channels: {}, Depth: {}".format(
self.width, self.height, self.nchannels, self.data.dtype
)
return txt
def __repr__(self):
return str(self)
def __getitem__(self, slc):
return self.data[slc]
def as_grayscale(self, as_type="PV"):
"""
Parameters
----------
as_type: str in ("CV", "PV"), default is "PV"
Returns
-------
A copy of the image (data only, not annotations) as a single channel opencv numpy array,
or, if as_type is "PV", then a pyvision3 Image wrapped around the same.
"""
if self.nchannels == 3:
img_gray = cv2.cvtColor(self.data, cv2.COLOR_BGR2GRAY)
else:
img_gray = self.data.copy()
if as_type == "CV":
return img_gray
else:
return Image(img_gray)
def as_annotated(self, alpha=0.5, as_type="PV"):
"""
Provides an array which represents the merging of the image data
with the annotations layer.
Parameters
----------
alpha: float
Specify a number between 0.0 and 1.0
This is the alpha blend when merging the annotations layer onto the image data.
1.0 means that the annotation will be completely opaque and 0.0 completely transparent.
as_type: string
Specify either "CV" or "PV" (default) to indicate the return type of the annotated
image. If "CV", then an ndarray in the normal opencv format is returned.
If "PV", then a new pyvision3 image is returned with the annotations baked-in.
Returns
-------
A copy of the image array (possibly expanded to 3 channels if the
original image is single channel) with the color annotations baked in by
replacing those pixels in the image with the corresponding non-zero pixels
in the annotation data.
Return type is either an opencv ndarray or a pyvision3 image depending on as_type.
"""
# TODO: What if self.data is a floating point image and the annotations
# are uint8 BGR? We should probably call a normalizing routine of some
# sort that copies/converts self.data into a 3-channel BGR 8-bit image
if self.nchannels == 1:
tmp_img = cv2.cvtColor(self.data, cv2.COLOR_GRAY2BGR)
else:
tmp_img = self.data.copy()
# this works because the annotation_data was initialized as a copy of the
# source data. Annotations draw on this copy, and when we alpha-blend, those pixels
# that were not changed by an annotation will blend back to full intensity.
# i.e., if there were no annotations on pixel x, (1-alpha)*I(x) + (alpha)*A(x) = I(x)
# because A(x) == I(x) where not otherwise annotated.
tmp_img = cv2.addWeighted(
tmp_img, 1.0 - alpha, self.annotation_data, alpha, 0.0
)
if as_type == "PV":
return Image(tmp_img)
return tmp_img
def annotate_shape(
self, shape, color=(255, 0, 0), fill_color=None, *args, **kwargs
):
"""
Draws the specified shape on the annotation data layer.
Currently supports LineString, MultiLineString, LinearRing,
and Polygons.
Parameters
----------
shape: shapely.geometry shape object
color:
An RGB tuple indicating the color, red is (255,0,0)
fill_color:
The color used to fill a closed shape (polygon). None means
that there will be no fill (default).
*args and **kwargs are for optional line parameters that will
be passed onto cv2.line(...) which is used at the core for
drawing the line segments of the shape.
Note
----
OpenCV stores images in BGR not RGB order, thus the annotation
layer will be constructed this way. However, to make it easier for the
user, colors to be used are specified in the normal (r,g,b) tuple order,
and internally, we handle it.
"""
# TODO: annotate_shape should support all shapely geometries
# TODO: annotations should support alpha-channel fills for partial transparency
if isinstance(shape, sg.LinearRing) or isinstance(shape, sg.LineString):
self._draw_segments(shape, color, *args, **kwargs)
elif isinstance(shape, sg.MultiLineString):
for line_string in shape:
self._draw_segments(line_string, color, *args, **kwargs)
elif isinstance(shape, sg.Polygon):
if fill_color is not None:
# use cv2.fillPoly
c = self._fix_color_tuple(fill_color)
exterior = np.array(shape.exterior.coords, dtype="int")
interiors = [np.array(x.coords, dtype="int") for x in shape.interiors]
cv2.fillPoly(self.annotation_data, [exterior] + interiors, color=c)
# draw external ring of polygon
self._draw_segments(shape.exterior, color, *args, **kwargs)
# draw interior rings (holes) if any
for interior_ring in shape.interiors:
self._draw_segments(interior_ring, color, *args, **kwargs)
def annotate_point(self, point, color=(255, 0, 0)):
"""
Annotates a point by drawing a filled circle of radius-3 pix in the annotation
layer.
Parameters
----------
point: tuple (int: x, int: y) or shapely Point object
color: tuple (r,g,b)
"""
pt = (
(int(point.x), int(point.y))
if isinstance(point, sg.point.Point)
else tuple(point)
)
self.annotate_circle(pt, 3, color, thickness=-1)
def annotate_circle(self, ctr, radius, color=(255, 0, 0), *args, **kwargs):
"""
Draws a circle on the annotation layer
Parameters
----------
ctr: (int: x, int: y)
the center points of the circle
radius: int
the radius in pixels of the circle
color: (r,g,b)
*args and **kwargs will be passed onto cv2.circle function, and
can be used to control the thickness and line type. Note that a negative
thickness indicates that the circle will be filled.
"""
c = self._fix_color_tuple(color)
ctr = (int(ctr[0]), int(ctr[1]))
cv2.circle(self.annotation_data, ctr, radius, c, *args, **kwargs)
def annotate_line(self, pt1, pt2, color, *args, **kwargs):
"""
Draws a line segment between two points on the annotation layer.
Parameters
----------
pt1, pt2: tuples, (int: x, int: y)
The start and end points of the line segment
color: tuple (r,g,b)
*args and **kwargs will be passed onto cv2.line, which can | |
# rwk misc module
import scipy, numpy
import time
import os, glob, re, sys
from numpy import *
import copy
#from IPython.core.debugger import Pdb
#mytrace=Pdb().set_trace
import cPickle
def get_date_str():
    """Return today's date formatted as MM_DD_YY."""
    return time.strftime('%m_%d_%y')
def clean_list(listin):
    """Return a copy of *listin* with falsy entries stripped from both ends.

    Interior falsy entries are preserved.  Robustness fix: an empty or
    all-falsy input now returns [] instead of raising IndexError from
    indexing an empty list.
    """
    listout = copy.copy(listin)
    while listout and not listout[0]:
        listout.pop(0)
    while listout and not listout[-1]:
        listout.pop()
    return listout
def clean_list_regexp(listin, pat='^[ ,]*$'):
    """Return a copy of *listin* with entries matching regex *pat* stripped
    from both ends (default: entries of only spaces/commas).

    Robustness fix: empty or all-matching input returns [] instead of
    raising IndexError.
    """
    listout = copy.copy(listin)
    p = re.compile(pat)
    while listout and p.match(listout[0]):
        listout.pop(0)
    while listout and p.match(listout[-1]):
        listout.pop()
    return listout
def RegExpPop(listin, pat, returnq=False, multiline=False):
    """Search *listin* for regex *pat* and pop the first match.

    Non-multiline: scans item by item, removes the first matching item from
    the caller's list in place, and returns it (or the match object when
    *returnq* is true).

    Multiline: joins the list with newlines and searches with DOTALL,
    returning the matched text.  NOTE(review): in this branch the rebuilt
    list is bound to a local name only, so the caller's list is NOT
    modified -- confirm whether that asymmetry is intended.

    Returns None when nothing matches.  (Bug fix: the original left q/out
    unassigned on several no-match paths, raising NameError.)
    """
    q = None
    out = None
    if multiline:
        mystr = '\n'.join(listin)
        p = re.compile(pat, flags=re.DOTALL)
        q = p.search(mystr)
        if q:
            out = q.group()
            newstr = p.sub('', mystr)
            listin = newstr.split('\n')
    else:
        p = re.compile(pat)
        for n, item in enumerate(listin):
            q = p.search(item)
            if q:
                out = listin.pop(n)
                break
    if returnq:
        return q
    else:
        return out
def PrintToScreen(listin, globals):
    """Print 'name=value' for each expression in *listin*, evaluating each
    against the caller-supplied *globals* mapping.  A bare string is treated
    as a single-item list.
    """
    expressions = [listin] if type(listin) == str else listin
    for expression in expressions:
        value = eval(expression, globals)
        print(expression + '=%s' % value)
def SavePickle(mydict, filepath, protocol=2):
    """Dump dictionary *mydict* to Pickle file *filepath* using cPickle,
    protocol=2 by default.

    Uses a with-statement so the handle is closed even if dump() raises
    (the original leaked the handle on error).
    """
    with open(filepath, 'wb') as mypkl:
        cPickle.dump(mydict, mypkl, protocol=protocol)
def LoadPickle(filepath):
    """Load and return the pickled object stored at *filepath*.

    Uses a with-statement so the handle is closed even if load() raises.
    """
    with open(filepath, 'rb') as mypkl:
        return cPickle.load(mypkl)
class object_that_saves(object):
    """Mixin for saving and loading objects via SavePickle/LoadPickle.

    The object must define *saveattrs*: a list of attribute-name strings
    identifying the attributes to save and/or load.
    """
    def build_dict(self):
        """Return {attr: value} for every attribute named in saveattrs."""
        mydict = {}
        for attr in self.saveattrs:
            mydict[attr] = getattr(self, attr)
        return mydict
    def set_attrs(self, mydict):
        """Set each (attr, value) pair onto self.  Uses items() instead of
        the Python-2-only iteritems() so the class also works on Python 3."""
        for attr, val in mydict.items():
            setattr(self, attr, val)
    def save(self, filepath, protocol=2):
        """Pickle the saveattrs snapshot to *filepath*."""
        SavePickle(self.build_dict(), filepath, protocol=protocol)
    def load(self, filepath):
        """Restore attributes from a pickle previously written by save()."""
        self.set_attrs(LoadPickle(filepath))
class gui_that_saves(object_that_saves):
    """Supplements object_that_saves for GUIs, where each saved parameter
    has its own getter and setter method rather than a plain attribute."""
    def _initialize_saving(self):
        """Create the empty getter/setter registries.  Because this is a
        mixin with no true __init__, call this once before append_item()."""
        self.get_dict = {}
        self.set_dict = {}
        self.saveattrs = []
    def append_item(self, key, get_method, set_method):
        """Register a saved property with how to read and write it."""
        self.get_dict[key] = get_method
        self.set_dict[key] = set_method
        self.saveattrs.append(key)
    def build_dict(self):
        """Collect current values by invoking each registered getter."""
        mydict = {}
        for attr in self.saveattrs:
            get_method = self.get_dict[attr]
            mydict[attr] = get_method()
        return mydict
    def set_attrs(self, mydict):
        """Push loaded values back through the registered setters.  Uses
        items() instead of the Python-2-only iteritems()."""
        for attr, val in mydict.items():
            set_method = self.set_dict[attr]
            set_method(val)
def load_from_pickle(filename, key):
    """Convenience: unpickle *filename* and return the entry stored under *key*."""
    return LoadPickle(filename)[key]
def myglob(pattern, folder=None):
    """Glob for *pattern*.

    When *folder* is given the pattern is resolved inside it and only the
    bare file names are returned; otherwise the raw glob results (with
    whatever path prefix *pattern* carried) are returned.
    """
    if folder is None:
        return glob.glob(pattern)
    matches = glob.glob(os.path.join(folder, pattern))
    return [os.path.split(match)[1] for match in matches]
class rwkstr(str):
    """str subclass with extra search/replace helpers.

    NOTE(review): Python-2 era code -- the __getslice__ override is never
    invoked by Python 3, so slices there return plain str.
    """
    def findall(self, pattern):
        """Return the index of every occurrence of *pattern* in self."""
        inds=[]
        remainingstr=copy.deepcopy(self)
        donestr=''
        while remainingstr.find(pattern)>-1:
            nextind=remainingstr.find(pattern)
            # absolute index = offset within the remainder + length consumed so far
            absind=nextind+len(donestr)
            inds.append(absind)
            donestr+=remainingstr[0:nextind+1]
            remainingstr=remainingstr[nextind+1:]
        return inds
    def replace(self,old,new,startind=0,maxreplace=-1):
        """Like str.replace, but returns an rwkstr and can leave the first
        *startind* characters untouched."""
#        print('in rwk replace')
        if old==new:
            return rwkstr(self)
        else:
            if startind>0:
                prestr=self[0:startind]
                rest=self[startind:]
                # recurse into this method on the tail only
                outstr=rest.replace(old,new,maxreplace=maxreplace)
                outstr=prestr+outstr
                return rwkstr(outstr)
            else:
                return rwkstr(str.replace(self,old,new,maxreplace))
    def __getslice__(self,si,ei):
        """Preserve the rwkstr type when slicing (Python 2 only)."""
#        print('in rwkstr getslice')
        return rwkstr(str.__getslice__(self,si,ei))
    def afterlast(self,pattern):
        """Split at the LAST occurrence of *pattern*: returns
        (before, after), or (self, '') when *pattern* is absent."""
        ind=self.rfind(pattern)
        if ind>-1:
            firstpart=self[0:ind]
            lastpart=self[ind+len(pattern):]
            return firstpart,lastpart
        else:
            return self, ''
    def beforefirst(self,pattern,startind=0):
        """Split at the FIRST occurrence of *pattern* at/after *startind*:
        returns (before, after) wrapped as rwkstr, or (self, '') when absent.
        NOTE(review): unlike afterlast, the match branch wraps in rwkstr but
        the no-match branch does not -- confirm if intentional."""
        ind=self.find(pattern,startind)
        if ind>-1:
            firstpart=self[0:ind]
            lastpart=self[ind+len(pattern):]
            return rwkstr(firstpart), rwkstr(lastpart)
        else:
            return self,''
    def contains(self,substr):
        """Return True when *substr* occurs anywhere in self."""
        return self.find(substr)!=-1
class symstr(rwkstr):
    """String subclass that overloads arithmetic operators to build symbolic
    expression strings, adding parentheses where precedence requires."""
    def containsoperators(self,oplist=['*','+','-','/','**']):
        """Return True if any operator from *oplist* appears in the string.

        Bug fix: the original loop carried a stray break (with a for-else),
        which could stop after testing only the first operator and fall
        through returning None; this version checks every operator and
        always returns a bool.
        """
        for co in oplist:
            if self.find(co)>-1:
                return True
        return False
    def __addparen__(self,oplist=['*','+','-','/','**']):
        # Parenthesize only when the string already contains an operator.
        if self.containsoperators(oplist):
            return '('+self.__str__()+')'
        else:
            return self.__str__()
    def __add__(self,other):
        return symstr(self.__str__()+'+'+str(other))
    def __sub__(self,other):
        myops=['+','-']
        if not type(other)==symstr:
            other=symstr(other)
        return symstr(self.__str__()+'-'+other.__addparen__(myops))
    def __mul__(self,other):
        myops=['+','-']
        if not type(other)==symstr:
            other=symstr(other)
        return symstr(self.__addparen__(myops)+'*'+other.__addparen__(myops))
    def __rmul__(self,other):
        myops=['+','-']
        if not type(other)==symstr:
            other=symstr(other)
        return symstr(other.__addparen__(myops)+'*'+self.__addparen__(myops))
    def __div__(self,other):
        # Python 2 division hook; __truediv__ below delegates here.
        myops=['+','-']
        if not type(other)==symstr:
            other=symstr(other)
        return symstr(self.__addparen__(myops)+'/'+other.__addparen__())
    def __rdiv__(self,other):
        return symstr(other).__div__(self)
    def __rtruediv__(self,other):
        return self.__rdiv__(other)
    def __truediv__(self,other):
        return self.__div__(other)
    def __pow__(self,other):
        if not type(other)==symstr:
            other=symstr(other)
        return symstr(self.__addparen__()+'**'+other.__addparen__())
    def __neg__(self):
        return symstr('-'+self)
class dictobject:
    """Lightweight object whose attributes are set from keyword arguments.

    Example: ``o = dictobject(a=1, b=2)`` gives ``o.a == 1`` and ``o.b == 2``.
    """
    def __init__(self, **kwargs):
        # BUGFIX: dict.iteritems() does not exist in Python 3; items()
        # behaves identically here under both Python 2 and 3.
        for key, value in kwargs.items():
            setattr(self, key, value)
def SortLists(sortbylist, otherlists, reverse=False):
    """Sort several parallel lists by the values of one of them.

    ``sortbylist`` is the column/list to sort by; ``otherlists`` is a list
    of the other columns to reorder in lockstep.  ``reverse`` has the usual
    sort meaning.  Returns ``(sorted_key_list, sorted_other_1, ...)``.

    BUGFIX: the previous implementation looked each sorted item up with
    ``list.index()``, which returns the *first* matching index, so duplicate
    keys caused the same row to be emitted repeatedly (and the lookup made
    the whole routine O(n**2)).  Sorting index positions keeps every row
    correctly aligned and is O(n log n).
    """
    order = sorted(range(len(sortbylist)), key=lambda i: sortbylist[i],
                   reverse=reverse)
    newlist = [sortbylist[i] for i in order]
    bigoutlist = []
    for otherlist in otherlists:
        bigoutlist.append([otherlist[i] for i in order])
    return (newlist,) + tuple(bigoutlist)
def transposed(lists):
    """Transpose a list of rows into a list of columns.

    BUGFIX: the old ``map(lambda *row: list(row), *lists)`` returns a lazy
    iterator under Python 3, inconsistent with the ``[]`` returned for empty
    input; a comprehension over ``zip`` always returns a list.
    NOTE(review): Python 2's multi-argument ``map`` padded short rows with
    None, whereas ``zip`` truncates to the shortest row — confirm all
    callers pass equal-length rows.
    """
    if not lists:
        return []
    return [list(row) for row in zip(*lists)]
def rwkWriteArray(file, array, append=0):
    """Write (or append, when ``append`` is truthy) the lines in ``array``
    to ``file``.

    Each element of ``array`` is written verbatim; no newlines are added.
    Uses a ``with`` block so the handle is closed even if a write fails
    (the old version leaked the handle on error).
    """
    mode = 'a' if append else 'w'
    with open(file, mode) as f:
        f.writelines(array)
def mydirfilter(pathin, ignoredirs):
    """Return True when the final path component of ``pathin`` (assumed to
    be a directory path) is not listed in ``ignoredirs``."""
    leaf = os.path.split(pathin)[1]
    return leaf not in ignoredirs
def rwkFileSearchRecursive(pattern,root,outpath="",ignoredirs=[]):
    """Starting in root, walk down through the
    file structure searching for pattern. The
    results are returned as a list of full paths.
    If outpath is specified, the output is also
    written to it as an ASCII text file."""
    # NOTE(review): ignoredirs is only honored for the first level of
    # sub-directories below root; deeper directories are always searched.
    topdirs=glob.glob(os.path.join(root,'*'))
    topdirs=[item for item in topdirs if os.path.isdir(item)]
    topdirs=[item for item in topdirs if mydirfilter(item,ignoredirs)]
    allpaths=[]
    # Matches directly inside root itself.
    rootpat=os.path.join(root,pattern)
    rootfiles=glob.glob(rootpat)
    allpaths.extend(rootfiles)
    for curroot in topdirs:
        # Matches directly inside each (non-ignored) top-level directory.
        rootpat=os.path.join(curroot,pattern)
        rootfiles=glob.glob(rootpat)
        allpaths.extend(rootfiles)
        # os.walk visits every deeper directory exactly once; globbing in
        # each child dir covers all remaining levels.  Note `root` here
        # shadows the function argument, which is no longer needed.
        for root, dirs, files in os.walk(curroot):
            for name in dirs:
                curpath=os.path.join(root,name)
                print('searching:'+curpath)
                curpat=os.path.join(curpath,pattern)
                curfiles=glob.glob(curpat)
                allpaths.extend(curfiles)
    if outpath:
        rwkWriteArray(outpath,allpaths)
    return allpaths
def rwkFileSearch(pattern, searchdir="", outfile="", append=0):
    """Search for ``pattern`` in ``searchdir`` without recursing (the
    directory tree is not walked).

    Returns the matches as a list of paths — each is ``searchdir`` joined
    with the matched name, since the glob pattern is rooted there (the old
    docstring incorrectly claimed bare filenames were returned).  If
    ``outfile`` is given, the results are also written there one per line,
    appending when ``append`` is truthy.
    """
    curpat = os.path.join(searchdir, pattern)
    texfiles = glob.glob(curpat)
    if len(outfile) > 0:
        filestoprint = [curfile + '\n' for curfile in texfiles]
        rwkWriteArray(outfile, filestoprint, append)
    return texfiles
def mycomp(strin, pattern, exact=False):
    """Check ``pattern`` against ``strin``.

    With ``exact`` True, test equality; otherwise test substring
    containment.  Always returns a bool.
    """
    if exact:
        return strin == pattern
    return pattern in strin
def searchlist(listin, str2find, exact=False, casesense=False):
    """Search for ``str2find`` in ``listin``.

    Returns the first matching index, or -1 when no item matches.  With
    ``exact`` False a containment match suffices; otherwise the item must
    equal ``str2find``.  Unless ``casesense`` is set, both sides are
    lowercased before comparison.
    """
    target = str2find if casesense else str2find.lower()
    for idx, item in enumerate(listin):
        candidate = item if casesense else item.lower()
        if mycomp(candidate, target, exact=exact):
            return idx
    return -1
def my_import(name):
    """Import a module given a possibly path-like name.

    Any directory prefix in ``name`` is appended to ``sys.path`` before
    importing; dotted module names are resolved down to the leaf submodule.
    """
    folder, modname = os.path.split(name)
    if folder and folder not in sys.path:
        sys.path.append(folder)
    mod = __import__(modname)
    for comp in modname.split('.')[1:]:
        mod = getattr(mod, comp)
    return mod
def reverse(x):
    """Recursively reverse a sequence (list, string, or numeric array).

    Anything exposing ``.tolist()`` (e.g. a numpy/scipy array) is converted,
    reversed, and re-wrapped as an array.

    BUGFIX: the midpoint must use floor division — ``len(x)/2`` yields a
    float under Python 3 (or ``from __future__ import division``), which is
    not a valid slice index.  Also uses ``numpy.array`` directly since the
    ``scipy.array`` top-level alias was removed from modern SciPy.
    """
    if hasattr(x, 'tolist'):
        return numpy.array(reverse(x.tolist()))
    l2 = len(x) // 2
    if l2:
        return reverse(x[l2:]) + reverse(x[:l2])
    else:
        return x
def null(A, eps=1e-10):
    """Return an orthonormal basis (as columns) for the right null space
    of ``A``.

    The basis consists of the conjugated right-singular vectors whose
    singular values are <= ``eps``.  Uses numpy helpers directly because
    the top-level scipy aliases (``scipy.compress`` etc.) were removed
    from modern SciPy releases.
    NOTE(review): for wide matrices the rows of ``vh`` beyond ``len(s)``
    are never inspected (``compress`` tolerates the shorter mask), so some
    null vectors can be missed — this matches the original behavior.
    """
    u, s, vh = scipy.linalg.svd(A)
    null_mask = (s <= eps)
    null_space = numpy.compress(null_mask, vh, axis=0)
    return numpy.transpose(numpy.conj(null_space))
def colwise(matin, makecopy=1):
    """Return ``matin`` as a 2-D array oriented column-wise (rows >= cols).

    A deep copy of the input is made unless ``makecopy`` is falsy.  Uses
    numpy directly since scipy's top-level numpy aliases
    (``scipy.atleast_2d``, ``scipy.shape``, ``scipy.transpose``) were
    removed from modern SciPy releases.
    """
    tempmat = copy.deepcopy(matin) if makecopy else matin
    matout = numpy.atleast_2d(tempmat)
    nr, nc = matout.shape
    if nr < nc:
        matout = numpy.transpose(matout)
    return matout
def rowwise(matin, makecopy=1):
    """Return ``matin`` as a 2-D array oriented row-wise (cols >= rows).

    A deep copy of the input is made unless ``makecopy`` is falsy.  Uses
    numpy directly since scipy's top-level numpy aliases were removed from
    modern SciPy releases.
    """
    tempmat = copy.deepcopy(matin) if makecopy else matin
    matout = numpy.atleast_2d(tempmat)
    nr, nc = matout.shape
    if nr > nc:
        matout = numpy.transpose(matout)
    return matout
#def __printent(ent, eps=1e-14,bigthresh=10000,littlethresh=1e-5,width=8):
# """Assumes only real input and is meant to be called by other functions like printent."""
# if abs(ent)>bigthresh or ans(ent)<littlethresh:
# fmt='%0.'+str(width-4)+'e'
# else:
# fmt='%'+str(width)+'g'
# if eps is not None and abs(ent)<eps:
# return '0'
# else:
# return fmt%ent
#def PrettyMat(mat,eps=1e-14):
# imat=imag(mat)
# rmat=real(mat)
# if rmat.any() and imat.any():
# width=6
# else:
# width=8
# for row in mat:
# rowstr='['
# for ent in row:
# if imag(ent):
# rstr=__printent(real(ent),width=width)
def cleannum(value, eps=1e-14):
    """Zero out negligible real/imaginary components of ``value``.

    Returns 0 when the magnitude is below ``eps``; a pure-imaginary or
    pure-real number when one component is negligible; otherwise the value
    unchanged.

    BUGFIXES: the original fell off the end (returning None) for complex
    numbers with two significant components, and called a bare ``imag()``
    that was never imported (NameError) — ``numpy.imag`` is used instead.
    """
    if numpy.abs(value) < eps:
        return 0
    elif abs(numpy.real(value)) < eps:
        return 1.0j * numpy.imag(value)
    elif abs(numpy.imag(value)) < eps:
        return numpy.real(value)
    return value
def prettyvect(vect, fmt='%0.5g', eps=1e-14):
    """Format a vector as '[a, b, c]', zeroing negligible components of
    each item via ``cleannum`` before applying ``fmt``."""
    pieces = [fmt % cleannum(item, eps) for item in vect]
    return '[' + ', '.join(pieces) + ']'
def prettymat(mat, fmt='%0.5g', eps=1e-14):
    """Print a 2-D array as nested-list text, one '[...],' row per line.

    Real parts use ``fmt``; imaginary parts reuse it with a forced leading
    sign and trailing 'j'.  Components with magnitude <= ``eps`` print as
    '0' (real) or are dropped (imaginary).  The text is printed, not
    returned.

    BUGFIX: the old code prepended '%' to ``fmt`` unconditionally, so the
    default '%0.5g' became '%%0.5g' and every entry printed as the literal
    text '%0.5g'; the prefix is now added only when missing.  Also uses
    numpy since scipy's top-level aliases were removed, and drops an unused
    column-count variable.
    """
    if fmt[0] == '%':
        ifmt = '%+' + fmt[1:]
    else:
        ifmt = '%+' + fmt
        fmt = '%' + fmt
    outstr = '['
    for currow in mat.tolist():
        rowstr = '['
        for x, ent in enumerate(currow):
            if abs(numpy.real(ent)) > eps:
                realstr = fmt % numpy.real(ent)
            else:
                realstr = '0'
            if abs(numpy.imag(ent)) > eps:
                imagstr = ifmt % numpy.imag(ent) + 'j'
            else:
                imagstr = ''
            rowstr += realstr + imagstr
            if x < (len(currow) - 1):
                rowstr += ', '
            if x == (len(currow) - 1):
                rowstr += '],\n'
        outstr += rowstr
    if outstr[-1] == '\n':
        outstr = outstr[0:-1]
    outstr += ']'
    print(outstr)
def RREFscaled(mymat):
# Pdb().set_trace()
scalevect=scipy.amax(abs(mymat),1)
scaledrows=[]
for sf,row in zip(scalevect,mymat):
row=row/sf
scaledrows.append(row)
scaledmat=scipy.vstack(scaledrows)
# scaledmat=mymat
nc=scipy.shape(scaledmat)[1]
nr=scipy.shape(scaledmat)[0]
for j in range(nr-1):
# print('=====================')
# print('j='+str(j))
| |
and not self.stateful:
return []
updates = self._unfiltered_updates
# `updates` might contain irrelevant updates, so it needs to be filtered
# with respect to inputs the model has been called on.
relevant_inputs = []
for i in range(0, len(self._inbound_nodes)):
inputs = self.get_input_at(i)
if isinstance(inputs, list):
relevant_inputs += inputs
else:
relevant_inputs.append(inputs)
if not relevant_inputs:
return updates
reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, updates)
relevant_conditional_updates = [x for x in updates if x in reachable]
unconditional_updates = [
x for x in updates if x._unconditional_update] # pylint: disable=protected-access
# A layer could be used multiple times in a nested structure,
# so the updates list must be de-duped.
return list(set(
relevant_conditional_updates + unconditional_updates + self._updates))
@property
def losses(self):
"""Retrieves the network's losses.
Will only include losses that are either
unconditional, or conditional on inputs to this model
(e.g. will not include losses that depend on tensors
that aren't inputs to this model).
When the network has no registered inputs, all losses are returned.
Returns:
A list of loss tensors.
"""
losses = self._unfiltered_losses
if context.executing_eagerly():
return losses
relevant_inputs = []
for i in range(0, len(self._inbound_nodes)):
inputs = self.get_input_at(i)
if isinstance(inputs, list):
relevant_inputs += inputs
else:
relevant_inputs.append(inputs)
if not relevant_inputs:
return losses
reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, losses)
relevant_conditional_losses = [x for x in losses if x in reachable]
unconditional_losses = [
x for x in losses if x._unconditional_loss] # pylint: disable=protected-access
return list(set(
relevant_conditional_losses + unconditional_losses + self._losses))
@property
def trainable_weights(self):
return checkpointable_layer_utils.gather_trainable_weights(
trainable=self.trainable,
sub_layers=self.layers,
extra_variables=self._extra_variables)
@property
def non_trainable_weights(self):
return checkpointable_layer_utils.gather_non_trainable_weights(
trainable=self.trainable,
sub_layers=self.layers,
extra_variables=self._extra_variables)
@property
def input_spec(self):
"""Gets the network's input specs.
Returns:
A list of `InputSpec` instances (one per input to the model)
or a single instance if the model has only one input.
"""
# If not a graph network, can't assume anything.
if not self._is_graph_network:
return None
specs = []
for layer in self._input_layers:
if layer.input_spec is None:
specs.append(None)
else:
if not isinstance(layer.input_spec, list):
raise TypeError('Layer ' + layer.name +
' has an input_spec attribute that '
'is not a list. We expect a list. '
'Found input_spec = ' + str(layer.input_spec))
specs += layer.input_spec
if len(specs) == 1:
return specs[0]
return specs
  def build(self, input_shape):
    """Builds the model based on input shapes received.

    This is to be used for subclassed models, which do not know at
    instantiation time what their inputs look like.

    Args:
      input_shape: Single tuple, TensorShape, or list of shapes, where
        shapes are tuples, integers, or TensorShapes.

    Raises:
      ValueError:
        1. In case of invalid user-provided data (not of type tuple,
           list, or TensorShape).
        2. If the model requires call arguments that are agnostic
           to the input shapes (positional or kwarg in call signature).
        3. If not all layers were properly built.
        4. If float type inputs are not supported within the layers.
      In each of these cases, the user should build their model by calling
      it on real tensor data.
    """
    # Graph networks already know their topology; nothing to do.
    if self._is_graph_network:
      self.built = True
      return
    # If subclass network
    if input_shape is None:
      raise ValueError('Input shape must be defined when calling build on a '
                       'model subclass network.')
    valid_types = (tuple, list, tensor_shape.TensorShape)
    if not isinstance(input_shape, valid_types):
      raise ValueError('Specified input shape is not one of the valid types. '
                       'Please specify a batch input shape of type tuple or '
                       'list of input shapes. User provided '
                       'input type: {}'.format(type(input_shape)))
    if input_shape and not self.inputs:
      # Build by calling the model on dummy data shaped like input_shape.
      if isinstance(input_shape, list):
        # List of input shapes
        x = [base_layer.generate_dummy_data_from_shape(shape)
             for shape in input_shape]
      else:
        x = base_layer.generate_dummy_data_from_shape(input_shape)
      kwargs = {}
      num_call_args = len(tf_inspect.getargspec(self.call).args)
      if self._expects_training_arg and num_call_args == 3:
        # Has call signature of call(self, input, training)
        kwargs['training'] = False
      elif num_call_args > 2:
        # Has invalid call signature of call(self, input, *args, **kwargs)
        raise ValueError('Currently, you cannot build your model if it has '
                         'positional or keyword arguments that are not '
                         'inputs to the model, but are required for its '
                         '`call` method. Instead, in order to instantiate '
                         'and build your model, `call` your model on real '
                         'tensor data with all expected call arguments.')
      try:
        self.call(x, **kwargs)
      except (errors.InvalidArgumentError, TypeError):
        raise ValueError('You cannot build your model by calling `build` '
                         'if your layers do not support float type inputs. '
                         'Instead, in order to instantiate and build your '
                         'model, `call` your model on real tensor data (of '
                         'the correct dtype).')
      if self._layers:
        self._track_layers(self._layers)
    # Every sub-layer must have been built by the dummy call above.
    if self.layers:
      for layer in self.layers:
        if not layer.built:
          raise ValueError('Layer: {} was not built in your model. Calling '
                           '`build` manually on a subclassed model is only '
                           'allowed for models with a static topology. '
                           'In this case, you can build your model by '
                           'calling it on real tensor data.'.format(layer))
    self.built = True
  def call(self, inputs, training=None, mask=None):
    """Calls the model on new inputs.

    In this case `call` just reapplies
    all ops in the graph to the new inputs
    (e.g. build a new computational graph from the provided inputs).

    Arguments:
        inputs: A tensor or list of tensors.
        training: Boolean or boolean scalar tensor, indicating whether to
          run the `Network` in training mode or inference mode.
        mask: A mask or list of masks. A mask can be
            either a tensor or None (no mask).

    Returns:
        A tensor if there is a single output, or
        a list of tensors if there are more than one outputs.
    """
    inputs = nest.flatten(inputs)
    if mask is None:
      # One None mask per flattened input tensor.
      masks = [None for _ in range(len(inputs))]
    else:
      masks = nest.flatten(mask)
    if not context.executing_eagerly():
      # Try to retrieve cached outputs if the layer has already been called
      # on these exact inputs.
      cache_key = (generic_utils.object_list_uid(inputs)
                   + '_' + generic_utils.object_list_uid(masks))
      if cache_key in self._output_tensor_cache:
        # Cache hit.
        return self._output_tensor_cache[cache_key]
    # Actually apply the network graph to the new inputs.
    outputs, _ = self._run_internal_graph(inputs,
                                          training=training,
                                          mask=masks)
    return outputs
  def compute_output_shape(self, input_shape):
    # Infer output shapes by replaying the network graph shape-by-shape.
    # Subclassed (non-graph) networks cannot do this statically.
    if not self._is_graph_network:
      if context.executing_eagerly():
        return super(Network, self).compute_output_shape(input_shape)
      raise NotImplementedError
    # Normalize input_shape into a list of tuples (or None entries).
    if isinstance(input_shape, list):
      input_shapes = []
      for shape in input_shape:
        if shape is not None:
          input_shapes.append(tuple(tensor_shape.TensorShape(shape).as_list()))
        else:
          input_shapes.append(None)
    else:
      if input_shape is not None:
        input_shapes = [tuple(tensor_shape.TensorShape(input_shape).as_list())]
      else:
        input_shapes = [None]
    if len(input_shapes) != len(self._input_layers):
      raise ValueError('Invalid input_shape argument ' + str(input_shape) +
                       ': model has ' + str(len(self._input_layers)) +
                       ' tensor inputs.')
    cache_key = generic_utils.object_list_uid(input_shapes)
    if cache_key not in self._output_shape_cache:
      # Cache miss. We have to run the network graph manually (recursive calls
      # to `compute_output_shape`).
      # Keys are '<layer name>_<node index>_<tensor index>'.
      layers_to_output_shapes = {}
      for i in range(len(input_shapes)):
        layer = self._input_layers[i]
        input_shape = input_shapes[i]
        # It's an input layer: then `compute_output_shape` is identity,
        # and there is only one node and one tensor output.
        shape_key = layer.name + '_0_0'
        layers_to_output_shapes[shape_key] = input_shape
      depth_keys = list(self._nodes_by_depth.keys())
      depth_keys.sort(reverse=True)
      # Iterate over nodes, by depth level.
      if len(depth_keys) > 1:
        for depth in depth_keys:
          nodes = self._nodes_by_depth[depth]
          for node in nodes:
            # This is always a single layer, never a list.
            layer = node.outbound_layer
            if layer in self._input_layers:
              # We've already covered the input layers
              # a few lines above.
              continue
            # Potentially redundant list,
            # same size as node.input_tensors.
            input_shapes = []
            for j in range(len(node.inbound_layers)):
              inbound_layer = node.inbound_layers[j]
              node_index = node.node_indices[j]
              tensor_index = node.tensor_indices[j]
              shape_key = inbound_layer.name + '_%s_%s' % (node_index,
                                                           tensor_index)
              input_shape = layers_to_output_shapes[shape_key]
              input_shapes.append(input_shape)
            if len(input_shapes) == 1:
              output_shape = layer.compute_output_shape(input_shapes[0])
            else:
              output_shape = layer.compute_output_shape(input_shapes)
            if isinstance(output_shape, list):
              output_shapes = [
                  tuple(tensor_shape.TensorShape(shape).as_list())
                  for shape in output_shape
              ]
            else:
              output_shapes = [
                  tuple(tensor_shape.TensorShape(output_shape).as_list())
              ]
            node_index = layer._inbound_nodes.index(node)  # pylint: disable=protected-access
            for j in range(len(output_shapes)):
              shape_key = layer.name + '_%s_%s' % (node_index, j)
              layers_to_output_shapes[shape_key] = output_shapes[j]
      # Read final output shapes from layers_to_output_shapes.
      output_shapes = []
      for i in range(len(self._output_layers)):
        layer, node_index, tensor_index = self._output_coordinates[i]
        shape_key = layer.name + '_%s_%s' % (node_index, tensor_index)
        output_shapes.append(layers_to_output_shapes[shape_key])
      # Store in cache.
      self._output_shape_cache[cache_key] = output_shapes
    else:
      # Cache hit.
      output_shapes = self._output_shape_cache[cache_key]
    if isinstance(output_shapes, list):
      if len(output_shapes) == 1:
        return tensor_shape.TensorShape(output_shapes[0])
      else:
        return [tensor_shape.TensorShape(shape) for shape in output_shapes]
    else:
      return tensor_shape.TensorShape(output_shapes)
def _run_internal_graph(self, inputs, training=None, mask=None):
"""Computes output | |
# repository: grzracz/MindWave-Sorting-Hat
import socket
import threading
import pygame
import random
import winsound
import math
import datetime
import os
from queue import Queue
from ctypes import pointer, POINTER, cast, c_int, c_float
# Local socket the headset data is read from (port 13854 — presumably the
# ThinkGear Connector service; confirm against the headset setup).
TCP_IP = '127.0.0.1'
TCP_PORT = 13854
# Bytes read per socket recv() call.
BUFFER_SIZE = 1024
# Window size used by the LastInstances rolling averages.
INSTANCES_NUMBER = 4
# Display resolution in pixels.
WIDTH = 1920
HEIGHT = 1080
class LastInstances:
    """Fixed-size ring buffer of readings with a rolling window mean and a
    lifetime (incremental) mean."""

    def __init__(self, number_of_instances):
        # Window pre-filled with zeros; last == -1 means "no data yet".
        self.instances = [0] * number_of_instances
        self.last = -1
        self.amount = number_of_instances
        self.average = 0
        self.total_average = 0
        self.total_amount = 0

    def add(self, value):
        """Record a reading, updating both the window and lifetime means."""
        self.total_amount += 1
        # Incremental running mean over every value ever added.
        self.total_average += (value - self.total_average) / self.total_amount
        self.last = (self.last + 1) % self.amount
        self.instances[self.last] = value
        # Window mean; unfilled slots still count as zero, as before.
        self.average = sum(self.instances) / self.amount

    def get_last(self):
        """Return the most recent reading, or -1 if nothing was added."""
        return -1 if self.last == -1 else self.instances[self.last]

    def reset(self):
        """Clear all readings and statistics back to the initial state."""
        self.last = -1
        self.average = 0
        self.total_average = 0
        self.total_amount = 0
        for slot in range(self.amount):
            self.instances[slot] = 0
class MindWaveClient:
    # Connects to the local MindWave TCP socket and buffers raw packets
    # (hex-encoded, uppercased) on a background daemon thread.
    def __init__(self):
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((TCP_IP, TCP_PORT))
        # Bounded queue: at most 5 unread packets are kept.
        self.buffer = Queue(5)
        thread = threading.Thread(target=self.run)
        thread.daemon = True
        thread.start()
    def run(self):
        # Reader loop: receive raw bytes and store them hex-encoded.
        while True:
            mindwave_data = self.s.recv(BUFFER_SIZE).hex().upper()
            if mindwave_data and not self.buffer.full():
                if len(mindwave_data) < 50:
                    # Short reads occur while the headset is still syncing.
                    print("Syncing...")
                else:
                    self.buffer.put(mindwave_data)
            elif mindwave_data:
                # Queue full: the packet is dropped (only printed).
                print("Buffer is full:", mindwave_data)
    def get(self):
        # Return the next buffered packet, or None when empty.
        if not self.buffer.empty():
            return self.buffer.get()
        else:
            return None
    def empty(self):
        return self.buffer.empty()
    def reset(self):
        # Drain any unread packets.
        while not self.buffer.empty():
            self.buffer.get()
def get_next_byte(string, last_byte):
    """Return the next two-hex-digit byte of ``string`` starting at index
    ``last_byte``, together with the index just past it.

    At end of input returns ``("", 0)``, resetting the cursor for the next
    packet.  BUGFIX: the old code compared the slice with ``is ""``, which
    tests object identity rather than equality — it only worked via the
    CPython string-interning accident and is a SyntaxWarning on modern
    Python; ``==`` is used instead.
    """
    chunk = string[last_byte:last_byte + 2]
    if chunk == "":
        return "", 0
    return chunk, last_byte + 2
def byte_to_float(string):
    """Decrement the leading hex digit by one, then reinterpret the whole
    hex string as a 32-bit float via convert().

    NOTE(review): int(string[0]) assumes the first character is a decimal
    digit; a leading 'A'-'F' would raise ValueError — confirm the upstream
    packet format guarantees this.
    """
    adjusted = str(int(string[0]) - 1) + string[1:]
    return convert(adjusted)
def convert(_s):
    """Reinterpret the bits of a hexadecimal string as a 32-bit float.

    The hex string is parsed as an integer, stored in a C int, and the same
    memory is read back through a float pointer (native endianness).
    """
    raw = c_int(int(_s, 16))
    float_ptr = cast(pointer(raw), POINTER(c_float))
    return float_ptr.contents.value
class MindWaveParser:
    # Parses the hex packet stream from MindWaveClient into signal quality,
    # attention, meditation, and the eight EEG band power readings, each
    # tracked by a LastInstances rolling average.
    def __init__(self):
        self.MindWave = MindWaveClient()
        # Parse cursor into the current packet (index into the hex string).
        self.last_byte_number = 0
        self.parsed_string = ""
        self.waves = ["Delta", "Theta", "low Alpha", "high Alpha", "low Beta", "high Beta", "low Gamma", "high Gamma"]
        self.signal = LastInstances(INSTANCES_NUMBER)
        self.attention = LastInstances(INSTANCES_NUMBER)
        self.meditation = LastInstances(INSTANCES_NUMBER)
        self.waves_values = []
        # 200 means "off head"; readings are only recorded when signal == 0.
        self.last_signal = 200
        for i1 in range(0, 8):
            self.waves_values.append(LastInstances(INSTANCES_NUMBER))
        thread = threading.Thread(target=self.run)
        thread.daemon = True
        thread.start()
    def run(self):
        # Daemon loop: pull packets and walk them two hex digits at a time.
        while True:
            if not self.MindWave.empty():
                data = self.MindWave.get()
                while True:
                    current_byte, self.last_byte_number = get_next_byte(data, self.last_byte_number)
                    if current_byte == "AA": # Data starts being sent
                        current_byte, self.last_byte_number = get_next_byte(data, self.last_byte_number)
                        self.parsed_string += "SYNCED: "
                    elif current_byte == "02": # Signal value (0 - 200), where 0 - perfect, 200 - off head state
                        byte, self.last_byte_number = get_next_byte(data, self.last_byte_number)
                        self.parsed_string += "Signal: " + str(int(byte, 16)) + "; "
                        self.signal.add(int(byte, 16))
                        self.last_signal = int(byte, 16)
                    elif current_byte == "04": # Attention value (0 - 100)
                        byte, self.last_byte_number = get_next_byte(data, self.last_byte_number)
                        self.parsed_string += "Attention: " + str(int(byte, 16)) + "; "
                        if self.last_signal == 0:
                            self.attention.add(int(byte, 16))
                    elif current_byte == "05": # Meditation value (0 - 100)
                        byte, self.last_byte_number = get_next_byte(data, self.last_byte_number)
                        self.parsed_string += "Meditation: " + str(int(byte, 16)) + "; "
                        if self.last_signal == 0:
                            self.meditation.add(int(byte, 16))
                    elif current_byte == "81": # EEG values as represented in waves array
                        current_byte, self.last_byte_number = get_next_byte(data, self.last_byte_number)
                        self.parsed_string += "EEG waves: "
                        # Eight 4-byte floats follow, one per band in self.waves.
                        for i1 in range(0, 8):
                            float_string = ""
                            for i2 in range(0, 4):
                                current_byte, self.last_byte_number = get_next_byte(data, self.last_byte_number)
                                float_string += current_byte
                            self.parsed_string += self.waves[i1] + ": " + str(byte_to_float(float_string)) + "; "
                            # Negative decodes are treated as invalid and skipped.
                            if not byte_to_float(float_string) < 0:
                                if self.last_signal == 0:
                                    self.waves_values[i1].add(byte_to_float(float_string))
                    elif current_byte != "":
                        print("Unknown byte:", current_byte)
                    # get_next_byte resets the cursor to 0 at end of packet.
                    if self.last_byte_number == 0:
                        print("")
                        print("Signal:", self.signal.average, "total:", self.signal.total_average)
                        print("Attention:", self.attention.average, "total:", self.attention.total_average)
                        print("Meditation:", self.meditation.average, "total:", self.meditation.total_average)
                        print("Waves:")
                        for i1 in range(0, 8):
                            print(self.waves[i1] + ":", self.waves_values[i1].average,
                                  "total:", self.waves_values[i1].total_average)
                        self.parsed_string = ""
                        break
    def reset(self):
        # Return the parser (and its client buffer) to the initial state.
        self.MindWave.reset()
        self.last_byte_number = 0
        self.parsed_string = ""
        self.signal.reset()
        self.attention.reset()
        self.meditation.reset()
        self.last_signal = 200
        for i1 in range(0, 8):
            self.waves_values[i1].reset()
class Colors:
    """Named RGB color constants used by the UI, exposed as attributes."""

    def __init__(self):
        palette = {
            'black': (0, 0, 0),
            'black5': (5, 5, 5),
            'white': (255, 255, 255),
            'red': (255, 0, 0),
            'blue': (0, 0, 255),
            'lime': (0, 255, 0),
            'gray': (20, 20, 20),
            'yellow': (255, 255, 0),
            'cyan': (0, 255, 255),
            'green': (0, 128, 0),
            'teal': (0, 128, 128),
            'pink': (255, 192, 203),
            'orange': (255, 165, 0),
            'magenta': (255, 0, 255),
            'cornsilk': (255, 248, 220),
            'skyblue': (0, 191, 255),
            'chocolate': (210, 105, 30),
            'brown': (165, 42, 42),
            'beige': (245, 245, 220),
            'salmon': (240, 128, 114),
            'lightgray': (211, 211, 211),
            'lightgreen': (0, 255, 0),
            'medgreen': (0, 192, 0),
            'darkred': (128, 0, 0),
            'medred': (192, 0, 0),
        }
        for name, rgb in palette.items():
            setattr(self, name, rgb)
def clear_window(_window, _x, _y, _width, _height):
    """Paint a near-black rectangle over the given region of ``_window``.

    BUGFIX: the previous version ignored its ``_window`` parameter and drew
    onto the module-level ``window`` global instead; the parameter is now
    honored (callers that pass the global see identical behavior).
    """
    pygame.draw.rect(_window, c.black5, pygame.Rect(_x, _y, _width, _height))
def text_to_surface(_text, big):
    """Render ``_text`` antialiased in the dark background color, using the
    big module font when ``big`` is truthy, otherwise the medium one."""
    chosen_font = font_big if big else font_medium
    return chosen_font.render(_text, True, c.black5)
class SpeechOperator:
    # Accumulates per-house scores from normalized EEG band powers and
    # chooses the Sorting Hat quote/sound to play as the session advances.
    def __init__(self, _parser):
        # Per-band baseline power levels used to normalize readings.
        self.wave_baselines = [0.000085520879017, 0.000021897343866,  # baseline based on readings from 3 people
                               0.000005682946818, 0.000004351168889,
                               0.000003498963247, 0.000002834064985,
                               0.000002102321697, 0.000001087921237]
        self.houses = ["Gryffindor",
                       "Hufflepuff",
                       "Ravenclaw",
                       "Slytherin"]
        # Quote texts and their candidate sound files are parallel lists:
        # index i of the text list maps to the list of files at index i.
        self.on_start = ["Where shall I put you? Let's see...",
                         "This is interesting.",
                         "Difficult, very difficult.",
                         "Are you afraid of what you will hear?",
                         "Don’t worry, child."]
        self.on_start_sounds = [
            [
                "sound/on_start/whereshalli1.wav",
                "sound/on_start/whereshalli2.wav"
            ],
            [
                "sound/on_start/thisisinteresting1.wav"
            ],
            [
                "sound/on_start/verydifficult1.wav"
            ],
            [
                "sound/on_start/areyouafraid1.wav",
                "sound/on_start/areyouafraid2.wav"
            ],
            [
                "sound/on_start/dontworry1.wav",
                "sound/on_start/dontworry2.wav"
            ]
        ]
        self.random = ["Ah, right then.",
                       "Hmm, okay...",
                       "I think I know what to do with you..."]
        self.random_sounds = [
            [
                "sound/random/ahrightthen1.wav",
                "sound/random/ahrightthen2.wav"
            ],
            [
                "sound/random/hmmokay1.wav",
                "sound/random/hmmokay2.wav",
                "sound/random/hmmokay3.wav"
            ],
            [
                "sound/random/ithinkiknow1.wav"
            ]
        ]
        # house_quotes[h] holds three teaser quotes plus the announcement
        # (index 3) for house h, in self.houses order.
        self.house_quotes = []
        self.house_quotes_sounds = []
        for i1 in range(0, 4):
            self.house_quotes.append([])
            self.house_quotes_sounds.append([])
        self.house_quotes[0] = ["Plenty of courage...",
                                "Yes... very brave.",
                                "You have a lot of nerve!",
                                "GRYFFINDOR!"]
        self.house_quotes_sounds[0] = [
            [
                "sound/gryffindor/plentyofcourage1.wav"
            ],
            [
                "sound/gryffindor/yesverybrave1.wav"
            ],
            [
                "sound/gryffindor/alotofnerve1.wav"
            ],
            [
                "sound/gryffindor/gryffindor1.wav",
                "sound/gryffindor/gryffindor2.wav"
            ]
        ]
        self.house_quotes[1] = ["Patient and loyal...",
                                "Hard work will get you far.",
                                "Strong sense of justice...",
                                "HUFFLEPUFF!"]
        self.house_quotes_sounds[1] = [
            [
                "sound/hufflepuff/patientandloyal1.wav"
            ],
            [
                "sound/hufflepuff/hardwork1.wav"
            ],
            [
                "sound/hufflepuff/strongsenseofjustice1.wav",
                "sound/hufflepuff/strongsenseofjustice2.wav"
            ],
            [
                "sound/hufflepuff/hufflepuff1.wav",
                "sound/hufflepuff/hufflepuff2.wav"
            ]
        ]
        self.house_quotes[2] = ["Not a bad mind.",
                                "There's talent! Interesting...",
                                "Quite smart...",
                                "RAVENCLAW!"]
        self.house_quotes_sounds[2] = [
            [
                "sound/ravenclaw/notabadmind1.wav",
                "sound/ravenclaw/notabadmind2.wav"
            ],
            [
                "sound/ravenclaw/therestalent1.wav",
                "sound/ravenclaw/therestalent2.wav"
            ],
            [
                "sound/ravenclaw/quitesmart1.wav",
                "sound/ravenclaw/quitesmart2.wav"
            ],
            [
                "sound/ravenclaw/ravenclaw1.wav",
                "sound/ravenclaw/ravenclaw2.wav"
            ]
        ]
        self.house_quotes[3] = ["A nice thirst to prove yourself.",
                                "Quite ambitious, yes...",
                                "Great sense of self-preservation...",
                                "SLYTHERIN!"]
        self.house_quotes_sounds[3] = [
            [
                "sound/slitherin/anicethirst1.wav"
            ],
            [
                "sound/slitherin/quiteambitious1.wav",
                "sound/slitherin/quiteambitious2.wav"
            ],
            [
                "sound/slitherin/greatsenseofself1.wav",
                "sound/slitherin/greatsenseofself2.wav"
            ],
            [
                "sound/slitherin/slitherin1.wav",
                "sound/slitherin/slitherin2.wav"
            ]
        ]
        # How long (in seconds) a spoken quote stays on screen.
        self.seconds = 3
        self.used_quotes = []
        self.parser = _parser
        # Tracks the attention counter so new readings can be detected.
        self.last_amount = self.parser.attention.total_amount
        self.house_points = []  # Gryffindor, Hufflepuff, Ravenclaw, Slytherin
        for i1 in range(0, 4):
            self.house_points.append(0)
    def update(self, only_points):
        """Advance the session by one step.

        Whenever a new reading has arrived (attention counter changed) and
        enough wave samples exist, the house scores are updated.  When
        ``only_points`` is False, returns a ``(text, frames, big_text)``
        tuple describing a quote to show (``text`` is None when nothing
        should be said).  NOTE(review): when ``only_points`` is True the
        method implicitly returns None.
        """
        _text = None
        _frames = 0
        _big_text = False
        if self.last_amount != self.parser.attention.total_amount and self.parser.waves_values[0].total_amount > 10:
            self.last_amount = self.parser.attention.total_amount
            # Each house accumulates two bands, normalized as
            # 2*window_avg / (lifetime_avg + baseline).  Band indices follow
            # the parser's waves list: 6,7 (gammas) -> Gryffindor; 0,4
            # (delta, low beta) -> Hufflepuff; 1,5 (theta, high beta) ->
            # Ravenclaw; 2,3 (alphas) -> Slytherin.
            self.house_points[0] += ((2 * self.parser.waves_values[6].average) /
                                     (self.parser.waves_values[6].total_average + self.wave_baselines[6])) + \
                                    ((2 * self.parser.waves_values[7].average) /
                                     (self.parser.waves_values[7].total_average + self.wave_baselines[7]))
            self.house_points[1] += ((2 * self.parser.waves_values[0].average) /
                                     (self.parser.waves_values[0].total_average + self.wave_baselines[0])) + \
                                    ((2 * self.parser.waves_values[4].average) /
                                     (self.parser.waves_values[4].total_average + self.wave_baselines[4]))
            self.house_points[2] += ((2 * self.parser.waves_values[1].average) /
                                     (self.parser.waves_values[1].total_average + self.wave_baselines[1])) + \
                                    ((2 * self.parser.waves_values[5].average) /
                                     (self.parser.waves_values[5].total_average + self.wave_baselines[5]))
            self.house_points[3] += ((2 * self.parser.waves_values[2].average) /
                                     (self.parser.waves_values[2].total_average + self.wave_baselines[2])) + \
                                    ((2 * self.parser.waves_values[3].average) /
                                     (self.parser.waves_values[3].total_average + self.wave_baselines[3]))
            print("House points: Gryffindor (" + str(self.house_points[0])
                  + "), Hufflepuff (" + str(self.house_points[1])
                  + "), Ravenclaw (" + str(self.house_points[2])
                  + "), Slytherin (" + str(self.house_points[3]) + ")")
            # Speak every 6th sample after the 10th: the 16th triggers an
            # opening line, the 40th announces the winning house, the rest
            # alternate between filler and house-teaser quotes.
            if (self.parser.waves_values[0].total_amount - 10) % 6 == 0 and not only_points:
                if self.parser.waves_values[0].total_amount == 16:
                    _text = random.choice(self.on_start)
                    index = self.on_start.index(_text)
                    winsound.PlaySound(random.choice(self.on_start_sounds[index]), winsound.SND_ASYNC)
                    _frames = int(self.seconds * 60)
                elif self.parser.waves_values[0].total_amount == 40:
                    highest_house = self.house_points.index(max(self.house_points))
                    _text = self.house_quotes[highest_house][3]
                    winsound.PlaySound(random.choice(self.house_quotes_sounds[highest_house][3]), winsound.SND_ASYNC)
                    _frames = int(self.seconds * 60 * 3)
                    _big_text = True
                else:
                    # ~20% chance of a filler quote, otherwise tease the
                    # currently-leading house; quotes are never repeated.
                    random_or_house = random.randint(0, 10)
                    if random_or_house < 2:
                        _text = random.choice(self.random)
                        while True:
                            if _text in self.used_quotes:
                                _text = random.choice(self.random)
                            else:
                                break
                        self.used_quotes.append(_text)
                        index = self.random.index(_text)
                        winsound.PlaySound(random.choice(self.random_sounds[index]), winsound.SND_ASYNC)
                        _frames = int(self.seconds * 60)
                    else:
                        highest_house = self.house_points.index(max(self.house_points))
                        random_house_quote = random.randint(0, 2)
                        _text = self.house_quotes[highest_house][random_house_quote]
                        while True:
                            if _text in self.used_quotes:
                                random_house_quote = random.randint(0, 2)
                                _text = self.house_quotes[highest_house][random_house_quote]
                            else:
                                break
                        self.used_quotes.append(_text)
                        index = self.house_quotes[highest_house].index(_text)
                        winsound.PlaySound(random.choice(self.house_quotes_sounds[highest_house][index]),
                                           winsound.SND_ASYNC)
                        _frames = int(self.seconds * 60)
        if not only_points:
            return _text, _frames, _big_text
    def reset(self):
        """Zero the house scores and forget which quotes were used."""
        for i1 in range(0, 4):
            self.house_points[i1] = 0
        self.used_quotes = []
class SortingHat:
def __init__(self, _speech, _width, _height, _parser):
self.hat_sleeping = True
self.hat_talking = False
self.frames_left = 0
self.text = None
self.text_surface = None
self.big_text = False
self.chosen = False
self.hat_image = pygame.image.load("img/sortinghat.png").convert()
self.hat_image_talking | |
import pytest
from graphql_relay import to_global_id
from ...core.relay import extract_global_id
from ...core.tests import extract_serializer_input_fields
from ...form.models import Answer, Document, Question
from .. import serializers
# Each parametrized case exercises one question/answer type end-to-end
# through the allDocuments GraphQL query; snapshots capture the payload.
@pytest.mark.parametrize(
    "question__type,question__data_source,answer__value,answer__date",
    [
        (Question.TYPE_INTEGER, None, 1, None),
        (Question.TYPE_FLOAT, None, 2.1, None),
        (Question.TYPE_TEXT, None, "somevalue", None),
        (Question.TYPE_MULTIPLE_CHOICE, None, ["somevalue", "anothervalue"], None),
        (Question.TYPE_TABLE, None, None, None),
        (Question.TYPE_DATE, None, None, "2019-02-22"),
        (Question.TYPE_FILE, None, "some-file.pdf", None),
        (Question.TYPE_FILE, None, "some-other-file.pdf", None),
        (Question.TYPE_DYNAMIC_CHOICE, "MyDataSource", "5.5", None),
        (Question.TYPE_DYNAMIC_MULTIPLE_CHOICE, "MyDataSource", ["5.5"], None),
    ],
)
def test_query_all_documents(
    db,
    snapshot,
    form_question,
    form,
    document,
    document_factory,
    question_factory,
    form_question_factory,
    answer_factory,
    file_factory,
    answer_document,
    answer,
    schema_executor,
    question,
    minio_mock,
    data_source_settings,
    settings,
):
    """Search for each answer's value and snapshot the full document payload
    (including the typed answer fragments)."""
    query = """
        query AllDocumentsQuery($search: String) {
          allDocuments(search: $search) {
            totalCount
            edges {
              node {
                createdByUser
                answers {
                  totalCount
                  edges {
                    node {
                      __typename
                      question {
                        slug
                        label
                      }
                      ... on StringAnswer {
                        string_value: value
                      }
                      ... on IntegerAnswer {
                        integer_value: value
                      }
                      ... on ListAnswer {
                        list_value: value
                      }
                      ... on FloatAnswer {
                        float_value: value
                      }
                      ... on DateAnswer {
                        date_value: value
                      }
                      ... on TableAnswer {
                        table_value: value {
                          form {
                            slug
                          }
                        }
                      }
                      ... on FormAnswer {
                        form_value: value {
                          form {
                            slug
                          }
                        }
                      }
                      ... on FileAnswer {
                        fileValue: value {
                          name
                          downloadUrl
                          metadata
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        }
    """
    # List-valued answers are joined into a single space-separated search term.
    search = isinstance(answer.value, list) and " ".join(answer.value) or answer.value
    if question.type == Question.TYPE_FILE:
        # The second file case simulates a missing media bucket.
        if answer.value == "some-other-file.pdf":
            settings.MINIO_STORAGE_AUTO_CREATE_MEDIA_BUCKET = False
            minio_mock.bucket_exists.return_value = False
        # File answers store the value on a File record, not on the answer.
        answer.file = file_factory(name=answer.value)
        answer.value = None
        answer.save()
        search = answer.file.name
    result = schema_executor(query, variables={"search": search})
    assert not result.errors
    snapshot.assert_match(result.data)
def test_complex_document_query_performance(
    db,
    schema_executor,
    document,
    form,
    form_question_factory,
    question_factory,
    answer_factory,
    file_factory,
    question_option_factory,
    django_assert_num_queries,
    minio_mock,
):
    """Guard against N+1 regressions: a deeply nested document query must
    stay within the expected number of SQL queries."""
    # Five plain answers whose questions are attached to the form.
    for ans in answer_factory.create_batch(5, document=document):
        form_question_factory(question=ans.question, form=form)

    # A multiple-choice question with ten options, plus an unrelated answer.
    mc_question = question_factory(type=Question.TYPE_MULTIPLE_CHOICE)
    form_question_factory(question=mc_question, form=form)
    question_option_factory.create_batch(10, question=mc_question)
    answer_factory(question=mc_question)

    # A file question answered on this document.
    file_question = question_factory(type=Question.TYPE_FILE)
    form_question_factory(question=file_question, form=form)
    answer_factory(
        question=file_question, value=None, document=document, file=file_factory()
    )
    query = """
    query ($id: ID!) {
      allDocuments(id: $id) {
        edges {
          node {
            ...FormDocument
          }
        }
      }
    }
    fragment FormDocument on Document {
      id
      answers {
        edges {
          node {
            ...FieldAnswer
          }
        }
      }
      form {
        slug
        questions {
          edges {
            node {
              ...FieldQuestion
            }
          }
        }
      }
    }
    fragment FieldAnswer on Answer {
      id
      __typename
      question {
        slug
      }
      ... on StringAnswer {
        stringValue: value
      }
      ... on IntegerAnswer {
        integerValue: value
      }
      ... on FloatAnswer {
        floatValue: value
      }
      ... on DateAnswer {
        dateValue: value
      }
      ... on ListAnswer {
        listValue: value
      }
      ... on FileAnswer {
        fileValue: value {
          name
          downloadUrl
          metadata
        }
      }
    }
    fragment FieldQuestion on Question {
      slug
      label
      isRequired
      isHidden
      ... on TextQuestion {
        textMaxLength: maxLength
      }
      ... on TextareaQuestion {
        textareaMaxLength: maxLength
      }
      ... on IntegerQuestion {
        integerMinValue: minValue
        integerMaxValue: maxValue
      }
      ... on FloatQuestion {
        floatMinValue: minValue
        floatMaxValue: maxValue
      }
      ... on ChoiceQuestion {
        choiceOptions: options {
          edges {
            node {
              slug
              label
            }
          }
        }
      }
      ... on MultipleChoiceQuestion {
        multipleChoiceOptions: options {
          edges {
            node {
              slug
              label
            }
          }
        }
      }
    }
    """
    with django_assert_num_queries(11):
        result = schema_executor(query, variables={"id": str(document.pk)})
    assert not result.errors
def test_query_all_documents_filter_answers_by_question(
    db, document, answer, question, answer_factory, schema_executor
):
    """A document's answers can be narrowed to a single question slug."""
    # Second answer on the same document; the filter must exclude it.
    answer_factory(document=document)
    query = """
        query AllDocumentsQuery($question: ID!) {
            allDocuments {
                edges {
                    node {
                        answers(question: $question) {
                            edges {
                                node {
                                    id
                                }
                            }
                        }
                    }
                }
            }
        }
    """
    result = schema_executor(query, variables={"question": question.slug})
    assert not result.errors
    document_edges = result.data["allDocuments"]["edges"]
    assert len(document_edges) == 1
    answer_edges = document_edges[0]["node"]["answers"]["edges"]
    assert len(answer_edges) == 1
    assert extract_global_id(answer_edges[0]["node"]["id"]) == str(answer.id)
def test_query_all_documents_filter_answers_by_questions(
    db, document_factory, question_factory, answer_factory, schema_executor
):
    """Each document only exposes answers whose question is in the filter."""
    # Three documents, each answering its own distinct question.
    questions = []
    for _ in range(3):
        document = document_factory()
        question = question_factory()
        questions.append(question)
        answer_factory(document=document, question=question)
    query = """
        query AllDocumentsQuery($questions: [ID!]) {
            allDocuments {
                edges {
                    node {
                        answers(questions: $questions) {
                            edges {
                                node {
                                    id
                                }
                            }
                        }
                    }
                }
            }
        }
    """
    result = schema_executor(
        query, variables={"questions": [questions[0].slug, questions[1].slug]}
    )
    assert not result.errors
    edges = result.data["allDocuments"]["edges"]
    assert len(edges) == 3
    # Only the first two documents answered one of the filtered questions.
    answer_counts = [len(edge["node"]["answers"]["edges"]) for edge in edges]
    assert answer_counts == [1, 1, 0]
@pytest.mark.parametrize("update", [True, False])
def test_save_document(db, document, schema_executor, update):
    """saveDocument either creates a new document or updates an existing one."""
    query = """
        mutation SaveDocument($input: SaveDocumentInput!) {
            saveDocument(input: $input) {
              document {
                form {
                  slug
                }
                id
              }
              clientMutationId
            }
        }
    """
    variables = {
        "input": extract_serializer_input_fields(
            serializers.DocumentSerializer, document
        )
    }
    if not update:
        # Creating (rather than updating) means omitting the ID.
        del variables["input"]["id"]
    result = schema_executor(query, variables=variables)
    assert not result.errors
    saved = result.data["saveDocument"]["document"]
    assert saved["form"]["slug"] == document.form.slug
    # An update must return the original document; a create must not.
    returned_same_id = extract_global_id(saved["id"]) == str(document.id)
    assert returned_same_id == update
@pytest.mark.parametrize("delete_answer", [True, False])
@pytest.mark.parametrize("option__slug", ["option-slug"])
# Each row: question type / configuration / data source / format validators /
# answer value / answer date / mutation name / whether the save must succeed.
@pytest.mark.parametrize(
    "question__type,question__configuration,question__data_source,question__format_validators,answer__value,answer__date,mutation,success",
    [
        (
            Question.TYPE_INTEGER,
            {},
            None,
            [],
            1,
            None,
            "SaveDocumentIntegerAnswer",
            True,
        ),
        (
            Question.TYPE_INTEGER,
            {"min_value": 100},
            None,
            [],
            1,
            None,
            "SaveDocumentIntegerAnswer",
            False,
        ),
        (Question.TYPE_FLOAT, {}, None, [], 2.1, None, "SaveDocumentFloatAnswer", True),
        (
            Question.TYPE_FLOAT,
            {"min_value": 100.0},
            None,
            [],
            1,
            None,
            "SaveDocumentFloatAnswer",
            False,
        ),
        (
            Question.TYPE_TEXT,
            {},
            None,
            [],
            "Test",
            None,
            "SaveDocumentStringAnswer",
            True,
        ),
        (
            Question.TYPE_TEXT,
            {"max_length": 1},
            None,
            [],
            "toolong",
            None,
            "SaveDocumentStringAnswer",
            False,
        ),
        (
            Question.TYPE_DATE,
            {},
            None,
            [],
            None,
            "1900-01-01",
            "SaveDocumentDateAnswer",
            False,
        ),
        (
            Question.TYPE_DATE,
            {},
            None,
            [],
            None,
            "2019-02-22",
            "SaveDocumentDateAnswer",
            True,
        ),
        (Question.TYPE_FILE, {}, None, [], None, None, "SaveDocumentFileAnswer", False),
        (
            Question.TYPE_FILE,
            {},
            None,
            [],
            "some-file.pdf",
            None,
            "SaveDocumentFileAnswer",
            True,
        ),
        (
            Question.TYPE_FILE,
            {},
            None,
            [],
            "not-exist.pdf",
            None,
            "SaveDocumentFileAnswer",
            True,
        ),
        (
            Question.TYPE_TEXT,
            {"max_length": 1},
            None,
            [],
            "toolong",
            None,
            "SaveDocumentStringAnswer",
            False,
        ),
        (
            Question.TYPE_TABLE,
            {},
            None,
            [],
            None,
            None,
            "SaveDocumentTableAnswer",
            True,
        ),
        (Question.TYPE_FORM, {}, None, [], None, None, "SaveDocumentFormAnswer", True),
        (
            Question.TYPE_TEXTAREA,
            {},
            None,
            [],
            "Test",
            None,
            "SaveDocumentStringAnswer",
            True,
        ),
        (
            Question.TYPE_TEXTAREA,
            {"max_length": 1},
            None,
            [],
            "toolong",
            None,
            "SaveDocumentStringAnswer",
            False,
        ),
        (
            Question.TYPE_MULTIPLE_CHOICE,
            {},
            None,
            [],
            ["option-slug"],
            None,
            "SaveDocumentListAnswer",
            True,
        ),
        (
            Question.TYPE_MULTIPLE_CHOICE,
            {},
            None,
            [],
            ["option-slug", "option-invalid-slug"],
            None,
            "SaveDocumentStringAnswer",
            False,
        ),
        (
            Question.TYPE_CHOICE,
            {},
            None,
            [],
            "option-slug",
            None,
            "SaveDocumentStringAnswer",
            True,
        ),
        (
            Question.TYPE_CHOICE,
            {},
            None,
            [],
            "invalid-option-slug",
            None,
            "SaveDocumentStringAnswer",
            False,
        ),
        (
            Question.TYPE_DYNAMIC_MULTIPLE_CHOICE,
            {},
            "MyDataSource",
            [],
            ["5.5", "1"],
            None,
            "SaveDocumentListAnswer",
            True,
        ),
        (
            Question.TYPE_DYNAMIC_MULTIPLE_CHOICE,
            {},
            "MyDataSource",
            [],
            ["not in data"],
            None,
            "SaveDocumentStringAnswer",
            False,
        ),
        (
            Question.TYPE_DYNAMIC_CHOICE,
            {},
            "MyDataSource",
            [],
            "5.5",
            None,
            "SaveDocumentStringAnswer",
            True,
        ),
        (
            Question.TYPE_DYNAMIC_CHOICE,
            {},
            "MyDataSource",
            [],
            "not in data",
            None,
            "SaveDocumentStringAnswer",
            False,
        ),
        (
            Question.TYPE_TEXT,
            {},
            None,
            ["email"],
            "some text",
            None,
            "SaveDocumentStringAnswer",
            False,
        ),
        (
            Question.TYPE_TEXT,
            {},
            None,
            ["email"],
            "<EMAIL>",
            None,
            "SaveDocumentStringAnswer",
            True,
        ),
        (
            Question.TYPE_TEXTAREA,
            {},
            None,
            ["email"],
            "some text",
            None,
            "SaveDocumentStringAnswer",
            False,
        ),
        (
            Question.TYPE_TEXTAREA,
            {},
            None,
            ["email"],
            "<EMAIL>",
            None,
            "SaveDocumentStringAnswer",
            True,
        ),
    ],
)
def test_save_document_answer(
    db,
    snapshot,
    question,
    answer,
    mutation,
    question_option,
    document_factory,
    answer_factory,
    answer_document_factory,
    question_factory,
    file_factory,
    success,
    schema_executor,
    delete_answer,
    minio_mock,
    data_source_settings,
):
    """Exercise every SaveDocument*Answer mutation across question types,
    validation failures, create-vs-update, and file handling."""
    # GraphQL field names are the mutation name with a lower-cased first letter.
    mutation_func = mutation[0].lower() + mutation[1:]
    # NOTE(review): the query repeats the "... on ListAnswer" inline fragment
    # twice; harmless for GraphQL but redundant.
    query = f"""
        mutation {mutation}($input: {mutation}Input!) {{
          {mutation_func}(input: $input) {{
            answer {{
              __typename
              ... on StringAnswer {{
                stringValue: value
              }}
              ... on IntegerAnswer {{
                integerValue: value
              }}
              ... on ListAnswer {{
                listValue: value
              }}
              ... on FloatAnswer {{
                floatValue: value
              }}
              ... on ListAnswer {{
                listValue: value
              }}
              ... on DateAnswer {{
                dateValue: value
              }}
              ... on TableAnswer {{
                table_value: value {{
                  form {{
                    slug
                  }}
                }}
              }}
              ... on FormAnswer {{
                form_value: value {{
                  form {{
                    slug
                  }}
                }}
              }}
              ... on FileAnswer {{
                fileValue: value {{
                  name
                  uploadUrl
                }}
              }}
            }}
            clientMutationId
          }}
        }}
    """
    inp = {
        "input": extract_serializer_input_fields(
            serializers.SaveAnswerSerializer, answer
        )
    }
    # Table answers reference row documents by primary key.
    if question.type == Question.TYPE_TABLE:
        documents = document_factory.create_batch(2, form=question.row_form)
        # create a subtree
        sub_question = question_factory(type=Question.TYPE_TEXT)
        document_answer = answer_factory(question=sub_question)
        documents[0].answers.add(document_answer)
        answer_document_factory(answer=answer, document=documents[0])
        inp["input"]["value"] = [str(document.pk) for document in documents]
    # Form answers reference a single sub-form document.
    if question.type == Question.TYPE_FORM:
        document = document_factory.create(form=question.sub_form)
        inp["input"]["value"] = document.pk
    # File answers carry no value; the "some-file.pdf" case also simulates a
    # missing storage bucket.
    if question.type == Question.TYPE_FILE:
        if answer.value == "some-file.pdf":
            minio_mock.bucket_exists.return_value = False
        answer.value = None
        answer.save()
    if question.type == Question.TYPE_DATE:
        inp["input"]["value"] = answer.date
        answer.value = None
        answer.save()
        # Date format is enforced in the model. So we initially had to use a valid date
        # here we're able to change it:
        if answer.date == "1900-01-01":
            inp["input"]["value"] = "not a date"
    if delete_answer:
        # delete answer to force create test instead of update
        Answer.objects.filter(pk=answer.pk).delete()
    result = schema_executor(query, variables=inp)
    # `not x == y` parses as `not (x == y)`, i.e. bool(result.errors) != success:
    # successful cases must produce no errors, failing cases must produce some.
    assert not bool(result.errors) == success
    if success:
        snapshot.assert_match(result.data)
def test_save_document_table_answer_invalid_row_form(
db, schema_executor, answer, document_factory
):
query = """
mutation SaveDocumentTableAnswer($input: SaveDocumentTableAnswerInput!) {
saveDocumentTableAnswer(input: $input) {
clientMutationId
}
}
"""
inp = {
"input": extract_serializer_input_fields(
serializers.SaveAnswerSerializer, answer
)
}
inp["input"]["value"] = [
str(document.pk) for document in document_factory.create_batch(1)
]
result = schema_executor(query, | |
# Repository: khushhallchandra/CS-7641
import copy
import logging
import pandas as pd
import numpy as np
from collections import Counter
from sklearn import preprocessing, utils
import sklearn.model_selection as ms
from scipy.sparse import isspmatrix
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import os
import seaborn as sns
from abc import ABC, abstractmethod
# TODO: Move this to a common lib?
# Directory that receives all generated artifacts (created eagerly at import).
OUTPUT_DIRECTORY = "./output"
if not os.path.exists(OUTPUT_DIRECTORY):
    os.makedirs(OUTPUT_DIRECTORY)
# Plots go into a dedicated images/ subdirectory.
if not os.path.exists("{}/images".format(OUTPUT_DIRECTORY)):
    os.makedirs("{}/images".format(OUTPUT_DIRECTORY))
# Configure logging
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
def plot_pairplot(title, df, class_column_name=None):
    """Build a seaborn pairplot of *df*, colored by *class_column_name*.

    :param title: figure title (FIX: was accepted but silently ignored)
    :param df: DataFrame to plot
    :param class_column_name: column used for the hue, or None
    :return: the seaborn PairGrid for further customisation/saving
    """
    grid = sns.pairplot(df, hue=class_column_name)
    if title:
        # Attach the previously-unused title to the underlying figure.
        grid.fig.suptitle(title)
    return grid
# Adapted from https://stats.stackexchange.com/questions/239973/a-general-measure-of-data-set-imbalance
def is_balanced(seq):
    """Return True when the class distribution of *seq* is reasonably balanced.

    Uses the normalized Shannon entropy H/log(k) of the class frequencies and
    considers the dataset balanced when it exceeds 0.75.

    :param seq: iterable of class labels
    :return: True if balanced, False otherwise
    """
    n = len(seq)
    classes = [(clas, float(count)) for clas, count in Counter(seq).items()]
    k = len(classes)
    # FIX: with zero or one distinct class, log(k) is 0 (or undefined) and the
    # original ratio divided by zero; such degenerate data is not "balanced".
    if n == 0 or k <= 1:
        return False
    H = -sum([(count / n) * np.log((count / n)) for clas, count in classes])
    return H / np.log(k) > 0.75
class DataLoader(ABC):
    """Abstract base class for dataset loaders.

    Subclasses implement ``_load_data``/``_preprocess_data`` plus a few
    metadata hooks; this base handles feature/class extraction, stratified
    train/test splitting, scaling, and CSV export.
    """

    def __init__(self, path, verbose, seed):
        """
        :param path: location of the raw data file
        :param verbose: when True, progress is logged via the module logger
        :param seed: random seed used for the train/test split
        """
        self._path = path
        self._verbose = verbose
        self._seed = seed
        self.features = None
        self.classes = None
        self.testing_x = None
        self.testing_y = None
        self.training_x = None
        self.training_y = None
        self.binary = False
        self.balanced = False
        self._data = pd.DataFrame()

    def load_and_process(self, data=None, preprocess=True):
        """
        Load data from the given path (or use *data* directly) and perform any
        initial processing required. This will populate the features and
        classes and should be called before any processing is done.

        :param data: optional pre-built DataFrame; skips ``_load_data`` when given
        :param preprocess: run the subclass ``_preprocess_data`` hook
        :return: Nothing
        """
        if data is not None:
            self._data = data
            # Reset all derived state so it is recomputed from the new frame.
            self.features = None
            self.classes = None
            self.testing_x = None
            self.testing_y = None
            self.training_x = None
            self.training_y = None
        else:
            self._load_data()
        self.log(
            "Processing {} Path: {}, Dimensions: {}",
            self.data_name(),
            self._path,
            self._data.shape,
        )
        if self._verbose:
            old_max_rows = pd.options.display.max_rows
            pd.options.display.max_rows = 10
            self.log("Data Sample:\n{}", self._data)
            pd.options.display.max_rows = old_max_rows
        if preprocess:
            self.log("Will pre-process data")
            self._preprocess_data()
        self.get_features()
        self.get_classes()
        self.log("Feature dimensions: {}", self.features.shape)
        self.log("Classes dimensions: {}", self.classes.shape)
        self.log("Class values: {}", np.unique(self.classes))
        class_dist = np.histogram(self.classes)[0]
        class_dist = class_dist[np.nonzero(class_dist)]
        self.log("Class distribution: {}", class_dist)
        self.log(
            "Class distribution (%): {}", (class_dist / self.classes.shape[0]) * 100
        )
        self.log("Sparse? {}", isspmatrix(self.features))
        if len(class_dist) == 2:
            self.binary = True
            self.balanced = is_balanced(self.classes)
        self.log("Binary? {}", self.binary)
        self.log("Balanced? {}", self.balanced)

    def scale_standard(self):
        """Standard-scale the feature matrix and any existing splits.

        NOTE(review): each array is fitted with its own StandardScaler, so the
        test set is scaled with its own statistics rather than the training
        fit — confirm this is intentional before relying on the scaled splits.
        """
        self.features = StandardScaler().fit_transform(self.features)
        if self.training_x is not None:
            self.training_x = StandardScaler().fit_transform(self.training_x)
        if self.testing_x is not None:
            self.testing_x = StandardScaler().fit_transform(self.testing_x)

    def build_train_test_split(self, test_size=0.3):
        """Create a stratified train/test split once; later calls are no-ops.

        FIX: the guard previously used ``not self.training_x`` etc., which
        raises ``ValueError`` ("truth value of an array ... is ambiguous")
        once a split already exists, because numpy arrays are not
        boolean-testable. Explicit ``is None`` checks express the intended
        "not yet split" test safely.
        """
        if (
            self.training_x is None
            and self.training_y is None
            and self.testing_x is None
            and self.testing_y is None
        ):
            self.training_x, self.testing_x, self.training_y, self.testing_y = ms.train_test_split(
                self.features,
                self.classes,
                test_size=test_size,
                random_state=self._seed,
                stratify=self.classes,
            )

    def get_features(self, force=False):
        """Return the feature matrix (all columns but the last), caching it."""
        if self.features is None or force:
            self.log("Pulling features")
            self.features = np.array(self._data.iloc[:, 0:-1])
        return self.features

    def get_classes(self, force=False):
        """Return the class vector (last column), caching it."""
        if self.classes is None or force:
            self.log("Pulling classes")
            self.classes = np.array(self._data.iloc[:, -1])
        return self.classes

    def dump_test_train_val(self, test_size=0.2, random_state=123):
        """Write scaled train/validate/test CSVs to data/ with labels in -1/+1.

        :param test_size: fraction held out for both the test and validate sets
        :param random_state: seed for both splits
        """
        ds_train_x, ds_test_x, ds_train_y, ds_test_y = ms.train_test_split(
            self.features,
            self.classes,
            test_size=test_size,
            random_state=random_state,
            stratify=self.classes,
        )
        pipe = Pipeline([("Scale", preprocessing.StandardScaler())])
        train_x = pipe.fit_transform(ds_train_x, ds_train_y)
        train_y = np.atleast_2d(ds_train_y).T
        test_x = pipe.transform(ds_test_x)
        test_y = np.atleast_2d(ds_test_y).T
        train_x, validate_x, train_y, validate_y = ms.train_test_split(
            train_x,
            train_y,
            test_size=test_size,
            random_state=random_state,
            stratify=train_y,
        )
        # Map 0/1 labels to -1/+1 as expected by the downstream consumers.
        test_y = pd.DataFrame(np.where(test_y == 0, -1, 1))
        train_y = pd.DataFrame(np.where(train_y == 0, -1, 1))
        validate_y = pd.DataFrame(np.where(validate_y == 0, -1, 1))
        tst = pd.concat([pd.DataFrame(test_x), test_y], axis=1)
        trg = pd.concat([pd.DataFrame(train_x), train_y], axis=1)
        val = pd.concat([pd.DataFrame(validate_x), validate_y], axis=1)
        tst.to_csv(
            "data/{}_test.csv".format(self.data_name()), index=False, header=False
        )
        trg.to_csv(
            "data/{}_train.csv".format(self.data_name()), index=False, header=False
        )
        val.to_csv(
            "data/{}_validate.csv".format(self.data_name()), index=False, header=False
        )

    @abstractmethod
    def _load_data(self):
        """Read the raw file at ``self._path`` into ``self._data``."""
        pass

    @abstractmethod
    def data_name(self):
        """Short dataset name used in logs and output file names."""
        pass

    @abstractmethod
    def _preprocess_data(self):
        """Dataset-specific cleaning/encoding applied to ``self._data``."""
        pass

    @abstractmethod
    def class_column_name(self):
        """Name of the column holding the class label."""
        pass

    @abstractmethod
    def pre_training_adjustment(self, train_features, train_classes):
        """
        Perform any adjustments to training data before training begins.
        :param train_features: The training features to adjust
        :param train_classes: The training classes to adjust
        :return: The processed data
        """
        return train_features, train_classes

    def reload_from_hdf(self, hdf_path, hdf_ds_name, preprocess=True):
        """Return a deep copy of this loader re-populated from an HDF dataset."""
        self.log("Reloading from HDF {}".format(hdf_path))
        loader = copy.deepcopy(self)
        df = pd.read_hdf(hdf_path, hdf_ds_name)
        loader.load_and_process(data=df, preprocess=preprocess)
        loader.build_train_test_split()
        return loader

    def log(self, msg, *args):
        """
        If the learner has verbose set to true, log the message with the given
        parameters using string.format
        :param msg: The log message
        :param args: The arguments
        :return: None
        """
        if self._verbose:
            logger.info(msg.format(*args))
class CreditDefaultData(DataLoader):
    """Loader for the UCI 'default of credit card clients' spreadsheet."""

    def __init__(
        self, path="data/default of credit card clients.xls", verbose=False, seed=1
    ):
        super().__init__(path, verbose, seed)

    def _load_data(self):
        # Row 0 is a banner row; the real header lives on row 1.
        self._data = pd.read_excel(self._path, header=1, index_col=0)

    def data_name(self):
        return "CreditDefaultData"

    def class_column_name(self):
        return "default payment next month"

    def _preprocess_data(self):
        # Data is already numeric; nothing to encode.
        pass

    def pre_training_adjustment(self, train_features, train_classes):
        """
        Perform any adjustments to training data before training begins.
        :param train_features: The training features to adjust
        :param train_classes: The training classes to adjust
        :return: The processed data
        """
        return train_features, train_classes
class CreditApprovalData(DataLoader):
    """Loader for the UCI credit-approval dataset (crx.data)."""

    def __init__(self, path="data/crx.data", verbose=False, seed=1):
        super().__init__(path, verbose, seed)

    def _load_data(self):
        self._data = pd.read_csv(self._path, header=None)

    def data_name(self):
        return "CreditApprovalData"

    def class_column_name(self):
        return "12"

    def _preprocess_data(self):
        # One-hot encode the categorical columns.
        # https://www.ritchieng.com/machinelearning-one-hot-encoding/
        to_encode = [0, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 15]
        label_encoder = preprocessing.LabelEncoder()
        one_hot = preprocessing.OneHotEncoder()
        encoded = self._data[to_encode].apply(label_encoder.fit_transform)
        # https://gist.github.com/ramhiser/982ce339d5f8c9a769a0
        vec_data = pd.DataFrame(one_hot.fit_transform(encoded).toarray())
        self._data = pd.concat(
            [self._data.drop(to_encode, axis=1), vec_data], axis=1
        )
        # Drop rows containing '?' in the remaining unencoded columns.
        clean_rows = (self._data[[1, 2, 7]] != "?").all(axis=1)
        self._data = self._data[clean_rows]

    def pre_training_adjustment(self, train_features, train_classes):
        """No adjustment needed for this dataset."""
        return train_features, train_classes
class PenDigitData(DataLoader):
    """Loader for the UCI pen-based handwritten digits dataset."""

    def __init__(self, path="data/digits/pendigits.csv", verbose=False, seed=1):
        super().__init__(path, verbose, seed)

    def _load_data(self):
        self._data = pd.read_csv(self._path, header=None)

    def class_column_name(self):
        return "16"

    def data_name(self):
        # NOTE(review): "PendDigitData" looks like a typo for "PenDigitData",
        # but the string feeds output file names, so it is preserved as-is.
        return "PendDigitData"

    def _preprocess_data(self):
        # Data is already numeric; nothing to do.
        pass

    def pre_training_adjustment(self, train_features, train_classes):
        """No adjustment needed for this dataset."""
        return train_features, train_classes
class OcrDigitData(DataLoader):
    """Loader for the UCI optical-recognition handwritten digits dataset."""

    def __init__(self, path="data/ocr/optdigits.full", verbose=False, seed=1):
        super().__init__(path, verbose, seed)

    def _load_data(self):
        self._data = pd.read_csv(self._path, header=None)

    def class_column_name(self):
        return "64"

    def data_name(self):
        return "OcrDigitData"

    def _preprocess_data(self):
        # Data is already numeric; nothing to do.
        pass

    def pre_training_adjustment(self, train_features, train_classes):
        """No adjustment needed for this dataset."""
        return train_features, train_classes
class BankMarketingData(DataLoader):
    """Loader for the UCI bank-marketing dataset (bank-additional-full.csv)."""

    def __init__(self, path="data/marketing/bank-additional-full.csv", verbose=False, seed=1):
        super().__init__(path, verbose, seed)

    def _load_data(self):
        self._data = pd.read_csv(self._path, sep=";", header=0)
        # 'duration' is only known after the call ends, so it leaks the target.
        # FIX: positional `drop(labels, 1)` was deprecated and then removed in
        # pandas 2.0; use the explicit columns= keyword instead.
        self._data = self._data.drop(columns=["duration"])

    def class_column_name(self):
        return "y"

    def data_name(self):
        return "BankMarketingData"

    def _preprocess_data(self):
        # One-hot encode the categorical columns.
        to_encode = ["job", "marital", "education", "default", "housing", "loan", "contact", "month", "day_of_week", "poutcome"]
        label_encoder = preprocessing.LabelEncoder()
        one_hot = preprocessing.OneHotEncoder()
        df = self._data[to_encode]
        df = df.apply(label_encoder.fit_transform)
        # https://gist.github.com/ramhiser/982ce339d5f8c9a769a0
        vec_data = pd.DataFrame(one_hot.fit_transform(df[to_encode]).toarray())
        self._data = self._data.drop(to_encode, axis=1)
        self._data = pd.concat([self._data, vec_data], axis=1)
        # Convert yes/no to 1-hot encoding for output
        self._data.y = self._data.y.eq("yes").mul(1)

    def pre_training_adjustment(self, train_features, train_classes):
        """No adjustment needed for this dataset."""
        return train_features, train_classes
class MadelonData(DataLoader):
    # FIX: this was declared with `def`, which created a function named
    # MadelonData (taking DataLoader as an argument) instead of a DataLoader
    # subclass. It remains an unimplemented placeholder.
    pass
class CensusIncomeData(DataLoader):
    """Loader for the UCI adult census income dataset."""

    def __init__(self, path="data/income/adult.csv", verbose=False, seed=1):
        super().__init__(path, verbose, seed)

    def _load_data(self):
        self._data = pd.read_csv(self._path, header=None)
        self._data.columns = [
            "age",
            "employer",
            "fnlwt",
            "edu",
            "edu_num",
            "marital",
            "occupation",
            "relationship",
            "race",
            "sex",
            "cap_gain",
            "cap_loss",
            "hrs",
            "country",
            "income",
        ]
        # Collapse capital gain/loss into a single signed feature.
        self._data["cap_gain_loss"] = self._data["cap_gain"] - self._data["cap_loss"]
        # FIX: positional `drop(labels, 1)` was removed in pandas 2.0.
        self._data = self._data.drop(columns=["fnlwt", "edu", "cap_gain", "cap_loss"])
        self._data["income"] = pd.get_dummies(self._data.income)
        # Group countries into coarse regions to shrink the one-hot space.
        replacements = {
            "Cambodia": " SE-Asia",
            "Canada": " British-Commonwealth",
            "China": " China",
            "Columbia": " South-America",
            "Cuba": " Other",
            "Dominican-Republic": " Latin-America",
            "Ecuador": " South-America",
            "El-Salvador": " South-America ",
            "England": " British-Commonwealth",
            "France": " Euro_1",
            "Germany": " Euro_1",
            "Greece": " Euro_2",
            "Guatemala": " Latin-America",
            "Haiti": " Latin-America",
            "Holand-Netherlands": " Euro_1",
            "Honduras": " Latin-America",
            "Hong": " China",
            "Hungary": " Euro_2",
            "India": " British-Commonwealth",
            "Iran": " Other",
            "Ireland": " British-Commonwealth",
            "Italy": " Euro_1",
            "Jamaica": " Latin-America",
            "Japan": " Other",
            "Laos": " SE-Asia",
            "Mexico": " Latin-America",
            "Nicaragua": " Latin-America",
            "Outlying-US(Guam-USVI-etc)": " Latin-America",
            "Peru": " South-America",
            "Philippines": " SE-Asia",
            "Poland": " Euro_2",
            "Portugal": " Euro_2",
            "Puerto-Rico": " Latin-America",
            "Scotland": " British-Commonwealth",
            "South": " Euro_2",
            "Taiwan": " China",
            "Thailand": " SE-Asia",
            "Trinadad&Tobago": " Latin-America",
            "United-States": " United-States",
            "Vietnam": " SE-Asia",
            "Yugoslavia": " Euro_2",
        }
        self._data["country"] = self._data["country"].str.strip()
        self._data = self._data.replace(
            to_replace={
                "country": replacements,
                "employer": {" Without-pay": " Never-worked"},
                "relationship": {" Husband": "Spouse", " Wife": "Spouse"},
            }
        )
        self._data["country"] = self._data["country"].str.strip()
        # Strip stray whitespace from all remaining categorical columns.
        for col in [
            "employer",
            "marital",
            "occupation",
            "relationship",
            "race",
            "sex",
            "country",
        ]:
            self._data[col] = self._data[col].str.strip()
        self._data = pd.get_dummies(self._data)
        self._data = self._data.rename(columns=lambda x: x.replace("-", "_"))

    def class_column_name(self):
        # NOTE(review): columns are renamed in _load_data, so "16" does not
        # match any column here — looks copied from another loader; verify.
        return "16"

    def data_name(self):
        return "CensusIncomeData"

    def _preprocess_data(self):
        # https://www.ritchieng.com/machinelearning-one-hot-encoding/
        # (a dead `to_encode = list(range(...))` that was immediately
        # overwritten has been removed)
        to_encode = ["income"]
        label_encoder = preprocessing.LabelEncoder()
        one_hot = preprocessing.OneHotEncoder()
        df = self._data[to_encode]
        df = df.apply(label_encoder.fit_transform)
        # https://gist.github.com/ramhiser/982ce339d5f8c9a769a0
        vec_data = pd.DataFrame(one_hot.fit_transform(df[to_encode]).toarray())
        self._data = self._data.drop(to_encode, axis=1)
        self._data = pd.concat([self._data, vec_data], axis=1)

    def pre_training_adjustment(self, train_features, train_classes):
        """No adjustment needed for this dataset."""
        return train_features, train_classes
class MushroomData(DataLoader):
    """Loader for the UCI mushroom (agaricus-lepiota) dataset."""

    def __init__(
        self, path="data/mushroom/agaricus-lepiota.data", verbose=False, seed=1
    ):
        super().__init__(path, verbose, seed)

    def _load_data(self):
        self._data = pd.read_csv(self._path, header=None)

    def class_column_name(self):
        return "0"

    def data_name(self):
        return "MushroomData"

    def _preprocess_data(self):
        # One-hot encode every column (all attributes are categorical).
        # https://www.ritchieng.com/machinelearning-one-hot-encoding/
        to_encode = list(range(len(self._data.columns)))
        label_encoder = preprocessing.LabelEncoder()
        one_hot = preprocessing.OneHotEncoder()
        encoded = self._data[to_encode].apply(label_encoder.fit_transform)
        # https://gist.github.com/ramhiser/982ce339d5f8c9a769a0
        vec_data = pd.DataFrame(one_hot.fit_transform(encoded[to_encode]).toarray())
        self._data = pd.concat(
            [self._data.drop(to_encode, axis=1), vec_data], axis=1
        )
        # Drop rows containing '?' in the unencoded column.
        clean_rows = (self._data[[10]] != "?").all(axis=1)
        self._data = self._data[clean_rows]

    def pre_training_adjustment(self, train_features, train_classes):
        """No adjustment needed for this dataset."""
        return train_features, train_classes
class WineData(DataLoader):
def __init__(self, path="data/wine/winequality-white.csv", verbose=False, seed=1):
super().__init__(path, verbose, seed)
def _load_data(self):
self._data = pd.read_csv(self._path, header=0)
def | |
import json
import os
import pymzml
import pandas as pd
import numpy as np
from tqdm import tqdm
from matchms.importing import load_from_mgf
from pyteomics import mzxml, mzml
import logging
logger = logging.getLogger('msql_fileloading')
def load_data(input_filename, cache=False):
    """
    Load MS data from a file into (ms1_df, ms2_df) pandas DataFrames.

    Args:
        input_filename (str): path to the data file; the format is dispatched
            on the (case-insensitive) extension: .mzML, .mzXML, .json (GNPS
            export), .mgf, .txt/.dat.
        cache (bool, optional): read/write feather caches next to the input.
            Defaults to False.

    Returns:
        tuple: (ms1_df, ms2_df) one-row-per-peak DataFrames.

    Raises:
        Exception: if the file extension is not recognised.
    """
    if cache:
        ms1_filename = input_filename + "_ms1.msql.feather"
        ms2_filename = input_filename + "_ms2.msql.feather"
        if os.path.exists(ms1_filename) or os.path.exists(ms2_filename):
            # FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
            # narrow to Exception (a missing/corrupt cache yields an empty df).
            try:
                ms1_df = pd.read_feather(ms1_filename)
            except Exception:
                ms1_df = pd.DataFrame()
            try:
                ms2_df = pd.read_feather(ms2_filename)
            except Exception:
                ms2_df = pd.DataFrame()
            return ms1_df, ms2_df
    # Actually loading — dispatch on a lower-cased name.
    lower_name = input_filename.lower()
    if lower_name.endswith(".mzml"):
        # Faster pyteomics-based loader (supports ion mobility).
        ms1_df, ms2_df = _load_data_mzML_pyteomics(input_filename)
    elif lower_name.endswith(".mzxml"):
        ms1_df, ms2_df = _load_data_mzXML(input_filename)
    elif lower_name.endswith(".json"):
        # FIX: the .json check was case-sensitive, unlike every other branch.
        ms1_df, ms2_df = _load_data_gnps_json(input_filename)
    elif lower_name.endswith(".mgf"):
        ms1_df, ms2_df = _load_data_mgf(input_filename)
    elif lower_name.endswith((".txt", ".dat")):
        ms1_df, ms2_df = _load_data_txt(input_filename)
    else:
        print("Cannot Load File Extension")
        raise Exception("File Format Not Supported")
    # Saving Cache
    if cache:
        ms1_filename = input_filename + "_ms1.msql.feather"
        ms2_filename = input_filename + "_ms2.msql.feather"
        if not (os.path.exists(ms1_filename) or os.path.exists(ms2_filename)):
            # Caching is best-effort; never fail the load because of it.
            try:
                ms1_df.to_feather(ms1_filename)
            except Exception:
                pass
            try:
                ms2_df.to_feather(ms2_filename)
            except Exception:
                pass
    return ms1_df, ms2_df
def _load_data_mgf(input_filename):
    """Load an MGF file into (ms1_df, ms2_df) peak DataFrames.

    All spectra in an MGF are treated as MS2; ms1_df is a one-row placeholder
    built from the last peak seen (mirrors the other loaders' convention).
    """
    file = load_from_mgf(input_filename)
    ms2mz_list = []
    peak_dict = {}
    for spectrum_index, spectrum in enumerate(file):
        if len(spectrum.peaks.mz) == 0:
            continue
        mz_list = list(spectrum.peaks.mz)
        i_list = list(spectrum.peaks.intensities)
        i_max = max(i_list)
        i_sum = sum(i_list)
        for peak_index in range(len(mz_list)):
            if i_list[peak_index] == 0:
                continue
            peak_dict = {}
            peak_dict["i"] = i_list[peak_index]
            peak_dict["i_norm"] = i_list[peak_index] / i_max
            peak_dict["i_tic_norm"] = i_list[peak_index] / i_sum
            peak_dict["mz"] = mz_list[peak_index]
            # Handling malformed mgf files
            try:
                peak_dict["scan"] = spectrum.metadata["scans"]
            except Exception:
                # FIX: the fallback previously used the *peak* loop index
                # (which shadowed the enumerate index), recording the peak
                # position instead of the spectrum's 1-based ordinal.
                peak_dict["scan"] = spectrum_index + 1
            try:
                peak_dict["rt"] = float(spectrum.metadata["rtinseconds"]) / 60
            except Exception:
                peak_dict["rt"] = 0
            try:
                peak_dict["precmz"] = float(spectrum.metadata["pepmass"][0])
            except Exception:
                peak_dict["precmz"] = 0
            peak_dict["ms1scan"] = 0
            peak_dict["charge"] = 1  # TODO: Add Charge Correctly here
            peak_dict["polarity"] = 1  # TODO: Add Polarity Correctly here
            ms2mz_list.append(peak_dict)
    # Turning into pandas data frames
    # FIX: an input with no usable spectra previously raised NameError here.
    ms1_df = pd.DataFrame([peak_dict]) if peak_dict else pd.DataFrame()
    ms2_df = pd.DataFrame(ms2mz_list)
    return ms1_df, ms2_df
def _load_data_gnps_json(input_filename):
    """Load a GNPS JSON spectral-library export into (ms1_df, ms2_df).

    Each library entry becomes a set of MS2 peak rows; ms1_df collects one
    placeholder row per spectrum (the last peak of each).
    """
    all_spectra = json.loads(open(input_filename).read())
    ms1_df_list = []
    ms2_df_list = []
    for spectrum in tqdm(all_spectra):
        # Skipping spectra bigger than 1MB of peaks (string-length heuristic)
        if len(spectrum["peaks_json"]) > 1000000:
            continue
        peaks = json.loads(spectrum["peaks_json"])
        peaks = [peak for peak in peaks if peak[1] > 0]
        if len(peaks) == 0:
            continue
        i_max = max([peak[1] for peak in peaks])
        i_sum = sum([peak[1] for peak in peaks])
        if i_max == 0:
            continue
        ms2mz_list = []
        for peak in peaks:
            peak_dict = {}
            peak_dict["i"] = peak[1]
            peak_dict["i_norm"] = peak[1] / i_max
            peak_dict["i_tic_norm"] = peak[1] / i_sum
            peak_dict["mz"] = peak[0]
            peak_dict["scan"] = spectrum["spectrum_id"]
            peak_dict["rt"] = 0
            peak_dict["precmz"] = float(spectrum["Precursor_MZ"])
            peak_dict["ms1scan"] = 0
            peak_dict["charge"] = 1  # TODO: Add Charge Correctly here
            peak_dict["polarity"] = 1  # TODO: Add Polarity Correctly here
            ms2mz_list.append(peak_dict)
        # Turning into pandas data frames
        if len(ms2mz_list) > 0:
            ms2_df = pd.DataFrame(ms2mz_list)
            ms2_df_list.append(ms2_df)
        ms1_df = pd.DataFrame([peak_dict])
        ms1_df_list.append(ms1_df)
    # Merging
    # FIX: pd.concat raises ValueError on an empty list; an empty/degenerate
    # input file now yields empty DataFrames like the other loaders.
    ms1_df = pd.concat(ms1_df_list).reset_index() if ms1_df_list else pd.DataFrame()
    ms2_df = pd.concat(ms2_df_list).reset_index() if ms2_df_list else pd.DataFrame()
    return ms1_df, ms2_df
def _load_data_mzXML(input_filename):
    """Parse an mzXML file into (ms1_df, ms2_df), one row per peak."""
    ms1_rows = []
    ms2_rows = []
    last_ms1_scan = 0
    with mzxml.read(input_filename) as reader:
        for spectrum in tqdm(reader):
            if len(spectrum["intensity array"]) == 0:
                continue
            if not "m/z array" in spectrum:
                # This is not a mass spectrum
                continue
            mz_values = list(spectrum["m/z array"])
            intensities = list(spectrum["intensity array"])
            max_intensity = max(intensities)
            tic = sum(intensities)
            level = spectrum["msLevel"]
            if level == 1:
                for mz_value, intensity in zip(mz_values, intensities):
                    ms1_rows.append(
                        {
                            "i": intensity,
                            "i_norm": intensity / max_intensity,
                            "i_tic_norm": intensity / tic,
                            "mz": mz_value,
                            "scan": spectrum["id"],
                            "rt": spectrum["retentionTime"],
                            "polarity": _determine_scan_polarity_mzXML(spectrum),
                        }
                    )
                last_ms1_scan = spectrum["id"]
            if level == 2:
                precursor = spectrum["precursorMz"][0]
                precursor_mz = precursor["precursorMz"]
                precursor_charge = 0
                if "precursorCharge" in precursor:
                    precursor_charge = precursor["precursorCharge"]
                for mz_value, intensity in zip(mz_values, intensities):
                    ms2_rows.append(
                        {
                            "i": intensity,
                            "i_norm": intensity / max_intensity,
                            "i_tic_norm": intensity / tic,
                            "mz": mz_value,
                            "scan": spectrum["id"],
                            "rt": spectrum["retentionTime"],
                            "precmz": precursor_mz,
                            "ms1scan": last_ms1_scan,
                            "charge": precursor_charge,
                            "polarity": _determine_scan_polarity_mzXML(spectrum),
                        }
                    )
    # Turning into pandas data frames
    return pd.DataFrame(ms1_rows), pd.DataFrame(ms2_rows)
def _determine_scan_polarity_mzML(spec):
"""
Gets an enum for positive and negative polarity, for pymzml
Args:
spec ([type]): [description]
Returns:
[type]: [description]
"""
polarity = 0
negative_polarity = spec["negative scan"]
if negative_polarity is True:
polarity = 2
positive_polarity = spec["positive scan"]
if positive_polarity is True:
polarity = 1
return polarity
def _determine_scan_polarity_pyteomics_mzML(spec):
"""
Gets an enum for positive and negative polarity, for pyteomics
Args:
spec ([type]): [description]
Returns:
[type]: [description]
"""
polarity = 0
if "negative scan" in spec:
polarity = 2
if "positive scan" in spec:
polarity = 1
return polarity
def _determine_scan_polarity_mzXML(spec):
polarity = 0
if spec["polarity"] == "+":
polarity = 1
if spec["polarity"] == "-":
polarity = 2
return polarity
def _load_data_mzML_pyteomics(input_filename):
    """
    Loading operation using pyteomics, to help with loading mzML files with ion mobility.

    Accumulates one row per peak into parallel column lists, then assembles
    two DataFrames. MS1 rows carry (i, i_norm, i_tic_norm, mz, scan, rt,
    polarity); MS2 rows additionally carry the precursor m/z, the id of the
    preceding MS1 scan, the charge and, when present on every MS2 peak, the
    ion mobility.

    Args:
        input_filename: path to the mzML file.

    Returns:
        (ms1_df, ms2_df) pandas DataFrames; either may be empty.
    """
    previous_ms1_scan = 0
    # MS1 column accumulators (kept parallel, one entry per peak)
    all_mz = []
    all_rt = []
    all_polarity = []
    all_i = []
    all_i_norm = []
    all_i_tic_norm = []
    all_scan = []
    # MS2 column accumulators
    all_msn_mz = []
    all_msn_rt = []
    all_msn_polarity = []
    all_msn_i = []
    all_msn_i_norm = []
    all_msn_i_tic_norm = []
    all_msn_scan = []
    all_msn_precmz = []
    all_msn_ms1scan = []
    all_msn_charge = []
    all_msn_mobility = []
    with mzml.read(input_filename) as reader:
        for spectrum in tqdm(reader):
            # Skip scans with no peaks at all
            if len(spectrum["intensity array"]) == 0:
                continue
            # Getting the RT (default to 0 when the scan carries none)
            try:
                rt = spectrum["scanList"]["scan"][0]["scan start time"]
            except:
                rt = 0
            # Correcting the unit: RT is normalized to minutes
            try:
                if spectrum["scanList"]["scan"][0]["scan start time"].unit_info == "second":
                    rt = rt / 60
            except:
                pass
            # Parse the numeric scan index out of ids such as
            # "... scan=123" or "scanId=123"
            scan = int(spectrum["id"].replace("scanId=", "").split("scan=")[-1])
            if not "m/z array" in spectrum:
                # This is not a mass spectrum
                continue
            mz = spectrum["m/z array"]
            intensity = spectrum["intensity array"]
            # Per-scan normalization factors (base peak and TIC)
            i_max = max(intensity)
            i_sum = sum(intensity)
            # If there is no ms level, its likely an UV/VIS spectrum and we can skip
            if not "ms level" in spectrum:
                continue
            mslevel = spectrum["ms level"]
            if mslevel == 1:
                # Append this scan's peaks column-wise; scalar metadata is
                # broadcast to one entry per peak.
                all_mz += list(mz)
                all_i += list(intensity)
                all_i_norm += list(intensity / i_max)
                all_i_tic_norm += list(intensity / i_sum)
                all_rt += len(mz) * [rt]
                all_scan += len(mz) * [scan]
                all_polarity += len(mz) * [_determine_scan_polarity_pyteomics_mzML(spectrum)]
                # Remember this scan so following MS2 scans can link back to it
                previous_ms1_scan = scan
            if mslevel == 2:
                msn_mz = spectrum["precursorList"]["precursor"][0]["selectedIonList"]["selectedIon"][0]["selected ion m/z"]
                msn_charge = 0
                if "charge state" in spectrum["precursorList"]["precursor"][0]["selectedIonList"]["selectedIon"][0]:
                    msn_charge = int(spectrum["precursorList"]["precursor"][0]["selectedIonList"]["selectedIon"][0]["charge state"])
                all_msn_mz += list(mz)
                all_msn_i += list(intensity)
                all_msn_i_norm += list(intensity / i_max)
                all_msn_i_tic_norm += list(intensity / i_sum)
                all_msn_rt += len(mz) * [rt]
                all_msn_scan += len(mz) * [scan]
                all_msn_polarity += len(mz) * [_determine_scan_polarity_pyteomics_mzML(spectrum)]
                all_msn_precmz += len(mz) * [msn_mz]
                all_msn_ms1scan += len(mz) * [previous_ms1_scan]
                all_msn_charge += len(mz) * [msn_charge]
                # Ion mobility is only appended when present, so the mobility
                # column can end up shorter than the others (see check below)
                if "product ion mobility" in spectrum["precursorList"]["precursor"][0]["selectedIonList"]["selectedIon"][0]:
                    mobility = spectrum["precursorList"]["precursor"][0]["selectedIonList"]["selectedIon"][0]["product ion mobility"]
                    all_msn_mobility += len(mz) * [mobility]
    ms1_df = pd.DataFrame()
    if len(all_mz) > 0:
        ms1_df['i'] = all_i
        ms1_df['i_norm'] = all_i_norm
        ms1_df['i_tic_norm'] = all_i_tic_norm
        ms1_df['mz'] = all_mz
        ms1_df['scan'] = all_scan
        ms1_df['rt'] = all_rt
        ms1_df['polarity'] = all_polarity
    ms2_df = pd.DataFrame()
    if len(all_msn_mz) > 0:
        ms2_df['i'] = all_msn_i
        ms2_df['i_norm'] = all_msn_i_norm
        ms2_df['i_tic_norm'] = all_msn_i_tic_norm
        ms2_df['mz'] = all_msn_mz
        ms2_df['scan'] = all_msn_scan
        ms2_df['rt'] = all_msn_rt
        ms2_df["polarity"] = all_msn_polarity
        ms2_df["precmz"] = all_msn_precmz
        ms2_df["ms1scan"] = all_msn_ms1scan
        ms2_df["charge"] = all_msn_charge
        # Only attach mobility when every MS2 peak had one, i.e. the column
        # lines up with the others; otherwise it is silently dropped.
        if len(all_msn_mobility) == len(all_msn_i):
            ms2_df["mobility"] = all_msn_mobility
    return ms1_df, ms2_df
def _load_data_mzML2(input_filename):
"""This is a faster loading version, but a bit more memory intensive
Args:
input_filename ([type]): [description]
Returns:
[type]: [description]
"""
MS_precisions = {
1: 5e-6,
2: 20e-6,
3: 20e-6,
4: 20e-6,
5: 20e-6,
6: 20e-6,
7: 20e-6,
}
run = pymzml.run.Reader(input_filename, MS_precisions=MS_precisions)
previous_ms1_scan = 0
# MS1
all_mz = []
all_rt = []
all_polarity = []
all_i = []
all_i_norm = []
all_i_tic_norm = []
all_scan = []
# MS2
all_msn_mz = []
all_msn_rt = []
all_msn_polarity = []
all_msn_i = []
all_msn_i_norm = []
all_msn_i_tic_norm = []
all_msn_scan = []
all_msn_precmz = []
all_msn_ms1scan = []
all_msn_charge = []
for i, spec in tqdm(enumerate(run)):
# Getting RT
rt = spec.scan_time_in_minutes()
# Getting peaks
peaks = spec.peaks("raw")
# Filtering out zero rows
peaks = peaks[~np.any(peaks < 1.0, axis=1)]
if spec.ms_level == 2:
if len(peaks) > 1000:
# Sorting by intensity
| |
# can extract only required terms..
# -- but has to handle terms like E(eps D^beta eps) which should not be replace and in tre..
rhs = rhs.subs(closed_subs_tree)
# -3- Closure
rhs = rhs.subs(self.internal_closure)
return rhs
@property
def unclosed_terms(self):
unclosed_terms = set()
# -1- Search in variance/metric system
systems = [self.variance_system, self.metric_system]
for system in systems:
for equation in system:
unclosed_terms.update(equation.args[1].atoms(Expectation))
# -2- Eliminates Expectation(field)
for field,mfield in self.fields.items():
unclosed_terms.discard(Expectation(mfield.random))
return unclosed_terms
class UnivariateTerm(object):
    """
    Handle terms in E[ D^alpha \\eps D^beta \\eps]
    """
    """ ..todo :: details what is univariate terms
    Univariate terms are of the form E(Dx^alpha eps Dx^beta eps): the
    expectation of a product of at most two derivatives of a single
    function eps.
    """

    def __init__(self, term, function, alpha, beta, degree):
        self.term = term            # the original Expectation expression
        self.function = function    # the single function under the derivatives
        self.alpha = alpha          # derivative variables of the first factor (0 when underived)
        self.beta = beta            # derivative variables of the second factor
        self.degree = degree        # total derivative order |alpha| + |beta|

    def __repr__(self):
        return f"Univariate term: {self.function},{self.alpha}, {self.beta}, {self.degree}"

    @classmethod
    def is_univariate(cls, term):
        """Return a UnivariateTerm when `term` matches a supported pattern, else None.

        Supported patterns: E[eps D^beta eps], E[(D^alpha eps)**2] and
        E[D^alpha eps D^beta eps]. Powers higher than 2 are rejected.
        """
        # Get derivatives
        derivatives = term.atoms(Derivative)
        # Get the functions appearing under the derivatives
        functions = set()
        for derivative in derivatives:
            functions.update(derivative.atoms(Function))
        if len(functions) != 1:
            # Not univariate: zero or several distinct functions involved
            return None
        function = functions.pop()
        # NOTE: comparisons below use structural equality (==) rather than
        # identity (is): `is` on freshly built sympy expressions only holds
        # thanks to sympy's expression cache and breaks when the cache is
        # disabled (SYMPY_USE_CACHE=no).
        if len(derivatives) == 1:  # E[eps D^beta eps] or E[(D^alpha eps)**k]
            derivative = derivatives.pop()
            if term == Expectation(function * derivative):  # E[eps D^beta eps]
                alpha = 0
                beta = derivative.args[1:]
                degree = derivative.derivative_count
                return cls(term, function, alpha, beta, degree)
            elif term == Expectation(derivative * derivative):  # E[(D^alpha eps)**2]
                alpha = derivative.args[1:]
                beta = derivative.args[1:]
                degree = 2 * derivative.derivative_count
                return cls(term, function, alpha, beta, degree)
            else:
                # E[(D^alpha eps)**k] with k > 2: unsupported
                return None
        elif len(derivatives) == 2:
            if term == Expectation(Mul(*derivatives)):
                # -1- Compute the total degree
                degree = sum(derivative.derivative_count for derivative in derivatives)
                # -2- Extract the two derivatives; sort for a deterministic
                # alpha/beta assignment (sets have no stable iteration order).
                alpha, beta = sorted(derivatives, key=str)
                alpha = alpha.args[1:]
                beta = beta.args[1:]
                return cls(term, function, alpha, beta, degree)
            else:
                return None
        else:
            return None
def gamma_def(epsilon, k, i, j):
    """Defining expression for the gamma term: E[(d_k eps) * (d_i d_j eps)]."""
    first_factor = Derivative(epsilon, k)
    second_factor = Derivative(epsilon, i, j)
    return Expectation(first_factor * second_factor)
def gamma_subs(metric, k, i, j):
    """Closure of the gamma term in function of the metric:
    (d_i g_kj + d_j g_ik - d_k g_ij) / 2 (Christoffel-like combination)."""
    combination = (Derivative(metric(k, j), i)
                   + Derivative(metric(i, k), j)
                   - Derivative(metric(i, j), k))
    return Rational(1, 2) * combination
def skewness_def(epsilon, k, i, j):
    """Defining expression for the skewness term: E[eps * (d_k d_i d_j eps)]."""
    third_derivative = Derivative(epsilon, k, i, j)
    return Expectation(epsilon * third_derivative)
def skewness_subs(metric, k, i, j):
    """Closure of the skewness term in function of the metric:
    -(d_i g_kj + d_j g_ik + d_k g_ij) / 2."""
    combination = (Derivative(metric(k, j), i)
                   + Derivative(metric(i, k), j)
                   + Derivative(metric(i, j), k))
    return -Rational(1, 2) * combination
class Field(object):
    """Bundle a deterministic sympy field with its associated random fields
    and statistics (variance, std, metric/diffusion/aspect tensors, trends)."""

    def __init__(self, field):
        self.value = field
        # LaTeX-free identifier used to label all derived symbols
        self.code = clean_latex_name(field.func)
        self.coordinates = field.args
        # Spatial coordinates only (time excluded)
        self.spatial_coordinates = tuple([coord for coord in self.coordinates if coord is not time_symbol])
        self.coords_code = tuple(clean_latex_name(coord) for coord in self.coordinates)
        self.spatial_coords_code = tuple(clean_latex_name(coord) for coord in self.spatial_coordinates)
        # Associated random fields (extra argument `omega` marks randomness)
        self.random = Function(str(field.func))(*field.args, omega)
        self.epsilon = Function('{\\varepsilon_{' + self.code + '}}')(*self.coordinates, omega)
        self.error = Function('{e_{' + self.code + '}}')(*self.coordinates, omega)
        # Associated statistics
        # -- Variance field
        self.variance = Function('{V_{' + self.code + '}}', positive=True)(*self.coordinates)
        # -- Standard deviation field
        self.std = Function('{\\sigma_{' + self.code + '}}', positive=True)(*self.coordinates)
        # -- Tensor fields (square matrices over the spatial coordinates,
        #    entries generated element-wise by the *_func factories below)
        shape_tensor = 2 * (len(self.spatial_coordinates),)
        self.metric = Matrix(*shape_tensor, self.metric_func)
        self.diffusion = Matrix(*shape_tensor, self.nu_func)
        self.aspect = Matrix(*shape_tensor, self.aspect_func)
        # --trends: time derivatives of each associated object
        self.trends = {
            'field':Derivative(self.value,time_symbol),
            'variance':Derivative(self.variance,time_symbol),
            'error': Derivative(self.error, time_symbol),
            'epsilon': Derivative(self.epsilon, time_symbol),
            'metric': Derivative(self.metric, time_symbol),
            'diffusion': Derivative(self.diffusion, time_symbol),
            'aspect': Derivative(self.aspect, time_symbol),
        }
        # Substitution tree for univariate terms E[D^a eps D^b eps]
        self.subs_tree = UnivariateTree(self.epsilon, self.spatial_coordinates)
        # Lazily built by the internal_closure property
        self._internal_closure = None

    @property
    def internal_closure(self):
        """Default substitution dict closing the moments of `error`/`epsilon`
        (centered, normalized, flat correlation, metric labeling, skewness).
        Built once and cached."""
        if self._internal_closure is None:
            # Computation of the default internal_closure
            closure = {}
            # -0- error is centered
            closure.update({Expectation(self.error): Integer(0)})
            closure.update({Expectation(self.error ** Integer(2)): self.variance})
            # -1- epsilon is centered
            closure.update({Expectation(self.epsilon): Integer(0)})
            # -2- epsilon is normalized
            closure.update({Expectation(self.epsilon ** Integer(2)): Integer(1)})
            # -3- correlation is flat
            closure.update({Expectation(self.epsilon * Derivative(self.epsilon, coord)): Integer(0)
                            for coord in self.spatial_coordinates})
            # -4- metric is labeled (upper-triangular pairs only: g is symmetric)
            closure.update({
                -self.metric_definition(i, j): -self.metric_func(i, j)
                for i in range(len(self.spatial_coordinates)) for j in range(i, len(self.spatial_coordinates))}
            )
            # -5- skewness is function of the metric
            skewness_closure = {}
            metric_func = lambda xi, xj: self.metric_func(self.spatial_coordinates.index(xi),
                                                          self.spatial_coordinates.index(xj))
            for partial_order in PartialOrderDerivative.all_of_degree(self.spatial_coordinates, 3):
                skewness_closure[skewness_def(self.epsilon, *partial_order.as_sequence)] = skewness_subs(metric_func,
                                                                                                        *partial_order.as_sequence)
            closure.update(skewness_closure)
            self._internal_closure = closure
        return self._internal_closure

    def index_code(self,i,j):
        """Symmetric (i, j) -> coordinate-code pair, e.g. 'xy' (order-insensitive)."""
        if j<i:
            i,j = j,i
        return self.spatial_coords_code[i] + self.spatial_coords_code[j]

    def metric_func(self,i,j):
        """Labeled metric entry g_{field,ij} as a real function of the coordinates."""
        return Function('{g_{' + self.code + ',' + self.index_code(i, j) + '}}', real=True)(*self.coordinates)

    def metric_definition(self,i,j):
        """Metric entry defined from epsilon: -E[eps * d_i d_j eps]."""
        return -Expectation(self.epsilon * Derivative(self.epsilon, self.spatial_coordinates[i], self.spatial_coordinates[j]))

    def nu_func(self,i,j):
        """Labeled diffusion entry nu_{field,ij} as a real function of the coordinates."""
        return Function('{\\nu_{' + self.code + ',' + self.index_code(i, j) + '}}', real=True)(
            *self.coordinates)

    def aspect_func(self,i,j):
        """Labeled aspect entry s_{field,ij} as a real function of the coordinates."""
        return Function('{s_{' + self.code + ',' + self.index_code(i, j) + '}}', real=True)(
            *self.coordinates)
class PartialOrderDerivative(object):
    """ Handler for partial order derivatives.

    A derivative order is stored internally as a tuple of non-negative ints
    aligned with `coordinates`; for coordinates (x, y, z), the tuple
    (2, 0, 1) stands for d^3 / (dx^2 dz).

    The constructor accepts three equivalent input formats:
      - tuple:    (2, 0, 1)
      - couples:  (x, 2) or [(x, 2), (z, 1)]
      - sequence: [x, x, z]   (the format used by sympy's Derivative)
    """

    def __init__(self, coordinates, partial_orders):
        self._coordinates = coordinates
        self._as_tuple = self._builder(partial_orders)
        self._degree = sum(self._as_tuple)

    @property
    def coordinates(self):
        return self._coordinates

    @property
    def as_tuple(self):
        """Orders aligned with the coordinates, e.g. (2, 0, 1)."""
        return self._as_tuple

    @property
    def as_couples(self):
        """List of (coordinate, order) couples, one per coordinate."""
        return [(coord, order) for coord, order in zip(self._coordinates, self._as_tuple)]

    @property
    def as_sequence(self):
        """Coordinates repeated by their order, e.g. [x0, x1, x1]."""
        sequence = []
        for x_i, alpha_i in zip(self._coordinates, self._as_tuple):
            if alpha_i > 0:
                sequence += alpha_i * [x_i]
        return sequence

    def _builder(self, partial_orders):
        """Normalize any accepted input format into the internal tuple form.

        Raises:
            TypeError: when `partial_orders` matches no supported format.
        """
        builder_names = ['tuple', 'couples', 'sequence']
        test = [getattr(self, 'is_as_' + name)(partial_orders) for name in builder_names]
        if any(test):
            name = builder_names[test.index(True)]
            return getattr(self, '_set_from_' + name)(partial_orders)
        raise TypeError(f"{partial_orders} is not a valid derivative order for coordinates {self._coordinates}")

    def is_as_tuple(self, partial_orders):
        """True when input is a tuple of ints, one order per coordinate."""
        # Check the type first: unsized inputs must not blow up in len().
        if not isinstance(partial_orders, tuple):
            return False
        if len(partial_orders) != len(self._coordinates):
            return False
        return all(isinstance(alpha_i, int) for alpha_i in partial_orders)

    def is_as_couples(self, partial_orders):
        """True for a single (coordinate, order) couple or a list of couples."""
        # Can not be empty (ie can not be []): [] is the empty sequence format
        if partial_orders == []:
            return False
        try:
            # can be a simple couple (x_i, alpha_i)
            if isinstance(partial_orders, tuple):
                xi, alpha_i = partial_orders
                return xi in self._coordinates and isinstance(alpha_i, int)
            # can be a list of couples [.. (x_i, alpha_i) ..]
            if isinstance(partial_orders, list):
                return all(xi in self._coordinates and isinstance(alpha_i, int)
                           for xi, alpha_i in partial_orders)
        except (TypeError, ValueError):
            # Wrong arity or type while unpacking: not the couples format.
            # (Was a bare `except:`; narrowed so real errors propagate.)
            return False
        # Neither a tuple nor a list: previously fell through returning None.
        return False

    def is_as_sequence(self, partial_orders):
        """True for a (possibly empty) iterable of coordinates."""
        return all(xi in self._coordinates for xi in partial_orders)

    def _set_from_tuple(self, partial_orders_tuple):
        return partial_orders_tuple

    def _set_from_couples(self, couples):
        orders = len(self._coordinates) * [0]
        # Accept a single couple or a list; repeated coordinates accumulate.
        if isinstance(couples, tuple):
            couples = [couples]
        for xi, alpha_i in couples:
            orders[self._coordinates.index(xi)] += alpha_i
        return tuple(orders)

    def _set_from_sequence(self, sequence):
        return tuple(sequence.count(xi) for xi in self._coordinates)

    def __eq__(self, rhs):
        if not isinstance(rhs, PartialOrderDerivative):
            # Let Python try the reflected comparison instead of raising
            # AttributeError on foreign types.
            return NotImplemented
        return self._as_tuple == rhs._as_tuple and self._coordinates == rhs._coordinates

    def new(self, partial_orders):
        """Build another derivative order over the same coordinates."""
        return PartialOrderDerivative(self._coordinates, partial_orders)

    def copy(self):
        return PartialOrderDerivative(self._coordinates, self._as_tuple)

    @classmethod
    def all_of_degree(cls, coordinates, derivative_order):
        """ Yield every partial derivative order of total degree `derivative_order`.

        Description
        -----------
        The algorithm employs dynamical programming based on sets to avoid duplicate outputs.
        Each generation is computed from the previous by moving one unit of the partial
        order of the first coordinate toward the others, so generation g contains exactly
        the orders whose first coordinate equals `derivative_order - g`.

        Example
        -------
        (order within a generation is set-dependent, hence not deterministic)

        >>> coords = symbols(' '.join(['x' + str(i) for i in range(3)]))
        >>> for index in PartialOrderDerivative.all_of_degree(coords, 2):
        ...     print(index.as_couples)
        [(x0, 2), (x1, 0), (x2, 0)]
        [(x0, 1), (x1, 1), (x2, 0)]
        [(x0, 1), (x1, 0), (x2, 1)]
        [(x0, 0), (x1, 2), (x2, 0)]
        [(x0, 0), (x1, 1), (x2, 1)]
        [(x0, 0), (x1, 0), (x2, 2)]
        """
        import numpy as np
        nb_coordinates = len(coordinates)
        # Start with all the derivative weight on the first coordinate.
        start = np.zeros(nb_coordinates, dtype=int)
        start[0] = derivative_order
        fathers = {tuple(start)}
        generation = 0
        while generation <= derivative_order:
            # Yield the current generation.
            for partial_orders in fathers:
                yield cls(coordinates, tuple(int(order) for order in partial_orders))
            # Compute the next generation: take one unit from coordinate 0
            # and distribute it to each of the other coordinates.
            generation += 1
            sons = set()
            for father in fathers:
                father = np.asarray(father)
                flux = np.zeros(nb_coordinates, dtype=int)
                flux[0] = -1
                for move in range(1, nb_coordinates):
                    flux[move] = 1
                    sons.add(tuple(father + flux))
                    flux[move] = 0
            fathers = sons
class UnivariateTree(object):
""" Compute and handle subtitution tree for terms E[D^alpha epsilon D^beta epsilon]
of degree |alpha|+|beta|<= max_degree
Description
-----------
Dynamical structure that return the tree at a given | |
import pytest
import sqlparse
from typing import Tuple, List, Optional
from unittest.mock import patch, MagicMock
from sql_translate.engine import global_translation
from sql_translate import utils
# Module-level translator instance. NOTE: most tests below create their own
# fresh GlobalHiveToPresto and shadow this name locally.
GHTP = global_translation.GlobalHiveToPresto()
def test_create_parent() -> None:
    """Smoke test: the parent translator class can be instantiated."""
    global_translation._GlobalTranslator()
@patch('sql_translate.utils.protect_regex_curly_brackets', side_effect=lambda x: x)
def test_translate_query(mock_protect_regex_curly_brackets: MagicMock) -> None:
    """With every translation pass stubbed to the identity, translate_query
    returns the input query unchanged (checks the pipeline wiring only)."""
    translator = global_translation.GlobalHiveToPresto()
    identity_passes = (
        '_remove_dollar_sign',
        '_replace_double_quotes',
        '_replace_back_ticks',
        '_add_double_quotes',
        '_increment_array_indexes',
        '_cast_divisions_to_double',
        '_fix_rlike_calls',
        '_fix_lateral_view_explode_calls',
        '_fix_interval_formatting',
        '_fix_aliasing_on_broadcasting',
    )
    for pass_name in identity_passes:
        setattr(translator, pass_name, MagicMock(side_effect=lambda x: x))
    translator.gbt.fix_group_by_calls = MagicMock(side_effect=lambda x: x)
    assert translator.translate_query("select * from db.table") == "select * from db.table"
@pytest.mark.parametrize("query, expected", [
    ("", ""),
    ('select "a" from b', "select 'a' from b"),  # Would be surrounded ``
    ('RIGHT JOIN db.table b', 'RIGHT JOIN db.table b')
])
def test_replace_double_quotes(query: str, expected: str) -> None:
    """Double-quoted string literals become single-quoted."""
    translator = global_translation.GlobalHiveToPresto()
    result = translator._replace_double_quotes(query)
    assert result == expected
@pytest.mark.parametrize("query, expected", [
    ("", ""),
    ('select `a b` from b', 'select "a b" from b')
])
def test_replace_back_ticks(query: str, expected: str) -> None:
    """Backtick-quoted identifiers become double-quoted."""
    translator = global_translation.GlobalHiveToPresto()
    result = translator._replace_back_ticks(query)
    assert result == expected
@pytest.mark.parametrize("query, expected", [
    ("select 1, 11, 1.1, '1' from cte", "select 1, 11, 1.1, '1' from cte"),
    ("select a 7day from cte", 'select a "7day" from cte'),
    ("select a hel8lo from cte", "select a hel8lo from cte"),
    ("select 123a from 18mo", 'select "123a" from "18mo"'),
    ("select case when coalesce(sth_123_bal, 0) > 0 then 1 else 0 end as 123_flag from cte",
     'select case when coalesce(sth_123_bal, 0) > 0 then 1 else 0 end as "123_flag" from cte')
])
def test_add_double_quotes(query: str, expected: str) -> None:
    """Identifiers starting with a digit get double-quoted; plain numbers
    and regular identifiers are left alone."""
    translator = global_translation.GlobalHiveToPresto()
    result = translator._add_double_quotes(query)
    assert result == expected
@pytest.mark.parametrize("query, expected", [
    ("", ""),
    ('select ab from ${b}', 'select ab from {b}'),
    ('select regex_like(a, "abc$") from b', 'select regex_like(a, "abc$") from b')
])
def test_remove_dollar_sign(query: str, expected: str) -> None:
    """Hive ${var} templating loses the dollar sign; regex dollars survive."""
    translator = global_translation.GlobalHiveToPresto()
    result = translator._remove_dollar_sign(query)
    assert result == expected
@pytest.mark.parametrize("query, expected", [
    ("", ""),
    ("select split(a, '_')[1], split(b, '_')[0] from c", "select split(a, '_')[2], split(b, '_')[1] from c"),
    ("select a from b", "select a from b")
])
def test_increment_array_indexes(query: str, expected: str) -> None:
    """Array subscripts shift from Hive's 0-based to Presto's 1-based."""
    translator = global_translation.GlobalHiveToPresto()
    result = translator._increment_array_indexes(query)
    assert result == expected
@pytest.mark.parametrize("query, expected", [
    ("select 1", "select 1"),
    ("select a/2", "select cast(a AS double)/cast(2 AS double)"),
    ("select count(a/2)/3", "select cast(count(cast(a AS double)/cast(2 AS double)) AS double)/cast(3 AS double)")
])
def test_cast_divisions_to_double(query: str, expected: str) -> None:
    """Both operands of every division are wrapped in cast(... AS double),
    including nested divisions."""
    translator = global_translation.GlobalHiveToPresto()
    result = translator._cast_divisions_to_double(query)
    assert result == expected
@pytest.mark.parametrize("query, expected", [
    ("select a rlike\nb", "select a like\nb"),
    ("select 1", "select 1")
])
def test_fix_rlike_calls(query: str, expected: str) -> None:
    """Hive's rlike operator is rewritten to like."""
    translator = global_translation.GlobalHiveToPresto()
    result = translator._fix_rlike_calls(query)
    assert result == expected
@pytest.mark.parametrize("query, expected", [
    ("""select a from cte where LATERAL VIEW EXPLODE(split(b, ',')) "7day" AS score""",
     f"""select a from cte where CROSS JOIN unnest(split(b, ',')) AS "7day" {utils.function_placeholder}(score)"""),
    ("select 1", "select 1")
])
def test_fix_lateral_view_explode_calls(query: str, expected: str) -> None:
    """LATERAL VIEW EXPLODE becomes CROSS JOIN unnest with the placeholder
    marker around the exploded column."""
    translator = global_translation.GlobalHiveToPresto()
    result = translator._fix_lateral_view_explode_calls(query)
    assert result == expected
@pytest.mark.parametrize("query, expected", [
    ("select 1==1", "select 1=1"),
    ("select 1", "select 1")
])
def test_fix_double_equals(query: str, expected: str) -> None:
    """The non-standard == comparison collapses to a single =."""
    translator = global_translation.GlobalHiveToPresto()
    result = translator._fix_double_equals(query)
    assert result == expected
@pytest.mark.parametrize("query, expected", [
    ("select a from cte where a BETWEEN (CURRENT_DATE - interval '1' as YEAR) AND CURRENT_DATE",
     "select a from cte where a BETWEEN (CURRENT_DATE - interval '1' YEAR) AND CURRENT_DATE"),
    ("select 1", "select 1")
])
def test_fix_interval_formatting(query: str, expected: str) -> None:
    """A spurious 'as' between an interval literal and its unit is dropped."""
    translator = global_translation.GlobalHiveToPresto()
    result = translator._fix_interval_formatting(query)
    assert result == expected
@pytest.mark.parametrize(['query', 'expected'], [
    ("", ""),
    # I. select statements
    # I.1. No issue
    ("select a from cte", "select a from cte"),
    ("select '1' from cte", "select '1' from cte"),  # Corner case: from is a Keyword, not alias!
    ("select '1' as fff from cte", "select '1' as fff from cte"),
    # I.2. Problematic parsing by sqlparse
    ("select '1' sth from cte", "select '1' as sth from cte"),
    ('select "7day" sth from cte', 'select "7day" as sth from cte'),  # Double quotes
    ('select 1 `from` from cte', 'select 1 as `from` from cte'),  # Backticks
    ("select 1 hello from cte", "select 1 as hello from cte"),
    ("select 1.5 alias from cte", "select 1.5 as alias from cte"),
    ("select '1' a, 1 b, 1.5 c from cte", "select '1' as a, 1 as b, 1.5 as c from cte"),
    # II. group by statements
    # II.1. No issue
    ("select a from cte group by a", "select a from cte group by a"),
    ("select a from cte group by '1', '2' order by '1' sth", "select a from cte group by '1', '2' order by '1' as sth"),  # order by is a Keyword
    ("select a from cte group by '1', '2' sth", "select a from cte group by '1', '2' as sth"),  # But order is not
    ("select a from cte group by '1' as sth order \n\t by '1' sth2", "select a from cte group by '1' as sth order \n\t by '1' as sth2"),  # \s+
    # II.2. Problematic parsing
    ("select a from cte group by '1' sth", "select a from cte group by '1' as sth"),
    ('select "7day" sth from cte group by "7day" sth', 'select "7day" as sth from cte group by "7day" as sth'),  # Double quotes
    ('select 1 `from` from cte group by 1 `from`', 'select 1 as `from` from cte group by 1 as `from`'),  # Backticks
    ("select 1 from cte group by 1 sth", "select 1 from cte group by 1 as sth"),
    ("select 1.5 from cte group by 1.5 sth", "select 1.5 from cte group by 1.5 as sth"),
    ("select '1', 1, 1.5 from cte group by '1' a, 1 b, 1.5 c", "select '1', 1, 1.5 from cte group by '1' as a, 1 as b, 1.5 as c"),
])
def test__fix_aliasing_on_broadcasting(query: str, expected: str) -> None:
    """An implicit alias after a broadcast literal gets an explicit `as`,
    working around sqlparse's parsing of literal-then-identifier."""
    GHTP = global_translation.GlobalHiveToPresto()
    assert GHTP._fix_aliasing_on_broadcasting(query) == expected
@pytest.mark.parametrize(['query_section', 'expected'], [
    # INSERT OVERWRITE with a dynamic partition column already selected
    ("with a as (select b, my_column from c) Insert OVERWRITE table d.e PARTITION (my_column) select b from a",
     "INSERT INTO d.e\nwith a as (select b, my_column from c) SELECT\nb,\nmy_column\nfrom a"),
    # Static partition value: the literal is appended to the select list
    ("with a as (select b from c) Insert OVERWRITE table d.e PARTITION (f='g') select b from a",
     "INSERT INTO d.e\nwith a as (select b from c) SELECT\nb,\n'g'\nfrom a"),
    # Quoted schema/table names survive the move
    ("with a as (select b from c) insert into table \"5_6f\".\"d5 6f\" PARTITION (f='g') select b from a",
     "INSERT INTO \"5_6f\".\"d5 6f\"\nwith a as (select b from c) SELECT\nb,\n'g'\nfrom a"),
    ("with a as (select b from c) insert overwrite table \"5_6f\".\"d5 6f\" select b from a",
     "INSERT INTO \"5_6f\".\"d5 6f\"\nwith a as (select b from c) SELECT\nb\nfrom a"),
    # Dynamic partition already present / aliased / missing from the select
    ("with a as (select b from c) Insert OVERWRITE table d.e PARTITION (f) select b, f from a",
     "INSERT INTO d.e\nwith a as (select b from c) SELECT\nb,\nf\nfrom a"),
    ("with a as (select b from c) Insert OVERWRITE table d.e PARTITION (f) select b, xyz f from a",
     "INSERT INTO d.e\nwith a as (select b from c) SELECT\nb,\nxyz AS f\nfrom a"),
    ("with a as (select b from c) Insert OVERWRITE table d.e PARTITION (f='g') select b, xyz from a",
     "INSERT INTO d.e\nwith a as (select b from c) SELECT\nb,\nxyz,\n'g'\nfrom a"),
    ("with a as (select b from c) Insert OVERWRITE table d.e PARTITION (f) select b, xyz from a",
     "INSERT INTO d.e\nwith a as (select b from c) SELECT\nb,\nxyz,\nf\nfrom a"),
    # Multi-line select list with leading-comma style
    ("""with a as (select 1) Insert OVERWRITE table d.e PARTITION (ld_dt) select d.aaa as bbb
,min(date(t1.ccc)) as task_date
,current_date as ld_dt from cte""",
     """INSERT INTO d.e\nwith a as (select 1) SELECT\nd.aaa AS bbb,\nmin(date(t1.ccc)) AS task_date,\ncurrent_date AS ld_dt\nfrom cte"""),
    # Full statement with templated partition value and joins
    ("""INSERT OVERWRITE TABLE db.table PARTITION ( pkey = '${pkey}')
select distinct
a.sth,
case when f(b.g) - unix_timestamp(a.h) between 0 and 7200 then 1 else 0 end as my_alias
from z a
left join zz b
on a.zzz=b.zzz""",
     """INSERT INTO db.table
SELECT DISTINCT
a.sth,
case when f(b.g) - unix_timestamp(a.h) between 0 and 7200 then 1 else 0 end AS my_alias,
'${pkey}'
from z a
left join zz b
on a.zzz=b.zzz""")
])
def test_move_insert_statement(query_section: str, expected: str) -> None:
    """Hive's trailing INSERT OVERWRITE/INTO TABLE ... PARTITION clause is
    moved to a leading Presto-style INSERT INTO, with partition values
    appended to the select list when needed."""
    GHTP = global_translation.GlobalHiveToPresto()
    assert GHTP.move_insert_statement(query_section) == expected
@pytest.mark.parametrize(['query', 'expected'], [
# Single statements
# CTE management
("select cte.a from cte group by cte.a",
"select cte.a from cte group by cte.a"),
("select cte.a from cte group by a",
"select cte.a from cte group | |
as expected.
method_chains = pika.spec.Basic.Deliver(
routing_key=self.chains_routing_key)
method_general = pika.spec.Basic.Deliver(
routing_key=self.general_routing_key)
body_chain_initial = json.dumps(
self.sent_configs_example_chain)
body_general_initial = json.dumps(
self.sent_configs_example_general)
body_new_configs_chain = json.dumps(new_configs_chain)
body_new_configs_general = json.dumps(new_configs_general)
properties = pika.spec.BasicProperties()
self.test_manager._process_configs(blocking_channel, method_chains,
properties, body_chain_initial)
self.test_manager._process_configs(blocking_channel, method_general,
properties, body_general_initial)
expected_output = copy.deepcopy(self.github_repos_configs_example)
self.assertEqual(
expected_output, self.test_manager.github_repos_configs)
self.test_manager._process_configs(blocking_channel, method_chains,
properties,
body_new_configs_chain)
self.test_manager._process_configs(blocking_channel, method_general,
properties,
body_new_configs_general)
expected_output['Substrate Polkadot']['config_id3'] = \
new_configs_chain['config_id3']
expected_output['general']['config_id5'] = \
new_configs_general['config_id5']
self.assertEqual(
expected_output, self.test_manager.github_repos_configs)
except Exception as e:
self.fail("Test failed: {}".format(e))
    @mock.patch.object(RabbitMQApi, "basic_ack")
    @mock.patch.object(GitHubMonitorsManager,
                       "_create_and_start_monitor_process")
    @mock.patch.object(multiprocessing.Process, "terminate")
    @mock.patch.object(multiprocessing.Process, "join")
    def test_process_configs_stores_modified_configs_to_be_monitored_correctly(
            self, join_mock, terminate_mock, startup_mock, mock_ack) -> None:
        """Modified configs with monitor_repo=True replace the stored entry;
        flipping monitor_repo to False removes the entry and its monitor."""
        # In this test we will check that modified configurations with
        # `monitor_repo = True` are stored correctly in the state. Some
        # configurations will have `monitor_repo = False` to check whether the
        # monitor associated with the previous configuration is terminated.
        mock_ack.return_value = None
        startup_mock.return_value = None
        join_mock.return_value = None
        terminate_mock.return_value = None
        # Seed the manager state with the example configs and processes
        self.test_manager._github_repos_configs = \
            self.github_repos_configs_example
        self.test_manager._config_process_dict = \
            self.config_process_dict_example
        new_configs_chain_monitor_true = {
            'config_id1': {
                'id': 'config_id1',
                'parent_id': 'chain_1',
                'repo_name': 'new_repo_name_chain',
                'monitor_repo': "True",
            },
        }
        new_configs_chain_monitor_false = {
            'config_id1': {
                'id': 'config_id1',
                'parent_id': 'chain_1',
                'repo_name': 'new_repo_name_chain',
                'monitor_repo': "False",
            },
        }
        new_configs_general_monitor_true = {
            'config_id2': {
                'id': 'config_id2',
                'parent_id': 'GENERAL',
                'repo_name': 'new_repo_name_general',
                'monitor_repo': "True",
            },
        }
        new_configs_general_monitor_false = {
            'config_id2': {
                'id': 'config_id2',
                'parent_id': 'GENERAL',
                'repo_name': 'new_repo_name_general',
                # NOTE(review): lowercase "false" here (vs "False" above) —
                # presumably the flag parsing is case-insensitive; confirm.
                'monitor_repo': "false",
            },
        }
        try:
            # Must create a connection so that the blocking channel is passed
            self.test_manager.rabbitmq.connect()
            blocking_channel = self.test_manager.rabbitmq.channel
            method_chains = pika.spec.Basic.Deliver(
                routing_key=self.chains_routing_key)
            method_general = pika.spec.Basic.Deliver(
                routing_key=self.general_routing_key)
            body_chain_mon_true = json.dumps(new_configs_chain_monitor_true)
            body_general_mon_true = json.dumps(
                new_configs_general_monitor_true)
            body_chain_mon_false = json.dumps(new_configs_chain_monitor_false)
            body_general_mon_false = json.dumps(
                new_configs_general_monitor_false)
            properties = pika.spec.BasicProperties()
            expected_output = copy.deepcopy(self.github_repos_configs_example)
            # Modified chain config with monitor_repo=True is stored
            self.test_manager._process_configs(blocking_channel, method_chains,
                                               properties, body_chain_mon_true)
            expected_output['Substrate Polkadot']['config_id1'] = \
                new_configs_chain_monitor_true['config_id1']
            self.assertEqual(
                expected_output, self.test_manager.github_repos_configs)
            # Modified general config with monitor_repo=True is stored
            self.test_manager._process_configs(blocking_channel, method_general,
                                               properties,
                                               body_general_mon_true)
            expected_output['general']['config_id2'] = \
                new_configs_general_monitor_true['config_id2']
            self.assertEqual(
                expected_output, self.test_manager.github_repos_configs)
            # Flipping monitor_repo to False removes the chain config and
            # its monitor process entry
            self.test_manager._process_configs(blocking_channel, method_chains,
                                               properties,
                                               body_chain_mon_false)
            expected_output['Substrate Polkadot'] = {}
            self.assertEqual(
                expected_output, self.test_manager.github_repos_configs)
            self.assertTrue(
                'config_id1' not in self.test_manager.config_process_dict)
            # Same for the general config
            self.test_manager._process_configs(
                blocking_channel, method_general, properties,
                body_general_mon_false)
            expected_output['general'] = {}
            self.assertEqual(
                expected_output, self.test_manager.github_repos_configs)
            self.assertTrue(
                'config_id2' not in self.test_manager.config_process_dict)
        except Exception as e:
            self.fail("Test failed: {}".format(e))
    @mock.patch.object(RabbitMQApi, "basic_ack")
    @mock.patch.object(multiprocessing.Process, "terminate")
    @mock.patch.object(multiprocessing.Process, "join")
    def test_process_configs_removes_deleted_configs_from_state_correctly(
            self, join_mock, terminate_mock, mock_ack) -> None:
        """Sending an empty config payload removes every previously stored
        config (and its process entry) for that routing key."""
        # In this test we will check that removed configurations are actually
        # removed from the state
        mock_ack.return_value = None
        join_mock.return_value = None
        terminate_mock.return_value = None
        # Seed the manager state with the example configs and processes
        self.test_manager._github_repos_configs = \
            self.github_repos_configs_example
        self.test_manager._config_process_dict = \
            self.config_process_dict_example
        # Empty payloads == all configs deleted
        new_configs_chain = {}
        new_configs_general = {}
        try:
            # Must create a connection so that the blocking channel is passed
            self.test_manager.rabbitmq.connect()
            blocking_channel = self.test_manager.rabbitmq.channel
            method_chains = pika.spec.Basic.Deliver(
                routing_key=self.chains_routing_key)
            method_general = pika.spec.Basic.Deliver(
                routing_key=self.general_routing_key)
            body_chain = json.dumps(new_configs_chain)
            body_general = json.dumps(new_configs_general)
            properties = pika.spec.BasicProperties()
            expected_output = copy.deepcopy(self.github_repos_configs_example)
            # Deleting the chain configs empties the chain entry and removes
            # the associated process bookkeeping
            self.test_manager._process_configs(blocking_channel, method_chains,
                                               properties, body_chain)
            expected_output['Substrate Polkadot'] = {}
            self.assertEqual(
                expected_output, self.test_manager.github_repos_configs)
            self.assertTrue(
                'config_id1' not in self.test_manager.config_process_dict)
            # Same for the general configs
            self.test_manager._process_configs(blocking_channel, method_general,
                                               properties, body_general)
            expected_output['general'] = {}
            self.assertEqual(
                expected_output, self.test_manager.github_repos_configs)
            self.assertTrue(
                'config_id2' not in self.test_manager.config_process_dict)
        except Exception as e:
            self.fail("Test failed: {}".format(e))
@mock.patch.object(RabbitMQApi, "basic_ack")
@mock.patch.object(GitHubMonitorsManager,
"_create_and_start_monitor_process")
def test_proc_configs_starts_new_monitors_for_new_configs_to_be_monitored(
self, startup_mock, mock_ack) -> None:
# We will check whether _create_and_start_monitor_process is called
# correctly on each newly added configuration if
# `monitor_repo = True`. Implicitly we will be also testing that if
# `monitor_repo = False` no new monitor is created.
mock_ack.return_value = None
startup_mock.return_value = None
new_configs_chain = {
'config_id1': {
'id': 'config_id1',
'parent_id': 'chain_1',
'repo_name': 'repo_1',
'monitor_repo': "True",
},
'config_id3': {
'id': 'config_id3',
'parent_id': 'chain_1',
'repo_name': 'repo_3',
'monitor_repo': "True",
},
'config_id4': {
'id': 'config_id4',
'parent_id': 'chain_1',
'repo_name': 'repo_4',
'monitor_repo': "False",
}
}
new_configs_general = {
'config_id2': {
'id': 'config_id2',
'parent_id': 'GENERAL',
'repo_name': 'repo_2',
'monitor_repo': "True",
},
'config_id5': {
'id': 'config_id5',
'parent_id': 'GENERAL',
'repo_name': 'repo_5',
'monitor_repo': "True",
},
'config_id6': {
'id': 'config_id6',
'parent_id': 'GENERAL',
'repo_name': 'repo_6',
'monitor_repo': "False",
}
}
try:
# Must create a connection so that the blocking channel is passed
self.test_manager.rabbitmq.connect()
blocking_channel = self.test_manager.rabbitmq.channel
# We will send new configs through both the existing and
# non-existing chain and general paths to make sure that all routes
# work as expected.
method_chains = pika.spec.Basic.Deliver(
routing_key=self.chains_routing_key)
method_general = pika.spec.Basic.Deliver(
routing_key=self.general_routing_key)
body_chain_initial = json.dumps(
self.sent_configs_example_chain)
body_general_initial = json.dumps(
self.sent_configs_example_general)
body_new_configs_chain = json.dumps(new_configs_chain)
body_new_configs_general = json.dumps(new_configs_general)
properties = pika.spec.BasicProperties()
self.test_manager._process_configs(blocking_channel, method_chains,
properties, body_chain_initial)
self.assertEqual(1, startup_mock.call_count)
args, _ = startup_mock.call_args
self.assertTrue('config_id1' and 'Substrate Polkadot' in args)
self.assertEqual(
self.sent_configs_example_chain['config_id1']['id'],
args[0].repo_id)
self.assertEqual(
self.sent_configs_example_chain['config_id1']['parent_id'],
args[0].parent_id)
self.assertEqual(self.sent_configs_example_chain['config_id1'][
'repo_name'] + '/', args[0].repo_name)
self.assertEqual(
str_to_bool(
self.sent_configs_example_chain['config_id1'][
'monitor_repo']), args[0].monitor_repo)
self.assertEqual(env.GITHUB_RELEASES_TEMPLATE.format(
self.sent_configs_example_chain['config_id1']['repo_name']
+ '/'), args[0].releases_page)
self.test_manager._process_configs(blocking_channel, method_chains,
properties,
body_new_configs_chain)
self.assertEqual(2, startup_mock.call_count)
args, _ = startup_mock.call_args
self.assertTrue('config_id3' and 'Substrate Polkadot' in args)
self.assertEqual(new_configs_chain['config_id3']['id'],
args[0].repo_id)
self.assertEqual(new_configs_chain['config_id3']['parent_id'],
args[0].parent_id)
self.assertEqual(new_configs_chain['config_id3']['repo_name'] + '/',
args[0].repo_name)
self.assertEqual(
str_to_bool(new_configs_chain['config_id3']['monitor_repo']),
args[0].monitor_repo)
self.assertEqual(env.GITHUB_RELEASES_TEMPLATE.format(
new_configs_chain['config_id3']['repo_name'] + '/'),
args[0].releases_page)
self.test_manager._process_configs(blocking_channel, method_general,
properties, body_general_initial)
self.assertEqual(3, startup_mock.call_count)
args, _ = startup_mock.call_args
self.assertTrue('config_id2' and 'general' in args)
self.assertEqual(
self.sent_configs_example_general['config_id2']['id'],
args[0].repo_id)
self.assertEqual(
self.sent_configs_example_general['config_id2']['parent_id'],
args[0].parent_id)
self.assertEqual(self.sent_configs_example_general['config_id2'][
'repo_name'] + '/', args[0].repo_name)
self.assertEqual(
str_to_bool(
self.sent_configs_example_general['config_id2'][
'monitor_repo']), args[0].monitor_repo)
self.assertEqual(env.GITHUB_RELEASES_TEMPLATE.format(
self.sent_configs_example_general['config_id2']['repo_name']
+ '/'), args[0].releases_page)
self.test_manager._process_configs(blocking_channel, method_general,
properties,
body_new_configs_general)
self.assertEqual(4, startup_mock.call_count)
args, _ = startup_mock.call_args
self.assertTrue('config_id5' and 'general' in args)
self.assertEqual(new_configs_general['config_id5']['id'],
args[0].repo_id)
self.assertEqual(new_configs_general['config_id5']['parent_id'],
args[0].parent_id)
self.assertEqual(new_configs_general['config_id5'][
'repo_name'] + '/', args[0].repo_name)
self.assertEqual(
str_to_bool(new_configs_general['config_id5']['monitor_repo']),
args[0].monitor_repo)
self.assertEqual(env.GITHUB_RELEASES_TEMPLATE.format(
new_configs_general['config_id5']['repo_name'] + '/'),
args[0].releases_page)
except Exception as e:
self.fail("Test failed: {}".format(e))
    @mock.patch("src.monitors.starters.create_logger")
    @mock.patch.object(RabbitMQApi, "basic_ack")
    def test_proc_confs_term_and_starts_monitors_for_modified_confs_to_be_mon(
            self, mock_ack, mock_create_logger) -> None:
        """Modified configs with `monitor_repo = True` must have the old
        monitor process terminated and a new one started; modified configs
        with `monitor_repo = False` must only have the old process terminated.

        NOTE(review): this test starts real monitor processes and relies on
        1-second sleeps for them to come up/down, so it is timing-sensitive.
        """
        # In this test we will check that modified configurations with
        # `monitor_repo = True` will have new monitors started. Implicitly
        # we will be checking that modified configs with
        # `monitor_repo = False` will only have their previous processes
        # terminated.
        mock_ack.return_value = None
        mock_create_logger.return_value = self.dummy_logger
        new_configs_chain_monitor_true = {
            'config_id1': {
                'id': 'config_id1',
                'parent_id': 'chain_1',
                'repo_name': 'new_repo_name_chain',
                'monitor_repo': "True",
            },
        }
        new_configs_chain_monitor_false = {
            'config_id1': {
                'id': 'config_id1',
                'parent_id': 'chain_1',
                'repo_name': 'new_repo_name_chain',
                'monitor_repo': "False",
            },
        }
        new_configs_general_monitor_true = {
            'config_id2': {
                'id': 'config_id2',
                'parent_id': 'GENERAL',
                'repo_name': 'new_repo_name_general',
                'monitor_repo': "True",
            },
        }
        new_configs_general_monitor_false = {
            'config_id2': {
                'id': 'config_id2',
                'parent_id': 'GENERAL',
                'repo_name': 'new_repo_name_general',
                # NOTE(review): lowercase "false" here (vs "False" above) —
                # presumably str_to_bool is case-insensitive; confirm.
                'monitor_repo': "false",
            },
        }
        try:
            # Must create a connection so that the blocking channel is passed
            self.test_manager.rabbitmq.connect()
            blocking_channel = self.test_manager.rabbitmq.channel
            method_chains = pika.spec.Basic.Deliver(
                routing_key=self.chains_routing_key)
            method_general = pika.spec.Basic.Deliver(
                routing_key=self.general_routing_key)
            body_chain_initial = json.dumps(self.sent_configs_example_chain)
            body_general_initial = json.dumps(
                self.sent_configs_example_general)
            body_chain_mon_true = json.dumps(new_configs_chain_monitor_true)
            body_general_mon_true = json.dumps(
                new_configs_general_monitor_true)
            body_chain_mon_false = json.dumps(new_configs_chain_monitor_false)
            body_general_mon_false = json.dumps(
                new_configs_general_monitor_false)
            properties = pika.spec.BasicProperties()
            # First send the new configs as the state is empty
            self.test_manager._process_configs(blocking_channel, method_chains,
                                               properties, body_chain_initial)
            self.test_manager._process_configs(blocking_channel, method_general,
                                               properties, body_general_initial)
            # Give some time till the processes start
            time.sleep(1)
            # Assure that the processes have been started
            self.assertTrue(self.test_manager.config_process_dict[
                                'config_id1']['process'].is_alive())
            self.assertTrue(self.test_manager.config_process_dict[
                                'config_id2']['process'].is_alive())
            # Send the updated configs with `monitor_repo = True`
            conf_id1_old_proc = self.test_manager.config_process_dict[
                'config_id1']['process']
            conf_id2_old_proc = self.test_manager.config_process_dict[
                'config_id2']['process']
            self.test_manager._process_configs(blocking_channel, method_chains,
                                               properties, body_chain_mon_true)
            self.test_manager._process_configs(blocking_channel, method_general,
                                               properties,
                                               body_general_mon_true)
            # Give some time till the processes restart
            time.sleep(1)
            # Check that the old processes have terminated and new ones have
            # started.
            self.assertFalse(conf_id1_old_proc.is_alive())
            self.assertTrue(self.test_manager.config_process_dict[
                                'config_id1']['process'].is_alive())
            self.assertFalse(conf_id2_old_proc.is_alive())
            self.assertTrue(self.test_manager.config_process_dict[
                                'config_id2']['process'].is_alive())
            # Send the updated configs with `monitor_repo = False`
            conf_id1_old_proc = self.test_manager.config_process_dict[
                'config_id1']['process']
            conf_id2_old_proc = self.test_manager.config_process_dict[
                'config_id2']['process']
            self.test_manager._process_configs(blocking_channel, method_chains,
                                               properties, body_chain_mon_false)
            self.test_manager._process_configs(blocking_channel, method_general,
                                               properties,
                                               body_general_mon_false)
            # Give some time till the processes stop
            time.sleep(1)
            # Check that the old processes have terminated and that new ones
            # have not been started. Note, if _create_start_process is called
            # then the config ids would be in config_process_dict
            self.assertFalse(conf_id1_old_proc.is_alive())
            self.assertFalse(
                'config_id1' in self.test_manager.config_process_dict)
            self.assertFalse(conf_id2_old_proc.is_alive())
            self.assertFalse(
                'config_id2' in self.test_manager.config_process_dict)
        except Exception as e:
            self.fail("Test failed: {}".format(e))
@mock.patch.object(RabbitMQApi, "basic_ack")
@mock.patch.object(GitHubMonitorsManager,
"_create_and_start_monitor_process")
@mock.patch.object(multiprocessing.Process, "join")
@mock.patch.object(multiprocessing.Process, "terminate")
def test_process_confs_restarts_an_updated_monitor_with_the_correct_conf(
self, mock_terminate, mock_join, startup_mock, mock_ack) -> None:
# We will check whether _create_and_start_monitor_process is called
# correctly on an updated configuration.
mock_ack.return_value = None
startup_mock.return_value = None
mock_terminate.return_value = None
mock_join.return_value = None
updated_configs_chain = {
'config_id1': {
'id': 'config_id1',
'parent_id': 'chain_1',
'repo_name': 'changed_repo_name_chain',
'monitor_repo': "True",
},
}
updated_configs_general = {
'config_id2': {
'id': 'config_id2',
'parent_id': 'GENERAL',
'repo_name': 'changed_repo_name_gen',
'monitor_repo': "True",
},
}
self.test_manager._github_repos_configs = \
self.github_repos_configs_example
self.test_manager._config_process_dict = \
self.config_process_dict_example
try:
# Must create a connection so that the blocking channel is passed
self.test_manager.rabbitmq.connect()
blocking_channel = self.test_manager.rabbitmq.channel
# We will send new configs through both the existing and
# non-existing chain and general paths to make sure that all routes
# work as expected.
method_chains = pika.spec.Basic.Deliver(
routing_key=self.chains_routing_key)
method_general = pika.spec.Basic.Deliver(
routing_key=self.general_routing_key)
body_updated_configs_chain = json.dumps(updated_configs_chain)
body_updated_configs_general = json.dumps(updated_configs_general)
properties = pika.spec.BasicProperties()
self.test_manager._process_configs(blocking_channel, method_chains,
properties,
body_updated_configs_chain)
self.assertEqual(1, startup_mock.call_count)
args, _ = startup_mock.call_args
| |
the VisTrails workflow (containing a list of .tif raster files) as an
input to the raster converter. All of the files listed in the MDS will be converted to
the raster format specified in the "Format" dialogue box.
'''
#configuration = []
_input_ports = [("inputMDS", "(gov.usgs.sahm:MergedDataSet:Other)"),
('inputDir', '(edu.utah.sci.vistrails.basic:Directory)'),
('format', '(edu.utah.sci.vistrails.basic:String)'),
('multipleCores', '(edu.utah.sci.vistrails.basic:Boolean)', {'defaults':str(['True']), 'optional':True})]
_output_ports = [('outputDir', '(edu.utah.sci.vistrails.basic:Directory)')]
def compute(self):
writetolog("\nRunning TiffConverter", True)
ourRFC = RFC.FormatConverter()
if self.has_input('inputMDS'):
ourRFC.MDSFile = self.force_get_input('inputMDS').name
elif self.has_input('inputDir'):
ourRFC.inputDir = self.force_get_input('inputDir').name
if self.has_input('format'):
format = self.force_get_input('format')
if format == '':
format = 'asc'
ourRFC.format = format
if self.has_input("multipleCores"):
if self.get_input("multipleCores"):
ourRFC.multicores = "True"
ourRFC.outputDir = utils.mknextdir(prefix='ConvertedRasters_')
if configuration.verbose:
ourRFC.verbose = True
ourRFC.logger = utils.getLogger()
writetolog(" output directory = " + ourRFC.outputDir, False, False)
try:
ourRFC.run()
except TrappedError as e:
raise ModuleError(self, e.message)
except:
utils.informative_untrapped_error(self, "RasterFormatConverter")
outputDir = utils.create_dir_module(ourRFC.outputDir)
self.set_output('outputDir', outputDir)
writetolog("\nFinished running TiffConverter", True)
class TestTrainingSplit(Module):
    '''
    Test Training Split

    The TestTrainingSplit module provides the opportunity to establish specific settings
    for how field data will be used in the modeling process. Three parameters can be set
    by the user:

    1. Ratio of Presence/Absence Points:
    This field is populated with a number corresponding to the desired proportion of
    presence and absence points to be used in the analysis. If populated, this entry should
    be a number greater than zero. (A value of '1' will result in an equal number of both
    presence and absence points being used, a value of '2' indicates that twice as many
    presence points will be used, a value of 0.5 indicates that twice as many absence points
    will be used, etc.). All field data points with a value equal to or greater than 1 are
    interpreted as presence points. Although the original field data is unmodified, this
    option will reduce the sample size as the merged dataset containing sample points will
    have points deleted from it to achieve the specified ratio. A warning will be generated
    if more than 50% of either the presence or absence points will be deleted based on the
    ratio specified by the user. Background points are ignored by this module (they are read
    in and written out, but not assigned to either the test or training split).
    When left empty, this field will default to 'null' and the model will use the existing
    presence/absence ratio present in the field data.

    2. Input Merged Data Set (MDS):
    This is the input data set consisting of locational data for each sample point, the
    values of each predictor variable at those points, and if established, a field denoting
    the weight that will be assigned to each point in modeling. This input is usually provided
    by the upstream steps that precede the Test Training Split module. Any value entered here
    (e.g., specifying another existing MDS on the file system) will override the input
    specified by a model connection in the visual display.

    3. Training Proportion:
    This is the proportion of the sample points that will be used to train the model, relative
    to the total number of points. Entered values should be greater than 0 but less than 1.
    For example, a value of '0.9' will result in 90% of the sample points being used to train
    the model, with 10% of the sample being held out to test the model's performance. Choosing
    an appropriate training proportion can depend on various factors, such as the total number
    of sample points available.
    '''
    _input_ports = [("inputMDS", "(gov.usgs.sahm:MergedDataSet:Other)"),
                    ('trainingProportion', '(edu.utah.sci.vistrails.basic:Float)',
                        {'defaults':str(['0.7'])}),
                    ('RatioPresAbs', '(edu.utah.sci.vistrails.basic:Float)')]
    _output_ports = [("outputMDS", "(gov.usgs.sahm:MergedDataSet:Other)")]

    def compute(self):
        """Build the argument string for TestTrainSplit.r, run it, and set
        the resulting MDS file on the output port.

        Raises ModuleError for invalid parameter values or if the R script
        does not produce the expected output file.
        """
        if self.has_input('trainingProportion'):
            # Function-call form works under both Python 2 and 3
            # (originally a Python 2 print statement).
            print('real input')
        writetolog("\nGenerating Test Training split ", True)
        inputMDS = utils.dir_path_value(self.force_get_input('inputMDS', []))
        outputMDS = utils.mknextfile(prefix='TestTrainingSplit_', suffix='.csv')

        global models_path

        args = "i=" + '"' + inputMDS + '"' + " o=" + '"' + outputMDS + '"'
        args += " rc=" + utils.MDSresponseCol(inputMDS)
        if (self.has_input("trainingProportion")):
            try:
                trainingProportion = float(self.get_input("trainingProportion"))
                if trainingProportion <= 0 or trainingProportion > 1:
                    raise ModuleError(self, "Train Proportion (trainProp) must be a number between 0 and 1 excluding 0")
                args += " p=" + str(trainingProportion)
            except:
                raise ModuleError(self, "Train Proportion (trainProp) must be a number between 0 and 1 excluding 0")
        if (self.has_input("RatioPresAbs")):
            try:
                RatioPresAbs = float(self.get_input("RatioPresAbs"))
                if RatioPresAbs <= 0:
                    raise ModuleError(self, "The ratio of presence to absence (RatioPresAbs) must be a number greater than 0")
                # BUG FIX: the m= argument previously appended
                # str(trainingProportion) — the wrong variable, and a
                # NameError when trainingProportion was never supplied.
                args += " m=" + str(RatioPresAbs)
            except:
                raise ModuleError(self, "The ratio of presence to absence (RatioPresAbs) must be a number greater than 0")

        utils.runRScript("TestTrainSplit.r", args, self)

        # os.path.join with a single argument was a no-op; use the path
        # directly.
        output = outputMDS
        if os.path.exists(output):
            output_file = utils.create_file_module(output)
            writetolog("Finished Test Training split ", True)
        else:
            msg = "Problem encountered generating Test Training split. Expected output file not found."
            writetolog(msg, False)
            raise ModuleError(self, msg)
        self.set_output("outputMDS", output_file)
class CovariateCorrelationAndSelection(Module):
    '''
    Covariate Correlation And Selection

    The CovariateCorrelationAndSelection view provides a breakpoint in the modeling workflow
    for the user to assess how well each variable explains the distribution of the sampled
    data points and to remove any variables that may exhibit high correlation with others.

    The display shows the 10 most correlated variables of those selected. These variables
    are displayed on the diagonal and their respective graphical display and correlation with
    other variables can be found by locating the row/column intersection between each (above
    and below the diagonal). The column heading over each variable displays the number of
    other variables with which the environmental predictor is correlated. The user defined
    "Threshold" option allows a user to specify the degree of correlation required between
    two variables for them to be counted in this tally.

    A user is provided with the opportunity to select a new set of the environmental predictor
    variables and "Update" the Covariate Correlation screen to investigate the relationships
    among the new variables selected. The options are provided to include or exclude the
    presence/count points, the absence points (when applicable), and the background points in
    this correlation test. Variables with a high degree of correlation with other variables
    should generally be unchecked in their respective radio buttons, and will be excluded from
    subsequent analysis steps in the model workflow.

    Multiple iterations can be run at this screen, allowing the user to investigate the
    relationships among the environmental predictor variables and choose the most appropriate
    set to be used in the subsequent modeling. When the desired set of variables has been chosen,
    the "OK" button is selected and processing will resume in the VisTrails workflow.
    '''
    # Default for the selectionName port; passed as the port's kwargs below.
    kwargs = {}
    kwargs['defaults'] = str(['initial'])
    _input_ports = [("inputMDS", "(gov.usgs.sahm:MergedDataSet:Other)"),
                    ('selectionName', '(edu.utah.sci.vistrails.basic:String)', kwargs),
                    ('ShowGUI', '(edu.utah.sci.vistrails.basic:Boolean)')]
    _output_ports = [("outputMDS", "(gov.usgs.sahm:MergedDataSet:Other)")]

    def compute(self):
        """Open the covariate-correlation selection GUI and write the user's
        predictor selection to a session-scoped output MDS file.
        """
        writetolog("\nOpening Select Predictors Layers widget", True)
        inputMDS = utils.dir_path_value(self.force_get_input('inputMDS'))
        selectionName = self.force_get_input('selectionName', 'initial')
#        outputMDS = utils.mknextfile(prefix='SelectPredictorsLayers_' + selectionName + "_", suffix='.csv')
#        displayJPEG = utils.mknextfile(prefix='PredictorCorrelation_' + selectionName + "_", suffix='.jpg')
        global session_dir
        # Output paths are fixed per session + selection name so a re-run
        # can pick up (and reapply) a previous selection.
        outputMDS = os.path.join(session_dir, "CovariateCorrelationOutputMDS_" + selectionName + ".csv")
        displayJPEG = os.path.join(session_dir, "CovariateCorrelationDisplay.jpg")
        writetolog("    inputMDS = " + inputMDS, False, False)
        writetolog("    displayJPEG = " + displayJPEG, False, False)
        writetolog("    outputMDS = " + outputMDS, False, False)

        # If a previous selection exists, carry it over to the input MDS,
        # then remove the old output before showing the dialog again.
        if os.path.exists(outputMDS):
            utils.applyMDS_selection(outputMDS, inputMDS)
            os.remove(outputMDS)

        self.callDisplayMDS(inputMDS, outputMDS, displayJPEG)

        output_file = utils.create_file_module(outputMDS)
        writetolog("Finished Select Predictors Layers widget", True)
        self.set_output("outputMDS", output_file)

    def callDisplayMDS(self, inputMDS, outputMDS, displayJPEG):
        """Show the modal predictor-selection dialog; raise ModuleError if
        the user cancels/closes instead of pressing OK (halts the workflow).
        """
        dialog = SelectListDialog(inputMDS, outputMDS, displayJPEG, configuration.r_path)
#        dialog.setWindowFlags(QtCore.Qt.WindowMaximizeButtonHint)
#        print " ... finished with dialog "
        retVal = dialog.exec_()
#        outputPredictorList = dialog.outputList
        if retVal == 1:
            raise ModuleError(self, "Cancel or Close selected (not OK) workflow halted.")
class ProjectionLayers(Module):
'''
Projection Layers
Note: as of June 2011, this module offers some functionality that is only available
to users running the SAHM package within the USGS Fort Collins Science Center (FORT).
The ProjectionLayers module provides the option to prepare a separate set of predictor
layers so that the results of a model developed from one set of environmental predictors
can be projected onto a new modeled space. This second set of environmental predictors
(corresponding to the | |
tiebreak
from game;
create view if not exists heat_game_divided as
select * from game_divided where game_type = 'P';
create view if not exists player_wins as
select p.id, sum(case when g.p_id is null then 0
when g.p_score is null or g.opp_score is null then 0
when g.p_score == 0 and g.opp_score == 0 and g.tiebreak then 0
when g.p_score > g.opp_score then 1
else 0 end) wins
from player p left outer join heat_game_divided g on p.id = g.p_id
group by p.id;
create view if not exists player_draws as
select p.id, sum(case when g.p_id is null then 0
when g.p_score is null or g.opp_score is null then 0
when g.p_score == 0 and g.opp_score == 0 and g.tiebreak then 0
when g.p_score == g.opp_score then 1
else 0 end) draws
from player p left outer join heat_game_divided g on p.id = g.p_id
group by p.id;
create view if not exists player_points as
select p.id, sum(case when g.p_score is null then 0
when g.tiebreak and g.p_score > g.opp_score
then g.opp_score
else g.p_score end) points
from player p left outer join heat_game_divided g on p.id = g.p_id
group by p.id;
create view if not exists player_points_against as
select p.id, sum(case when g.opp_score is null then 0
when g.tiebreak and g.opp_score > g.p_score
then g.p_score
else g.opp_score end) points_against
from player p left outer join heat_game_divided g on p.id = g.p_id
group by p.id;
create view if not exists player_played as
select p.id, sum(case when g.p_score is not null and g.opp_score is not null then 1 else 0 end) played
from player p left outer join heat_game_divided g on p.id = g.p_id
group by p.id;
create view if not exists player_played_first as
select p.id, count(g.p1) played_first
from player p left outer join completed_heat_game g on p.id = g.p1
group by p.id;
create table final_game_types(game_type text, power int);
insert into final_game_types values ('QF', 2), ('SF', 1), ('F', 0);
create view if not exists player_finals_results as
select p.id, coalesce(gd.game_type, gt.game_type) game_type,
case when gd.p_score is null then '-'
when gd.p_score > gd.opp_score then 'W'
when gd.p_score = gd.opp_score then 'D'
else 'L'
end result
from player p, final_game_types gt
left outer join game_divided gd on p.id = gd.p_id
and (gd.game_type = gt.game_type or (gt.game_type = 'F' and gd.game_type = '3P'));
create view if not exists player_finals_form as
select p.id, coalesce(pfr_qf.result, '-') qf,
coalesce(pfr_sf.result, '-') sf,
case when pfr_f.result is null then '-'
when pfr_f.game_type = '3P' then lower(pfr_f.result)
else pfr_f.result end f
from player p
left outer join player_finals_results pfr_qf on p.id = pfr_qf.id and pfr_qf.game_type = 'QF'
left outer join player_finals_results pfr_sf on p.id = pfr_sf.id and pfr_sf.game_type = 'SF'
left outer join player_finals_results pfr_f on p.id = pfr_f.id and pfr_f.game_type in ('3P', 'F')
group by p.id;
create view if not exists player_standings as
select p.id, p.name, p.division, played.played, wins.wins, draws.draws,
points.points, points_against.points_against, ppf.played_first,
pff.qf || pff.sf || upper(pff.f) finals_form,
case when pff.f = '-' then 0
else
case when pff.qf = 'W' then 48
when pff.qf = 'D' then 32
when pff.qf = 'L' then 16
else case when pff.sf != '-' or pff.f != '-' then 48 else 0 end
end +
case when pff.sf = 'W' then 12
when pff.sf = 'D' then 8
when pff.sf = 'L' then 4
-- If you're playing in a third place match then you're considered
-- to have lost the nonexistent semi-final. If you're playing in a
-- final then you're considered to have won the semi-final.
else case when pff.f in ('w', 'd', 'l') then 4
when pff.f in ('W', 'D', 'L') then 12
else 0 end
end +
case when pff.f = 'W' then 3
when pff.f = 'D' then 2
when pff.f = 'L' then 1
else 0
end
end finals_points
from player p, player_wins wins, player_draws draws, player_played played,
player_points points, player_points_against points_against,
player_played_first ppf, player_finals_form pff
where p.id = wins.id
and p.id = played.id
and p.id = points.id
and p.id = draws.id
and p.id = points_against.id
and p.id = ppf.id
and p.id = pff.id;
-- Tables for controlling the display system Teleost
create table if not exists teleost(current_mode int);
delete from teleost;
insert into teleost values(0);
create table if not exists teleost_modes(num int, name text, desc text);
create table if not exists tr_opts (
bonus float,
rating_diff_cap float
);
delete from tr_opts;
insert into tr_opts (bonus, rating_diff_cap) values (50, 40);
-- View for working out tournament ratings
-- For each game, you get 50 + your opponent's rating if you win,
-- your opponent's rating if you draw, and your opponent's rating - 50 if
-- you lost. For the purpose of this calculation, your opponent's rating
-- is your opponent's rating at the start of the tourney, except where that
-- is more than 40 away from your own, in which case it's your rating +40 or
-- -40 as appropriate.
-- The 50 and 40 are configurable, in the tr_opts table.
create view tournament_rating as
select p.id, p.name,
avg(case when hgd.p_score > hgd.opp_score then rel_ratings.opp_rating + tr_opts.bonus
when hgd.p_score = hgd.opp_score then rel_ratings.opp_rating
else rel_ratings.opp_rating - tr_opts.bonus end) tournament_rating
from player p, heat_game_divided hgd on p.id = hgd.p_id,
(select me.id p_id, you.id opp_id,
case when you.rating < me.rating - tr_opts.rating_diff_cap
then me.rating - tr_opts.rating_diff_cap
when you.rating > me.rating + tr_opts.rating_diff_cap
then me.rating + tr_opts.rating_diff_cap
else you.rating end opp_rating
from player me, player you, tr_opts) rel_ratings
on rel_ratings.p_id = p.id and hgd.opp_id = rel_ratings.opp_id,
tr_opts
where hgd.p_score is not null and hgd.opp_score is not null
group by p.id, p.name;
-- Table for information about tables (boards). The special table_no -1 means
-- the default settings for tables. So if table -1 is marked as accessible
-- that means every table not listed is considered to be accessible.
create table board (
table_no integer primary key,
accessible integer not null
);
-- By default, if a board isn't listed in this table then it isn't accessible.
insert into board (table_no, accessible) values (-1, 0);
-- Log any failures to upload updates
create table if not exists upload_error_log (
ts text,
failure_type int,
message text
);
-- Time of last successful upload
create table if not exists upload_success (
ts text
);
insert into upload_success values (null);
commit;
""";
class TourneyException(Exception):
    """Base class for all tourney errors.

    Each subclass carries a human-readable class-level `description`; a
    description passed to the constructor overrides it on the instance.
    Note: the base class itself has no default `description`, so a bare
    TourneyException() has the attribute only via a subclass.
    """
    def __init__(self, description=None):
        if description:
            self.description = description

# The subclasses below exist purely to pair a distinct exception type with a
# fixed user-facing description. (Stray semicolons and redundant `pass`
# statements from the original have been removed for consistency.)

class TourneyInProgressException(TourneyException):
    description = "Tournament is in progress."

class PlayerDoesNotExistException(TourneyException):
    description = "Player does not exist."

class PlayerExistsException(TourneyException):
    description = "Player already exists."

class DuplicatePlayerException(TourneyException):
    description = "No two players are allowed to have the same name."

class UnknownRankMethodException(TourneyException):
    description = "Unknown ranking method."

class DBNameExistsException(TourneyException):
    description = "Tourney name already exists."

class DBNameDoesNotExistException(TourneyException):
    description = "No tourney by that name exists."

class InvalidDBNameException(TourneyException):
    description = "Invalid tourney name."

class InvalidRatingException(TourneyException):
    description = "Invalid rating. Rating must be an integer."

class TooManyPlayersException(TourneyException):
    description = "You've got too many players. Turf some out onto the street."

class IncompleteRatingsException(TourneyException):
    description = "Incomplete ratings - specify ratings for nobody or everybody."

class InvalidDivisionNumberException(TourneyException):
    description = "Invalid division number"

class InvalidPlayerNameException(TourneyException):
    description = "A player's name is not allowed to be blank or consist entirely of whitespace."

class InvalidTableSizeException(TourneyException):
    description = "Invalid table size - number of players per table must be 2 or 3."

class FixtureGeneratorException(TourneyException):
    description = "Failed to generate fixtures."

class PlayerNotInGameException(TourneyException):
    description = "That player is not in that game."

class NotMostRecentRoundException(TourneyException):
    description = "That is not the most recent round."

class NoGamesException(TourneyException):
    description = "No games have been played."

class IllegalDivisionException(TourneyException):
    description = "Cannot distribute players into the specified number of divisions in the way you have asked, either because there aren't enough players, or the number of players in a division cannot be set to the requested multiple."

class DBVersionMismatchException(TourneyException):
    description = "This tourney database file was created with a version of atropine which is not compatible with the one you're using."

class InvalidEntryException(TourneyException):
    description = "Result entry is not valid."

class QualificationTimeoutException(TourneyException):
    description = "In calculating the standings table, we took too long to work out which players, if any, have qualified for the final. This may be due to an unusually large number of players, or an unusual tournament setup. In this case it is strongly recommended go to General Setup and disable qualification analysis by setting the number of places in the qualification zone to zero."
class | |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import re
import subprocess
import sys
import tempfile
import typing
import unittest.mock
from unittest.mock import call
import pytest # type:ignore
import autosynth.abstract_source
import autosynth.synth
from autosynth import git
from autosynth.change_pusher import (
AbstractChangePusher,
SquashingChangePusher,
AbstractPullRequest,
)
from autosynth.synthesizer import AbstractSynthesizer
from integration_tests import util
import json
import datetime
class WriteFile:
    """Mock source version whose apply() writes *content* to *path*."""

    def __init__(self, path: str, content: str):
        self.path = path
        self.content = content
        # One snapshot of the incoming preconfig per apply() call, so tests
        # can assert what each version observed.
        self.apply_call_history: typing.List[typing.Dict] = []

    def apply(self, preconfig: typing.Dict) -> None:
        """Record the preconfig, advertise this file in it, and write the file."""
        self.apply_call_history.append(dict(preconfig))
        preconfig[self.path] = self.content
        pathlib.Path(self.path).write_text(self.content)

    def get_comment(self) -> str:
        return f"Wrote {self.content} to {self.path}."

    def __repr__(self):
        return f"WriteFile({self.path!r}, {self.content!r})"
class NoChange:
    """Mock source version whose apply() leaves the generated code untouched."""

    def apply(self, preconfig: typing.Dict) -> None:
        return None  # deliberately a no-op

    def get_comment(self) -> str:
        return "no change"
class Failed:
    """Mock source version whose apply() simulates a synthtool failure."""

    def apply(self, preconfig: typing.Dict) -> None:
        # Mirrors what a failed `synthesize` subprocess would raise.
        error = subprocess.CalledProcessError(-1, "synthesize", "", "")
        raise error

    def get_comment(self) -> str:
        return "failed"
class MockSynthesizer(AbstractSynthesizer):
    """Collects callables and runs them once when synthesize() is invoked."""

    def __init__(self):
        self._actions = []

    def add_action(self, action):
        """Queue a callable to be executed by the next synthesize() call."""
        self._actions.append(action)

    def synthesize(
        self, logfile: pathlib.Path, environ: typing.Mapping[str, str] = None
    ) -> str:
        # Take a snapshot and reset the queue before running, so each queued
        # action executes exactly once.
        pending, self._actions = self._actions, []
        for action in pending:
            action()
        return "synth log"
class CompiledVersion(autosynth.abstract_source.AbstractSourceVersion):
    """Wraps a list of mock versions and replays them all in its apply()."""

    # Monotonically increasing fake clock shared by all instances.
    _timestamp = 0

    def __init__(
        self,
        versions: typing.List[typing.Any],
        synthesizer: MockSynthesizer,
        source_name: str,
    ):
        self.versions = versions
        self.synthesizer = synthesizer
        # Lets tests confirm that binary search skipped some versions.
        self.synthesize_call_count = 0
        self.source_name = self.source_description = source_name

    def apply(self, preconfig: typing.Dict) -> None:
        """Queue an action that replays every wrapped version in order."""

        def replay():
            self.synthesize_call_count += 1
            *earlier, last = self.versions
            for version in earlier:
                try:
                    version.apply(preconfig)
                except subprocess.CalledProcessError:
                    pass  # failures of intermediate versions are swallowed
            # The newest version's outcome (success or failure) propagates.
            last.apply(preconfig)

        self.synthesizer.add_action(replay)

    def get_comment(self) -> str:
        return self.versions[-1].get_comment()

    def get_source_description(self) -> str:
        return self.source_description

    def get_source_name(self) -> str:
        return self.source_name

    def get_timestamp(self) -> datetime.datetime:
        CompiledVersion._timestamp += 1
        return datetime.datetime.fromtimestamp(CompiledVersion._timestamp)
def compile_histories(
    histories: typing.List[typing.List[typing.Any]], synthesizer: MockSynthesizer,
) -> typing.Sequence[typing.Sequence[CompiledVersion]]:
    """Turn lists of mock versions into per-source lists of CompiledVersions.

    Each source's i-th CompiledVersion replays the first i mock versions,
    so the result looks like a growing history per source.
    """
    compiled = []
    for source_number, history in enumerate(histories, start=1):
        source = [
            CompiledVersion(history[:length], synthesizer, f"source{source_number}")
            for length in range(1, len(history) + 1)
        ]
        compiled.append(source)
    return compiled
def test_synthesize_loop_with_empty_change_history():
    """With no source versions at all, the loop must push zero commits."""
    with tempfile.TemporaryDirectory() as temp_dir:
        x = autosynth.synth.SynthesizeLoopToolbox(
            [], "test", temp_dir, "synth.metadata", ""
        )
        commit_count = autosynth.synth.synthesize_loop(
            # Fixed: pass a synthesizer *instance*, not the class; every
            # other call site (see SynthesizeLoopFixture) passes an
            # AbstractSynthesizer object.
            x, False, MockChangePusher(), MockSynthesizer(),
        )
        assert 0 == commit_count
class MockPullRequest(AbstractPullRequest):
    """Stub pull request: label additions succeed but are not recorded."""

    def add_labels(self, labels: typing.Sequence[str]) -> None:
        # Tests only need the call to succeed; the labels are discarded.
        pass
class MockChangePusher(AbstractChangePusher):
    """Stub change pusher: pretends to push and reports no pre-existing PRs."""

    def push_changes(
        self, commit_count: int, branch: str, pr_title: str = "", synth_log: str = ""
    ) -> AbstractPullRequest:
        # Fixed: the return annotation said `None`, but the method returns a
        # pull request object that callers add labels to.
        return MockPullRequest()

    def check_if_pr_already_exists(self, branch) -> bool:
        return False
class SynthesizeLoopFixture:
    """Bundles the temp dir, mock pusher, and mock synthesizer used by tests."""

    def __init__(self, temp_dir: str):
        self.temp_dir = temp_dir
        # Wrap the stub pusher in a Mock so call sequences can be asserted.
        self.change_pusher = unittest.mock.Mock(MockChangePusher)
        self.change_pusher.check_if_pr_already_exists.return_value = False
        self.synthesizer = MockSynthesizer()

    def synthesize_loop(
        self,
        source_versions: typing.Sequence[
            typing.Sequence[autosynth.abstract_source.AbstractSourceVersion]
        ],
        multiple_prs: bool = False,
    ) -> int:
        """Run the synthesize loop over *source_versions*; return the commit count."""
        toolbox = autosynth.synth.SynthesizeLoopToolbox(
            source_versions, "test", self.temp_dir, "synth.metadata", ""
        )
        return autosynth.synth.synthesize_loop(
            toolbox, multiple_prs, self.change_pusher, self.synthesizer,
        )
@pytest.fixture
def synthesize_loop_fixture() -> typing.Generator[SynthesizeLoopFixture, None, None]:
    """Yield a SynthesizeLoopFixture rooted in a fresh one-commit git repo.

    The current working directory is switched to a brand-new repo on
    branch 'test' for the duration of the test.
    """
    with tempfile.TemporaryDirectory() as temp_dir, tempfile.TemporaryDirectory() as working_repo, util.OsChdirContext(
        working_repo
    ):
        # Seed the repo with a single commit so later diffs have a base.
        subprocess.check_call(["git", "init", "."])
        pathlib.Path("README.md").write_text("Well done.")
        git.commit_all_changes("Added Readme")
        subprocess.check_call(["git", "checkout", "-b", "test"])
        yield SynthesizeLoopFixture(temp_dir)
def test_synthesize_loop_with_realistic_change_history(
    synthesize_loop_fixture: SynthesizeLoopFixture,
):
    """Single-PR mode: the git log must match the single-PR golden log."""
    synthesize_loop_with_realistic_change_history(
        synthesize_loop_fixture, False, "mock-synth-golden.log"
    )
def test_synthesize_loop_with_realistic_change_history_multiple_prs(
    synthesize_loop_fixture: SynthesizeLoopFixture,
):
    """Multiple-PR mode: one partial PR per source, in source order."""
    synthesize_loop_with_realistic_change_history(
        synthesize_loop_fixture, True, "mock-synth-golden-multiple-prs.log"
    )
    pusher = synthesize_loop_fixture.change_pusher
    expected = [
        call.check_if_pr_already_exists("test-source1"),
        call.push_changes(
            3,
            "test-source1",
            "[CHANGE ME] Re-generated to pick up changes from source1.",
        ),
        call.push_changes().add_labels(["context: partial"]),
        call.check_if_pr_already_exists("test-source2"),
        call.push_changes(
            3,
            "test-source2",
            "[CHANGE ME] Re-generated to pick up changes from source2.",
        ),
        call.push_changes().add_labels(["context: partial"]),
    ]
    assert expected == pusher.mock_calls
def test_synthesize_loop_with_realistic_change_history_squash_prs(
    synthesize_loop_fixture: SynthesizeLoopFixture,
):
    """Squash mode: each source's commits collapse into a single-commit PR."""
    inner_pusher = synthesize_loop_fixture.change_pusher
    synthesize_loop_fixture.change_pusher = SquashingChangePusher(inner_pusher)
    synthesize_loop_with_realistic_change_history(
        synthesize_loop_fixture, True, "mock-synth-golden-squash-prs.log"
    )
    expected = [
        call.check_if_pr_already_exists("test-source1"),
        call.push_changes(
            1,
            "test-source1",
            "[CHANGE ME] Re-generated to pick up changes from source1.",
            "",
        ),
        call.push_changes().add_labels(["context: partial"]),
        call.check_if_pr_already_exists("test-source2"),
        call.push_changes(
            1,
            "test-source2",
            "[CHANGE ME] Re-generated to pick up changes from source2.",
            "",
        ),
        call.push_changes().add_labels(["context: partial"]),
    ]
    assert expected == inner_pusher.mock_calls
def synthesize_loop_with_realistic_change_history(
    synthesize_loop_fixture: SynthesizeLoopFixture, multiple_prs: bool, golden_file: str
):
    """Drive the loop over a two-source history and diff the git log
    against *golden_file*."""
    history_source1 = [
        WriteFile("a.txt", "a"),
        NoChange(),
        NoChange(),
        NoChange(),
        WriteFile("b.txt", "b"),
        WriteFile("a.txt", "z"),
    ]
    history_source2 = [
        NoChange(),
        Failed(),
        Failed(),
        WriteFile("c.txt", "c"),
        WriteFile("c.txt", "c-more"),
        NoChange(),
    ]
    source_versions = compile_histories(
        [history_source1, history_source2], synthesize_loop_fixture.synthesizer
    )
    # Run the loop under test.
    synthesize_loop_fixture.synthesize_loop(source_versions, multiple_prs)
    # The resulting git log must match the golden log.
    log_fd, git_log_path = tempfile.mkstemp(".log")
    with os.fdopen(log_fd, "w") as git_log:
        subprocess.run(
            ["git", "log", "-p", "--no-decorate"], stdout=git_log,
        )
    golden_log_path = str(pathlib.Path(__file__).parent / "testdata" / golden_file)
    util.assert_git_logs_match(git_log_path, golden_log_path)
    # Binary search must have skipped at least one version entirely:
    # some version's synthesize was never invoked.
    all_versions = [version for group in source_versions for version in group]
    never_synthesized = [
        version for version in all_versions if version.synthesize_call_count == 0
    ]
    assert len(never_synthesized) > 0
def test_synthesize_loop_with_no_changes(
    synthesize_loop_fixture: SynthesizeLoopFixture,
):
    """A history that never changes generated code yields zero commits."""
    history = [
        [
            NoChange(),
            NoChange(),
            NoChange(),
            NoChange(),
            # These failures are effectively ignored. If they were not
            # ignored, then a single broken version would break autosynth
            # forever. In other words, if the most recent version succeeds,
            # then autosynth must succeed, regardless of history.
            # If the most recent version fails, then autosynth must fail.
            Failed(),
            NoChange(),
        ],
        [Failed(), NoChange()],
    ]
    source_versions = compile_histories(history, synthesize_loop_fixture.synthesizer)
    assert 0 == synthesize_loop_fixture.synthesize_loop(source_versions)
def test_synthesize_loop_with_synthesize_failure(
    synthesize_loop_fixture: SynthesizeLoopFixture,
):
    """When every version fails to synthesize, the loop re-raises the error."""
    source_versions = compile_histories(
        [[Failed(), Failed()]], synthesize_loop_fixture.synthesizer
    )
    raised = False
    try:
        synthesize_loop_fixture.synthesize_loop(source_versions)
    except subprocess.CalledProcessError:
        raised = True
    assert raised, "Expected an exception to be thrown."
def test_synthesize_loop_with_new_synth_metadata(
    synthesize_loop_fixture: SynthesizeLoopFixture,
):
    """A change that only touches synth.metadata still yields one commit."""
    source_versions = compile_histories(
        [
            [WriteFile("synth.metadata", "{}")],
            [WriteFile("synth.metadata", '{"a": "b"}')],
        ],
        synthesize_loop_fixture.synthesizer,
    )
    assert 1 == synthesize_loop_fixture.synthesize_loop(source_versions)
def test_synthesize_loop_preconfig(synthesize_loop_fixture: SynthesizeLoopFixture):
    """Each version's preconfig holds exactly what earlier versions added."""
    writers = [
        WriteFile("a.txt", "a"),
        WriteFile("b.txt", "b"),
        WriteFile("c.txt", "c"),
    ]
    source_versions = compile_histories([writers], synthesize_loop_fixture.synthesizer)
    synthesize_loop_fixture.synthesize_loop(source_versions)
    expected_preconfigs = [
        {},
        {"a.txt": "a"},
        {"a.txt": "a", "b.txt": "b"},
    ]
    for writer, expected in zip(writers, expected_preconfigs):
        for seen in writer.apply_call_history:
            assert expected == seen
def test_synthesize_loop_track_obsolete_files(
    synthesize_loop_fixture: SynthesizeLoopFixture,
):
    """A change that starts populating generatedFiles gets a dedicated commit."""
    # Start from a synth.metadata whose generatedFiles list is empty.
    with open("synth.metadata", "wt") as synth_metadata:
        synth_metadata.write(json.dumps({"generatedFiles": []}))
    git.commit_all_changes("Added synth.metadata with empty generatedFiles.")
    # A generated change that populates synth.metadata's generatedFiles.
    write_metadata = WriteFile(
        "synth.metadata", json.dumps({"generatedFiles": ["a.txt"]})
    )
    # Invoke the synthesize loop.
    source_versions = compile_histories(
        [[NoChange(), write_metadata]], synthesize_loop_fixture.synthesizer
    )
    synthesize_loop_fixture.synthesize_loop(source_versions)
    # The loop must have pushed a change with the tracking commit title.
    recorded = synthesize_loop_fixture.change_pusher.mock_calls
    assert call.push_changes(1, "test", "chore: start tracking obsolete files") in recorded
def test_synthesize_loop_skips_multiple_existing_prs(
    synthesize_loop_fixture: SynthesizeLoopFixture,
):
    """When a PR already exists for every source, nothing gets pushed."""
    source_versions = compile_histories(
        [[WriteFile("a.txt", "a")], [WriteFile("b.txt", "b")]],
        synthesize_loop_fixture.synthesizer,
    )
    # Pretend a PR already exists for each source branch.
    synthesize_loop_fixture.change_pusher.check_if_pr_already_exists.return_value = True
    synthesize_loop_fixture.synthesize_loop(source_versions, True)
    expected = [
        call.check_if_pr_already_exists("test-source1"),
        call.check_if_pr_already_exists("test-source2"),
    ]
    assert expected == synthesize_loop_fixture.change_pusher.mock_calls
def test_synthesize_loop_uses_single_commit_title_for_pr_title(
    synthesize_loop_fixture: SynthesizeLoopFixture,
):
    """A PR containing exactly one commit reuses that commit's title."""
    source_versions = compile_histories(
        [[NoChange()], [NoChange(), WriteFile("a.txt", "a")]],
        synthesize_loop_fixture.synthesizer,
    )
    synthesize_loop_fixture.synthesize_loop(source_versions, True)
    expected = [
        call.check_if_pr_already_exists("test-source1"),
        call.check_if_pr_already_exists("test-source2"),
        call.push_changes(1, "test-source2", "Wrote a to a.txt."),
        call.push_changes().add_labels(["context: full"]),
    ]
    assert expected == synthesize_loop_fixture.change_pusher.mock_calls
def test_synthesize_loop_always_pushes_something_when_latest_version_succeeds(
    synthesize_loop_fixture: SynthesizeLoopFixture,
):
    """Even when the loop ultimately raises, successful work is still pushed."""
    source_versions = compile_histories(
        [[Failed(), WriteFile("a.txt", "a")], [Failed(), WriteFile("b.txt", "b")]],
        synthesize_loop_fixture.synthesizer,
    )
    # The synthesize loop is expected to raise.
    raised = False
    try:
        synthesize_loop_fixture.synthesize_loop(source_versions, True)
    except Exception:
        raised = True
    assert raised, "Expected an exception to be thrown."
    # ...but the loop still should have pushed a change.
    synthesize_loop_fixture.change_pusher.push_changes.assert_called()
def make_working_repo(working_dir: str):
"""Create a local repo that resembles a real repo.
Specifically, it has a history of synth.py changes that actually change the
generated output.
"""
subprocess.check_call(["git", "init"], cwd=working_dir)
subprocess.check_call(
[
"git",
"remote",
"add",
"origin",
"https://github.com/googleapis/nodejs-vision.git",
]
)
working_path = pathlib.Path(working_dir)
# The simplest possible synth.py. It generates one file with | |
<filename>qiskit/algorithms/optimizers/spsa.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Simultaneous Perturbation Stochastic Approximation (SPSA) optimizer.
This implementation allows both, standard first-order as well as second-order SPSA.
"""
from typing import Iterator, Optional, Union, Callable, Tuple, Dict, List, Any
import logging
import warnings
from time import time
from collections import deque
import scipy
import numpy as np
from qiskit.utils import algorithm_globals
from .optimizer import Optimizer, OptimizerSupportLevel, OptimizerResult, POINT
# number of function evaluations, parameters, loss, stepsize, accepted
CALLBACK = Callable[[int, np.ndarray, float, float, bool], None]
# Same arguments as CALLBACK; returns True to request early termination
# (see the ``termination_checker`` argument documented below).
TERMINATIONCHECKER = Callable[[int, np.ndarray, float, float, bool], bool]
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
class SPSA(Optimizer):
"""Simultaneous Perturbation Stochastic Approximation (SPSA) optimizer.
SPSA [1] is an gradient descent method for optimizing systems with multiple unknown parameters.
As an optimization method, it is appropriately suited to large-scale population models,
adaptive modeling, and simulation optimization.
.. seealso::
Many examples are presented at the `SPSA Web site <http://www.jhuapl.edu/SPSA>`__.
The main feature of SPSA is the stochastic gradient approximation, which requires only two
measurements of the objective function, regardless of the dimension of the optimization
problem.
Additionally to standard, first-order SPSA, where only gradient information is used, this
implementation also allows second-order SPSA (2-SPSA) [2]. In 2-SPSA we also estimate the
Hessian of the loss with a stochastic approximation and multiply the gradient with the
inverse Hessian to take local curvature into account and improve convergence.
Notably this Hessian estimate requires only a constant number of function evaluations
unlike an exact evaluation of the Hessian, which scales quadratically in the number of
function evaluations.
.. note::
SPSA can be used in the presence of noise, and it is therefore indicated in situations
involving measurement uncertainty on a quantum computation when finding a minimum.
If you are executing a variational algorithm using a Quantum ASseMbly Language (QASM)
simulator or a real device, SPSA would be the most recommended choice among the optimizers
provided here.
The optimization process can includes a calibration phase if neither the ``learning_rate`` nor
``perturbation`` is provided, which requires additional functional evaluations.
(Note that either both or none must be set.) For further details on the automatic calibration,
please refer to the supplementary information section IV. of [3].
.. note::
This component has some function that is normally random. If you want to reproduce behavior
then you should set the random number generator seed in the algorithm_globals
(``qiskit.utils.algorithm_globals.random_seed = seed``).
Examples:
This short example runs SPSA for the ground state calculation of the ``Z ^ Z``
observable where the ansatz is a ``PauliTwoDesign`` circuit.
.. code-block:: python
import numpy as np
from qiskit.algorithms.optimizers import SPSA
from qiskit.circuit.library import PauliTwoDesign
from qiskit.opflow import Z, StateFn
ansatz = PauliTwoDesign(2, reps=1, seed=2)
observable = Z ^ Z
initial_point = np.random.random(ansatz.num_parameters)
def loss(x):
bound = ansatz.bind_parameters(x)
return np.real((StateFn(observable, is_measurement=True) @ StateFn(bound)).eval())
spsa = SPSA(maxiter=300)
result = spsa.optimize(ansatz.num_parameters, loss, initial_point=initial_point)
To use the Hessian information, i.e. 2-SPSA, you can add `second_order=True` to the
initializer of the `SPSA` class, the rest of the code remains the same.
.. code-block:: python
two_spsa = SPSA(maxiter=300, second_order=True)
result = two_spsa.optimize(ansatz.num_parameters, loss, initial_point=initial_point)
References:
[1]: <NAME> (1998). An Overview of the Simultaneous Perturbation Method for Efficient
Optimization, Johns Hopkins APL Technical Digest, 19(4), 482–492.
`Online at jhuapl.edu. <https://www.jhuapl.edu/SPSA/PDF-SPSA/Spall_An_Overview.PDF>`_
[2]: <NAME> (1997). Accelerated second-order stochastic optimization using only
function measurements, Proceedings of the 36th IEEE Conference on Decision and Control,
1417-1424 vol.2. `Online at IEEE.org. <https://ieeexplore.ieee.org/document/657661>`_
[3]: <NAME> al. (2017). Hardware-efficient Variational Quantum Eigensolver for
Small Molecules and Quantum Magnets. Nature 549, pages242–246(2017).
`arXiv:1704.05018v2 <https://arxiv.org/pdf/1704.05018v2.pdf#section*.11>`_
"""
def __init__(
self,
maxiter: int = 100,
blocking: bool = False,
allowed_increase: Optional[float] = None,
trust_region: bool = False,
learning_rate: Optional[Union[float, np.array, Callable[[], Iterator]]] = None,
perturbation: Optional[Union[float, np.array, Callable[[], Iterator]]] = None,
last_avg: int = 1,
resamplings: Union[int, Dict[int, int]] = 1,
perturbation_dims: Optional[int] = None,
second_order: bool = False,
regularization: Optional[float] = None,
hessian_delay: int = 0,
lse_solver: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None,
initial_hessian: Optional[np.ndarray] = None,
callback: Optional[CALLBACK] = None,
termination_checker: Optional[TERMINATIONCHECKER] = None,
) -> None:
r"""
Args:
maxiter: The maximum number of iterations. Note that this is not the maximal number
of function evaluations.
blocking: If True, only accepts updates that improve the loss (up to some allowed
increase, see next argument).
allowed_increase: If ``blocking`` is ``True``, this argument determines by how much
the loss can increase with the proposed parameters and still be accepted.
If ``None``, the allowed increases is calibrated automatically to be twice the
approximated standard deviation of the loss function.
trust_region: If ``True``, restricts the norm of the update step to be :math:`\leq 1`.
learning_rate: The update step is the learning rate is multiplied with the gradient.
If the learning rate is a float, it remains constant over the course of the
optimization. If a NumPy array, the :math:`i`-th element is the learning rate for
the :math:`i`-th iteration. It can also be a callable returning an iterator which
yields the learning rates for each optimization step.
If ``learning_rate`` is set ``perturbation`` must also be provided.
perturbation: Specifies the magnitude of the perturbation for the finite difference
approximation of the gradients. See ``learning_rate`` for the supported types.
If ``perturbation`` is set ``learning_rate`` must also be provided.
last_avg: Return the average of the ``last_avg`` parameters instead of just the
last parameter values.
resamplings: The number of times the gradient (and Hessian) is sampled using a random
direction to construct a gradient estimate. Per default the gradient is estimated
using only one random direction. If an integer, all iterations use the same number
of resamplings. If a dictionary, this is interpreted as
``{iteration: number of resamplings per iteration}``.
perturbation_dims: The number of perturbed dimensions. Per default, all dimensions
are perturbed, but a smaller, fixed number can be perturbed. If set, the perturbed
dimensions are chosen uniformly at random.
second_order: If True, use 2-SPSA instead of SPSA. In 2-SPSA, the Hessian is estimated
additionally to the gradient, and the gradient is preconditioned with the inverse
of the Hessian to improve convergence.
regularization: To ensure the preconditioner is symmetric and positive definite, the
identity times a small coefficient is added to it. This generator yields that
coefficient.
hessian_delay: Start multiplying the gradient with the inverse Hessian only after a
certain number of iterations. The Hessian is still evaluated and therefore this
argument can be useful to first get a stable average over the last iterations before
using it as preconditioner.
lse_solver: The method to solve for the inverse of the Hessian. Per default an
exact LSE solver is used, but can e.g. be overwritten by a minimization routine.
initial_hessian: The initial guess for the Hessian. By default the identity matrix
is used.
callback: A callback function passed information in each iteration step. The
information is, in this order: the number of function evaluations, the parameters,
the function value, the stepsize, whether the step was accepted.
termination_checker: A callback function executed at the end of each iteration step. The
arguments are, in this order: the parameters, the function value, the number
of function evaluations, the stepsize, whether the step was accepted. If the callback
returns True, the optimization is terminated.
To prevent additional evaluations of the objective method, if the objective has not yet
been evaluated, the objective is estimated by taking the mean of the objective
evaluations used in the estimate of the gradient.
Raises:
ValueError: If ``learning_rate`` or ``perturbation`` is an array with less elements
than the number of iterations.
Example:
.. code-block::python
import numpy as np
from qiskit.algorithms.optimizers import SPSA
def | |
log in acc.logs:
if log["timestamp"] == timestamp:
return Log(**log)
return None
async def get_member_logs(self, member: discord.Member) -> Union[List[Log], list]:
    """Return every log of *member* as Log objects.

    Returns an empty list when the member has no logs."""
    acc = await self.get_account(member)
    return [Log(member, **raw) for raw in (acc.logs or [])]
async def add_log(self, member: discord.Member, text: str, delta: int) -> list:
    """Append a log entry for *member* and return the new list of logs.

    Raises:
        TypeError: if *text* is not a str or *delta* is not an int.
    """
    if not isinstance(text, str):
        raise TypeError("Type du contenu du log invalide, {} != str".format(type(text)))
    if not isinstance(delta, int):
        raise TypeError("Type de somme du log invalide, {} != int".format(type(delta)))
    added = {"text": text, "timestamp": int(time.time()), "delta": delta}
    acc = await self.get_account(member)
    logs = acc.logs
    logs.append(added)
    # Trim *after* appending so the cap holds for every max length.
    # The previous slice-before-append (`logs[-(max - 1):]`) broke when the
    # cap was 1: `logs[-0:]` keeps the whole list, then append exceeds it.
    max_logs_length = await self.config.max_logs_length()
    logs = logs[-max_logs_length:]
    await self.config.member(member).logs.set(logs)
    return logs
async def delete_log(self, member: discord.Member, timestamp: int) -> list:
    """Remove from *member* every log matching *timestamp*.

    Typically optional, since old logs are already replaced as new ones
    are added. Returns the new list of logs.

    Raises:
        TypeError: if *timestamp* is not an int.
        ValueError: if no log carries that timestamp.
    """
    if not isinstance(timestamp, int):
        raise TypeError("Type du timestamp du log invalide, {} != int".format(type(timestamp)))
    if not await self.get_log(member, timestamp):
        raise ValueError(f"Log avec le timestamp {timestamp} pour USERID={member.id} introuvable")
    acc = await self.get_account(member)
    # Keep only the entries whose timestamp differs.
    remaining = [entry for entry in acc.logs if entry["timestamp"] != timestamp]
    await self.config.member(member).logs.set(remaining)
    return remaining
async def wipe_logs(self, member: discord.Member) -> None:
    """Delete all of a member's logs."""
    await self.config.member(member).clear_raw("logs")
async def wipe_guild(self, guild: discord.Guild) -> None:
    """Delete the bank data of every member of a guild."""
    await self.config.clear_all_members(guild)
async def wipe_account(self, member: discord.Member) -> None:
    """Delete a member's bank data."""
    await self.config.member(member).clear()
async def raw_delete_account(self, user_id: int, guild: discord.Guild) -> None:
    """Delete a bank account addressed by raw user ID.

    Uses raw IDs, so no ``discord.Member`` object is required."""
    await self.config.member_from_ids(guild.id, user_id).clear()
async def get_max_balance(self) -> int:
    """Return the maximum balance a member account can reach (on any guild)."""
    # Fixed: the Config value must be awaited (cf. the other call site
    # `await self.config.max_balance()`); without `await` this returned a
    # coroutine object instead of the stored int.
    return await self.config.max_balance()
async def set_max_balance(self, value: int) -> None:
    """Set the maximum balance a member account can reach.

    Raises:
        TypeError: if *value* is not an int.
        ValueError: if *value* is zero or negative.
    """
    if not isinstance(value, int):
        raise TypeError(f"Type de la valeur maximale invalide, {type(value)} != int")
    if value <= 0:
        raise ValueError("Valeur invalide, le maximum ne peut pas être négatif ou nul")
    await self.config.max_balance.set(value)
async def get_max_logs_length(self) -> int:
    """Return the maximum number of logs stored in a member's bank data."""
    # Fixed: the Config value must be awaited (cf. `await
    # self.config.max_logs_length()` in add_log); without `await` this
    # returned a coroutine object instead of the stored int.
    return await self.config.max_logs_length()
async def set_max_logs_length(self, length: int) -> None:
    """Set how many logs are kept per member.

    Raises:
        TypeError: if *length* is not an int.
        ValueError: if *length* is below 1.
    """
    if not isinstance(length, int):
        raise TypeError(f"Type de la longueur maximale invalide, {type(length)} != int")
    if length < 1:
        raise ValueError("Valeur invalide, le maximum ne peut pas être négatif ou nul")
    await self.config.max_logs_length.set(length)
async def get_guild_leaderboard(self, guild: discord.Guild, cutoff: Union[int, None] = None) -> Union[list, List[Account]]:
    """Return the guild's richest members as Account objects, richest first.

    Args:
        guild: guild whose member accounts are ranked.
        cutoff: optional maximum number of entries to return.
            (Annotation fixed: the default is None, so plain ``int`` was wrong.)

    Returns an empty list when no leaderboard can be built.
    """
    users = await self.config.all_members(guild)
    # `sorted` accepts any iterable; the intermediate list() was redundant.
    sorted_users = sorted(users.items(), key=lambda u: u[1]["balance"], reverse=True)
    top = []
    for uid, raw_account in sorted_users:
        user = guild.get_member(uid)
        if user:  # skip accounts whose owner is no longer resolvable
            top.append(Account(user, **raw_account))
    return top[:cutoff] if cutoff else top
async def get_leaderboard_position_for(self, member: discord.Member) -> int:
    """Return *member*'s 1-based rank on their guild's leaderboard.

    Falls back to the leaderboard's last position when the member is
    not found in it.
    """
    top = await self.get_guild_leaderboard(member.guild)
    # enumerate() replaces the per-iteration `top.index(acc)` lookup,
    # which made the loop quadratic.
    for position, acc in enumerate(top, start=1):
        if acc.user == member:
            return position
    return len(top)
async def utils_parse_timedelta(self, time_string: str) -> timedelta:
    """Build a *timedelta* from a duration string of the form "Xj Xh Xm Xs".

    Every component is optional. Decimal values (e.g. "1.5h") are accepted,
    matching what the regular expression below already allows.

    Raises:
        TypeError: if *time_string* is not a str.
        ValueError: if the string cannot be parsed as a duration.
    """
    if not isinstance(time_string, str):
        raise TypeError("Le texte à parser est invalide, {} != str".format(type(time_string)))
    regex = re.compile('^((?P<days>[\\.\\d]+?)j)? *((?P<hours>[\\.\\d]+?)h)? *((?P<minutes>[\\.\\d]+?)m)? *((?P<seconds>[\\.\\d]+?)s)? *$')
    sch = regex.match(time_string)
    if not sch:
        raise ValueError("Aucun timedelta n'a pu être déterminé des valeurs fournies")
    parsed = sch.groupdict()
    # float() instead of int(): the pattern explicitly allows a decimal
    # point, and int("1.5") would raise ValueError. Integral inputs are
    # unaffected (timedelta treats 2 and 2.0 identically).
    return timedelta(**{unit: float(value) for unit, value in parsed.items() if value})
# Commandes -----------------------v
@commands.group(name="bank", aliases=["b"], invoke_without_command=True)
async def _bank_actions(self, ctx, user: discord.Member = None):
    """Management commands for the *Cash* virtual bank account"""
    # A subcommand was invoked: let it handle everything.
    if ctx.invoked_subcommand is not None:
        return
    # Bare `bank` defaults to showing the account.
    return await ctx.invoke(self.show_bank, user=user)
@_bank_actions.command(name="show")
@commands.guild_only()
async def show_bank(self, ctx, user: discord.Member = None):
    """Display a member's account information"""
    target = user or ctx.message.author
    acc = await self.get_account(target)
    curr = await self.get_currency(ctx.guild)
    embed = discord.Embed(color=target.color, timestamp=ctx.message.created_at)
    embed.set_author(name="Compte de " + str(target), icon_url=target.avatar_url)
    embed.add_field(name="💰 Solde", value=box(f"{humanize_number(acc.balance)} {curr}"))
    delta = await self.get_delta(target)
    embed.add_field(name=f"{'📉' if delta < 0 else '📈'} Variation", value=box(f"{delta:+}"))
    rank = await self.get_leaderboard_position_for(target)
    embed.add_field(name="🏅 Position", value=box(f"#{rank}"))
    logs = await self.get_member_logs(target)
    if logs:
        # Newest entries first; texts are capped at 50 characters.
        lines = [f"{log.delta:+} · {log.text[:50]}" for log in reversed(logs)]
        embed.add_field(name="📃 Historique", value="\n".join(lines))
    await ctx.send(embed=embed)
@_bank_actions.command(name="give")
@commands.guild_only()
@commands.cooldown(1, 60, commands.BucketType.member)
async def bank_give(self, ctx, receveur: discord.Member, somme: int):
    """Transfer money to a third-party receiver"""
    try:
        await self.transfert_credits(ctx.author, receveur, int(somme))
        curr = await self.get_currency(ctx.guild)
        await ctx.send(f"**Transfert réalisé** • {receveur.mention} a reçu **{somme}** {curr}")
    except ValueError:
        # transfert_credits rejects zero or negative amounts.
        return await ctx.send("**Impossible** • Vous ne pouvez pas transférer une somme nulle ou négative")
    except BalanceTooHigh:
        cap = humanize_number(await self.config.max_balance())
        return await ctx.send(f"**Limite atteinte** • {receveur.mention} ne peut pas recevoir cette somme car "
                              f"il dépasserait le plafond fixé de {cap}")
    # Record the transfer on both accounts.
    await self.add_log(ctx.author, f"Transfert d'argent à {receveur.name}", -somme)
    await self.add_log(receveur, f"Reception d'argent de {ctx.author.name}", somme)
@_bank_actions.command(name="gift")
@commands.guild_only()
@commands.cooldown(1, 60, commands.BucketType.member)
async def bank_gift(self, ctx, somme: int, expire: str = "24h"):
    """Generate a gift code holding credits (deferred withdrawal)

    The credits are not withdrawn from the generating member's account
    immediately, and using the code is impossible if funds are lacking.
    Codes expire after 24h by default; change that with the *<expire>*
    parameter using the `"Xj Xh Xm Xs"` format."""
    user = ctx.author
    # Reject non-positive amounts up front.
    if somme < 1:
        return await ctx.send(
            "**Erreur** • La valeur doit être positive (sup. à 0)")
    try:
        tdelta = await self.utils_parse_timedelta(expire)
    except ValueError:
        return await ctx.send("**Erreur** • Le temps d'expiration n'est pas valide, utilisez le format `\"Xj Xh Xm Xs\"`")
    if await self.enough_balance(user, somme):
        timestamp = (datetime.now() + tdelta).timestamp()
        curr = await self.get_currency(ctx.guild)
        em = discord.Embed(title=f"**Nouveau code-cadeau** · {somme} {curr}",
                           description="**En cours de génération...**")
        em.add_field(name="Information", value="Un membre peut utiliser ce code avec `b open`\n"
                                               "Vous serez débité de la valeur du code lors de son utilisation\n"
                                               "L'expiration du code rend impossible son utilisation, pour "
                                               "détruire le code avant sa date d'expiration utilisez-le vous-même.")
        em.set_footer(text="Ce code expirera dans {}".format(humanize_timedelta(timedelta=tdelta)))
        try:
            dm = await user.send(embed=em)
        # Fixed: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit. Only Discord delivery failures
        # (closed DMs raise Forbidden, a subclass of HTTPException)
        # should be handled here.
        except discord.HTTPException:
            return await ctx.send("**Erreur** • Je ne peux pas générer de code si vous ne me permettez pas de vous envoyer un MP")
        try:
            code = await self.new_gift_code(user, somme, int(timestamp))
            await asyncio.sleep(1)
            em.description = box(code)
            em.colour = user.color
            await dm.edit(embed=em)
        except ValueError as e:
            await ctx.send(
                f"**Erreur** • La génération du code n'a pas pu se faire en raison d'un problème dans les valeurs fournies : `{e}`")
            em.description = box("Erreur dans la génération du code")
            await dm.edit(embed=em)
    else:
        await ctx.send(
            "**Impossible** • Même si le retrait n'est pas immédiat, vous devez avoir la somme sur votre compte préalablement à la génération d'un code")
@_bank_actions.command(name="open")
async def bank_open_gift(self, ctx, code: str):
"""Utiliser un code-cadeau et obtenir son contenu
Les codes ne fonctionnent que sur le serveur où ils ont été générés"""
code = code.upper().strip()
try:
if ctx.guild:
gift = await self.get_gift_code(ctx.guild, code)
else:
gift = await self.fetch_gift_code(code)
except ValueError:
return await ctx.send("**Invalide** • Le code fourni est invalide, vérifiez-le et réessayez")
except GiftCodeExpired:
return await ctx.send("**Expiré** • Le code fourni a expiré, consultez le générateur du code pour en obtenir un nouveau")
if gift:
guild = gift.guild
curr = await self.get_currency(guild)
hum_value = humanize_number(gift.value)
content = f"{hum_value} {curr}"
em = discord.Embed(title=f"**Code-cadeau** · {code}",
description="Voulez-vous échanger le code contre son contenu ?")
em.add_field(name="Contenu", value=box(content))
em.set_footer(text="🎁 Accepter | ❌ Refuser")
emojis = ["🎁", "❌"]
msg = await ctx.send(embed=em)
start_adding_reactions(msg, emojis)
try:
react, user = await self.bot.wait_for("reaction_add",
check=lambda r, u: u == ctx.author and r.message.id == msg.id,
timeout=20)
except asyncio.TimeoutError:
| |
want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
    @classmethod
    def assert_compute(cls, estimate, today):
        """Check that the ``estimate`` window computed on ``today`` is correct.

        Abstract hook: concrete subclasses override this with the actual
        assertions; this base implementation always raises.
        """
        raise NotImplementedError("assert_compute")
    def test_windows_with_varying_num_estimates(self):
        """Run a 3-day-window factor over 2015-01-13..14 and delegate the
        per-day checks to `assert_compute` (provided by subclasses)."""
        dataset = QuartersEstimates(1)
        # Bind the check to a local: inside SomeFactor.compute, `self` is the
        # factor instance, not this test case, so the test's assert_compute
        # must be captured via the closure.
        assert_compute = self.assert_compute
        class SomeFactor(CustomFactor):
            inputs = [dataset.estimate]
            window_length = 3
            def compute(self, today, assets, out, estimate):
                # The assertions themselves live in the test subclass.
                assert_compute(estimate, today)
        engine = self.make_engine()
        engine.run_pipeline(
            Pipeline({"est": SomeFactor()}),
            start_date=pd.Timestamp("2015-01-13", tz="utc"),
            # last event date we have
            end_date=pd.Timestamp("2015-01-14", tz="utc"),
        )
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
    """Varying-num-estimates case checked against the *previous* quarter
    loader."""
    def assert_compute(self, estimate, today):
        # Expected 3-day windows; one column per sid.
        if today == pd.Timestamp("2015-01-13", tz="utc"):
            expected_sid_0 = [np.NaN, np.NaN, 12]
            expected_sid_1 = [np.NaN, 12, 12]
        else:
            expected_sid_0 = [np.NaN, 12, 12]
            expected_sid_1 = [12, 12, 12]
        assert_array_equal(estimate[:, 0], np.array(expected_sid_0))
        assert_array_equal(estimate[:, 1], np.array(expected_sid_1))
    @classmethod
    def make_loader(cls, events, columns):
        """Build the previous-quarter estimates loader for the pipeline."""
        return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
    """Varying-num-estimates case checked against the *next* quarter
    loader."""
    def assert_compute(self, estimate, today):
        # Expected 3-day windows; one column per sid.
        if today == pd.Timestamp("2015-01-13", tz="utc"):
            expected_sid_0 = [11, 12, 12]
            expected_sid_1 = [np.NaN, np.NaN, 21]
        else:
            expected_sid_0 = [np.NaN, 21, 21]
            expected_sid_1 = [np.NaN, 21, 21]
        assert_array_equal(estimate[:, 0], np.array(expected_sid_0))
        assert_array_equal(estimate[:, 1], np.array(expected_sid_1))
    @classmethod
    def make_loader(cls, events, columns):
        """Build the next-quarter estimates loader for the pipeline."""
        return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
    """
    ZiplineTestCase mixin providing fixtures and a test to test running a
    Pipeline with an estimates loader over differently-sized windows.
    Attributes
    ----------
    events : pd.DataFrame
        DataFrame with estimates for 3 sids (0, 10, 20); see `make_events`.
    window_test_start_date : pd.Timestamp
        The date from which the window should start.
    timelines : dict[int -> pd.DataFrame]
        A dictionary mapping to the number of quarters out to
        snapshots of how the data should look on each date in the date range.
    Methods
    -------
    make_expected_timelines() -> dict[int -> pd.DataFrame]
        Creates a dictionary of expected data. See `timelines`, above.
    Tests
    -----
    test_estimate_windows_at_quarter_boundaries()
        Tests that we overwrite values with the correct quarter's estimate at
        the correct dates when we have a factor that asks for a window of data.
    """
    END_DATE = pd.Timestamp("2015-02-10", tz="utc")
    # All windows in the test run back to this date.
    window_test_start_date = pd.Timestamp("2015-01-05")
    # Dates chosen to straddle the event-date boundaries in `make_events`.
    critical_dates = [
        pd.Timestamp("2015-01-09", tz="utc"),
        pd.Timestamp("2015-01-15", tz="utc"),
        pd.Timestamp("2015-01-20", tz="utc"),
        pd.Timestamp("2015-01-26", tz="utc"),
        pd.Timestamp("2015-02-05", tz="utc"),
        pd.Timestamp("2015-02-10", tz="utc"),
    ]
    # Starting date, number of announcements out.
    window_test_cases = list(itertools.product(critical_dates, (1, 2)))
    @classmethod
    def make_events(cls):
        """Build estimates events for sids 0, 10 and 20; each sid's timeline
        exercises a different overwrite scenario (see inline comments)."""
        # Typical case: 2 consecutive quarters.
        sid_0_timeline = pd.DataFrame(
            {
                TS_FIELD_NAME: [
                    cls.window_test_start_date,
                    pd.Timestamp("2015-01-20"),
                    pd.Timestamp("2015-01-12"),
                    pd.Timestamp("2015-02-10"),
                    # We want a case where we get info for a later
                    # quarter before the current quarter is over but
                    # after the split_asof_date to make sure that
                    # we choose the correct date to overwrite until.
                    pd.Timestamp("2015-01-18"),
                ],
                EVENT_DATE_FIELD_NAME: [
                    pd.Timestamp("2015-01-20"),
                    pd.Timestamp("2015-01-20"),
                    pd.Timestamp("2015-02-10"),
                    pd.Timestamp("2015-02-10"),
                    pd.Timestamp("2015-04-01"),
                ],
                "estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
                FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
                FISCAL_YEAR_FIELD_NAME: 2015,
                SID_FIELD_NAME: 0,
            }
        )
        # We want a case where we skip a quarter. We never find out about Q2.
        sid_10_timeline = pd.DataFrame(
            {
                TS_FIELD_NAME: [
                    pd.Timestamp("2015-01-09"),
                    pd.Timestamp("2015-01-12"),
                    pd.Timestamp("2015-01-09"),
                    pd.Timestamp("2015-01-15"),
                ],
                EVENT_DATE_FIELD_NAME: [
                    pd.Timestamp("2015-01-22"),
                    pd.Timestamp("2015-01-22"),
                    pd.Timestamp("2015-02-05"),
                    pd.Timestamp("2015-02-05"),
                ],
                "estimate": [110.0, 111.0] + [310.0, 311.0],
                FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
                FISCAL_YEAR_FIELD_NAME: 2015,
                SID_FIELD_NAME: 10,
            }
        )
        # We want to make sure we have correct overwrites when sid quarter
        # boundaries collide. This sid's quarter boundaries collide with sid 0.
        sid_20_timeline = pd.DataFrame(
            {
                TS_FIELD_NAME: [
                    cls.window_test_start_date,
                    pd.Timestamp("2015-01-07"),
                    cls.window_test_start_date,
                    pd.Timestamp("2015-01-17"),
                ],
                EVENT_DATE_FIELD_NAME: [
                    pd.Timestamp("2015-01-20"),
                    pd.Timestamp("2015-01-20"),
                    pd.Timestamp("2015-02-10"),
                    pd.Timestamp("2015-02-10"),
                ],
                "estimate": [120.0, 121.0] + [220.0, 221.0],
                FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
                FISCAL_YEAR_FIELD_NAME: 2015,
                SID_FIELD_NAME: 20,
            }
        )
        concatted = pd.concat(
            [sid_0_timeline, sid_10_timeline, sid_20_timeline]
        ).reset_index()
        # Shuffle rows (with a fixed seed) so the loader cannot rely on
        # input ordering.
        np.random.seed(0)
        return concatted.reindex(np.random.permutation(concatted.index))
    @classmethod
    def get_sids(cls):
        """Return the event sids plus filler sids in the gaps between them."""
        sids = sorted(cls.events[SID_FIELD_NAME].unique())
        # Add extra sids between sids in our data. We want to test that we
        # apply adjustments to the correct sids.
        return [
            sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
        ] + [sids[-1]]
    @classmethod
    def make_expected_timelines(cls):
        """Hook for subclasses: map num-announcements-out -> expected data."""
        return {}
    @classmethod
    def init_class_fixtures(cls):
        """Pre-bind the expected-df factory and build the expected
        timelines once per class."""
        super(WithEstimateWindows, cls).init_class_fixtures()
        cls.create_expected_df_for_factor_compute = partial(
            create_expected_df_for_factor_compute,
            cls.window_test_start_date,
            cls.get_sids(),
        )
        cls.timelines = cls.make_expected_timelines()
    @parameterized.expand(window_test_cases)
    def test_estimate_windows_at_quarter_boundaries(
        self, start_date, num_announcements_out
    ):
        """Check each day's factor window against the expected timeline
        snapshot for that day."""
        dataset = QuartersEstimates(num_announcements_out)
        trading_days = self.trading_days
        timelines = self.timelines
        # The window length should be from the starting index back to the first
        # date on which we got data. The goal is to ensure that as we
        # progress through the timeline, all data we got, starting from that
        # first date, is correctly overwritten.
        window_len = (
            self.trading_days.get_loc(start_date)
            - self.trading_days.get_loc(self.window_test_start_date)
            + 1
        )
        class SomeFactor(CustomFactor):
            inputs = [dataset.estimate]
            window_length = window_len
            def compute(self, today, assets, out, estimate):
                today_idx = trading_days.get_loc(today)
                # Expected values for all days up to and including today,
                # trimmed below to the factor's window length.
                today_timeline = (
                    timelines[num_announcements_out]
                    .loc[today]
                    .reindex(trading_days[: today_idx + 1])
                    .values
                )
                timeline_start_idx = len(today_timeline) - window_len
                assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
        engine = self.make_engine()
        engine.run_pipeline(
            Pipeline({"est": SomeFactor()}),
            start_date=start_date,
            # last event date we have
            end_date=pd.Timestamp("2015-02-10", tz="utc"),
        )
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
    """Window test run against the previous-quarter estimates loader."""
    @classmethod
    def make_loader(cls, events, columns):
        """Build the previous-quarter estimates loader for the pipeline."""
        return PreviousEarningsEstimatesLoader(events, columns)
    @classmethod
    def make_expected_timelines(cls):
        """Expected per-day overwrite snapshots for 1 and 2 quarters back.

        Each tuple is ``(sid, value, overwrite-from date)``; values follow
        the estimates defined in `WithEstimateWindows.make_events`.
        """
        oneq_previous = pd.concat(
            [
                # 2015-01-09 .. 2015-01-19: no event has occurred yet, so
                # there is no previous-quarter estimate for any sid.
                pd.concat(
                    [
                        cls.create_expected_df_for_factor_compute(
                            [
                                (0, np.NaN, cls.window_test_start_date),
                                (10, np.NaN, cls.window_test_start_date),
                                (20, np.NaN, cls.window_test_start_date),
                            ],
                            end_date,
                        )
                        for end_date in pd.date_range("2015-01-09", "2015-01-19")
                    ]
                ),
                # 2015-01-20: sids 0 and 20 announce Q1 (estimates 101, 121).
                cls.create_expected_df_for_factor_compute(
                    [
                        (0, 101, pd.Timestamp("2015-01-20")),
                        (10, np.NaN, cls.window_test_start_date),
                        (20, 121, pd.Timestamp("2015-01-20")),
                    ],
                    pd.Timestamp("2015-01-20"),
                ),
                cls.create_expected_df_for_factor_compute(
                    [
                        (0, 101, pd.Timestamp("2015-01-20")),
                        (10, np.NaN, cls.window_test_start_date),
                        (20, 121, pd.Timestamp("2015-01-20")),
                    ],
                    pd.Timestamp("2015-01-21"),
                ),
                # 2015-01-22 .. 2015-02-04: sid 10's Q1 event (111) occurs.
                pd.concat(
                    [
                        cls.create_expected_df_for_factor_compute(
                            [
                                (0, 101, pd.Timestamp("2015-01-20")),
                                (10, 111, pd.Timestamp("2015-01-22")),
                                (20, 121, pd.Timestamp("2015-01-20")),
                            ],
                            end_date,
                        )
                        for end_date in pd.date_range("2015-01-22", "2015-02-04")
                    ]
                ),
                # 2015-02-05 .. 2015-02-09: sid 10's Q3 event (311) occurs.
                pd.concat(
                    [
                        cls.create_expected_df_for_factor_compute(
                            [
                                (0, 101, pd.Timestamp("2015-01-20")),
                                (10, 311, pd.Timestamp("2015-02-05")),
                                (20, 121, pd.Timestamp("2015-01-20")),
                            ],
                            end_date,
                        )
                        for end_date in pd.date_range("2015-02-05", "2015-02-09")
                    ]
                ),
                # 2015-02-10: sids 0 and 20 announce Q2 (201, 221).
                cls.create_expected_df_for_factor_compute(
                    [
                        (0, 201, pd.Timestamp("2015-02-10")),
                        (10, 311, pd.Timestamp("2015-02-05")),
                        (20, 221, pd.Timestamp("2015-02-10")),
                    ],
                    pd.Timestamp("2015-02-10"),
                ),
            ]
        )
        twoq_previous = pd.concat(
            [
                cls.create_expected_df_for_factor_compute(
                    [
                        (0, np.NaN, cls.window_test_start_date),
                        (10, np.NaN, cls.window_test_start_date),
                        (20, np.NaN, cls.window_test_start_date),
                    ],
                    end_date,
                )
                for end_date in pd.date_range("2015-01-09", "2015-02-09")
            ]
            # We never get estimates for S1 for 2Q ago because once Q3
            # becomes our previous quarter, 2Q ago would be Q2, and we have
            # no data on it.
            + [
                cls.create_expected_df_for_factor_compute(
                    [
                        (0, 101, pd.Timestamp("2015-02-10")),
                        (10, np.NaN, pd.Timestamp("2015-02-05")),
                        (20, 121, pd.Timestamp("2015-02-10")),
                    ],
                    pd.Timestamp("2015-02-10"),
                )
            ]
        )
        return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, | |
# Repository: Leonardo-Maciel/PSO_Maciel (10-100 stars)
"""
`matplotlib.figure` implements the following classes:
`Figure`
Top level `~matplotlib.artist.Artist`, which holds all plot elements.
`SubplotParams`
Control the default spacing between subplots.
"""
import inspect
import logging
from numbers import Integral
import numpy as np
import matplotlib as mpl
from matplotlib import docstring, projections
from matplotlib import __version__ as _mpl_version
import matplotlib.artist as martist
from matplotlib.artist import Artist, allow_rasterization
from matplotlib.backend_bases import (
FigureCanvasBase, NonGuiException, MouseButton)
import matplotlib.cbook as cbook
import matplotlib.colorbar as cbar
import matplotlib.image as mimage
from matplotlib.axes import Axes, SubplotBase, subplot_class_factory
from matplotlib.blocking_input import BlockingMouseInput, BlockingKeyMouseInput
from matplotlib.gridspec import GridSpec, SubplotSpec # noqa: F401
import matplotlib.legend as mlegend
from matplotlib.patches import Rectangle
from matplotlib.text import Text
from matplotlib.transforms import (Affine2D, Bbox, BboxTransformTo,
TransformedBbox)
import matplotlib._layoutbox as layoutbox
_log = logging.getLogger(__name__)
def _stale_figure_callback(self, val):
if self.figure:
self.figure.stale = val
class _AxesStack(cbook.Stack):
    """
    Specialization of `.Stack`, to handle all tracking of `~.axes.Axes` in a
    `.Figure`.
    This stack stores ``key, (ind, axes)`` pairs, where:
    * **key** is a hash of the args and kwargs used in generating the Axes.
    * **ind** is a serial index tracking the order in which axes were added.
    AxesStack is a callable; calling it returns the current axes.
    The `current_key_axes` method returns the current key and associated axes.

    NOTE(review): ``_elements``, ``_pos`` and ``_default`` used below are
    presumably provided by the `cbook.Stack` base class (not visible here).
    """
    def __init__(self):
        super().__init__()
        # Serial counter used to remember insertion order of axes.
        self._ind = 0
    def as_list(self):
        """
        Return a list of the Axes instances that have been added to the figure.
        """
        # Sort by the serial index so the list reflects insertion order,
        # regardless of the stack's current ordering.
        ia_list = [a for k, a in self._elements]
        ia_list.sort()
        return [a for i, a in ia_list]
    def get(self, key):
        """
        Return the Axes instance that was added with *key*.
        If it is not present, return *None*.
        """
        item = dict(self._elements).get(key)
        if item is None:
            return None
        # A key hit means the caller is re-using creation args; warn that
        # this reuse behavior is deprecated.
        cbook.warn_deprecated(
            "2.1",
            message="Adding an axes using the same arguments as a previous "
            "axes currently reuses the earlier instance. In a future "
            "version, a new instance will always be created and returned. "
            "Meanwhile, this warning can be suppressed, and the future "
            "behavior ensured, by passing a unique label to each axes "
            "instance.")
        return item[1]
    def _entry_from_axes(self, e):
        # Reverse lookup: reconstruct the stored (key, (ind, axes)) entry
        # for a given axes instance.
        ind, k = {a: (ind, k) for k, (ind, a) in self._elements}[e]
        return (k, (ind, e))
    def remove(self, a):
        """Remove the axes from the stack."""
        super().remove(self._entry_from_axes(a))
    def bubble(self, a):
        """
        Move the given axes, which must already exist in the
        stack, to the top.
        """
        return super().bubble(self._entry_from_axes(a))
    def add(self, key, a):
        """
        Add Axes *a*, with key *key*, to the stack, and return the stack.
        If *key* is unhashable, replace it by a unique, arbitrary object.
        If *a* is already on the stack, don't add it again, but
        return *None*.
        """
        # All the error checking may be unnecessary; but this method
        # is called so seldom that the overhead is negligible.
        cbook._check_isinstance(Axes, a=a)
        try:
            hash(key)
        except TypeError:
            # Unhashable key: substitute a fresh object so it can never
            # collide with any other key.
            key = object()
        a_existing = self.get(key)
        if a_existing is not None:
            super().remove((key, a_existing))
            cbook._warn_external(
                "key {!r} already existed; Axes is being replaced".format(key))
            # I don't think the above should ever happen.
        if a in self:
            return None
        self._ind += 1
        return super().push((key, (self._ind, a)))
    def current_key_axes(self):
        """
        Return a tuple of ``(key, axes)`` for the active axes.
        If no axes exists on the stack, then returns ``(None, None)``.
        """
        if not len(self._elements):
            return self._default, self._default
        else:
            key, (index, axes) = self._elements[self._pos]
            return key, axes
    def __call__(self):
        # Calling the stack returns the current axes (or None).
        return self.current_key_axes()[1]
    def __contains__(self, a):
        return a in self.as_list()
@cbook.deprecated("3.2")
class AxesStack(_AxesStack):
    """Deprecated public alias for `_AxesStack` (deprecated since 3.2)."""
    pass
class SubplotParams:
    """
    Storage for a figure's subplot margin and spacing parameters.
    """
    def __init__(self, left=None, bottom=None, right=None, top=None,
                 wspace=None, hspace=None):
        """
        Any parameter left as *None* defaults to :rc:`figure.subplot.[name]`.

        Parameters
        ----------
        left, right : float
            Positions of the left and right edges of the subplots, as
            fractions of the figure width.
        bottom, top : float
            Positions of the bottom and top edges of the subplots, as
            fractions of the figure height.
        wspace : float
            Width of the padding between subplots, as a fraction of the
            average axes width.
        hspace : float
            Height of the padding between subplots, as a fraction of the
            average axes height.
        """
        self.validate = True
        # Seed every parameter from the rc defaults, then overlay any
        # explicitly supplied values via update().
        rc = mpl.rcParams
        self.left = rc["figure.subplot.left"]
        self.bottom = rc["figure.subplot.bottom"]
        self.right = rc["figure.subplot.right"]
        self.top = rc["figure.subplot.top"]
        self.wspace = rc["figure.subplot.wspace"]
        self.hspace = rc["figure.subplot.hspace"]
        self.update(left, bottom, right, top, wspace, hspace)
    def update(self, left=None, bottom=None, right=None, top=None,
               wspace=None, hspace=None):
        """
        Update the stored parameters; *None* leaves a value unchanged.
        """
        candidate_left = self.left if left is None else left
        candidate_right = self.right if right is None else right
        candidate_bottom = self.bottom if bottom is None else bottom
        candidate_top = self.top if top is None else top
        if self.validate:
            # Validate before assigning anything, so an invalid call leaves
            # the object unmodified.
            if candidate_left >= candidate_right:
                raise ValueError('left cannot be >= right')
            if candidate_bottom >= candidate_top:
                raise ValueError('bottom cannot be >= top')
        for name, value in (("left", left), ("bottom", bottom),
                            ("right", right), ("top", top),
                            ("wspace", wspace), ("hspace", hspace)):
            if value is not None:
                setattr(self, name, value)
class Figure(Artist):
"""
The top level container for all the plot elements.
The Figure instance supports callbacks through a *callbacks* attribute
which is a `.CallbackRegistry` instance. The events you can connect to
are 'dpi_changed', and the callback will be called with ``func(fig)`` where
fig is the `Figure` instance.
Attributes
----------
patch
The `.Rectangle` instance representing the figure background patch.
suppressComposite
For multiple figure images, the figure will make composite images
depending on the renderer option_image_nocomposite function. If
*suppressComposite* is a boolean, this will override the renderer.
"""
    def __str__(self):
        # Render as "Figure(AxB)" from the two components of bbox.size.
        return "Figure(%gx%g)" % tuple(self.bbox.size)
    def __repr__(self):
        # NOTE(review): the format-arg names look swapped -- ``h`` is bound
        # to ``bbox.size[0]`` and ``w`` to ``bbox.size[1]``.  The printed
        # output is simply "size[0]xsize[1]", the same ordering as
        # ``__str__``; confirm which component is width vs. height before
        # renaming.
        return "<{clsname} size {h:g}x{w:g} with {naxes} Axes>".format(
            clsname=self.__class__.__name__,
            h=self.bbox.size[0], w=self.bbox.size[1],
            naxes=len(self.axes),
        )
def __init__(self,
figsize=None,
dpi=None,
facecolor=None,
edgecolor=None,
linewidth=0.0,
frameon=None,
subplotpars=None, # rc figure.subplot.*
tight_layout=None, # rc figure.autolayout
constrained_layout=None, # rc figure.constrained_layout.use
):
"""
Parameters
----------
figsize : 2-tuple of floats, default: :rc:`figure.figsize`
Figure dimension ``(width, height)`` in inches.
dpi : float, default: :rc:`figure.dpi`
Dots per inch.
facecolor : default: :rc:`figure.facecolor`
The figure patch facecolor.
edgecolor : default: :rc:`figure.edgecolor`
The figure patch edge color.
linewidth : float
The linewidth of the frame (i.e. the edge linewidth of the figure
patch).
frameon : bool, default: :rc:`figure.frameon`
If ``False``, suppress drawing the figure background patch.
subplotpars : `SubplotParams`
Subplot parameters. If not given, the default subplot
parameters :rc:`figure.subplot.*` are used.
tight_layout : bool or dict, default: :rc:`figure.autolayout`
If ``False`` use *subplotpars*. If ``True`` adjust subplot
parameters using `.tight_layout` with default padding.
When providing a dict containing the keys ``pad``, ``w_pad``,
``h_pad``, and ``rect``, the default `.tight_layout` paddings
will be overridden.
constrained_layout : bool, default: :rc:`figure.constrained_layout.use`
If ``True`` use constrained layout to adjust positioning of plot
elements. Like ``tight_layout``, but designed to be more
flexible. See
:doc:`/tutorials/intermediate/constrainedlayout_guide`
for examples. (Note: does not work with `add_subplot` or
`~.pyplot.subplot2grid`.)
"""
super().__init__()
# remove the non-figure artist _axes property
# as it makes no sense for a figure to be _in_ an axes
# this is used by the property methods in the artist base class
# which are over-ridden in this class
del self._axes
self.callbacks = cbook.CallbackRegistry()
if figsize is None:
figsize = mpl.rcParams['figure.figsize']
if dpi is None:
dpi = mpl.rcParams['figure.dpi']
if facecolor is None:
facecolor = mpl.rcParams['figure.facecolor']
if edgecolor is None:
edgecolor = mpl.rcParams['figure.edgecolor']
if frameon is None:
frameon = mpl.rcParams['figure.frameon']
if not np.isfinite(figsize).all() or (np.array(figsize) < 0).any():
raise ValueError('figure size must be positive finite not '
f'{figsize}')
self.bbox_inches = Bbox.from_bounds(0, 0, *figsize)
self.dpi_scale_trans = Affine2D().scale(dpi)
# do not use property as it will trigger
self._dpi = dpi
self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans)
self.transFigure = BboxTransformTo(self.bbox)
self.patch = Rectangle(
xy=(0, 0), width=1, height=1, visible=frameon,
facecolor=facecolor, edgecolor=edgecolor, linewidth=linewidth,
# Don't let the figure patch influence bbox calculation.
in_layout=False)
self._set_artist_props(self.patch)
self.patch.set_antialiased(False)
FigureCanvasBase(self) # Set self.canvas.
self._suptitle = None
if subplotpars is None:
subplotpars = SubplotParams()
self.subplotpars = subplotpars
# constrained_layout:
self._layoutbox = None
# set in set_constrained_layout_pads()
self.set_constrained_layout(constrained_layout)
self.set_tight_layout(tight_layout)
self._axstack = _AxesStack() # track all figure axes and current axes
self.clf()
| |
0], K0)
assert_allclose(res.predicted_state[:, 1], [y11, y21 - theta * y11])
P2 = np.array([[1 + sigma2_mu, -theta],
[-theta, 1 + theta**2]])
assert_allclose(res.predicted_state_cov[..., 1], P2)
assert_allclose(res.predicted_diffuse_state_cov[..., 1], np.zeros((2, 2)))
# Miscellaneous
assert_equal(res.nobs_diffuse, 1)
def test_common_level_restricted_analytic():
    # Analytic test using results from Koopman (1997), section 5.3, with the
    # restriction mu_bar = 0.
    mod = model_common_level(restricted=True)
    y11, y21 = mod.endog[:, 0]
    theta, sigma2_mu = mod['design', 1, 0], mod['state_cov', 0, 0]
    # Run the smoother.
    results = mod.smooth()
    # Initialization: zero finite covariance, fully diffuse covariance.
    assert_allclose(results.predicted_state_cov[..., 0], 0)
    assert_allclose(results.predicted_diffuse_state_cov[..., 0], 1)
    # Output of the exact diffuse initialization, see Koopman (1997).
    phi = 1 / (1 + theta**2)
    # Koopman (1997) did not apply the univariate method, so the forecast
    # errors / covariances and the Kalman gain are not directly comparable
    # and are not checked here.
    assert_allclose(results.predicted_state[:, 1], phi * (y11 + theta * y21))
    # Koopman (1997) actually prints phi + sigma2_mu**0.5, but that appears
    # to be a typo.
    assert_allclose(results.predicted_state_cov[..., 1], phi + sigma2_mu)
    assert_allclose(results.predicted_diffuse_state_cov[..., 1], 0)
    # Miscellaneous: exactly one diffuse observation.
    assert_equal(results.nobs_diffuse, 1)
class CheckSSMResults(object):
    """
    Mixin comparing two sets of state-space results, attribute by attribute.

    Subclasses are expected to provide ``results_a`` and ``results_b`` (and
    ``d``, the number of diffuse observations) before the test methods run.
    ``results_a`` is the set under test ("actual"); ``results_b`` is the
    reference ("desired").
    """
    atol = 1e-14
    rtol = 1e-07
    # Looser absolute tolerance for the diffuse periods, mostly useful when
    # comparing against zero values in the approximate-diffuse case.
    atol_diffuse = 1e-7
    rtol_diffuse = None
    def check_object(self, actual, desired, rtol_diffuse):
        """Compare *actual* to *desired*, optionally using a separate
        relative tolerance for the first ``self.d`` (diffuse) periods.

        A ``rtol_diffuse`` of ``np.inf`` skips the diffuse periods entirely;
        ``None`` falls back to ``self.rtol_diffuse``.
        """
        # Short-circuit the test if desired is set to None (which allows us to
        # skip testing some objects where appropriate)
        if actual is None or desired is None:
            return
        # Optionally apply a different relative tolerance to the periods in the
        # diffuse observations.
        # This is especially useful when testing against approximate diffuse
        # initialization. By definition, the first few observations will be
        # quite different between the exact and approximate approach for many
        # quantities.
        # Note that the absolute tolerance is also pretty low (1e-7), mostly
        # for comparison against zero values in the approximate case
        d = None
        if rtol_diffuse is None:
            rtol_diffuse = self.rtol_diffuse
        if rtol_diffuse is not None:
            d = self.d
            if rtol_diffuse != np.inf:
                assert_allclose(actual.T[:d], desired.T[:d], rtol=rtol_diffuse,
                                atol=self.atol_diffuse)
        assert_allclose(actual.T[d:], desired.T[d:], rtol=self.rtol,
                        atol=self.atol)
    # - Filtered results tests -----------------------------------------------
    def test_forecasts(self, rtol_diffuse=None):
        actual = self.results_a.forecasts
        # Fixed: previously compared ``results_a`` against itself, which made
        # this test vacuous.
        desired = self.results_b.forecasts
        self.check_object(actual, desired, rtol_diffuse)
    def test_forecasts_error(self, rtol_diffuse=None):
        actual = self.results_a.forecasts_error
        # Fixed: previously compared ``results_a`` against itself.
        desired = self.results_b.forecasts_error
        self.check_object(actual, desired, rtol_diffuse)
    def test_forecasts_error_cov(self, rtol_diffuse=None):
        actual = self.results_a.forecasts_error_cov
        desired = self.results_b.forecasts_error_cov
        self.check_object(actual, desired, rtol_diffuse)
    def test_filtered_state(self, rtol_diffuse=1e-5):
        # Note: we do want to check the diffuse values here, with a reduced
        # tolerance. See the note before the smoothed values for additional
        # details.
        actual = self.results_a.filtered_state
        desired = self.results_b.filtered_state
        self.check_object(actual, desired, rtol_diffuse)
    def test_filtered_state_cov(self, rtol_diffuse=None):
        actual = self.results_a.filtered_state_cov
        desired = self.results_b.filtered_state_cov
        self.check_object(actual, desired, rtol_diffuse)
    def test_predicted_state(self, rtol_diffuse=None):
        actual = self.results_a.predicted_state
        desired = self.results_b.predicted_state
        self.check_object(actual, desired, rtol_diffuse)
    def test_predicted_state_cov(self, rtol_diffuse=None):
        actual = self.results_a.predicted_state_cov
        desired = self.results_b.predicted_state_cov
        self.check_object(actual, desired, rtol_diffuse)
    def test_kalman_gain(self, rtol_diffuse=None):
        actual = self.results_a.kalman_gain
        desired = self.results_b.kalman_gain
        self.check_object(actual, desired, rtol_diffuse)
    def test_loglike(self, rtol_diffuse=None):
        # The reference loglikelihood may be a scalar total rather than a
        # per-observation array; compare accordingly.
        if np.isscalar(self.results_b.llf_obs):
            actual = np.sum(self.results_a.llf_obs)
            desired = self.results_b.llf_obs
            assert_allclose(actual, desired)
        else:
            actual = self.results_a.llf_obs
            desired = self.results_b.llf_obs
            self.check_object(actual, desired, rtol_diffuse)
    # - Smoothed output tests ------------------------------------------------
    # Note: for smoothed states, we do want to check some of the diffuse values
    # even in the approximate case, but with reduced precision. Note also that
    # there are cases that demonstrate the numerical error associated with the
    # approximate method, and so some specific tests are overridden in certain
    # cases, since they would not pass.
    def test_smoothed_state(self, rtol_diffuse=1e-5):
        actual = self.results_a.smoothed_state
        desired = self.results_b.smoothed_state
        self.check_object(actual, desired, rtol_diffuse)
    def test_smoothed_state_cov(self, rtol_diffuse=1e-5):
        actual = self.results_a.smoothed_state_cov
        desired = self.results_b.smoothed_state_cov
        self.check_object(actual, desired, rtol_diffuse)
    def test_smoothed_state_autocov(self, rtol_diffuse=None):
        actual = self.results_a.smoothed_state_autocov
        desired = self.results_b.smoothed_state_autocov
        self.check_object(actual, desired, rtol_diffuse)
    def test_smoothed_measurement_disturbance(self, rtol_diffuse=1e-5):
        actual = self.results_a.smoothed_measurement_disturbance
        desired = self.results_b.smoothed_measurement_disturbance
        self.check_object(actual, desired, rtol_diffuse)
    def test_smoothed_measurement_disturbance_cov(self, rtol_diffuse=1e-5):
        actual = self.results_a.smoothed_measurement_disturbance_cov
        desired = self.results_b.smoothed_measurement_disturbance_cov
        self.check_object(actual, desired, rtol_diffuse)
    def test_smoothed_state_disturbance(self, rtol_diffuse=1e-5):
        actual = self.results_a.smoothed_state_disturbance
        desired = self.results_b.smoothed_state_disturbance
        self.check_object(actual, desired, rtol_diffuse)
    def test_smoothed_state_disturbance_cov(self, rtol_diffuse=1e-5):
        actual = self.results_a.smoothed_state_disturbance_cov
        desired = self.results_b.smoothed_state_disturbance_cov
        self.check_object(actual, desired, rtol_diffuse)
    # - Smoothed intermediate tests ------------------------------------------
    # This isn't computed in the univariate method or by KFAS
    # def test_smoothing_error(self, rtol_diffuse=None):
    #     actual = self.results_a.smoothing_error
    #     desired = self.results_b.smoothing_error
    #     self.check_object(actual, desired, rtol_diffuse)
    def test_scaled_smoothed_estimator(self, rtol_diffuse=1e-5):
        actual = self.results_a.scaled_smoothed_estimator
        desired = self.results_b.scaled_smoothed_estimator
        self.check_object(actual, desired, rtol_diffuse)
    def test_scaled_smoothed_estimator_cov(self, rtol_diffuse=1e-5):
        actual = self.results_a.scaled_smoothed_estimator_cov
        desired = self.results_b.scaled_smoothed_estimator_cov
        self.check_object(actual, desired, rtol_diffuse)
    # - Diffuse objects tests ------------------------------------------------
    # Note: these can't be checked against the approximate diffuse method.
    def test_forecasts_error_diffuse_cov(self, rtol_diffuse=None):
        actual = self.results_a.forecasts_error_diffuse_cov
        desired = self.results_b.forecasts_error_diffuse_cov
        self.check_object(actual, desired, rtol_diffuse)
    def test_predicted_diffuse_state_cov(self, rtol_diffuse=None):
        actual = self.results_a.predicted_diffuse_state_cov
        desired = self.results_b.predicted_diffuse_state_cov
        self.check_object(actual, desired, rtol_diffuse)
    # We don't currently store this array
    # def test_kalman_gain_diffuse(self, rtol_diffuse=None):
    #     actual = self.results_a.
    #     desired = self.results_b.
    #     self.check_object(actual, desired, rtol_diffuse)
    def test_scaled_smoothed_diffuse_estimator(self, rtol_diffuse=None):
        actual = self.results_a.scaled_smoothed_diffuse_estimator
        desired = self.results_b.scaled_smoothed_diffuse_estimator
        self.check_object(actual, desired, rtol_diffuse)
    def test_scaled_smoothed_diffuse1_estimator_cov(self, rtol_diffuse=None):
        actual = self.results_a.scaled_smoothed_diffuse1_estimator_cov
        desired = self.results_b.scaled_smoothed_diffuse1_estimator_cov
        self.check_object(actual, desired, rtol_diffuse)
    def test_scaled_smoothed_diffuse2_estimator_cov(self, rtol_diffuse=None):
        actual = self.results_a.scaled_smoothed_diffuse2_estimator_cov
        desired = self.results_b.scaled_smoothed_diffuse2_estimator_cov
        self.check_object(actual, desired, rtol_diffuse)
    # - Simulation smoother results tests ------------------------------------
    # def test_simulation_smoothed_state(self):
    #     assert_allclose(
    #         self.sim_a.simulated_state,
    #         self.sim_a.simulated_state)
    # def test_simulation_smoothed_measurement_disturbance(self):
    #     assert_allclose(
    #         self.sim_a.simulated_measurement_disturbance,
    #         self.sim_a.simulated_measurement_disturbance)
    # def test_simulation_smoothed_state_disturbance(self):
    #     assert_allclose(
    #         self.sim_a.simulated_state_disturbance,
    #         self.sim_a.simulated_state_disturbance)
class CheckApproximateDiffuseMixin(object):
    """
    Check the exact diffuse initialization against an approximate diffuse
    initialization (i.e. a large but finite initial variance).

    By definition the first few observations differ considerably between the
    exact and approximate approaches for many quantities, so they are
    excluded from the comparisons (via ``rtol_diffuse = np.inf``).
    """
    approximate_diffuse_variance = 1e6

    @classmethod
    def setup_class(cls, *args, **kwargs):
        init_approx = kwargs.pop('init_approx', None)
        super(CheckApproximateDiffuseMixin, cls).setup_class(*args, **kwargs)

        # Re-initialize the model with an approximate diffuse prior and
        # re-run the smoother to produce the comparison results.
        kappa = cls.approximate_diffuse_variance
        if init_approx is None:
            init_approx = Initialization(
                cls.ssm.k_states, 'approximate_diffuse',
                approximate_diffuse_variance=kappa)
        cls.ssm.initialize(init_approx)
        cls.results_b = cls.ssm.smooth()

        # Instruct the tests to skip the first d (diffuse) observations.
        cls.rtol_diffuse = np.inf

    def test_initialization_approx(self):
        expected_cov = (np.eye(self.ssm.k_states) *
                        self.approximate_diffuse_variance)
        assert_allclose(self.results_b.initial_state_cov, expected_cov)
        assert_equal(self.results_b.initial_diffuse_state_cov, None)
class CheckKFASMixin(object):
    """
    Test against values from KFAS

    Parses a KFAS results CSV (at ``cls.results_path``) into ``results_b``
    and patches it to account for quantities KFAS does not compute or
    computes differently.
    """
    @classmethod
    def setup_class(cls, *args, **kwargs):
        # Default to univariate filtering (the caller may still override it).
        kwargs.setdefault('filter_univariate', True)
        super(CheckKFASMixin, cls).setup_class(*args, **kwargs)
        # Get the KFAS results objects
        cls.results_b = kfas_helpers.parse(cls.results_path, cls.ssm)
        # Set some attributes that KFAS does not compute
        cls.results_b.smoothed_state_autocov = None
        # Remove the Kalman gain matrix since KFAS computes it using the
        # non-univariate method
        cls.results_b.kalman_gain = None
        # Remove the filtered_state_cov since KFAS v1.3.1 has a bug for these
        # matrices (they are not even symmetric)
        cls.results_b.filtered_state_cov = None
        # KFAS v1.3.1 seems to compute the loglikelihood incorrectly, so we
        # correct for it here
        # (we need to add back in the constant term for all of the non-missing
        # diffuse observations for which Finf is nonsingular)
        # After the transpose, the leading axis of Finf indexes time periods.
        Finf = cls.results_b.forecasts_error_diffuse_cov.T
        # Per period / series flag: diagonal of Finf positive => treated as
        # nonsingular for that element.
        Finf_nonsingular_obs = np.c_[[np.diag(Finf_t) for Finf_t in Finf]] > 0
        nonmissing = ~np.isnan(cls.ssm.endog).T
        # -log(2*pi)/2 times the per-period count of relevant observations.
        constant = (-0.5 * np.log(2 * np.pi) *
                    (Finf_nonsingular_obs * nonmissing).sum(axis=1))
        # Only the initial diffuse periods are affected.
        cls.results_b.llf_obs += constant[:cls.results_a.nobs_diffuse].sum()
# - VAR(1) -------------------------------------------------------------------
class CheckVAR1(CheckSSMResults):
    """Base checks for a VAR(1) model under exact diffuse initialization."""

    @classmethod
    def setup_class(cls, **kwargs):
        use_univariate = kwargs.pop('filter_univariate', False)
        cls.mod, cls.ssm = model_var1(**kwargs)
        if use_univariate:
            cls.ssm.filter_univariate = True
        cls.results_a = cls.ssm.smooth()
        # number of diffuse observations, used to skip comparisons
        cls.d = cls.results_a.nobs_diffuse

    def test_nobs_diffuse(self):
        assert_allclose(self.d, 1)

    def test_initialization(self):
        assert_allclose(self.results_a.initial_state_cov, 0)
        assert_allclose(self.results_a.initial_diffuse_state_cov, np.eye(2))
class TestVAR1_Approx(CheckApproximateDiffuseMixin, CheckVAR1):
    # VAR(1): compare exact diffuse results against the approximate-diffuse
    # mixin; all behavior comes from the two base classes.
    pass
class TestVAR1_KFAS(CheckKFASMixin, CheckVAR1):
    # VAR(1): compare against reference output computed by the KFAS R package.
    results_path = os.path.join(
        current_path, 'results', 'results_exact_initial_var1_R.csv')
# - VAR(1) + Measurement error -----------------------------------------------
class CheckVAR1MeasurementError(CheckVAR1):
    """VAR(1) checks with measurement error added to the model."""

    @classmethod
    def setup_class(cls, **kwargs):
        # Force measurement error on, then defer to the VAR(1) setup.
        kwargs.update(measurement_error=True)
        super(CheckVAR1MeasurementError, cls).setup_class(**kwargs)
class TestVAR1MeasurementError_Approx(CheckApproximateDiffuseMixin,
                                      CheckVAR1MeasurementError):
    """VAR(1) + measurement error: exact vs. approximate diffuse results."""
    # Note: somewhat fragile, we need to increase the approximate variance to
    # 1e9 for the tests to pass at the appropriate level of precision, but
    # we can't increase too much more than this because then we start to get
    # numerical errors (e.g. 1e10 is fine but 1e11 doesn't pass)
    approximate_diffuse_variance = 1e9
    def test_smoothed_measurement_disturbance_cov(self, rtol_diffuse=None):
        # Disable the rtol check entirely (rtol_diffuse=None) for this
        # quantity: this is an example where the numerical errors associated
        # with the approximate method result in noticeable errors in the
        # diffuse period: (x is the exact method, y is the approximate method)
        # x: array([[[3.355072, 0.      ],
        #            [0.      , 4.221227]]])
        # y: array([[[ 3.355072, -0.600856],
        #            [-0.600856,  4.221227]]])
        super(TestVAR1MeasurementError_Approx,
              self).test_smoothed_measurement_disturbance_cov(
                  rtol_diffuse=rtol_diffuse)
class TestVAR1MeasurementError_KFAS(CheckKFASMixin, CheckVAR1MeasurementError):
    # VAR(1) + measurement error: compare against KFAS reference output.
    results_path = os.path.join(current_path, 'results',
                                'results_exact_initial_var1_measurement_error_R.csv')
# - VAR(1) + Missing data ----------------------------------------------------
class CheckVAR1Missing(CheckVAR1):
@classmethod
| |
import tensorflow as tf
import numpy as np
import random
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from keras.models import Sequential
from keras.layers import Dense, Activation, LSTM, Dropout
from keras.utils import to_categorical
from keras import optimizers
from keras import metrics
from keras import backend as K
from datetime import datetime, timedelta
import pandas as pd
from copy import deepcopy
## <NAME>
## <NAME>
## <NAME>
# Fix the RNG seeds so runs are reproducible.
# NOTE(review): tensorflow's own graph-level seed is not set here, so
# TF/Keras weight initialization may still vary between runs — confirm
# whether that is intended.
seed = 123
random.seed(seed)
np.random.seed(seed)
class BasicTemplateAlgorithm(QCAlgorithm):
'''Basic template algorithm simply initializes the date range and cash'''
def Initialize(self):
'''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
self.session = K.get_session()
self.graph = tf.get_default_graph()
self.SetStartDate(2018,8,1) #Set Start Date
self.SetEndDate(2018,11,21) #Set End Date
self.SetCash(100000) #Set Strategy Cash
## start the Keras/ Tensorflow session
self.session = K.get_session()
self.graph = tf.get_default_graph()
## set the currency pair that we are trading, and the correlated currency pair
self.currency = "AUDUSD"
self.AddForex(self.currency, Resolution.Daily)
self.correl_currency = "USDCHF"
self.AddForex(self.correl_currency, Resolution.Daily)
## define a long list, short list and portfolio
self.long_list, self.short_list = [], []
# Initialise indicators
self.rsi = RelativeStrengthIndex(9)
self.bb = BollingerBands(14, 2, 2)
self.macd = MovingAverageConvergenceDivergence(12, 26, 9)
self.stochastic = Stochastic(14, 3, 3)
self.ema = ExponentialMovingAverage(9)
## Arrays to store the past indicators
prev_rsi, prev_bb, prev_macd, lower_bb, upper_bb, sd_bb, prev_stochastic, prev_ema = [],[],[],[],[],[],[],[]
## Make history calls for both currency pairs
self.currency_data = self.History([self.currency], 150, Resolution.Daily) # Drop the first 20 for indicators to warm up
self.correl_data = self.History([self.correl_currency], 150, Resolution.Daily)
## save the most recent open and close
ytd_open = self.currency_data["open"][-1]
ytd_close = self.currency_data["close"][-1]
## remove yesterday's data. We will query this onData
self.currency_data = self.currency_data[:-1]
self.correl_data = self.correl_data[:-1]
## iterate over past data to update the indicators
for tup in self.currency_data.loc[self.currency].itertuples():
# making Ibasedatabar for stochastic
bar = QuoteBar(tup.Index,
self.currency,
Bar(tup.bidclose, tup.bidhigh, tup.bidlow, tup.bidopen),
0,
Bar(tup.askclose, tup.askhigh, tup.asklow, tup.askopen),
0,
timedelta(days=1)
)
self.stochastic.Update(bar)
prev_stochastic.append(float(self.stochastic.ToString()))
self.rsi.Update(tup.Index, tup.close)
prev_rsi.append(float(self.rsi.ToString()))
self.bb.Update(tup.Index, tup.close)
prev_bb.append(float(self.bb.ToString()))
lower_bb.append(float(self.bb.LowerBand.ToString()))
upper_bb.append(float(self.bb.UpperBand.ToString()))
sd_bb.append(float(self.bb.StandardDeviation.ToString()))
self.macd.Update(tup.Index, tup.close)
prev_macd.append(float(self.macd.ToString()))
self.ema.Update(tup.Index, tup.close)
prev_ema.append(float(self.ema.ToString()))
## Forming the Indicators df
## This is common to the Price Prediction
rsi_df = pd.DataFrame(prev_rsi, columns = ["rsi"])
macd_df = pd.DataFrame(prev_macd, columns = ["macd"])
upper_bb_df = pd.DataFrame(upper_bb, columns = ["upper_bb"])
lower_bb_df = pd.DataFrame(lower_bb, columns = ["lower_bb"])
sd_bb_df = pd.DataFrame(sd_bb, columns = ["sd_bb"])
stochastic_df = pd.DataFrame(prev_stochastic, columns = ["stochastic"])
ema_df = pd.DataFrame(prev_ema, columns=["ema"])
self.indicators_df = pd.concat([rsi_df, macd_df, upper_bb_df, lower_bb_df, sd_bb_df, stochastic_df, ema_df], axis=1)
self.indicators_df = self.indicators_df.iloc[20:]
self.indicators_df.reset_index(inplace=True, drop=True)
## Currency Data Price
self._currency_data = deepcopy(self.currency_data)
self._currency_data = self._currency_data.reset_index(level = [0, 1], drop = True)
self._currency_data.drop(columns=["askopen", "askhigh", "asklow", "askclose", "bidopen", "bidhigh", "bidhigh", "bidlow", "bidclose"], inplace=True)
self._currency_data = self._currency_data.iloc[20:]
self._currency_data.reset_index(inplace=True, drop=True)
## saving the previous 6 days OHLC for the price prediction model
_close_prev_prices = self._previous_prices("close", self._currency_data["close"], 6)
_open_prev_prices = self._previous_prices("open", self._currency_data["open"], 6)
_high_prev_prices = self._previous_prices("high", self._currency_data["high"], 6)
_low_prev_prices = self._previous_prices("low", self._currency_data["low"], 6)
_all_prev_prices = pd.concat([_close_prev_prices, _open_prev_prices, _high_prev_prices, _low_prev_prices], axis=1)
_final_table = self._currency_data.join(_all_prev_prices, how="outer")
_final_table = _final_table.join(self.indicators_df, how="outer")
# Drop NaN from feature table
self._features = _final_table.dropna()
self._features.reset_index(inplace=True, drop=True)
# Make labels for LSTM model
self._labels = self._features["close"]
self._labels = pd.DataFrame(self._labels)
self._labels.index -= 1
self._labels = self._labels[1:]
_new_row = pd.DataFrame({"close": [ytd_close]})
self._labels = self._labels.append(_new_row)
self._labels.reset_index(inplace=True, drop=True)
# Currency Data Direction
self.currency_data_direction = self.currency_data.reset_index(level = [0, 1], drop = True)
self.currency_data_direction.drop(columns=["askopen", "askhigh", "asklow", "askclose", "bidopen", "bidhigh", "bidhigh",
"bidlow", "bidclose", "open", "high", "low"], inplace=True)
self.currency_data_direction = self.currency_data_direction.iloc[20:]
self.currency_data_direction.reset_index(inplace=True, drop=True)
# Correlation Currency Data
self.correl_data = self.correl_data.reset_index(level = [0, 1], drop = True)
self.correl_data.drop(columns=["askopen", "askhigh", "asklow", "askclose", "bidopen", "bidhigh", "bidhigh",
"bidlow", "bidclose", "open", "high", "low"], inplace=True)
self.correl_data = self.correl_data.iloc[20:]
self.correl_data.reset_index(inplace=True, drop=True)
self.correl_data.rename(index=str, columns = {"close": "correl_close"}, inplace=True)
# Close Price Direction Change
self.close_dir_change = self.direction_change("close", self.currency_data_direction["close"], 11)
# Correlation Currency Direction Change
self.correl_dir_change = self.direction_change("correl_close", self.correl_data["correl_close"], 11)
# Join the tables
joined_table_direction = self.currency_data_direction.join(self.close_dir_change, how="outer")
joined_table_direction = joined_table_direction.join(self.correl_dir_change, how="outer")
joined_table_direction = joined_table_direction.join(self.indicators_df, how="outer")
# Features Direction
self.features_direction = joined_table_direction.dropna()
self.features_direction.reset_index(inplace=True, drop=True)
## lowerBB and upperBB should change to the difference
self.features_direction["lower_bb_diff"] = self.features_direction["close"] - self.features_direction["lower_bb"]
self.features_direction["upper_bb_diff"] = self.features_direction["upper_bb"] - self.features_direction["close"]
self.features_direction["ema_diff"] = self.features_direction["ema"] - self.features_direction["close"]
self.features_direction.drop(columns=["upper_bb", "lower_bb", "ema"], inplace=True)
# Make raw df for labels
self.labels = self.features_direction["close"]
self.labels = pd.DataFrame(self.labels)
self.labels.index -= 1
self.labels = self.labels[1:]
new_row = pd.DataFrame({"close": [ytd_close]})
self.labels = self.labels.append(new_row)
self.labels.reset_index(inplace=True, drop=True)
## Form the binary labels: 1 for up and 0 for down
self.labels_direction_new = pd.DataFrame(columns=["direction"])
for row in self.labels.iterrows():
new_close, old_close = row[1], self.features_direction["close"][row[0]]
change = (new_close - old_close)[0]
percent_change = 100*change/old_close
if percent_change >=0:
this_df = pd.DataFrame({"direction":[1]})
elif percent_change <0:
this_df = pd.DataFrame({"direction":[0]})
self.labels_direction_new = self.labels_direction_new.append(this_df)
self.labels_direction_new.reset_index(inplace=True, drop =True)
## Test out different features
self.features_direction.drop(columns=[ "rsi", "stochastic", "close", "sd_bb"], inplace=True)
self.scaler_X = MinMaxScaler()
self.scaler_X.fit(self.features_direction)
scaled_features_direction = self.scaler_X.transform(self.features_direction)
# Hyperparameters Funetuning
max_depth= [10, 15, 20, 30]
n_estimators= [100, 200, 300, 500]
criterion= ["gini", "entropy"]
tscv = TimeSeriesSplit(n_splits=4)
params_df = pd.DataFrame(columns = ["depth", "n_est", "criterion", "acc_score"])
for depth in max_depth:
for n_est in n_estimators:
for crn in criterion:
acc_scores = []
for train_index, test_index in tscv.split(scaled_features_direction):
X_train, X_test = scaled_features_direction[train_index], scaled_features_direction[test_index]
#Y_train, Y_test = labels_direction.loc[train_index], labels_direction.loc[test_index]
Y_train, Y_test = self.labels_direction_new["direction"][train_index], self.labels_direction_new["direction"][test_index]
Y_train, Y_test = Y_train.astype('int'), Y_test.astype('int')
RF = RandomForestClassifier(criterion=crn, n_estimators=n_est, max_depth=depth, random_state=12345)
RF_model = RF.fit(X_train, Y_train)
y_pred = RF_model.predict(X_test)
acc_score = accuracy_score(Y_test, y_pred)
acc_scores.append(acc_score)
average_acc = np.mean(acc_scores)
# self.Debug("ACC")
# self.Debug(average_acc)
## make this df for cells, epoch and mse and append to params_df
this_df = pd.DataFrame({"depth": [depth], "n_est":[n_est], "criterion": [crn], "acc_score": [average_acc]})
params_df = params_df.append(this_df)
opt_values = params_df[params_df['acc_score'] == params_df['acc_score'].max()]
opt_depth, opt_n_est, opt_crn = opt_values["depth"][0], opt_values["n_est"][0], opt_values["criterion"][0]
self.RF = RandomForestClassifier(criterion="gini", n_estimators=300, max_depth=10, random_state=123)
self.RF_model = self.RF.fit(scaled_features_direction, self.labels_direction_new["direction"].astype('int'))
## Define scaler for this class
self._scaler_X = MinMaxScaler()
self._scaler_X.fit(self._features)
self._scaled_features = self._scaler_X.transform(self._features)
self._scaler_Y = MinMaxScaler()
self._scaler_Y.fit(self._labels)
self._scaled_labels = self._scaler_Y.transform(self._labels)
## fine tune the model to determine hyperparameters
## only done once (upon inititialize)
_tscv = TimeSeriesSplit(n_splits=2)
_cells = [100, 200]
_epochs = [50, 100]
## create dataframee to store optimal hyperparams
_params_df = pd.DataFrame(columns = ["cells", "epoch", "mse"])
# ## loop thru all combinations of cells and epochs
for i in _cells:
for j in _epochs:
print("CELL", i, "EPOCH", j)
# list to store the mean square errors
cvscores = []
for train_index, test_index in _tscv.split(self._scaled_features):
#print(train_index, test_index)
X_train, X_test = self._scaled_features[train_index], self._scaled_features[test_index]
Y_train, Y_test = self._scaled_labels[train_index], self._scaled_labels[test_index]
X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
X_test = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))
model = Sequential()
model.add(LSTM(i, input_shape = (1, X_train.shape[2]), return_sequences = True))
model.add(Dropout(0.10))
model.add(LSTM(i,return_sequences = True))
model.add(LSTM(i))
model.add(Dropout(0.10))
model.add(Dense(1))
model.compile(loss= 'mean_squared_error',optimizer = 'rmsprop', metrics = ['mean_squared_error'])
model.fit(X_train,Y_train,epochs=j,verbose=0)
scores = model.evaluate(X_test, Y_test)
cvscores.append(scores[1])
## get average value of mean sq error
MSE = np.mean(cvscores)
## make this df for cells, epoch and mse and append to params_df
this_df = pd.DataFrame({"cells": [i], "epoch":[j], "mse": [MSE]})
# self.Debug(this_df)
# params_df = params_df.append(this_df)
_params_df = _params_df.append(this_df)
self.Debug(_params_df)
# # Check the optimised values (O_values) obtained from cross validation
# # This code gives the row which has minimum mse and store the values to O_values
# _O_values = _params_df[_params_df['mse'] == _params_df['mse'].min()]
# # Extract the optimised values of cells and epochcs from abbove row (having min mse)
self._opt_cells = 200
self._opt_epochs = 100
# self._opt_cells = _O_values["cells"][0]
# self._opt_epochs = _O_values["epoch"][0]
_X_train = np.reshape(self._scaled_features, (self._scaled_features.shape[0], 1, self._scaled_features.shape[1]))
_y_train = self._scaled_labels
self._session = K.get_session()
self._graph = tf.get_default_graph()
# Intialise the model with optimised parameters
self._model = Sequential()
self._model.add(LSTM(self._opt_cells, input_shape = (1, _X_train.shape[2]), return_sequences = True))
self._model.add(Dropout(0.20))
self._model.add(LSTM(self._opt_cells,return_sequences = True))
self._model.add(Dropout(0.20))
self._model.add(LSTM(self._opt_cells, return_sequences = True))
self._model.add(LSTM(self._opt_cells))
self._model.add(Dropout(0.20))
self._model.add(Dense(1))
# self.model.add(Activation("softmax"))
self._model.compile(loss= 'mean_squared_error',optimizer = | |
# repo: leith-bartrich/fiellc
import typing
from fiepipelib.assetaspect.routines.autoconf import AutoConfigurationResult
from fiepipelib.assetstructure.routines.structure import AbstractSubPath, AbstractAssetBasePath, \
GenericAssetBasePathsSubDir, AbstractRootBasePath, AutoManageResults, StaticSubDir, AutoCreateResults
from fiepipelib.automanager.data.localconfig import LegalEntityConfig, LegalEntityMode
from fiepipelib.container.local_config.data.automanager import ContainerAutomanagerConfigurationComponent
from fiepipelib.gitstorage.routines.gitasset import GitAssetRoutines
from fiepipelib.gitstorage.routines.gitroot import GitRootRoutines
from fieui.FeedbackUI import AbstractFeedbackUI
from fiepipelib.rootaspect.routines.config import RootAspectConfigurationRoutines
from fiepipelib.assetaspect.routines.config import AssetAspectConfigurationRoutines
from fiepipepostoffice.data.aspect_config import PostOfficeConfiguration, DeliveryNamingMethod
from fiepipelib.enum import get_worse_enum
from fiepipepostoffice.data.box_aspect_config import BoxAspectConfig
from fiepipepostoffice.data.delivery_aspect_config import DeliveryAspectConfig
class PostOfficeApsectRoutines(RootAspectConfigurationRoutines[PostOfficeConfiguration]):
    """Configuration routines for the post-office aspect of a git root.

    (The 'Apsect' spelling is kept because callers reference this name.)
    """

    def default_configuration(self):
        """Default new deliveries to UTC date-hash naming."""
        config = self.get_configuration()
        config.set_delivery_naming_method(DeliveryNamingMethod.UTC_DATE_HASH)

    async def reconfigure_interactive_routine(self):
        # TODO: implement choice of delivery naming method.
        return
class PostOfficeRootStructureRoutines(AbstractRootBasePath["PostOfficeRootStructureRoutines"]):
    """Structure routines for a git root that acts as a post office.

    The root holds a single static 'boxes' subdirectory; each box is a git
    sub-asset containing deliveries.
    """

    # the static 'boxes' subdirectory (created in __init__)
    _boxes:"Boxes" = None

    def get_boxes(self):
        """Return the 'boxes' static subdirectory routines."""
        return self._boxes

    def __init__(self, routines: GitRootRoutines):
        super().__init__(routines)
        self._boxes = Boxes("boxes", self)

    async def automanager_routine(self, feedback_ui: AbstractFeedbackUI, entity_config: LegalEntityConfig,
                                  container_config: ContainerAutomanagerConfigurationComponent) -> AutoManageResults:
        """Auto-manage this post office root and its boxes.

        Creates missing structure, auto-commits structural index changes, and
        pulls/pushes each box early for eventual consistency.  Returns the
        worst status encountered across the root and its boxes.
        """
        # We can assume we've (the root) just been updated by the automanager and that we're not conflicted.
        # But that is all.  This means we don't need to 'pull' from remote.
        # For a post office, this means we know which boxes exist out there.  But we don't necessarily know what
        # deliveries exist because they are sub-assets of the boxes, which have not been updated themselves.
        # Also, we may not have pushed up our latest boxes or their deliveries.
        # Basic behavior is eventual consistency.  Therefore we want to push the boxes and their deliveries as fast
        # as possible to avoid conflicts in delivery naming.  Further, we want to pull them as early as possible to
        # avoid conflicts.
        # Further, only highly privileged machines can actually publish changes to a post-office in a high security
        # environment.  And we expect security to be handled by github's permissions.
        # For delivery (content) internals, we don't automatically pull any new ones down.
        # Nor do we push them up.  We have explicit lock and archive commands for that kind of
        # behavior.  At some point, we may want to set a local configuration variable for auto-archive-locked.
        # Behavior is:
        # we need to create, add and commit missing structure for ourselves (autocreate).
        # we need to report (and fail) dirt that's not handled by structure, and isn't just a submodule version update.
        # we should push those commits.
        # we need to then walk through the boxes and do the same.  That's:
        # pull, check for conflicts, auto-create, check for dirt, and then push.
        # and that's it.

        # return results.  best possible status.  we degrade as we move along.
        ret = AutoManageResults.CLEAN
        if entity_config.get_mode() == LegalEntityMode.NONE:
            # for lack of a better response, we don't know.
            return AutoManageResults.PENDING
        if entity_config.get_mode() == LegalEntityMode.USER_WORKSTATION:
            # first, create static structure.
            create_status = AutoCreateResults.NO_CHANGES
            for subpath in self.get_subpaths():
                if not subpath.exists():
                    # this is recursive....
                    subpath_ret = await subpath.automanager_create(feedback_ui, entity_config, container_config)
                    create_status = get_worse_enum(create_status, subpath_ret)
            # catch a failure.
            if create_status == AutoCreateResults.CANNOT_COMPLETE:
                await feedback_ui.output(
                    "Canceling further auto-management of this post-office due to a subpath failing to create.")
                return AutoManageResults.CANNOT_COMPLETE
            # we need to check for working-copy dirt that's not in the index and fail based on it.
            is_dirty = self.is_dirty(False, True, True, False)
            if is_dirty:
                await feedback_ui.output("Root worktree is dirty. Cannot auto-commit. Canceling further auto-management.")
                await feedback_ui.output(self.get_path())
                return AutoManageResults.CANNOT_COMPLETE
            # Commit the index if it needs it.
            # This will be pushed next time through.
            index_dirty = self.is_dirty(True, False, False, False)
            if index_dirty:
                commit_output = self.get_routines().get_repo().git.commit(m="Auto-manager commit of post-office changed structure.")
                await feedback_ui.output(commit_output)
            # we move into our child box logic.
            boxes = self._boxes.get_asset_basepaths()
            for box in boxes:
                asset_routines = box.get_asset_routines()
                asset_routines.load()
                # check for conflicts
                if asset_routines.is_in_conflict():
                    await feedback_ui.error("Post-office Box is in conflict: " + asset_routines.abs_path)
                    await feedback_ui.error("Canceling further auto-management of this Box.")
                    ret = get_worse_enum(ret, AutoManageResults.CANNOT_COMPLETE)
                    # other boxes might get further...
                    continue
                asset_server_routines = box.get_gitlab_asset_routines()
                remote_exists = await asset_server_routines.remote_exists(feedback_ui)
                if not remote_exists:
                    success = await asset_server_routines.push_sub_routine(feedback_ui,"master",False)
                    # since we can't check ahead and behind on a non-existent remote, we return pending,
                    # hoping for the remote to come back and take the push.
                    if not success:
                        await feedback_ui.warn("Push of new Post-Office Box failed: " + asset_routines.abs_path)
                        await feedback_ui.warn("Canceling further auto-management of this Box...")
                    ret = get_worse_enum(ret, AutoManageResults.PENDING)
                    # other boxes might get further...
                    continue
                # early push and pull for eventual consistency.
                is_behind_remote = await asset_server_routines.is_behind_remote(feedback_ui)
                is_ahead_of_remote = await asset_server_routines.is_aheadof_remote(feedback_ui)
                if is_behind_remote and not is_ahead_of_remote:
                    # we're behind and not ahead
                    # we pull but failure doesn't matter.
                    success = await asset_server_routines.pull_sub_routine(feedback_ui,"master")
                if is_ahead_of_remote and not is_behind_remote:
                    # we're ahead and not behind.  we push and failure doesn't matter.
                    success = await asset_server_routines.push_sub_routine(feedback_ui,"master",False)
                if is_ahead_of_remote and is_behind_remote:
                    # we're both ahead and behind.  We pull, check for conflicts, then push.
                    # failure doesn't matter.
                    success = await asset_server_routines.pull_sub_routine(feedback_ui,"master")
                    if asset_routines.is_in_conflict():
                        await feedback_ui.error("Conflict detected while updating Post-Office Box: " + asset_routines.abs_path)
                        ret = get_worse_enum(ret, AutoManageResults.CANNOT_COMPLETE)
                        # other boxes might get further...
                        continue
                    success = await asset_server_routines.push_sub_routine(feedback_ui,"master",False)
                # now we auto_create and push (early) if needed.
                auto_create_results = await box.automanager_create(feedback_ui,entity_config,container_config)
                if auto_create_results == AutoCreateResults.CANNOT_COMPLETE:
                    await feedback_ui.error("Post-office Box couldn't auto-create: " + asset_routines.abs_path)
                    ret = get_worse_enum(ret, AutoManageResults.CANNOT_COMPLETE)
                    # other boxes might get further
                    continue
                else:
                    if asset_routines.get_repo().is_dirty(False,True,True,False):
                        # dirty and cannot auto-commit
                        await feedback_ui.error("Post-office Box is dirty and cannot auto-commit changes: " + asset_routines.abs_path)
                        ret = get_worse_enum(ret, AutoManageResults.CANNOT_COMPLETE)
                        # other boxes might get further...
                        continue
                    if asset_routines.get_repo().is_dirty(True,False,False,False):
                        # dirty index can be auto-commit
                        commit_output = asset_routines.get_repo().git.commit(m="Auto-commit of Post-Office Box Structure.")
                        await feedback_ui.output(commit_output)
                        push_success = await asset_server_routines.push_sub_routine(feedback_ui,"master",False)
                        if push_success:
                            ret = get_worse_enum(ret, AutoManageResults.CLEAN)
                        else:
                            ret = get_worse_enum(ret, AutoManageResults.UNPUBLISHED_COMMITS)
                    else:
                        ret = get_worse_enum(ret, AutoManageResults.CLEAN)
            if ret == AutoManageResults.CANNOT_COMPLETE or ret == AutoManageResults.PENDING:
                await feedback_ui.error(
                    "At least one child's auto-create and publish routine failed or is pending. Canceling further auto-management.")
                return ret
        # NOTE(review): modes other than NONE/USER_WORKSTATION fall through to
        # here and report the (initially CLEAN) status — confirm intended.
        return ret

    def get_sub_basepaths(self) -> typing.List["AbstractAssetBasePath"]:
        """Return the base-path routines of every existing box."""
        return self._boxes.get_asset_basepaths()

    def get_subpaths(self) -> "typing.List[AbstractSubPath[PostOfficeRootStructureRoutines]]":
        """The only managed static subpath is the 'boxes' directory."""
        return [self._boxes]

    def get_aspect_routines(self) -> PostOfficeApsectRoutines:
        """Build aspect-configuration routines for this root's path."""
        config = PostOfficeConfiguration(self.get_path())
        return PostOfficeApsectRoutines(config)

    async def automanager_create_self(self, feedback_ui: AbstractFeedbackUI, entity_config: LegalEntityConfig,
                                      container_config: ContainerAutomanagerConfigurationComponent) -> 'AutoCreateResults':
        """Ensure a committed default aspect configuration exists, then defer to the base class."""
        aspect_routines = self.get_aspect_routines()
        if not aspect_routines.is_configured():
            aspect_routines.default_configuration()
            aspect_routines.commit()
        return await super().automanager_create_self(feedback_ui, entity_config, container_config)
class Boxes(GenericAssetBasePathsSubDir["PostOfficeRootStructureRoutines", "PostOfficeRootStructureRoutines", "Box"]):
    """The 'boxes' static subdirectory of a post-office root."""

    def get_asset_basepath_by_dirname(self, dirname: str) -> "Box":
        """Build a Box base-path for the named sub-asset directory."""
        root_structure = self.get_base_static_path()
        routines = self.get_asset_routines_by_dirname(dirname)
        routines.load()
        return Box(routines, root_structure)
class BoxAspectRoutines(AssetAspectConfigurationRoutines[BoxAspectConfig]):
    """Aspect-configuration routines for a post-office box asset.

    A box currently has no configurable options; the aspect mainly allows the
    automanager to mark the asset as configured (see Box.automanager_create_self).
    """

    async def auto_reconfigure_routine(self, feedback_ui: AbstractFeedbackUI) -> AutoConfigurationResult:
        # Bug fix: previously this fell through and returned None despite the
        # declared AutoConfigurationResult return type.  Report UNCLEAR,
        # consistent with DeliveryAspectRoutines, since nothing is checked.
        return AutoConfigurationResult.UNCLEAR

    def default_configuration(self):
        """No options to set; an empty configuration is the default."""
        pass

    async def reconfigure_interactive_routine(self):
        """Nothing to reconfigure interactively."""
        pass
class Box(AbstractAssetBasePath["Box"]):
    """A post-office box: a git asset with 'incoming' and 'outgoing' sections."""

    # the 'incoming' section subdirectory (created in __init__)
    _incoming: "Section" = None

    def get_incoming(self) -> "Section":
        """Return the 'incoming' section subdirectory."""
        return self._incoming

    # the 'outgoing' section subdirectory (created in __init__)
    _outgoing: "Section" = None

    def get_outgoing(self) -> "Section":
        """Return the 'outgoing' section subdirectory."""
        return self._outgoing

    # back-reference to the owning post-office root structure
    _post_office_root_structure: PostOfficeRootStructureRoutines = None

    def get_post_office_root_structure(self) -> PostOfficeRootStructureRoutines:
        """Return the post-office root structure this box belongs to."""
        return self._post_office_root_structure

    def __init__(self, routines: GitAssetRoutines, po_root_structure:PostOfficeRootStructureRoutines):
        # note: the root back-reference is assigned before base-class init,
        # preserving the original initialization order.
        self._post_office_root_structure = po_root_structure
        super().__init__(routines)
        self._incoming = Section("incoming", self)
        self._outgoing = Section("outgoing", self)

    def get_sub_basepaths(self) -> typing.List["AbstractAssetBasePath"]:
        """A box reports no sub asset base-paths of its own."""
        return []

    def get_subpaths(self) -> "typing.List[AbstractSubPath[Box]]":
        """The managed subpaths are the incoming and outgoing sections."""
        return [self._incoming, self._outgoing]

    async def automanager_create_self(self, feedback_ui: AbstractFeedbackUI, entity_config: LegalEntityConfig,
                                      container_config: ContainerAutomanagerConfigurationComponent) -> 'AutoCreateResults':
        """Ensure the box has a committed default aspect configuration, then defer to the base class."""
        routines = self.get_asset_routines()
        aspect = BoxAspectRoutines(BoxAspectConfig(routines.abs_path), routines)
        if not aspect.is_configured():
            aspect.default_configuration()
            aspect.commit()
        return await super().automanager_create_self(feedback_ui, entity_config, container_config)
class Section(GenericAssetBasePathsSubDir[Box, Box, "Delivery"]):
    """One side of a post-office box ('incoming' or 'outgoing'), holding deliveries."""

    def get_asset_basepath_by_dirname(self, dirname: str) -> "Delivery":
        """Build a Delivery base-path for the named sub-asset directory."""
        routines = self.get_asset_routines_by_dirname(dirname)
        routines.load()
        return Delivery(routines, self.get_parent_path())

    async def create_new_delivery_routine(self, feedback_ui:AbstractFeedbackUI):
        """Create and auto-create a new, automatically named delivery asset."""
        root_structure = self.get_parent_path().get_post_office_root_structure()
        aspect = root_structure.get_aspect_routines()
        aspect.load()
        dirname = aspect.get_configuration().get_new_delivery_name(self.get_path())
        await feedback_ui.output("Creating new delivery: " + dirname)
        self.create_new_empty_asset(dirname)
        await feedback_ui.output("Auto-creating delivery: " + dirname)
        await self.autocreate_asset_by_dirname(dirname,feedback_ui)
        await feedback_ui.output("Done creating new delivery.")
class DeliveryAspectRoutines(AssetAspectConfigurationRoutines[DeliveryAspectConfig]):
    """Aspect-configuration routines for a Delivery asset; the aspect holds a
    single "locked" flag."""

    async def auto_reconfigure_routine(self, feedback_ui: AbstractFeedbackUI) -> AutoConfigurationResult:
        self.update_git_meta()
        return AutoConfigurationResult.UNCLEAR

    def default_configuration(self):
        # New deliveries start unlocked.
        self.get_configuration().set_locked(False)

    async def reconfigure_interactive_routine(self):
        pass

    def _set_locked_and_commit(self, locked: bool):
        # Shared body of lock_routine/unlock_routine: load, flip the flag, commit.
        self.load()
        self.get_configuration().set_locked(locked)
        self.commit()

    async def lock_routine(self):
        """Lock the delivery. Changes to the delivery will not be allowed going forward."""
        self._set_locked_and_commit(True)

    async def unlock_routine(self):
        """Unlock the delivery. Changes to the delivery will be allowed again."""
        self._set_locked_and_commit(False)
class Delivery(AbstractAssetBasePath["Delivery"]):
_CONTENT_DIR_NAME = "content"
_content: "Content" = None
def get_content(self) -> "Content":
    """Return the delivery's "content" subpath."""
    return self._content
_box_routines: "Box" = None
def get_box_routines(self) -> "Box":
    """Return the Box this delivery belongs to."""
    return self._box_routines
def __init__(self, routines: GitAssetRoutines, box: "Box"):
    # Keep a handle to the owning Box before base-class init — presumably the
    # base __init__ may touch subpaths; confirm before reordering.
    self._box_routines = box
    super().__init__(routines)
    self._content = Content(self._CONTENT_DIR_NAME, self)
def get_sub_basepaths(self) -> typing.List["AbstractAssetBasePath"]:
    """A delivery contains no nested asset base paths."""
    return []
def get_subpaths(self) -> "typing.List[AbstractSubPath[Delivery]]":
    """The only subpath is the content directory."""
    return [self._content]
def get_aspect_routines(self) -> DeliveryAspectRoutines:
    """Build DeliveryAspectRoutines for this delivery's (loaded) asset."""
    routines = self.get_asset_routines()
    routines.load()
    config = DeliveryAspectConfig(routines.abs_path)
    return DeliveryAspectRoutines(config, routines)
async def commit_and_lock_routine(self, feedback_ui: AbstractFeedbackUI) -> bool:
    """Stage the delivery's content directory, mark the delivery locked, and commit.

    Returns:
        True on success; False (after reporting an error to the UI) if the
        delivery is already locked.
    """
    aspect_routines = self.get_aspect_routines()
    aspect_routines.load()
    asset_routines = aspect_routines.get_asset_routines()
    working_asset = asset_routines.working_asset
    locked = aspect_routines.get_configuration().get_locked()
    if locked:
        await feedback_ui.error("Delivery is locked. It should not be modified.")
        return False
    repo = working_asset.GetRepo()
    # Stage everything under the content directory.
    add_output = repo.git.add("content")
    await feedback_ui.output(add_output)
    # Flip the locked flag and commit the aspect first so the lock itself is
    # part of the git commit below.
    aspect_routines.get_configuration().set_locked(True)
    aspect_routines.commit()
    commit_output = repo.git.commit(m="Adding and locking delivery content.")
    await feedback_ui.output(commit_output)
    # Fix: the method is declared to return bool but previously fell off the
    # end on success, implicitly returning None (falsy).
    return True
async | |
<filename>models/new/fanogan.py
import tensorflow as tf
from base.base_model import BaseModel
from utils.alad_utils import get_getter
class FAnogan(BaseModel):
def __init__(self, config):
    """Build the full f-AnoGAN graph and the checkpoint saver from *config*."""
    super(FAnogan, self).__init__(config)
    self.build_model()
    self.init_saver()
def build_model(self):
    """Construct the f-AnoGAN training and testing (EMA) graphs.

    Builds, in order: placeholders; the GAN training graph (generator,
    discriminator, encoder in ZIZ and IZI wirings); encoder and GAN losses
    for the configured modes ("standard", "wgan", "wgan_gp"); optimizers,
    update/EMA ops; an EMA-weighted testing graph; anomaly scores; and
    optional summaries.

    Fix in this revision: the "wgan_gp" critic loss used
    ``-loss_d_fake - loss_d_real`` which expands to
    ``E[D(fake)] + E[D(real)]``; it now matches the "wgan" branch's correct
    ``E[D(fake)] - E[D(real)]``.
    """
    # Kernel initialization for the convolutions
    if self.config.trainer.init_type == "normal":
        self.init_kernel = tf.random_normal_initializer(mean=0.0, stddev=0.02)
    elif self.config.trainer.init_type == "xavier":
        self.init_kernel = tf.contrib.layers.xavier_initializer(
            uniform=False, seed=None, dtype=tf.float32
        )
    # Placeholders
    self.is_training_gen = tf.placeholder(tf.bool)
    self.is_training_dis = tf.placeholder(tf.bool)
    self.is_training_enc = tf.placeholder(tf.bool)
    self.image_input = tf.placeholder(
        tf.float32, shape=[None] + self.config.trainer.image_dims, name="x"
    )
    self.noise_tensor = tf.placeholder(
        tf.float32, shape=[None, self.config.trainer.noise_dim], name="noise"
    )
    self.true_labels = tf.placeholder(dtype=tf.float32, shape=[None, 1], name="true_labels")
    self.generated_labels = tf.placeholder(dtype=tf.float32, shape=[None, 1], name="gen_labels")
    # Additive instance noise fed to the real/fake discriminator inputs.
    self.real_noise = tf.placeholder(
        dtype=tf.float32, shape=[None] + self.config.trainer.image_dims, name="real_noise"
    )
    self.fake_noise = tf.placeholder(
        dtype=tf.float32, shape=[None] + self.config.trainer.image_dims, name="fake_noise"
    )
    self.logger.info("Building training graph...")
    with tf.variable_scope("FAnogan"):
        # Generator and Discriminator Training
        with tf.variable_scope("Generator_Model"):
            self.image_gen = self.generator(self.noise_tensor) + self.fake_noise
        with tf.variable_scope("Discriminator_Model"):
            self.disc_real, self.disc_f_real = self.discriminator(
                self.image_input + self.real_noise
            )
            self.disc_fake, self.disc_f_fake = self.discriminator(self.image_gen)
        # Encoder Training
        with tf.variable_scope("Encoder_Model"):
            # ZIZ Architecture: z -> G(z) -> E(G(z))
            self.encoded_gen_noise = self.encoder(self.image_gen)
            # IZI Architecture: x -> E(x)
            self.encoded_img = self.encoder(self.image_input)
        with tf.variable_scope("Generator_Model"):
            self.gen_enc_img = self.generator(self.encoded_img)
        with tf.variable_scope("Discriminator_Model"):
            # IZI Training
            self.disc_real_izi, self.disc_f_real_izi = self.discriminator(self.image_input)
            self.disc_fake_izi, self.disc_f_fake_izi = self.discriminator(self.gen_enc_img)
    with tf.name_scope("Loss_Funcions"):
        with tf.name_scope("Encoder"):
            if self.config.trainer.encoder_training_mode == "ziz":
                # ||E(G(z)) - z||, normalized by the noise dimensionality.
                self.loss_encoder = tf.reduce_mean(
                    self.mse_loss(
                        self.encoded_gen_noise,
                        self.noise_tensor,
                        mode=self.config.trainer.encoder_loss_mode,
                    )
                    * (1.0 / self.config.trainer.noise_dim)
                )
            elif self.config.trainer.encoder_training_mode == "izi":
                # ||x - G(E(x))||, normalized by the number of pixels.
                self.izi_reconstruction = self.mse_loss(
                    self.image_input,
                    self.gen_enc_img,
                    mode=self.config.trainer.encoder_loss_mode,
                ) * (
                    1.0
                    / (self.config.data_loader.image_size * self.config.data_loader.image_size)
                )
                self.loss_encoder = tf.reduce_mean(self.izi_reconstruction)
            elif self.config.trainer.encoder_training_mode == "izi_f":
                # izi plus a kappa-weighted discriminator-feature residual.
                self.izi_reconstruction = self.mse_loss(
                    self.image_input,
                    self.gen_enc_img,
                    mode=self.config.trainer.encoder_loss_mode,
                ) * (
                    1.0
                    / (self.config.data_loader.image_size * self.config.data_loader.image_size)
                )
                self.izi_disc = self.mse_loss(
                    self.disc_f_real_izi,
                    self.disc_f_fake_izi,
                    mode=self.config.trainer.encoder_loss_mode,
                ) * (
                    1.0
                    * self.config.trainer.kappa_weight_factor
                    / self.config.trainer.feature_layer_dim
                )
                self.loss_encoder = tf.reduce_mean(self.izi_reconstruction + self.izi_disc)
        with tf.name_scope("Discriminator_Generator"):
            if self.config.trainer.mode == "standard":
                self.loss_disc_real = tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(
                        labels=self.true_labels, logits=self.disc_real
                    )
                )
                self.loss_disc_fake = tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(
                        labels=self.generated_labels, logits=self.disc_fake
                    )
                )
                self.loss_discriminator = self.loss_disc_real + self.loss_disc_fake
                # Flip the weigths for the encoder and generator
                if self.config.trainer.flip_labels:
                    labels_gen = tf.zeros_like(self.disc_fake)
                else:
                    labels_gen = tf.ones_like(self.disc_fake)
                # Generator: cross-entropy plus feature-matching term.
                self.loss_generator_ce = tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(
                        labels=labels_gen, logits=self.disc_fake
                    )
                )
                delta = self.disc_f_fake - self.disc_f_real
                delta = tf.layers.Flatten()(delta)
                self.loss_generator_fm = tf.reduce_mean(
                    tf.norm(delta, ord=2, axis=1, keepdims=False)
                )
                self.loss_generator = (
                    self.loss_generator_ce
                    + self.config.trainer.feature_match_weight * self.loss_generator_fm
                )
            elif self.config.trainer.mode == "wgan":
                # loss_d_fake = -E[D(fake)], loss_d_real = -E[D(real)], so the
                # critic loss below is E[D(fake)] - E[D(real)].
                self.loss_d_fake = -tf.reduce_mean(self.disc_fake)
                self.loss_d_real = -tf.reduce_mean(self.disc_real)
                self.loss_discriminator = -self.loss_d_fake + self.loss_d_real
                self.loss_generator = -tf.reduce_mean(self.disc_fake)
                # Weight Clipping and Encoder Part
            elif self.config.trainer.mode == "wgan_gp":
                self.loss_generator = -tf.reduce_mean(self.disc_fake)
                self.loss_d_fake = -tf.reduce_mean(self.disc_fake)
                self.loss_d_real = -tf.reduce_mean(self.disc_real)
                # FIX: was `-self.loss_d_fake - self.loss_d_real`, i.e.
                # E[D(fake)] + E[D(real)] — a sign error on the real term.
                # Now consistent with the "wgan" branch: E[D(fake)] - E[D(real)].
                self.loss_discriminator = -self.loss_d_fake + self.loss_d_real
                alpha_x = tf.random_uniform(
                    shape=[self.config.data_loader.batch_size] + self.config.trainer.image_dims,
                    minval=0.0,
                    maxval=1.0,
                )
                differences_x = self.image_gen - self.image_input
                interpolates_x = self.image_input + (alpha_x * differences_x)
                # NOTE(review): discriminator() returns (logits, features);
                # tf.gradients sums gradients over both outputs here — confirm
                # the intent was gradients of the logits only.
                gradients = tf.gradients(self.discriminator(interpolates_x), [interpolates_x])[
                    0
                ]
                # NOTE(review): reduction_indices=[1] only sums one spatial
                # axis of a 4-D gradient; WGAN-GP normally reduces over all
                # non-batch axes — confirm before changing.
                slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
                gradient_penalty = tf.reduce_mean((slopes - 1.0) ** 2)
                self.loss_discriminator += self.config.trainer.wgan_gp_lambda * gradient_penalty
    with tf.name_scope("Optimizations"):
        if self.config.trainer.mode == "standard":
            # Build the optimizers
            self.generator_optimizer = tf.train.AdamOptimizer(
                self.config.trainer.standard_lr,
                beta1=self.config.trainer.optimizer_adam_beta1,
                beta2=self.config.trainer.optimizer_adam_beta2,
            )
            self.discriminator_optimizer = tf.train.AdamOptimizer(
                self.config.trainer.standard_lr_disc,
                beta1=self.config.trainer.optimizer_adam_beta1,
                beta2=self.config.trainer.optimizer_adam_beta2,
            )
            self.encoder_optimizer = tf.train.AdamOptimizer(
                self.config.trainer.standard_lr,
                beta1=self.config.trainer.optimizer_adam_beta1,
                beta2=self.config.trainer.optimizer_adam_beta2,
            )
        elif self.config.trainer.mode == "wgan":
            # Build the optimizers
            self.generator_optimizer = tf.train.RMSPropOptimizer(self.config.trainer.wgan_lr)
            self.discriminator_optimizer = tf.train.RMSPropOptimizer(
                self.config.trainer.standard_lr_disc
            )
            self.encoder_optimizer = tf.train.AdamOptimizer(
                self.config.trainer.wgan_lr,
                beta1=self.config.trainer.optimizer_adam_beta1,
                beta2=self.config.trainer.optimizer_adam_beta2,
            )
        elif self.config.trainer.mode == "wgan_gp":
            # Build the optimizers (WGAN-GP convention: beta1=0, beta2=0.9).
            self.generator_optimizer = tf.train.AdamOptimizer(
                self.config.trainer.wgan_gp_lr, beta1=0.0, beta2=0.9
            )
            self.discriminator_optimizer = tf.train.AdamOptimizer(
                self.config.trainer.standard_lr_disc, beta1=0.0, beta2=0.9
            )
            self.encoder_optimizer = tf.train.AdamOptimizer(
                self.config.trainer.wgan_gp_lr, beta1=0.0, beta2=0.9
            )
        # Collect all the variables
        all_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        # Generator Network Variables
        self.generator_vars = [
            v for v in all_variables if v.name.startswith("FAnogan/Generator_Model")
        ]
        # Discriminator Network Variables
        self.discriminator_vars = [
            v for v in all_variables if v.name.startswith("FAnogan/Discriminator_Model")
        ]
        if self.config.trainer.mode == "wgan":
            # Weight clipping for the WGAN critic.
            clip_ops = []
            for var in self.discriminator_vars:
                clip_bounds = [-0.01, 0.01]
                clip_ops.append(
                    tf.assign(var, tf.clip_by_value(var, clip_bounds[0], clip_bounds[1]))
                )
            self.clip_disc_weights = tf.group(*clip_ops)
        # Encoder Network Variables
        self.encoder_vars = [
            v for v in all_variables if v.name.startswith("FAnogan/Encoder_Model")
        ]
        # Create Training Operations
        # Generator Network Operations
        self.gen_update_ops = tf.get_collection(
            tf.GraphKeys.UPDATE_OPS, scope="FAnogan/Generator_Model"
        )
        # Discriminator Network Operations
        self.disc_update_ops = tf.get_collection(
            tf.GraphKeys.UPDATE_OPS, scope="FAnogan/Discriminator_Model"
        )
        # Encoder Network Operations
        self.enc_update_ops = tf.get_collection(
            tf.GraphKeys.UPDATE_OPS, scope="FAnogan/Encoder_Model"
        )
        # Initialization of Optimizers (batch-norm updates run first).
        with tf.control_dependencies(self.gen_update_ops):
            self.gen_op = self.generator_optimizer.minimize(
                self.loss_generator,
                var_list=self.generator_vars,
                global_step=self.global_step_tensor,
            )
        with tf.control_dependencies(self.disc_update_ops):
            self.disc_op = self.discriminator_optimizer.minimize(
                self.loss_discriminator, var_list=self.discriminator_vars
            )
        with tf.control_dependencies(self.enc_update_ops):
            self.enc_op = self.encoder_optimizer.minimize(
                self.loss_encoder, var_list=self.encoder_vars
            )
        # Exponential Moving Average for Estimation
        self.dis_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
        maintain_averages_op_dis = self.dis_ema.apply(self.discriminator_vars)
        self.gen_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
        maintain_averages_op_gen = self.gen_ema.apply(self.generator_vars)
        self.enc_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
        maintain_averages_op_enc = self.enc_ema.apply(self.encoder_vars)
        with tf.control_dependencies([self.disc_op]):
            self.train_dis_op = tf.group(maintain_averages_op_dis)
        with tf.control_dependencies([self.gen_op]):
            self.train_gen_op = tf.group(maintain_averages_op_gen)
        with tf.control_dependencies([self.enc_op]):
            self.train_enc_op = tf.group(maintain_averages_op_enc)
    self.logger.info("Building Testing Graph...")
    with tf.variable_scope("FAnogan"):
        # Same wiring as training, reading EMA-averaged weights via get_getter.
        with tf.variable_scope("Generator_Model"):
            self.image_gen_ema = self.generator(
                self.noise_tensor, getter=get_getter(self.gen_ema)
            )
        with tf.variable_scope("Discriminator_Model"):
            self.disc_real_ema, self.disc_f_real_ema = self.discriminator(
                self.image_input, getter=get_getter(self.dis_ema)
            )
            self.disc_fake_ema, self.disc_f_fake_ema = self.discriminator(
                self.image_gen_ema, getter=get_getter(self.dis_ema)
            )
        # Encoder Training
        with tf.variable_scope("Encoder_Model"):
            # ZIZ Architecture
            self.encoded_gen_noise_ema = self.encoder(
                self.image_gen_ema, getter=get_getter(self.enc_ema)
            )
            # IZI Architecture
            self.encoded_img_ema = self.encoder(
                self.image_input, getter=get_getter(self.enc_ema)
            )
        with tf.variable_scope("Generator_Model"):
            self.gen_enc_img_ema = self.generator(
                self.encoded_img_ema, getter=get_getter(self.gen_ema)
            )
        with tf.variable_scope("Discriminator_Model"):
            # IZI Training
            self.disc_real_izi_ema, self.disc_f_real_izi_ema = self.discriminator(
                self.image_input, getter=get_getter(self.dis_ema)
            )
            self.disc_fake_izi_ema, self.disc_f_fake_izi_ema = self.discriminator(
                self.gen_enc_img_ema, getter=get_getter(self.dis_ema)
            )
    with tf.name_scope("Testing"):
        with tf.name_scope("izi_f_loss"):
            self.score_reconstruction = self.mse_loss(
                self.image_input, self.gen_enc_img_ema
            ) * (
                1.0 / (self.config.data_loader.image_size * self.config.data_loader.image_size)
            )
            self.score_disc = self.mse_loss(
                self.disc_f_real_izi_ema, self.disc_f_fake_izi_ema
            ) * (
                1.0
                * self.config.trainer.kappa_weight_factor
                / self.config.trainer.feature_layer_dim
            )
            self.izi_f_score = self.score_reconstruction + self.score_disc
        with tf.name_scope("ziz_loss"):
            # NOTE(review): this recomputes the image-space (izi) residual and
            # overwrites score_reconstruction rather than scoring in z-space —
            # confirm intent.
            self.score_reconstruction = self.mse_loss(
                self.image_input, self.gen_enc_img_ema
            ) * (
                1.0 / (self.config.data_loader.image_size * self.config.data_loader.image_size)
            )
            self.ziz_score = self.score_reconstruction
    if self.config.trainer.enable_early_stop:
        self.rec_error_valid = tf.reduce_mean(self.izi_f_score)
    if self.config.log.enable_summary:
        with tf.name_scope("Summary"):
            with tf.name_scope("Disc_Summary"):
                tf.summary.scalar("loss_discriminator", self.loss_discriminator, ["dis"])
                if self.config.trainer.mode == "standard":
                    tf.summary.scalar("loss_dis_real", self.loss_disc_real, ["dis"])
                    tf.summary.scalar("loss_dis_fake", self.loss_disc_fake, ["dis"])
            with tf.name_scope("Gen_Summary"):
                tf.summary.scalar("loss_generator", self.loss_generator, ["gen"])
                if self.config.trainer.mode == "standard":
                    tf.summary.scalar("loss_generator_ce", self.loss_generator_ce, ["gen"])
                    tf.summary.scalar("loss_generator_fm", self.loss_generator_fm, ["gen"])
                tf.summary.scalar("loss_encoder", self.loss_encoder, ["enc"])
            with tf.name_scope("Image_Summary"):
                tf.summary.image("reconstruct", self.image_gen, 3, ["image_1"])
                tf.summary.image("input_images", self.image_input, 3, ["image_1"])
                tf.summary.image("gen_enc_img", self.gen_enc_img, 3, ["image_2"])
                tf.summary.image("input_image_2", self.image_input, 3, ["image_2"])
            if self.config.trainer.enable_early_stop:
                with tf.name_scope("validation_summary"):
                    tf.summary.scalar("valid", self.rec_error_valid, ["v"])
    self.sum_op_dis = tf.summary.merge_all("dis")
    self.sum_op_gen = tf.summary.merge_all("gen")
    self.sum_op_enc = tf.summary.merge_all("enc")
    self.sum_op_im_1 = tf.summary.merge_all("image_1")
    self.sum_op_im_2 = tf.summary.merge_all("image_2")
    self.sum_op_valid = tf.summary.merge_all("v")
def generator(self, noise_input, getter=None):
    """DCGAN-style generator: dense 4x4x512 projection followed by three
    stride-2 transposed convolutions and a stride-1 output layer with tanh.

    Args:
        noise_input: latent batch of shape [batch, noise_dim].
        getter: optional custom variable getter (used to read EMA weights).

    Returns:
        Generated single-channel image batch in [-1, 1] (tanh output).
    """
    with tf.variable_scope("Generator", custom_getter=getter, reuse=tf.AUTO_REUSE):
        net_name = "Layer_1"
        with tf.variable_scope(net_name):
            x_g = tf.layers.Dense(
                units=4 * 4 * 512, kernel_initializer=self.init_kernel, name="fc"
            )(noise_input)
            x_g = tf.layers.batch_normalization(
                x_g,
                momentum=self.config.trainer.batch_momentum,
                training=self.is_training_gen,
                name="batch_normalization",
            )
            x_g = tf.nn.leaky_relu(
                features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
            )
        # Reshape the dense projection into a 4x4 feature map stack.
        x_g = tf.reshape(x_g, [-1, 4, 4, 512])
        net_name = "Layer_2"
        with tf.variable_scope(net_name):
            x_g = tf.layers.Conv2DTranspose(
                filters=512,
                kernel_size=4,
                strides=2,
                padding="same",
                kernel_initializer=self.init_kernel,
                name="conv2t",
            )(x_g)
            x_g = tf.layers.batch_normalization(
                x_g,
                momentum=self.config.trainer.batch_momentum,
                training=self.is_training_gen,
                name="batch_normalization",
            )
            x_g = tf.nn.leaky_relu(
                features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
            )
        net_name = "Layer_3"
        with tf.variable_scope(net_name):
            x_g = tf.layers.Conv2DTranspose(
                filters=256,
                kernel_size=4,
                strides=2,
                padding="same",
                kernel_initializer=self.init_kernel,
                name="conv2t",
            )(x_g)
            x_g = tf.layers.batch_normalization(
                x_g,
                momentum=self.config.trainer.batch_momentum,
                training=self.is_training_gen,
                name="batch_normalization",
            )
            x_g = tf.nn.leaky_relu(
                features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
            )
        net_name = "Layer_4"
        with tf.variable_scope(net_name):
            x_g = tf.layers.Conv2DTranspose(
                filters=128,
                kernel_size=4,
                strides=2,
                padding="same",
                kernel_initializer=self.init_kernel,
                name="conv2t",
            )(x_g)
            x_g = tf.layers.batch_normalization(
                x_g,
                momentum=self.config.trainer.batch_momentum,
                training=self.is_training_gen,
                name="batch_normalization",
            )
            x_g = tf.nn.leaky_relu(
                features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
            )
        net_name = "Layer_5"
        with tf.variable_scope(net_name):
            # Output layer: single channel, stride 1, squashed with tanh.
            x_g = tf.layers.Conv2DTranspose(
                filters=1,
                kernel_size=5,
                strides=1,
                padding="same",
                kernel_initializer=self.init_kernel,
                name="conv2t",
            )(x_g)
            x_g = tf.tanh(x_g, name="tanh")
    return x_g
def discriminator(self, image_input, getter=None):
    """Convolutional critic/discriminator.

    Args:
        image_input: image batch tensor.
        getter: optional custom variable getter (used to read EMA weights).

    Returns:
        (logits, intermediate_layer): one unbounded logit per image and the
        flattened penultimate features used for feature matching / izi_f.
    """
    with tf.variable_scope("Discriminator", custom_getter=getter, reuse=tf.AUTO_REUSE):
        # First Convolutional Layer
        net_name = "Layer_1"
        with tf.variable_scope(net_name):
            x_d = tf.layers.Conv2D(
                filters=64,
                kernel_size=5,
                strides=(2, 2),
                padding="same",
                kernel_initializer=self.init_kernel,
                name="d_conv1",
            )(image_input)
            x_d = tf.layers.batch_normalization(
                inputs=x_d,
                momentum=self.config.trainer.batch_momentum,
                training=self.is_training_dis,
                name="d_bn_1",
            )
            x_d = tf.nn.leaky_relu(
                features=x_d, alpha=self.config.trainer.leakyReLU_alpha, name="d_lr_1"
            )  # 28 x 28 x 64
        # Second Convolutional Layer
        net_name = "Layer_2"
        with tf.variable_scope(net_name):
            x_d = tf.layers.Conv2D(
                filters=128,
                kernel_size=5,
                strides=(2, 2),
                padding="same",
                kernel_initializer=self.init_kernel,
                name="d_conv_2",
            )(x_d)
            x_d = tf.layers.batch_normalization(
                inputs=x_d,
                momentum=self.config.trainer.batch_momentum,
                training=self.is_training_dis,
                name="d_bn_2",
            )
            x_d = tf.nn.leaky_relu(
                features=x_d, alpha=self.config.trainer.leakyReLU_alpha, name="d_lr_2"
            )  # 14 x 14 x 128
        # Third Convolutional Layer
        net_name = "Layer_3"
        with tf.variable_scope(net_name):
            x_d = tf.layers.Conv2D(
                filters=256,
                kernel_size=5,
                strides=(2, 2),
                padding="same",
                kernel_initializer=self.init_kernel,
                name="d_conv_3",
            )(x_d)
            x_d = tf.layers.batch_normalization(
                inputs=x_d,
                momentum=self.config.trainer.batch_momentum,
                training=self.is_training_dis,
                name="d_bn_3",
            )
            x_d = tf.nn.leaky_relu(
                features=x_d, alpha=self.config.trainer.leakyReLU_alpha, name="d_lr_3"
            )  # 7 x 7 x 256
        net_name = "Layer_4"
        with tf.variable_scope(net_name):
            # Flatten, then BN + leaky ReLU; this is the feature layer that is
            # returned for feature matching.
            x_d = tf.layers.Flatten(name="d_flatten")(x_d)
            x_d = tf.layers.batch_normalization(
                inputs=x_d,
                momentum=self.config.trainer.batch_momentum,
                training=self.is_training_dis,
                name="d_bn_4",
            )
            x_d = tf.nn.leaky_relu(
                features=x_d, alpha=self.config.trainer.leakyReLU_alpha, name="d_lr_4"
            )
            intermediate_layer = x_d
        net_name = "Layer_5"
        with tf.variable_scope(net_name):
            x_d = tf.layers.Dense(units=1, name="d_dense")(x_d)
        return x_d, intermediate_layer
def encoder(self, image_input, getter=None):
with tf.variable_scope("Encoder", custom_getter=getter, reuse=tf.AUTO_REUSE):
x_e = tf.reshape(
image_input,
[-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
)
net_name = "Layer_1"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=64,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e, momentum=self.config.trainer.batch_momentum, training=self.is_training_enc
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
net_name = "Layer_2"
with | |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import pdb, time
from torch.autograd import Variable
torch.manual_seed(12)
SIGMA = 1
EPSILON = 1e-5
class GatedConv1d(nn.Module):
    """1-D gated convolution: out = act(h(x)) * sigmoid(g(x)).

    `activation` is applied to the content path `h` when given; the gate
    path `g` is always squashed with a sigmoid.
    """

    def __init__(self, input_channels, output_channels,
                 kernel_size, stride, padding=0, dilation=1, activation=None):
        super(GatedConv1d, self).__init__()
        # Construction order kept stable (sigmoid, h, g) so seeded weight
        # initialization matches earlier revisions.
        self.activation = activation
        self.sigmoid = nn.Sigmoid()
        self.h = nn.Conv1d(input_channels, output_channels, kernel_size,
                           stride, padding, dilation)
        self.g = nn.Conv1d(input_channels, output_channels, kernel_size,
                           stride, padding, dilation)

    def forward(self, x):
        gate = self.sigmoid(self.g(x))
        content = self.h(x) if self.activation is None else self.activation(self.h(x))
        return content * gate
class GatedConvTranspose1d(nn.Module):
    """1-D gated transposed convolution: out = act(h(x)) * sigmoid(g(x))."""

    def __init__(self, input_channels, output_channels,
                 kernel_size, stride, padding=0, output_padding=0, dilation=1,
                 activation=None):
        super(GatedConvTranspose1d, self).__init__()
        # Construction order kept stable (sigmoid, h, g) so seeded weight
        # initialization matches earlier revisions.
        self.activation = activation
        self.sigmoid = nn.Sigmoid()
        self.h = nn.ConvTranspose1d(input_channels, output_channels,
                                    kernel_size, stride, padding, output_padding,
                                    dilation=dilation)
        self.g = nn.ConvTranspose1d(input_channels, output_channels,
                                    kernel_size, stride, padding, output_padding,
                                    dilation=dilation)

    def forward(self, x):
        gate = self.sigmoid(self.g(x))
        content = self.h(x) if self.activation is None else self.activation(self.h(x))
        return content * gate
class ConvTranspose1d(nn.Module):
    """Plain (ungated) 1-D transposed convolution with optional activation."""

    def __init__(self, input_channels, output_channels,
                 kernel_size, stride, padding=0, output_padding=0, dilation=1,
                 activation=None):
        super(ConvTranspose1d, self).__init__()
        self.activation = activation
        # Kept for interface parity with the gated variants; not used in forward.
        self.sigmoid = nn.Sigmoid()
        self.h = nn.ConvTranspose1d(input_channels, output_channels,
                                    kernel_size, stride, padding, output_padding,
                                    dilation=dilation)

    def forward(self, x):
        out = self.h(x)
        if self.activation is not None:
            out = self.activation(out)
        return out
class edgeGNN(nn.Module):
    """Two-round edge-centric GNN.

    Each round: node MLP -> gather to edges via the sparse incidence
    matrices (n2e_in/n2e_out) -> edge MLP (with raw edge features xE) ->
    scatter back to nodes.  A final linear + 1-D conv head pools the node
    embeddings into a single output vector.

    Depends on SparseMM and LayerNorm defined elsewhere in this file.
    """

    def __init__(self, nfeat, nhid, nOut, nNodes, dropout, nEdgF=1):
        super(edgeGNN, self).__init__()
        self.fc_node_1_1 = nn.Linear(nfeat, nhid)
        self.fc_node_1_2 = nn.Linear(nhid, nhid)
        # Edge MLP input: [x_in, x_out, xE] = 2*nhid node features + nEdgF edge features.
        self.fc_edge_1_1 = nn.Linear(nhid * 2 + nEdgF, nhid)
        self.fc_edge_1_2 = nn.Linear(nhid, nhid)
        self.fc_node_2_1 = nn.Linear(nhid * 2, nhid)
        self.fc_node_2_2 = nn.Linear(nhid, nhid)
        self.fc_edge_2_1 = nn.Linear(nhid * 2 + nEdgF, nhid)
        self.fc_edge_2_2 = nn.Linear(nhid, nhid)
        self.ln1 = LayerNorm(nhid)  # NOTE(review): constructed but unused in forward
        self.ln2 = LayerNorm(nhid)  # NOTE(review): constructed but unused in forward
        self.dropout = dropout
        self.act = nn.ReLU()
        self.n2e = nn.Linear(2 * nhid, nOut)
        # Graph-to-embedding head: pools the node dimension down to 1 channel.
        self.g2e = nn.Sequential(nn.Conv1d(nNodes, int(nNodes / 2), 1), nn.ReLU(),
                                 nn.Conv1d(int(nNodes / 2), 1, 1))
        self.sparseMM = SparseMM.apply

    def forward(self, x, n2e_in, n2e_out, xE):
        ## First GNN layer
        # Node MLP
        x = self.act(self.fc_node_1_1(x))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.act(self.fc_node_1_2(x))
        # Node to edge: gather endpoint features for every edge.
        x_in = self.sparseMM(n2e_in, x)
        x_out = self.sparseMM(n2e_out, x)
        x = torch.cat([x_in, x_out, xE], 1)
        # Edge MLP
        x = self.act(self.fc_edge_1_1(x))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.act(self.fc_edge_1_2(x))
        # Edge to node: scatter edge features back through the transposes.
        x_in = self.sparseMM(n2e_in.transpose(0, 1), x)
        x_out = self.sparseMM(n2e_out.transpose(0, 1), x)
        x = torch.cat([x_in, x_out], 1)
        ### Second GNN layer
        # Node MLP
        x = self.act(self.fc_node_2_1(x))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.act(self.fc_node_2_2(x))
        # Node to edge
        x_in = self.sparseMM(n2e_in, x)
        x_out = self.sparseMM(n2e_out, x)
        x = torch.cat([x_in, x_out, xE], 1)
        # Edge MLP
        x = self.act(self.fc_edge_2_1(x))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.act(self.fc_edge_2_2(x))
        # Edge to node
        x_in = self.sparseMM(n2e_in.transpose(0, 1), x)
        x_out = self.sparseMM(n2e_out.transpose(0, 1), x)
        x = torch.cat([x_in, x_out], 1)
        # Head: (1, nNodes, 2*nhid) -> (1, nNodes, nOut) -> conv-pool -> (1, nOut)
        x = self.n2e(x.unsqueeze(0))
        z = self.g2e(x)
        return z.squeeze(1)
class recEdgeGNN(nn.Module):
    """Recurrent edge-GNN: one input lift, then `niter` shared rounds of
    node MLP -> edge MLP -> scatter back to nodes.  Returns the final node
    embeddings and a graph-level prediction from their mean.

    Depends on SparseMM and LayerNorm defined elsewhere in this file.
    """

    def __init__(self, nfeat, nhid, nOut, dropout, niter):
        super(recEdgeGNN, self).__init__()
        # Lift to 2*nhid so the loop's fc_node_2_1 (2*nhid -> nhid) applies
        # uniformly to both the lifted input and the concatenated scatter output.
        self.fc_node_1_1 = nn.Linear(nfeat, 2 * nhid)
        self.fc_node_1_2 = nn.Linear(nhid, nhid)  # NOTE(review): unused in forward
        self.fc_edge_1_1 = nn.Linear(nhid * 2, nhid)
        self.fc_edge_1_2 = nn.Linear(nhid, nhid)
        self.fc_node_2_1 = nn.Linear(nhid * 2, nhid)
        self.fc_node_2_2 = nn.Linear(nhid, nhid)
        self.ln1 = LayerNorm(nhid)  # NOTE(review): unused in forward
        self.ln2 = LayerNorm(nhid)  # NOTE(review): unused in forward
        self.dropout = dropout
        self.niter = niter
        self.e2p = nn.Linear(2 * nhid, nOut)  # embedding to prediction

    def forward(self, x, n2e_in, n2e_out):
        x = F.relu(self.fc_node_1_1(x))
        for _ in range(self.niter):
            # Node MLP (weights shared across iterations)
            x = F.relu(self.fc_node_2_1(x))
            x = F.dropout(x, self.dropout, training=self.training)
            x = F.relu(self.fc_node_2_2(x))
            # Node to edge
            # NOTE(review): SparseMM()(...) is the legacy autograd.Function
            # call style; edgeGNN uses SparseMM.apply — confirm consistency.
            x_in = SparseMM()(n2e_in, x)
            x_out = SparseMM()(n2e_out, x)
            x = torch.cat([x_in, x_out], 1)
            # Edge MLP
            x = F.relu(self.fc_edge_1_1(x))
            x = F.dropout(x, self.dropout, training=self.training)
            x = F.relu(self.fc_edge_1_2(x))
            # Edge to node
            x_in = SparseMM()(n2e_in.transpose(0, 1), x)
            x_out = SparseMM()(n2e_out.transpose(0, 1), x)
            x = torch.cat([x_in, x_out], 1)
        return x, self.e2p(x.mean(0).view(1, -1))
class GraphAttentionLayer(nn.Module):
    """
    Simple Graph Attention Layer, with separate processing of self-connection.
    Equation format from https://docs.dgl.ai/en/latest/tutorials/models/1_gnn/9_gat.html
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphAttentionLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight_neighbor = nn.Parameter(
            torch.Tensor(in_features, out_features))
        self.weight_self = nn.Parameter(torch.Tensor(in_features, out_features))
        self.a = nn.Parameter(torch.Tensor(2 * out_features, 1))
        nn.init.xavier_uniform_(self.a.data, gain=1.414)
        self.alpha = 0.2
        self.leakyRelu = nn.LeakyReLU(self.alpha, inplace=True)
        self.softmax = nn.Softmax(dim=1)
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in +/- 1/sqrt(out_features) for weights and bias."""
        stdv = 1. / math.sqrt(self.weight_neighbor.size(1))
        self.weight_neighbor.data.uniform_(-stdv, stdv)
        self.weight_self.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj, n2e_in, n2e_out):
        """input: dense (N, in_features); adj: sparse (N, N);
        n2e_in/n2e_out: dense edge-incidence matrices (E, N)."""
        N = adj.shape[0]
        act_self = torch.mm(input, self.weight_self)
        # Transform node activations Eq. (1)
        h = torch.mm(input, self.weight_neighbor)
        # Compute pairwise edge features (Terms inside Eq. (2))
        h_in = torch.mm(n2e_in, h)
        h_out = torch.mm(n2e_out, h)
        hEdge = torch.cat([h_in, h_out], 1)
        # Apply leakyReLU and weights for attention coefficients Eq.(2)
        e = self.leakyRelu(torch.matmul(hEdge, self.a))
        # Apply Softmax per node Eq.(3) — sparse implementation.
        coalesced = adj.coalesce()
        idx = coalesced.indices()
        val = coalesced.values()
        # assumes every node has the same number of neighbors (regular graph)
        numNgbrs = (idx[0] == 0).sum()
        attention = self.softmax(e.view(-1, numNgbrs)).view(-1)
        # FIX: the previous code did `adj._values = adj._values * attention`;
        # `_values` is a *method* on sparse tensors, so that multiplied a bound
        # method by a tensor (TypeError) instead of reweighting the entries.
        # Build a fresh attention-weighted sparse adjacency instead.
        attn_adj = torch.sparse_coo_tensor(idx, val * attention, (N, N))
        # Compute node updates with attention Eq. (4)
        act_neighbor = SparseMM()(attn_adj, h)
        output = act_self + act_neighbor
        if self.bias is not None:
            return output + self.bias
        else:
            return output
class MLP(nn.Module):
    """Node-wise MLP baseline.

    `adj` is accepted only for interface parity with the GNN models and is
    ignored.  Returns (node_embeddings, prediction) where the prediction is
    `e2p` applied to the mean over the node dimension.
    """

    def __init__(self, nfeat, nNodes, nhid, nOut, dropout):
        super(MLP, self).__init__()
        # Layer construction order kept stable so seeded weight initialization
        # matches earlier revisions.
        self.fc1 = nn.Linear(nfeat, nhid)
        self.fc2 = nn.Linear(nhid, nhid)
        self.fc3 = nn.Linear(nhid, nhid)  # currently unused in forward
        self.fc4 = nn.Linear(nhid, nhid)  # currently unused in forward
        self.dropout = dropout
        self.bn1 = nn.BatchNorm1d(nNodes)
        self.e2p = nn.Linear(nhid, nOut, bias=True)  # embedding to prediction
        self.act = nn.LeakyReLU()

    def forward(self, inputs, adj):
        hidden = self.act(self.fc1(inputs))
        hidden = self.bn1(hidden)
        hidden = F.dropout(hidden, self.dropout, training=self.training)
        hidden = self.act(self.fc2(hidden))
        return hidden, self.e2p(hidden.mean(1))
class GAT(nn.Module):
    """Two-layer graph attention network; returns node embeddings and a
    graph-level prediction from their mean."""

    def __init__(self, nfeat, nNodes, nhid, nOut, dropout):
        super(GAT, self).__init__()
        self.gc1 = GraphAttentionLayer(nfeat, nhid)
        self.gc2 = GraphAttentionLayer(nhid, nhid)
        self.dropout = dropout
        self.e2p = nn.Linear(nhid, nOut)  # embedding to prediction

    def encode(self, x, adj, n2e_in, n2e_out):
        hidden = F.relu(self.gc1(x, adj, n2e_in, n2e_out))
        hidden = F.dropout(hidden, self.dropout, training=self.training)
        return F.relu(self.gc2(hidden, adj, n2e_in, n2e_out))

    def forward(self, inputs, adj, n2e_in, n2e_out):
        embedding = self.encode(inputs, adj, n2e_in, n2e_out)
        prediction = self.e2p(embedding.mean(0).view(1, -1))
        return embedding, prediction
class nodeGNN(nn.Module):
    """Node-centric GCN: a linear lift followed by two graph convolutions.
    Returns node embeddings and a prediction from their mean over nodes.

    Depends on GraphConvolution defined elsewhere in this file.
    """

    def __init__(self, nfeat, nNodes, nhid, nOut, dropout):
        super(nodeGNN, self).__init__()
        # Construction order kept stable for seeded initialization.
        self.fc1 = nn.Linear(nfeat, nhid)
        self.gc1 = GraphConvolution(nhid, nhid)
        self.gc2 = GraphConvolution(nhid, nhid)
        self.dropout = dropout
        self.e2p = nn.Linear(nhid, nOut)  # embedding to prediction

    def encode(self, x, adj):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.gc1(hidden, adj))
        hidden = F.dropout(hidden, self.dropout, training=self.training)
        return F.relu(self.gc2(hidden, adj))

    def forward(self, inputs, adj):
        embedding = self.encode(inputs, adj)
        return embedding, self.e2p(embedding.mean(1))
class gatedGNN(nn.Module):
    """GCN with a GRU-like soft gate between the first conv's output and a
    second conv pass; returns node embeddings and a mean-pooled prediction.

    Depends on GraphConvolutionFirstOrder defined elsewhere in this file.
    """

    def __init__(self, nfeat, nNodes, nhid, nOut, dropout):
        super(gatedGNN, self).__init__()
        self.gate1 = nn.Linear(nhid, 1)  # reset-style gate r
        self.gate2 = nn.Linear(nhid, 1)  # update-style gate z
        self.gc1 = GraphConvolutionFirstOrder(nfeat, nhid)
        self.gc2 = GraphConvolutionFirstOrder(nhid, nhid)
        self.gc3 = GraphConvolutionFirstOrder(nhid, nhid)  # NOTE(review): unused in forward
        # self.gc4 = GraphConvolutionFirstOrder(nhid, nhid)
        self.dropout = dropout
        # self.ln1 = LayerNorm(nhid)
        # self.g2e = nn.Linear(nhid,1) #graph to embedding
        self.e2p = nn.Linear(nhid, nOut)  # embedding to prediction

    def encode(self, x, adj):
        # pdb.set_trace()
        # x = F.relu(self.fc1(x))
        # x = F.dropout(x, self.dropout, training=self.training)
        # x = F.relu(self.fc2(x))
        # x = self.ln1(x)
        x = F.relu(self.gc1(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
        x_in = x
        # NOTE(review): gates use softmax over dim=0 (across nodes), not a
        # sigmoid per node as in a standard GRU — confirm this is intended.
        r = F.softmax(self.gate1(x), dim=0)
        z = F.softmax(self.gate2(x), dim=0)
        x = F.relu(self.gc2(x * r, adj))
        # x = F.relu(r * x_in)
        # Convex combination of the gated conv output and the skip input.
        x = (1 - z) * x_in + z * x
        # x = F.relu(self.gc3(x, adj))
        return x

    def forward(self, inputs, adj):
        z = self.encode(inputs, adj)
        # pdb.set_trace()
        # z = self.g2e(z)
        # return z, self.e2p(z.transpose(0,1))
        return z, self.e2p(z.mean(0).view(1, -1))
class recGNN(nn.Module):
def __init__(self, nfeat, nNodes, nhid, nOut,dropout,nIter,idxRange):
super(recGNN, self).__init__()
self.iter = nIter
self.fc1 = nn.Linear(nfeat, nhid)
self.fc2 = nn.Linear(nhid, nhid)
# self.gc = GraphConvolutionFirstOrder(nhid, nhid)
self.gc = GraphConvolution(nhid, nhid)
self.dropout = dropout
# self.e2p = nn.Linear(nhid,nOut) # embedding to prediction
self.e2p = nn.Linear(nhid,nOut) # embedding to regression output
self.g2e = nn.Linear(8,1)
self.idxRange = idxRange
self.ln1 = LayerNorm(nhid)
self.ln2 = LayerNorm(nhid)
self.bn1 | |
<gh_stars>1-10
import pandas as pd
import numpy as np
import scipy.stats
import scipy.special
import pysam
import h5py
import multiprocessing as mp
import statsmodels.stats.multitest
from DIGDriver.sequence_model import sequence_tools
from DIGDriver.sequence_model import gp_tools
def tabix_to_dataframe(tbx, chrom, start, end):
    """Fetch mutation records overlapping a region from a tabix file and
    return them as a DataFrame.

    Column names are inferred from the number of tab-separated fields in the
    first record (6, 7, 8, or 9+); an empty result yields an empty frame
    with the 6 base columns. START and END are cast to int.
    """
    records = [line.split("\t") for line in tbx.fetch(chrom, start, end)]
    base_cols = ['CHROM', 'START', 'END', 'REF', 'ALT', 'ID']
    n_fields = len(records[0]) if records else 6
    if n_fields == 6:
        columns = base_cols
    elif n_fields == 7:
        columns = base_cols + ['ANNOT']
    elif n_fields == 8:
        columns = base_cols + ['MUT', 'CONTEXT']
    else:
        columns = base_cols + ['ANNOT', 'MUT', 'CONTEXT']
    frame = pd.DataFrame(records, columns=columns)
    return frame.astype({'START': int, 'END': int})
def mutation_freq_conditional(S_mut, S_gen, N):
    """Normalise per-context mutation counts into conditional probabilities.

    Each entry of ``S_mut`` is keyed by a ``(mutation, trinucleotide)`` tuple
    and is divided by ``N`` times the count of that trinucleotide in
    ``S_gen``, giving Pr(b | trinuc) under the multi-sided-die model: every
    trinucleotide position is one die rolled ``N`` times.

    Returns a float Series with the same index as ``S_mut``.
    """
    probs = S_mut.copy().astype(float)
    for mut_key in probs.index:
        trinuc = mut_key[1]
        probs[mut_key] = S_mut[mut_key] / (N * S_gen[trinuc])
    return probs
def mutation_freq_joint(S_mut, S_gen, N):
    """ Calculate the joint probability of observing a mutation in a particular trinucleotide context

    Pr(b, trinuc) = Pr(b | trinuc) * Pr(trinuc)
    Empirically: Pr(b, trinuc) = #{b | trinuc} / N * #{all trinucs}

    NOTE(review): the body below is byte-identical to
    mutation_freq_conditional() -- it divides by the per-trinucleotide count
    S_gen[tup[1]], NOT by the total trinucleotide count as the formula above
    implies. Confirm whether this is an intentional alias or a copy-paste bug
    before relying on the "joint" semantics.

    Each trinucloetide sequence is treated as a four-sided die: one side for each possible mutation
    and one side for no mutation. We want to estimate the probability of rolling each side given
    the trinucleotide context.

    We observe N * #{trinuc in genome} rolls of each trinucleotide die.
    We observe #{b | trinuc} stored in S_mut
    """
    # K = S_mut.sum() ## Total number of mutations
    S_mut_norm = S_mut.copy().astype(float)
    for tup in S_mut.index:
        S_mut_norm[tup] = S_mut[tup] / (N * S_gen[tup[1]])
    return S_mut_norm
def train_sequence_model(train_idx, f_model, N, key_prefix=None):
    """ Train a trinucleotide sequence model based on precalculated mutational frequencies
    and trinucleotide occurences across the genome

    Args:
        train_idx: iterable of (chrom, start, end) rows selecting training regions.
        f_model: path to an HDF5 store holding 'mutation_counts' and
            'genome_counts' tables indexed by 'chr{c}:{s}-{e}' region strings.
        N: number of observed samples (die rolls per trinucleotide position).
        key_prefix: optional prefix for the mutation-count table key.

    Returns:
        (Pr_mut_train, d): the per-(mutation, trinucleotide) conditional
        probabilities (Series), and a dict mapping each trinucleotide context
        to the sum of its mutation probabilities.
    """
    train_idx_str = ['chr{}:{}-{}'.format(row[0], row[1], row[2]) for row in train_idx]
    key_mut = 'mutation_counts'
    if key_prefix:
        key_mut = key_prefix + "_" + key_mut
    df_mut = pd.read_hdf(f_model, key=key_mut)
    df_gen = pd.read_hdf(f_model, key='genome_counts')
    S_mut_train = df_mut.loc[train_idx_str, :].sum(axis=0)  ## mutation context counts in train set
    S_gen_train = df_gen.loc[train_idx_str, :].sum(axis=0)  ## trinucloetide counts in train set
    ## Probabilities stratified by mutation type
    Pr_mut_train = mutation_freq_conditional(S_mut_train, S_gen_train, N)
    ## Aggregate probability per trinucleotide context.
    ## Single pass over the index; the original rescanned the entire index
    ## once per distinct context (O(K*T) instead of O(K)). The unused S_pr
    ## Series (dead since the commented-out return) has been dropped.
    d = {}
    for tup in Pr_mut_train.index:
        d[tup[1]] = d.get(tup[1], 0) + Pr_mut_train[tup]
    return Pr_mut_train, d
def expected_mutations_by_context(train_idx, test_idx, f_model, N=1, key_prefix=None):
    """Expected mutation counts per region for a train/test split, using only
    nucleotide sequence context.

    Trains the per-context probability model on ``train_idx``, multiplies the
    genome-wide trinucleotide counts by those probabilities, and sums across
    contexts to get one expectation per region.

    Returns:
        (exp_train, exp_test): Series of expected counts indexed by the
        'chr{c}:{s}-{e}' region strings of each split.
    """
    _, context_probs = train_sequence_model(train_idx, f_model, N, key_prefix=key_prefix)
    prob_series = pd.Series(context_probs)
    genome_counts = pd.read_hdf(f_model, key='genome_counts')
    expected = (genome_counts * prob_series).sum(axis=1)
    train_keys = ['chr{}:{}-{}'.format(row[0], row[1], row[2]) for row in train_idx]
    test_keys = ['chr{}:{}-{}'.format(row[0], row[1], row[2]) for row in test_idx]
    return expected.loc[train_keys], expected.loc[test_keys]
def apply_nb_to_region(CHROM, START, END, mu, sigma, S_probs, tabix, fasta, n_up=2, n_down=2, binsize=1, collapse=False):
    """Score one genomic region per base (or per bin) under a negative-binomial model.

    The regional rate (mu, sigma) is converted to gamma parameters; each
    position's sequence-context probability ``pt`` scales the gamma, and
    observed mutation counts from the tabix file are tested with
    nb_pvalue_exact.

    Args:
        CHROM, START, END: region coordinates (CHROM without the 'chr' prefix).
        mu, sigma: regional rate mean and standard deviation.
        S_probs: per-context probability table passed to sequence_tools.
        tabix: pysam.TabixFile of mutations; fasta: pysam.FastaFile genome.
        n_up, n_down: number of context bases up-/downstream of each position.
        binsize: 1 for per-base tests; otherwise consecutive positions are
            pooled into bins of this size.
        collapse: forwarded to sequence_tools (strand-collapsed contexts --
            TODO confirm exact semantics in sequence_tools).

    Returns:
        (pvals, positions, observed, expected, pt_list): numpy arrays (and a
        plain list for pt_list), one entry per base or per bin.
    """
    chrom = "chr{}".format(CHROM)
    probs, pos_lst = sequence_tools.base_probabilities_by_region(fasta, S_probs, chrom, START, END,
                                                                 n_up=n_up, n_down=n_down, normed=True,
                                                                 collapse=collapse
    )
    # probs, pos_lst, trinucs = sequence_tools.base_probabilities_by_region(fasta, S_probs, chrom, START, END)
    # print(probs)
    df = tabix_to_dataframe(tabix, str(CHROM), START, END)
    mut_counts = df.START.value_counts()  # observed mutations per start position
    alpha, theta = normal_params_to_gamma(mu, sigma)
    # expR = alpha * theta
    pvals = []
    poss = []
    obss = []
    exps = []
    pt_lst = []
    if binsize == 1:
        # per-base test
        for pos, pt in zip(pos_lst, probs):
            k = 0
            if pos in mut_counts.index:
                k = mut_counts[pos]
            p = 1 / (pt * theta + 1)  # NB success probability for the pt-scaled gamma
            pvals.append(nb_pvalue_exact(k, alpha, p))
            # pvals.append(nb_pvalue_approx(k, alpha, p))
            poss.append(pos)
            obss.append(k)
            exps.append(pt * mu)
            pt_lst.append(pt)
    else:
        # pool `binsize` consecutive positions per test
        for i in range(0, len(pos_lst), binsize):
            pt = np.sum(probs[i:i+binsize])
            k = 0
            for pos in pos_lst[i:i+binsize]:
                if pos in mut_counts.index:
                    k += mut_counts[pos]
            p = 1 / (pt * theta + 1)
            pvals.append(nb_pvalue_exact(k, alpha, p))
            # pvals.append(nb_pvalue_approx(k, alpha, p))
            pos = np.mean(pos_lst[i:i+binsize])  # report the bin at its mean position
            poss.append(pos)
            obss.append(k)
            exps.append(pt * mu)
            pt_lst.append(pt)
    pvals = np.array(pvals, dtype=float)
    poss = np.array(poss)
    obss = np.array(obss)
    exps = np.array(exps)
    # chroms = np.array([CHROM] * len(poss))
    return pvals, poss, obss, exps, pt_lst
def nb_model(d_pr, idx, mu_lst, sigma_lst, f_tabix, f_fasta, n_up=2, n_down=2, binsize=50, collapse=False):
    """Run apply_nb_to_region over many regions and stack the results into one DataFrame.

    Args:
        d_pr: per-context probability table (see train_sequence_model).
        idx: iterable of (chrom, start, end) region rows.
        mu_lst, sigma_lst: per-region rate parameters aligned with ``idx``.
        f_tabix, f_fasta: paths to the mutation tabix file and genome fasta.
        n_up, n_down, binsize, collapse: forwarded to apply_nb_to_region.

    Returns:
        DataFrame with one row per tested base/bin and columns CHROM, POS,
        OBS, EXP, PVAL, Pi, MU, SIGMA, REGION. NOTE(review): np.hstack
        promotes all columns to a common dtype, so numeric columns may need
        re-casting by the caller.
    """
    tabix = pysam.TabixFile(f_tabix)
    fasta = pysam.FastaFile(f_fasta)
    # alpha_lst, theta_lst = normal_params_to_gamma(mu_lst, sigma_lst)
    pvals_lst = []
    poss_lst = []
    obss_lst = []
    exps_lst = []
    chrom_lst = []
    reg_lst = []
    mus_lst = []
    sigmas_lst = []
    pts_lst = []
    for row, mu, sigma in zip(idx, mu_lst, sigma_lst):
        pvals, poss, obss, exps, pts = apply_nb_to_region(row[0], row[1], row[2],
                                                          mu, sigma, d_pr, tabix, fasta,
                                                          n_up=n_up, n_down=n_down,
                                                          binsize=binsize, collapse=collapse
        )
        pvals_lst.append(pvals)
        poss_lst.append(poss)
        obss_lst.append(obss)
        exps_lst.append(exps)
        pts_lst.append(pts)
        # region-constant columns, repeated once per tested base/bin
        chrom_lst.append(np.array([row[0]]*len(poss)))
        reg_lst.append(np.array(["{}:{}-{}".format(row[0], row[1], row[2])]*len(poss)))
        mus_lst.append(np.array([mu]*len(poss)))
        sigmas_lst.append(np.array([sigma]*len(poss)))
    # flatten the per-region lists into single column vectors
    all_pvals = np.array([pval for pvals in pvals_lst for pval in pvals]).reshape(-1, 1)
    all_pos = np.array([pos for poss in poss_lst for pos in poss]).reshape(-1, 1)
    all_obs = np.array([obs for obss in obss_lst for obs in obss]).reshape(-1, 1)
    all_exp = np.array([exp for exps in exps_lst for exp in exps]).reshape(-1, 1)
    all_pt = np.array([pt for pts in pts_lst for pt in pts]).reshape(-1, 1)
    all_chroms = np.array([chrom for chroms in chrom_lst for chrom in chroms]).reshape(-1, 1)
    all_regs = np.array([reg for regs in reg_lst for reg in regs]).reshape(-1, 1)
    all_mu = np.array([mu for mus in mus_lst for mu in mus]).reshape(-1, 1)
    all_std = np.array([std for stds in sigmas_lst for std in stds]).reshape(-1, 1)
    nd = np.hstack([all_chroms, all_pos, all_obs, all_exp, all_pvals, all_pt, all_mu, all_std])
    df = pd.DataFrame(nd, columns=['CHROM', 'POS', 'OBS', 'EXP', 'PVAL', 'Pi', 'MU', 'SIGMA'])
    df['REGION'] = all_regs
    return df
    # return pvals_lst, poss_lst, obss_lst, exps_lst, chrom_lst, reg_lst, mus_lst, sigmas_lst
def normal_params_to_gamma(mu, sigma):
    """Moment-match a normal (mu, sigma) to gamma shape/scale parameters.

    Solves alpha * theta = mu and alpha * theta**2 = sigma**2, returning
    the tuple (alpha, theta).
    """
    variance = sigma ** 2
    return mu ** 2 / variance, variance / mu
def nb_pvalue_greater(k, alpha, p):
    """Upper-tail negative-binomial p-value, Pr(X >= k).

    Uses the regularised incomplete beta identity for the NB survival
    function; when betainc underflows to zero, the PMF at k is returned as
    an approximation.
    """
    if k == 0:
        return 1.
    pval = scipy.special.betainc(k, alpha, 1 - p)
    if pval == 0:
        # betainc underflowed -- approximate with the point mass at k
        pval = scipy.stats.nbinom.pmf(k, alpha, p)
    return pval
def nb_pvalue_greater_midp_DEPRECATED(k, alpha, p):
    """Upper-tail NB p-value with a mid-p correction (deprecated variant).

    Unlike nb_pvalue_greater_midp, k == 0 is special-cased so the result
    cannot exceed 1.
    """
    half_point_mass = 0.5 * scipy.stats.nbinom.pmf(k, alpha, p)
    if k == 0:
        return 1 - half_point_mass
    return half_point_mass + scipy.special.betainc(k + 1, alpha, 1 - p)
def nb_pvalue_greater_midp(k, alpha, p):
    """Upper-tail NB p-value with a mid-p correction: half the point mass at
    k plus the tail strictly beyond k."""
    return (0.5 * scipy.stats.nbinom.pmf(k, alpha, p)
            + scipy.special.betainc(k + 1, alpha, 1 - p))
def nb_pvalue_less(k, alpha, p):
    """ Calculate a LOWER TAIL p-value for a negative binomial distribution

    Pr(X <= k) via the regularised incomplete beta identity for the NB CDF.

    Bug fix: the original computed the value but had no return statement,
    so every call returned None.
    """
    return scipy.special.betainc(alpha, k+1, p)
def nb_pvalue_less_midp(k, alpha, p):
    """Lower-tail NB p-value with a mid-p correction: half the point mass at
    k plus the probability strictly below k (zero when k == 0)."""
    half_point_mass = 0.5 * scipy.stats.nbinom.pmf(k, alpha, p)
    if k == 0:
        # nothing lies strictly below zero
        return half_point_mass
    return half_point_mass + scipy.special.betainc(alpha, k, p)
def nb_pvalue_exact(k, alpha, p, mu=None):
    """Two-sided-style NB p-value: lower tail when k is below the mean,
    upper tail otherwise.

    ``mu`` may be supplied to skip recomputing the NB expectation
    alpha * (1 - p) / p (any falsy value triggers the recomputation).
    When betainc underflows to zero, the PMF at k is returned instead.
    """
    expectation = mu if mu else alpha * (1 - p) / p
    if k < expectation:
        pval = scipy.special.betainc(alpha, k + 1, p)
    else:
        pval = scipy.special.betainc(k, alpha, 1 - p)
    if pval == 0:
        # betainc underflowed -- approximate with the point mass at k
        pval = scipy.stats.nbinom.pmf(k, alpha, p)
    return pval
def nb_pvalue_midp(k, alpha, p, mu=None):
""" Calculate an UPPER TAIL or LOWER TAIL p-value for a negative binomial distribution
conditional | |
# -*- coding: UTF-8 -*-
# generated by wxGlade 0.4 on Thu Jul 13 10:46:35 2006
import wx
from wx.lib.floatcanvas import NavCanvas, FloatCanvas
import os, types, sys, traceback
from lxml import etree
from document import Document
from config import Config
from deco import DecoImage
# begin wxGlade: dependencies
# end wxGlade
# Module-wide settings; override via setEncoding() / setConfigFile().
sEncoding = "utf-8"
sConfigFile = "wxvisu.ini"

# Text of the Help > About dialog. Bug fix: the mojibake byte in
# "European Union?s" is repaired to a proper apostrophe.
sAbout = """MultiPageXML viewer- version 2.0
Copyright Xerox 2006-2008
<NAME>, <NAME>
In 2016 and 2017, further development by and for the EU project READ. The READ project has received funding from the European Union's Horizon 2020 research and innovation programme under grant agreement No 674943.
<NAME>, <NAME>
System info:
- python version %s
- wx version %s
""" % (sys.version, wx.VERSION)

# Text of the Help > Content dialog.
sHelp = """Have a look at http://dima.grenoble.xrce.xerox.com/wiki/WXVisu"""

# Confirmation prompt shown when quitting with unsaved changes.
sConfirmQuit = """Some unsaved actions occured - please confirm the quit _without_ saving!"""

# HACK to deal with the recent-documents menu by generating code at runtime
# (see OnMenu_LoadXML): entry counter -> document path, plus the frame itself.
gdicRecentDoc = {}
gSELF = None
def setEncoding(s):
    """Set the module-wide text encoding used when decoding XML node
    content (consumed by MyFrame.OnCanvas_Enter)."""
    global sEncoding
    sEncoding = s
def setConfigFile(s):
    """Set the module-wide path of the INI configuration file read by
    MyFrame.loadConfig at frame construction."""
    global sConfigFile
    sConfigFile = s
class MyFrame(wx.Frame):
    def __init__(self, *args, **kwds):
        """Build the main viewer frame: NavCanvas, toolbar (page navigation,
        jump-back, untick-all), File/Help menus, INI configuration and all
        event bindings. Accepts the usual wx.Frame arguments; the style is
        forced to wx.DEFAULT_FRAME_STYLE.
        """
        # begin wxGlade: MyFrame.__init__
        kwds["style"] = wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, *args, **kwds)

        # The NavCanvas (zoom/pan-capable drawing surface)
        self.wysi = NavCanvas.NavCanvas(self,
                                        1,
                                        Debug=0,
                                        BackgroundColor="WHITE")
        tb = self.wysi.ToolBar
        tsize=(20,20)  # NOTE(review): unused
        bck_bmp = wx.ArtProvider.GetBitmap(wx.ART_GO_BACK)
        for_bmp = wx.ArtProvider.GetBitmap(wx.ART_GO_FORWARD)
        jumpback_bmp = wx.ArtProvider.GetBitmap(wx.ART_UNDO)
        # NOTE(review): the next line overwrites the ART_UNDO bitmap just fetched
        jumpback_bmp = wx.ArtProvider.GetBitmap(wx.ART_GO_TO_PARENT)
        untick_bmp = wx.ArtProvider.GetBitmap(wx.ART_DEL_BOOKMARK)
        # page-number entry (acts on Enter) and read-only page-count display
        self.tc_pagesel = wx.TextCtrl(tb,
                                      style=wx.TE_PROCESS_ENTER)
        # self.tc_pagecur = wx.TextCtrl(tb,
        #                               style=wx.TE_READONLY)
        # #                             style=wx.TE_CENTRE|wx.TE_PROCESS_ENTER)
        self.tc_pagetot = wx.TextCtrl(tb,
                                      style=wx.TE_READONLY)
        # Add some buttons to the canvas toolbar
        tb.AddSeparator()
        ID_BACKWARD = wx.NewId()
        ID_FORWARD = wx.NewId()
        tb.AddSimpleTool(ID_BACKWARD, bck_bmp, "Display the previous page")
        # tb.AddControl(self.tc_pagecur)
        tb.AddControl(self.tc_pagetot)
        tb.AddSimpleTool(ID_FORWARD, for_bmp, "Display the next page")
        tb.AddControl(self.tc_pagesel)
        ID_JUMPBACK = wx.NewId()
        tb.AddSeparator()
        tb.AddSimpleTool(ID_JUMPBACK, jumpback_bmp, "Back")
        ID_UNTICKALL = wx.NewId()
        tb.AddSeparator()
        tb.AddSimpleTool(ID_UNTICKALL, untick_bmp, "Untick")
        tb.Realize()

        # Menu Bar
        self.frame_1_menubar = wx.MenuBar()
        self.SetMenuBar(self.frame_1_menubar)
        wxglade_tmp_menu = wx.Menu()
        id_load = wx.NewId()
        id_reload = wx.NewId()
        id_reloadini = wx.NewId()
        id_save = wx.NewId()
        id_saveas = wx.NewId()
        id_close = wx.NewId()
        id_quit = wx.NewId()
        id_imgfolder = wx.NewId()
        wxglade_tmp_menu.Append(id_load, "&Load Xml File", "", wx.ITEM_NORMAL)
        wxglade_tmp_menu.Append(id_reload, "&Re-load the Xml File", "", wx.ITEM_NORMAL)
        wxglade_tmp_menu.Append(id_save, "&Save Xml File", "", wx.ITEM_NORMAL)
        wxglade_tmp_menu.Append(id_saveas, "Save &As Xml File", "", wx.ITEM_NORMAL)
        wxglade_tmp_menu.Append(id_imgfolder, "Select image folder", "", wx.ITEM_NORMAL)
        # DOES NOT WORK: wxglade_tmp_menu.Append(id_reloadini, "&Reload INI File", "", wx.ITEM_NORMAL)
        #wxglade_tmp_menu.Append(id_close, "&Close", "", wx.ITEM_NORMAL)
        wxglade_tmp_menu.Append(id_quit, "&Quit", "", wx.ITEM_NORMAL)
        wxglade_tmp_menu.AppendSeparator()
        self.frame_1_menubar.Append(wxglade_tmp_menu, "&File")
        self.wxglade_file_menu = wxglade_tmp_menu   # kept so recent docs can be appended later
        self.cntRecentDoc = 0                       # number of recent-document menu entries
        wxglade_tmp_menu = wx.Menu()
        id_help = wx.NewId()
        id_about = wx.NewId()
        wxglade_tmp_menu.Append(id_help, "&Content", "", wx.ITEM_NORMAL)
        wxglade_tmp_menu.Append(id_about, "&About", "", wx.ITEM_NORMAL)
        self.frame_1_menubar.Append(wxglade_tmp_menu, "&Help")
        # Menu Bar end
        # end wxGlade

        self.loadConfig(sConfigFile)
        self.__set_properties()
        self.__do_layout()

        # menu events
        self.Bind(wx.EVT_MENU, self.OnMenu_LoadXML, id=id_load)
        self.Bind(wx.EVT_MENU, self.OnMenu_ReLoadXML, id=id_reload)
        self.Bind(wx.EVT_MENU, self.OnMenu_ReloadINI, id=id_reloadini)
        self.Bind(wx.EVT_MENU, self.OnMenu_SaveXML, id=id_save)
        self.Bind(wx.EVT_MENU, self.OnMenu_SaveAsXML, id=id_saveas)
        self.Bind(wx.EVT_MENU, self.OnMenu_ImgFolder, id=id_imgfolder)
        self.Bind(wx.EVT_MENU, self.OnMenu_Quit, id=id_quit)
        self.Bind(wx.EVT_MENU, self.OnMenu_Help, id=id_help)
        self.Bind(wx.EVT_MENU, self.OnMenu_About, id=id_about)
        # toolbar events
        self.Bind(wx.EVT_TEXT_ENTER, self.OnToolbar_ChangePage, self.tc_pagesel)
        self.Bind(wx.EVT_TOOL, self.OnToolbar_BackwardPage, id=ID_BACKWARD)
        self.Bind(wx.EVT_TOOL, self.OnToolbar_ForwardPage, id=ID_FORWARD)
        self.Bind(wx.EVT_TOOL, self.OnToolbar_JumpBack, id=ID_JUMPBACK)
        self.Bind(wx.EVT_TOOL, self.OnToolbar_UntickAll, id=ID_UNTICKALL)
        self.stackJump = []            # page indices for the "jump back" tool
        self.bindConfigWidget()

        # document state
        self.current_page_node = None  # XML node of the displayed page
        self.path = None               # path of the loaded XML file
        self.doc = None                # Document instance once loaded
        self.current_fonts = None
        self.bModified = False         # True while unsaved changes exist
    def loadConfig(self, sConfigFile):
        """Load the INI configuration and build one (checkbox, prev-button,
        next-button) triple per decoration; separators yield (None, None, None).

        Populates self.config, the deco<->widget lookup dicts and
        self.ltCheckboxObjects (consumed by __do_layout_sizer3 and
        bindConfigWidget).
        """
        self.sConfigFile = sConfigFile
        self.config = Config(sConfigFile)
        #now the checkboxes for the decoration types
        self.dChekBox2Deco = {}  #checkbox -> deco
        self.dDeco2ChekBox = {}  #deco -> checkbox
        self.dBut2Deco = {}      #button -> deco
        self.ltCheckboxObjects = []
        for deco in self.config.getDecoList():
            if deco.isSeparator():  #a separator in the toolbar
                chkbx, butPrev, butNext = None, None, None
            else:
                chkbx = wx.CheckBox(self, -1, deco.getSurname())
                chkbx.SetToolTipString(deco.getMainXPath())
                #chkbx.SetBackgroundColour("RED")
                if deco.isEnabled():
                    chkbx.SetValue(1)
                else:
                    chkbx.SetValue(0)
                self.dChekBox2Deco[chkbx] = deco
                self.dDeco2ChekBox[deco] = chkbx
                #also include two buttons for each deco: previous / next occurrence
                butPrev = wx.NewId()
                butPrev = wx.Button(self, butPrev, "<", size=(14,14), style=wx.BU_EXACTFIT)
                self.dBut2Deco[butPrev] = deco
                butNext = wx.NewId()
                butNext = wx.Button(self, butNext, ">", size=(14,14), style=wx.BU_EXACTFIT)
                self.dBut2Deco[butNext] = deco
            self.ltCheckboxObjects.append((chkbx, butPrev, butNext))
def bindConfigWidget(self):
for chkbx, butPrev, butNext in self.ltCheckboxObjects:
if chkbx == None:
assert butPrev == None and butNext == None, "SW INTERNAL ERROR: bindConfigWidget: %s"%( (butPrev, butNext))
else:
self.Bind(wx.EVT_CHECKBOX, self.cbkDecoCheckBox, chkbx)
self.Bind(wx.EVT_BUTTON, self.cbkDecoPrev, butPrev)
self.Bind(wx.EVT_BUTTON, self.cbkDecoNext, butNext)
    def __set_properties(self):
        """Set the frame title, size and background colour (wxGlade section)."""
        # begin wxGlade: MyFrame.__set_properties
        self.SetTitle("Xml Visualizer")
        # self.SetSize((1000, 800))
        # self.SetSize((1500, 1200))
        # self.SetSize((800, 900))
        self.SetSize((1100, 1200))
        self.SetBackgroundColour("light blue")
        #self.SetBackgroundColour(" jhfg ksd") #default color.... :-)
        # end wxGlade
    def __do_layout(self):
        """Lay out the frame: a vertical sizer containing a horizontal sizer
        with the deco-checkbox column (sizer_3) on the left and the canvas on
        the right (wxGlade section)."""
        # begin wxGlade: MyFrame.__do_layout
        sizer_1 = wx.BoxSizer(wx.VERTICAL)
        sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_1.Add(sizer_2, 1, wx.EXPAND, 0)
        self.sizer_3 = wx.BoxSizer(wx.VERTICAL)
        sizer_2.Add(self.sizer_3, 0, wx.ALL|wx.ADJUST_MINSIZE, 5)
        # self.sizer_3.FitInside(self)      -- no effect
        # self.sizer_3.FitInside(self.wysi) -- no effect
        # FitInside has no effect... :-/
        sizer_1.FitInside(self)
        sizer_2.FitInside(self)
        self.sizer_3.FitInside(self)
        sizer_2.Add(self.wysi, 1, wx.EXPAND, 0)
        self.__do_layout_sizer3()
        # sizer_1.Add(self.wysi, 1, wx.EXPAND, 0)
        self.SetAutoLayout(True)
        self.SetSizer(sizer_1)
        self.Layout()
        # end wxGlade
    def __do_layout_sizer3(self):
        """Fill sizer_3 with one horizontal row per decoration: a static-line
        separator row, or a checkbox plus '<' / '>' jump buttons."""
        #self.lDecoSizer = []
        # self.sizer_3.AddSpacer( (20,20))
        self.sizer_3.AddSpacer(1)
        for chkbx, butPrev, butNext in self.ltCheckboxObjects:
            sz = wx.BoxSizer(wx.HORIZONTAL)
            self.sizer_3.Add(sz, 0, wx.ALL|wx.ADJUST_MINSIZE, 1)
            if chkbx == None:
                #separator in the toolbar
                pass
                #okself.sizer_3.AddSpacer( (20,20))
                #self.sizer_3.Add( wx.StaticLine() )
                sz.Add( wx.StaticLine(self,size=(100, 1)), 0, wx.ALL|wx.CENTER|wx.EXPAND , 1 )
                #sz.Add( wx.StaticLine(self, size=(100, 3)), flag=wx.LI_HORIZONTAL|wx.SOLID )
            else:
                # sz = wx.BoxSizer(wx.HORIZONTAL)
                # self.sizer_3.Add(sz, 0, wx.ALL|wx.ADJUST_MINSIZE, 5)
                sz.Add(chkbx , 0, wx.ALL|wx.ADJUST_MINSIZE, 1)
                sz.Add(butPrev, 0, wx.ALL|wx.ADJUST_MINSIZE, 1)
                sz.Add(butNext, 0, wx.ALL|wx.ADJUST_MINSIZE, 1)
                #self.lDecoSizer.append(sz)
#-------------------
    def OnMenu_LoadXML(self, event): # wxGlade: MyFrame.<event_handler>
        """'Load Xml File' menu handler: prompt for a file, load it, and on
        success register it as a recent document in the File menu.

        Python 2 only: uses the `exec` statement to generate one callback per
        recent document.
        """
        global gSELF, gdicRecentDoc
        try:
            curdir = os.path.dirname(self.path)
        except:
            curdir = self.config.working_dir
        #we also tolerate gzipped xml
        #wildcard = "Xml Document (*.xml; *.xml.gz)|*.xml;*.xml.gz"
        wildcard = "All files (*.*)|*.*|XML (*.xml; *.xml.gz)|*.xml;*.xml.gz|PageXml (*.pxml; *.mpxml)|*.pxml; *.mpxml"
        dlg = wx.FileDialog(self, message="Choose a model",
                            wildcard=wildcard, defaultDir=curdir,
                            style=wx.FD_OPEN | wx.FD_CHANGE_DIR)
        dlg.CenterOnScreen()
        val = dlg.ShowModal()
        if val == wx.ID_OK:
            path = dlg.GetPath()
            if self.loadXML(path):
                #---deal with recent documents:
                if path not in gdicRecentDoc.values():
                    self.cntRecentDoc += 1
                    gdicRecentDoc[self.cntRecentDoc] = path
                    gSELF = self
                    id_doc = wx.NewId()
                    self.wxglade_file_menu.Append(id_doc, path, "", wx.ITEM_NORMAL)
                    #generate a callback function; a global dictionary is used
                    #because of / and \ problems when embedding paths in code
                    sCode = """
def OnMenu_LoadRecent%d(event):
    global gSELF, gdicRecentDoc
    try:
        gSELF.loadXML(gdicRecentDoc[%d])
    except KeyError:
        pass
""" % (self.cntRecentDoc, self.cntRecentDoc)
                    #print sCode
                    exec sCode
                    fun = eval("OnMenu_LoadRecent%d"%self.cntRecentDoc)
                    #print fun
                    self.Bind(wx.EVT_MENU, fun, id=id_doc)
        dlg.Destroy()
    def OnMenu_ReLoadXML(self, event): # wxGlade: MyFrame.<event_handler>
        """'Re-load' menu handler: reload the currently opened XML file from disk."""
        self.loadXML(self.path)
    def OnMenu_ReloadINI(self, event): # HACK!!!!
        """Reload the INI configuration and rebuild the deco widget column.

        NOTE(review): iterates self.lDecoSizer, which only ever appears as a
        commented-out line in __do_layout_sizer3 -- this handler most likely
        raises AttributeError; its menu entry is commented out in __init__.
        """
        #remove the widgets of the decos
        for sz in self.lDecoSizer:
            self.sizer_3.Remove(sz)
        self.__do_layout()
        self.loadConfig(self.sConfigFile)
        self.config.setXPathContext( self.doc.getXPCtxt() )
        self.bindConfigWidget()
        self.__do_layout()
        self.Layout()
        self.display_page()
    def OnMenu_SaveXML(self, event):
        """'Save' menu handler: save to the current filename; clear the
        modified flag on success."""
        ret = self.doc.saveXML(self.doc.getFilename())
        if ret: self.bModified = False
def OnMenu_SaveAsXML(self, event):
curdir = os.path.dirname(self.doc.getFilename())
if not curdir: curdir = os.getcwd()
wildcard = "Xml Document (*.xml)|*.xml"
dlg = wx.FileDialog(self, message="Choose a file",
wildcard=wildcard, defaultDir=curdir,
style=wx.OPEN | wx.CHANGE_DIR)
dlg.CenterOnScreen()
val = dlg.ShowModal()
if val == wx.ID_OK:
ret = self.doc.saveXML(dlg.GetPath())
dlg.Destroy()
if ret: self.bModified = False
    def OnMenu_ImgFolder(self, event):
        """'Select image folder' menu handler: let the user choose the folder
        where page images are looked up, then redisplay the page.

        NOTE(review): the modified flag is set and the page redisplayed even
        when the dialog is cancelled -- confirm this is intended.
        """
        curdir = os.path.dirname(self.doc.getFilename())
        if not curdir: curdir = os.getcwd()
        dlg = wx.DirDialog (None, "Select the image folder", "",
                            wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)
        dlg.CenterOnScreen()
        val = dlg.ShowModal()
        if val == wx.ID_OK:
            DecoImage.sImageFolder = dlg.GetPath()
        dlg.Destroy()
        self.bModified = True
        self.display_page()
    def OnMenu_Quit(self, event):
        """'Quit' menu handler: when unsaved changes exist, ask for
        confirmation first; then destroy the frame."""
        if self.bModified:
            dlg = wx.MessageDialog(self, message=sConfirmQuit, style = wx.OK | wx.CANCEL)
            val = dlg.ShowModal()
            if val != wx.ID_OK: return  # user cancelled the quit
        self.Destroy()
#-------------------
    def OnMenu_Help(self, event):
        """Help > Content menu handler: show the sHelp text in a dialog."""
        dlg = wx.MessageDialog(self, message=sHelp,
                               caption="wxvisu help",
                               style=wx.ICON_INFORMATION)
        dlg.CenterOnScreen()
        val = dlg.ShowModal()
        dlg.Destroy()
    def OnMenu_About(self, event):
        """Help > About menu handler: show the sAbout text in a dialog."""
        dlg = wx.MessageDialog(self, message=sAbout,
                               caption="About wxvisu",
                               style=wx.ICON_INFORMATION)
        dlg.CenterOnScreen()
        val = dlg.ShowModal()
        dlg.Destroy()
#-------------------
    def OnCanvas_Enter(self, obj):
        """Show the XML serialization of the canvas object under the mouse
        in a transient tooltip window, auto-closed after 3 seconds.

        Python 2 only: uses unicode() with the module-wide sEncoding.
        """
        # txt = self.doc.obj_n[obj].nsProp('segmantic_type', 'http://www.xrce.xerox.com/ML')
        # if not txt:
        #     txt = self.doc.obj_n[obj].name
        txt = self.doc.obj_n[obj].serialize()
        txt = unicode(txt, sEncoding)
        tip = wx.TipWindow(self, txt)
        wx.FutureCall(3000, tip.Close)
    def OnToolbar_BackwardPage(self, evt):
        """Click on the backward button of the canvas toolbar: display the
        previous page (no-op when no document is loaded)."""
        if self.doc:
            self.display_page( self.doc.getPrevPageIndex() )
    def OnToolbar_ForwardPage(self, evt):
        """Click on the forward button of the canvas toolbar: display the
        next page (no-op when no document is loaded)."""
        if self.doc:
            self.display_page( self.doc.getNextPageIndex() )
    def OnToolbar_Jump(self, evt, index, (x,y,w,h)):
        """Jump to page ``index``; when x is truthy, also focus the
        (x, y, w, h) area. The page being left is pushed on the jump-back
        stack so OnToolbar_JumpBack can return to it.

        Python 2 only: uses tuple parameter unpacking in the signature.
        """
        if self.doc:
            #TODO!
            if index:
                self.stackJump.append( self.doc.getDisplayedIndex() )
            if x:
                self.display_page( index, (x,y,w,h))
            else:
                self.display_page( index ) #precise at the page level, not more.
    def OnToolbar_JumpBack(self, evt):
        """Click on the jump-back button of the canvas toolbar: return to the
        most recently stacked page index (no-op when the stack is empty)."""
        if self.doc and self.stackJump:
            self.display_page( self.stackJump.pop() )
    def OnToolbar_UntickAll(self, evt):
        """Disable every decoration, untick its checkbox, and redisplay the
        current page.

        Separator entries have no checkbox; the lookup in dDeco2ChekBox then
        raises and is deliberately swallowed.
        """
        for deco in self.config.getDecoList():
            try:
                deco.setEnabled(False)
                self.dDeco2ChekBox[deco].SetValue(0)
            except:
                # DecoSeparator: no checkbox was created for it
                pass
        if self.doc:
            d = self.doc.displayed
            self.display_page(d)
def OnToolbar_ChangePage(self, evt):
"""Modify the page number in the text control of the toolbar of the
canvas"""
if self.doc:
new_page = evt.GetString()
# to deal with "n (n/N)" - hack | |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.18436,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.347493,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.846305,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.675572,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.16985,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.67094,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.51636,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.538025,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.56174,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.159885,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.02449,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.252185,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.181119,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.41207,
'Execution Unit/Register Files/Runtime Dynamic': 0.205609,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.65989,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.67218,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 5.14702,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00309542,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00309542,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00268775,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.0010359,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00260178,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0114804,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0299773,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.174114,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.490841,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.591369,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.29778,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0433456,
'L2/Runtime Dynamic': 0.00926367,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 7.60604,
'Load Store Unit/Data Cache/Runtime Dynamic': 3.07636,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.20605,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.20605,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 8.58301,
'Load Store Unit/Runtime Dynamic': 4.29857,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.508083,
'Load Store Unit/StoreQ/Runtime Dynamic': 1.01617,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.18032,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.180735,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0811651,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.870233,
'Memory Management Unit/Runtime Dynamic': 0.2619,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 30.5888,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.557803,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0412572,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.343812,
'Renaming Unit/Int Front End RAT/Subthreshold | |
-0.342929519602),
(0.279196853654, -0.342990041745),
(0.273185849664, -0.343049210031),
(0.267171726877, -0.343107029136),
(0.261154558848, -0.34316350371),
(0.255134418626, -0.343218638375),
(0.249111378777, -0.343272437724),
(0.2430855114, -0.343324906316),
(0.237056888143, -0.343376048676),
(0.23102558023, -0.343425869291),
(0.224991658471, -0.343474372606),
(0.218955193286, -0.343521563026),
(0.212916254721, -0.343567444906),
(0.206874912467, -0.343612022552),
(0.200831235877, -0.343655300219),
(0.200852273965, -0.340715198856),
(0.200873125061, -0.337774168942),
(0.20089378942, -0.334832219663),
(0.200914267286, -0.331889360172),
(0.200934558896, -0.328945599594),
(0.200954664474, -0.326000947023),
(0.20097458424, -0.323055411524),
(0.200994318402, -0.320109002131),
(0.20101386716, -0.317161727851),
(0.201033230706, -0.314213597663),
(0.201052409224, -0.311264620514),
(0.201071402888, -0.308314805328),
(0.201090211866, -0.305364160996),
(0.201108836318, -0.302412696385),
(0.201127276397, -0.299460420333),
(0.201145532246, -0.296507341653),
(0.201163604004, -0.293553469129),
(0.2011814918, -0.290598811521),
(0.201199195758, -0.287643377559),
(0.201216715995, -0.284687175953),
(0.201234052621, -0.281730215382),
(0.20125120574, -0.278772504503),
(0.201268175448, -0.275814051948),
(0.201284961839, -0.272854866322),
(0.201301564997, -0.269894956208),
(0.201317985001, -0.266934330164),
(0.201334221927, -0.263972996724),
(0.201350275843, -0.261010964399),
(0.201366146812, -0.258048241676),
(0.201381834893, -0.255084837021),
(0.20139734014, -0.252120758876),
(0.2014126626, -0.249156015659),
(0.207474049692, -0.249125168249),
(0.213533111442, -0.249093403855),
(0.21958978032, -0.249060719001),
(0.225643988558, -0.249027110186),
(0.23169566814, -0.248992573892),
(0.237744750778, -0.248957106582),
(0.243791167896, -0.248920704713),
(0.249834850614, -0.248883364731),
(0.255875729728, -0.248845083084),
(0.261913735689, -0.248805856221),
(0.267948798591, -0.248765680595),
(0.273980848142, -0.24872455267),
(0.280009813654, -0.248682468926),
(0.286035624015, -0.248639425858),
(0.292058207677, -0.24859541998),
(0.298077492629, -0.248550447834),
(0.304093406378, -0.248504505986),
(0.310105875933, -0.248457591031),
(0.316114827779, -0.248409699598),
(0.322120187858, -0.24836082835),
(0.328121881547, -0.248310973985),
(0.334119833638, -0.248260133241),
(0.340113968319, -0.248208302893),
(0.346104209146, -0.248155479758),
(0.352090479028, -0.248101660692),
(0.358072700202, -0.248046842594),
(0.364050794215, -0.247991022403),
(0.370024681898, -0.247934197099),
(0.375994283348, -0.247876363699),
(0.381959517906, -0.24781751926),
(0.387920304136, -0.247757660873),
(0.393876559802, -0.247696785663)]},
25: {'color': 'skyblue',
'polygon': [(0.187353724482, -0.249273548096),
(0.187339953571, -0.252239113384),
(0.187326012614, -0.255204014375),
(0.187311901577, -0.258168242672),
(0.18729762042, -0.261131789858),
(0.1872831691, -0.264094647491),
(0.187268547569, -0.267056807108),
(0.187253755774, -0.270018260224),
(0.187238793655, -0.272978998329),
(0.187223661151, -0.275939012891),
(0.187208358191, -0.278898295354),
(0.187192884703, -0.281856837137),
(0.187177240604, -0.284814629637),
(0.18716142581, -0.287771664225),
(0.187145440229, -0.290727932246),
(0.187129283763, -0.293683425023),
(0.187112956306, -0.296638133851),
(0.187096457749, -0.29959205),
(0.187079787973, -0.302545164714),
(0.187062946853, -0.305497469212),
(0.18704593426, -0.308448954684),
(0.187028750053, -0.311399612295),
(0.187011394087, -0.314349433184),
(0.186993866208, -0.317298408458),
(0.186976166256, -0.320246529202),
(0.18695829406, -0.323193786469),
(0.186940249445, -0.326140171285),
(0.186922032224, -0.329085674647),
(0.186903642205, -0.332030287524),
(0.186885079186, -0.334974000855),
(0.186866342956, -0.337916805551),
(0.186847433295, -0.34085869249),
(0.186828349976, -0.343799652522),
(0.18077743085, -0.343839611059),
(0.174724473656, -0.343878287309),
(0.168669546252, -0.34391568518),
(0.162612716265, -0.343951808503),
(0.156554051111, -0.343986661035),
(0.15049361801, -0.344020246452),
(0.144431484002, -0.344052568345),
(0.138367715963, -0.344083630218),
(0.132302380616, -0.344113435487),
(0.126235544551, -0.344141987471),
(0.120167274238, -0.344169289395),
(0.114097636039, -0.344195344384),
(0.108026696224, -0.344220155459),
(0.101954520983, -0.344243725535),
(0.0958811764428, -0.344266057421),
(0.0898067286729, -0.344287153811),
(0.083731243704, -0.344307017288),
(0.0776547875378, -0.344325650316),
(0.0715774261586, -0.34434305524),
(0.0654992255451, -0.344359234285),
(0.0594202516812, -0.344374189549),
(0.0533405705669, -0.344387923005),
(0.0472602482282, -0.3444004365),
(0.0411793507274, -0.344411731745),
(0.0350979441729, -0.344421810324),
(0.0290160947276, -0.344430673685),
(0.022933868619, -0.344438323138),
(0.0168513321468, -0.344444759859),
(0.0107685516916, -0.344449984884),
(0.00468559372271, -0.344453999107),
(-0.0013974751943, -0.344456803285),
(-0.00748058839022, -0.344458398029),
(-0.00748203426554, -0.341511974501),
(-0.00748347424098, -0.338564634104),
(-0.00748490832126, -0.335616385667),
(-0.00748633651361, -0.332667237997),
(-0.00748775882764, -0.329717199873),
(-0.0074891752754, -0.326766280051),
(-0.00749058587118, -0.323814487264),
(-0.00749199063156, -0.32086183022),
(-0.00749338957527, -0.317908317604),
(-0.00749478272317, -0.314953958078),
(-0.00749617009822, -0.311998760282),
(-0.00749755172535, -0.309042732833),
(-0.00749892763136, -0.306085884326),
(-0.00750029784501, -0.303128223334),
(-0.00750166239681, -0.300169758409),
(-0.007503021319, -0.297210498082),
(-0.00750437464552, -0.294250450863),
(-0.00750572241191, -0.291289625241),
(-0.00750706465516, -0.288328029685),
(-0.00750840141384, -0.285365672645),
(-0.00750973272784, -0.28240256255),
(-0.00751105863844, -0.27943870781),
(-0.00751237918817, -0.276474116816),
(-0.00751369442071, -0.273508797939),
(-0.00751500438093, -0.270542759534),
(-0.00751630911472, -0.267576009935),
(-0.00751760866902, -0.26460855746),
(-0.00751890309164, -0.261640410407),
(-0.00752019243129, -0.258671577058),
(-0.00752147673748, -0.255702065677),
(-0.00752275606044, -0.252731884512),
(-0.00752403045109, -0.249761041793),
(-0.00142302497471, -0.249759056805),
(0.00467792956925, -0.249756229275),
(0.0107787662045, -0.249752558881),
(0.0168794180705, -0.249748045171),
(0.0229798184177, -0.249742687557),
(0.0290799006044, -0.249736485322),
(0.0351795980912, -0.249729437614),
(0.0412788444365, -0.249721543456),
(0.0473775732914, -0.24971280174),
(0.0534757183937, -0.249703211234),
(0.0595732135621, -0.24969277058),
(0.0656699926901, -0.249681478301),
(0.0717659897387, -0.249669332797),
(0.0778611387298, -0.249656332352),
(0.0839553737386, -0.249642475135),
(0.0900486288857, -0.249627759202),
(0.0961408383286, -0.249612182502),
(0.102231936253, -0.249595742875),
(0.108321856866, -0.24957843806),
(0.114410534381, -0.249560265694),
(0.120497903014, -0.249541223319),
(0.126583896971, -0.249521308384),
(0.132668450436, -0.249500518248),
(0.13875149756, -0.249478850185),
(0.144832972452, -0.249456301387),
(0.150912809166, -0.249432868968),
(0.156990941687, -0.249408549971),
(0.163067303918, -0.249383341365),
(0.169141829672, -0.249357240057),
(0.175214452652, -0.249330242893),
(0.18128510644, -0.24930234666),
(0.187353724482, -0.249273548096)]},
26: {'color': 'violet',
'polygon': [(-0.0109215914164, -0.24983573483),
(-0.0109190994224, -0.252806550707),
(-0.0109165995908, -0.25577670472),
(-0.0109140918731, -0.258746188637),
(-0.0109115762212, -0.261714994213),
(-0.0109090525879, -0.264683113184),
(-0.0109065209265, -0.267650537267),
(-0.0109039811913, -0.270617258164),
(-0.0109014333375, -0.273583267558),
(-0.010898877321, -0.276548557114),
(-0.010896313099, -0.279513118481),
(-0.0108937406295, -0.282476943285),
(-0.0108911598718, -0.285440023138),
(-0.0108885707864, -0.288402349629),
(-0.010885973335, -0.29136391433),
(-0.0108833674807, -0.294324708792),
(-0.0108807531878, -0.297284724548),
(-0.0108781304224, -0.300243953109),
(-0.0108754991518, -0.303202385965),
(-0.010872859345, -0.306160014589),
(-0.0108702109728, -0.309116830428),
(-0.0108675540075, -0.312072824911),
(-0.0108648884233, -0.315027989444),
(-0.0108622141962, -0.317982315411),
(-0.0108595313042, -0.320935794175),
(-0.0108568397271, -0.323888417075),
(-0.0108541394468, -0.326840175428),
(-0.0108514304475, -0.329791060526),
(-0.0108487127153, -0.33274106364),
(-0.0108459862385, -0.335690176015),
(-0.0108432510078, -0.338638388874),
(-0.0108405070162, -0.341585693412),
(-0.010837754259, -0.344532080801),
(-0.0169207858668, -0.344533713836),
(-0.0230036902563, -0.344534137996),
(-0.0290864003365, -0.344533353359),
(-0.0351688488835, -0.344531359857),
(-0.0412509685354, -0.344528157276),
(-0.0473326917865, -0.344523745256),
(-0.0534139509829, -0.34451812329),
(-0.0594946783171, -0.344511290724),
(-0.0655748058239, -0.344503246761),
(-0.0716542653758, -0.344493990454),
(-0.0777329886791, -0.344483520712),
(-0.0838109072699, -0.344471836298),
(-0.0898879525108, -0.344458935831),
(-0.095964055587, -0.344444817785),
(-0.102039147503, -0.344429480489),
(-0.10811315908, -0.344412922132),
(-0.114186020953, -0.344395140757),
(-0.120257663566, -0.344376134271),
(-0.126328017172, -0.344355900435),
(-0.13239701183, -0.344334436876),
(-0.1384645774, -0.34431174108),
(-0.144530643543, -0.344287810398),
(-0.150595139719, -0.344262642045),
(-0.156657995182, -0.344236233101),
(-0.16271913898, -0.344208580516),
(-0.168778499953, -0.344179681106),
(-0.174836006727, -0.344149531558),
(-0.180891587719, -0.344118128431),
(-0.186945171125, -0.344085468157),
(-0.192996684926, -0.344051547043),
(-0.199046056882, -0.344016361272),
(-0.205093214529, -0.343979906905),
(-0.205116727141, -0.341040391114),
(-0.205140049305, -0.338099941532),
(-0.205163181221, -0.335158567015),
(-0.205186123082, -0.332216276397),
(-0.205208875081, -0.329273078493),
(-0.205231437403, -0.326328982096),
(-0.205253810235, -0.323383995978),
(-0.205275993755, -0.320438128892),
(-0.205297988142, -0.317491389573),
(-0.205319793569, -0.314543786735),
(-0.205341410207, -0.311595329075),
(-0.205362838224, -0.308646025268),
(-0.205384077784, -0.305695883975),
(-0.205405129048, -0.302744913837),
(-0.205425992174, -0.299793123478),
(-0.205446667318, -0.296840521504),
(-0.205467154631, -0.293887116504),
(-0.205487454264, -0.290932917052),
(-0.205507566361, -0.287977931703),
(-0.205527491067, -0.285022168997),
(-0.205547228522, -0.282065637458),
(-0.205566778865, -0.279108345595),
(-0.20558614223, -0.276150301901),
(-0.205605318749, -0.273191514851),
(-0.205624308553, -0.27023199291),
(-0.205643111769, -0.267271744525),
(-0.20566172852, -0.264310778128),
(-0.205680158929, -0.261349102138),
(-0.205698403116, -0.25838672496),
(-0.205716461197, -0.255423654984),
(-0.205734333287, -0.252459900586),
(-0.205752019497, -0.24949547013),
(-0.199686819382, -0.249519656697),
(-0.193619414292, -0.249542940315),
(-0.187549875781, -0.249565324555),
(-0.181478275254, -0.249586812908),
(-0.175404683968, -0.249607408776),
(-0.169329173033, -0.249627115475),
(-0.163251813411, -0.249645936225),
(-0.157172675918, -0.249663874153),
(-0.151091831223, -0.249680932287),
(-0.145009349852, -0.249697113554),
(-0.138925302184, -0.249712420776),
(-0.132839758453, -0.249726856669),
(-0.126752788748, -0.249740423839),
(-0.120664463012, -0.249753124781),
(-0.114574851045, -0.249764961876),
(-0.1084840225, -0.249775937385),
(-0.102392046887, -0.249786053455),
(-0.0962989935698, -0.249795312107),
(-0.0902049317686, -0.249803715243),
(-0.084109930559, -0.249811264637),
(-0.0780140588727, -0.249817961938),
(-0.0719173854981, -0.249823808668),
(-0.0658199790807, -0.249828806215),
(-0.0597219081237, -0.249832955841),
(-0.0536232409894, -0.249836258673),
(-0.0475240458996, -0.249838715705),
(-0.0414243909375, -0.249840327797),
(-0.0353243440484, -0.249841095677),
(-0.0292239730419, -0.249841019933),
(-0.0231233455936, -0.24984010102),
(-0.0170225292471, -0.249838339258),
(-0.0109215914164, -0.24983573483)]},
27: {'color': 'violet',
'polygon': [(-0.219818586895, -0.249447025895),
(-0.21980103157, -0.252410541781),
(-0.219783276984, -0.255373379837),
(-0.21976532301, -0.258335531679),
(-0.219747169515, -0.261296988909),
(-0.219728816367, -0.264257743115),
(-0.219710263429, -0.267217785871),
(-0.219691510563, -0.270177108737),
(-0.219672557625, -0.27313570326),
(-0.219653404472, -0.27609356097),
(-0.219634050956, -0.279050673384),
(-0.219614496925, -0.282007032004),
(-0.219594742226, -0.284962628317),
(-0.219574786703, -0.287917453793),
(-0.219554630194, -0.29087149989),
(-0.219534272537, -0.293824758045),
(-0.219513713566, -0.296777219684),
(-0.219492953112, -0.299728876213),
(-0.219471991, -0.302679719023),
(-0.219450827056, -0.305629739489),
(-0.219429461099, -0.308578928966),
(-0.219407892946, -0.311527278795),
(-0.219386122412, -0.314474780298),
(-0.219364149305, -0.317421424776),
(-0.219341973434, -0.320367203517),
(-0.219319594599, -0.323312107786),
(-0.2192970126, -0.326256128832),
(-0.219274227233, -0.329199257882),
(-0.219251238288, -0.332141486146),
(-0.219228045554, -0.335082804812),
(-0.219204648813, -0.338023205049),
(-0.219181047846, -0.340962678003),
(-0.219157242428, -0.343901214803),
(-0.225196526871, -0.343858017301),
(-0.231233281911, -0.343813533497),
(-0.237267433849, -0.343767758856),
(-0.243298908739, -0.343720688731),
(-0.249327632394, -0.343672318361),
(-0.255353530371, -0.343622642876),
(-0.261376527975, -0.343571657298),
(-0.26739655025, -0.343519356538),
(-0.273413521974, -0.343465735405),
(-0.279427367656, -0.343410788597),
(-0.285438011529, -0.343354510709),
(-0.291445377542, -0.343296896234),
(-0.297449389356, -0.343237939558),
(-0.303449970337, -0.343177634965),
(-0.309447043547, -0.343115976637),
(-0.315440531734, -0.343052958652),
(-0.321430357329, -0.342988574989),
(-0.327416442434, -0.342922819519),
(-0.33339870881, -0.342855686016),
(-0.33937707787, -0.342787168147),
(-0.345351470668, -0.342717259476),
(-0.351321807887, -0.342645953464),
(-0.357288009824, -0.342573243466),
(-0.363249996383, -0.342499122727),
(-0.369207687056, -0.342423584389),
(-0.375161000912, -0.342346621479),
(-0.38110985658, -0.342268226916),
(-0.387054172235, -0.342188393502),
(-0.392993865578, -0.342107113924),
(-0.398928853824, -0.34202438075),
(-0.404859053678, -0.341940186423),
(-0.410784381319, -0.341854523264),
(-0.410829881436, -0.338935920784),
(-0.410874973529, -0.336016338678),
(-0.410919658335, -0.333095786285),
(-0.410963936584, -0.330174272927),
(-0.411007808992, -0.327251807901),
(-0.411051276268, -0.324328400488),
(-0.411094339112, -0.321404059946),
(-0.411136998212, -0.318478795515),
(-0.411179254248, -0.315552616415),
(-0.41122110789, -0.312625531848),
(-0.411262559799, -0.309697550998),
(-0.411303610628, -0.30676868303),
(-0.411344261019, -0.30383893709),
(-0.411384511607, -0.30090832231),
(-0.411424363016, -0.2979768478),
(-0.411463815863, -0.295044522657),
(-0.411502870757, -0.292111355961),
(-0.411541528295, -0.289177356772),
(-0.411579789069, -0.286242534139),
(-0.411617653661, -0.28330689709),
(-0.411655122645, -0.280370454642),
(-0.411692196586, -0.277433215793),
(-0.411728876043, -0.274495189527),
(-0.411765161566, -0.271556384813),
(-0.411801053694, -0.268616810605),
(-0.411836552963, -0.265676475843),
(-0.411871659899, -0.262735389452),
(-0.411906375018, -0.259793560342),
(-0.411940698832, -0.25685099741),
(-0.411974631843, -0.253907709538),
(-0.412008174547, -0.250963705595),
(-0.412041327432, -0.248018994436),
(-0.40609647668, -0.248079219925),
(-0.40014684327, -0.248138392194),
(-0.394192507431, -0.248196516052),
(-0.388233548924, -0.248253596293),
(-0.382270047068, -0.248309637691),
(-0.376302080754, -0.248364645004),
(-0.370329728473, -0.248418622977),
(-0.364353068327, -0.248471576337),
(-0.35837217805, -0.248523509799),
(-0.352387135026, -0.248574428059),
(-0.346398016301, -0.248624335801),
(-0.340404898604, -0.248673237691),
(-0.334407858356, -0.248721138378),
(-0.328406971684, -0.248768042494),
(-0.322402314437, -0.248813954648),
(-0.316393962195, -0.248858879433),
(-0.31038199028, -0.248902821416),
(-0.304366473768, -0.248945785139),
(-0.298347487497, -0.248987775119),
(-0.292325106079, -0.249028795844),
(-0.286299403904, -0.24906885177),
(-0.280270455153, -0.249107947321),
(-0.274238333799, -0.249146086884),
(-0.26820311362, -0.249183274806),
(-0.262164868202, -0.249219515395),
(-0.256123670945, -0.249254812914),
(-0.250079595065, -0.249289171577),
(-0.244032713607, -0.249322595549),
(-0.237983099441, -0.249355088942),
(-0.231930825269, -0.249386655811),
(-0.225875963628, -0.249417300151),
(-0.219818586895, -0.249447025895)]},
28: {'color': 'violet',
'polygon': [(-0.425677080722, -0.248055266679),
(-0.425644048873, -0.250998094997),
(-0.425610612257, -0.253940212431),
(-0.425576770344, -0.256881610077),
(-0.425542522591, -0.259822279018),
(-0.425507868452, -0.262762210321),
(-0.42547280737, -0.265701395041),
(-0.425437338781, -0.268639824216),
(-0.425401462112, -0.271577488872),
(-0.425365176783, -0.274514380019),
(-0.425328482203, -0.277450488652),
(-0.425291377777, -0.280385805753),
(-0.425253862898, -0.283320322286),
(-0.425215936951, -0.286254029201),
(-0.425177599313, -0.289186917432),
(-0.425138849351, -0.292118977898),
(-0.425099686426, -0.295050201501),
(-0.425060109888, -0.297980579126),
(-0.425020119076, -0.300910101643),
(-0.424979713325, -0.303838759905),
(-0.424938891956, -0.306766544746),
(-0.424897654284, -0.309693446985),
(-0.424855999612, -0.312619457421),
(-0.424813927235, -0.315544566837),
(-0.424771436439, -0.318468765996),
(-0.4247285265, -0.321392045645),
(-0.424685196682, -0.324314396508),
(-0.424641446243, -0.327235809293),
(-0.424597274428, -0.330156274687),
(-0.424552680473, -0.333075783358),
(-0.424507663603, -0.335994325951),
(-0.424462223034, -0.338911893092),
(-0.424416357972, -0.341828475387),
(-0.430324982901, -0.341735142317),
(-0.436228369422, -0.341640306615),
(-0.442126430557, -0.341543959896),
(-0.448019078659, -0.341446093622),
(-0.453906225385, -0.341346699096),
(-0.459787781663, -0.341245767455),
(-0.46566365767, -0.341143289662),
(-0.471533762794, -0.341039256503),
(-0.477398005611, -0.340933658575),
(-0.483256293845, -0.340826486284),
(-0.489108534338, -0.340717729832),
(-0.494954633015, -0.340607379207),
(-0.500794494843, -0.340495424183),
(-0.506628023801, -0.340381854298),
(-0.512455122832, -0.340266658856),
(-0.518275693808, -0.340149826906),
(-0.524089637486, -0.34003134724),
(-0.529896853463, -0.339911208376),
(-0.535697240133, -0.339789398546),
(-0.541490694641, -0.339665905689),
(-0.547277112832, -0.339540717431),
(-0.553056389201, -0.339413821076),
(-0.558828416846, -0.339285203592),
(-0.564593087409, -0.339154851592),
(-0.570350291025, -0.339022751325),
(-0.576099916266, -0.338888888656),
(-0.58184185008, -0.338753249052),
(-0.587575977732, -0.338615817563),
(-0.593302182744, -0.338476578805),
(-0.59902034683, -0.338335516945),
(-0.604730349829, -0.338192615679),
(-0.610432069642, -0.338047858212),
(-0.61050390095, -0.335166556951),
(-0.610575053779, -0.332284188364),
(-0.610645529727, -0.329400762866),
(-0.610715330382, -0.326516290847),
(-0.610784457312, -0.32363078267),
(-0.610852912076, -0.320744248675),
(-0.610920696218, -0.317856699175),
(-0.610987811266, -0.314968144458),
(-0.611054258736, -0.31207859479),
(-0.611120040131, -0.309188060411),
(-0.611185156938, -0.306296551539),
(-0.611249610632, -0.303404078369),
(-0.611313402674, -0.300510651073),
(-0.611376534509, -0.297616279803),
(-0.61143900757, -0.294720974685),
(-0.611500823278, -0.291824745827),
(-0.611561983036, -0.288927603314),
(-0.611622488237, -0.286029557211),
(-0.611682340258, -0.283130617562),
(-0.611741540464, -0.28023079439),
(-0.611800090203, -0.2773300977),
(-0.611857990813, -0.274428537474),
(-0.611915243616, -0.271526123677),
(-0.611971849921, -0.268622866254),
(-0.612027811022, -0.26571877513),
(-0.612083128201, -0.262813860212),
(-0.612137802725, -0.259908131388),
(-0.612191835848, -0.257001598526),
(-0.612245228809, -0.254094271479),
(-0.612297982834, -0.251186160078),
(-0.612350099134, -0.248277274139),
(-0.612401578909, -0.245367623458),
(-0.606674817546, -0.245470049148),
(-0.600939997943, -0.245571210808),
(-0.595197238763, -0.245671117675),
(-0.589446656641, -0.245769778649),
(-0.58368836626, -0.245867202315),
(-0.577922480421, -0.24596339696),
(-0.572149110114, -0.246058370588),
(-0.56636836458, -0.246152130943),
(-0.560580351383, -0.246244685519),
(-0.554785176471, -0.246336041581),
(-0.548982944238, -0.246426206176),
(-0.543173757581, -0.246515186149),
(-0.537357717965, -0.246602988156),
(-0.531534925471, -0.246689618679),
(-0.52570547886, -0.246775084036),
(-0.519869475618, -0.246859390393),
(-0.514027012014, -0.246942543777),
(-0.508178183143, -0.247024550085),
(-0.502323082982, -0.247105415095),
(-0.496461804431, -0.247185144476),
(-0.49059443936, -0.247263743795),
(-0.484721078652, -0.247341218528),
(-0.478841812248, -0.247417574065),
(-0.472956729182, -0.24749281572),
(-0.467065917626, -0.247566948739),
(-0.461169464925, -0.247639978303),
(-0.455267457633, -0.247711909533),
(-0.44935998155, -0.247782747504),
(-0.443447121754, -0.247852497239),
(-0.437528962634, -0.247921163722),
(-0.431605587922, -0.247988751898),
(-0.425677080722, -0.248055266679)]},
29: {'color': 'violet',
'polygon': [(-0.625651578135, -0.245082412402),
(-0.625596436407, -0.247989071406),
(-0.625540639358, -0.250894961352),
(-0.625484185751, -0.253800072364),
(-0.625427074336, -0.256704394545),
(-0.625369303846, -0.259607917982),
(-0.625310873001, -0.262510632741),
(-0.625251780509, -0.265412528872),
(-0.625192025061, -0.268313596404),
(-0.625131605335, -0.271213825348),
(-0.625070519996, -0.274113205694),
(-0.625008767693, -0.277011727414),
(-0.624946347062, -0.279909380461),
(-0.624883256726, -0.282806154766),
(-0.624819495292, -0.28570204024),
(-0.624755061355, -0.288597026776),
(-0.624689953495, -0.291491104242),
(-0.624624170277, -0.29438426249),
(-0.624557710255, -0.297276491345),
(-0.624490571966, -0.300167780616),
(-0.624422753935, -0.303058120086),
(-0.624354254672, -0.305947499517),
(-0.624285072674, -0.308835908649),
(-0.624215206424, -0.311723337198),
(-0.62414465439, -0.314609774858),
(-0.624073415028, -0.317495211297),
(-0.624001486779, -0.320379636162),
(-0.62392886807, -0.323263039073),
(-0.623855557314, -0.326145409624),
(-0.623781552911, -0.329026737388),
(-0.623706853246, -0.331907011908),
(-0.623631456692, -0.334786222701),
(-0.623555361606, -0.33766435926),
(-0.629228687593, -0.337517840464),
(-0.634893177761, -0.337369382808),
(-0.640548698465, | |
Returns: The help text.
"""
pass
    def GetHostRawElementProviderCore(self, *args):  # cannot find CLR method
        """
        GetHostRawElementProviderCore(self: AutomationPeer) -> HostedWindowWrapper
        Tells UI Automation where in the UI Automation tree to place the hwnd being
        hosted by a Windows Presentation Foundation (WPF) element.
        Returns: This method returns the hosted hwnd to UI Automation for controls that host
        hwnd objects.
        """
        pass
    def GetItemStatusCore(self, *args):  # cannot find CLR method
        """
        GetItemStatusCore(self: ItemAutomationPeer) -> str
        Gets a string that conveys the visual status of the specified
        System.Windows.UIElement.
        Returns: The status. Examples include "Busy" or "Online".
        """
        pass
    def GetItemTypeCore(self, *args):  # cannot find CLR method
        """
        GetItemTypeCore(self: ItemAutomationPeer) -> str
        Gets a human-readable string that contains the type of item that the specified
        System.Windows.UIElement represents.
        Returns: The item type. An example includes "Mail Message" or "Contact".
        """
        pass
    def GetLabeledByCore(self, *args):  # cannot find CLR method
        """
        GetLabeledByCore(self: ItemAutomationPeer) -> AutomationPeer
        Gets the System.Windows.Automation.Peers.AutomationPeer for the
        System.Windows.Controls.Label that is targeted to the specified
        System.Windows.UIElement.
        Returns: The System.Windows.Automation.Peers.LabelAutomationPeer for the element that is
        targeted by the System.Windows.Controls.Label.
        """
        pass
    def GetLocalizedControlTypeCore(self, *args):  # cannot find CLR method
        """
        GetLocalizedControlTypeCore(self: AutomationPeer) -> str
        When overridden in a derived class, is called by
        System.Windows.Automation.Peers.AutomationPeer.GetLocalizedControlType.
        Returns: The type of the control.
        """
        pass
    def GetNameCore(self, *args):  # cannot find CLR method
        """
        GetNameCore(self: ItemAutomationPeer) -> str
        Gets the text label of the System.Windows.UIElement that corresponds to the
        data item in the System.Windows.Controls.ItemsControl.Items collection that is
        associated with this System.Windows.Automation.Peers.ItemAutomationPeer.
        Returns: The text label.
        """
        pass
    def GetOrientationCore(self, *args):  # cannot find CLR method
        """
        GetOrientationCore(self: ItemAutomationPeer) -> AutomationOrientation
        Gets a value that indicates whether the specified System.Windows.UIElement is
        laid out in a particular direction.
        Returns: The direction of the specified System.Windows.UIElement. Optionally, the method
        returns System.Windows.Automation.Peers.AutomationOrientation.None if the
        System.Windows.UIElement is not laid out in a particular direction.
        """
        pass
    def GetPattern(self, patternInterface):
        """
        GetPattern(self: DataGridItemAutomationPeer, patternInterface: PatternInterface) -> object
        Returns the object that supports the specified control pattern of the element
        that is associated with this automation peer.
        patternInterface: An enumeration value that specifies the control pattern.
        Returns: The current System.Windows.Automation.Peers.DataGridCellItemAutomationPeer
        object, if patternInterface is a supported value; otherwise, null. For more
        information, see Remarks.
        """
        pass
    def GetPeerFromPointCore(self, *args):  # cannot find CLR method
        """ GetPeerFromPointCore(self: DataGridItemAutomationPeer, point: Point) -> AutomationPeer """
        pass
    def HasKeyboardFocusCore(self, *args):  # cannot find CLR method
        """
        HasKeyboardFocusCore(self: ItemAutomationPeer) -> bool
        Gets a value that indicates whether the specified System.Windows.UIElement
        currently has keyboard input focus.
        Returns: true if the specified System.Windows.UIElement has keyboard input focus;
        otherwise, false.
        """
        pass
    def IsContentElementCore(self, *args):  # cannot find CLR method
        """
        IsContentElementCore(self: ItemAutomationPeer) -> bool
        Gets a value that indicates whether the specified System.Windows.UIElement
        contains data that is presented to the user.
        Returns: true if the element is a content element; otherwise, false.
        """
        pass
    def IsControlElementCore(self, *args):  # cannot find CLR method
        """
        IsControlElementCore(self: ItemAutomationPeer) -> bool
        Gets a value that indicates whether the System.Windows.UIElement that is
        associated with this System.Windows.Automation.Peers.ItemAutomationPeer is
        understood by the end user as interactive.
        Returns: true if the element is a control; otherwise, false.
        """
        pass
    def IsEnabledCore(self, *args):  # cannot find CLR method
        """
        IsEnabledCore(self: ItemAutomationPeer) -> bool
        Gets a value that indicates whether the specified System.Windows.UIElement can
        receive and send events.
        Returns: true if the UI Automation peer can receive and send events; otherwise, false.
        """
        pass
    def IsKeyboardFocusableCore(self, *args):  # cannot find CLR method
        """
        IsKeyboardFocusableCore(self: ItemAutomationPeer) -> bool
        Gets a value that indicates whether the specified System.Windows.UIElement can
        accept keyboard focus.
        Returns: true if the element can accept keyboard focus; otherwise, false.
        """
        pass
    def IsOffscreenCore(self, *args):  # cannot find CLR method
        """
        IsOffscreenCore(self: ItemAutomationPeer) -> bool
        Gets a value that indicates whether the specified System.Windows.UIElement is
        off the screen.
        Returns: true if the specified System.Windows.UIElement is not on the screen; otherwise,
        false.
        """
        pass
    def IsPasswordCore(self, *args):  # cannot find CLR method
        """
        IsPasswordCore(self: ItemAutomationPeer) -> bool
        Gets a value that indicates whether the specified System.Windows.UIElement
        contains protected content.
        Returns: true if the specified System.Windows.UIElement contains protected content;
        otherwise, false.
        """
        pass
    def IsRequiredForFormCore(self, *args):  # cannot find CLR method
        """
        IsRequiredForFormCore(self: ItemAutomationPeer) -> bool
        Gets a value that indicates whether the specified System.Windows.UIElement is
        required to be completed on a form.
        Returns: true if the specified System.Windows.UIElement is required to be completed;
        otherwise, false.
        """
        pass
    def PeerFromProvider(self, *args):  # cannot find CLR method
        """
        PeerFromProvider(self: AutomationPeer, provider: IRawElementProviderSimple) -> AutomationPeer
        Gets an System.Windows.Automation.Peers.AutomationPeer for the specified
        System.Windows.Automation.Provider.IRawElementProviderSimple proxy.
        provider: The class that implements
        System.Windows.Automation.Provider.IRawElementProviderSimple.
        Returns: The System.Windows.Automation.Peers.AutomationPeer.
        """
        pass
    def ProviderFromPeer(self, *args):  # cannot find CLR method
        """
        ProviderFromPeer(self: AutomationPeer, peer: AutomationPeer) -> IRawElementProviderSimple
        Gets the System.Windows.Automation.Provider.IRawElementProviderSimple for the
        specified System.Windows.Automation.Peers.AutomationPeer.
        peer: The automation peer.
        Returns: The proxy.
        """
        pass
    def SetFocusCore(self, *args):  # cannot find CLR method
        """
        SetFocusCore(self: ItemAutomationPeer)
        Sets the keyboard input focus on the specified System.Windows.UIElement. The
        System.Windows.UIElement corresponds to the data item in the
        System.Windows.Controls.ItemsControl.Items collection that is associated with
        this System.Windows.Automation.Peers.ItemAutomationPeer.
        """
        pass
    def __init__(self, *args):  # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod  # known case of __new__
    def __new__(self, item, dataGridPeer):
        """ __new__(cls: type, item: object, dataGridPeer: DataGridAutomationPeer) """
        pass
IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
class DataGridRowAutomationPeer(FrameworkElementAutomationPeer):
    """
    Exposes System.Windows.Controls.DataGridRow types to UI Automation.
    DataGridRowAutomationPeer(owner: DataGridRow)
    """
    @staticmethod  # known case of __new__
    def __new__(self, owner):
        """ __new__(cls: type, owner: DataGridRow) """
        pass
    IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
    """
class DataGridRowHeaderAutomationPeer(ButtonBaseAutomationPeer):
    """
    Exposes System.Windows.Controls.Primitives.DataGridRowHeader types to UI Automation.
    DataGridRowHeaderAutomationPeer(owner: DataGridRowHeader)
    """
    @staticmethod  # known case of __new__
    def __new__(self, owner):
        """ __new__(cls: type, owner: DataGridRowHeader) """
        pass
    IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
    """
class DatePickerAutomationPeer(FrameworkElementAutomationPeer, IExpandCollapseProvider, IValueProvider):
"""
Exposes System.Windows.Controls.DatePicker types to UI Automation.
DatePickerAutomationPeer(owner: DatePicker)
"""
    def GetAcceleratorKeyCore(self, *args):  # cannot find CLR method
        """
        GetAcceleratorKeyCore(self: UIElementAutomationPeer) -> str
        Gets the accelerator key for the System.Windows.UIElement that is associated
        with this System.Windows.Automation.Peers.UIElementAutomationPeer. This method
        is called by System.Windows.Automation.Peers.AutomationPeer.GetAcceleratorKey.
        Returns: The System.Windows.Automation.AutomationProperties.AcceleratorKey that is
        returned by
        System.Windows.Automation.AutomationProperties.GetAcceleratorKey(System.Windows.
        DependencyObject).
        """
        pass
    def GetAccessKeyCore(self, *args):  # cannot find CLR method
        """
        GetAccessKeyCore(self: UIElementAutomationPeer) -> str
        Gets the access key for the System.Windows.UIElement that is associated with
        this System.Windows.Automation.Peers.UIElementAutomationPeer. This method is
        called by System.Windows.Automation.Peers.AutomationPeer.GetAccessKey.
        Returns: The access key for the System.Windows.UIElement that is associated with this
        System.Windows.Automation.Peers.UIElementAutomationPeer.
        """
        pass
    def GetAutomationControlTypeCore(self, *args):  # cannot find CLR method
        """ GetAutomationControlTypeCore(self: DatePickerAutomationPeer) -> AutomationControlType """
        pass
    def GetAutomationIdCore(self, *args):  # cannot find CLR method
        """
        GetAutomationIdCore(self: FrameworkElementAutomationPeer) -> str
        Gets the string that uniquely identifies the System.Windows.FrameworkElement
        that is associated with this
        System.Windows.Automation.Peers.FrameworkElementAutomationPeer. Called by
        System.Windows.Automation.Peers.AutomationPeer.GetAutomationId.
        Returns: The automation identifier for the element associated with the
        System.Windows.Automation.Peers.FrameworkElementAutomationPeer, or
        System.String.Empty if there isn't an automation identifier.
        """
        pass
def GetBoundingRectangleCore(self, *args): #cannot find CLR method
"""
GetBoundingRectangleCore(self: UIElementAutomationPeer) -> Rect
Gets the | |
isinstance(task.importer_spec.artifact_uri, str):
importer_spec.artifact_uri.constant.string_value = task.importer_spec.artifact_uri
return importer_spec
def build_container_spec_for_task(
    task: pipeline_task.PipelineTask
) -> pipeline_spec_pb2.PipelineDeploymentConfig.PipelineContainerSpec:
    """Builds a PipelineContainerSpec for a pipeline task.

    Args:
        task: The task to build a PipelineContainerSpec for.

    Returns:
        A PipelineContainerSpec object for the task.
    """
    spec_cls = pipeline_spec_pb2.PipelineDeploymentConfig.PipelineContainerSpec
    source = task.container_spec

    # A task with no env dict contributes an empty env list.
    env_vars = [
        spec_cls.EnvVar(name=name, value=value)
        for name, value in (source.env or {}).items()
    ]
    container_spec = spec_cls(
        image=source.image,
        command=source.command,
        args=source.args,
        env=env_vars,
    )

    resources = source.resources
    if resources is not None:
        container_spec.resources.cpu_limit = resources.cpu_limit
        container_spec.resources.memory_limit = resources.memory_limit
        if resources.accelerator_count is not None:
            accelerator = spec_cls.ResourceSpec.AcceleratorConfig(
                type=resources.accelerator_type,
                count=resources.accelerator_count,
            )
            container_spec.resources.accelerator.CopyFrom(accelerator)
    return container_spec
def _fill_in_component_input_default_value(
    component_spec: pipeline_spec_pb2.ComponentSpec,
    input_name: str,
    default_value: Optional[type_utils.PARAMETER_TYPES],
) -> None:
    """Fills in the default of a component input parameter.

    Args:
        component_spec: The ComponentSpec to update in place.
        input_name: The name of the input parameter.
        default_value: The default value of the input parameter. A None value
            leaves the spec untouched.
    """
    if default_value is None:
        return

    parameter = component_spec.input_definitions.parameters[input_name]
    param_types = pipeline_spec_pb2.ParameterType

    # Both numeric parameter kinds are stored in the Value.number_value slot.
    if parameter.parameter_type in (param_types.NUMBER_INTEGER,
                                    param_types.NUMBER_DOUBLE):
        parameter.default_value.number_value = default_value
    elif parameter.parameter_type == param_types.STRING:
        parameter.default_value.string_value = default_value
    elif parameter.parameter_type == param_types.BOOLEAN:
        parameter.default_value.bool_value = default_value
    elif parameter.parameter_type in (param_types.STRUCT, param_types.LIST):
        # Structured defaults round-trip through a protobuf Value message.
        parameter.default_value.CopyFrom(_to_protobuf_value(default_value))
def build_component_spec_for_group(
    pipeline_channels: List[pipeline_channel.PipelineChannel],
    is_root_group: bool,
) -> pipeline_spec_pb2.ComponentSpec:
    """Builds ComponentSpec for a TasksGroup.

    Args:
        pipeline_channels: The list of pipeline channels referenced by the
            group.
        is_root_group: Whether the group is the pipeline's root dag.

    Returns:
        A ComponentSpec object representing the group.
    """
    component_spec = pipeline_spec_pb2.ComponentSpec()
    for channel in pipeline_channels:
        if is_root_group:
            input_name = channel.name
        else:
            input_name = _additional_input_name_for_pipeline_channel(channel)

        if isinstance(channel, pipeline_channel.PipelineArtifactChannel):
            component_spec.input_definitions.artifacts[
                input_name].artifact_type.CopyFrom(
                    type_utils.get_artifact_type_schema(channel.channel_type))
            continue

        # The channel is one of PipelineParameterChannel, LoopArgument, or
        # LoopArgumentVariable.
        component_spec.input_definitions.parameters[
            input_name].parameter_type = type_utils.get_parameter_type(
                channel.channel_type)
        if is_root_group:
            # Only root-level inputs carry user-supplied defaults.
            _fill_in_component_input_default_value(
                component_spec=component_spec,
                input_name=input_name,
                default_value=channel.value,
            )
    return component_spec
def _pop_input_from_task_spec(
    task_spec: pipeline_spec_pb2.PipelineTaskSpec,
    input_name: str,
) -> None:
    """Removes an input from task spec inputs.

    Args:
        task_spec: The pipeline task spec to update in place.
        input_name: The name of the input, which could be an artifact or
            parameter.
    """
    # NOTE(review): the name exists in at most one of the two maps, yet both
    # pops are unconditional. This appears to rely on protobuf map fields
    # auto-inserting a default entry on __getitem__ (which MutableMapping.pop
    # performs first), so neither call raises KeyError — confirm that this
    # auto-insert-then-remove behavior is intentional.
    task_spec.inputs.artifacts.pop(input_name)
    task_spec.inputs.parameters.pop(input_name)

    # Drop the inputs field entirely once it is indistinguishable from an
    # empty TaskInputsSpec, keeping the serialized spec minimal.
    if task_spec.inputs == pipeline_spec_pb2.TaskInputsSpec():
        task_spec.ClearField('inputs')
def _update_task_spec_for_loop_group(
    group: tasks_group.ParallelFor,
    pipeline_task_spec: pipeline_spec_pb2.PipelineTaskSpec,
) -> None:
    """Updates PipelineTaskSpec for a loop group.

    Args:
        group: The loop group to update task spec for.
        pipeline_task_spec: The pipeline task spec to update in place.
    """
    if group.items_is_pipeline_channel:
        # The loop iterates over an upstream output or pipeline input rather
        # than a literal list of items.
        loop_items_channel = group.loop_argument.items_or_pipeline_channel
        input_parameter_name = _additional_input_name_for_pipeline_channel(
            loop_items_channel)
        loop_argument_item_name = _additional_input_name_for_pipeline_channel(
            group.loop_argument.full_name)
        loop_arguments_item = '{}-{}'.format(
            input_parameter_name, for_loop.LoopArgument.LOOP_ITEM_NAME_BASE)
        # Sanity check: both derivations of the per-iteration item name must
        # agree.
        assert loop_arguments_item == loop_argument_item_name
        pipeline_task_spec.parameter_iterator.items.input_parameter = (
            input_parameter_name)
        pipeline_task_spec.parameter_iterator.item_input = (
            loop_argument_item_name)
        # If the loop items itself is a loop arguments variable, handle the
        # subvar name.
        if isinstance(loop_items_channel, for_loop.LoopArgumentVariable):
            pipeline_task_spec.inputs.parameters[
                input_parameter_name].parameter_expression_selector = (
                    'parseJson(string_value)["{}"]'.format(
                        loop_items_channel.subvar_name))
            pipeline_task_spec.inputs.parameters[
                input_parameter_name].component_input_parameter = (
                    _additional_input_name_for_pipeline_channel(
                        loop_items_channel.loop_argument))
    else:
        # Literal items: serialize them directly into the iterator spec.
        # sort_keys keeps the emitted JSON deterministic across compilations.
        input_parameter_name = _additional_input_name_for_pipeline_channel(
            group.loop_argument)
        raw_values = group.loop_argument.items_or_pipeline_channel
        pipeline_task_spec.parameter_iterator.items.raw = json.dumps(
            raw_values, sort_keys=True)
        pipeline_task_spec.parameter_iterator.item_input = (
            input_parameter_name)
    # The iterator itself supplies the per-item input, so it must not remain
    # among the task's regular inputs.
    _pop_input_from_task_spec(
        task_spec=pipeline_task_spec,
        input_name=pipeline_task_spec.parameter_iterator.item_input)
def _resolve_condition_operands(
    left_operand: Union[str, pipeline_channel.PipelineChannel],
    right_operand: Union[str, pipeline_channel.PipelineChannel],
) -> Tuple[str, str]:
    """Resolves values and PipelineChannels for condition operands.

    Produces the two CEL expression fragments for a dsl.Condition, promoting
    mismatched operand types using the precedence
    String > Boolean > Double > Integer.

    Args:
        left_operand: The left operand of a condition expression.
        right_operand: The right operand of a condition expression.

    Returns:
        A tuple of the resolved operands values:
        (left_operand_value, right_operand_value).

    Raises:
        ValueError: If an operand is a channel of a non-scalar type, or if
            the operand types cannot be reconciled.
    """
    # Pre-scan the operands: channels must carry scalar-typed values, since
    # CEL comparisons on struct/list inputs are not supported.
    for value_or_reference in [left_operand, right_operand]:
        if isinstance(value_or_reference, pipeline_channel.PipelineChannel):
            parameter_type = type_utils.get_parameter_type(
                value_or_reference.channel_type)
            if parameter_type in [
                    pipeline_spec_pb2.ParameterType.STRUCT,
                    pipeline_spec_pb2.ParameterType.LIST,
                    pipeline_spec_pb2.ParameterType
                    .PARAMETER_TYPE_ENUM_UNSPECIFIED,
            ]:
                input_name = _additional_input_name_for_pipeline_channel(
                    value_or_reference)
                raise ValueError('Conditional requires scalar parameter values'
                                 ' for comparison. Found input "{}" of type {}'
                                 ' in pipeline definition instead.'.format(
                                     input_name,
                                     value_or_reference.channel_type))

    # Collect the distinct parameter types of both operands.
    parameter_types = set()
    for value_or_reference in [left_operand, right_operand]:
        if isinstance(value_or_reference, pipeline_channel.PipelineChannel):
            parameter_type = type_utils.get_parameter_type(
                value_or_reference.channel_type)
        else:
            parameter_type = type_utils.get_parameter_type(
                type(value_or_reference).__name__)
        parameter_types.add(parameter_type)

    if len(parameter_types) == 2:
        # Two different types being compared. The only possible types are
        # String, Boolean, Double and Integer. We'll promote the other type
        # using the following precedence:
        # String > Boolean > Double > Integer
        if pipeline_spec_pb2.ParameterType.STRING in parameter_types:
            canonical_parameter_type = pipeline_spec_pb2.ParameterType.STRING
        elif pipeline_spec_pb2.ParameterType.BOOLEAN in parameter_types:
            canonical_parameter_type = pipeline_spec_pb2.ParameterType.BOOLEAN
        else:
            # Must be a double and int, promote to double.
            assert pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE in parameter_types, \
                'Types: {} [{} {}]'.format(
                    parameter_types, left_operand, right_operand)
            assert pipeline_spec_pb2.ParameterType.NUMBER_INTEGER in parameter_types, \
                'Types: {} [{} {}]'.format(
                    parameter_types, left_operand, right_operand)
            canonical_parameter_type = pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE
    elif len(parameter_types) == 1:  # Both operands are the same type.
        canonical_parameter_type = parameter_types.pop()
    else:
        # Probably shouldn't happen.
        raise ValueError('Unable to determine operand types for'
                         ' "{}" and "{}"'.format(left_operand, right_operand))

    # Render each operand as a CEL fragment, then cast it to the canonical
    # type when needed.
    operand_values = []
    for value_or_reference in [left_operand, right_operand]:
        if isinstance(value_or_reference, pipeline_channel.PipelineChannel):
            input_name = _additional_input_name_for_pipeline_channel(
                value_or_reference)
            operand_value = "inputs.parameter_values['{input_name}']".format(
                input_name=input_name)
            parameter_type = type_utils.get_parameter_type(
                value_or_reference.channel_type)
            if parameter_type == pipeline_spec_pb2.ParameterType.NUMBER_INTEGER:
                # parameter_values are doubles in CEL; truncate to int.
                operand_value = 'int({})'.format(operand_value)
        elif isinstance(value_or_reference, str):
            operand_value = "'{}'".format(value_or_reference)
            parameter_type = pipeline_spec_pb2.ParameterType.STRING
        elif isinstance(value_or_reference, bool):
            # Booleans need to be compared as 'true' or 'false' in CEL.
            operand_value = str(value_or_reference).lower()
            parameter_type = pipeline_spec_pb2.ParameterType.BOOLEAN
        elif isinstance(value_or_reference, int):
            operand_value = str(value_or_reference)
            parameter_type = pipeline_spec_pb2.ParameterType.NUMBER_INTEGER
        else:
            assert isinstance(value_or_reference, float), value_or_reference
            operand_value = str(value_or_reference)
            parameter_type = pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE

        if parameter_type != canonical_parameter_type:
            # Type-cast so CEL does not complain.
            if canonical_parameter_type == pipeline_spec_pb2.ParameterType.STRING:
                assert parameter_type in [
                    pipeline_spec_pb2.ParameterType.BOOLEAN,
                    pipeline_spec_pb2.ParameterType.NUMBER_INTEGER,
                    pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE,
                ]
                operand_value = "'{}'".format(operand_value)
            elif canonical_parameter_type == pipeline_spec_pb2.ParameterType.BOOLEAN:
                assert parameter_type in [
                    pipeline_spec_pb2.ParameterType.NUMBER_INTEGER,
                    pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE,
                ]
                # Bug fix: the previous expression mapped 0 -> 'true' and
                # nonzero -> 'false', inverting standard truthiness. Use
                # float() so double literals such as '1.5' are accepted too
                # (the assert above admits NUMBER_DOUBLE).
                # NOTE(review): this path assumes the operand is a numeric
                # literal; a numeric *channel* reference would not parse
                # here — confirm channels never reach this branch.
                operand_value = 'true' if float(operand_value) != 0 else 'false'
            else:
                assert canonical_parameter_type == pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE
                assert parameter_type == pipeline_spec_pb2.ParameterType.NUMBER_INTEGER
                operand_value = 'double({})'.format(operand_value)

        operand_values.append(operand_value)

    return tuple(operand_values)
def _update_task_spec_for_condition_group(
    group: tasks_group.Condition,
    pipeline_task_spec: pipeline_spec_pb2.PipelineTaskSpec,
) -> None:
    """Updates PipelineTaskSpec for a condition group.

    Args:
        group: The condition group to update task spec for.
        pipeline_task_spec: The pipeline task spec to update in place.
    """
    condition = group.condition
    left_value, right_value = _resolve_condition_operands(
        condition.left_operand, condition.right_operand)

    # Gate the task on the resolved CEL expression.
    trigger_policy = pipeline_spec_pb2.PipelineTaskSpec.TriggerPolicy(
        condition=f'{left_value} {condition.operator} {right_value}')
    pipeline_task_spec.trigger_policy.CopyFrom(trigger_policy)
def build_task_spec_for_exit_task(
    task: pipeline_task.PipelineTask,
    dependent_task: str,
    pipeline_inputs: pipeline_spec_pb2.ComponentInputsSpec,
) -> pipeline_spec_pb2.PipelineTaskSpec:
    """Builds PipelineTaskSpec for an exit handler's exit task.

    Args:
        task: The exit handler's exit task to build a task spec for.
        dependent_task: The dependent task name for the exit task, i.e. the
            name of the exit handler group.
        pipeline_inputs: The pipeline level input definitions.

    Returns:
        A PipelineTaskSpec object representing the exit task.
    """
    pipeline_task_spec = build_task_spec_for_task(
        task=task,
        parent_component_inputs=pipeline_inputs,
        tasks_in_current_dag=[],  # Does not matter for an exit task.
        input_parameters_in_current_dag=pipeline_inputs.parameters.keys(),
        input_artifacts_in_current_dag=[],
    )
    pipeline_task_spec.dependent_tasks.append(dependent_task)
    # An exit task runs regardless of whether its upstream succeeded.
    pipeline_task_spec.trigger_policy.strategy = (
        pipeline_spec_pb2.PipelineTaskSpec.TriggerPolicy.TriggerStrategy
        .ALL_UPSTREAM_TASKS_COMPLETED)
    # Any PipelineTaskFinalStatus-typed input receives the final status of
    # the exit handler group.
    for input_name, input_spec in task.component_spec.inputs.items():
        if type_utils.is_task_final_status_type(input_spec.type):
            pipeline_task_spec.inputs.parameters[
                input_name].task_final_status.producer_task = dependent_task
    return pipeline_task_spec
def build_task_spec_for_group(
    group: tasks_group.TasksGroup,
    pipeline_channels: List[pipeline_channel.PipelineChannel],
    tasks_in_current_dag: List[str],
    is_parent_component_root: bool,
) -> pipeline_spec_pb2.PipelineTaskSpec:
    """Builds PipelineTaskSpec for a group.

    Args:
        group: The group to build PipelineTaskSpec for.
        pipeline_channels: The list of pipeline channels referenced by the group.
        tasks_in_current_dag: The list of tasks names for tasks in the same dag.
        is_parent_component_root: Whether the parent component is the pipeline's
            root dag.

    Returns:
        A PipelineTaskSpec object representing the group.
    """
    pipeline_task_spec = pipeline_spec_pb2.PipelineTaskSpec()
    pipeline_task_spec.task_info.name = group.display_name or group.name
    pipeline_task_spec.component_ref.name = (
        component_utils.sanitize_component_name(group.name))

    for channel in pipeline_channels:
        channel_full_name = channel.full_name
        subvar_name = None
        # A LoopArgumentVariable addresses one field of its loop argument;
        # resolve naming through the underlying loop argument.
        if isinstance(channel, for_loop.LoopArgumentVariable):
            channel_full_name = channel.loop_argument.full_name
            subvar_name = channel.subvar_name

        input_name = _additional_input_name_for_pipeline_channel(channel)
        channel_name = channel.name
        if subvar_name:
            # Select the subfield out of the serialized loop item at runtime.
            pipeline_task_spec.inputs.parameters[
                input_name].parameter_expression_selector = (
                    'parseJson(string_value)["{}"]'.format(subvar_name))
            if not channel.is_with_items_loop_argument:
                channel_name = channel.items_or_pipeline_channel.name

        if isinstance(channel, pipeline_channel.PipelineArtifactChannel):
            if channel.task_name and channel.task_name in tasks_in_current_dag:
                # Artifact produced by a sibling task in this dag.
                pipeline_task_spec.inputs.artifacts[
                    input_name].task_output_artifact.producer_task = (
                        component_utils.sanitize_task_name(channel.task_name))
                pipeline_task_spec.inputs.artifacts[
                    input_name].task_output_artifact.output_artifact_key = (
                        channel_name)
            else:
                # Artifact flows in from the parent component.
                pipeline_task_spec.inputs.artifacts[
                    input_name].component_input_artifact = (
                        channel_full_name
                        if is_parent_component_root else input_name)
        else:
            # channel is one of PipelineParameterChannel, LoopArgument, or
            # LoopArgumentVariable
            if channel.task_name and channel.task_name in tasks_in_current_dag:
                pipeline_task_spec.inputs.parameters[
                    input_name].task_output_parameter.producer_task = (
                        component_utils.sanitize_task_name(channel.task_name))
                pipeline_task_spec.inputs.parameters[
                    input_name].task_output_parameter.output_parameter_key = (
                        channel_name)
            else:
                # NOTE(review): unlike the artifact branch, the non-root case
                # derives the input name from channel_full_name (a str) —
                # presumably _additional_input_name_for_pipeline_channel
                # accepts both strings and channels; confirm.
                pipeline_task_spec.inputs.parameters[
                    input_name].component_input_parameter = (
                        channel_full_name if is_parent_component_root else
                        _additional_input_name_for_pipeline_channel(
                            channel_full_name))

    # Group-kind-specific adjustments (iterator spec or trigger condition).
    if isinstance(group, tasks_group.ParallelFor):
        _update_task_spec_for_loop_group(
            group=group,
            pipeline_task_spec=pipeline_task_spec,
        )
    elif isinstance(group, tasks_group.Condition):
        _update_task_spec_for_condition_group(
            group=group,
            pipeline_task_spec=pipeline_task_spec,
        )
    return pipeline_task_spec
def populate_metrics_in_dag_outputs(
tasks: List[pipeline_task.PipelineTask],
task_name_to_parent_groups: Mapping[str, List[GroupOrTaskType]],
task_name_to_task_spec: Mapping[str, pipeline_spec_pb2.PipelineTaskSpec],
task_name_to_component_spec: Mapping[str, pipeline_spec_pb2.ComponentSpec],
pipeline_spec: pipeline_spec_pb2.PipelineSpec,
) -> None:
"""Populates metrics artifacts in DAG outputs.
Args:
tasks: The list of tasks that may produce metrics outputs.
task_name_to_parent_groups: The dict of task name to parent groups.
Key is the task's name. Value is a list of ancestor groups including
the task itself. The list of a given op is sorted in a way that the
farthest group is the first and the task itself is the last.
task_name_to_task_spec: The dict of task name to PipelineTaskSpec.
task_name_to_component_spec: The dict of task name to ComponentSpec.
pipeline_spec: The pipeline_spec to update in-place.
"""
for task in tasks:
component_spec = task_name_to_component_spec[task.name]
# Get the tuple of (component_name, task_name) of all its parent groups.
parent_components_and_tasks = [('_root', | |
<filename>lib/models/mask.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.models.connect import xcorr_depthwise
class ARN(nn.Module):
    """
    Attention Retrieval Network in Ocean+

    Builds a per-pixel foreground prior for the search feature map: every
    search location attends over all template locations (scaled dot-product
    attention), and the attention weights aggregate the resized template
    mask into a [B, 1, Hx, Wx] prior map.
    """
    def __init__(self, inchannels=256, outchannels=256):
        super(ARN, self).__init__()
        self.s_embed = nn.Conv2d(inchannels, outchannels, 1) # embedding for search feature
        self.t_embed = nn.Conv2d(inchannels, outchannels, 1) # embedding for template feature

    def forward(self, xf, zf, zf_mask):
        # xf: search feature [B, C, Hx, Wx]
        # zf: template feature [B, C, Hz, Wz]
        # zf_mask: template mask [B, H, W]; resized below to (Hz, Wz)
        xf = self.s_embed(xf)
        zf = self.t_embed(zf)
        B, C, Hx, Wx = xf.size()
        B, C, Hz, Wz = zf.size()
        xf = xf.permute(0, 2, 3, 1).contiguous() # [B, Hx, Wx, C]
        xf = xf.view(B, -1, C)                   # [B, Hx*Wx, C]
        zf = zf.view(B, C, -1)                   # [B, C, Hz*Wz]
        att = torch.matmul(xf, zf)               # [B, Hx*Wx, Hz*Wz]
        att = att / math.sqrt(C)                 # scaled dot-product attention
        att = F.softmax(att, dim=-1)             # normalize over template locations
        zf_mask = nn.Upsample(size=(Hz, Wz), mode='bilinear', align_corners=True)(zf_mask.unsqueeze(1))
        zf_mask = zf_mask.view(B, -1, 1)         # [B, Hz*Wz, 1]
        arn = torch.matmul(att, zf_mask)         # [B, Hx*Wx, 1]
        # Bug fix: reshape with (Hx, Wx); the original used (Hx, Hx), which
        # only works for square search features and crashes otherwise.
        arn = arn.view(B, Hx, Wx).unsqueeze(1)   # [B, 1, Hx, Wx]
        return arn
class MSS(nn.Module):
    """
    Multi-resolution Single-stage Segmentation (fast, used for VOT-RT)

    Fuses the correlation map with backbone features from four resolutions,
    upsampling and refining stage by stage until a 2-channel mask logit map
    at ``input_size`` is produced.
    """
    def __init__(self):
        super(MSS, self).__init__()
        # backbone feature adapters, one per resolution level
        self.b4 = nn.Sequential(nn.Conv2d(256, 64, 3, padding=1), nn.ReLU())
        self.b3 = nn.Sequential(nn.Conv2d(512, 32, 3, padding=1), nn.ReLU())
        self.b2 = nn.Sequential(nn.Conv2d(256, 16, 3, padding=1), nn.ReLU())
        self.b1 = nn.Sequential(nn.Conv2d(64, 4, 3, padding=1), nn.ReLU())
        self.b0 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1), nn.ReLU())
        # refinement head
        self.rCo = nn.Sequential(nn.Conv2d(256, 64, 3, padding=1), nn.ReLU())
        self.r3 = nn.Sequential(nn.Conv2d(64, 32, 3, padding=1), nn.ReLU())
        self.r2 = nn.Sequential(nn.Conv2d(32, 16, 3, padding=1), nn.ReLU())
        self.r1 = nn.Sequential(nn.Conv2d(16, 4, 3, padding=1), nn.ReLU())
        self.r0 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1))
        # auxiliary heads for multi-resolution supervision in training
        self.m3 = nn.Sequential(nn.Conv2d(32, 2, 3, padding=1)) # stride = 8
        self.m2 = nn.Sequential(nn.Conv2d(16, 2, 3, padding=1)) # stride = 4
        self.m1 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1)) # stride = 2
        self.multi_loss = True
        # attention-retrieval prior from the template mask
        self.sequential = ARN(256, 64)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)

    def forward(self, features, input_size=None, zf_ori=None, template_mask=None):
        feat1, feat2, feat3, feat4, corr = features
        if input_size is None:
            input_size = (255, 255)
        # inject the template-mask attention prior into the deepest feature
        prior = torch.clamp(self.sequential(feat4, zf_ori, template_mask), 0, 1)
        feat4 = feat4 + prior
        # fuse correlation with the deepest adapted feature
        corr = F.interpolate(corr, size=feat4.size()[2:], mode='bilinear', align_corners=True)
        fused = self.rCo(corr) + self.b4(feat4)
        # progressively upsample and merge with shallower features
        fused = F.interpolate(fused, size=feat3.size()[2:], mode='bilinear', align_corners=True)
        fused = self.r3(fused) + self.b3(feat3)
        fused = F.interpolate(fused, size=feat2.size()[2:], mode='bilinear', align_corners=True)
        fused = self.r2(fused) + self.b2(feat2)
        fused = F.interpolate(fused, size=feat1.size()[2:], mode='bilinear', align_corners=True)
        fused = self.r1(fused) + self.b1(feat1)
        # final upsample to the full search-image resolution
        fused = F.interpolate(fused, size=input_size, mode='bilinear', align_corners=True)
        return self.r0(fused)
class MMS(nn.Module):
    """
    Multi-resolution Multi-stage Segmentation (suitable for VOS)

    Stage 1 mirrors MSS; its intermediate features are then refined by
    IterRefine (stage 2) and IterRefine2 (stage 3), and the three mask
    predictions are blended with the ratio1/ratio2/ratio3 weights.
    """
    def __init__(self):
        """
        Multi-resolution Multi-stage Segmentation (suitable for VOS)
        """
        super(MMS, self).__init__()
        # BACKBONE adapters, one per feature resolution
        self.b4 = nn.Sequential(nn.Conv2d(256, 64, 3, padding=1), nn.ReLU())
        self.b3 = nn.Sequential(nn.Conv2d(512, 32, 3, padding=1), nn.ReLU())
        self.b2 = nn.Sequential(nn.Conv2d(256, 16, 3, padding=1), nn.ReLU())
        self.b1 = nn.Sequential(nn.Conv2d(64, 4, 3, padding=1), nn.ReLU())
        self.b0 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1), nn.ReLU())
        # REFINE head
        self.rCo = nn.Sequential(nn.Conv2d(256, 64, 3, padding=1), nn.ReLU())
        self.r3 = nn.Sequential(nn.Conv2d(64, 32, 3, padding=1), nn.ReLU())
        self.r2 = nn.Sequential(nn.Conv2d(32, 16, 3, padding=1), nn.ReLU())
        self.r1 = nn.Sequential(nn.Conv2d(16, 4, 3, padding=1), nn.ReLU())
        self.r0 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1))
        # being used in training (not inference)
        self.m3 = nn.Sequential(nn.Conv2d(32, 2, 3, padding=1)) # stride = 8
        self.m2 = nn.Sequential(nn.Conv2d(16, 2, 3, padding=1)) # stride = 4
        self.m1 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1)) # stride = 2
        # for sequential
        self.sequential = ARN(256, 64) # transduction attention
        self.iter = IterRefine()
        self.iter2 = IterRefine2()
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
        # blending weights for the three stage outputs (sum is 0.99, not 1.0 —
        # NOTE(review): confirm this is intentional)
        self.ratio1, self.ratio2, self.ratio3 = 0.33, 0.33, 0.33
    def forward(self, features, input_size=None, zf_ori=None, template_mask=None):
        # features: (b1, b2, b3, b4, corr) backbone features plus correlation map
        b1, b2, b3, b4, corr = features
        b4_size = b4.size()[2:]
        b3_size = b3.size()[2:]  # NOTE(review): unused below; r4 and b3 are assumed to share spatial size
        b2_size = b2.size()[2:]
        b1_size = b1.size()[2:]
        if input_size is None: input_size = (255, 255)
        # iter list -- intermediate features handed to the refinement stages
        iterList = []
        # transduction network: template-mask attention prior, clamped to [0, 1]
        arn = self.sequential(b4, zf_ori, template_mask) # [B, H, W]
        arn = torch.clamp(arn, 0, 1)
        b4 = b4 + arn
        corr = nn.Upsample(size=b4_size, mode='bilinear', align_corners=True)(corr)
        corr = self.rCo(corr)
        b4 = self.b4(b4)
        r4 = corr + b4
        iterList.append(r4) # [64]
        b3 = self.b3(b3)
        iterList.append(b3) # [64, 32]
        # NOTE(review): unlike MSS, r4 is not upsampled before this addition,
        # so b3 and b4 must already share spatial size — confirm backbone strides
        r3 = self.r3(r4) + b3
        r3 = nn.Upsample(size=b2_size, mode='bilinear', align_corners=True)(r3)
        b2 = self.b2(b2) # [64, 32, 16]
        iterList.append(b2) # [64, 32, 16]
        r2 = self.r2(r3) + b2
        r2 = nn.Upsample(size=b1_size, mode='bilinear', align_corners=True)(r2)
        b1 = self.b1(b1) # [64, 32, 16, 4]
        iterList.append(b1)
        r1 = self.r1(r2) + b1
        # r1 up to the full search-image resolution
        r1 = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)(r1)
        mask = self.r0(r1)
        ##### iter refine: stage 2 returns ([mask], features); stage 3 returns a mask tensor
        mask_iter, flist = self.iter(iterList, pre_mask = mask, input_size=input_size)
        mask_list = self.iter2(flist, pre_mask=mask_iter, input_size=input_size)
        # blend the stage-1, stage-2 and stage-3 predictions
        return self.ratio1 * mask + self.ratio2 * mask_iter[0] + self.ratio3 * mask_list
    def update_iter(self, ratio1, ratio2, ratio3):
        # Adjust the blending weights of the three stage outputs.
        self.ratio1, self.ratio2, self.ratio3 = ratio1, ratio2, ratio3
class IterRefine(nn.Module):
    """Stage-2 refinement of MMS: re-weights the stage-1 features with the
    stage-1 mask probability and runs another top-down fusion pass."""
    def __init__(self):
        """
        stage2 of MMS
        TODO: simplify the code
        """
        super(IterRefine, self).__init__()
        # BACKBONE adapters for the stage-1 features [64, 32, 16, 4 channels]
        self.b3 = nn.Sequential(nn.Conv2d(32, 32, 3, padding=1), nn.ReLU())
        self.b2 = nn.Sequential(nn.Conv2d(16, 16, 3, padding=1), nn.ReLU())
        self.b1 = nn.Sequential(nn.Conv2d(4, 4, 3, padding=1), nn.ReLU())
        # REFINE
        self.r4 = nn.Sequential(nn.Conv2d(64, 32, 3, padding=1), nn.ReLU())
        self.r3 = nn.Sequential(nn.Conv2d(32, 32, 3, padding=1), nn.ReLU())
        self.r2 = nn.Sequential(nn.Conv2d(32, 16, 3, padding=1), nn.ReLU())
        self.r1 = nn.Sequential(nn.Conv2d(16, 4, 3, padding=1), nn.ReLU())
        self.r0 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1))
        # being used in training (not inference)
        self.m3 = nn.Sequential(nn.Conv2d(32, 2, 3, padding=1)) # stride = 8
        self.m2 = nn.Sequential(nn.Conv2d(16, 2, 3, padding=1)) # stride = 4
        self.m1 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1)) # stride = 2
        # NOTE(review): self.sequential is constructed but never used in
        # forward(); possibly kept for checkpoint compatibility — confirm
        # before removing.
        self.sequential = ARN(256, 64) # transduction attention
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
    def forward(self, fList, pre_mask=None, input_size=None):
        b4, b3, b2, b1 = fList # [64, 32, 16, 4]
        iterList = []
        # pre_mask processing: foreground probability floored at 0.7 so
        # low-confidence regions are attenuated rather than zeroed
        att = F.softmax(pre_mask, dim=1)[:,1,...].unsqueeze(1)
        att = torch.clamp(att, 0.7, 1)
        b4_size = b4.size()[2:]
        b3_size = b3.size()[2:]
        b2_size = b2.size()[2:]
        b1_size = b1.size()[2:]
        if input_size is None: input_size = (255, 255)
        att_b4 = nn.Upsample(size=b4_size, mode='bilinear', align_corners=True)(att)
        att_b2 = nn.Upsample(size=b2_size, mode='bilinear', align_corners=True)(att)
        att_b1 = nn.Upsample(size=b1_size, mode='bilinear', align_corners=True)(att)
        # NOTE(review): b3 is scaled by att_b4, which assumes b3 and b4 share
        # spatial size — confirm backbone strides
        b4, b3, b2, b1 = att_b4 * b4, att_b4 * b3, att_b2 * b2, att_b1 * b1
        r4 = self.r4(b4)
        iterList.append(r4)
        b3 = self.b3(b3)
        iterList.append(b3)
        r3 = self.r3(r4) + b3 # 32
        # r3up + b2
        r3 = nn.Upsample(size=b2_size, mode='bilinear', align_corners=True)(r3)
        b2 = self.b2(b2)
        iterList.append(b2)
        r2 = self.r2(r3) + b2 # 16
        # r2up + b1
        r2 = nn.Upsample(size=b1_size, mode='bilinear', align_corners=True)(r2)
        b1 = self.b1(b1)
        iterList.append(b1)
        r1 = self.r1(r2) + b1
        # r1 up to the full search-image resolution
        r1 = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)(r1)
        mask = self.r0(r1)
        # mask is wrapped in a list so stage 3 reads it as pre_mask[0]
        return [mask], iterList
class IterRefine2(nn.Module):
    def __init__(self):
        """
        stage3 of MMS
        TODO: simplify the code
        """
        super(IterRefine2, self).__init__()
        # BACKBONE adapters for the stage-2 features (channel-reducing)
        self.b3 = nn.Sequential(nn.Conv2d(32, 16, 3, padding=1), nn.ReLU())
        self.b2 = nn.Sequential(nn.Conv2d(16, 8, 3, padding=1), nn.ReLU())
        self.b1 = nn.Sequential(nn.Conv2d(4, 4, 3, padding=1), nn.ReLU())
        # REFINE
        self.r4 = nn.Sequential(nn.Conv2d(32, 16, 3, padding=1), nn.ReLU())
        self.r3 = nn.Sequential(nn.Conv2d(16, 16, 3, padding=1), nn.ReLU())
        self.r2 = nn.Sequential(nn.Conv2d(16, 8, 3, padding=1), nn.ReLU())
        self.r1 = nn.Sequential(nn.Conv2d(8, 4, 3, padding=1), nn.ReLU())
        self.r0 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1))
        # being used in training (not inference)
        self.m3 = nn.Sequential(nn.Conv2d(16, 2, 3, padding=1)) # stride = 8
        self.m2 = nn.Sequential(nn.Conv2d(8, 2, 3, padding=1)) # stride = 4
        self.m1 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1)) # stride = 2
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
def forward(self, fList, pre_mask=None, input_size=None):
b4, b3, b2, b1 = fList # [32, 32, 16, 4]
iterList = []
# pre_mask processing
att = F.softmax(pre_mask[0], dim=1)[:, 1, ...].unsqueeze(1)
att = torch.clamp(att, 0.7, 1)
b4_size = b4.size()[2:]
b3_size = b3.size()[2:]
b2_size = b2.size()[2:]
b1_size = b1.size()[2:]
if input_size is None: input_size = (255, 255)
att_b4 = nn.Upsample(size=b4_size, mode='bilinear', align_corners=True)(att)
att_b2 = nn.Upsample(size=b2_size, mode='bilinear', align_corners=True)(att)
att_b1 = nn.Upsample(size=b1_size, mode='bilinear', align_corners=True)(att)
b4, b3, b2, b1 = att_b4 * b4, att_b4 * b3, att_b2 * b2, att_b1 * b1
r4 = self.r4(b4)
b3 = self.b3(b3)
r3 = self.r3(r4) + b3 # 16
# r3up + b2
r3 = nn.Upsample(size=b2_size, mode='bilinear', align_corners=True)(r3)
b2 = | |
= 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({hpta_EEOc: -1.0,
h_EEOc: -1.0,
hpta_EEOe: 1.0,
h_EEOe: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Octanoate Transport
# Extracellular octanoate (octa_e) exchanges with the EEO outer compartment
# (octa_EEOe); the EEO_Abnd coefficient scales flux by community abundance.
#octa_EEOe <-> octa_e
octa_EEOe = Metabolite('octa_EEOe', formula='C8H15O2', name='Octanoate', compartment='EEOe', charge= -1)
reaction = Reaction('EEO_EX_octa')
reaction.name = 'EEO octa exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({octa_e: EEO_Abnd,
                          octa_EEOe: -1.})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# Import is a proton symport (octanoate + H+ in), irreversible (lb = 0).
#octa_EEOe + h_EEOe <-> octa_EEOc + h_EEOc
reaction = Reaction('EEO_Octanoate_import')
reaction.name = 'Octanoate import'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({octa_EEOe: -1.0,
                          h_EEOe: -1.0,
                          octa_EEOc: 1.0,
                          h_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# Export is the mirror-image symport, also irreversible.
#octa_EEOc + h_EEOc <-> octa_EEOe + h_EEOe
reaction = Reaction('EEO_Octanoate_export')
reaction.name = 'Octanote export'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({octa_EEOc: -1.0,
                          h_EEOc: -1.0,
                          octa_EEOe: 1.0,
                          h_EEOe: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Lactate Transport
# Extracellular D-lactate exchanges with the EEO outer compartment, then a
# proton symport moves it across the EEO membrane in each direction.
#lac__D_EEOe <-> lac__D_e
# Bug fix: the metabolite display name was 'Octanoate' (copy-paste from the
# octanoate block above); corrected to 'D-Lactate'. Id/formula/charge unchanged.
lac__D_EEOe = Metabolite('lac__D_EEOe', formula='C3H5O3', name='D-Lactate', compartment='EEOe', charge= -1)
reaction = Reaction('EEO_EX_lac__D')
reaction.name = 'EEO lac__D exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({lac__D_e: EEO_Abnd,
                          lac__D_EEOe: -1.})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#lac__D_EEOe + h_EEOe <-> lac__D_EEOc + h_EEOc
reaction = Reaction('EEO_Lactate_import')
reaction.name = 'Lactate import'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
# NOTE(review): upper bound 0 disables lactate import entirely (sibling
# importers use 1000); the original '# This is the default' comment was
# wrong. Preserved as-is — confirm whether disabling import is intended.
reaction.upper_bound = 0.
reaction.add_metabolites({lac__D_EEOe: -1.0,
                          h_EEOe: -1.0,
                          lac__D_EEOc: 1.0,
                          h_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#lac__D_EEOc + h_EEOc <-> lac__D_EEOe + h_EEOe
reaction = Reaction('EEO_Lactate_export')
reaction.name = 'Lactate export'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({lac__D_EEOc: -1.0,
                          h_EEOc: -1.0,
                          lac__D_EEOe: 1.0,
                          h_EEOe: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Ethanol Transport
# Ethanol is uncharged, so unlike the acids above it diffuses without a
# proton symport.
#etoh_EEOe <-> etoh_e
etoh_EEOe = Metabolite('etoh_EEOe', formula='C2H6O', name='Ethanol', compartment='EEOe', charge= 0)
reaction = Reaction('EEO_EX_etoh')
reaction.name = 'EEO etoh exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({etoh_e: EEO_Abnd,
                          etoh_EEOe: -1})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#etoh_EEOe <-> etoh_EEOc
reaction = Reaction('EEO_Ethanol_import')
reaction.name = 'Ethanol import'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({etoh_EEOe: -1.0,
                          etoh_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#etoh_EEOc <-> etoh_EEOe
reaction = Reaction('EEO_Ethanol_export')
reaction.name = 'Ethanol export'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({etoh_EEOc: -1.0,
                          etoh_EEOe: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Proton Transport
# Free proton exchange and transport across the EEO membrane.
#h_EEOe <-> h_e
reaction = Reaction('EEO_EX_h')
# NOTE(review): the display name carries a trailing space ('EEO h exchange ')
# — preserved here; confirm before normalizing, as it shows up in output.
reaction.name = 'EEO h exchange '
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({h_e: EEO_Abnd,
                          h_EEOe: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#h_EEOe <-> h_EEOc
reaction = Reaction('EEO_H_import')
reaction.name = 'H+ import'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({h_EEOe: -1.0,
                          h_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#h_EEOc <-> h_EEOe
reaction = Reaction('EEO_H_export')
reaction.name = 'H+ export'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({h_EEOc: -1.0,
                          h_EEOe: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#ATP for Transport
# EEO_ATP_Transport is a bookkeeping pseudo-metabolite (empty formula/name,
# shared 'e' compartment); this sink reaction allows it to be consumed.
# NOTE(review): the per-substrate ATP reactions below use a *different*
# pseudo-metabolite, ATP_TRANS_EEO (defined earlier in the file) — confirm
# the two are linked as intended.
EEO_ATP_Transport = Metabolite('EEO_ATP_Transport', formula='', name='', compartment='e')
reaction = Reaction('EEO_Transport_ATP')
reaction.name = 'Transport ATP'
reaction.subsystem = 'ATP Hydrolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({EEO_ATP_Transport: -1})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# Per-substrate ATP costs for transport.
# Each reaction hydrolyses ATP (atp_EEOc + h2o_EEOc -> adp_EEOc + pi_EEOc +
# h_EEOc) and consumes one unit of the ATP_TRANS_EEO accounting
# pseudo-metabolite. The eight short/medium-chain-acid reactions were
# identical except for their ids/names, so they are generated in a loop;
# ids, display names (including the original 'Formate Transport ATP' without
# 'Hydrolysis' — preserved verbatim), bounds, stoichiometry and print order
# are unchanged from the original unrolled code.
_transport_atp_reactions = [
    ('EEO_Formate_Transport_ATP', 'Formate Transport ATP'),
    ('EEO_Acetate_Transport_ATP', 'Acetate Transport ATP Hydrolysis'),
    ('EEO_Propionate_Transport_ATP', 'Propionate Transport ATP Hydrolysis'),
    ('EEO_Butyrate_Transport_ATP', 'Butyrate Transport ATP Hydrolysis'),
    ('EEO_Valerate_Transport_ATP', 'Valerate Transport ATP Hydrolysis'),
    ('EEO_Hexanoate_Transport_ATP', 'Hexanoate Transport ATP Hydrolysis'),
    ('EEO_Heptanoate_Transport_ATP', 'Heptanoate Transport ATP Hydrolysis'),
    ('EEO_Octanoate_Transport_ATP', 'Octanoate Transport ATP Hydrolysis'),
]
for _rxn_id, _rxn_name in _transport_atp_reactions:
    # atp_EEOc + h2o_EEOc <-> adp_EEOc + pi_EEOc + h_EEOc
    reaction = Reaction(_rxn_id)
    reaction.name = _rxn_name
    reaction.subsystem = 'ATP Hydrolysis'
    reaction.lower_bound = 0. # This is the default
    reaction.upper_bound = 1000. # This is the default
    reaction.add_metabolites({atp_EEOc: -1.0,
                              h2o_EEOc: -1.0,
                              adp_EEOc: 1.0,
                              pi_EEOc: 1.0,
                              h_EEOc: 1.0,
                              ATP_TRANS_EEO: -1.0})
    model.add_reactions([reaction])
    print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Lactate_Transport_ATP
#atp_EEOc + h2o_EEOc <-> adp_EEOc + pi_EEOc + h_EEOc
reaction = Reaction('EEO_Lactate_Transport_ATP')
# Bug fix: the name was the copy-paste leftover 'ATP Transport'; renamed to
# match the '<substrate> Transport ATP Hydrolysis' pattern of the siblings.
reaction.name = 'Lactate Transport ATP Hydrolysis'
reaction.subsystem = 'ATP Hydrolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
# Bug fix: ATP_TRANS_EEO was +1.0 here while every sibling transport-ATP
# reaction consumes it (-1.0); a positive coefficient would let this
# reaction *produce* transport-ATP credit instead of spending it.
reaction.add_metabolites({atp_EEOc: -1.0,
                          h2o_EEOc: -1.0,
                          adp_EEOc: 1.0,
                          pi_EEOc: 1.0,
                          h_EEOc: 1.0,
                          ATP_TRANS_EEO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Ethanol_Transport_ATP
# ATP cost of ethanol transport; consumes one ATP_TRANS_EEO unit.
#atp_EEOc + h2o_EEOc <-> adp_EEOc + pi_EEOc + h_EEOc
reaction = Reaction('EEO_Ethanol_Transport_ATP')
reaction.name = 'Ethanol Transport ATP Hydrolysis'
reaction.subsystem = 'ATP Hydrolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_EEOc: -1.0,
                          h2o_EEOc: -1.0,
                          adp_EEOc: 1.0,
                          pi_EEOc: 1.0,
                          h_EEOc: 1.0,
                          ATP_TRANS_EEO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Proton_Transport_ATP
# ATP cost of proton transport; consumes one ATP_TRANS_EEO unit.
#atp_EEOc + h2o_EEOc <-> adp_EEOc + pi_EEOc + h_EEOc
reaction = Reaction('EEO_Proton_Transport_ATP')
# Bug fix: the name was the copy-paste leftover 'ATP Transport'; renamed to
# match the '<substrate> Transport ATP Hydrolysis' pattern of the siblings.
reaction.name = 'Proton Transport ATP Hydrolysis'
reaction.subsystem = 'ATP Hydrolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_EEOc: -1.0,
                          h2o_EEOc: -1.0,
                          adp_EEOc: 1.0,
                          pi_EEOc: 1.0,
                          h_EEOc: 1.0,
                          ATP_TRANS_EEO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
##Pentose Phosphate Pathway
# ru5p__D_EEOc <-> xu5p__D_EEOc
ru5p__D_EEOc = Metabolite('ru5p__D_EEOc', formula='C5H9O8P', name='D-Ribulose 5-phosphate', compartment='EEOc', charge=-2)
xu5p__D_EEOc = Metabolite('xu5p__D_EEOc', formula='C5H9O8P', name='D-xylulose 5-phosphate', | |
a hub.
new_state(bool): New autoremoteness state to set for hub. True means remote will be automanaged. Defaults to None when only the current value will be returned.
Returns:
bool: True for a hub with autoremote enabled.
"""
if new_state is not None:
_setAttr(hub_id, 'autoremote', new_state)
return _getAttr(hub_id, 'autoremote', default=True, boolean=True)
### Hub info ###
def tz(**kwargs):
    """Return the timezone of the given hub (or of the default hub).

    For more optional kwargs see cozify.hub_api.get()

    Args:
        **hub_id(str): Hub to query, by default the default hub is used.

    Returns:
        str: Timezone of the hub, for example: 'Europe/Helsinki'
    """
    _fill_kwargs(kwargs)
    timezone = hub_api.tz(**kwargs)
    return timezone
def ping(autorefresh=True, **kwargs):
    """Perform a cheap API call to trigger any potential APIError and return boolean for success/failure. For optional kwargs see cozify.hub_api.get()

    May flip the hub's stored remote state as a side effect when a local
    connection is impossible but autoremote is enabled.

    Args:
        autorefresh(bool): Whether to perform an autorefresh after an initially failed ping. If successful, will still return True. Defaults to True.
        **hub_id(str): Hub to ping or default if neither id or name set.
        **hub_name(str): Hub to ping by name.

    Returns:
        bool: True for a valid and working hub authentication state.
    """
    try:
        _fill_kwargs(kwargs) # this can raise an APIError if hub_token has expired
        # Detect remote-ness and flip state if needed
        if not kwargs['remote'] and kwargs['autoremote'] and not kwargs['host']:
            remote(kwargs['hub_id'], True)
            kwargs['remote'] = True
            logging.debug('Ping determined hub is remote and flipped state to remote.')
        # We could still be remote but just have host set. If so, tz will fail.
        timezone = tz(**kwargs)
        logging.debug('Ping performed with tz call, response: {0}'.format(timezone))
    except APIError as e:
        if e.status_code == 401 or e.status_code == 403:
            if autorefresh:
                from cozify import cloud
                logging.warning('Hub token has expired, hub.ping() attempting to renew it.')
                logging.debug('Original APIError was: {0}'.format(e))
                # NOTE(review): a successful token refresh returns True without
                # re-pinging the hub — confirm this optimistic result is intended.
                if cloud.authenticate(trustHub=False): # if this fails we let it fail.
                    return True
            # Reached both when autorefresh is disabled and when refresh failed.
            logging.error(e)
            return False
        else:
            raise
    except ConnectionError as e:
        # If we're not already remote but are allowed to flip to it
        if not kwargs['remote'] and kwargs['autoremote']:
            # Flip to remote to hopefully reach the hub that way
            logging.warning('Hub connection failed, switching to remote.')
            remote(kwargs['hub_id'], True)
            kwargs['remote'] = True
            # retry the call and let it burn to the ground on failure
            try:
                timezone = tz(**kwargs)
            except (APIError, ConnectionError) as e:
                logging.error('Cannot connect via Cloud either, your hub is dead.')
                # undo remote so it doesn't stick around, since the failure was undetermined
                remote(kwargs['hub_id'], False)
                return False
            else:
                logging.info('Hub connection succeeded remotely, leaving hub configured as remote.')
                return True
        else:
            # Failure was to the cloud, we can't salvage that.
            raise
    else:
        return True
def name(hub_id):
    """Look up the human-readable name of a hub by its id.

    Args:
        hub_id(str): Id of hub to query. The id is a string of hexadecimal sections used internally to represent a hub.

    Returns:
        str: Hub name.
    """
    hub_name = _getAttr(hub_id, 'hubname')
    return hub_name
def host(hub_id):
    """Look up the stored hostname/ip address of a hub by its id.

    Args:
        hub_id(str): Id of hub to query. The id is a string of hexadecimal sections used internally to represent a hub.

    Returns:
        str: ip address of matching hub. Be aware that this may be empty if the hub is only known remotely, and may be stale if the hub is currently remote (the last locally-known address is returned).
    """
    address = _getAttr(hub_id, 'host')
    return address
def token(hub_id, new_token=None):
    """Get the stored authentication token of a hub, optionally storing a new one first.

    Args:
        hub_id(str): Id of hub to query. The id is a string of hexadecimal sections used internally to represent a hub.
        new_token(str): Optional new token to store; ignored if falsy.

    Returns:
        str: Hub authentication token.
    """
    if new_token:
        _setAttr(hub_id, 'hubtoken', new_token)
    stored_token = _getAttr(hub_id, 'hubtoken')
    return stored_token
def hub_id(hub_name):
    """Resolve a hub name to its id.

    Args:
        hub_name(str): Name of hub to query. The name is given when registering a hub to an account.

    Returns:
        str: hub_id on success, raises an AttributeError on failure.
    """
    for section in config.state.sections():
        if not section.startswith("Hubs."):
            continue
        logging.debug('Found hub: {0}'.format(section))
        if config.state[section]['hubname'] == hub_name:
            return section[len("Hubs."):]  # strip the section prefix
    raise AttributeError('Hub not found: {0}'.format(hub_name))
def exists(hub_id):
    """Check for existence of hub in local state.

    Args:
        hub_id(str): Id of hub to query. The id is a string of hexadecimal sections used internally to represent a hub.

    Returns:
        bool: True if the hub is present in the local state.
    """
    # Idiom: return the membership test directly instead of if/else True/False.
    return 'Hubs.{0}'.format(hub_id) in config.state
def default():
    """Return id of default Hub.

    Raises:
        AttributeError: If no default hub is known.
    """
    if 'default' not in config.state['Hubs']:
        logging.fatal('Default hub not known, you should run cozify.authenticate()')
        # Improvement: carry the explanation in the exception as well, so
        # callers that swallow logging still see why this failed.
        raise AttributeError('Default hub not known, you should run cozify.authenticate()')
    return config.state['Hubs']['default']
### Internals ###
def _getAttr(hub_id, attr, default=None, boolean=False):
    """Read a hub state attribute, optionally seeding a default for unset attributes.

    Args:
        hub_id(str): Id of hub to query. The id is a string of hexadecimal sections used internally to represent a hub.
        attr(str): Name of hub attribute to retrieve.
        default: Optional default value to store and return for unset attributes. Without a default an unset attribute raises an AttributeError.
        boolean: Retrieve and return value as a boolean instead of string. Defaults to False.

    Returns:
        str: Value of attribute or exception on failure.
    """
    section = 'Hubs.' + hub_id
    # Guard clauses: unknown hub, then unset attribute.
    if section not in config.state:
        raise AttributeError("Hub id '{0}' not found in state.".format(hub_id))
    if attr not in config.state[section]:
        if default is None:
            raise AttributeError('Attribute {0} not set for hub {1}'.format(attr, hub_id))
        # Seed the default so subsequent reads find it persisted.
        _setAttr(hub_id, attr, default)
    if boolean:
        return config.state.getboolean(section, attr)
    return config.state[section][attr]
def _setAttr(hub_id, attr, value, commit=True):
    """Set hub state attributes by hub_id and attr name.

    Args:
        hub_id(str): Id of hub to store for. The id is a string of hexadecimal sections used internally to represent a hub.
        attr(str): Name of cloud state attribute to overwrite. Attribute will be created if it doesn't exist.
        value(str): Value to store.
        commit(bool): True to commit state after set. Defaults to True.

    Raises:
        AttributeError: If the hub's section does not exist in state.
    """
    if isinstance(value, bool):
        value = str(value)  # configparser state stores strings only

    section = 'Hubs.' + hub_id
    # Guard clause: refuse to create attributes for unknown hubs.
    if section not in config.state:
        logging.warning('Section {0} not found in state.'.format(section))
        # Improvement: the bare AttributeError carried no message; include one.
        raise AttributeError('Section {0} not found in state.'.format(section))
    if attr not in config.state[section]:
        logging.info(
            "Attribute {0} was not already in {1} state, new attribute created.".format(
                attr, section))
    config.state[section][attr] = value
    if commit:
        config.stateWrite()
def _get_id(**kwargs):
"""Get a hub_id from various sources, meant so that you can just throw kwargs at it and get a valid id.
If no data is available to determine which hub was meant, will default to the default hub. If even that fails, will raise an AttributeError.
Args:
**hub_id(str): Will be returned as-is if defined.
**hub_name(str): Name of hub.
hubName(str): Deprecated. Compatibility keyword for hub_name, to be removed in v0.3
hubId(str): Deprecated. Compatibility keyword for hub_id, to be removed in v0.3
"""
if 'hub_id' in kwargs or 'hubId' in kwargs:
logging.debug("Redundant hub._get_id call, resolving hub_id to itself.")
if 'hub_id' in kwargs:
return kwargs['hub_id']
return kwargs['hubId']
if 'hub_name' in kwargs or 'hubName' in kwargs:
if 'hub_name' in kwargs:
return hub_id(kwargs['hub_name'])
return hub_id(kwargs['hubName'])
return default()
def _fill_kwargs(kwargs):
    """Fill in the commonly needed keys of a kwargs dict when absent.

    Args:
        kwargs(dict): kwargs dictionary to fill. Operated on directly.
    """
    # hub_id must be resolved first; the other lookups depend on it.
    if 'hub_id' not in kwargs:
        kwargs['hub_id'] = _get_id(**kwargs)
    if 'remote' not in kwargs:
        kwargs['remote'] = remote(kwargs['hub_id'])
    kwargs.setdefault('autoremote', True)
    if 'hub_token' not in kwargs:
        kwargs['hub_token'] = token(kwargs['hub_id'])
    if 'cloud_token' not in kwargs:
        from . import cloud
        kwargs['cloud_token'] = cloud.token()
    if 'host' not in kwargs:
        # This may end up being None if we're remote
        kwargs['host'] = host(kwargs['hub_id'])
def _clean_state(state):
"""Return purged state of values so only wanted values can be modified.
Args:
state(dict): device state dictionary. Original won't be modified.
"""
out = {}
for k, v in state.items():
if isinstance(v, dict): # recurse nested dicts
out[k] = _clean_state(v)
elif k == "type": # type values are kept
out[k] = v
else: # null out the rest
out[k] = None
return out
def _in_range(value, low, high, description='undefined'):
"""Check that the value is in the given range, raise an error if not.
None is always considered a valid value.
Returns:
bool: True if value in range. Otherwise a ValueError is raised.
"""
if value is not | |
from PyQt5 import QtWidgets, QtCore, QtPrintSupport
from PyQt5.QtCore import QDate, QTime, Qt, QTimer, QRectF
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtPrintSupport import QPrinter
from database import MyCursor
from PyQt5 import QtGui
from datetime import datetime, timedelta
from annulation import Annulation, RemEntry
from ops import Ui_zd
#GLOBALS
SWITCH = 0  # mode flag checked against 2 in keyPressEvent (annulation mode)
class AddOps(QWidget, Ui_zd):
    def __init__(self, parent=None):
        """Build the frameless 'Opérations' window: load the Designer form,
        look up its widgets, wire signals and start the clock timer.

        Args:
            parent: optional parent widget, forwarded to QWidget.
        """
        super(AddOps, self).__init__(parent)
        self.setWindowModality(Qt.ApplicationModal)
        self.setupUi(self)
        self.setWindowTitle('Opérations')
        self.m = MyCursor()
        # Frameless translucent window; dragging is handled by the custom
        # title bar below (see moveWindow / mousePressEvent).
        flags = QtCore.Qt.WindowFlags(QtCore.Qt.FramelessWindowHint)
        self.setWindowFlags(flags)
        self.setAttribute(QtCore.Qt.WA_TranslucentBackground, True)
        self.setWindowIcon(QIcon('MJDB_ICON.ico'))
        # Widget lookups from the generated Ui_zd form.
        self.acc_list = self.findChild(QComboBox, 'acc_combo')
        self.sacc_list = self.findChild(QComboBox, 'acc_combo2')
        self.debit = self.findChild(QLineEdit, 'debit')
        self.credit = self.findChild(QLineEdit, 'credit')
        self.insert = self.findChild(QPushButton, 'new_op')
        self.con = self.findChild(QRadioButton, 'con')
        self.non_con = self.findChild(QRadioButton, 'non_con')
        self.w_debit = self.findChild(QLabel, 'w_debit')
        self.w_credit = self.findChild(QLabel, 'w_credit')
        self.date = self.findChild(QLabel, 'date')
        self.time = self.findChild(QLabel, 'time')
        self.ops_table = self.findChild(QTableWidget, 'ops_table')
        self.user = self.findChild(QLabel, 'user')
        self.wrong = self.findChild(QLabel, 'wrong')
        self.g_ballance = self.findChild(QLineEdit, 'b_sc')
        self.g_ballance_ = self.findChild(QLineEdit, 'b_g')
        self.general = self.findChild(QLineEdit, 'general_b')
        self.title = self.findChild(QFrame, 'title_bar')
        self.colse_btn = self.findChild(QPushButton, 'close_btn')  # attribute name typo kept for compatibility
        self.minimize = self.findChild(QPushButton, 'mini')
        self.date_label = self.findChild(QLabel, 'today')
        self.annulation = self.findChild(QCheckBox, 'annulation')
        # Signal wiring.
        self.insert.clicked.connect(self.add_ops)
        self.colse_btn.clicked.connect(lambda: self.close())
        self.minimize.clicked.connect(lambda: self.showMinimized())
        self.remarque_btn.clicked.connect(self.handlePrint)
        self.ops_table.setSortingEnabled(False)
        now = QDate.currentDate()
        self.date.setText(now.toString(Qt.ISODate))
        # Refresh the clock label once per second (see displaytime).
        timer = QTimer(self)
        timer.timeout.connect(self.displaytime)
        timer.start(1000)
        self.m = MyCursor()
        self.m.mycursor.execute('SELECT name FROM Accounts')
        for x in self.m.mycursor.fetchall():
            self.acc_list.addItems(x)
        self.acc_list.currentIndexChanged.connect(self.repeat)
        self.sacc_list.currentIndexChanged.connect(self.repeat1)
        self.con.setChecked(True)
        self.general_()
        # Drag support for the frameless window: moving on the title bar
        # repositions the window relative to the press position recorded by
        # mousePressEvent.
        def moveWindow(event):
            if event.buttons() == Qt.LeftButton:
                self.move(self.pos() + event.globalPos() - self.dragPos)
                self.dragPos = event.globalPos()
                event.accept()
        self.title_bar.mouseMoveEvent = moveWindow
        self.debit.textChanged.connect(self.debitChanged)
        self.credit.textChanged.connect(self.creditChanged)
        # Select-all on click so typing replaces the current amount.
        self.debit.mousePressEvent = lambda _: self.debit.selectAll()
        self.credit.mousePressEvent = lambda _: self.credit.selectAll()
        # CELL CLICKED
        self.ops_table.cellClicked.connect(self.cellOpClicked)
def handlePrint(self):
printer = QPrinter(QPrinter.HighResolution)
dialog = QtPrintSupport.QPrintDialog(printer, self)
if dialog.exec_() == QtPrintSupport.QPrintDialog.Accepted:
self.handlePaintRequest()
def handlePaintRequest(self):
rows = self.ops_table.selectionModel().selectedRows()
for self.index in sorted(rows):
pass
if len(rows) > 0:
for row in range(self.index.row(), self.index.row() + 1):
self.twi0 = self.ops_table.item(row, 0)
self.twi8 = self.ops_table.item(row, 1)
self.twi16 = self.ops_table.item(row, 2)
self.twi32 = self.ops_table.item(row, 3)
self.twi64 = self.ops_table.item(row, 4)
self.twi128 = self.ops_table.item(row, 5)
self.twi256 = self.ops_table.item(row, 6)
datetime2 = datetime.now().date().strftime('%d/%m/%Y')
datetime1 = datetime.now().strftime('%H:%M:%S')
printer = QPrinter()
printer.setOrientation(getattr(QPrinter, "Landscape"))
printer.setPaperSize(QPrinter.A5)
painter = QtGui.QPainter()
# Start painter
painter.begin(printer)
# Grab a widget you want to print
printer.setFullPage(True)
# Draw grabbed pixmap
painter.setRenderHint(QPainter.SmoothPixmapTransform)
painter.drawImage(0, 0, QImage('recus-01.jpg'))
painter.setPen(QColor('black'))
font = QFont('Mongolian Baiti')
font1 = QFont('Mongolian Baiti')
font.setPointSize(15)
font1.setPointSize(12)
painter.setFont(QFont(font1))
painter.drawText(170, 50, datetime2)
painter.drawText(170, 77, datetime1)
painter.drawText(140, 102, self.user.text())
painter.drawText(15, 320, self.acc_combo.currentText())
painter.drawText(15, 375, self.twi0.text())
painter.drawText(500, 330, self.twi8.text())
painter.drawText(560, 372, self.twi32.text())
painter.drawText(455, 417, self.twi64.text())
painter.drawText(850, 417, self.twi16.text())
painter.setFont(QFont(font))
painter.drawText(405, 525, self.twi128.text())
painter.drawText(705, 525, self.twi256.text())
painter.end()
    def cellOpClicked(self):
        # Clicking any operation cell switches the stacked widget back to page 0.
        self.stackedWidget.setCurrentIndex(0)
def general_(self):
self.m = MyCursor()
self.m.mycursor.execute("SELECT SUM(debit) FROM Ops WHERE type IN ('C', 'C / Annulation')")
result4 = self.m.mycursor.fetchone()[0]
self.o = MyCursor()
self.o.mycursor.execute("SELECT SUM(credit) FROM Ops WHERE type IN ('C', 'C / Annulation')")
result5 = self.o.mycursor.fetchone()[0]
if result4 is None and result5 is None:
pass
else:
re = result4 + result5
formatted_re = "{:,.2f}".format(re)
if re < 0:
self.general.setStyleSheet("""QLineEdit{border-radius:10px;
color: rgb(255, 0, 0);}""")
self.general.setText(formatted_re + ' DH')
elif re > 0:
self.general.setStyleSheet("""QLineEdit{border-radius:10px;
color: rgb(0, 170, 0);}""")
self.general.setText(formatted_re + ' DH')
elif re == 0:
self.general.setStyleSheet("""QLineEdit{border-radius:10px;
color: rgb(0, 0, 0);}""")
self.general.setText(formatted_re + ' DH')
def debitChanged(self):
if self.debit.text() == '':
self.debit.setText('0')
self.debit.selectAll()
def creditChanged(self):
if self.credit.text() == '':
self.credit.setText('0')
self.credit.selectAll()
    def mousePressEvent(self, event):
        # Remember the press position; the title-bar drag handler installed
        # in __init__ uses it to move the frameless window.
        self.dragPos = event.globalPos()
    def displaytime(self):
        # Timer slot (fires every second): refresh the clock label.
        time = QTime.currentTime()
        self.time.setText(time.toString(Qt.DefaultLocaleLongDate))
    def style_table(self):
        """Format and colour the debit/credit columns of ``ops_table``.

        Column 5 holds the debit (stored positive) and column 6 the credit
        (stored negative, see addfunc), so credits render red and debits
        green; zero amounts render black.

        NOTE(review): a row whose credit is positive and debit negative (both
        non-zero) is left unformatted by this elif chain — presumably such
        rows cannot occur given addfunc's sign convention; confirm.
        """
        for x in range(self.ops_table.rowCount()):
            g = float(self.ops_table.item(x, 6).text())  # credit column
            y = float(self.ops_table.item(x, 5).text())  # debit column
            if g < 0:
                # Negative credit: re-render column 6 in red with ' DH' suffix.
                formatted_float_debit = "{:,.2f}".format(g)
                self.ops_table.setItem(x, 6, QtWidgets.QTableWidgetItem(str(formatted_float_debit + ' DH')))
                self.ops_table.item(x, 6).setForeground(QtGui.QColor(255, 0, 0))
            elif y > 0:
                # Positive debit: re-render column 5 in green.
                formatted_float_debit1 = "{:,.2f}".format(y)
                self.ops_table.setItem(x, 5, QtWidgets.QTableWidgetItem(str(formatted_float_debit1 + ' DH')))
                self.ops_table.item(x, 5).setForeground(QtGui.QColor(0, 170, 0))
            elif g == 0 or y == 0:
                # Zero amount: render both columns in black.
                formatted_float_debit = "{:,.2f}".format(g)
                formatted_float_debit1 = "{:,.2f}".format(y)
                self.ops_table.setItem(x, 6, QtWidgets.QTableWidgetItem(str(formatted_float_debit + ' DH')))
                self.ops_table.item(x, 6).setForeground(QtGui.QColor(0, 0, 0))
                self.ops_table.setItem(x, 5, QtWidgets.QTableWidgetItem(str(formatted_float_debit1 + ' DH')))
                self.ops_table.item(x, 5).setForeground(QtGui.QColor(0, 0, 0))
def fill_table(self):
if self.sacc_list.currentText() == 'Tout':
time_r = datetime.now().strftime('%H-%M-%S')
r = " ".join([self.date_label.text(), str(time_r)])
date = datetime.strptime(r, '%d/%m/%Y %H-%M-%S')
self.m.mycursor.execute("""SELECT opID, reference, userID, created, type ,debit, credit FROM Ops
WHERE acc = %s and created between %s and %s""",
(self.acc_list.currentText(), date - timedelta(1), datetime.now()))
f = self.m.mycursor.fetchall()
self.ops_table.setRowCount(0)
for column_number, row_data in enumerate(f):
self.ops_table.insertRow(column_number)
for row_number, data in enumerate(row_data):
self.ops_table.setItem(column_number, row_number, QtWidgets.QTableWidgetItem(str(data)))
self.style_table()
elif self.sacc_list.currentText() == 'Selectionnez un sous-compte...':
time_r = datetime.now().strftime('%H-%M-%S')
r = " ".join([self.date_label.text(), str(time_r)])
date = datetime.strptime(r, '%d/%m/%Y %H-%M-%S')
self.m.mycursor.execute("""SELECT opID, reference, userID, created, type ,debit, credit FROM Ops
WHERE acc = %s and created between %s and %s""",
(self.acc_list.currentText(), date - timedelta(1), datetime.now()))
f = self.m.mycursor.fetchall()
self.ops_table.setRowCount(0)
for column_number, row_data in enumerate(f):
self.ops_table.insertRow(column_number)
for row_number, data in enumerate(row_data):
self.ops_table.setItem(column_number, row_number, QtWidgets.QTableWidgetItem(str(data)))
self.style_table()
else:
time_r = datetime.now().strftime('%H-%M-%S')
r = " ".join([self.date_label.text(), str(time_r)])
date = datetime.strptime(r, '%d/%m/%Y %H-%M-%S')
self.m.mycursor.execute("""SELECT opID, reference, userID, created, type ,debit, credit FROM Ops
WHERE opID = %s and acc=%s and created between %s and %s""",
(self.sacc_list.currentText(), self.acc_list.currentText(), date - timedelta(1), datetime.now()))
f = self.m.mycursor.fetchall()
self.ops_table.setRowCount(0)
for column_number, row_data in enumerate(f):
self.ops_table.insertRow(column_number)
for row_number, data in enumerate(row_data):
self.ops_table.setItem(column_number, row_number, QtWidgets.QTableWidgetItem(str(data)))
self.style_table()
time_r = datetime.now().strftime('%H-%M-%S')
r = " ".join([self.date_label.text(), str(time_r)])
date = datetime.strptime(r, '%d/%m/%Y %H-%M-%S')
self.m = MyCursor()
self.m.mycursor.execute("SELECT SUM(debit) FROM Ops WHERE opID=%s and acc=%s and created between %s and %s", (self.sacc_list.currentText(), self.acc_list.currentText(),
date - timedelta(1), datetime.now().date(),))
result = self.m.mycursor.fetchone()[0]
self.o = MyCursor()
self.o.mycursor.execute("SELECT SUM(credit) FROM Ops WHERE opID=%s and acc=%s and created between %s and %s", (self.sacc_list.currentText(), self.acc_list.currentText(),
date - timedelta(1), datetime.now().date(),))
result1 = self.o.mycursor.fetchone()[0]
if result is None and result1 is None:
l_sc = 0
self.g_ballance.setStyleSheet("""border-radius:10px;
color: rgb(0, 0, 0);""")
formatted_float_debit = "{:,.2f}".format(l_sc)
self.g_ballance.setText(str(formatted_float_debit) + ' DH')
else:
l_sc = result + result1
if l_sc < 0:
self.g_ballance.setStyleSheet("""border-radius:10px;
color: rgb(255, 0, 0);""")
formatted_float_debit = "{:,.2f}".format(l_sc)
self.g_ballance.setText(str(formatted_float_debit) + ' DH')
elif l_sc > 0:
self.g_ballance.setStyleSheet("""border-radius:10px;
color: rgb(0, 170, 0);""")
formatted_float_debit = "{:,.2f}".format(l_sc)
self.g_ballance.setText(str(formatted_float_debit) + ' DH')
    def repeat1(self):
        # Slot for sacc_list.currentIndexChanged: refresh table and balance.
        self.fill_table()
def soldeSCompte(self):
time_r = datetime.now().strftime('%H-%M-%S')
r = " ".join([self.date_label.text(), str(time_r)])
date = datetime.strptime(r, '%d/%m/%Y %H-%M-%S')
self.m = MyCursor()
self.m.mycursor.execute("SELECT SUM(debit) FROM Ops WHERE opID=%s and acc=%s and created between %s and %s", (self.sacc_list.currentText(), self.acc_list.currentText(), datetime.now().date() - timedelta(1), datetime.now().date(),))
result = self.m.mycursor.fetchone()[0]
self.m.mycursor.execute("SELECT SUM(credit) FROM Ops WHERE opID=%s and acc=%s and created between %s and %s", (self.sacc_list.currentText(), self.acc_list.currentText(), datetime.now().date() - timedelta(1), datetime.now().date(),))
result1 = self.m.mycursor.fetchone()[0]
if result is None and result1 is None:
self.g_ballance.setStyleSheet("color: rbg(0, 0, 0);")
fr1 = 0
formatted_float_debit = "{:,.2f}".format(fr1)
self.g_ballance.setText(str(formatted_float_debit) + ' DH')
else:
self.fr12 = result + result1
if self.fr12 < 0:
self.g_ballance.setStyleSheet("color: rgb(255, 0, 0);")
formatted_float_debit = "{:,.2f}".format(self.fr12)
self.g_ballance.setText(str(formatted_float_debit) + ' DH')
elif self.fr12 > 0:
self.g_ballance.setStyleSheet("color: rgb(0, 170, 0);")
formatted_float_debit = "{:,.2f}".format(self.fr12)
self.g_ballance.setText(str(formatted_float_debit) + ' DH')
return self.fr12
def soldeCompte(self):
self.m.mycursor.execute("SELECT SUM(debit) FROM Ops WHERE acc=%s", (self.acc_list.currentText(),))
result2 = self.m.mycursor.fetchone()[0]
self.m.mycursor.execute("SELECT SUM(credit) FROM Ops WHERE acc=%s", (self.acc_list.currentText(),))
result3 = self.m.mycursor.fetchone()[0]
if result2 is None and result3 is None:
pass
else:
self.fr = result2 + result3
self.m.mycursor.execute("UPDATE Accounts SET balance=%s WHERE name=%s", (self.fr, self.acc_list.currentText(),))
self.m.db.commit()
return self.fr
def keyPressEvent(self, event):
r = RemEntry()
switch_rem = r.switch
if event.key() == Qt.Key_Return:
self.add_ops()
elif event.key() == Qt.Key_Return and SWITCH == 2:
self.anul()
def addfunc(self, sacc, user, debit1, credit1, anul, motif):
time_r = datetime.now().strftime('%H-%M-%S')
r = " ".join([self.date_label.text(), str(time_r)])
date = datetime.strptime(r, '%d/%m/%Y %H-%M-%S')
debit1 = float(self.debit.text())
credit1 = (float(self.credit.text()) * (-1))
if self.soldeCompte() is None and self.soldeSCompte() is None:
m = MyCursor()
m.mycursor.execute(
"INSERT INTO Ops (opID, userID, created, type, debit, credit, soldeCompte, soldeSCompte, acc, motif) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
(sacc, user, date, self.type + anul, debit1, credit1, debit1+credit1, debit1+credit1,
str(self.acc_list.currentText()), motif,))
m.db.commit()
else:
if self.soldeSCompte() is None:
m = MyCursor()
m.mycursor.execute(
"INSERT INTO Ops (opID, userID, created, type, debit, credit, soldeCompte, soldeSCompte, acc, motif) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
(sacc, user, date, self.type + anul, debit1, credit1, self.soldeCompte()+debit1+credit1, debit1+credit1,
str(self.acc_list.currentText()), motif,))
m.db.commit()
else:
m = MyCursor()
m.mycursor.execute(
"INSERT INTO Ops (opID, userID, created, type, debit, credit, soldeCompte, soldeSCompte, acc, motif) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
(sacc, user, date, self.type + anul, debit1, credit1, self.soldeCompte()+debit1+credit1, self.soldeSCompte()+debit1+credit1,
str(self.acc_list.currentText()), motif,))
m.db.commit()
self.soldeSCompte()
self.soldeCompte()
m = MyCursor()
m.mycursor.execute('SELECT opID, reference, userID, created, type ,debit, credit FROM ops WHERE opID = %s',
(self.sacc_list.currentText(),))
f = m.mycursor.fetchall()
self.ops_table.setRowCount(0)
for column_number, row_data in enumerate(f):
self.ops_table.insertRow(column_number)
for row_number, data in enumerate(row_data):
self.ops_table.setItem(column_number, row_number, QtWidgets.QTableWidgetItem(str(data)))
for x in range(self.ops_table.rowCount()):
self.ops_table.item(x, 6).setForeground(QtGui.QColor(255, 0, 0))
self.ops_table.item(x, | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Implements the beam search sampler."""
import warnings
import mxnet as mx
import abc
from mxnet.gluon import HybridBlock
from typing import Callable, Optional
from .layers import get_activation
# Large finite sentinel magnitudes — presumably used to mask invalid beam
# candidates elsewhere in this module (usage not visible in this chunk).
LARGE_POSITIVE_FLOAT = 1e18
LARGE_NEGATIVE_FLOAT = -LARGE_POSITIVE_FLOAT
class BaseStepDecoder(abc.ABC):
    """Base class of a step decoder.

    You may inherit `BaseStepDecoder` and implement the required methods:
    ``state_batch_axis``, ``init_states`` and ``__call__``.
    """
    @property
    @abc.abstractmethod
    def state_batch_axis(self):
        """Batch axis of the state

        i --> axis of the batch dimension
        None --> no batch axis in the state
        """
        raise NotImplementedError()
    @property
    def data_batch_axis(self):
        # Input data is batch-major by default; subclasses may override.
        return 0
    @abc.abstractmethod
    def init_states(self, **kwargs):
        # Build the initial decoding states (layout is subclass-defined).
        raise NotImplementedError()
    @abc.abstractmethod
    def __call__(self, data, states):
        """The common signature of the sequence decoder

        Parameters
        ----------
        data
            Input of the current decoding step.
        states
            Decoding states carried over from the previous step.

        Returns
        -------
        out
            Output of this decoding step.
        new_states
            Updated decoding states for the next step.
        """
        raise NotImplementedError
# TODO(sxjscience)
# 1. Add ParticleFilter Sampler
class BeamSearchScorer(HybridBlock):
    r"""Score function used in beam search.

    Implements the length-penalized score function first used in the GNMT paper::

        scores = (log_probs + scores) / length_penalty
        length_penalty = (\frac{K + length}{K + 1})^\alpha

    See Also
        "Google's Neural Machine Translation System: Bridging the Gap between Human and
        Machine Translation (https://arxiv.org/pdf/1609.08144.pdf)"

    Parameters
    ----------
    alpha
        If `alpha < 1.0`, it favors shorter sequences
        If `alpha >= 1.0`, it favors longer sequences
    K
        Parameter in the length-penalty formula above
    from_logits
        Whether input is a log probability (usually from log_softmax) instead
        of unnormalized numbers.
    temperature
        The temperature of the scoring function
    """
    def __init__(self, alpha: float = 1.0,
                 K: float = 5.0,
                 from_logits: bool = False,
                 temperature: float = 1.0):
        super().__init__()
        self._alpha = float(alpha)
        self._K = K
        self._temperature = temperature
        self._from_logits = from_logits
    def __call__(self, outputs, scores, step):  # pylint: disable=arguments-differ
        """Compute new scores of each candidate

        Parameters
        ----------
        outputs : mx.np.ndarray
            If from_logits is True, outputs is the log probabilities of the candidates.
            Shape (d1, d2, ..., dn, V).
            Otherwise, outputs is the unnormalized outputs from predictor of the same shape,
            before softmax/log_softmax.
        scores : mx.np.ndarray
            The original scores of the beams. Shape (d1, d2, ..., dn)
        step : mx.np.ndarray
            Step to calculate the score function. It starts from 1. The shape is a scalar.

        Returns
        -------
        candidate_scores : mx.np.ndarray
            The scores of all the candidates. Shape (d1, d2, ..., dn, V), where V is the size
            of the vocabulary.
        """
        return super().__call__(outputs, scores, step)
    def forward(self, outputs, scores, step):  # pylint: disable=arguments-differ
        if not self._from_logits:
            # Convert raw predictor outputs into temperature-scaled log probs.
            outputs = mx.npx.log_softmax(outputs / self._temperature)
        if self._alpha != 0.0:
            step = step.astype(mx.np.float32)
            # Undo the length penalty applied at the previous step (prev_lp);
            # at step 1 no penalty has been applied yet, so prev_lp == 1.
            prev_lp = (self._K + step - 1) ** self._alpha / ((self._K + 1) ** self._alpha)
            prev_lp = prev_lp * (step != 1).astype(mx.np.float32) + (step == 1).astype(mx.np.float32)
            # Penalty for the current length, then re-normalize the
            # accumulated log probability by it.
            lp = (self._K + step) ** self._alpha / ((self._K + 1) ** self._alpha)
            scores = scores * prev_lp
            candidate_scores = (outputs + mx.np.expand_dims(scores, axis=-1)) / lp
        else:
            # alpha == 0: plain cumulative log probability, no length penalty.
            candidate_scores = outputs + mx.np.expand_dims(scores, axis=-1)
        return candidate_scores
    def __repr__(self):
        s = '{name}(alpha={alpha}, K={K}, from_logits={from_logits})'
        return s.format(name=self.__class__.__name__,
                        alpha=self._alpha,
                        K=self._K,
                        from_logits=self._from_logits)
def _expand_to_beam_size(data, beam_size, batch_size, state_batch_axis=None):
    """Tile all the states to have batch_size * beam_size on the batch axis.

    Parameters
    ----------
    data : A single mx.np.ndarray or nested container with mx.np.ndarray
        Each array must have size *batch_size* on its batch axis.
    beam_size : int
        Beam size
    batch_size : int
        Batch size
    state_batch_axis : Nested structure of dictionary, default None.
        Descriptors for states, usually from decoder's ``state_batch_axis()``.
        When None, this method assumes that the batch axis is the first dimension.

    Returns
    -------
    new_states : Object that contains mx.np.ndarray
        Same nesting as *data*, with every array tiled to
        batch_size * beam_size on its batch axis.
    """
    if isinstance(data, (list, tuple)):
        if state_batch_axis is None:
            return [_expand_to_beam_size(item, beam_size, batch_size, None)
                    for item in data]
        # TODO(sxjscience) Better Exception Handling
        return [_expand_to_beam_size(item, beam_size, batch_size, axis)
                for item, axis in zip(data, state_batch_axis)]
    if isinstance(data, dict):
        if state_batch_axis is None:
            return {key: _expand_to_beam_size(val, beam_size, batch_size, None)
                    for key, val in data.items()}
        return {key: _expand_to_beam_size(val, beam_size, batch_size, state_batch_axis[key])
                for key, val in data.items()}
    if isinstance(data, mx.np.ndarray):
        batch_axis = 0 if state_batch_axis is None else state_batch_axis
        if data.shape[batch_axis] != batch_size:
            raise ValueError('The batch size of all the inner elements in states must be '
                             '{}, Found shape={}, inferred batch axis={}'.format(batch_size, data.shape, batch_axis))
        tiled_shape = list(data.shape)
        tiled_shape[batch_axis] = batch_size * beam_size
        tiled_shape = tuple(tiled_shape)
        # Insert a beam axis next to the batch axis, broadcast it out, then
        # flatten (batch, beam) back into a single axis.
        bcast_shape = tiled_shape[:batch_axis] + (batch_size, beam_size) + tiled_shape[(batch_axis + 1):]
        return mx.np.expand_dims(data, batch_axis + 1).broadcast_to(bcast_shape).reshape(tiled_shape)
    if data is None:
        return None
    raise NotImplementedError
def _choose_states(states, indices, state_batch_axis=None):
    """Gather a subset of every state array along its batch axis.

    Parameters
    ----------
    states : Object contains mx.np.ndarray
        Arbitrarily nested lists/tuples/dicts of arrays.
    indices : mx.np.ndarray
        Indices of the states to take. Shape (N,).
    state_batch_axis
        Descriptors for states, it is generated from decoder's ``state_batch_axis``.
        When None, this method assumes that the batch axis is the first dimension.

    Returns
    -------
    new_states : Object contains mx.np.ndarray
        Same nesting as *states*, with each array indexed along its batch axis
        so the result has size N there.
    """
    if isinstance(states, (list, tuple)):
        if state_batch_axis is None:
            return [_choose_states(sub, indices, None) for sub in states]
        return [_choose_states(sub, indices, axis)
                for sub, axis in zip(states, state_batch_axis)]
    if isinstance(states, dict):
        if state_batch_axis is None:
            return {key: _choose_states(val, indices, None)
                    for key, val in states.items()}
        return {key: _choose_states(val, indices, state_batch_axis[key])
                for key, val in states.items()}
    if isinstance(states, mx.np.ndarray):
        axis = 0 if state_batch_axis is None else state_batch_axis
        return mx.np.take(states, indices, axis=axis)
    raise TypeError('The type of the states is not supported, type(states) = {}'.format(type(states)))
class _BeamSearchStepUpdate(HybridBlock):
    def __init__(self, beam_size, vocab_size, eos_id, scorer, state_batch_axis,
                 stochastic=False):
        """
        Parameters
        ----------
        beam_size : int
            Number of beams kept per batch element.
        vocab_size : int
            Size of the candidate vocabulary at each step.
        eos_id : int
            Id of the end-of-sentence token; may be None (must not be negative).
        scorer : BeamSearchScorer
            Scoring function applied to the candidates.
        state_batch_axis :
            Batch-axis descriptors for the decoder states.
        stochastic : bool
            Whether to use stochastic (Gumbel-perturbed) beam search.
        """
        super().__init__()
        self._beam_size = beam_size
        self._vocab_size = vocab_size
        self._eos_id = eos_id
        self._scorer = scorer
        self._state_batch_axis = state_batch_axis
        self.stochastic = stochastic
        assert eos_id is None or eos_id >= 0, 'eos_id cannot be negative! Received eos_id={}'.format(eos_id)
    def gumbel_with_maximum(self, phi, T, dim=-1):
        """Calculate the Gumbel with maximum.

        Perturbs *phi* with i.i.d. Gumbel noise and shifts the result (via
        shift_gumbel_maximum) so its maximum along *dim* matches *T*.

        Parameters
        ----------
        phi : mx.np.ndarray
            Shape (batch_size, beam_size, L).
        T : mx.np.ndarray
            The previous scores. Shape (batch_size, beam_size)
        dim : int
            Axis over which the maximum is taken (default -1).
        """
        g_phi = phi + mx.np.random.gumbel(mx.np.zeros_like(phi))
        Z = g_phi.max(dim)
        g = self.shift_gumbel_maximum(g_phi, T, dim, Z=Z)
        return g
    def shift_gumbel_maximum(self, g_phi, T, axis=-1, Z=None):
        """Shift Gumbel samples so their maximum along *axis* becomes *T*.

        Uses a numerically-stable form: with u = T - g_phi + log1p(-exp(g_phi - Z)),
        returns T - relu(u) - log1p(exp(-|u|)). The 1e-5 inside log1p guards
        against exp(g_phi - Z) == 1 at the argmax.

        Parameters
        ----------
        g_phi : mx.np.ndarray
            Shape (batch_size, beam_size, L).
        T : mx.np.ndarray
            The previous scores. Shape (batch_size, beam_size)
        axis
            The axis
        Z
            Precomputed maximum of ``g_phi`` along ``axis``; recomputed when None.
        """
        if Z is None:
            Z = g_phi.max(axis=axis)
        # Reshape T and Z for broadcasting against g_phi (npx.reshape special
        # code -4 — NOTE(review): confirm against mxnet npx.reshape semantics).
        T_ = mx.npx.reshape(T, (-4, 1))
        Z_ = mx.npx.reshape(Z, (-4, 1))
        u = T_ - g_phi + mx.np.log1p(-mx.np.exp(g_phi - Z_) + 1e-5)
        return T_ - mx.npx.relu(u) - mx.np.log1p(mx.np.exp(-mx.np.abs(u)))
def forward(self, samples, valid_length, outputs, scores, step, beam_alive_mask, # pylint: disable=arguments-differ
states, batch_shift):
"""
Parameters
----------
F
samples : mx.np.ndarray
The current samples generated by beam search.
Shape (batch_size, beam_size, L).
valid_length : mx.np.ndarray
The current valid lengths of the samples
outputs : mx.np.ndarray
Outputs from predictor. If from_logits was set to True in scorer, then it's the
log probability of the current step. Else, it's the unnormalized outputs before
softmax or log_softmax.
Shape (batch_size * beam_size, V).
scores : mx.np.ndarray
The previous scores. Shape (batch_size, beam_size)
step : mx.np.ndarray
The current step for doing beam search. Begins from 1. Shape ()
beam_alive_mask : mx.np.ndarray
Shape (batch_size, beam_size)
states : nested structure of mx.np.ndarray
Each mx.np.ndarray should have shape (N, ...) when state_info is None,
or same as the layout in state_info when it's not None.
batch_shift : mx.np.ndarray
Contains [0, beam_size, 2 * | |
up and don't annotate anything.
# Our line numbers are not reliable enough to be asserting on.
if start_indent is None:
return
cur_line = start_line + 1
end_line = cur_line
# After this loop, function body will be lines [start_line, end_line)
while cur_line < len(self.source):
cur_indent = self.indentation_level(cur_line)
if cur_indent is None:
# Consume the line, but don't mark it as belonging to the function yet.
cur_line += 1
elif start_indent is not None and cur_indent > start_indent:
# A non-blank line that belongs to the function.
cur_line += 1
end_line = cur_line
else:
# We reached a line outside the function definition.
break
is_typed = defn.type is not None
for line in range(start_line, end_line):
old_indent, _ = self.lines_covered[line]
# If there was an old indent level for this line, and the new
# level isn't increasing the indentation, ignore it.
# This is to be defensive against funniness in our line numbers,
# which are not always reliable.
if old_indent <= start_indent:
self.lines_covered[line] = (start_indent, is_typed)
# Visit the body, in case there are nested functions
super().visit_func_def(defn)
class LineCoverageReporter(AbstractReporter):
    """Exact line coverage reporter.

    Writes a JSON dictionary with one field 'lines' to 'coverage.json' in
    the specified report directory. That field maps each source file's
    absolute pathname to the list of line numbers belonging to typed
    functions in that file.
    """
    def __init__(self, reports: Reports, output_dir: str) -> None:
        super().__init__(reports, output_dir)
        self.lines_covered = {}  # type: Dict[str, List[int]]

    def on_file(self,
                tree: MypyFile,
                modules: Dict[str, MypyFile],
                type_map: Dict[Expression, Type],
                options: Options) -> None:
        """Record the typed lines of *tree* under its absolute path."""
        with open(tree.path) as source_file:
            source_lines = source_file.readlines()
        coverage_visitor = LineCoverageVisitor(source_lines)
        tree.accept(coverage_visitor)
        # lines_covered is 0-based; report 1-based line numbers.
        typed_lines = [index + 1
                       for index, (_, typed) in enumerate(coverage_visitor.lines_covered)
                       if typed]
        self.lines_covered[os.path.abspath(tree.path)] = typed_lines

    def on_finish(self) -> None:
        """Dump the accumulated mapping as JSON."""
        report_path = os.path.join(self.output_dir, 'coverage.json')
        with open(report_path, 'w') as report_file:
            json.dump({'lines': self.lines_covered}, report_file)
# Expose this reporter under the 'linecoverage' report name.
register_reporter('linecoverage', LineCoverageReporter)
class FileInfo:
    """Per-file counters of lines in each mypy precision category."""

    def __init__(self, name: str, module: str) -> None:
        self.name = name
        self.module = module
        # One counter per precision category, in stats order.
        self.counts = [0] * len(stats.precision_names)

    def total(self) -> int:
        """Total number of counted lines across all categories."""
        return sum(self.counts)

    def attrib(self) -> Dict[str, str]:
        """Counts as a {precision-name: count-string} mapping, sorted by name."""
        pairs = sorted(zip(stats.precision_names, self.counts))
        return {precision: str(count) for precision, count in pairs}
class MemoryXmlReporter(AbstractReporter):
    """Internal reporter that generates XML in memory.

    This is used by all other XML-based reporters to avoid duplication.
    After each on_file/on_finish call, ``last_xml`` holds the most recently
    generated (schema-validated) document, or None.
    """
    def __init__(self, reports: Reports, output_dir: str) -> None:
        super().__init__(reports, output_dir)
        # Stylesheet/CSS paths shipped with mypy's data directory.
        self.xslt_html_path = os.path.join(reports.data_dir, 'xml', 'mypy-html.xslt')
        self.xslt_txt_path = os.path.join(reports.data_dir, 'xml', 'mypy-txt.xslt')
        self.css_html_path = os.path.join(reports.data_dir, 'xml', 'mypy-html.css')
        xsd_path = os.path.join(reports.data_dir, 'xml', 'mypy.xsd')
        # Schema used to validate every document before it is published.
        self.schema = etree.XMLSchema(etree.parse(xsd_path))
        self.last_xml = None  # type: Optional[Any]
        self.files = []  # type: List[FileInfo]
    # XML doesn't like control characters, but they are sometimes
    # legal in source code (e.g. comments, string literals).
    # Tabs (#x09) are allowed in XML content.
    control_fixer = str.maketrans(
        ''.join(chr(i) for i in range(32) if i != 9), '?' * 31)  # type: Final
    def on_file(self,
                tree: MypyFile,
                modules: Dict[str, MypyFile],
                type_map: Dict[Expression, Type],
                options: Options) -> None:
        """Build a per-file <mypy-report-file> document for *tree*."""
        self.last_xml = None
        path = os.path.relpath(tree.path)
        if should_skip_path(path):
            return
        visitor = stats.StatisticsVisitor(inferred=True,
                                          filename=tree.fullname,
                                          modules=modules,
                                          typemap=type_map,
                                          all_nodes=True)
        tree.accept(visitor)
        root = etree.Element('mypy-report-file', name=path, module=tree._fullname)
        doc = etree.ElementTree(root)
        file_info = FileInfo(path, tree._fullname)
        # One <line> element per source line, tagged with its precision.
        for lineno, line_text in iterate_python_lines(path):
            status = visitor.line_map.get(lineno, stats.TYPE_EMPTY)
            file_info.counts[status] += 1
            etree.SubElement(root, 'line',
                             any_info=self._get_any_info_for_line(visitor, lineno),
                             content=line_text.rstrip('\n').translate(self.control_fixer),
                             number=str(lineno),
                             precision=stats.precision_names[status])
        # Assumes a layout similar to what XmlReporter uses.
        xslt_path = os.path.relpath('mypy-html.xslt', path)
        transform_pi = etree.ProcessingInstruction('xml-stylesheet',
                'type="text/xsl" href="%s"' % pathname2url(xslt_path))
        root.addprevious(transform_pi)
        self.schema.assertValid(doc)
        self.last_xml = doc
        self.files.append(file_info)
    @staticmethod
    def _get_any_info_for_line(visitor: stats.StatisticsVisitor, lineno: int) -> str:
        # Summarize the kinds of Any types recorded for this line, if any.
        if lineno in visitor.any_line_map:
            result = "Any Types on this line: "
            counter = collections.Counter()  # type: typing.Counter[int]
            for typ in visitor.any_line_map[lineno]:
                counter[typ.type_of_any] += 1
            for any_type, occurrences in counter.items():
                result += "\n{} (x{})".format(type_of_any_name_map[any_type], occurrences)
            return result
        else:
            return "No Anys on this line!"
    def on_finish(self) -> None:
        """Build the <mypy-report-index> document over all collected files."""
        self.last_xml = None
        # index_path = os.path.join(self.output_dir, 'index.xml')
        output_files = sorted(self.files, key=lambda x: x.module)
        root = etree.Element('mypy-report-index', name='index')
        doc = etree.ElementTree(root)
        for file_info in output_files:
            etree.SubElement(root, 'file',
                             file_info.attrib(),
                             module=file_info.module,
                             name=file_info.name,
                             total=str(file_info.total()))
        xslt_path = os.path.relpath('mypy-html.xslt', '.')
        transform_pi = etree.ProcessingInstruction('xml-stylesheet',
                'type="text/xsl" href="%s"' % pathname2url(xslt_path))
        root.addprevious(transform_pi)
        self.schema.assertValid(doc)
        self.last_xml = doc
# Expose the in-memory XML reporter; it requires lxml for XML generation.
register_reporter('memory-xml', MemoryXmlReporter, needs_lxml=True)
def get_line_rate(covered_lines: int, total_lines: int) -> str:
    """Return covered/total as a 4-decimal string; '1.0' for an empty file."""
    if not total_lines:
        return str(1.0)
    return '{:.4f}'.format(covered_lines / total_lines)
class CoberturaPackage(object):
    """Container for XML and statistics mapping python modules to Cobertura package."""
    def __init__(self, name: str) -> None:
        self.name = name
        self.classes = {}  # type: Dict[str, Any]
        self.packages = {}  # type: Dict[str, CoberturaPackage]
        self.total_lines = 0
        self.covered_lines = 0
    def as_xml(self) -> Any:
        """Serialize this package (its classes, then nested packages) to XML."""
        package_element = etree.Element('package',
                                        complexity='1.0',
                                        name=self.name)
        package_element.attrib['branch-rate'] = '0'
        package_element.attrib['line-rate'] = get_line_rate(self.covered_lines, self.total_lines)
        classes_element = etree.SubElement(package_element, 'classes')
        for class_name in sorted(self.classes):
            classes_element.append(self.classes[class_name])
        self.add_packages(package_element)
        return package_element
    def add_packages(self, parent_element: Any) -> None:
        """Append a <packages> child for nested packages, if any exist."""
        if not self.packages:
            return
        packages_element = etree.SubElement(parent_element, 'packages')
        for package in sorted(self.packages.values(), key=attrgetter('name')):
            packages_element.append(package.as_xml())
class CoberturaXmlReporter(AbstractReporter):
    """Reporter for generating Cobertura compliant XML."""
    def __init__(self, reports: Reports, output_dir: str) -> None:
        super().__init__(reports, output_dir)
        self.root = etree.Element('coverage',
                                  timestamp=str(int(time.time())),
                                  version=__version__)
        self.doc = etree.ElementTree(self.root)
        # Root of the package tree; per-module packages hang off this.
        self.root_package = CoberturaPackage('.')
    def on_file(self,
                tree: MypyFile,
                modules: Dict[str, MypyFile],
                type_map: Dict[Expression, Type],
                options: Options) -> None:
        """Collect per-line type-precision stats for one file into a <class> element."""
        path = os.path.relpath(tree.path)
        visitor = stats.StatisticsVisitor(inferred=True,
                                          filename=tree.fullname,
                                          modules=modules,
                                          typemap=type_map,
                                          all_nodes=True)
        tree.accept(visitor)
        class_name = os.path.basename(path)
        file_info = FileInfo(path, tree._fullname)
        class_element = etree.Element('class',
                                      complexity='1.0',
                                      filename=path,
                                      name=class_name)
        etree.SubElement(class_element, 'methods')
        lines_element = etree.SubElement(class_element, 'lines')
        # tokenize.open honors the file's PEP 263 encoding declaration.
        with tokenize.open(path) as input_file:
            class_lines_covered = 0
            class_total_lines = 0
            for lineno, _ in enumerate(input_file, 1):
                status = visitor.line_map.get(lineno, stats.TYPE_EMPTY)
                hits = 0
                branch = False
                # Empty lines are excluded from the totals entirely.
                if status == stats.TYPE_EMPTY:
                    continue
                class_total_lines += 1
                # Any-typed lines count as uncovered; everything else is a hit.
                if status != stats.TYPE_ANY:
                    class_lines_covered += 1
                    hits = 1
                # Imprecise lines are rendered as half-covered branches.
                if status == stats.TYPE_IMPRECISE:
                    branch = True
                file_info.counts[status] += 1
                line_element = etree.SubElement(lines_element, 'line',
                                                branch=str(branch).lower(),
                                                hits=str(hits),
                                                number=str(lineno),
                                                precision=stats.precision_names[status])
                if branch:
                    line_element.attrib['condition-coverage'] = '50% (1/2)'
            class_element.attrib['branch-rate'] = '0'
            class_element.attrib['line-rate'] = get_line_rate(class_lines_covered,
                                                              class_total_lines)
        # parent_module is set to whichever module contains this file. For most files, we want
        # to simply strip the last element off of the module. But for __init__.py files,
        # the module == the parent module.
        parent_module = file_info.module.rsplit('.', 1)[0]
        if file_info.name.endswith('__init__.py'):
            parent_module = file_info.module
        if parent_module not in self.root_package.packages:
            self.root_package.packages[parent_module] = CoberturaPackage(parent_module)
        current_package = self.root_package.packages[parent_module]
        packages_to_update = [self.root_package, current_package]
        # Attribute the file's counts to both the root and its own package.
        for package in packages_to_update:
            package.total_lines += class_total_lines
            package.covered_lines += class_lines_covered
        current_package.classes[class_name] = class_element
    def on_finish(self) -> None:
        """Finalize overall rates and write cobertura.xml to the output directory."""
        self.root.attrib['line-rate'] = get_line_rate(self.root_package.covered_lines,
                                                      self.root_package.total_lines)
        self.root.attrib['branch-rate'] = '0'
        sources = etree.SubElement(self.root, 'sources')
        source_element = etree.SubElement(sources, 'source')
        source_element.text = os.getcwd()
        self.root_package.add_packages(self.root)
        out_path = os.path.join(self.output_dir, 'cobertura.xml')
        self.doc.write(out_path, encoding='utf-8', pretty_print=True)
        print('Generated Cobertura report:', os.path.abspath(out_path))
# Cobertura-format coverage XML; requires lxml for element serialization.
register_reporter('cobertura-xml', CoberturaXmlReporter, needs_lxml=True)
class AbstractXmlReporter(AbstractReporter):
    """Internal abstract class for reporters that work via XML."""
    def __init__(self, reports: Reports, output_dir: str) -> None:
        super().__init__(reports, output_dir)
        # Register (or reuse) the memory-xml reporter as our data source.
        memory_reporter = reports.add_report('memory-xml', '<memory>')
        # The dependency will be called first.
        self.memory_xml = cast(MemoryXmlReporter, memory_reporter)
class XmlReporter(AbstractXmlReporter):
    """Public reporter that exports XML.
    The produced XML files contain a reference to the absolute path
    of the html transform, so they will be locally viewable in a browser.
    However, there is a bug in Chrome and all other WebKit-based browsers
    that makes it fail from file:// URLs but work on http:// URLs.
    """
    def on_file(self,
                tree: MypyFile,
                modules: Dict[str, MypyFile],
                type_map: Dict[Expression, Type],
                options: Options) -> None:
        """Serialize the per-file XML produced by the memory-xml dependency."""
        last_xml = self.memory_xml.last_xml
        # The memory reporter ran first; nothing to write if it skipped this file.
        if last_xml is None:
            return
        path = os.path.relpath(tree.path)
        # Skip files outside the current directory tree.
        if path.startswith('..'):
            return
        out_path = os.path.join(self.output_dir, 'xml', path + '.xml')
        stats.ensure_dir_exists(os.path.dirname(out_path))
        last_xml.write(out_path, encoding='utf-8')
    def on_finish(self) -> None:
        """Write the index document plus the static XSLT/CSS assets."""
        last_xml = self.memory_xml.last_xml
        assert last_xml is not None
        out_path = os.path.join(self.output_dir, 'index.xml')
        out_xslt = os.path.join(self.output_dir, 'mypy-html.xslt')
        out_css = os.path.join(self.output_dir, 'mypy-html.css')
        last_xml.write(out_path, encoding='utf-8')
        shutil.copyfile(self.memory_xml.xslt_html_path, out_xslt)
        shutil.copyfile(self.memory_xml.css_html_path, out_css)
        print('Generated XML report:', os.path.abspath(out_path))
# Plain XML export (index + per-file documents), browser-viewable via XSLT.
register_reporter('xml', XmlReporter, needs_lxml=True)
class XsltHtmlReporter(AbstractXmlReporter):
"""Public reporter that exports HTML via XSLT.
This is slightly different than running `xsltproc` on the .xml files,
because it passes a parameter to rewrite the links.
"""
    def __init__(self, reports: Reports, output_dir: str) -> None:
        super().__init__(reports, output_dir)
        # Compile the XSLT stylesheet once; reused for every file in on_file().
        self.xslt_html = etree.XSLT(etree.parse(self.memory_xml.xslt_html_path))
        # Stylesheet parameter — presumably rewrites links to .html targets;
        # confirm against mypy-html.xslt.
        self.param_html = etree.XSLT.strparam('html')
    def on_file(self,
                tree: MypyFile,
                modules: Dict[str, MypyFile],
                type_map: Dict[Expression, Type],
                options: Options) -> None:
        """Transform this file's memory-xml document to HTML and write it."""
        last_xml = self.memory_xml.last_xml
        # Nothing to render if the memory reporter skipped this file.
        if last_xml is None:
            return
        path = os.path.relpath(tree.path)
        # Skip files outside the current directory tree.
        if path.startswith('..'):
            return
        out_path = os.path.join(self.output_dir, 'html', path + '.html')
        stats.ensure_dir_exists(os.path.dirname(out_path))
        transformed_html = bytes(self.xslt_html(last_xml, ext=self.param_html))
        with open(out_path, 'wb') as out_file:
            out_file.write(transformed_html)
def on_finish(self) -> None:
last_xml = self.memory_xml.last_xml
assert last_xml is not None
out_path = os.path.join(self.output_dir, 'index.html')
| |
#coding:utf-8
from __future__ import division
import os
import sys
import readline
import time
import yaml
import re
import platform
import threading
# When installed as an egg, make the package's inner 'myssh' directory
# importable by appending it to sys.path.
# Fix: the pattern was a plain string '\/myssh.+?.egg' — '\/' is an invalid
# escape sequence (SyntaxWarning on Python 3.12+) and the '.' before 'egg'
# unintentionally matched any character.  Use a raw string and escape the dot.
for path in sys.path:
    if re.search(r'/myssh.+?\.egg', path):
        sys.path.append(path + '/myssh')
        break
import tab
import config
import data
import ssh
import sftp
import sshlogin
import common
import threads_func
import springboard
import signal
#interrupt/termination handling
def sigint_handler(signum, frame):
    """Signal handler: tear down all proxy tunnels, then exit the process."""
    stop_all_proxy()
    exit()
# Clean up proxies on Ctrl-C, terminal hangup and termination requests.
# NOTE(review): SIGHUP is POSIX-only — this line would raise on Windows;
# confirm the tool is Unix-only.
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGHUP, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
def stop_all_proxy():
    """Stop every active proxy tunnel registered in data.proxy_conns."""
    for key in data.proxy_conns:
        springboard.proxy_stop(key)
symtem_name = platform.system()  # (sic) OS name, e.g. 'Darwin' / 'Linux'
source_path = config.source_path
yaml_path = config.yaml_path
# Matches names like "web-1": group(1) is the base name used to group
# servers for batch operations.
regex = re.compile(r'([\s\S]+?)-\d+$')
# When several servers are selected, "N:cmd" targets only server N.
regex_cmd = re.compile(r'^(\d+):([\w\W]+)')
if int(platform.python_version()[0:1]) < 3: #python2
    reload(sys)
    sys.setdefaultencoding( "utf-8" )
else:
    # Python 3: shim raw_input() on top of input() so the rest of the
    # script can keep calling raw_input() unconditionally.
    def raw_input(input_data):
        return str(input(input_data))
# Top-level commands offered by readline tab-completion.
COMMANDS = ['cmd ','quit','help']
def complete(text, state):
    """readline completer: return the state-th COMMANDS entry starting with text.

    Returns None once the matches are exhausted, which tells readline to stop.
    """
    candidates = [command for command in COMMANDS if command.startswith(text)]
    if 0 <= state < len(candidates):
        return candidates[state]
def complete_path(text, state):
    """readline completer for remote paths.

    Runs `ls -F` in the current remote directory of every connected server
    (via the cached ssh channel), merges and de-duplicates the listings, and
    completes `text` against them.  `ls -F` appends an indicator character
    to some entry types; those markers are stripped before matching.
    Returns the state-th match, or None when exhausted.
    """
    global server_list
    temp_list_file=[]
    for server_num in server_list:
        cmd = 'cd '+ data.paths[ server_num ]+' && ls -F'
        path =''
        if '/' in text:
            # Complete only the component after the last '/', listing that
            # subdirectory on the remote side.
            path = text[0:text.rindex('/')] +'/'
            sub_text = text[text.rindex('/')+1:]
            cmd = 'cd '+ data.paths[ server_num ]+' && cd ' + path +' && ls -F'
        else:
            sub_text = text
        temp = ssh.cmd_cache( server_num, cmd)
        if temp:
            temp_list_file.extend(temp.split('\n'))
    temp_list_file = list(set(temp_list_file))  # de-duplicate across servers
    list_file = list()
    for line in temp_list_file:
        # Fix: split('\n') yields empty strings (e.g. a trailing newline in
        # the ls output); indexing line[-1] on those raised IndexError.
        if not line:
            continue
        if(line[-1] == '#' or line[-1]=='*' or line[-1]=='=' or line[-1]=='|' or line[-1]=='@'):
            list_file.append(line[0:-1])
        else:
            list_file.append(line)
    for file_name in list_file:
        if file_name.startswith(sub_text):
            if not state:
                return path+file_name
            else:
                state -= 1
def check_up(server_num,sftp_conns,localPath,remotePath,fileName,cmdPath,n):
    """Upload a local file or directory to a remote server.

    Directories containing more than 15 files are tar'ed locally, uploaded
    as one archive, extracted remotely, and the remote archive removed.
    The interactive prompts are currently hard-coded ('t' = tar the upload,
    'y' = extract after upload) — see the commented raw_input() calls.

    `n` is presumably the index within a multi-server fan-out: when n != 0
    the tarball is assumed to exist already from a previous iteration and is
    not re-created — TODO confirm against the callers (ssh_cmd_func/cmd_copy).
    """
    if( os.path.isdir( localPath ) ):
        # Count regular files under the directory to decide whether to tar.
        os.chdir(os.path.split(localPath)[0])
        cmd = 'find ' + localPath + ' -type f | wc -l'
        for line in os.popen(cmd):
            file_num = int(line)
        if( file_num > 15):
            # input_result = raw_input( '上传文件数量为:%d,建议压缩后再上传(输入y继续上传,输入t打包下载,输入n退出):' %file_num )
            input_result = 't'
            if( input_result == 'y'):
                sftp.up_files(sftp_conns,localPath,remotePath )
            elif( input_result == 't'):
                tar_name = common.getTarName(fileName)
                if ( n != 0 ):
                    print('开始上传 %s' %tar_name)
                else:
                    # First iteration: create the archive locally.
                    cmd = 'tar -czf %s %s' %(tar_name , fileName)
                    os.system( cmd )
                    print('打包完成,开始上传 %s' %tar_name)
                sftp.upload(server_num,localPath[0:-len(fileName)]+tar_name,remotePath + tar_name)
                # input_result2 = raw_input( '上传完成,是否解压(y/n):' )
                input_result2 = 'y'
                if( input_result2 == 'y'):
                    # Extract remotely, then delete the remote archive.
                    cmd = 'tar -xvf %s' %tar_name
                    print( cmd )
                    result = ssh.cmd(server_num,'cd '+cmdPath+' && '+ cmd)
                    cmd= 'rm %s' %tar_name
                    print( cmd )
                    result = ssh.cmd(server_num,'cd '+cmdPath+' && '+ cmd)
                else:
                    return
            else:
                return
        else:
            # Small directory: upload file-by-file.
            sftp.up_files(sftp_conns,localPath,remotePath )
    else:
        # Single file upload.
        sftp.upload(server_num,localPath,remotePath + fileName)
def check_down( server_num,remotePath,localPath,fileName ,cmdPath):  # check/perform a download
    """Download a remote file or directory to `localPath`.

    If `remotePath` is not a directory (listdir_attr raises), it is fetched
    as a single file.  Directories with more than 15 files can be tar'ed
    remotely first; in that case the archive name is published through the
    global `tar_name` (read by cmd_copy()).  Returns 'ok' on success and
    'n' when the user aborts or an error occurs.
    """
    scp = data.scp_conns[ server_num ]
    try:
        # Probe whether remotePath is a directory; failure means single file.
        scp.listdir_attr(remotePath)
    except (Exception) as e:
        sftp.down(server_num,remotePath,localPath+fileName)
        return 'ok'
    try:
        cmd = 'find ' + remotePath + ' -type f | wc -l'
        file_num = int( ssh.cmd(server_num,cmd) )
        if( file_num >15 ):
            input_result = raw_input( '下载文件数量为:%d,建议压缩后再下载(输入y继续下载,输入t打包下载,输入n退出):' %file_num )
            if(input_result == 'y'):
                sftp.downs(server_num,remotePath,localPath)
            elif(input_result == 't'):
                if fileName == '':
                    # Derive a name from the last path component.
                    temp=remotePath.split('/')
                    fileName = temp[ len(temp)-2]
                # Publish the archive name for callers (see cmd_copy()).
                global tar_name
                tar_name = common.getTarName(fileName)
                cmd = 'tar -czf %s %s' %(tar_name, fileName)
                print( cmd )
                cmd = 'cd '+cmdPath+' && '+ cmd
                cmd_result = ssh.cmd(server_num,cmd)
                # Empty output is treated as success of the remote tar.
                if( cmd_result == '' ):
                    print( '打包完成,开始下载 %s' %tar_name )
                    sftp.down(server_num,cmdPath + '/'+ tar_name,localPath+tar_name)
                    cmd= 'rm %s' %tar_name
                    print( cmd )
                    ssh.cmd(server_num,'cd '+cmdPath+' && '+ cmd)
                else:
                    print('操作失败')
            elif(input_result == 'n'):
                return 'n'
            else:
                return 'n'
        else:
            sftp.downs(server_num,remotePath,localPath)
    except (Exception) as e:
        print(e)
        return 'n'
    return 'ok'
def relation_add( l ,i ,sign):
    """Format one server entry for the listing and record name-based groups.

    Servers named like "web-1", "web-2" share base name "web" (see `regex`).
    Unless the tool was launched with 'all', only the first member of a group
    produces a visible (highlighted) row tagged with <<base>>; later members
    return an empty string.  Group membership is accumulated in the global
    `relation` dict (base name -> list of server indices).
    """
    global relation
    result_str =''
    expansion_key ='[[expansion:%d]]' %i
    if( regex.match( l['name'] ) != None ):
        relation_key = regex.match( l['name'] ).group(1)
        if( len(sys.argv) >1 and sys.argv[1] == 'all' ):
            # 'all' mode: show every server, even grouped duplicates.
            result_str = sign+'\33[41m%s\33[0m:%s(%s) %s\n' %(i, l['name'],common.hideipFun(l['host']) ,expansion_key)
        else:
            # Only the first member of a group gets a visible row.
            if( not relation_key in relation ):
                result_str = sign+'\33[41m%s\33[0m:%s(%s) %s<<%s>>\n' %(i, l['name'],common.hideipFun(l['host']) ,expansion_key,relation_key)
        if( not relation_key in relation ):
            relation[ relation_key ] = list()
        relation[ relation_key ].append(i)
        return result_str
    else:
        # Ungrouped server: plain row.
        return sign+'%s:%s(%s) %s\n' %(i, l['name'],common.hideipFun(l['host']),expansion_key)
def is_number(s):
    """Return True if *s* parses as a number.

    Accepts anything float() understands ('1', '1.5', '1e3', 'inf', 'nan')
    plus single Unicode numeric characters recognised by
    unicodedata.numeric() (e.g. '½', '五').
    """
    try:
        float(s)
    except ValueError:
        pass
    else:
        return True
    import unicodedata
    try:
        unicodedata.numeric(s)
    except (TypeError, ValueError):
        return False
    return True
def list_del_empty(data):
    """Remove all empty-string entries from `data` in place and return it.

    Replaces the previous `while '' in data: data.remove('')` loop, which
    rescanned the list for every removal (O(n^2)).  The slice assignment
    keeps the in-place mutation semantics for callers holding a reference.
    """
    data[:] = [item for item in data if item != '']
    return data
def ssh_cmd_func(server_num,result,p_cmd,ssh_conns,source_path,n):
    """Execute one user command on server `server_num`.

    Dispatches on the command prefix: 'cd ' (tracked working directory),
    'up ' (upload), 'down ' (download), 'ls', or any other shell command run
    in the tracked remote directory.  `result` maps server numbers to
    server-info dicts; `n` indexes the global `cmds` container where command
    output is stored — presumably pre-sized by the caller for threaded
    fan-out, TODO confirm in main().  Returns 'notpath' when a `cd` target
    does not exist, otherwise None.
    """
    global cmds
    server_num = int(server_num)
    server_info = result[ server_num ]
    print('\33[34m%d:\33[31m%s(%s)\33[0m' %(server_num,server_info['name'],common.hideipFun(server_info['host']) ) )
    cmd = p_cmd
    if( p_cmd[0:3] == 'cd '):
        # Resolve the new directory remotely and remember it per server.
        cmd = 'cd '+data.paths[server_num]+' && '+ cmd
        temp_path = ssh.cd(server_num,cmd )
        if( temp_path ):
            data.paths[server_num] = temp_path
            print('\33[34m%d:\33[0m\33[32mok\33[0m' %server_num)
        else:
            return 'notpath'
    elif( p_cmd[0:3] == 'up ' ):
        # Upload from the local '<source_path>/up/' staging area.
        cmds = cmd.split(' ')
        if cmds[1][len(cmds[1])-1] == '/':
            cmds[1] = cmds[1][0:len(cmds[1])-1]
        fileName = cmds[1].split('/')
        check_up(server_num, data.scp_conns[ server_num ],source_path+'up/'+cmds[1],data.paths[server_num]+'/', fileName[ len(fileName)-1] ,data.paths[server_num],n)
    # elif(p_cmd[0:5] =='downs'):
    #     cmds = cmd.split(' ')
    #     fileName = cmds[1].split('/')
    #     os.system( 'mkdir -p /Users/sam/ssh_data/'+server_info['name']+'/' )
    #     sftp.downs(scp_conns[ server_num ],data.paths[server_num] + '/' + cmds[1],'/Users/sam/ssh_data/'+server_info['name'])
    elif( p_cmd[0:5] == 'down ' ):
        # Download into a per-server local folder, then reveal it (macOS).
        cmds = cmd.split(' ')
        if cmds[1][len(cmds[1])-1] == '/':
            cmds[1] = cmds[1][0:len(cmds[1])-1]
        fileName = cmds[1].split('/')
        os.system( 'mkdir -p "'+source_path+server_info['name']+'/"' )
        rs = check_down(
            server_num,
            data.paths[server_num] + '/' + cmds[1],
            source_path+server_info['name']+'/' ,
            fileName[ len(fileName)-1],data.paths[server_num] )
        if rs != 'n':
            if symtem_name == 'Darwin':
                os.system('open "'+source_path+server_info['name']+'/"')
            else:
                print('文件已下载到 "'+source_path+server_info['name']+'/"')
    elif(p_cmd == 'ls'):
        # Compact one-line directory listing.
        cmd = 'cd '+data.paths[server_num]+' && '+ cmd
        cmds[ n ] = ssh.cmd(server_num, cmd)
        print( cmds[ n ].replace('\n',' ') )
    else:
        # Arbitrary command, run in the tracked remote working directory.
        cmd = 'cd '+data.paths[server_num]+' && '+ cmd
        cmds[ n ] = ssh.cmd(server_num, cmd)
        print( cmds[ n ] )
def check_config_file():
    """Ensure the ~/.myssh directory and the YAML config file exist.

    Creates the directory and writes the bundled demo configuration
    (config.yaml_demo_content) on first run.
    """
    myssh_dir = os.path.expanduser('~') + '/.myssh'
    if not os.path.isdir(myssh_dir):
        os.mkdir(myssh_dir)
    if not os.path.exists(yaml_path):
        # Use a context manager so the handle is closed even if write() fails
        # (the original left the file open on error).
        with open(yaml_path, 'w') as f:
            f.write(config.yaml_demo_content)
def cmd_copy(p_cmd):
    """Copy a file/directory from one server to others: 'copy <src_num> <path> [> targets]'.

    Downloads from the master server into the local cache directory, then
    uploads to each client server.  Without an explicit '>' target list,
    every connected server except the master is a target.  Large directories
    travel as a tar archive (check_down() sets the global `tar_name`), which
    is extracted and removed on each target after upload.
    """
    global server_list
    copy_info = p_cmd.split( '>' )
    data.client_file={}
    add_file ={}
    temp_master = copy_info[0].split(' ')
    temp_master = list_del_empty( temp_master )
    master_server = int( temp_master[1] )
    master_info = data.servers[ master_server ]
    if len(copy_info) > 1 :
        client_server = copy_info[1].split(' ')
        client_server = list_del_empty( client_server )
    else:
        # No explicit targets: copy to every other connected server.
        client_server =list()
        client_server.extend( server_list )
        client_server.remove( master_server )
    file_name = temp_master[2]
    if file_name[len(file_name)-1] == '/':
        file_name = file_name[0:len(file_name)-1]
    fileName = file_name.split('/')
    os.system( 'mkdir -p "'+source_path+master_info['name']+'/"' )
    global tar_name
    # check_down() sets tar_name when it tars the payload; False means the
    # download happened file-by-file (or as a single file).
    tar_name = False
    print('\33[34m%d:\33[31m%s(%s)\33[0m 下载中' %(master_server,master_info['name'],common.hideipFun(master_info['host']) ) )
    rs = check_down(
        master_server,
        data.paths[master_server] + '/' + file_name,
        source_path+master_info['name']+'/' ,
        fileName[ len(fileName)-1],data.paths[master_server] )
    if rs == 'n':
        print('error')
        return
    # NOTE(review): n stays 0 for every target, so check_up() re-tars the
    # payload per server when uploading directories — confirm intended.
    n = 0
    for server_num in client_server:
        server_num = int(server_num)
        server_info = data.servers[ server_num ]
        print( '\33[34m%d:\33[31m%s@%s(%s)\33[0m 上传中' %(
            server_num,server_info['user'],
            server_info['name'],
            common.hideipFun(server_info['host']) ))
        if tar_name:
            # Upload the archive once, then extract and remove it remotely.
            print(source_path+master_info['name']+'/'+tar_name)
            check_up(server_num, data.scp_conns[ server_num ],source_path+master_info['name']+'/'+tar_name,data.paths[server_num]+'/', tar_name ,data.paths[server_num],n)
            cmd = 'tar -xvf %s' %tar_name
            print( cmd )
            ssh.cmd(server_num,'cd '+data.paths[server_num]+' && '+ cmd)
            cmd= 'rm %s' %tar_name
            print( cmd )
            ssh.cmd(server_num,'cd '+data.paths[server_num]+' && '+ cmd)
        else:
            check_up(server_num, data.scp_conns[ server_num ],source_path+master_info['name']+'/'+file_name,data.paths[server_num]+'/', fileName[ len(fileName)-1] ,data.paths[server_num],n)
def cmd_sync(p_cmd):
    """Sync newer/missing files from a master server to client servers.

    'sync <master_num> [> targets]' — compares the master's remote file
    listing against each client's (scanned in parallel threads), asks for
    confirmation, downloads the changed files into a local '<name>-SYNC'
    cache, and uploads them to each client's own remote working directory.

    Bug fix: the upload loop previously reused `client_remote_path` left
    over from the last iteration of the file-collection loop, so every
    client received files in the LAST client's remote path.  It is now
    resolved per server inside the upload loop.
    """
    global server_list
    sync_info = p_cmd.split( '>' )
    data.client_file={}
    add_file ={}
    temp_master = sync_info[0].split(' ')
    temp_master = list_del_empty( temp_master )
    master_server = int( temp_master[1] )
    if len(sync_info) > 1 :
        client_server = sync_info[1].split(' ')
        client_server = list_del_empty( client_server )
    else:
        # No explicit targets: sync to every other connected server.
        client_server =list()
        client_server.extend( server_list )
        client_server.remove( master_server )
    master_file = ssh.show_remote_file(
        master_server,
        data.paths[ master_server ])
    master_remote_path = data.paths[master_server]
    if not master_file:
        print(' 没有需要同步的文件')
        return
    # Scan each client's remote files in parallel (fills data.client_file).
    scan_documents = []
    for server_num in client_server:
        server_num = int(server_num)
        scan_documents.append( threading.Thread(target=threads_func.scan_document,args=('scan_document',server_num)) )
    threads_func.threads_handle(scan_documents)
    is_all_sync_file = False
    for server_num in client_server:
        server_num = int(server_num)
        server_info = data.servers[ server_num ]
        print( '\33[34m%d:\33[31m%s(%s)\33[0m' %(
            server_num,server_info['name'],
            common.hideipFun(server_info['host']) ))
        add_file[ server_num ] =list()
        is_sync_file= False
        for file_name in master_file:
            if( file_name in data.client_file[server_num] ):
                # Present on the client: update only if the master copy is newer.
                if( master_file[ file_name ] > data.client_file[ server_num ][ file_name ]):
                    x = time.localtime( master_file[ file_name ] )
                    mtime = time.strftime('%Y-%m-%d %H:%M:%S',x)
                    print( "	更新: %s 修改时间:%s" %( file_name, mtime) )
                    add_file[ server_num ].append(file_name)
                    is_all_sync_file =True
                    is_sync_file = True
            else:
                # Missing on the client: always add.
                x = time.localtime( master_file[ file_name ] )
                mtime = time.strftime('%Y-%m-%d %H:%M:%S',x)
                print( "	添加: %s 修改时间:%s"%( file_name, mtime) )
                add_file[ server_num ].append(file_name)
                is_all_sync_file =True
                is_sync_file = True
        if( not is_sync_file ):
            print(' 没有需要同步的文件')
    if( not is_all_sync_file ):
        return
    certain = raw_input('确定要同步吗?(y/n):')
    if( certain !='y'):
        return
    else:
        # Download each needed file from the master exactly once.
        files_list =list()
        for server_num in add_file:
            server_info = data.servers[ master_server ]
            files_list.extend(add_file[ server_num ])
        files_list = list( set(files_list) )
        print( '\33[34m%d:\33[31m%s@%s(%s)\33[0m 下载中' %(
            master_server,server_info['user'],
            server_info['name'],
            common.hideipFun(server_info['host']) ))
        for file_name in files_list:
            os.system('mkdir -p "'+ source_path+data.servers[master_server]['name']+'-SYNC/' +file_name[0:file_name.rindex('/')] + '/"')
            print('	' + file_name[file_name.index('/')+1:])
            sftp.down(
                master_server,
                master_remote_path +'/'+file_name[file_name.index('/')+1:],
                source_path+data.servers[master_server]['name']+'-SYNC/'+file_name )
        for server_num in add_file:
            # Fix: resolve this client's own remote path (previously the
            # last-iterated client's path was used for all uploads).
            client_remote_path = data.paths[server_num]
            server_info = data.servers[ server_num ]
            print( '\33[34m%d:\33[31m%s@%s(%s)\33[0m 上传中' %(
                server_num,server_info['user'],
                server_info['name'],
                common.hideipFun(server_info['host']) ))
            for file_name in add_file[server_num]:
                if(file_name.count('/') > 1):
                    # Ensure the remote subdirectory exists before upload.
                    try:
                        cmd = 'mkdir -p "' + client_remote_path + file_name[ file_name.index('/'):file_name.rindex('/')] + '/"'
                        ssh.cmd(server_num, cmd)
                    except (Exception) as e:
                        pass
                print('	' + file_name[file_name.index('/')+1:])
                sftp.upload(
                    server_num,
                    source_path+data.servers[master_server]['name']+'-SYNC/'+file_name,
                    client_remote_path +'/'+file_name[file_name.index('/')+1:] )
def main():
global relation
global cmds
global server_list
check_config_file()
springboard.clear_proxy_cache()
if len(sys.argv) > | |
number
extname = None # name of extension
filename = None # filename of image
numaxis = 2 # number of image planes, this is NAXIS
# tuple of each image plane, defaulted to 1 image plane
naxis = (0)
# data has more than 2 dimensions and loads in cube/slice frame
iscube = False
mef_file = False # used to check misleading headers in fits files
load_header = False # not used for in memory arrays
self._current_frame = frame
# see if any file is currently loaded into ds9,
# xpa returns '\n' for nothing loaded
# get the current frame
try:
filename_string = self.get('file').strip()
if len(filename_string) > 1 and '\n' not in filename_string:
filename = str(filename_string.strip().split('[')[0])
self._viewer[frame]['filename'] = os.path.abspath(filename)
load_header = True
else:
filename_string = ""
except XpaException:
filename_string = ""
try:
if "plane" in filename_string:
iscube = True
if ":" in filename_string:
naxis = filename_string.strip().split(']')[
1].split("=")[1].split(":")
else:
naxis = filename_string.strip().split(']')[
1].split('=')[1].split()
if len(naxis) == 1:
naxis.append("0")
naxis.reverse() # for astropy.fits row-major ordering
naxis = map(int, naxis)
naxis = [axis - 1 if axis > 0 else 0 for axis in naxis]
naxis = tuple(naxis)
except ValueError:
raise ValueError("Problem parsing filename")
if load_header:
# set the extension from the header information returned from
# DS9 this is the best way to get the information if the user
# changes the loaded file using the gui
header_cards = fits.Header.fromstring(
self.get_header(),
sep='\n')
mef_file, nextend, first_image = util.check_valid(filename)
if mef_file:
try:
extver = int(header_cards['EXTVER'])
except KeyError:
# fits doesn't require extver if there is only 1
# extension
extver = first_image
try:
extname = str(header_cards['EXTNAME'])
except KeyError:
extname = None
try:
numaxis = int(header_cards['NAXIS'])
except KeyError:
raise KeyError("Problem getting NAXIS from header")
if not iscube:
if numaxis > 2:
iscube = True
naxis = list()
# assume the first axis in each extension is displayed
for axis in range(numaxis, 2, -1):
naxis.append(0)
naxis = tuple(naxis)
# update the viewer dictionary, if the user changes what's
# displayed in a frame this should update correctly
# this dictionary will be referenced in the other parts of
# the code. This enables tracking user arrays through
# frame changes
self._viewer[frame] = {'filename': filename,
'extver': extver,
'extname': extname,
'naxis': naxis,
'numaxis': numaxis,
'iscube': iscube,
'user_array': user_array,
'mef': mef_file}
else:
warnings.warn("No frame loaded in viewer")
def valid_data_in_viewer(self):
"""return bool if valid file or array is loaded into the viewer."""
frame = self.frame()
if frame:
self._set_frameinfo()
if self._viewer[frame]['filename']:
return True
else:
try:
if self._viewer[frame]['user_array'].any():
return True
except AttributeError as ValueError:
print("error in array")
return False
    def get_filename(self):
        """return the filename currently on display.
        This function will check if there is already a filename saved. It's
        possible that the user can connect to a ds9 window with no file loaded
        and then ask for the data file name after loading one through the ds9
        menu options. This will poll the private filename and then try and set
        one if it's empty.
        """
        # see if the user has loaded a file by hand or changed frames in the
        # gui
        frame = self.frame()
        # Refresh the cached frame info before reading the filename from it.
        self._set_frameinfo()
        return self._viewer[frame]['filename']
    def get_frame_info(self):
        """return more explicit information about the data displayed."""
        # Refresh the cached state first so gui-side changes are reflected.
        self._set_frameinfo()
        return self._viewer[self.frame()]
    def get_viewer_info(self):
        """Return a dictionary of information.
        The dictionary contains information about all frames which are
        loaded with data
        Notes
        -----
        Consider adding a loop to verify that all the frames still exist
        and the user has not deleted any through the gui.
        """
        # Only the current frame's entry is refreshed here; other frames'
        # entries may be stale (see the note above).
        self._set_frameinfo()
        return self._viewer
    @classmethod
    def _purge_tmp_dirs(cls):
        """Delete temporary directories made for the unix socket.
        When used with ipython (pylab mode), it seems that the objects
        are not properly deleted, i.e., temporary directories are not
        deleted. This is a work around for that.
        """
        # cls._tmp_dir is recorded by _run_unixonly_ds9().
        if cls._tmp_dir:
            shutil.rmtree(cls._tmp_dir)
    @classmethod
    def _stop_running_process(cls):
        """stop self generated DS9 windows when user quits python window."""
        # Drain the shared process list, terminating any child that has not
        # exited yet (poll() returning None means still running).
        while cls._process_list:
            process = cls._process_list.pop()
            if process.poll() is None:
                process.terminate()
    def _stop_process(self):
        """stop the ds9 window process nicely.
        but only if this package started it
        """
        try:
            if self._ds9_process:
                # none means not yet terminated
                if self._ds9_process.poll() is None:
                    # Ask ds9 to quit via its XPA "exit" command.
                    self.set("exit")
                if self._ds9_process in self._process_list:
                    self._process_list.remove(self._ds9_process)
        except XpaException as e:
            print(f"XPA Exception: {repr(e)}")
    def _purge_local(self):
        """remove temporary directories from the unix socket."""
        if not self._need_to_purge:
            return
        if not self._quit_ds9_on_del:
            # Caller opted to keep ds9 alive; leave cleanup to the user.
            warnings.warn(
                "You need to manually delete tmp. dir ({0:s})".format(
                    self._tmpd_name))
            return
        self._stop_process()
        # add a wait for the process to terminate before trying to delete the
        # tree
        time.sleep(0.5)
        try:
            shutil.rmtree(self._tmpd_name)
        except OSError:
            warnings.warn(
                "Warning : couldn't delete the temporary \
                directory ({0:s})".format(self._tmpd_name,))
        self._need_to_purge = False
def close(self):
"""close the window and end connection."""
# make sure we clean up the object and quit_ds9 local files
if 'local' in self._xpa_method or 'tmp' in self._xpa_method:
self._purge_local()
else:
self._stop_process()
    def run_inet_ds9(self):
        """start a new ds9 window using an inet socket connection.
        Notes
        -----
        It is given a unique title so it can be identified later.
        """
        env = os.environ
        # this is the title of the window, without a nameserver connection
        # is there a way to get the inet x:x address?
        # that should be unique enough, something better?
        xpaname = "imexam" + str(time.time())
        try:
            p = Popen([self._ds9_path,
                       "-xpa", "inet",
                       "-title", xpaname],
                      shell=False, env=env)
            self._ds9_process = p
            self._process_list.append(p)
            # inet sockets leave no temp dirs behind, so nothing to purge.
            self._need_to_purge = False
            return xpaname
        except Exception as e:  # refine error class
            warnings.warn("Opening ds9 failed")
            print(f"Exception: {repr(e)}")
            from signal import SIGTERM
            try:
                pidtokill = p.pid
            except NameError:
                # in case p failed at the initialization level
                pidtokill = None
            if pidtokill is not None:
                # Kill the half-started child before re-raising.
                os.kill(pidtokill, SIGTERM)
            raise e
    def _run_unixonly_ds9(self):
        """start new ds9 window and connect to object using a unix socket.
        Notes
        -----
        When the xpa method in libxpa parses a given template as a unix
        socket, it checks if the template string starts with tmpdir
        (from env["XPA_TMPDIR"] or default to /tmp/.xpa). This can make
        having multiple instances of ds9 a bit difficult, but if you give it
        unique names or use the inet address you should be fine
        For unix only, we run ds9 with XPA_TMPDIR set to temporary directory
        whose prefix start with /tmp/xpa (eg, /tmp/xpa_sf23f), them set
        os.environ["XPA_TMPDIR"] (which affects xpa set and/or get command
        from python) to /tmp/xpa.
        """
        env = os.environ
        wait_time = self.wait_time
        self._tmpd_name = mkdtemp(
            prefix="xpa_" +
            env.get(
                "USER",
                ""),
            dir="/tmp")
        # this is the first directory the servers looks for on the path
        env["XPA_TMPDIR"] = self._tmpd_name
        unix_name = "{0:s}/.IMT".format(self._tmpd_name)
        # that should be unique enough, something better?
        title = str(time.time())
        try:
            # unix only flag disables the fifos and inet connections
            p = Popen([self._ds9_path,
                       "-xpa", "local",
                       "-unix_only", "-title", title,
                       "-unix", "{0:s}".format(unix_name)],
                      shell=False, env=env)
            # wait until ds9 starts and the .IMT socket exists
            # NOTE(review): the timeout check below is `wait_time == 0`, so a
            # starting wait_time that is not a multiple of 0.5 would step past
            # zero and never trigger the kill — confirm wait_time's contract.
            while wait_time > 0:
                file_list = os.listdir(self._tmpd_name)
                if ".IMT" in file_list:
                    break
                time.sleep(0.5)
                wait_time -= 0.5
            if wait_time == 0:
                from signal import SIGTERM
                os.kill(p.pid, SIGTERM)
                print(f"Connection timeout with the ds9. Try to increase the \
                      *wait_time* parameter (current value \
                      is {self.wait_time} s)")
        except (OSError, ValueError, AttributeError) as e:
            warnings.warn("Starting ds9 failed")
            shutil.rmtree(self._tmpd_name)
        else:
            self._tmp_dir = self._tmpd_name
            self._ds9_process = p
            self._process_list.append(p)
        # this might be sketchy
        try:
            file_list.remove(".IMT")  # should be in the directory, if not
        except (ValueError, IOError):
            warnings.warn("IMT not found in tmp, using first thing in list")
        if len(file_list) > 0:
            # Use whatever socket file ds9 created in the temp directory.
            xpaname = os.path.join(self._tmpd_name, file_list[0])
        else:
            shutil.rmtree(self._tmpd_name)
            raise ValueError("Problem starting ds9 local socket connection")
        env["XPA_TMPDIR"] = "/tmp/xpa"  # for all local connections
        self._need_to_purge = True
        self._xpa_method = 'local'
        return xpaname, unix_name
    def _check_ds9_process(self):
        """Check to see if the ds9 process is still running.
        Notes
        -----
        If you start a ds9 window from the shell and then connect
        to imexam, imexam will not have a reference for the process,
        so this method ignores that state.
        """
        if self._ds9_process:
            ret = self._ds9_process.poll()
            # NOTE(review): poll() returns None while running and the exit
            # status once terminated.  A clean exit (status 0) is falsy and
            # therefore falls through to _purge_local() instead of raising —
            # confirm whether a normal window close should raise here too.
            if ret:
                raise RuntimeError("The ds9 process is externally killed.")
            self._purge_local()
def set(self, param, buf=None):
"""XPA set method to ds9 instance.
Notes
-----
This function | |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
"""Lambda function that provides real-time personalization API functionality between public endpoints and recommenders
This file contains the logic that performs and delegates the core processing for real-time
recommendation requests as well as receiving events/interactions.
"""
import os
import time
import json
import traceback
import urllib.request
import botocore
import urllib
import zlib
from typing import Dict, List, Tuple, Union
from http import HTTPStatus
from aws_lambda_powertools import Logger, Tracer, Metrics
from aws_lambda_powertools.logging import correlation_paths
from aws_lambda_powertools.event_handler.api_gateway import ApiGatewayResolver, ProxyEventType, CORSConfig, Response
from personalization_config import PersonalizationConfig
from personalization_constants import ACTION_RECOMMEND_ITEMS, ACTION_RELATED_ITEMS, ACTION_RERANK_ITEMS
from response_decorator import ResponseDecorator
from personalize_resolver import PersonalizeResolver
from lambda_resolver import LambdaResolver
from sagemaker_resolver import SageMakerResolver
from response_post_process import PostProcessor
from personalization_error import ConfigError, PersonalizationError, ValidationError, JSONDecodeValidationError
from evidently import evidently_evaluate_feature, process_conversions
from event_targets import process_targets
from auto_values import resolve_auto_values
from background_tasks import BackgroundTasks
from util import CompatEncoder
# Hard limit imposed by the Amazon Personalize GetRecommendations API —
# TODO confirm against current service quotas.
PERSONALIZE_GET_RECS_MAX_NUM_RESULTS = 500
tracer = Tracer()
logger = Logger()
metrics = Metrics()
cors_config = CORSConfig(max_age=500)
# Resolver/compression setup depends on how the Lambda is fronted:
# HTTP API (payload v2), ALB, or REST API.
api_type = os.environ.get('ApiType', 'REST')
compress_responses = False # Local compression
if api_type == 'HTTP':
    app = ApiGatewayResolver(proxy_type=ProxyEventType.APIGatewayProxyEventV2, cors=cors_config)
    compress_responses = True
elif api_type == 'ALB':
    app = ApiGatewayResolver(proxy_type=ProxyEventType.ALBEvent, cors=cors_config)
    compress_responses = True
else:
    app = ApiGatewayResolver(cors=cors_config)
    # Compression is off-loaded to API Gateway for REST API
config = PersonalizationConfig.get_instance()
region = os.environ['AWS_REGION']
personalize_resolver = PersonalizeResolver()
lambda_resolver = LambdaResolver()
sagemaker_resolver = SageMakerResolver()
post_processor = PostProcessor()
# Warm the datastores during cold start; tasks run in the background so the
# handler can start serving sooner.
with BackgroundTasks() as background:
    logger.info('Cold start prepare datastores')
    ResponseDecorator.prepare_datastores(config.get_config(), background)
def generate_etag(max_age: int) -> str:
    """Build a simple ETag value of the form '<checksum>-<millis>-<max_age>'.

    The checksum covers the request path and query string; the creation time
    (epoch milliseconds) and max_age (seconds) are embedded so that
    is_resource_not_modified() can later judge freshness purely from the
    If-None-Match header the client echoes back.
    """
    event = app.current_event
    path = event.path
    # HTTP API v2 events carry the raw query string directly; otherwise
    # rebuild it from the parsed parameters.
    if 'rawQueryString' in event.raw_event:
        query_string = event.get('rawQueryString', '')
    else:
        params = event.query_string_parameters
        query_string = urllib.parse.urlencode(params) if params else ''
    checksum = zlib.adler32(f'{path}?{query_string}'.encode())
    return f'{checksum}-{round(time.time() * 1000)}-{max_age}'
def is_resource_not_modified() -> bool:
    """ Determines whether the resource for the current request has been modified by checking the If-None-Match request header """
    # For the ETag/If-None-Match logic to yield the most benefit, we need to implement a more intelligent
    # algorithm here. For example, if campaign/recommender hasn't been updated or no new events have been received
    # for the user, then we can return a 304 more often and more confidently.
    if_none_match = app.current_event.get_header_value(name = 'If-None-Match', default_value='') or None
    if not if_none_match:
        return False
    etag_elements = if_none_match.split('-')
    if len(etag_elements) < 2:
        return False
    # ETag layout is "<checksum>-<millis>-<maxAge>" (see generate_etag()); the cached copy is
    # fresh until creation time + max-age. The header is client-supplied input, so a malformed
    # or foreign ETag with non-numeric fields must degrade to "modified" rather than raise.
    try:
        expires = int(etag_elements[-2]) + (int(etag_elements[-1]) * 1000)
    except ValueError:
        return False
    return expires > (time.time() * 1000)
def set_cache_headers(config: Dict, headers: Dict, user_id: str, user_is_synthetic: bool = False):
    """ Sets the caching related response headers based on the current configuration and request state.

    The cacheControl directives in the configuration file control caching separately for requests
    that include a real user, a synthetic user, or no user at all, so anonymous/synthetic responses
    can be cached more aggressively than user-specific ones.
    """
    cache_config = config.get('cacheControl')
    if not cache_config:
        return

    if user_id:
        control_type = 'syntheticUserSpecified' if user_is_synthetic else 'userSpecified'
    else:
        control_type = 'noUserSpecified'

    cache_control = cache_config.get(control_type)
    if not cache_control:
        return

    max_age = cache_control.get('maxAge')
    directives = cache_control.get('directives')
    if max_age:
        headers['ETag'] = generate_etag(max_age)
        if not directives:
            headers['Cache-Control'] = f'max-age={max_age}'
        elif 'max-age=' not in directives:
            headers['Cache-Control'] = f'{directives},max-age={max_age}'
    elif directives:
        headers['Cache-Control'] = directives
@tracer.capture_method
def evaluate_variations(rec_config: Dict, user_id: str, background: BackgroundTasks) -> Tuple:
    """ Evaluates the variations configured for a recommender to determine appropriate one to use for the request.

    If there are multiple variations configured for the recommender and AWS CloudWatch Evidently is configured as
    the evaluator, Evidently will determine the variation to use for the user. Otherwise, the first variation is used.

    Returns:
        Tuple of (resolved variation config, experiment or None).
    """
    variations = rec_config.get('variations')
    if not variations or len(variations) == 0:
        raise ConfigError(HTTPStatus.NOT_FOUND, 'NoVariationsConfigured', 'Recommender does not have any variations configured')
    experiments = rec_config.get('experiments')
    # No experiments, a single variation, or an anonymous user: nothing to evaluate,
    # fall back to the first configured variation (no experiment attached).
    if not experiments or len(variations) == 1 or not user_id:
        return config.inherit_config(rec_config, next(iter(variations.items()))[1]), None
    # Callers may pin a specific experiment feature via the "feature" query parameter.
    feature = str(app.current_event.get_query_string_value(name="feature", default_value="")) or None
    if feature:
        experiment = experiments.get(feature)
        if not experiment:
            raise ValidationError('InvalidExperimentFeature', f'Experiment for feature {feature} is referencing an experiment that is not in configuration')
    else:
        # Default to the first configured experiment.
        feature, experiment = next(iter(experiments.items()))
    if experiment.get('method') == 'evidently':
        variation, experiment = evidently_evaluate_feature(feature, experiment, variations, user_id, background)
        variation = config.inherit_config(rec_config, variation)
        return variation, experiment
    else:
        raise ConfigError(HTTPStatus.INTERNAL_SERVER_ERROR, 'UnsupportedEvaluationMethod', 'Variation evaluation method is not configured/supported')
def resolve_context(variation_config: Dict) -> Union[str, Dict]:
    """ Resolves automated context for the caller as configured for the recommender.

    Automated context can be used to derive contextual field values based on intrinsic data in the
    request. For example, device type and location information of the user can be derived from
    CloudFront headers and time-based constructs such weekday, weekend, time of day, and seasonality
    can be derived from the time of the request and the user's time zone.

    Returns:
        The caller-supplied context string unchanged when no auto context applies, otherwise a dict
        merging auto-derived fields into the caller's context; None when neither is present.
    """
    context = str(app.current_event.get_query_string_value(name="context", default_value="")) or None
    auto_context_config = variation_config.get('autoContext')
    if auto_context_config:
        auto_context = resolve_auto_values(auto_context_config, app.current_event.headers)
        if auto_context:
            if not context:
                context = {}
            elif isinstance(context, str):  # idiom: isinstance instead of type() == str
                try:
                    context = json.loads(context)
                except json.decoder.JSONDecodeError:
                    raise ValidationError('InvalidContextParameter', 'Parameter "context" is not valid JSON')
            # Caller-supplied context fields always win over auto-derived ones.
            for field, resolved in auto_context.items():
                if field not in context:
                    context[field] = str(resolved['values'][0])
    return context
def resolve_filter_parameters(variation_config: Dict, user_id: str) -> Tuple:
    """ Resolves automated filter values for the caller as configured in the recommender.

    Automated filter values can be used to derive dynamic filter variable values based on intrinsic data
    in the request. For example, device type and location information of the user can be derived from
    CloudFront headers and time-based constructs such weekday, weekend, time of day, and seasonality
    can be derived from the time of the request and the user's time zone.

    Returns:
        Tuple of (filter ARN or None, filter values as str/dict or None).
    """
    filter_name = str(app.current_event.get_query_string_value(name="filter", default_value="")) or None
    filter_values = str(app.current_event.get_query_string_value(name="filterValues", default_value="")) or None
    filter_arn = None
    if filter_name:
        # Caller explicitly named a filter; build its ARN for the current account/region.
        filter_arn = f'arn:aws:personalize:{region}:{app.current_event.request_context.account_id}:filter/{filter_name}'
    elif variation_config.get('filters'):
        # Pick the first configured filter whose condition is satisfied.
        # Renamed loop variable: "filter" shadowed the Python builtin.
        for filter_config in variation_config['filters']:
            condition = filter_config.get('condition')
            if not condition or (user_id and condition == 'user-required'):
                filter_arn = filter_config.get('arn')
                break
    else:
        # No filter requested or configured; caller-supplied values are meaningless.
        filter_values = None
    if filter_arn and variation_config.get('filter') and variation_config['filter'].get('autoDynamicFilterValues'):
        filter_auto_values = resolve_auto_values(variation_config['filter']['autoDynamicFilterValues'], app.current_event.headers)
        if filter_auto_values:
            if not filter_values:
                filter_values = {}
            elif isinstance(filter_values, str):  # idiom: isinstance instead of type() == str
                try:
                    filter_values = json.loads(filter_values)
                except json.decoder.JSONDecodeError:
                    raise ValidationError('InvalidFilterParameter', 'Parameter "filterValues" is not valid JSON')
            # Caller-supplied filter values always win over auto-derived ones.
            for parameter, resolved in filter_auto_values.items():
                if parameter not in filter_values:
                    if resolved.get('type') == 'string':
                        # Personalize expects string values quoted inside the expression.
                        filter_values[parameter] = ','.join(f'\\"{val}\\"' for val in resolved['values'])
                    else:
                        filter_values[parameter] = str(resolved['values'][0])
    return filter_arn, filter_values
@tracer.capture_method
def decorate_items(namespace: str, response: Dict):
    """ Decorates items in the response with item metadata that is stored in a low-latency datastore

    The caller can control whether item decoration is done or not via the "decorateItems"
    query string parameter. Default is to decorate.
    """
    # Renamed local: it previously shadowed this function's own name.
    should_decorate = app.current_event.get_query_string_value(name='decorateItems', default_value='1').lower() in ['1','yes','true']
    if should_decorate:
        decorator = ResponseDecorator.get_instance(namespace, config)
        if decorator:
            decorator.decorate(response)
@app.get("/recommend-items/<namespace>/<recommender>/<user_id>", cors=True, compress=compress_responses)
@tracer.capture_method(capture_response=False)
def get_recommend_items(namespace: str, recommender: str, user_id: str) -> Response:
""" API entry point for getting recommended items for a given user """
tracer.put_annotation(key = 'EntryPoint', value = 'recommend-items')
tracer.put_annotation(key = 'Namespace', value = namespace)
tracer.put_annotation(key = 'Recommender', value = recommender)
if is_resource_not_modified():
return Response(status_code=HTTPStatus.NOT_MODIFIED,
content_type="text/plain",
body='Not mofified'
)
with BackgroundTasks() as background:
logger.debug('Conditionally refreshing datastores in the backgound')
ResponseDecorator.prepare_datastores(config.get_config(), background)
rec_config = config.get_recommender_config(namespace, recommender, ACTION_RECOMMEND_ITEMS)
if not rec_config:
raise ConfigError(HTTPStatus.NOT_FOUND, 'RecommenderNotConfigured', 'Recommender not configured for this namespace and recommender path')
variation, experiment = evaluate_variations(rec_config, user_id, background)
inference_num_results = num_results = int(app.current_event.get_query_string_value(name="numResults", default_value="25"))
context = resolve_context(variation)
post_process_config = rec_config.get('responsePostProcessor')
if post_process_config and post_process_config.get('lookAheadMultiplier'):
inference_num_results *= post_process_config['lookAheadMultiplier']
if post_process_config.get('lookAheadMaximumValue'):
inference_num_results = min(inference_num_results, post_process_config['lookAheadMaximumValue'])
if variation.get('type') in ['personalize-campaign', 'personalize-recommender']:
arn = variation.get('arn')
filter_arn, filter_values = resolve_filter_parameters(variation, user_id)
inference_num_results = min(inference_num_results, PERSONALIZE_GET_RECS_MAX_NUM_RESULTS)
response = personalize_resolver.get_recommend_items(
arn = arn,
user_id = user_id,
num_results = inference_num_results,
filter_arn = filter_arn,
filter_values = filter_values,
context = context
)
decorate_items(namespace, response)
elif variation.get('type') == 'sagemaker':
response = sagemaker_resolver.get_recommend_items(
rec_config,
variation,
user_id = user_id,
num_results = inference_num_results,
context = context
)
decorate_items(namespace, response)
elif variation.get('type') == 'lambda':
response = lambda_resolver.get_recommend_items(
rec_config,
variation,
user_id = user_id,
num_results = inference_num_results,
context = context
)
decorate_items(namespace, response)
elif variation.get('type') == 'http':
url = variation['url'].format(**app.current_event.query_string_parameters)
response = json.loads(urllib.request.urlopen(url).read())
if response and experiment:
response['matchedExperiment'] = experiment
if post_process_config:
response = | |
# repo: amulyan7/sonic-mgmt
import logging
import json
from tests.common.helpers.constants import DEFAULT_NAMESPACE
from tests.common.devices.sonic_asic import SonicAsic
logger = logging.getLogger(__name__)
class RedisCli(object):
    """Base class for interface to RedisDb using redis-cli command.

    Attributes:
        host: a SonicHost or SonicAsic. Commands will be run on this shell.
        database: Redis database number.
        pid: Port number of redis db.
    """

    def __init__(self, host, database=1, pid=6379):
        """Initializes base class with defaults"""
        self.host = host
        self.database = database
        self.pid = pid
        # Optional redis server IP; when set, dump() adds it to the CLI options.
        self.ip = None

    def _cli_prefix(self):
        """Builds opening of redis CLI command for other methods."""
        # return "docker exec -i {docker} redis-cli -p {pid} --raw -n {db} ".format(
        # docker=self.docker, db=self.database, pid=self.pid)
        return " -p {pid} --raw -n {db} ".format(db=self.database, pid=self.pid)

    def _run_and_check(self, cmd):
        """
        Executes a redis CLI command and checks the output for empty string.

        Args:
            cmd: Full CLI command to run.

        Returns:
            Ansible CLI output dictionary with stdout and stdout_lines keys on success.
            Empty dictionary on error.
        """
        result = self.host.run_redis_cli_cmd(cmd)
        # Empty output is a soft failure here: log and return {} so callers decide.
        if len(result["stdout_lines"]) == 0:
            logger.error("No command response: %s" % cmd)
            return {}
        return result

    def _run_and_raise(self, cmd):
        """
        Executes a redis CLI command and checks the output for empty string.

        Args:
            cmd: Full CLI command to run.

        Returns:
            Ansible CLI output dictionary with stdout and stdout_lines keys on success.

        Raises:
            Exception: If the command had no output.
        """
        logger.debug("REDIS: %s", cmd)
        result = self.host.run_redis_cli_cmd(cmd)
        # Unlike _run_and_check, empty output is fatal for callers of this variant.
        if len(result["stdout_lines"]) == 0:
            logger.warning("No command response: %s" % cmd)
            raise RedisNoCommandOutput("Command: %s returned no response." % cmd)
        return result

    def get_key_value(self, key):
        """
        Executes a redis CLI get command.

        Args:
            key: full name of the key to get.

        Returns:
            The corresponding value of the key.

        Raises:
            RedisKeyNotFound: If the key has no value or is not present.
        """
        cmd = self._cli_prefix() + "get " + key
        result = self._run_and_check(cmd)
        if result == {}:
            raise RedisKeyNotFound("Key: %s not found in rediscmd: %s" % (key, cmd))
        else:
            # NOTE(review): str.decode() here assumes Python 2 byte-string output --
            # confirm behavior if this library runs under Python 3.
            return result['stdout'].decode('unicode-escape')

    def hget_key_value(self, key, field):
        """
        Executes a redis CLI hget command.

        Args:
            key: full name of the key to get.
            field: Name of the hash field to get.

        Returns:
            The corresponding value of the key.

        Raises:
            RedisKeyNotFound: If the key or field has no value or is not present.
        """
        cmd = self._cli_prefix() + "hget {} {}".format(key, field)
        result = self._run_and_check(cmd)
        if result == {}:
            raise RedisKeyNotFound("Key: %s, field: %s not found in rediscmd: %s" % (key, field, cmd))
        else:
            # NOTE(review): see get_key_value() about the Python 2 style decode.
            return result['stdout'].decode('unicode-escape')

    def get_and_check_key_value(self, key, value, field=None):
        """
        Executes a redis CLI get or hget and validates the response against a provided field.

        Args:
            key: full name of the key to get.
            value: expected value to test against.
            field: Optional; Name of the hash field to use with hget.

        Returns:
            True if the validation succeeds.

        Raises:
            RedisKeyNotFound: If the key or field has no value or is not present.
            AssertionError: If the fetched value from redis does not match the provided value.
        """
        if field is None:
            result = self.get_key_value(key)
        else:
            result = self.hget_key_value(key, field)
        # Comparison is case-insensitive on both sides.
        if str(result).lower() == str(value).lower():
            logger.info("Value {val} matches output {out}".format(val=value, out=result))
            return True
        else:
            raise AssertionError("redis value error: %s != %s key was: %s" % (result, value, key))

    def get_keys(self, table):
        """
        Gets the list of keys in a table.

        Args:
            table: full name of the table for which to get the keys.

        Returns:
            list of keys retrieved

        Raises:
            RedisKeyNotFound: If the key or field has no value or is not present.
        """
        cmd = self._cli_prefix() + " keys {}".format(table)
        result = self._run_and_check(cmd)
        if result == {}:
            raise RedisKeyNotFound("No keys for %s found in rediscmd: %s" % (table, cmd))
        else:
            return result['stdout'].decode('unicode-escape')

    def dump(self, table):
        """
        Dumps an entire table with redis-dump.

        Args:
            table: The table to dump.

        Returns:
            Dictionary containing the parsed json output of the redis_dump.
        """
        cli = "/usr/local/bin/redis-dump"
        cmd_str = ""
        # If an IP is specified, include in cli options.
        if self.ip is not None:
            cmd_str += " -H {} ".format(self.ip)
        cmd_str += "-p {pid} -d {db} -y -k *{t}*".format(db=self.database, pid=self.pid, t=table)
        # We are on an asic, it could be single asic card, or multiasic and need a namespace.
        if isinstance(self.host, SonicAsic):
            if self.host.namespace != DEFAULT_NAMESPACE:
                # Multi-asic: run the dump inside the asic's network namespace.
                cmd = "sudo ip netns exec {} {} {}".format(self.host.namespace, cli, cmd_str)
                output = self.host.sonichost.command(cmd)
            # for single asic platform
            else:
                cmd = cli + " " + cmd_str
                output = self.host.sonichost.command(cmd)
        else:
            # We are on a sonichost, no namespace required.
            cmd = cli + " " + cmd_str
            output = self.host.sonichost.command(cmd)
        parsed = json.loads(output["stdout"])
        return parsed
class AsicDbCli(RedisCli):
"""
Class to interface with the ASICDB on a host.
Attributes:
host: a SonicHost or SonicAsic. Commands will be run on this shell.
"""
ASIC_SWITCH_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH"
ASIC_SYSPORT_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_SYSTEM_PORT"
ASIC_PORT_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_PORT"
ASIC_HOSTIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF"
ASIC_LAG_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_LAG"
ASIC_LAG_MEMBER_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER"
ASIC_ROUTERINTF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE"
ASIC_NEIGH_ENTRY_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY"
    def __init__(self, host):
        """
        Initializes a connection to the ASIC DB (database 1)
        """
        super(AsicDbCli, self).__init__(host, 1)
        # cache this to improve speed
        # Lazily-populated caches; refreshed on demand by the getter methods below.
        self.hostif_portidlist = []
        self.hostif_table = []
        self.system_port_key_list = []
        self.port_key_list = []
        self.lagid_key_list = []
def get_switch_key(self):
"""Returns a list of keys in the switch table"""
cmd = self._cli_prefix() + "KEYS %s*" % AsicDbCli.ASIC_SWITCH_TABLE
return self._run_and_raise(cmd)["stdout_lines"][0]
def get_system_port_key_list(self, refresh=False):
"""Returns a list of keys in the system port table"""
if self.system_port_key_list != [] and refresh is False:
return self.system_port_key_list
cmd = self._cli_prefix() + "KEYS %s*" % AsicDbCli.ASIC_SYSPORT_TABLE
self.system_port_key_list = self._run_and_raise(cmd)["stdout_lines"]
return self.system_port_key_list
def get_port_key_list(self, refresh=False):
"""Returns a list of keys in the local port table"""
if self.port_key_list != [] and refresh is False:
return self.port_key_list
cmd = self._cli_prefix() + "KEYS %s*" % AsicDbCli.ASIC_PORT_TABLE
self.port_key_list = self._run_and_raise(cmd)["stdout_lines"]
return self.port_key_list
def get_hostif_list(self):
"""Returns a list of keys in the host interface table"""
cmd = self._cli_prefix() + "KEYS %s:*" % AsicDbCli.ASIC_HOSTIF_TABLE
return self._run_and_raise(cmd)["stdout_lines"]
def get_asic_db_lag_list(self, refresh=False):
"""Returns a list of keys in the lag table"""
if self.lagid_key_list != [] and refresh is False:
return self.lagid_key_list
cmd = self._cli_prefix() + "KEYS %s:*" % AsicDbCli.ASIC_LAG_TABLE
return self._run_and_raise(cmd)["stdout_lines"]
def get_asic_db_lag_member_list(self):
"""Returns a list of keys in the lag member table"""
cmd = self._cli_prefix() + "KEYS %s:*" % AsicDbCli.ASIC_LAG_MEMBER_TABLE
return self._run_and_raise(cmd)["stdout_lines"]
def get_router_if_list(self):
"""Returns a list of keys in the router interface table"""
cmd = self._cli_prefix() + "KEYS %s:*" % AsicDbCli.ASIC_ROUTERINTF_TABLE
return self._run_and_raise(cmd)["stdout_lines"]
def get_neighbor_list(self):
"""Returns a list of keys in the neighbor table"""
cmd = self._cli_prefix() + "KEYS %s:*" % AsicDbCli.ASIC_NEIGH_ENTRY_TABLE
return self._run_and_raise(cmd)["stdout_lines"]
def get_neighbor_key_by_ip(self, ipaddr):
"""Returns the key in the neighbor table that is for a specific IP neighbor
Args:
ipaddr: The IP address to search for in the neighbor table.
"""
result = self._run_and_raise(self._cli_prefix() + "KEYS %s*%s*" % (AsicDbCli.ASIC_NEIGH_ENTRY_TABLE, ipaddr))
match_str = '"ip":"%s"' % ipaddr
for key in result["stdout_lines"]:
if match_str in key:
neighbor_key = key
break
else:
raise RedisKeyNotFound("Did not find key: %s*%s* in asicdb" % (AsicDbCli.ASIC_NEIGH_ENTRY_TABLE, ipaddr))
return neighbor_key
def get_neighbor_value(self, neighbor_key, field):
"""
Returns a value of a field in the neighbor table.
Note:
The structure of the keys in this table cause the command() method to fail, so this function uses shell() to
retrieve the command output.
Args:
neighbor_key: The full key of the neighbor table.
field: The field to get in the neighbor hash table.
"""
cmd = "%s ASIC_DB HGET '%s' %s" % (self.host.sonic_db_cli, neighbor_key, field)
result = self.host.sonichost.shell(cmd)
return result['stdout']
def get_hostif_table(self, refresh=False):
"""
Returns a fresh hostif table if refresh is true, else returns the entry from cache. Initializes instance
table on first run.
Args:
refresh: If True, get a fresh copy from the DUT.
Returns:
The table dump of ASIC_HOSTIF_TABLE
"""
if self.hostif_table != [] and refresh is False:
hostif_table = self.hostif_table
else:
hostif_table = self.dump("%s:" % AsicDbCli.ASIC_HOSTIF_TABLE)
self.hostif_table = hostif_table
return hostif_table
def get_hostif_portid_oidlist(self, refresh=False):
"""
Returns a list of portids associated with the hostif entries on the asics.
Walks through the HOSTIF table getting each port ID from the cache and returns the list. The list
is saved so it can be returned directly in subsequent calls.
Args:
refresh: | |
self . group . is_null ( ) else ", group {}" . format ( self . group . print_prefix ( ) )
if 72 - 72: i11iIiiIii * OoOoOO00 % oO0o / I1Ii111
lprint ( "{}eid {}{}, uptime {}, {} rlocs:" . format ( eid_indent ,
green ( self . eid . print_prefix ( ) , False ) , Oo000o0o0 , III11I1 ,
len ( self . rloc_set ) ) )
for i1IIIIi1Ii111 in self . rloc_set : i1IIIIi1Ii111 . print_rloc ( rloc_indent )
if 9 - 9: iIii1I11I1II1 . IiII
if 42 - 42: i1IIi / Ii1I * I1ii11iIi11i
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
if 9 - 9: I11i % i1IIi / i1IIi / OoO0O00
if 46 - 46: I1Ii111 * II111iiii + II111iiii * O0 % II111iiii
def print_ttl ( self ) :
iiI = self . map_cache_ttl
if ( iiI == None ) : return ( "forever" )
if 37 - 37: OOooOOo . iIii1I11I1II1 / O0 . ooOoO0o + OOooOOo - OoooooooOO
if ( iiI >= 3600 ) :
if ( ( iiI % 3600 ) == 0 ) :
iiI = str ( iiI / 3600 ) + " hours"
else :
iiI = str ( iiI * 60 ) + " mins"
if 96 - 96: I1Ii111 / oO0o . I1ii11iIi11i % I1IiiI * OOooOOo
elif ( iiI >= 60 ) :
if ( ( iiI % 60 ) == 0 ) :
iiI = str ( iiI / 60 ) + " mins"
else :
iiI = str ( iiI ) + " secs"
if 99 - 99: i11iIiiIii - I1Ii111
else :
iiI = str ( iiI ) + " secs"
if 4 - 4: o0oOOo0O0Ooo - i11iIiiIii . iIii1I11I1II1 . OOooOOo % IiII
return ( iiI )
if 68 - 68: I11i / iII111i - IiII . iIii1I11I1II1 / o0oOOo0O0Ooo
if 54 - 54: II111iiii * I1IiiI
def has_ttl_elapsed ( self ) :
if ( self . map_cache_ttl == None ) : return ( False )
ooooOoO0O = time . time ( ) - self . last_refresh_time
return ( ooooOoO0O >= self . map_cache_ttl )
if 49 - 49: I1ii11iIi11i
if 31 - 31: o0oOOo0O0Ooo - OoOoOO00 + I1ii11iIi11i . oO0o - O0
def is_active ( self ) :
if ( self . stats . last_increment == None ) : return ( False )
ooooOoO0O = time . time ( ) - self . stats . last_increment
return ( ooooOoO0O <= 60 )
if 61 - 61: I1ii11iIi11i * II111iiii . i1IIi
if 60 - 60: OoooooooOO % ooOoO0o * i11iIiiIii * OoooooooOO % IiII
def match_eid_tuple ( self , db ) :
if ( self . eid . is_exact_match ( db . eid ) == False ) : return ( False )
if ( self . group . is_exact_match ( db . group ) == False ) : return ( False )
return ( True )
if 15 - 15: oO0o
if 40 - 40: I1Ii111
def sort_rloc_set ( self ) :
self . rloc_set . sort ( key = operator . attrgetter ( 'rloc.address' ) )
if 77 - 77: II111iiii - o0oOOo0O0Ooo . Ii1I
if 47 - 47: o0oOOo0O0Ooo % OOooOOo + I1Ii111
def delete_rlocs_from_rloc_probe_list ( self ) :
for i1IIIIi1Ii111 in self . best_rloc_set :
i1IIIIi1Ii111 . delete_from_rloc_probe_list ( self . eid , self . group )
if 64 - 64: ooOoO0o / IiII . I1IiiI
if 77 - 77: o0oOOo0O0Ooo % I1Ii111 . OOooOOo
if 90 - 90: I11i
def build_best_rloc_set ( self ) :
OOooOo = self . best_rloc_set
self . best_rloc_set = [ ]
if ( self . rloc_set == None ) : return
if 42 - 42: iII111i
if 90 - 90: Ii1I . o0oOOo0O0Ooo
if 3 - 3: oO0o
if 42 - 42: Oo0Ooo
iIIII1I11iii = 256
for i1IIIIi1Ii111 in self . rloc_set :
if ( i1IIIIi1Ii111 . up_state ( ) ) : iIIII1I11iii = min ( i1IIIIi1Ii111 . priority , iIIII1I11iii )
if 22 - 22: o0oOOo0O0Ooo
if 45 - 45: I1Ii111 + OoooooooOO + o0oOOo0O0Ooo * II111iiii
if 12 - 12: I1ii11iIi11i / O0
if 18 - 18: OoOoOO00 . i11iIiiIii + i1IIi / OoooooooOO - IiII % OoO0O00
if 47 - 47: iII111i % IiII + I1Ii111 * o0oOOo0O0Ooo * OoooooooOO
if 100 - 100: Oo0Ooo / I1IiiI / iII111i / I1Ii111 / oO0o % o0oOOo0O0Ooo
if 16 - 16: I1IiiI + I11i
if 66 - 66: OoooooooOO % II111iiii / I1Ii111 . i11iIiiIii
if 67 - 67: Ii1I + Oo0Ooo - I1IiiI - IiII + oO0o + Oo0Ooo
if 84 - 84: I1ii11iIi11i % oO0o - OOooOOo * Ii1I
for i1IIIIi1Ii111 in self . rloc_set :
if ( i1IIIIi1Ii111 . priority <= iIIII1I11iii ) :
if ( i1IIIIi1Ii111 . unreach_state ( ) and i1IIIIi1Ii111 . last_rloc_probe == None ) :
i1IIIIi1Ii111 . last_rloc_probe = lisp_get_timestamp ( )
if 78 - 78: i1IIi / ooOoO0o / oO0o
self . best_rloc_set . append ( i1IIIIi1Ii111 )
if 21 - 21: IiII % Ii1I + OOooOOo + IiII
if 90 - 90: o0oOOo0O0Ooo
if 38 - 38: OoOoOO00 / OOooOOo % OoooooooOO * I1ii11iIi11i
if 7 - 7: I11i * O0 + Oo0Ooo / O0 * oO0o + i11iIiiIii
if 74 - 74: OoOoOO00
if 91 - 91: i11iIiiIii / Ii1I % OOooOOo % O0 - I11i . I11i
if 78 - 78: i1IIi + I11i % OoooooooOO + i1IIi + iII111i % Ii1I
if 87 - 87: ooOoO0o . iIii1I11I1II1
for i1IIIIi1Ii111 in OOooOo :
if ( i1IIIIi1Ii111 . priority < iIIII1I11iii ) : continue
i1IIIIi1Ii111 . delete_from_rloc_probe_list ( self . eid , self . group )
if 99 - 99: Ii1I + OoooooooOO * IiII * i11iIiiIii - iIii1I11I1II1
for i1IIIIi1Ii111 in self . best_rloc_set :
if ( i1IIIIi1Ii111 . rloc . is_null ( ) ) : continue
i1IIIIi1Ii111 . add_to_rloc_probe_list ( self . eid , self . group )
if 58 - 58: IiII % i1IIi . i11iIiiIii
if 5 - 5: OoOoOO00
if 75 - 75: OOooOOo
def select_rloc ( self , lisp_packet , ipc_socket ) :
iI1IIII1ii1 = lisp_packet . packet
o0OooooO0 = lisp_packet . inner_version
I1I1 = len ( self . best_rloc_set )
if ( I1I1 is 0 ) :
self . stats . increment ( len ( iI1IIII1ii1 ) )
return ( [ None , None , None , self . action , None , None ] )
if 71 - 71: OoO0O00
if 19 - 19: IiII - II111iiii % i1IIi + IiII
iIiII11OoO0000O000o = 4 if lisp_load_split_pings else 0
oO000o0o0oOo0 = lisp_packet . hash_ports ( )
if ( o0OooooO0 == 4 ) :
for oO in range ( 8 + iIiII11OoO0000O000o ) :
oO000o0o0oOo0 = oO000o0o0oOo0 ^ struct . unpack ( "B" , iI1IIII1ii1 [ oO + 12 ] ) [ 0 ]
if 74 - 74: i1IIi
elif ( o0OooooO0 == 6 ) :
for oO in range ( 0 , 32 + iIiII11OoO0000O000o , 4 ) :
oO000o0o0oOo0 = oO000o0o0oOo0 ^ struct . unpack ( "I" , iI1IIII1ii1 [ oO + 8 : oO + 12 ] ) [ 0 ]
if 63 - 63: I1ii11iIi11i + iII111i * o0oOOo0O0Ooo % II111iiii
oO000o0o0oOo0 = ( oO000o0o0oOo0 >> 16 ) + ( oO000o0o0oOo0 & 0xffff )
oO000o0o0oOo0 = ( oO000o0o0oOo0 >> 8 ) + ( oO000o0o0oOo0 & 0xff )
else :
for oO in range ( 0 , 12 + iIiII11OoO0000O000o , 4 ) :
oO000o0o0oOo0 = oO000o0o0oOo0 ^ struct . unpack ( "I" , iI1IIII1ii1 [ oO : oO + 4 ] ) [ 0 ]
if 23 - 23: i1IIi * oO0o * oO0o . | |
# encoding: utf-8
# module _sqlite3
# from (pre-generated)
# by generator 1.147
# no doc
# NOTE: IDE-generated skeleton mirroring the attributes of the C extension
# module _sqlite3; values are copied, implementations are placeholders.
# imports
import sqlite3 as __sqlite3

# Variables with simple values
# Flags for the detect_types argument of connect().
PARSE_COLNAMES = 2
PARSE_DECLTYPES = 1
# SQLITE_* constants are the action/return codes used with
# Connection.set_authorizer() callbacks.
SQLITE_ALTER_TABLE = 26
SQLITE_ANALYZE = 28
SQLITE_ATTACH = 24
SQLITE_CREATE_INDEX = 1
SQLITE_CREATE_TABLE = 2
SQLITE_CREATE_TEMP_INDEX = 3
SQLITE_CREATE_TEMP_TABLE = 4
SQLITE_CREATE_TEMP_TRIGGER = 5
SQLITE_CREATE_TEMP_VIEW = 6
SQLITE_CREATE_TRIGGER = 7
SQLITE_CREATE_VIEW = 8
SQLITE_DELETE = 9
SQLITE_DENY = 1
SQLITE_DETACH = 25
SQLITE_DROP_INDEX = 10
SQLITE_DROP_TABLE = 11
SQLITE_DROP_TEMP_INDEX = 12
SQLITE_DROP_TEMP_TABLE = 13
SQLITE_DROP_TEMP_TRIGGER = 14
SQLITE_DROP_TEMP_VIEW = 15
SQLITE_DROP_TRIGGER = 16
SQLITE_DROP_VIEW = 17
SQLITE_IGNORE = 2
SQLITE_INSERT = 18
SQLITE_OK = 0
SQLITE_PRAGMA = 19
SQLITE_READ = 20
SQLITE_REINDEX = 27
SQLITE_SELECT = 21
SQLITE_TRANSACTION = 22
SQLITE_UPDATE = 23
# Versions of the bundled SQLite library and the pysqlite binding.
sqlite_version = '3.14.2'
version = '2.6.0'

# functions
def adapt(obj, protocol, alternate): # real signature unknown; restored from __doc__
    """ adapt(obj, protocol, alternate) -> adapt obj to given protocol. Non-standard. """
    # Generated stub only -- the real implementation is native code in _sqlite3.
    pass
def complete_statement(sql): # real signature unknown; restored from __doc__
    """
    complete_statement(sql)

    Checks if a string contains a complete SQL statement. Non-standard.
    """
    # Generated stub only -- the real implementation is native code in _sqlite3.
    pass
def connect(database, timeout=None, isolation_level=None, detect_types=None, factory=None): # real signature unknown; restored from __doc__
    """
    connect(database[, timeout, isolation_level, detect_types, factory])

    Opens a connection to the SQLite database file *database*. You can use
    ":memory:" to open a database connection to a database that resides in
    RAM instead of on disk.
    """
    # Generated stub only -- the real implementation is native code in _sqlite3.
    pass
def enable_callback_tracebacks(flag): # real signature unknown; restored from __doc__
    """
    enable_callback_tracebacks(flag)

    Enable or disable callback functions throwing errors to stderr.
    """
    # Generated stub only -- the real implementation is native code in _sqlite3.
    pass
def enable_shared_cache(do_enable): # real signature unknown; restored from __doc__
    """
    enable_shared_cache(do_enable)

    Enable or disable shared cache mode for the calling thread.
    Experimental/Non-standard.
    """
    # Generated stub only -- the real implementation is native code in _sqlite3.
    pass
def register_adapter(type, callable): # real signature unknown; restored from __doc__
    """
    register_adapter(type, callable)

    Registers an adapter with pysqlite's adapter registry. Non-standard.
    """
    # Generated stub only -- the real implementation is native code in _sqlite3.
    pass
def register_converter(typename, callable): # real signature unknown; restored from __doc__
    """
    register_converter(typename, callable)

    Registers a converter with pysqlite. Non-standard.
    """
    # Generated stub only -- the real implementation is native code in _sqlite3.
    pass
# classes
class Cache(object):
    # no doc
    # Generated stub of pysqlite's internal factory-backed cache; bodies are placeholders.
    def display(self, *args, **kwargs): # real signature unknown
        """ For debugging only. """
        pass

    def get(self, *args, **kwargs): # real signature unknown
        """ Gets an entry from the cache or calls the factory function to produce one. """
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
class Connection(object):
    """ SQLite database connection object. """
    # Generated skeleton: every method body below is a placeholder; the real
    # implementations are native code in the C module _sqlite3.
    def close(self, *args, **kwargs): # real signature unknown
        """ Closes the connection. """
        pass

    def commit(self, *args, **kwargs): # real signature unknown
        """ Commit the current transaction. """
        pass

    def create_aggregate(self, *args, **kwargs): # real signature unknown
        """ Creates a new aggregate. Non-standard. """
        pass

    def create_collation(self, *args, **kwargs): # real signature unknown
        """ Creates a collation function. Non-standard. """
        pass

    def create_function(self, *args, **kwargs): # real signature unknown
        """ Creates a new function. Non-standard. """
        pass

    def cursor(self, *args, **kwargs): # real signature unknown
        """ Return a cursor for the connection. """
        pass

    def enable_load_extension(self, *args, **kwargs): # real signature unknown
        """ Enable dynamic loading of SQLite extension modules. Non-standard. """
        pass

    def execute(self, *args, **kwargs): # real signature unknown
        """ Executes a SQL statement. Non-standard. """
        pass

    def executemany(self, *args, **kwargs): # real signature unknown
        """ Repeatedly executes a SQL statement. Non-standard. """
        pass

    def executescript(self, *args, **kwargs): # real signature unknown
        """ Executes a multiple SQL statements at once. Non-standard. """
        pass

    def interrupt(self, *args, **kwargs): # real signature unknown
        """ Abort any pending database operation. Non-standard. """
        pass

    def iterdump(self, *args, **kwargs): # real signature unknown
        """ Returns iterator to the dump of the database in an SQL text format. Non-standard. """
        pass

    def load_extension(self, *args, **kwargs): # real signature unknown
        """ Load SQLite extension module. Non-standard. """
        pass

    def rollback(self, *args, **kwargs): # real signature unknown
        """ Roll back the current transaction. """
        pass

    def set_authorizer(self, *args, **kwargs): # real signature unknown
        """ Sets authorizer callback. Non-standard. """
        pass

    def set_progress_handler(self, *args, **kwargs): # real signature unknown
        """ Sets progress handler callback. Non-standard. """
        pass

    def __call__(self, *more): # real signature unknown; restored from __doc__
        """ x.__call__(...) <==> x(...) """
        pass

    def __enter__(self, *args, **kwargs): # real signature unknown
        """ For context manager. Non-standard. """
        pass

    def __exit__(self, *args, **kwargs): # real signature unknown
        """ For context manager. Non-standard. """
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    # DB-API exception classes and connection state exposed as attributes.
    DatabaseError = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    DataError = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    Error = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    IntegrityError = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    InterfaceError = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    InternalError = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    isolation_level = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    NotSupportedError = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    OperationalError = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    ProgrammingError = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    row_factory = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    text_factory = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    total_changes = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    Warning = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class Cursor(object):
    """ SQLite database cursor class. """
    # Auto-generated IDE stub: every method body is `pass`; the real
    # implementation lives in the _sqlite3 C extension module.
    def close(self, *args, **kwargs): # real signature unknown
        """ Closes the cursor. """
        pass
    def execute(self, *args, **kwargs): # real signature unknown
        """ Executes a SQL statement. """
        pass
    def executemany(self, *args, **kwargs): # real signature unknown
        """ Repeatedly executes a SQL statement. """
        pass
    def executescript(self, *args, **kwargs): # real signature unknown
        """ Executes a multiple SQL statements at once. Non-standard. """
        pass
    def fetchall(self, *args, **kwargs): # real signature unknown
        """ Fetches all rows from the resultset. """
        pass
    def fetchmany(self, *args, **kwargs): # real signature unknown
        """ Fetches several rows from the resultset. """
        pass
    def fetchone(self, *args, **kwargs): # real signature unknown
        """ Fetches one row from the resultset. """
        pass
    # Python 2 iterator protocol method (would be __next__ on Python 3).
    def next(self): # real signature unknown; restored from __doc__
        """ x.next() -> the next value, or raise StopIteration """
        pass
    def setinputsizes(self, *args, **kwargs): # real signature unknown
        """ Required by DB-API. Does nothing in pysqlite. """
        pass
    def setoutputsize(self, *args, **kwargs): # real signature unknown
        """ Required by DB-API. Does nothing in pysqlite. """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    def __iter__(self): # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    # Data attributes exposed as property placeholders; real values are
    # provided by the C-level cursor object.
    arraysize = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    connection = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    description = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    lastrowid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    rowcount = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    row_factory = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
# NOTE: StandardError exists only on Python 2; this stub targets Python 2.
class Error(StandardError):
    # no doc
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """list of weak references to the object (if defined)"""
class DatabaseError(__sqlite3.Error):
# no doc
| |
# Source repository: nicholasburden/pymarl
import datetime
import os
import pprint
import time
import threading
import torch as th
import dill
import numpy as np
from types import SimpleNamespace as SN
from utils.logging import Logger
from utils.timehelper import time_left, time_str
from os.path import dirname, abspath
from learners import REGISTRY as le_REGISTRY
from runners import REGISTRY as r_REGISTRY
from controllers import REGISTRY as mac_REGISTRY
from components.episode_buffer import ReplayBuffer
from components.transforms import OneHot
import collections
def recursive_dict_update(d, u):
    """Recursively merge mapping *u* into dict *d* (in place).

    Nested mappings are merged key-by-key; any other value in *u*
    overwrites the corresponding entry in *d*.

    Returns:
        The (mutated) dict *d*.
    """
    # BUGFIX: the `collections.Mapping` alias was removed in Python 3.10;
    # the ABC lives in collections.abc.
    from collections.abc import Mapping
    for key, value in u.items():
        if isinstance(value, Mapping):
            d[key] = recursive_dict_update(d.get(key, {}), value)
        else:
            d[key] = value
    return d
import yaml
def _get_config(params, arg_name, subfolder):
config_name = None
for _i, _v in enumerate(params):
if _v.split("=")[0] == arg_name:
config_name = _v.split("=")[1]
del params[_i]
break
if config_name is not None:
with open(os.path.join(os.path.dirname(__file__), "config", subfolder, "{}.yaml".format(config_name)), "r") as f:
try:
config_dict = yaml.load(f)
except yaml.YAMLError as exc:
assert False, "{}.yaml error: {}".format(config_name, exc)
return config_dict
def run(_run, _config, _log):
    """Sacred entry point: validate the config, set up logging/tensorboard,
    dispatch to the chosen training loop, then hard-exit the process.

    Args:
        _run: the sacred Run object for this experiment.
        _config: dict of experiment parameters (normalised in place).
        _log: standard logger supplied by sacred.
    """
    # check args sanity
    _config = args_sanity_check(_config, _log)
    args = SN(**_config)
    args.device = "cuda" if args.use_cuda else "cpu"
    # setup loggers
    logger = Logger(_log)
    _log.info("Experiment Parameters:")
    experiment_params = pprint.pformat(_config,
                                       indent=4,
                                       width=1)
    _log.info("\n\n" + experiment_params + "\n")
    # configure tensorboard logger; the token also names the model directory
    unique_token = "{}__{}".format(args.name, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    args.unique_token = unique_token
    if args.use_tensorboard:
        tb_logs_direc = os.path.join(dirname(dirname(abspath(__file__))), "results", "tb_logs")
        tb_exp_direc = os.path.join(tb_logs_direc, "{}").format(unique_token)
        logger.setup_tb(tb_exp_direc)
    # sacred is on by default
    logger.setup_sacred(_run)
    # Run and train: "reptile" selects the meta-learning loop, anything
    # else runs the standard sequential trainer.
    if args.meta == "reptile":
        run_reptile(args=args, logger=logger, _log=_log, _run=_run)
    else:
        run_sequential(args=args, logger=logger)
    # Clean up after finishing
    print("Exiting Main")
    print("Stopping all threads")
    for t in threading.enumerate():
        if t.name != "MainThread":
            print("Thread {} is alive! Is daemon: {}".format(t.name, t.daemon))
            t.join(timeout=1)
            print("Thread joined")
    print("Exiting script")
    # Making sure framework really exits, even if non-daemon threads linger
    # (os._exit skips interpreter shutdown entirely).
    os._exit(os.EX_OK)
def evaluate_sequential(args, runner):
    """Run args.test_nepisode evaluation episodes, optionally save a replay,
    and close the environment."""
    n_episodes = args.test_nepisode
    for _episode in range(n_episodes):
        runner.run(test_mode=True)
    if args.save_replay:
        runner.save_replay()
    runner.close_env()
def run_sequential(args, logger):
    """Standard training loop.

    Builds runner, replay buffer, multi-agent controller (mac) and learner,
    optionally restores a checkpoint, then trains until args.t_max
    environment steps, interleaving periodic test runs, model saving and
    stat logging.
    """
    # Init runner so we can get env info
    runner = r_REGISTRY[args.runner](args=args, logger=logger)
    # Set up schemes and groups here
    env_info = runner.get_env_info()
    args.n_agents = env_info["n_agents"]
    args.n_actions = env_info["n_actions"]
    # Optional (de)coders arrive pickled from the env; dill handles closures.
    args.obs_decoder = dill.loads(env_info["obs_decoder"]) if env_info["obs_decoder"] is not None else None
    args.avail_actions_encoder = dill.loads(env_info["avail_actions_encoder_grid"]) if env_info["avail_actions_encoder_grid"] is not None else None
    args.state_shape = env_info["state_shape"]
    # Default/Base scheme
    scheme = {
        "state": {"vshape": env_info["state_shape"]},
        "obs": {"vshape": env_info["obs_shape"], "group": "agents", "vshape_decoded": env_info.get("obs_shape_decoded", env_info["obs_shape"])},
        "actions": {"vshape": (1,), "group": "agents", "dtype": th.long},
        "avail_actions": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.int},
        "reward": {"vshape": (1,)},
        "terminated": {"vshape": (1,), "dtype": th.uint8},
    }
    groups = {
        "agents": args.n_agents
    }
    preprocess = {
        "actions": ("actions_onehot", [OneHot(out_dim=args.n_actions)])
    }
    # episode_limit + 1 so the terminal transition also fits in the buffer.
    buffer = ReplayBuffer(scheme, groups, args.buffer_size, env_info["episode_limit"] + 1,
                          preprocess=preprocess,
                          device="cpu" if args.buffer_cpu_only else args.device)
    # Setup multiagent controller here
    mac = mac_REGISTRY[args.mac](buffer.scheme, groups, args)
    # Give runner the scheme
    runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)
    # Learner
    learner = le_REGISTRY[args.learner](mac, buffer.scheme, logger, args)
    if args.use_cuda:
        learner.cuda()
    if args.checkpoint_path != "":
        timesteps = []
        timestep_to_load = 0
        if not os.path.isdir(args.checkpoint_path):
            logger.console_logger.info("Checkpoint directiory {} doesn't exist".format(args.checkpoint_path))
            return
        # Go through all files in args.checkpoint_path
        for name in os.listdir(args.checkpoint_path):
            full_name = os.path.join(args.checkpoint_path, name)
            # Check if they are dirs the names of which are numbers
            if os.path.isdir(full_name) and name.isdigit():
                timesteps.append(int(name))
        if args.load_step == 0:
            # choose the max timestep
            timestep_to_load = max(timesteps)
        else:
            # choose the timestep closest to load_step
            timestep_to_load = min(timesteps, key=lambda x: abs(x - args.load_step))
        model_path = os.path.join(args.checkpoint_path, str(timestep_to_load))
        logger.console_logger.info("Loading model from {}".format(model_path))
        learner.load_models(model_path)
        runner.t_env = timestep_to_load
        if args.evaluate or args.save_replay:
            # Evaluation-only mode: no training after restoring.
            evaluate_sequential(args, runner)
            return
    # start training
    episode = 0
    # Seed last_test_T so the first loop iteration triggers a test pass.
    last_test_T = -args.test_interval - 1
    last_log_T = 0
    model_save_time = 0
    start_time = time.time()
    last_time = start_time
    logger.console_logger.info("Beginning training for {} timesteps".format(args.t_max))
    while runner.t_env <= args.t_max:
        th.cuda.empty_cache()
        # Run for a whole episode at a time
        episode_batch = runner.run(test_mode=False)
        buffer.insert_episode_batch(episode_batch)
        del episode_batch
        if buffer.can_sample(args.batch_size):
            episode_sample = buffer.sample(args.batch_size)
            # Truncate batch to only filled timesteps
            max_ep_t = episode_sample.max_t_filled()
            episode_sample = episode_sample[:, :max_ep_t]
            if episode_sample.device != args.device:
                episode_sample.to(args.device)
            learner.train(episode_sample, runner.t_env, episode)
            th.cuda.empty_cache()
        # Execute test runs once in a while
        n_test_runs = max(1, args.test_nepisode // runner.batch_size)
        if (runner.t_env - last_test_T) / args.test_interval >= 1.0:
            logger.console_logger.info("t_env: {} / {}".format(runner.t_env, args.t_max))
            logger.console_logger.info("Estimated time left: {}. Time passed: {}".format(
                time_left(last_time, last_test_T, runner.t_env, args.t_max), time_str(time.time() - start_time)))
            last_time = time.time()
            last_test_T = runner.t_env
            for _ in range(n_test_runs):
                runner.run(test_mode=True)
                th.cuda.empty_cache()
        if args.save_model and (runner.t_env - model_save_time >= args.save_model_interval or model_save_time == 0):
            model_save_time = runner.t_env
            save_path = os.path.join(args.local_results_path, "models", args.unique_token, str(runner.t_env))
            #"results/models/{}".format(unique_token)
            os.makedirs(save_path, exist_ok=True)
            logger.console_logger.info("Saving models to {}".format(save_path))
            # learner should handle saving/loading -- delegate actor save/load to mac,
            # use appropriate filenames to do critics, optimizer states
            learner.save_models(save_path)
        episode += args.batch_size_run
        th.cuda.empty_cache()
        if (runner.t_env - last_log_T) >= args.log_interval:
            logger.log_stat("episode", episode, runner.t_env)
            logger.print_recent_stats()
            last_log_T = runner.t_env
    runner.close_env()
    logger.console_logger.info("Finished Training")
def update_env(args, logger):
    """Swap args.env_args["map_name"] and ["map_name2"], then rebuild the
    runner, replay buffer, mac and learner for the newly selected map.

    Returns:
        (runner, buffer, learner) constructed for the swapped map.
    """
    # Swap the two configured maps in place.
    temp = args.env_args["map_name"]
    args.env_args["map_name"] = args.env_args["map_name2"]
    args.env_args["map_name2"] = temp
    runner = r_REGISTRY[args.runner](args=args, logger=logger)
    env_info = runner.get_env_info()
    args.n_agents = env_info["n_agents"]
    args.n_actions = env_info["n_actions"]
    # Optional (de)coders arrive pickled from the env; dill handles closures.
    args.obs_decoder = dill.loads(env_info["obs_decoder"]) if env_info["obs_decoder"] is not None else None
    args.avail_actions_encoder = dill.loads(env_info["avail_actions_encoder_grid"]) if env_info[
        "avail_actions_encoder_grid"] is not None else None
    args.state_shape = env_info["state_shape"]
    # Default/Base scheme
    scheme = {
        "state": {"vshape": env_info["state_shape"]},
        "obs": {"vshape": env_info["obs_shape"], "group": "agents",
                "vshape_decoded": env_info.get("obs_shape_decoded", env_info["obs_shape"])},
        "actions": {"vshape": (1,), "group": "agents", "dtype": th.long},
        "avail_actions": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.int},
        "reward": {"vshape": (1,)},
        "terminated": {"vshape": (1,), "dtype": th.uint8},
    }
    groups = {
        "agents": args.n_agents
    }
    preprocess = {
        "actions": ("actions_onehot", [OneHot(out_dim=args.n_actions)])
    }
    # episode_limit + 1 so the terminal transition also fits in the buffer.
    buffer = ReplayBuffer(scheme, groups, args.buffer_size, env_info["episode_limit"] + 1,
                          preprocess=preprocess,
                          device="cpu" if args.buffer_cpu_only else args.device)
    # Setup multiagent controller here
    mac = mac_REGISTRY[args.mac](buffer.scheme, groups, args)
    # Give runner the scheme
    runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)
    # Learner
    learner = le_REGISTRY[args.learner](mac, buffer.scheme, logger, args)
    if args.use_cuda:
        learner.cuda()
    return runner, buffer, learner
def args_sanity_check(config, _log):
    """Normalise the experiment config dict in place and return it.

    Disables CUDA when it was requested but is unavailable, and rounds
    test_nepisode to a whole number of batch_size_run batches (at least one).
    """
    cuda_requested = config["use_cuda"]
    if cuda_requested and not th.cuda.is_available():
        config["use_cuda"] = False
        _log.warning("CUDA flag use_cuda was switched OFF automatically because no CUDA devices are available!")
    batch = config["batch_size_run"]
    episodes = config["test_nepisode"]
    if episodes < batch:
        config["test_nepisode"] = batch
    else:
        config["test_nepisode"] = (episodes // batch) * batch
    return config
def run_reptile(args, logger, _log, _run):
loggers = {}
runners = {}
macs = {}
learners = {}
buffers = {}
agent_state_dict = None
import yaml
#from .main import _get_config
# compile all the relevant task configs
task_configs = {}
class Bunch(object):
def __init__(self, adict):
self.__dict__.update(adict)
r = np.random.RandomState(args.seed)
for k, v in sorted(args.tasks.items()): # important for reproducibility of seeds!
# Get the defaults from default.yaml
with open(os.path.join(os.path.dirname(__file__), "config", "default.yaml"), "r") as f:
try:
config_dict = yaml.load(f)
except yaml.YAMLError as exc:
assert False, "default.yaml error: {}".format(exc)
# Load algorithm and env base configs
params = ["", "--config={}".format(v.pop("config")), "--env-config={}".format(v.pop("env-config"))]
alg_config = _get_config(params, "--config", "algs")
env_config = _get_config(params, "--env-config", "envs")
# config_dict = {**config_dict, **env_config, **alg_config}
config_dict = recursive_dict_update(config_dict, env_config)
config_dict = recursive_dict_update(config_dict, alg_config)
config_dict = recursive_dict_update(config_dict, v)
# from src.utils.dict2namedtuple import convert
config_dict.pop("no-mongo")
config_dict["seed"] = r.randint(0, 2**31-1) # have to set manually
config_dict["env_args"]["seed"] = r.randint(0, 2**31-1)
config_dict["device"] = args.device
config_dict["unique_token"] = "{}__{}".format(args.unique_token,
k)
task_configs[k] = Bunch(config_dict)
def setup_components(logger,
agent_state_dict):
task_names = []
for task_name, _ in task_configs.items():
task_names.append(task_name)
# set up tasks based on the configs
for task_name, task_config in task_configs.items():
task_args = task_config
from copy import deepcopy
logger = Logger(_log)
# sacred is on by default
logger.setup_sacred(_run)
# logger = deepcopy(meta_logger)
logger.prefix = task_name
loggers[task_name] = logger
# Init runner so we can get env info
runner = r_REGISTRY[task_args.runner](args=task_args,
logger=logger)
runners[task_name] = runner
# Set up schemes and groups here
env_info = runner.get_env_info()
task_args.n_agents = env_info["n_agents"]
task_args.n_actions = env_info["n_actions"]
task_args.obs_decoder = dill.loads(env_info["obs_decoder"]) if env_info["obs_decoder"] is not None else None
task_args.avail_actions_encoder = dill.loads(env_info["avail_actions_encoder_grid"]) if env_info[
"avail_actions_encoder_grid"] is not None else None
task_args.db_url = args.db_url
task_args.db_name = args.db_name
task_args.state_shape = env_info["state_shape"]
task_args.state_decoder = dill.loads(env_info["state_decoder"]) if env_info["state_decoder"] is not None else None
task_args.obs_decoder = dill.loads(env_info["obs_decoder"]) if env_info["obs_decoder"] is not None else None
# Default/Base scheme
scheme = {
"state": {"vshape": env_info["state_shape"]},
"obs": {"vshape": env_info["obs_shape"], "group": "agents",
"vshape_decoded": env_info.get("obs_shape_decoded", env_info["obs_shape"])},
"actions": {"vshape": (1,), "group": "agents", "dtype": th.long},
"avail_actions": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.int},
"reward": {"vshape": (1,)},
"terminated": {"vshape": (1,), "dtype": th.uint8},
}
groups = {
"agents": task_args.n_agents
}
preprocess = {
"actions": ("actions_onehot", [OneHot(out_dim=task_args.n_actions)])
}
buffer = ReplayBuffer(scheme, groups, task_args.buffer_size, env_info["episode_limit"] + 1,
preprocess=preprocess,
device="cpu" if task_args.buffer_cpu_only else args.device)
buffers[task_name] = buffer
# Setup multiagent controller here
mac = mac_REGISTRY[task_args.mac](buffer.scheme, groups, task_args)
#point model to same object
macs[task_name] = mac
mac.agent = | |
# File: web/addons/report/models/report.py
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api
from openerp import SUPERUSER_ID
from openerp.exceptions import AccessError
from openerp.osv import osv
from openerp.tools import config, which
from openerp.tools.translate import _
from openerp.addons.web.http import request
from openerp.tools.safe_eval import safe_eval as eval
import re
import time
import base64
import logging
import tempfile
import lxml.html
import os
import subprocess
from contextlib import closing
from distutils.version import LooseVersion
from functools import partial
from pyPdf import PdfFileWriter, PdfFileReader
#--------------------------------------------------------------------------
# Helpers
#--------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
def _get_wkhtmltopdf_bin():
    """Return the full path of the wkhtmltopdf executable found on $PATH."""
    # Splitting $PATH on os.pathsep and re-joining it is an identity, so the
    # search path string can be handed to which() directly.
    search_path = os.environ.get('PATH', os.defpath)
    return which('wkhtmltopdf', path=search_path)
#--------------------------------------------------------------------------
# Check the presence of Wkhtmltopdf and return its version at Odoo start-up
#--------------------------------------------------------------------------
# Probe the wkhtmltopdf binary once at module import.  Tri-state-ish flag
# consumed elsewhere: 'install' -> binary missing, 'upgrade' -> too old,
# 'workers' -> single-worker server cannot render, 'ok' -> usable.
wkhtmltopdf_state = 'install'
try:
    process = subprocess.Popen(
        [_get_wkhtmltopdf_bin(), '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
except (OSError, IOError):
    _logger.info('You need Wkhtmltopdf to print a pdf version of the reports.')
else:
    _logger.info('Will use the Wkhtmltopdf binary at %s' % _get_wkhtmltopdf_bin())
    out, err = process.communicate()
    # NOTE(review): assumes Python 2 where Popen output is str; on Python 3
    # re.search with a str pattern over bytes would raise TypeError.
    version = re.search('([0-9.]+)', out).group(0)
    if LooseVersion(version) < LooseVersion('0.12.0'):
        _logger.info('Upgrade Wkhtmltopdf to (at least) 0.12.0')
        wkhtmltopdf_state = 'upgrade'
    else:
        wkhtmltopdf_state = 'ok'
    # wkhtmltopdf calls back into the server over HTTP, so a single-worker
    # server would deadlock serving its own request.
    if config['workers'] == 1:
        _logger.info('You need to start Odoo with at least two workers to print a pdf version of the reports.')
        wkhtmltopdf_state = 'workers'
class Report(osv.Model):
    """QWeb report engine: renders report templates to html and converts
    them to pdf through wkhtmltopdf."""
    _name = "report"
    _description = "Report"
    # Lazily-resolved public user record; None until first needed.
    public_user = None
    #--------------------------------------------------------------------------
    # Extension of ir_ui_view.render with arguments frequently used in reports
    #--------------------------------------------------------------------------
def render(self, cr, uid, ids, template, values=None, context=None):
    """Allow to render a QWeb template python-side. This function returns the 'ir.ui.view'
    render but embellish it with some variables/methods used in reports.

    :param template: xml id of the QWeb template to render
    :param values: additionnal methods/variables used in the rendering
    :returns: html representation of the template
    """
    if values is None:
        values = {}
    if context is None:
        context = {}
    context = dict(context, inherit_branding=True)  # Tell QWeb to brand the generated html
    view_obj = self.pool['ir.ui.view']
    def translate_doc(doc_id, model, lang_field, template):
        """Helper used when a report should be translated into a specific lang.

        <t t-foreach="doc_ids" t-as="doc_id">
        <t t-raw="translate_doc(doc_id, doc_model, 'partner_id.lang', account.report_invoice_document')"/>
        </t>

        :param doc_id: id of the record to translate
        :param model: model of the record to translate
        :param lang_field': field of the record containing the lang
        :param template: name of the template to translate into the lang_field
        """
        ctx = context.copy()
        doc = self.pool[model].browse(cr, uid, doc_id, context=ctx)
        qcontext = values.copy()
        # Do not force-translate if we chose to display the report in a specific lang
        if ctx.get('translatable') is True:
            qcontext['o'] = doc
        else:
            # Reach the lang we want to translate the doc into
            # (eval on a trusted, admin-defined field path, not user input)
            ctx['lang'] = eval('doc.%s' % lang_field, {'doc': doc})
            qcontext['o'] = self.pool[model].browse(cr, uid, doc_id, context=ctx)
        return view_obj.render(cr, uid, template, qcontext, context=ctx)
    user = self.pool['res.users'].browse(cr, uid, uid)
    website = None
    if request and hasattr(request, 'website'):
        if request.website is not None:
            website = request.website
            # 'translatable' is consumed by translate_doc() above.
            context = dict(context, translatable=context.get('lang') != request.website.default_lang_code)
    values.update(
        time=time,
        translate_doc=translate_doc,
        editable=True,
        user=user,
        res_company=user.company_id,
        website=website,
    )
    return view_obj.render(cr, uid, template, values, context=context)
#--------------------------------------------------------------------------
# Main report methods
#--------------------------------------------------------------------------
@api.v7
def get_html(self, cr, uid, ids, report_name, data=None, context=None):
    """This method generates and returns html version of a report.

    If a custom rendering model ('report.<report_name>') is registered in
    the pool, delegate to its render_html(); otherwise fall back on the
    generic QWeb rendering.
    """
    # If the report is using a custom model to render its html, we must use it.
    # Otherwise, fallback on the generic html rendering.
    report_model_name = 'report.%s' % report_name
    try:
        # BUGFIX: keep the try block minimal — only the registry lookup may
        # signal "no custom model" via KeyError.  Previously a KeyError
        # raised *inside* a custom render_html() was silently swallowed and
        # the generic fallback rendered instead, hiding the real error.
        particularreport_obj = self.pool[report_model_name]
    except KeyError:
        report = self._get_report_from_name(cr, uid, report_name)
        report_obj = self.pool[report.model]
        docs = report_obj.browse(cr, uid, ids, context=context)
        docargs = {
            'doc_ids': ids,
            'doc_model': report.model,
            'docs': docs,
        }
        return self.render(cr, uid, [], report.report_name, docargs, context=context)
    else:
        return particularreport_obj.render_html(cr, uid, ids, data=data, context=context)
@api.v8
def get_html(self, records, report_name, data=None):
    """New-API wrapper: delegate html rendering to the v7 implementation."""
    cr, uid, ctx = self._cr, self._uid, self._context
    return self._model.get_html(cr, uid, records.ids, report_name,
                                data=data, context=ctx)
@api.v7
def get_pdf(self, cr, uid, ids, report_name, html=None, data=None, context=None):
    """This method generates and returns pdf version of a report.

    :param ids: ids of the records the report is rendered for
    :param report_name: name of the QWeb report template
    :param html: pre-rendered html; rendered via get_html() when None
    :param data: additional rendering data
    :returns: the wkhtmltopdf output (pdf content)
    """
    if context is None:
        context = {}
    if html is None:
        html = self.get_html(cr, uid, ids, report_name, data=data, context=context)
    html = html.decode('utf-8')  # Ensure the current document is utf-8 encoded.
    # Get the ir.actions.report.xml record we are working on.
    report = self._get_report_from_name(cr, uid, report_name)
    # Check if we have to save the report or if we have to get one from the db.
    save_in_attachment = self._check_attachment_use(cr, uid, ids, report)
    # Get the paperformat associated to the report, otherwise fallback on the company one.
    if not report.paperformat_id:
        user = self.pool['res.users'].browse(cr, uid, uid)
        paperformat = user.company_id.paperformat_id
    else:
        paperformat = report.paperformat_id
    # Preparing the minimal html pages
    css = ''  # Will contain local css
    headerhtml = []
    contenthtml = []
    footerhtml = []
    irconfig_obj = self.pool['ir.config_parameter']
    base_url = irconfig_obj.get_param(cr, SUPERUSER_ID, 'report.url') or irconfig_obj.get_param(cr, SUPERUSER_ID, 'web.base.url')
    # Minimal page renderer
    view_obj = self.pool['ir.ui.view']
    render_minimal = partial(view_obj.render, cr, uid, 'report.minimal_layout', context=context)
    # The received html report must be simplified. We convert it in a xml tree
    # in order to extract headers, bodies and footers.
    # BUGFIX: initialise root so it is defined even when parsing fails below;
    # previously a XMLSyntaxError left `root` unbound and the paperformat
    # loop at the end raised NameError.
    root = None
    try:
        root = lxml.html.fromstring(html)
        for node in root.xpath("//html/head/style"):
            css += node.text
        for node in root.xpath("//div[@class='header']"):
            body = lxml.html.tostring(node)
            header = render_minimal(dict(css=css, subst=True, body=body, base_url=base_url))
            headerhtml.append(header)
        for node in root.xpath("//div[@class='footer']"):
            body = lxml.html.tostring(node)
            footer = render_minimal(dict(css=css, subst=True, body=body, base_url=base_url))
            footerhtml.append(footer)
        for node in root.xpath("//div[@class='page']"):
            # Previously, we marked some reports to be saved in attachment via their ids, so we
            # must set a relation between report ids and report's content. We use the QWeb
            # branding in order to do so: searching after a node having a data-oe-model
            # attribute with the value of the current report model and read its oe-id attribute
            if ids and len(ids) == 1:
                reportid = ids[0]
            else:
                oemodelnode = node.find(".//*[@data-oe-model='%s']" % report.model)
                if oemodelnode is not None:
                    reportid = oemodelnode.get('data-oe-id')
                    if reportid:
                        reportid = int(reportid)
                else:
                    reportid = False
            # Extract the body
            body = lxml.html.tostring(node)
            reportcontent = render_minimal(dict(css=css, subst=False, body=body, base_url=base_url))
            contenthtml.append(tuple([reportid, reportcontent]))
    except lxml.etree.XMLSyntaxError:
        contenthtml = []
        contenthtml.append(html)
        save_in_attachment = {}  # Don't save this potentially malformed document
    # Get paperformat arguments set in the root html tag. They are prioritized over
    # paperformat-record arguments.  Skipped when the html could not be parsed.
    specific_paperformat_args = {}
    if root is not None:
        for attribute in root.items():
            if attribute[0].startswith('data-report-'):
                specific_paperformat_args[attribute[0]] = attribute[1]
    # Run wkhtmltopdf process
    return self._run_wkhtmltopdf(
        cr, uid, headerhtml, footerhtml, contenthtml, context.get('landscape'),
        paperformat, specific_paperformat_args, save_in_attachment
    )
@api.v8
def get_pdf(self, records, report_name, html=None, data=None):
    """New-API wrapper: delegate pdf generation to the v7 implementation."""
    cr, uid, ctx = self._cr, self._uid, self._context
    return self._model.get_pdf(cr, uid, records.ids, report_name,
                               html=html, data=data, context=ctx)
@api.v7
def get_action(self, cr, uid, ids, report_name, data=None, context=None):
    """Return an action of type ir.actions.report.xml.

    :param ids: Ids of the records to print (if not used, pass an empty list)
    :param report_name: Name of the template to generate an action for
    :param data: extra data forwarded in the action
    :raises osv.except_osv: when no report is registered under *report_name*
    """
    if ids:
        if not isinstance(ids, list):
            ids = [ids]
        context = dict(context or {}, active_ids=ids)
    report_obj = self.pool['ir.actions.report.xml']
    idreport = report_obj.search(cr, uid, [('report_name', '=', report_name)], context=context)
    try:
        report = report_obj.browse(cr, uid, idreport[0], context=context)
    except IndexError:
        # BUGFIX: translate the template first, then substitute, so the
        # msgid matches the translation catalog entry.
        raise osv.except_osv(
            _('Bad Report Reference'),
            _('This report is not loaded into the database: %s.') % report_name
        )
    # BUGFIX: 'context' was listed twice in this dict literal; keep one key.
    return {
        'context': context,
        'data': data,
        'type': 'ir.actions.report.xml',
        'report_name': report.report_name,
        'report_type': report.report_type,
        'report_file': report.report_file,
    }
@api.v8
def get_action(self, records, report_name, data=None):
    """New-API wrapper: delegate action building to the v7 implementation."""
    cr, uid, ctx = self._cr, self._uid, self._context
    return self._model.get_action(cr, uid, records.ids, report_name,
                                  data=data, context=ctx)
#--------------------------------------------------------------------------
# Report generation helpers
#--------------------------------------------------------------------------
@api.v7
def _check_attachment_use(self, cr, uid, ids, report):
""" Check attachment_use field. If set to true and an existing pdf is already saved, load
this one now. Else, mark save it.
"""
save_in_attachment = {}
save_in_attachment['model'] = report.model
save_in_attachment['loaded_documents'] = {}
if report.attachment:
for record_id in ids:
obj = self.pool[report.model].browse(cr, uid, record_id)
filename = eval(report.attachment, {'object': obj, 'time': time})
# If the user has checked 'Reload from Attachment'
if report.attachment_use:
alreadyindb = [('datas_fname', '=', filename),
('res_model', '=', report.model),
('res_id', '=', record_id)]
attach_ids = self.pool['ir.attachment'].search(cr, uid, | |
(224, 224, 3),classes=200)
temp_feature10=fake_base_model10.output
in10=fake_base_model10.input
outs =Average()([temp_feature1, temp_feature2,temp_feature3, temp_feature4,temp_feature5,temp_feature6,temp_feature7, temp_feature8,temp_feature9, temp_feature10])
model = Model(inputs=[in1,in2,in3,in4,in5,in6,in7,in8,in9,in10], outputs=outs,name='fake-ResNet50')
return model
def true_text_ResNet50_2(classes=200):
    """Two-branch classifier: true-image ResNet50 features fused (Add) with
    multi-text CNN features, then Flatten/Dropout/Dense/BN and a softmax
    head of *classes* units.

    Args:
        classes: number of output classes.
    Returns:
        A keras Model taking [image_input, text_input_0..9].
    """
    print('bulid true image model')
    # BUGFIX: forward *classes* instead of hard-coding 200 so the function
    # actually honours its parameter.
    true_image_model = true_ResNet50(classes=classes)
    output1 = true_image_model.output
    input1 = true_image_model.input
    print('bulid caption model')
    text_model = mult_text_cnnmodel(classes=classes)
    output3 = text_model.output
    input3 = text_model.input
    merged = Add()([output1, output3])
    flat = Flatten()(merged)
    drop = Dropout(0.5)(flat)
    fc = Dense(512)(drop)
    bn = BatchNormalization(name='bn2')(fc)
    out = Dense(classes, activation='softmax')(bn)
    m_model = Model(inputs=[input1,
                            input3[0], input3[1], input3[2], input3[3], input3[4],
                            input3[5], input3[6], input3[7], input3[8], input3[9]],
                    outputs=out)
    return m_model
def true_fake_text_ResNet50_3(classes=200):
    """Three-branch classifier: fake-image and text branches are fused first,
    projected, then fused with the true-image branch before the softmax head.

    Args:
        classes: number of output classes.
    Returns:
        A keras Model taking [true_image, fake_image_0..9, text_0..9].
    """
    print('bulid true image model')
    # BUGFIX: forward *classes* instead of hard-coding 200 in the
    # sub-builders so the parameter is honoured.
    true_image_model = true_ResNet50(classes=classes)
    print('bulid fake image model')
    fake_image_model = fake_ResNet50_new(classes=classes)
    output1 = true_image_model.output
    input1 = true_image_model.input
    output2 = fake_image_model.output
    input2 = fake_image_model.input
    print('bulid caption model')
    text_model = mult_text_cnnmodel_new(classes=classes)
    output3 = text_model.output
    input3 = text_model.input
    # Stage 1: fuse fake-image and text branches.
    merged = Add()([output2, output3])
    print(output2.shape)
    print(output3.shape)
    print(merged.shape)
    flat = Flatten()(merged)
    drop = Dropout(0.5)(flat)
    fc = Dense(2048)(drop)
    bn = BatchNormalization(name='bn2')(fc)
    # Stage 2: fuse the projection with the true-image branch.
    merged1 = Add()([output1, bn])
    flat1 = Flatten()(merged1)
    drop1 = Dropout(0.5)(flat1)
    fc1 = Dense(512)(drop1)
    bn1 = BatchNormalization(name='bn3')(fc1)
    out = Dense(classes, activation='softmax')(bn1)
    m_model = Model(inputs=[input1,
                            input2[0], input2[1], input2[2], input2[3], input2[4],
                            input2[5], input2[6], input2[7], input2[8], input2[9],
                            input3[0], input3[1], input3[2], input3[3], input3[4],
                            input3[5], input3[6], input3[7], input3[8], input3[9]],
                    outputs=out)
    return m_model
#from keras_attention_block import *
def true_fake_text_ResNet50_4(classes):
    """Self-attention variant of the three-branch classifier: each branch's
    output passes through a multiplicative SelfAttention1DLayer before the
    two-stage fusion and the final softmax head.

    Args:
        classes: number of output classes.
    Returns:
        A keras Model taking [true_image, fake_image_0..9, text_0..9].
    """
    print('bulid true image model')
    true_image_model = true_ResNet50(classes)
    print('bulid fake image model')
    fake_image_model = fake_ResNet50_new(classes)
    output1 = true_image_model.output
    input1 = true_image_model.input
    output2 = fake_image_model.output
    input2 = fake_image_model.input
    print(input1.shape)
    print(input2)
    print('bulid caption model')
    text_model = mult_text_cnnmodel_new(classes)
    output3 = text_model.output
    input3 = text_model.input
    # BUGFIX: the original assigned `attentIuput*` but then referenced
    # `attentInput*`, which raised NameError at model-build time.  One
    # consistent spelling is used throughout.
    attent_input1 = SelfAttention1DLayer(similarity="multiplicative", dropout_rate=0.2)(output1)
    attent_input2 = SelfAttention1DLayer(similarity="multiplicative", dropout_rate=0.2)(output2)
    attent_input3 = SelfAttention1DLayer(similarity="multiplicative", dropout_rate=0.2)(output3)
    # Stage 1: fuse attended fake-image and text branches.
    merged = Add()([attent_input2, attent_input3])
    flat = Flatten()(merged)
    drop = Dropout(0.1)(flat)
    fc = Dense(2048)(drop)
    bn = BatchNormalization(name='bn2')(fc)
    # Stage 2: fuse with the attended true-image branch.
    merged1 = Add()([attent_input1, bn])
    flat1 = Flatten()(merged1)
    drop1 = Dropout(0.1)(flat1)
    fc1 = Dense(512)(drop1)
    bn1 = BatchNormalization(name='bn3')(fc1)
    out = Dense(classes, activation='softmax')(bn1)
    m_model = Model(inputs=[input1,
                            input2[0], input2[1], input2[2], input2[3], input2[4],
                            input2[5], input2[6], input2[7], input2[8], input2[9],
                            input3[0], input3[1], input3[2], input3[3], input3[4],
                            input3[5], input3[6], input3[7], input3[8], input3[9]],
                    outputs=out)
    return m_model
def true_fake_ResNet50(classes=200):
    """Fuse (Add) the true-image and fake-image ResNet50 branches, then
    classify with Flatten/Dropout/Dense/BN and a softmax head.

    Args:
        classes: number of output classes.
    Returns:
        A keras Model taking [true_image, fake_image_0..9].
    """
    # BUGFIX: forward *classes* instead of hard-coding 200 in the
    # sub-builders so the parameter is honoured.
    true_image_model = true_ResNet50(classes=classes)
    fake_image_model = fake_ResNet50_new(classes=classes)
    output1 = true_image_model.output
    input1 = true_image_model.input
    output2 = fake_image_model.output
    input2 = fake_image_model.input
    merged = Add()([output1, output2])
    flat = Flatten()(merged)
    drop = Dropout(0.5)(flat)
    fc = Dense(512)(drop)
    bn = BatchNormalization()(fc)
    out = Dense(classes, activation='softmax')(bn)
    s_model = Model(inputs=[input1,
                            input2[0], input2[1], input2[2], input2[3], input2[4],
                            input2[5], input2[6], input2[7], input2[8], input2[9]],
                    outputs=out)
    s_model.summary()
    return s_model
def Our_ResNet50(classes):
    """Frozen ResNet50 backbone (weights loaded from file) with a fresh softmax head."""
    backbone = RResNet50(input_shape=(224, 224, 3), classes=200)
    backbone.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
    for lyr in backbone.layers:
        lyr.trainable = False
    feat = backbone.get_layer('activation_49').output
    flat = Flatten()(feat)
    preds = Dense(classes, activation='softmax')(flat)
    model = Model(inputs=backbone.input, outputs=preds)
    model.summary()
    return model
import keras_resnet.models
def Our_ResNet50_1(classes=200):
    """ImageNet ResNet50 base with a new softmax head.

    After assembly, only layers from index 141 onward are left trainable.
    """
    input_shape = (224, 224, 3)
    base = ResNet50(weights='imagenet', include_top=False, input_shape=input_shape)
    for lyr in base.layers:
        lyr.trainable = False
    x = Flatten(name='flatten')(base.output)
    preds = Dense(classes, activation='softmax', name='predictions')(x)
    model = Model(inputs=base.input, outputs=preds)
    model.summary()
    # freeze the early layers, fine-tune the rest
    for lyr in model.layers[:141]:
        lyr.trainable = False
    for lyr in model.layers[141:]:
        lyr.trainable = True
    return model
def Our_ResNet50_2(classes=200):
    """ImageNet ResNet50 with its top layer popped, frozen, plus a new softmax head."""
    base = keras.applications.resnet50.ResNet50()
    base.layers.pop()
    for lyr in base.layers:
        lyr.trainable = False
    features = base.layers[-1].output
    head = Dense(classes, activation="softmax")(features)
    finetuned_model = Model(base.input, head)
    finetuned_model.summary()
    return finetuned_model
def Our_ResNet50_based_2(classes=200):
    """Frozen ImageNet ResNet50 (top popped), truncated at 'activation_49'."""
    base = keras.applications.resnet50.ResNet50()
    base.layers.pop()
    for lyr in base.layers:
        lyr.trainable = False
    features = base.get_layer('activation_49').output
    model = Model(base.input, features)
    model.summary()
    return model
def Our_ResNet50_facke_based_2(index, classes=200):
    """Frozen ImageNet ResNet50 (top popped) used as fake-image branch `index`.

    Every layer is renamed with an '_<index>' suffix so several copies can
    coexist in one graph.
    """
    base = keras.applications.resnet50.ResNet50()
    base.layers.pop()
    for lyr in base.layers:
        lyr.trainable = False
        lyr.name = lyr.name + str("_") + str(index)
    last = base.layers[-1].output
    # Locate the final activation of this copy; get_layer() also validates
    # that the renamed layer exists (raises ValueError otherwise).
    Num = (index + 2) * 49 + index * 6
    res_layer = 'activation_' + str(Num) + str("_") + str(index)
    print(res_layer)
    res = base.get_layer(res_layer).output  # kept for the existence check above
    model = Model(base.input, last)
    return model
def true_text_ResNet50_2(classes):
    """Two-branch classifier: true-image ResNet50 fused with the caption CNN."""
    print('bulid true image model')
    image_branch = true_ResNet50(classes)
    print('bulid caption model')
    text_branch = mult_text_cnnmodel(classes)
    merged = Add()([image_branch.output, text_branch.output])
    x = Flatten()(merged)
    x = Dropout(0.5)(x)
    x = Dense(512)(x)
    x = BatchNormalization(name='bn2')(x)
    probs = Dense(classes, activation='softmax')(x)
    text_ins = text_branch.input
    m_model = Model(inputs=[image_branch.input] + [text_ins[k] for k in range(10)],
                    outputs=probs)
    m_model.summary()
    return m_model
def caption_fake1_ResNet50_2(index, classes):
    """Fuse one frozen fake-image ResNet50 branch with its caption branch."""
    print('merge the fake images %d' % index)
    image_branch = Our_ResNet50_facke_based_2(index, classes=200)
    caption_branch = text_cnnmodel_base(index, classes)
    merged = Add()([image_branch.output, caption_branch.output])
    model = Model(inputs=[image_branch.input, caption_branch.input],
                  outputs=merged, name='caption_fake1_ResNet50')
    return model
def Muit_fake1_Feature_model_2(classes):
    """Average the ten fake-image+caption fusion branches into one feature."""
    print('bulid caption_fakeImage model')
    branch_feats = []
    branch_ins = []
    for k in range(10):
        branch = caption_fake1_ResNet50_2(k, classes)
        branch_feats.append(branch.output)
        # each branch exposes [image_input, caption_input]; keep them flat
        branch_ins.extend(branch.input)
    print(branch_feats[0].shape)
    outs = Average()(branch_feats)
    model = Model(inputs=branch_ins, outputs=outs,
                  name='Muit_fake1_Feain1[0],in1[1],ture_model')
    return model
def finnal_muilt1Feature_model_2(classes):
    """Final classifier: frozen true-image backbone + averaged fake+caption branches."""
    print('bulid true image model')
    true_branch = Our_ResNet50_based_2(classes)
    print('build Muit_fake_Feature_model')
    fused_branch = Muit_fake1_Feature_model_2(classes)
    print(fused_branch.output.shape)
    merged = Add()([true_branch.output, fused_branch.output])
    print(merged.shape)
    x = Flatten()(merged)
    x = Dropout(0.5)(x)
    x = Dense(512)(x)
    x = BatchNormalization(name='bn2')(x)
    probs = Dense(classes, activation='softmax')(x)
    branch_ins = fused_branch.input
    m_model = Model(inputs=[true_branch.input] + [branch_ins[k] for k in range(20)],
                    outputs=probs)
    return m_model
def caption_fake1_ResNet50(index, classes):
    """Fuse one fake-image ResNet50 base with its caption branch."""
    print('merge the fake images %d' % index)
    image_branch = fake_ResNet50_base_new(index, input_shape=(224, 224, 3), classes=200)
    caption_branch = text_cnnmodel_base(index, classes)
    merged = Add()([image_branch.output, caption_branch.output])
    model = Model(inputs=[image_branch.input, caption_branch.input],
                  outputs=merged, name='caption_fake1_ResNet50')
    return model
def Muit_fake1_Feature_model(classes):
    """Average the ten fake-image+caption fusion branches into one feature."""
    print('bulid caption_fakeImage model')
    branch_feats = []
    branch_ins = []
    for k in range(10):
        branch = caption_fake1_ResNet50(k, classes)
        branch_feats.append(branch.output)
        # each branch exposes [image_input, caption_input]; keep them flat
        branch_ins.extend(branch.input)
    outs = Average()(branch_feats)
    model = Model(inputs=branch_ins, outputs=outs,
                  name='Muit_fake1_Feain1[0],in1[1],ture_model')
    return model
def finnal_muilt1Feature_model(classes):
    """Final classifier: true-image branch + averaged fake+caption branches."""
    print('bulid true image model')
    true_branch = true_ResNet50(classes)
    print('build Muit_fake_Feature_model')
    fused_branch = Muit_fake1_Feature_model(classes)
    merged = Add()([true_branch.output, fused_branch.output])
    x = Flatten()(merged)
    x = Dropout(0.5)(x)
    x = Dense(512)(x)
    x = BatchNormalization(name='bn2')(x)
    probs = Dense(classes, activation='softmax')(x)
    branch_ins = fused_branch.input
    m_model = Model(inputs=[true_branch.input] + [branch_ins[k] for k in range(20)],
                    outputs=probs)
    m_model.summary()
    return m_model
def fake2_ResNet50(index, classes):
    """Average the features of two consecutive fake-image ResNet50 bases."""
    feats = []
    ins = []
    for offset in (0, 1):
        base = fake_ResNet50_base(index + offset, input_shape=(224, 224, 3), classes=200)
        feats.append(base.output)
        ins.append(base.input)
    outs = Average()(feats)
    model = Model(inputs=ins, outputs=outs, name='fake-ResNet50')
    return model
def caption_fake2_ResNet50(index, classes):
    """Fuse a two-fake-image feature with caption branch `index`.

    Args:
        index (int): caption/branch index; the fake-image pair starts at 2*index.
        classes (int): number of output classes.

    Returns:
        Model: fusion branch producing a 2048-dim feature.
    """
    print('merge the fake images')
    nIndex = index * 2
    # BUG FIX: this originally called fake3_ResNet50(classes), which names
    # the wrong builder AND omits its required index argument (TypeError at
    # build time). The two-image builder is used, anchored at fake image
    # 2*index so consecutive branches cover consecutive image pairs.
    # TODO(review): confirm the intended anchor index against the data layout.
    mult_fake_model = fake2_ResNet50(nIndex, classes)
    mult_fakeimage_feature = mult_fake_model.output
    in1 = mult_fake_model.input
    caption_model = text_cnnmodel_base(nIndex, classes)
    caption_feature = caption_model.output
    in2 = caption_model.input
    merged = Add()([mult_fakeimage_feature, caption_feature])
    Flat = Flatten()(merged)
    Dor = Dropout(0.1)(Flat)
    fc = Dense(2048)(Dor)
    model = Model(inputs=[in1, in2], outputs=fc, name='caption_fake2_ResNet50')
    return model
def Muit_fake2_Feature_model(classes):
    """Average the ten two-fake-image+caption fusion branches."""
    print('bulid caption_fakeImage model')
    branch_feats = []
    branch_ins = []
    for k in range(10):
        branch = caption_fake2_ResNet50(k, classes)
        branch_feats.append(branch.output)
        branch_ins.append(branch.input)
    outs = Average()(branch_feats)
    model = Model(inputs=branch_ins, outputs=outs, name='Muit_fake3_Feature_model')
    return model
def finnal_muilt2Feature_model(classes):
    """Final classifier: true-image branch + averaged two-fake+caption branches.

    Args:
        classes (int): number of output classes.

    Returns:
        Model: multi-input softmax classifier.
    """
    print('bulid true image model')
    true_image_model = true_ResNet50(classes)
    true_image_feature = true_image_model.output
    in0 = true_image_model.input
    print('build Muit_fake3_Feature_model')
    mult_fake2_caption_model = Muit_fake2_Feature_model(classes)
    mult_fake2_caption_feature = mult_fake2_caption_model.output
    # BUG FIX: the original referenced 'mult_fake3_caption_model' /
    # 'mult_fake3_caption_feature' below — names that do not exist in this
    # function (copy-paste from finnal_muilt3Feature_model) and raised
    # NameError at build time.
    in1 = mult_fake2_caption_model.input
    merged = Add()([true_image_feature, mult_fake2_caption_feature])
    Flat = Flatten()(merged)
    Dor = Dropout(0.5)(Flat)
    fc = Dense(512)(Dor)
    bnn = BatchNormalization(name='bn2')(fc)
    Den = Dense(classes, activation='softmax')(bnn)
    m_model = Model(inputs=[in0, in1], outputs=Den)
    m_model.summary()
    return m_model
def fake3_ResNet50(index, classes):
    """Average the features of three consecutive fake-image ResNet50 bases."""
    feats = []
    ins = []
    for offset in (0, 1, 2):
        base = fake_ResNet50_base(index + offset, input_shape=(224, 224, 3), classes=200)
        feats.append(base.output)
        ins.append(base.input)
    outs = Average()(feats)
    model = Model(inputs=ins, outputs=outs, name='fake-ResNet50')
    return model
def caption_fake3_ResNet50(index, classes):
    """Fuse a three-fake-image feature with caption branch `index`.

    Args:
        index (int): caption/branch index; the fake-image triple starts at 3*index.
        classes (int): number of output classes.

    Returns:
        Model: fusion branch producing a 2048-dim feature.
    """
    print('merge the fake images')
    nIndex = index * 3
    # BUG FIX: this originally called fake3_ResNet50(classes), omitting the
    # builder's required index argument (TypeError at build time). The
    # triple is anchored at 3*index, matching the nIndex already computed
    # for the caption branch. TODO(review): confirm intended anchor index.
    mult_fake_model = fake3_ResNet50(nIndex, classes)
    mult_fakeimage_feature = mult_fake_model.output
    in1 = mult_fake_model.input
    caption_model = text_cnnmodel_base(nIndex, classes)
    caption_feature = caption_model.output
    in2 = caption_model.input
    merged = Add()([mult_fakeimage_feature, caption_feature])
    Flat = Flatten()(merged)
    Dor = Dropout(0.1)(Flat)
    fc = Dense(2048)(Dor)
    model = Model(inputs=[in1, in2], outputs=fc, name='caption_fake3_ResNet50')
    return model
def Muit_fake3_Feature_model(classes):
    """Average the ten three-fake-image+caption fusion branches."""
    print('bulid caption_fakeImage model')
    branch_feats = []
    branch_ins = []
    for k in range(10):
        branch = caption_fake3_ResNet50(k, classes)
        branch_feats.append(branch.output)
        branch_ins.append(branch.input)
    outs = Average()(branch_feats)
    model = Model(inputs=branch_ins, outputs=outs, name='Muit_fake3_Feature_model')
    return model
def finnal_muilt3Feature_model(classes):
    """Final classifier: true-image branch + averaged three-fake+caption branches."""
    print('bulid true image model')
    true_branch = true_ResNet50(classes)
    print('build Muit_fake3_Feature_model')
    fused_branch = Muit_fake3_Feature_model(classes)
    merged = Add()([true_branch.output, fused_branch.output])
    x = Flatten()(merged)
    x = Dropout(0.5)(x)
    x = Dense(512)(x)
    x = BatchNormalization(name='bn2')(x)
    probs = Dense(classes, activation='softmax')(x)
    m_model = Model(inputs=[true_branch.input, fused_branch.input], outputs=probs)
    m_model.summary()
    return m_model
def fake5_ResNet50(classes):
    """Average the features of the five fake-image ResNet50 bases (indices 0-4)."""
    feats = []
    ins = []
    for k in range(5):
        base = fake_ResNet50_base55(k, input_shape=(224, 224, 3), classes=200)
        feats.append(base.output)
        ins.append(base.input)
    outs = Average()(feats)
    model = Model(inputs=ins, outputs=outs, name='fake-ResNet50')
    return model
def caption_fake5_ResNet50(index, classes):
    """Fuse the averaged five-fake-image feature with caption branch `index`."""
    print('merge the fake images')
    image_branch = fake5_ResNet50(classes)
    caption_branch = text_cnnmodel_base(index, classes)
    merged = Add()([image_branch.output, caption_branch.output])
    x = Flatten()(merged)
    x = Dropout(0.1)(x)
    x = Dense(2048)(x)
    model = Model(inputs=[image_branch.input, caption_branch.input],
                  outputs=x, name='caption_fake5_ResNet50')
    return model
def Muit_fake5_Feature_model(classes):
    """Average the ten five-fake-image+caption fusion branches."""
    print('bulid caption_fakeImage model')
    branch_feats = []
    branch_ins = []
    for k in range(10):
        branch = caption_fake5_ResNet50(k, classes)
        branch_feats.append(branch.output)
        branch_ins.append(branch.input)
    outs = Average()(branch_feats)
    model = Model(inputs=branch_ins, outputs=outs, name='Muit_fake3_Feature_model')
    return model
def finnal_muilt5Feature_model(classes):
    """Final classifier: true-image branch + averaged five-fake+caption branches.

    Args:
        classes (int): number of output classes.

    Returns:
        Model: multi-input softmax classifier.
    """
    print('bulid true image model')
    true_image_model = true_ResNet50(classes)
    true_image_feature = true_image_model.output
    in0 = true_image_model.input
    print('build Muit_fake5_Feature_model')
    mult_fake5_caption_model = Muit_fake5_Feature_model(classes)
    mult_fake5_caption_feature = mult_fake5_caption_model.output
    in1 = mult_fake5_caption_model.input
    # BUG FIX: the original fused 'mult_fake3_caption_feature', a name that
    # does not exist in this function (copy-paste from the 3-fake variant)
    # and raised NameError at build time.
    merged = Add()([true_image_feature, mult_fake5_caption_feature])
    Flat = Flatten()(merged)
    Dor = Dropout(0.5)(Flat)
    fc = Dense(512)(Dor)
    bnn = BatchNormalization(name='bn2')(fc)
    Den = Dense(classes, activation='softmax')(bnn)
    m_model = Model(inputs=[in0, in1], outputs=Den)
    m_model.summary()
    return m_model
###======================== PREPARE DATA ====================================###
# Custom data generator.
# imgInfo_file_path: pickle file of image file names (with path)
# classInfo_file_path: pickle file of image class labels
# image_direction: directory containing the true images
# fackimage_direction: directory containing the fake (generated) images
# txt_direction: directory containing the caption text files
# image_size: input image size expected by the model
# num: the value of K (StackMGAN++)
# Build the caption tokenizer once at import time from the vocabulary file.
tokenizer = Tokenizer(filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=" ")
# FIX: use a context manager so the vocabulary file handle is closed
# (the original left it open for the life of the process).
with open('../data/flower-dataset/vacab.txt', 'r') as Alltxt:
    Alltext = Alltxt.read()
# NOTE(review): fit_on_texts() is given a single string here, so Keras
# iterates it character by character; if whole-text fitting was intended
# this should be fit_on_texts([Alltext]) — confirm before changing, since
# it would alter the learned vocabulary.
tokenizer.fit_on_texts(Alltext)
vocab = tokenizer.word_index
def data_generator_4(imgInfo_file_path,classInfo_file_path,image_direction,txt_direction,fackimage_direction,image_size,BATCHSIZE,num):
testfilenames = open(imgInfo_file_path,'rb')
rmesf= pickle.load(testfilenames)
testfilenames = open(classInfo_file_path,'rb')
rmesc= pickle.load(testfilenames)
txt1=[]
txt2=[]
txt3=[]
txt4=[]
txt5=[]
txt6=[]
txt7=[]
txt8=[]
txt9=[]
txt10=[]
fake1=[]
fake2=[]
fake3=[]
fake4=[]
fake5=[]
fake6=[]
fake7=[]
fake8=[]
fake9=[]
fake10=[]
images=[]
labels=[]
imagefile=[]
textfile=[]
iclass=[]
imagename=[]
num_of_examples=len(rmesf)
for i in range(len(rmesf)):
temp=rmesf[i]
tempimagename=image_direction+temp
#print(tempimagename)
if os.path.isfile(tempimagename)==False:
print('error! no such ture file: %s' %tempimagename)
continue
else:
#class_001/image_00000.txt
img=Image.open(tempimagename)
img = img.resize((image_size[0], image_size[1]),Image.ANTIALIAS)
img=np.array(img)
ttemp=rmesc[i]
templable=int(ttemp)
templable1=int(ttemp)-1
templable='%03d' % templable
#print(templable)
ftemp=temp[:-4]
txtPath=txt_direction+'class_'+templable+'/'+ftemp+'.txt'
#print(txtPath)
if os.path.isfile(txtPath)==False:
print('error! no such caption file: %s' %txtPath)
continue
else:
temptxt=[]
tempfake=[]
tmask=False
mm=0
for line in | |
position vectors for solar system objects.
Args:
currentTime (astropy Time array):
Current absolute mission time in MJD
bodyname (string):
Solar system object name
eclip (boolean):
Boolean used to switch to heliocentric ecliptic frame. Defaults to
False, corresponding to heliocentric equatorial frame.
Returns:
r_body (astropy Quantity nx3 array):
Solar system body positions in heliocentric equatorial (default)
or ecliptic frame in units of AU
Note: Use eclip=True to get ecliptic coordinates.
"""
# dictionary of solar system bodies available in spice kernel (in km)
bodies = {'Mercury': 199,
'Venus': 299,
'Earth': 399,
'Mars': 4,
'Jupiter': 5,
'Saturn': 6,
'Uranus': 7,
'Neptune': 8,
'Pluto': 9,
'Sun': 10,
'Moon': 301}
assert bodies.has_key(bodyname), \
"%s is not a recognized body name."%(bodyname)
# julian day time
jdtime = np.array(currentTime.jd, ndmin=1)
# body positions vector in heliocentric equatorial frame
if bodies[bodyname] == 199:
r_body = (self.kernel[0,1].compute(jdtime) +
self.kernel[1,199].compute(jdtime) -
self.kernel[0,10].compute(jdtime))
elif bodies[bodyname] == 299:
r_body = (self.kernel[0,2].compute(jdtime) +
self.kernel[2,299].compute(jdtime) -
self.kernel[0,10].compute(jdtime))
elif bodies[bodyname] == 399:
r_body = (self.kernel[0,3].compute(jdtime) +
self.kernel[3,399].compute(jdtime) -
self.kernel[0,10].compute(jdtime))
elif bodies[bodyname] == 301:
r_body = (self.kernel[0,3].compute(jdtime) +
self.kernel[3,301].compute(jdtime) -
self.kernel[0,10].compute(jdtime))
else:
r_body = (self.kernel[0,bodies[bodyname]].compute(jdtime) -
self.kernel[0,10].compute(jdtime))
# reshape and convert units
r_body = (r_body*u.km).T.to('AU')
if eclip:
# body positions vector in heliocentric ecliptic frame
r_body = self.equat2eclip(r_body, currentTime)
return r_body
def keplerplanet(self, currentTime, bodyname, eclip=False):
    """Finds solar system body positions vector in heliocentric equatorial (default)
    or ecliptic frame for current time (MJD).

    This method uses algorithms 2 and 10 from Vallado 2013 to find
    heliocentric equatorial position vectors for solar system objects.

    Args:
        currentTime (astropy Time array):
            Current absolute mission time in MJD
        bodyname (string):
            Solar system object name
        eclip (boolean):
            Boolean used to switch to heliocentric ecliptic frame. Defaults to
            False, corresponding to heliocentric equatorial frame.

    Returns:
        r_body (astropy Quantity nx3 array):
            Solar system body positions in heliocentric equatorial (default)
            or ecliptic frame in units of AU

    Note: Use eclip=True to get ecliptic coordinates.
    """
    # Moon positions based on Earth positions (Moon has no entry in
    # self.planets; its geocentric offset comes from moon_earth())
    if bodyname == 'Moon':
        r_Earth = self.keplerplanet(currentTime, 'Earth')
        return r_Earth + self.moon_earth(currentTime)
    assert bodyname in self.planets,\
        "%s is not a recognized body name."%(bodyname)
    # find Julian centuries from J2000
    TDB = self.cent(currentTime)
    # update ephemerides data (convert sma from km to AU); each element is a
    # polynomial in TDB propagated by propeph()
    planet = self.planets[bodyname]
    a = (self.propeph(planet.a, TDB)*u.km).to('AU').value   # semi-major axis
    e = self.propeph(planet.e, TDB)                          # eccentricity
    I = np.radians(self.propeph(planet.I, TDB))              # inclination
    O = np.radians(self.propeph(planet.O, TDB))              # RAAN
    w = np.radians(self.propeph(planet.w, TDB))              # longitude of periapsis
    lM = np.radians(self.propeph(planet.lM, TDB))            # mean longitude
    # find mean anomaly and argument of perigee
    M = (lM - w) % (2*np.pi)
    wp = (w - O) % (2*np.pi)
    # find eccentric anomaly (Kepler's equation solver)
    E = eccanom(M,e)[0]
    # find true anomaly
    nu = np.arctan2(np.sin(E) * np.sqrt(1 - e**2), np.cos(E) - e)
    # find semiparameter
    p = a*(1 - e**2)
    # body positions vector in orbital plane (conic equation)
    rx = p*np.cos(nu)/(1 + e*np.cos(nu))
    ry = p*np.sin(nu)/(1 + e*np.cos(nu))
    rz = np.zeros(currentTime.size)
    r_orb = np.array([rx,ry,rz])
    # body positions vector in heliocentric ecliptic plane:
    # rotate out of the orbital plane via R3(-O) R1(-I) R3(-wp)
    r_body = np.array([np.dot(np.dot(self.rot(-O[x], 3),
            self.rot(-I[x], 1)), np.dot(self.rot(-wp[x], 3),
            r_orb[:,x])) for x in range(currentTime.size)])*u.AU
    if not eclip:
        # body positions vector in heliocentric equatorial frame
        r_body = self.eclip2equat(r_body, currentTime)
    return r_body
def moon_earth(self, currentTime):
    """Finds geocentric equatorial positions vector for Earth's moon

    This method uses Algorithm 31 from Vallado 2013 to find the geocentric
    equatorial positions vector for Earth's moon.

    Args:
        currentTime (astropy Time array):
            Current absolute mission time in MJD

    Returns:
        r_moon (astropy Quantity nx3 array):
            Geocentric equatorial position vector in units of AU
    """
    # Julian centuries since J2000
    TDB = np.array(self.cent(currentTime), ndmin=1)
    # trigonometric series (degrees inside, radians out); coefficients are
    # from Vallado 2013 Algorithm 31 — do not reformat or round them.
    # la: ecliptic longitude of the Moon (rad)
    la = np.radians(218.32 + 481267.8813*TDB +
        6.29*np.sin(np.radians(134.9 + 477198.85*TDB)) -
        1.27*np.sin(np.radians(259.2 - 413335.38*TDB)) +
        0.66*np.sin(np.radians(235.7 + 890534.23*TDB)) +
        0.21*np.sin(np.radians(269.9 + 954397.70*TDB)) -
        0.19*np.sin(np.radians(357.5 + 35999.05*TDB)) -
        0.11*np.sin(np.radians(186.6 + 966404.05*TDB)))
    # phi: ecliptic latitude of the Moon (rad)
    phi = np.radians(5.13*np.sin(np.radians(93.3 + 483202.03*TDB)) +
        0.28*np.sin(np.radians(228.2 + 960400.87*TDB)) -
        0.28*np.sin(np.radians(318.3 + 6003.18*TDB)) -
        0.17*np.sin(np.radians(217.6 - 407332.20*TDB)))
    # P: horizontal parallax (rad)
    P = np.radians(0.9508 + 0.0518*np.cos(np.radians(134.9 + 477198.85*TDB)) +
        0.0095*np.cos(np.radians(259.2 - 413335.38*TDB)) +
        0.0078*np.cos(np.radians(235.7 + 890534.23*TDB)) +
        0.0028*np.cos(np.radians(269.9 + 954397.70*TDB)))
    # e: obliquity of the ecliptic (rad)
    e = np.radians(23.439291 - 0.0130042*TDB - 1.64e-7*TDB**2 + 5.04e-7*TDB**3)
    # Earth-Moon distance from parallax (6378.137 km = Earth equatorial radius)
    r = 1./np.sin(P)*6378.137 # km
    # spherical ecliptic coordinates rotated into the geocentric equatorial frame
    r_moon = r*np.array([np.cos(phi)*np.cos(la),
        np.cos(e)*np.cos(phi)*np.sin(la) - np.sin(e)*np.sin(phi),
        np.sin(e)*np.cos(phi)*np.sin(la) + np.cos(e)*np.sin(phi)])
    # set format and units
    r_moon = (r_moon*u.km).T.to('AU')
    return r_moon
def cent(self, currentTime):
    """Return the time elapsed since the J2000 epoch, in Julian centuries.

    Many Vallado 2013 algorithms take this quantity as input.

    Args:
        currentTime (astropy Time array):
            Current absolute mission time in MJD

    Returns:
        TDB (float ndarray):
            time in Julian centuries since the J2000 epoch
    """
    # 36525 days per Julian century
    j2000_jd = Time(2000., format='jyear',scale='tai').jd
    return (currentTime.jd - j2000_jd)/36525.
def propeph(self, x, TDB):
    """Propagates an ephemeris from Vallado 2013 to current time.

    Evaluates the cubic polynomial c0 + c1*TDB + c2*TDB**2 + c3*TDB**3.

    Args:
        x (list, float, or int):
            polynomial coefficients [c0, c1, c2, c3]; shorter lists are
            treated as zero-padded, a scalar is the constant term c0
        TDB (float or float ndarray):
            time in Julian centuries since the J2000 epoch

    Returns:
        y (float ndarray):
            ephemeride value at current time (always at least 1-D)
    """
    # BUG FIX: the original appended zeros to the caller's list in place,
    # permanently mutating the shared ephemeris data on first use. Build a
    # normalized 4-element coefficient list locally instead.
    if isinstance(x, list):
        coeffs = list(x[:4]) + [0.]*max(0, 4 - len(x))
    elif isinstance(x, (float, int)):
        coeffs = [float(x), 0., 0., 0.]
    else:
        # fall through unchanged for array-likes already holding 4 terms
        coeffs = x
    # propagated ephem
    y = coeffs[0] + coeffs[1]*TDB + coeffs[2]*(TDB**2) + coeffs[3]*(TDB**3)
    # cast to array; np.atleast_1d avoids np.array(..., copy=False), which
    # raises for scalar input under NumPy >= 2.0
    return np.atleast_1d(np.asarray(y))
def rot(self, th, axis):
    """Build the 3x3 rotation matrix for angle `th` about a coordinate axis.

    Args:
        th (float):
            Rotation angle in radians
        axis (int):
            Integer value denoting rotation axis (1, 2, or 3)

    Returns:
        rot_th (float 3x3 ndarray):
            Rotation matrix
    """
    # hoist the trig evaluations; each branch only arranges c/s/signs
    c = np.cos(th)
    s = np.sin(th)
    if axis == 1:
        rot_th = np.array([[1., 0., 0.],
                           [0., c, s],
                           [0., -s, c]])
    elif axis == 2:
        rot_th = np.array([[c, 0., -s],
                           [0., 1., 0.],
                           [s, 0., c]])
    elif axis == 3:
        rot_th = np.array([[c, s, 0.],
                           [-s, c, 0.],
                           [0., 0., 1.]])
    return rot_th
def distForces(self, TL, sInd, currentTime):
    """Finds lateral and axial disturbance forces on an occulter

    Args:
        TL (TargetList module):
            TargetList class object
        sInd (integer):
            Integer index of the star of interest
        currentTime (astropy Time):
            Current absolute mission time in MJD

    Returns:
        dF_lateral (astropy Quantity):
            Lateral disturbance force in units of N
        dF_axial (astropy Quantity):
            Axial disturbance force in units of N
    """
    # get spacecraft (telescope) position vector
    r_obs = self.orbit(currentTime)[0]
    # sun -> earth position vector
    r_Es = self.solarSystem_body_position(currentTime, 'Earth')[0]
    # Telescope -> target vector and unit vector
    r_targ = TL.starprop(sInd, currentTime)[0] - r_obs
    u_targ = r_targ.value/np.linalg.norm(r_targ)
    # sun -> occulter vector: the occulter sits occulterSep from the
    # telescope along the line of sight to the target
    r_Os = r_obs + self.occulterSep*u_targ
    # Earth-Moon barycenter -> spacecraft vectors
    r_TE = r_obs - r_Es
    r_OE = r_Os - r_Es
    # gravitational force on occulter (Sun term + Earth term);
    # np.linalg.norm strips astropy units, so the norm is re-tagged
    # with r.unit before cubing
    Mfactor = -self.scMass*const.M_sun*const.G
    F_sO = r_Os/(np.linalg.norm(r_Os)*r_Os.unit)**3 * Mfactor
    # NOTE(review): 328900.56 appears to be the Sun/(Earth+Moon) mass
    # ratio, rescaling the solar-mass factor -- confirm the source
    F_EO = r_OE/(np.linalg.norm(r_OE)*r_OE.unit)**3 * Mfactor/328900.56
    F_O = F_sO + F_EO
    # gravitational force on telescope (same two terms, telescope mass)
    Mfactor = -self.coMass*const.M_sun*const.G
    F_sT = r_obs/(np.linalg.norm(r_obs)*r_obs.unit)**3 * Mfactor
    F_ET = r_TE/(np.linalg.norm(r_TE)*r_TE.unit)**3 * Mfactor/328900.56
    F_T = F_sT + F_ET
    # differential force, with the telescope term scaled by the mass ratio
    dF = F_O - F_T*self.scMass/self.coMass
    # decompose into axial (along the line of sight) and lateral components
    dF_axial = dF.dot(u_targ).to('N')
    dF_lateral = (dF - dF_axial*u_targ).to('N')
    dF_lateral = np.linalg.norm(dF_lateral)*dF_lateral.unit
    dF_axial = np.abs(dF_axial)
    return dF_lateral, dF_axial
def mass_dec(self, dF_lateral, t_int):
    """Returns mass_used and deltaV

    The values returned by this method are used to decrement spacecraft
    mass for station-keeping.

    Args:
        dF_lateral (astropy Quantity):
            Lateral disturbance force in units of N
        t_int (astropy Quantity):
            Integration time in units of day

    Returns:
        intMdot (astropy Quantity):
            Mass flow rate in units of kg/s
        mass_used (astropy Quantity):
            Mass used in station-keeping units of kg
        deltaV (astropy Quantity):
            Change in velocity required for station-keeping in units of km/s
    """
    # thruster geometry factor cos(5 deg)/cos(45 deg), applied to the
    # lateral force before dividing by exhaust velocity (g0 * Isp)
    geom = np.cos(np.radians(5))/np.cos(np.radians(45))
    intMdot = (geom*dF_lateral/(const.g0*self.skIsp)).to('kg/s')
    mass_used = (intMdot*t_int).to('kg')
    deltaV = (dF_lateral*t_int/self.scMass).to('km/s')
    return intMdot, mass_used, deltaV
def mass_dec_sk(self, TL, sInd, currentTime, t_int):
"""Returns mass_used, deltaV | |
j = 0; j != cols; ++j) o[i][j] = parse_SerAr(blob);
return o;
}
/* PtgName: reference to a defined name */
function parse_PtgName(blob, length) {
	var ptg_type = (blob.read_shift(1) >>> 5) & 0x03;
	var name_idx = blob.read_shift(4);
	/* middle slot is unused (0) so the shape matches PtgNameX */
	return [ptg_type, 0, name_idx];
}
/* PtgNameX: reference to a defined name in an external sheet/book */
function parse_PtgNameX(blob, length) {
	var ptg_type = (blob.read_shift(1) >>> 5) & 0x03;
	var xti = blob.read_shift(2); // XtiIndex
	var name_idx = blob.read_shift(4);
	return [ptg_type, xti, name_idx];
}
/* PtgMemArea: placeholder for a constant reference subexpression */
function parse_PtgMemArea(blob, length) {
	var ptg_type = (blob.read_shift(1) >>> 5) & 0x03;
	blob.l += 4; // skip 4 unused payload bytes
	var subexpr_len = blob.read_shift(2);
	return [ptg_type, subexpr_len];
}
/* PtgMemFunc: placeholder for a non-constant reference subexpression */
function parse_PtgMemFunc(blob, length) {
	var ptg_type = (blob.read_shift(1) >>> 5) & 0x03;
	var subexpr_len = blob.read_shift(2);
	return [ptg_type, subexpr_len];
}
/* PtgRefErr: cell reference that resolved to an error */
function parse_PtgRefErr(blob, length) {
	var ptg_type = (blob.read_shift(1) >>> 5) & 0x03;
	blob.l += 4; // discard the 4 payload bytes
	return [ptg_type];
}
/* 192.168.127.12 */
var parse_PtgAdd = parseread1;
/* 172.16.17.32 */
var parse_PtgDiv = parseread1;
/* 192.168.3.11 */
var parse_PtgEq = parseread1;
/* 192.168.3.11 */
var parse_PtgGe = parseread1;
/* 172.16.58.3 */
var parse_PtgGt = parseread1;
/* 172.16.17.32 */
var parse_PtgIsect = parseread1;
/* 192.168.3.11 */
var parse_PtgLe = parseread1;
/* 172.16.31.10 */
var parse_PtgLt = parseread1;
/* 172.16.17.32 */
var parse_PtgMissArg = parseread1;
/* 192.168.3.11 */
var parse_PtgMul = parseread1;
/* 192.168.127.12 */
var parse_PtgNe = parseread1;
/* 192.168.127.12 */
var parse_PtgParen = parseread1;
/* 172.16.31.10 */
var parse_PtgPercent = parseread1;
/* 172.16.17.32 */
var parse_PtgPower = parseread1;
/* 192.168.127.12 */
var parse_PtgRange = parseread1;
/* 172.16.31.10 */
var parse_PtgSub = parseread1;
/* 172.16.17.323 */
var parse_PtgUminus = parseread1;
/* 192.168.127.12 */
var parse_PtgUnion = parseread1;
/* 172.16.17.325 */
var parse_PtgUplus = parseread1;
/* 172.16.31.101 */
var parse_PtgMemErr = parsenoop;
/* 172.16.17.32 */
var parse_PtgMemNoMem = parsenoop;
/* 172.16.31.107 */
var parse_PtgRefErr3d = parsenoop;
/* 192.168.127.12 */
var parse_PtgTbl = parsenoop;
/* Ptg token dispatch table: maps the leading ptg id byte to its name and
   parser. Ids 0x18/0x19 are escape bytes handled separately in parse_Rgce
   via the Ptg18/Ptg19 tables. */
var PtgTypes = {
	0x01: { n:'PtgExp', f:parse_PtgExp },
	0x02: { n:'PtgTbl', f:parse_PtgTbl },
	0x03: { n:'PtgAdd', f:parse_PtgAdd },
	0x04: { n:'PtgSub', f:parse_PtgSub },
	0x05: { n:'PtgMul', f:parse_PtgMul },
	0x06: { n:'PtgDiv', f:parse_PtgDiv },
	0x07: { n:'PtgPower', f:parse_PtgPower },
	0x08: { n:'PtgConcat', f:parse_PtgConcat },
	0x09: { n:'PtgLt', f:parse_PtgLt },
	0x0A: { n:'PtgLe', f:parse_PtgLe },
	0x0B: { n:'PtgEq', f:parse_PtgEq },
	0x0C: { n:'PtgGe', f:parse_PtgGe },
	0x0D: { n:'PtgGt', f:parse_PtgGt },
	0x0E: { n:'PtgNe', f:parse_PtgNe },
	0x0F: { n:'PtgIsect', f:parse_PtgIsect },
	0x10: { n:'PtgUnion', f:parse_PtgUnion },
	0x11: { n:'PtgRange', f:parse_PtgRange },
	0x12: { n:'PtgUplus', f:parse_PtgUplus },
	0x13: { n:'PtgUminus', f:parse_PtgUminus },
	0x14: { n:'PtgPercent', f:parse_PtgPercent },
	0x15: { n:'PtgParen', f:parse_PtgParen },
	0x16: { n:'PtgMissArg', f:parse_PtgMissArg },
	0x17: { n:'PtgStr', f:parse_PtgStr },
	0x1C: { n:'PtgErr', f:parse_PtgErr },
	0x1D: { n:'PtgBool', f:parse_PtgBool },
	0x1E: { n:'PtgInt', f:parse_PtgInt },
	0x1F: { n:'PtgNum', f:parse_PtgNum },
	0x20: { n:'PtgArray', f:parse_PtgArray },
	0x21: { n:'PtgFunc', f:parse_PtgFunc },
	0x22: { n:'PtgFuncVar', f:parse_PtgFuncVar },
	0x23: { n:'PtgName', f:parse_PtgName },
	0x24: { n:'PtgRef', f:parse_PtgRef },
	0x25: { n:'PtgArea', f:parse_PtgArea },
	0x26: { n:'PtgMemArea', f:parse_PtgMemArea },
	0x27: { n:'PtgMemErr', f:parse_PtgMemErr },
	0x28: { n:'PtgMemNoMem', f:parse_PtgMemNoMem },
	0x29: { n:'PtgMemFunc', f:parse_PtgMemFunc },
	0x2A: { n:'PtgRefErr', f:parse_PtgRefErr },
	0x2B: { n:'PtgAreaErr', f:parse_PtgAreaErr },
	0x2C: { n:'PtgRefN', f:parse_PtgRefN },
	0x2D: { n:'PtgAreaN', f:parse_PtgAreaN },
	0x39: { n:'PtgNameX', f:parse_PtgNameX },
	0x3A: { n:'PtgRef3d', f:parse_PtgRef3d },
	0x3B: { n:'PtgArea3d', f:parse_PtgArea3d },
	0x3C: { n:'PtgRefErr3d', f:parse_PtgRefErr3d },
	0x3D: { n:'PtgAreaErr3d', f:parse_PtgAreaErr3d },
	/* 0xFF: sentinel entry with no parser */
	0xFF: {}
};
/* These are duplicated in the PtgTypes table: ids offset by 0x40 or 0x60
   parse exactly like their base id (e.g. 0x44 and 0x64 -> 0x24 PtgRef),
   consistent with the parsers extracting (byte >>> 5) & 3 as a type. */
var PtgDupes = {
	0x40: 0x20, 0x60: 0x20,
	0x41: 0x21, 0x61: 0x21,
	0x42: 0x22, 0x62: 0x22,
	0x43: 0x23, 0x63: 0x23,
	0x44: 0x24, 0x64: 0x24,
	0x45: 0x25, 0x65: 0x25,
	0x46: 0x26, 0x66: 0x26,
	0x47: 0x27, 0x67: 0x27,
	0x48: 0x28, 0x68: 0x28,
	0x49: 0x29, 0x69: 0x29,
	0x4A: 0x2A, 0x6A: 0x2A,
	0x4B: 0x2B, 0x6B: 0x2B,
	0x4C: 0x2C, 0x6C: 0x2C,
	0x4D: 0x2D, 0x6D: 0x2D,
	0x59: 0x39, 0x79: 0x39,
	0x5A: 0x3A, 0x7A: 0x3A,
	0x5B: 0x3B, 0x7B: 0x3B,
	0x5C: 0x3C, 0x7C: 0x3C,
	0x5D: 0x3D, 0x7D: 0x3D
};
/* alias every duplicate id to the PtgTypes entry of its base id */
(function(){for(var y in PtgDupes) PtgTypes[y] = PtgTypes[PtgDupes[y]];})();
/* Secondary dispatch tables for the two escape ptg ids (0x18 and 0x19):
   when the leading byte is an escape, the NEXT byte is looked up here
   instead of in PtgTypes (see parse_Rgce). Ptg18 has no known entries. */
var Ptg18 = {};
var Ptg19 = {
	0x01: { n:'PtgAttrSemi', f:parse_PtgAttrSemi },
	0x02: { n:'PtgAttrIf', f:parse_PtgAttrIf },
	0x04: { n:'PtgAttrChoose', f:parse_PtgAttrChoose },
	0x08: { n:'PtgAttrGoto', f:parse_PtgAttrGoto },
	0x10: { n:'PtgAttrSum', f:parse_PtgAttrSum },
	0x20: { n:'PtgAttrBaxcel', f:parse_PtgAttrBaxcel },
	0x40: { n:'PtgAttrSpace', f:parse_PtgAttrSpace },
	0x41: { n:'PtgAttrSpaceSemi', f:parse_PtgAttrSpaceSemi },
	/* 0xFF: sentinel entry with no parser */
	0xFF: {}
};
/* TODO: it will be useful to parse the function str */
/* Convert R1C1-style references in a formula string to A1 style, resolving
   bracketed (relative) offsets against `base`.
   NOTE(review): the numeric conversions use `+$2`/`+$4` (the optional "["
   capture groups) while the length guards test $3/$5 (the digit groups);
   this looks like a $2<->$3 and $4<->$5 swap -- confirm against upstream.
   NOTE(review): the doubled backslashes in the regex would match literal
   backslashes if taken verbatim -- possibly an escaping artifact. */
function rc_to_a1(fstr, base) {
	return fstr.replace(/(^|[^A-Za-z])R(\\[?)(-?\\d+|)\\]?C(\\[?)(-?\\d+|)\\]?/g,function($$,$1,$2,$3,$4,$5) {
		var R = $3.length?+$2:0, C = $5.length?+$4:0;
		if(C<0 && !$4) C=0;
		return ($1||"")+encode_cell({c:$4?base.c+C:C,r:$2?base.r+R:R});
	});
}
/* 2.4.127 Formula record: cell address, cached value, flags, and the
   parsed expression. TODO */
function parse_Formula(blob, length) {
	var addr = parse_Cell(blob, 6);
	var cached = parse_FormulaValue(blob, 8);
	var grbit = blob.read_shift(1);
	blob.read_shift(1); // reserved byte
	var chn = blob.read_shift(4); // chn field, not surfaced to callers
	var expr = parse_CellParsedFormula(blob, length-20);
	/* bit 3 of the flags marks a shared formula */
	return {cell:addr, val:cached[0], formula:expr, shared: (grbit >> 3) & 1, tt:cached[1]};
}
/* 2.5.133 FormulaValue: cached result of a formula.
   TODO: how to emit empty strings? */
function parse_FormulaValue(blob) {
	var b;
	/* if bytes 6-7 are not the 0xFFFF sentinel, the cached value is a number */
	if(__readUInt16LE(blob,blob.l + 6) !== 0xFFFF) return [parse_Xnum(blob),'n'];
	/* otherwise byte 0 selects the cached value type */
	switch(blob[blob.l]) {
		case 0x00: blob.l += 8; return ["String", 's']; /* string placeholder */
		case 0x01: b = blob[blob.l+2] === 0x1; blob.l += 8; return [b,'b']; /* boolean */
		case 0x02: b = BERR[blob[blob.l+2]]; blob.l += 8; return [b,'e']; /* error */
		case 0x03: blob.l += 8; return ["",'s']; /* empty string */
	}
	/* NOTE(review): unknown type bytes fall through, returning undefined
	   WITHOUT advancing blob.l -- confirm this is intended */
}
/* RgbExtra: trailing payload for tokens whose data lives after the rgce.
   Walks the already-parsed token list and attaches the extra data to the
   matching tokens in place; also returns the collected extras. */
function parse_RgbExtra(blob, length, rgce) {
	var target = blob.l + length;
	var o = [];
	for(var i = 0; i !== rgce.length; ++i) {
		switch(rgce[i][0]) {
			case 'PtgArray': /* PtgArray -> PtgExtraArray */
				rgce[i][1] = parse_PtgExtraArray(blob);
				o.push(rgce[i][1]);
				break;
			case 'PtgMemArea': /* PtgMemArea -> PtgExtraMem */
				rgce[i][2] = parse_PtgExtraMem(blob, rgce[i][1]);
				o.push(rgce[i][2]);
				break;
			default: break;
		}
	}
	/* consume any unclaimed trailing bytes to keep the stream aligned */
	length = target - blob.l;
	if(length !== 0) o.push(parsenoop(blob, length));
	return o;
}
/* NameParsedFormula: rgce token array plus any trailing RgbExtra bytes */
function parse_NameParsedFormula(blob, length, cce) {
	var end = blob.l + length;
	var rgce = parse_Rgce(blob, cce);
	var extra;
	if(blob.l !== end) extra = parse_RgbExtra(blob, end - blob.l, rgce);
	return [rgce, extra];
}
/* CellParsedFormula: rgce token array plus optional trailing rgcb. TODO */
function parse_CellParsedFormula(blob, length) {
	var target = blob.l + length;
	var rgcb, cce = blob.read_shift(2); // length of rgce in bytes
	/* 0xFFFF marks an unusable formula: skip the remaining payload */
	if(cce == 0xFFFF) return [[],parsenoop(blob, length-2)];
	var rgce = parse_Rgce(blob, cce);
	/* anything beyond the 2-byte count + rgce is RgbExtra data */
	if(length !== cce + 2) rgcb = parse_RgbExtra(blob, length - cce - 2, rgce);
	return [rgce, rgcb];
}
/* SharedParsedFormula: rgce token array plus optional trailing rgcb. TODO */
function parse_SharedParsedFormula(blob, length) {
	var target = blob.l + length;
	var rgcb, cce = blob.read_shift(2); // length of rgce in bytes
	/* Bug fix: the 0xFFFF "unusable formula" check must happen BEFORE
	   parse_Rgce consumes the stream (matching parse_CellParsedFormula
	   and parse_ArrayParsedFormula). */
	if(cce == 0xFFFF) return [[],parsenoop(blob, length-2)];
	var rgce = parse_Rgce(blob, cce);
	/* Bug fix: the RgbExtra size is a byte count; the old `target - cce - 2`
	   mixed the absolute stream position with byte counts. */
	if(length !== cce + 2) rgcb = parse_RgbExtra(blob, length - cce - 2, rgce);
	return [rgce, rgcb];
}
/* ArrayParsedFormula: rgce token array plus optional trailing rgcb. TODO */
function parse_ArrayParsedFormula(blob, length, opts, ref) {
	var target = blob.l + length;
	var rgcb, cce = blob.read_shift(2); // length of rgce in bytes
	/* 0xFFFF marks an unusable formula: skip the remaining payload */
	if(cce == 0xFFFF) return [[],parsenoop(blob, length-2)];
	var rgce = parse_Rgce(blob, cce);
	/* Bug fix: the RgbExtra size is a byte count; the old `target - cce - 2`
	   mixed the absolute stream position with byte counts (compare
	   parse_CellParsedFormula, which correctly uses `length - cce - 2`). */
	if(length !== cce + 2) rgcb = parse_RgbExtra(blob, length - cce - 2, rgce);
	return [rgce, rgcb];
}
/* Rgce: parse a sequence of ptg tokens until `length` bytes are consumed */
function parse_Rgce(blob, length) {
	var target = blob.l + length;
	var R, id, ptgs = [];
	while(target != blob.l) {
		length = target - blob.l;
		id = blob[blob.l];
		R = PtgTypes[id];
		if(id === 0x18 || id === 0x19) {
			/* Escape ptg: the dispatch table is chosen by the FIRST byte
			   (0x18 -> Ptg18, 0x19 -> Ptg19) and the entry by the second.
			   Bug fix: the old code overwrote `id` with the second byte
			   before testing `id === 0x18`, so Ptg18 was only selected
			   when the second byte happened to be 0x18. */
			R = (id === 0x18 ? Ptg18 : Ptg19)[blob[blob.l + 1]];
		}
		/* unknown tokens consume the rest of the rgce via parsenoop */
		if(!R || !R.f) { ptgs.push(parsenoop(blob, length)); }
		else { ptgs.push([R.n, R.f(blob, length)]); }
	}
	return ptgs;
}
/* 2.2.2 + Magic TODO */
function stringify_formula(formula, range, cell, supbooks) {
range = range || {s:{c:0, r:0}};
var stack = [], e1, e2, type, c, ixti, nameidx, r;
if(!formula[0] || !formula[0][0]) return "";
//console.log("--",cell,formula[0])
formula[0].forEach(function(f) {
//console.log("++",f, stack)
switch(f[0]) {
/* 2.2.2.1 Unary Operator Tokens */
/* 192.168.127.12 */
case 'PtgUminus': stack.push("-" + stack.pop()); break;
/* 192.168.3.11 */
case 'PtgUplus': stack.push("+" + stack.pop()); break;
/* 172.16.31.10 */
case 'PtgPercent': stack.push(stack.pop() + "%"); break;
/* 2.2.2.1 Binary Value Operator Token */
/* 192.168.127.12 */
case 'PtgAdd':
e1 = stack.pop(); e2 = stack.pop();
stack.push(e2+"+"+e1);
break;
/* 172.16.31.10 */
case 'PtgSub':
e1 = stack.pop(); e2 = stack.pop();
stack.push(e2+"-"+e1);
break;
/* 192.168.3.11 */
case 'PtgMul':
e1 = stack.pop(); e2 = stack.pop();
stack.push(e2+"*"+e1);
break;
/* 172.16.17.32 */
case 'PtgDiv':
e1 = stack.pop(); e2 = stack.pop();
stack.push(e2+"/"+e1);
break;
/* 172.16.17.32 */
case 'PtgPower':
e1 = stack.pop(); e2 = stack.pop();
stack.push(e2+"^"+e1);
break;
/* 172.16.31.10 */
case 'PtgConcat':
e1 = stack.pop(); e2 = stack.pop();
stack.push(e2+"&"+e1);
break;
/* 172.16.31.10 */
case 'PtgLt':
e1 = stack.pop(); e2 = stack.pop();
stack.push(e2+"<"+e1);
break;
/* 192.168.3.11 */
case 'PtgLe':
e1 = stack.pop(); e2 = stack.pop();
stack.push(e2+"<="+e1);
break;
/* 192.168.3.11 */
case 'PtgEq':
e1 = stack.pop(); e2 = stack.pop();
stack.push(e2+"="+e1);
break;
/* 192.168.3.11 */
case 'PtgGe':
e1 = stack.pop(); e2 = stack.pop();
stack.push(e2+">="+e1);
break;
/* 172.16.58.3 */
case 'PtgGt':
e1 = stack.pop(); e2 = stack.pop();
stack.push(e2+">"+e1);
break;
/* 192.168.127.12 */
case 'PtgNe':
e1 = stack.pop(); e2 = stack.pop();
stack.push(e2+"<>"+e1);
break;
/* 2.2.2.1 Binary Reference Operator Token */
/* 172.16.17.32 */
case 'PtgIsect':
e1 = stack.pop(); e2 = stack.pop();
stack.push(e2+" "+e1);
break;
case 'PtgUnion':
e1 = stack.pop(); e2 = stack.pop();
stack.push(e2+","+e1);
break;
case 'PtgRange': break;
/* 2.2.2.3 Control Tokens "can be ignored" */
/* 192.168.3.11 */
case 'PtgAttrChoose': break;
/* 172.16.31.10 */
case 'PtgAttrGoto': break;
/* 192.168.127.126 */
case 'PtgAttrIf': break;
/* 172.16.31.10 */
case 'PtgRef':
type = f[1][0]; c = shift_cell(decode_cell(encode_cell(f[1][1])), range);
stack.push(encode_cell(c));
break;
/* 172.16.31.10 */
case 'PtgRefN':
type = f[1][0]; c = shift_cell(decode_cell(encode_cell(f[1][1])), cell);
stack.push(encode_cell(c));
break;
case 'PtgRef3d': // TODO: lots of stuff
type = f[1][0]; ixti = f[1][1]; c = shift_cell(f[1][2], range);
stack.push(supbooks[1][ixti+1]+"!"+encode_cell(c));
break;
/* Function Call */
/* 192.168.3.11 */
case 'PtgFunc':
/* 172.16.31.10 */
case 'PtgFuncVar':
/* f[1] = [argc, func] */
var argc = f[1][0], func = f[1][1];
if(!argc) argc = 0;
var args = stack.slice(-argc);
stack.length -= argc;
if(func === 'User') func = args.shift();
stack.push(func + "(" + args.join(",") + ")");
break;
/* 172.16.58.3 */
case 'PtgBool': stack.push(f[1] ? "TRUE" : "FALSE"); break;
/* 172.16.31.10 */
case 'PtgInt': stack.push(f[1]); break;
/* 172.16.31.10 TODO: precision? */
case 'PtgNum': stack.push(String(f[1])); break;
/* 172.16.31.109 */
case 'PtgStr': stack.push('"' + f[1] + '"'); break;
/* 172.16.58.3 */
case 'PtgErr': stack.push(f[1]); break;
/* 172.16.31.10 TODO: fixed points */
case 'PtgArea':
type = f[1][0]; r = shift_range(f[1][1], range);
stack.push(encode_range(r));
break;
/* 192.168.127.12 */
case 'PtgArea3d': // TODO: lots of stuff
type = f[1][0]; ixti = f[1][1]; r = f[1][2];
stack.push(supbooks[1][ixti+1]+"!"+encode_range(r));
break;
/* 172.16.58.3 */
case 'PtgAttrSum':
stack.push("SUM(" + stack.pop() + ")");
break;
/* Expression Prefixes */
/* 172.16.31.10 */
case 'PtgAttrSemi': break;
/* 172.16.58.3 TODO: do something different for revisions */
case 'PtgName':
/* f[1] = type, 0, nameindex */
nameidx = f[1][2];
var lbl = supbooks[0][nameidx];
var name = lbl.Name;
if(name in XLSXFutureFunctions) name = XLSXFutureFunctions[name];
stack.push(name);
break;
/* 172.16.31.10 TODO: do | |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer network for TEAMS models."""
# pylint: disable=g-classes-have-attributes
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
from official.nlp.modeling import models
class ReplacedTokenDetectionHead(tf.keras.layers.Layer):
  """Replaced token detection discriminator head.

  Applies the task-specific transformer layers (those beyond the shared,
  task-agnostic stack) followed by a dense transform and a single-unit head
  that scores each sequence position.

  Arguments:
    encoder_cfg: Encoder config, used to create hidden layers and head.
    num_task_agnostic_layers: Number of task agnostic layers in the
      discriminator.
    output: The output style for this network. Can be either 'logits' or
      'predictions'.
  """

  def __init__(self,
               encoder_cfg,
               num_task_agnostic_layers,
               output='logits',
               name='rtd',
               **kwargs):
    super(ReplacedTokenDetectionHead, self).__init__(name=name, **kwargs)
    self.num_task_agnostic_layers = num_task_agnostic_layers
    self.hidden_size = encoder_cfg['embedding_cfg']['hidden_size']
    self.num_hidden_instances = encoder_cfg['num_hidden_instances']
    self.hidden_cfg = encoder_cfg['hidden_cfg']
    self.activation = self.hidden_cfg['intermediate_activation']
    self.initializer = self.hidden_cfg['kernel_initializer']

    # One task-specific transformer layer per encoder layer beyond the
    # shared (task-agnostic) stack.
    self.hidden_layers = []
    for i in range(self.num_task_agnostic_layers, self.num_hidden_instances):
      self.hidden_layers.append(
          layers.Transformer(
              num_attention_heads=self.hidden_cfg['num_attention_heads'],
              intermediate_size=self.hidden_cfg['intermediate_size'],
              intermediate_activation=self.activation,
              dropout_rate=self.hidden_cfg['dropout_rate'],
              attention_dropout_rate=self.hidden_cfg['attention_dropout_rate'],
              kernel_initializer=self.initializer,
              name='transformer/layer_%d_rtd' % i))
    self.dense = tf.keras.layers.Dense(
        self.hidden_size,
        activation=self.activation,
        kernel_initializer=self.initializer,
        name='transform/rtd_dense')
    self.rtd_head = tf.keras.layers.Dense(
        units=1, kernel_initializer=self.initializer,
        name='transform/rtd_head')
    if output not in ('predictions', 'logits'):
      raise ValueError(
          ('Unknown `output` value "%s". `output` can be either "logits" or '
           '"predictions"') % output)
    self._output_type = output

  def call(self, sequence_data, input_mask):
    """Compute replaced-token-detection logits for each sequence position.

    Args:
      sequence_data: A [batch_size, seq_length, num_hidden] tensor.
      input_mask: A [batch_size, seq_length] binary mask to separate the input
        from the padding.

    Returns:
      A [batch_size, seq_length] tensor.
    """
    attention_mask = layers.SelfAttentionMask()([sequence_data, input_mask])
    data = sequence_data
    for hidden_layer in self.hidden_layers:
      # Bug fix: feed each layer the previous layer's output (`data`), not
      # the original `sequence_data`. The old code passed `sequence_data`
      # every iteration, so all but the final hidden layer's computation
      # was discarded.
      data = hidden_layer([data, attention_mask])
    rtd_logits = self.rtd_head(self.dense(data))
    return tf.squeeze(rtd_logits, axis=-1)
class MultiWordSelectionHead(tf.keras.layers.Layer):
  """Multi-word selection discriminator head.

  Scores each candidate token for a masked position via the inner product
  of a transformed hidden vector with the candidate's embedding.

  Arguments:
    embedding_table: The embedding table.
    activation: The activation, if any, for the dense layer.
    initializer: The initializer for the dense layer. Defaults to a Glorot
      uniform initializer.
    output: The output style for this network. Can be either 'logits' or
      'predictions'.
  """

  def __init__(self,
               embedding_table,
               activation=None,
               initializer='glorot_uniform',
               output='logits',
               name='mws',
               **kwargs):
    super(MultiWordSelectionHead, self).__init__(name=name, **kwargs)
    self.embedding_table = embedding_table
    self.activation = activation
    self.initializer = tf.keras.initializers.get(initializer)
    # Embedding table shape is [vocab_size, embed_size].
    self._vocab_size, self.embed_size = self.embedding_table.shape
    self.dense = tf.keras.layers.Dense(
        self.embed_size,
        activation=self.activation,
        kernel_initializer=self.initializer,
        name='transform/mws_dense')
    self.layer_norm = tf.keras.layers.LayerNormalization(
        axis=-1, epsilon=1e-12, name='transform/mws_layernorm')
    if output not in ('predictions', 'logits'):
      raise ValueError(
          ('Unknown `output` value "%s". `output` can be either "logits" or '
           '"predictions"') % output)
    self._output_type = output

  def call(self, sequence_data, masked_positions, candidate_sets):
    """Compute inner-products of hidden vectors with sampled element embeddings.

    Args:
      sequence_data: A [batch_size, seq_length, num_hidden] tensor.
      masked_positions: A [batch_size, num_prediction] tensor.
      candidate_sets: A [batch_size, num_prediction, k] tensor.

    Returns:
      A [batch_size, num_prediction, k] tensor.
    """
    # Gets shapes for later usage
    candidate_set_shape = tf_utils.get_shape_list(candidate_sets)
    num_prediction = candidate_set_shape[1]

    # Gathers hidden vectors -> (batch_size, num_prediction, 1, embed_size)
    masked_lm_input = self._gather_indexes(sequence_data, masked_positions)
    lm_data = self.dense(masked_lm_input)
    lm_data = self.layer_norm(lm_data)
    lm_data = tf.expand_dims(
        tf.reshape(lm_data, [-1, num_prediction, self.embed_size]), 2)

    # Gathers embeddings -> (batch_size, num_prediction, embed_size, k)
    flat_candidate_sets = tf.reshape(candidate_sets, [-1])
    candidate_embeddings = tf.gather(self.embedding_table, flat_candidate_sets)
    candidate_embeddings = tf.reshape(
        candidate_embeddings,
        tf.concat([tf.shape(candidate_sets), [self.embed_size]], axis=0)
    )
    # Restores the static shape lost by reshaping with a dynamic shape tensor.
    candidate_embeddings.set_shape(
        candidate_sets.shape.as_list() + [self.embed_size])
    candidate_embeddings = tf.transpose(candidate_embeddings, [0, 1, 3, 2])

    # matrix multiplication + squeeze -> (batch_size, num_prediction, k)
    logits = tf.matmul(lm_data, candidate_embeddings)
    logits = tf.squeeze(logits, 2)
    if self._output_type == 'logits':
      return logits
    return tf.nn.log_softmax(logits)

  def _gather_indexes(self, sequence_tensor, positions):
    """Gathers the vectors at the specific positions.

    Args:
      sequence_tensor: Sequence output of shape
        (`batch_size`, `seq_length`, `num_hidden`) where `num_hidden` is
        number of hidden units.
      positions: Positions ids of tokens in batched sequences.

    Returns:
      Sequence tensor of shape (batch_size * num_predictions,
      num_hidden).
    """
    sequence_shape = tf_utils.get_shape_list(
        sequence_tensor, name='sequence_output_tensor')
    batch_size, seq_length, width = sequence_shape
    # Converts (batch, position) pairs into indices of the flattened
    # [batch_size * seq_length, width] tensor.
    flat_offsets = tf.reshape(
        tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
    flat_positions = tf.reshape(positions + flat_offsets, [-1])
    flat_sequence_tensor = tf.reshape(sequence_tensor,
                                      [batch_size * seq_length, width])
    output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
    return output_tensor
@tf.keras.utils.register_keras_serializable(package='Text')
class TeamsPretrainer(tf.keras.Model):
"""TEAMS network training model.
This is an implementation of the network structure described in "Training
ELECTRA Augmented with Multi-word Selection"
(https://arxiv.org/abs/2106.00139).
The TeamsPretrainer allows a user to pass in two transformer encoders, one
for generator, the other for discriminator (multi-word selection). The
pretrainer then instantiates the masked language model (at generator side) and
classification networks (including both multi-word selection head and replaced
token detection head) that are used to create the training objectives.
*Note* that the model is constructed by Keras Subclass API, where layers are
defined inside `__init__` and `call()` implements the computation.
Args:
generator_network: A transformer encoder for generator, this network should
output a sequence output.
discriminator_mws_network: A transformer encoder for multi-word selection
discriminator, this network should output a sequence output.
num_discriminator_task_agnostic_layers: Number of layers shared between
multi-word selection and random token detection discriminators.
vocab_size: Size of generator output vocabulary
candidate_size: Candidate size for multi-word selection task,
including the correct word.
mlm_activation: The activation (if any) to use in the masked LM and
classification networks. If None, no activation will be used.
mlm_initializer: The initializer (if any) to use in the masked LM and
classification networks. Defaults to a Glorot uniform initializer.
output_type: The output style for this network. Can be either `logits` or
`predictions`.
"""
def __init__(self,
             generator_network,
             discriminator_mws_network,
             num_discriminator_task_agnostic_layers,
             vocab_size,
             candidate_size=5,
             mlm_activation=None,
             mlm_initializer='glorot_uniform',
             output_type='logits',
             **kwargs):
  """Builds the generator/discriminator heads and records the config."""
  super().__init__()
  # Constructor arguments, kept for serialization.
  self._config = {
      'generator_network':
          generator_network,
      'discriminator_mws_network':
          discriminator_mws_network,
      'num_discriminator_task_agnostic_layers':
          num_discriminator_task_agnostic_layers,
      'vocab_size':
          vocab_size,
      'candidate_size':
          candidate_size,
      'mlm_activation':
          mlm_activation,
      'mlm_initializer':
          mlm_initializer,
      'output_type':
          output_type,
  }
  for k, v in kwargs.items():
    self._config[k] = v

  self.generator_network = generator_network
  self.discriminator_mws_network = discriminator_mws_network
  self.vocab_size = vocab_size
  self.candidate_size = candidate_size
  self.mlm_activation = mlm_activation
  self.mlm_initializer = mlm_initializer
  self.output_type = output_type
  # Generator MLM head and discriminator share the discriminator's
  # embedding table.
  self.embedding_table = (
      self.discriminator_mws_network.embedding_network.get_embedding_table())
  self.masked_lm = layers.MaskedLM(
      embedding_table=self.embedding_table,
      activation=mlm_activation,
      initializer=mlm_initializer,
      output=output_type,
      name='generator_masked_lm')
  discriminator_cfg = self.discriminator_mws_network.get_config()
  self.num_task_agnostic_layers = num_discriminator_task_agnostic_layers
  # Replaced-token-detection head, built from the discriminator encoder
  # config so its task-specific layers mirror the encoder's layers.
  self.discriminator_rtd_head = ReplacedTokenDetectionHead(
      encoder_cfg=discriminator_cfg,
      num_task_agnostic_layers=self.num_task_agnostic_layers,
      output=output_type,
      name='discriminator_rtd')
  hidden_cfg = discriminator_cfg['hidden_cfg']
  # Multi-word-selection head, sharing the same embedding table.
  self.discriminator_mws_head = MultiWordSelectionHead(
      embedding_table=self.embedding_table,
      activation=hidden_cfg['intermediate_activation'],
      initializer=hidden_cfg['kernel_initializer'],
      output=output_type,
      name='discriminator_mws')
def call(self, inputs):
  """TEAMS forward pass.

  Args:
    inputs: A dict of all inputs, same as the standard BERT model.

  Returns:
    outputs: A dict of pretrainer model outputs, including
      (1) lm_outputs: A `[batch_size, num_token_predictions, vocab_size]`
      tensor indicating logits on masked positions.
      (2) disc_rtd_logits: A `[batch_size, sequence_length]` tensor indicating
      logits for discriminator replaced token detection task.
      (3) disc_rtd_label: A `[batch_size, sequence_length]` tensor indicating
      target labels for discriminator replaced token detection task.
      (4) disc_mws_logits: A `[batch_size, num_token_predictions,
      candidate_size]` tensor indicating logits for discriminator multi-word
      selection task.
      (5) disc_mws_label: A `[batch_size, num_token_predictions]` tensor
      indicating target labels for discriminator multi-word selection task.
  """
  input_word_ids = inputs['input_word_ids']
  input_mask = inputs['input_mask']
  input_type_ids = inputs['input_type_ids']
  masked_lm_positions = inputs['masked_lm_positions']

  # Runs generator.
  sequence_output = self.generator_network(
      [input_word_ids, input_mask, input_type_ids])['sequence_output']
  lm_outputs = self.masked_lm(sequence_output, masked_lm_positions)

  # Samples tokens from generator to build the corrupted discriminator input.
  fake_data = self._get_fake_data(inputs, lm_outputs)

  # Runs discriminator.
  disc_input = fake_data['inputs']
  disc_rtd_label = fake_data['is_fake_tokens']
  disc_mws_candidates = fake_data['candidate_set']
  mws_sequence_outputs = self.discriminator_mws_network([
      disc_input['input_word_ids'], disc_input['input_mask'],
      disc_input['input_type_ids']
  ])['encoder_outputs']

  # Applies replaced token detection with input selected based on
  # self.num_discriminator_task_agnostic_layers (output of the last shared
  # encoder layer).
  disc_rtd_logits = self.discriminator_rtd_head(
      mws_sequence_outputs[self.num_task_agnostic_layers - 1], input_mask)

  # Applies multi-word selection on the final encoder layer's output.
  disc_mws_logits = self.discriminator_mws_head(mws_sequence_outputs[-1],
                                                masked_lm_positions,
                                                disc_mws_candidates)
  # The true token is placed first in each candidate set (see
  # _get_fake_data), so the target label is index 0 everywhere.
  disc_mws_label = tf.zeros_like(masked_lm_positions, dtype=tf.int32)

  outputs = {
      'lm_outputs': lm_outputs,
      'disc_rtd_logits': disc_rtd_logits,
      'disc_rtd_label': disc_rtd_label,
      'disc_mws_logits': disc_mws_logits,
      'disc_mws_label': disc_mws_label,
  }
  return outputs
def _get_fake_data(self, inputs, mlm_logits):
  """Generate corrupted data for discriminator.

  Note it is possible for a sampled token to be the same as the correct one.

  Args:
    inputs: A dict of all inputs, same as the input of `call()` function
    mlm_logits: The generator's output logits

  Returns:
    A dict of generated fake data
  """
  # Restores original tokens at masked positions (works on a duplicate,
  # leaving the caller's dict untouched).
  inputs = models.electra_pretrainer.unmask(inputs, duplicate=True)

  # Samples replacement tokens; stop_gradient keeps discriminator losses
  # from backpropagating into the generator through the sampling step.
  sampled_tokens = tf.stop_gradient(
      models.electra_pretrainer.sample_from_softmax(
          mlm_logits, disallow=None))
  sampled_tokids = tf.argmax(sampled_tokens, axis=-1, output_type=tf.int32)

  # Prepares input and label for replaced token detection task: a position
  # is labeled fake (1) only if it was masked AND the sampled token differs
  # from the original.
  updated_input_ids, masked = models.electra_pretrainer.scatter_update(
      inputs['input_word_ids'], sampled_tokids, inputs['masked_lm_positions'])
  rtd_labels = masked * (1 - tf.cast(
      tf.equal(updated_input_ids, inputs['input_word_ids']), tf.int32))
  updated_inputs = models.electra_pretrainer.get_updated_inputs(
      inputs, duplicate=True, input_word_ids=updated_input_ids)

  # Samples (candidate_size-1) negatives, disallowing the true token, and
  # concatenates with the true token so it always sits at index 0.
  # NOTE(review): `sample_k_from_softmax` is defined elsewhere in this file.
  disallow = tf.one_hot(
      inputs['masked_lm_ids'], depth=self.vocab_size, dtype=tf.float32)
  sampled_candidates = tf.stop_gradient(
      sample_k_from_softmax(mlm_logits, k=self.candidate_size-1,
                            disallow=disallow))
  true_token_id = tf.expand_dims(inputs['masked_lm_ids'], -1)
  candidate_set = tf.concat([true_token_id, sampled_candidates], -1)
  return {
      'inputs': updated_inputs,
      'is_fake_tokens': rtd_labels,
      'sampled_tokens': sampled_tokens,
      'candidate_set': candidate_set
  }
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally | |
thread = api.new_ldap_domain(system_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param LdapDomain body:
:param bool skip_test:
:return: LdapConfiguration
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get a HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'skip_test']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method new_ldap_domain" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `new_ldap_domain`")
resource_path = '/storage-systems/{system-id}/ldap/addDomain'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'skip_test' in params:
query_params['skipTest'] = params['skip_test']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LdapConfiguration',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
    def remove_ldap_domain(self, system_id, id, **kwargs):
        """
        Deletes the specified domain from the LDAP configuration.
        Mode: Embedded Only.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.remove_ldap_domain(system_id, id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
        :param str id: Identifier of the LDAP domain to remove. (required)
        :return: None
            If the method is called asynchronously,
            returns the request thread.
        :raises: ValueError
            If the required params are not provided or if the response data format is unknown.
            TypeError:
            When the data type of response data is different from what we are expecting
            ApiException:
            Occurs when we get a HTTP error code (422 and above).
        """
        # NOTE(review): swagger-generated client boilerplate; documentation only.
        all_params = ['system_id', 'id']
        all_params.append('callback')
        params = locals()
        # Reject keyword arguments this endpoint does not understand.
        # iteritems presumably comes from a module-level py2/py3 compat import — TODO confirm.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method remove_ldap_domain" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'system_id' is set
        if ('system_id' not in params) or (params['system_id'] is None):
            raise ValueError("Missing the required parameter `system_id` when calling `remove_ldap_domain`")
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `remove_ldap_domain`")
        resource_path = '/storage-systems/{system-id}/ldap/{id}'.replace('{format}', 'json')
        path_params = {}
        if 'system_id' in params:
            path_params['system-id'] = params['system_id']
        if 'id' in params:
            path_params['id'] = params['id']
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['basicAuth']
        # DELETE request with no body; response_type=None means no deserialization.
        response = self.api_client.call_api(resource_path, 'DELETE',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type=None,
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
    def set_ldap_configuration(self, system_id, **kwargs):
        """
        Sets the LDAP configuration for a single or multiple domains. Warning: This will remove previous configurations.
        Mode: Embedded Only.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.set_ldap_configuration(system_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
        :param LdapConfiguration body: Full LDAP configuration to apply. (optional)
        :param bool skip_test: When true, skip the connectivity test after applying. (optional)
        :return: LdapConfiguration
            If the method is called asynchronously,
            returns the request thread.
        :raises: ValueError
            If the required params are not provided or if the response data format is unknown.
            TypeError:
            When the data type of response data is different from what we are expecting
            ApiException:
            Occurs when we get a HTTP error code (422 and above).
        """
        # NOTE(review): swagger-generated client boilerplate; documentation only.
        all_params = ['system_id', 'body', 'skip_test']
        all_params.append('callback')
        params = locals()
        # Reject keyword arguments this endpoint does not understand.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method set_ldap_configuration" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'system_id' is set
        if ('system_id' not in params) or (params['system_id'] is None):
            raise ValueError("Missing the required parameter `system_id` when calling `set_ldap_configuration`")
        resource_path = '/storage-systems/{system-id}/ldap'.replace('{format}', 'json')
        path_params = {}
        if 'system_id' in params:
            path_params['system-id'] = params['system_id']
        query_params = {}
        # Optional query flag: python snake_case maps to the wire name skipTest.
        if 'skip_test' in params:
            query_params['skipTest'] = params['skip_test']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # JSON-serializable LdapConfiguration payload, when provided.
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['basicAuth']
        # POST the configuration; response is deserialized into LdapConfiguration.
        response = self.api_client.call_api(resource_path, 'POST',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='LdapConfiguration',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
    def test_ldap(self, system_id, **kwargs):
        """
        Tests current LDAP configuration. If no bind user is defined only a communication test will be performed.
        Mode: Embedded Only.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.test_ldap(system_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
        :return: list[LdapDomainTestResponse]
            If the method is called asynchronously,
            returns the request thread.
        :raises: ValueError
            If the required params are not provided or if the response data format is unknown.
            TypeError:
            When the data type of response data is different from what we are expecting
            ApiException:
            Occurs when we get a HTTP error code (422 and above).
        """
        # NOTE(review): swagger-generated client boilerplate; documentation only.
        all_params = ['system_id']
        all_params.append('callback')
        params = locals()
        # Reject keyword arguments this endpoint does not understand.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method test_ldap" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'system_id' is set
        if ('system_id' not in params) or (params['system_id'] is None):
            raise ValueError("Missing the required parameter `system_id` when calling `test_ldap`")
        resource_path = '/storage-systems/{system-id}/ldap/test'.replace('{format}', 'json')
        path_params = {}
        if 'system_id' in params:
            path_params['system-id'] = params['system_id']
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['basicAuth']
        # POST with no body; the test endpoint returns one result per domain.
        response = self.api_client.call_api(resource_path, 'POST',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='list[LdapDomainTestResponse]',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
def update_ldap_domain(self, system_id, domain_id, **kwargs):
"""
Updates LDAP configuration for a single domain
Mode: Embedded Only.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def | |
# repo: samson0v/python_tb_rest_client
# coding: utf-8
"""
ThingsBoard REST API
ThingsBoard Professional Edition IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3PAAS-RC1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from tb_rest_client.api_client import ApiClient
class GroupPermissionControllerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_group_permission_using_delete(self, group_permission_id, **kwargs): # noqa: E501
"""Delete group permission (deleteGroupPermission) # noqa: E501
Deletes the group permission. Referencing non-existing group permission Id will cause an error. Security check is performed to verify that the user has 'DELETE' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_group_permission_using_delete(group_permission_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group_permission_id: A string value representing the group permission id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_group_permission_using_delete_with_http_info(group_permission_id, **kwargs) # noqa: E501
else:
(data) = self.delete_group_permission_using_delete_with_http_info(group_permission_id, **kwargs) # noqa: E501
return data
    def delete_group_permission_using_delete_with_http_info(self, group_permission_id, **kwargs):  # noqa: E501
        """Delete group permission (deleteGroupPermission) # noqa: E501

        Deletes the group permission. Referencing non-existing group permission Id will cause an error. Security check is performed to verify that the user has 'DELETE' permission for the entity (entities). # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_group_permission_using_delete_with_http_info(group_permission_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str group_permission_id: A string value representing the group permission id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE(review): swagger-generated transport method; documentation only.
        all_params = ['group_permission_id']  # noqa: E501
        # Framework-level kwargs accepted by every generated endpoint.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject keyword arguments this endpoint does not understand.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_group_permission_using_delete" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'group_permission_id' is set
        if ('group_permission_id' not in params or
                params['group_permission_id'] is None):
            raise ValueError("Missing the required parameter `group_permission_id` when calling `delete_group_permission_using_delete`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'group_permission_id' in params:
            path_params['groupPermissionId'] = params['group_permission_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501
        # DELETE with no body; response_type=None means no deserialization.
        return self.api_client.call_api(
            '/api/groupPermission/{groupPermissionId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_entity_group_permissions_using_get(self, entity_group_id, **kwargs): # noqa: E501
"""Get group permissions by Entity Group Id (getEntityGroupPermissions) # noqa: E501
Returns a list of group permission objects that is assigned for the specified Entity Group Id. Group permission entity represents list of allowed operations for certain User Group to perform against certain Entity Group. Basically, this entity wires three other entities: * Role that defines set of allowed operations; * User Group that defines set of users who may perform the operations; * Entity Group that defines set of entities which will be accessible to users; Group Permission Info object extends the Group Permissions with the full information about Role and User and/or Entity Groups. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_group_permissions_using_get(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: list[GroupPermissionInfo]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entity_group_permissions_using_get_with_http_info(entity_group_id, **kwargs) # noqa: E501
else:
(data) = self.get_entity_group_permissions_using_get_with_http_info(entity_group_id, **kwargs) # noqa: E501
return data
    def get_entity_group_permissions_using_get_with_http_info(self, entity_group_id, **kwargs):  # noqa: E501
        """Get group permissions by Entity Group Id (getEntityGroupPermissions) # noqa: E501

        Returns a list of group permission objects that is assigned for the specified Entity Group Id. Group permission entity represents list of allowed operations for certain User Group to perform against certain Entity Group. Basically, this entity wires three other entities: * Role that defines set of allowed operations; * User Group that defines set of users who may perform the operations; * Entity Group that defines set of entities which will be accessible to users; Group Permission Info object extends the Group Permissions with the full information about Role and User and/or Entity Groups. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_entity_group_permissions_using_get_with_http_info(entity_group_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
        :return: list[GroupPermissionInfo]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE(review): swagger-generated transport method; documentation only.
        all_params = ['entity_group_id']  # noqa: E501
        # Framework-level kwargs accepted by every generated endpoint.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject keyword arguments this endpoint does not understand.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_entity_group_permissions_using_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'entity_group_id' is set
        if ('entity_group_id' not in params or
                params['entity_group_id'] is None):
            raise ValueError("Missing the required parameter `entity_group_id` when calling `get_entity_group_permissions_using_get`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'entity_group_id' in params:
            path_params['entityGroupId'] = params['entity_group_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501
        # GET request; the response body deserializes to list[GroupPermissionInfo].
        return self.api_client.call_api(
            '/api/entityGroup/{entityGroupId}/groupPermissions', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[GroupPermissionInfo]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_group_permission_by_id_using_get(self, group_permission_id, **kwargs): # noqa: E501
"""Get Group Permission (getGroupPermissionById) # noqa: E501
Fetch the Group Permission object based on the provided Group Permission Id. Group permission entity represents list of allowed operations for certain User Group to perform against certain Entity Group. Basically, this entity wires three other entities: * Role that defines set of allowed operations; * User Group that defines set of users who may perform the operations; * Entity Group that defines set of entities which will be accessible to users; Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_group_permission_by_id_using_get(group_permission_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group_permission_id: A string value representing the group permission id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: GroupPermission
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_group_permission_by_id_using_get_with_http_info(group_permission_id, **kwargs) # noqa: E501
else:
(data) = self.get_group_permission_by_id_using_get_with_http_info(group_permission_id, **kwargs) # noqa: E501
return data
def get_group_permission_by_id_using_get_with_http_info(self, group_permission_id, **kwargs): # noqa: E501
"""Get Group Permission (getGroupPermissionById) # noqa: E501
Fetch the Group Permission object based on the provided Group Permission Id. Group permission entity represents list of allowed operations for certain User Group to perform against certain Entity Group. Basically, this entity wires three other entities: * Role that defines set of allowed operations; * User Group that defines set of users who may perform the operations; * Entity Group that defines set of entities which will be accessible to users; Security check is performed to verify that the user has 'READ' permission for | |
# pylint: disable=redefined-outer-name, comparison-with-callable
"""Test helper functions."""
import gzip
import importlib
import logging
import os
import pickle
import sys
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import pytest
from _pytest.outcomes import Skipped
from packaging.version import Version
from ..data import InferenceData, from_dict
_log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def eight_schools_params():
    """Share setup for eight schools."""
    # Classic eight-schools data: J schools, observed effects y, std errors sigma.
    effects = np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0])
    errors = np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0])
    return {"J": 8, "y": effects, "sigma": errors}
@pytest.fixture(scope="module")
def draws():
    """Share the default number of draws used by the sampler-backed fixtures."""
    return 500
@pytest.fixture(scope="module")
def chains():
    """Share the default number of chains used by the sampler-backed fixtures."""
    return 2
def create_model(seed=10):
    """Create model with fake data.

    Builds an eight-schools-shaped InferenceData with random draws for every
    group (posterior, prior, predictive, sample stats, log likelihood).
    NOTE: the values depend on the exact order of np.random calls below, so
    statement order must not change.
    """
    np.random.seed(seed)
    nchains = 4
    ndraws = 500
    # Observed eight-schools data: J schools, effects y, std errors sigma.
    data = {
        "J": 8,
        "y": np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]),
        "sigma": np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]),
    }
    # Scalars mu/tau are (chain, draw); eta/theta add a school dimension.
    posterior = {
        "mu": np.random.randn(nchains, ndraws),
        "tau": abs(np.random.randn(nchains, ndraws)),
        "eta": np.random.randn(nchains, ndraws, data["J"]),
        "theta": np.random.randn(nchains, ndraws, data["J"]),
    }
    posterior_predictive = {"y": np.random.randn(nchains, ndraws, len(data["y"]))}
    # diverging/max_depth are boolean arrays (~16% True by construction).
    sample_stats = {
        "energy": np.random.randn(nchains, ndraws),
        "diverging": np.random.randn(nchains, ndraws) > 0.90,
        "max_depth": np.random.randn(nchains, ndraws) > 0.90,
    }
    log_likelihood = {
        "y": np.random.randn(nchains, ndraws, data["J"]),
    }
    # Prior groups reuse the posterior shapes at half scale.
    prior = {
        "mu": np.random.randn(nchains, ndraws) / 2,
        "tau": abs(np.random.randn(nchains, ndraws)) / 2,
        "eta": np.random.randn(nchains, ndraws, data["J"]) / 2,
        "theta": np.random.randn(nchains, ndraws, data["J"]) / 2,
    }
    prior_predictive = {"y": np.random.randn(nchains, ndraws, len(data["y"])) / 2}
    # Here diverging is stored as int (0/1) rather than bool, unlike sample_stats.
    sample_stats_prior = {
        "energy": np.random.randn(nchains, ndraws),
        "diverging": (np.random.randn(nchains, ndraws) > 0.95).astype(int),
    }
    model = from_dict(
        posterior=posterior,
        posterior_predictive=posterior_predictive,
        sample_stats=sample_stats,
        log_likelihood=log_likelihood,
        prior=prior,
        prior_predictive=prior_predictive,
        sample_stats_prior=sample_stats_prior,
        observed_data={"y": data["y"]},
        dims={"y": ["obs_dim"], "log_likelihood": ["obs_dim"]},
        coords={"obs_dim": range(data["J"])},
    )
    return model
def create_multidimensional_model(seed=10):
    """Create model with fake data.

    Same structure as create_model() but the observation space is 2-D
    (ndim1 x ndim2) instead of a single school dimension. Values depend on the
    exact order of np.random calls below, so statement order must not change.
    """
    np.random.seed(seed)
    nchains = 4
    ndraws = 500
    ndim1 = 5
    ndim2 = 7
    # Observed data is a (ndim1, ndim2) grid rather than a vector.
    data = {
        "y": np.random.normal(size=(ndim1, ndim2)),
        "sigma": np.random.normal(size=(ndim1, ndim2)),
    }
    posterior = {
        "mu": np.random.randn(nchains, ndraws),
        "tau": abs(np.random.randn(nchains, ndraws)),
        "eta": np.random.randn(nchains, ndraws, ndim1, ndim2),
        "theta": np.random.randn(nchains, ndraws, ndim1, ndim2),
    }
    posterior_predictive = {"y": np.random.randn(nchains, ndraws, ndim1, ndim2)}
    sample_stats = {
        "energy": np.random.randn(nchains, ndraws),
        "diverging": np.random.randn(nchains, ndraws) > 0.90,
    }
    log_likelihood = {
        "y": np.random.randn(nchains, ndraws, ndim1, ndim2),
    }
    # Prior groups reuse the posterior shapes at half scale.
    prior = {
        "mu": np.random.randn(nchains, ndraws) / 2,
        "tau": abs(np.random.randn(nchains, ndraws)) / 2,
        "eta": np.random.randn(nchains, ndraws, ndim1, ndim2) / 2,
        "theta": np.random.randn(nchains, ndraws, ndim1, ndim2) / 2,
    }
    prior_predictive = {"y": np.random.randn(nchains, ndraws, ndim1, ndim2) / 2}
    sample_stats_prior = {
        "energy": np.random.randn(nchains, ndraws),
        "diverging": (np.random.randn(nchains, ndraws) > 0.95).astype(int),
    }
    model = from_dict(
        posterior=posterior,
        posterior_predictive=posterior_predictive,
        sample_stats=sample_stats,
        log_likelihood=log_likelihood,
        prior=prior,
        prior_predictive=prior_predictive,
        sample_stats_prior=sample_stats_prior,
        observed_data={"y": data["y"]},
        dims={"y": ["dim1", "dim2"], "log_likelihood": ["dim1", "dim2"]},
        coords={"dim1": range(ndim1), "dim2": range(ndim2)},
    )
    return model
def create_data_random(groups=None, seed=10):
    """Create InferenceData object using random data.

    Only the groups listed in ``groups`` (default: posterior, sample_stats,
    observed_data, posterior_predictive) are included in the result.
    """
    if groups is None:
        groups = ["posterior", "sample_stats", "observed_data", "posterior_predictive"]
    rng = np.random.default_rng(seed)
    data = rng.normal(size=(4, 500, 8))

    def pair():
        # Fresh dict per group: scalar-ish "a" plus the full 3-D "b".
        return {"a": data[..., 0], "b": data}

    candidates = {
        "posterior": pair(),
        "sample_stats": pair(),
        "observed_data": {"b": data[0, 0, :]},
        "posterior_predictive": pair(),
        "prior": pair(),
        "prior_predictive": pair(),
        "warmup_posterior": pair(),
        "warmup_posterior_predictive": pair(),
    }
    selected = {name: arrays for name, arrays in candidates.items() if name in groups}
    return from_dict(**selected, save_warmup=True)
@pytest.fixture()
def data_random():
    """Fixture containing InferenceData object using random data."""
    return create_data_random()
@pytest.fixture(scope="module")
def models():
    """Fixture containing 2 mock inference data instances for testing."""
    # blank line to keep black and pydocstyle happy

    class Models:
        # Two datasets that differ only by RNG seed, exposed as attributes.
        model_1 = create_model(seed=10)
        model_2 = create_model(seed=11)

    return Models()
@pytest.fixture(scope="module")
def multidim_models():
    """Fixture containing 2 mock inference data instances with multidimensional data for testing."""
    # blank line to keep black and pydocstyle happy

    class Models:
        # Two multidimensional datasets that differ only by RNG seed.
        model_1 = create_multidimensional_model(seed=10)
        model_2 = create_multidimensional_model(seed=11)

    return Models()
def check_multiple_attrs(
    test_dict: Dict[str, List[str]], parent: "InferenceData"
) -> List[Union[str, Tuple[str, str]]]:
    """Perform multiple hasattr checks on InferenceData objects.

    It is thought to first check if the parent object contains a given dataset,
    and then (if present) check the attributes of the dataset.

    Given the output of the function, all mismatches between expectation and
    reality can be retrieved: a single string indicates a group mismatch and a
    tuple of strings ``(group, var)`` indicates a mismatch in the variable
    ``var`` of ``group``.

    Parameters
    ----------
    test_dict: dict of {str : list of str}
        Its structure should be `{dataset1_name: [var1, var2], dataset2_name: [var]}`.
        A ``~`` at the beginning of a dataset or variable name indicates the name NOT
        being present must be asserted.
    parent: InferenceData
        InferenceData object on which to check the attributes.

    Returns
    -------
    list
        List containing the failed checks. It will contain either the dataset_name or a
        tuple (dataset_name, var) for all non present attributes.

    Examples
    --------
    The output below indicates that the ``posterior`` group was expected but not
    found, and that variables ``a`` and ``b`` were missing from the ``prior`` group:

        ["posterior", ("prior", "a"), ("prior", "b")]

    Another example could be the following:

        [("posterior", "a"), "~observed_data", ("sample_stats", "~log_likelihood")]

    In this case, the output indicates that variable ``a`` was not found in ``posterior``
    as it was expected, however, in the other two cases, the preceding ``~`` (kept from the
    input negation notation) indicates that ``observed_data`` group should not be present
    but was found in the InferenceData and that ``log_likelihood`` variable was found
    in ``sample_stats``, also against what was expected.
    """
    failed_attrs: List[Union[str, Tuple[str, str]]] = []
    for dataset_name, attributes in test_dict.items():
        if dataset_name.startswith("~"):
            # Negated group: it is a failure when the group IS present.
            if hasattr(parent, dataset_name[1:]):
                failed_attrs.append(dataset_name)
        elif hasattr(parent, dataset_name):
            dataset = getattr(parent, dataset_name)
            for attribute in attributes:
                if attribute.startswith("~"):
                    # Negated variable: fail when it IS present in the group.
                    if hasattr(dataset, attribute[1:]):
                        failed_attrs.append((dataset_name, attribute))
                elif not hasattr(dataset, attribute):
                    # Plain variable: fail when it is missing from the group.
                    failed_attrs.append((dataset_name, attribute))
        else:
            # Expected group missing entirely.
            failed_attrs.append(dataset_name)
    return failed_attrs
def emcee_version():
    """Check emcee version.

    Returns
    -------
    int
        Major version number
    """
    import emcee

    # Parse the full major component instead of only the first character so a
    # hypothetical "10.0.0" release is not mis-reported as major version 1.
    return int(emcee.__version__.split(".")[0])
def needs_emcee3_func():
    """Build a skipif marker that skips a test unless emcee major version >= 3."""
    # pylint: disable=invalid-name
    return pytest.mark.skipif(emcee_version() < 3, reason="emcee3 required")
def _emcee_lnprior(theta):
"""Proper function to allow pickling."""
mu, tau, eta = theta[0], theta[1], theta[2:]
# Half-cauchy prior, hwhm=25
if tau < 0:
return -np.inf
prior_tau = -np.log(tau ** 2 + 25 ** 2)
prior_mu = -((mu / 10) ** 2) # normal prior, loc=0, scale=10
prior_eta = -np.sum(eta ** 2) # normal prior, loc=0, scale=1
return prior_mu + prior_tau + prior_eta
def _emcee_lnprob(theta, y, sigma):
    """Log-posterior plus blobs for emcee (module-level so it pickles)."""
    mu, tau, eta = theta[0], theta[1], theta[2:]
    log_prior = _emcee_lnprior(theta)
    # Per-observation squared standardized residuals (negated -> log-likelihood).
    like_vect = -(((mu + tau * eta - y) / sigma) ** 2)
    total_like = np.sum(like_vect)
    # Blobs: pointwise log-likelihood and a posterior-predictive draw.
    return total_like + log_prior, (like_vect, np.random.normal((mu + tau * eta), sigma))
def emcee_schools_model(data, draws, chains):
    """Schools model in emcee.

    Runs the eight-schools ensemble sampler and returns the sampler object.
    On emcee >= 3 the chain is also persisted to an HDF5 backend file under
    ./saved_models/reader_testfile.h5 next to this module.
    """
    import emcee

    chains = 10 * chains  # emcee is sad with too few walkers
    y = data["y"]
    sigma = data["sigma"]
    J = data["J"]  # pylint: disable=invalid-name
    ndim = J + 2  # parameters: mu, tau, plus one eta per school
    # Random walker start positions; tau (column 1) must be non-negative.
    pos = np.random.normal(size=(chains, ndim))
    pos[:, 1] = np.absolute(pos[:, 1])  # pylint: disable=unsupported-assignment-operation

    if emcee_version() < 3:
        # Legacy API: no backend support, results kept in memory only.
        sampler = emcee.EnsembleSampler(chains, ndim, _emcee_lnprob, args=(y, sigma))
        # pylint: enable=unexpected-keyword-arg
        sampler.run_mcmc(pos, draws)
    else:
        # emcee 3+: persist the chain to an HDF5 backend for reader tests.
        here = os.path.dirname(os.path.abspath(__file__))
        data_directory = os.path.join(here, "saved_models")
        filepath = os.path.join(data_directory, "reader_testfile.h5")
        backend = emcee.backends.HDFBackend(filepath)  # pylint: disable=no-member
        backend.reset(chains, ndim)
        # pylint: disable=unexpected-keyword-arg
        sampler = emcee.EnsembleSampler(
            chains, ndim, _emcee_lnprob, args=(y, sigma), backend=backend
        )
        # pylint: enable=unexpected-keyword-arg
        sampler.run_mcmc(pos, draws, store=True)
    return sampler
# pylint:disable=no-member,no-value-for-parameter,invalid-name
def _pyro_noncentered_model(J, sigma, y=None):
    """Non-centered eight-schools model definition for Pyro.

    Sample-site names and order (mu, tau, eta, obs) are part of the trace
    contract consumed by the tests — do not reorder.
    """
    import pyro
    import pyro.distributions as dist

    mu = pyro.sample("mu", dist.Normal(0, 5))
    tau = pyro.sample("tau", dist.HalfCauchy(5))
    with pyro.plate("J", J):
        eta = pyro.sample("eta", dist.Normal(0, 1))
        # Non-centered parameterization: theta is deterministic given mu/tau/eta.
        theta = mu + tau * eta
        return pyro.sample("obs", dist.Normal(theta, sigma), obs=y)
def pyro_noncentered_schools(data, draws, chains):
    """Non-centered eight schools implementation in Pyro."""
    import torch
    from pyro.infer import MCMC, NUTS

    y = torch.from_numpy(data["y"]).float()
    sigma = torch.from_numpy(data["sigma"]).float()
    nuts_kernel = NUTS(_pyro_noncentered_model, jit_compile=True, ignore_jit_warnings=True)
    # warmup_steps mirrors the requested draw count.
    posterior = MCMC(nuts_kernel, num_samples=draws, warmup_steps=draws, num_chains=chains)
    posterior.run(data["J"], sigma, y)
    # This block lets the posterior be pickled
    posterior.sampler = None
    posterior.kernel.potential_fn = None
    return posterior
# pylint:disable=no-member,no-value-for-parameter,invalid-name
def _numpyro_noncentered_model(J, sigma, y=None):
    """Non-centered eight-schools model definition for NumPyro.

    Mirrors _pyro_noncentered_model; sample-site names and order are part of
    the trace contract consumed by the tests — do not reorder.
    """
    import numpyro
    import numpyro.distributions as dist

    mu = numpyro.sample("mu", dist.Normal(0, 5))
    tau = numpyro.sample("tau", dist.HalfCauchy(5))
    with numpyro.plate("J", J):
        eta = numpyro.sample("eta", dist.Normal(0, 1))
        # Non-centered parameterization: theta is deterministic given mu/tau/eta.
        theta = mu + tau * eta
        return numpyro.sample("obs", dist.Normal(theta, sigma), obs=y)
def numpyro_schools_model(data, draws, chains):
"""Centered eight schools implementation in NumPyro."""
from jax.random import PRNGKey
from numpyro.infer import MCMC, NUTS
mcmc = MCMC(
NUTS(_numpyro_noncentered_model),
num_warmup=draws,
num_samples=draws,
num_chains=chains,
chain_method="sequential",
)
mcmc.run(PRNGKey(0), extra_fields=("num_steps", "energy"), **data)
# This block lets the posterior be pickled
mcmc.sampler._sample_fn = None # pylint: disable=protected-access
mcmc.sampler._init_fn = None # pylint: disable=protected-access
mcmc.sampler._postprocess_fn = None # pylint: disable=protected-access
mcmc.sampler._potential_fn = None # pylint: disable=protected-access
mcmc._cache = {} # pylint: disable=protected-access
return | |
"""
This module contains the Vec2d class that is used in all of easymunk when a
vector is needed.
The Vec2d class is used almost everywhere in easymunk for 2d coordinates and
vectors, for example to define position vector in a Body. However, easymunk is
smart enough to convert tuples to Vec2ds so you usually do not need to
explicitly do conversions if you happen to have a tuple.
"""
import numbers
from math import cos as _rcos, sin as _rsin
from typing import (
NamedTuple,
Tuple,
Any,
overload,
TYPE_CHECKING,
Type,
)
from .math import (
sqrt,
atan2,
cos,
sin,
radians as _radians,
degrees as _degrees,
isfinite,
isnan,
)
from ..typing import Num, VecLike
class Vec2d(NamedTuple):
"""2d vector class, supports vector and scalar operators, and also
provides some high level functions.
"""
x: float
y: float
# noinspection PyTypeChecker,PyUnresolvedReferences
@classmethod
def from_coords(cls, obj: VecLike) -> "Vec2d":
"""
Create Vec2d from 2-sequence of (x, y) coordinates.
This is useful to normalize input tuples into Vec2d instances.
>>> Vec2d.from_coords((1, 2))
Vec2d(1, 2)
This is a no-op for Vec2d inputs
>>> u = Vec2d(1, 2)
>>> Vec2d.from_coords(u) is u
True
"""
if obj.__class__ is cls:
return obj # type: ignore
try:
return cls(*obj)
except TypeError:
pass
try:
return cls(obj.x, obj.y) # type: ignore
except AttributeError:
raise TypeError(f"invalid vec-like type: {type(obj).__name__}")
@classmethod
def from_displacement(cls: Type["Vec2d"], a: VecLike, b: VecLike) -> "Vec2d":
"""
Create Vec2d from displacement a - b.
>>> Vec2d.from_displacement((1, 2), (3, 4))
Vec2d(-2, -2)
"""
x, y = a
u, v = b
return cls(x - u, y - v)
@staticmethod
def polar(angle=0.0, r=1.0) -> "Vec2d":
"""
Create vector from polar coordinates passing its angle and length.
Args:
angle: Angle in degrees
r: Length of resulting vector.
>>> Vec2d.polar(90)
Vec2d(0, 1)
"""
return Vec2d(r * cos(angle), r * sin(angle))
@staticmethod
def polar_radians(radians=0.0, r=1.0) -> "Vec2d":
"""
Create vector from polar coordinates, with angle given in radians.
>>> Vec2d.polar_radians(pi / 4)
Vec2d(0.707107, 0.707107)
"""
return Vec2d(r * _rcos(radians), r * _rsin(radians))
@staticmethod
def zero() -> "Vec2d":
"""A vector of zero length.
>>> Vec2d.zero()
Vec2d(0, 0)
"""
return Vec2d(0.0, 0.0)
@staticmethod
def ux() -> "Vec2d":
"""A unit basis vector pointing right.
>>> Vec2d.ux()
Vec2d(1, 0)
"""
return Vec2d(1.0, 0.0)
@staticmethod
def uy() -> "Vec2d":
"""A unit basis vector pointing up.
>>> Vec2d.uy()
Vec2d(0, 1)
"""
return Vec2d(0.0, 1.0)
@property
def length_sqr(self) -> float:
"""Squared length of vector.
If the squared length is enough it is more efficient to use this method
instead of access .length and then do a x**2.
>>> v = Vec2d(3, 4)
>>> v.length_sqr == v.length ** 2 == 25.0
True
"""
return self.x ** 2 + self.y ** 2
@property
def length(self) -> float:
"""Length of vector.
>>> Vec2d(3, 4).length
5.0
"""
return sqrt(self.x ** 2 + self.y ** 2)
@property
def angle(self) -> float:
"""The angle (in degrees) of the vector"""
if self.length_sqr == 0:
return 0
return atan2(self.y, self.x)
@property
def radians(self) -> float:
"""The angle (in radians) of the vector"""
return _radians(self.angle)
@property
def is_finite(self):
"""True if both components are finite non-nan numbers."""
x, y = self
return isfinite(x) and isfinite(y)
@property
def is_nan(self):
"""True if any of the components is NaN"""
x, y = self
return isnan(x) and isnan(y)
    # PyCharm's type check sometimes get confused by constructor signatures.
    # The explicit __new__/__init__ below exist only so IDEs and type
    # checkers see an (x, y) constructor signature; at runtime they are
    # removed again (TYPE_CHECKING is False) so the NamedTuple-generated
    # constructor is used instead.
    def __new__(cls, x: Num, y: Num):  # type: ignore
        new = tuple.__new__(cls, (x, y))
        return new
    def __init__(self, x: Num, y: Num):  # type: ignore
        tuple.__init__(self)
    if not TYPE_CHECKING:  # type: ignore
        # Delete the stubs from the class namespace before the NamedTuple
        # machinery builds the class.
        del __init__
        del __new__
def __repr__(self) -> str:
x, y = self
return f"Vec2d({x:n}, {y:n})"
@overload # type: ignore[override]
def __add__(self, other: "Vec2d") -> "Vec2d":
...
@overload
def __add__(self, other: VecLike) -> "Vec2d":
...
def __add__(self, other):
try:
u, v = other
except (IndexError, TypeError):
return NotImplemented
else:
return Vec2d(self.x + u, self.y + v)
def __radd__(self, other: VecLike) -> "Vec2d":
return self.__add__(other)
def __sub__(self, other: VecLike) -> "Vec2d":
try:
u, v = other
except (IndexError, TypeError):
return NotImplemented
else:
return Vec2d(self.x - u, self.y - v)
def __rsub__(self, other: VecLike) -> "Vec2d":
try:
u, v = other
except (IndexError, TypeError):
return NotImplemented
else:
return Vec2d(u - self.x, v - self.y)
def __mul__(self, other: float) -> "Vec2d":
if other.__class__ is float or isinstance(other, numbers.Real):
return Vec2d(self.x * other, self.y * other)
return NotImplemented
def __rmul__(self, other: float) -> "Vec2d":
return self.__mul__(other)
def __floordiv__(self, other: float) -> "Vec2d":
if other.__class__ is float or isinstance(other, numbers.Real):
return Vec2d(self.x // other, self.y // other)
return NotImplemented
def __truediv__(self, other: float) -> "Vec2d":
if other.__class__ is float or isinstance(other, numbers.Real):
return Vec2d(self.x / other, self.y / other)
return NotImplemented
def __neg__(self) -> "Vec2d":
return Vec2d(-self.x, -self.y)
def __pos__(self) -> "Vec2d":
return Vec2d(+self.x, +self.y)
def __abs__(self) -> float:
return self.length
def scale_to_length(self, length: float) -> "Vec2d":
"""Return a copy of this vector scaled to the given length.
>>> Vec2d(3, 4).scale_to_length(10)
Vec2d(6, 8)
"""
old_length = self.length
return Vec2d(self.x * length / old_length, self.y * length / old_length)
def rotated(self, angle: float) -> "Vec2d":
"""
Create and return a new vector by rotating this vector by
angle (in degrees).
"""
cos_ = cos(angle)
sin_ = sin(angle)
x = self.x * cos_ - self.y * sin_
y = self.x * sin_ + self.y * cos_
return Vec2d(x, y)
def rotated_radians(self, radians: float) -> "Vec2d":
"""
Create and return a new vector by rotating this vector by
angle (in radians).
"""
return self.rotated(_degrees(radians))
def angle_between(self, other: VecLike) -> float:
"""Get the angle between the vector and the other in degrees."""
u, v = other
cross = self.x * v - self.y * u
dot = self.x * u + self.y * v
return atan2(cross, dot)
def radians_between(self, other: VecLike) -> float:
"""Get the angle between the vector and the other in radians."""
return _radians(self.angle_between(other))
def normalized(self) -> "Vec2d":
"""
Get a normalized copy of the vector
Note:
This function will return 0 if the length of the vector is 0.
"""
length = self.length
if length != 0:
return self / length
return Vec2d(0, 0)
def normalized_and_length(self) -> Tuple["Vec2d", float]:
"""
Normalize the vector and return its length before the normalization
"""
length = self.length
if length != 0:
return self / length, length
return Vec2d(0, 0), 0
def perpendicular(self) -> "Vec2d":
"""
Return a perpendicular vector rotated by 90 degrees counterclockwise.
"""
return Vec2d(-self.y, self.x)
def perpendicular_normal(self) -> "Vec2d":
"""
Return a normalized perpendicular vector.
Rotate counterclockwise.
"""
length = self.length
if length != 0:
return Vec2d(-self.y / length, self.x / length)
return Vec2d(self.x, self.y)
def dot(self, other: VecLike) -> float:
"""The dot product between the vector and other vector
u.dot(v) -> u.x * v.x + u.y * v.y
"""
u, v = other
return float(self.x * u + self.y * v)
def distance(self, other: VecLike) -> float:
"""The distance between the vector and other vector."""
u, v = other
return sqrt((self.x - u) ** 2 + (self.y - v) ** 2)
def distance_sqr(self, other: VecLike) -> float:
"""The squared distance between the vector and other vector.
It is more efficient to use this method than to call get_distance()
first and then do a sqrt() on the result.
"""
u, v = other
return (self.x - u) ** 2 + (self.y - v) ** 2
def projection(self, other: VecLike) -> "Vec2d":
"""Project this vector on top of other vector"""
u, v = other
other_length_sqrd = u * u + v * v
if other_length_sqrd == 0.0:
return Vec2d(0, 0)
projected_length_times_other_length = self.dot(other)
new_length = projected_length_times_other_length / other_length_sqrd
return Vec2d(u * new_length, v * new_length)
    def cross(self, other: VecLike) -> float:
        """The cross product between the vector and other vector
        u.cross(v) -> u.x * v.y - u.y * v.x
        """
        # u and v here are the x and y components of `other`.
        u, v = other
        return self.x * v - self.y * u
def interpolate_to(self, other: VecLike, ratio: float = 0.5) -> "Vec2d":
"""Interpolate with other vector.
The "ratio" | |
<gh_stars>0
########################################################################
########################################################################
# Ready for experiments
# Update 2020.07.14
########################################################################
########################################################################
import numpy as np
########################################################################
# Hospital class
########################################################################
class Hospital:
"""A Hospital supports the following functions:
** Setup: Initialization **
__init__(self, h_id, capacity, cap) : only hospital_id is necessary
** Setup: Add one contract to priority_ordering **
add_priority_ordering(self, contract)
** Setup: Randomize priority_ordering from a list of integers **
reset_priority_ordering(self, list_of_integers)
** Algorithm: Clear applicant pool for each iteration **
clear_applicant_pool(self)
** Algorithm: Add one proposal from some doctor to applicant_pool **
add_applicant_pool(self, contract)
** Algorithm: (GDA-RH) Hospital chooses contracts by priority up to capacity **
choose_from_applicant_pool_capacity(self)
** Algorithm: (ADA) Hospital chooses contracts by priority up to artificial_cap **
choose_from_applicant_pool_artificial_cap(self)
** Algorithm: Hospital notifies doctors about acceptance and rejection **
feedback_to_doctor(self):
"""
def __init__(self, h_id, capacity=10, cap=8):
"""
Create a new hospital instance. Only hospital_ID is necessary when initialization.
Members:
hospital_id : an integer
capacity : an integer
artificial_cap : an integer
weight : an integer
associated_region_id : an integer
priority_ordering : a list of contracts
applicant_pool : a list of contracts
current_match_contract : a list of contracts
current_match_total_number : an integer
final_ranking : a list of integers
"""
self._hospital_id = h_id
self._capacity = capacity
self._artificial_cap = cap
self._weight = -1
self._associated_region_id = -1
# for algorithm_choice_function_hospitals
self._priority_ordering = [None] * 0
self._applicant_pool = [None] * 0
self._current_match_contract = [None] * 0
self._current_match_total_number = 0
# for outcome_analysis
self._final_ranking = [None] * 0
########################################################################
# methods on hospital_id
# update on 2020.07.24
########################################################################
# Necessary: get hospital_id
def get_hospital_id(self):
"""
Return hospital_id
"""
return self._hospital_id
# Optional: set a new hospital_id
def set_hospital_id(self, new_id):
"""
Set a new ID
Parameter: an integer
"""
self._hospital_id = new_id
########################################################################
# methods on capacity
# update on 2020.07.24
########################################################################
# Necessary: get capacity
def get_capacity(self):
"""
Return capacity
"""
return self._capacity
# Optional: set a new capacity
def set_capacity(self, capacity):
"""
Set a new capacity
Parameter: an integer
"""
self._capacity = capacity
########################################################################
# methods on artificial_cap
# update on 2020.07.24
########################################################################
# Necessary: get artificial_cap
def get_artificial_cap(self):
"""
Return artificial_cap
"""
return self._artificial_cap
# Optional: set a new artificial_cap
def set_artificial_cap(self, cap):
"""
Set a new artificial_cap
Parameter: an integer
"""
self._artificial_cap = cap
########################################################################
# methods on weight
# update on 2020.07.24
########################################################################
# Optional: get weight
def get_weight(self):
"""
Return weight
"""
return self._weight
# Optional: set a new weight
def set_weight(self, weight):
"""
Set a new weight
Parameter: an integer
"""
self._weight = weight
########################################################################
# methods on associated_region_id
# update on 2020.07.24
########################################################################
# Necessary: get associated_region_id
def get_associated_region_id(self):
"""
Return associated_region_id
"""
return self._associated_region_id
# optional: set a new associated_region_id
def set_associated_region_id(self, r_id):
"""
Set a new associated_region_id
Parameter: an integer
"""
self._associated_region_id = r_id
########################################################################
# methods on priority_ordering
# update on 2020.07.24
########################################################################
# Important: Add one contract to self._priority_ordering
def add_priority_ordering(self, contract):
"""
This function is called by market when initializing priority profile of hospitals.
"""
# check whether the contract has been added to self._priority_ordering
if contract not in self._priority_ordering:
self._priority_ordering.append(contract)
# Necessary: reset self._priority_ordering in according with a list of integers
def reset_priority_ordering_from_integers(self, list_of_integers):
"""
This function is called by market when randomizing priority profile of hospitals.
Warn: self._priority_ordering cannot be empty!
"""
if len(list_of_integers) != len(self._priority_ordering):
raise Exception("Check the length of input list! It should be the same as the priority ordering")
new_ordering = [None] * 0
for i in list_of_integers:
new_ordering.append(self._priority_ordering[i])
self._priority_ordering = new_ordering
# Optional: clear self._priority_ordering
def clear_priority_ordering(self):
"""
Clear priority ordering
"""
self._priority_ordering.clear()
# Optional: print priority ordering
def print_priority_ordering(self):
"""
Print priority ordering
Warn: Contract class must support print
"""
for m in range(0, len(self._priority_ordering)):
print(self._priority_ordering[m])
########################################################################
# methods on applicant_pool
# update on 2020.07.24
########################################################################
# Optional: return applicant_pool
def get_applicant_pool(self):
"""
Return applicant_pool
"""
return self._applicant_pool
# Necessary: Add one contract to self._applicant_pool
def add_applicant_pool(self, contract):
"""
This function is called by market during Algorithm after doctors propose contracts.
It assigns proposals from doctors to corresponding hospitals.
"""
h_id = contract.get_hospital_id()
if self._hospital_id != h_id:
raise Exception("Check hospital_id of the contract! It should be the same as self._hospital_id.")
# check whether the contract has been added to self._applicant_pool
if contract not in self._applicant_pool:
self._applicant_pool.append(contract)
# Necessary: sort applicant_pool by self._priority_ordering
def sort_applicant_pool_by_priority(self):
"""
This function is called by self.choose_from_applicant_pool() when hospital chooses contracts.
"""
tmp_list = sorted(self._applicant_pool, key=lambda x: self._priority_ordering.index(x))
self._applicant_pool = tmp_list
# Necessary: Clear applicant pool
def clear_applicant_pool(self):
"""
This function is called by market during Algorithm - choice function of hospitals
in the beginning of each iteration of choice functions of hospitals.
"""
self._applicant_pool.clear()
# Optional: display applicant_pool
def print_applicant_pool(self):
"""
print proposal pool
Warn: Contract class must support print
"""
for m in range(0, len(self._applicant_pool)):
print(self._applicant_pool[m])
########################################################################
# methods on current_match_contract & current_match_total_number
# update on 2020.07.24
########################################################################
# Optional: return current_match_contract
def get_current_match_contract(self):
"""
return a list of contracts
"""
return self._current_match_contract
# Necessary: return current_match_total_number
def get_current_match_total_number(self):
"""
return total_number of current matched contracts
"""
return self._current_match_total_number
# Optional: reset current_match_contract from a list of contracts
def reset_current_match_contract(self, list_contracts):
"""
Reset current_match_contract from a list of contracts
parameter : a list of contracts
"""
# clear_current_match_contract first
self.clear_current_match_contract()
for contract in list_contracts:
self._current_match_contract.append(contract)
# Update current_match_total_number
self._current_match_total_number = len(list_contracts)
# Necessary: clear current_match_contract
def clear_current_match_contract(self):
"""
This function is called before each iteration of choice function of hospitals.
"""
self._current_match_contract.clear()
# Update current_match_total_number
self._current_match_total_number = 0
# Optional: print current_match_contract
def print_current_match_contract(self):
"""
print current_match_contract
Warn: Contract class must support print
"""
for contract in self._current_match_contract:
print(contract)
########################################################################
# methods: final_ranking
# update: 2020.07.24
########################################################################
# Necessary: return final_ranking
def get_final_ranking(self):
"""
This function is called when analyzing final outcome.
"""
return self._final_ranking
# Necessary: add the position of given contract to self._final_ranking
def add_contract_final_ranking(self, contract):
"""
This function is called by market.initialize_market_current_match()
"""
pos = self._priority_ordering.index(contract)
if pos not in self._final_ranking:
self._final_ranking.append(pos)
# Necessary: clear final_ranking
def clear_final_ranking(self):
"""
This function is called by market.initialize_market_current_match()
"""
self._final_ranking.clear()
# optional: print final ranking
def print_final_ranking(self):
"""
print_final_ranking
"""
print(self._final_ranking)
########################################################################
# Methods for Algorithm - Choice function of hospital
# update: 2020.07.24
########################################################################
# Necessary: (GDA-RH) Choose contracts from applicant_pool up to capacity
def choose_from_applicant_pool_capacity(self):
"""
This function is called by GDA-RH as choice function of hospital
"""
# Sort applicant_pool first
self.sort_applicant_pool_by_priority()
# Clear current_match_contract
self.clear_current_match_contract()
count = 0
if count < self._capacity:
for contract in self._applicant_pool:
self._current_match_contract.append(contract)
count += 1
# check whether the number of currently matched contracts reaches hospital capacity
if count == self._capacity:
break
# Update current_match_total_number
self._current_match_total_number = len(self._current_match_contract)
# Necessary: (ADA) Choose contracts from applicant_pool to artificial_cap
def choose_from_applicant_pool_artificial_cap(self):
"""
This function is called by ADA as choice function of hospital.
"""
# Sort applicant_pool first
self.sort_applicant_pool_by_priority()
# Clear current_match_contract
self.clear_current_match_contract()
count = 0
if count < self._artificial_cap:
for contract in self._applicant_pool:
self._current_match_contract.append(contract)
count += 1
# check whether the number of currently matched contracts reaches hospital artificial_cap
if count == self._artificial_cap:
break
# Update current_match_total_number
self._current_match_total_number = len(self._current_match_contract)
# Necessary: feedback_to_doctors that whether proposals are accepted or rejected
def feedback_to_doctor(self, display_procedure=False):
"""
This function is called by choice function of hospitals after hospitals select contracts.
"""
for contract in self._applicant_pool:
d = contract.get_doctor()
if contract in self._current_match_contract:
d.proposal_accepted()
else:
d.proposal_rejected()
if display_procedure:
print("doctor", d.get_doctor_id(), | |
ef = dflux[chunk[i]:chunk[i+1]]
crow = crowd[chunk[i]:chunk[i+1]]
maf = rsky(e, period, t0, 1e-8, t)
npts=len(maf)
#use this version for full lightcurve treatment...
r = a*(1.-e**2) / (1.+e*np.cos(maf))
zcomp = np.sin(omega+maf) * np.sin(inc)
#z = r*np.sqrt(1.-zcomp**2)
pe = ((r*zcomp>0.))# & (z <= 1.05*(r1+r2)*r2au))
se = ((r*zcomp<0.))# & (z <= 1.05*(r1+r2)*r2au))
model = np.ones(npts)
# sse = (((maf+omega) % (TWOPI))>np.pi)
# ppe = (((maf+omega) % (TWOPI))<=np.pi)
# plt.plot(t, f, 'ro', t[pe], f[pe], 'go', t[se], f[se], 'bo')
# plt.title(str(i))
# plt.show()
if pe.any():
# shift = period * np.round((np.mean(t[pe]) - tpe)/period)
model[pe] = np.interp(t[pe]%period, tempt1, tempres1)
model[pe] = (model[pe] - 1.) * crow[pe] + 1.
# print "PE: mean(t[pe]), tpe, (mean(t[pe])-tpe)/period, round ver"
# print np.mean(t[pe]), tpe, (np.mean(t[pe]) - tpe)/period, np.round((np.mean(t) - tpe)/period), len(t[pe]), len(f[pe]), len(tempt1), len(tempres1), len(model[pe])
# plt.plot(t[pe]-shift, f[pe], 'ro', tempt1, (tempres1-1.)*crow[pe][0] + 1., 'bo', t[pe]-shift, model[pe], 'go')
# plt.title('pe')
# plt.show()
# plt.close('all')
if se.any():
# shift = period * np.round((np.mean(t[se]) - tse)/period)
model[se] = np.interp(t[se]%period, tempt2, tempres2)
model[se] = (model[se] - 1.) * crow[se] + 1.
# print "SE"
# print np.mean(t[se]), tse, (np.mean(t[se]) - tse)/period, np.round((np.mean(t[se]) - tse)/period)
# plt.plot(t[se]-shift, f[se], 'ro', tempt2, (tempres2-1.)*crow[se][0] + 1., 'bo', t[se]-shift, model[se], 'go')
# plt.title('se')
# plt.show()
# plt.close('all')
# else:
# print "This data bundle does not belong to SE or PE"
# marginalization (2nd order polynomial fit to residuals)
bad = (model<1)
tt = t[~bad]
mmodel = model[~bad]
ff = f[~bad]
eef = ef[~bad]
nnpts = len(ff)
tnew = tt - np.mean(tt)
#if len(t[~bad]) < 1:
#print "Npts ooe = ",len(t[~bad])
# Bk = sum over i (D_i/M_i)(tdiff_i)^k / (sigma_i/M_i)^2
# matrix 3 rows x npts columns since quadratic polynomial
# fit requires 3 coeffs
if bad[0] or bad[-1]:
poly_remember = polyorder
polyorder=1
#number of 'i' data or model points; polynomial order
order_pow = np.arange(polyorder+1)
t_pow = tnew[:,np.newaxis]**order_pow
Bk = np.ones(shape=(polyorder+1,nnpts))*((ff/mmodel)/(eef/mmodel)**2)
Bk*=t_pow.T
#sum along 'i' (or along each row)
Bksum = np.sum(Bk,axis=1)
#Mjk = sum over i (tdiff_i)^j * (tdiff_i)^k / (sigma_i/M_i)^2
#construct 3 rows x npts columns
Mj = np.ones(shape=(polyorder+1,nnpts))/(eef/mmodel)**2
Mj*=t_pow.T
#transform from 2D (j rows x i columns) to 3D (k x j x i)
t_pow_3d = tnew[:,np.newaxis,np.newaxis]**order_pow
Mjk = t_pow_3d.T * Mj[np.newaxis,:,:]
#now sum along 'i'
Mjksum = np.sum(Mjk,axis=2)
#do matrix inversion solver thing to get polynomial coeffs
try:
Aj = np.linalg.lstsq(Mjksum,Bksum)[0]
pol = np.polyval(Aj[::-1],t-np.mean(t))
except:
pol = np.ones(npts)
#Aj = np.dot(np.linalg.pinv(Mjksum), Bksum)
# plt.plot(t, f, 'ro', t, model*pol, 'go')
# plt.plot(t, pol, 'ms', tt, np.polyval(Aj[::-1],tnew), 'cs')
# plt.show()
if bad[0] or bad[-1]:
polyorder = poly_remember
totmod[chunk[i]:chunk[i+1]] = model
totpol[chunk[i]:chunk[i+1]] = pol
else:
maf = rsky(e, period, t0, 1e-8, jd)
r = a*(1.-e**2) / (1.+e*np.cos(maf))
zcomp = np.sin(omega+maf) * np.sin(inc)
#z = r*np.sqrt(1.-zcomp**2)
pe = ((r*zcomp>0.)) #& (z <= 1.05*(r1+r2)*r2au))
se = ((r*zcomp<0.)) #& (z <= 1.05*(r1+r2)*r2au))
tt = jd % period
if pe.any():
totmod[pe] = np.interp(tt[pe], tempt1, tempres1)
totmod[pe] = (totmod[pe] - 1.) * crowd[pe] + 1.
if se.any():
totmod[se] = np.interp(tt[se], tempt2, tempres2)
totmod[se] = (totmod[se] - 1.) * crowd[se] + 1.
# if np.sum(totmod[se]-1.) == 0.:
# return np.ones_like(totmod), totpol
# if np.sum(totmod-1.) == 0.:
# return totmod, totmod
return totmod, totpol
def lcfit_slow2(self, lcpars, jd, phase, flux, dflux, crowd,
polyorder=2):
"""Computes light curve model
Parameters
----------
lcpars : float array
parameters for LC fitting:
msum, rsum, rratio, period, tpe, esinw, ecosw, b, frat, q1, q2, q3, q4
jd : float array
time array
phase : float array
corresponding phase
flux : float array
observed flux
dflux : float array
flux error
crowd : float array
array of crowding values (additional flux)
polyorder : int
order of polynomial to detrend lightcurve
Returns
-------
totmod : float array
array of model fluxes
totpol : float array
array of polynomials for detrending
"""
# r1, r2, frat derive from m1, m2, z0, t0, dist, E(B-V), scaleheight
msum, rsum, rrat, period, tpe, esinw, ecosw, b, frat, \
q1, q2, q3, q4 = lcpars
# self.updatepars(m1=m1, m2=m2, period=period, tpe=tpe, esinw=esinw,
# ecosw=ecosw, b=b, q1=q1, q2=q2, q3=q3, q4=q4)
# LD transformations (Kipping 2013)
c1 = 2.*np.sqrt(q1)*q2
c2 = np.sqrt(q1)*(1.-2.*q2)
c3 = 2.*np.sqrt(q3)*q4
c4 = np.sqrt(q3)*(1.-2.*q4)
ldcoeffs1 = np.array([c1, c2])
ldcoeffs2 = np.array([c3, c4])
# if r2 > r1:
# r1, r2 = r2, r1
# m1, m2 = m2, m1
# frat = 1./frat
omega=np.arctan2(esinw,ecosw)
e=np.sqrt(esinw**2+ecosw**2)
# nip it at the bud.
if (e>=1.):
#print "e>=1", e
return -np.inf, -np.inf
r1 = rsum/(1.+rrat)
r2 = rsum/(1.+1./rrat)
a = ((period/d2y)**2 * (msum))**(1./3.)
inc = np.arccos(b*r1/(a/r2au))
if np.isnan(inc):
#print "inc is nan", inc
return -np.inf, -np.inf
fpe = np.pi/2. - omega
fse = -np.pi/2. - omega
# transform time of center of PE to time of periastron (t0)
# from Eq 9 of Sudarsky et al (2005)
t0 = tpe - (-np.sqrt(1.-e**2) * period / (2.*np.pi)) * \
(e*np.sin(fpe)/(1.+e*np.cos(fpe)) - 2.*(1.-e**2)**(-0.5) * \
np.arctan(np.sqrt(1.-e**2) * np.tan((fpe)/2.) / (1.+e)))
tse = t0 + (-np.sqrt(1.-e**2) * period / (2.*np.pi)) * \
(e*np.sin(fse)/(1.+e*np.cos(fse)) - 2.*(1.-e**2)**(-0.5) * \
np.arctan(np.sqrt(1.-e**2) * np.tan((fse)/2.) / (1.+e)))
# if tse<tpe:
# tse+=period
tempt1, tempres1 = self.lctemplate(lcpars, period, omega, e, a, inc, r1,
ldcoeffs1, r2/r1, tpe, t0,
cadence = self.cadence,
exp = self.exp, pe=True)
tempt2, tempres2 = self.lctemplate(lcpars, period, omega, e, a, inc, r2,
ldcoeffs2, r1/r2, tse, t0,
cadence = self.cadence,
exp = self.exp, pe=False)
tempt1 = tempt1 % period
tempt2 = tempt2 % period
tempres1 = (tempres1 - 1.)/(1. + frat) + 1.
tempres2 = (tempres2 - 1.)/(1. + 1./frat) + 1.
sorting1 = np.argsort(tempt1)
sorting2 = np.argsort(tempt2)
tempres1 = tempres1[sorting1]
tempt1 = tempt1[sorting1]
tempres2 = tempres2[sorting2]
tempt2 = tempt2[sorting2]
#not including crowdsap term.
#tempres1 = (tempres1 + frat) / (1.+frat)
#tempres2 = (tempres2 * frat + 1.) / (1. + frat)
totpol, totmod = np.ones(len(jd)), np.ones(len(jd))
if polyorder>0:
# mask out continuum data
#clip = ((abs(phase)<1.5*self.pwidth) | (abs(phase-self.sep) < 1.5*self.swidth))
clip = (jd>0)
chunk = np.array(np.where(np.diff(jd[clip]) > self.pwidth*period))[0]
#put in dummy first and last element # placeholders
chunk = np.append(chunk, len(jd[clip])-2).flatten()
_, chunk3 = np.unique(np.searchsorted(jd[clip][chunk], jd), return_index=True)
chunk=chunk3
chunk[-1]+=1
# plt.plot(self.jd, self.flux, 'ro', self.jd[clip], self.flux[clip], 'go', self.jd[chunk[:-1]],self.flux[chunk[:-1]], 'mo')
# plt.show()
for i in range(len(chunk)-1):
#print i, chunk[i], chunk[i+1], self.jd[chunk[i]:chunk[i+1]]
t = jd[chunk[i]:chunk[i+1]]
f = flux[chunk[i]:chunk[i+1]]
ef = dflux[chunk[i]:chunk[i+1]]
crow = crowd[chunk[i]:chunk[i+1]]
maf = rsky(e, period, t0, 1e-8, t)
npts=len(maf)
#use this version for full lightcurve treatment...
r = a*(1.-e**2) / (1.+e*np.cos(maf))
zcomp = np.sin(omega+maf) * np.sin(inc)
#z = r*np.sqrt(1.-zcomp**2)
pe = ((r*zcomp>0.))# & (z <= 1.05*(r1+r2)*r2au))
se = ((r*zcomp<0.))# & (z <= 1.05*(r1+r2)*r2au))
model = np.ones(npts)
# sse = (((maf+omega) % (TWOPI))>np.pi)
# ppe = (((maf+omega) % (TWOPI))<=np.pi)
# plt.plot(t, f, 'ro', t[pe], f[pe], 'go', t[se], f[se], 'bo')
# plt.title(str(i))
# plt.show()
if pe.any():
# shift = period * np.round((np.mean(t[pe]) - tpe)/period)
model[pe] = np.interp(t[pe]%period, tempt1, tempres1)
model[pe] = (model[pe] - 1.) * crow[pe] + 1.
# print "PE: mean(t[pe]), tpe, (mean(t[pe])-tpe)/period, round ver"
# print np.mean(t[pe]), tpe, (np.mean(t[pe]) - tpe)/period, np.round((np.mean(t) - tpe)/period), len(t[pe]), len(f[pe]), len(tempt1), len(tempres1), len(model[pe])
# plt.plot(t[pe]-shift, f[pe], 'ro', tempt1, (tempres1-1.)*crow[pe][0] + 1., 'bo', t[pe]-shift, model[pe], 'go')
# plt.title('pe')
# plt.show()
# plt.close('all')
if se.any():
# shift = period * np.round((np.mean(t[se]) - tse)/period)
model[se] = np.interp(t[se]%period, tempt2, tempres2)
model[se] = (model[se] - 1.) * crow[se] + 1.
# print "SE"
# print np.mean(t[se]), tse, (np.mean(t[se]) - tse)/period, np.round((np.mean(t[se]) - tse)/period)
# plt.plot(t[se]-shift, f[se], 'ro', tempt2, (tempres2-1.)*crow[se][0] + 1., 'bo', t[se]-shift, model[se], 'go')
# plt.title('se')
# plt.show()
# plt.close('all')
# else:
# print "This data bundle does not belong to SE or PE"
# marginalization (2nd order polynomial fit to residuals)
bad = (model<1)
tt = t[~bad]
mmodel = model[~bad]
ff = f[~bad]
eef = ef[~bad]
nnpts = len(ff)
tnew = tt - np.mean(tt)
#if len(t[~bad]) < 1:
#print "Npts ooe = ",len(t[~bad])
# Bk = sum over i (D_i/M_i)(tdiff_i)^k / (sigma_i/M_i)^2
# matrix 3 rows x npts | |
import glob
import itertools
from collections import defaultdict
import os
from configparser import ConfigParser, MissingSectionHeaderError, NoSectionError, NoOptionError
from datetime import datetime
import numpy as np
import pandas as pd
from shapely import geometry
from shapely.geometry import Point
from simba.drop_bp_cords import getBpHeaders
from simba.rw_dfs import *
def roiAnalysis(inifile, inputcsv, calculate_dist):
dateTime = datetime.now().strftime('%Y%m%d%H%M%S')
config = ConfigParser()
config.read(inifile)
## get dataframe column name
noAnimals = config.getint('ROI settings', 'no_of_animals')
projectPath = config.get('General settings', 'project_path')
try:
wfileType = config.get('General settings', 'workflow_file_type')
except NoOptionError:
wfileType = 'csv'
try:
probability_threshold = config.getfloat('ROI settings', 'probability_threshold')
except NoOptionError:
probability_threshold = 0.000
animalBodypartList = []
for bp in range(noAnimals):
animalName = 'animal_' + str(bp + 1) + '_bp'
animalBpName = config.get('ROI settings', animalName)
animalBpNameX, animalBpNameY, animalBpNameP = animalBpName + '_x', animalBpName + '_y', animalBpName + '_p'
animalBodypartList.append([animalBpNameX, animalBpNameY, animalBpNameP])
print(animalBodypartList)
columns2grab = [item[0:3] for item in animalBodypartList]
columns2grab = [item for sublist in columns2grab for item in sublist]
try:
multiAnimalIDList = config.get('Multi animal IDs', 'id_list')
multiAnimalIDList = multiAnimalIDList.split(",")
if multiAnimalIDList[0] != '':
multiAnimalStatus = True
print('Applying settings for multi-animal tracking...')
else:
multiAnimalStatus = False
multiAnimalIDList = []
for animal in range(noAnimals):
multiAnimalIDList.append('Animal_' + str(animal+1) + '_')
print('Applying settings for classical tracking...')
except NoSectionError:
multiAnimalIDList = []
for animal in range(noAnimals):
multiAnimalIDList.append('Animal_' + str(animal + 1) + '_')
multiAnimalStatus = False
print('Applying settings for classical tracking...')
logFolderPath = os.path.join(projectPath, 'logs')
detailed_ROI_data_path = os.path.join(projectPath, 'logs', 'Detailed_ROI_data')
if not os.path.exists(detailed_ROI_data_path): os.makedirs(detailed_ROI_data_path)
if calculate_dist:
out_df_list = []
vidInfPath = os.path.join(logFolderPath, 'video_info.csv')
vidinfDf = pd.read_csv(vidInfPath)
csv_dir_in = os.path.join(projectPath, 'csv', inputcsv)
ROIcoordinatesPath = os.path.join(logFolderPath, 'measures', 'ROI_definitions.h5')
rectanglesInfo = pd.read_hdf(ROIcoordinatesPath, key='rectangles')
circleInfo = pd.read_hdf(ROIcoordinatesPath, key='circleDf')
polygonInfo = pd.read_hdf(ROIcoordinatesPath, key='polygons')
outputDfTime = pd.DataFrame(columns=['Video'])
rectangleNames, circleNames, polygonNames = (list(rectanglesInfo['Name'].unique()), list(circleInfo['Name'].unique()), list(polygonInfo['Name'].unique()))
shapeList = list(itertools.chain(rectangleNames, circleNames, polygonNames))
for newcol in range(len(shapeList)):
for bp in multiAnimalIDList:
colName = str(bp) + ' ' + shapeList[newcol] + ' (s)'
outputDfTime[colName] = 0
for newcol in range(len(shapeList)):
for bp in multiAnimalIDList:
colName = str(bp) + ' ' + shapeList[newcol] + ' (% of session)'
outputDfTime[colName] = 0
outputDfEntries = outputDfTime.copy()
filesFound = glob.glob(csv_dir_in + '/*.' + wfileType)
movement_in_ROIs_dict = {}
for i in filesFound:
CurrVidFn = os.path.basename(i)
CurrentVideoName = CurrVidFn.replace('.' + wfileType, '')
print('Analysing ' + str(CurrentVideoName) + '...')
videoSettings = vidinfDf.loc[vidinfDf['Video'] == str(CurrentVideoName)]
try:
currFps = int(videoSettings['fps'])
pix_per_mm = float(videoSettings['pixels/mm'])
except TypeError:
print('The FPS / pixels per millimeter for ' + CurrentVideoName + ' could not be found in the project/logs/video_info.csv file, or multiple entries for ' + CurrentVideoName + ' exist in your project/logs/video_info.csv file. Make sure each video in your project is represented once in your project/logs/video_info.csv file')
noRectangles = len(rectanglesInfo.loc[rectanglesInfo['Video'] == str(CurrentVideoName)])
noCircles = len(circleInfo.loc[circleInfo['Video'] == str(CurrentVideoName)])
noPolygons = len(polygonInfo.loc[polygonInfo['Video'] == str(CurrentVideoName)])
rectangleTimes, rectangleEntries = ([[0] * len(multiAnimalIDList) for i in range(noRectangles)] , [[0] * len(multiAnimalIDList) for i in range(noRectangles)])
circleTimes, circleEntries = ([[0] * len(multiAnimalIDList) for i in range(noCircles)], [[0] * len(multiAnimalIDList) for i in range(noCircles)])
polygonTime, polyGonEntries = ([[0] * len(multiAnimalIDList) for i in range(noPolygons)], [[0] * len(multiAnimalIDList) for i in range(noPolygons)])
Rectangles = (rectanglesInfo.loc[rectanglesInfo['Video'] == str(CurrentVideoName)])
Circles = (circleInfo.loc[circleInfo['Video'] == str(CurrentVideoName)])
Polygons = (polygonInfo.loc[polygonInfo['Video'] == str(CurrentVideoName)])
rectangleEntryCheck = [[True] * len(multiAnimalIDList) for i in range(noRectangles)]
circleEntryCheck = [[True] * len(multiAnimalIDList) for i in range(noCircles)]
polygonEntryCheck = [[True] * len(multiAnimalIDList) for i in range(noPolygons)]
currDfPath = os.path.join(csv_dir_in, CurrVidFn)
csv_df = read_df(currDfPath, wfileType)
csv_df = csv_df.loc[:, ~csv_df.columns.str.contains('^Unnamed')]
try:
csv_df = csv_df.set_index('scorer')
except KeyError:
pass
bpHeaders = getBpHeaders(inifile)
csv_df.columns = bpHeaders
currDf = csv_df[columns2grab]
totalSecInSession = currDf.shape[0] / currFps
rect_ee_dict, circle_ee_dict, polygon_ee_dict = {}, {}, {}
for animal in range(noAnimals):
rect_ee_dict[multiAnimalIDList[animal]] = {}
for rectangle in range(noRectangles):
rect_ee_dict[multiAnimalIDList[animal]][Rectangles['Name'].iloc[rectangle]] = {'Entry_times': [], 'Exit_times': []}
for animal in range(noAnimals):
circle_ee_dict[multiAnimalIDList[animal]] = {}
for circle in range(noCircles):
circle_ee_dict[multiAnimalIDList[animal]][Circles['Name'].iloc[circle]] = {'Entry_times': [], 'Exit_times': []}
for animal in range(noAnimals):
polygon_ee_dict[multiAnimalIDList[animal]] = {}
for poly in range(noPolygons):
polygon_ee_dict[multiAnimalIDList[animal]][Polygons['Name'].iloc[poly]] = {'Entry_times': [], 'Exit_times': []}
for index, row in currDf.iterrows():
currentPoints = np.empty((noAnimals, 2), dtype=int)
current_probability_list = []
for animal in range(noAnimals):
currentPoints[animal][0], currentPoints[animal][1] = int(row[animalBodypartList[animal][0]]), int(row[animalBodypartList[animal][1]])
current_probability_list.append(row[animalBodypartList[animal][2]])
for rectangle in range(noRectangles):
topLeftX, topLeftY = (Rectangles['topLeftX'].iloc[rectangle], Rectangles['topLeftY'].iloc[rectangle])
bottomRightX, bottomRightY = (topLeftX + Rectangles['width'].iloc[rectangle], topLeftY + Rectangles['height'].iloc[rectangle])
rectName = Rectangles['Name'].iloc[rectangle]
for bodyparts in range(len(currentPoints)):
if ((((topLeftX-10) <= currentPoints[bodyparts][0] <= (bottomRightX+10)) and ((topLeftY-10) <= currentPoints[bodyparts][1] <= (bottomRightY+10)))) and (current_probability_list[bodyparts] > probability_threshold):
rectangleTimes[rectangle][bodyparts] = round((rectangleTimes[rectangle][bodyparts] + (1 / currFps)), 2)
if rectangleEntryCheck[rectangle][bodyparts] == True:
rect_ee_dict[multiAnimalIDList[bodyparts]][rectName]['Entry_times'].append(index)
rectangleEntries[rectangle][bodyparts] += 1
rectangleEntryCheck[rectangle][bodyparts] = False
else:
if rectangleEntryCheck[rectangle][bodyparts] == False:
rect_ee_dict[multiAnimalIDList[bodyparts]][rectName]['Exit_times'].append(index)
rectangleEntryCheck[rectangle][bodyparts] = True
for circle in range(noCircles):
circleName, centerX, centerY, radius = (Circles['Name'].iloc[circle], Circles['centerX'].iloc[circle], Circles['centerY'].iloc[circle], Circles['radius'].iloc[circle])
for bodyparts in range(len(currentPoints)):
euclidPxDistance = int(np.sqrt((currentPoints[bodyparts][0] - centerX) ** 2 + (currentPoints[bodyparts][1] - centerY) ** 2))
if (euclidPxDistance <= radius) and (current_probability_list[bodyparts] > probability_threshold):
circleTimes[circle][bodyparts] = round((circleTimes[circle][bodyparts] + (1 / currFps)),2)
if circleEntryCheck[circle][bodyparts] == True:
circle_ee_dict[multiAnimalIDList[bodyparts]][circleName]['Entry_times'].append(index)
circleEntries[circle][bodyparts] += 1
circleEntryCheck[circle][bodyparts] = False
else:
if circleEntryCheck[circle][bodyparts] == False:
circle_ee_dict[multiAnimalIDList[bodyparts]][circleName]['Exit_times'].append(index)
circleEntryCheck[circle][bodyparts] = True
for polygon in range(noPolygons):
PolygonName, vertices = (Polygons['Name'].iloc[polygon], Polygons['vertices'].iloc[polygon])
vertices = np.array(vertices, np.int32)
for bodyparts in range(len(currentPoints)):
pointList = []
for i in vertices:
point = geometry.Point(i)
pointList.append(point)
polyGon = geometry.Polygon([[p.x, p.y] for p in pointList])
CurrPoint = Point(int(currentPoints[bodyparts][0]), int(currentPoints[bodyparts][1]))
polyGonStatus = (polyGon.contains(CurrPoint))
if (polyGonStatus == True) and (current_probability_list[bodyparts] > probability_threshold):
polygonTime[polygon][bodyparts] = round((polygonTime[polygon][bodyparts] + (1 / currFps)), 2)
if polygonEntryCheck[polygon][bodyparts] == True:
polygon_ee_dict[multiAnimalIDList[bodyparts]][PolygonName]['Entry_times'].append(index)
polyGonEntries[polygon][bodyparts] += 1
polygonEntryCheck[polygon][bodyparts] = False
else:
if polygonEntryCheck[polygon][bodyparts] == False:
polygon_ee_dict[multiAnimalIDList[bodyparts]][PolygonName]['Exit_times'].append(index)
polygonEntryCheck[polygon][bodyparts] = True
video_ee_df = pd.DataFrame(columns=['Animal_name', 'Shape_name', 'Entry_frame', 'Exit_frame'])
for animal_dict in rect_ee_dict:
for shape_dict in rect_ee_dict[animal_dict]:
for entry, exit in itertools.zip_longest(rect_ee_dict[animal_dict][shape_dict]['Entry_times'], rect_ee_dict[animal_dict][shape_dict]['Exit_times'], fillvalue=-1):
video_ee_df.loc[len(video_ee_df)] = [animal_dict, shape_dict, entry, exit]
for animal_dict in circle_ee_dict:
for shape_dict in circle_ee_dict[animal_dict]:
for entry, exit in itertools.zip_longest(circle_ee_dict[animal_dict][shape_dict]['Entry_times'], circle_ee_dict[animal_dict][shape_dict]['Exit_times'], fillvalue=-1):
video_ee_df.loc[len(video_ee_df)] = [animal_dict, shape_dict, entry, exit]
for animal_dict in polygon_ee_dict:
for shape_dict in polygon_ee_dict[animal_dict]:
for entry, exit in itertools.zip_longest(polygon_ee_dict[animal_dict][shape_dict]['Entry_times'], polygon_ee_dict[animal_dict][shape_dict]['Exit_times'], fillvalue=-1):
video_ee_df.loc[len(video_ee_df)] = [animal_dict, shape_dict, entry, exit]
video_ee_df = video_ee_df.sort_values(by='Entry_frame')
save_path = os.path.join(detailed_ROI_data_path, CurrentVideoName + '_' + dateTime + '.csv')
video_ee_df.to_csv(save_path, index=False)
rectangleTimes, circleTimes, polygonTime = (list(itertools.chain(*rectangleTimes)), list(itertools.chain(*circleTimes)), list(itertools.chain(*polygonTime)))
rectangleEntries, circleEntries, polyGonEntries = (list(itertools.chain(*rectangleEntries)), list(itertools.chain(*circleEntries)), list(itertools.chain(*polyGonEntries)))
collapsedListTime = [CurrentVideoName, rectangleTimes, circleTimes, polygonTime]
collapsedListTime = list(itertools.chain.from_iterable(itertools.repeat(x, 1) if isinstance(x, str) else x for x in collapsedListTime))
timesInCollTime = collapsedListTime[1:]
timesInCollTime = [x / totalSecInSession for x in timesInCollTime]
timesInCollTime = ['%.3f' % elem for elem in timesInCollTime]
collapsedListTime.extend(timesInCollTime)
collapsedListEntry = [CurrentVideoName, rectangleEntries, circleEntries, polyGonEntries]
collapsedListEntry = list(itertools.chain.from_iterable(itertools.repeat(x, 1) if isinstance(x, str) else x for x in collapsedListEntry))
EntrieInEntryList = collapsedListEntry[1:]
sumEntries = sum(EntrieInEntryList)
EntrieInEntryList = [x / sumEntries for x in EntrieInEntryList]
EntrieInEntryList = ['%.3f' % elem for elem in EntrieInEntryList]
collapsedListEntry.extend(EntrieInEntryList)
outputDfTime = outputDfTime.append(pd.Series(dict(zip(outputDfTime.columns, collapsedListTime))),ignore_index=True)
outputDfEntries = outputDfEntries.append(pd.Series(dict(zip(outputDfEntries.columns, collapsedListEntry))),ignore_index=True)
if calculate_dist:
shape_list_of_dicts = [rect_ee_dict, circle_ee_dict, polygon_ee_dict]
movement_in_ROIs_dict = {}
for curr_animal in multiAnimalIDList: movement_in_ROIs_dict[curr_animal] = {}
for shape_type in range(len(shape_list_of_dicts)):
curr_shape_dict = shape_list_of_dicts[shape_type]
for animal_counter, curr_animal in enumerate(curr_shape_dict):
curr_animal_dict = curr_shape_dict[curr_animal]
current_animal_df = csv_df[animalBodypartList[animal_counter]]
for shape in curr_animal_dict:
movement_in_ROIs_dict[curr_animal][shape] = {}
entry_list, exit_list = curr_animal_dict[shape]['Entry_times'], curr_animal_dict[shape]['Exit_times']
movement_list_in_shape = []
all_df_movements_list = []
try:
for entry_frame, exit_frame in zip(entry_list, exit_list):
entry_and_exit = current_animal_df.loc[entry_frame:exit_frame]
entry_and_exit = entry_and_exit.reset_index(drop=True)
entry_and_exit_shifted = entry_and_exit.shift(1)
entry_and_exit_shifted = entry_and_exit_shifted.combine_first(entry_and_exit).add_prefix('Shifted_')
entry_and_exit = pd.concat([entry_and_exit, entry_and_exit_shifted], axis=1)
entry_and_exit['Movement'] = (np.sqrt((entry_and_exit.iloc[:, 0] - entry_and_exit.iloc[:, 3]) ** 2 + (entry_and_exit.iloc[:, 1] - entry_and_exit.iloc[:, 4]) ** 2)) / pix_per_mm
#entry_and_exit.at[0, 'Movement'] = 0
#entry_and_exit.at[1, 'Movement'] = 0
all_df_movements_list.append(entry_and_exit)
all_movements = pd.concat(all_df_movements_list, axis=0).reset_index(drop=True)
inside_shape_df_list = [all_movements[i:i + currFps] for i in range(0, all_movements.shape[0], currFps)]
for s_inside_shape in inside_shape_df_list:
movement_list_in_shape.append(s_inside_shape['Movement'].mean())
total_movement_of_animal_in_shape = sum(movement_list_in_shape)
movement_in_ROIs_dict[curr_animal][shape] = total_movement_of_animal_in_shape
except ValueError:
movement_in_ROIs_dict[curr_animal][shape] = 0
video_list, out_list_headers = [CurrentVideoName], ['Video']
for animal in movement_in_ROIs_dict.keys():
animal_dict = movement_in_ROIs_dict[animal]
for shape in animal_dict.keys():
time = movement_in_ROIs_dict[animal][shape]
out_list_headers.append(animal + ' ' + shape + ' (movement inside shape (cm))')
video_list.append(time)
video_df = pd.DataFrame([video_list], columns=out_list_headers)
out_df_list.append(video_df)
if len(filesFound) < 1:
print('No files found. Have you corrected outliers or clicked to skip outlier correction?')
else:
outputDfTimeFilePath , outputDfEntryFilePath = (os.path.join(logFolderPath, 'ROI_time_data_' + dateTime + '.csv'), os.path.join(logFolderPath, 'ROI_entry_data_' + dateTime + '.csv'))
outputDfTime.to_csv(outputDfTimeFilePath, index=False)
outputDfEntries.to_csv(outputDfEntryFilePath, index=False)
print('Summery ROI data saved in ' + 'project_folder\logs.')
print('Detailed per video ROI data saved in ' + 'project_folder\logs\Detailed_ROI_data.')
if calculate_dist:
out_df = pd.concat(out_df_list, axis=0).set_index('Video').astype(int)
out_movement_df_path = os.path.join(logFolderPath, 'Movement_within_ROIs_' + dateTime + '.csv')
out_df.to_csv(out_movement_df_path)
print('Movement-in-ROI data saved | |
'''
Author: <NAME>
Date: 2021-11-28 20:44:31
LastEditTime: 2021-12-15 10:21:31
LastEditors: <NAME>
Description:
FilePath: /UGA_INF/INF101/TP/Projet Final/main.py
'''
from operator import truediv
import random
import time
import distutils.core
import multiprocessing as mp
import pyqtgraph as pg
import numpy as np
def history_save_to_txt(path, data):
    """Append one game's summary line to a history text file.

    Writes a single comma-separated line holding the round count, the
    difficulty, the croupier's first-round and final scores, the
    per-round history and the four end-of-game flags.

    Args:
        path (str): Path of the text file to append to.
        data (dict): One player's state record (see initScores()).
    """
    count_round = len(data["history"])
    success = False
    out = False
    give_up = False
    draw = False
    history = ""
    difficulty = str(data["difficulty"])
    croupier_premier_round = str(data["croupier_premier_round"])
    croupier_value_final = str(data["croupier_value_final"])
    if count_round >= 1:
        # Read each flag once.  (The previous version looped over every
        # key of ``data`` and re-tested the same four flags per key.)
        success = bool(data["success"])
        out = bool(data["out"])
        give_up = bool(data["give_up"])
        draw = bool(data["draw"])
        for key, items in data["history"].items():
            history = history + str(key) + ":" + str(items) + ","
    string = str(
        count_round
    ) + "," + difficulty + "," + croupier_premier_round + "," + croupier_value_final + "," + history + str(
        success) + "," + str(out) + "," + str(give_up) + "," + str(
            draw) + "\n"
    # The with-statement closes the file; no explicit close() needed.
    with open(path, 'a+') as f:
        f.write(string)
def paquet():
    """Build and shuffle a standard 52-card deck.

    Returns:
        list: 52 card names such as "A de pique" or "7 de coeur", in
        random order.
    """
    liste_carte = []
    # One block of 13 ranks per suit; ranks 1, 11, 12, 13 get the named
    # court-card labels.
    for couleur in [" de carreau", " de pique", " de trefle", " de coeur"]:
        for valeur in range(1, 14):
            if valeur == 1:
                nom = "A"
            elif valeur == 11:
                nom = "valet"
            elif valeur == 12:
                nom = "dame"
            elif valeur == 13:
                nom = "roi"
            else:
                nom = str(valeur)
            liste_carte.append(nom + couleur)
    # random.shuffle is a uniform Fisher-Yates shuffle; the previous
    # hand-rolled swap loop was statistically biased and also tracked
    # swapped indices in a list that was never used.
    random.shuffle(liste_carte)
    return liste_carte
def valeurCarte(carte):
    """Return the blackjack value of the given card.

    Args:
        carte (str): Card name, e.g. "A de carreau" or "roi de pique".

    Returns:
        int: 10 for court cards, the face value for number cards, and 0
        for an ace (the caller then lets the player pick 1 or 11).
    """
    rang = str(carte).split(" ")[0]
    # Aces are flagged with 0 so the caller can ask for 1 or 11.
    valeurs_nommees = {"A": 0, "valet": 10, "dame": 10, "roi": 10}
    if rang in valeurs_nommees:
        return valeurs_nommees[rang]
    return int(rang)
def initPioche(n):
    """Build a drawing pile sized for the number of participants.

    One full shuffled 52-card deck is added per participant, so 3
    players give a 156-card pile and 6 players a 312-card pile.

    Args:
        n (int): Number of participants in the game.

    Returns:
        list: The combined pile of n * 52 shuffled cards.
    """
    pioche = []
    for _ in range(n):
        pioche.extend(paquet())
    return pioche
def piocheCarte(liste_pioche, x):
    """Draw the top *x* cards from the pile.

    The pile is mutated in place: the drawn cards are removed from the
    front of the list.

    Args:
        liste_pioche (list): The drawing pile (top card at index 0).
        x (int): Number of cards to draw.

    Returns:
        list: The x cards drawn, in the order they left the pile.

    Bug fixed: the previous version read ``liste_pioche[i]`` while
    deleting index 0 inside the same loop, so the indices shifted and it
    returned every *second* card while still removing the first x cards
    from the pile.
    """
    liste_carte = []
    for _ in range(x):
        # pop(0) returns and removes the current top card atomically.
        liste_carte.append(liste_pioche.pop(0))
    return liste_carte
def initJoueurs(n):
    """Ask for and return the names of the *n* human players.

    Names containing "Ordi" are reserved for the bots and are rejected
    with a message, then asked again.

    Args:
        n (int): Number of human players.

    Returns:
        list: The player names, in the order they were entered.
    """
    def _demander_nom():
        # Re-prompt until the name does not clash with the bot prefix.
        saisie = input("Quel est le nom du joueur? ")
        while "Ordi" in saisie:
            print("Votre nom ne peu pas contenir le mot 'ordi'.")
            saisie = input("Quel est le nom du joueur? ")
        return saisie

    return [_demander_nom() for _ in range(n)]
def initOrdi(n):
    """Return the names of the *n* computer players.

    Args:
        n (int): Number of bots.

    Returns:
        list: Names "Ordi 1" through "Ordi n".
    """
    return ["Ordi " + str(numero + 1) for numero in range(n)]
def initScores(liste_joueurs, liste_ordi, v):
    """Initialise the state record of every participant.

    Builds one fresh dict per participant holding the running score, the
    end-of-game flags, the bet bookkeeping and the per-round history.
    Bots get ``"ordi": True``, humans ``"ordi": False``.

    Args:
        liste_joueurs (list): Names of the human players.
        liste_ordi (list): Names of the bots.
        v (int): Initial score given to everyone (0 by default).

    Returns:
        dict: Mapping of participant name to their state record.
    """
    global difficulty

    def _nouveau_participant(est_ordi):
        # One fresh record per call.  The original duplicated this whole
        # literal for humans and bots; only the "ordi" flag differed.
        return {
            "difficulty": difficulty,
            "score": v,
            "round": 0,
            "croupier_premier_round": 0,
            "croupier_value_final": 0,
            "ordi": est_ordi,
            "give_up": False,
            "out": False,
            "success": False,
            "draw": False,
            "blackjack": False,
            "point": 0,
            "mise": 1000,
            "mise_round": 0,
            "history": {},
        }

    dict_joueurs = {}
    for nom in liste_joueurs:
        dict_joueurs[nom] = _nouveau_participant(False)
    for nom in liste_ordi:
        dict_joueurs[nom] = _nouveau_participant(True)
    return dict_joueurs
def premierTour(liste_pioche, scores):
    """Deal the opening two cards to every participant.

    Draws two cards per participant (regenerating a pile when it runs
    low), adds their values to each score -- asking a human player to
    choose 1 or 11 when an ace is dealt, bots always take 11 -- and then
    settles any first-deal blackjack (score of exactly 21) at 2.5x the
    player's round stake plus the croupier's round stake.

    Args:
        liste_pioche (list): The drawing pile.
        scores (dict): Participant records as built by initScores().

    Returns:
        dict: The same ``scores`` dict, updated in place.
    """
    global mise_croupier
    global mise_croupier_round
    liste_carte_joueurs = []
    for i in range(len(scores)):
        if len(liste_pioche) <= 2:
            # NOTE(review): this rebinds the local name only -- the
            # caller's pile object is NOT refilled.  Confirm intended.
            liste_pioche = initPioche(len(scores))
        valeur_premier_round = piocheCarte(liste_pioche, 2)
        liste_carte_joueurs.append(valeur_premier_round)
    count = 0
    # Walk participants in dict order; liste_carte_joueurs[count] is the
    # two-card hand dealt to the count-th participant above.
    for nom in scores:
        liste_carte_joueur = liste_carte_joueurs[count]
        for carte in liste_carte_joueur:
            temp = int(valeurCarte(carte))
            if temp == 0:
                # valeurCarte() returns 0 for an ace: humans choose its
                # value, bots always count it as 11.
                if scores[nom]["ordi"] == False:
                    if scores[nom]["score"] == 0:
                        print("Cest ton premier tour!")
                    else:
                        print("Tu as %s maintenant." % scores[nom]["score"])
                    nombre = int(
                        input(
                            "Vous avez obtenu un as! Quel valeur voulez-vous choisir? (1 ou 11) "
                        ))
                    if nombre == 1:
                        temp = 1
                    elif nombre == 11:
                        temp = 11
                    else:
                        # Any other answer silently falls back to 1.
                        temp = 1
                else:
                    temp = 11
            scores[nom]["score"] += temp
        count += 1
    # A score of 21 on the very first deal is a blackjack: the player is
    # paid 2.5x their round stake plus the croupier's round stake.
    for nom in scores:
        if scores[nom]["score"] == 21:
            scores[nom]["success"] = True
            scores[nom]["blackjack"] = True
            scores[nom]["point"] += 1
            mise_round = scores[nom]["mise_round"]
            scores[nom]["mise"] += mise_round * 2.5 + mise_croupier_round
            mise_croupier = mise_croupier - mise_croupier_round - mise_round * 2.5
    return scores
def gagnant(scores, valeur_croupier):
    """Compare every standing player's score with the croupier's.

    Players who stood (``give_up``) without busting (``out``) or already
    winning (``success``) are classified: strictly above the croupier
    they are collected as winners, an equal score is refunded and marked
    as a draw, a lower score loses.

    Args:
        scores (dict): Participant records.
        valeur_croupier (int): The croupier's final score.

    Returns:
        tuple: (list of winning names, score of the last winner found,
        or 0 when nobody beat the croupier).
    """
    global mise_croupier
    nom_gagnant_plus = []
    point_gagnant_plus = 0
    for nom, fiche in scores.items():
        # Only players who finished their turn normally are compared.
        if not (fiche["give_up"] and not fiche["out"] and not fiche["success"]):
            continue
        score = fiche["score"]
        if score > valeur_croupier:
            nom_gagnant_plus.append(nom)
            point_gagnant_plus = score
        elif score == valeur_croupier:
            # Draw: the round stake goes back to the player.
            print("%s, vous etes à égalité avec le croupier !" % nom)
            mise_round = fiche["mise_round"]
            fiche["mise"] += mise_round
            mise_croupier -= mise_round
            fiche["draw"] = True
        else:
            print("%s vous avez perdu!" % nom)
    return nom_gagnant_plus, point_gagnant_plus
def joueur_continuer() -> bool:
    """Ask the player whether they want to draw another card.

    Returns:
        bool: True when the player answered exactly "y", False otherwise.
    """
    return input("Voulez-vous piocher? y ou n ") == "y"
def tourJoueur(liste_pioche, j, scores, score_croupier_premier_round):
""" Cette fonction lance un nouveau tour de jeu et peut afficher le nombre de tours passés, le scores de tous les joueurs, et celui de ceux encore en jeu.
Args:
j (str): Nom du joueur
scores (dict): Scores des joueurs
score_croupier_premier_round (int): Score du croupier au | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by <NAME>
25/06/2010
The package contains functions to analyse all sentence of a utterance
Functions:
dispatching : to distribute the sentence
separ_sentence : to process the beginning of the sentence
exclama_sentence : to process exclamatively sentence
w_quest_where : to process many different type of where question
w_quest_class : to process what question about classification
w_quest_what : to process many different type of what question
w_quest_quant : to process many different type of how question
w_quest_how : to process many different type of how question
w_quest_which : to process which question
stc_start_subsentence : to process the subsentence at the beginning of the sentence
w_quest_whose : to process many different type of whose question
w_quest_whom : to process whom question
y_n_ques : to process the yes or no question from of a sentence
other_sentence : to process the other from of a sentence
sentences_analyzer : is the basic function of parsing
"""
from dialogs.sentence import *
from dialogs.sentence_types import *
from dialogs.resources_manager import ResourcePool
from . import analyse_nominal_group
from . import analyse_nominal_structure
from . import analyse_verb
from . import analyse_verbal_structure
from . import other_functions
from . import preprocessing
def dispatching(sentence):
    """
    Route a tokenised sentence to the matching analysis routine.

    The first word is looked up in ResourcePool().sentence_starts; each
    entry x appears to hold [start word, functionality code, question-word
    code] judging by the comparisons below.

    Input=sentence, beginning sentence list        Output=class Sentence
    """
    if len(sentence) > 0:
        # A closing word ('bye', 'goodbye', ...) ends the dialogue.
        if sentence[0].endswith('bye'):
            return [Sentence(END, '', [], [])]
        # Otherwise match the first word against the known sentence starts.
        for x in ResourcePool().sentence_starts:
            # We found a known start word.
            if sentence[0] == x[0]:
                # Code '1': dialogue start.
                if x[1] == '1':
                    return [Sentence(START, '', [], [])]
                # Code '2': a w-question word or a subsentence introducer.
                if x[1] == '2':
                    # A nominal group right after a non-'which' start word
                    # means this is a subsentence, not a question.
                    if sentence[0] != 'which' and analyse_nominal_group.find_sn_pos(sentence, 1) != []:
                        return [stc_start_subsentence(sentence)]
                    # 'when': dropping the first word leaves a yes/no form.
                    if x[2] == '1':
                        return [y_n_ques(W_QUESTION, 'date', sentence[1:])]
                    # 'where'
                    elif x[2] == '2':
                        return [w_quest_where(W_QUESTION, 'place', sentence)]
                    # 'what'
                    elif x[2] == '3':
                        # 'what type/kind ...' asks for a classification.
                        if sentence[1] == 'type' or sentence[1] == 'kind':
                            # Process the end of the sentence like a y_n_question.
                            return [w_quest_class(sentence)]
                        # Any other kind of 'what' question.
                        else:
                            return [w_quest_what(W_QUESTION, sentence)]
                    # 'how'
                    elif x[2] == '4':
                        if sentence[1] == 'many' or sentence[1] == 'much':
                            return [w_quest_quant(W_QUESTION, 'quantity', sentence)]
                        elif sentence[1] == 'about':
                            # Replace 'about' by 'is' to get a yes/no form.
                            sentence[1] = 'is'
                            return [y_n_ques(W_QUESTION, 'invitation', sentence[1:])]
                        # Any other kind of 'how' question.
                        else:
                            return [w_quest_how(W_QUESTION, sentence)]
                    # 'why'
                    elif x[2] == '5':
                        return [y_n_ques(W_QUESTION, 'reason', sentence[1:])]
                    # 'whose'
                    elif x[2] == '6':
                        return [w_quest_whose(W_QUESTION, 'owner', sentence)]
                    # 'who'
                    elif x[2] == '7':
                        return [y_n_ques(W_QUESTION, 'people', sentence[1:])]
                    # 'which'
                    elif x[2] == '8':
                        return [w_quest_which(W_QUESTION, 'choice', sentence[1:])]
                    # 'to whom'
                    elif x[2] == '9':
                        return [w_quest_whom(W_QUESTION, 'people', sentence[1:])]
                # Code '3': starts with an auxiliary -> yes/no question.
                elif x[1] == '3':
                    return [y_n_ques(YES_NO_QUESTION, '', sentence)]
                # Code '4': conditional subsentence.
                elif x[1] == '4':
                    return [stc_start_subsentence(sentence)]
                # Code '5': agreement.
                elif x[1] == '5':
                    return separ_sentence(sentence, AGREEMENT)
                # Code '6': disagreement.
                elif x[1] == '6':
                    return separ_sentence(sentence, DISAGREEMENT)
                # Code '7': congratulation.
                elif x[1] == '7':
                    return separ_sentence(sentence, GRATULATION)
                # Code '8': interjection.
                elif x[1] == '8':
                    return [exclama_sentence(sentence)]
        # A trailing '!' marks an exclamatory sentence.
        if sentence[len(sentence) - 1] == '!':
            return [exclama_sentence(sentence)]
        # Fallback: a statement or an imperative sentence.
        return [other_sentence('', '', sentence)]
    # Empty input: nothing to analyse.
    return []
def separ_sentence(sentence, data_type):
    """
    Build the Sentence object(s) for a sentence-start expression.

    "good morning/evening/afternoon" is treated as a dialogue start
    rather than an agreement.  When a ';' is present, the part before it
    becomes the aim of the first sentence and the remainder is analysed
    as a complete sentence of its own.

    Input=the sentence                Output=class Sentence
    """
    # 'good' followed by a moment of the day is a greeting, not agreement.
    if data_type == AGREEMENT and len(sentence) > 1 and sentence[1] in (
            'morning', 'evening', 'afternoon'):
        sentences = [Sentence(START, '', [], [])]
    else:
        sentences = [Sentence(data_type, '', [], [])]
    if ';' in sentence:
        # The part before the first ';' is the aim of the first sentence.
        coupure = sentence.index(';')
        sentences[0].aim = " ".join(sentence[:coupure]).rstrip('; ') + '.'
        # The remainder is processed as a complete sentence.
        reste = preprocessing.process_and_beginning_sentence(sentence[coupure + 1:])
        sentences = sentences + dispatching(reste)
    elif sentence:
        # No ';': the whole sentence is the aim.
        sentences[0].aim = " ".join(sentence).rstrip('. ') + '.'
    return sentences
def exclama_sentence(sentence):
    """
    Analyse an exclamatory sentence.

    A known interjection start yields an INTERJECTION sentence, a known
    w-start an EXCLAMATION; anything else is parsed as a regular
    sentence and re-labelled IMPERATIVE when it carries a verbal group.

    Input=the sentence                Output=class Sentence
    """
    premier_mot = sentence[0]
    for entree in ResourcePool().sentence_starts:
        if entree[0] != premier_mot:
            continue
        if entree[1] == '0':
            # Pure interjection: recover the subject after the start word.
            analysis = Sentence(INTERJECTION, '', [], [])
            analyse_nominal_structure.recover_ns(sentence, analysis, 1)
            return analysis
        if entree[1] == '2':
            # Exclamation: the subject starts at the first word.
            analysis = Sentence(EXCLAMATION, '', [], [])
            analyse_nominal_structure.recover_ns(sentence, analysis, 0)
            return analysis
    # Fall back to a normal parse; a verbal group forces the imperative.
    analysis = other_sentence(INTERJECTION, '', sentence)
    if not (analysis.data_type == INTERJECTION and not analysis.sv):
        analysis.data_type = IMPERATIVE
    return analysis
def w_quest_where(type, request, stc):
    """
    Analyse the different kinds of 'where' question.

    A sentence ending in 'from' (possibly before a final '?') asks for
    an origin rather than a place.

    Input=type and requesting of sentence, the sentence Output=class Sentence
    """
    dernier = stc[len(stc) - 1]
    demande_origine = dernier == 'from' or (dernier == '?' and stc[len(stc) - 2] == 'from')
    # Without its question word the sentence parses as a yes/no question.
    if demande_origine:
        return y_n_ques(type, 'origin', stc[1:])
    return y_n_ques(type, request, stc[1:])
def w_quest_class(sentence):
    """
    Analyse a 'what type/kind of X ...' classification question.

    The classified noun (word 4) is appended to the aim and the rest of
    the sentence is parsed as a yes/no question.

    Input=sentence                    Output=class Sentence
    """
    analysis = y_n_ques(W_QUESTION, 'classification+' + sentence[4], sentence[5:])
    if analysis.sn and analysis.sv[0].d_obj:
        # The direct object must stay empty: move it to the indirect
        # complements instead.
        groupe_verbal = analysis.sv[0]
        groupe_verbal.i_cmpl = groupe_verbal.i_cmpl + [IndirectComplement([], groupe_verbal.d_obj)]
        groupe_verbal.d_obj = []
    return analysis
def w_quest_what(type, sentence):
    """
    Analyse the different kinds of 'what' question.

    Input=type of sentence, the sentence and position of subject
    Output=class Sentence
    """
    aux_list = other_functions.recover_aux_list()
    if sentence[1] not in aux_list:
        # A noun follows 'what': treat it like a 'which' question by
        # substituting a determiner for the question word.
        return w_quest_which(type, 'thing', ['the'] + sentence[1:])
    # 'what' + auxiliary parses like a yes/no question about a thing.
    analysis = y_n_ques(type, 'thing', sentence[1:])
    vg = analysis.sv[0]
    verbe = vg.vrb_main[0]
    if verbe.endswith('happen'):
        analysis.aim = 'situation'
    elif verbe.endswith('think+of') or verbe.endswith('think+about'):
        analysis.aim = 'opinion'
    elif verbe.endswith('like') and not vg.vrb_tense.endswith('conditional'):
        analysis.aim = 'description'
    elif verbe.endswith('do') and \
            vg.i_cmpl != [] and \
            vg.i_cmpl[0].gn[0].adj != [] and \
            vg.i_cmpl[0].gn[0].adj[0][0].endswith('ing'):
        # 'what ... do' followed by an -ing form asks for an explanation.
        analysis.aim = 'explication'
    return analysis
def w_quest_quant(type, request, sentence):
    """
    Process the different kinds of quantity ('how many/much') questions.
    Input=type and requesting of sentence, the sentence and beginning sentence list
    Output=class Sentence
    """
    for start in ResourcePool().sentence_starts:
        # When the word after the quantity is a sentence starter of kind '3',
        # the remainder is processed exactly like a yes/no question.
        if sentence[2] == start[0] and start[1] == '3':
            return y_n_ques(type, request, sentence[2:])
    analysis = y_n_ques(type, request, sentence[3:])
    quantified = NominalGroup(['a'], [sentence[2]], [], [], [])
    if not analysis.sn:
        # No subject was found: the quantified noun becomes the subject.
        analysis.sn = [quantified]
    else:
        # Otherwise it fills the (empty) direct-object slot.
        analysis.sv[0].d_obj = [quantified]
    return analysis
def w_quest_how(type, sentence):
    """
    Process the different kinds of 'how' questions.
    Input=type of sentence, the sentence   Output=class Sentence
    """
    if sentence[1] in other_functions.recover_aux_list():
        # 'how' + auxiliary asks about manner ...
        analysis = y_n_ques(type, 'manner', sentence[1:])
        if analysis.sv[0].vrb_main[0].endswith('like'):
            # ... except 'how do you like ...', which asks for an opinion.
            analysis.aim = 'opinion'
        return analysis
    # Otherwise the word right after 'how' (e.g. 'long', 'far') is the aim.
    return y_n_ques(type, sentence[1], sentence[2:])
def w_quest_which(type, request, sentence):
"""
process which question
Input=type of sentence, the sentence Output=class Sentence
"""
#We start by finding the nominal group
gr = preprocessing.determination_nominal_group(sentence, 0, 'of')
#If the nominal group contain just 2 elements
if len(gr) == 2:
return y_n_ques(type, sentence[1], sentence[2:])
else:
#After the first gr if there is no nominal group
if not analyse_nominal_group.find_sn_pos(sentence, len(gr)):
for i in ResourcePool().sentence_starts:
#If just | |
# <reponame>ketyi/dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import random
import numpy as np
import torch.multiprocessing as mp
from torch.multiprocessing import Queue
def init_emb2neg_index(negative, batch_size):
    '''select embedding of negative nodes from a batch of node embeddings
    for fast negative sampling

    Return
    ------
    index_emb_negu torch.LongTensor : the indices of u_embeddings
    index_emb_negv torch.LongTensor : the indices of v_embeddings

    Usage
    -----
    # emb_u.shape: [batch_size, dim]
    batch_emb2negu = torch.index_select(emb_u, 0, index_emb_negu)
    '''
    # Each node in the batch gets `negative` negative partners.
    idx_u = list(range(batch_size)) * negative
    idx_v = list(range(batch_size)) * negative
    # Shuffling the v side makes the in-batch pairing (approximately) random.
    random.shuffle(idx_v)
    return torch.LongTensor(idx_u), torch.LongTensor(idx_v)
def adam(grad, state_sum, nodes, lr, device, only_gpu):
    """ calculate gradients according to adam """
    # Per-node mean of squared gradients (the second-moment estimate).
    grad_sum = grad.mul(grad).mean(1)
    if not only_gpu:
        # In mixed mode state_sum lives on the CPU.
        grad_sum = grad_sum.cpu()
    state_sum.index_add_(0, nodes, grad_sum)  # accumulate on the state side
    # Rescale the raw gradient by the running RMS (+eps for stability);
    # note state_sum[nodes] is an advanced-indexing copy, so sqrt_ is safe.
    std_values = state_sum[nodes].to(device).sqrt_().add_(1e-10).unsqueeze(1)
    return lr * grad / std_values
def async_update(num_threads, model, queue):
    """ Asynchronous embedding update for entity embeddings.

    Consumes (grad_u, grad_v, grad_v_neg, nodes, neg_nodes, first_flag)
    tuples from `queue` and applies them to the model's embedding tables,
    until a sentinel tuple with grad_u None arrives.
    """
    torch.set_num_threads(num_threads)
    print("async start")
    while True:
        grad_u, grad_v, grad_v_neg, nodes, neg_nodes, first_flag = queue.get()
        if grad_u is None:
            # Shutdown sentinel from the trainer.
            return
        with torch.no_grad():
            if first_flag:
                # First-order proximity: a single table serves both u and v.
                weight = model.fst_u_embeddings.weight.data
                weight.index_add_(0, nodes[:, 0], grad_u)
                weight.index_add_(0, nodes[:, 1], grad_v)
                if neg_nodes is not None:
                    weight.index_add_(0, neg_nodes, grad_v_neg)
            else:
                # Second-order proximity: separate u and v (context) tables.
                model.snd_u_embeddings.weight.data.index_add_(0, nodes[:, 0], grad_u)
                model.snd_v_embeddings.weight.data.index_add_(0, nodes[:, 1], grad_v)
                if neg_nodes is not None:
                    model.snd_v_embeddings.weight.data.index_add_(0, neg_nodes, grad_v_neg)
class SkipGramModel(nn.Module):
""" Negative sampling based skip-gram """
    def __init__(self,
                 emb_size,
                 emb_dimension,
                 batch_size,
                 only_cpu,
                 only_gpu,
                 only_fst,
                 only_snd,
                 mix,
                 neg_weight,
                 negative,
                 lr,
                 lap_norm,
                 fast_neg,
                 record_loss,
                 async_update,
                 num_threads,
                 ):
        """ initialize embedding on CPU

        Parameters
        ----------
        emb_size int : number of nodes
        emb_dimension int : embedding dimension
        batch_size int : number of node sequences in each batch
        only_cpu bool : training with CPU
        only_gpu bool : training with GPU
        only_fst bool : only embedding for first-order proximity
        only_snd bool : only embedding for second-order proximity
        mix bool : mixed training with CPU and GPU
        negative int : negative samples for each positve node pair
        neg_weight float : negative weight
        lr float : initial learning rate
        lap_norm float : weight of laplacian normalization
        fast_neg bool : do negative sampling inside a batch
        record_loss bool : print the loss during training
        async_update bool : asynchronous training
        num_threads int : number of threads used by the async update worker
        """
        super(SkipGramModel, self).__init__()
        self.emb_size = emb_size
        self.batch_size = batch_size
        self.only_cpu = only_cpu
        self.only_gpu = only_gpu
        if only_fst:
            self.fst = True
            self.snd = False
            self.emb_dimension = emb_dimension
        elif only_snd:
            self.fst = False
            self.snd = True
            self.emb_dimension = emb_dimension
        else:
            # Train both orders; each order gets half the requested dimension
            # so the concatenated embedding has the requested size.
            self.fst = True
            self.snd = True
            self.emb_dimension = int(emb_dimension / 2)
        self.mixed_train = mix
        self.neg_weight = neg_weight
        self.negative = negative
        self.lr = lr
        self.lap_norm = lap_norm
        self.fast_neg = fast_neg
        self.record_loss = record_loss
        self.async_update = async_update
        self.num_threads = num_threads

        # initialize the device as cpu
        self.device = torch.device("cpu")

        # embedding tables (sparse gradients; updated manually, not by autograd)
        initrange = 1.0 / self.emb_dimension
        if self.fst:
            self.fst_u_embeddings = nn.Embedding(
                self.emb_size, self.emb_dimension, sparse=True)
            init.uniform_(self.fst_u_embeddings.weight.data, -initrange, initrange)
        if self.snd:
            self.snd_u_embeddings = nn.Embedding(
                self.emb_size, self.emb_dimension, sparse=True)
            init.uniform_(self.snd_u_embeddings.weight.data, -initrange, initrange)
            self.snd_v_embeddings = nn.Embedding(
                self.emb_size, self.emb_dimension, sparse=True)
            # Context table starts at zero (word2vec convention).
            init.constant_(self.snd_v_embeddings.weight.data, 0)

        # lookup_table is used for fast sigmoid computing over [-6.01, 6.01)
        self.lookup_table = torch.sigmoid(torch.arange(-6.01, 6.01, 0.01))
        # Clamp the table ends so clipped scores saturate exactly at 0/1.
        self.lookup_table[0] = 0.
        self.lookup_table[-1] = 1.
        if self.record_loss:
            self.logsigmoid_table = torch.log(torch.sigmoid(torch.arange(-6.01, 6.01, 0.01)))
            self.loss_fst = []
            self.loss_snd = []

        # indexes to select positive/negative node pairs from batch_walks
        self.index_emb_negu, self.index_emb_negv = init_emb2neg_index(self.negative, self.batch_size)

        # adam second-moment accumulators (kept on CPU until moved to a device)
        if self.fst:
            self.fst_state_sum_u = torch.zeros(self.emb_size)
        if self.snd:
            self.snd_state_sum_u = torch.zeros(self.emb_size)
            self.snd_state_sum_v = torch.zeros(self.emb_size)
    def create_async_update(self):
        """ Set up the async update subprocess.

        Creates a size-1 queue for gradient hand-off and starts a
        subprocess running the module-level `async_update` loop over it.
        """
        self.async_q = Queue(1)
        self.async_p = mp.Process(target=async_update, args=(self.num_threads, self, self.async_q))
        self.async_p.start()
def finish_async_update(self):
""" Notify the async update subprocess to quit.
"""
self.async_q.put((None, None, None, None, None))
self.async_p.join()
def share_memory(self):
""" share the parameters across subprocesses """
if self.fst:
self.fst_u_embeddings.weight.share_memory_()
self.fst_state_sum_u.share_memory_()
if self.snd:
self.snd_u_embeddings.weight.share_memory_()
self.snd_v_embeddings.weight.share_memory_()
self.snd_state_sum_u.share_memory_()
self.snd_state_sum_v.share_memory_()
    def set_device(self, gpu_id):
        """ set gpu device

        Moves the sigmoid/logsigmoid lookup tables and the cached
        negative-sampling index tensors onto cuda:<gpu_id> so hot-path
        lookups stay device-local. Embedding tables are NOT moved here
        (see all_to_device).
        """
        self.device = torch.device("cuda:%d" % gpu_id)
        print("The device is", self.device)
        self.lookup_table = self.lookup_table.to(self.device)
        if self.record_loss:
            self.logsigmoid_table = self.logsigmoid_table.to(self.device)
        self.index_emb_negu = self.index_emb_negu.to(self.device)
        self.index_emb_negv = self.index_emb_negv.to(self.device)
    def all_to_device(self, gpu_id):
        """ move all of the parameters to a single GPU

        Calls set_device for the lookup/index tensors, then moves every
        embedding table together with its adam accumulator.
        """
        self.device = torch.device("cuda:%d" % gpu_id)
        self.set_device(gpu_id)
        if self.fst:
            self.fst_u_embeddings = self.fst_u_embeddings.cuda(gpu_id)
            self.fst_state_sum_u = self.fst_state_sum_u.to(self.device)
        if self.snd:
            self.snd_u_embeddings = self.snd_u_embeddings.cuda(gpu_id)
            self.snd_v_embeddings = self.snd_v_embeddings.cuda(gpu_id)
            self.snd_state_sum_u = self.snd_state_sum_u.to(self.device)
            self.snd_state_sum_v = self.snd_state_sum_v.to(self.device)
def fast_sigmoid(self, score):
""" do fast sigmoid by looking up in a pre-defined table """
idx = torch.floor((score + 6.01) / 0.01).long()
return self.lookup_table[idx]
def fast_logsigmoid(self, score):
""" do fast logsigmoid by looking up in a pre-defined table """
idx = torch.floor((score + 6.01) / 0.01).long()
return self.logsigmoid_table[idx]
def fast_pos_bp(self, emb_pos_u, emb_pos_v, first_flag):
""" get grad for positve samples """
pos_score = torch.sum(torch.mul(emb_pos_u, emb_pos_v), dim=1)
pos_score = torch.clamp(pos_score, max=6, min=-6)
# [batch_size, 1]
score = (1 - self.fast_sigmoid(pos_score)).unsqueeze(1)
if self.record_loss:
if first_flag:
self.loss_fst.append(torch.mean(self.fast_logsigmoid(pos_score)).item())
else:
self.loss_snd.append(torch.mean(self.fast_logsigmoid(pos_score)).item())
# [batch_size, dim]
if self.lap_norm > 0:
grad_u_pos = score * emb_pos_v + self.lap_norm * (emb_pos_v - emb_pos_u)
grad_v_pos = score * emb_pos_u + self.lap_norm * (emb_pos_u - emb_pos_v)
else:
grad_u_pos = score * emb_pos_v
grad_v_pos = score * emb_pos_u
return grad_u_pos, grad_v_pos
def fast_neg_bp(self, emb_neg_u, emb_neg_v, first_flag):
""" get grad for negative samples """
neg_score = torch.sum(torch.mul(emb_neg_u, emb_neg_v), dim=1)
neg_score = torch.clamp(neg_score, max=6, min=-6)
# [batch_size * negative, 1]
score = - self.fast_sigmoid(neg_score).unsqueeze(1)
if self.record_loss:
if first_flag:
self.loss_fst.append(self.negative * self.neg_weight * torch.mean(self.fast_logsigmoid(-neg_score)).item())
else:
self.loss_snd.append(self.negative * self.neg_weight * torch.mean(self.fast_logsigmoid(-neg_score)).item())
grad_u_neg = self.neg_weight * score * emb_neg_v
grad_v_neg = self.neg_weight * score * emb_neg_u
return grad_u_neg, grad_v_neg
def fast_learn(self, batch_edges, neg_nodes=None):
""" Learn a batch of edges in a fast way. It has the following features:
1. It calculating the gradients directly without the forward operation.
2. It does sigmoid by a looking up table.
Specifically, for each positive/negative node pair (i,j), the updating procedure is as following:
score = self.fast_sigmoid(u_embedding[i].dot(v_embedding[j]))
# label = 1 for positive samples; label = 0 for negative samples.
u_embedding[i] += (label - score) * v_embedding[j]
v_embedding[i] += (label - score) * u_embedding[j]
Parameters
----------
batch_edges list : a list of node sequnces
neg_nodes torch.LongTensor : a long tensor of sampled true negative nodes. If neg_nodes is None,
then do negative sampling randomly from the nodes in batch_walks as an alternative.
Usage example
-------------
batch_walks = torch.LongTensor([[1,2], [3,4], [5,6]])
neg_nodes = None
"""
lr = self.lr
# [batch_size, 2]
nodes = batch_edges
if self.only_gpu:
nodes = nodes.to(self.device)
if neg_nodes is not None:
neg_nodes = neg_nodes.to(self.device)
bs = len(nodes)
if self.fst:
emb_u = self.fst_u_embeddings(nodes[:, 0]).view(-1, self.emb_dimension).to(self.device)
emb_v = self.fst_u_embeddings(nodes[:, 1]).view(-1, self.emb_dimension).to(self.device)
## Postive
emb_pos_u, emb_pos_v = emb_u, emb_v
grad_u_pos, grad_v_pos = self.fast_pos_bp(emb_pos_u, emb_pos_v, True)
## Negative
emb_neg_u = emb_pos_u.repeat((self.negative, 1))
if bs < self.batch_size:
index_emb_negu, index_emb_negv = init_emb2neg_index(self.negative, bs)
index_emb_negu = index_emb_negu.to(self.device)
index_emb_negv = index_emb_negv.to(self.device)
else:
index_emb_negu = self.index_emb_negu
index_emb_negv = self.index_emb_negv
if neg_nodes is None:
emb_neg_v = torch.index_select(emb_v, 0, index_emb_negv)
else:
emb_neg_v = self.fst_u_embeddings.weight[neg_nodes].to(self.device)
grad_u_neg, grad_v_neg = self.fast_neg_bp(emb_neg_u, emb_neg_v, True)
## Update
grad_u_pos.index_add_(0, index_emb_negu, grad_u_neg)
grad_u = grad_u_pos
if neg_nodes is None:
grad_v_pos.index_add_(0, index_emb_negv, grad_v_neg)
grad_v = grad_v_pos
else:
grad_v = grad_v_pos
# use adam optimizer
grad_u = adam(grad_u, self.fst_state_sum_u, nodes[:, 0], lr, self.device, self.only_gpu)
grad_v = adam(grad_v, self.fst_state_sum_u, nodes[:, 1], lr, self.device, self.only_gpu)
if neg_nodes is not None:
grad_v_neg = adam(grad_v_neg, self.fst_state_sum_u, neg_nodes, lr, self.device, self.only_gpu)
if self.mixed_train:
grad_u = grad_u.cpu()
grad_v = grad_v.cpu()
if neg_nodes is not None:
grad_v_neg = grad_v_neg.cpu()
else:
grad_v_neg = None
if self.async_update:
grad_u.share_memory_()
grad_v.share_memory_()
nodes.share_memory_()
if neg_nodes is not None:
neg_nodes.share_memory_()
grad_v_neg.share_memory_()
self.async_q.put((grad_u, grad_v, grad_v_neg, nodes, neg_nodes, True))
if not self.async_update:
self.fst_u_embeddings.weight.data.index_add_(0, nodes[:, 0], grad_u)
self.fst_u_embeddings.weight.data.index_add_(0, nodes[:, 1], grad_v)
if neg_nodes is not None:
self.fst_u_embeddings.weight.data.index_add_(0, neg_nodes, grad_v_neg)
if self.snd:
emb_u = self.snd_u_embeddings(nodes[:, 0]).view(-1, self.emb_dimension).to(self.device)
emb_v = self.snd_v_embeddings(nodes[:, 1]).view(-1, self.emb_dimension).to(self.device)
## Postive
emb_pos_u, emb_pos_v = emb_u, emb_v
grad_u_pos, grad_v_pos = self.fast_pos_bp(emb_pos_u, emb_pos_v, False)
## Negative
emb_neg_u = emb_pos_u.repeat((self.negative, 1))
if bs < self.batch_size:
index_emb_negu, index_emb_negv = init_emb2neg_index(self.negative, bs)
index_emb_negu = index_emb_negu.to(self.device)
index_emb_negv = index_emb_negv.to(self.device)
else:
index_emb_negu = self.index_emb_negu
index_emb_negv = self.index_emb_negv
if neg_nodes is None:
emb_neg_v = torch.index_select(emb_v, 0, index_emb_negv)
else:
emb_neg_v = self.snd_v_embeddings.weight[neg_nodes].to(self.device)
grad_u_neg, grad_v_neg = self.fast_neg_bp(emb_neg_u, | |
import copy, StringIO
import ete3
import warnings
from Bio import Phylo
from Bio.Phylo.TreeConstruction import DistanceTreeConstructor
from Bio.Phylo.TreeConstruction import _DistanceMatrix
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import haplotools as hap
def newick_to_node_name(nwk):
    """
    Create a formalized node name.

    Parses *nwk* as an HsTree (retrying with a trailing ';' appended) and
    returns the canonical newick string with descendants sorted.
    """
    try:
        tree = HsTree(nwk)
    except ete3.parser.newick.NewickError:
        # The string may simply lack the trailing ';' terminator; if the
        # retry fails too, that second error propagates to the caller.
        tree = HsTree(nwk + ';')
    tree.sort_descendants()
    return tree.write(format=9)[:-1]
class MassMigration(object):
    """A pulse of migration moving *fraction* of lineages from *source*
    to *destination* (both tree nodes) at time *time*."""

    def __init__(self, source, destination, fraction, time):
        self.source = source            # origin population (tree node)
        self.destination = destination  # target population (tree node)
        self.fraction = fraction        # fraction of lineages that migrate
        self.time = time                # time of the event

    def __eq__(self, other):
        # Value equality over all attributes, restricted to the same class.
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    @classmethod
    def from_dict(cls, d=None):
        # Not implemented yet; the original stub also lacked the `cls`
        # parameter, which made any call raise TypeError.
        pass

    def to_dict(self):
        """Serialise the event using canonical node names.

        Fixes two defects in the original: it called the nonexistent
        ``self.get_time()`` (AttributeError) and used the misspelled key
        'destination_nane'. Since the method always crashed before, no
        caller can depend on the old behavior.
        """
        return {'source_name': self.source.get_name(),
                'destination_name': self.destination.get_name(),
                'fraction': self.fraction,
                'time': self.time}
class HsTree(ete3.Tree):
    """ete3.Tree subclass that carries a list of MassMigration events and
    can plot itself with those events drawn as arrows.

    NOTE: the methods use ``tree``/``node`` instead of the conventional
    ``self`` as the first parameter name.
    """
    # USE_NODE_DICT = True

    def __init__(tree, *args, **kwa):
        super(HsTree, tree).__init__(*args, **kwa)
        # Mass-migration events attached to this tree (see add_mass_migration).
        tree.mass_migrations = []
        # if HsTree.USE_NODE_DICT:
        #     tree._create_node_dict()

    # def _create_node_dict(tree):
    #     max_dist = max([tree.get_distance(l) for l in tree.get_leaves()])
    #     tree.node_dict = {}
    #     for node in tree.traverse():
    #         node.time = max_dist - tree.get_distance(node)
    #         node.name = node.get_name()
    #         tree.node_dict.update({node.name: node})

    # def get_node(tree, node_name):
    #     return tree.node_dict[node_name]

    def get_time(node):
        """Return the node's time before present: the root-to-farthest-leaf
        depth minus the node's own distance from the root."""
        rt = node.get_tree_root()
        max_dist = rt.get_farthest_leaf()[1]
        return max_dist - rt.get_distance(node)

    def get_name(tree):
        """Return a canonical newick string (sorted descendants, topology-only
        format 9, trailing ';' stripped) usable as a stable node identifier."""
        node1 = copy.deepcopy(tree)
        node1.sort_descendants()
        s = node1.write(format=9)[:-1]
        return s

    def add_mass_migration(tree, source, destination, fraction, time):
        """Register a MassMigration event on this tree (duplicates ignored)."""
        # if HsTree.USE_NODE_DICT:
        #     source = tree.node_dict[source_name]
        #     destination = tree.node_dict[destination_name]
        # else:
        #     for node in tree.traverse():
        #         if node.get_name() == source_name:
        #             source = node
        #         if node.get_name() == destination_name:
        #             destintion = node
        mm = MassMigration(source, destination, fraction, time)
        if mm not in tree.mass_migrations:
            tree.mass_migrations.append(mm)

    def add_property_to_nodes(tree, property_name, property_node_dict):
        """
        Adds the attribute property_name to nodes with newick
        given as property_node_dict keys and property values as
        dictionary values.

        TODO: Update for case if node_dict is available.

        Example:
        print tree

                 /-A1
              /-|
           /-|   \-A2
          |  |
        --|   \-B
          |
           \-C

        property='ne'
        property_node_dict={'(A2,A1);':4, 'C;':1}

        This adds the property node.ne to the nodes:
           /-A1
        --|
           \-A2
        --C
        """
        # Canonicalise the newick keys so they match node.get_name() output.
        dic = {}
        for k,v in property_node_dict.iteritems():
            dic[newick_to_node_name(k)] = v
        for node in tree.traverse():
            node_name = node.get_name()
            try:
                setattr(node, property_name, dic[node_name])
            except KeyError:
                # Nodes without an entry simply don't get the attribute.
                pass

    def add_properties_to_nodes(tree, properties, properties_node_dict):
        """
        Adds the attributes in the list properties to nodes with newick
        given as property_node_dict keys and a dictionary of
        {property:value} as dictionary values.

        Example:
        print tree

                 /-A1
              /-|
           /-|   \-A2
          |  |
        --|   \-B
          |
           \-C

        properties=['ne', 'color']
        property_node_dict={'(A2,A1);': {'ne':4, 'color': 'black'},
                            'C;': {'color': 'green'}}
        """
        dic = {}
        for k,v in properties_node_dict.iteritems():
            dic[newick_to_node_name(k)] = v
        for node in tree.traverse():
            node_name = node.get_name()
            for prop in properties:
                try:
                    setattr(node, prop, dic[node_name][prop])
                except KeyError:
                    # Missing node or missing property: skip silently.
                    pass

    def plot(tree, ax=None, style='orthogonal',
             node_name_fun=None,
             node_name_format_fun=None,
             leaf_name_fun=None,
             leaf_name_format_fun=None,
             line_format_fun=None,
             migration_arrow_format_fun=None,
             xtick_label_format_fun=None):
        """
        Plot ete tree.

        Draws the tree on a matplotlib axis with time running towards 0 on
        the x-axis (negative values = past), one row per leaf on the y-axis,
        and any registered mass migrations drawn as red arrows. The various
        *_fun callbacks customise labels and per-element formatting; each
        receives a node and returns a label (or falsy to skip) or a dict of
        matplotlib keyword overrides. Returns the axis.
        """
        default_node_format_args = dict(xycoords='data', ha='center',
                                        xytext=(0,1),
                                        textcoords='offset points',
                                        va='bottom',
                                        bbox=dict(boxstyle="round,pad=0.05", fc="w", alpha=0.5, lw=0),
                                        size=11)
        default_leaf_format_args = {'xytext':(5,0),
                                    'textcoords':'offset points',
                                    'va':'center'}
        default_line_format_args = {'color':'k'}
        default_migration_arrow_format_args = dict(arrowstyle="->, head_length = 0.5, head_width = .5",
                                                   color='r', linestyle='solid',linewidth=2,
                                                   zorder=-1)
        leaf_order = tree.get_leaf_names()
        if ax is None:
            # Figure height scales with the number of leaves.
            fig = plt.figure(figsize=(12,len(leaf_order)*0.3))
            ax = plt.gca()
        assert style in ['orthogonal', 'diagonal']
        # don't plot node names if no function given
        if node_name_fun is None:
            node_name_fun = lambda node: False
        if node_name_format_fun is None:
            node_name_format_fun = lambda node: {}
        # plot leaf.name as leaf name by default
        if leaf_name_fun is None:
            leaf_name_fun = lambda node: node.name
        if leaf_name_format_fun is None:
            leaf_name_format_fun = lambda node: {}
        if line_format_fun is None:
            line_format_fun = lambda node: {}
        if migration_arrow_format_fun is None:
            migration_arrow_format_fun = lambda node: {}
        if xtick_label_format_fun is None:
            # X tick labels show time before present as positive numbers.
            xtick_label_format_fun = lambda x, p: format(-int(x), ',')
        max_dist = max([tree.get_distance(l) for l in tree.get_leaves()])
        # Post-order traversal: children get their y coordinate before parents.
        for i, node in enumerate(tree.traverse('postorder')):
            time = node.get_time()
            if node.is_leaf():
                node.y = -leaf_order.index(node.name)
                leaf_name = leaf_name_fun(node)
                if leaf_name:
                    leaf_format_args = copy.deepcopy(default_leaf_format_args)
                    leaf_format_args.update(leaf_name_format_fun(node))
                    x = ax.annotate(leaf_name, xy=(-time, node.y),
                                    xycoords='data', **leaf_format_args)
            else:
                # Internal node: centred between its two children.
                l = node.children[0]
                r = node.children[1]
                node.y = (l.y+r.y)/2.
                for c in (l,r):
                    line_format_args = copy.deepcopy(default_line_format_args)
                    line_format_args.update(line_format_fun(c))
                    if style == 'orthogonal':
                        ax.hlines(c.y, -time, -c.get_time(), **line_format_args)
                        ax.vlines(-time,*sorted([c.y,node.y]), **line_format_args)
                    elif style == 'diagonal':
                        ax.plot([-time,-c.get_time()],[node.y, c.y])
                    if not c.is_leaf():
                        node_name = node_name_fun(c)
                        if node_name:
                            node_format_args = copy.deepcopy(default_node_format_args)
                            node_format_args.update(node_name_format_fun(c))
                            ax.annotate(node_name, xy=((-time-c.get_time())/2., c.y),
                                        **node_format_args)
                            #if "Haplochromis" in node_name:
                            #    print 'wtf'
                            #    return node
        for mm in tree.mass_migrations:
            #print "plotting migration one", mm.time, mm.source.get_name(), mm.destination.get_name()
            #ax.plot([-mm.time, -mm.time],sorted([mm.source.y, mm.destination.y]), color='r')
            #ax.arrow(-mm.time, mm.destination.y, 0 , mm.source.y - mm.destination.y,
            #         length_includes_head=True, color='r', linestyle='dashed')
            migration_arrow_format_args = copy.deepcopy(default_migration_arrow_format_args)
            # NOTE(review): 'c' below leaks from the traversal loop above; this
            # was presumably meant to depend on 'mm' — confirm before relying on
            # per-arrow styling via migration_arrow_format_fun.
            migration_arrow_format_args.update(migration_arrow_format_fun(c))
            ax.annotate("",xytext=(-mm.time, mm.destination.y), xy=(-mm.time,mm.source.y),
                        arrowprops=migration_arrow_format_args)
            ax.annotate("{}%".format(int(round(mm.fraction*100))), xy=(-mm.time, (mm.destination.y + mm.source.y)/2.),
                        ha='right',va='center', xytext=(-3,0),bbox=dict(boxstyle="round,pad=0.1", fc="w", alpha=0.5, lw=0),
                        textcoords='offset points', color='r')
        # Cosmetic clean-up: hide spines and the y axis, ticks at the bottom.
        ax.spines['left'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.set_yticks([])
        ax.set_ylabel('')
        ymin, ymax = ax.get_ylim()
        ax.set_ylim([ymin-(ymax-ymin)*0.05,ymax+(ymax-ymin)*0.01])
        ax.xaxis.tick_bottom()
        ax.get_xaxis().set_major_formatter(
            mpl.ticker.FuncFormatter(xtick_label_format_fun))
        return ax
def set_leaf_order(tree, order):
    """
    Changes the tree so that the leaves
    conform to order.

    The order must be consistent with
    the branching structure.

    Parameters:
    tree ... ete3 tree object
    order ... list of leaf names
    Returns:
    None (tree changed in place)
    """
    for node in tree.traverse('postorder'):
        if node.is_leaf():
            continue
        left, right = node.children[0], node.children[1]
        # Compare the first leaf of each subtree against the target order
        # and swap the children when they are in the wrong relative order.
        if order.index(left.get_leaf_names()[0]) > order.index(right.get_leaf_names()[0]):
            node.swap_children()
def dm_to_tree(dm, names=None, method='nj', ladderize=True, outgroup=None,
               prune_outgroup=True):
    """Build an HsTree from a pairwise distance matrix.

    dm ....... square distance matrix (array-like or DataFrame; column
               names used as taxon names when *names* is None)
    method ... name of a Bio DistanceTreeConstructor method ('nj'/'upgma')
    outgroup . taxon used to root the tree; pruned away afterwards unless
               prune_outgroup is False, in which case the outgroup branch
               length is rescaled heuristically (see warnings below)
    Returns an HsTree.
    """
    dm1 = np.array(dm).astype(float)
    # Biopython wants the lower-triangular form, diagonal included.
    distance_triangular = [list(dm1[i,:i+1]) for i in np.arange(len(dm1))]
    #try:
    if names is None:
        try:
            names = dm.columns
        except AttributeError:
            names = np.arange(len(dm))
    # NOTE(review): str(i.encode("utf-8")) is a Python-2 idiom; under
    # Python 3 it would produce "b'...'" names — confirm runtime version.
    dm = _DistanceMatrix(names= [str(i.encode("utf-8")) for i in names],
                         matrix=distance_triangular)
    # except Exception,e:
    #     print names
    #     #print [type(i) for i in dm.columns]
    #     print type(distance_triangular)
    #     print type(distance_triangular[0])
    #     print set([str(type(i)) for j in distance_triangular for i in j])
    #     print distance_triangular
    #     raise e
    constructor = DistanceTreeConstructor()
    algorithm = getattr(constructor,method)
    tree = algorithm(dm)
    # Drop the auto-generated internal node labels ('Inner1', ...).
    for c in tree.get_nonterminals():
        c.name = None
    tree1 = phylo_to_hs(tree)
    if outgroup is not None:
        tree1.set_outgroup(outgroup)
        if prune_outgroup:
            tree1.prune([t for t in tree1.get_leaf_names() if t!=outgroup])
        else:
            # Keep the outgroup but heuristically place the root split.
            l,r = tree1.get_children()
            if l.is_leaf():
                o = l
                i = r
            elif r.is_leaf():
                o = r
                i = l
            else:
                # NOTE(review): UserException is not defined in this module —
                # this line would itself raise NameError if reached.
                raise UserException("There does not seem to be a single outgroup branch.")
            mean_leaf_time = np.mean([t.get_time() for t in i.get_leaves()])
            time = i.get_time()
            original_dist = o.dist
            if original_dist > time - mean_leaf_time:
                # NOTE(review): 'd' is computed but never used.
                d = mean_leaf_time - time
                o.dist = (original_dist-mean_leaf_time)/2. + time - mean_leaf_time
                i.dist = (original_dist-mean_leaf_time)/2.
                warnings.warn("Outgroup time is set as average of other samples. "
                              "This is ad-hoc and cannot be supported by data.")
            else:
                warnings.warn("Split outgroup vs ingroups is arbitrarily set at 1/2 of the branch length.")
    if ladderize:
        tree1.ladderize()
    return tree1
def get_local_tree(chrom, start, end, vcf_fn, samples=None, outgroup=None, plot=False):
    """Neighbour-joining tree for a genomic window of a VCF.

    Computes pairwise differences for chrom:start-end with haplotools,
    builds an NJ tree (optionally rooted with *outgroup* and drawn with
    the outgroup pruned), and returns (pairwise_diff_matrix, tree).
    NOTE(review): the returned tree is the rooted but UNPRUNED one; the
    pruned copy (tree_no) is only used for plotting — confirm intent.
    """
    pwd = hap.get_pairwise_diff(vcf_fn, chrom=chrom, start=start, end=end, samples=samples, chunksize=30000)
    # Biopython wants the lower-triangular form, diagonal included.
    distance_triangular = [list(pwd.values[i,:i+1]) for i in range(len(pwd))]
    dm = _DistanceMatrix(names= list(pwd.columns),
                         matrix=distance_triangular)
    constructor = DistanceTreeConstructor()
    tree = constructor.nj(dm)
    if outgroup is not None:
        tree.root_with_outgroup(outgroup)
    tree.ladderize()
    # Drop the auto-generated internal node labels.
    for t in tree.get_nonterminals():
        t.name = None
    tree_no = copy.deepcopy(tree)
    if outgroup is not None:
        tree_no.prune(outgroup)
    #tree_no.prune('AstTwe1')
    #tree_no.prune('SerRob1')
    if plot:
        fig = plt.figure(figsize=(15,50))
        ax = plt.gca()
        Phylo.draw(tree_no, axes=ax, do_show=False)
        ax.spines['left'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.set_yticks([])
        ax.set_ylabel('')
    return pwd, tree
def phylo_to_hs(phylo_tree):
    """Convert a Bio.Phylo tree to an HsTree via a newick round-trip."""
    buf = StringIO.StringIO()
    Phylo.write(phylo_tree, buf, format='newick')
    tree = HsTree(buf.getvalue())
    # Phylo quotes names containing whitespace; strip those outer quotes.
    for leaf in tree.get_leaves():
        leaf.name = leaf.name.strip("' ")
    return tree
def phylo_to_ete(phylo_tree):
    """Convert a Bio.Phylo tree to a plain ete3.Tree via a newick round-trip."""
    buf = StringIO.StringIO()
    Phylo.write(phylo_tree, buf, format='newick')
    ete_tree = ete3.Tree(buf.getvalue())
    # Phylo quotes names containing whitespace; strip those outer quotes.
    for leaf in ete_tree.get_leaves():
        leaf.name = leaf.name.strip("' ")
    return ete_tree
def phylo_from_str(tree_str):
    """Parse a newick string into a Bio.Phylo tree."""
    return Phylo.read(StringIO.StringIO(tree_str), format='newick')
def consistent_with_tree(ete_tree, h1, h2, h3, h4):
    """
    Returns True if the lineages have relationship

       /-h4
      /
     |   /-h3
     |--|
     |   |   /-h2
      \-|--|
          \-h1

    and h1 != h2.
    Returns False otherwise.
    """
    if h1 == h2:
        return False
    try:
        anc12 = ete_tree.get_common_ancestor([h1, h2])
        anc123 = ete_tree.get_common_ancestor([h1, h2, h3])
        anc1234 = ete_tree.get_common_ancestor([h1, h2, h3, h4])
    except ValueError:
        # One of the samples not in tree.
        return False
    # Each successive ancestor must be strictly deeper (nonzero topological
    # distance) than the previous one for the topology to match.
    nested_12_123 = bool(anc12.get_distance(anc123, topology_only=True))
    nested_123_1234 = bool(anc123.get_distance(anc1234, topology_only=True))
    return nested_12_123 and nested_123_1234
def get_consistent_df(stat_df, ete_tree):
    """
    Get a data frame with the subset
    of tuples that are consistent with a
    given ete tree.

    Parameters:
    stat_df : data frame with f4-like statistics
              where index levels are [h1, h2, h3, h4]
    ete_tree : ete3 tree object of all samples.
               Needs to be rooted and include
               all outgroups.
    """
    # Boolean mask over the (h1, h2, h3, h4) index tuples.
    mask = [consistent_with_tree(ete_tree, *tpl) for tpl in stat_df.index.values]
    return stat_df[mask]
#-------------------------------------------
###TREE PLOTTING AND VISUALISATION#########
#-------------------------------------------
def draw_tree(tree, label_func=str, do_show=True, show_confidence=True,
# For power users
axes=None, branch_labels=None, *args, **kwargs):
""" HS: This | |
None:
self.tags_initialized = false_tags
return false
if false is None:
self.tags_initialized = true_tags
return true
# we need to merge tags
final_tags = {}
for tag in set(list(true_tags.keys()) + list(false_tags.keys())):
true_val = true_tags.get(tag, False)
false_val = false_tags.get(tag, False)
if true_val is True and false_val is True:
final_tags[tag] = True
else:
# it's not certain
if not isinstance(true_val, bool) and not isinstance(false_val, bool) and true_val.name == false_val.name:
# these are the same bit that's been passed between two different branches.
final_tags[tag] = true_val
else:
tag_llvm_value = self.builder.phi(llvm_i1, 'is_initialized.merge.' + tag)
if isinstance(true_val, bool):
true_val = llvmBool(true_val)
if isinstance(false_val, bool):
false_val = llvmBool(false_val)
tag_llvm_value.add_incoming(true_val, true_block)
tag_llvm_value.add_incoming(false_val, false_block)
final_tags[tag] = tag_llvm_value
self.tags_initialized = final_tags
if true.native_type != false.native_type:
raise Exception("Expected left and right branches to have same type, but %s != %s\n\n%s" % (true, false, expr))
if true.native_type.matches.Void:
return TypedLLVMValue(None, native_ast.Type.Void())
final = self.builder.phi(type_to_llvm_type(true.native_type))
final.add_incoming(true.llvm_value, true_block)
final.add_incoming(false.llvm_value, false_block)
return TypedLLVMValue(final, true.native_type)
if expr.matches.While:
tags = dict(self.tags_initialized)
loop_block = self.builder.append_basic_block("while")
self.builder.branch(loop_block)
self.builder.position_at_start(loop_block)
cond = self.convert(expr.cond)
cond_llvm = cond.llvm_value
zero_like = llvmlite.ir.Constant(cond_llvm.type, 0)
if cond.native_type.matches.Pointer:
cond_llvm = self.builder.ptrtoint(cond_llvm, llvm_i64)
cond_llvm = self.builder.icmp_signed("!=", cond_llvm, zero_like)
elif cond.native_type.matches.Int:
if cond_llvm.type.width != 1:
cond_llvm = self.builder.icmp_signed("!=", cond_llvm, zero_like)
elif cond.native_type.matches.Float:
cond_llvm = self.builder.fcmp_unordered("!=", cond_llvm, zero_like)
else:
cond_llvm = llvmlite.ir.Constant(llvm_i1, 0)
with self.builder.if_else(cond_llvm) as (then, otherwise):
with then:
true = self.convert(expr.while_true)
if true is not None:
self.builder.branch(loop_block)
with otherwise:
false = self.convert(expr.orelse)
# it's currently illegal to modify the initialized set in a while loop
assertTagDictsSame(tags, self.tags_initialized)
if false is None:
self.builder.unreachable()
return None
return false
if expr.matches.ElementPtr:
arg = self.convert(expr.left)
offsets = [self.convert(a) for a in expr.offsets]
def gep_type(native_type, offsets):
if len(offsets) == 1:
if native_type.matches.Pointer:
return native_type
if native_type.matches.Struct:
assert offsets[0].matches.Constant and offsets[0].val.matches.Int
i = offsets[0].val.val
return native_type.element_types[i][1]
else:
assert native_type.matches.Pointer, f"Can't take element '{offsets[0]}' of {native_type}"
return gep_type(native_type.value_type, offsets[1:]).pointer()
return TypedLLVMValue(
self.builder.gep(arg.llvm_value, [o.llvm_value for o in offsets]),
gep_type(arg.native_type, expr.offsets)
)
if expr.matches.Variable:
assert expr.name in self.arg_assignments, (expr.name, list(self.arg_assignments.keys()))
return self.arg_assignments[expr.name]
if expr.matches.Unaryop:
operand = self.convert(expr.operand)
if operand.native_type == native_ast.Bool:
if expr.op.matches.LogicalNot:
return TypedLLVMValue(
self.builder.not_(operand.llvm_value),
operand.native_type
)
else:
if expr.op.matches.Add:
return operand
if expr.op.matches.LogicalNot:
zero_like = llvmlite.ir.Constant(operand.llvm_value.type, 0)
if operand.native_type.matches.Int:
return TypedLLVMValue(
self.builder.icmp_signed("==", operand.llvm_value, zero_like),
native_ast.Bool
)
if operand.native_type.matches.Float:
return TypedLLVMValue(
self.builder.fcmp_unordered("==", operand.llvm_value, zero_like),
native_ast.Bool
)
if expr.op.matches.BitwiseNot and operand.native_type.matches.Int:
return TypedLLVMValue(self.builder.not_(operand.llvm_value), operand.native_type)
if expr.op.matches.Negate:
if operand.native_type.matches.Int:
return TypedLLVMValue(self.builder.neg(operand.llvm_value), operand.native_type)
if operand.native_type.matches.Float:
if operand.native_type.bits == 32:
return TypedLLVMValue(
self.builder.fmul(operand.llvm_value, llvmlite.ir.FloatType()(-1.0)),
operand.native_type
)
else:
return TypedLLVMValue(
self.builder.fmul(operand.llvm_value, llvmlite.ir.DoubleType()(-1.0)),
operand.native_type
)
assert False, "can't apply unary operand %s to %s" % (expr.op, str(operand.native_type))
if expr.matches.Binop:
lhs = self.convert(expr.left)
if lhs is None:
return
rhs = self.convert(expr.right)
if rhs is None:
return
for which, rep in [('Gt', '>'), ('Lt', '<'), ('GtE', '>='),
('LtE', '<='), ('Eq', "=="), ("NotEq", "!=")]:
if getattr(expr.op.matches, which):
if lhs.native_type.matches.Float:
return TypedLLVMValue(
self.builder.fcmp_ordered(rep, lhs.llvm_value, rhs.llvm_value),
native_ast.Bool
)
elif lhs.native_type.matches.Int:
if lhs.native_type.signed:
return TypedLLVMValue(
self.builder.icmp_signed(rep, lhs.llvm_value, rhs.llvm_value),
native_ast.Bool
)
else:
return TypedLLVMValue(
self.builder.icmp_unsigned(rep, lhs.llvm_value, rhs.llvm_value),
native_ast.Bool
)
for py_op, floatop, intop_s, intop_u in [('Add', 'fadd', 'add', 'add'),
('Mul', 'fmul', 'mul', 'mul'),
('Div', 'fdiv', 'sdiv', 'udiv'),
('Mod', 'frem', 'srem', 'urem'),
('Sub', 'fsub', 'sub', 'sub'),
('LShift', None, 'shl', 'shl'),
('RShift', None, 'ashr', 'lshr'),
('BitOr', None, 'or_', 'or_'),
('BitXor', None, 'xor', 'xor'),
('BitAnd', None, 'and_', 'and_')]:
if getattr(expr.op.matches, py_op):
assert lhs.native_type == rhs.native_type, \
"malformed types: expect lhs&rhs to be the same but got %s,%s,%s\n\nexpr=%s"\
% (py_op, lhs.native_type, rhs.native_type, expr)
if lhs.native_type.matches.Float and floatop is not None:
return TypedLLVMValue(
getattr(self.builder, floatop)(lhs.llvm_value, rhs.llvm_value),
lhs.native_type
)
elif lhs.native_type.matches.Int:
llvm_op = intop_s if lhs.native_type.signed else intop_u
if llvm_op is not None:
return TypedLLVMValue(
getattr(self.builder, llvm_op)(lhs.llvm_value, rhs.llvm_value),
lhs.native_type
)
if expr.matches.Call:
target_or_ptr = expr.target
args = [self.convert(a) for a in expr.args]
for i in range(len(args)):
if args[i] is None:
return
if target_or_ptr.matches.Named:
target = target_or_ptr.target
func = self.namedCallTargetToLLVM(target)
if self.converter._printAllNativeCalls:
self.builder.call(
self.namedCallTargetToLLVM(
native_ast.NamedCallTarget(
name="np_print_bytes",
arg_types=(native_ast.UInt8.pointer(),),
output_type=native_ast.Void,
external=True,
varargs=False,
intrinsic=False,
can_throw=False
)
).llvm_value,
[constant_to_typed_llvm_value(
self.module,
self.builder, native_ast.Constant.ByteArray(
("calling native fun " + target.name + "\n").encode("ASCII")
)
).llvm_value]
)
else:
target = self.convert(target_or_ptr.expr)
assert (target.native_type.matches.Pointer and target.native_type.value_type.matches.Function), \
f"{target.native_type} is not a Function pointer"
func = target
try:
if func.native_type.value_type.can_throw:
normal_target = self.builder.append_basic_block()
exception_target = self.builder.append_basic_block()
llvm_call_result = self.builder.invoke(
func.llvm_value,
[a.llvm_value for a in args],
normal_target,
exception_target
)
self.generate_exception_landing_pad(exception_target)
self.builder.position_at_start(normal_target)
else:
llvm_call_result = self.builder.call(func.llvm_value, [a.llvm_value for a in args])
except Exception:
print("failing while calling ", target)
for a in args:
print("\t", a.llvm_value, a.native_type)
raise
output_type = func.native_type.value_type.output
if output_type.matches.Void:
llvm_call_result = None
return TypedLLVMValue(llvm_call_result, output_type)
if expr.matches.Sequence:
res = TypedLLVMValue(None, native_ast.Type.Void())
for e in expr.vals:
res = self.convert(e)
if res is None:
return
return res
if expr.matches.Comment:
return self.convert(expr.expr)
if expr.matches.ActivatesTeardown:
assert expr.name not in self.tags_initialized, "already initialized tag %s" % expr.name
self.tags_initialized[expr.name] = True
return TypedLLVMValue(None, native_ast.Type.Void())
if expr.matches.Throw:
arg = self.convert(expr.expr)
self.builder.store(
self.builder.bitcast(arg.llvm_value, llvm_i8ptr),
self.exception_slot
)
block = self.teardown_handler.acceptIncoming(
self.builder.block,
self.tags_initialized,
self.teardown_handler.controlFlowSwitchForException()
)
self.builder.branch(block)
return
if expr.matches.TryCatch or expr.matches.ExceptionPropagator:
self.teardown_handler = TeardownHandler(
self,
self.teardown_handler
)
new_handler = self.teardown_handler
result = self.convert(expr.expr)
self.teardown_handler = new_handler.parent_scope
def generator(tags, resume_normal_block):
with self.tags_as(tags):
prior = self.arg_assignments.get(expr.varname, None)
self.arg_assignments[expr.varname] = \
TypedLLVMValue(
self.builder.load(
self.exception_slot
),
native_ast.Int8Ptr
)
handler_res = self.convert(expr.handler)
if prior is None:
del self.arg_assignments[expr.varname]
else:
self.arg_assignments[expr.varname] = prior
if handler_res is not None:
self.builder.branch(resume_normal_block)
target_resume_block = self.builder.append_basic_block("try_catch_resume")
if result is not None:
self.builder.branch(target_resume_block)
new_handler.generate_trycatch_unwind(target_resume_block, generator)
self.builder.position_at_start(target_resume_block)
# if we returned 'none', and we're a TryCatch, then by definition we return
# 'void', which means we might return void ourselves. If we are an unwind
# handler, we don't need to do this because we're just going to propgate
# the exception anyways
if result is None and expr.matches.TryCatch:
result = TypedLLVMValue(None, native_ast.Type.Void())
elif result is None and not self.builder.block.is_terminated:
self.builder.unreachable()
return result
if expr.matches.FunctionPointer:
return self.namedCallTargetToLLVM(expr.target)
if expr.matches.Finally:
self.teardown_handler = TeardownHandler(
self,
self.teardown_handler,
expr.name
)
new_handler = self.teardown_handler
finally_result = self.convert(expr.expr)
self.teardown_handler = self.teardown_handler.parent_scope
# if we have a result, then we need to generate teardowns
# in the normal course of execution
if finally_result is not None:
for teardown in expr.teardowns:
self.convert_teardown(teardown)
else:
for teardown in expr.teardowns:
self.convert_teardown(teardown, justClearTags=True)
def generate_teardowns(new_tags):
with self.tags_as(new_tags):
for teardown in expr.teardowns:
self.convert_teardown(teardown)
if expr.name is not None:
finalBlock = self.builder.append_basic_block(self.teardown_handler.blockName() + "_resume")
if finally_result is None:
# someone might be jumping here. just because we don't have
# a value doesn't mean this particular expression doesn't have
# a result, because we might be a 'finally' catching a result.
finally_result = TypedLLVMValue(None, native_ast.Type.Void())
if not self.builder.block.is_terminated:
self.builder.branch(finalBlock)
self.builder.position_at_start(finalBlock)
else:
finalBlock = None
new_handler.generate_teardown(generate_teardowns, normal_slot=finalBlock)
return finally_result
assert False, "can't handle %s" % repr(expr)
def populate_needed_externals(external_function_references, module):
    """Declare the C++ exception-ABI / personality functions in *module*.

    Each declaration is stored in 'external_function_references' keyed by
    function name, so later codegen can emit calls or invokes against it.
    """
    def declare(fname, return_type, argument_types, is_vararg=False):
        # Build the llvm function type first, then register the declaration.
        fn_type = llvmlite.ir.FunctionType(
            return_type, argument_types, var_arg=is_vararg
        )
        external_function_references[fname] = llvmlite.ir.Function(
            module, fn_type, fname
        )
    declare("__cxa_allocate_exception", llvm_i8ptr, [llvm_i64])
    declare("__cxa_throw", llvm_void, [llvm_i8ptr, llvm_i8ptr, llvm_i8ptr])
    declare("__cxa_end_catch", llvm_i8ptr, [llvm_i8ptr])
    declare("__cxa_begin_catch", llvm_i8ptr, [llvm_i8ptr])
    declare("__gxx_personality_v0", llvm_i32, [], is_vararg=True)
class Converter:
def __init__(self):
object.__init__(self)
self._modules = {}
self._functions_by_name = {}
self._function_definitions = {}
# a map from function name to function type for functions that
# are defined in external shared objects and linked in to this one.
self._externallyDefinedFunctionTypes = {}
# total number of instructions in each function, by name
self._function_complexity = {}
self._inlineRequests = []
self._printAllNativeCalls = os.getenv("TP_COMPILER_LOG_NATIVE_CALLS")
self.verbose = False
def markExternal(self, functionNameToType):
    """Provide type signatures for a set of external functions.

    functionNameToType: mapping of function name -> native type, merged
    into the registry of externally-defined functions.
    """
    self._externallyDefinedFunctionTypes.update(functionNameToType)
def canBeInlined(self, name):
    # External functions have no body we can see, so they can never be
    # inlined into generated modules.
    return name not in self._externallyDefinedFunctionTypes
def totalFunctionComplexity(self, name):
"""Return the total number of instructions contained in a function.
The function must already have been defined in a prior parss. We use this
information to decide which functions to repeat in new module definitions.
"""
if name in self._function_complexity:
return self._function_complexity[name]
res = 0
for block in self._functions_by_name[name].basic_blocks:
res += len(block.instructions)
self._function_complexity[name] = res
return res
def repeatFunctionInModule(self, name, module):
"""Request that the function given by 'name' be inlined into 'module'.
It must already have been defined in another module.
Returns:
a fresh unpopulated function definition for the given function.
"""
assert name in self._functions_by_name
assert self._functions_by_name[name].module != module
existingFunctionDef = self._functions_by_name[name]
funcType = existingFunctionDef.type
if funcType.is_pointer:
funcType = funcType.pointee
assert isinstance(funcType, llvmlite.ir.FunctionType)
| |
#! /usr/bin/env python
"""Module providing GUI capability to prune any directory.
The code presented in this module is for the purposes of: (1) ascertaining
the space taken up by a directory, its files, its sub-directories, and its
sub-files; (2) allowing for the removal of the sub-files, sub-directories,
files, and directory found in the first purpose; (3) giving the user a GUI
to accomplish said purposes in a convenient way that is easily accessible."""
################################################################################
__author__ = '<NAME> <<EMAIL>>'
__date__ = '15 February 2011'
__version__ = '$Revision: 298 $'
################################################################################
# Import several GUI libraries.
import tkinter
import tkinter.ttk
import tkinter.filedialog
import tkinter.messagebox
# Import other needed modules.
import zlib
import base64
import os
import math
################################################################################
ICON = b'<KEY>
<KEY>
<KEY>
################################################################################
class GUISizeTree(tkinter.ttk.Frame):
    "Widget for examining size of directory with optional deletion."

    WARN = True # Should warnings be made for permanent operations?
    MENU = True # Should the (destructive) context menu be enabled?
    # Give names to columns.
    CLMS = 'total_size', 'file_size', 'path'
    TREE = '#0' # Identifier of the Treeview's built-in tree column.

    ########################################################################
    # Allow direct execution of GUISizeTree widget.
@classmethod
def main(cls):
    """Entry point: build the application window, show one GUISizeTree
    widget inside it, and run the Tk event loop."""
    tkinter.NoDefaultRoot()
    root = cls.create_application_root()
    cls.attach_window_icon(root, ICON)
    cls.setup_class_instance(root)
    root.mainloop()
@staticmethod
def create_application_root():
    """Build the Tk root window: minimum size, title, and no tear-off
    menus anywhere in the application."""
    root = tkinter.Tk()
    root.option_add('*tearOff', tkinter.FALSE)
    root.title('Directory Pruner')
    root.minsize(430, 215)
    return root
@staticmethod
def attach_window_icon(root, icon):
    """Decode *icon* (base64-encoded, zlib-compressed .ico data) to a
    temporary file, apply it as the window icon, and delete the file.

    Bug fix: the *icon* argument was previously ignored and the module
    global ICON used instead; the temp file also leaked if iconbitmap
    raised. The temp file is now always removed.
    """
    with open('tree.ico', 'wb') as file:
        file.write(zlib.decompress(base64.b64decode(icon)))
    try:
        root.iconbitmap('tree.ico')
    finally:
        os.remove('tree.ico')
@classmethod
def setup_class_instance(cls, root):
    """Instantiate the widget, grid it into *root*, and make cell (0, 0)
    absorb all resizing."""
    for configure in (root.grid_rowconfigure, root.grid_columnconfigure):
        configure(0, weight=1)
    instance = cls(root)
    instance.grid(row=0, column=0, sticky=tkinter.NSEW)
    return instance
########################################################################
# Initialize the GUISizeTree object.
def __init__(self, master=None, **kw):
    """Initialize the GUISizeTree instance and configure for operation.

    The helper calls below are order-dependent: the root window must be
    captured first, widgets must exist before they are gridded, and the
    tree/menu are configured last.
    """
    super().__init__(master, **kw)
    # Initialize and configure this frame widget.
    self.capture_root()
    self.create_widgets()
    self.create_supports()
    self.create_bindings()
    self.configure_grid()
    self.configure_tree()
    self.configure_menu()
    # Set focus to path entry.
    self.__path.focus_set()
def capture_root(self):
    """Walk up the master chain until the Tk root is found and store it."""
    candidate = self.master
    while not isinstance(candidate, tkinter.Tk):
        candidate = candidate.master
    self.__tk = candidate
def create_widgets(self):
    "Create all the widgets that will be placed in this frame."
    # The "Path:" label doubles as a button that opens the directory
    # chooser.
    self.__label = tkinter.ttk.Button(self, text='Path:',
                                      command=self.choose)
    self.__path = tkinter.ttk.Entry(self, cursor='xterm')
    # Run and Cancel later share one grid cell (see configure_grid).
    self.__run = tkinter.ttk.Button(self, text='Search',
                                    command=self.search)
    self.__cancel = tkinter.ttk.Button(self, text='Cancel',
                                       command=self.stop_search)
    self.__progress = tkinter.ttk.Progressbar(self,
                                              orient=tkinter.HORIZONTAL)
    self.__tree = tkinter.ttk.Treeview(self, columns=self.CLMS,
                                       selectmode=tkinter.BROWSE)
    # Vertical and horizontal scrollbars wired to the Treeview.
    self.__scroll_1 = tkinter.ttk.Scrollbar(self, orient=tkinter.VERTICAL,
                                            command=self.__tree.yview)
    self.__scroll_2 = tkinter.ttk.Scrollbar(self, orient=tkinter.HORIZONTAL,
                                            command=self.__tree.xview)
    self.__grip = tkinter.ttk.Sizegrip(self)
def create_supports(self):
    """Build the GUI helpers that live outside this frame: directory
    chooser, error/warning dialogs, and the context menu."""
    self.create_directory_browser()
    self.create_error_message()
    self.create_warning_message()
    self.__menu = tkinter.Menu(self)
def create_directory_browser(self):
    """Locate the filesystem root and build the directory chooser dialog
    starting there."""
    root_dir, part = os.getcwd(), True
    while part:
        # Repeated os.path.split eventually yields ('<root>', '').
        root_dir, part = os.path.split(root_dir)
    self.__dialog = tkinter.filedialog.Directory(self, initialdir=root_dir)
def create_error_message(self):
    """Build the dialog shown when the requested path is not a directory."""
    self.__error = tkinter.messagebox.Message(
        self,
        title='Path Error',
        icon=tkinter.messagebox.ERROR,
        type=tkinter.messagebox.OK,
        message='Directory does not exist.')
def create_warning_message(self):
    "Create warning message for permanent operations."
    # NOTE: the message literal is intentionally left-aligned; its
    # indentation (or lack thereof) is part of the displayed text.
    options = {'title': 'Important Warning',
               'icon': tkinter.messagebox.QUESTION,
               'type': tkinter.messagebox.YESNO,
               'message': '''\
You cannot undo these operations.
Are you sure you want to do this?'''}
    self.__warn = tkinter.messagebox.Message(self, **options)
def create_bindings(self):
    "Bind the widgets to any events they will need to handle."
    self.__label.bind('<Return>', self.choose)
    self.__path.bind('<Control-Key-a>', self.select_all)
    # Swallow Control-/ so the default binding cannot interfere.
    self.__path.bind('<Control-Key-/>', lambda event: 'break')
    self.__path.bind('<Return>', self.search)
    self.__run.bind('<Return>', self.search)
    self.__cancel.bind('<Return>', self.stop_search)
    # Right-click (platform dependent) opens the context menu.
    self.bind_right_click(self.__tree, self.open_menu)
@staticmethod
def select_all(event):
    """Select the entire contents of the Entry that received *event*;
    return 'break' to stop default event handling."""
    entry = event.widget
    entry.selection_range(0, tkinter.END)
    return 'break'
def bind_right_click(self, widget, action):
    """Bind *action* to a right click; on Aqua (macOS) that means button
    2 plus Control-click, elsewhere button 3."""
    windowing = self.__tk.tk.call('tk', 'windowingsystem')
    if windowing != 'aqua':
        widget.bind('<3>', action)
    else:
        widget.bind('<2>', action)
        widget.bind('<Control-1>', action)
def configure_grid(self):
    "Place all widgets on the grid in their respective locations."
    self.__label.grid(row=0, column=0)
    self.__path.grid(row=0, column=1, sticky=tkinter.EW)
    # Run and Cancel share the same cell; only one is visible at a time.
    # Both are gridded once (recording their options), hidden, then Run
    # is restored as the initial visible button.
    self.__run.grid(row=0, column=2, columnspan=2)
    self.__run.grid_remove()
    self.__cancel.grid(row=0, column=2, columnspan=2)
    self.__cancel.grid_remove()
    self.__run.grid()
    self.__progress.grid(row=1, column=0, columnspan=4, sticky=tkinter.EW)
    self.__tree.grid(row=2, column=0, columnspan=3, sticky=tkinter.NSEW)
    self.__scroll_1.grid(row=2, column=3, sticky=tkinter.NS)
    self.__scroll_2.grid(row=3, column=0, columnspan=3, sticky=tkinter.EW)
    self.__grip.grid(row=3, column=3, sticky=tkinter.SE)
    # Configure the grid to automatically resize internal widgets.
    self.grid_rowconfigure(2, weight=1)
    self.grid_columnconfigure(1, weight=1)
def configure_tree(self):
    "Configure the Treeview widget."
    # Setup the headings; clicking a heading sorts by that column.
    self.__tree.heading(self.TREE, text=' Name', anchor=tkinter.W,
                        command=self.sort_name)
    self.__tree.heading(self.CLMS[0], text=' Total Size', anchor=tkinter.W,
                        command=self.sort_total_size)
    self.__tree.heading(self.CLMS[1], text=' File Size', anchor=tkinter.W,
                        command=self.sort_file_size)
    self.__tree.heading(self.CLMS[2], text=' Path', anchor=tkinter.W,
                        command=self.sort_path)
    # Setup the columns.
    self.__tree.column(self.TREE, minwidth=100, width=200)
    self.__tree.column(self.CLMS[0], minwidth=100, width=200)
    self.__tree.column(self.CLMS[1], minwidth=100, width=200)
    self.__tree.column(self.CLMS[2], minwidth=100, width=200)
    # Connect the Scrollbars.
    self.__tree.configure(yscrollcommand=self.__scroll_1.set)
    self.__tree.configure(xscrollcommand=self.__scroll_2.set)
def configure_menu(self):
    "Configure the (context) Menu widget."
    # Shortcut for narrowing the search.
    self.__menu.add_command(label='Search Directory',
                            command=self.search_dir)
    self.__menu.add_separator()
    # Operations committed on directory.
    self.__menu.add_command(label='Remove Directory', command=self.rm_dir)
    self.__menu.add_command(label='Remove Files', command=self.rm_files)
    self.__menu.add_separator()
    # Operations that recurse on sub-directories.
    self.__menu.add_command(label='Remove Sub-directories',
                            command=self.rm_subdirs)
    self.__menu.add_command(label='Remove Sub-files',
                            command=self.rm_subfiles)
    # Only add "Open Directory" command on Windows, where os.startfile
    # exists.
    if hasattr(os, 'startfile'):
        self.__menu.add_separator()
        self.__menu.add_command(label='Open Directory',
                                command=self.open_dir)
########################################################################
# This property is used to control access to operations.
def __get_operations_enabled(self):
    "Return if run button is in normal state."
    # The Run button's widget state doubles as the flag for whether
    # (destructive) operations may currently proceed.
    return self.__run['state'].string == tkinter.NORMAL
def __set_operations_enabled(self, value):
    "Enable or disable run button's state according to value."
    self.__run['state'] = tkinter.NORMAL if value else tkinter.DISABLED
operations_enabled = property(__get_operations_enabled,
                              __set_operations_enabled,
                              doc="Flag controlling certain operations")
########################################################################
# Handle path browsing and searching actions.
def choose(self, event=None):
    """Open the directory browser; if the user picks a directory, replace
    the path entry's contents with its absolute path."""
    selection = self.__dialog.show()
    if not selection:
        return
    # Entry is cleared before absolute path is added.
    self.__path.delete(0, tkinter.END)
    self.__path.insert(0, os.path.abspath(selection))
def search(self, event=None):
    """Validate the entered path and, if it is a directory, run the size
    search; otherwise shake the window to signal the error."""
    if not self.operations_enabled:
        return
    self.operations_enabled = False
    # Get absolute path and check existence.
    path = os.path.abspath(self.__path.get())
    if not os.path.isdir(path):
        # Leave operations disabled on a bad path, as before.
        self.shake()
        return
    self.__search(path)
    # Enable operations after finishing search.
    self.operations_enabled = True
def __search(self, path):
    "Execute the search procedure and display in Treeview."
    # Swap the Run button for Cancel while the search is in flight.
    self.__run.grid_remove()
    self.__cancel.grid()
    children = self.start_search()
    try:
        # SizeTree calls update_search periodically; a requested cancel
        # makes that raise StopIteration and abort the walk.
        tree = SizeTree(self.update_search, path)
    except StopIteration:
        self.handle_stop_search(children)
    else:
        self.finish_search(children, tree)
    self.__cancel.grid_remove()
    self.__run.grid()
########################################################################
# Execute various phases of a search.
def start_search(self):
    """Prepare the GUI for a search: clear the stop flag, detach the
    previous results, and start the indeterminate progress bar."""
    self.__stop_search = False
    detached = Apply(TreeviewNode(self.__tree).children)
    detached.detach()
    self.__progress.configure(mode='indeterminate', maximum=100)
    self.__progress.start()
    return detached
def update_search(self):
    "Check if search has been stopped and update the GUI."
    # Called periodically from SizeTree during the directory walk; the
    # validate call may raise StopIteration to cancel the walk.
    self.validate_search()
    self.update()
def validate_search(self):
    """Raise StopIteration if a cancel was requested, clearing the flag
    so the next search starts clean."""
    if not self.__stop_search:
        return
    self.__stop_search = False
    raise StopIteration('Search has been canceled!')
def stop_search(self, event=None):
    "Cancel a search by setting its stop flag."
    # Polled by validate_search() during the directory walk.
    self.__stop_search = True
def handle_stop_search(self, children):
    """Restore the previously detached rows and reset the progress bar
    after a canceled search."""
    children.reattach()
    self.__progress.stop()
    self.__progress.configure(mode='determinate')
def finish_search(self, children, tree):
    "Delete old children, update Progressbar, and update Treeview."
    children.delete()
    self.__progress.stop()
    # +1 accounts for the root node appended below.
    self.__progress.configure(mode='determinate',
                              maximum=tree.total_nodes+1)
    node = TreeviewNode(self.__tree).append(tree.name)
    try:
        self.build_tree(node, tree)
    except StopIteration:
        # A cancel during tree construction leaves a partial tree.
        pass
########################################################################
# Handle Treeview column sorting events initiated by user.
# Each handler sorts the children of the currently selected node by the
# corresponding column (wired up as heading commands in configure_tree).
def sort_name(self):
    "Sort children of selected node by name."
    TreeviewNode.current(self.__tree).sort_name()
def sort_total_size(self):
    "Sort children of selected node by total size."
    TreeviewNode.current(self.__tree).sort_total_size()
def sort_file_size(self):
    "Sort children of selected node by file size."
    TreeviewNode.current(self.__tree).sort_file_size()
def sort_path(self):
    "Sort children of selected node by path."
    TreeviewNode.current(self.__tree).sort_path()
########################################################################
# Handle right-click events on the Treeview widget.
def open_menu(self, event):
    """On right-click, select the row under the cursor and, when the
    context menu is enabled, post it at the cursor position."""
    row = event.widget.identify_row(event.y)
    if not row:
        return
    event.widget.selection_set(row)
    if self.menu_allowed:
        self.__menu.post(event.x_root, event.y_root)
@property
def menu_allowed(self):
    "Check if menu is enabled along with operations."
    # The class-level MENU switch must be on AND no search/removal may be
    # in progress (operations_enabled).
    return self.MENU and self.operations_enabled
def search_dir(self):
    """Copy the selected row's path into the entry and re-run the search
    on it (context-menu shortcut for narrowing the search)."""
    selected_path = TreeviewNode.current(self.__tree).path
    self.__path.delete(0, tkinter.END)
    self.__path.insert(0, selected_path)
    self.search()
# Each removal handler first asks for confirmation (unless WARN is off)
# via commit_permanent_operation, then delegates to the destructive
# do_remove_* implementation.
def rm_dir(self):
    "Remove the currently selected directory."
    if self.commit_permanent_operation:
        self.do_remove_directory()
def rm_files(self):
    "Remove the files in the currently selected directory."
    if self.commit_permanent_operation:
        self.do_remove_files()
def rm_subdirs(self):
    "Remove the sub-directories of the currently selected directory."
    if self.commit_permanent_operation:
        self.do_remove_subdirectories()
def rm_subfiles(self):
    "Remove the sub-files of the currently selected directory."
    if self.commit_permanent_operation:
        self.do_remove_subfiles()
@property
def commit_permanent_operation(self):
    "Check if warning should be issued before committing operation."
    # True when warnings are disabled, or when the user answered "yes"
    # to the confirmation dialog.
    return not self.WARN or self.__warn.show() == tkinter.messagebox.YES
def open_dir(self):
    "Open up the current directory (only available on Windows)."
    # os.startfile exists only on Windows; the menu entry calling this is
    # added only when it is present (see configure_menu).
    os.startfile(TreeviewNode.current(self.__tree).path)
########################################################################
# Execute actions requested by context menu.
def do_remove_directory(self):
"Remove a directory and all of its sub-directories."
self.begin_rm()
# Get the current Treeview node and delete it.
node = TreeviewNode.current(self.__tree)
directory_size, path = node.total_size, node.path
position, parent = node.position, node.delete(True)
# Delete the entire directory at path.
self.__rm_dir(self.update, path, True, True)
if os.path.isdir(path):
# Add the directory back to the Treeview.
tree = SizeTree(self.update, path)
self.begin_rm_update(tree.total_nodes + 1)
# Rebuild the Treeview under the parent.
node = parent.insert(position, tree.name)
self.build_tree(node, tree)
# New directory size.
total_size = tree.total_size
else:
self.begin_rm_update()
# New directory size.
total_size = 0
# If the size has changed, update parent nodes.
if directory_size | |
<filename>tests/conftest.py
import enum
import math
from collections import OrderedDict, UserString
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, Type, Union
from ruamel import yaml
import pytest # type: ignore
import yatiml
from yatiml.recognizer import Recognizer
# ---------------------------------------------------------------------------
# Loader/dumper fixtures for documents made of built-in types.  Each fixture
# defines a fresh Loader/Dumper subclass so that yatiml class registration
# does not leak between tests.
# ---------------------------------------------------------------------------
@pytest.fixture
def string_loader():
    class StringLoader(yatiml.Loader):
        pass
    yatiml.set_document_type(StringLoader, str)
    return StringLoader
@pytest.fixture
def datetime_loader():
    class DatetimeLoader(yatiml.Loader):
        pass
    yatiml.set_document_type(DatetimeLoader, datetime)
    return DatetimeLoader
@pytest.fixture
def string_list_loader():
    class StringListLoader(yatiml.Loader):
        pass
    yatiml.set_document_type(StringListLoader, List[str])
    return StringListLoader
@pytest.fixture
def int_list_list_loader():
    class IntListListLoader(yatiml.Loader):
        pass
    yatiml.set_document_type(IntListListLoader, List[List[int]])
    return IntListListLoader
@pytest.fixture
def int_list_loader():
    class IntListLoader(yatiml.Loader):
        pass
    yatiml.set_document_type(IntListLoader, List[int])
    return IntListLoader
@pytest.fixture
def string_dict_loader():
    class StringDictLoader(yatiml.Loader):
        pass
    yatiml.set_document_type(StringDictLoader, Dict[str, str])
    return StringDictLoader
@pytest.fixture
def int_key_dict_loader():
    class IntKeyDictLoader(yatiml.Loader):
        pass
    yatiml.set_document_type(IntKeyDictLoader, Dict[int, str])
    return IntKeyDictLoader
@pytest.fixture
def nested_dict_loader():
    class NestedDictLoader(yatiml.Loader):
        pass
    yatiml.set_document_type(NestedDictLoader, Dict[str, Dict[str, bool]])
    return NestedDictLoader
@pytest.fixture
def mixed_dict_list_loader():
    class MixedDictListLoader(yatiml.Loader):
        pass
    yatiml.set_document_type(MixedDictListLoader, List[Dict[str, int]])
    return MixedDictListLoader
@pytest.fixture
def union_loader():
    class UnionLoader(yatiml.Loader):
        pass
    yatiml.set_document_type(UnionLoader, Union[str, int])
    return UnionLoader
@pytest.fixture
def optional_loader():
    class OptionalLoader(yatiml.Loader):
        pass
    yatiml.set_document_type(OptionalLoader, Optional[str])
    return OptionalLoader
@pytest.fixture
def plain_dumper():
    # Dumper with no custom classes registered at all.
    class PlainDumper(yatiml.Dumper):
        pass
    return PlainDumper
class Document1:
    """Simplest possible document: a single string attribute."""

    def __init__(self, attr1: str) -> None:
        self.attr1 = attr1
class Vector2D:
    """A point in the plane with float coordinates x and y."""

    def __init__(self, x: float, y: float) -> None:
        self.x = x
        self.y = y
class Shape:
def __init__(self, center: Vector2D) -> None:
self.center = center
class Rectangle(Shape):
    """Axis-aligned rectangle: a center point plus width and height."""
    def __init__(self, center: Vector2D, width: float, height: float) -> None:
        super().__init__(center)
        self.width = width
        self.height = height
class Circle(Shape):
    """Circle: a center point plus a radius."""
    def __init__(self, center: Vector2D, radius: float) -> None:
        super().__init__(center)
        self.radius = radius
class Ellipse(Shape):
    """Axis-aligned ellipse; eccentricity is derived on construction."""

    def __init__(
            self, center: Vector2D,
            semi_major: float, semi_minor: float
    ) -> None:
        super().__init__(center)
        self.semi_major = semi_major
        self.semi_minor = semi_minor
        # e = sqrt(1 - b^2 / a^2) for semi-axes a, b.
        self.eccentricity = math.sqrt(1.0 - semi_minor**2 / semi_major**2)

    @classmethod
    def yatiml_subobjects(cls) -> List[Tuple[str, Type, bool]]:
        # Both semi-axes are required float attributes.
        return [
            ('semi_major', float, True),
            ('semi_minor', float, True),
        ]
class Color(enum.Enum):
    # String-valued enum: each member's value equals its (lowercase) name.
    red = 'red'
    orange = 'orange'
    yellow = 'yellow'
    green = 'green'
    blue = 'blue'
class Color2(enum.Enum):
    """Int-valued colors; YAML uses lowercase member names (see the
    savorize/sweeten hooks below)."""
    RED = 1
    ORANGE = 2
    YELLOW = 3
    GREEN = 4
    BLUE = 5

    @classmethod
    def yatiml_savorize(cls, node: yatiml.Node) -> None:
        # Fix: first parameter of a classmethod is the class; renamed the
        # misleading 'self' to 'cls'.  Incoming YAML uses lowercase names,
        # so map them to the uppercase member names before recognition.
        if node.is_scalar(str):
            node.set_value(node.get_value().upper())  # type: ignore

    @classmethod
    def yatiml_sweeten(cls, node: yatiml.Node) -> None:
        # Emit lowercase names in the generated YAML.
        node.set_value(node.get_value().lower())  # type: ignore
class Document2:
    """Document holding a cursor position, a list of shapes, a color, and
    an optional extra shape."""

    def __init__(self, cursor_at: Vector2D, shapes: List[Shape]=None,
                 color: Color2=Color2.RED, extra_shape: Optional[Shape] = None
                 ) -> None:
        # A mutable [] default would be shared between instances, so None
        # is the sentinel and is replaced with a fresh list here.
        self.cursor_at = cursor_at
        self.color = color
        self.extra_shape = extra_shape
        self.shapes = [] if shapes is None else shapes
class Super:
    """Base class whose subclasses are recognized via a 'subclass' key.

    The argument is deliberately not stored; it exists only to drive
    recognition in the subclasses.
    """

    def __init__(self, subclass: str) -> None:
        pass
class SubA(Super):
    """Subclass recognized when the YAML mapping has subclass: 'A'."""
    def __init__(self, subclass: str) -> None:
        super().__init__(subclass)
    @classmethod
    def yatiml_recognize(cls, node: yatiml.UnknownNode) -> None:
        node.require_attribute_value('subclass', 'A')
class SubB(Super):
    """Subclass recognized when the YAML mapping has subclass: 'B'."""
    def __init__(self, subclass: str) -> None:
        super().__init__(subclass)
    @classmethod
    def yatiml_recognize(cls, node: yatiml.UnknownNode) -> None:
        node.require_attribute_value('subclass', 'B')
class Super2:
    """Base class for subclasses recognized by a 'subclass' YAML key that
    is not a constructor argument (stripped/added by savorize/sweeten)."""

    def __init__(self) -> None:
        pass
class SubA2(Super2):
    """Tagged in YAML with subclass: 'A2'; the tag is stripped on load and
    re-added on dump, since __init__ does not accept it."""
    def __init__(self) -> None:
        pass
    @classmethod
    def yatiml_recognize(cls, node: yatiml.UnknownNode) -> None:
        node.require_attribute_value('subclass', 'A2')
    @classmethod
    def yatiml_savorize(cls, node: yatiml.Node) -> None:
        # 'subclass' is not an __init__ parameter, so drop it before
        # construction.
        node.remove_attribute('subclass')
    @classmethod
    def yatiml_sweeten(cls, node: yatiml.Node) -> None:
        node.set_attribute('subclass', 'A2')
class SubB2(Super2):
    """Tagged in YAML with subclass: 'B2'; mirror image of SubA2."""
    def __init__(self) -> None:
        pass
    @classmethod
    def yatiml_recognize(cls, node: yatiml.UnknownNode) -> None:
        node.require_attribute_value('subclass', 'B2')
    @classmethod
    def yatiml_savorize(cls, node: yatiml.Node) -> None:
        node.remove_attribute('subclass')
    @classmethod
    def yatiml_sweeten(cls, node: yatiml.Node) -> None:
        node.set_attribute('subclass', 'B2')
class Universal:
    """Recognizes any node as itself; its yatiml_attributes is
    intentionally broken (returns None) — presumably for testing the
    error path, verify against the tests using it."""
    def __init__(self, a: int, b: List[int]) -> None:
        self.a = a
        self.b = b
    @classmethod
    def yatiml_recognize(cls, node: yatiml.Node) -> None:
        # recognizes anything as being of this type
        pass
    def yatiml_attributes(self) -> None:  # type: ignore
        # intentionally broken
        pass
class Extensible:
    """Class accepting arbitrary extra attributes via yatiml_extra."""

    def __init__(self, a: int, yatiml_extra: OrderedDict) -> None:
        # yatiml collects unrecognized mapping keys into yatiml_extra.
        self.yatiml_extra = yatiml_extra
        self.a = a
class UnionAttribute:
    """Single attribute that may be either an int or a str."""

    def __init__(self, a: Union[int, str]) -> None:
        self.a = a
class PrivateAttributes:
    """Stores its data in name-mangled private attributes and exposes
    them to yatiml via yatiml_attributes()."""

    def __init__(self, a: int, b: float) -> None:
        self.__a = a
        self.__b = b

    def yatiml_attributes(self) -> OrderedDict:
        # Insertion order matters for the YAML output: 'a' then 'b'.
        result = OrderedDict()  # type: OrderedDict
        result['a'] = self.__a
        result['b'] = self.__b
        return result
class BrokenPrivateAttributes:
    """Has private attributes but no yatiml_attributes() — presumably to
    exercise the dumper's failure path; verify against the tests that use
    broken_private_attributes_dumper."""
    def __init__(self, a: int, b: float) -> None:
        self.__a = a
        self.__b = b
class ComplexPrivateAttributes:
    """A private attribute of a user-defined type, exposed through
    yatiml_attributes()."""

    def __init__(self, a: Vector2D) -> None:
        self.__a = a

    def yatiml_attributes(self) -> OrderedDict:
        return OrderedDict([('a', self.__a)])
class ConstrainedString(UserString):
    """A UserString whose value must begin with the letter 'a'."""

    def __init__(self, seq: Any) -> None:
        super().__init__(seq)
        value = self.data  # type: ignore
        if not value.startswith('a'):
            raise ValueError('ConstrainedString must start with an a')
class Postcode:
    """Postcode written in YAML as a single scalar like '1234 AB' and
    stored as separate digits/letters attributes."""

    def __init__(self, digits: int, letters: str) -> None:
        self.digits = digits
        self.letters = letters

    @classmethod
    def yatiml_recognize(cls, node: yatiml.UnknownNode) -> None:
        # A postcode appears as a single scalar string in YAML.
        node.require_scalar(str)

    @classmethod
    def yatiml_savorize(cls, node: yatiml.Node) -> None:
        # Split 'DDDD LL' into the two constructor attributes.
        text = str(node.get_value())
        node.make_mapping()
        node.set_attribute('digits', int(text[0:4]))
        node.set_attribute('letters', text[5:7])

    @classmethod
    def yatiml_sweeten(cls, node: yatiml.Node) -> None:
        # Fix: first parameter of a classmethod is the class; renamed the
        # misleading 'self' to 'cls'.  Reassemble the scalar 'DDDD LL'
        # form for output.
        digits = node.get_attribute('digits').get_value()
        letters = node.get_attribute('letters').get_value()
        node.set_value('{} {}'.format(digits, letters))
class DashedAttribute:
    """Attribute written with dashes in YAML but underscores in Python;
    the hooks below convert between the two spellings."""
    def __init__(self, dashed_attribute: int) -> None:
        self.dashed_attribute = dashed_attribute
    @classmethod
    def yatiml_savorize(cls, node: yatiml.Node) -> None:
        # YAML keys use dashes; convert to valid Python identifiers.
        node.dashes_to_unders_in_keys()
    @classmethod
    def yatiml_sweeten(cls, node: yatiml.Node) -> None:
        node.unders_to_dashes_in_keys()
# ---------------------------------------------------------------------------
# Loader/dumper fixtures for the user-defined document classes above.
# ---------------------------------------------------------------------------
@pytest.fixture
def document1_loader():
    class Document1Loader(yatiml.Loader):
        pass
    yatiml.add_to_loader(Document1Loader, Document1)
    yatiml.set_document_type(Document1Loader, Document1)
    return Document1Loader
@pytest.fixture
def document1_dumper():
    class Document1Dumper(yatiml.Dumper):
        pass
    yatiml.add_to_dumper(Document1Dumper, Document1)
    return Document1Dumper
@pytest.fixture
def vector_loader():
    class VectorLoader(yatiml.Loader):
        pass
    yatiml.add_to_loader(VectorLoader, Vector2D)
    yatiml.set_document_type(VectorLoader, Vector2D)
    return VectorLoader
@pytest.fixture
def shape_loader():
    class ShapeLoader(yatiml.Loader):
        pass
    yatiml.add_to_loader(
        ShapeLoader, [Shape, Rectangle, Circle, Ellipse, Vector2D])
    yatiml.set_document_type(ShapeLoader, Shape)
    return ShapeLoader
@pytest.fixture
def missing_circle_loader():
    # Deliberately omits Circle so unrecognized-subclass handling can be
    # tested.
    class MissingCircleLoader(yatiml.Loader):
        pass
    yatiml.add_to_loader(
        MissingCircleLoader, [Shape, Rectangle, Ellipse, Vector2D])
    yatiml.set_document_type(MissingCircleLoader, Shape)
    return MissingCircleLoader
@pytest.fixture
def document2_loader():
    class Document2Loader(yatiml.Loader):
        pass
    yatiml.add_to_loader(
        Document2Loader,
        [Color2, Document2, Shape, Rectangle, Circle, Vector2D])
    yatiml.set_document_type(Document2Loader, Document2)
    return Document2Loader
@pytest.fixture
def document2_dumper():
    class Document2Dumper(yatiml.Dumper):
        pass
    yatiml.add_to_dumper(
        Document2Dumper,
        [Color2, Document2, Shape, Rectangle, Circle, Vector2D])
    return Document2Dumper
# ---------------------------------------------------------------------------
# Fixtures for enums, UserString subclasses, and subclass recognition.
# ---------------------------------------------------------------------------
@pytest.fixture
def enum_loader():
    class EnumLoader(yatiml.Loader):
        pass
    yatiml.add_to_loader(EnumLoader, Color)
    yatiml.set_document_type(EnumLoader, Color)
    return EnumLoader
@pytest.fixture
def enum_dumper():
    class EnumDumper(yatiml.Dumper):
        pass
    yatiml.add_to_dumper(EnumDumper, Color)
    return EnumDumper
@pytest.fixture
def enum_loader2():
    class EnumLoader2(yatiml.Loader):
        pass
    yatiml.add_to_loader(EnumLoader2, Color2)
    yatiml.set_document_type(EnumLoader2, Color2)
    return EnumLoader2
@pytest.fixture
def enum_dumper2():
    class EnumDumper2(yatiml.Dumper):
        pass
    yatiml.add_to_dumper(EnumDumper2, Color2)
    return EnumDumper2
@pytest.fixture
def user_string_loader():
    class UserStringLoader(yatiml.Loader):
        pass
    yatiml.add_to_loader(UserStringLoader, ConstrainedString)
    yatiml.set_document_type(UserStringLoader, ConstrainedString)
    return UserStringLoader
@pytest.fixture
def user_string_dumper():
    class UserStringDumper(yatiml.Dumper):
        pass
    yatiml.add_to_dumper(UserStringDumper, ConstrainedString)
    return UserStringDumper
@pytest.fixture
def super_loader():
    class SuperLoader(yatiml.Loader):
        pass
    yatiml.add_to_loader(SuperLoader, [Super, SubA, SubB])
    yatiml.set_document_type(SuperLoader, Super)
    return SuperLoader
@pytest.fixture
def super2_loader():
    class Super2Loader(yatiml.Loader):
        pass
    yatiml.add_to_loader(Super2Loader, [Super2, SubA2, SubB2])
    yatiml.set_document_type(Super2Loader, Super2)
    return Super2Loader
@pytest.fixture
def super2_dumper():
    class Super2Dumper(yatiml.Dumper):
        pass
    yatiml.add_to_dumper(Super2Dumper, [Super2, SubA2, SubB2])
    return Super2Dumper
@pytest.fixture
def universal_loader():
    """Return a yatiml Loader with Universal as the document type."""
    class UniversalLoader(yatiml.Loader):
        pass
    yatiml.add_to_loader(UniversalLoader, Universal)
    yatiml.set_document_type(UniversalLoader, Universal)
    return UniversalLoader
@pytest.fixture
def universal_dumper():
    """Return a yatiml Dumper that can dump Universal objects."""
    class UniversalDumper(yatiml.Dumper):
        pass
    yatiml.add_to_dumper(UniversalDumper, Universal)
    return UniversalDumper
@pytest.fixture
def extensible_loader():
    """Return a yatiml Loader with Extensible as the document type."""
    class ExtensibleLoader(yatiml.Loader):
        pass
    yatiml.add_to_loader(ExtensibleLoader, Extensible)
    yatiml.set_document_type(ExtensibleLoader, Extensible)
    return ExtensibleLoader
@pytest.fixture
def extensible_dumper():
    """Return a yatiml Dumper that can dump Extensible objects."""
    class ExtensibleDumper(yatiml.Dumper):
        pass
    yatiml.add_to_dumper(ExtensibleDumper, Extensible)
    return ExtensibleDumper
@pytest.fixture
def private_attributes_dumper():
    """Return a yatiml Dumper for the PrivateAttributes test class."""
    class PrivateAttributesDumper(yatiml.Dumper):
        pass
    yatiml.add_to_dumper(PrivateAttributesDumper, PrivateAttributes)
    return PrivateAttributesDumper
@pytest.fixture
def broken_private_attributes_dumper():
    """Return a yatiml Dumper for the BrokenPrivateAttributes test class."""
    class BrokenPrivateAttributesDumper(yatiml.Dumper):
        pass
    yatiml.add_to_dumper(
        BrokenPrivateAttributesDumper,
        BrokenPrivateAttributes)
    return BrokenPrivateAttributesDumper
@pytest.fixture
def complex_private_attributes_dumper():
    """Return a yatiml Dumper for ComplexPrivateAttributes (needs Vector2D too)."""
    class ComplexPrivateAttributesDumper(yatiml.Dumper):
        pass
    yatiml.add_to_dumper(ComplexPrivateAttributesDumper, Vector2D)
    yatiml.add_to_dumper(
        ComplexPrivateAttributesDumper, ComplexPrivateAttributes)
    return ComplexPrivateAttributesDumper
@pytest.fixture
def union_attribute_loader():
    """Return a yatiml Loader with UnionAttribute as the document type."""
    class UnionAttributeLoader(yatiml.Loader):
        pass
    yatiml.add_to_loader(UnionAttributeLoader, UnionAttribute)
    yatiml.set_document_type(UnionAttributeLoader, UnionAttribute)
    return UnionAttributeLoader
@pytest.fixture
def parsed_class_loader():
    """Return a yatiml Loader with Postcode as the document type."""
    class ParsedClassLoader(yatiml.Loader):
        pass
    yatiml.add_to_loader(ParsedClassLoader, Postcode)
    yatiml.set_document_type(ParsedClassLoader, Postcode)
    return ParsedClassLoader
@pytest.fixture
def parsed_class_dumper():
    """Return a yatiml Dumper that can dump Postcode objects."""
    class ParsedClassDumper(yatiml.Dumper):
        pass
    yatiml.add_to_dumper(ParsedClassDumper, Postcode)
    return ParsedClassDumper
@pytest.fixture
def dashed_attribute_loader():
    """Return a yatiml Loader with DashedAttribute as the document type."""
    class DashedAttributeLoader(yatiml.Loader):
        pass
    yatiml.add_to_loader(DashedAttributeLoader, DashedAttribute)
    yatiml.set_document_type(DashedAttributeLoader, DashedAttribute)
    return DashedAttributeLoader
@pytest.fixture
def dashed_attribute_dumper():
    """Return a yatiml Dumper that can dump DashedAttribute objects."""
    class DashedAttributeDumper(yatiml.Dumper):
        pass
    yatiml.add_to_dumper(DashedAttributeDumper, DashedAttribute)
    return DashedAttributeDumper
@pytest.fixture
def yaml_seq_node():
    """A yaml.SequenceNode holding two mapping nodes (item1 and item2)."""
    def scalar(kind, text):
        # Shorthand for a ScalarNode in the core YAML tag namespace.
        return yaml.ScalarNode('tag:yaml.org,2002:' + kind, text)

    def mapping(pairs):
        # Build a MappingNode from (key, value-kind, value-text) triples.
        return yaml.MappingNode(
            'tag:yaml.org,2002:map',
            [(scalar('str', key), scalar(kind, text))
             for key, kind, text in pairs])

    item1 = mapping([
        ('item_id', 'str', 'item1'),
        ('price', 'float', '100.0')])
    item2 = mapping([
        ('item_id', 'str', 'item2'),
        ('price', 'float', '200.0'),
        ('on_sale', 'bool', 'True')])
    return yaml.SequenceNode('tag:yaml.org,2002:seq', [item1, item2])
@pytest.fixture
def yaml_map_node():
    """A yaml.MappingNode mapping item names to price mappings (or a bare price)."""
    def scalar(kind, text):
        # Shorthand for a ScalarNode in the core YAML tag namespace.
        return yaml.ScalarNode('tag:yaml.org,2002:' + kind, text)

    def price_mapping(price_text):
        # A single-entry mapping: {price: <float scalar>}.
        return yaml.MappingNode(
            'tag:yaml.org,2002:map',
            [(scalar('str', 'price'), scalar('float', price_text))])

    entries = [
        (scalar('str', 'item1'), price_mapping('100.0')),
        (scalar('str', 'item2'), price_mapping('200.0')),
        # item3 maps straight to a float scalar rather than a sub-mapping
        (scalar('str', 'item3'), scalar('float', '150.0'))]
    return yaml.MappingNode('tag:yaml.org,2002:map', entries)
@pytest.fixture
def yaml_node(yaml_seq_node, yaml_map_node):
    """A composite mapping node combining scalars with the seq/map fixtures."""
    def scalar(kind, text):
        # Shorthand for a ScalarNode in the core YAML tag namespace.
        return yaml.ScalarNode('tag:yaml.org,2002:' + kind, text)

    entries = [
        (scalar('str', 'attr1'), scalar('int', '42')),
        (scalar('str', 'null_attr'), scalar('null', '')),
        (scalar('str', 'list1'), yaml_seq_node),
        # NOTE(review): this key scalar carries the map tag, not str, in the
        # original fixture; preserved as-is — confirm it is intentional.
        (scalar('map', 'dict1'), yaml_map_node),
        (scalar('str', 'dashed-attr'), scalar('int', '13')),
        (scalar('str', 'undered_attr'), scalar('float', '13.0'))]
    return yaml.MappingNode('tag:yaml.org,2002:map', entries)
@pytest.fixture
def class_node(yaml_node):
    """Wrap the composite mapping node in a yatiml.Node."""
    return yatiml.Node(yaml_node)
@pytest.fixture
def scalar_node():
    """A yatiml.Node wrapping a plain int scalar."""
    ynode = yaml.ScalarNode('tag:yaml.org,2002:int', '42')
    return yatiml.Node(ynode)
@pytest.fixture
def unknown_node(yaml_node):
    """A yatiml.UnknownNode around the composite mapping node."""
    return yatiml.UnknownNode(Recognizer({}), yaml_node)
@pytest.fixture
def unknown_scalar_node():
    """A yatiml.UnknownNode around an int scalar."""
    ynode = yaml.ScalarNode('tag:yaml.org,2002:int', '23')
    return yatiml.UnknownNode(Recognizer({}), ynode)
@pytest.fixture
def unknown_sequence_node():
    """A yatiml.UnknownNode around an empty sequence."""
    ynode = yaml.SequenceNode('tag:yaml.org,2002:seq', [])
    return yatiml.UnknownNode(Recognizer({}), ynode)
@pytest.fixture
def class_node_dup_key():
# A Node wrapping a yaml.SequenceNode representing a sequence of
# mappings with a duplicate key.
tag1 = 'tag:yaml.org,2002:map'
item1_key1_node = yaml.ScalarNode('tag:yaml.org,2002:str', 'item_id')
item1_value1_node = yaml.ScalarNode('tag:yaml.org,2002:str', 'item')
value1 = [(item1_key1_node, item1_value1_node)]
item1 = yaml.MappingNode(tag1, value1)
tag2 | |
from __future__ import annotations
from math import ceil, floor
from random import uniform
from typing import TYPE_CHECKING, Dict, List
from components.starship_system import StarshipSystem
from data_globals import PRECISION_SCANNING_VALUES
from get_config import CONFIG_OBJECT
import colors
from global_functions import scan_assistant
if TYPE_CHECKING:
from ship_class import ShipClass
from nation import Nation
class LifeSupport(StarshipSystem):
    def __init__(self, ship_class:ShipClass) -> None:
        """Create the life support system with a full, uninjured crew.

        Args:
            ship_class: class of the owning ship; supplies the initial
                (maximum) crew complement.
        """
        super().__init__("Life Support:")
        # consecutive turns this system has been inoperative (see on_turn)
        self.turn_without_lifesupport = 0
        self.able_crew = ship_class.max_crew
        self.injured_crew = 0
        # format is Dict[nation of ship that send over boarding party, List[able boarders, injured boarders]]
        self.hostiles_on_board: Dict[Nation, List[int]] = {}
@property
def is_derlict(self):
return self.able_crew < 1 and self.injured_crew < 1
    @property
    def crew_readyness(self):
        """Crew readiness factor based on the actual able/injured counts."""
        return self.caluclate_crew_readyness(
            self.able_crew, self.injured_crew
        )
@property
def has_boarders(self):
if self.hostiles_on_board:
for v in self.hostiles_on_board.values():
if v[0] + v[1] > 0:
return True
return False
    def scan_crew_readyness(self, precision:int):
        """Crew readiness as seen by a sensor scan of the given precision.

        Both crew counts are rounded by scan_assistant before the
        calculation, so low-precision scans give an approximate value.
        """
        return self.caluclate_crew_readyness(
            scan_assistant(self.able_crew, precision), scan_assistant(self.injured_crew, precision)
        )
def caluclate_crew_readyness(self, able_crew:int, injured_crew:int):
if self.starship.is_automated:
return 1.0
total = able_crew + injured_crew * 0.25
return 0.0 if total == 0.0 else (total / self.starship.ship_class.max_crew) * 0.5 + 0.5
    @property
    def get_total_crew(self):
        """Total personnel aboard: able plus injured crew."""
        return self.able_crew + self.injured_crew
def heal_crew(self, percentage_of_injured_crew:float, minimal_crew_to_heal:int):
p = percentage_of_injured_crew * self.get_effective_value
heal_crew = min(self.injured_crew, ceil(self.injured_crew * p) + minimal_crew_to_heal)
self.able_crew+= heal_crew
self.injured_crew-= heal_crew
    def take_control_of_ship(
        self,
        *,
        able_crew:int, injured_crew:int=0, nation:Nation
    ):
        """Replace the crew with a boarding party and switch the ship's nation.

        Also re-enables the ship in its sub-sector, since a captured
        ship is no longer derelict.
        """
        self.able_crew = able_crew
        self.injured_crew = injured_crew
        self.starship.nation = nation
        self.starship.get_sub_sector.enable_ship(self.starship)
def injuries_and_deaths(self, injured:int, killed_outright:int, killed_in_sickbay:int):
self.able_crew -= injured + killed_outright
self.injured_crew += injured - killed_in_sickbay
if self.able_crew < 0:
self.able_crew = 0
if self.injured_crew < 0:
self.injured_crew = 0
if self.is_derlict:
self.starship.get_sub_sector.disable_ship(self.starship)
def on_turn(self):
are_hostiles_on_board = self.hostiles_on_board
life_support_offline_past_critical = False
if not self.is_opperational:
self.turn_without_lifesupport += 1
if self.turn_without_lifesupport > CONFIG_OBJECT.life_support_offline_turn_limit:
life_support_offline_past_critical = True
elif self.turn_without_lifesupport > 0:
self.turn_without_lifesupport -= 1
if are_hostiles_on_board or life_support_offline_past_critical:
defender_is_player = self.starship.is_controllable
in_same_system = self.starship.sector_coords == self.starship.game_data.player.sector_coords
set_of_allied_nations = self.starship.game_data.scenerio.get_set_of_allied_nations
set_of_enemy_nations = self.starship.game_data.scenerio.get_set_of_enemy_nations
ship_is_on_players_side = self.starship.nation in set_of_allied_nations
ship_is_on_enemy_side = self.starship.nation in set_of_enemy_nations
keys_to_remove = []
message_log = self.starship.game_data.engine.message_log
if are_hostiles_on_board:
all_defenders_died = False
for k,v in self.hostiles_on_board.items():
boarders_are_from_player = k == self.starship.game_data.scenerio.your_nation
able_boarders, injured_boarders = v[0], v[1]
total_borders = able_boarders + injured_boarders
if total_borders > 0:
# if the boarding party is on the same side as the ships crew:
if ship_is_on_players_side == (
k in set_of_allied_nations
) or ship_is_on_enemy_side == (
k in set_of_enemy_nations
):
able = v[0]
injured = v[1]
self.able_crew += able
self.injured_crew += injured
if defender_is_player:
message:List[str] = [f"The {k.name_short} force of"]
if able > 0:
message.append(f"{able} able boarders")
if injured > 0:
message.append("and")
if injured > 0:
message.append(f"{injured} boarders")
message.append("intergrated into our crew.")
message_log.add_message(
" ".join(message), colors.cyan
)
elif in_same_system and boarders_are_from_player:
message:List[str] = [f"Our boarding party of"]
if able > 0:
message.append(f"{able} crew menbers")
if injured > 0:
message.append("and")
if injured > 0:
message.append(f"{injured} injured crew")
message.append(f"intergrated into the crew of the {self.starship.proper_name}.")
message_log.add_message(" ".join(message_log), colors.cyan)
v[0] = 0
v[1] = 0
keys_to_remove.append(k)
else:
attacker_firepower = ceil(max(self.able_crew, self.injured_crew * 0.25) * 0.125)
attacker_firepower_vs_able_crew = min(attacker_firepower, self.able_crew)
attacker_firepower_vs_injured_crew = attacker_firepower - attacker_firepower_vs_able_crew
# the defenders won't be abler to concintrate all of their firepower on the boarders
defender_firepower = ceil(
min(total_borders * 2, max(self.able_crew, self.injured_crew * 0.25)) * 0.125
)
defender_firepower_vs_able_boarders = min(defender_firepower, able_boarders)
defender_firepower_vs_injured_boarders = (
attacker_firepower - defender_firepower_vs_able_boarders
)
injured = round(attacker_firepower_vs_able_crew * 0.4)
killed_outright = round(attacker_firepower_vs_able_crew * 0.6)
killed_in_sickbay = attacker_firepower_vs_injured_crew
self.injuries_and_deaths(
injured, killed_outright,
killed_in_sickbay
)
all_defenders_died = self.is_derlict
new_injured_boarders = round(
defender_firepower_vs_able_boarders * 0.4
) - defender_firepower_vs_injured_boarders
injured_boarders += new_injured_boarders
injured_boarders_killed = 0
able_boarders_killed = round(defender_firepower_vs_able_boarders * 0.6)
able_boarders -= able_boarders_killed
# the boarders don't have access to sickbay, so some of them die from their injuries
if injured_boarders > 0:
f = able_boarders * 0.125
injured_boarders_killed += int((f // 1) + (f % 1))
injured_boarders -= injured_boarders_killed
injured_boarders = max(0, injured_boarders)
able_boarders = max(0, able_boarders)
did_defender_suffer_casulties = injured + killed_outright + killed_in_sickbay > 0
did_attacker_suffer_casulties = (
injured_boarders_killed + able_boarders_killed + new_injured_boarders > 0
)
all_boarders_died = injured_boarders + able_boarders <= 0
if all_defenders_died:
self.take_control_of_ship(
able_crew=able_boarders, injured_crew=injured_boarders, nation=k
)
keys_to_remove.append(k)
if defender_is_player and not all_defenders_died:
if did_defender_suffer_casulties or did_attacker_suffer_casulties:
message:List[str] = []
if did_defender_suffer_casulties:
message.append(f"During the fighting with {k.name_short} forces, we suffered")
if injured > 0:
message.append(f"{injured}")
if killed_outright > 0:
message.append(f"injured and")
elif killed_in_sickbay > 0:
message.append(f"injured, as well as")
else:
message.append("injured.")
if killed_outright > 0:
message.append(f"{killed_outright}")
message.append(
f"killed, as well as" if killed_in_sickbay > 0 else "killed."
)
if killed_in_sickbay > 0:
message.append(f"{killed_in_sickbay} deaths of wounded personel.")
if did_attacker_suffer_casulties:
message.append("We were able to")
if new_injured_boarders > 0:
message.append(f"injure {new_injured_boarders} of ")
if able_boarders_killed > 0:
message.append("attackers, and")
elif injured_boarders_killed > 0:
message.append("attackers, in addition")
else:
message.append("attackers.")
if able_boarders_killed > 0:
message.append(f"kill {able_boarders_killed} of")
message.append("them and" if injured_boarders_killed > 0 else "them.")
if injured_boarders_killed > 0:
message.append(f"kill {injured_boarders_killed} injured combatants.")
message_log.add_message(" ".join(message), colors.orange)
if all_boarders_died:
message_log.add_message(
f"The last of the {k.name_long} attackers have been wiped out!",
colors.cyan
)
elif not defender_is_player and in_same_system and boarders_are_from_player:
if did_defender_suffer_casulties or did_attacker_suffer_casulties:
if all_boarders_died:
message_log.add_message(
"We have lost contact with our boarding party.", colors.red
)
else:
message:List[str] = [
f"During the fighting with {self.starship.nation.name_short} forces, our boarding party inflicted"
]
if did_defender_suffer_casulties:
if injured > 0:
message.append(f"{injured}")
if killed_outright > 0:
message.append(f"injured and")
elif killed_in_sickbay > 0:
message.append(f"injured, as well as")
else:
message.append("injured.")
if killed_outright > 0:
message.append(f"{killed_outright}")
message.append(
f"killed, as well as" if killed_in_sickbay > 0 else "killed."
)
if killed_in_sickbay > 0:
message.append(f"{killed_in_sickbay} deaths of wounded personel.")
if did_attacker_suffer_casulties:
message.append(f"The crew of the {self.starship.proper_name} were able to")
if new_injured_boarders > 0:
message.append(f"injure {new_injured_boarders} of ")
if able_boarders_killed > 0:
message.append("our boarding party, and")
elif injured_boarders_killed > 0:
message.append("our boarding party, and they were also able to")
else:
message.append("our boarding party.")
if able_boarders_killed > 0:
message.append(f"kill {able_boarders_killed} of")
message.append("them and" if injured_boarders_killed > 0 else "them.")
if injured_boarders_killed > 0:
message.append(f"kill {injured_boarders_killed} injured combatants.")
message_log.add_message(" ".join(message), colors.orange)
if all_defenders_died:
message_log.add_message(
"Our forces have taken control of the ship!", colors.cyan
)
if all_boarders_died:
keys_to_remove.append(k)
else:
self.hostiles_on_board[k][0] = able_boarders
self.hostiles_on_board[k][1] = injured_boarders
else:
keys_to_remove.append(k)
if life_support_offline_past_critical:
critical_turns = self.turn_without_lifesupport - CONFIG_OBJECT.life_support_offline_turn_limit
_able_crew_deaths = critical_turns * uniform(0.1, 0.12)
_injured_crew_deaths = critical_turns * uniform(0.12, 0.15)
able_crew_deaths = min(round(self.able_crew * _able_crew_deaths), self.able_crew)
injured_crew_deaths = min(round(self.injured_crew * _injured_crew_deaths), self.injured_crew)
total_crew_deaths = able_crew_deaths + injured_crew_deaths
if total_crew_deaths > 0:
self.injuries_and_deaths(0, able_crew_deaths, injured_crew_deaths)
all_defenders_died = self.is_derlict
if defender_is_player:
message:List[str] = ["Our crew report that"]
if able_crew_deaths > 0:
m = "members" if able_crew_deaths > 1 else "member"
message.append(f"{able_crew_deaths} able crew {m}")
if injured_crew_deaths > 0:
message.append("and")
if injured_crew_deaths > 0:
m2 = "members" if injured_crew_deaths > 1 else "member"
message.append(f"{injured_crew_deaths} injured crew {m2}")
message.append("have died from enviromental exposure.")
self.starship.game_data.engine.message_log.add_message(
" ".join(message), colors.orange
)
for k,v in self.hostiles_on_board.items():
boarders_are_from_player = k == self.starship.game_data.scenerio.your_nation
_able_boarder_deaths = critical_turns * uniform(0.12, 0.16)
_injured_boarder_deaths = critical_turns * uniform(0.14, 0.18)
able_boarders, injured_boarders = v[0], v[1]
able_boarder_deaths = min(
round(_able_boarder_deaths * able_boarders), able_boarders
)
injured_boarder_deaths = min(
round(_injured_boarder_deaths * injured_boarders), injured_boarders
)
total_border_deaths = able_boarder_deaths + injured_boarder_deaths
if total_border_deaths > 0:
| |
expression false doesn't run step."""
step = Step({
'name': 'step1',
# key5 will evaluate False because it's a bool and it's False
'run': '{key5}'},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
mock_logger_info.assert_any_call(
"step1 not running because run is False.")
mock_invoke_step.assert_not_called()
# validate all the in params ended up in context as intended
assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_bool_formatting_true(
        mock_invoke_step,
        mock_get_module):
    """Complex step with run formatting expression true runs step."""
    step = Step({
        'name': 'step1',
        # key6 will evaluate True because it's a bool and it's True
        'run': '{key6}'},
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    # step must have been invoked with the full, unchanged context
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_string_true(mock_invoke_step,
                                                         mock_get_module):
    """Complex step with run set to string True runs step."""
    step = Step({
        'name': 'step1',
        # 'True' will evaluate bool True
        'run': 'True'},
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    # step must have been invoked with the full, unchanged context
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_1_true(mock_invoke_step,
                                                    mock_get_module):
    """Complex step with run 1 runs step."""
    step = Step({
        'name': 'step1',
        # 1 will evaluate True because it's an int and 1
        'run': 1},
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    # step must have been invoked with the full, unchanged context
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_99_true(mock_invoke_step,
                                                     mock_get_module):
    """Complex step with run 99 runs step."""
    step = Step({
        'name': 'step1',
        # 99 will evaluate True because it's an int and > 0
        'run': 99
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    # step must have been invoked with the full, unchanged context
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_neg1_true(mock_invoke_step,
                                                       mock_get_module):
    """Complex step with run -1 runs step."""
    step = Step({
        'name': 'step1',
        # -1 will evaluate True because it's an int and != 0
        'run': -1
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    # step must have been invoked with the full, unchanged context
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_with_single_retry(mock_invoke_step,
                                              mock_get_module):
    """Complex step with retry runs step."""
    step = Step({
        'name': 'step1',
        # allow up to 10 attempts of this step
        'retry': {'max': 10}
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    # succeeded on the first attempt, so retryCounter stays at 1
    mock_invoke_step.assert_called_once_with(
        {'key1': 'value1',
         'key2': 'value2',
         'key3': 'value3',
         'key4': [
             {'k4lk1': 'value4',
              'k4lk2': 'value5'},
             {'k4lk1': 'value6',
              'k4lk2': 'value7'}
         ],
         'key5': False,
         'key6': True,
         'key7': 77,
         'retryCounter': 1})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len + 1
    assert context['retryCounter'] == 1
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_with_retries(mock_invoke_step,
                                         mock_get_module):
    """Complex step with retry runs step."""
    step = Step({
        'name': 'step1',
        # max 0 sets no retry limit: keep retrying until success
        'retry': {'max': 0}
    },
        None)
    context = get_test_context()
    original_len = len(context)
    # first invocation raises, second succeeds
    mock_invoke_step.side_effect = [ValueError('arb'), None]
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    assert mock_invoke_step.call_count == 2
    # second (successful) call sees retryCounter == 2
    mock_invoke_step.assert_called_with(
        {'key1': 'value1',
         'key2': 'value2',
         'key3': 'value3',
         'key4': [
             {'k4lk1': 'value4',
              'k4lk2': 'value5'},
             {'k4lk1': 'value6',
              'k4lk2': 'value7'}
         ],
         'key5': False,
         'key6': True,
         'key7': 77,
         'retryCounter': 2})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len + 1
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step', side_effect=ValueError('arb error here'))
def test_run_on_error(mock_invoke_step,
                      mock_get_module):
    """Complex step with swallow false raises error."""
    complex_step_info = CommentedMap({
        'name': 'step1',
        'swallow': 0,
        'onError': {'arb': 'value'}
    })
    # pretend the step came from yaml line 5, col 6 (0-based)
    complex_step_info._yaml_set_line_col(5, 6)
    step = Step(complex_step_info, None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        with pytest.raises(ValueError) as err_info:
            step.run_step(context)
    assert str(err_info.value) == "arb error here"
    # logged position is 1-based, hence line 6 / col 7
    mock_logger_error.assert_called_once_with(
        "Error while running step step1 at pipeline yaml line: 6, col: 7")
    # validate all the in params ended up in context as intended,
    # plus runErrors
    assert len(context) == original_len + 1
    assert context['runErrors'] == [{
        'col': 7,
        'customError': {'arb': 'value'},
        'description': 'arb error here',
        'exception': err_info.value,
        'line': 6,
        'name': 'ValueError',
        'step': step.name,
        'swallowed': False,
    }]
# endregion Step: run_step: run
# region Step: run_step: skip
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_false(mock_invoke_step,
                                                    mock_get_module):
    """Complex step with skip decorator set false will run step."""
    step = Step({
        'name': 'step1',
        'skip': False
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call("done")
    # step must have been invoked with the full, unchanged context
    mock_invoke_step.assert_called_once_with(
        context={'key1': 'value1',
                 'key2': 'value2',
                 'key3': 'value3',
                 'key4': [
                     {'k4lk1': 'value4',
                      'k4lk2': 'value5'},
                     {'k4lk1': 'value6',
                      'k4lk2': 'value7'}
                 ],
                 'key5': False,
                 'key6': True,
                 'key7': 77})
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_true(mock_invoke_step,
                                                   mock_get_module):
    """Complex step with skip decorator set true doesn't run step."""
    step = Step({
        'name': 'step1',
        'skip': True
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call(
        "step1 not running because skip is True.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_str_formatting_false(
        mock_invoke_step,
        mock_get_module):
    """Complex step with skip formatting expression true doesn't run step."""
    step = Step({
        'name': 'step1',
        # key6 is bool True, so the formatting expression evaluates True
        'skip': '{key6}'
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call(
        "step1 not running because skip is True.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_str_true(mock_invoke_step,
                                                       mock_get_module):
    """Complex step with skip set to string TRUE doesn't run step."""
    step = Step({
        'name': 'step1',
        # skip evaluates True because it's a string and TRUE parses to True.
        'skip': 'TRUE'
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call(
        "step1 not running because skip is True.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_str_lower_true(mock_invoke_step,
                                                             mock_get_module):
    """Complex step with skip set to string true doesn't run step."""
    step = Step({
        'name': 'step1',
        # skip will evaluate true because it's a string and true is True.
        'skip': 'true'
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call(
        "step1 not running because skip is True.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_and_skip_bool_formatting_false(
        mock_invoke_step,
        mock_get_module):
    """Complex step with run doesn't run step, evals before skip."""
    step = Step({
        'name': 'step1',
        # key5 will evaluate False because it's a bool and it's False
        'run': '{key5}',
        'skip': True
    },
        None)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    # run is evaluated first, so its message wins over skip's
    mock_logger_info.assert_any_call(
        "step1 not running because run is False.")
    mock_invoke_step.assert_not_called()
    # validate all the in params ended up in context as intended
    assert len(context) == original_len
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_bool_formatting_false(
mock_invoke_step,
mock_get_module):
"""Complex step with skip formatting expression true runs step."""
step = Step({
'name': 'step1',
# key5 will evaluate False because it's a bool and it's False
'skip': '{key5}'
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
step.run_step(context)
mock_logger_debug.assert_any_call("done")
mock_invoke_step.assert_called_once_with(
context={'key1': | |
created_collection.create_item(body=document_definition)
self.assertEqual(created_document.get('id'), document_definition.get('id'))
self.assertEqual(created_document.get('key'), document_definition.get('key'))
# read document
read_document = await created_collection.read_item(
item=created_document.get('id'),
partition_key=created_document.get('id')
)
self.assertEqual(read_document.get('id'), created_document.get('id'))
self.assertEqual(read_document.get('key'), created_document.get('key'))
# Read document feed doesn't require partitionKey as it's always a cross partition query
documentlist = [document async for document in created_collection.read_all_items()]
self.assertEqual(1, len(documentlist))
# replace document
document_definition['key'] = 'new value'
replaced_document = await created_collection.replace_item(
item=read_document,
body=document_definition
)
self.assertEqual(replaced_document.get('key'), document_definition.get('key'))
# upsert document(create scenario)
document_definition['id'] = 'document2'
document_definition['key'] = 'value2'
upserted_document = await created_collection.upsert_item(body=document_definition)
self.assertEqual(upserted_document.get('id'), document_definition.get('id'))
self.assertEqual(upserted_document.get('key'), document_definition.get('key'))
documentlist = [document async for document in created_collection.read_all_items()]
self.assertEqual(2, len(documentlist))
# delete document
await created_collection.delete_item(item=upserted_document, partition_key=upserted_document.get('id'))
# query document on the partition key specified in the predicate will pass even without setting enableCrossPartitionQuery or passing in the partitionKey value
documentlist = [document async for document in created_collection.query_items(
query='SELECT * FROM root r WHERE r.id=\'' + replaced_document.get('id') + '\'' # nosec
)]
self.assertEqual(1, len(documentlist))
# query document on any property other than partitionKey will fail without setting enableCrossPartitionQuery or passing in the partitionKey value
try:
[document async for document in created_collection.query_items(
query='SELECT * FROM root r WHERE r.key=\'' + replaced_document.get('key') + '\'' # nosec
)]
except Exception:
pass
# cross partition query
documentlist = [document async for document in created_collection.query_items(
query='SELECT * FROM root r WHERE r.key=\'' + replaced_document.get('key') + '\'', # nosec
enable_cross_partition_query=True
)]
self.assertEqual(1, len(documentlist))
# query document by providing the partitionKey value
documentlist = [document async for document in created_collection.query_items(
query='SELECT * FROM root r WHERE r.key=\'' + replaced_document.get('key') + '\'', # nosec
partition_key=replaced_document.get('id')
)]
self.assertEqual(1, len(documentlist))
async def test_partitioned_collection_permissions(self):
    """Verify resource-token permissions are scoped per collection and per
    partition-key value.

    Creates an 'all' and a 'read' collection, grants a user All / Read
    permissions respectively (both restricted to partition key value 1 via
    resourcePartitionKey), then exercises create/delete through a client
    that is authorized only by those resource tokens.
    """
    created_db = self.databaseForTest
    collection_id = 'test_partitioned_collection_permissions all collection' + str(uuid.uuid4())
    all_collection = await created_db.create_container(
        id=collection_id,
        partition_key=PartitionKey(path='/key', kind=documents.PartitionKind.Hash)
    )
    collection_id = 'test_partitioned_collection_permissions read collection' + str(uuid.uuid4())
    read_collection = await created_db.create_container(
        id=collection_id,
        partition_key=PartitionKey(path='/key', kind=documents.PartitionKind.Hash)
    )
    user = await created_db.create_user(body={'id': 'user' + str(uuid.uuid4())})
    # All permission, limited to partition key value 1
    permission_definition = {
        'id': 'all permission',
        'permissionMode': documents.PermissionMode.All,
        'resource': all_collection.container_link,
        'resourcePartitionKey': [1]
    }
    all_permission = await user.create_permission(body=permission_definition)
    # Read-only permission, limited to partition key value 1
    permission_definition = {
        'id': 'read permission',
        'permissionMode': documents.PermissionMode.Read,
        'resource': read_collection.container_link,
        'resourcePartitionKey': [1]
    }
    read_permission = await user.create_permission(body=permission_definition)
    resource_tokens = {}
    # storing the resource tokens based on Resource IDs
    resource_tokens["dbs/" + created_db.id + "/colls/" + all_collection.id] = (all_permission.properties['_token'])
    resource_tokens["dbs/" + created_db.id + "/colls/" + read_collection.id] = (read_permission.properties['_token'])
    async with CosmosClient(
            CRUDTests.host, resource_tokens, consistency_level="Session", connection_policy=CRUDTests.connectionPolicy) as restricted_client:
        print('Async Initialization')
        document_definition = {'id': 'document1',
                               'key': 1
                               }
        # route the existing container proxies through the restricted client
        all_collection.client_connection = restricted_client.client_connection
        read_collection.client_connection = restricted_client.client_connection
        # Create document in all_collection should succeed since the partitionKey is 1 which is what specified as resourcePartitionKey in permission object and it has all permissions
        created_document = await all_collection.create_item(body=document_definition)
        # Create document in read_collection should fail since it has only read permissions for this collection
        await self.__AssertHTTPFailureWithStatus(
            StatusCodes.FORBIDDEN,
            read_collection.create_item,
            document_definition)
        document_definition['key'] = 2
        # Create document should fail since the partitionKey is 2 which is different that what is specified as resourcePartitionKey in permission object
        await self.__AssertHTTPFailureWithStatus(
            StatusCodes.FORBIDDEN,
            all_collection.create_item,
            document_definition)
        document_definition['key'] = 1
        # Delete document should succeed since the partitionKey is 1 which is what specified as resourcePartitionKey in permission object
        created_document = await all_collection.delete_item(item=created_document['id'],
                                                            partition_key=document_definition['key'])
        # Delete document in read_collection should fail since it has only read permissions for this collection
        await self.__AssertHTTPFailureWithStatus(
            StatusCodes.FORBIDDEN,
            read_collection.delete_item,
            document_definition['id'],
            document_definition['id']
        )
    # cleanup through the original (fully-privileged) client
    await created_db.delete_container(all_collection)
    await created_db.delete_container(read_collection)
async def test_partitioned_collection_execute_stored_procedure(self):
    """Execute a stored procedure against a single partition.

    The sproc body hard-codes ``pk: 2`` on the document it creates, so
    executing with ``partition_key=2`` succeeds (returns 1) while any other
    partition key value yields a bad request (400).
    """
    # NOTE(review): removed unused local `created_db = self.databaseForTest`.
    created_collection = await self.databaseForTest.create_container(
        test_config._test_config.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_PARTITION_KEY,
        PartitionKey(path="/pk"))
    sproc = {
        'id': 'storedProcedure' + str(uuid.uuid4()),
        'body': (
            'function () {' +
            '   var client = getContext().getCollection();' +
            '   client.createDocument(client.getSelfLink(), { id: \'testDoc\', pk : 2}, {}, function(err, docCreated, options) { ' +
            '   if(err) throw new Error(\'Error while creating document: \' + err.message);' +
            '   else {' +
            '   getContext().getResponse().setBody(1);' +
            '        }' +
            '   });}')
    }
    created_sproc = await created_collection.scripts.create_stored_procedure(body=sproc)
    # Partition key value same as what is specified in the stored procedure body
    result = await created_collection.scripts.execute_stored_procedure(sproc=created_sproc['id'], partition_key=2)
    self.assertEqual(result, 1)
    # Partition key value different than what is specified in the stored procedure body will cause a bad request(400) error
    await self.__AssertHTTPFailureWithStatus(
        StatusCodes.BAD_REQUEST,
        created_collection.scripts.execute_stored_procedure,
        created_sproc['id'],
        3)
async def test_partitioned_collection_partition_key_value_types(self):
    """Create documents whose partition key value exercises every JSON type
    accepted by the service: null, undefined (field absent), bool, string,
    int, and float; then confirm a document without 'id' is rejected.
    """
    created_db = self.databaseForTest
    created_collection = await created_db.create_container(
        id='test_partitioned_collection_partition_key_value_types ' + str(uuid.uuid4()),
        partition_key=PartitionKey(path='/pk', kind='Hash')
    )
    document_definition = {'id': 'document1' + str(uuid.uuid4()),
                           'pk': None,
                           'spam': 'eggs'}
    # create document with partitionKey set as None here
    await created_collection.create_item(body=document_definition)
    document_definition = {'id': 'document1' + str(uuid.uuid4()),
                           'spam': 'eggs'}
    # create document with partitionKey set as Undefined here (the /pk field is absent)
    await created_collection.create_item(body=document_definition)
    document_definition = {'id': 'document1' + str(uuid.uuid4()),
                           'pk': True,
                           'spam': 'eggs'}
    # create document with bool partitionKey
    await created_collection.create_item(body=document_definition)
    document_definition = {'id': 'document1' + str(uuid.uuid4()),
                           'pk': 'value',
                           'spam': 'eggs'}
    # create document with string partitionKey
    await created_collection.create_item(body=document_definition)
    document_definition = {'id': 'document1' + str(uuid.uuid4()),
                           'pk': 100,
                           'spam': 'eggs'}
    # create document with int partitionKey
    await created_collection.create_item(body=document_definition)
    document_definition = {'id': 'document1' + str(uuid.uuid4()),
                           'pk': 10.50,
                           'spam': 'eggs'}
    # create document with float partitionKey
    await created_collection.create_item(body=document_definition)
    document_definition = {'name': 'sample document',
                           'spam': 'eggs',
                           'pk': 'value'}
    # Should fail (400): no 'id' supplied and automatic id generation is not
    # enabled on this call.
    await self.__AssertHTTPFailureWithStatus(
        StatusCodes.BAD_REQUEST,
        created_collection.create_item,
        document_definition
    )
    await created_db.delete_container(created_collection)
async def test_partitioned_collection_conflict_crud_and_query(self):
    """Exercise conflict read/delete/query paths on a multi-partition
    container. With no replication conflicts present, read/delete return
    404 and every query yields an empty result set.
    """
    # NOTE(review): removed unused locals `created_db` (the container is
    # created directly off self.databaseForTest) and `options` (a leftover
    # of the sync API's dict-style request options).
    created_collection = await self.databaseForTest.create_container(
        test_config._test_config.TEST_COLLECTION_MULTI_PARTITION)
    conflict_definition = {'id': 'new conflict',
                           'resourceId': 'doc1',
                           'operationType': 'create',
                           'resourceType': 'document'
                           }
    # read conflict here will return resource not found(404) since there is no conflict here
    await self.__AssertHTTPFailureWithStatus(
        StatusCodes.NOT_FOUND,
        created_collection.read_conflict,
        conflict_definition['id'],
        conflict_definition['id']
    )
    # Read conflict feed doesn't require a partitionKey to be specified as it's a cross partition operation
    conflictlist = [conflict async for conflict in created_collection.list_conflicts()]
    self.assertEqual(0, len(conflictlist))
    # delete conflict here will return resource not found(404) since there is no conflict here
    await self.__AssertHTTPFailureWithStatus(
        StatusCodes.NOT_FOUND,
        created_collection.delete_conflict,
        conflict_definition['id'],
        conflict_definition['id']
    )
    # query conflicts on any property other than partitionKey will fail without setting
    # enableCrossPartitionQuery or passing in the partitionKey value
    try:
        [conflict async for conflict in created_collection.query_conflicts(
            query='SELECT * FROM root r WHERE r.resourceType=\'' + conflict_definition.get(  # nosec
                'resourceType') + '\''
        )]
    except Exception:
        pass
    # cross partition query
    conflictlist = [conflict async for conflict in created_collection.query_conflicts(
        query='SELECT * FROM root r WHERE r.resourceType=\'' + conflict_definition.get('resourceType') + '\'',
        # nosec
        enable_cross_partition_query=True
    )]
    self.assertEqual(0, len(conflictlist))
    # query conflicts by providing the partitionKey value
    conflictlist = [conflict async for conflict in created_collection.query_conflicts(
        query='SELECT * FROM root r WHERE r.resourceType=\'' + conflict_definition.get('resourceType') + '\'',
        # nosec
        partition_key=conflict_definition['id']
    )]
    self.assertEqual(0, len(conflictlist))
async def test_document_crud(self):
# create database
created_db = self.databaseForTest
# create collection
created_collection = await self.databaseForTest.create_container(test_config._test_config.TEST_COLLECTION_MULTI_PARTITION)
# read documents
documents = [document async for document in created_collection.read_all_items()]
# create a document
before_create_documents_count = len(documents)
# create a document with auto ID generation
document_definition = {'name': 'sample document',
'spam': 'eggs',
'key': 'value'}
created_document = await created_collection.create_item(body=document_definition, enable_automatic_id_generation=True)
self.assertEqual(created_document.get('name'),
document_definition['name'])
document_definition = {'name': 'sample document',
'spam': 'eggs',
'key': 'value',
'id': str(uuid.uuid4())}
created_document = await created_collection.create_item(body=document_definition)
self.assertEqual(created_document.get('name'),
document_definition['name'])
self.assertEqual(created_document.get('id'),
document_definition['id'])
# duplicated documents are not allowed when 'id' is provided.
duplicated_definition_with_id = document_definition.copy()
await self.__AssertHTTPFailureWithStatus(StatusCodes.CONFLICT,
created_collection.create_item,
duplicated_definition_with_id)
# read documents after creation
documents = [document async for document in created_collection.read_all_items()]
self.assertEqual(
len(documents),
before_create_documents_count + 2,
'create should increase the number of documents')
# query documents
documents = [document async for document in created_collection.query_items(
query='SELECT * FROM root r WHERE r.name=@name',
parameter=[
{'name': '@name', 'value': document_definition['name']}
]
, enable_cross_partition_query=True
)]
self.assertTrue(documents)
documents = [document async for document in created_collection.query_items(
query='SELECT * FROM root r WHERE r.name=@name',
parameter=[
{'name': '@name', 'value': document_definition['name']}
]
, enable_cross_partition_query=True,
enable_scan_in_query=True
)]
self.assertTrue(documents)
# replace document.
created_document['name'] = 'replaced document'
created_document['spam'] = 'not eggs'
old_etag = created_document['_etag']
replaced_document = await created_collection.replace_item(
item=created_document['id'],
body=created_document
)
self.assertEqual(replaced_document['name'],
'replaced document',
'document id property should change')
self.assertEqual(replaced_document['spam'],
'not eggs',
'property should have changed')
self.assertEqual(created_document['id'],
replaced_document['id'],
'document id should stay the same')
# replace document based on condition
replaced_document['name'] = 'replaced document based on condition'
replaced_document['spam'] = 'new spam field'
# should fail for stale etag
await self.__AssertHTTPFailureWithStatus(
StatusCodes.PRECONDITION_FAILED,
created_collection.replace_item,
replaced_document['id'],
replaced_document,
if_match=old_etag,
)
# should fail if only etag specified
with self.assertRaises(ValueError):
await created_collection.replace_item(
etag=replaced_document['_etag'],
item=replaced_document['id'],
body=replaced_document
)
# should fail if only match condition specified
with self.assertRaises(ValueError):
await created_collection.replace_item(
match_condition=MatchConditions.IfNotModified,
item=replaced_document['id'],
body=replaced_document
)
with self.assertRaises(ValueError):
await created_collection.replace_item(
match_condition=MatchConditions.IfModified,
item=replaced_document['id'],
body=replaced_document
)
# should | |
from arkouda.client import generic_msg, verbose
from arkouda.pdarrayclass import pdarray, create_pdarray
from arkouda.pdarraycreation import zeros, array
from arkouda.sorting import argsort
from arkouda.strings import Strings
# Public API of this module.
# NOTE(review): removed two `global verbose` statements that surrounded
# __all__ — `global` at module scope is a no-op, and the statement was
# duplicated.
__all__ = ["unique", "in1d", "concatenate", "union1d", "intersect1d",
           "setdiff1d", "setxor1d"]
def unique(pda, return_counts=False):
    """
    Find the unique elements of an array.

    Returns the unique elements of an array, sorted if the values are integers.
    There is an optional output in addition to the unique elements: the number
    of times each unique value comes up in the input array.

    Parameters
    ----------
    pda : pdarray or Strings or Categorical
        Input array.
    return_counts : bool, optional
        If True, also return the number of times each unique item appears
        in `pda`.

    Returns
    -------
    unique : pdarray or Strings
        The unique values. If input dtype is int64, return values will be sorted.
    unique_counts : pdarray, optional
        The number of times each of the unique values comes up in the
        original array. Only provided if `return_counts` is True.

    Raises
    ------
    TypeError
        If `pda` is not a pdarray, Strings, or an object with its own
        ``unique`` method.

    Notes
    -----
    For integer arrays, this function checks to see whether `pda` is sorted and, if so,
    whether it is already unique. This step can save considerable computation.
    Otherwise, this function will sort `pda`.

    Examples
    --------
    >>> A = ak.array([3, 2, 1, 1, 2, 3])
    >>> ak.unique(A)
    array([1, 2, 3])
    """
    # Objects such as Categorical implement their own unique(); delegate.
    if hasattr(pda, 'unique'):
        return pda.unique()
    elif isinstance(pda, pdarray):
        repMsg = generic_msg("unique {} {} {}".format(pda.objtype, pda.name, return_counts))
        if return_counts:
            # server reply is two '+'-separated array identifiers: values, counts
            vc = repMsg.split("+")
            if verbose: print(vc)
            return create_pdarray(vc[0]), create_pdarray(vc[1])
        else:
            return create_pdarray(repMsg)
    elif isinstance(pda, Strings):
        # a Strings object is addressed on the server by its offsets/bytes pair
        name = '{}+{}'.format(pda.offsets.name, pda.bytes.name)
        repMsg = generic_msg("unique {} {} {}".format(pda.objtype, name, return_counts))
        vc = repMsg.split('+')
        if verbose: print(vc)
        if return_counts:
            return Strings(vc[0], vc[1]), create_pdarray(vc[2])
        else:
            return Strings(vc[0], vc[1])
    else:
        raise TypeError("must be pdarray or Strings {}".format(pda))
def in1d(pda1, pda2, invert=False):
    """
    Test whether each element of a 1-D array is also present in a second array.

    Returns a boolean array the same length as `pda1` that is True
    where an element of `pda1` is in `pda2` and False otherwise.

    Parameters
    ----------
    pda1 : pdarray or Strings or Categorical
        Input array.
    pda2 : pdarray or Strings
        The values against which to test each value of `pda1`. Must be the
        same type as `pda1`.
    invert : bool, optional
        If True, the values in the returned array are inverted (that is,
        False where an element of `pda1` is in `pda2` and True otherwise).
        Default is False. ``ak.in1d(a, b, invert=True)`` is equivalent
        to (but is faster than) ``~ak.in1d(a, b)``.

    Returns
    -------
    pdarray, bool
        The values `pda1[in1d]` are in `pda2`.

    See Also
    --------
    unique, intersect1d, union1d

    Notes
    -----
    `in1d` can be considered as an element-wise function version of the
    python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is logically
    equivalent to ``ak.array([item in b for item in a])``, but is much
    faster and scales to arbitrarily large ``a``.
    """
    # Objects such as Categorical supply their own in1d; delegate to it.
    if hasattr(pda1, 'in1d'):
        return pda1.in1d(pda2)
    # Numeric arrays: one server-side message does the membership test.
    if isinstance(pda1, pdarray) and isinstance(pda2, pdarray):
        reply = generic_msg("in1d {} {} {}".format(pda1.name, pda2.name, invert))
        return create_pdarray(reply)
    # Strings: each Strings object is addressed by its offsets/bytes pair.
    if isinstance(pda1, Strings) and isinstance(pda2, Strings):
        fields = (pda1.objtype, pda1.offsets.name, pda1.bytes.name,
                  pda2.objtype, pda2.offsets.name, pda2.bytes.name,
                  invert)
        reply = generic_msg("segmentedIn1d {} {} {} {} {} {} {}".format(*fields))
        return create_pdarray(reply)
    raise TypeError("must be pdarray {} or {}".format(pda1, pda2))
def concatenate(arrays):
    """
    Concatenate an iterable of ``pdarray`` or ``Strings`` objects into one array.

    Parameters
    ----------
    arrays : iterable of ``pdarray`` or Strings or Categorical
        The arrays to concatenate. Must all have same dtype.

    Returns
    -------
    pdarray or Strings
        Single array containing all values, in original order.

    Raises
    ------
    ValueError
        If `arrays` is empty, contains an object that is neither pdarray
        nor Strings, or the pdarrays have mismatched dtypes.
    NotImplementedError
        If the first array's object type is not supported.

    Examples
    --------
    >>> ak.concatenate([ak.array([1, 2, 3]), ak.array([4, 5, 6])])
    array([1, 2, 3, 4, 5, 6])
    """
    size = 0
    objtype = None
    dtype = None
    names = []
    if len(arrays) < 1:
        raise ValueError("concatenate called on empty iterable")
    if len(arrays) == 1:
        # nothing to concatenate
        return arrays[0]
    if hasattr(arrays[0], 'concatenate'):
        # objects such as Categorical implement their own concatenate()
        return arrays[0].concatenate(arrays[1:])
    for a in arrays:
        if not isinstance(a, pdarray) and not isinstance(a, Strings):
            raise ValueError("Argument must be an iterable of pdarrays or Strings")
        if objtype is None:  # BUGFIX: was `objtype == None`
            objtype = a.objtype
        if objtype == "pdarray":
            if dtype is None:  # BUGFIX: was `dtype == None`
                dtype = a.dtype
            elif dtype != a.dtype:
                raise ValueError("All pdarrays must have same dtype")
            names.append(a.name)
        elif objtype == "str":
            names.append('{}+{}'.format(a.offsets.name, a.bytes.name))
        else:
            raise NotImplementedError("concatenate not implemented for object type {}".format(objtype))
        size += a.size
    if size == 0:
        # All inputs are empty, so the first (already empty) array is a valid
        # result for both pdarray and str inputs.
        # BUGFIX: the pdarray branch previously returned zeros_like(arrays[0]),
        # but zeros_like is not imported in this module and raised NameError.
        return arrays[0]
    repMsg = generic_msg("concatenate {} {} {}".format(len(arrays), objtype, ' '.join(names)))
    if objtype == "pdarray":
        return create_pdarray(repMsg)
    elif objtype == "str":
        return Strings(*(repMsg.split('+')))
# (A1 | A2) Set Union: elements present in either array (or both)
def union1d(pda1, pda2):
    """
    Find the union of two arrays.

    Return the unique, sorted array of values that are in either of the two
    input arrays.

    Parameters
    ----------
    pda1 : pdarray
        Input array
    pda2 : pdarray
        Input array

    Returns
    -------
    pdarray
        Unique, sorted union of the input arrays.

    See Also
    --------
    intersect1d, unique

    Examples
    --------
    >>> ak.union1d([-1, 0, 1], [-2, 0, 2])
    array([-2, -1, 0, 1, 2])
    """
    if not (isinstance(pda1, pdarray) and isinstance(pda2, pdarray)):
        raise TypeError("must be pdarray {} or {}".format(pda1,pda2))
    # with one side empty, the union is simply the other operand
    if pda1.size == 0:
        return pda2
    if pda2.size == 0:
        return pda1
    # dedupe each side first so the concatenated intermediate stays small
    return unique(concatenate((unique(pda1), unique(pda2))))
# (A1 & A2) Set Intersection: elements present in both arrays
def intersect1d(pda1, pda2, assume_unique=False):
    """
    Find the intersection of two arrays.

    Return the sorted, unique values that are in both of the input arrays.

    Parameters
    ----------
    pda1 : pdarray
        Input array
    pda2 : pdarray
        Input array
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.

    Returns
    -------
    pdarray
        Sorted 1D array of common and unique elements.

    See Also
    --------
    unique, union1d

    Examples
    --------
    >>> ak.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
    array([1, 3])
    """
    if not (isinstance(pda1, pdarray) and isinstance(pda2, pdarray)):
        raise TypeError("must be pdarray {} or {}".format(pda1,pda2))
    # an empty operand makes the whole intersection empty
    if pda1.size == 0:
        return pda1
    if pda2.size == 0:
        return pda2
    if not assume_unique:
        pda1 = unique(pda1)
        pda2 = unique(pda2)
    # After sorting the combined (deduped) values, any value present in both
    # inputs appears exactly twice, i.e. in adjacent positions.
    combined = concatenate((pda1, pda2))
    combined = combined[argsort(combined)]
    adjacent_dupes = combined[1:] == combined[:-1]
    return combined[:-1][adjacent_dupes]
# (A1 - A2) Set Difference: elements in the first array but not the second
def setdiff1d(pda1, pda2, assume_unique=False):
    """
    Find the set difference of two arrays.

    Return the sorted, unique values in `pda1` that are not in `pda2`.

    Parameters
    ----------
    pda1 : pdarray
        Input array.
    pda2 : pdarray
        Input comparison array.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.

    Returns
    -------
    pdarray
        Sorted 1D array of values in `pda1` that are not in `pda2`.

    See Also
    --------
    unique, setxor1d

    Examples
    --------
    >>> a = ak.array([1, 2, 3, 2, 4, 1])
    >>> b = ak.array([3, 4, 5, 6])
    >>> ak.setdiff1d(a, b)
    array([1, 2])
    """
    if not (isinstance(pda1, pdarray) and isinstance(pda2, pdarray)):
        raise TypeError("must be pdarray {} or {}".format(pda1,pda2))
    if pda1.size == 0:
        return pda1  # empty minus anything is still empty
    if pda2.size == 0:
        return pda1  # subtracting nothing returns the original array
    if not assume_unique:
        pda1 = unique(pda1)
        pda2 = unique(pda2)
    keep = in1d(pda1, pda2, invert=True)
    return pda1[keep]
# (A1 ^ A2) Set Symmetric Difference: elements are not in the intersection
def setxor1d(pda1, pda2, assume_unique=False):
"""
Find the set exclusive-or (symmetric difference) of two arrays.
Return the sorted, unique values that are in only one (not both) of the
input arrays.
Parameters
----------
pda1 : pdarray
Input array.
pda2 : pdarray
Input array.
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
Returns
-------
pdarray
Sorted 1D array of unique values that are in only one of the input
arrays.
Examples
--------
>>> a = ak.array([1, 2, 3, 2, 4])
>>> b = ak.array([2, 3, 5, 7, 5])
>>> ak.setxor1d(a,b)
array([1, 4, 5, 7])
"""
if isinstance(pda1, pdarray) and isinstance(pda2, pdarray):
if pda1.size == | |
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import typing
import json
import azlmbr.scene as sceneApi
from enum import IntEnum
# Wraps the AZ.SceneAPI.Containers.SceneGraph.NodeIndex internal class
class SceneGraphNodeIndex:
    """Pythonic facade over a SceneGraph node-index handle.

    Pure delegation: every method forwards to the wrapped native object.
    """
    def __init__(self, scene_graph_node_index) -> None:
        # the wrapped native NodeIndex instance
        self.nodeIndex = scene_graph_node_index
    def as_number(self):
        """Return the numeric form of this node index."""
        return self.nodeIndex.AsNumber()
    def distance(self, other):
        """Return the distance between this index and ``other``."""
        return self.nodeIndex.Distance(other)
    def is_valid(self) -> bool:
        """Return True if the wrapped index refers to a valid node."""
        return self.nodeIndex.IsValid()
    def equal(self, other) -> bool:
        """Return True if this index and ``other`` compare equal."""
        return self.nodeIndex.Equal(other)
# Wraps AZ.SceneAPI.Containers.SceneGraph.Name internal class
class SceneGraphName:
    """Pythonic facade over a SceneGraph node name (pure delegation)."""
    def __init__(self, scene_graph_name) -> None:
        # the wrapped native Name instance
        self.name = scene_graph_name
    def get_path(self) -> str:
        """Return the full path of the node."""
        return self.name.GetPath()
    def get_name(self) -> str:
        """Return the short (leaf) name of the node."""
        return self.name.GetName()
# Wraps AZ.SceneAPI.Containers.SceneGraph class
class SceneGraph:
    """Pythonic facade over a scene graph (pure delegation to the native
    instance, plus two classmethods that call the sceneApi module)."""
    def __init__(self, scene_graph_instance) -> None:
        # the wrapped native SceneGraph instance
        self.sceneGraph = scene_graph_instance
    @classmethod
    def is_valid_name(cls, name):
        """Return whether `name` is a valid scene-graph node name."""
        return sceneApi.SceneGraph_IsValidName(name)
    @classmethod
    def node_seperation_character(cls):
        """Return the character that separates node names in a path.

        NOTE(review): 'seperation' is a typo, kept because renaming would
        break the public API (the native call is spelled the same way).
        """
        return sceneApi.SceneGraph_GetNodeSeperationCharacter()
    def get_node_name(self, node):
        """Return the name object for `node`."""
        return self.sceneGraph.GetNodeName(node)
    def get_root(self):
        """Return the root node index of the graph."""
        return self.sceneGraph.GetRoot()
    def has_node_content(self, node) -> bool:
        """Return True if `node` carries content data."""
        return self.sceneGraph.HasNodeContent(node)
    def has_node_sibling(self, node) -> bool:
        """Return True if `node` has a sibling."""
        return self.sceneGraph.HasNodeSibling(node)
    def has_node_child(self, node) -> bool:
        """Return True if `node` has a child."""
        return self.sceneGraph.HasNodeChild(node)
    def has_node_parent(self, node) -> bool:
        """Return True if `node` has a parent."""
        return self.sceneGraph.HasNodeParent(node)
    def is_node_end_point(self, node) -> bool:
        """Return True if `node` is an end point of the graph."""
        return self.sceneGraph.IsNodeEndPoint(node)
    def get_node_parent(self, node):
        """Return the parent index of `node`."""
        return self.sceneGraph.GetNodeParent(node)
    def get_node_sibling(self, node):
        """Return the sibling index of `node`."""
        return self.sceneGraph.GetNodeSibling(node)
    def get_node_child(self, node):
        """Return the first-child index of `node`."""
        return self.sceneGraph.GetNodeChild(node)
    def get_node_count(self):
        """Return the total number of nodes in the graph."""
        return self.sceneGraph.GetNodeCount()
    def find_with_path(self, path):
        """Find a node index by absolute `path`."""
        return self.sceneGraph.FindWithPath(path)
    def find_with_root_and_path(self, root, path):
        """Find a node index by `path` relative to `root`."""
        return self.sceneGraph.FindWithRootAndPath(root, path)
    def get_node_content(self, node):
        """Return the content payload attached to `node`."""
        return self.sceneGraph.GetNodeContent(node)
class ColorChannel(IntEnum):
    """Index of a color channel within an RGBA vertex-color stream."""
    RED = 0    # Red color channel
    GREEN = 1  # Green color channel
    BLUE = 2   # Blue color channel
    ALPHA = 3  # Alpha color channel
class TangentSpaceSource(IntEnum):
    """Where tangents and bitangents come from."""
    SCENE = 0             # extract directly from the source scene file
    MIKKT_GENERATION = 1  # generate with the MikkT algorithm
class TangentSpaceMethod(IntEnum):
    """Algorithm variant used when generating tangent spaces."""
    # Tangents/bitangents with true magnitudes (usable for relief-mapping
    # effects). The 'real' bitangent may not be perpendicular to the tangent,
    # but both are perpendicular to the vertex normal.
    TSPACE = 0
    # Unit-vector tangents/bitangents per pixel/vertex — sufficient for
    # basic normal mapping.
    TSPACE_BASIC = 1
class PrimitiveShape(IntEnum):
    """Primitive collider shape choices."""
    BEST_FIT = 0  # let the algorithm pick whichever shape fits best
    SPHERE = 1    # sphere shape
    BOX = 2       # box shape
    CAPSULE = 3   # capsule shape
class DecompositionMode(IntEnum):
    """Strategy for approximate convex decomposition."""
    VOXEL = 0        # voxel-based approximate convex decomposition
    TETRAHEDRON = 1  # tetrahedron-based approximate convex decomposition
# Contains a dictionary to contain and export AZ.SceneAPI.Containers.SceneManifest
class SceneManifest:
def __init__(self):
self.manifest = {'values': []}
def add_mesh_group(self, name: str) -> dict:
"""Adds a Mesh Group to the scene manifest.
Parameters
----------
name :
Name of the mesh group. This will become a file on disk and be usable as a Mesh in the editor.
Returns
-------
dict
Newly created mesh group.
"""
mesh_group = {
'$type': '{07B356B7-3635-40B5-878A-FAC4EFD5AD86} MeshGroup',
'name': name,
'nodeSelectionList': {'selectedNodes': [], 'unselectedNodes': []},
'rules': {'rules': [{'$type': 'MaterialRule'}]}
}
self.manifest['values'].append(mesh_group)
return mesh_group
def add_prefab_group(self, name: str, id: str, json: dict) -> dict:
"""Adds a Prefab Group to the scene manifest. This will become a file on disk and be usable as a ProceduralPrefab in the editor.
Parameters
----------
name :
Name of the prefab.
id :
Unique ID for this prefab group.
json :
The prefab template data.
Returns
-------
dict
The newly created Prefab group
"""
prefab_group = {
'$type': '{99FE3C6F-5B55-4D8B-8013-2708010EC715} PrefabGroup',
'name': name,
'id': id,
'prefabDomData': json
}
self.manifest['values'].append(prefab_group)
return prefab_group
def add_actor_group(self, group) -> dict:
groupDict = group.to_dict()
self.manifest['values'].append(groupDict)
return groupDict
def add_motion_group(self, group) -> dict:
groupDict = group.to_dict()
self.manifest['values'].append(groupDict)
return groupDict
def mesh_group_select_node(self, mesh_group: dict, node_name: str) -> None:
    """Record `node_name` as a selected node on `mesh_group`.

    Parameters
    ----------
    mesh_group :
        Mesh group to apply the selection to.
    node_name :
        Path of the node.
    """
    selection = mesh_group['nodeSelectionList']
    selection['selectedNodes'].append(node_name)
def mesh_group_unselect_node(self, mesh_group: dict, node_name: str) -> None:
    """Record `node_name` as an unselected node on `mesh_group`.

    Parameters
    ----------
    mesh_group :
        Mesh group to apply the selection to.
    node_name :
        Path of the node.
    """
    selection = mesh_group['nodeSelectionList']
    selection['unselectedNodes'].append(node_name)
def mesh_group_add_advanced_coordinate_system(self, mesh_group: dict,
origin_node_name: str = '',
translation: typing.Optional[object] = None,
rotation: typing.Optional[object] = None,
scale: float = 1.0) -> None:
"""Adds an Advanced Coordinate System rule which modifies the target coordinate system,
applying a transformation to all data (transforms and vertex data if it exists).
Parameters
----------
mesh_group :
Mesh group to add the Advanced Coordinate System rule to.
origin_node_name :
Path of the node to use as the origin.
translation :
Moves the group along the given vector.
rotation :
Sets the orientation offset of the processed mesh in degrees. Rotates the group after translation.
scale :
Sets the scale offset of the processed mesh.
"""
origin_rule = {
'$type': 'CoordinateSystemRule',
'useAdvancedData': True,
'originNodeName': self.__default_or_value(origin_node_name, '')
}
if translation is not None:
origin_rule['translation'] = translation
if rotation is not None:
origin_rule['rotation'] = rotation
if scale != 1.0:
origin_rule['scale'] = scale
mesh_group['rules']['rules'].append(origin_rule)
def mesh_group_add_comment(self, mesh_group: dict, comment: str) -> None:
    """Add a Comment rule containing `comment` to `mesh_group`.

    Parameters
    ----------
    mesh_group :
        Mesh group to add the comment rule to.
    comment :
        Text for the comment rule.
    """
    mesh_group['rules']['rules'].append({
        '$type': 'CommentRule',
        'comment': comment,
    })
def __default_or_value(self, val, default):
return default if val is None else val
def mesh_group_add_cloth_rule(self, mesh_group: dict,
cloth_node_name: str,
inverse_masses_stream_name: typing.Optional[str],
inverse_masses_channel: typing.Optional[ColorChannel],
motion_constraints_stream_name: typing.Optional[str],
motion_constraints_channel: typing.Optional[ColorChannel],
backstop_stream_name: typing.Optional[str],
backstop_offset_channel: typing.Optional[ColorChannel],
backstop_radius_channel: typing.Optional[ColorChannel]) -> None:
"""Adds a Cloth rule.
Parameters
----------
mesh_group :
Mesh Group to add the cloth rule to
cloth_node_name :
Name of the node that the rule applies to
inverse_masses_stream_name :
Name of the color stream to use for inverse masses
inverse_masses_channel :
Color channel (index) for inverse masses
motion_constraints_stream_name :
Name of the color stream to use for motion constraints
motion_constraints_channel :
Color channel (index) for motion constraints
backstop_stream_name :
Name of the color stream to use for backstop
backstop_offset_channel :
Color channel (index) for backstop offset value
backstop_radius_channel :
Color channel (index) for backstop radius value
"""
cloth_rule = {
'$type': 'ClothRule',
'meshNodeName': cloth_node_name,
'inverseMassesStreamName': self.__default_or_value(inverse_masses_stream_name, 'Default: 1.0')
}
if inverse_masses_channel is not None:
cloth_rule['inverseMassesChannel'] = int(inverse_masses_channel)
cloth_rule['motionConstraintsStreamName'] = self.__default_or_value(motion_constraints_stream_name, 'Default: 1.0')
if motion_constraints_channel is not None:
cloth_rule['motionConstraintsChannel'] = int(motion_constraints_channel)
cloth_rule['backstopStreamName'] = self.__default_or_value(backstop_stream_name, 'None')
if backstop_offset_channel is not None:
cloth_rule['backstopOffsetChannel'] = int(backstop_offset_channel)
if backstop_radius_channel is not None:
cloth_rule['backstopRadiusChannel'] = int(backstop_radius_channel)
mesh_group['rules']['rules'].append(cloth_rule)
def mesh_group_add_lod_rule(self, mesh_group: dict) -> dict:
"""Adds an LOD rule.
Parameters
----------
mesh_group :
Mesh Group to add the rule to.
Returns
-------
dict
LOD rule.
"""
lod_rule = {
'$type': '{6E796AC8-1484-4909-860A-6D3F22A7346F} LodRule',
'nodeSelectionList': []
}
mesh_group['rules']['rules'].append(lod_rule)
return lod_rule
def lod_rule_add_lod(self, lod_rule: dict) -> dict:
"""Adds an LOD level to the LOD rule. Nodes are added in order. The first node added represents LOD1, 2nd LOD2, etc.
Parameters
----------
lod_rule :
LOD rule to add the LOD level to.
Returns
-------
dict
LOD level.
"""
lod = {'selectedNodes': [], 'unselectedNodes': []}
lod_rule['nodeSelectionList'].append(lod)
return lod
def lod_select_node(self, lod: dict, selected_node: str) -> None:
"""Adds a node as a selected node.
Parameters
----------
lod :
LOD level to add the node to.
selected_node :
Path of the node.
"""
lod['selectedNodes'].append(selected_node)
def lod_unselect_node(self, lod: dict, unselected_node: str) -> None:
"""Adds a node as an unselected node.
Parameters
----------
lod :
LOD rule to add the node to.
unselected_node :
Path of the node.
"""
lod['unselectedNodes'].append(unselected_node)
def mesh_group_add_advanced_mesh_rule(self, mesh_group: dict,
use_32bit_vertices: bool = False,
merge_meshes: bool = True,
use_custom_normals: bool = True,
vertex_color_stream: typing.Optional[str] = None) -> None:
"""Adds an Advanced Mesh rule.
Parameters
----------
mesh_group :
Mesh Group to add the rule to.
use_32bit_vertices :
False = 16bit vertex position precision. True = 32bit vertex position precision.
merge_meshes :
Merge all meshes into a single mesh.
use_custom_normals :
True = use normals from DCC tool. False = average normals.
vertex_color_stream :
Color stream name to use for Vertex Coloring.
"""
rule = {
'$type': 'StaticMeshAdvancedRule',
'use32bitVertices': use_32bit_vertices,
'mergeMeshes': merge_meshes,
'useCustomNormals': use_custom_normals
}
if vertex_color_stream is | |
therefore merge this into @task once it gets fixed.
actor_name = inspect.stack()[1].frame.f_code.co_name
profile_actor = gluetool.utils.normalize_bool_option(actor_control_value(actor_name, 'PROFILE', False))
if profile_actor:
profiler = Profiler()
profiler.start()
db = db or get_root_db()
cancel = cancel or threading.Event()
doer_args = doer_args or tuple()
doer_kwargs = doer_kwargs or dict()
doer_result: DoerReturnType = Error(Failure('undefined doer result'))
# Updating context - this function is the entry point into Artemis code, therefore context
# is probably left empty or with absolutely wrong objects.
LOGGER.set(logger)
DATABASE.set(db)
CURRENT_MESSAGE.set(dramatiq.middleware.current_message.CurrentMessage.get_current_message())
# Small helper so we can keep all session-related stuff inside one block, and avoid repetition or more than
# one `get_session()` call.
def _run_doer(session: sqlalchemy.orm.session.Session) -> DoerReturnType:
SESSION.set(session)
assert db is not None
assert cancel is not None
assert doer_args is not None
assert doer_kwargs is not None
doer_result = run_doer(
logger,
db,
session,
cancel,
doer,
actor_name,
*doer_args,
**doer_kwargs
)
# "Ignored" failures - failures the tasks don't wish to repeat by running the task again - need
# special handling: we have to mark the guest request as failed. Without this step, client will
# spin endlessly until it finally gives up.
if not is_ignore_result(doer_result):
return doer_result
failure = cast(_IgnoreType, doer_result.unwrap()).failure
# Not all failures influence their parent guest request.
if failure.recoverable is not False or failure.fail_guest_request is not True:
return doer_result
# Also, not all failures relate to guests. Such failures are easy to deal with, there's nothing to update.
if 'guestname' not in failure.details:
return doer_result
guestname = failure.details['guestname']
r_state_change = _update_guest_state(
logger,
session,
guestname,
GuestState.ERROR
)
# If the change failed, we're left with a loose end: the task marked the failure as something that will not
# get better over time, but here we failed to mark the request as failed because of issue that may be
# transient. If that's the case, we should probably try again. Otherwise, we log the error that killed the
# state change, and move on.
if r_state_change.is_error:
# Describes the problem encountered when changing the guest request state.
state_change_failure = r_state_change.unwrap_error()
# Describes *when* this change was needed, i.e. what we attempted to do. Brings more context for humans.
failure = Failure.from_failure(
'failed to mark guest request as failed',
state_change_failure
)
# State change failed because of recoverable failure => use it as an excuse to try again. We can expect the
# task to fail, but we will get another chance to mark the guest as failed. This effectively drops the
# original `IGNORE` result, replacing it with an error.
if state_change_failure.recoverable is True:
return Error(failure)
# State change failed because of irrecoverable failure => no point to try again. Probably not very common,
# but still possible, in theory. At least try to log the situation before proceeding with the original
# `IGNORE`.
failure.handle(logger)
if r_state_change.unwrap() is not True:
failure = Failure('failed to mark guest request as failed')
# State change failed because the expected record might be missing or changed in some way => use it as an
# excuse to try again. We can expect the task to *not* perform its work because it's higly likely its
# initial attempt to "grab" the guest request would fail. Imagine acquire-guest-request to fail
# irrecoverably, and before we can get to mark the request as failed, user removes it. The state change fail
# is then quite expected, and the next iteration of acquire-guest-request will not even try to provision (
# and fail irrecoverable again) because the guest request would be gone, resulting in successfull no-op.
return Error(failure)
# State change succeeded, and changed exactly the request we're working with. There is nothing left to do,
# we proceed by propagating the original "ignore" result, closing the chapter.
return doer_result
try:
if session is None:
with db.get_session() as session:
doer_result = _run_doer(session)
else:
doer_result = _run_doer(session)
except Exception as exc:
failure = Failure.from_exc('unhandled doer exception', exc)
failure.handle(logger)
doer_result = Error(failure)
if profile_actor:
profiler.stop()
profiler.log(logger, 'profiling report (outer)')
if doer_result.is_ok:
result = doer_result.unwrap()
if is_ignore_result(doer_result):
logger.warning('message processing encountered error and requests waiver')
logger.finished()
if result is Reschedule:
raise Exception('message processing requested reschedule')
return
# To avoid chain a of exceptions in the log - which we already logged above - raise a generic,
# insignificant exception to notify scheduler that this task failed and needs to be retried.
raise Exception('message processing failed: {}'.format(doer_result.unwrap_error().message))
def _cancel_task_if(
    logger: gluetool.log.ContextAdapter,
    cancel: threading.Event,
    undo: Optional[Callable[[], None]] = None
) -> bool:
    """
    Test the given cancellation event; when it is set, run the optional undo callback.

    :param logger: logger to use for logging.
    :param cancel: cancellation event to inspect.
    :param undo: optional callback reverting work the task has already done.
    :returns: ``True`` if the task is supposed to be cancelled, ``False`` otherwise.
    """
    if cancel.is_set():
        logger.warning('cancellation requested')

        if undo:
            logger.debug('running undo step')

            undo()

        return True

    logger.debug('cancellation not requested')

    return False
def _randomize_delay(delay: int) -> int:
    """
    Jitter a given delay by a random offset within the spread specified by
    :py:const:`KNOB_DELAY_UNIFORM_SPREAD`.
    """
    spread = KNOB_DELAY_UNIFORM_SPREAD.value
    jittered = delay + int(random.uniform(-spread, spread))

    # Clamp at zero: with a small delay, the jitter could push the value below
    # zero, and a negative delay does not seem to be a good practice.
    return max(0, jittered)
def dispatch_task(
    logger: gluetool.log.ContextAdapter,
    task: Actor,
    *args: Any,
    delay: Optional[int] = None
) -> Result[None, Failure]:
    """
    Dispatch a given task.

    :param logger: logger to use for logging.
    :param task: callable, a Dramatiq task, to dispatch.
    :param args: positional parameters to pass to the task.
    :param delay: if set, the task will be delayed by this many seconds.
    :returns: ``Ok(None)`` when the task was scheduled, ``Error`` with a dispatch failure otherwise.
    """
    if delay is not None:
        delay = _randomize_delay(delay)

        # The underlying Dramatiq code treats delay as milliseconds, hence the conversion.
        r = safe_call(task.send_with_options, args=args, delay=delay * 1000)

    else:
        r = safe_call(task.send, *args)

    if r.is_error:
        return Error(Failure.from_failure(
            'failed to dispatch task',
            r.unwrap_error(),
            task_name=task.actor_name,
            task_args=args,
            task_delay=delay
        ))

    arg_labels = [str(arg) for arg in args]

    if delay is not None:
        arg_labels.append('delay={}'.format(delay))

    logger.info('scheduled task {}({})'.format(task.actor_name, ', '.join(arg_labels)))

    if KNOB_CLOSE_AFTER_DISPATCH.value:
        logger.debug('closing broker connection as requested')

        BROKER.connection.close()

    return Ok(None)
def dispatch_group(
    logger: gluetool.log.ContextAdapter,
    tasks: List[Actor],
    *args: Any,
    on_complete: Optional[Actor] = None,
    delay: Optional[int] = None
) -> Result[None, Failure]:
    """
    Dispatch given tasks as a group.

    :param logger: logger to use for logging.
    :param tasks: list of callables, Dramatiq tasks, to dispatch.
    :param args: positional parameters to pass to all tasks.
    :param on_complete: a task to dispatch when group tasks complete.
    :param delay: if set, the task will be delayed by this many seconds.
    :returns: ``Ok(None)`` when the group was scheduled, ``Error`` with a dispatch failure otherwise.
    """
    try:
        messages = [task.message(*args) for task in tasks]
        group = dramatiq.group(messages)

        if on_complete:
            group.add_completion_callback(on_complete.message(*args))

        if delay is None:
            group.run()

        else:
            delay = _randomize_delay(delay)

            # The underlying Dramatiq code treats delay as milliseconds, hence the conversion.
            group.run(delay=delay * 1000)

        arg_labels = [str(arg) for arg in args]

        if delay is not None:
            arg_labels.append('delay={}'.format(delay))

        logger.info('scheduled group ({})({})'.format(
            ' | '.join(task.actor_name for task in tasks),
            ', '.join(arg_labels)
        ))

    except Exception as exc:
        return Error(Failure.from_exc(
            'failed to dispatch group',
            exc,
            group_tasks=[task.actor_name for task in tasks],
            group_args=args
        ))

    return Ok(None)
def _update_guest_state(
logger: gluetool.log.ContextAdapter,
session: sqlalchemy.orm.session.Session,
guestname: str,
new_state: GuestState,
current_state: Optional[GuestState] = None,
set_values: Optional[Dict[str, Union[str, int, None, datetime.datetime, GuestState]]] = None,
current_pool_data: Optional[str] = None,
**details: Any
) -> Result[bool, Failure]:
workspace = Workspace(
logger,
session,
threading.Event(),
guestname=guestname,
current_state=current_state.value if current_state is not None else None,
new_state=new_state.value,
**details
)
current_state_label = current_state.value if current_state is not None else '<ignored>'
logger.warning('state switch: {} => {}'.format(current_state_label, new_state.value))
now = datetime.datetime.utcnow()
if set_values:
values = set_values
values.update({
'state': new_state,
'state_mtime': now
})
else:
values = {
'state': new_state,
'state_mtime': now
}
query = sqlalchemy \
.update(GuestRequest.__table__) \
.where(GuestRequest.guestname == guestname)
if current_state is not None:
query = query.where(GuestRequest.state == current_state)
if current_pool_data:
query = query.where(GuestRequest.pool_data == current_pool_data)
query = query.values(**values)
r = safe_db_change(logger, session, query)
if r.is_error:
return Error(Failure.from_failure(
'failed to switch guest state',
r.unwrap_error(),
current_state=current_state_label,
new_state=new_state.value
))
if r.value is False:
logger.warning('state switch: {} => {}: failed'.format(current_state_label, new_state.value))
return Error(Failure(
'did not switch guest state',
current_state=current_state_label,
new_state=new_state.value
))
logger.warning('state switch: {} => {}: succeeded'.format(current_state_label, new_state.value))
workspace.handle_success('state-changed')
| |
HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/lists/{list_id}/segments', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List7', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def batch_segment_members(self, body, list_id, segment_id, **kwargs): # noqa: E501
"""Batch add or remove members # noqa: E501
Batch add/remove list members to static segment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.batch_segment_members(body, list_id, segment_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MembersToAddremoveTofromAStaticSegment body: (required)
:param str list_id: The unique ID for the list. (required)
:param str segment_id: The unique id for the segment. (required)
:return: BatchAddremoveListMembersTofromStaticSegment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.batch_segment_members_with_http_info(body, list_id, segment_id, **kwargs) # noqa: E501
else:
(data) = self.batch_segment_members_with_http_info(body, list_id, segment_id, **kwargs) # noqa: E501
return data
def batch_segment_members_with_http_info(self, body, list_id, segment_id, **kwargs): # noqa: E501
"""Batch add or remove members # noqa: E501
Batch add/remove list members to static segment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.batch_segment_members_with_http_info(body, list_id, segment_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MembersToAddremoveTofromAStaticSegment body: (required)
:param str list_id: The unique ID for the list. (required)
:param str segment_id: The unique id for the segment. (required)
:return: BatchAddremoveListMembersTofromStaticSegment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'list_id', 'segment_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method batch_segment_members" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling ``") # noqa: E501
# verify the required parameter 'list_id' is set
if ('list_id' not in params or
params['list_id'] is None):
raise ValueError("Missing the required parameter `list_id` when calling ``") # noqa: E501
# verify the required parameter 'segment_id' is set
if ('segment_id' not in params or
params['segment_id'] is None):
raise ValueError("Missing the required parameter `segment_id` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'list_id' in params:
path_params['list_id'] = params['list_id'] # noqa: E501
if 'segment_id' in params:
path_params['segment_id'] = params['segment_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/lists/{list_id}/segments/{segment_id}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BatchAddremoveListMembersTofromStaticSegment', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_segment_member(self, list_id, segment_id, body, **kwargs): # noqa: E501
"""Add member to segment # noqa: E501
Add a member to a static segment. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_segment_member(list_id, segment_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param str segment_id: The unique id for the segment. (required)
:param Body3 body: (required)
:return: ListMembers1
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_segment_member_with_http_info(list_id, segment_id, body, **kwargs) # noqa: E501
else:
(data) = self.create_segment_member_with_http_info(list_id, segment_id, body, **kwargs) # noqa: E501
return data
def create_segment_member_with_http_info(self, list_id, segment_id, body, **kwargs): # noqa: E501
"""Add member to segment # noqa: E501
Add a member to a static segment. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_segment_member_with_http_info(list_id, segment_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param str segment_id: The unique id for the segment. (required)
:param Body3 body: (required)
:return: ListMembers1
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['list_id', 'segment_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_segment_member" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'list_id' is set
if ('list_id' not in params or
params['list_id'] is None):
raise ValueError("Missing the required parameter `list_id` when calling ``") # noqa: E501
# verify the required parameter 'segment_id' is set
if ('segment_id' not in params or
params['segment_id'] is None):
raise ValueError("Missing the required parameter `segment_id` when calling ``") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'list_id' in params:
path_params['list_id'] = params['list_id'] # noqa: E501
if 'segment_id' in params:
path_params['segment_id'] = params['segment_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/lists/{list_id}/segments/{segment_id}/members', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ListMembers1', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_list_signup_form(self, list_id, body, **kwargs): # noqa: E501
"""Customize signup form # noqa: E501
Customize a list's default signup form. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_list_signup_form(list_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param SignupForm1 body: (required)
:return: SignupForm
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_list_signup_form_with_http_info(list_id, body, **kwargs) # noqa: E501
else:
(data) = self.update_list_signup_form_with_http_info(list_id, body, **kwargs) # noqa: E501
return data
def update_list_signup_form_with_http_info(self, list_id, body, **kwargs): # noqa: E501
"""Customize signup form # noqa: E501
Customize a list's default signup form. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_list_signup_form_with_http_info(list_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param SignupForm1 body: (required)
:return: SignupForm
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['list_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_list_signup_form" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'list_id' is set
if ('list_id' not in params or
params['list_id'] is None):
raise ValueError("Missing the required parameter `list_id` when calling ``") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'list_id' in params:
path_params['list_id'] = params['list_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', | |
(unnormalized)
Returns
-------
CESS: float
conditional effective sample size
"""
prev_weights_normalization = np.exp(logsumexp(-works_prev))
prev_weights_normalized = np.exp(-works_prev) / prev_weights_normalization
#_logger.debug(f"\t\tnormalized weights: {prev_weights_normalized}")
incremental_weights_unnormalized = np.exp(-works_incremental)
#_logger.debug(f"\t\tincremental weights (unnormalized): {incremental_weights_unnormalized}")
N = len(prev_weights_normalized)
CESS = N * np.dot(prev_weights_normalized, incremental_weights_unnormalized)**2 / np.dot(prev_weights_normalized, np.power(incremental_weights_unnormalized, 2))
#_logger.debug(f"\t\tCESS: {CESS}")
return CESS
@staticmethod
def multinomial_resample(cumulative_works, sampler_states, num_resamples, previous_labels):
"""
from a list of cumulative works and sampler states, resample the sampler states N times with replacement
from a multinomial distribution conditioned on the weights w_i \propto e^{-cumulative_works_i}
Parameters
----------
cumulative_works : np.array
generalized accumulated works at time t for all particles
sampler_states : list of (openmmtools.states.SamplerState)
list of sampler states at time t for all particles
num_resamples : int, default len(sampler_states)
number of resamples to conduct; default doesn't change the number of particles
previous_labels : list of int
previous labels of the particles
Returns
-------
resampled_works : np.array([1.0/num_resamples]*num_resamples)
resampled works (uniform)
resampled_sampler_states : list of (openmmtools.states.SamplerState)
resampled sampler states of size num_resamples
corrected resampled_labels : list of ints
resampled labels for tracking particle duplicates
"""
normalized_weights = np.exp(-cumulative_works - logsumexp(-cumulative_works))
resampled_labels = np.random.choice(len(normalized_weights), num_resamples, p=normalized_weights, replace = True)
resampled_sampler_states = [sampler_states[i] for i in resampled_labels]
resampled_works = np.array([np.average(cumulative_works)] * num_resamples)
corrected_resampled_labels = np.array([previous_labels[i] for i in resampled_labels])
return resampled_works, resampled_sampler_states, corrected_resampled_labels
    def binary_search(self,
                      futures,
                      start_val,
                      end_val,
                      observable,
                      observable_threshold,
                      thermodynamic_state,
                      max_iterations=20,
                      initial_guess = None,
                      precision_threshold = None):
        """
        Given corresponding start_val and end_val of observables, conduct a binary search to find min value for which the observable threshold
        is exceeded.

        Arguments
        ----------
        futures:
            list of dask.Future objects that point to futures
        start_val: float
            start value of binary search
        end_val: float
            end value of binary search
        observable : function
            function to compute an observable
        observable_threshold : float
            the threshold of the observable used to satisfy the binary search criterion
        thermodynamic_state:
            thermodynamic state with which to compute importance weights
        max_iterations: int, default 20
            maximum number of iterations to conduct
        initial_guess: float, default None
            guess where the threshold is achieved
        precision_threshold: float, default None
            precision threshold below which, the max iteration will break

        Returns
        -------
        midpoint: float
            maximum value that doesn't exceed threshold
        _observable : float
            observed value of observable
        """
        # Remember the original upper bound so the search can snap back to it when
        # it converges to (nearly) the full step -- see the precision_threshold branch.
        _base_end_val = end_val
        _logger.debug(f"\t\t\tmin, max values: {start_val}, {end_val}. ")
        # Fetch accumulated works and sampler states for every particle.
        # NOTE(review): self.deploy/gather_results appear to scatter/gather over a
        # task backend (presumably dask) -- confirm semantics against this class.
        cumulative_work_futures = self.deploy(feptasks.Particle.pull_cumulative_work, (futures,))
        sampler_state_futures = self.deploy(feptasks.Particle.pull_sampler_state, (futures,))
        cumulative_works = np.array(self.gather_results(cumulative_work_futures))
        sampler_states = self.gather_results(sampler_state_futures)
        # Reduced potentials at the starting lambda are the reference for the
        # incremental works (new_rps - current_rps) handed to `observable`.
        thermodynamic_state = self.modify_thermodynamic_state(thermodynamic_state, current_lambda = start_val)
        current_rps = np.array([feptasks.compute_reduced_potential(thermodynamic_state, sampler_state) for sampler_state in sampler_states])
        if initial_guess is not None:
            midpoint = initial_guess
        else:
            midpoint = (start_val + end_val) * 0.5
        _logger.debug(f"\t\t\tinitial midpoint is: {midpoint}")
        for _ in range(max_iterations):
            _logger.debug(f"\t\t\titeration {_}: current lambda: {midpoint}")
            # Evaluate the (normalized) observable for the trial jump start_val -> midpoint.
            thermodynamic_state = self.modify_thermodynamic_state(thermodynamic_state, current_lambda = midpoint)
            new_rps = np.array([feptasks.compute_reduced_potential(thermodynamic_state, sampler_state) for sampler_state in sampler_states])
            _observable = observable(cumulative_works, new_rps - current_rps) / len(current_rps)
            _logger.debug(f"\t\t\tobservable: {_observable}")
            if _observable <= observable_threshold:
                _logger.debug(f"\t\t\tobservable {_observable} <= observable_threshold {observable_threshold}")
                # Threshold violated at the midpoint: shrink the interval from above.
                end_val = midpoint
            else:
                _logger.debug(f"\t\t\tobservable {_observable} > observable_threshold {observable_threshold}")
                # Still above threshold: the sought lambda lies further out; raise the floor.
                start_val = midpoint
            # Next trial point is always the center of the updated bracket.
            midpoint = (start_val + end_val) * 0.5
            if precision_threshold is not None:
                if abs(_base_end_val - midpoint) <= precision_threshold:
                    # Converged to (nearly) the original upper bound: return it exactly,
                    # re-evaluating the observable at that lambda for consistency.
                    _logger.debug(f"\t\t\tthe difference between the original max val ({_base_end_val}) and the midpoint is less than the precision_threshold ({precision_threshold}).  Breaking with original max val.")
                    midpoint = _base_end_val
                    thermodynamic_state = self.modify_thermodynamic_state(thermodynamic_state, current_lambda = midpoint)
                    new_rps = np.array([feptasks.compute_reduced_potential(thermodynamic_state, sampler_state) for sampler_state in sampler_states])
                    _observable = observable(cumulative_works, new_rps - current_rps) / len(current_rps)
                    break
                elif abs(end_val - start_val) <= precision_threshold:
                    # Bracket width below requested precision: accept the upper edge,
                    # again re-evaluating the observable at the returned lambda.
                    _logger.debug(f"\t\t\tprecision_threshold: {precision_threshold} is exceeded.  Breaking")
                    midpoint = end_val
                    thermodynamic_state = self.modify_thermodynamic_state(thermodynamic_state, current_lambda = midpoint)
                    new_rps = np.array([feptasks.compute_reduced_potential(thermodynamic_state, sampler_state) for sampler_state in sampler_states])
                    _observable = observable(cumulative_works, new_rps - current_rps) / len(current_rps)
                    break
        return midpoint, _observable
    def equilibrate(self,
                    n_equilibration_iterations = 1,
                    endstates = [0,1],
                    max_size = 1024*1e3,
                    decorrelate=False,
                    timer = False,
                    minimize = False,
                    LSF = False,
                    num_processes = 2,
                    adapt = False):
        """
        Run the equilibrium simulations a specified number of times at the lambda 0, 1 states. This can be used to equilibrate
        the simulation before beginning the free energy calculation.
        Parameters
        ----------
        n_equilibration_iterations : int; default 1
            number of equilibrium simulations to run, each for lambda = 0, 1.
        endstates : list, default [0,1]
            at which endstate(s) to conduct n_equilibration_iterations (either [0] ,[1], or [0,1])
        max_size : float, default 1.024e6 (bytes)
            number of bytes allotted to the current writing-to file before it is finished and a new equilibrium file is initiated.
        decorrelate : bool, default False
            whether to parse all written files serially and remove correlated snapshots; this returns an ensemble of iid samples in theory.
        timer : bool, default False
            whether to trigger the timing in the equilibration; this adds an item to the EquilibriumResult, which is a list of times for various
            processes in the feptask equilibration scheme.
        minimize : bool, default False
            Whether to minimize the sampler state before conducting equilibration. This is passed directly to feptasks.run_equilibration
        LSF: bool, default False
            whether we are using the LSF dask Client
        num_processes : int, default 2
            number of processes to run. This argument does nothing if not LSF
        adapt : bool, default False
            whether to use an adaptive scheduler.
        Returns
        -------
        equilibrium_result : perses.dispersed.feptasks.EquilibriumResult
            equilibrium result namedtuple
        """
        # NOTE(review): endstates=[0,1] is a mutable default argument; it is
        # never mutated here, but consider a tuple to be safe.
        _logger.debug(f"conducting equilibration")
        # run a round of equilibrium
        _logger.debug(f"iterating through endstates to submit equilibrium jobs")
        EquilibriumFEPTask_list = []
        for state in endstates: #iterate through the specified endstates (0 or 1) to create appropriate EquilibriumFEPTask inputs
            _logger.debug(f"\tcreating lambda state {state} EquilibriumFEPTask")
            # Assemble the per-endstate task inputs consumed by
            # feptasks.run_equilibrium.
            input_dict = {'thermodynamic_state': self._hybrid_thermodynamic_states[state],
                          'nsteps_equil': self._n_equil_steps,
                          'topology': self._factory.hybrid_topology,
                          'n_iterations': n_equilibration_iterations,
                          'splitting': self._eq_splitting_string,
                          'atom_indices_to_save': None,
                          'trajectory_filename': None,
                          'max_size': max_size,
                          'timer': timer,
                          '_minimize': minimize,
                          'file_iterator': 0,
                          'timestep': self._timestep}
            if self._write_traj:
                _logger.debug(f"\twriting traj to {self._trajectory_filename[state]}")
                equilibrium_trajectory_filename = self._trajectory_filename[state]
                input_dict['trajectory_filename'] = equilibrium_trajectory_filename
            else:
                _logger.debug(f"\tnot writing traj")
            # Resume file numbering from the last written equilibrium file
            # (filenames carry a 4-digit counter in chars [-7:-3]).
            if self._eq_dict[state] == []:
                _logger.debug(f"\tself._eq_dict[{state}] is empty; initializing file_iterator at 0 ")
            else:
                last_file_num = int(self._eq_dict[state][-1][0][-7:-3])
                _logger.debug(f"\tlast file number: {last_file_num}; initiating file iterator as {last_file_num + 1}")
                file_iterator = last_file_num + 1
                input_dict['file_iterator'] = file_iterator
            task = EquilibriumFEPTask(sampler_state = self._sampler_states[state], inputs = input_dict, outputs = None)
            EquilibriumFEPTask_list.append(task)
        _logger.debug(f"scattering and mapping run_equilibrium task")
        #remote_EquilibriumFEPTask_list = self.client.scatter(EquilibriumFEPTask_list)
        #distributed.progress(remote_EquilibriumFEPTask_list, notebook = False)
        #futures_EquilibriumFEPTask_list = self.client.map(feptasks.run_equilibrium, remote_EquilibriumFEPTask_list)
        # Spin up the (LSF or local) worker pool, run all tasks, and tear the
        # pool down again before post-processing.
        self.activate_client(LSF = LSF,
                            num_processes = num_processes,
                            adapt = adapt)
        futures = self.deploy(feptasks.run_equilibrium, (EquilibriumFEPTask_list,))
        #distributed.progress(futures, notebook = False)
        eq_results = self.gather_results(futures)
        self.deactivate_client()
        _logger.debug(f"finished submitting tasks; gathering...")
        #eq_results = self.client.gather(futures_EquilibriumFEPTask_list)
        # Fold each result back into the per-endstate bookkeeping
        # (file records, reduced potentials, final sampler state, timers).
        for state, eq_result in zip(endstates, eq_results):
            _logger.debug(f"\tcomputing equilibrium task future for state = {state}")
            self._eq_dict[state].extend(eq_result.outputs['files'])
            self._eq_dict[f"{state}_reduced_potentials"].extend(eq_result.outputs['reduced_potentials'])
            self._sampler_states[state] = eq_result.sampler_state
            self._eq_timers[state].append(eq_result.outputs['timers'])
        _logger.debug(f"collections complete.")
        if decorrelate: # if we want to decorrelate all sample
            _logger.debug(f"decorrelating data")
            for state in endstates:
                _logger.debug(f"\tdecorrelating lambda = {state} data.")
                traj_filename = self._trajectory_filename[state]
                if os.path.exists(traj_filename[:-2] + f'0000' + '.h5'):
                    _logger.debug(f"\tfound traj filename: {traj_filename[:-2] + f'0000' + '.h5'}; proceeding...")
                    # Statistical-inefficiency analysis on the reduced
                    # potentials yields the uncorrelated snapshot indices.
                    [t0, g, Neff_max, A_t, uncorrelated_indices] = feptasks.compute_timeseries(np.array(self._eq_dict[f"{state}_reduced_potentials"]))
                    _logger.debug(f"\tt0: {t0}; Neff_max: {Neff_max}; uncorrelated_indices: {uncorrelated_indices}")
                    self._eq_dict[f"{state}_decorrelated"] = uncorrelated_indices
                    #now we just have to turn the file tuples into an array
                    # Map global snapshot indices back onto the (file, count)
                    # tuples so each file knows which frames survived.
                    _logger.debug(f"\treorganizing decorrelated data; files w/ num_snapshots are: {self._eq_dict[state]}")
                    iterator, corrected_dict = 0, {}
                    for tupl in self._eq_dict[state]:
                        new_list = [i + iterator for i in range(tupl[1])]
                        iterator += len(new_list)
                        decorrelated_list = [i for i in new_list if i in uncorrelated_indices]
                        corrected_dict[tupl[0]] = decorrelated_list
                    self._eq_files_dict[state] = corrected_dict
                    _logger.debug(f"\t corrected_dict for state {state}: {corrected_dict}")
def modify_thermodynamic_state(self, thermodynamic_state, current_lambda):
"""
modify a thermodynamic state in place
"""
if self.relative_transform:
thermodynamic_state.set_alchemical_parameters(current_lambda, LambdaProtocol(functions = self._protocol))
return thermodynamic_state
else:
raise Exception(f"modifying a local thermodynamic state when self.relative_transform = False is not supported. Aborting!")
def pull_trajectory_snapshot(self, endstate):
"""
Draw randomly a single snapshot from self._eq_files_dict
Parameters
----------
endstate: int
lambda endstate from which to extract an equilibrated snapshot, either 0 or 1
Returns
-------
sampler_state: openmmtools.SamplerState
sampler state with positions and box vectors if applicable
"""
#pull a random index
_logger.debug(f"\tpulling a decorrelated trajectory snapshot...")
index = random.choice(self._eq_dict[f"{endstate}_decorrelated"])
_logger.debug(f"\t\tpulled decorrelated index label {index}")
files = [key for key in self._eq_files_dict[endstate].keys() if index in self._eq_files_dict[endstate][key]]
_logger.debug(f"\t\t files corresponding to index {index}: {files}")
assert len(files) == 1, f"files: {files} doesn't have one entry; index: {index}, eq_files_dict: {self._eq_files_dict[endstate]}"
file = files[0]
file_index = self._eq_files_dict[endstate][file].index(index)
_logger.debug(f"\t\tfile_index: {file_index}")
#now we load file as a traj and create a sampler state with it
traj = md.load_frame(file, file_index)
positions | |
# ***********************************************************************
# This file defines some common feedforward Neural Network
# architectures:
#   - ConvNet: convolutional neural network
#   - MlpNet: multilayer perceptron
#
# Written by: <NAME> (<EMAIL>)
#   Modern Heuristics Research Group (MHRG)
#   Virginia Commonwealth University (VCU), Richmond, VA
#   http://www.people.vcu.edu/~mmanic/
#
# ***********************************************************************
from __future__ import division
from __future__ import print_function
import warnings
import functools
import collections
import numpy as np
import tensorflow as tf
import twodlearn as tdl
from .normalizer import (Normalizer)
from .losses import (Loss, EmpiricalLoss, AddNLosses,
ClassificationLoss, L2Regularizer,
L2Loss, EmpiricalWithRegularization,
ScaledLoss, EmpiricalLossWrapper)
class Options:
    """Container for global weight-initialization settings.

    Attributes:
        weight_initialization: name of the initialization scheme.
        weight_initialization_alpha: scaling coefficient for the scheme.
    """
    def __init__(self, weight_initialization,
                 weight_initialization_alpha):
        self.weight_initialization = weight_initialization
        self.weight_initialization_alpha = weight_initialization_alpha


# Module-level default configuration used by the layer constructors.
options = Options(weight_initialization='sum',
                  weight_initialization_alpha=1.0)
''' ------------------------- Activation functions ------------------------ '''
def selu01(x):
    ''' Self normalizing activation function
    Activation function proposed by Gunter Klambauer et. al.
    "Self-Normalizing Neural Networks", https://arxiv.org/abs/1706.02515
    '''
    with tf.name_scope('selu01'):
        # Fixed-point constants from the SELU paper (truncated).
        alpha = 1.6733
        scale = 1.0507
        # Split x into its positive and negative parts; the negative part
        # goes through the scaled-exponential branch.
        positive_part = tf.nn.relu(x)
        negative_part = -tf.nn.relu(-x)
        return scale * (positive_part + (alpha * tf.exp(negative_part) - alpha))
def selu01_disc(x):
    ''' discontinuous selu '''
    # Shift the relu hinge left by alpha*lambda, then shift the output back
    # down by the same amount.
    offset = 1.6733 * 1.0507
    shifted = tf.nn.relu(x + offset)
    return shifted - offset
def selu01_disc2(x):
    ''' another version of discontinuous selu '''
    alpha = 1.6733
    scale = 1.0507
    # Hinge shifted left by alpha; output scaled and re-centered.
    shifted = tf.nn.relu(x + alpha)
    return scale * shifted - alpha * scale
def leaky_relu(x, leaky_slope=0.01):
    ''' leaky relu, with 0.01 slope for negative values'''
    # relu(x) keeps the positive part; relu(-x) isolates the magnitude of
    # the negative part, which is scaled by the leak slope.
    return tf.nn.relu(x) - leaky_slope * tf.nn.relu(-x)
''' -------------------------------- Layers ------------------------------- '''
class Transpose(tdl.core.TdlModel):
    """Lazily evaluated transpose of a tensor.

    If ``rightmost`` is True only the two last axes are swapped; otherwise
    the full axis order is reversed (the ``tf.transpose`` default).
    A custom permutation may be supplied via ``perm``.
    """
    @tdl.core.InferenceInput
    def inputs(self, value):
        '''Tensor to be transposed.'''
        return value

    @tdl.core.InputArgument
    def rightmost(self, value):
        '''If True, transpose only the two rightmost axes (default False).'''
        if value is None:
            value = False
        return value

    @tdl.core.LazzyProperty
    def shape(self):
        '''Shape of the transposed output.'''
        tdl.core.assert_initialized(self, 'shape', ['value'])
        return self.value.shape

    @tdl.core.InputArgument
    def perm(self, value):
        '''Axis permutation handed to tf.transpose; derived from the input
        rank when not given explicitly.'''
        tdl.core.assert_initialized(self, 'perm', ['rightmost'])
        if value is None:
            # BUGFIX: the property being checked is 'perm', not 'axis'.
            tdl.core.assert_any_available(self, 'perm', ['inputs'])
            x = tf.convert_to_tensor(self.inputs)
            if x.shape.ndims is not None:
                # Static rank: build the permutation with numpy.
                ndims = x.shape.ndims
                if self.rightmost:
                    left_axis = np.arange(ndims - 2)
                    right_axis = np.array([ndims-1, ndims-2])
                    value = np.concatenate([left_axis, right_axis], axis=0)
                else:
                    value = np.flip(np.arange(ndims), axis=0)
            else:
                # Dynamic rank: build the permutation with tf ops.
                ndims = tf.rank(x)
                if self.rightmost:
                    left_axis = tf.range(tf.rank(x) - 2)
                    right_axis = tf.convert_to_tensor([ndims-1, ndims-2])
                    # BUGFIX: tf.concat takes a *list* of tensors; the original
                    # tf.concat(left_axis, right_axis, axis=0) raises TypeError.
                    value = tf.concat([left_axis, right_axis], axis=0)
                else:
                    value = tf.reverse(tf.range(ndims), axis=[0])
        else:
            # An explicit perm is only supported for full transposes.
            assert self.rightmost is False
        return value

    @tdl.core.OutputValue
    def value(self, value):
        tdl.core.assert_initialized(self, 'value', ['inputs', 'perm'])
        return tf.transpose(self.inputs, perm=self.perm)
class TransposeLayer(tdl.core.TdlModel):
    """Callable layer that wraps :class:`Transpose`.

    The permutation is initialized lazily from the first input; calling the
    layer returns a ``Transpose`` model for that input.
    """
    @tdl.core.InputArgument
    def rightmost(self, value):
        '''If True, transpose only the two rightmost axes (default False).'''
        if value is None:
            value = False
        return value

    @tdl.core.SubmodelInit
    def perm(self, inputs):
        '''Derive the axis permutation from the rank of ``inputs``.'''
        tdl.core.assert_initialized(self, 'perm', ['rightmost'])
        x = tf.convert_to_tensor(inputs)
        if x.shape.ndims is not None:
            # Static rank: build the permutation with numpy.
            ndims = x.shape.ndims
            if self.rightmost:
                left_axis = np.arange(ndims - 2)
                right_axis = np.array([ndims-1, ndims-2])
                value = np.concatenate([left_axis, right_axis], axis=0)
            else:
                value = np.flip(np.arange(ndims), axis=0)
        else:
            # Dynamic rank: build the permutation with tf ops.
            ndims = tf.rank(x)
            if self.rightmost:
                left_axis = tf.range(tf.rank(x) - 2)
                right_axis = tf.convert_to_tensor([ndims-1, ndims-2])
                # BUGFIX: tf.concat takes a *list* of tensors; the original
                # tf.concat(left_axis, right_axis, axis=0) raises TypeError.
                value = tf.concat([left_axis, right_axis], axis=0)
            else:
                value = tf.reverse(tf.range(ndims), axis=[0])
        return value

    def __call__(self, x, name=None):
        # Initialize the permutation from the first input we see.
        if not tdl.core.is_property_set(self, 'perm'):
            self.perm.init(inputs=x)
        return Transpose(inputs=x, perm=self.perm, name=name)
class AlexnetLayer(tdl.core.TdlModel):
    '''Creates a layer like the one used in (ImageNet Classification
    with Deep Convolutional Neural Networks).

    The format for filter_size is:
        [filter_size_dim0 , filter_size_dim1], it performs 2D convolution
    The format for n_maps is:
        [num_input_maps, num_output_maps]
    The format for pool_size is:
        [pool_size_dim0, pool_size_dim1]
    '''
    @tdl.core.SimpleParameter
    def weights(self, value):
        # Convolution kernel of shape [h, w, in_maps, out_maps];
        # truncated-normal (stddev 0.1) initialized when not provided.
        if value is None:
            initializer = tf.truncated_normal(
                shape=[self.filter_size[0], self.filter_size[1],
                       self.n_maps[0], self.n_maps[1]],
                stddev=0.1)
            value = tf.Variable(initializer, name='W')
        return value

    @tdl.core.SimpleParameter
    def bias(self, value):
        # Per-output-map bias, also truncated-normal initialized.
        if value is None:
            initializer = tf.truncated_normal([self.n_maps[1]], stddev=0.1)
            value = tf.Variable(initializer, name='b')
        return value

    @property
    def filter_size(self):
        # [h, w] of the convolution kernel.
        return self._filter_size

    @property
    def n_maps(self):
        # [num_input_maps, num_output_maps].
        return self._n_maps

    @property
    def pool_size(self):
        # [h, w] of the max-pooling kernel (1x1 disables pooling).
        return self._pool_size

    @tdl.core.Regularizer
    def regularizer(self, scale=None):
        # L2 penalty on the convolution weights only (bias is unpenalized).
        reg = tdl.losses.L2Regularizer(self.weights, scale=scale)
        return reg

    def __init__(self, filter_size, n_maps, pool_size, name=None):
        self._filter_size = filter_size
        self._n_maps = n_maps
        self._pool_size = pool_size
        self._n_inputs = n_maps[0]
        self._n_outputs = n_maps[1]
        super(AlexnetLayer, self).__init__(name=name)
class AlexnetLayerSetup(tdl.core.TdlModel):
    """Evaluation of an :class:`AlexnetLayer` on a concrete input tensor:
    conv2d + relu, followed by optional max-pooling."""
    @tdl.core.InputModel
    def model(self, value):
        '''Layer whose weights/bias/pool_size are evaluated.'''
        return value

    @tdl.core.InputArgument
    def inputs(self, value):
        '''Input tensor (NHWC).'''
        return value

    @property
    def weights(self):
        return self.model.weights

    @property
    def bias(self):
        return self.model.bias

    @property
    def pool_size(self):
        return self.model.pool_size

    def eval_layer(self, inputs):
        '''Apply conv2d + relu (+ max-pool when pool_size > 1) to inputs.'''
        conv = tf.nn.conv2d(
            inputs, self.weights, strides=[1, 1, 1, 1], padding='VALID')
        hidden = tf.nn.relu(conv + self.bias)
        # Perform Pooling if the size of the pooling layer is bigger than 1
        # note that the size of the pooling kernel and the stride is the same
        if (self.pool_size[0] == 1 and self.pool_size[1] == 1):
            return hidden
        else:
            pool = tf.nn.max_pool(
                hidden,
                ksize=[1, self.pool_size[0], self.pool_size[1], 1],
                strides=[1, self.pool_size[0], self.pool_size[1], 1],
                padding='VALID')
            return pool

    @tdl.core.OutputValue
    def value(self, _):
        tdl.core.assert_initialized(self, 'value', ['inputs', 'model'])
        return self.eval_layer(self.inputs)

    @property
    def y(self):
        return self.value

    def evaluate(self, inputs, name=None):
        '''Create a new setup evaluating on ``inputs``.'''
        if name is None:
            name = self.name
        # BUGFIX: the original called ``self.AlexnetLayerSetup(...)`` which
        # does not exist (AttributeError); reference the class directly.
        return AlexnetLayerSetup(model=self,
                                 inputs=inputs,
                                 name=name)
@tdl.core.create_init_docstring
class LinearLayer(tdl.core.Layer):
    '''Standard linear (W*X) fully connected layer'''
    @tdl.core.InputArgument
    def input_shape(self, value):
        '''Shape of the input; a bare int is promoted to (None, int).'''
        if value is None:
            raise tdl.core.exceptions.ArgumentNotProvided(self)
        if isinstance(value, int):
            value = (None, value)
        if not isinstance(value, tf.TensorShape):
            value = tf.TensorShape(value)
        return value

    @tdl.core.InputArgument
    def units(self, value):
        '''Number of output units (int).'''
        if value is None:
            raise tdl.core.exceptions.ArgumentNotProvided(self)
        if not isinstance(value, int):
            raise TypeError('units must be an integer')
        return value

    @tdl.core.ParameterInit(lazzy=True)
    def kernel(self, initializer=None, trainable=True, **kargs):
        '''Weight matrix of shape [input_dim, units]; Glorot-uniform
        initialized by default.'''
        tdl.core.assert_initialized(
            self, 'kernel', ['units', 'input_shape'])
        if initializer is None:
            initializer = tf.keras.initializers.glorot_uniform()
        return self.add_weight(
            name='kernel',
            initializer=initializer,
            shape=[self.input_shape[-1].value, self.units],
            trainable=trainable,
            **kargs)

    @tdl.core.Regularizer
    def regularizer(self, scale=None):
        '''L2 regularizer over the kernel.'''
        tdl.core.assert_initialized(self, 'regularizer', ['kernel'])
        with tf.name_scope(self.scope):
            reg = tdl.losses.L2Regularizer(self.kernel, scale=scale)
        return reg

    def compute_output_shape(self, input_shape=None):
        '''Return input_shape[:-1] + [units].'''
        if input_shape is None:
            # BUGFIX: the property name in this assert was misspelled
            # ('copute_output_shape'), producing a confusing error message.
            tdl.core.assert_initialized(self, 'compute_output_shape',
                                        ['input_shape', 'units'])
            input_shape = self.input_shape
        input_shape = tf.TensorShape(input_shape).as_list()
        return tf.TensorShape(input_shape[:-1] + [self.units])

    class Output(tdl.core.TdlModel):
        '''Evaluation of the linear map on a given input.'''
        @property
        def shape(self):
            return self.value.shape

        @property
        def kernel(self):
            return self.model.kernel

        @tdl.core.InputArgument
        def inputs(self, value):
            return value

        @tdl.core.OutputValue
        def value(self, _):
            # y = W^T x, realized as an adjoint matvec.
            inputs = tf.convert_to_tensor(self.inputs)
            return tf.linalg.LinearOperatorFullMatrix(self.kernel)\
                     .matvec(inputs, adjoint=True)

        def __init__(self, model, inputs, options=None, name=None):
            self.model = model
            super(LinearLayer.Output, self).__init__(
                inputs=inputs, options=options, name=name)

    def call(self, inputs, *args, **kargs):
        return type(self).Output(self, inputs, *args, **kargs)

    def __init__(self, units, *args, **kargs):
        if args:
            # BUGFIX: typo in the error message ('potitional' -> 'positional').
            raise ValueError('Dense layers only accept units as positional '
                             'argument.')
        super(LinearLayer, self).__init__(units=units, **kargs)
@tdl.core.create_init_docstring
class AffineLayer(LinearLayer):
    '''Standard affine (W*X+b) fully connected layer'''
    @tdl.core.ParameterInit(lazzy=True)
    def bias(self, initializer=None, trainable=True, **kargs):
        # Bias vector of shape [units]; zero-initialized by default.
        tdl.core.assert_initialized(self, 'bias', ['units'])
        if initializer is None:
            initializer = tf.keras.initializers.zeros()
        return self.add_weight(
            name='bias',
            initializer=initializer,
            shape=[self.units],
            trainable=trainable,
            **kargs)

    class Output(LinearLayer.Output):
        # Extends the linear output with the bias term.
        @property
        def bias(self):
            return self.model.bias

        @tdl.core.OutputValue
        def value(self, _):
            # y = W^T x + b (adjoint matvec, as in LinearLayer.Output).
            inputs = tf.convert_to_tensor(self.inputs)
            output = tf.linalg.LinearOperatorFullMatrix(self.kernel)\
                .matvec(inputs, adjoint=True)
            if self.bias is not None:
                output = output + self.bias
            return output
@tdl.core.create_init_docstring
class DenseLayer(AffineLayer):
    '''Standard fully connected layer'''
    @tdl.core.InputArgument
    def activation(self, value):
        # Callable applied to the affine output; None means identity.
        return value

    def __init__(self, activation=tf.nn.relu, name=None, **kargs):
        super(DenseLayer, self).__init__(activation=activation,
                                         name=name, **kargs)

    class Output(AffineLayer.Output):
        @property
        def affine(self):
            ''' activation before non-linearity '''
            # NOTE(review): recomputed on every access (no caching), so each
            # read builds a new matvec op — confirm this is intended.
            inputs = tf.convert_to_tensor(self.inputs)
            output = tf.linalg.LinearOperatorFullMatrix(self.kernel)\
                .matvec(inputs, adjoint=True)
            if self.bias is not None:
                output = output + self.bias
            return output

        @property
        def activation(self):
            return self.model.activation

        @tdl.core.OutputValue
        def value(self, _):
            # Apply the nonlinearity only when one was configured.
            return (self.affine if self.activation is None
                    else self.activation(self.affine))
''' ----------------------------- Networks -------------------------------- '''
class NetConf(object):
    '''This is a wrapper to any network configuration, it contains the
    references to the placeholders for inputs and labels, and the
    reference of the computation graph for the network

    inputs: placeholder for the inputs
    labels: placeholder for the labels
    y: output of the computation graph, usually a linear map
       from the last layer (logits)
    loss: loss for the network
    '''
    def __init__(self, inputs, labels, y, loss):
        # Keep references to the graph endpoints.
        self.inputs, self.labels = inputs, labels
        self.y = y  # TODO: change this to out
        self.loss = loss
class MultiLayer2DConvolution(tdl.core.TdlModel):
''' Creates a Convolutional neural network
It performs a series of 2d Convolutions and pooling operations
input_size: size of the input maps, [size_dim0, size_dim1]
n_outputs: number of outputs
n_input_maps: number of input maps
n_filters: list with the number of filters for layer
filter_size: list with the size of the kernel for each layer,
the format for the size of each layer is:
[filter_size_dim0 , filter_size_dim1]
pool_size: list with the size of the pooling kernel foreach layer,
the format for each layer is: [pool_size_dim0, pool_size_dim1]
'''
@property
def input_shape(self):
return self._input_shape
@property
def output_shape(self):
return self._output_shape
@property
def n_filters(self):
return self._n_filters
@property
def filter_sizes(self):
return self._filter_sizes
@property
def pool_sizes(self):
return self._pool_sizes
@property
def weights(self):
weights = [layer.weights for layer in self.layers]
return weights
@tdl.core.Submodel
def layers(self, _):
layers = list()
_n_inputs = self.input_shape[-1]
for idx, n_units in enumerate(self.n_filters):
layers.append(
AlexnetLayer(self.filter_sizes[idx],
[_n_inputs, n_units],
self.pool_sizes[idx],
name='conv_{}'.format(idx)))
_n_inputs = n_units
return layers
@tdl.core.Regularizer
def regularizer(self, scale=None):
reg = [(layer.regularizer.value if layer.regularizer.is_set
else layer.regularizer.init(scale))
for layer in self.layers
if hasattr(layer, 'regularizer')]
if reg:
reg = (reg[0] if len(reg) == 1
else | |
min_order_width,\
threshold_abs, num_orders, plot)
#------------------------
# TRACE THE ORDER RIDGES:
#------------------------
ridge_pos_cross, ridge_pos_disp = self.find_order_ridges(data, smooth_win, exclude_border,\
min_order_width, threshold_abs, num_peaks)
#------------------------------------
# FILL IN DATA INTO THE FOUND RIDGES:
#------------------------------------
# Make dict placeholders:
order_traced = {}
order_trace = {}
for i, order_pos in enumerate(np.sort(ref_cen_pos)[::-1]):
# Here "order_pos" is the cross dispersion center value. order_pos[0] simply chooses one
# value and not the increasing list within the loop.
# Using ridges trace each order in each direction:
min_order_width = 10
order_trace_cross, order_trace_disp = self.find_order_outliers(self.cen_disp, order_pos[0],\
ridge_pos_disp, ridge_pos_cross,\
min_order_width, disp_gap_tol)
# Fit ridges with polynomial:
poly_coefs = np.polyfit(order_trace_disp, order_trace_cross, 5)
order_traced['order_{}'.format(i)] = poly_coefs
order_trace['order_{}'.format(i)] = [order_trace_disp, order_trace_cross]
#-----------------------------------------------------------------------------
if plot==1:
pt.plot_trace_order(ridge_pos_disp, ridge_pos_cross, order_trace, order_traced, \
order_trace_disp, self.cen_disp, ref_cen_pos)
#-----------------------------------------------------------------------------
self.ref_cen_pos = ref_cen_pos
self.trace = order_traced
#-----------------------------------------------------------------------------
return order_traced
def find_ref_cen_pos(self, data, ref_int, smooth_win, exclude_border, min_distance, threshold_abs, \
num_peaks, plot):
"""
This function finds the center order position used as a reference.
"""
# Collapse in disp direction to reduce cosmic ray contamination:
# (FIXME done to make this robust against cosmics - maybe it is not needed)
center_rows_median = np.median(data[ref_int[0]:ref_int[1], :], axis=0)
# Smooth cross_dispersion direction to prepare for the peak-detection algorithm:
center_row_median_convolved = bottleneck.move_sum(center_rows_median.astype(np.float), \
smooth_win, min_count=1)
# Find orders using a peak detection function from scikit-image:
order_centres = skfeature.peak_local_max(center_row_median_convolved, \
exclude_border=exclude_border,\
min_distance=min_distance, threshold_rel=0,\
threshold_abs=threshold_abs, num_peaks=num_peaks)
# Peaks detected minus the smooth window applied (simply due to the moving sum of bottleneck):
ref_cen_pos = order_centres - int(smooth_win/2)
#------------------------------------------------------------------------------
if plot==1:
pt.plot_find_ref_cen_pos(center_rows_median, center_row_median_convolved, \
self.len_cross, smooth_win, ref_cen_pos)
#------------------------------------------------------------------------------
return ref_cen_pos
def find_order_ridges(self, data, smooth_win, exclude_border, min_distance, threshold_abs, num_peaks):
"""
This function finds the ridge of each order. It does so by making a slice in cross dispersion and
colvolve that with a smooth filter such as the "bottleneck.move_sum". It then finds the local max
for each slice and saves the position
"""
# Placeholders:
ridge_indices_disp = []
ridge_indices_cross = []
# Loop over the dispersion length (i) and the cross order row:
for i, crossorder in enumerate(data):
# Collapse in dispersion axis:
# TODO should smoothing be handled separately?
top_hat_conv = bottleneck.move_sum(crossorder.astype(np.float), smooth_win, min_count=1)
# Again find the peaks as done in "find_ref_cen_pos":
peaks = skfeature.peak_local_max(top_hat_conv, exclude_border=exclude_border,\
min_distance=min_distance, threshold_rel=0,\
threshold_abs=threshold_abs, indices=True, num_peaks=num_peaks)
# Convert peaks to a list covering the ridges:
peaks -= int(smooth_win/2)
ridge_indices_cross = np.append(ridge_indices_cross, peaks)
ridge_indices_disp = np.append(ridge_indices_disp, np.ones(peaks.shape[0]) * i)
#-----------------------------------------------------
return ridge_indices_cross, ridge_indices_disp
def find_order_outliers(self, cen_disp, ref_cen_cross, all_orders_x, all_orders_y, order_width,\
disp_gap_tol):
"""
This utility takes the found reference positions in cross dispersion and the traced ridges and
locate all the outliers defined by 'order_width' threshold. If center_row is not an integer this
will fail!
"""
# To simplify the code we make some abbreviations:
x = np.unique(all_orders_x)
y_last = ref_cen_cross
x_last = x[cen_disp]
cross_gap_tol = int(order_width/2.)
# Placeholders for outliers:
cross = []
disp = []
# Outliers on the left side of cen_disp:
for xi in x[cen_disp:]:
index_xi = all_orders_x == xi
orders_y = all_orders_y[index_xi]
min_dist_index = np.argmin(np.abs(orders_y - y_last))
new_y_pos = orders_y[min_dist_index]
if (np.abs(new_y_pos - y_last) < cross_gap_tol) & (np.abs(xi - x_last) < disp_gap_tol):
cross.append(new_y_pos)
y_last = cross[-1]
disp.append(xi)
x_last = disp[-1]
y_last = ref_cen_cross
x_last = x[cen_disp]
# Outliers on the right side of cen_disp:
for xi in x[cen_disp-1::-1]:
index_xi = all_orders_x == xi
orders_y = all_orders_y[index_xi]
min_dist_index = np.argmin(np.abs(orders_y - y_last))
new_y_pos = orders_y[min_dist_index]
if (np.abs(new_y_pos - y_last) < cross_gap_tol) & (np.abs(xi - x_last) < disp_gap_tol):
cross.append(new_y_pos)
y_last = cross[-1]
disp.append(xi)
x_last = disp[-1]
index = np.argsort(disp)
#---------------------------------------------------
return np.array(cross)[index], np.array(disp)[index]
########################################################################################################
# INTER-ORDER MASK #
########################################################################################################
def inter_order_mask(self, data=None, order_traces=None, order_width=None, \
low_nudge=0, high_nudge=0, plot=0):
"""
This function is used to determine the background flux which will be used to correct for scattered
light, wignetting, etc. The function looks at the flux level in between the orders ("inter-order")
and make and return a mask with ones for which is inter-order and zero elsewhere. The function uses
the result from the previos subdroutine "traced orders".
----------------------------
INPUT :
----------------------------
order_width (dict) : Traced orders found from the function 'trace'
order_traces (int, float): Width of inter-order mask
low_nudge (int, float): Number of pixels used below the traced orders
high_nudge (int, float): Number of pixels used above the traced orders
plot (int, float): Plot result if you like
----------------------------
OUTPUT :
----------------------------
inter_order_mask (dict) : Orders within 'order x' and corresponding array with polynomials
"""
#------------------------------
# CHECK FOR PROGRAM PARAMETERS:
#------------------------------
if data ==None: data = self.F_calib
if order_traces==None: order_traces = self.trace
if order_width ==None: order_width = self.find_optimal_width(plot=plot) # FUNCTION CALL!
# Check if the inter-order width is odd integer:
inter_order_width = int(order_width * 4/3)
if inter_order_width % 2 == 0: inter_order_width = inter_order_width - 1
# Constants and placeholders:
inter_order_mask = data * 0 + 1 # Initial image mask of ones
disp = np.arange(self.len_disp) # Number pixel interval in dispersion
order_no = sorted(order_traces.keys()) # Orders numbers (string)
cross_order_center = []
#-----------------------
# FIND ALL INTER-ORDERS:
#-----------------------
# First loop through each order:
for order in order_no:
# Get the coefficients from the trace function:
coefs = order_traces[order]
cross_order_position = np.polyval(coefs, disp) # Polyfit to each order
cross_order_center = np.append(cross_order_center, cross_order_position[int(self.len_disp/2)])
# Each inter order is found:
for disp_i in range(self.len_disp):
lower_order_edge =int(np.round(cross_order_position[disp_i]-inter_order_width/2-low_nudge))
upper_order_edge =int(np.round(cross_order_position[disp_i]+inter_order_width/2+high_nudge))
inter_order_mask[int(disp_i), lower_order_edge:upper_order_edge] = 0
# Distance/size of each inter order:
inter_order_size = cross_order_center[1:] - cross_order_center[:-1] - inter_order_width \
- low_nudge - high_nudge
#-----------------------
# REMOVE 'GHOST' ORDERS:
#-----------------------
# Predict inter_order_size:
xx = np.arange(len(cross_order_center)-1)
inter_order_size_fit = np.polyfit(xx, inter_order_size, 2)
size_before = np.polyval(inter_order_size_fit, -1)
size_after = np.polyval(inter_order_size_fit, len(cross_order_center))
# Remove 'ghost orders' before first order:
coefs = order_traces[order_no[0]]
cross_order_position = np.polyval(coefs, disp)
for disp_i in range(self.len_disp):
lower_inter_order_edge = np.round(cross_order_position[disp_i] - inter_order_width/2 \
- low_nudge - size_before).astype(int)
# Remove orders below edges:
if lower_inter_order_edge < 0: lower_inter_order_edge = 0
inter_order_mask[disp_i, :lower_inter_order_edge] = 0
# Remove 'ghost orders' after last order:
coefs = order_traces[order_no[-1]]
cross_order_position = np.polyval(coefs, disp)
for disp_i in range(self.len_disp):
upper_inter_order_edge = np.round(cross_order_position[disp_i] + inter_order_width/2 \
+ high_nudge + size_after).astype(int)
# Remove orders above edges:
if upper_inter_order_edge > self.len_cross+50: upper_inter_order_edge = 0
inter_order_mask[disp_i, upper_inter_order_edge:] = 0
#--------------------------------------------------------------
if plot==1: pt.plot_inter_order_mask(data, inter_order_mask)
#--------------------------------------------------------------
self.inter_order_width = inter_order_width
self.inter_order_mask = inter_order_mask
#--------------------------------------------------------------
return self.inter_order_mask
########################################################################################################
# BACKGROUND IMAGE #
########################################################################################################
def background(self, image, inter_order_mask=None, order_ref=None, \
poly_order_y=2, poly_order_x=4, filter_size=5, plot=0):
"""
This function estimates the background flux of scattered light and subtract it. It uses the
inter_order_mask to perform this removal.
----------------------------
INPUT :
----------------------------
mask (2d array): Background mask with ones and zeros
poly_order_x (int, float): Order of polynomy to fits background flux in dispersion
poly_order_y (int, float): Order of polynomy to fits background flux in cross dispersion
nsteps (int, float): Number of steps
orderdef (int, float):
----------------------------
OUTPUT :
----------------------------
background_image (2d array):
"""
#------------------------------
# CHECK FOR PROGRAM PARAMETERS:
#------------------------------
if inter_order_mask==None: inter_order_mask = self.inter_order_mask
#----------------------------
# CONSTANTS AND PLACEHOLDERS:
#----------------------------
# Create a background image:
(ysize, xsize) = image.shape
background_image = np.zeros((ysize, xsize), dtype=np.float64)
# Data size in arange:
xx = np.arange(xsize, dtype=np.float64)
yy = np.arange(ysize, dtype=np.float64)
# Array to withhold fitted y values:
xfitarr = np.zeros((len(yy), xsize), dtype=np.float64)
# Step size and range:
yvals = np.arange(len(yy))
# Constants:
ycount = 0
niter = 0
sigma_limit = 3
# For plots:
s_disp = [500, 1500, int(yvals[-1])] # Slices in disp
s_cros = [50, 200, int(xx[-1])] # Slices in cross
#----------------------------
# FIT IN Y-DIRECTION (CROSS):
#----------------------------
for i in yvals:
# Cut out slice in cross dispersion with width determined by 'filter_size':
ymin_ind = np.max([i - filter_size, 0])
ymax_ind = np.min([i + filter_size, ysize-1])
y_slice = image[ymin_ind:ymax_ind, :]
# Collapse in dispersion to a single cross row:
y_mean = np.mean(y_slice, axis=0)
# Indices/image of inter-order | |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# SWIG-generated import shim: on Python >= 2.6 locate the compiled extension
# module next to this file via `imp`; on older Pythons fall back to a plain import.
from sys import version_info
if version_info >= (2, 6, 0):
    def swig_import_helper():
        # Search for the C extension in this package's own directory first.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_edgetpu_cpp_wrapper', [dirname(__file__)])
        except ImportError:
            # Not found beside this file; let normal import machinery resolve it.
            import _edgetpu_cpp_wrapper
            return _edgetpu_cpp_wrapper
        if fp is not None:
            try:
                _mod = imp.load_module('_edgetpu_cpp_wrapper', fp, pathname, description)
            finally:
                # Always close the file handle opened by find_module.
                fp.close()
            return _mod
    _edgetpu_cpp_wrapper = swig_import_helper()
    del swig_import_helper
else:
    import _edgetpu_cpp_wrapper
del version_info
# Compatibility alias: `property` is missing on very old interpreters.
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """Set attribute *name* on a SWIG proxy via the class's setter table.

    With static=1 (the default), attributes without a registered setter
    raise AttributeError instead of being added to the instance __dict__.
    """
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    # Prefer the C-level setter registered by the generated proxy class.
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    """Dynamic variant: delegate with static=0 so new attributes are allowed."""
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
    """Look up attribute *name* through the SWIG getter table of *class_type*."""
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    if (not static):
        # NOTE(review): plain `object` has no __getattr__, so this line raises
        # AttributeError itself — a known quirk of SWIG 3.x generated code.
        return object.__getattr__(self, name)
    else:
        raise AttributeError(name)
def _swig_getattr(self, class_type, name):
    """Dynamic variant: delegate with static=0."""
    return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
    """repr() helper for SWIG proxies: include the wrapped C++ object if present."""
    try:
        strthis = "proxy of " + self.this.__repr__()
    except Exception:
        # Proxy not (yet) bound to a C++ object; omit the pointer description.
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Old-/new-style class compatibility: on interpreters without `object`
# fall back to a dummy base class and record which mode is active.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object:
        pass
    _newclass = 0
class SwigPyIterator(_object):
    """Abstract C++-style iterator over SWIG-wrapped STL containers.

    Generated by SWIG; instances are only created by the C++ layer,
    never constructed directly from Python.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _edgetpu_cpp_wrapper.delete_SwigPyIterator
    __del__ = lambda self: None
    def value(self):
        return _edgetpu_cpp_wrapper.SwigPyIterator_value(self)
    def incr(self, n=1):
        return _edgetpu_cpp_wrapper.SwigPyIterator_incr(self, n)
    def decr(self, n=1):
        return _edgetpu_cpp_wrapper.SwigPyIterator_decr(self, n)
    def distance(self, x):
        return _edgetpu_cpp_wrapper.SwigPyIterator_distance(self, x)
    def equal(self, x):
        return _edgetpu_cpp_wrapper.SwigPyIterator_equal(self, x)
    def copy(self):
        return _edgetpu_cpp_wrapper.SwigPyIterator_copy(self)
    def next(self):
        # Python 2 iteration protocol.
        return _edgetpu_cpp_wrapper.SwigPyIterator_next(self)
    def __next__(self):
        # Python 3 iteration protocol.
        return _edgetpu_cpp_wrapper.SwigPyIterator___next__(self)
    def previous(self):
        return _edgetpu_cpp_wrapper.SwigPyIterator_previous(self)
    def advance(self, n):
        return _edgetpu_cpp_wrapper.SwigPyIterator_advance(self, n)
    def __eq__(self, x):
        return _edgetpu_cpp_wrapper.SwigPyIterator___eq__(self, x)
    def __ne__(self, x):
        return _edgetpu_cpp_wrapper.SwigPyIterator___ne__(self, x)
    def __iadd__(self, n):
        return _edgetpu_cpp_wrapper.SwigPyIterator___iadd__(self, n)
    def __isub__(self, n):
        return _edgetpu_cpp_wrapper.SwigPyIterator___isub__(self, n)
    def __add__(self, n):
        return _edgetpu_cpp_wrapper.SwigPyIterator___add__(self, n)
    def __sub__(self, *args):
        return _edgetpu_cpp_wrapper.SwigPyIterator___sub__(self, *args)
    def __iter__(self):
        return self
# Register the proxy class with the SWIG runtime.
SwigPyIterator_swigregister = _edgetpu_cpp_wrapper.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
class StringVector(_object):
    """SWIG proxy of a C++ ``std::vector<std::string>`` with list-like semantics."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, StringVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, StringVector, name)
    __repr__ = _swig_repr
    def iterator(self):
        return _edgetpu_cpp_wrapper.StringVector_iterator(self)
    def __iter__(self):
        return self.iterator()
    def __nonzero__(self):
        # Python 2 truthiness.
        return _edgetpu_cpp_wrapper.StringVector___nonzero__(self)
    def __bool__(self):
        # Python 3 truthiness.
        return _edgetpu_cpp_wrapper.StringVector___bool__(self)
    def __len__(self):
        return _edgetpu_cpp_wrapper.StringVector___len__(self)
    def __getslice__(self, i, j):
        return _edgetpu_cpp_wrapper.StringVector___getslice__(self, i, j)
    def __setslice__(self, *args):
        return _edgetpu_cpp_wrapper.StringVector___setslice__(self, *args)
    def __delslice__(self, i, j):
        return _edgetpu_cpp_wrapper.StringVector___delslice__(self, i, j)
    def __delitem__(self, *args):
        return _edgetpu_cpp_wrapper.StringVector___delitem__(self, *args)
    def __getitem__(self, *args):
        return _edgetpu_cpp_wrapper.StringVector___getitem__(self, *args)
    def __setitem__(self, *args):
        return _edgetpu_cpp_wrapper.StringVector___setitem__(self, *args)
    def pop(self):
        return _edgetpu_cpp_wrapper.StringVector_pop(self)
    def append(self, x):
        return _edgetpu_cpp_wrapper.StringVector_append(self, x)
    def empty(self):
        return _edgetpu_cpp_wrapper.StringVector_empty(self)
    def size(self):
        return _edgetpu_cpp_wrapper.StringVector_size(self)
    def swap(self, v):
        return _edgetpu_cpp_wrapper.StringVector_swap(self, v)
    def begin(self):
        return _edgetpu_cpp_wrapper.StringVector_begin(self)
    def end(self):
        return _edgetpu_cpp_wrapper.StringVector_end(self)
    def rbegin(self):
        return _edgetpu_cpp_wrapper.StringVector_rbegin(self)
    def rend(self):
        return _edgetpu_cpp_wrapper.StringVector_rend(self)
    def clear(self):
        return _edgetpu_cpp_wrapper.StringVector_clear(self)
    def get_allocator(self):
        return _edgetpu_cpp_wrapper.StringVector_get_allocator(self)
    def pop_back(self):
        return _edgetpu_cpp_wrapper.StringVector_pop_back(self)
    def erase(self, *args):
        return _edgetpu_cpp_wrapper.StringVector_erase(self, *args)
    def __init__(self, *args):
        this = _edgetpu_cpp_wrapper.new_StringVector(*args)
        try:
            self.this.append(this)
        except Exception:
            self.this = this
    def push_back(self, x):
        return _edgetpu_cpp_wrapper.StringVector_push_back(self, x)
    def front(self):
        return _edgetpu_cpp_wrapper.StringVector_front(self)
    def back(self):
        return _edgetpu_cpp_wrapper.StringVector_back(self)
    def assign(self, n, x):
        return _edgetpu_cpp_wrapper.StringVector_assign(self, n, x)
    def resize(self, *args):
        return _edgetpu_cpp_wrapper.StringVector_resize(self, *args)
    def insert(self, *args):
        return _edgetpu_cpp_wrapper.StringVector_insert(self, *args)
    def reserve(self, n):
        return _edgetpu_cpp_wrapper.StringVector_reserve(self, n)
    def capacity(self):
        return _edgetpu_cpp_wrapper.StringVector_capacity(self)
    __swig_destroy__ = _edgetpu_cpp_wrapper.delete_StringVector
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime.
StringVector_swigregister = _edgetpu_cpp_wrapper.StringVector_swigregister
StringVector_swigregister(StringVector)
def GetRuntimeVersion():
    """
    Returns runtime (libedgetpu.so) version.
    The version is dynamically retrieved from shared object.
    Returns:
      string.
    """
    return _edgetpu_cpp_wrapper.GetRuntimeVersion()
class BasicEngine(_object):
    """Python wrapper for BasicEngine."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, BasicEngine, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, BasicEngine, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        Initializes BasicEngine with model's path.
        Args:
          model_path: [required] string.
          device_path: [optional] string, path to Edge TPU device.
            See ListEdgeTpuPaths() for path example.
        """
        this = _edgetpu_cpp_wrapper.new_BasicEngine(*args)
        try:
            self.this.append(this)
        except Exception:
            self.this = this
    __swig_destroy__ = _edgetpu_cpp_wrapper.delete_BasicEngine
    __del__ = lambda self: None
    def RunInference(self, input):
        """
        Runs inference with given input.
        Args:
          input: 1-D numpy.array. Flattened input tensor.
        Returns:
          (latency, output_tensors). Latency is milliseconds in float while
          output_tensors is 1-D numpy.array. If there are multiple output tensors,
          it will be compressed into a 1-D array. You can use
          get_all_output_tensors_sizes, get_num_of_output_tensors and
          get_output_tensor_size to calculate the offset for each tensor.
          For example, if the model output 2 tensors with value [1, 2, 3] and
          [0.1, 0.4, 0.9], output_tensors will be [1, 2, 3, 0.1, 0.4, 0.9].
        """
        return _edgetpu_cpp_wrapper.BasicEngine_RunInference(self, input)
    def get_input_tensor_shape(self):
        """
        Gets shape of required input tensor.
        For models trained for image classification / detection, it's always
        (1, height, width, channels). After flatten, the 1-D array with size
        height * width * channels is the required input for RunInference.
        Returns:
          1-D numpy.array.
        """
        return _edgetpu_cpp_wrapper.BasicEngine_get_input_tensor_shape(self)
    def get_all_output_tensors_sizes(self):
        """
        Gets sizes of output tensors.
        A model may output several tensors, but in RunInference and get_raw_output
        we'll concatenate them as one. This function will return the sizes of original
        output tensors, which can be used to calculate the offset.
        Returns:
          Numpy.array represents the sizes of output tensors.
        """
        return _edgetpu_cpp_wrapper.BasicEngine_get_all_output_tensors_sizes(self)
    def get_num_of_output_tensors(self):
        """
        Gets number of output tensors.
        Returns:
          An integer representing number of output tensors.
        """
        return _edgetpu_cpp_wrapper.BasicEngine_get_num_of_output_tensors(self)
    def get_output_tensor_size(self, tensor_index):
        """
        Gets size of specific output tensor.
        Args:
          tensor_index: integer, the index of the output tensor.
        Returns:
          An integer representing the size of the output tensor.
        """
        return _edgetpu_cpp_wrapper.BasicEngine_get_output_tensor_size(self, tensor_index)
    def required_input_array_size(self):
        """
        Returns required size of input array of RunInference.
        Returns:
          An integer representing the size of the input array used for RunInference.
        """
        return _edgetpu_cpp_wrapper.BasicEngine_required_input_array_size(self)
    def total_output_array_size(self):
        """
        Gets expected size of output array returned by RunInference.
        Returns:
          An integer representing the size of output_tensors returned by
          RunInference().
        """
        return _edgetpu_cpp_wrapper.BasicEngine_total_output_array_size(self)
    def model_path(self):
        """
        Gets the path of model loaded in the engine.
        Returns:
          A string representing the model file's path.
        """
        return _edgetpu_cpp_wrapper.BasicEngine_model_path(self)
    def get_raw_output(self):
        """
        Gets output_tensors of last inference.
        This can be used by higher level engines for debugging.
        Returns:
          A numpy.array.
        """
        return _edgetpu_cpp_wrapper.BasicEngine_get_raw_output(self)
    def get_inference_time(self):
        """
        Gets latency of last inference.
        This can be used by higher level engines for debugging.
        Returns:
          A float number(in milliseconds).
        """
        return _edgetpu_cpp_wrapper.BasicEngine_get_inference_time(self)
    def device_path(self):
        """
        Gets associated device path of this BasicEngine instance.
        Returns:
          A string representing corresponding Edge TPU device path.
        """
        return _edgetpu_cpp_wrapper.BasicEngine_device_path(self)
# Register the proxy class and re-export wrapper version constants.
BasicEngine_swigregister = _edgetpu_cpp_wrapper.BasicEngine_swigregister
BasicEngine_swigregister(BasicEngine)
cvar = _edgetpu_cpp_wrapper.cvar
kEdgeTpuCppWrapperVersion = cvar.kEdgeTpuCppWrapperVersion
kSupportedRuntimeVersion = cvar.kSupportedRuntimeVersion
class ImprintingEngine(_object):
    """Engine used for imprinting method based transfer learning."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, ImprintingEngine, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, ImprintingEngine, name)
    __repr__ = _swig_repr
    def __init__(self, model_path):
        """
        Initializes ImprintingEngine with embedding extractor/model's path.
        Args:
          model_path: string, path of the embedding extractor or the model previous
            trained with ImprintingEngine.
        """
        this = _edgetpu_cpp_wrapper.new_ImprintingEngine(model_path)
        try:
            self.this.append(this)
        except Exception:
            self.this = this
    def SaveModel(self, output_path):
        """
        Saves trained model as '.tflite' file.
        Args:
          output_path: string, output path of the trained model.
        """
        return _edgetpu_cpp_wrapper.ImprintingEngine_SaveModel(self, output_path)
    def Train(self, input):
        """
        Trains model with a set of images from same class.
        Args:
          input: list of numpy.array. Each numpy.array represents as a 1-D tensor
            converted from an image.
        Returns:
          int, the label_id for the class.
        """
        return _edgetpu_cpp_wrapper.ImprintingEngine_Train(self, input)
    __swig_destroy__ = _edgetpu_cpp_wrapper.delete_ImprintingEngine
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime.
ImprintingEngine_swigregister = _edgetpu_cpp_wrapper.ImprintingEngine_swigregister
ImprintingEngine_swigregister(ImprintingEngine)
# EdgeTpuState enum constants exported from the C++ wrapper; these are the
# accepted values for the `state` argument of ListEdgeTpuPaths().
_edgetpu_cpp_wrapper.EdgeTpuState_kNone_swigconstant(_edgetpu_cpp_wrapper)
EdgeTpuState_kNone = _edgetpu_cpp_wrapper.EdgeTpuState_kNone
_edgetpu_cpp_wrapper.EdgeTpuState_kAssigned_swigconstant(_edgetpu_cpp_wrapper)
EdgeTpuState_kAssigned = _edgetpu_cpp_wrapper.EdgeTpuState_kAssigned
_edgetpu_cpp_wrapper.EdgeTpuState_kUnassigned_swigconstant(_edgetpu_cpp_wrapper)
EdgeTpuState_kUnassigned = _edgetpu_cpp_wrapper.EdgeTpuState_kUnassigned
def ListEdgeTpuPaths(state):
"""
Lists paths of Edge TPU devices available to host.
Args:
state: device's current state. Can be:
EDGE_TPU_STATE_ASSIGNED: devices that are associated with BasicEngine instance.
EDGE_TPU_STATE_UNASSIGNED: devices that are available.
EDGE_TPU_STATE_NONE: ASSIGNED or UNASSIGNED, all devices detected by host.
Returns:
tuple of strings, which represents device paths in certain state.
"""
| |
<filename>downscale_/downscale/test/test_micro_met.py
import numpy as np
from numpy.testing import *
import os
import warnings
import pytest
warnings.filterwarnings("ignore", category=DeprecationWarning)
from downscale.operators.micro_met import MicroMet
@pytest.fixture
def mnt():
    """Small test DEM, loaded regardless of the working directory."""
    try:
        return np.load("../data_test/mnt_small.npy")
    except FileNotFoundError:
        return np.load(os.getcwd() + "/data_test/mnt_small.npy")
@pytest.fixture
def idx_x():
    """x-indices of the test points.

    Fix: the original wrapped an existing ndarray in np.array() a second
    time, making a needless copy; one conversion is enough.
    """
    return np.array([10, 30, 72])
@pytest.fixture
def idx_y():
    """y-indices of the test points (kept as a plain list, unlike idx_x)."""
    return [28, 29, 53]
@pytest.fixture
def m():
    """Fresh MicroMet operator instance for each test."""
    return MicroMet()
@pytest.fixture
def result_terrain_slope_map(mnt, m):
    """Slope map of the test DEM at 30 m resolution."""
    return m.terrain_slope_map(mnt, 30, verbose=False)
@pytest.fixture
def result_terrain_slope_idx(mnt, m, idx_x, idx_y):
    """Slope evaluated at the test point indices."""
    return m.terrain_slope_idx(mnt, 30, idx_x, idx_y, verbose=False)
@pytest.fixture
def result_terrain_slope_azimuth_map(mnt, m):
    """Slope-azimuth map of the test DEM at 30 m resolution."""
    return m.terrain_slope_azimuth_map(mnt, 30, verbose=False)
@pytest.fixture
def result_terrain_slope_azimuth_idx(mnt, m, idx_x, idx_y):
    """Slope azimuth evaluated at the test point indices."""
    return m.terrain_slope_azimuth_idx(mnt, 30, idx_x, idx_y, verbose=False)
@pytest.fixture
def result_curvature_map_scale_true(mnt, m):
    """Curvature map with scaling enabled."""
    return m.curvature_map(mnt, scale=True, verbose=False)
@pytest.fixture
def result_curvature_map_scale_false(mnt, m):
    """Curvature map with scaling disabled."""
    return m.curvature_map(mnt, scale=False, verbose=False)
@pytest.fixture
def result_omega_s_map_scale_true(mnt, m):
    """omega_s map (30 m resolution, wind from 270 deg) with scaling enabled."""
    return m.omega_s_map(mnt, 30, 270, scale=True, verbose=False)
@pytest.fixture
def result_omega_s_map_scale_false(mnt, m):
    """omega_s map (30 m resolution, wind from 270 deg) with scaling disabled.

    Bug fix: this fixture previously called m.curvature_map with omega_s_map's
    arguments (copy-paste error), so the scale=False branch of omega_s_map was
    never actually exercised by the tests that use this fixture.
    """
    return m.omega_s_map(mnt, 30, 270, scale=False, verbose=False)
@pytest.fixture
def result_diverting_factor_map_scale_true(mnt, m):
    """Diverting-factor map with scaling enabled."""
    return m.diverting_factor_map(mnt, 30, 270, scale=True, verbose=False)
@pytest.fixture
def result_diverting_factor_map_scale_false(mnt, m):
    """Diverting-factor map with scaling disabled."""
    return m.diverting_factor_map(mnt, 30, 270, scale=False, verbose=False)
@pytest.fixture
def result_wind_weighting_factor_map_scale_true(mnt, m):
    """Wind-weighting-factor map with scaling enabled."""
    return m.wind_weighting_factor_map(mnt, 30, 270, scale=True, verbose=False)
@pytest.fixture
def result_wind_weighting_factor_map_scale_false(mnt, m):
    """Wind-weighting-factor map with scaling disabled."""
    return m.wind_weighting_factor_map(mnt, 30, 270, scale=False, verbose=False)
@pytest.fixture
def result_curvature_idx_scale_true_safe(mnt, m, idx_x, idx_y):
    """Curvature at the test points, scale=True, method='safe'."""
    return m.curvature_idx(mnt, idx_x, idx_y, scale=True, method="safe", verbose=False)
@pytest.fixture
def result_curvature_idx_scale_false_safe(mnt, m, idx_x, idx_y):
    """Curvature at the test points, scale=False, method='safe'."""
    return m.curvature_idx(mnt, idx_x, idx_y, scale=False, method="safe", verbose=False)
@pytest.fixture
def result_curvature_idx_scale_true_other(mnt, m, idx_x, idx_y):
    """Curvature at the test points, scale=True, method='other'."""
    return m.curvature_idx(mnt, idx_x, idx_y, scale=True, method="other", verbose=False)
@pytest.fixture
def result_curvature_idx_scale_false_other(mnt, m, idx_x, idx_y):
    """Curvature at the test points, scale=False, method='other'."""
    return m.curvature_idx(mnt, idx_x, idx_y, scale=False, method="other", verbose=False)
@pytest.fixture
def result_omega_s_idx_scale_true_safe(mnt, m, idx_x, idx_y):
    """omega_s at the test points, scale=True, method='safe'."""
    return m.omega_s_idx(mnt, 30, 270, idx_x, idx_y, scale=True, method="safe", verbose=False)
@pytest.fixture
def result_omega_s_idx_scale_false_safe(mnt, m, idx_x, idx_y):
    """omega_s at the test points, scale=False, method='safe'."""
    return m.omega_s_idx(mnt, 30, 270, idx_x, idx_y, scale=False, method="safe", verbose=False)
@pytest.fixture
def result_omega_s_idx_scale_true_other(mnt, m, idx_x, idx_y):
    """omega_s at the test points, scale=True, method='other'."""
    return m.omega_s_idx(mnt, 30, 270, idx_x, idx_y, scale=True, method="other", verbose=False)
@pytest.fixture
def result_omega_s_idx_scale_false_other(mnt, m, idx_x, idx_y):
    """omega_s at the test points, scale=False, method='other'."""
    return m.omega_s_idx(mnt, 30, 270, idx_x, idx_y, scale=False, method="other", verbose=False)
@pytest.fixture
def result_wind_weighting_factor_idx_scale_true_safe(mnt, m, idx_x, idx_y):
    """Wind weighting factor at the test points, scale=True, method='safe'."""
    return m.wind_weighting_factor_idx(mnt, 30, 270, idx_x, idx_y, scale=True, method="safe", verbose=False)
@pytest.fixture
def result_wind_weighting_factor_idx_scale_false_safe(mnt, m, idx_x, idx_y):
    """Wind weighting factor at the test points, scale=False, method='safe'."""
    return m.wind_weighting_factor_idx(mnt, 30, 270, idx_x, idx_y, scale=False, method="safe", verbose=False)
@pytest.fixture
def result_wind_weighting_factor_idx_scale_true_other(mnt, m, idx_x, idx_y):
    """Wind weighting factor at the test points, scale=True, method='other'."""
    return m.wind_weighting_factor_idx(mnt, 30, 270, idx_x, idx_y, scale=True, method="other", verbose=False)
@pytest.fixture
def result_wind_weighting_factor_idx_scale_false_other(mnt, m, idx_x, idx_y):
    """Wind weighting factor at the test points, scale=False, method='other'."""
    return m.wind_weighting_factor_idx(mnt, 30, 270, idx_x, idx_y, scale=False, method="other", verbose=False)
@pytest.fixture
def result_diverting_factor_idx_scale_true_safe(mnt, m, idx_x, idx_y):
    """Diverting factor at the test points, scale=True, method='safe'."""
    return m.diverting_factor_idx(mnt, 30, 270, idx_x, idx_y, scale=True, method="safe", verbose=False)
@pytest.fixture
def result_diverting_factor_idx_scale_false_safe(mnt, m, idx_x, idx_y):
    """Diverting factor at the test points, scale=False, method='safe'."""
    return m.diverting_factor_idx(mnt, 30, 270, idx_x, idx_y, scale=False, method="safe", verbose=False)
@pytest.fixture
def result_diverting_factor_idx_scale_true_other(mnt, m, idx_x, idx_y):
    """Diverting factor at the test points, scale=True, method='other'."""
    return m.diverting_factor_idx(mnt, 30, 270, idx_x, idx_y, scale=True, method="other", verbose=False)
@pytest.fixture
def result_diverting_factor_idx_scale_false_other(mnt, m, idx_x, idx_y):
    """Diverting factor at the test points, scale=False, method='other'."""
    return m.diverting_factor_idx(mnt, 30, 270, idx_x, idx_y, scale=False, method="other", verbose=False)
def test_terrain_slope_map_no_nans(result_terrain_slope_map):
assert np.isnan(result_terrain_slope_map).sum() == 0
def test_terrain_slope_map_good_shape(result_terrain_slope_map, mnt):
assert result_terrain_slope_map.shape == mnt.shape
def test_terrain_slope_map_good_dtype(result_terrain_slope_map):
assert result_terrain_slope_map.dtype == np.float32
def test_terrain_slope_idx_no_nans(result_terrain_slope_idx):
assert np.isnan(result_terrain_slope_idx).sum() == 0
def test_terrain_slope_idx_good_shape(result_terrain_slope_idx, idx_x):
assert result_terrain_slope_idx.shape == idx_x.shape
def test_terrain_slope_idx_good_dtype(result_terrain_slope_idx):
assert result_terrain_slope_idx.dtype == np.float32
def test_terrain_slope_azimuth_map_no_nans(result_terrain_slope_azimuth_map):
assert np.isnan(result_terrain_slope_azimuth_map).sum() == 0
def test_terrain_slope_azimuth_map_good_shape(result_terrain_slope_azimuth_map, mnt):
assert result_terrain_slope_azimuth_map.shape == mnt.shape
def test_terrain_slope_azimuth_map_good_dtype(result_terrain_slope_azimuth_map):
assert result_terrain_slope_azimuth_map.dtype == np.float32
def test_terrain_slope_azimuth_idx_no_nans(result_terrain_slope_azimuth_idx):
assert np.isnan(result_terrain_slope_azimuth_idx).sum() == 0
def test_terrain_slope_azimuth_idx_good_shape(result_terrain_slope_azimuth_idx, idx_x):
assert result_terrain_slope_azimuth_idx.shape == idx_x.shape
def test_terrain_slope_azimuth_idx_good_dtype(result_terrain_slope_azimuth_idx):
assert result_terrain_slope_azimuth_idx.dtype == np.float32
def test_curvature_map_no_nans(result_curvature_map_scale_true, result_curvature_map_scale_false):
assert np.isnan(result_curvature_map_scale_true).sum() == 0
assert np.isnan(result_curvature_map_scale_false).sum() == 0
def test_curvature_map_good_shape(result_curvature_map_scale_true, result_curvature_map_scale_false, mnt):
assert result_curvature_map_scale_true.shape == mnt.shape
assert result_curvature_map_scale_false.shape == mnt.shape
def test_curvature_map_good_dtype(result_curvature_map_scale_true, result_curvature_map_scale_false):
assert result_curvature_map_scale_true.dtype == np.float32
assert result_curvature_map_scale_false.dtype == np.float32
def test_curvature_idx_no_nans(result_curvature_idx_scale_true_safe, result_curvature_idx_scale_false_safe,
result_curvature_idx_scale_true_other, result_curvature_idx_scale_false_other):
assert np.isnan(result_curvature_idx_scale_true_safe).sum() == 0
assert np.isnan(result_curvature_idx_scale_false_safe).sum() == 0
assert np.isnan(result_curvature_idx_scale_true_other).sum() == 0
assert np.isnan(result_curvature_idx_scale_false_other).sum() == 0
def test_curvature_idx_good_shape(result_curvature_idx_scale_true_safe, result_curvature_idx_scale_false_safe,
result_curvature_idx_scale_true_other,
result_curvature_idx_scale_false_other, idx_x):
assert result_curvature_idx_scale_true_safe.shape == idx_x.shape
assert result_curvature_idx_scale_false_safe.shape == idx_x.shape
assert result_curvature_idx_scale_true_other.shape == idx_x.shape
assert result_curvature_idx_scale_false_other.shape == idx_x.shape
def test_curvature_idx_good_dtype(result_curvature_idx_scale_true_safe, result_curvature_idx_scale_false_safe,
result_curvature_idx_scale_true_other, result_curvature_idx_scale_false_other):
assert result_curvature_idx_scale_true_safe.dtype == np.float32
assert result_curvature_idx_scale_false_safe.dtype == np.float32
assert result_curvature_idx_scale_true_other.dtype == np.float32
assert result_curvature_idx_scale_false_other.dtype == np.float32
def test_omega_s_map_no_nans(result_omega_s_map_scale_true, result_omega_s_map_scale_false):
assert np.isnan(result_omega_s_map_scale_true).sum() == 0
assert np.isnan(result_omega_s_map_scale_false).sum() == 0
def test_omega_s_map_good_shape(result_omega_s_map_scale_true, result_omega_s_map_scale_false, mnt):
assert result_omega_s_map_scale_true.shape == mnt.shape
assert result_omega_s_map_scale_false.shape == mnt.shape
def test_omega_s_map_good_dtype(result_omega_s_map_scale_true, result_omega_s_map_scale_false):
assert result_omega_s_map_scale_true.dtype == np.float32
assert result_omega_s_map_scale_false.dtype == np.float32
def test_omega_s_idx_no_nans(result_omega_s_idx_scale_true_safe, result_omega_s_idx_scale_false_safe,
result_omega_s_idx_scale_true_other, result_omega_s_idx_scale_false_other):
assert np.isnan(result_omega_s_idx_scale_true_safe).sum() == 0
assert np.isnan(result_omega_s_idx_scale_false_safe).sum() == 0
assert np.isnan(result_omega_s_idx_scale_true_other).sum() == 0
assert np.isnan(result_omega_s_idx_scale_false_other).sum() == 0
def test_omega_s_idx_good_shape(result_omega_s_idx_scale_true_safe, result_omega_s_idx_scale_false_safe,
result_omega_s_idx_scale_true_other,
result_omega_s_idx_scale_false_other, idx_x):
assert result_omega_s_idx_scale_true_safe.shape == idx_x.shape
assert result_omega_s_idx_scale_false_safe.shape == idx_x.shape
assert result_omega_s_idx_scale_true_other.shape == idx_x.shape
assert result_omega_s_idx_scale_false_other.shape == idx_x.shape
def test_omega_s_idx_good_dtype(result_omega_s_idx_scale_true_safe, result_omega_s_idx_scale_false_safe,
result_omega_s_idx_scale_true_other, result_omega_s_idx_scale_false_other):
assert result_omega_s_idx_scale_true_safe.dtype == np.float32
assert result_omega_s_idx_scale_false_safe.dtype == np.float32
assert result_omega_s_idx_scale_true_other.dtype == np.float32
assert result_omega_s_idx_scale_false_other.dtype == np.float32
def test_wind_weighting_factor_map_no_nans(result_wind_weighting_factor_map_scale_true,
result_wind_weighting_factor_map_scale_false):
assert np.isnan(result_wind_weighting_factor_map_scale_true).sum() == 0
assert np.isnan(result_wind_weighting_factor_map_scale_false).sum() == 0
def test_wind_weighting_factor_map_good_shape(result_wind_weighting_factor_map_scale_true,
result_wind_weighting_factor_map_scale_false, mnt):
assert result_wind_weighting_factor_map_scale_true.shape == mnt.shape
assert result_wind_weighting_factor_map_scale_false.shape == mnt.shape
def test_wind_weighting_factor_map_good_dtype(result_wind_weighting_factor_map_scale_true,
result_wind_weighting_factor_map_scale_false):
assert result_wind_weighting_factor_map_scale_true.dtype == np.float32
assert result_wind_weighting_factor_map_scale_false.dtype == np.float32
def test_wind_weighting_factor_idx_no_nans(result_wind_weighting_factor_idx_scale_true_safe,
result_wind_weighting_factor_idx_scale_false_safe,
result_wind_weighting_factor_idx_scale_true_other,
result_wind_weighting_factor_idx_scale_false_other):
assert np.isnan(result_wind_weighting_factor_idx_scale_true_safe).sum() == 0
assert np.isnan(result_wind_weighting_factor_idx_scale_false_safe).sum() == 0
assert np.isnan(result_wind_weighting_factor_idx_scale_true_other).sum() == 0
assert np.isnan(result_wind_weighting_factor_idx_scale_false_other).sum() == 0
def test_wind_weighting_factor_idx_good_shape(result_wind_weighting_factor_idx_scale_true_safe,
result_wind_weighting_factor_idx_scale_false_safe,
result_wind_weighting_factor_idx_scale_true_other,
result_wind_weighting_factor_idx_scale_false_other, idx_x):
assert result_wind_weighting_factor_idx_scale_true_safe.shape == idx_x.shape
assert result_wind_weighting_factor_idx_scale_false_safe.shape == idx_x.shape
assert result_wind_weighting_factor_idx_scale_true_other.shape == idx_x.shape
assert result_wind_weighting_factor_idx_scale_false_other.shape == idx_x.shape
def test_wind_weighting_factor_idx_good_dtype(result_wind_weighting_factor_idx_scale_true_safe,
result_wind_weighting_factor_idx_scale_false_safe,
result_wind_weighting_factor_idx_scale_true_other,
result_wind_weighting_factor_idx_scale_false_other):
assert result_wind_weighting_factor_idx_scale_true_safe.dtype == np.float32
assert result_wind_weighting_factor_idx_scale_false_safe.dtype == np.float32
assert result_wind_weighting_factor_idx_scale_true_other.dtype == np.float32
assert result_wind_weighting_factor_idx_scale_false_other.dtype == np.float32
def test_diverting_factor_map_no_nans(result_diverting_factor_map_scale_true, result_diverting_factor_map_scale_false):
assert np.isnan(result_diverting_factor_map_scale_true).sum() == 0
assert np.isnan(result_diverting_factor_map_scale_false).sum() == 0
def test_diverting_factor_map_good_shape(result_diverting_factor_map_scale_true,
result_diverting_factor_map_scale_false, mnt):
assert result_diverting_factor_map_scale_true.shape == mnt.shape
assert result_diverting_factor_map_scale_false.shape == mnt.shape
def test_diverting_factor_map_good_dtype(result_diverting_factor_map_scale_true,
result_diverting_factor_map_scale_false):
assert result_diverting_factor_map_scale_true.dtype == np.float32
assert result_diverting_factor_map_scale_false.dtype == np.float32
def test_diverting_factor_idx_no_nans(result_diverting_factor_idx_scale_true_safe,
result_diverting_factor_idx_scale_false_safe,
result_diverting_factor_idx_scale_true_other,
result_diverting_factor_idx_scale_false_other):
assert np.isnan(result_diverting_factor_idx_scale_true_safe).sum() == 0
assert np.isnan(result_diverting_factor_idx_scale_false_safe).sum() == 0
assert np.isnan(result_diverting_factor_idx_scale_true_other).sum() == 0
assert np.isnan(result_diverting_factor_idx_scale_false_other).sum() == 0
def test_diverting_factor_idx_good_shape(result_diverting_factor_idx_scale_true_safe,
result_diverting_factor_idx_scale_false_safe,
result_diverting_factor_idx_scale_true_other,
result_diverting_factor_idx_scale_false_other, idx_x):
assert result_diverting_factor_idx_scale_true_safe.shape == idx_x.shape
assert result_diverting_factor_idx_scale_false_safe.shape == idx_x.shape
assert result_diverting_factor_idx_scale_true_other.shape == idx_x.shape
assert result_diverting_factor_idx_scale_false_other.shape == idx_x.shape
def test_diverting_factor_idx_good_dtype(result_diverting_factor_idx_scale_true_safe,
result_diverting_factor_idx_scale_false_safe,
result_diverting_factor_idx_scale_true_other,
result_diverting_factor_idx_scale_false_other):
assert result_diverting_factor_idx_scale_true_safe.dtype == np.float32
assert result_diverting_factor_idx_scale_false_safe.dtype == np.float32
assert result_diverting_factor_idx_scale_true_other.dtype == np.float32
assert result_diverting_factor_idx_scale_false_other.dtype == np.float32
def test_terrain_slope_map_and_idx_consistent():
    """Point-wise slope (idx variant) matches the same cell of the full map."""
    dem = np.array([[13, 24, 32, 50, 36, 28, 28],
                    [12, 11, 41, 51, 38, 10, 20],
                    [50, 13, 28, 48, 28, 5, 18],
                    [7, 32, 41, 53, 59, 28, 8],
                    [18, 19, 48, 20, 29, 24, 93],
                    [19, 29, 30, 28, 18, 49, 50]])
    micromet = MicroMet()
    from_map = micromet.terrain_slope_map(dem, 1, verbose=False)[3, 4]
    from_idx = micromet.terrain_slope_idx(dem, 1, [4], [3], verbose=False)
    assert_allclose(from_map, from_idx)
def test_terrain_slope_map_good_result():
    """Slope map reproduces reference values on a 4x4 DEM."""
    micromet = MicroMet()
    dem = np.array([[13, 24, 32, 50],
                    [12, 11, 41, 51],
                    [50, 13, 28, 48],
                    [7, 32, 41, 53]])
    expected = np.array([[1.48050674, 1.50876899, 1.5076349, 1.51538309],
                         [1.51687339, 1.50640289, 1.52108546, 1.47161907],
                         [1.54383732, 1.50513128, 1.51371554, 1.52090016],
                         [1.55069422, 1.5315932, 1.51102598, 1.49402444]])
    assert_allclose(micromet.terrain_slope_map(dem, 1, verbose=False), expected)
def test_terrain_slope_azimuth_map_and_idx_consistent():
    """Azimuth at one point (idx variant) matches the same cell of the full map."""
    dem = np.array([[13, 24, 32, 50, 36, 28, 28],
                    [12, 11, 41, 51, 38, 10, 20],
                    [50, 13, 28, 48, 28, 5, 18],
                    [7, 32, 41, 53, 59, 28, 8],
                    [18, 19, 48, 20, 29, 24, 93],
                    [19, 29, 30, 28, 18, 49, 50]])
    micromet = MicroMet()
    from_map = micromet.terrain_slope_azimuth_map(dem, 1, verbose=False)[3, 4]
    from_idx = micromet.terrain_slope_azimuth_idx(dem, 1, [4], [3], verbose=False)
    assert_allclose(from_map, from_idx)
def test_terrain_slope_azimuth_map_good_result():
    """Azimuth map reproduces reference values on a 4x4 DEM."""
    micromet = MicroMet()
    dem = np.array([[13, 24, 32, 50],
                    [12, 11, 41, 51],
                    [50, 13, 28, 48],
                    [7, 32, 41, 53]])
    expected = np.array([[4.80304887, 5.65210592, 4.10684432, 4.65689048],
                         [6.22918381, 5.07493322, 4.81205763, 4.81205763],
                         [4.64492396, 5.47453552, 4.71238898, 4.66243058],
                         [5.75655804, 3.87149231, 3.82100646, 4.31759786]])
    assert_allclose(micromet.terrain_slope_azimuth_map(dem, 1, verbose=False), expected)
def test_curvature_map_and_idx_consistent():
    """Curvature at one point (idx variant) matches the same cell of the full map."""
    dem = np.array([[13, 24, 32, 50, 36, 28, 28],
                    [12, 11, 41, 51, 38, 10, 20],
                    [50, 13, 28, 48, 28, 5, 18],
                    [7, 32, 41, 53, 59, 28, 8],
                    [18, 19, 48, 20, 29, 24, 93],
                    [19, 29, 30, 28, 18, 49, 50]])
    micromet = MicroMet()
    from_map = micromet.curvature_map(dem, verbose=False)[3, 4]
    from_idx = micromet.curvature_idx(dem, [4], [3], verbose=False)
    assert_allclose(from_map, from_idx)
def test_curvature_map_good_result():
    """Two interior cells of curvature_map match reference values.

    Fix: the original called ``curvature_map`` twice (once per asserted
    cell); the map is deterministic, so compute it once and index it.
    """
    m = MicroMet()
    array_test = np.array([[13, 24, 32, 50],
                           [12, 11, 41, 51],
                           [50, 13, 28, 48],
                           [7, 32, 41, 53]])
    curvature = m.curvature_map(array_test, verbose=False)
    assert_almost_equal(curvature[1, 1], -0.19803762)
    assert_almost_equal(curvature[1, 2], 0.121523505)
def test_curvature_idx_good_result():
    """curvature_idx reproduces known reference values at two cells."""
    micromet = MicroMet()
    dem = np.array([[13, 24, 32, 50],
                    [12, 11, 41, 51],
                    [50, 13, 28, 48],
                    [7, 32, 41, 53]])
    # Direct per-cell evaluation (x=1,y=1) and (x=2,y=1).
    idx_value_1 = micromet.curvature_idx(dem, 1, 1, verbose=False)
    idx_value_2 = micromet.curvature_idx(dem, 2, 1, verbose=False)
    assert_almost_equal(idx_value_1, -0.19803762)
    assert_almost_equal(idx_value_2, 0.121523505)
def test_omega_s_map_good_result():
m = MicroMet()
array_test = np.array([[13, 24, 32, 50],
[12, 11, 41, 51],
[50, 13, 28, 48],
[7, 32, 41, 53]])
result_map_1 = np.round(m.omega_s_map(array_test, 1, 270, verbose=False), 2)
result_map_2 = np.round(m.omega_s_map(array_test, 1, 270, scale=True, scaling_factor=10, verbose=False), 2)
result_1 = np.array([[1.48050674, 1.50876899, 1.5076349, 1.51538309],
[1.51687339, 1.50640289, 1.52108546, 1.47161907],
[1.54383732, 1.50513128, 1.51371554, 1.52090016],
| |
import os
import math
import copy
import numpy as np
import astropy.io.fits as pf
from astropy.time import Time, TimeDelta
from astropy.stats import biweight_midvariance, mad_std
from sklearn.cluster import DBSCAN
from collections import OrderedDict
from utils import (baselines_2_ants, index_of, get_uv_correlations,
find_card_from_header, get_key, to_boolean_array,
check_issubset, convert_an_hdu, convert_fq_hdu,
mask_boolean_with_boolean)
try:
import pylab
except ImportError:
pylab = None
import matplotlib
# ``np.complex``/``np.int`` were mere aliases of the builtins; they were
# deprecated in NumPy 1.20 and removed in 1.24. The builtins behave
# identically on every NumPy version.
vec_complex = np.vectorize(complex)
vec_int = np.vectorize(int)
# FITS/AIPS STOKES-axis codes -> correlation/Stokes labels.
stokes_dict = {-8: 'YX', -7: 'XY', -6: 'YY', -5: 'XX',
               -4: 'LR', -3: 'RL', -2: 'LL', -1: 'RR',
               1: 'I', 2: 'Q', 3: 'U', 4: 'V'}
# FIXME: Handling FITS files with only one scan (used for CV)
class UVData(object):
    def __init__(self, fname, mode='readonly'):
        """
        Open a random-groups UV FITS file and build lookup structures.

        :param fname:
            Path to the UV FITS file.
        :param mode: (optional)
            Mode passed to ``astropy.io.fits.open`` (default: ``readonly``).
        """
        self.fname = fname
        self.hdulist = pf.open(fname, mode=mode, save_backup=True)
        # Primary HDU holds the random-groups uv-data.
        self.hdu = self.hdulist[0]
        self._stokes_dict = {'RR': 0, 'LL': 1, 'RL': 2, 'LR': 3}
        # Builds ``par_dict``/``data_dict``/``slices_dict`` used below.
        self.learn_data_structure(self.hdu)
        # Assemble complex visibilities from the REAL/IMAG planes of the
        # COMPLEX axis; plane 2 holds the weights.
        self._uvdata = self.view_uvdata({'COMPLEX': 0}) +\
                       1j * self.view_uvdata({'COMPLEX': 1})
        self._weights = self.view_uvdata({'COMPLEX': 2})
        # Numpy boolean arrays with shape of ``UVData.uvdata``:
        # negative-weight (flagged) vs. non-negative-weight visibilities.
        self._nw_indxs = self._weights < 0
        self._pw_indxs = self._weights >= 0
        # Lazily-computed caches, filled on first property access.
        self._error = None
        self._scans_bl = None
        self._stokes = None
        self._times = None
        self._frequency = None
        self._nchans = None
        self._freq_width = None
        self._freq_width_if = None
        self._band_center = None
        self.scale_uv = 1.0  # multiplicative factor applied to uv-coordinates
        # Dictionary with keys - baselines & values - boolean numpy arrays or
        # lists of boolean numpy arrays with indexes of that baseline (or it's
        # scans) in ``UVData.uvdata`` array
        self._indxs_baselines = dict()
        self._indxs_baselines_scans = dict()
        # Dictionary with keys - baselines & values - tuples or lists of tuples
        # of shapes of part for that baseline (or it's scans) in
        # ``UVData.uvdata`` array
        self._shapes_baselines = dict()
        self._shapes_baselines_scans = dict()
        self._get_baselines_info()
        self._noise_diffs = None
        self._noise_v = None
def set_uv_scale(self, scale):
self.scale_uv = scale
    def _get_baselines_info(self):
        """
        Count indexes of visibilities on each single baseline (for single IF &
        Stokes) in ``uvdata`` array.

        Populates ``_indxs_baselines`` and ``_shapes_baselines`` (and the
        per-scan ``_shapes_baselines_scans`` when scan info is available).
        """
        self._indxs_baselines_scans = self.scans_bl
        for baseline in self.baselines:
            indxs = self._get_baseline_indexes(baseline)
            self._indxs_baselines[baseline] = indxs
            self._shapes_baselines[baseline] = np.shape(self.uvdata[indxs])
            self._shapes_baselines_scans[baseline] = list()
            try:
                for scan_indxs in self._indxs_baselines_scans[baseline]:
                    bl_scan_data = self.uvdata[scan_indxs]
                    self._shapes_baselines_scans[baseline].append(np.shape(bl_scan_data))
            except TypeError:
                # NOTE(review): ``scans_bl`` apparently stores a non-iterable
                # (likely ``None``) when no scans were found for a baseline;
                # iterating then raises TypeError and we simply skip -- confirm
                # against ``scans_bl`` implementation.
                pass
def nw_indxs_baseline(self, baseline, average_bands=False, stokes=None,
average_stokes=False):
"""
Shortcut to negative or zero weights visibilities on given baseline.
:param baseline:
Integer baseline number.
:param average_bands: (optional)
Average bands in that way that if any bands for current
visibility/stokes has negative weight then this visibility/stokes
has negative weight. (default: ``False``)
:param stokes: (optional)
Stokes parameters of ``self`` that output or use for calculation of
frequency averaged values.
:param average_stokes: (optional)
Average Stokes parameters chosen in ``stokes`` kw argument or all
present in data in that way that if any stokes for current
visibility has negative weight then this visibility has negative
weight. (default: ``False``)
:return:
Numpy boolean array with shape of ``(#vis, #bands, #stokes)`` or
``(#vis, #stokes)``, where #vis - number of visibilities for given
baseline & #stokes - number of stokes parameters in ``self`` or
``len(stokes)`` in ``stokes`` is not ``None``. (default: ``None``)
"""
result = self._nw_indxs[self._indxs_baselines[baseline]]
stokes_indxs = list()
if stokes is not None:
for stoke in stokes:
assert stoke in self.stokes
stokes_indxs.append(self.stokes_dict_inv[stoke])
result = result[:, :, stokes_indxs]
if average_bands:
result = np.asarray(~result, dtype=int)
result = np.prod(result, axis=1)
result = np.asarray(result, dtype=bool)
result = ~result
if average_stokes and not average_bands:
result = np.asarray(~result, dtype=int)
result = np.prod(result, axis=2)
result = np.asarray(result, dtype=bool)
result = ~result
if average_stokes and average_bands:
result = np.asarray(~result, dtype=int)
result = np.prod(result, axis=1)
result = np.asarray(result, dtype=bool)
result = ~result
return result
def pw_indxs_baseline(self, baseline, average_bands=False, stokes=None,
average_stokes=False):
"""
Shortcut to positive weights visibilities on given baseline.
:param baseline:
Integer baseline number.
:return:
Numpy boolean array with shape of ``(#vis, #bands, #stokes)``, where
#vis - number of visibilities for given baseline.
"""
return ~self.nw_indxs_baseline(baseline, average_bands=average_bands,
stokes=stokes,
average_stokes=average_stokes)
# FIXME: Finish it!
def sample_size(self, stokes='I', average_freq=True):
assert self._check_stokes_present(stokes)
if average_freq:
size = self.uvdata_freq_averaged.mask[0]
else:
size = self.uvdata_weight_masked.shape[0] * self.uvdata_weight_masked[1]
stokes_multiplyer = 1
if stokes == 'I':
stokes_multiplyer = 2
def _check_stokes_present(self, stokes):
"""
Check if ``stokes`` is present in data (could be calculated from data).
:param stokes:
String of Stokes parameters ("I, Q, U, V, RR, LL, RL, LR").
:return:
Boolean value.
"""
stokes_present = self.stokes
if stokes in stokes_present:
return True
elif stokes in ("I", "Q", "U", "V"):
if stokes in ("I", "V"):
return "RR" in stokes_present and "LL" in stokes_present
# If "Q" or "U"
else:
return "RL" in stokes_present and "LR" in stokes_present
elif stokes in ("RR", "LL", "RL", "LR"):
return stokes in stokes_present
else:
raise Exception("stokes must be from I, Q, U, V, RR, LL, RL or LR!")
def sync(self):
"""
Sync internal representation with complex representation and update
complex representation ``self._uvdata``. I need this because i don't
know how to make a complex view to real numpy.ndarray
"""
slices_dict = self.slices_dict.copy()
slices_dict.update({'COMPLEX': 0})
self.hdu.data.data[slices_dict.values()] = self.uvdata.real
slices_dict.update({'COMPLEX': 1})
self.hdu.data.data[slices_dict.values()] = self.uvdata.imag
def save(self, fname=None, data=None, rewrite=False,
downscale_by_freq=False):
"""
Save uv-data to FITS-file.
:param data: (optional)
Numpy record array with uv-data & parameters info. If ``None`` then
save current instance's uv-data. (default: ``None``)
:param fname: (optional)
Name of FITS-file to save. If ``None`` then use current instance's
original file. (default: ``None``)
:param rewrite: (optional)
Boolean - rewrite file with original name if any? (default:
``False``)
"""
fname = fname or self.fname
if os.path.exists(fname) and rewrite:
os.unlink(fname)
if data is None:
if downscale_by_freq:
self._downscale_uvw_by_frequency()
self.hdulist.writeto(fname)
else:
# datas = np.array(sorted(data, key=lambda x: x['DATE']+x['_DATE']),
# dtype=data.dtype)
new_hdu = pf.GroupsHDU(data)
# PyFits updates header using given data (``GCOUNT`` key) anyway
new_hdu.header = self.hdu.header
hdulist = pf.HDUList([new_hdu])
for hdu in self.hdulist[1:]:
if hdu.header['EXTNAME'] == 'AIPS AN':
# FIXME:
try:
hdu = convert_an_hdu(hdu, new_hdu)
except IndexError:
print "You should fix that issue!"
pass
if hdu.header['EXTNAME'] == 'AIPS FQ':
hdu = convert_fq_hdu(hdu)
hdulist.append(hdu)
# FIXME: Sometimes i need this to be commented
if downscale_by_freq:
self._downscale_uvw_by_frequency()
hdulist.writeto(fname, output_verify='ignore')
def save_fraction(self, fname, frac, random_state=0):
"""
Save only fraction of of data on each baseline.
:param fname:
File path to save.
:param frac:
Float (0., 1.). Fraction of points from each baseline to save.
"""
from sklearn.model_selection import ShuffleSplit
ss = ShuffleSplit(n_splits=1, test_size=1-frac,
random_state=random_state)
indxs = list()
for bl in self.baselines:
bl_indxs = self._indxs_baselines[bl]
print "Baseline {} has {} samples".format(bl,
np.count_nonzero(bl_indxs))
bl_indxs_pw = self.pw_indxs_baseline(bl, average_bands=True,
stokes=['RR', 'LL'],
average_stokes=True)
bl_indxs = mask_boolean_with_boolean(bl_indxs, bl_indxs_pw)
for train, test in ss.split(np.nonzero(bl_indxs)[0]):
# tr = to_boolean_array(np.nonzero(bl_indxs)[0][train],
# len(bl_indxs))
tr = np.nonzero(bl_indxs)[0][train]
indxs.append(tr)
indxs = np.hstack(indxs)
indxs = sorted(indxs)
data = self.hdu.data[indxs]
self.save(fname, data, rewrite=True)
def save_uvrange(self, fname, uv_min):
"""
Save only fraction of of data on each baseline.
:param fname:
File path to save.
"""
indxs = list()
for bl in self.baselines:
bl_indxs = self._indxs_baselines[bl]
bl_indxs_pw = self.pw_indxs_baseline(bl, average_bands=True,
stokes=['RR', 'LL'],
average_stokes=True)
bl_indxs = mask_boolean_with_boolean(bl_indxs, bl_indxs_pw)
uv = self.uv[np.nonzero(bl_indxs)[0]]
uv_rad = np.hypot(uv[:, 0], uv[:, 1])
tr = np.nonzero(bl_indxs)[0][uv_rad > uv_min]
indxs.append(tr)
indxs = np.hstack(indxs)
indxs = sorted(indxs)
data = self.hdu.data[indxs]
self.save(fname, data, rewrite=True)
# TODO: for IDI extend this method
def learn_data_structure(self, hdu):
# Learn parameters
par_dict = OrderedDict()
for i, par in enumerate(hdu.data.names):
par_dict.update({par: i})
self.par_dict = par_dict
# Create mapping of FITS CTYPEi ``i`` number to dimensions of PyFits
# hdu.data[`DATA`] (hdu.data.data) numpy.ndarray.
data_dict = OrderedDict()
data_dict.update({'GROUP': (0, hdu.header['GCOUNT'])})
for i in range(hdu.header['NAXIS'], 1, -1):
data_dict.update({hdu.header['CTYPE' + str(i)]:
(hdu.header['NAXIS'] - i + 1,
hdu.header['NAXIS' + str(i)])})
# Save shape and dimensions of data recarray
self.data_dict = data_dict
self.nif = data_dict['IF'][1]
self.nstokes = data_dict['STOKES'][1]
# Create dictionary with necessary slices
slices_dict = OrderedDict()
for key, value in data_dict.items():
# FIXME: Generally we should avoid removing dims
if value[1] == 1 and key not in ['IF', 'STOKES']:
slices_dict.update({key: 0})
else:
slices_dict.update({key: slice(None, None)})
self.slices_dict = slices_dict
uvdata_slices_dict = OrderedDict()
for key, value in slices_dict.items():
if value is not 0:
uvdata_slices_dict.update({key: value})
self.uvdata_slices_dict = uvdata_slices_dict
def new_slices(self, key, key_slice):
"""
Return VIEW of internal ``hdu.data.data`` numpy.ndarray with given
slice.
"""
slices_dict = self.slices_dict.copy()
slices_dict.update({key: key_slice})
return slices_dict
def view_uvdata(self, new_slices_dict):
"""
Return VIEW of internal ``hdu.data.data`` numpy.ndarray with given
slices.
:param new_slices_dict:
Ex. {'COMPLEX': slice(0, 1), 'IF': slice(0, 2)}
"""
slices_dict = self.slices_dict.copy()
for key, key_slice in new_slices_dict.items():
slices_dict.update({key: key_slice})
return self.hdu.data.data[slices_dict.values()]
@property
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.