input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>qcodes/data/data_array.py<gh_stars>0
from typing import Dict, Any, Optional
import numpy as np
import collections
from qcodes.utils.helpers import DelegateAttributes, full_class, warn_units
import xarray as xr
class DataArray(DelegateAttributes):
"""
A container for one parameter in a measurement loop.
If this is a measured parameter, This object doesn't contain
the data of the setpoints it was measured at, but it references
the DataArray objects of these parameters. Those objects only have
the dimensionality at which they were set - ie the inner loop setpoint
the same dimensionality as the measured parameter, but the outer
loop setpoint(s) have lower dimensionality
When it's first created, a DataArray has no dimensionality, you must call
.nest for each dimension.
If preset_data is provided it is used to initialize the data, and the array
can still be nested around it (making many copies of the data).
Otherwise it is an error to nest an array that already has data.
Once the array is initialized, a DataArray acts a lot like a numpy array,
because we delegate attributes through to the numpy array
Args:
parameter (Optional[Parameter]): The parameter whose values will
populate this array, if any. Will copy ``name``, ``full_name``,
``label``, ``unit``, and ``snapshot`` from here unless you
provide them explicitly.
name (Optional[str]): The short name of this array.
TODO: use full_name as name, and get rid of short name
full_name (Optional[str]): The complete name of this array. If the
array is based on a parameter linked to an instrument, this is
typically '<instrument_name>_<param_name>'
label (Optional[str]): A description of the values in this array to
use for axis and colorbar labels on plots.
snapshot (Optional[dict]): Metadata snapshot to save with this array.
array_id (Optional[str]): A name for this array that's unique within
its ``DataSet``. Typically the full_name, but when the ``DataSet``
is constructed we will append '_<i>' (``i`` is an integer starting
from 1) if necessary to differentiate arrays with the same id.
TODO: this only happens for arrays provided to the DataSet
constructor, not those added with add_array. Fix this!
Also, do we really need array_id *and* full_name (let alone name
but I've already said we should remove this)?
set_arrays (Optional[Tuple[DataArray]]): If this array is being
created with shape already, you can provide one setpoint array
per dimension. The first should have one dimension, the second
two dimensions, etc.
shape (Optional[Tuple[int]]): The shape (as in numpy) of the array.
Will be prepended with new dimensions by any calls to ``nest``.
action_indices (Optional[Tuple[int]]): If used within a ``Loop``,
these are the indices at each level of nesting within the
``Loop`` of the loop action that's populating this array.
TODO: this shouldn't be in DataArray at all, the loop should
handle converting this to array_id internally (maybe it
already does?)
unit (Optional[str]): The unit of the values stored in this array.
units (Optional[str]): DEPRECATED, redirects to ``unit``.
is_setpoint (bool): True if this is a setpoint array, False if it
is measured. Default False.
preset_data (Optional[Union[numpy.ndarray, Sequence]]): Contents of the
array, if already known (for example if this is a setpoint
array). ``shape`` will be inferred from this array instead of
from the ``shape`` argument.
"""
# attributes of self to include in the snapshot
SNAP_ATTRS = (
    'array_id',
    'name',
    'shape',
    'unit',
    'label',
    'action_indices',
    'is_setpoint')
# attributes of the parameter (or keys in the incoming snapshot)
# to copy to DataArray attributes, if they aren't set some other way
COPY_ATTRS_FROM_INPUT = (
    'name',
    'label',
    'unit')
# keys in the parameter snapshot to omit from our snapshot
# (bookkeeping that is either stale or maintained by the DataArray itself)
SNAP_OMIT_KEYS = (
    'ts',
    'value',
    '__class__',
    'set_arrays',
    'shape',
    'array_id',
    'action_indices')
def __init__(self, parameter=None, name=None, full_name=None, label=None,
             snapshot=None, array_id=None, set_arrays=(), shape=None,
             action_indices=(), unit=None, units=None, is_setpoint=False,
             preset_data=None):
    # See the class docstring for the meaning of each argument.
    self.name = name
    self.full_name = full_name or name
    self.label = label
    self.shape = shape
    # ``units`` is deprecated: warn, and fall back to it only when
    # ``unit`` was not given explicitly.
    if units is not None:
        warn_units('DataArray', self)
        if unit is None:
            unit = units
    self.unit = unit
    self.array_id = array_id
    self.is_setpoint = is_setpoint
    self.action_indices = action_indices
    self.set_arrays = set_arrays
    # True once init_data() was called with preset data; only such
    # arrays may still be nested afterwards (see nest()).
    self._preset = False
    # store a reference up to the containing DataSet
    # this also lets us make sure a DataArray is only in one DataSet
    self._data_set = None
    self.last_saved_index = None
    self.modified_range = None
    # the actual numpy array; created lazily by init_data()
    self.ndarray = None
    if snapshot is None:
        snapshot = {}
    self._snapshot_input = {}
    if parameter is not None:
        param_full_name = getattr(parameter, 'full_name', None)
        if param_full_name and not full_name:
            self.full_name = parameter.full_name
        if hasattr(parameter, 'snapshot') and not snapshot:
            snapshot = parameter.snapshot()
        else:
            # TODO: why is this in an else clause?
            # copy name/label/unit straight from the parameter when no
            # snapshot was taken; otherwise they come from the snapshot
            # loop below
            for attr in self.COPY_ATTRS_FROM_INPUT:
                if (hasattr(parameter, attr) and
                        not getattr(self, attr, None)):
                    setattr(self, attr, getattr(parameter, attr))
    # keep everything from the snapshot except bookkeeping keys, and use
    # it to fill in any of name/label/unit that are still unset
    for key, value in snapshot.items():
        if key not in self.SNAP_OMIT_KEYS:
            self._snapshot_input[key] = value
            if (key in self.COPY_ATTRS_FROM_INPUT and
                    not getattr(self, key, None)):
                setattr(self, key, value)
    if not self.label:
        self.label = self.name
    if preset_data is not None:
        self.init_data(preset_data)
    elif shape is None:
        self.shape = ()
@property
def data_set(self):
    """
    The ``DataSet`` that owns this array, or None.

    A DataArray belongs to at most one DataSet (enforced by the setter).

    TODO: make this a weakref
    """
    return self._data_set
@data_set.setter
def data_set(self, new_data_set):
    # Clearing (None) or re-assigning the same DataSet is always fine;
    # only moving to a *different* live DataSet is an error.
    currently_owned = self._data_set is not None
    conflicting = (currently_owned and new_data_set is not None
                   and self._data_set != new_data_set)
    if conflicting:
        raise RuntimeError('A DataArray can only be part of one DataSet')
    self._data_set = new_data_set
def nest(self, size, action_index=None, set_array=None):
    """
    Nest this array inside a new outer loop.

    Calling ``nest`` after ``init_data`` is only allowed for preset
    (setpoint) arrays.

    Args:
        size (int): Length of the new outer loop.
        action_index (Optional[int]): Index of the loop action this
            array derives from, at this nesting level.
        set_array (Optional[DataArray]): The setpoints of the new outer
            loop. When this DataArray *is* a setpoint array, omit both
            ``action_index`` and ``set_array``; it then references
            itself as the inner setpoint array.

    Returns:
        DataArray: self, so construction calls can be chained.
    """
    if self.ndarray is not None and not self._preset:
        raise RuntimeError('Only preset arrays can be nested after data '
                           'is initialized! {}'.format(self))

    if set_array is None:
        # a setpoint array acts as its own innermost setpoints
        if self.set_arrays:
            raise TypeError('a setpoint array must be its own inner loop')
        set_array = self

    # prepend the new outer dimension to all per-dimension bookkeeping
    self.shape = (size, ) + self.shape
    if action_index is not None:
        self.action_indices = (action_index, ) + self.action_indices
    self.set_arrays = (set_array, ) + self.set_arrays

    if self._preset:
        # replicate the existing preset data into every new outer index
        inner_data = self.ndarray
        self.ndarray = np.ndarray(self.shape)
        for outer_index in range(size):
            self.ndarray[outer_index] = inner_data

        # the whole array still counts as modified
        self.modified_range = (0, self.ndarray.size - 1)

    self._set_index_bounds()
    return self
def init_data(self, data=None):
    """
    Create the actual numpy array to hold data.
    The array will be sized based on either ``self.shape`` or
    data provided here.
    Idempotent: will do nothing if the array already exists.
    If data is provided, this array is marked as a preset
    meaning it can still be nested around this data.
    TODO: per above, perhaps remove this distinction entirely?
    Args:
        data (Optional[Union[numpy.ndarray, Sequence]]): If provided,
            we fill the array with this data. Otherwise the new
            array will be filled with NaN.
    Raises:
        ValueError: if ``self.shape`` does not match ``data.shape``
        ValueError: if the array was already initialized with a
            different shape than we're about to create
    """
    # NOTE(review): the docstring promises a NaN-filled array when no
    # data is given, but no such branch is visible in this excerpt —
    # confirm against the full source that the method is not truncated.
    if data is not None:
        if not isinstance(data, np.ndarray):
            if isinstance(data, collections.abc.Iterator):
                # faster than np.array(tuple(data)) (or via list)
                # but requires us to assume float
                data = np.fromiter(data, float)
            else:
                data = np.array(data)

        # adopt the data's shape if we don't have one yet; otherwise the
        # shapes must agree exactly
        if self.shape is None:
            self.shape = data.shape
        elif data.shape != self.shape:
            raise ValueError('preset data must be a sequence '
                             'with shape matching the array shape',
                             data.shape, self.shape)
        self.ndarray = data
        self._preset = True

        # mark the entire array as modified
        self.modified_range = (0, data.size - 1)
    elif self.ndarray is not None:
        # idempotence check: an existing array must still match self.shape
        if self.ndarray.shape != self.shape:
            raise ValueError('data has already been initialized, '
                             'but its shape doesn\'t match self.shape')
| |
import calendar, json
from urllib.parse import urlparse
import requests
from datetime import date, time, datetime, timedelta
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.models import User
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from django.urls import NoReverseMatch
from rest_framework.reverse import reverse
from task import const
from task.const import *
from rusel.utils import nice_date
from rusel.categories import get_categories_list
from rusel.files import get_files_list_by_path
from rusel.secret import storage_dvlp
storage_path = storage_dvlp
class Group(models.Model):
    """
    Task groups.

    A per-user tree of groups (parent link via ``node``) used to organize
    Task records per application/role.  Depending on ``determinator`` a row
    is either a real user group ('group' / None) or a synthetic 'role' or
    'view' record identified by ``view_id``.
    """
    user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=_('user'), related_name='task_group')
    app = models.CharField(_('app name'), max_length=50, blank=False, default=APP_TODO, null=True)
    role = models.CharField(_('role name'), max_length=50, blank=False, default=ROLE_TODO, null=True)
    node = models.ForeignKey('self', on_delete=models.CASCADE, verbose_name=_('node'), blank=True, null=True)
    name = models.CharField(_('group name'), max_length=200, blank=False)
    sort = models.CharField(_('sort code'), max_length=50, blank=True)
    created = models.DateTimeField(_('creation time'), blank=True, default=datetime.now)
    last_mod = models.DateTimeField(_('last modification time'), blank=True, auto_now=True)
    completed = models.BooleanField(_('display completed records'), null=True)
    theme = models.IntegerField(_('theme id'), blank=True, null=True)
    sub_groups = models.CharField(_('content items sub groups'), max_length=10000, blank=True, null=True)
    use_sub_groups = models.BooleanField(_('using content items sub groups'), null=True)
    determinator = models.CharField(_('group category: "group", "role" or "view"'), max_length=10, blank=True, null=True)
    view_id = models.CharField(_('view identificator for "role" and "view"'), max_length=50, blank=True, null=True)
    items_sort = models.CharField(_('items sorting orders'), max_length=500, blank=True)
    info = models.TextField(_('information').capitalize(), blank=True, null=True)
    src_id = models.IntegerField(_('ID in source table'), blank=True, null=True)
    act_items_qty = models.IntegerField(_('items in group'), blank=True, null=True)
    #------------- Expen --------------
    expen_byn = models.BooleanField(_('totals in BYN'), null=True)
    expen_usd = models.BooleanField(_('totals in USD'), null=True)
    expen_eur = models.BooleanField(_('totals in EUR'), null=True)

    class Meta:
        verbose_name=_('task group')
        verbose_name_plural = _('task groups')

    def __str__(self):
        return self.name

    def __unicode__(self):
        # legacy Python-2 style repr: name prefixed with one dot per tree level
        return '.' * self.level() + self.name

    def s_id(self):
        """Primary key as a string (template helper)."""
        return str(self.id)

    def get_shifted_name(self):
        """Name indented with two dots per tree level, for tree rendering."""
        return '.'*self.level()*2 + self.name

    def edit_url(self):
        """URL pattern name for editing this group ('<app>:group')."""
        # APP_ALL has no edit view of its own; fall back to the todo app
        if (self.app == APP_ALL):
            return 'todo:group'
        return self.app + ':group'

    def level(self):
        """Depth of this group in the tree (0 for a root group)."""
        ret = 0
        node = self.node
        while node:
            ret += 1
            node = node.node
        return ret

    def is_leaf(self):
        """True when no other group has this one as its parent."""
        return not Group.objects.filter(node=self.id).exists()

    def toggle_sub_group(self, sub_group_id):
        """Flip the open/closed state of one sub-group in the JSON list.

        NOTE(review): assumes ``sub_groups`` holds a JSON list of dicts
        with 'id' and 'is_open' keys; json.loads will fail if it is
        empty/None — confirm callers guarantee it is populated.
        """
        sgs = json.loads(self.sub_groups)
        for sg in sgs:
            if (sg['id'] == int(sub_group_id)):
                sg['is_open'] = not sg['is_open']
        self.sub_groups = json.dumps(sgs)
        self.save()

    def set_theme(self, theme_id):
        """Persist the selected color theme id."""
        self.theme = theme_id
        self.save()

    def dark_theme(self):
        """True for theme ids below 8 or above 14 (treated as dark)."""
        if not self.theme:
            return False
        return (self.theme < 8) or (self.theme > 14)

    def set_sort(self, sort_id):
        """Select the items sort field.

        Re-selecting the currently active field toggles the direction
        (a leading '-' marks descending); a new field starts ascending.
        """
        if self.items_sort.replace('-', '') == sort_id:
            if self.items_sort.replace('-', '') == self.items_sort:
                self.items_sort = '-' + sort_id
            else:
                self.items_sort = sort_id
        else:
            self.items_sort = sort_id
        self.save()

    def reverse_sort(self):
        """Flip the direction of the current items sort, if any."""
        if not self.items_sort:
            return
        if self.items_sort.replace('-', '') == self.items_sort:
            self.items_sort = '-' + self.items_sort
        else:
            self.items_sort = self.items_sort[1:]
        self.save()

    def delete_sort(self):
        """Clear the items sort order."""
        self.items_sort = ''
        self.save()

    def expen_what_totals(self):
        """Which currency totals to show, as (byn, usd, eur) flags.

        Defaults to BYN only when no currency is selected.
        """
        if (not self.expen_byn) and (not self.expen_usd) and (not self.expen_eur):
            return True, False, False
        return self.expen_byn, self.expen_usd, self.expen_eur

    def expen_get_totals(self):
        """Sum the amounts of all tasks in this group per enabled currency."""
        byn = 0
        usd = 0
        eur = 0
        in_byn, in_usd, in_eur = self.expen_what_totals()
        for exp in TaskGroup.objects.filter(group=self.id):
            if in_byn:
                byn += exp.task.expen_amount('BYN')
            if in_usd:
                usd += exp.task.expen_amount('USD')
            if in_eur:
                eur += exp.task.expen_amount('EUR')
        return byn, usd, eur

    def expen_summary(self):
        """Formatted totals for the enabled currencies (USD, EUR, BYN order)."""
        in_byn, in_usd, in_eur = self.expen_what_totals()
        byn, usd, eur = self.expen_get_totals()
        res = []
        if in_usd:
            res.append(currency_repr(usd, '$'))
        if in_eur:
            res.append(currency_repr(eur, '€'))
        if in_byn:
            res.append(currency_repr(byn, ' BYN'))
        return res

    def get_absolute_url(self):
        """URL of this group's item view, or '/' when it cannot be resolved."""
        if not self.app:
            return '/'
        id = self.id
        try:
            url = reverse(self.app + ':' + self.role + '-item', args = [id])
            return url
        except NoReverseMatch:
            return '/'

    def check_items_qty(self):
        """Refresh the cached count of active (not completed) tasks.

        Only applies to real groups (determinator None or 'group');
        saves only when the count actually changed.
        """
        if (self.determinator == None) or (self.determinator == 'group'):
            tgs = TaskGroup.objects.filter(group=self.id)
            qnt = 0
            for tg in tgs:
                if not tg.task.completed:
                    qnt += 1
            if (self.act_items_qty != qnt):
                self.act_items_qty = qnt
                self.save()

    def detect_group(user, app, determinator, view_id, name):
        """Find (or for 'role'/'view', lazily create) a matching Group.

        NOTE(review): defined without ``self`` — apparently intended to be
        called on the class, e.g. Group.detect_group(...); confirm callers
        and consider marking it @staticmethod.
        NOTE(review): each branch runs the same queryset twice
        (.exists() then .get()); a single .first() would halve the queries.
        """
        group = None
        if (determinator == 'group'):
            if Group.objects.filter(user=user.id, app=app, id=int(view_id)).exists():
                group = Group.objects.filter(user=user.id, app=app, id=int(view_id)).get()
        if (determinator == 'role'):
            if Group.objects.filter(user=user.id, app=app, determinator='role', view_id=view_id).exists():
                group = Group.objects.filter(user=user.id, app=app, determinator='role', view_id=view_id).get()
        if (determinator == 'view'):
            if Group.objects.filter(user=user.id, app=app, determinator='view', view_id=view_id).exists():
                group = Group.objects.filter(user=user.id, app=app, determinator='view', view_id=view_id).get()
        if not group and (determinator != 'group'):
            group = Group.objects.create(
                user=user,
                app=app,
                determinator=determinator,
                view_id=view_id,
                name=name,
                act_items_qty=0,
                use_sub_groups=True,)
        return group
class Task(models.Model):
"""
An Entity that can be a Task or something else
"""
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=_('user'), related_name = 'task_user')
name = models.CharField(_('name').capitalize(), max_length=200, blank=False)
event = models.DateTimeField(_('event date').capitalize(), blank=True, null=True)
start = models.DateField(_('start date').capitalize(), blank=True, null=True)
stop = models.DateTimeField(_('termin').capitalize(), blank=True, null=True)
completed = models.BooleanField(_('completed').capitalize(), default=False)
completion = models.DateTimeField(_('completion time').capitalize(), blank=True, null=True)
in_my_day = models.BooleanField(_('in my day').capitalize(), default=False)
important = models.BooleanField(_('important').capitalize(), default=False)
remind = models.DateTimeField(_('remind').capitalize(), blank=True, null=True)
last_remind = models.DateTimeField(_('last remind').capitalize(), blank=True, null=True)
repeat = models.IntegerField(_('repeat').capitalize(), blank=True, null=True, choices=REPEAT_SELECT, default=NONE)
repeat_num = models.IntegerField(_('repeat num').capitalize(), blank=True, null=True)
repeat_days = models.IntegerField(_('repeat days').capitalize(), blank=True, null=True)
categories = models.TextField(_('categories').capitalize(), blank=True, null=True)
info = models.TextField(_('information').capitalize(), blank=True, null=True)
src_id = models.IntegerField(_('ID in source table'), blank=True, null=True)
app_task = models.IntegerField('Role in application Task', choices=TASK_ROLE_CHOICE, default=NONE, null=True)
app_note = models.IntegerField('Role in application Note', choices=NOTE_ROLE_CHOICE, default=NONE, null=True)
app_news = models.IntegerField('Role in application News', choices=NEWS_ROLE_CHOICE, default=NONE, null=True)
app_store = models.IntegerField('Role in application Store', choices=STORE_ROLE_CHOICE, default=NONE, null=True)
app_doc = models.IntegerField('Role in application Document', choices=DOC_ROLE_CHOICE, default=NONE, null=True)
app_warr = models.IntegerField('Role in application Warranty', choices=WARR_ROLE_CHOICE, default=NONE, null=True)
app_expen = models.IntegerField('Role in application Expense', choices=EXPEN_ROLE_CHOICE, default=NONE, null=True)
app_trip = models.IntegerField('Role in application Trip', choices=TRIP_ROLE_CHOICE, default=NONE, null=True)
app_fuel = models.IntegerField('Role in application Fueling', choices=FUEL_ROLE_CHOICE, default=NONE, null=True)
app_apart = models.IntegerField('Role in application Communal', choices=APART_ROLE_CHOICE, default=NONE, null=True)
app_health = models.IntegerField('Role in application Health', choices=HEALTH_ROLE_CHOICE, default=NONE, null=True)
app_work = models.IntegerField('Role in application Work', choices=WORK_ROLE_CHOICE, default=NONE, null=True)
app_photo = models.IntegerField('Role in application Photo Bank', choices=PHOTO_ROLE_CHOICE, default=NONE, null=True)
created = models.DateTimeField(_('creation time').capitalize(), default=datetime.now)
last_mod = models.DateTimeField(_('last modification time').capitalize(), blank=True, auto_now=True)
groups = models.ManyToManyField(Group, through='TaskGroup')
active = models.BooleanField(_('is active navigation item').capitalize(), null=True)
task_1 = models.ForeignKey('self', on_delete=models.SET_NULL, verbose_name=_('linked task #1'), related_name='task_link_1', blank=True, null=True)
task_2 = models.ForeignKey('self', on_delete=models.SET_NULL, verbose_name=_('linked task #2'), related_name='task_link_2', blank=True, null=True)
task_3 = models.ForeignKey('self', on_delete=models.SET_NULL, verbose_name=_('linked task #3'), related_name='task_link_3', blank=True, null=True)
item_attr = models.CharField(_('item attributes').capitalize(), max_length=2000, blank=True, null=True)
sort = models.CharField(_('sort code'), max_length=50, blank=True)
#------------ Expenses ------------
expen_qty = models.DecimalField(_('quantity').capitalize(), blank=True, null=True, max_digits=15, decimal_places=3)
expen_price = models.DecimalField(_('Price in NC'), blank=True, null=True, max_digits=15, decimal_places=2)
expen_rate = models.DecimalField(_('USD exchange rate'), blank=True, null=True, max_digits=15, decimal_places=4)
expen_rate_2 = models.DecimalField(_('EUR exchange rate'), blank=True, null=True, max_digits=15, decimal_places=4)
expen_usd = models.DecimalField(_('amount in USD'), blank=True, null=True, max_digits=15, decimal_places=2)
expen_eur = models.DecimalField(_('amount in EUR'), blank=True, null=True, max_digits=15, decimal_places=2)
expen_kontr = models.CharField(_('manufacturer').capitalize(), max_length=1000, blank=True, null=True)
#------------ Person --------------
pers_dative = models.CharField(_('dative'), max_length=500, null=True)
#------------- Trip ---------------
trip_days = models.IntegerField(_('days'), null=True)
trip_oper = models.IntegerField(_('operation'), null=True)
trip_price = models.DecimalField(_('price'), max_digits=15, decimal_places=2, null=True)
#------------- Store --------------
store_username = models.CharField(_('username'), max_length=150, blank=True, null=True)
store_value = models.CharField(_('value'), max_length=128, null=True)
store_params = models.IntegerField(_('generator parameters used'), null=True)
#------------- Apart --------------
apart_has_el = models.BooleanField(_('has electricity'), null=True)
apart_has_hw = models.BooleanField(_('has hot water'), null=True)
apart_has_cw = models.BooleanField(_('has cold water'), null=True)
apart_has_gas = models.BooleanField(_('has gas'), null=True)
apart_has_ppo = models.BooleanField(_('payments to the partnership of owners'), null=True)
apart_has_tv = models.BooleanField(_('has Internet/TV'), null=True)
apart_has_phone = models.BooleanField(_('has phone'), null=True)
apart_has_zkx = models.BooleanField(_('has ZKX'), null=True)
#------------- Meter --------------
meter_el = models.IntegerField(_('electricity'), null=True)
meter_hw = models.IntegerField(_('hot water'), null=True)
meter_cw = models.IntegerField(_('cold water'), null=True)
meter_ga = models.IntegerField(_('gas'), null=True)
meter_zkx = models.DecimalField('account amount', null=True, max_digits=15, decimal_places=2)
#------------- Price --------------
price_service = models.IntegerField(_('service code'), null=True)
price_tarif = models.DecimalField(_('tariff 1'), null=True, max_digits=15, decimal_places=5)
price_border = models.DecimalField(_('border 1'), null=True, max_digits=15, decimal_places=4)
price_tarif2 = models.DecimalField(_('tariff 2'), null=True, max_digits=15, decimal_places=5)
price_border2 = models.DecimalField(_('border 2'), null=True, max_digits=15, decimal_places=4)
price_tarif3 = models.DecimalField(_('tariff 3'), null=True, max_digits=15, decimal_places=5)
price_unit = models.CharField(_('unit'), max_length=100, blank=True, null=True)
#------------- Bill ---------------
bill_residents = models.IntegerField(_('number of residents'), null=True)
bill_el_pay = models.DecimalField('electro - payment', null=True, max_digits=15, decimal_places=2)
bill_tv_bill = models.DecimalField('tv - accrued', null=True, max_digits=15, decimal_places=2)
bill_tv_pay = models.DecimalField('tv - payment', null=True, max_digits=15, decimal_places=2)
bill_phone_bill = models.DecimalField('phone - accrued', null=True, max_digits=15, decimal_places=2)
bill_phone_pay = models.DecimalField('phone - payment', null=True, max_digits=15, decimal_places=2)
bill_zhirovka = models.DecimalField('zhirovka', null=True, max_digits=15, decimal_places=2)
bill_hot_pay = models.DecimalField('heatenergy - payment', null=True, max_digits=15, decimal_places=2)
bill_repair_pay = models.DecimalField('overhaul - payment', null=True, max_digits=15, decimal_places=2)
bill_zkx_pay = models.DecimalField('housing and communal services - payment', null=True, max_digits=15, decimal_places=2)
bill_water_pay = models.DecimalField('water - payment', null=True, max_digits=15, decimal_places=2)
bill_gas_pay = models.DecimalField('gas - payment', null=True, max_digits=15, decimal_places=2)
bill_rate = models.DecimalField('rate', null=True, max_digits=15, decimal_places=4)
bill_poo = models.DecimalField('pay to the Partnersheep of Owners - accrued', null=True, max_digits=15, decimal_places=2)
bill_poo_pay = models.DecimalField('pay to the Partnersheep of Owners - payment', null=True, max_digits=15, decimal_places=2)
#-------------- Car ----------------
car_plate = models.CharField(_('car number'), max_length=100, null=True, blank=True)
car_odometr = models.IntegerField(_('odometer'), null=True)
#-------------- Fuel ---------------
fuel_volume = models.DecimalField(_('volume'), | |
1
if dir_down:
row += 1
else:
row -= 1
return "".join(result)
def rot13(word, **kwargs):
    '''
    Apply the ROT13 substitution to ``word``.

    Case is preserved; characters outside A-Z / a-z (digits, symbols,
    whitespace) pass through unchanged.  ROT13 is an involution, so the
    same function both encrypts and decrypts.
    '''
    out = []
    for ch in word:
        code = ord(ch)
        if ord('a') <= code <= ord('z'):
            # rotate within the lowercase alphabet
            code = code + 13 if code <= ord('m') else code - 13
        elif ord('A') <= code <= ord('Z'):
            # rotate within the uppercase alphabet
            code = code + 13 if code <= ord('M') else code - 13
        out.append(chr(code))
    return ''.join(out)
class Rot13(_Cipher):
    """ROT13 cipher wrapper.

    Encryption and decryption are the same involutive operation; both
    simply delegate to :func:`rot13`.
    """

    @staticmethod
    def encrypt(word, **kwargs):
        """Encrypt ``word`` with ROT13."""
        return rot13(word)

    @staticmethod
    def decrypt(word, **kwargs):
        """Decrypt ``word`` with ROT13 (identical to encrypt)."""
        return rot13(word)
class SimpleSubstitution(_Cipher):
    """
    The Simple Substitution Cipher.

    Key: the characters themselves are arbitrary, but the key length must
    equal the alphabet length; ``key[i]`` substitutes for ``alphabet[i]``.
    """
    class Tools:
        @staticmethod
        def _encDec(text, key, alphabet, isEncrypt):
            """Map each char of ``text`` through the key/alphabet pair.

            ``isEncrypt == 1`` maps alphabet -> key; any other value maps
            key -> alphabet.  Raises Exception when a char of ``text`` is
            absent from the lookup string.
            """
            if len(alphabet) != len(key):
                raise ValueError("'key' and 'alphabet' must have the same length")
            ans = ""
            for m in text:
                try:
                    if isEncrypt == 1:
                        k = key[alphabet.index(m)]
                    else:
                        k = alphabet[key.index(m)]
                except ValueError:
                    # BUG FIX: the original concatenated str with the
                    # utf-8 *bytes* of the char (m.encode('utf-8')), which
                    # raised TypeError on Python 3 instead of the intended
                    # error message.  Use the char itself.
                    raise Exception("Can't find char '" + m + "' of text in alphabet!")
                ans += k
            return ans

    @staticmethod
    def encrypt(text, key, alphabet=ALL_CHARS):
        """
        Encryption method
        :param text: Text to encrypt
        :param key: Encryption key
        :param alphabet: Alphabet which will be used,
            if there is no a value, English is used
        :type text: string
        :type key: string
        :type alphabet: string
        :return: text
        :rtype: string
        """
        return SimpleSubstitution.Tools._encDec(text, key, alphabet, 1)

    @staticmethod
    def decrypt(text, key, alphabet=ALL_CHARS):
        """
        Decryption method
        :param text: Text to decrypt
        :param key: Decryption key
        :param alphabet: Alphabet which will be used,
            if there is no a value, English is used
        :type text: string
        :type key: string
        :type alphabet: string
        :return: text
        :rtype: string
        """
        return SimpleSubstitution.Tools._encDec(text, key, alphabet, -1)
class ThreeSquare(_Cipher):
    """
    The Three Square Cipher
    Key: a sequence of three square keys (key[0..2]); characters are not
    important but length should be equal to alphabet length
    """
    class Tools:
        @staticmethod
        def _encDec(text, key, alphabet, isEncrypt):
            # Three Polybius squares, one per key part.
            square1 = _PolybiusSquare(alphabet, key[0])
            square2 = _PolybiusSquare(alphabet, key[1])
            square3 = _PolybiusSquare(alphabet, key[2])
            enc = u""
            if isEncrypt:
                # pad odd-length input with the alphabet's final character
                if len(text) % 2:
                    text += alphabet[-1][0]
                odd = text[1::2]
                even = text[::2]
                # each plaintext pair becomes a trigram: the left and right
                # chars use a *random* row/column, so encryption is
                # nondeterministic (only the middle char is fixed)
                for i in range(len(even)):
                    row1, column1 = square1.get_coordinates(even[i])
                    row2, column2 = square2.get_coordinates(odd[i])
                    rows = square1.get_rows()
                    index = random.randrange(rows)
                    left = square1.get_char(index, column1)
                    middle = square3.get_char(row1, column2)
                    cols = square2.get_columns()
                    index = random.randrange(cols)
                    right = square2.get_char(row2, index)
                    enc += left + middle + right
            else:
                # split ciphertext into trigrams and invert the mapping,
                # ignoring the randomized row/column components
                trigrams = []
                i = 0
                while i < len(text):
                    trigrams.append(text[i:i+3])
                    i += 3
                for trigram in trigrams:
                    col1 = square1.get_coordinates(trigram[0])[1]
                    row3, col3 = square3.get_coordinates(trigram[1])
                    row2 = square2.get_coordinates(trigram[2])[0]
                    enc += square1.get_char(row3, col1)
                    enc += square2.get_char(row2, col3)
            return enc
    @staticmethod
    def encrypt(text, key, alphabet=ALL_CHARS):
        """
        Encryption method
        :param text: Text to encrypt
        :param key: Encryption key (three square keys)
        :param alphabet: Alphabet which will be used,
            if there is no a value, English is used
        :type text: string
        :type key: sequence of three strings
        :type alphabet: string
        :return: text
        :rtype: string
        """
        return ThreeSquare.Tools._encDec(text, key, alphabet, True)
    @staticmethod
    def decrypt(text, key, alphabet=ALL_CHARS):
        """
        Decryption method
        :param text: Text to decrypt
        :param key: Decryption key (three square keys)
        :param alphabet: Alphabet which will be used,
            if there is no a value, English is used
        :type text: string
        :type key: sequence of three strings
        :type alphabet: string
        :return: text
        :rtype: string
        """
        return ThreeSquare.Tools._encDec(text, key, alphabet, False)
class Transpose(_Cipher):
    '''
    Columnar transposition cipher.

    Constraint: len(word) > key > 1
    '''
    @staticmethod
    def encrypt(text, key, **kwargs):
        """Concatenate every ``key``-th character, one slice per column."""
        columns = []
        for start in range(key):
            columns.append(text[start::key])
        return ''.join(columns)

    @staticmethod
    def decrypt(text, key, **kwargs):
        """Invert encrypt: redistribute the text over a key-row grid."""
        import math
        n_cols = math.ceil(len(text) / key)
        n_rows = key
        # cells of the grid that stay empty when len(text) doesn't fill it
        n_shaded = (n_cols * n_rows) - len(text)
        cells = [''] * n_cols
        col = row = 0
        for ch in text:
            cells[col] += ch
            col += 1
            # wrap to the next row at the row end, or one cell early once
            # we reach the rows that contain a shaded (empty) final cell
            at_row_end = (col == n_cols)
            at_shaded = (col == n_cols - 1 and row >= n_rows - n_shaded)
            if at_row_end or at_shaded:
                col = 0
                row += 1
        return ''.join(cells)
# FIXME
class Trifid(_Cipher):
    """
    #FIXME: WRONG DECRYPT/ENCRYPT
    The Trifid Cipher
    Key: integer <= len(text)
    """
    class Tools:
        @staticmethod
        def _code(text, alphabet):
            # Encode each char as three digits '1'-'3': square, row, column
            # of a 3x3x3 (27-cell) layout derived from its alphabet index.
            code = ""
            for char in text:
                for index in range(len(alphabet)):
                    try:
                        alphabet[index].index(char)
                        break
                    except ValueError:
                        pass
                # NOTE(review): if char is in no alphabet entry, ``index``
                # silently keeps its last loop value — confirm inputs are
                # always covered by the alphabet.
                square = int(index / 9)
                index = index % 9
                row = int(index / 3)
                col = index % 3
                code += str(square+1) + str(row+1) + str(col+1)
            return code
        @staticmethod
        def _decode(text, alphabet):
            # Inverse of _code: each digit triple selects an alphabet entry;
            # the entry's first character represents it.
            code = ""
            for i in range(0, len(text), 3):
                square = int(text[i])-1
                row = int(text[i+1])-1
                col = int(text[i+2])-1
                index = square*9 + row*3 + col
                code += alphabet[index][0]
            return code
    @staticmethod
    def encrypt(text, key=TEXT_LENGTH, alphabet=ALL_CHARS):
        """
        Encryption method
        :param text: Text to encrypt
        :param key: Encryption key
        :param alphabet: Alphabet which will be used,
            if there is no a value, English is used
        :type text: string
        :type key: integer
        :type alphabet: string
        :return: text
        :rtype: string
        """
        key = int(key)
        # non-positive key means: use one block covering the whole text
        if not key > 0:
            key = len(text)
        code = Trifid.Tools._code(text, alphabet)
        # regroup the digit stream in blocks of ``key`` characters
        code0 = ""
        for j in range(0, len(text)*3, 3*key):
            for i in range(3):
                code0 += code[j+i:j+3*key:3]
        code = Trifid.Tools._decode(code0, alphabet)
        return code
    @staticmethod
    def decrypt(text, key=TEXT_LENGTH, alphabet=ALL_CHARS):
        """
        Decryption method
        :param text: Text to decrypt
        :param key: Decryption key
        :param alphabet: Alphabet which will be used,
            if there is no a value, English is used
        :type text: string
        :type key: integer
        :type alphabet: string
        :return: text
        :rtype: string
        """
        key = int(key)
        if not key > 0:
            key = len(text)
        code = Trifid.Tools._code(text, alphabet)
        code0 = ""
        # the final partial block (rmd chars) is regrouped separately
        rmd = (len(text) % key)
        for j in range(0, (len(text) - rmd) * 3, 3*key):
            for i in range(key):
                code0 += code[j+i:j+3*key:key]
        j = (len(text) - rmd) * 3
        for i in range(rmd):
            code0 += code[j+i:j+3*rmd:rmd]
        code = Trifid.Tools._decode(code0, alphabet)
        return code
#FIXME:!!!
def two_square(text, key, alphabet=ALL_CHARS):
    """Two-square cipher core transformation.

    Builds two Polybius squares from ``key[0]`` and ``key[1]`` and maps
    each pair of input characters; the same transformation serves both
    encryption and decryption (see :class:`TwoSquare`).
    """
    square1 = _PolybiusSquare(alphabet, key[0])
    square2 = _PolybiusSquare(alphabet, key[1])
    # pad odd-length input with the alphabet's final character
    if len(text) % 2:
        text += alphabet[-1][0]
    evens = text[::2]
    odds = text[1::2]
    enc = u""
    for first, second in zip(evens, odds):
        row1, column1 = square1.get_coordinates(first)
        row2, column2 = square2.get_coordinates(second)
        if column1 == column2:
            # same column: swap the rows within that column
            enc += square1.get_char(row2, column1)
            enc += square2.get_char(row1, column1)
        else:
            # different columns: keep rows, swap the columns
            enc += square1.get_char(row1, column2)
            enc += square2.get_char(row2, column1)
    return enc
class TwoSquare(_Cipher):
    """
    The Two-Square Cipher

    Both operations delegate to :func:`two_square`, which is its own
    inverse for this cipher.
    """
    @staticmethod
    def encrypt(text, key, alphabet=ALL_CHARS):
        """
        Encryption method
        :param text: Text to encrypt
        :param key: Encryption key (two square keys)
        :param alphabet: Alphabet which will be used,
            if there is no a value, English is used
        :type text: string
        :type key: sequence of two strings
        :type alphabet: string
        :return: text
        :rtype: string
        """
        # BUG FIX: arguments were passed as (alphabet, text, key), but
        # two_square is declared as two_square(text, key, alphabet).
        return two_square(text, key, alphabet)
    @staticmethod
    def decrypt(text, key, alphabet=ALL_CHARS):
        """
        Decryption method
        :param text: Text to decrypt
        :param key: Decryption key (two square keys)
        :param alphabet: Alphabet which will be used,
            if there is no a value, English is used
        :type text: string
        :type key: sequence of two strings
        :type alphabet: string
        :return: text
        :rtype: string
        """
        # BUG FIX: same argument-order correction as in encrypt.
        return two_square(text, key, alphabet)
#FIXME:!!!
class Vic(_Cipher):
"""
-> Polybius square
The Vic Cipher
"""
class Tools:
@staticmethod
def _find_index_in_alphabet(char, alphabet):
for j in range(len(alphabet)):
try:
alphabet[j].index(char)
break
except ValueError:
pass
return j
@staticmethod
def _EncDec(text, key, alphabet, do_encrypt):
columns = []
width = 10
# define columns with null string
for i, value in enumerate(alphabet):
if value == "":
columns.append(i)
# encode chars to numbers
code = ""
for char in text:
j = Vic.Tools._find_index_in_alphabet(char, alphabet)
row = int(j / width)
if row > 0:
column = j % width
code += str(columns[row-1]) + str(column)
else:
code += str(j)
enc = ""
if do_encrypt:
# addition by key
for i in range(0, len(code)):
enc += str((int(code[i]) + int(key[i % len(key)])) % 10)
else:
# subraction by key
for i in range(0, len(code)):
enc += str((int(code[i]) - int(key[i % len(key)])) % 10)
# encode numbers to chars
enc2 = ""
row = 0
for i in range(0, len(enc)):
if row == 0 and (int(enc[i]) in columns):
row = columns.index(int(enc[i])) + 1
else:
enc2 += alphabet[row * | |
<gh_stars>0
# -*- coding: UTF-8 -*-
#############################################
## (C)opyright by <NAME>, 2008 ##
## All rights reserved ##
#############################################
import cgi
import copy
import inspect
import logging
import os
import os.path
import re
import string
import sys
import types
import re
try:
import StringIO
except:
import cStringIO as StringIO
log = logging.getLogger(__name__)
from pyxer.template.genshi import XML, HTML, Stream, QName, Attrs
from pyxer.template.genshi.core import START, END, TEXT, COMMENT, DOCTYPE
# _commands = re.compile(u"\<\%(.*?)\%\>", re.M)
_vars = re.compile(u"""
\$(
\$
|
\{(.*?)\}
|
([a-z_][a-z_0-9]*)(\.[a-z_][a-z_0-9]*)*
)
""", re.M | re.VERBOSE)
class Dict(dict):
    " Helper class to map dicts to attributes "

    def __getattr__(self, name):
        # __getattr__ is only invoked when normal attribute lookup fails,
        # so delegate straight to item lookup.  (The old code tried
        # dict.__getattr__ first, which never exists, then fell back via a
        # bare except.)
        try:
            return self[name]
        except KeyError:
            # BUGFIX: raise AttributeError instead of letting KeyError
            # escape, so hasattr()/getattr(obj, name, default) work.
            raise AttributeError(name)

    def __setattr__(self, name, value):
        # Attribute assignment writes through to the mapping.
        self[name] = value
class CodeGenerator(object):
    """Accumulates generated Python source lines, tracking indentation.

    NOTE: ``debug()`` uses the Python 2 ``print`` statement, consistent
    with the rest of this module.
    """
    # Class-level defaults; __init__ only overrides them per instance
    # when the caller passes different values.
    level = 0
    tab = '\t'
    def __init__(self, code = None, level = 0, tab = '\t'):
        # Reuse the caller's list when given, so lines can be shared.
        self.code = code or []
        if level != self.level:
            self.level = level
        if tab != self.tab:
            self.tab = tab
        # Current indentation prefix, kept in sync with self.level.
        self.pad = self.tab * self.level
    def line(self, *lines):
        # Append each line prefixed with the current indentation.
        for text in lines:
            self.code.append(self.pad + text)
    def start_block(self, text):
        # Emit the block header and indent subsequent lines one level.
        self.line(text)
        self.level += 1
        self.pad += self.tab
    def end_block(self, nblocks = 1, with_pass = False):
        # Close one or more blocks; optionally emit 'pass' first so an
        # empty block stays syntactically valid.
        for n in range(nblocks):
            if with_pass:
                self.line('pass')
            self.level -= 1
            self.pad = self.pad[: - len(self.tab)]
    '''
    def insert_block(self, block):
        lines = block.splitlines()
        if len(lines) == 1:
            # special case single lines
            self.line(lines[0].strip())
        else:
            # adjust the block
            for line in _adjust_python_block(lines, self.tab):
                self.line(line)
    '''
    def __str__(self):
        # Joined source with a trailing newline.
        return '\n'.join(self.code + [''])
    def debug(self):
        # Dump numbered source lines to stdout (Python 2 print statement).
        for n in range(0, len(self.code)):
            print "%4d:" % (n + 1), self.code[n]
    def pretty(self):
        # Numbered source with tabs expanded, as a single string.
        out = []
        for n in range(0, len(self.code)):
            out.append("%4d: %s" % (n + 1, self.code[n].replace("\t", "    ")))
        return "\n".join(out)
class PyxerStream(Stream):
    """Genshi Stream subclass with helpers to build and select events."""
    def append(self, value):
        # Append one raw (kind, data, pos) event tuple.
        self.events.append(value)
    def add(self, value, encoding="utf8"):
        """Coerce *value* into stream events and append it.

        Callables are invoked first; None is skipped; lists/Streams are
        spliced in; booleans become '1'/'0'; anything else is coerced to
        unicode.  Errors are logged and rendered into the stream as text.
        """
        try:
            if type(value) is types.FunctionType:
                value = value()
            if value is None:
                return
            if isinstance(value, list) or isinstance(value, Stream):
                self.events += list(value)
                return
            if value is True:
                value = '1'
            elif value is False:
                value = '0'
            elif not isinstance(value, unicode):
                value = unicode(str(value), 'utf8')
            self.events.append((TEXT, value, (None, 0, 0)))
        except Exception, e:
            log.exception("element error")
            # Render the error message into the output instead of failing.
            self.events.append((TEXT, unicode(e), (None, 0, 0)))
    def inner(self, path):
        """Select *path*, stripping the outer START/END pair (innerHTML)."""
        result = list(self.select(path))
        if result and result[0][0] == START:
            return result[1:-1]
        return result
    def css(self, selector):
        """Select events by CSS selector; a trailing ' *' selects inner content.

        NOTE(review): the XPath is computed before the ' *' suffix is
        stripped from *selector* -- confirm this is intentional.
        """
        import pyxer.template.cssselect as select
        path = select.css_to_xpath(selector)
        inner = False
        if selector.endswith(" *"):
            selector = selector[:-2]
            inner = True
        log.debug("CSS Selector %r -> XPath %r (innerHTML %r)", selector, path, inner)
        result = list(self.select(path))
        if inner and result and result[0][0] == START:
            return result[1:-1]
        return result
    # Backwards-compatible alias.
    selectInner = inner
class TemplateSoup(object):
"""
Yet another templating system based on BeautyfulSoup
"""
    def __init__(self, source, html5 = False, strict = True, debug = False, xml=False):
        """Parse *source*, generate Python code for it and compile it.

        NOTE(review): the *xml* argument is currently ignored -- self.xml
        is hard-coded to False below; confirm whether that is intentional.
        """
        self.strict = strict
        self.html5 = html5
        self.debug = debug
        self.xml = False
        self.code = None
        self.bytecode = None
        self.sourcecode = u""
        self.stream = None
        self.layout = []
        self.extends = []
        self.parse(source)
        self.generateCode()
        self.generateByteCode()
        # Save memory
        if not self.debug:
            self.code = None
            self.sourcecode = None
    def parse(self, source):
        """Parse *source* into a markup event stream and return it.

        The html5lib path is deliberately disabled ('if 0'); parsing goes
        through genshi's XML/HTML helpers instead.
        """
        self.source = source
        # Parse source
        if 0: # self.html5:
            import html5lib
            import html5lib.treebuilders
            parser = html5lib.HTMLParser(tree = html5lib.treebuilders.getTreeBuilder("beautifulsoup"))
            self.soup = parser.parse(StringIO.StringIO(self.source))
        else:
            # self.xml is always False here (see __init__), so the HTML
            # branch is effectively the only live path.
            if self.xml:
                self.soup = XML(self.source)
            else:
                self.soup = HTML(self.source)
        return self.soup
    def generateCode(self):
        """Walk the parsed tree and emit Python source into self.code.

        The generated module defines helper functions and a main() that
        rebuilds the event stream; self.sourcecode holds the result.
        """
        self.code = CodeGenerator()
        self.code_line = 1
        # Create code
        self.code.line(
            # "# -*- coding: UTF-8 -*-",
            "from pyxer.template.genshi import XML, HTML, Stream, QName, Attrs",
            "from pyxer.template.genshi.core import START, END, TEXT, XML_DECL, DOCTYPE, START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT",
            "def _forceUnicode(value):",
            "\tif value is None: return ''",
            "\tif isinstance(value, unicode): return value",
            "\treturn unicode(str(value), 'utf8')",
            "def select(path):",
            "\tglobal stream",
            "\treturn stream.select(path)"
        )
        self.code.start_block("def main():")
        self.code.line(
            "global stream",
        )
        try:
            # loop() recursively translates the event stream into code.
            self.current_node = self.soup
            self.loop(self.soup)
        except SyntaxError, e:
            #print "###", self.code_line
            #part = self.source.splitlines()[self.code_line: self.code_line+4]
            #print "\n".join(part)
            raise
        self.code.line(
            "return stream"
        )
        self.code.end_block()
        #self.code.line(
        #    "main(soup)",
        #    "print soup",
        #    # "print soup.prettify()",
        #)
        self.sourcecode = unicode(self.code)
        #if self.debug:
        #    print self.code.pretty()
    def generateByteCode(self):
        """Compile the generated source code to a Python code object."""
        if self.debug:
            log.debug("Sourcecode:\n%s", self.code.pretty())
        self.bytecode = compile(self.sourcecode, "<string>", "exec")
    def generate(self, __vars__={}, **kw):
        """Execute the compiled template and return the resulting stream.

        NOTE(review): ``__vars__={}`` is a mutable default and is mutated
        below via vars.update(kw) -- values can leak between calls when
        no dict is passed; confirm whether this is relied upon (layouts
        pass the live context back in recursively).
        """
        # import pprint
        vars = __vars__
        vars.update(kw)
        # For referencing
        if not vars.has_key("top"):
            vars["top"] = None
        # Prepare context
        context = Dict(vars)
        context.update(
            stream = PyxerStream([])
        )
        # print context.keys()
        stream = None
        try:
            # Run the generated module, then call its main() entry point.
            exec(self.bytecode, context)
            stream = context["main"]()
            # pprint.pprint(list(stream))
            context["top"] = stream
            # Applying the layouts
            for layout in self.layout:
                template = eval(layout, context)
                if isinstance(template, TemplateSoup):
                    stream = template.generate(context)
        except:
            # XXX must become more Python conform
            #error = inspect.trace()[-1][0].f_locals.get("error", None)
            #if not error:
            #    raise
            #exc_info = sys.exc_info()
            #e = exc_info[1]
            #if getattr(e, 'args', None):
            #    arg0 = e.args[0]
            #else:
            #    arg0 = str(e)
            #msg = arg0 + "\nError in template line %d: %s" % error
            #raise exc_info[0], msg, exc_info[2]
            raise
        # pprint.pprint(list(stream))
        self.stream = stream
        return stream
    def render(self, method="html", encoding = "utf8", doctype=None, strip_whitespace=True, **kw):
        """Render the last generated stream to a string.

        NOTE(review): returns None implicitly when no stream has been
        generated (or it is empty); since __str__ is aliased to this
        method, str(template) would then fail -- confirm callers always
        call generate() first.
        """
        method = method.lower()
        # Default to an HTML doctype for non-XML output methods.
        if (doctype is None) and (method != 'xml'):
            doctype = 'html'
        if self.stream:
            return self.stream.render(method,
                strip_whitespace=strip_whitespace,
                doctype=doctype,
                **kw)
    # str(template) renders with the default options.
    __str__ = render
def getAttr(self, node, name):
name = unicode(name)
value = None
kind, data, pos = node
if kind == START:
attr = data[1]
# XXX <label for...> problem
#if not (data[0].lower() == "meta" and name == "content"):
# if name in attr:
# value = attr.get(name)
# node[1] = (data[0], attr - name)
name = "py:" + name
if name in attr:
if value is not None:
raise "Attribute %s is defined twice"
value = attr.get(name)
node[1] = (data[0], attr - name)
return value
    def checkSyntax(self, value, mode = "eval"):
        """In strict mode, verify that *value* compiles; return it unchanged.

        :raises SyntaxError: re-raised with the offending expression appended.
        """
        if self.strict:
            try:
                compile(value, "<string>", mode)
            except SyntaxError, msg:
                raise SyntaxError, str(msg) + " in expression %s" % value
        return value
def addElement(self, kind, data, pos):
self.code.line("stream.append((%s, %r, %r))" % (kind, data, pos))
def loop(self, input, depth = 0):
stack = []
path = []
for node in input:
# Make node changeable
node = list(node)
# Split all informations
kind, data, position = node
# Set some defaults
indent = 0
pyDef = None
# if in pyContent or pyReplace do not show
show = not (len(path) and path[-1][2])
# Handle tags
if kind == START:
# Get commands and strip attribute
pyDef = self.getAttr(node, "def")
pyMatch = self.getAttr(node, "match") # XXX todo
pyWhen = self.getAttr(node, "when") # XXX todo
pyOtherwise = self.getAttr(node, "otherwise") # XXX todo
pyFor = self.getAttr(node, "for")
pyIf = self.getAttr(node, "if")
pyChoose = self.getAttr(node, "choose") # XXX todo
pyWith = self.getAttr(node, "with")
pyReplace = self.getAttr(node, "replace")
pyContent = self.getAttr(node, "content")
pyAttrs = self.getAttr(node, "attrs")
pyStrip = self.getAttr(node, "strip")
# pyStrip = self.getAttr(node, "select")
pyExtends = self.getAttr(node, "extends") # XXX todo
pyLayout = self.getAttr(node, "layout")
pyElse = self.getAttr(node, "else") # XXX todo
pyElif = self.getAttr(node, "elif") # XXX todo
# get modified attributes
attr = node[1][1]
if pyExtends:
self.extends.append(pyExtends)
if pyLayout:
self.layout.append(pyLayout)
if pyMatch:
import uuid
pyDef = "match_%s" % uuid.uuid1().hex
if pyDef:
stack.append(self.code)
self.code = CodeGenerator()
if not pyDef.strip().endswith(")"):
pyDef += "()"
self.code.start_block("def %s:" % pyDef)
self.code.line(
"stream = []"
)
#if pyMatch:
# self.code.line(
# pyMatch
# )
# For error handling
self.code.line(
"error = " + repr(position)
)
if pyFor:
self.code.start_block("for %s:" % self.checkSyntax(pyFor))
indent += 1
if pyIf:
self.code.start_block("if %s:" % self.checkSyntax(pyIf))
indent += 1
if pyWith:
self.code.line(
self.checkSyntax(pyWith, "exec"),
# pyWith
)
if (pyReplace) or pyStrip:
pyStrip = True
elif show:
attrs = []
for name, value in attr:
pos = 0
expr = []
for m in _vars.finditer(value):
if value[pos:m.start()]:
expr.append(repr(unicode(value[pos:m.start()])))
cmd = m.group(1)
if cmd != "$":
if cmd.startswith("{"):
cmd = cmd[1: - 1].strip()
expr.append(self.checkSyntax("_forceUnicode(%s)" % cmd))
else:
expr.append(repr(u"$"))
# Escaped dollar $$ -> $
pos = m.end()
if value[pos:]:
expr.append(repr(unicode(value[pos:])))
attrs.append("(%r, %s)" % (name, " + ".join(expr) or 'u""'))
newattr = "Attrs([%s])" % ", ".join(attrs)
if pyAttrs:
newattr += " | [(QName(k), _forceUnicode(v)) for k, v in dict(%s).items() if v is not None]" % self.checkSyntax(pyAttrs)
element = (START, "(%r, %s)" % (data[0], newattr), position)
self.code.line("stream.append((%s, %s, %r))" % element)
#self.code.line(
# "node = Tag(soup, %r, [%s], parent)" % (node, ", ".join(attrs)),
# "parent.append(node)",
#)
#if pyFromid:
# pyContent = "fromid(%r, top, soup)" % pyFromid
if pyStrip and pyContent:
pyReplace = | |
def list_cases_with_http_info(self, request):
"""查询工单列表接口
查询工单列表接口
:param ListCasesRequest request
:return: ListCasesResponse
"""
all_params = ['search_key', 'label_id_list', 'app_key', 'incident_id', 'query_start_time', 'query_end_time', 'status', 'incident_status', 'x_customer_id', 'x_customer_name', 'group_id', 'offset', 'limit', 'x_site', 'x_language', 'x_time_zone']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'search_key' in local_var_params:
query_params.append(('search_key', local_var_params['search_key']))
collection_formats['search_key'] = 'csv'
if 'label_id_list' in local_var_params:
query_params.append(('label_id_list', local_var_params['label_id_list']))
collection_formats['label_id_list'] = 'csv'
if 'app_key' in local_var_params:
query_params.append(('app_key', local_var_params['app_key']))
if 'incident_id' in local_var_params:
query_params.append(('incident_id', local_var_params['incident_id']))
if 'query_start_time' in local_var_params:
query_params.append(('query_start_time', local_var_params['query_start_time']))
if 'query_end_time' in local_var_params:
query_params.append(('query_end_time', local_var_params['query_end_time']))
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'incident_status' in local_var_params:
query_params.append(('incident_status', local_var_params['incident_status']))
if 'x_customer_id' in local_var_params:
query_params.append(('x_customer_id', local_var_params['x_customer_id']))
if 'x_customer_name' in local_var_params:
query_params.append(('x_customer_name', local_var_params['x_customer_name']))
if 'group_id' in local_var_params:
query_params.append(('group_id', local_var_params['group_id']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
header_params = {}
if 'x_site' in local_var_params:
header_params['X-Site'] = local_var_params['x_site']
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
if 'x_time_zone' in local_var_params:
header_params['X-Time-Zone'] = local_var_params['x_time_zone']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/servicerequest/cases',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListCasesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_extends_params(self, request):
"""查询附加参数
提单时,根据不同的产品或者问题类型,会存在不同的一些附加参数填写
:param ListExtendsParamsRequest request
:return: ListExtendsParamsResponse
"""
return self.list_extends_params_with_http_info(request)
def list_extends_params_with_http_info(self, request):
"""查询附加参数
提单时,根据不同的产品或者问题类型,会存在不同的一些附加参数填写
:param ListExtendsParamsRequest request
:return: ListExtendsParamsResponse
"""
all_params = ['business_type_id', 'incident_sub_type_id', 'product_category_id', 'x_site', 'x_language', 'x_time_zone']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'business_type_id' in local_var_params:
query_params.append(('business_type_id', local_var_params['business_type_id']))
if 'incident_sub_type_id' in local_var_params:
query_params.append(('incident_sub_type_id', local_var_params['incident_sub_type_id']))
if 'product_category_id' in local_var_params:
query_params.append(('product_category_id', local_var_params['product_category_id']))
header_params = {}
if 'x_site' in local_var_params:
header_params['X-Site'] = local_var_params['x_site']
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
if 'x_time_zone' in local_var_params:
header_params['X-Time-Zone'] = local_var_params['x_time_zone']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/servicerequest/config/extends-map',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListExtendsParamsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_has_verified_contacts(self, request):
"""查询已验证的列表
查询已验证的列表
:param ListHasVerifiedContactsRequest request
:return: ListHasVerifiedContactsResponse
"""
return self.list_has_verified_contacts_with_http_info(request)
def list_has_verified_contacts_with_http_info(self, request):
"""查询已验证的列表
查询已验证的列表
:param ListHasVerifiedContactsRequest request
:return: ListHasVerifiedContactsResponse
"""
all_params = ['contact_way', 'customer_id', 'sub_customer_id', 'expired_time', 'verified_id', 'contact_value', 'area_code', 'offset', 'limit', 'x_site', 'x_language', 'x_time_zone']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'contact_way' in local_var_params:
query_params.append(('contact_way', local_var_params['contact_way']))
if 'customer_id' in local_var_params:
query_params.append(('customer_id', local_var_params['customer_id']))
if 'sub_customer_id' in local_var_params:
query_params.append(('sub_customer_id', local_var_params['sub_customer_id']))
if 'expired_time' in local_var_params:
query_params.append(('expired_time', local_var_params['expired_time']))
if 'verified_id' in local_var_params:
query_params.append(('verified_id', local_var_params['verified_id']))
if 'contact_value' in local_var_params:
query_params.append(('contact_value', local_var_params['contact_value']))
if 'area_code' in local_var_params:
query_params.append(('area_code', local_var_params['area_code']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
header_params = {}
if 'x_site' in local_var_params:
header_params['X-Site'] = local_var_params['x_site']
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
if 'x_time_zone' in local_var_params:
header_params['X-Time-Zone'] = local_var_params['x_time_zone']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/servicerequest/verifycodes/has-verified',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListHasVerifiedContactsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_history_operate_logs(self, request):
"""查询堡垒机历史操作记录
查询堡垒机历史操作记录
:param ListHistoryOperateLogsRequest request
:return: ListHistoryOperateLogsResponse
"""
return self.list_history_operate_logs_with_http_info(request)
def list_history_operate_logs_with_http_info(self, request):
"""查询堡垒机历史操作记录
查询堡垒机历史操作记录
:param ListHistoryOperateLogsRequest request
:return: ListHistoryOperateLogsResponse
"""
all_params = ['authorization_id', 'authorization_detail_id', 'session_id', 'group_id', 'sort', 'offset', 'limit', 'x_site', 'x_language', 'x_time_zone']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'authorization_id' in local_var_params:
path_params['authorization_id'] = local_var_params['authorization_id']
if 'authorization_detail_id' in local_var_params:
path_params['authorization_detail_id'] = local_var_params['authorization_detail_id']
if 'session_id' in local_var_params:
path_params['session_id'] = local_var_params['session_id']
query_params = []
if 'group_id' in local_var_params:
query_params.append(('group_id', local_var_params['group_id']))
if 'sort' in local_var_params:
query_params.append(('sort', local_var_params['sort']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
header_params = {}
if 'x_site' in local_var_params:
header_params['X-Site'] = local_var_params['x_site']
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
if 'x_time_zone' in local_var_params:
header_params['X-Time-Zone'] = local_var_params['x_time_zone']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/servicerequest/authorizations/{authorization_id}/authorization-details/{authorization_detail_id}/sessions/{session_id}/operation-logs',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListHistoryOperateLogsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_history_sessions(self, request):
"""查询堡垒机历史会话列表
查询堡垒机历史会话列
:param ListHistorySessionsRequest request
:return: ListHistorySessionsResponse
"""
return self.list_history_sessions_with_http_info(request)
def list_history_sessions_with_http_info(self, request):
"""查询堡垒机历史会话列表
查询堡垒机历史会话列
:param ListHistorySessionsRequest request
:return: ListHistorySessionsResponse
"""
all_params = ['authorization_id', 'authorization_detail_id', 'group_id', 'offset', 'limit', 'x_site', 'x_language', 'x_time_zone']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'authorization_id' in local_var_params:
path_params['authorization_id'] = local_var_params['authorization_id']
if 'authorization_detail_id' in local_var_params:
path_params['authorization_detail_id'] = local_var_params['authorization_detail_id']
query_params = []
if 'group_id' in local_var_params:
query_params.append(('group_id', local_var_params['group_id']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
header_params = {}
if 'x_site' in local_var_params:
header_params['X-Site'] = local_var_params['x_site']
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
if 'x_time_zone' in local_var_params:
header_params['X-Time-Zone'] = local_var_params['x_time_zone']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/servicerequest/authorizations/{authorization_id}/authorization-details/{authorization_detail_id}/history-sessions',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListHistorySessionsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_labels(self, request):
"""查询标签
查询标签
:param ListLabelsRequest request
:return: ListLabelsResponse
"""
return self.list_labels_with_http_info(request)
def list_labels_with_http_info(self, request):
"""查询标签
查询标签
:param ListLabelsRequest request
:return: ListLabelsResponse
"""
all_params = ['offset', 'limit', 'name', 'label_id', 'x_site', 'x_language', 'x_time_zone']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'name' in local_var_params:
query_params.append(('name', local_var_params['name']))
if 'label_id' in local_var_params:
query_params.append(('label_id', local_var_params['label_id']))
header_params = {}
if 'x_site' in local_var_params:
header_params['X-Site'] = local_var_params['x_site']
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
if 'x_time_zone' in local_var_params:
header_params['X-Time-Zone'] = local_var_params['x_time_zone']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/servicerequest/labels',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListLabelsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_messages(self, request):
"""查询留言
查询留言
:param ListMessagesRequest request
:return: ListMessagesResponse
"""
return self.list_messages_with_http_info(request)
def list_messages_with_http_info(self, request):
"""查询留言
查询留言
:param ListMessagesRequest request
:return: ListMessagesResponse
"""
all_params = ['case_id', 'group_id', 'offset', 'limit', 'x_site', 'x_language', 'x_time_zone']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'case_id' in local_var_params:
path_params['case_id'] = local_var_params['case_id']
query_params = []
if 'group_id' in local_var_params:
query_params.append(('group_id', local_var_params['group_id']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
header_params = {}
if 'x_site' in local_var_params:
header_params['X-Site'] = local_var_params['x_site']
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
if 'x_time_zone' in local_var_params:
header_params['X-Time-Zone'] = local_var_params['x_time_zone']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/servicerequest/cases/{case_id}/messages',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListMessagesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_more_instant_messages(self, request):
"""查询更多留言
查询更多留言
:param ListMoreInstantMessagesRequest request
:return: ListMoreInstantMessagesResponse
"""
return self.list_more_instant_messages_with_http_info(request)
def list_more_instant_messages_with_http_info(self, request):
"""查询更多留言
查询更多留言
:param ListMoreInstantMessagesRequest request
:return: ListMoreInstantMessagesResponse
"""
all_params = ['case_id', 'create_time', 'type', 'limit', 'group_id', 'x_site', 'x_language', 'x_time_zone']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'case_id' in local_var_params:
path_params['case_id'] = local_var_params['case_id']
query_params = []
if 'create_time' in local_var_params:
query_params.append(('create_time', local_var_params['create_time']))
if 'type' in local_var_params:
query_params.append(('type', local_var_params['type']))
if 'group_id' in local_var_params:
query_params.append(('group_id', local_var_params['group_id']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
header_params = {}
if 'x_site' in local_var_params:
header_params['X-Site'] = local_var_params['x_site']
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
if 'x_time_zone' in local_var_params:
header_params['X-Time-Zone'] = local_var_params['x_time_zone']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/servicerequest/cases/{case_id}/instant-messages/more',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListMoreInstantMessagesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_new_instant_messages(self, request):
"""轮询查询即时消息
轮询查询即时消息接口
:param ListNewInstantMessagesRequest request
:return: ListNewInstantMessagesResponse
"""
return self.list_new_instant_messages_with_http_info(request)
def list_new_instant_messages_with_http_info(self, request):
"""轮询查询即时消息
轮询查询即时消息接口
:param ListNewInstantMessagesRequest request
:return: ListNewInstantMessagesResponse
"""
all_params = ['case_ids', 'last_message_time_id', 'group_id', 'x_site', 'x_language', 'x_time_zone']
local_var_params = | |
Constraint(expr=m.x555*m.x2515 + m.x1180*m.x2521 + m.x1805*m.x2527 + m.x2430*m.x2533 <= 8)
# Machine-generated bilinear capacity constraints m.c2449 .. m.c2575.
# Each constraint couples four assignment variables with four weight
# variables and bounds the total by 8.  The variable indices form two
# arithmetic progressions, so the constraints are emitted with loops via
# getattr/setattr; this attaches Constraint components to the model under
# exactly the same names with exactly the same expressions as the original
# fully unrolled statements.
for _k in range(77):
    # c2449 + _k pairs x(556+_k), x(1181+_k), x(1806+_k), x(2431+_k)
    # with the weight variables x2515, x2521, x2527, x2533.
    setattr(m, 'c%d' % (2449 + _k), Constraint(
        expr=getattr(m, 'x%d' % (556 + _k)) * m.x2515
        + getattr(m, 'x%d' % (1181 + _k)) * m.x2521
        + getattr(m, 'x%d' % (1806 + _k)) * m.x2527
        + getattr(m, 'x%d' % (2431 + _k)) * m.x2533 <= 8))
for _k in range(50):
    # c2526 + _k pairs x(8+_k), x(633+_k), x(1258+_k), x(1883+_k)
    # with the weight variables x2516, x2522, x2528, x2534.
    setattr(m, 'c%d' % (2526 + _k), Constraint(
        expr=getattr(m, 'x%d' % (8 + _k)) * m.x2516
        + getattr(m, 'x%d' % (633 + _k)) * m.x2522
        + getattr(m, 'x%d' % (1258 + _k)) * m.x2528
        + getattr(m, 'x%d' % (1883 + _k)) * m.x2534 <= 8))
m.c2576 = | |
<reponame>fractalego/dgt
import os
import random
import torch
from gensim.models import KeyedVectors
from parvusdb import create_graph_from_string
from parvusdb.utils.code_container import DummyCodeContainer
from parvusdb.utils.match import Match
from parvusdb.utils.match import MatchException
from dgt.graph import GraphRule
from dgt.graph.graph import Graph
from dgt.graph.node_matcher import VectorNodeMatcher
from dgt.knowledge import Knowledge
from dgt.metric import GloveMetric
from dgt.auxiliary.config import device
# Directory containing this module (useful for locating bundled resources).
_path = os.path.dirname(__file__)
# Tiny epsilon; presumably guards divisions/logs against zero — not used in
# the code visible here, TODO confirm against the rest of the module.
_small = 1e-15
# Every per-graph list of node/edge vectors is padded to this fixed size
# before being stacked into matrices.
_max_items_size = 20
def get_data_goal_knowledge_from_json(json_item, metric, relations_metric):
    """Build the (facts, goals, knowledge) triple from a parsed JSON item.

    Args:
        json_item: dict with 'facts', 'goals', 'non_trainable_rules' and an
            optional 'trainable_rules' entry.
        metric: node-embedding metric passed through to the graphs/knowledge.
        relations_metric: relation-embedding metric passed through likewise.

    Returns:
        A tuple (data, goals, knowledge) of fact graphs, goal graphs and a
        Knowledge object loaded with the rules.

    Raises:
        RuntimeError: if the facts and goals lists differ in length.
    """
    facts = json_item['facts']
    goal_strings = json_item['goals']
    if len(facts) != len(goal_strings):
        raise RuntimeError('The number of facts and goals is not the same!')
    frozen_rules = '; '.join(json_item['non_trainable_rules'])
    # Missing 'trainable_rules' behaves like an empty rule string.
    trainable_rules = '; '.join(json_item.get('trainable_rules', []))
    data = [Graph.create_from_predicates_string(fact, metric, relations_metric, gradient=False)
            for fact in facts]
    goals = [Graph.create_from_predicates_string(goal, metric, relations_metric, gradient=False)
             for goal in goal_strings]
    knowledge = Knowledge(metric=metric, relations_metric=relations_metric)
    knowledge.add_rules(frozen_rules, gradient=False)
    knowledge.add_rules(trainable_rules, gradient=True)
    return data, goals, knowledge
def get_relations_embeddings_dict_from_json(json_item, embedding_size=50):
    """Create a GloveMetric over randomly initialised relation embeddings.

    Args:
        json_item: dict containing a 'relations' list of relation names.
        embedding_size: dimensionality of each relation vector (default 50).

    Returns:
        A GloveMetric wrapping a KeyedVectors model with one random vector
        per relation and a similarity threshold of 0.9.
    """
    names = json_item['relations']
    table = torch.nn.Embedding(len(names), embedding_size)
    vectors = []
    for index in range(len(names)):
        vectors.append(table(torch.LongTensor([index]))[0].detach().numpy())
    keyed = KeyedVectors(embedding_size)
    keyed.add(names, vectors)
    return GloveMetric(keyed, threshold=0.9)
def print_predicates(k):
    """Print the predicate string of every rule stored in the knowledge *k*."""
    all_rules = k.get_all_rules()
    print('Predicates:')
    for entry in all_rules:
        print(entry[0].predicates())
def print_all_the_paths(end_graph):
    """Print a separator, the score, and the predicates of each path's graphs."""
    for path in end_graph:
        print('---')
        print(path[1])
        # Plain loop instead of a side-effect list comprehension; same output.
        for graph in path[2]:
            print(graph.predicates())
def print_all_the_rules_with_weights(k):
    """Print each rule's predicate string alongside its weight."""
    all_rules = k.get_all_rules()
    print('Predicates:')
    for entry in all_rules:
        rule = entry[0]
        print(rule.predicates(), rule.weight)
def get_string_with_all_the_rules_with_weights(k, print_threshold=True, print_gradient=False):
    """Return one normalised predicate string per rule in *k*.

    Each string is stripped and has double spaces collapsed to single ones.
    """
    lines = []
    for entry in k.get_all_rules():
        text = entry[0].predicates(print_threshold=print_threshold,
                                   print_gradient=print_gradient)
        lines.append(text.strip().replace('  ', ' '))
    return lines
def create_graph_list(inference_list, goal):
    """Flatten an inference chain into strings, terminated by the goal.

    Rules contribute their hypothesis, plain graphs contribute themselves;
    anything else is skipped.  Exact-type checks are kept deliberately.
    """
    strings = []
    for step in inference_list:
        if type(step) is GraphRule:
            strings.append(str(step.get_hypothesis()))
        elif type(step) is Graph:
            strings.append(str(step))
    strings.append(str(goal))
    return strings
def create_rule_matrix(len_cons, len_hyp, matrix_size):
    """Return a (matrix_size x matrix_size) 0/1 mask for a rule.

    The top-left ``len_cons`` x ``len_hyp`` block is set to 1 and everything
    else is 0, mapping hypothesis slots onto consequence slots.

    Args:
        len_cons: number of consequence entries (rows set to 1).
        len_hyp: number of hypothesis entries (columns set to 1).
        matrix_size: full square size of the returned matrix.

    Returns:
        A float tensor on ``device`` with the active block filled with ones.
    """
    rule_matrix = torch.zeros([matrix_size, matrix_size]).to(device)
    # Vectorized block assignment replaces the original O(n^2) nested loops;
    # identical result.
    rule_matrix[:len_cons, :len_hyp] = 1.
    return rule_matrix
def create_all_rule_matrices(inference_list):
    """Collect the weighted node and relation rule matrices along a path.

    Non-rule steps (plain graphs) are skipped.  Each rule contributes its
    clamped weight times a block mask sized by the consequence/hypothesis
    vertex (resp. edge) counts.
    """
    node_matrices = []
    relation_matrices = []
    for step in inference_list:
        if type(step) is not GraphRule:
            continue
        hypothesis = step.get_hypothesis()
        consequence = step.get_consequence()
        clamped_weight = step.weight.clamp(min=0, max=1.)
        node_matrices.append(clamped_weight * create_rule_matrix(
            len(consequence._g.vs), len(hypothesis._g.vs), _max_items_size))
        relation_matrices.append(clamped_weight * create_rule_matrix(
            len(consequence._g.es), len(hypothesis._g.es), _max_items_size))
    return node_matrices, relation_matrices
def get_weight_list(inference_list):
    """Return the clamped-to-[0, 1] weight of every rule on the path."""
    return [step.weight.clamp(min=0, max=1.)
            for step in inference_list
            if type(step) is GraphRule]
def get_proper_vector(metric, item, key):
    """Return the L2-normalised embedding for ``item[key]``.

    Args:
        metric: object exposing ``get_vector_from_index``.
        item: mapping (e.g. an igraph vertex/edge) holding a vector index.
        key: attribute name under which the index is stored.

    Returns:
        The metric's vector for that index divided by its Euclidean norm.
    """
    # Hoisted: the original called get_vector_from_index twice per invocation.
    vector = metric.get_vector_from_index(item[key])
    return vector / vector.norm(2)
def graph_iterations(g):
    """Return every cyclic rotation of *g*'s vertex ordering.

    Rotation ``i`` maps vertex position ``s`` to ``(s + i) % n``, producing
    n permuted copies of the graph (including the identity at i == 0).
    """
    n = len(g.vs)
    rotations = []
    for shift in range(n):
        rotation = [(position + shift) % n for position in range(n)]
        rotations.append(g.permute_vertices(rotation))
    return rotations
def get_weighted_match(lhs, rhs, matching_variables, metric):
    """Score a substitution by summing dot products of matched node vectors.

    Args:
        lhs, rhs: graphs whose vertex sequences support name lookup and carry
            a 'vector' attribute.
        matching_variables: sequence whose first element maps lhs vertex
            names to rhs vertex names.
        metric: object exposing ``indices_dot_product(l_index, r_index)``.

    Returns:
        The accumulated dot-product weight of the match.
    """
    w = 0
    for k, v in matching_variables[0].items():
        # BUG FIX: the original indexed vs['name' == v], i.e. vs[False] which
        # is vs[0], so every pair scored vertex 0.  Look vertices up by their
        # 'name' attribute instead (igraph VertexSeq.find).
        rindex = rhs.vs.find(name=v)['vector']
        lindex = lhs.vs.find(name=k)['vector']
        w += metric.indices_dot_product(lindex, rindex)
    return w
def get_matching_variables(match, gl, gr, metric):
    """Return the substitution of the best-scoring rotation of gl against gr.

    Every cyclic rotation of gl is matched against gr; the substitution with
    the highest dot-product weight wins.
    """
    scored = []
    for rotation in graph_iterations(gl):
        substitution = match.get_variables_substitution_dictionaries(rotation, gr)
        weight = get_weighted_match(rotation, gr, substitution, metric)
        scored.append((substitution, weight))
    # Stable descending sort by weight; ties keep rotation order, as before.
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return scored[0][0]
def create_list_of_states(metric, relations_metric, graph_list, match, permutations):
    """Match consecutive (pre, post) graph pairs and collect their node states.

    ``graph_list`` is consumed pairwise: element 2i is the pre-graph, 2i+1 the
    post-graph.  For each pair the pre-graph is rotated according to
    ``permutations`` before matching.  Node vectors are L2-normalised and the
    per-graph lists are padded with 'dummy' zero vectors up to
    ``_max_items_size`` so they can later be stacked into fixed-size matrices.

    Returns:
        (pre_match, post_match, post_thresholds, substitutions); four empty
        lists if any pair fails to match.
    """
    pre_match = []
    post_match = []
    post_thresholds = []
    substitutions = []
    for i in range(0, len(graph_list), 2):
        gl = create_graph_from_string(str(graph_list[i]))
        gr = create_graph_from_string(str(graph_list[i + 1]))
        try:
            # Rotate the pre-graph; the alternatives below were experiments.
            iterations = graph_iterations(gl)
            # gl = iterations[permutation_shift % len(iterations)]
            gl = iterations[permutations[i // 2] % len(iterations)]
            # gl = iterations[int(random.uniform(0, len(iterations)))]
            substitutions.append(match.get_variables_substitution_dictionaries(gl, gr))
            # substitutions.append(get_matching_variables(match, gl, gr, metric))
            # Normalised node vectors, as [name, vector] pairs.
            pre_items = [[item['name'], get_proper_vector(metric, item, 'vector')] for item in gl.vs]
            post_items = [[item['name'], get_proper_vector(metric, item, 'vector')] for item in gr.vs]
            len_pre_items = len(pre_items)
            len_post_items = len(post_items)
            # Pad both sides to the fixed matrix size with zero 'dummy' slots.
            pre_items += [['dummy', torch.zeros(metric.vector_size).to(device)] for _ in range(_max_items_size - len_pre_items)]
            post_items += [['dummy', torch.zeros(metric.vector_size).to(device)] for _ in range(_max_items_size - len_post_items)]
            pre_match.append(pre_items)
            post_match.append(post_items)
            # Per-node thresholds of the post graph, zero-padded likewise.
            post_thrs = [metric.get_threshold_from_index(item['vector']) for item in gr.vs]
            post_thrs += [torch.zeros(1).to(device) for _ in range(_max_items_size - len(post_thrs))]
            post_thresholds.append(post_thrs)
        except MatchException:
            # Any failed pair invalidates the whole sequence.
            return [], [], [], []
    return pre_match, post_match, post_thresholds, substitutions
def create_list_of_states_without_weight_matching(metric, relations_metric, graph_list, match):
    """Variant of ``create_list_of_states`` that matches graphs as given.

    Identical collection logic, but the pre-graphs are not rotated via
    permutations before matching.

    Returns:
        (pre_match, post_match, post_thresholds, substitutions); four empty
        lists if any pair fails to match.
    """
    pre_match = []
    post_match = []
    post_thresholds = []
    substitutions = []
    for i in range(0, len(graph_list), 2):
        gl = create_graph_from_string(str(graph_list[i]))
        gr = create_graph_from_string(str(graph_list[i + 1]))
        try:
            substitutions.append(match.get_variables_substitution_dictionaries(gl, gr))
            # Normalised node vectors, as [name, vector] pairs.
            pre_items = [[item['name'], get_proper_vector(metric, item, 'vector')] for item in gl.vs]
            post_items = [[item['name'], get_proper_vector(metric, item, 'vector')] for item in gr.vs]
            len_pre_items = len(pre_items)
            len_post_items = len(post_items)
            # Pad both sides to the fixed matrix size with zero 'dummy' slots.
            pre_items += [['dummy', torch.zeros(metric.vector_size).to(device)] for _ in range(_max_items_size - len_pre_items)]
            post_items += [['dummy', torch.zeros(metric.vector_size).to(device)] for _ in range(_max_items_size - len_post_items)]
            pre_match.append(pre_items)
            post_match.append(post_items)
            # Per-node thresholds of the post graph, zero-padded likewise.
            post_thrs = [metric.get_threshold_from_index(item['vector']) for item in gr.vs]
            post_thrs += [torch.zeros(1).to(device) for _ in range(_max_items_size - len(post_thrs))]
            post_thresholds.append(post_thrs)
        except MatchException:
            # Any failed pair invalidates the whole sequence.
            return [], [], [], []
    return pre_match, post_match, post_thresholds, substitutions
def order_pre_post_matches_according_to_substitutions(pre_match, post_match, substitutions, nodes_or_relations=0):
    """Reorder matched vectors so substituted pairs line up first.

    Putting the substituted (pre, post) pairs in the same positions makes the
    scattering matrix diagonal.  Unmatched entries follow in their original
    order, and both sides are re-padded with zero 'dummy' slots up to
    ``_max_items_size``.

    :param pre_match: per-step lists of [name, vector] pairs (pre side).
    :param post_match: per-step lists of [name, vector] pairs (post side).
    :param substitutions: per-step matching dictionaries.
    :param nodes_or_relations: 0 for node substitutions, 1 for relations.
    :return: (new_pre_match, new_post_match)
    """
    new_pre_match = []
    new_post_match = []
    for step in range(len(substitutions)):
        pre_entries = pre_match[step]
        post_entries = post_match[step]
        pre_names = [entry[0] for entry in pre_entries]
        post_names = [entry[0] for entry in post_entries]
        left = []
        right = []
        matched_pre = []
        matched_post = []
        # Substituted pairs first, aligned position by position.
        for post_name, pre_name in substitutions[step][nodes_or_relations].items():
            left.append(pre_entries[pre_names.index(pre_name)])
            right.append(post_entries[post_names.index(post_name)])
            matched_pre.append(pre_name)
            matched_post.append(post_name)
        # Remaining entries keep their original relative order.
        for entry in pre_entries:
            if entry[0] not in matched_pre:
                left.append(entry)
        for entry in post_entries:
            if entry[0] not in matched_post:
                right.append(entry)
        # Names can repeat because of the matching algorithm, so the lists are
        # normalised back to the fixed maximum length with zero dummies.
        pre_dim = pre_entries[0][1].shape[0]
        post_dim = post_entries[0][1].shape[0]
        left += [['dummy', torch.zeros(pre_dim).to(device)]
                 for _ in range(_max_items_size - len(left))]
        right += [['dummy', torch.zeros(post_dim).to(device)]
                  for _ in range(_max_items_size - len(right))]
        new_pre_match.append(left)
        new_post_match.append(right)
    return new_pre_match, new_post_match
def create_list_of_states_for_relations(nodes_metric, metric, graph_list, match, permutations):
    """Edge-level counterpart of ``create_list_of_states``.

    Collects normalised 'rvector' embeddings and thresholds for the *edges*
    of each (pre, post) graph pair.  Note the threshold padding uses ones
    rather than zeros — presumably so that padded relation slots never pass
    the threshold test; TODO confirm.

    Returns:
        (pre_match, post_match, post_thresholds, substitutions); four empty
        lists if any pair fails to match.
    """
    pre_match = []
    post_match = []
    post_thresholds = []
    substitutions = []
    for i in range(0, len(graph_list), 2):
        gl = create_graph_from_string(str(graph_list[i]))
        gr = create_graph_from_string(str(graph_list[i + 1]))
        try:
            # Rotate the pre-graph; the alternatives below were experiments.
            iterations = graph_iterations(gl)
            #gl = iterations[permutation_shift % len(iterations)]
            gl = iterations[permutations[i // 2] % len(iterations)]
            #gl = iterations[int(random.uniform(0, len(iterations)))]
            substitutions.append(match.get_variables_substitution_dictionaries(gl, gr))
            # substitutions.append(get_matching_variables(match, gl, gr, nodes_metric))
            # Normalised relation (edge) vectors, as [name, vector] pairs.
            pre_items = [[item['name'], get_proper_vector(metric, item, 'rvector')] for item in gl.es]
            post_items = [[item['name'], get_proper_vector(metric, item, 'rvector')] for item in gr.es]
            len_pre_items = len(pre_items)
            len_post_items = len(post_items)
            # Pad both sides to the fixed matrix size with zero 'dummy' slots.
            pre_items += [['dummy', torch.zeros(metric.vector_size).to(device)] for _ in range(_max_items_size - len_pre_items)]
            post_items += [['dummy', torch.zeros(metric.vector_size).to(device)] for _ in range(_max_items_size - len_post_items)]
            pre_match.append(pre_items)
            post_match.append(post_items)
            # Per-edge thresholds of the post graph, padded with ones.
            post_thrs = [metric.get_threshold_from_index(item['rvector']) for item in gr.es]
            post_thrs += [torch.ones(1).to(device) for _ in range(_max_items_size - len(post_thrs))]
            post_thresholds.append(post_thrs)
        except MatchException:
            # Any failed pair invalidates the whole sequence.
            return [], [], [], []
    return pre_match, post_match, post_thresholds, substitutions
def create_scattering_sequence(pre_match, post_match, post_thresholds, substitutions, rule_matrices,
                               nodes_or_relations, clamp):
    """Compose the per-step scattering matrices of a path into one linear map.

    For every matched (pre, post) step, a soft scattering matrix is computed
    from the dot products of the vectors, shifted column-wise by the clamped
    post thresholds and masked by the adjacency implied by the substitution.
    The step matrices, interleaved with the corresponding rule weight matrices
    where available, are multiplied together in order.

    Args:
        pre_match / post_match: per-step [name, vector] lists padded to
            ``_max_items_size``.
        post_thresholds: per-step threshold tensors for the post vectors.
        substitutions: per-step matching dictionaries; index 0 holds node
            pairs, index 1 relation pairs.
        rule_matrices: weight matrices of the rules on the path; may be
            shorter than the number of steps (the goal step has no rule).
        nodes_or_relations: 0 to use node substitutions, 1 for relations.
        clamp: lower bound applied to the thresholds.

    Returns:
        The ordered product of all masked scattering and rule matrices.
    """
    # dim=1 made explicit: matches the legacy implicit choice for 2-D input
    # and silences the deprecation warning of torch.nn.Softmax().
    softmax = torch.nn.Softmax(dim=1)
    scattering_matrices = []
    for step in range(len(substitutions)):
        pre_vectors = torch.stack([entry[1] for entry in pre_match[step]])
        post_vectors = torch.stack([entry[1] for entry in post_match[step]])
        post_biases = torch.stack([entry[0] for entry in post_thresholds[step]])
        # Broadcasting subtracts clamped bias j from column j, replacing the
        # original O(n^2) element-by-element bias matrix fill.
        bias_row = post_biases.clamp(min=clamp, max=1)
        scattering_matrix = softmax(
            torch.mm(post_vectors, torch.transpose(pre_vectors, 0, 1)) - bias_row)
        # Mask: keep only the entries the substitution actually pairs up.
        adjacency_matrix = torch.zeros(scattering_matrix.shape).to(device)
        pre_names = [entry[0] for entry in pre_match[step]]
        post_names = [entry[0] for entry in post_match[step]]
        for post_name, pre_name in substitutions[step][nodes_or_relations].items():
            adjacency_matrix[pre_names.index(pre_name), post_names.index(post_name)] = 1.
        scattering_matrices.append(torch.mul(adjacency_matrix, scattering_matrix))
    scattering_sequence = torch.eye(_max_items_size).to(device)
    for step, scattering_matrix in enumerate(scattering_matrices):
        scattering_sequence = torch.mm(scattering_matrix, scattering_sequence)
        # Fewer rule matrices than steps is expected (the goal step has no
        # rule); the original hid this with a bare ``except: pass``.
        if step < len(rule_matrices):
            scattering_sequence = torch.mm(rule_matrices[step], scattering_sequence)
    return scattering_sequence
def train_a_single_path(path, goal, metric, relation_metric, no_threshold_match, threshold_match, optimizer, epochs, permutations, clamp):
    """Optimise the trainable rules along one inference path toward *goal*.

    Each epoch rebuilds the node and relation scattering sequences of the
    path, computes a BCE-with-logits loss against indicator goal vectors,
    takes one optimiser step, and then checks (with the thresholded matcher)
    whether the trained rules now satisfy the goal.

    Args:
        path: triple whose third element is the list of graphs/rules forming
            the path.
        goal: target graph the path should reach.
        metric: node-embedding metric.
        relation_metric: relation-embedding metric.
        no_threshold_match: matcher used while training (ignores thresholds).
        threshold_match: matcher used to test whether the goal is satisfied.
        optimizer: torch optimizer over the trainable parameters.
        epochs: maximum number of gradient steps.
        permutations: per-pair rotation indices used when matching graphs.
        clamp: lower clamp for the threshold biases.

    Returns:
        True as soon as the trained rules satisfy the goal, False otherwise.
    """
    for _epoch in range(epochs):
        graph_list = create_graph_list(path[2], goal)
        rule_matrices, relations_rule_matrices = create_all_rule_matrices(path[2])
        # Paths without a differentiable rule cannot be trained at all.
        if not any(step.gradient for step in path[2]):
            break
        pre_match, post_match, post_thresholds, substitutions = create_list_of_states(
            metric, relation_metric, graph_list, no_threshold_match, permutations)
        # NOTE: this call intentionally overwrites ``substitutions``; both
        # calls must succeed for training to proceed.
        relations_pre_match, relations_post_match, relations_post_thresholds, substitutions = \
            create_list_of_states_for_relations(
                metric, relation_metric, graph_list, no_threshold_match, permutations)
        if not substitutions:
            break
        pre_match, post_match = order_pre_post_matches_according_to_substitutions(
            pre_match, post_match, substitutions, nodes_or_relations=0)
        scattering_sequence = create_scattering_sequence(
            pre_match, post_match, post_thresholds, substitutions,
            rule_matrices, nodes_or_relations=0, clamp=clamp)
        relations_pre_match, relations_post_match = order_pre_post_matches_according_to_substitutions(
            relations_pre_match, relations_post_match, substitutions, nodes_or_relations=1)
        relations_scattering_sequence = create_scattering_sequence(
            relations_pre_match, relations_post_match, relations_post_thresholds,
            substitutions, relations_rule_matrices, nodes_or_relations=1, clamp=clamp)
        initial_vector = torch.ones(_max_items_size).to(device)
        final_vector = torch.mv(scattering_sequence, initial_vector)
        # BUG FIX: the original used ``item[0] is 'dummy'`` — identity
        # comparison against a string literal, which only works through
        # CPython interning and raises a SyntaxWarning.  Use ``==``.
        goal_vector = torch.Tensor(
            [0 if item[0] == 'dummy' else 1 for item in post_match[-1]]).to(device)
        relations_initial_vector = torch.ones(_max_items_size).to(device)
        relations_final_vector = torch.mv(relations_scattering_sequence, relations_initial_vector)
        relations_goal_vector = torch.Tensor(
            [0 if item[0] == 'dummy' else 1 for item in relations_post_match[-1]]).to(device)
        criterion = torch.nn.BCEWithLogitsLoss()
        loss = (criterion(final_vector, goal_vector)
                + criterion(relations_final_vector, relations_goal_vector)).to(device)
        optimizer.zero_grad()
        loss.backward(retain_graph=True)
        optimizer.step()
        # Check whether the trained sequence of rules now satisfies the goal.
        new_graph_list = create_graph_list(path[2], goal)
        _, _, _, substitutions = create_list_of_states(
            metric, relation_metric, new_graph_list, threshold_match, permutations)
        if substitutions:
            print('Making the substitution!', loss)
            return True
    return False
def train_all_paths(metric, relations_metric, k, paths, goal, | |
<reponame>ram8647/gcb-mobilecsp
# Copyright 2016 Mobile CSP Project. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes and methods to create and manage the Teacher Dashboard.
Based off of the announcements module, which was created by
<EMAIL>.
"""
__author__ = '<NAME> (<EMAIL>)'
__author__ = '<EMAIL>'
__author__ = '<NAME> (<EMAIL>)'
import cgi
import datetime
import os
import urllib
import logging
import jinja2
import appengine_config
from common import tags
from common import utils as common_utils
from common import schema_fields
from common import jinja_utils
from controllers import utils
from models import resources_display
from models import custom_modules
from models import entities
from models import models
from models import roles
from models import transforms
from models import utils as models_utils
from models.models import MemcacheManager
from models.models import Student
from models.models import EventEntity
from modules.teacher import messages
from modules.dashboard import dashboard
from modules.oeditor import oeditor
from google.appengine.ext import db
from google.appengine.api import users
# Our modules classes
from course_entity import CourseSectionEntity
from course_entity import SectionItemRESTHandler
from teacher_entity import TeacherEntity
from teacher_entity import TeacherItemRESTHandler
from teacher_entity import TeacherRights
from student_activites import ActivityScoreParser
from student_answers import StudentAnswersEntity
# Set to True to emit the verbose logging.debug output scattered through
# this module's handlers.
GLOBAL_DEBUG = False
MODULE_NAME = 'teacher'
MODULE_TITLE = 'Teacher Dashboard'
#Setup paths and directories for templates and resources
RESOURCES_PATH = '/modules/teacher/resources'
TEMPLATE_DIR = os.path.join(
    appengine_config.BUNDLE_ROOT, 'modules', MODULE_NAME, 'templates')
# These are the module's templates. The first is the teacher's splash page.
TEACHERS_TEMPLATE = os.path.join(TEMPLATE_DIR, 'teacher_dashboard.html')
STUDENT_ROSTER_TEMPLATE = os.path.join(TEMPLATE_DIR, 'student_roster.html')
STUDENT_DASHBOARD_TEMPLATE = os.path.join(TEMPLATE_DIR, 'student_dashboard.html')
QUESTION_PREVIEW_TEMPLATE = os.path.join(TEMPLATE_DIR, 'question_preview.html')
class TeacherHandlerMixin(object):
    """Shared URL-building and template-formatting helpers.

    Mixed into both the teacher-facing and the admin-facing dashboard
    handlers. Expects the host class to provide ``canonicalize_url`` and
    ``create_xsrf_token`` (supplied by the course-builder base handler).
    """

    def get_admin_action_url(self, action, key=None):
        """Build a URL for an admin dashboard action, optionally keyed."""
        args = {'action': action}
        if key:
            args['key'] = key
        return self.canonicalize_url(
            '{}?{}'.format(
                AdminDashboardHandler.URL, urllib.urlencode(args)))

    def get_dashboard_action_url(self, action, key=None):
        """Build a URL for a teacher dashboard action, optionally keyed."""
        args = {'action': action}
        if key:
            args['key'] = key
        return self.canonicalize_url(
            '{}?{}'.format(
                TeacherDashboardHandler.DASHBOARD_URL, urllib.urlencode(args)))

    def format_admin_template(self, items):
        """ Formats the template for the Admin 'Add Teacher' page.

        When clicked the 'Admin: Add Teacher button opens up
        a list of teachers plus and 'Add Teacher' button.

        Args:
            items: iterable of teacher entities to display.

        Returns:
            dict with a 'children' list plus, for privileged users,
            'add_action'/'add_xsrf_token' entries for the Add button.
        """
        template_items = []
        for item in items:
            item = transforms.entity_to_dict(item)
            date = item.get('date')
            if date:
                # Convert the stored date to epoch milliseconds for the
                # JavaScript side of the template.
                date = datetime.datetime.combine(
                    date, datetime.time(0, 0, 0, 0))
                item['date'] = (
                    date - datetime.datetime(1970, 1, 1)).total_seconds() * 1000
            # add 'edit' actions
            if TeacherRights.can_edit_section(self):
                item['edit_action'] = self.get_admin_action_url(
                    AdminDashboardHandler.ADMIN_EDIT_ACTION, key=item['key'])
                item['delete_xsrf_token'] = self.create_xsrf_token(
                    AdminDashboardHandler.ADMIN_DELETE_ACTION)
                item['delete_action'] = self.get_admin_action_url(
                    AdminDashboardHandler.ADMIN_DELETE_ACTION,
                    key=item['key'])
            template_items.append(item)
        output = {}
        output['children'] = template_items
        # Add actions for the 'Add Teacher'
        if TeacherRights.can_edit(self):
            output['add_xsrf_token'] = self.create_xsrf_token(
                AdminDashboardHandler.ADMIN_ADD_ACTION)
            output['add_action'] = self.get_admin_action_url(
                AdminDashboardHandler.ADMIN_ADD_ACTION)
        return output

    def format_dashboard_template(self, sections, user_email):
        """ Formats the template for the main Teacher Dashboard page.

        This is the page that registered teachers will see. It consists of
        list of the teacher's course sections and buttons to manage the
        sections.

        Args:
            sections: iterable of course-section entities.
            user_email: email of the signed-in teacher; edit/delete actions
                are only attached to sections this teacher owns.

        Returns:
            dict consumed by the dashboard template ('sections', new-section
            actions and, for admins, the Add-Teacher actions).
        """
        template_sections = []
        if sections:
            for section in sections:
                section = transforms.entity_to_dict(section)
                date = section.get('date')
                if date:
                    # Epoch milliseconds, as expected by the template's JS.
                    date = datetime.datetime.combine(
                        date, datetime.time(0, 0, 0, 0))
                    section['date'] = (
                        date - datetime.datetime(1970, 1, 1)).total_seconds() * 1000
                if GLOBAL_DEBUG:
                    logging.debug('***RAM*** format template section = ' + str(section))
                # Add 'edit' and 'delete' actions to each section that will be displayed
                if section['teacher_email'] == user_email and TeacherRights.can_edit_section(self):
                    section['edit_action'] = self.get_dashboard_action_url(
                        TeacherDashboardHandler.EDIT_SECTION_ACTION, key=section['key'])
                    section['delete_xsrf_token'] = self.create_xsrf_token(
                        TeacherDashboardHandler.DELETE_SECTION_ACTION)
                    section['delete_action'] = self.get_dashboard_action_url(
                        TeacherDashboardHandler.DELETE_SECTION_ACTION,
                        key=section['key'])
                template_sections.append(section)
        output = {}
        output['sections'] = template_sections
        # Add actions for the 'New Section' button
        output['newsection_xsrf_token'] = self.create_xsrf_token(
            TeacherDashboardHandler.ADD_SECTION_ACTION)
        output['add_section'] = self.get_dashboard_action_url(
            TeacherDashboardHandler.ADD_SECTION_ACTION)
        # Add actions of the 'Admin' button -- to add new teachers
        if TeacherRights.can_edit(self):
            output['is_admin'] = True
            output['add_xsrf_token'] = self.create_xsrf_token(
                AdminDashboardHandler.ADMIN_LIST_ACTION)
            output['add_action'] = self.get_admin_action_url(
                AdminDashboardHandler.ADMIN_LIST_ACTION)
        return output
class TeacherDashboardHandler(
        TeacherHandlerMixin, utils.BaseHandler,
        utils.ReflectiveRequestHandler):
    """ Handle all Teacher (non-Admin) functions for the Teacher Dashboard.

    The Teacher functions include creating and deleting course sections,
    adding and removing students from sections, and monitoring student
    performance. The Admin functions consist solely of registering teachers
    and are handled by AdminDashboardHandler.
    """

    # Actions for the various Section functions
    LIST_SECTION_ACTION = 'edit_sections'
    EDIT_SECTION_ACTION = 'edit_section'
    DELETE_SECTION_ACTION = 'delete_section'
    ADD_SECTION_ACTION = 'add_section'
    DISPLAY_ROSTER_ACTION = 'display_roster'
    STUDENT_DASHBOARD_ACTION = 'student_dashboard'
    PREVIEW_QUESTION = 'question_preview'

    # The links for Teacher functions
    DASHBOARD_LINK_URL = 'teacher'
    DASHBOARD_URL = '/{}'.format(DASHBOARD_LINK_URL)
    DASHBOARD_LIST_URL = '{}?{}'.format(DASHBOARD_LINK_URL, LIST_SECTION_ACTION) if False else '{}?action={}'.format(DASHBOARD_LINK_URL, LIST_SECTION_ACTION)
def is_registered_teacher(self, user_email):
    """Return True iff ``user_email`` belongs to a registered teacher.

    Fetches the teacher roster, filters it through TeacherRights, and
    scans for a matching email address.

    Args:
        user_email: email address of the currently signed-in user.

    Returns:
        bool: True when a registered teacher has this email address.
    """
    items = TeacherEntity.get_teachers()
    items = TeacherRights.apply_rights(self, items)
    for teacher in items:
        if GLOBAL_DEBUG:
            logging.debug('***RAM*** teacher = ' + str(teacher.email))
            # Bug fix: the original line referenced an undefined name
            # ``user`` (users.User.email(user)), which raised NameError
            # whenever GLOBAL_DEBUG was enabled. Log the supplied email.
            logging.debug('***RAM*** user ' + str(user_email))
        if teacher.email == user_email:
            return True
    return False
def _render(self):
    """ Renders the TEACHERS_TEMPLATE by calling super.render(template)

    This assumes that the template's values are in template_value.
    """
    # Consistency: delegate to render_page so the navbar setup is
    # defined in exactly one place.
    self.render_page(TEACHERS_TEMPLATE)
def _render_roster(self):
    """ Renders the STUDENT_ROSTER_TEMPLATE by calling super.render(template)

    This assumes that the template's values are in template_value.
    """
    # Consistency: delegate to render_page so the navbar setup is
    # defined in exactly one place.
    self.render_page(STUDENT_ROSTER_TEMPLATE)
def _render_student_dashboard(self):
    """ Renders the STUDENT_DASHBOARD_TEMPLATE by calling super.render(template)

    This assumes that the template's values are in template_value.
    """
    # Consistency: delegate to render_page so the navbar setup is
    # defined in exactly one place.
    self.render_page(STUDENT_DASHBOARD_TEMPLATE)
def render_page(self, template):
    """Render ``template`` with the 'teacher' navbar tab highlighted.

    Template values are expected to already be in ``self.template_value``.
    """
    navbar_state = {'teacher': True}
    self.template_value['navbar'] = navbar_state
    self.render(template)
def get_question_preview(self):
    """
    Provides a preview of quiz questions.

    Invoked from student_dashboard. The question is displayed in a modal
    window that is initialized in modal-window.js.

    This is an adaptation of the question_preview used by the dashboard
    module. It supports Quizly questions: a 'url' request parameter marks
    a Quizly question; otherwise 'quid' identifies a native question.
    """
    self.template_value['navbar'] = {'teacher': True}
    self.template_value['resources_path'] = RESOURCES_PATH
    url = self.request.get('url')
    if url == '':
        # Native question: embed a <question> tag resolved by the tag
        # rendering machinery.
        self.template_value['question'] = tags.html_to_safe_dom(
            '<question quid="{}">'.format(self.request.get('quid')), self)
    else:
        # Quizly question: the template loads the quiz from the URL.
        self.template_value['url'] = url
        self.template_value['question'] = 'Quizly'
    self.render(QUESTION_PREVIEW_TEMPLATE)
def get_edit_sections(self):
    """ Displays a list of this teacher's sections, using the TEACHERS_TEMPLATE.

    This callback method automatically handles 'edit_sections' actions and
    must be named 'get_edit_sections'.

    This action displays the splash page for the Teacher Dashboard. It
    displays when the user clicks on the navbar 'Teachers' tab. From there
    the Teacher can manage all their sections. It also contains an
    'Admin: Add Teacher' button, which is visible only to admin users.
    Its action is handled by AdminDashboardHandler.

    The template is injected with a list of this teacher's sections.
    Unauthenticated or unregistered users are redirected to /course.
    """
    # Make sure the user is registered and a registered teacher
    # If not redirect to main course page
    alerts = []
    user_email = ''
    disable = False
    if not users.get_current_user():
        alerts.append('Access denied. Only registered teachers can use this feature.')
        disable = True
    else:
        user_email = users.get_current_user().email()
        if not self.is_registered_teacher(user_email):
            alerts.append('Access denied. Please see a course admin.')
            disable = True
    if disable:
        self.redirect('/course')
    else:
        sections = CourseSectionEntity.get_sections()
        sections = TeacherRights.apply_rights(self, sections)
        if GLOBAL_DEBUG:
            logging.debug('***RAM*** Trace: get_edit_sections')
        # self._render will render the SECTIONS template
        self.template_value['teacher'] = self.format_dashboard_template(sections, user_email)
        self.template_value['teacher_email'] = user_email
        self.template_value['alerts'] = alerts  # Not really used anymore
        self.template_value['disabled'] = disable  # Not really used anymore
        self._render()
def get_add_section(self):
    """ Shows an editor for a section entity.

    This callback method is triggered when the user clicks on the
    'Create New Section' button in the Teacher splash page.

    Creates an empty, active section entity immediately, then redirects
    to the edit form for that entity. Responds 401 when the user lacks
    section-creation rights.
    """
    if not TeacherRights.can_add_section(self):
        self.error(401)
        return
    if GLOBAL_DEBUG:
        logging.debug('***RAM** get_add_section')
    # NOTE(review): the blank entity is persisted before editing;
    # abandoning the editor presumably leaves an empty section behind.
    entity = CourseSectionEntity.make('', '', '', True)
    entity.put()
    self.redirect(self.get_dashboard_action_url(
        self.EDIT_SECTION_ACTION, key=entity.key()))
def get_edit_section(self):
    """Shows an editor for a section.

    Builds an oeditor form bound to the section REST handler for the
    entity identified by the 'key' request parameter. The form carries a
    delete action protected by an XSRF token, and exits back to the
    section list.
    """
    key = self.request.get('key')
    schema = SectionItemRESTHandler.SCHEMA()
    exit_url = self.canonicalize_url('/{}'.format(self.DASHBOARD_LIST_URL))
    rest_url = self.canonicalize_url('/rest/section/item')
    form_html = oeditor.ObjectEditor.get_html_for(
        self,
        schema.get_json_schema(),
        schema.get_schema_dict(),
        key, rest_url, exit_url,
        delete_method='delete',
        delete_message='Are you sure you want to delete this section?',
        delete_url=self._get_delete_url(
            SectionItemRESTHandler.URL, key, 'section-delete'),
        display_types=schema.get_display_types())
    if GLOBAL_DEBUG:
        logging.debug('***RAM** get_edit_section rendering page')
    # Fix: removed stray trailing semicolon (non-idiomatic in Python).
    self.template_value['main_content'] = form_html
    self._render()
def post_delete_section(self):
    """Deletes a section (POST handler).

    Responds 401 when the user lacks deletion rights; otherwise removes
    the entity named by 'key' (if it still exists) and returns to the
    section list.
    """
    if not TeacherRights.can_delete_section(self):
        self.error(401)
        return
    if GLOBAL_DEBUG:
        logging.debug('***RAM** post_delete_section')
    section_key = self.request.get('key')
    section = CourseSectionEntity.get(section_key)
    if section:
        section.delete()
    self.redirect('/{}'.format(self.DASHBOARD_LIST_URL))
def _get_delete_url(self, base_url, key, xsrf_token_name):
    """Build a delete URL carrying the entity key and an XSRF token."""
    token = cgi.escape(self.create_xsrf_token(xsrf_token_name))
    query = urllib.urlencode({
        'key': key,
        'xsrf_token': token,
    })
    return '{}?{}'.format(self.canonicalize_url(base_url), query)
def get_lessons_for_roster(self, units, course):
    """Map each unit id to a JSON-friendly list of its lessons.

    Args:
        units: iterable of unit objects exposing ``unit_id``.
        course: course object exposing ``get_lessons(unit_id)``.

    Returns:
        JSON string of {unit_id: [{'title', 'unit_id', 'lesson_id'}, ...]}.
    """
    lessons = {
        unit.unit_id: [
            {
                'title': lesson.title,
                'unit_id': lesson.unit_id,
                'lesson_id': lesson.lesson_id,
            }
            for lesson in course.get_lessons(unit.unit_id)
        ]
        for unit in units
    }
    # Convert to JSON for consumption by the roster template.
    return transforms.dumps(lessons, {})
or more spectral indices (indices are added as bands) for an image
collection.
.. deprecated:: 0.3.0
Use :func:`spectralIndices()` instead.
Tip
----------
Check more info about the supported platforms and spectral indices in the
:ref:`User Guide<Spectral Indices Computation>`.
Parameters
----------
self : ee.ImageCollection
Image collection to compute indices on. Must be scaled to [0,1].
index : string | list[string], default = 'NDVI'
Index or list of indices to compute.\n
Available options:
- 'vegetation' : Compute all vegetation indices.
- 'burn' : Compute all burn indices.
- 'water' : Compute all water indices.
- 'snow' : Compute all snow indices.
- 'drought' : Compute all drought indices.
- 'urban' : Compute all urban (built-up) indices.
- 'kernel' : Compute all kernel indices.
- 'all' : Compute all indices listed below.
Awesome Spectral Indices for GEE:
Check the complete list of indices
`here <https://awesome-ee-spectral-indices.readthedocs.io/en/latest/list.html>`_.
G : float, default = 2.5
Gain factor. Used just for index = 'EVI'.
C1 : float, default = 6.0
Coefficient 1 for the aerosol resistance term. Used just for index = 'EVI'.
C2 : float, default = 7.5
Coefficient 2 for the aerosol resistance term. Used just for index = 'EVI'.
L : float, default = 1.0
Canopy background adjustment. Used just for index = ['EVI','SAVI'].
cexp : float, default = 1.16
Exponent used for OCVI.
nexp : float, default = 2.0
Exponent used for GDVI.
alpha : float, default = 0.1
Weighting coefficient used for WDRVI.
slope : float, default = 1.0
Soil line slope.
intercept : float, default = 0.0
Soil line intercept.
gamma : float, default = 1.0
Weighting coefficient used for ARVI.
kernel : str, default = 'RBF'
Kernel used for kernel indices.\n
Available options:
- 'linear' : Linear Kernel.
- 'RBF' : Radial Basis Function (RBF) Kernel.
- 'poly' : Polynomial Kernel.
sigma : str | float, default = '0.5 * (a + b)'
Length-scale parameter. Used for kernel = 'RBF'. If str, this must be an
expression including 'a' and 'b'. If numeric, this must be positive.
p : float, default = 2.0
Kernel degree. Used for kernel = 'poly'.
c : float, default = 1.0
Free parameter that trades off the influence of higher-order versus lower-order
terms in the polynomial kernel. Used for kernel = 'poly'. This must be greater
than or equal to 0.
online : boolean, default = False
Wheter to retrieve the most recent list of indices directly from the GitHub
repository and not from the local copy.
drop : boolean, default = True
Whether to drop all bands except the new spectral indices.
Returns
-------
ee.ImageCollection
Image collection with the computed spectral index, or indices, as new bands.
See Also
--------
scale : Scales bands on an image collection.
Examples
--------
>>> import ee, eemont
>>> ee.Authenticate()
>>> ee.Initialize()
>>> S2 = ee.ImageCollection('COPERNICUS/S2_SR').scale()
- Computing one spectral index:
>>> S2.index('NDVI')
- Computing indices with different parameters:
>>> S2.index('SAVI',L = 0.5)
- Computing multiple indices:
>>> S2.index(['NDVI','EVI','GNDVI'])
- Computing a specific group of indices:
>>> S2.index('vegetation')
- Computing kernel indices:
>>> S2.index(['kNDVI'],kernel = 'poly',p = 5)
- Computing all indices:
>>> S2.index('all')
References
----------
.. [1] https://awesome-ee-spectral-indices.readthedocs.io/en/latest/list.html
"""
warnings.warn(
"index() is deprecated, please use spectralIndices() instead",
DeprecationWarning,
)
return ee_extra.Spectral.core.spectralIndices(
self,
index,
G,
C1,
C2,
L,
cexp,
nexp,
alpha,
slope,
intercept,
gamma,
kernel,
sigma,
p,
c,
online,
drop,
)
@extend(ee.imagecollection.ImageCollection)
def spectralIndices(
    self,
    index="NDVI",
    G=2.5,
    C1=6.0,
    C2=7.5,
    L=1.0,
    cexp=1.16,
    nexp=2.0,
    alpha=0.1,
    slope=1.0,
    intercept=0.0,
    gamma=1.0,
    kernel="RBF",
    sigma="0.5 * (a + b)",
    p=2.0,
    c=1.0,
    online=False,
    drop=False,
):
    """Computes one or more spectral indices (indices are added as bands) for an image
    collection from the Awesome List of Spectral Indices.

    Tip
    ----------
    Check more info about the supported platforms and spectral indices in the
    :ref:`User Guide<Spectral Indices Computation>`.

    Parameters
    ----------
    self : ee.ImageCollection
        Image collection to compute indices on. Must be scaled to [0,1].
    index : string | list[string], default = 'NDVI'
        Index or list of indices to compute.\n
        Available options:

        - 'vegetation' : Compute all vegetation indices.
        - 'burn' : Compute all burn indices.
        - 'water' : Compute all water indices.
        - 'snow' : Compute all snow indices.
        - 'drought' : Compute all drought indices.
        - 'urban' : Compute all urban (built-up) indices.
        - 'kernel' : Compute all kernel indices.
        - 'all' : Compute all indices listed below.

        Awesome Spectral Indices for GEE:
        Check the complete list of indices
        `here <https://awesome-ee-spectral-indices.readthedocs.io/en/latest/list.html>`_.
    G : float, default = 2.5
        Gain factor. Used just for index = 'EVI'.
    C1 : float, default = 6.0
        Coefficient 1 for the aerosol resistance term. Used just for index = 'EVI'.
    C2 : float, default = 7.5
        Coefficient 2 for the aerosol resistance term. Used just for index = 'EVI'.
    L : float, default = 1.0
        Canopy background adjustment. Used just for index = ['EVI','SAVI'].
    cexp : float, default = 1.16
        Exponent used for OCVI.
    nexp : float, default = 2.0
        Exponent used for GDVI.
    alpha : float, default = 0.1
        Weighting coefficient used for WDRVI.
    slope : float, default = 1.0
        Soil line slope.
    intercept : float, default = 0.0
        Soil line intercept.
    gamma : float, default = 1.0
        Weighting coefficient used for ARVI.
    kernel : str, default = 'RBF'
        Kernel used for kernel indices.\n
        Available options:

        - 'linear' : Linear Kernel.
        - 'RBF' : Radial Basis Function (RBF) Kernel.
        - 'poly' : Polynomial Kernel.
    sigma : str | float, default = '0.5 * (a + b)'
        Length-scale parameter. Used for kernel = 'RBF'. If str, this must be an
        expression including 'a' and 'b'. If numeric, this must be positive.
    p : float, default = 2.0
        Kernel degree. Used for kernel = 'poly'.
    c : float, default = 1.0
        Free parameter that trades off the influence of higher-order versus lower-order
        terms in the polynomial kernel. Used for kernel = 'poly'. This must be greater
        than or equal to 0.
    online : boolean, default = False
        Whether to retrieve the most recent list of indices directly from the GitHub
        repository and not from the local copy.
    drop : boolean, default = False
        Whether to drop all bands except the new spectral indices.

    Returns
    -------
    ee.ImageCollection
        Image collection with the computed spectral index, or indices, as new bands.

    See Also
    --------
    scaleAndOffset : Scales bands on an image collection.

    Examples
    --------
    >>> import ee, eemont
    >>> ee.Authenticate()
    >>> ee.Initialize()
    >>> S2 = ee.ImageCollection('COPERNICUS/S2_SR').scaleAndOffset()

    - Computing one spectral index:

    >>> S2.spectralIndices('NDVI')

    - Computing indices with different parameters:

    >>> S2.spectralIndices('SAVI',L = 0.5)

    - Computing multiple indices:

    >>> S2.spectralIndices(['NDVI','EVI','GNDVI'])

    - Computing a specific group of indices:

    >>> S2.spectralIndices('vegetation')

    - Computing kernel indices:

    >>> S2.spectralIndices(['kNDVI'],kernel = 'poly',p = 5)

    - Computing all indices:

    >>> S2.spectralIndices('all')
    """
    # All parameters are forwarded positionally, in signature order, to the
    # ee_extra backend which performs the actual computation.
    return ee_extra.Spectral.core.spectralIndices(
        self,
        index,
        G,
        C1,
        C2,
        L,
        cexp,
        nexp,
        alpha,
        slope,
        intercept,
        gamma,
        kernel,
        sigma,
        p,
        c,
        online,
        drop,
    )
@extend(ee.imagecollection.ImageCollection)
def maskClouds(
self,
method="cloud_prob",
prob=60,
maskCirrus=True,
maskShadows=True,
scaledImage=False,
dark=0.15,
cloudDist=1000,
buffer=250,
cdi=None,
):
"""Masks clouds and shadows in an image collection (valid just for Surface
Reflectance products).
Tip
----------
Check more info about the supported platforms and clouds masking in the
:ref:`User Guide<Masking Clouds and Shadows>`.
Parameters
----------
self : ee.ImageCollection [this]
Image collection to mask.
method : string, default = 'cloud_prob'
Method used to mask clouds.\n
Available options:
- 'cloud_prob' : Use cloud probability.
- 'qa' : Use Quality Assessment band.
This parameter is ignored for Landsat products.
prob : numeric [0, 100], default = 60
Cloud probability threshold. Valid just for method = 'cloud_prob'. This parameter
is ignored for Landsat products.
maskCirrus : boolean, default = True
Whether to mask cirrus clouds. Valid just for method = 'qa'. This parameter is
ignored for Landsat products.
maskShadows : boolean, default = True
Whether to mask cloud shadows. For more info see '<NAME>. 2020. Sentinel-2
Cloud Masking with s2cloudless. Google Earth Engine, Community Tutorials'.
scaledImage : boolean, default = | |
but no private key file configured', str(e))
# outstanding queury cache should still be empty
self.assertEqual(session.get('_saml_outstanding_queries',{}), {})
def test_Saml_handle_discovery_response(self):
    """An IdP-discovery response must start SSO against the chosen IdP.

    Configures the SP with an alternate IdP via inline metadata and no
    default IdP, then checks that handle_discovery_response issues a
    redirect to that IdP's SSO endpoint carrying a SAMLRequest and the
    original RelayState.
    """
    session_id = 'a0123456789abcdef0123456789abcdef'
    tmp_sp_config = copy.deepcopy(sp_config)
    entity_id = 'https://sso-alt.example.com/idp/metadata'
    # Inline metadata describing the alternate IdP's SSO/SLO endpoints.
    tmp_sp_config['metadata']['inline_dict'] = [{
        'entityid': entity_id,
        'service': {
            'idp': {
                'name': '<NAME>',
                'endpoints': {
                    'single_sign_on_service': [(
                        'https://sso-alt.example.com/idp/sso',
                        BINDING_HTTP_REDIRECT)],
                    'single_logout_service': [(
                        'https://sso-alt.example.com/idp/slo',
                        BINDING_HTTP_REDIRECT)],
                },
            },
        },
        'cert_file': root_path + '/sso_public.crt',
    }]
    # Remove default idp form SP config.
    del tmp_sp_config['service']['sp']['idp']
    with self.app.test_request_context('/',
            method='GET', query_string=dict(
                session_id=session_id, entityID=entity_id
            )):
        # make the client think there is an outstanding request
        session['_saml_outstanding_queries'] = {session_id: '/next'}
        sp = auth.Saml(tmp_sp_config)
        # Sanity check: no default IdP is configured.
        self.assertEqual(None, sp._config.getattr('idp'))
        resp = sp.handle_discovery_response(request)
        # Expect a redirect to the alternate IdP's SSO endpoint.
        self.assertEqual(resp.status_code, 302)
        self.assert_('SAMLRequest' in resp.headers['Location'])
        url = urlparse.urlparse(resp.headers['Location'])
        self.assertEqual(url.hostname, 'sso-alt.example.com')
        self.assertEqual(url.path, '/idp/sso')
        params = urlparse.parse_qs(url.query)
        self.assert_('SAMLRequest' in params)
        # RelayState must round-trip the stored 'next' URL.
        self.assertEqual(params['RelayState'], ['/next'])
def test_Saml_handle_assertion(self):
    """A signed authn response is accepted only for an outstanding request.

    First posts the assertion with no outstanding query (must fail), then
    with one (must succeed and populate the session caches).
    """
    ava = {'uid': '123456'}
    session_id = 'a0123456789abcdef0123456789abcdef'
    # modifying config in this test, make copy so as not to effect
    # following tests.
    tmp_sp_config = copy.deepcopy(sp_config)
    # create a response to assert upon
    name_id, authn_response = create_authn_response(session_id, ava)
    self.assert_('Signature' in authn_response)
    # test fails if there is no known outstanding auth request
    with self.app.test_request_context('/',
            method='POST',
            data=dict(SAMLResponse=base64.b64encode(authn_response),
                      RelayState='/next')):
        sp = auth.Saml(tmp_sp_config)
        try:
            sp.handle_assertion(request)
            self.fail(
                'Expected Exception due to lack of outstanding request')
        except:
            pass
    # test with default user_id mapping
    with self.app.test_request_context('/',
            method='POST',
            data=dict(SAMLResponse=base64.b64encode(authn_response),
                      RelayState='/next')):
        # make the client think there is an outstanding request
        session['_saml_outstanding_queries'] = {session_id: '/next'}
        user_attributes, resp = sp.handle_assertion(request)
        # The attribute map must expose both the NameID and the IdP's ava.
        self.assertEqual(user_attributes['name_id'][0], name_id)
        self.assertEqual(user_attributes['uid'][0], '123456')
        # Successful handling redirects to the RelayState target.
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp.headers['Location'], '/next')
        # outstanding query cache should now be empty
        self.assertEqual(session.get('_saml_outstanding_queries', {}), {})
        # identity and subject_id should now be set
        self.assertEqual(session.get('_saml_subject_id').text, name_id)
def test_Saml_handle_assertion_encrypted(self):
    """Placeholder test for encrypted assertions (skipped upstream bug)."""
    self.skipTest(
        "Encryption in pysaml2 2.0.0 is possibly broken."
        " If not, I don't know how to get it not blow up.")
    # Everything below is unreachable while the skip above is in place;
    # it is kept so the test can be revived once pysaml2 is fixed.
    ava = {'uid': '123456'}
    session_id = 'a0123456789abcdef0123456789abcdef'
    # Work on a copy so later tests see an unmodified sp_config.
    tmp_sp_config = copy.deepcopy(sp_config)
    # Build an encrypted response to (eventually) assert upon.
    name_id, authn_response = create_authn_response(
        session_id, ava, encrypt_assertion=True)
def test_Saml_handle_assertion_allow_unsolicited(self):
    """With allow_unsolicited, assertions succeed with or without an
    outstanding query.
    """
    ava = {'uid': '123456'}
    session_id = 'a0123456789abcdef0123456789abcdef'
    # modifying config in this test, make copy so as not to effect
    # following tests.
    tmp_sp_config = copy.deepcopy(sp_config)
    # The following setting allows for handling unsolicited
    # assertions which ironically is the expected behavior according to
    # the SAML 2.0 specification
    tmp_sp_config['service']['sp']['allow_unsolicited'] = 'true'
    # create a response to assert upon
    name_id, authn_response = create_authn_response(session_id, ava)
    self.assert_('Signature' in authn_response)
    # test success if no outstanding queries exist
    with self.app.test_request_context('/',
            method='POST',
            data=dict(SAMLResponse=base64.b64encode(authn_response),
                      RelayState='/next')):
        sp = auth.Saml(tmp_sp_config)
        user_attributes, resp = sp.handle_assertion(request)
        self.assertEqual(user_attributes['name_id'][0], name_id)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp.headers['Location'], '/next')
        # identity and subject_id should now be set
        self.assert_(name_id in session.get('_saml_identity').keys()[0])
        self.assertEqual(session.get('_saml_subject_id').text, name_id)
    # test success if outstanding queries exist
    with self.app.test_request_context('/',
            method='POST',
            data=dict(SAMLResponse=base64.b64encode(authn_response),
                      RelayState='/next')):
        sp = auth.Saml(tmp_sp_config)
        session['_saml_outstanding_queries'] = {session_id: '/next'}
        user_attributes, resp = sp.handle_assertion(request)
        self.assertEqual(user_attributes['name_id'][0], name_id)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp.headers['Location'], '/next')
        # outstanding query cache should now be empty
        self.assertEqual(session.get('_saml_outstanding_queries', {}), {})
        # identity and subject_id should now be set
        self.assert_(name_id in session.get('_saml_identity').keys()[0])
        self.assertEqual(session.get('_saml_subject_id').text, name_id)
def test_Saml_handle_assertion_invalid_SAMLResponse(self):
    """Missing, GET-only, malformed, or unsigned SAMLResponses are rejected.

    Covers four failure modes: no SAMLResponse POSTed, SAMLResponse sent
    via GET (treated as missing), an undecodable SAMLResponse, and a
    response lacking the required signature.
    """
    ava = {'uid': '123456'}
    session_id = 'a0123456789abcdef0123456789abcdef'
    # modifying config in this test, make copy so as not to effect
    # following tests.
    tmp_sp_config = copy.deepcopy(sp_config)
    # test missing SAMLResponse
    with self.app.test_request_context('/',
            method='POST'):
        sp = auth.Saml(tmp_sp_config)
        try:
            sp.handle_assertion(request)
            self.fail(
                'Expected BadRequest on missing SAMLResponse POST var')
        except BadRequest, e:
            self.assertEqual(400, e.code)
            self.assertEqual('SAMLResponse missing from POST', e.description)
    # test SAMLResponse via GET (only POST binding is accepted)
    with self.app.test_request_context('/',
            method='GET',
            query_string=dict(SAMLResponse='invalid', RelayState='/next')):
        sp = auth.Saml(tmp_sp_config)
        try:
            sp.handle_assertion(request)
            # NOTE(review): this fail() message was copy-pasted from the
            # previous case; it should mention the GET scenario.
            self.fail(
                'Expected BadRequest on missing SAMLResponse POST var')
        except BadRequest, e:
            self.assertEqual(400, e.code)
            self.assertEqual('SAMLResponse missing from POST', e.description)
    # test invalid SAMLResponse
    with self.app.test_request_context('/',
            method='POST',
            data=dict(SAMLResponse='invalid', RelayState='/next')):
        sp = auth.Saml(tmp_sp_config)
        try:
            sp.handle_assertion(request)
            self.fail(
                'Expected BadRequest on invalid SAMLResponse POST var')
        except BadRequest, e:
            self.assertEqual(400, e.code)
            self.assertEqual('SAML response is invalid', e.description)
    # test on unsigned SAMLResponse when signing is required
    # create a response to assert upon
    name_id, authn_response = create_authn_response(session_id, ava,
        sign_response=False, sign_assertion=False)
    self.assertNotIn('Signature', authn_response)
    with self.app.test_request_context('/',
            method='POST',
            data=dict(SAMLResponse=base64.b64encode(authn_response),
                      RelayState='/next')):
        sp = auth.Saml(tmp_sp_config)
        # test fails if there is no known outstanding auth request
        try:
            sp.handle_assertion(request)
            self.fail(
                'Expected Exception due to lack of outstanding request')
        except:
            pass
        # make the client think there is an outstanding request
        session['_saml_outstanding_queries'] = {session_id: '/next'}
        # test fails because of the missing signature
        try:
            user_attributes, resp = sp.handle_assertion(request)
            self.fail(
                'Expected SignatureError: Signature missing for assertion')
        except SignatureError, e:
            self.assertEqual('Signature missing for assertion', str(e))
def test_Saml_logout(self):
    """logout() issues a (by default signed) LogoutRequest redirect.

    Pretends a user is logged in, calls logout(), and verifies the
    redirect to the IdP's SLO endpoint, the LogoutRequest contents, the
    untouched identity caches, and the '_saml_state' bookkeeping. Then
    repeats with request signing disabled.
    """
    not_on_or_after = int(time.time()) + 3600
    # Minimal pysaml2 identity cache entry keyed by encoded NameID.
    identity = {'4=id-1': {
        'https://sso.example.com/idp/metadata': (
            not_on_or_after, {
                'authn_info': [],
                'name_id': 'id-1',
                'not_on_or_after': not_on_or_after,
                'came_from': '/next',
                'ava': {'uid': ['123456']}
            }
        )
    }}
    # modifying config in this test, make copy so as not to effect
    # following tests.
    tmp_sp_config = copy.deepcopy(sp_config)
    with self.app.test_request_context('/',
            method='GET'):
        sp = auth.Saml(tmp_sp_config)
        # first need to be logged in, let's pretend
        session['_saml_identity'] = identity
        session['_saml_subject_id'] = saml.NameID(text='id-1')
        resp = sp.logout(next_url='/next')
        self.assertEqual(resp.status_code, 302)
        self.assert_("SAMLRequest" in resp.headers['Location'])
        url = urlparse.urlparse(resp.headers['Location'])
        self.assertEqual(url.hostname, 'sso.example.com')
        self.assertEqual(url.path, '/idp/slo')
        params = urlparse.parse_qs(url.query)
        # Redirect-binding signature parameters must be present.
        self.assert_('Signature' in params)
        self.assert_('SigAlg' in params)
        self.assert_('SAMLRequest' in params)
        logout = samlp.logout_request_from_string(
            decode_base64_and_inflate(params['SAMLRequest'][0]))
        self.assertEqual(logout.destination,
            'https://sso.example.com/idp/slo')
        self.assertEqual(logout.name_id.text, 'id-1')
        # check the caches still contain data (cleared only on response)
        self.assertEqual(session['_saml_identity'], identity)
        self.assertEqual(session['_saml_subject_id'].text, 'id-1')
        # verify state cache
        self.assert_(logout.id in session['_saml_state'])
        self.assertEqual(session['_saml_state'][logout.id]['entity_id'],
            'https://sso.example.com/idp/metadata')
        self.assertEqual(session['_saml_state'][logout.id]['entity_ids'],
            ['https://sso.example.com/idp/metadata'])
        self.assertEqual(session['_saml_state'][logout.id]['operation'],
            'SLO')
        # name_id is now being coded/decoded
        self.assertEqual(session['_saml_state'][logout.id]['name_id'],
            '4=id-1')
        self.assertEqual(session['_saml_state'][logout.id]['reason'],
            '')
        self.assertTrue(session['_saml_state'][logout.id]['sign'])
    # test unsigned logout request
    with self.app.test_request_context('/',
            method='GET'):
        tmp_sp_config['service']['sp']['logout_requests_signed'] = False
        sp = auth.Saml(tmp_sp_config)
        # first need to be logged in, let's pretend
        session['_saml_identity'] = identity
        session['_saml_subject_id'] = saml.NameID(text='id-1')
        resp = sp.logout(next_url='/next')
        self.assertEqual(resp.status_code, 302)
        self.assert_("SAMLRequest" in resp.headers['Location'])
        url = urlparse.urlparse(resp.headers['Location'])
        params = urlparse.parse_qs(url.query)
        self.assert_('SAMLRequest' in params)
        logout = samlp.logout_request_from_string(
            decode_base64_and_inflate(params['SAMLRequest'][0]))
        # Unsigned request: no XML signature element.
        self.assertIsNone(logout.signature)
        # verify state cache shows signing off
        self.assertFalse(session['_saml_state'][logout.id]['sign'])
def test_Saml_logout_via_post(self):
    """logout() uses the POST binding when the IdP metadata requires it.

    Swaps in metadata declaring a POST SLO endpoint and verifies logout()
    returns a 200 self-submitting form carrying SAMLRequest/RelayState
    instead of a 302 redirect.
    """
    not_on_or_after = int(time.time()) + 3600
    identity = {'4=id-1': {
        'https://sso.example.com/idp/metadata': (
            not_on_or_after, {
                'authn_info': [('urn:oasis:names:tc:SAML:2.0:ac:classes:Password', [])],
                'name_id': saml.NameID(text='id-1'),
                'not_on_or_after': not_on_or_after,
                'came_from': '/next',
                'ava': {'uid': ['a123456']}
            }
        )
    }}
    # modifying config in this test, make copy so as not to effect
    # following tests.
    tmp_sp_config = copy.deepcopy(sp_config)
    # test signed authentication request
    with self.app.test_request_context('/',
            method='GET'):
        # Metadata file declares a POST-binding SLO endpoint.
        tmp_sp_config['metadata'] = {
            'local': [root_path + '/idp_post_metadata.xml']
        }
        sp = auth.Saml(tmp_sp_config)
        # first need to be logged in, let's pretend
        session['_saml_identity'] = identity
        session['_saml_subject_id'] = saml.NameID(text='id-1')
        resp, status = sp.logout(next_url='/next')
        self.assertEqual(status, 200)
        # The response body is an auto-submitting HTML form.
        _form = unpack_form(resp['data'][3])
        self.assert_('SAMLRequest' in _form)
        self.assert_('RelayState' in _form)
def test_Saml_logout_not_logged_in(self):
    """logout() without a session subject id raises AuthException."""
    # modifying config in this test, make copy so as not to effect
    # following tests.
    tmp_sp_config = copy.deepcopy(sp_config)
    with self.app.test_request_context('/',
            method='GET'):
        sp = auth.Saml(tmp_sp_config)
        try:
            sp.logout(next_url='/next')
            self.fail('Expected AuthException on attempted logout when'
                      ' not logged in')
        except auth.AuthException, e:
            self.assertEqual(
                'Unable to retrieve subject id for logout', str(e))
def test_Saml_logout_required_missing_key_file(self):
    """Signed logout without a configured private key must fail.

    Signing is required by config, but key_file is removed, so logout()
    must raise either pysaml2's SigverError or this module's
    AuthException (which one depends on the pysaml2 version).
    """
    not_on_or_after = int(time.time()) + 3600
    identity = {'4=id-1': {
        'https://sso.example.com/idp/metadata': (
            not_on_or_after, {
                'authn_info': [],
                'name_id': 'id-1',
                'not_on_or_after': not_on_or_after,
                'came_from': '/next',
                'ava': {'uid': ['123456']}
            }
        )
    }}
    # modifying config in this test, make copy so as not to effect
    # following tests.
    tmp_sp_config = copy.deepcopy(sp_config)
    with self.app.test_request_context('/',
            method='GET'):
        # Remove the private key while leaving signing required.
        tmp_sp_config['key_file'] = None
        sp = auth.Saml(tmp_sp_config)
        # first need to be logged in, let's pretend
        session['_saml_identity'] = identity
        session['_saml_subject_id'] = saml.NameID(text='id-1')
        try:
            sp.logout(next_url='/next')
            self.fail(
                'Expected AuthException on invalid Saml logout request')
        except SigverError, e:
            self.assertEqual('Signing failed', str(e))
        except auth.AuthException, e:
            self.assertEqual(
                'Signature requested for this Saml logout request,'
                ' but no private key file configured', str(e))
def test_Saml_handle_logout_response(self):
    """A signed IdP LogoutResponse via GET binding clears the session."""
    not_on_or_after = int(time.time()) + 3600
    identity = {'4=id-1': {
        'https://sso.example.com/idp/metadata': (
            not_on_or_after, {
                'authn_info': [],
                'name_id': 'id-1',
                'not_on_or_after': not_on_or_after,
                'came_from': '/next',
                'ava': {'uid': ['123456']}
            }
        )
    }}
    # outstanding SLO state, later stored keyed by session_id
    state = {
        'entity_ids': ['https://sso.example.com/idp/metadata'],
        'subject_id': 'id-1',
        #'return_to': '/next',
        'name_id': '4=id-1',
        'entity_id': 'https://sso.example.com/idp/metadata',
        'not_on_or_after': not_on_or_after,
        'operation': 'SLO',
        'reason': '',
    }
    # modifying config in this test, make copy so as not to effect
    # following tests.
    tmp_sp_config = copy.deepcopy(sp_config)
    # create a response to assert upon
    sp = auth.Saml(tmp_sp_config)
    session_id, logout_response = create_logout_response('id-1',
        destination='https://sso.example.com/idp/slo',
        issuer_entity_id='https://sso.example.com/idp/metadata',
        req_entity_id='https://foo.example.com/sp/metadata')
    self.assert_('Signature' in logout_response)
    # test SAMLResponse logout as GET
    with self.app.test_request_context('/',
                                       method='GET',
                                       query_string=dict(
                                           SAMLResponse=deflate_and_base64_encode(logout_response),
                                           RelayState='/next')):
        # first need to be logged in, let's pretend
        session['_saml_identity'] = identity
        session['_saml_subject_id'] = saml.NameID(text='id-1')
        session['_saml_state'] = {session_id: state}
        success, resp = sp.handle_logout(request, next_url='/next')
        # successful SLO empties identity/state and drops the subject id,
        # then redirects (302) to the relay-state URL
        self.assertEqual(session.get('_saml_identity', None), {})
        self.assertIsNone(session.get('_saml_subject_id', None))
        self.assertEqual(session.get('_saml_state', None), {})
        self.assertTrue(success)
        self.assertEqual(resp.status_code, 302)
| |
" + sc.Name + " failed: " + process_stderr)
return [-1]
elif sc.Controller == "upstart":
(process_stdout, process_stderr, retval) = Process(
[upstart_start_path, sc.Name])
if retval is not 0:
Print("Error: " + upstart_start_path +
" failed: " + process_stderr, file=sys.stderr)
LG().Log(
'ERROR', "Error: " + upstart_start_path
+ " failed: " + process_stderr)
return [-1]
elif sc.Controller == "init":
check_state_program = initd_service
if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
check_state_program = initd_invokerc
(process_stdout, process_stderr, retval) = Process(
[check_state_program, sc.Name, "start"])
if retval is not 0:
Print("Error: " + check_state_program +
" failed: " + process_stderr, file=sys.stderr)
LG().Log(
'ERROR', "Error: " + check_state_program
+ " failed: " + process_stderr)
return [-1]
if not IsServiceRunning(sc):
Print("Error: " + sc.Name + " start failed: " +
process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + sc.Name +
" start failed: " + process_stderr)
return [-1]
return [0]
def StopService(sc):
    """Stop the service described by *sc* using its configured controller.

    sc.Controller selects 'systemd', 'upstart', or 'init'; the matching
    stop command is executed, and IsServiceRunning then verifies the
    service actually stopped.

    Returns:
        [0] on success, [-1] on any failure (failures are logged).
    """
    if sc.Controller == "systemd":
        (process_stdout, process_stderr, retval) = Process(
            [systemctl_path, "stop", sc.Name])
        # was 'retval is not 0': identity comparison on ints only works by
        # accident of CPython small-int caching; compare values instead
        if retval != 0:
            Print("Error: " + systemctl_path + " failed: " +
                  process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + systemctl_path
                + " failed: " + process_stderr)
            return [-1]
    elif sc.Controller == "upstart":
        (process_stdout, process_stderr, retval) = Process(
            [upstart_stop_path, sc.Name])
        if retval != 0:
            Print("Error: " + upstart_stop_path +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + upstart_stop_path
                + " failed: " + process_stderr)
            return [-1]
    elif sc.Controller == "init":
        check_state_program = initd_service
        # prefer invoke-rc.d on debian-style systems
        if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
            check_state_program = initd_invokerc
        (process_stdout, process_stderr, retval) = Process(
            [check_state_program, sc.Name, "stop"])
        if retval != 0:
            Print("Error: " + check_state_program +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + check_state_program
                + " failed: " + process_stderr)
            return [-1]
    # verify the stop actually took effect
    if IsServiceRunning(sc):
        Print("Error: " + sc.Name + " stop failed: " +
              process_stderr, file=sys.stderr)
        LG().Log('ERROR', "Error: " + sc.Name +
                 " stop failed: " + process_stderr)
        return [-1]
    return [0]
def GetRunLevel():
    """Return the current runlevel as an int, or -1 on error.

    Parses the output of the `runlevel` utility, which is expected to be
    two space-separated tokens ("<previous> <current>", e.g. "N 5").
    """
    (process_stdout, process_stderr, retval) = Process([runlevel_path])
    # was 'retval is not 0' -- use value comparison, not identity
    if retval != 0:
        Print("Error: " + runlevel_path + " failed: " +
              process_stderr, file=sys.stderr)
        LG().Log(
            'ERROR', "Error: " + runlevel_path + " failed: " + process_stderr)
        return -1
    tokens = process_stdout.split(" ")
    # was 'len(tokens) is not 2'
    if len(tokens) != 2:
        Print("Error: unexpected number of tokens from " +
              runlevel_path + ". stdout: " + process_stdout, file=sys.stderr)
        LG().Log('ERROR', "Error: unexpected number of tokens from " +
                 runlevel_path + ". stdout: " + process_stdout)
        return -1
    return int(tokens[1])
def DetermineInitState(stdout):
    """Return True when init-script status output indicates a running service.

    Recognizes the phrasings used by common SysV/upstart status commands
    ("is running", "start/running", "..running", "(running)") as well as a
    bare "running" line.
    """
    running_markers = ("is running", "start/running", "..running", "(running)")
    if any(marker in stdout for marker in running_markers):
        return True
    return stdout.strip() == "running"
def DetermineInitEnabled(stdout, runlevel):
    """Parse chkconfig output and report whether the service is enabled
    at *runlevel*.

    *stdout* looks like "name 0:off 1:off 2:on ... 6:off".  Returns True
    when the entry for the requested runlevel is "on"; False otherwise,
    including any parse failure (which is also logged).
    """
    tokens = stdout.split()
    # drop the service name; the rest are "<level>:<on|off>" entries
    tokens = tokens[1:]
    if runlevel > (len(tokens) - 1):
        Print("runlevel " + str(runlevel) +
              " not found in chkconfig", file=sys.stderr)
        LG().Log(
            'ERROR', "runlevel " + str(runlevel) + " not found in chkconfig")
        return False
    runlevel_tokens = tokens[runlevel].split(":")
    # was 'len(...) is not 2': identity comparison on ints is incorrect;
    # compare values instead
    if len(runlevel_tokens) != 2:
        Print(
            "Unable to determine format for chkconfig run level",
            file=sys.stderr)
        LG().Log(
            'ERROR', "Unable to determine format for chkconfig run level")
        return False
    if runlevel_tokens[1] == "on":
        return True
    else:
        return False
def GetSystemdState(sc):
    """Return 'running' or 'stopped' for *sc* per `systemctl status`."""
    (process_stdout, process_stderr, retval) = Process(
        [systemctl_path, "status", sc.Name])
    # was 'retval is 0' -- identity comparison on ints; compare values
    if retval == 0:
        if '(running)' in process_stdout:
            return "running"
    return "stopped"
def TestSystemdState(sc):
    """True when no desired State is set or it matches systemd's view."""
    desired = sc.State
    if not desired:
        return True
    return desired == GetSystemdState(sc)
def GetSystemdEnabled(sc):
    """Return True when `systemctl is-enabled` succeeds for sc.Name."""
    (process_stdout, process_stderr, retval) = Process(
        [systemctl_path, "is-enabled", sc.Name])
    # was 'retval is 0' -- identity comparison on ints; compare values
    if retval == 0:
        return True
    else:
        return False
def TestSystemdEnabled(sc):
    """True when the desired Enabled flag matches the actual systemd state."""
    # was 'is not': identity comparison between booleans; use value
    # inequality so equivalent values (e.g. 1 vs True) compare correctly
    if sc.Enabled != GetSystemdEnabled(sc):
        return False
    return True
def TestSystemd(sc):
    """Return [0] when systemd exists and both state and enabled match;
    [-1] otherwise (short-circuits in the same order as the checks)."""
    ok = SystemdExists() and TestSystemdState(sc) and TestSystemdEnabled(sc)
    return [0] if ok else [-1]
def GetUpstartState(sc):
    """Return 'running'/'stopped' per upstart status, or '' on failure."""
    (process_stdout, process_stderr, retval) = Process(
        [upstart_status_path, sc.Name])
    # was 'retval is not 0' -- compare values, not identity
    if retval != 0:
        Print("Error: " + upstart_status_path +
              " failed: " + process_stderr, file=sys.stderr)
        LG().Log('ERROR', "Error: " + upstart_status_path +
                 " failed: " + process_stderr)
        return ""
    # status output contains e.g. "<name> start/running, process 1234"
    if (sc.Name + " start") in process_stdout:
        return "running"
    else:
        return "stopped"
def TestUpstartState(sc):
    """True when no desired State is set or it matches upstart's view."""
    desired = sc.State
    if not desired:
        return True
    return desired == GetUpstartState(sc)
def GetUpstartEnabled(sc):
    """Determine whether upstart service sc.Name is enabled.

    Parses /etc/init/<name>.conf looking for 'start on'/'stop on' stanzas.

    Returns:
        True/False for a clear answer, "Complex" when the conf file uses
        conditions we cannot interpret, or "Error" when the file cannot
        be read.
    """
    if os.path.isfile("/etc/init/" + sc.Name + ".conf"):
        start_on_exists = False
        start_on_is_enabled = False
        stop_on_exists = False
        stop_on_is_enabled = False
        file_lines, error = ReadFile("/etc/init/" + sc.Name + ".conf")
        if error is not None:
            Print(
                "Error reading:/etc/init/" + sc.Name + ".conf",
                file=sys.stderr)
            LG().Log('ERROR', "Error reading:/etc/init/" +
                     sc.Name + ".conf")
            return "Error"
        for full_line in file_lines.splitlines():
            # everything after a '#' character is a comment, so strip it off
            line = full_line.split("#")[0]
            if "start on" in line:
                start_on_exists = True
                # compound conditions are beyond us
                if ("(" in line) or ("and" in line) or ("or" in line):
                    return "Complex"
                elif "start on runlevel [" in line:
                    runlevel = GetRunLevel()
                    # digits between '[' and ']', possibly prefixed by '!'
                    specified_runlevel_digits = line.split("[")[1][:-1]
                    if str(runlevel) in specified_runlevel_digits:
                        start_on_is_enabled = True
                    else:
                        start_on_is_enabled = False
                    if "!" in specified_runlevel_digits:
                        start_on_is_enabled = not start_on_is_enabled
                else:
                    return "Complex"
            if "stop on" in line:
                stop_on_exists = True
                if ("(" in line) or ("and" in line) or ("or" in line):
                    return "Complex"
                elif "stop on runlevel [" in line:
                    runlevel = GetRunLevel()
                    specified_runlevel_digits = line.split("[")[1][:-1]
                    if str(runlevel) in specified_runlevel_digits:
                        stop_on_is_enabled = True
                    else:
                        stop_on_is_enabled = False
                    if "!" in specified_runlevel_digits:
                        stop_on_is_enabled = not stop_on_is_enabled
                else:
                    return "Complex"
        if not start_on_exists and not stop_on_exists:  # not upstart
            if os.path.islink('/etc/init.d/' + sc.Name) and \
                    os.readlink('/etc/init.d/' + sc.Name) \
                    == '/lib/init/upstart-job':
                # this is a 'converted' init script, check the default rc2.d
                # for smylink to conf file. if so its enabled.
                file_list = os.listdir('/etc/rc2.d')
                for f in file_list:
                    f = '/etc/rc2.d/' + f
                    if os.path.islink(f) and os.readlink(f) == \
                            "../init.d/" + sc.Name:
                        return True
                return False
            (process_stdout, process_stderr, retval) = Process(
                ['chkconfig', sc.Name, ''])  # try init style
            # was 'retval is 0' -- identity comparison on ints; use '=='
            if retval == 0:
                if 'off' not in process_stdout:
                    return True
            return False
        if start_on_exists and start_on_is_enabled:
            if stop_on_exists and stop_on_is_enabled:
                Print("Error: Having trouble determining whether service " +
                      sc.Name + " is enabled or disabled.", file=sys.stderr)
                LG().Log('ERROR',
                         "Error: Having trouble determining whether service " +
                         sc.Name + " is enabled or disabled.")
                return "Complex"
            else:
                return True
        else:
            return False
        # NOTE(review): unreachable -- both branches above return; kept
        # verbatim from the original so behavior is unchanged.
        Print("Error: Unable to find line containing 'start on' in " +
              sc.Name + ".conf", file=sys.stderr)
        LG().Log('ERROR',
                 "Error: Unable to find line containing 'start on' in " +
                 sc.Name + ".conf")
        return False
    else:
        Print("Error: conf file does not exist for service named " +
              sc.Name, file=sys.stderr)
        LG().Log('ERROR',
                 "Error: conf file does not exist for service named " +
                 sc.Name)
        return False
def TestUpstartEnabled(sc):
    """Return the service's current enabled state, or False when the conf
    file is too complex for this provider to manage (logged as an error)."""
    currently_enabled = GetUpstartEnabled(sc)
    if currently_enabled == "Complex":
        Print("Error: Cannot modify 'Enabled' state for service " + sc.Name +
              ", conf file too complex. Please use the File provider to " +
              "write your own conf file for this service.", file=sys.stderr)
        # log message now matches the stderr text (was ' writeyour')
        LG().Log('ERROR', "Error: Cannot modify 'Enabled' state for service "
                 + sc.Name +
                 ", conf file too complex. Please use the File provider to " +
                 "write your own conf file for this service.")
        return False
    return currently_enabled
def TestUpstart(sc):
    """Return [0] when upstart exists and both state and enabled match the
    desired settings in *sc*; [-1] otherwise."""
    if not UpstartExists():
        return [-1]
    if not TestUpstartState(sc):
        return [-1]
    # was 'is not': identity comparison between booleans; use value
    # inequality so equivalent values (e.g. 1 vs True) compare correctly
    if sc.Enabled != TestUpstartEnabled(sc):
        return [-1]
    return [0]
def GetInitState(sc):
    """Return 'running' or 'stopped' for a SysV-init controlled service.

    Uses the `service` wrapper when available, otherwise invokes the
    /etc/init.d/<name> script directly; falls back to IsServiceRunning
    when the status command fails or is unreliable.
    """
    check_state_program = initd_service
    # debian style init. These are missing in redhat.
    # NOTE(review): this first assignment is redundant (already set above);
    # kept for behavioral parity.
    if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
        check_state_program = initd_service
    if os.path.isfile(initd_service):
        check_state_program = initd_service
    else:  # invoke the service directly
        check_state_program = '/etc/init.d/'
    if check_state_program == '/etc/init.d/':
        (process_stdout, process_stderr, retval) = Process(
            [check_state_program + sc.Name, "status"], True)
        # was 'retval is not 0' -- compare values, not identity
        if retval != 0:
            Print("Error: " + check_state_program +
                  sc.Name + " status failed: ", file=sys.stderr)
            LG().Log('ERROR', "Error: " + check_state_program +
                     sc.Name + " status failed: ")
        if IsServiceRunning(sc):
            return "running"
        else:
            return "stopped"
    else:
        (process_stdout, process_stderr, retval) = Process(
            [check_state_program, sc.Name, "status"])
        if retval != 0:
            # status command failed; fall back to the process check
            if IsServiceRunning(sc):
                return "running"
            else:
                return "stopped"
        if DetermineInitState(process_stdout):
            return "running"
        else:
            return "stopped"
def TestInitState(sc):
    """True when no desired State is set or it matches the init state."""
    desired = sc.State
    if not desired:
        return True
    return desired == GetInitState(sc)
def GetInitEnabled(sc):
runlevel = GetRunLevel()
if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
# A service is enabled if a symbolic link
# exists in /etc/rc${RUNLEVEL}.d/ with the name:
# S??${sc.Name}
matched_files = glob.glob(
"/etc/rc" + str(runlevel) + ".d/S??" + sc.Name)
for f in matched_files:
if os.path.islink(f):
return True
return | |
so
# perhaps it isn't the right thing to do. Also, meh
u_psi_rot_real = np.real(np.exp(-1j*rho) * u_psi)
popt, pcov = curve_fit(func_qu,psi,u_psi_rot_real)
return popt[0:2]
# Result cube indexed as onxpol[p, m, b, i, k]:
#   p = calibration strategy (plots 1-4), m = fractional linear pol,
#   b = 0 spurious on-axis pol / 1 position-angle error,
#   i = parallactic angle separation, k = signal-to-noise ratio
# strategy (1/2/3 slice) -- fracpol -- leakage/posang error -- parsep -- snr
# p m b i k
onxpol = np.zeros([4,fraclinpol.size,2,parsep.size,snr.size])
# == PLOT 1 ==
# polarization known and 1 parallactic angle slice
# crudely assume that errors arise from crosshand phase error combined
# with statistical errors for relative d-term recovery
# it's probably a bit more complicated in reality, but
# this should recover main trends with pretty good accuracy
# which is all we are after
p=0
for m in range(fraclinpol.size):
    L=fraclinpol[m]
    print 'case: '+str(p+1)+'/4'+\
          ' fracpol: '+str(m+1)+'/'+str(fraclinpol.size)
    for k in range(snr.size):
        # Monte-Carlo accumulator: one d-term modulus error per sample
        temparr = np.zeros(samples)
        if strategy1slice:
            for s in range(samples):
                # If sigma defined as dual pol (eg Stokes I) noise, then noise in
                # real or imag part of single pol product (e.g. V_XY) is sqrt(2)*sigma,
                # with additional factor sqrt(Nb) to convert to single-baseline noise.
                # Note that we also need to divide by sqrt(Nant-1) because Nant-1
                # baselines can be used to solve for D_Xi on antenna i.
                # Assume source PA=45 (Q=0, U=L), without loss of generality
                # Q_psi = Qcos2p+Usin2p = Lsin2p
                # U_psi = Ucos2p-Qsin2p = Lcos2p
                # Assume I = 1 Jy, without loss of generality
                u_psi = np.r_[L]
                # therefore q_psi = np.r_[0.] , but not needed here
                # get crosshand phase (1 per array defined on refant)
                # assume solution has been iterated and that the
                # final d-term errors are small enough to be negligible
                # in the baseline-averaged crosshand phase recovery
                # Thus only statistical errors matter here
                # For each slice, all baselines, dual-pol, real or imag component:
                # noise is 1/snr
                noise = np.r_[(np.random.randn()+np.random.randn()*1j)/snr[k]]
                # rho_err is the error in frame rotation
                rho_err = calc_rho_12(u_psi + noise)
                # d-term errors will result because the d-terms will
                # soak up the rho_err induced offset from the true
                # calibrator polarization and leakage offsets.
                # This can be crudely modelled by taking into
                # account the typical offset induced by rho_err in V_XY.
                # Assume small angle error; errors will get crazy
                # at large angles regardless, so no significant effect
                offset = rho_err * (u_psi + \
                    np.random.normal(0,d_typical*np.sqrt(2./pi)) + \
                    np.random.normal(0,d_typical*np.sqrt(2./pi)) )
                # now proceed as if we are calculating d-term modulus errors
                # for an unpolarized source, ie model the true
                # source polarization by the term above
                temparr[s] = np.sqrt((offset**2+np.float(Nant)/snr[k]**2)/2.)
                # offset is practically negligible in the equation above
                # for most observing conditions because rho_err gets
                # progressively smaller with increasing S/N, always
                # remaining smaller than the statistical error term
        # typical error in mod(D), where units of D are percent
        # then divided by sqrt(Nant) to get spurious on-axis LINEAR pol
        # this is same result as for spurious CIRCULAR pol
        # if we were interested in spurious ELLIPTICAL pol then
        # we would instead need to divide by sqrt(2.*Nant/pi)
        if strategy1slice:
            # result from this simulation
            onxpol[p,m,0,0,k] = np.percentile(temparr,mypercentile)/np.sqrt(Nant)*100.
        else:
            # practically indistinguishable result from theory
            onxpol[p,m,0,0,k] = 1./np.sqrt(2.*snr[k]**2)*100.
        # absolute position angle error from quadrature sum of:
        # * statistical error from real part of d-term error (which will
        #   be the same as the value in temparr as this has already been
        #   deprojected with factor 1/sqrt2, yeah lazy)
        # * systematic error from relative leakage solutions (magnitude
        #   of typical d-term real part)
        # * systematic error from mechanical feed offset uncertainty
        if strategy1slice:
            # result from this simulation
            onxpol[p,m,1,0,k] = np.sqrt(np.percentile(temparr,mypercentile)**2 + d_typical**2 + \
                systpaerr_foffset_rad**2/np.float(Nant)) / d2r
        else:
            # practically indistinguishable result from theory
            onxpol[p,m,1,0,k] = np.sqrt(np.float(Nant)/snr[k]**2/2. + d_typical**2 + \
                systpaerr_foffset_rad**2/np.float(Nant)) / d2r
# == PLOT 2 ==
# polarization known and 2 parallactic angle slices
p=1
for m in range(fraclinpol.size):
    L=fraclinpol[m]
    for i in range(parsep.size):
        print 'case: '+str(p+1)+'/4'+\
              ' fracpol: '+str(m+1)+'/'+str(fraclinpol.size)+\
              ' parallactic angle: '+str(i+1)+'/'+str(parsep.size)
        for k in range(snr.size):
            temparr = np.zeros(samples)
            for s in range(samples):
                # same initial conditions as for plot 1, no loss in generality
                # two slices separated by parsep[i] degrees of parallactic angle
                u_psi = np.r_[L ,L*np.cos(2.*parsep[i]*d2r)]
                q_psi = np.r_[0.,L*np.sin(2.*parsep[i]*d2r)]
                # get crosshand phase, again same as for plot 1
                # use slice at max u_psi, this would be used in practice
                noise = np.r_[(np.random.randn()+np.random.randn()*1j)/snr[k]]
                rho_err = calc_rho_12(u_psi[0] + noise)
                # now model typical D-term error in the crosshand phase frame,
                # but where the raw data is rotated additionally by rho_err
                # V_XY_avg = u_psi + (1.-q_psi)*dxi + (1.+q_psi)*sum_[j]^[Nant-1](dyj*)
                # where noise is given by the average over Nant-1 baselines
                # We need to inject true dxi and dyj then try to recover dxi
                dxi = np.random.normal(0,d_typical*np.sqrt(2./pi)) + \
                    np.random.normal(0,d_typical*np.sqrt(2./pi))*1j
                dyj = np.average( np.random.normal(0,d_typical*np.sqrt(2./pi),Nant-1) + \
                    np.random.normal(0,d_typical*np.sqrt(2./pi),Nant-1)*1j )
                V_XY = np.zeros(2,dtype=complex)
                # Technically not V_XY, actually V_XY_corrupted averaged over
                # Nant-1 baselines ... meh ... though if you are reading
                # this and really do care about something as finicky as
                # a variable name, along with the details of this code,
                # then let's sit down for a coffee! We can celebrate
                # that 2 people in the world have looked at this code!
                V_XY = ( u_psi + (1.-q_psi)*dxi + (1.+q_psi)*dyj ) * \
                    np.exp(rho_err*1j) + \
                    (np.random.randn(2)+np.random.randn(2)*1j) / \
                    snr[k]*np.sqrt(2)*np.sqrt(Nb)/np.sqrt(Nant-1)
                # get d-term modulus error
                # divide by sqrt(2) because this error contributes a random
                # direction on the true d-term modulus
                temparr[s] = calc_d(u_psi,q_psi,V_XY,dxi)/np.sqrt(2)
            # spurious lin pol
            onxpol[p,m,0,i,k] = np.percentile(temparr,mypercentile)/np.sqrt(Nant)*100.
            # absolute position angle error
            # strategy recovers absolute leakages
            onxpol[p,m,1,i,k] = np.sqrt(np.percentile(temparr,mypercentile)**2 + \
                systpaerr_foffset_rad**2/np.float(Nant)) / d2r
# == PLOT 3 ==
# polarization unknown and 3 parallactic angle slices
p=2
for m in range(fraclinpol.size):
    L=fraclinpol[m]
    for i in range(parsep.size):
        print 'case: '+str(p+1)+'/4'+\
              ' fracpol: '+str(m+1)+'/'+str(fraclinpol.size)+\
              ' parallactic angle: '+str(i+1)+'/'+str(parsep.size)
        for k in range(snr.size):
            temparr = np.zeros(samples)
            for s in range(samples):
                # same initial conditions as for plot 1, no loss in generality
                # nscans[p-2] slices spread evenly over parsep[i] degrees
                psi = np.zeros(nscans[p-2])
                u_psi_obs = np.zeros(nscans[p-2],dtype=complex)
                for n in range(nscans[p-2]):
                    psi[n] = parsep[i]*d2r * np.float(n)/np.float(nscans[p-2]-1)
                    u_psi_obs[n] = L*np.cos(2.*psi[n]) + \
                        (np.random.randn()+np.random.randn()*1j)/snr[k]
                # get crosshand phase and Stokes Q & U
                # assume solution has been iterated and that the
                # final d-term errors are small-ish in the
                # baseline-averaged crosshand phase recovery.
                # Thus only statistical errors matter here
                # Don't assume that the curve goes through the origin;
                # this will then be the same behaviour as the real solver.
                # While I*D and Q*D may be largely removed through
                # iteration, some small residual may exist, which the
                # solver fits out. Note that if iteration is not used,
                # the real CASA solver will handle the I*D offset,
                # but it will not properly account for the Q*D terms,
                # in turn slightly corrupting the fits.
                rho_err = calc_rho(u_psi_obs)
                q , u = calc_qu(rho_err,psi,u_psi_obs)
                u_psi = u*np.cos(2.*psi) - q*np.sin(2.*psi)
                q_psi = q*np.cos(2.*psi) + u*np.sin(2.*psi)
                # now continue as with plot 2
                dxi = np.random.normal(0,d_typical*np.sqrt(2./pi)) + \
                    np.random.normal(0,d_typical*np.sqrt(2./pi))*1j
                dyj = np.average( np.random.normal(0,d_typical*np.sqrt(2./pi),Nant-1) + \
                    np.random.normal(0,d_typical*np.sqrt(2./pi),Nant-1)*1j )
                V_XY = np.zeros(nscans[p-2],dtype=complex)
                V_XY = ( u_psi + (1.-q_psi)*dxi + (1.+q_psi)*dyj ) * \
                    np.exp(rho_err*1j) + \
                    (np.random.randn(nscans[p-2])+np.random.randn(nscans[p-2])*1j) / \
                    snr[k]*np.sqrt(2)*np.sqrt(Nb)/np.sqrt(Nant-1)
                temparr[s] = calc_d(u_psi,q_psi,V_XY,dxi)/np.sqrt(2)
            # spurious lin pol and absolute position angle error, as in plot 2
            onxpol[p,m,0,i,k] = np.percentile(temparr,mypercentile)/np.sqrt(Nant)*100.
            onxpol[p,m,1,i,k] = np.sqrt(np.percentile(temparr,mypercentile)**2 + \
                systpaerr_foffset_rad**2/np.float(Nant)) / d2r
# == PLOT 4 ==
# polarization unknown and 10 parallactic angle slices
p=3
for m in range(fraclinpol.size):
L=fraclinpol[m]
for i in range(parsep.size):
print 'case: '+str(p+1)+'/4'+\
' fracpol: '+str(m+1)+'/'+str(fraclinpol.size)+\
' parallactic angle: '+str(i+1)+'/'+str(parsep.size)
for k in range(snr.size):
temparr = np.zeros(samples)
for s in range(samples):
psi = np.zeros(nscans[p-2])
u_psi_obs = np.zeros(nscans[p-2],dtype=complex)
for n in range(nscans[p-2]):
psi[n] = parsep[i]*d2r * np.float(n)/np.float(nscans[p-2]-1)
u_psi_obs[n] = L*np.cos(2.*psi[n]) + \
(np.random.randn()+np.random.randn()*1j)/snr[k]
rho_err = calc_rho(u_psi_obs)
q , u = calc_qu(rho_err,psi,u_psi_obs)
u_psi = u*np.cos(2.*psi) - q*np.sin(2.*psi)
q_psi = q*np.cos(2.*psi) + u*np.sin(2.*psi)
# now continue as with plot 2
dxi = np.random.normal(0,d_typical*np.sqrt(2./pi)) + \
np.random.normal(0,d_typical*np.sqrt(2./pi))*1j
dyj = np.average( np.random.normal(0,d_typical*np.sqrt(2./pi),Nant-1) + \
np.random.normal(0,d_typical*np.sqrt(2./pi),Nant-1)*1j )
V_XY = np.zeros(nscans[p-2],dtype=complex)
V_XY = ( u_psi + (1.-q_psi)*dxi + | |
<filename>appuselfbot.py
import collections
import datetime
import math
import subprocess
import asyncio
import random
import glob
import gc
import psutil
import sys
import re
from datetime import timezone
from cogs.utils.allmsgs import *
from discord_webhooks import *
from cogs.utils.checks import *
from discord.ext import commands
# Load user settings and build the reply prefix/bot instance.
config = load_config()
bot_prefix = config['bot_identifier']
if bot_prefix != '':
    # pad with a space so replies read "<identifier> <message>"
    bot_prefix += ' '
bot = commands.Bot(command_prefix=config['cmd_prefix'], description='''Selfbot by appu1232''', self_bot=True)
# Startup
@bot.event
async def on_ready():
    """One-time startup: print login info, seed counters and mutable state
    on the bot object, restore settings from the settings/ JSON files,
    and (if configured) spawn the desktop-notifier subprocess."""
    print('Logged in as')
    try:
        print(bot.user.name)
    except:
        pass
    print('User id:' + str(bot.user.id))
    print('------')
    bot.uptime = datetime.datetime.now()
    # counters consumed elsewhere (stats/keyword commands)
    bot.icount = bot.message_count = bot.mention_count = bot.keyword_log = 0
    # NOTE(review): both names are bound to the SAME dict object here --
    # looks unintentional, but other handlers may rely on it; confirm
    # before separating them.
    bot.self_log = bot.all_log = {}
    bot.imagedumps = []
    bot.default_status = ''
    bot.is_stream = False
    bot.game = bot.game_interval = bot.avatar = bot.avatar_interval = bot.subpro = bot.keyword_found = None
    bot.game_time = bot.avatar_time = bot.gc_time = bot.refresh_time = time.time()
    # announce in the original channel if we are coming back from a restart
    if os.path.isfile('restart.txt'):
        with open('restart.txt', 'r') as re:
            # NOTE(review): 're' shadows the imported re module inside this
            # with-block (harmless here, but easy to trip over)
            channel = bot.get_channel(re.readline())
            print('Bot has restarted.')
            await bot.send_message(channel, bot_prefix + 'Bot has restarted.')
            os.remove('restart.txt')
    with open('settings/log.json', 'r') as log:
        bot.log_conf = json.load(log)
    bot.key_users = bot.log_conf['keyusers']
    if os.path.isfile('settings/games.json'):
        with open('settings/games.json', 'r+') as g:
            games = json.load(g)
            # a list means several games rotated on an interval
            if type(games['games']) is list:
                bot.game = games['games'][0]
                bot.game_interval = games['interval']
            else:
                bot.game = games['games']
            if 'stream' not in games:
                games['stream'] = 'no'
            if games['stream'] == 'yes':
                bot.is_stream = True
            # rewrite the file in place with any added defaults
            g.seek(0)
            g.truncate()
            json.dump(games, g, indent=4)
    # Dealing with old versions updating
    if not os.path.exists('avatars'):
        os.makedirs('avatars')
    if not os.path.isfile('settings/avatars.json'):
        with open('settings/avatars.json', 'w') as avis:
            json.dump({'password': '', 'interval': '0', 'type': 'random'}, avis, indent=4)
    with open('settings/avatars.json', 'r') as g:
        avatars = json.load(g)
    bot.avatar_interval = avatars['interval']
    # pick a starting avatar when rotation is enabled
    if os.listdir('avatars') and avatars['interval'] != '0':
        all_avis = os.listdir('avatars')
        all_avis.sort()
        avi = random.choice(all_avis)
        bot.avatar = avi
    if not os.path.isfile('settings/optional_config.json'):
        conf = load_config()
        o_conf = {'google_api_key': conf['google_api_key'], 'custom_search_engine': conf['custom_search_engine'], 'mal_username': conf['mal_username'], 'mal_password': conf['mal_password']}
        with open('settings/optional_config.json', 'w') as oc:
            json.dump(o_conf, oc, indent=4)
    # backfill any newly-introduced optional settings with defaults
    with open('settings/optional_config.json', 'r+') as fp:
        opt = json.load(fp)
        if 'customcmd_color' not in opt:
            opt['customcmd_color'] = '27007A'
        if 'rich_embed' not in opt:
            opt['rich_embed'] = 'on'
        if 'default_status' not in opt:
            opt['default_status'] = 'idle'
        if 'ascii_font' not in opt:
            opt['ascii_font'] = 'big'
        fp.seek(0)
        fp.truncate()
        json.dump(opt, fp, indent=4)
    with open('settings/notify.json', 'r') as n:
        notif = json.load(n)
    if notif['type'] == 'dm':
        # kill a stale notifier process from a previous run, then start a
        # fresh one and record its pid
        if os.path.exists('notifier.txt'):
            pid = open('notifier.txt', 'r').read()
            try:
                p = psutil.Process(int(pid))
                p.kill()
            except:
                pass
            os.remove('notifier.txt')
        bot.subpro = subprocess.Popen([sys.executable, 'cogs/utils/notify.py'])
        with open('notifier.txt', 'w') as fp:
            fp.write(str(bot.subpro.pid))
@bot.command(pass_context=True, aliases=['reboot'])
async def restart(ctx):
    """Restarts the bot."""
    def check(msg):
        # wait_for_message predicate: accept only a literal y/n reply
        if msg:
            return msg.content.lower().strip() == 'y' or msg.content.lower().strip() == 'n'
        else:
            return False
    latest = update_bot(True)
    if latest:
        await bot.send_message(ctx.message.channel, bot_prefix + 'There is an update available for the bot. Download and apply the update on restart? (y/n)')
        reply = await bot.wait_for_message(timeout=10, author=ctx.message.author, check=check)
        if not reply or reply.content.lower().strip() == 'n':
            # plain restart: remember the channel so on_ready can announce
            with open('restart.txt', 'w') as re:
                print('Restarting...')
                re.write(str(ctx.message.channel.id))
            await bot.send_message(ctx.message.channel, bot_prefix + 'Restarting...')
        else:
            await bot.send_message(ctx.message.channel, content=None, embed=latest)
            # quit.txt containing 'update' presumably tells the launcher
            # script to pull the update before relaunching -- confirm
            with open('quit.txt', 'w') as q:
                q.write('update')
            print('Downloading update and restarting...')
            await bot.send_message(ctx.message.channel, bot_prefix + 'Downloading update and restarting (check your console to see the progress)...')
    else:
        print('Restarting...')
        with open('restart.txt', 'w') as re:
            re.write(str(ctx.message.channel.id))
        await bot.send_message(ctx.message.channel, bot_prefix + 'Restarting...')
    # stop the notifier helper (if any) and hard-exit the process
    if bot.subpro:
        bot.subpro.kill()
    os._exit(0)
@bot.command(pass_context=True, aliases=['upgrade'])
async def update(ctx, msg: str = None):
    """Update the bot if there is an update available."""
    # 'show' only displays the changelog embed without applying anything
    if msg:
        latest = update_bot(False) if msg == 'show' else update_bot(True)
    else:
        latest = update_bot(True)
    if latest:
        if not msg == 'show':
            if embed_perms(ctx.message):
                await bot.send_message(ctx.message.channel, content=None, embed=latest)
            await bot.send_message(ctx.message.channel, bot_prefix + 'There is an update available. Downloading update and restarting (check your console to see the progress)...')
        else:
            await bot.send_message(ctx.message.channel, content=None, embed=latest)
            return
        # signal the launcher to apply the update, remember the channel
        # for the post-restart announcement, then hard-exit
        with open('quit.txt', 'w') as q:
            q.write('update')
        with open('restart.txt', 'w') as re:
            re.write(str(ctx.message.channel.id))
        if bot.subpro:
            bot.subpro.kill()
        os._exit(0)
    else:
        await bot.send_message(ctx.message.channel, bot_prefix + 'The bot is up to date.')
@bot.command(pass_context=True, aliases=['stop'])
async def quit(ctx):
    """Quits the bot."""
    print('Bot exiting...')
    if bot.subpro:
        # stop the notifier helper process if one was started
        bot.subpro.kill()
    # an empty quit.txt (no 'update' inside) presumably signals the
    # launcher this is a clean shutdown, not a restart -- confirm
    open('quit.txt', 'a').close()
    await bot.send_message(ctx.message.channel, bot_prefix + 'Bot shut down.')
    os._exit(0)
@bot.command(pass_context=True)
async def reload(ctx):
    """Reloads all modules."""
    # snapshot the extension names first: unloading mutates bot.extensions
    loaded = list(bot.extensions)
    failed = False
    for ext in loaded:
        bot.unload_extension(ext)
        try:
            bot.load_extension(ext)
        except:
            await bot.send_message(ctx.message.channel, bot_prefix + 'Failed to reload extension ``%s``' % ext)
            failed = True
    if failed:
        await bot.send_message(ctx.message.channel, bot_prefix + 'Reloaded remaining extensions.')
    else:
        await bot.send_message(ctx.message.channel, bot_prefix + 'Reloaded all extensions.')
# On all messages sent (for quick commands, custom commands, and logging messages)
@bot.event
async def on_message(message):
await bot.wait_until_ready()
await bot.wait_until_login()
if hasattr(bot, 'message_count'):
bot.message_count += 1
# If the message was sent by me
if message.author.id == bot.user.id:
bot.icount += 1
if hasattr(bot, 'self_log'):
if message.channel.id not in bot.self_log:
bot.self_log[message.channel.id] = collections.deque(maxlen=100)
bot.self_log[message.channel.id].append(message)
if message.content.startswith(config['customcmd_prefix']):
response = custom(message.content.lower().strip())
if response:
await bot.delete_message(message)
with open('settings/optional_config.json', 'r') as fp:
opt = json.load(fp)
if opt['rich_embed'] == 'on':
if response[0] == 'embed' and embed_perms(message):
try:
if opt['customcmd_color'] != '':
color = int('0x' + opt['customcmd_color'], 16)
await bot.send_message(message.channel, content=None, embed=discord.Embed(colour=color).set_image(url=response[1]))
else:
await bot.send_message(message.channel, content=None, embed=discord.Embed().set_image(url=response[1]))
except:
await bot.send_message(message.channel, response[1])
else:
await bot.send_message(message.channel, response[1])
else:
await bot.send_message(message.channel, response[1])
else:
response = quickcmds(message.content.lower().strip())
if response:
await bot.delete_message(message)
await bot.send_message(message.channel, response)
notified = message.mentions
if notified:
for i in notified:
if i.id == bot.user.id:
bot.mention_count += 1
if not hasattr(bot, 'log_conf'):
with open('settings/log.json', 'r') as log:
bot.log_conf = json.load(log)
# Keyword logging.
if bot.log_conf['keyword_logging'] == 'on':
try:
word_found = False
if (bot.log_conf['allservers'] == 'True' or str(message.server.id) in bot.log_conf['servers']) and (message.server.id not in bot.log_conf['blacklisted_servers'] and message.channel.id not in bot.log_conf['blacklisted_channels']):
add_alllog(message.channel.id, message.server.id, message)
if not message.author.bot and not any(x in message.author.id for x in bot.log_conf['blacklisted_users']):
for word in bot.log_conf['keywords']:
if ' [server]' in word:
word, server = word.split(' [server]')
if message.server.id != server:
continue
elif ' [channel]' in word:
word, channel = word.split(' [channel]')
if message.channel.id != channel:
continue
if word.startswith('[isolated]'):
word = word[10:].lower()
found = re.findall('\\b' + word + '\\b', message.content.lower())
if found:
word_found = True
break
else:
if word.lower() in message.content.lower() and message.author.id != bot.user.id:
word_found = True
break
for x in bot.log_conf['blacklisted_words']:
if '[server]' in x:
bword, id = x.split('[server]')
if bword.strip().lower() in message.content.lower() and message.server.id == id:
word_found = False
break
elif '[channel]' in x:
bword, id = x.split('[channel]')
if bword.strip().lower() in message.content.lower() and message.channel.id == id:
word_found = False
break
if x.lower() in message.content.lower():
word_found = False
break
user_found = False
if bot.log_conf['user_logging'] == 'on':
if '{} {}'.format(str(message.author.id), str(message.server.id)) in bot.log_conf['keyusers']:
if user_post(bot, '{} {}'.format(str(message.author.id), str(message.server.id))):
user_found = message.author.name
elif '{} all'.format(str(message.author.id)) in bot.log_conf['keyusers']:
if user_post(bot, '{} all'.format(str(message.author.id))):
user_found = message.author.name
if word_found is True or user_found:
if bot.log_conf['user_location'] != bot.log_conf['log_location'] and bot.log_conf['user_location'] != '' and not word_found:
location = bot.log_conf['user_location'].split()
is_separate = True
else:
location = bot.log_conf['log_location'].split()
is_separate = False
server = bot.get_server(location[1])
if message.channel.id != location[0]:
msg = message.clean_content.replace('`', '')
context = []
try:
for i in range(0, int(bot.log_conf['context_len'])):
context.append(bot.all_log[message.channel.id + ' ' + message.server.id][len(bot.all_log[message.channel.id + ' ' + message.server.id])-i-2])
msg = ''
for i in range(0, int(bot.log_conf['context_len'])):
temp = context[len(context)-i-1][0]
if temp.clean_content:
msg += 'User: %s | %s\n' % (temp.author.name, temp.timestamp.replace(tzinfo=timezone.utc).astimezone(tz=None).__format__('%x @ %X')) + temp.clean_content.replace('`', '') + '\n\n'
msg += 'User: %s | %s\n' % (message.author.name, message.timestamp.replace(tzinfo=timezone.utc).astimezone(tz=None).__format__('%x @ %X')) + message.clean_content.replace('`', '')
success = True
except:
success = False
msg = 'User: %s | %s\n' % (message.author.name, message.timestamp.replace(tzinfo=timezone.utc).astimezone(tz=None).__format__('%x @ %X')) + msg
part = int(math.ceil(len(msg) / 1950))
notify = load_notify_config()
if user_found:
title = '%s posted' % user_found
else:
title = '%s mentioned: %s' % (message.author.name, word)
if part == 1 and success is True:
em = discord.Embed(timestamp=message.timestamp, color=0xbc0b0b, title=title, description='Server: ``%s``\nChannel: ``%s``\n\n**Context:**' % (str(message.server), str(message.channel)))
for i in range(0, int(bot.log_conf['context_len'])):
temp = context.pop()
if temp[0].clean_content:
em.add_field(name='%s' % temp[0].author.name, value=temp[0].clean_content, inline=False)
em.add_field(name='%s' % message.author.name, value=message.clean_content, inline=False)
try:
em.set_thumbnail(url=message.author.avatar_url)
except:
pass
if notify['type'] == 'msg':
await webhook(em, 'embed', is_separate)
elif notify['type'] == 'ping':
await webhook(em, 'embed ping', is_separate)
else:
await bot.send_message(server.get_channel(location[0]), embed=em)
else:
split_list = [msg[i:i + 1950] for i in range(0, len(msg), 1950)]
all_words = []
split_msg = ''
for i, blocks in enumerate(split_list):
for b in blocks.split('\n'):
split_msg += b + '\n'
all_words.append(split_msg)
split_msg = ''
if user_found:
logged_msg = '``%s`` posted' % user_found
else:
logged_msg = '``%s`` mentioned' % word
for b, i in enumerate(all_words):
if b == 0:
if notify['type'] == 'msg':
await webhook(bot_prefix + '%s in server: ``%s`` Context: ```Channel: %s\n\n%s```' % (logged_msg, str(message.server), str(message.channel), i), 'message', is_separate)
elif notify['type'] == 'ping':
await webhook(bot_prefix + '%s in server: | |
Solver(name='mc', bootstrap_with=[[1], [2], [3]])
>>> s.add_atmost(lits=[1, 2, 3], k=2, no_return=False)
False
>>> # the AtMostK constraint is in conflict with initial unit clauses
"""
if self.solver:
res = self.solver.add_atmost(lits, k, no_return)
if not no_return:
return res
def append_formula(self, formula, no_return=True):
    """
    Add a whole list of clauses into the underlying solver.

    :param formula: a list of clauses.
    :param no_return: check solver's internal formula and return the
        result, if set to ``False``.

    :type formula: iterable(iterable(int))
    :type no_return: bool

    The ``no_return`` argument is set to ``True`` by default.

    :rtype: bool if ``no_return`` is set to ``False``.

    .. code-block:: python

        >>> cnf = CNF()
        ... # assume the formula contains clauses
        >>> s = Solver()
        >>> s.append_formula(cnf.clauses, no_return=False)
        True
    """
    # No underlying oracle: silently do nothing (returns None).
    if not self.solver:
        return
    outcome = self.solver.append_formula(formula, no_return)
    # Only surface the solver's answer when the caller asked for it.
    return outcome if not no_return else None
def supports_atmost(self):
    """
    Report whether the underlying solver natively handles AtMostK
    (see :mod:`pysat.card`) constraints.

    :rtype: bool

    A usage example is the following:

    .. code-block:: python

        >>> s = Solver(name='mc')
        >>> s.supports_atmost()
        True
        >>> # there is support for AtMostK constraints in this solver
    """
    # Delegate to the concrete backend; None if no solver is attached.
    if not self.solver:
        return
    return self.solver.supports_atmost()
@staticmethod
def _proof_bin2text(bytes_):
"""
Auxiliary method to translate a proof specified in the binary DRUP
format to the text DRUP format.
:param bytes_: proof-trace as a sequence of bytes
:type bytes_: bytearray
:rtype: list(str)
"""
# necessary variables
proof, lits, lit, shift, newbeg = [], [], 0, 0, True
for byte in bytes_:
if newbeg:
# new clause; here, we expect either 'a' or 'd'
if byte == 100:
lits.append('d')
else:
assert byte == 97, 'clause should start with either \'a\' or \'d\''
newbeg = False
else:
# this is a byte of an actual literal
if byte:
lit |= (byte & 0x7f) << shift
shift += 7
if byte >> 7 == 0:
# MSB is zero => this is the last byte of the literal
lits.append(str((1 if lit % 2 == 0 else -1) * (lit >> 1)))
lit, shift = 0, 0
else:
# zero-byte indicates the end of clause
lits.append('0')
proof.append(' '.join(lits))
lits, newbeg = [], True
if not newbeg and not lits:
proof.append('0')
return proof
#
#==============================================================================
class Cadical(object):
    """
    CaDiCaL SAT solver accessed through the ``pysolvers`` C extension.

    The wrapper is non-incremental: incremental mode, limited solving,
    budgets, literal propagation, phase setting and native AtMostK
    constraints are unsupported and raise :exc:`NotImplementedError`.
    """

    def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
            with_proof=False):
        """
        Basic constructor.

        :param bootstrap_with: iterable of clauses to add on creation
        :param use_timer: measure time spent in oracle calls
        :param incr: must be ``False``; incremental mode is unsupported
        :param with_proof: collect a DRUP proof while solving
        :raises NotImplementedError: if ``incr`` is requested
        """
        if incr:
            raise NotImplementedError('Incremental mode is not supported by CaDiCaL.')

        self.cadical = None  # handle to the native solver object
        self.status = None   # outcome of the most recent solve() call
        self.prfile = None   # temporary file receiving the binary proof

        self.new(bootstrap_with, use_timer, with_proof)

    def __enter__(self):
        """
        'with' constructor: return the solver itself.
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """
        'with' destructor: release the native solver.
        """
        self.delete()
        self.cadical = None

    def new(self, bootstrap_with=None, use_timer=False, with_proof=False):
        """
        Actual constructor of the solver. Does nothing if a native
        solver object already exists.
        """
        if not self.cadical:
            self.cadical = pysolvers.cadical_new()

            # proof tracing is enabled before bootstrap clauses are added,
            # so the bootstrap clauses are visible to the proof tracer
            if with_proof:
                self.prfile = tempfile.TemporaryFile()
                pysolvers.cadical_tracepr(self.cadical, self.prfile)

            if bootstrap_with:
                # native cardinality constraints cannot be handled
                if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
                    raise NotImplementedError('Atmost constraints are not supported by CaDiCaL')

                for clause in bootstrap_with:
                    self.add_clause(clause)

            self.use_timer = use_timer
            self.call_time = 0.0  # time spent for the last call to oracle
            self.accu_time = 0.0  # time accumulated for all calls to oracle

    def delete(self):
        """
        Destructor: free the native solver and close the proof file.
        """
        if self.cadical:
            pysolvers.cadical_del(self.cadical, self.prfile)
            self.cadical = None

            if self.prfile:
                self.prfile.close()

    def solve(self, assumptions=[]):
        """
        Solve internal formula under the given assumption literals.
        Returns True/False, or None if there is no native solver.
        """
        if self.cadical:
            if self.use_timer:
                start_time = process_time()

            self.status = pysolvers.cadical_solve(self.cadical, assumptions,
                    int(MainThread.check()))

            if self.use_timer:
                self.call_time = process_time() - start_time
                self.accu_time += self.call_time

            # remembered so that get_core() can relate the core to them
            self.prev_assumps = assumptions
            return self.status

    def solve_limited(self, assumptions=[], expect_interrupt=False):
        """
        Solve internal formula using given budgets for conflicts and
        propagations. Unsupported for CaDiCaL.
        """
        raise NotImplementedError('Limited solve is currently unsupported by CaDiCaL.')

    def conf_budget(self, budget):
        """
        Set limit on the number of conflicts. Unsupported for CaDiCaL.
        """
        raise NotImplementedError('Limited solve is currently unsupported by CaDiCaL.')

    def prop_budget(self, budget):
        """
        Set limit on the number of propagations. Unsupported for CaDiCaL.
        """
        raise NotImplementedError('Limited solve is currently unsupported by CaDiCaL.')

    def interrupt(self):
        """
        Interrupt solver execution. Unsupported for CaDiCaL.
        """
        raise NotImplementedError('Limited solve is currently unsupported by CaDiCaL.')

    def clear_interrupt(self):
        """
        Clears an interruption. Unsupported for CaDiCaL.
        """
        raise NotImplementedError('Limited solve is currently unsupported by CaDiCaL.')

    def propagate(self, assumptions=[], phase_saving=0):
        """
        Propagate a given set of assumption literals. Unsupported.
        """
        raise NotImplementedError('Simple literal propagation is not yet implemented for CaDiCaL.')

    def set_phases(self, literals=[]):
        """
        Sets polarities of a given list of variables. Unsupported.
        """
        raise NotImplementedError('Setting preferred phases is not yet implemented for CaDiCaL.')

    def get_status(self):
        """
        Returns the outcome of the last solve() call (True/False/None).
        """
        if self.cadical:
            return self.status

    def get_model(self):
        """
        Get a model if the formula was previously satisfied.
        Returns a list of literals, [] if the backend gave no model.
        """
        if self.cadical and self.status == True:
            model = pysolvers.cadical_model(self.cadical)
            return model if model != None else []

    def get_core(self):
        """
        Get an unsatisfiable core (subset of the last assumptions) if
        the formula was previously found unsatisfiable.
        """
        if self.cadical and self.status == False:
            return pysolvers.cadical_core(self.cadical, self.prev_assumps)

    def get_proof(self):
        """
        Get a text-format DRUP proof produced when deciding the formula.
        Requires the solver to have been created with ``with_proof``.
        """
        if self.cadical and self.prfile:
            self.prfile.seek(0)

            # stripping may cause issues here!
            return Solver._proof_bin2text(bytearray(self.prfile.read()).strip())

    def time(self):
        """
        Get time spent for the last call to oracle.
        """
        if self.cadical:
            return self.call_time

    def time_accum(self):
        """
        Get time accumulated for all calls to oracle.
        """
        if self.cadical:
            return self.accu_time

    def nof_vars(self):
        """
        Get number of variables currently used by the solver.
        """
        if self.cadical:
            return pysolvers.cadical_nof_vars(self.cadical)

    def nof_clauses(self):
        """
        Get number of clauses currently used by the solver.
        """
        if self.cadical:
            return pysolvers.cadical_nof_cls(self.cadical)

    def accum_stats(self):
        """
        Get accumulated low-level stats from the solver. This includes
        the number of restarts, conflicts, decisions and propagations.
        """
        if self.cadical:
            return pysolvers.cadical_acc_stats(self.cadical)

    def enum_models(self, assumptions=[]):
        """
        Iterate over models of the internal formula. Each model found
        is blocked by adding its negation as a clause, which permanently
        modifies the solver's formula.
        """
        if self.cadical:
            done = False
            while not done:
                self.status = self.solve(assumptions=assumptions)
                model = self.get_model()

                if model is not None:
                    self.add_clause([-l for l in model])  # blocking model
                    yield model
                else:
                    done = True

    def add_clause(self, clause, no_return=True):
        """
        Add a new clause to solver's internal formula. Returns the
        backend's result only when ``no_return`` is ``False``.
        """
        if self.cadical:
            res = pysolvers.cadical_add_cl(self.cadical, clause)

            if res == False:
                self.status = False

            if not no_return:
                return res

    def add_atmost(self, lits, k, no_return=True):
        """
        Atmost constraints are not supported by CaDiCaL.
        """
        raise NotImplementedError('Atmost constraints are not supported by CaDiCaL.')

    def append_formula(self, formula, no_return=True):
        """
        Appends a list of clauses to solver's internal formula. Raises
        for CNFPlus formulas that carry atmost constraints.
        """
        if self.cadical:
            res = None

            if type(formula) == CNFPlus and formula.atmosts:
                raise NotImplementedError('Atmost constraints are not supported by CaDiCaL')

            for clause in formula:
                res = self.add_clause(clause, no_return)

                # stop early on the first falsified clause
                if not no_return and res == False:
                    return res

            if not no_return:
                return res

    def supports_atmost(self):
        """
        This method can be called to determine whether the solver supports
        native AtMostK (see :mod:`pysat.card`) constraints.
        """
        return False
#
#==============================================================================
class Gluecard3(object):
"""
Gluecard 3 SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
        with_proof=False):
    """
    Basic constructor: initialize the handle slots and defer the real
    construction work to :meth:`new`.
    """
    self.gluecard, self.status, self.prfile = None, None, None
    self.new(bootstrap_with, use_timer, incr, with_proof)
def __enter__(self):
    """
    'with' constructor: return the solver itself.
    """
    return self
def __exit__(self, exc_type, exc_value, traceback):
    """
    'with' destructor: release the native solver object.
    """
    self.delete()
    self.gluecard = None
def new(self, bootstrap_with=None, use_timer=False, incr=False,
        with_proof=False):
    """
    Actual constructor of the solver. Does nothing if a native solver
    object already exists.

    :param bootstrap_with: iterable of clauses and/or atmost constraints
        (an atmost is a pair ``(lits, k)``) to load on creation
    :param use_timer: measure time spent in oracle calls
    :param incr: enable incremental mode (mutually exclusive with proof
        tracing)
    :param with_proof: enable DRUP proof logging into a temporary file
    """
    assert not incr or not with_proof, 'Incremental mode and proof tracing cannot be set together.'

    if not self.gluecard:
        self.gluecard = pysolvers.gluecard3_new()

        if bootstrap_with:
            for clause in bootstrap_with:
                # a 2-element entry whose head is not an int is treated
                # as an atmost constraint (lits, k); otherwise a clause
                if len(clause) != 2 or isinstance(clause[0], int): # it is a clause
                    self.add_clause(clause)
                else:
                    self.add_atmost(clause[0], clause[1])

        self.use_timer = use_timer
        self.call_time = 0.0  # time spent for the last call to oracle
        self.accu_time = 0.0  # time accumulated for all calls to oracle

        if incr:
            pysolvers.gluecard3_setincr(self.gluecard)

        if with_proof:
            self.prfile = tempfile.TemporaryFile()
            # NOTE(review): tracing starts only after the bootstrap
            # clauses were added, so they do not appear in the proof —
            # confirm this is intended (Cadical.new does the opposite)
            pysolvers.gluecard3_tracepr(self.gluecard, self.prfile)
def delete(self):
    """
    Destructor: free the native solver and close the proof file, if any.
    """
    if not self.gluecard:
        return
    pysolvers.gluecard3_del(self.gluecard)
    self.gluecard = None

    if self.prfile:
        self.prfile.close()
def solve(self, assumptions=[]):
    """
    Solve the internal formula under the given assumption literals.
    Returns True/False, or None if there is no native solver.
    """
    if not self.gluecard:
        return

    timed = self.use_timer
    if timed:
        start = process_time()

    self.status = pysolvers.gluecard3_solve(self.gluecard, assumptions,
            int(MainThread.check()))

    if timed:
        self.call_time = process_time() - start
        self.accu_time += self.call_time

    return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
    """
    Solve the internal formula within the previously configured
    conflict/propagation budgets. Returns True/False/None (None also
    when the budget was exhausted).
    """
    if not self.gluecard:
        return

    timed = self.use_timer
    if timed:
        start = process_time()

    self.status = pysolvers.gluecard3_solve_lim(self.gluecard,
            assumptions, int(MainThread.check()), int(expect_interrupt))

    if timed:
        self.call_time = process_time() - start
        self.accu_time += self.call_time

    return self.status
def conf_budget(self, budget):
    """
    Set limit on the number of conflicts for the next limited solve
    call.

    :param budget: maximum number of conflicts (presumably a negative
        value disables the budget — confirm against pysolvers docs)
    :type budget: int
    """
    if self.gluecard:
        pysolvers.gluecard3_cbudget(self.gluecard, budget)
| |
field.fid in mappings
inst = mappings[field.fid]
# If this is a virtual mapping then we can skip it if we
# are a post close operation because this is the end of a context
if inst.is_virtual() and self.kind == POST_CLOSE_OP_KIND:
continue
if not req.logical_node.perform_physical_close(depth, field, self,
req, inst, perform_checks):
return False
self.check_for_unanalyzed_realm_ops(perform_checks)
return True
def check_for_unanalyzed_realm_ops(self, perform_checks):
    """
    Report Realm copies and fills generated by this operation that the
    physical analysis never justified, and drop their reachable caches
    so the garbage collector can reclaim them.
    """
    # same treatment for copies and fills, only the label differs
    for label, ops in (('copies', self.realm_copies),
                       ('fills', self.realm_fills)):
        if not ops:
            continue
        count = 0
        for op in ops:
            if not op.analyzed:
                count += 1
                # Clear out the reachable caches to help the garbage collector
                op.reachable_cache = None
        if perform_checks and count > 0:
            print("WARNING: "+str(self)+" generated "+str(count)+
                  " unnecessary Realm "+label)
            for op in ops:
                if not op.analyzed:
                    print('    '+str(op)+' was unnecessary')
def print_op_mapping_decisions(self, depth):
    """
    Print the mapper's decisions for this operation, indented by
    *depth*, recursing into any close operations it generated, the
    point tasks of an index launch, and the subtree of a single task.

    NOTE: iteritems/itervalues below are Python 2 dict APIs.
    """
    # Close operations we generated report their own decisions first
    if self.inter_close_ops:
        assert not self.is_close()
        for close in self.inter_close_ops:
            close.print_op_mapping_decisions(depth)
    # If we are an index task just do our points and return
    if self.kind == INDEX_TASK_KIND:
        assert self.points is not None
        for point in self.points.itervalues():
            point.op.print_op_mapping_decisions(depth)
        return
    # Print our mapping decisions
    prefix = ''
    for idx in range(depth):
        prefix += '  '
    print(prefix+'-------------------------------------------------')
    print(prefix+' Mapping Decisions for '+str(self)+' (depth='+str(depth)+')')
    # Processor and variant only exist for mapped single tasks
    if self.kind == SINGLE_TASK_KIND and self.task is not None:
        print(prefix+'  Task Mapped to ' + str(self.task.processor))
        assert self.task.variant
        print(prefix+'  Task Variant %d (inner=%s,leaf=%s)' % (self.task.variant.vid,
            'Yes' if self.task.variant.inner else 'No',
            'Yes' if self.task.variant.leaf else 'No'))
    if self.mappings is not None:
        for index,mappings in self.mappings.iteritems():
            assert index in self.reqs
            req = self.reqs[index]
            print(prefix+'  Region Requirement '+str(index)+' Region=('+
                  str(req.index_node)+','+str(req.field_space)+','+str(req.tid)+')')
            for fid,inst in mappings.iteritems():
                field = req.field_space.get_field(fid)
                print(prefix+'    '+str(field)+': '+str(inst))
    print(prefix+' End '+str(self)+' (depth='+str(depth)+')')
    print(prefix+'-------------------------------------------------')
    # If we are a single task recurse
    if self.kind == SINGLE_TASK_KIND and self.task is not None:
        self.task.print_task_mapping_decisions()
def get_color(self):
    """
    Return the graphviz color name used to draw this operation kind.
    Raises KeyError for kinds with no assigned color.
    """
    return {
        NO_OP_KIND : "white",
        SINGLE_TASK_KIND : "lightskyblue",
        INDEX_TASK_KIND : "mediumslateblue",
        MAP_OP_KIND : "mediumseagreen",
        INTER_CLOSE_OP_KIND : "orangered",
        READ_ONLY_CLOSE_OP_KIND : "darkgreen",
        POST_CLOSE_OP_KIND : "darkslateblue",
        FENCE_OP_KIND : "darkorchid2",
        COPY_OP_KIND : "darkgoldenrod3",
        FILL_OP_KIND : "darkorange1",
        ACQUIRE_OP_KIND : "darkolivegreen",
        RELEASE_OP_KIND : "darksalmon",
        DELETION_OP_KIND : "dodgerblue3",
        ATTACH_OP_KIND : "firebrick1",
        DETACH_OP_KIND : "cornflowerblue",
        DEP_PART_OP_KIND : "steelblue",
        PENDING_PART_OP_KIND : "honeydew",
        }[self.kind]
def print_base_node(self, printer, dataflow):
    """
    Emit this operation's graphviz node (an HTML-label record) to
    *printer*. The *dataflow* flag is currently unused here; both
    dataflow and event graphs share the same node shape.
    """
    title = str(self)+' (UID: '+str(self.uid)+')'
    # Index points get their coordinates appended to the title
    if self.task is not None and self.task.point.dim > 0:
        title += ' Point: ' + self.task.point.to_string()
    label = printer.generate_html_op_label(title, self.reqs, self.mappings,
                                           self.get_color(), self.state.detailed_graphs)
    printer.println(self.node_name+' [label=<'+label+'>,fontsize=14,'+\
                    'fontcolor=black,shape=record,penwidth=0];')
def print_dataflow_node(self, printer):
    """
    Emit this operation's node into the dataflow graph, preceded by the
    nodes of any close operations it generated.
    """
    # Print any close operations that we have, then print ourself
    if self.inter_close_ops:
        for close in self.inter_close_ops:
            close.print_dataflow_node(printer)
    self.print_base_node(printer, True)
def print_incoming_dataflow_edges(self, printer, previous):
    """
    Emit the dataflow edges arriving at this operation (and at any
    close operations it generated). No-op without incoming deps.
    """
    if self.incoming is None:
        return
    for close in (self.inter_close_ops or ()):
        close.print_incoming_dataflow_edges(printer, previous)
    for dep in self.incoming:
        dep.print_dataflow_edge(printer, previous)
def print_event_node(self, printer):
    """Emit this operation's node into the event graph."""
    self.print_base_node(printer, False)
def print_event_graph(self, printer, elevate, all_nodes, top):
    """
    Collect this operation (and the Realm copies/fills it generated)
    into *elevate*, mapping each node to the context whose subgraph
    should contain it, recursing into close ops, index points, and
    single-task subtrees.

    NOTE: itervalues below is a Python 2 dict API.
    """
    # Do any of our close operations too
    if self.inter_close_ops:
        for close in self.inter_close_ops:
            close.print_event_graph(printer, elevate, all_nodes, False)
    # Handle index space operations specially, everything
    # else is the same
    if self.kind is INDEX_TASK_KIND:
        assert self.points is not None
        for point in self.points.itervalues():
            point.op.print_event_graph(printer, elevate, all_nodes, False)
        # Put any operations we generated in the elevate set
        if self.realm_copies:
            for copy in self.realm_copies:
                elevate[copy] = copy.get_context()
        if self.realm_fills:
            for fill in self.realm_fills:
                elevate[fill] = fill.get_context()
        return
    # If this is a single task, recurse and generate our subgraph first
    if self.kind is SINGLE_TASK_KIND:
        # Get our corresponding task
        task = self.state.get_task(self.uid)
        task.print_event_graph_context(printer, elevate, all_nodes, top)
    # Look through all our generated realm operations and emit them
    if self.realm_copies:
        for copy in self.realm_copies:
            if copy not in elevate:
                #elevate[copy] = copy.get_event_context()
                elevate[copy] = copy.get_context()
    if self.realm_fills:
        for fill in self.realm_fills:
            if fill not in elevate:
                #elevate[fill] = fill.get_event_context()
                elevate[fill] = fill.get_context()
    if self.is_physical_operation():
        # Finally put ourselves in the set if we are a physical operation
        assert self.context is not None
        elevate[self] = self.context
def is_realm_operation(self):
    """Operations are never Realm-level events (copies/fills override this)."""
    return False
def is_physical_operation(self):
    """
    Whether this operation appears as its own node in the physical
    event graph. Close, copy, fill, fence, and deletion operations
    do not.
    """
    if self.is_close():
        return False
    return self.kind not in (COPY_OP_KIND, FILL_OP_KIND,
                             FENCE_OP_KIND, DELETION_OP_KIND)
def print_incoming_event_edges(self, printer):
    """
    Emit graphviz edges from each physical predecessor to this node.
    Clustered endpoints get ltail/lhead attributes so the edge is
    clipped at the cluster boundary.
    """
    for src in self.physical_incoming:
        # assemble the attribute list; ltail before lhead, style last
        attrs = []
        if src.cluster_name is not None:
            attrs.append('ltail='+src.cluster_name)
        if self.cluster_name is not None:
            attrs.append('lhead='+self.cluster_name)
        attrs.append('style=solid,color=black,penwidth=2')
        printer.println(src.node_name+' -> '+self.node_name+
                        ' ['+','.join(attrs)+'];')
def pack_requirement_replay_info(self, replay_file, req, mapping):
    """
    Serialize the instances chosen for one region requirement: a 32-bit
    count followed by one 64-bit handle per distinct instance. An empty
    or missing mapping writes a zero count.

    NOTE: itervalues is a Python 2 dict API.
    """
    if not mapping:
        replay_file.write(struct.pack('I',0))
        return
    # deduplicate: several fields may map to the same instance
    unique_insts = set(mapping.itervalues())
    replay_file.write(struct.pack('I',len(unique_insts)))
    for inst in unique_insts:
        replay_file.write(struct.pack('Q',inst.handle))
def pack_temporary_replay_info(self, replay_file, req, mapping, temporary):
    """
    Serialize the temporary-instance choices for one requirement: a
    32-bit pair count, then for each pair the destination instance
    handle (from *mapping*) followed by the temporary source handle.

    NOTE: iteritems is a Python 2 dict API; byte order of pairs follows
    dict iteration order.
    """
    assert len(temporary) > 0
    # Compute the unique set of pairs
    unique_pairs = dict()
    for fid,inst in temporary.iteritems():
        assert fid in mapping
        unique_pairs[mapping[fid]] = inst
    replay_file.write(struct.pack('I',len(unique_pairs)))
    for dst,src in unique_pairs.iteritems():
        replay_file.write(struct.pack('Q',dst))
        replay_file.write(struct.pack('Q',src))
def pack_inline_replay_info(self, replay_file):
    """
    Serialize replay info for an inline mapping operation, which always
    has exactly one region requirement (index 0): the requirement's
    instance set, then an optional temporary-instance section (count 0
    when absent).
    """
    assert self.kind == MAP_OP_KIND
    assert 0 in self.reqs
    assert 0 in self.mappings
    assert len(self.mappings) == 1
    replay_file.write(struct.pack('I',1))
    self.pack_requirement_replay_info(replay_file, self.reqs[0],
                                      self.mappings[0])
    if self.temporaries:
        assert 0 in self.temporaries
        replay_file.write(struct.pack('I',1))
        self.pack_temporary_replay_info(replay_file, self.reqs[0],
                                        self.mappings[0], self.temporaries[0])
    else:
        replay_file.write(struct.pack('I',0))
def pack_copy_replay_info(self, replay_file):
    """
    Serialize replay info for an explicit copy operation.

    Layout: source requirement count and per-source instance sets, the
    same for destinations, then the temporary-instance sections for
    sources and destinations.

    :param replay_file: binary file object opened for writing
    """
    assert self.kind == COPY_OP_KIND
    assert len(self.reqs) % 2 == 0
    # first half of the requirements are sources, second half destinations
    half = len(self.reqs) // 2
    replay_file.write(struct.pack('I',half))
    src_temporaries = set()
    for idx in range(half):
        # Bug fix: membership must be tested against self.mappings
        # itself, not against the mapping stored at that index (the
        # old `idx not in self.mappings[idx]` raised KeyError for
        # unmapped requirements and tested the wrong container).
        self.pack_requirement_replay_info(replay_file, self.reqs[idx],
            None if idx not in self.mappings else self.mappings[idx])
        if self.temporaries and idx in self.temporaries:
            src_temporaries.add(idx)
    replay_file.write(struct.pack('I',half))
    dst_temporaries = set()
    for idx in range(half,2*half):
        self.pack_requirement_replay_info(replay_file,self.reqs[idx],
            None if idx not in self.mappings else self.mappings[idx])
        if self.temporaries and idx in self.temporaries:
            dst_temporaries.add(idx)
    replay_file.write(struct.pack('I',len(src_temporaries)))
    for idx in src_temporaries:
        self.pack_temporary_replay_info(replay_file, self.reqs[idx],
            self.mappings[idx], self.temporaries[idx])
    replay_file.write(struct.pack('I',len(dst_temporaries)))
    for idx in dst_temporaries:
        self.pack_temporary_replay_info(replay_file, self.reqs[idx],
            self.mappings[idx], self.temporaries[idx])
def pack_close_replay_info(self, replay_file):
    """
    Serialize replay info for an inter-close operation: the (single,
    optional) requirement's instance set, then an optional temporary
    section. Each section writes a count of 0 when absent.
    """
    assert self.kind == INTER_CLOSE_OP_KIND
    assert 0 in self.reqs
    if 0 in self.mappings:
        assert len(self.mappings) == 1
        replay_file.write(struct.pack('I',1))
        self.pack_requirement_replay_info(replay_file, self.reqs[0],
                                          self.mappings[0])
    else:
        replay_file.write(struct.pack('I',0))
    if self.temporaries:
        # temporaries require the mapping they pair against
        assert 0 in self.mappings
        assert 0 in self.temporaries
        replay_file.write(struct.pack('I',1))
        self.pack_temporary_replay_info(replay_file, self.reqs[0],
                                        self.mappings[0], self.temporaries[0])
    else:
        replay_file.write(struct.pack('I',0))
def pack_release_replay_info(self, replay_file):
    """
    Serialize replay info for a release operation: only the optional
    temporary-instance section (count 0 when absent).
    """
    assert self.kind == RELEASE_OP_KIND
    if self.temporaries:
        assert 0 in self.reqs
        assert 0 in self.mappings
        assert 0 in self.temporaries
        replay_file.write(struct.pack('I',1))
        self.pack_temporary_replay_info(replay_file, self.reqs[0],
                                        self.mappings[0], self.temporaries[0])
    else:
        replay_file.write(struct.pack('I',0))
class Variant(object):
    """
    Metadata describing one task variant: its id plus the inner/leaf/
    idempotent flags and name, filled in later via :meth:`initialize`.
    """
    __slots__ = ['state', 'vid', 'inner', 'leaf', 'idempotent', 'name']

    def __init__(self, state, vid):
        self.state = state
        self.vid = vid
        # flags default to False until initialize() is called
        self.inner, self.leaf, self.idempotent = False, False, False
        self.name = None

    def initialize(self, inner, leaf, idempotent, name):
        """Record the variant's execution properties and name."""
        self.inner, self.leaf = inner, leaf
        self.idempotent, self.name = idempotent, name
class Task(object):
__slots__ = ['state', 'op', 'point', 'operations', 'depth',
'current_fence', 'restrictions', 'dumb_acquisitions',
'used_instances', 'virtual_indexes', 'processor', 'priority',
'premappings', 'postmappings', 'tunables',
'operation_indexes', 'close_indexes', 'variant']
# If you add a field here, you must update the merge method
def __init__(self, state, op):
    """
    Construct a task wrapper around operation *op*, linking the two and
    starting with an origin point and no recorded analysis state.
    """
    self.state = state
    self.op = op
    self.op.task = self
    self.point = Point(0)
    self.operations = list()
    # everything else starts unset and is filled in lazily
    for attr in ('depth', 'current_fence', 'restrictions',
                 'dumb_acquisitions', 'used_instances', 'virtual_indexes',
                 'processor', 'priority', 'premappings', 'postmappings',
                 'tunables', 'operation_indexes', 'close_indexes',
                 'variant'):
        setattr(self, attr, None)
def __str__(self):
    """A task prints as its underlying operation."""
    return str(self.op)
__repr__ = __str__
def add_operation(self, operation):
    """Append a child operation executed inside this task."""
    self.operations.append(operation)
def set_point(self, point):
    """Record this task's index-space point."""
    self.point = point
def set_priority(self, priority):
    """Record the mapped priority; may only be set once."""
    assert not self.priority
    self.priority = priority
def set_processor(self, processor):
    """Record the processor this task mapped to; may only be set once."""
    assert not self.processor
    self.processor = processor
def set_variant(self, variant):
    """Record the chosen task variant; may only be set once."""
    assert not self.variant
    self.variant = variant
def add_premapping(self, index):
    """Record that region requirement *index* was premapped."""
    premapped = self.premappings if self.premappings else set()
    premapped.add(index)
    self.premappings = premapped
def add_postmapping(self, index, fid, inst):
    """Record a postmapped instance for field *fid* of requirement *index*."""
    if not self.postmappings:
        self.postmappings = dict()
    self.postmappings.setdefault(index, dict())[fid] = inst
def update_instance_uses(self):
    """
    Bump the use count once per distinct instance referenced by this
    task's premappings and postmappings.

    NOTE: itervalues is a Python 2 dict API.
    """
    if self.premappings:
        for index in self.premappings:
            assert index in self.op.mappings
            # deduplicate: several fields may share one instance
            for inst in set(self.op.mappings[index].itervalues()):
                inst.increment_use_count()
    if self.postmappings:
        for mapping in self.postmappings.itervalues():
            for inst in set(mapping.itervalues()):
                inst.increment_use_count()
def add_tunable(self, index, size, value):
| |
+ str(number_of_clusters) + "\n")
log_file.write("Average cluster size: " + str(avg_size_of_cluster) + "\n")
log_file.write("Standard deviation cluster size: " + str(std_size_of_cluster) + "\n")
log_file.write("Noise percentage: " + str(noise_percentage) + "\n")
log_file.write("Percentage of all unknown connections that are in the noise cluster: " + str(
percentage_total_unknown_in_noise_cluster) + "\n")
log_file.write("Percentage of all connections in noise cluster that are unknown: " + str(
percentage_unknown_of_noise_cluster) + "\n")
log_file.write("Percentage of connections with detailed labels that are in noise cluster: " + str(
percentage_detailed_labels_in_noise_cluster) + "\n")
log_file.write("Average cluster purity: " + str(average_cluster_purity) + "\n")
log_file.write("Average detailed label cohesion: " + str(avg_detailed_label_cohesion) + "\n")
log_file.write("Average cluster probability: " + str(avg_cluster_probability) + "\n")
log_file.close()
###############################
# Performance Matrix Creation #
###############################
print("Creating performance matrices")
performance_matrix_folder = path_to_intermediate_results + "/performance_matrices"
os.mkdir(performance_matrix_folder)
label_performance_matrix = performance_matrix_folder + "/label_performance_matrix" + addition + ".csv"
label_performance_matrix_table = performance_matrix_folder + "/label_performance_matrix" + addition + ".png"
detailed_label_performance_matrix = performance_matrix_folder + "/detailed_label_performance_matrix" + addition + ".csv"
detailed_label_performance_matrix_table = performance_matrix_folder + "/detailed_label_performance_matrix" + addition + ".png"
label_df = summary_csv_df.groupby("clusnum")["label"].value_counts().to_frame()
label_df = label_df.rename(columns={"label": "count"})
label_df = label_df.reset_index()
labels = label_df["label"].unique()
for label in labels:
lower_label = label.lower()
label_df[lower_label] = np.where(label_df["label"] == label, label_df["count"], 0)
label_df = label_df.drop(["count", "label"], axis=1)
label_df = label_df.rename(columns={"clusnum": "Cluster"})
columns = label_df.columns.tolist()
labels = label_df.columns.tolist()
labels.remove("Cluster")
clusters = label_df["Cluster"].unique().tolist()
data = []
for cluster in clusters:
cluster_column_data = []
cluster_column_data.append(cluster)
for label in labels:
count = int(label_df[(label_df["Cluster"] == cluster)][label].sum())
cluster_column_data.append(count)
data.append(cluster_column_data)
improved_label_df = pd.DataFrame(data, columns=columns)
detailed_label_df = summary_csv_df.groupby("clusnum")["detailed_label"].value_counts().to_frame()
detailed_label_df = detailed_label_df.rename(columns={"detailed_label": "count"})
detailed_label_df = detailed_label_df.reset_index()
detailed_labels = detailed_label_df["detailed_label"].unique()
for detail_label in detailed_labels:
lower_detail_label = detail_label.lower()
detailed_label_df[lower_detail_label] = np.where(detailed_label_df["detailed_label"] == detail_label,
detailed_label_df["count"], 0)
detailed_label_df = detailed_label_df.drop(["count", "detailed_label"], axis=1)
detailed_label_df = detailed_label_df.rename(columns={"clusnum": "Cluster"})
columns = detailed_label_df.columns.tolist()
labels = detailed_label_df.columns.tolist()
labels.remove("Cluster")
clusters = detailed_label_df["Cluster"].unique().tolist()
data = []
for cluster in clusters:
cluster_column_data = []
cluster_column_data.append(cluster)
for label in labels:
count = int(detailed_label_df[(detailed_label_df["Cluster"] == cluster)][label].sum())
cluster_column_data.append(count)
data.append(cluster_column_data)
improved_detail_label_df = pd.DataFrame(data, columns=columns)
improved_label_df.to_csv(label_performance_matrix, index=False)
fig, ax = plt.subplots()
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
table = ax.table(cellText=improved_label_df.values, colLabels=improved_label_df.columns, loc='center',
cellLoc='center')
table.auto_set_column_width(col=list(range(len(improved_label_df.columns))))
for (row, col), cell in table.get_celld().items():
if (row == 0):
cell.set_text_props(fontproperties=FontProperties(weight='bold'))
fig.tight_layout()
plt.savefig(label_performance_matrix_table)
plt.close()
plt.clf()
improved_detail_label_df.to_csv(detailed_label_performance_matrix, index=False)
reduced_column_size_name = [x[0:10] for x in improved_detail_label_df.columns.tolist()]
fig, ax = plt.subplots()
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
table2 = ax.table(cellText=improved_detail_label_df.values, colLabels=reduced_column_size_name, loc='center',
cellLoc='center')
table2.auto_set_column_width(col=list(range(len(reduced_column_size_name))))
for (row, col), cell in table2.get_celld().items():
if (row == 0):
cell.set_text_props(fontproperties=FontProperties(weight='bold'))
fig.tight_layout()
plt.savefig(detailed_label_performance_matrix_table, dpi=1200, bbox_inches='tight')
plt.close()
plt.clf()
##################
# Graph Creation #
#################
print("Creating graphs")
graphs_folder = self.path_to_store + "/graphs_folder"
os.mkdir(graphs_folder)
summary_csv_df = pd.read_csv(csv_file)
application_name_graph = graphs_folder + "/application_name_graph" + addition + ".png"
path_to_application_name_legend_storage = graphs_folder + "/application_name_legend" + addition + ".png"
path_to_application_name_combined = graphs_folder + "/application_name_combined" + addition + ".png"
application_category_name_graph = graphs_folder + "/application_category_name_graph" + addition + ".png"
path_to_application_category_name_legend_storage = graphs_folder + "/application_category_name_legend" + addition + ".png"
path_to_application_category_name_combined = graphs_folder + "/application_category_name_combined" + addition + ".png"
label_distribution_graph = graphs_folder + "/label_graph" + addition + ".png"
path_to_label_legend_storage = graphs_folder + "/label_legend" + addition + ".png"
path_to_label_combined = graphs_folder + "/label_combined" + addition + ".png"
detailed_label_distribution_graph = graphs_folder + "/detailed_label_graph" + addition + ".png"
path_to_detailed_label_legend_storage = graphs_folder + "/detailed_label_legend" + addition + ".png"
path_to_detailed_label_combined = graphs_folder + "/detailed_label_combined" + addition + ".png"
name_distribution_graph = graphs_folder + "/name_graph" + addition + ".png"
path_to_name_legend_storage = graphs_folder + "/name_legend" + addition + ".png"
path_to_name_combined = graphs_folder + "/name_combined" + addition + ".png"
####################
# application name #
####################
overall_detailed_label_df = summary_csv_df.groupby("clusnum")["application_name"].value_counts().to_frame()
overall_detailed_label_df = overall_detailed_label_df.rename(columns={"application_name": "count"})
overall_detailed_label_df = overall_detailed_label_df.reset_index()
clusters = overall_detailed_label_df["clusnum"].unique().tolist()
if len(clusters) < 4:
ncols = len(clusters)
else:
ncols = 4
nrows = math.ceil(len(clusters) / 4)
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(7, 7))
list_of_names_dfs = []
for cluster in clusters:
cluster_df = overall_detailed_label_df[overall_detailed_label_df["clusnum"] == cluster][
["application_name", "count"]]
cluster_df["application_name"] = np.where(cluster_df["count"] <= 4, "Other", cluster_df.application_name)
cluster_df = cluster_df.groupby("application_name")["count"].aggregate(sum).reset_index().sort_values(
by=["count"], ascending=False)
list_of_names_dfs.append(cluster_df)
detailed_label_name_df = list_of_names_dfs.pop()
for name_df in list_of_names_dfs:
detailed_label_name_df = detailed_label_name_df.append(name_df)
detailed_label_name_df = detailed_label_name_df.groupby("application_name")["count"].aggregate(
sum).reset_index().sort_values(by=["count"])
unique_application_category_names = detailed_label_name_df["application_name"].tolist()
colors = {}
cmap = cm.tab20c(np.linspace(0, 1, len(unique_application_category_names)))
for index, color in enumerate(cmap):
application_name = unique_application_category_names.pop()
colors[application_name] = color
for index, cluster in enumerate(clusters):
cluster_df = overall_detailed_label_df[overall_detailed_label_df["clusnum"] == cluster][
["application_name", "count"]]
cluster_df["application_name"] = np.where(cluster_df["count"] <= 4, "Other",
cluster_df.application_name)
cluster_df = cluster_df.groupby("application_name")["count"].aggregate(sum).reset_index().sort_values(
by=["count"])
cluster_df["relative_count"] = round((cluster_df["count"] / cluster_df["count"].sum()) * 100, 2)
if len(clusters) == 1:
patches, texts = ax.pie(cluster_df["count"], labels=cluster_df["relative_count"],
colors=[colors[key] for key in cluster_df["application_name"]])
new_labels = self.clean_up_labels(texts)
ax.clear()
ax.pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in cluster_df["application_name"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax.set_title("Cluster " + str(cluster))
elif len(clusters) <= 4:
patches, texts = ax[index].pie(cluster_df["count"], labels=cluster_df["relative_count"],
colors=[colors[key] for key in
cluster_df["application_name"]],
labeldistance=1.25)
new_labels = self.clean_up_labels(texts)
ax[index].clear()
ax[index].pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in cluster_df["application_name"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax[index].set_title("Cluster " + str(cluster))
else:
patches, texts = ax[math.floor(index / 4), index % 4].pie(cluster_df["count"],
labels=cluster_df["relative_count"],
colors=[colors[key] for key in
cluster_df[
"application_name"]],
labeldistance=1.25)
new_labels = self.clean_up_labels(texts)
ax[math.floor(index / 4), index % 4].clear()
ax[math.floor(index / 4), index % 4].pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in
cluster_df["application_name"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax[math.floor(index / 4), index % 4].set_title("Cluster " + str(cluster))
if len(clusters) % 4 != 0:
if len(clusters) > 4:
for missing_axis in range(4 - len(clusters) % 4, 4):
ax[nrows - 1, missing_axis].axis('off')
markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', linestyle='') for color in colors.values()]
plt.suptitle("Application Name Distribution per Cluster", y=0.985, x=0.5, fontweight='bold')
fig.tight_layout()
fig.canvas.draw()
fig.savefig(application_name_graph, dpi=1200)
legend = plt.legend(handles=markers, labels=colors.keys(), loc=3, framealpha=1, frameon=True,
bbox_to_anchor=(2, 0))
separate_legend = legend.figure
separate_legend.canvas.draw()
bbox = legend.get_window_extent()
bbox = bbox.from_extents(*(bbox.extents + np.array([-4, -4, 4, 4])))
bbox = bbox.transformed(fig.dpi_scale_trans.inverted())
fig.savefig(path_to_application_name_legend_storage, dpi=1200, bbox_inches=bbox)
legend.remove()
plt.close()
plt.clf()
graph_img = Image.open(application_name_graph)
legend_im = Image.open(path_to_application_name_legend_storage)
widths_graph = graph_img.width
heights_graph = graph_img.height
widths_legend = legend_im.width
heights_legend = legend_im.height
if heights_legend > heights_graph:
resize_percentage = heights_graph / heights_legend
new_width = int(resize_percentage * widths_legend)
legend_im = legend_im.resize((new_width, heights_graph), Image.ANTIALIAS)
total_width = widths_graph + widths_legend
y_offset = int((heights_graph - heights_legend) / 2)
combined_im = Image.new('RGB', (total_width, heights_graph), color=(255, 255, 255, 1))
combined_im.paste(graph_img, (0, 0))
combined_im.paste(legend_im, (widths_graph, y_offset))
combined_im.save(path_to_application_name_combined)
#############################
# application category name #
#############################
overall_detailed_label_df = summary_csv_df.groupby("clusnum")[
"application_category_name"].value_counts().to_frame()
overall_detailed_label_df = overall_detailed_label_df.rename(columns={"application_category_name": "count"})
overall_detailed_label_df = overall_detailed_label_df.reset_index()
clusters = overall_detailed_label_df["clusnum"].unique().tolist()
if len(clusters) < 4:
ncols = len(clusters)
else:
ncols = 4
nrows = math.ceil(len(clusters) / 4)
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(7, 7))
list_of_names_dfs = []
for cluster in clusters:
cluster_df = overall_detailed_label_df[overall_detailed_label_df["clusnum"] == cluster][
["application_category_name", "count"]]
cluster_df = cluster_df.groupby("application_category_name")["count"].aggregate(
sum).reset_index().sort_values(
by=["count"], ascending=False)
list_of_names_dfs.append(cluster_df)
detailed_label_name_df = list_of_names_dfs.pop()
for name_df in list_of_names_dfs:
detailed_label_name_df = detailed_label_name_df.append(name_df)
detailed_label_name_df = detailed_label_name_df.groupby("application_category_name")["count"].aggregate(
sum).reset_index().sort_values(by=["count"])
unique_application_category_names = detailed_label_name_df["application_category_name"].tolist()
colors = {}
cmap = cm.gist_rainbow(np.linspace(0, 1, len(unique_application_category_names)))
for index, color in enumerate(cmap):
application_name = unique_application_category_names.pop()
colors[application_name] = color
for index, cluster in enumerate(clusters):
cluster_df = overall_detailed_label_df[overall_detailed_label_df["clusnum"] == cluster][
["application_category_name", "count"]]
cluster_df = cluster_df.groupby("application_category_name")["count"].aggregate(
sum).reset_index().sort_values(
by=["count"])
cluster_df["relative_count"] = round((cluster_df["count"] / cluster_df["count"].sum()) * 100, 2)
if len(clusters) == 1:
patches, texts = ax.pie(cluster_df["count"], labels=cluster_df["relative_count"],
colors=[colors[key] for key in cluster_df["application_category_name"]])
new_labels = self.clean_up_labels(texts)
ax.clear()
ax.pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in cluster_df["application_category_name"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax.set_title("Cluster " + str(cluster))
elif len(clusters) <= 4:
patches, texts = ax[index].pie(cluster_df["count"], labels=cluster_df["relative_count"],
colors=[colors[key] for key in
cluster_df["application_category_name"]],
labeldistance=1.25)
new_labels = self.clean_up_labels(texts)
ax[index].clear()
ax[index].pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in cluster_df["application_category_name"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax[index].set_title("Cluster " + str(cluster))
else:
patches, texts = ax[math.floor(index / 4), index % 4].pie(cluster_df["count"], labels=cluster_df["relative_count"],
colors=[colors[key] for key in
cluster_df["application_category_name"]],
labeldistance=1.25)
new_labels = self.clean_up_labels(texts)
ax[math.floor(index / 4), index % 4].clear()
ax[math.floor(index / 4), index % 4].pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in cluster_df["application_category_name"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax[math.floor(index / 4), index % 4].set_title("Cluster " + str(cluster))
if len(clusters) % 4 != 0:
if len(clusters) > 4:
for missing_axis in range(4 - len(clusters) % 4, 4):
ax[nrows - 1, missing_axis].axis('off')
markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', linestyle='') for color in colors.values()]
fig.subplots_adjust(bottom=0.25)
plt.suptitle("Application Category Name Distribution per Cluster", y=0.985, x=0.5, fontweight='bold')
fig.tight_layout()
fig.canvas.draw()
fig.savefig(application_category_name_graph, dpi=1200)
legend = plt.legend(handles=markers, labels=colors.keys(), loc=3, framealpha=1, frameon=True,
bbox_to_anchor=(2, 0))
separate_legend = legend.figure
separate_legend.canvas.draw()
bbox = legend.get_window_extent()
bbox = bbox.from_extents(*(bbox.extents + np.array([-4, -4, 4, 4])))
bbox = bbox.transformed(fig.dpi_scale_trans.inverted())
fig.savefig(path_to_application_category_name_legend_storage, dpi=1200, bbox_inches=bbox)
legend.remove()
plt.close()
plt.clf()
graph_img = Image.open(application_category_name_graph)
legend_im = Image.open(path_to_application_category_name_legend_storage)
widths_graph = graph_img.width
heights_graph = graph_img.height
widths_legend = legend_im.width
heights_legend = legend_im.height
if heights_legend > heights_graph:
resize_percentage = heights_graph / heights_legend
new_width = int(resize_percentage * widths_legend)
legend_im = legend_im.resize((new_width, | |
<filename>trident/optims/pytorch_losses.py<gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import sys
from typing import Callable, Any
from tqdm import tqdm
import builtins
import math
from math import *
import string
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torch.nn.modules.loss import _Loss
from trident.backend.tensorspec import ObjectType
from trident.layers.pytorch_layers import Dense
from trident import context
from trident.backend.model import ModelBase
from trident.backend import dtype as Dtype
from trident.backend.common import *
from trident.backend.pytorch_backend import *
from trident.backend.pytorch_ops import *
from trident.layers.pytorch_activations import sigmoid
from trident.optims.losses import Loss
# from trident.optims.pytorch_trainer import Model
# Cached trident session singleton for this process.
_session = get_session()
# Default compute device; prefer CUDA when available.
# NOTE(review): `_device` appears unused in this module — verify before removing.
_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Explicit public API of this module.
__all__ = ['_ClassificationLoss', 'MSELoss', 'CrossEntropyLoss', 'NLLLoss', 'BCELoss', 'F1ScoreLoss', 'L1Loss', 'SmoothL1Loss', 'L2Loss', 'CosineSimilarityLoss',
           'ExponentialLoss', 'ItakuraSaitoLoss', 'MS_SSIMLoss', 'DiceLoss', 'WingLoss', 'AdaptiveWingLoss',
           'IoULoss', 'FocalLoss', 'SoftIoULoss', 'CenterLoss', 'TripletLoss', 'TripletMarginLoss',
           'LovaszSoftmax', 'PerceptionLoss', 'EdgeLoss', 'TransformInvariantLoss', 'get_loss']
def _calculate_loss_unimplemented(self, output: Tensor, target: Tensor) -> None:
    """Stand-in for ``calculate_loss``; always raises ``NotImplementedError``.

    Bound as the class-level default of ``_ClassificationLoss.calculate_loss``
    so that subclasses which forget to supply an implementation fail loudly.
    """
    raise NotImplementedError
class _ClassificationLoss(Loss):
"""Calculate loss for complex classification task."""
def __init__(self, axis=1, sample_weight=None, auto_balance=False, from_logits=False, ignore_index=-100, cutoff=None, label_smooth=False, reduction='mean', enable_ohem=False,
ohem_ratio=3.5, binding_dataset_symbol=None, input_names=None, output_names=None,
name=None, **kwargs):
"""
Args:
axis (int): the position where the classes is.
sample_weight (Tensor): means the weights of classes , it shoud be a 1D tensor and length the same as
number of classes.
from_logits (bool): whether the output tensor is normalized as a probability (total equal to 1)
ignore_index (int or list of int):
cutoff (None or decimal): the cutoff point of probability for classification, should be None of a number
less than 1..
is_target_onehot (bool): Is the target tensor in onehot format?
label_smooth (bool): Should use label smoothing?
reduction (string): the method to aggrgate loss. None means no need to aggregate, 'mean' means average loss,
'sum' means the summation of losses,'batch_mean' means average loss cross the batch axis then
summation them.
Attributes:
need_target_onehot (bool): If True, means the before loss calculation , need to transform target as one-hot format, ex. label-smooth, default is False.
is_multiselection (bool): If True, means the classification model is multi-selection, so cannot use any softmax process, use sigmoid and binary_crosss_entropy
insteaded.
is_target_onehot (bool): If True, means we have confirmed (not just declare) the target is transformed as one-hot format
reduction(str): The aggregation function for loss, available options are 'sum', 'mean 'and 'batch_mean', default is 'mean'
axis (None or int): The axis we according with for loss calculation. Default is 1.
from_logits (bool):If True, means the sum of all probability will equal 1.
is_logsoftmax (bool):If True, means model use SoftMax as last layer or use any equivalent calculation.
sample_weight(1D tensor):The loss weight for all classes.
ignore_index(int , list, tuple): The classes we want to ignore in the loss calculation.
cutoff(float): Means the decision boundary in this classification model, default=0.5.
num_classes(int):number of all the classes.
label_smooth (bool):If True, mean we will apply label-smoothing in loss calculation.
"""
super(_ClassificationLoss, self).__init__(reduction=reduction, sample_weight=sample_weight, axis=axis, enable_ohem=enable_ohem, ohem_ratio=ohem_ratio, input_names=input_names, output_names=output_names,name=name)
self.need_target_onehot = False
self.is_multiselection = False
self.is_target_onehot = False
self.from_logits = from_logits
self.is_logsoftmax = False
self.ignore_index = ignore_index
self.ignore_index_weight = None
self.auto_balance = auto_balance
self.binding_dataset_symbol = binding_dataset_symbol
if self.auto_balance:
self.label_statistics = None
self._calculate_label_statistics()
if cutoff is not None and not 0 < cutoff < 1:
raise ValueError('cutoff should between 0 and 1')
self.cutoff = cutoff
self.num_classes = None
self.label_smooth = label_smooth
self.reduction = reduction
# initilize weight
    def _calculate_label_statistics(self):
        """Collect per-class frequency statistics from the most recent data provider.

        Populates ``self.label_statistics`` with a re-weighting vector
        (max class frequency divided by each class frequency) that
        ``preprocess`` later uses for auto-balancing. Does nothing when no
        data provider is registered or the bound dataset type is unsupported.
        """
        ctx = context._context()
        # Only proceed when the trident context holds at least one data provider.
        if hasattr(ctx._thread_local_info, 'data_providers') and len(ctx._thread_local_info.data_providers) > 0:
            with torch.no_grad():
                # Use the most recently registered data provider.
                dp = list(ctx._thread_local_info.data_providers.values())[-1]
                if dp.traindata.label.__class__.__name__ != 'ZipDataset':
                    self.binding_dataset_symbol = dp.traindata.label.symbol
                # Locate the dataset bound to this loss (empty when no symbol is set).
                ds = [ds for ds in dp.traindata.get_datasets() if ds.symbol == self.binding_dataset_symbol if self.binding_dataset_symbol is not None]
                ds = ds[0] if len(ds) > 0 else None
                if ds is not None and ds.__class__.__name__ == 'LabelDataset':
                    print('Start retrive label class distribution for auto-balance in loss function.')
                    # Class frequencies over the raw label items.
                    unique, counts = np.unique(np.array(dp.traindata.label.items), return_counts=True)
                    reweights = np.clip(counts, 1, np.inf) / np.sum(counts).astype(np.float32)
                    # Rare classes get proportionally larger weights (max/freq).
                    reweights1 = np.max(reweights) / reweights
                    reweights1[reweights == 1] = 1
                    self.label_statistics = reweights1
                elif ds is not None and ds.__class__.__name__ == 'MaskDataset' and dp.traindata.label.object_type in [ObjectType.label_mask, ObjectType.color_mask]:
                    print('Start retrive label class distribution for auto-balance in loss function.')
                    # Pixel-level frequencies: stack every mask and count label ids.
                    unique, counts = torch.unique(to_tensor(np.stack([dp.traindata.label[i] for i in tqdm(range(len(dp.traindata.label)))]), dtype=Dtype.long, device='cpu'),
                                                  return_counts=True)
                    unique = to_list(to_numpy(unique))
                    counts = to_numpy(counts)
                    # Densify: fill zero counts for label ids that never occur so the
                    # weight vector can be indexed directly by class id.
                    if len(unique) != builtins.max(unique) + 1:
                        counts = np.array([counts[unique.index(i)] if i in unique else 0 for i in range(builtins.max(unique) + 1)])
                    reweights = np.clip(counts, 1, np.inf) / np.sum(counts).astype(np.float32)
                    reweights1 = np.max(reweights) / reweights
                    reweights1[reweights == 1] = 1
                    self.label_statistics = reweights1
                elif ds is not None and ds.__class__.__name__ == 'TextSequenceDataset':
                    # Vocabulary frequencies for text-sequence labels.
                    chars_count = np.array(ds.vocabs_frequency.value_list).astype(np.float32)
                    reweights = np.clip(chars_count, 1, np.inf) / np.sum(chars_count).astype(np.float32)
                    reweights1 = np.max(reweights) / reweights
                    # fix for rare words
                    reweights1[reweights == 1] = 1
                    self.label_statistics = reweights1
def flatten_check(self, output, target):
"Check that `out` and `targ` have the same number of elements and flatten them."
ndim_output = ndim(output)
ndim_target = ndim(target)
if ndim(output) > 2:
if self.axis == 1:
output = reshape(output, (output.size(0), output.size(1), -1))
elif self.axis == -1:
output = reshape(output, (output.size(0), -1, output.size(-1)))
if ndim_target == ndim_output - 1 and target.dtype == Dtype.long:
target = reshape(target, (target.size(0), -1))
elif ndim_target == ndim_output and target.dtype != Dtype.long:
if self.axis == 1:
target = reshape(target, (target.size(0), target.size(1), -1))
elif self.axis == -1:
target = reshape(target, (-1, target.size(-1)))
return output.contiguous(), target
elif ndim(output) <= 2 and len(output) == len(target):
return output, target
elif ndim(output) <= 2 and ndim(output) == ndim(target) + 1:
return output, target
else:
raise ValueError('output and target have diffent elements.')
    def preprocess(self, output: Tensor, target: Tensor, **kwargs):
        """Normalize ``output``/``target`` and update bookkeeping flags before loss calculation.

        Infers whether ``output`` holds raw probabilities or log-softmax values
        (setting ``self.from_logits`` / ``self.is_logsoftmax``), applies optional
        auto-balance re-weighting, builds the ignore-index weight mask, converts
        ``target`` to one-hot when required, applies label smoothing, and runs
        OHEM selection when enabled.

        Args:
            output (Tensor): Model predictions with the class axis at ``self.axis``.
            target (Tensor): Ground truth (class indices or one-hot-like values).
            **kwargs: Unused; accepted for interface compatibility.

        Returns:
            The (possibly rescaled) ``output`` and ``target`` pair.
            NOTE(review): for an empty ``output`` a scalar zero tensor is
            returned instead of a tuple — verify callers handle that shape.
        """
        # Determine num_classes / sample_weight lazily from the first batch seen.
        if len(output) == 0:
            return to_tensor(0.0)
        if self.num_classes is None:
            self.num_classes = int_shape(output)[self.axis]
        if self.sample_weight is None:
            # Default: every class weighted equally.
            self.sample_weight = ones(self.num_classes, requires_grad=False).to(get_device())
        elif len(self.sample_weight) != self.num_classes:
            raise ValueError('weight should be 1-D tensor and length equal to numbers of filters')
        elif self.sample_weight.requires_grad != False or self.sample_weight.dtype != output.dtype or self.sample_weight.device != output.device:
            # Re-materialize the weights as a detached tensor on the right device.
            self.sample_weight = to_tensor(self.sample_weight, requires_grad=False).to(get_device())
        else:
            pass
        # exp(output) is used below to detect log-probability outputs.
        output_exp = exp(output)
        if ndim(output) >= 1 and 'float' in str(output.dtype) and reduce_min(output) >= 0 :
            # Non-negative floats: treat as (possibly unnormalized) probabilities
            # and rescale so they sum to 1 along the class axis.
            self.is_logsoftmax = False
            if reduce_max(output) > 1:
                output=output/reduce_max(output)
            sum_output=reduce_sum(output,axis=self.axis,keepdims=True)
            if reduce_max(abs(sum_output-1))<1e-3:
                self.from_logits = True
            else:
                output=output/clip(sum_output,1e-7,1-1e-7)
                self.from_logits = True
            if self.auto_balance and self.label_statistics is not None:
                # Auto-balance in probability space: multiply by per-class weights.
                if self.num_classes== len(self.label_statistics):
                    new_shp = [1] * len(int_shape(output))
                    new_shp[1] = len(self.label_statistics)
                    output = output * clip(to_tensor(np.reshape(self.label_statistics.copy(), tuple(new_shp)),requires_grad=False), min=1e-7)
                    output = clip(output, min=1e-7, max=1)
        elif (ndim(output) >= 1 and 'float' in str(output.dtype) and output_exp.min() >= 0 and output_exp.max() <= 1):
            # exp(output) lies in [0, 1]: output looks like log-probabilities.
            self.is_logsoftmax = True
            self.from_logits = True
            if self.auto_balance and self.label_statistics is not None:
                # Auto-balance in log space: subtract the log of the class weights.
                if int_shape(output)[1] == len(self.label_statistics):
                    new_shp = [1] * len(int_shape(output))
                    new_shp[1] = len(self.label_statistics)
                    output = output - to_tensor(np.reshape(np.log(clip(self.label_statistics.copy(), min=1e-7)), tuple(new_shp)))
                    output = clip(output, max=- 1e-7)
        else:
            # Raw logits: no normalization assumptions hold.
            self.is_logsoftmax = False
            self.from_logits = False
        if (ndim(target) == ndim(output) and 'float' in str(target.dtype) and target.min() >= 0 and target.max() <= 1):
            # Same rank as output with values in [0, 1]: assume one-hot-like target.
            self.is_target_onehot = True
        self.ignore_index_weight = ones_like(self.sample_weight, requires_grad=False, dtype=output.dtype).to(get_device())
        # Zero out the weight of every ignored class index.
        with torch.no_grad():
            if isinstance(self.ignore_index, int) and 0 <= self.ignore_index < self.num_classes:
                self.ignore_index_weight[self.ignore_index] = 0
            elif isinstance(self.ignore_index, (list, tuple)):
                for idx in self.ignore_index:
                    if isinstance(idx, int) and 0 <= idx < int_shape(output)[self.axis]:
                        self.ignore_index_weight[idx] = 0
        if self.label_smooth:
            # Label smoothing operates on one-hot targets.
            self.need_target_onehot = True
        if target.dtype == str2dtype('long'):
            self.is_target_onehot = False
        elif target.dtype != str2dtype('long') and (target.min() >= 0 and target.max() <= 1 and abs(target.sum(self.axis)-1).mean() < 1e-4):
            # Float target that sums to ~1 along the class axis: one-hot-ish;
            # clip away exact 0/1 for numerical stability.
            target = clip(target, min=1e-7, max=1- 1e-7)
            self.is_target_onehot = True
        # Convert index targets to one-hot when a one-hot target is required.
        if target.dtype == Dtype.long and self.need_target_onehot == True and self.is_target_onehot == False:
            target = make_onehot(target, num_classes=self.num_classes, axis=self.axis).to(get_device())
            # NOTE(review): 'require_grads' is not a torch Tensor attribute
            # (requires_grad?) — this assignment is effectively a no-op; verify.
            target.require_grads = False
            self.is_target_onehot = True
        if self.label_smooth:
            # Smooth by adding uniform noise in [-0.2, 0.2], then clip.
            target = clip(target + random_uniform_like(target,-0.2,0.2).to(target.device),1e-7,1.1)
            self.is_target_onehot = True
            target.require_grads = False
        if self.enable_ohem:
            # Online hard example mining: keep only the hardest samples.
            output, target = self._do_ohem(output, target)
        # setting cutoff
        # if self.cutoff is not None:
        #     mask = (output > self.cutoff).to(output.dtype)
        #     output = output * mask
        return output, target
calculate_loss: Callable[..., Any] = _calculate_loss_unimplemented
# def calculate_loss(self, output, target, **kwargs):
# """ Calculate the unaggregate loss.
# The loss function calculation logic should define here., please dont't aggregate the loss in this phase.
#
# Args:
# output (tf.Tensor):
# target (tf.Tensor):
# """
# ##dont do aggregation
# raise NotImplementedError
def _handel_abnormal(self, loss):
if any_abnormal_number(loss):
sys.stderr.write('{0} has abnormal value,trident | |
0.45787382, 0.55902845],
[0.4123755, 0.45249655, 0.55598654],
[0.41653307, 0.44715887, 0.55286034],
[0.42074398, 0.44186248, 0.54965225],
[0.42500442, 0.43660906, 0.54636471],
[0.4293108, 0.43140027, 0.54300022],
[0.4336598, 0.42623771, 0.53956127],
[0.43804833, 0.42112299, 0.5360504],
[0.44247353, 0.41605769, 0.53247018],
[0.44693273, 0.41104339, 0.52882319],
[0.45142348, 0.40608165, 0.52511203],
[0.45594353, 0.40117402, 0.52133933],
[0.46049078, 0.39632207, 0.51750773],
[0.46506333, 0.39152738, 0.51361988],
[0.46965941, 0.38679153, 0.50967843],
[0.47427742, 0.38211613, 0.50568606],
[0.47891588, 0.37750281, 0.50164543],
[0.48357344, 0.37295324, 0.49755923],
[0.48824886, 0.36846913, 0.49343012],
[0.49294102, 0.36405223, 0.48926078],
[0.49764888, 0.35970437, 0.48505386],
[0.50237152, 0.35542742, 0.48081202],
[0.50710806, 0.35122333, 0.4765379],
[0.51185772, 0.34709412, 0.47223412],
[0.51661976, 0.34304192, 0.46790331],
[0.52139352, 0.33906894, 0.46354803],
[0.52617837, 0.33517748, 0.45917087],
[0.53097373, 0.33136998, 0.45477435],
[0.53577904, 0.32764897, 0.45036099],
[0.5405938, 0.32401712, 0.44593327],
[0.54541748, 0.32047724, 0.44149363],
[0.55024962, 0.31703224, 0.43704449],
[0.55508972, 0.31368521, 0.43258821],
[0.55993732, 0.31043936, 0.42812712],
[0.56479194, 0.30729806, 0.42366351],
[0.56965309, 0.30426482, 0.4191996],
[0.57452028, 0.3013433, 0.4147376],
[0.57939299, 0.29853732, 0.41027962],
[0.58427069, 0.29585081, 0.40582776],
[0.58915282, 0.29328786, 0.40138404],
[0.5940388, 0.29085269, 0.39695042],
[0.598928, 0.28854962, 0.39252882],
[0.60381976, 0.28638308, 0.38812107],
[0.60871339, 0.28435758, 0.38372895],
[0.61360815, 0.28247773, 0.37935419],
[0.61850324, 0.28074814, 0.37499843],
[0.62339785, 0.27917347, 0.37066324],
[0.62829109, 0.27775838, 0.36635013],
[0.63318202, 0.27650747, 0.36206054],
[0.63806966, 0.2754253, 0.35779584],
[0.64295295, 0.27451632, 0.35355731],
[0.64783079, 0.27378486, 0.34934618],
[0.65270204, 0.27323505, 0.3451636],
[0.65756546, 0.27287086, 0.34101062],
[0.66241977, 0.27269597, 0.33688826],
[0.66726363, 0.27271382, 0.33279744],
[0.67209565, 0.27292754, 0.32873901],
[0.67691434, 0.27333989, 0.32471376],
[0.6817182, 0.27395329, 0.32072241],
[0.68650563, 0.27476976, 0.31676559],
[0.69127497, 0.27579088, 0.31284389],
[0.69602454, 0.27701784, 0.30895782],
[0.70075254, 0.27845134, 0.30510785],
[0.70545716, 0.28009166, 0.30129438],
[0.7101365, 0.28193859, 0.29751774],
[0.71478864, 0.2839915, 0.29377823],
[0.71941156, 0.28624927, 0.29007611],
[0.72400323, 0.28871039, 0.28641158],
[0.72856152, 0.29137287, 0.28278481],
[0.73308431, 0.29423437, 0.27919595],
[0.73756937, 0.29729211, 0.27564513],
[0.74201447, 0.300543, 0.27213243],
[0.74641731, 0.30398357, 0.26865796],
[0.75077557, 0.30761007, 0.26522181],
[0.75508687, 0.31141845, 0.26182407],
[0.75934881, 0.31540441, 0.25846485],
[0.76355894, 0.31956343, 0.2551443],
[0.76771479, 0.32389079, 0.25186257],
[0.77181387, 0.32838159, 0.24861988],
[0.77585364, 0.33303079, 0.2454165],
[0.77983156, 0.33783322, 0.24225275],
[0.78374507, 0.3427836, 0.23912906],
[0.78759158, 0.3478766, 0.23604592],
[0.79136849, 0.35310679, 0.23300394],
[0.7950732, 0.35846871, 0.23000384],
[0.7987031, 0.36395687, 0.22704646],
[0.80225556, 0.36956577, 0.2241328],
[0.80572797, 0.37528989, 0.22126402],
[0.80911772, 0.38112373, 0.21844145],
[0.8124222, 0.38706178, 0.21566659],
[0.8156388, 0.39309858, 0.21294117],
[0.81876495, 0.39922869, 0.21026713],
[0.82179807, 0.40544668, 0.20764663],
[0.82473561, 0.41174718, 0.2050821],
[0.82757503, 0.41812487, 0.20257621],
[0.83031385, 0.42457445, 0.20013192],
[0.83294957, 0.43109068, 0.19775247],
[0.83547976, 0.43766835, 0.19544138],
[0.83790201, 0.44430233, 0.1932025],
[0.84021395, 0.45098751, 0.19103997],
[0.84241324, 0.45771883, 0.18895823],
[0.8444976, 0.4644913, 0.18696207],
[0.84646479, 0.47129996, 0.18505655],
[0.84831261, 0.47813992, 0.18324704],
[0.85003894, 0.48500631, 0.18153919],
[0.85164169, 0.49189433, 0.17993891],
[0.85311883, 0.49879922, 0.17845236],
[0.8544684, 0.50571627, 0.17708588],
[0.85568849, 0.51264081, 0.175846],
[0.85677728, 0.51956824, 0.17473936],
[0.85773299, 0.52649398, 0.17377268],
[0.85855392, 0.5334135, 0.17295267],
[0.85923845, 0.54032234, 0.17228601],
[0.85978503, 0.54721606, 0.17177924],
[0.86019218, 0.55409028, 0.17143872],
[0.86045851, 0.56094067, 0.17127053],
[0.8605827, 0.56776293, 0.17128041],
[0.86056351, 0.57455284, 0.17147369],
[0.86039981, 0.58130619, 0.17185518],
[0.86009053, 0.58801884, 0.17242918],
[0.85963469, 0.59468671, 0.17319933],
[0.8590314, 0.60130575, 0.17416865],
[0.85827988, 0.60787198, 0.17533943],
[0.85737943, 0.61438144, 0.17671322],
[0.85632942, 0.62083027, 0.17829085],
[0.85512935, 0.62721464, 0.18007238],
[0.85377879, 0.63353076, 0.18205711],
[0.85227743, 0.63977494, 0.18424366],
[0.85062503, 0.6459435, 0.18662991],
[0.84882148, 0.65203286, 0.1892131],
[0.84686674, 0.65803948, 0.19198986],
[0.84476088, 0.66395989, 0.19495627],
[0.84250408, 0.66979069, 0.19810788],
[0.84009661, 0.67552853, 0.20143982],
[0.83753885, 0.68117015, 0.20494683],
[0.83483127, 0.68671234, 0.2086233],
[0.83197445, 0.69215196, 0.21246337],
[0.82896909, 0.69748597, 0.21646096],
[0.82581596, 0.70271136, 0.22060982],
[0.82251594, 0.70782524, 0.22490355],
[0.81907005, 0.71282477, 0.22933571],
[0.81547936, 0.7177072, 0.23389979],
[0.81174508, 0.72246985, 0.23858926],
[0.80786851, 0.72711014, 0.24339764],
[0.80385106, 0.73162554, 0.24831845],
[0.79969423, 0.73601365, 0.25334528],
[0.79539964, 0.74027212, 0.25847182],
[0.790969, 0.7443987, 0.2636918],
[0.78640412, 0.74839123, 0.2689991],
[0.78170693, 0.75224764, 0.27438765],
[0.77687945, 0.75596594, 0.27985154],
[0.7719238, 0.75954426, 0.28538495],
[0.7668422, 0.7629808, 0.29098216],
[0.76163698, 0.76627386, 0.29663761],
[0.75631058, 0.76942183, 0.30234581],
[0.75086551, 0.77242321, 0.30810142],
[0.7453044, 0.77527659, 0.31389918],
[0.73963, 0.77798066, 0.31973397],
[0.73384511, 0.78053421, 0.32560075],
[0.72795269, 0.78293613, 0.33149461],
[0.72195575, 0.78518539, 0.33741072],
[0.71585743, 0.7872811, 0.34334436],
[0.70966097, 0.78922244, 0.3492909],
[0.70336969, 0.7910087, 0.35524579],
[0.69698703, 0.79263928, 0.36120459],
[0.69051652, 0.79411368, 0.36716293],
[0.6839618, 0.79543148, 0.37311652],
[0.67732662, 0.79659239, 0.37906117],
[0.67061482, 0.79759621, 0.38499274],
[0.66383034, 0.79844284, 0.39090718],
[0.65697724, 0.7991323, 0.39680051],
[0.65005968, 0.79966468, 0.40266882],
[0.64308193, 0.80004019, 0.40850828],
[0.63604837, 0.80025914, 0.41431512],
[0.62896348, 0.80032194, 0.42008563],
[0.62183187, 0.80022909, 0.42581618],
[0.61465825, 0.79998121, 0.4315032],
[0.60744746, 0.79957898, 0.43714318],
[0.60020446, 0.79902322, 0.44273267],
[0.59293432, 0.79831481, 0.44826831],
[0.58564224, 0.79745474, 0.45374678],
[0.57833356, 0.7964441, 0.45916482],
[0.57101373, 0.79528407, 0.46451926]])
# Discrete colormap built from the RGB triplets in ``circular4_vals`` above
# (``mcolors`` is presumably ``matplotlib.colors`` — verify against the imports).
circular4 = mcolors.ListedColormap(circular4_vals)
husl_99_75_vals = \
array([[0.99772728, 0.61398246, 0.69300476],
[0.99773502, 0.61487166, 0.68537325],
[0.99774278, 0.6157606, 0.67761784],
[0.99775055, 0.61665034, 0.66972379],
[0.99775836, 0.61754197, 0.6616753],
[0.99776621, 0.61843657, 0.65345533],
[0.99777411, 0.61933526, 0.64504541],
[0.99778207, 0.62023914, 0.63642541],
[0.9977901, 0.62114938, 0.62757329],
[0.99779821, 0.62206715, 0.61846478],
[0.99780642, 0.62299366, 0.60907298],
[0.99781473, 0.62393019, 0.59936798],
[0.99782316, 0.62487805, 0.58931624],
[0.99783171, 0.62583862, 0.57887998],
[0.99784041, 0.62681333, 0.56801633],
[0.99784927, 0.62780371, 0.55667629],
[0.9978583, 0.62881137, 0.54480344],
[0.99786752, 0.62983801, 0.53233222],
[0.99787695, 0.63088547, 0.5191858],
[0.9978866, 0.63195568, 0.5052731],
[0.9978965, 0.63305074, 0.49048487],
[0.99790666, 0.6341729, 0.47468825],
[0.99791712, 0.63532458, 0.45771902],
[0.99792789, 0.63650843, 0.43937027],
[0.99793901, 0.63772729, 0.41937521],
[0.99795051, 0.63898431, 0.39737988],
[0.99796241, 0.64028289, 0.37289748],
[0.99797477, 0.64162679, 0.34522629],
[0.99798761, 0.64302015, 0.31328787],
[0.99800099, 0.64446754, 0.27526397],
[0.99801496, 0.64597402, 0.22760324],
[0.99802957, 0.64754525, 0.16114023],
[0.99665581, 0.64968082, 0.05220968],
[0.9867071, 0.65480068, 0.05217829],
[0.97700171, 0.65968141, 0.05214805],
[0.96751758, 0.6643454, 0.05211887],
[0.95823432, 0.66881245, 0.05209067],
[0.94913298, 0.67310024, 0.05206335],
[0.94019588, 0.67722454, 0.05203686],
[0.93140648, 0.68119952, 0.05201111],
[0.92274919, 0.68503797, 0.05198606],
[0.91420927, 0.68875141, 0.05196164],
[0.90577274, 0.69235035, 0.0519378],
[0.89742621, 0.69584432, 0.0519145],
[0.88915686, 0.69924207, 0.05189169],
[0.88095231, 0.7025516, 0.05186932],
[0.87280055, 0.7057803, 0.05184736],
[0.86468986, 0.70893497, 0.05182577],
[0.85660874, 0.71202194, 0.05180452],
[0.84854584, 0.71504707, 0.05178357],
[0.8404899, 0.71801587, 0.05176289],
[0.83242966, 0.72093347, 0.05174246],
[0.82435383, 0.72380471, 0.05172224],
[0.81625097, 0.72663416, 0.05170221],
[0.80810945, 0.72942615, 0.05168233],
[0.7999174, 0.7321848, 0.05166259],
[0.79166257, 0.73491406, 0.05164296],
[0.78333229, 0.73761771, 0.05162342],
[0.77491337, 0.74029939, 0.05160394],
[0.76639198, 0.74296264, 0.05158449],
[0.75775357, 0.7456109, 0.05156506],
[0.74898273, 0.74824754, 0.05154562],
[0.74006304, 0.75087584, 0.05152615],
[0.73097693, 0.75349908, 0.05150662],
[0.72170548, 0.75612049, 0.05148701],
[0.71222821, 0.75874329, 0.0514673],
[0.70252286, 0.76137072, 0.05144746],
[0.69256505, 0.76400603, 0.05142746],
[0.682328, 0.76665252, 0.05140728],
[0.67178205, 0.76931353, 0.05138689],
[0.66089424, 0.77199248, 0.05136627],
[0.64962765, 0.7746929, 0.05134538],
[0.63794071, 0.7774184, 0.0513242],
[0.62578628, 0.78017274, 0.05130268],
[0.61311049, 0.78295983, 0.0512808],
[0.59985134, 0.78578375, 0.05125853],
[0.5859368, 0.78864879, 0.05123581],
[0.57128241, 0.79155947, 0.05121261],
[0.55578805, 0.79452056, 0.0511889],
[0.53933359, 0.79753714, 0.0511646],
[0.52177281, 0.80061463, 0.05113969],
[0.50292481, 0.80375879, 0.0511141],
[0.48256137, 0.80697586, 0.05108777],
[0.46038768, 0.81027252, 0.05106063],
[0.43601173, 0.81365601, 0.05103262],
[0.40889268, 0.81713418, 0.05100366],
[0.37824758, 0.82071557, 0.05097365],
[0.34286585, 0.82440951, 0.05094251],
[0.30068778, 0.82822624, 0.05091012],
[0.2476282, 0.83217698, 0.05087638],
[0.17278955, 0.83627415, 0.05084115],
[0.05108215, 0.8395208, 0.09015243],
[0.05150655, 0.83810554, 0.20200306],
[0.05190436, 0.83676888, 0.26584904],
[0.05227845, 0.83550297, 0.31306083],
[0.0526313, 0.83430091, 0.35107166],
[0.05296507, 0.83315665, 0.38306843],
[0.05328166, 0.83206482, 0.4107662],
[0.0535827, 0.83102068, 0.43521338],
[0.05386966, 0.83001997, 0.45710607],
[0.05414383, 0.82905891, 0.47693403],
[0.05440637, 0.82813409, 0.49505699],
[0.05465829, 0.82724245, 0.51174815],
[0.05490052, 0.82638121, 0.52722051],
[0.05513389, 0.82554784, 0.54164375],
[0.05535914, 0.82474007, 0.55515554],
[0.05557694, 0.82395579, 0.56786923],
[0.05578792, 0.8231931, 0.5798794],
[0.05599263, 0.82245022, 0.59126585],
[0.05619159, 0.82172553, 0.60209663],
[0.05638526, 0.82101753, 0.61243023],
[0.05657409, 0.82032482, 0.62231737],
[0.05675847, 0.81964611, 0.63180234],
[0.05693876, 0.81898017, 0.64092408],
[0.05711533, 0.81832587, 0.64971703],
[0.05728848, 0.81768214, 0.65821185],
[0.05745853, 0.81704796, 0.66643594],
[0.05762575, 0.81642237, 0.67441396],
[0.05779041, 0.81580446, 0.68216821],
[0.05795277, 0.81519336, 0.6897189],
[0.05811307, 0.81458824, 0.69708452],
[0.05827153, 0.81398827, 0.70428199],
[0.05842837, 0.81339269, 0.71132689],
[0.05858381, 0.81280073, 0.71823365],
[0.05873806, 0.81221165, 0.72501567],
[0.0588913, 0.81162472, 0.73168545],
[0.05904374, 0.81103924, 0.73825473],
[0.05919555, 0.81045449, 0.74473458],
[0.05934694, 0.80986975, 0.75113547],
[0.05949808, 0.80928434, 0.75746736],
[0.05964916, 0.80869754, 0.76373978],
[0.05980035, 0.80810862, 0.76996191],
[0.05995185, 0.80751688, 0.7761426],
[0.06010383, 0.80692156, 0.78229046],
[0.06025649, 0.80632191, 0.78841389],
[0.06041001, 0.80571715, 0.79452114],
[0.06056459, 0.80510648, 0.80062039],
[0.06072043, 0.80448905, 0.80671972],
[0.06087774, 0.80386401, 0.81282722],
[0.06103672, 0.80323043, 0.81895103],
[0.06119761, 0.80258736, 0.82509934],
[0.06136064, 0.8019338, 0.83128049],
[0.06152604, 0.80126867, 0.83750299],
[0.06169408, 0.80059085, 0.84377556],
[0.06186502, 0.79989911, 0.85010721],
[0.06203917, 0.79919217, 0.85650728],
[0.06221681, 0.79846864, 0.8629855],
[0.06239829, 0.79772703, 0.86955206],
[0.06258396, 0.7969657, 0.87621767],
[0.06277419, 0.79618292, 0.88299366],
[0.06296939, 0.79537677, 0.88989203],
[0.06317002, 0.79454518, 0.89692559],
[0.06337654, 0.79368587, 0.90410803],
[0.06358949, 0.79279633, 0.91145409],
[0.06380945, 0.7918738, 0.91897963],
[0.06403704, 0.79091525, 0.92670185],
[0.06427296, 0.78991729, 0.93463945],
[0.06451797, 0.78887615, 0.94281282],
[0.06477294, 0.78778762, 0.95124433],
[0.06503881, 0.78664698, 0.95995854],
[0.06531664, 0.78544893, 0.9689826],
[0.06560762, 0.78418743, 0.97834661],
[0.06591309, 0.78285567, 0.98808404],
[0.08145765, 0.78123638, 0.99736713],
[0.18579268, 0.77734027, 0.99736573],
[0.24686469, 0.77354432, 0.99736437],
[0.2929153, 0.76984014, 0.99736306],
[0.33066524, 0.76621996, 0.99736178],
[0.3629892, 0.76267657, 0.99736054],
[0.3914302, 0.75920327, 0.99735933],
[0.41692965, 0.75579377, 0.99735814],
[0.44011194, 0.7524422, 0.99735699],
[0.46141635, 0.74914303, 0.99735586],
[0.48116579, 0.74589101, 0.99735475],
[0.49960596, 0.74268117, 0.99735366],
[0.51692891, 0.7395088, 0.99735259],
[0.53328821, 0.73636935, 0.99735154],
[0.54880896, 0.73325847, 0.99735051],
[0.56359473, 0.73017196, 0.99734948],
[0.57773243, 0.72710575, 0.99734847],
[0.5912959, 0.72405585, 0.99734747],
[0.60434857, 0.7210184, 0.99734648],
[0.61694543, 0.71798954, 0.9973455],
[0.6291346, 0.71496551, 0.99734453],
[0.64095854, 0.71194255, 0.99734356],
[0.65245498, 0.70891689, 0.99734259],
[0.66365772, 0.70588476, 0.99734163],
[0.67459724, 0.70284237, 0.99734067],
[0.68530118, 0.69978585, 0.99733971],
[0.69579484, 0.69671127, 0.99733875],
[0.70610146, 0.69361462, 0.99733779],
[0.71624255, 0.69049175, 0.99733683],
[0.72623814, 0.68733837, 0.99733586],
[0.73610702, 0.68415004, 0.99733489],
[0.7458669, 0.68092213, 0.99733392],
[0.7555346, 0.67764978, 0.99733293],
[0.76512618, 0.67432788, 0.99733194],
[0.77465711, 0.67095102, 0.99733093],
[0.78414235, 0.66751347, 0.99732992],
[0.79359649, 0.66400915, 0.99732889],
[0.80303385, 0.66043152, 0.99732785],
[0.81246857, 0.6567736, 0.99732679],
[0.82191471, 0.65302785, 0.99732572],
[0.83138636, 0.64918612, 0.99732462],
[0.84089772, 0.64523958, 0.99732351],
[0.85046317, 0.64117861, 0.99732237],
[0.8600974, 0.63699271, 0.99732121],
[0.86981552, 0.63267036, 0.99732002],
[0.87963311, 0.62819886, 0.99731879],
[0.88956639, 0.62356421, 0.99731754],
[0.89963231, 0.61875086, 0.99731625],
[0.90984868, 0.61374151, 0.99731492],
[0.92023431, 0.6085168, 0.99731355],
[0.93080922, 0.603055, 0.99731213],
[0.94159473, 0.59733158, 0.99731066],
[0.95261376, 0.59131876, 0.99730914],
[0.96389098, 0.58498485, 0.99730756],
[0.9754531, 0.57829356, 0.99730591],
[0.98732921, 0.57120306, 0.99730419],
[0.99732668, 0.56552241, 0.99509296],
[0.99734617, 0.56800304, 0.98314291],
[0.99736455, 0.57032901, 0.97169814],
[0.99738192, 0.57251697, 0.96071116],
[0.99739839, 0.57458132, 0.9501397],
[0.99741404, 0.57653459, 0.93994597],
[0.99742895, 0.57838772, 0.93009603],
[0.99744319, 0.58015033, 0.92055927],
[0.99745682, 0.58183089, 0.91130796],
[0.99746989, 0.58343693, 0.9023169],
[0.99748245, 0.58497512, 0.89356306],
[0.99749455, 0.58645144, 0.88502533],
[0.99750622, 0.58787125, 0.87668428],
[0.9975175, 0.58923935, 0.86852195],
[0.99752842, 0.5905601, 0.86052166],
[0.99753901, 0.59183744, 0.85266786],
[0.9975493, 0.59307496, 0.844946],
[0.99755931, 0.59427592, 0.83734239],
[0.99756906, 0.59544331, 0.8298441],
[0.99757859, 0.59657988, 0.82243884],
[0.99758789, 0.59768816, 0.81511491],
[0.997597, 0.5987705, 0.80786108],
[0.99760593, 0.59982905, 0.80066652],
[0.9976147, 0.60086583, | |
<filename>src/onevision/cv/imgproc/transformation/affine.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""An affine transformation is any transformation that preserves collinearity
(i.e., all points lying on a line initially still lie on a line after
transformation) and ratios of distances (e.g., the midpoint of a line segment
remains the midpoint after transformation). In this sense, affine indicates a
special class of projective transformations that do not move any objects from
the affine space R^3 to the plane at infinity or conversely. An affine
transformation is also called an affinity.
Geometric contraction, expansion, dilation, reflection, rotation, shear,
similarity transformations, spiral similarities, and translation are all
affine transformations, as are their combinations. In general, an affine
transformation is a composition of rotations, translations, dilations,
and shears.
While an affine transformation preserves proportions on lines, it does not
necessarily preserve angles or lengths. Any triangle can be transformed into
any other by an affine transformation, so all triangles are affine and,
in this sense, affine is a generalization of congruent and similar.
"""
from __future__ import annotations
import math
from typing import Optional
from typing import Union
import cv2
import numpy as np
from torch import nn
from torch import Tensor
from torchvision.transforms import functional_pil as F_pil
from torchvision.transforms import functional_tensor as F_t
from torchvision.transforms import InterpolationMode
from torchvision.transforms.functional import _get_inverse_affine_matrix
from onevision.cv.core import get_image_size
from onevision.cv.core import pad_image
from onevision.cv.core import PaddingMode
from onevision.cv.imgproc.shape import affine_box
from onevision.cv.imgproc.transformation.interpolation_mode import interpolation_mode_from_int
from onevision.cv.imgproc.transformation.interpolation_mode import pil_modes_mapping
from onevision.cv.utils import batch_image_processing
from onevision.cv.utils import channel_last_processing
from onevision.factory import TRANSFORMS
from onevision.type import FloatAnyT
from onevision.type import IntAnyT
from onevision.type import ListOrTuple2T
from onevision.type import TensorOrArray
__all__ = [
"affine",
"affine_image_box",
"Affine",
]
# MARK: - Functional
def _affine_tensor_image(
    image        : Tensor,
    angle        : float,
    translate    : IntAnyT,
    scale        : float,
    shear        : FloatAnyT,
    center       : Optional[ListOrTuple2T[int]] = None,
    interpolation: InterpolationMode            = InterpolationMode.BILINEAR,
    keep_shape   : bool                         = True,
    pad_mode     : Union[PaddingMode, str]      = "constant",
    fill         : Optional[FloatAnyT]          = None,
) -> Tensor:
    """Apply affine transformation on the image keeping image center invariant.

    If the image is torch Tensor, it is expected to have [..., H, W] shape,
    where ... means an arbitrary number of leading dimensions.

    Args:
        image (Tensor[B, C, H, W]):
            Image to be transformed.
        angle (float):
            Rotation angle in degrees between -180 and 180, clockwise direction.
        translate (IntAnyT):
            Horizontal and vertical translations (post-rotation translation).
        scale (float):
            Overall scale. Must be non-negative.
        shear (FloatAnyT):
            Shear angle value in degrees between -180 to 180, clockwise
            direction. If a sequence is specified, the first value corresponds
            to a shear parallel to the x-axis, while the second value
            corresponds to a shear parallel to the y-axis.
        center (ListOrTuple2T[int], optional):
            Center of affine transformation. If `None`, use the center of the
            image. Default: `None`.
        interpolation (InterpolationMode):
            Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default: `InterpolationMode.BILINEAR`.
            If input is Tensor, only `InterpolationMode.NEAREST` and
            `InterpolationMode.BILINEAR` are supported. For backward
            compatibility integer values (e.g. `PIL.Image.NEAREST`) are still
            acceptable.
        keep_shape (bool):
            NOTE(review): per the code below, padding to the rotated bounding
            box happens when this is `False`; when `True` (default) the output
            keeps the input's size. Confirm against the numpy variant's docs,
            which state the opposite.
        pad_mode (PaddingMode, str):
            One of the padding modes defined in `PaddingMode`.
            Default: `constant`.
            NOTE(review): currently unused in this tensor code path.
        fill (FloatAnyT, optional):
            Pixel fill value for the area outside the transformed image.
            If given a number, the value is used for all bands respectively.

    Returns:
        image (Tensor[B, C, H, W]):
            Transformed image.

    Raises:
        TypeError: If `angle`, `translate`, `shear` or `interpolation` have
            an unsupported type.
        ValueError: If `translate`/`shear` are not length-2 sequences or
            `scale` is negative.
    """
    # --- Normalize and validate arguments ---
    if not isinstance(angle, (int, float)):
        raise TypeError(f"`angle` must be `int` or `float`. But got: {type(angle)}.")
    if isinstance(angle, int):
        angle = float(angle)

    if isinstance(translate, (int, float)):
        translate = [translate, translate]
    if isinstance(translate, tuple):
        translate = list(translate)
    if not isinstance(translate, (list, tuple)):
        raise TypeError(f"`translate` must be `list` or `tuple`. But got: {type(translate)}.")
    if len(translate) != 2:
        raise ValueError(f"`translate` must be a sequence of length 2. But got: {len(translate)}.")

    if isinstance(scale, int):
        scale = float(scale)
    if scale < 0.0:
        raise ValueError(f"`scale` must be positive. But got: {scale}.")

    if not isinstance(shear, (int, float, list, tuple)):
        raise TypeError(f"`shear` must be a single value or a sequence of length 2. But got: {shear}.")
    if isinstance(shear, (int, float)):
        shear = [shear, 0.0]
    if isinstance(shear, tuple):
        shear = list(shear)
    if len(shear) == 1:
        shear = [shear[0], shear[0]]
    if len(shear) != 2:
        # Fix: this message previously referred to `translate`
        raise ValueError(f"`shear` must be a sequence of length 2. But got: {len(shear)}.")

    if isinstance(interpolation, int):
        interpolation = interpolation_mode_from_int(interpolation)
    if not isinstance(interpolation, InterpolationMode):
        raise TypeError(f"`interpolation` must be a `InterpolationMode`. But got: {type(interpolation)}.")

    img  = image.clone()
    # Assumes get_image_size returns (H, W) — TODO confirm
    h, w = get_image_size(img)
    center = (h * 0.5, w * 0.5) if center is None else center  # (H, W)
    center = tuple(center[::-1])                               # (W, H)

    if not isinstance(image, Tensor):
        # PIL path.
        # center = (img_size[0] * 0.5 + 0.5, img_size[1] * 0.5 + 0.5)
        # It is visually better to estimate the center without the 0.5 offset,
        # otherwise an image rotated by 90 degrees is shifted vs the output of
        # torch.rot90 or F_t.affine.
        matrix            = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.affine(image, matrix=matrix, interpolation=pil_interpolation, fill=fill)

    # Expand the canvas to the rotated bounding box before transforming
    if not keep_shape:
        matrix  = _get_inverse_affine_matrix([0, 0], angle, [0, 0], 1.0, [0.0, 0.0])
        abs_cos = abs(matrix[0])
        abs_sin = abs(matrix[1])
        new_w   = int(h * abs_sin + w * abs_cos)
        new_h   = int(h * abs_cos + w * abs_sin)
        image   = pad_image(image, pad_size=(new_h, new_w))

    translate_f = [1.0 * t for t in translate]
    matrix      = _get_inverse_affine_matrix([0, 0], angle, translate_f, scale, shear)
    return F_t.affine(image, matrix=matrix, interpolation=interpolation.value, fill=fill)
@channel_last_processing
def _affine_numpy_image(
image : np.ndarray,
angle : float,
translate : IntAnyT,
scale : float,
shear : FloatAnyT,
center : Optional[ListOrTuple2T[int]] = None,
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
keep_shape : bool = True,
pad_mode : Union[PaddingMode, str] = "constant",
fill : Optional[FloatAnyT] = None,
) -> np.ndarray:
"""Apply affine transformation on the image keeping image center invariant.
References:
https://www.thepythoncode.com/article/image-transformations-using-opencv-in-python
https://stackoverflow.com/questions/43892506/opencv-python-rotate-image-without-cropping-sides
Args:
image (np.ndarray[C, H, W]):
Image to be transformed. The image is converted to channel last
format during processing.
angle (float):
Rotation angle in degrees between -180 and 180, clockwise direction.
translate (IntAnyT):
Horizontal and vertical translations (post-rotation translation).
scale (float):
Overall scale.
shear (FloatAnyT):
Shear angle value in degrees between -180 to 180, clockwise
direction. If a sequence is specified, the first value corresponds
to a shear parallel to the x-axis, while the second value
corresponds to a shear parallel to the y-axis.
center (ListOrTuple2T[int], optional):
Center of affine transformation. If `None`, use the center of the
image. Default: `None`.
interpolation (InterpolationMode):
Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`.
Default is `InterpolationMode.NEAREST`.
If input is Tensor, only `InterpolationMode.NEAREST`,
`InterpolationMode.BILINEAR` are supported. For backward
compatibility integer values (e.g. `PIL.Image.NEAREST`) are still
acceptable.
keep_shape (bool):
If `True`, expands the output image to make it large enough to
hold the entire rotated image.
If `False` or omitted, make the output image the same size as the
input image.
Note that the `keep_shape` flag assumes rotation around the center
and no translation. Default: `True`.
pad_mode (PaddingMode, str):
One of the padding modes defined in `PaddingMode`.
Default: `constant`.
fill (FloatAnyT, optional):
Pixel fill value for the area outside the transformed image.
If given a number, the value is used for all bands respectively.
Returns:
image (np.ndarray[C, H, W]):
Transformed image.
"""
if image.ndim != 3:
raise ValueError(f"`image.ndim` must be 3. But got: {image.ndim}.")
if not isinstance(angle, (int, float)):
raise TypeError(f"`angle` must be `int` or `float`. But got: {type(angle)}.")
if isinstance(angle, int):
angle = float(angle)
if isinstance(translate, (int, float)):
translate = [translate, translate]
if not isinstance(translate, (list, tuple)):
raise TypeError(f"`translate` must be `list` or `tuple`. But got: {type(translate)}.")
if isinstance(translate, tuple):
translate = list(translate)
if len(translate) != 2:
raise ValueError(f"`translate` must be a sequence of length 2. But got: {len(translate)}.")
if isinstance(scale, int):
scale = float(scale)
if scale < 0.0:
raise ValueError(f"`scale` must be positive. But got: {scale}.")
if not isinstance(shear, (int, float, list, tuple)):
raise TypeError(f"`shear` must be a single value or a sequence of length 2. But got: {shear}.")
if isinstance(shear, (int, float)):
shear = [shear, 0.0]
if isinstance(shear, tuple):
shear = | |
<filename>sam/tweeter/app.py
import json
import io
import datetime
import os
import logging
import boto3
import pandas
import numpy
import altair
from shared import S3_scraper_index
from twitter_shared import TwitterAPI
from plot_shared import get_chrome_driver, plot_key_ni_stats_date_range, plot_points_average_and_trend, output_plot, plot_heatmap
from data_shared import get_s3_csv_or_empty_df, push_csv_to_s3, get_ni_pop_pyramid
good_symb = '\u2193'
bad_symb = '\u2191'
def find_previous(df, newest, colname):
    """Describe how long the newest value of *colname* has been an extreme.

    Compares `newest[colname]` against all strictly-earlier rows of *df*
    (by 'Sample_Date') and returns a (symbol, description) pair such as
    (bad_symb, 'highest for 3 days') or (good_symb, 'lowest ever').
    """
    earlier = df['Sample_Date'] < newest['Sample_Date']
    at_or_above = df[(df[colname] >= newest[colname]) & earlier]
    below       = df[(df[colname] <  newest[colname]) & earlier]

    # No earlier row reaches this value -> all-time high
    if len(at_or_above) == 0:
        return bad_symb, 'highest ever'
    # No earlier row is below this value -> all-time low
    if len(below) == 0:
        return good_symb, 'lowest ever'

    last_at_or_above = df.iloc[at_or_above['Sample_Date'].idxmax()]
    last_below       = df.iloc[below['Sample_Date'].idxmax()]

    # Whichever extreme was seen more recently decides the direction
    if last_at_or_above['Sample_Date'] < last_below['Sample_Date']:
        symbol = bad_symb
        days   = (newest['Sample_Date'] - last_at_or_above['Sample_Date']).days
        text   = 'highest for %s day%s' %(days,'s' if days > 1 else '')
    else:
        symbol = good_symb
        days   = (newest['Sample_Date'] - last_below['Sample_Date']).days
        text   = 'lowest for %s day%s' %(days,'s' if days > 1 else '')
    return symbol, text
def calc_exp_fit0(data):
    """Slope of a degree-1 fit of log(values) against the index.

    For exponentially growing data this is the per-step growth exponent.
    """
    slope_intercept = numpy.polyfit(data.index, numpy.log(data.values), 1)
    return slope_intercept[0]
def calc_exp_fit1(data):
    """Intercept of a degree-1 fit of log(values) against the index.

    Companion to calc_exp_fit0; together they parameterize exp(c1 + c0*x).
    """
    slope_intercept = numpy.polyfit(data.index, numpy.log(data.values), 1)
    return slope_intercept[1]
def fit_exp(curve0, curve1, value):
    """Evaluate the fitted exponential exp(curve1) * exp(curve0 * value)."""
    amplitude = numpy.exp(curve1)
    growth    = numpy.exp(curve0 * value)
    return amplitude * growth
def create_model(df, to_model, datekey):
    """Fit a rolling 9-point exponential model to the *to_model* column.

    Adds '<to_model> model0/model1' (slope/intercept of the log-linear fit)
    plus derived daily and weekly fractional change columns. Re-indexes *df*
    on days-since-start ('x') in place and returns it.
    """
    df['x'] = (df[datekey] - df[datekey].min()).dt.days
    df.set_index('x', inplace=True)

    col0 = '%s model0' % to_model
    col1 = '%s model1' % to_model
    windowed = df.rolling(window=9, center=True)[to_model]
    df[col0] = windowed.apply(calc_exp_fit0)
    df[col1] = windowed.apply(calc_exp_fit1)

    # Fractional change relative to the model value one step in
    baseline = fit_exp(df[col0], df[col1], 1)
    df['%s model_daily_change' % to_model] = (fit_exp(df[col0], df[col1], 2) - baseline) / baseline
    df['%s model_weekly_change' % to_model] = (fit_exp(df[col0], df[col1], 8) - baseline) / baseline
    return df
def load_ni_time_series(url, sheet_name, date_col, series_col, model=False, filter_col=None, filter=None):
    """Load one NI COVID time series from an Excel workbook.

    Groups the sheet by `date_col`, sums `series_col`, reindexes onto a
    contiguous daily date range (missing days filled with 0) and adds a
    centred 7-day rolling mean column. Optionally fits the exponential
    model via create_model.

    :param url: path/URL/stream accepted by pandas.read_excel
    :param sheet_name: workbook sheet to read
    :param date_col: name of the date column to group on
    :param series_col: name of the value column to sum per date
    :param model: if True, run create_model on the rolling-mean column
    :param filter_col: optional column to filter rows on before grouping
    :param filter: value `filter_col` must equal (NOTE: shadows the builtin)
    :return: DataFrame with `date_col`, `series_col` and derived columns
    """
    df = pandas.read_excel(url, engine='openpyxl', sheet_name=sheet_name)
    if filter_col is not None:
        df = df[df[filter_col] == filter]
    # One row per date, then reindex onto a complete daily range so that
    # missing days appear explicitly (filled with 0 below)
    df = df.groupby(date_col)[series_col].sum().reset_index()
    df.set_index(date_col, inplace=True)
    newind = pandas.date_range(start=df.index.min(), end=df.index.max())
    df = df.reindex(newind)
    df.index.name = date_col
    df.reset_index(inplace=True)
    df.fillna(0, inplace=True)
    df['%s 7-day rolling mean' %series_col] = df[series_col].rolling(7, center=True).mean()
    if model is True:
        df = create_model(df, '%s 7-day rolling mean' %series_col, date_col)
    return df
def plot_test_stats(df, start_date, scale='linear'):
    """Build the three-panel NI testing chart (new cases, positivity, tests).

    :param df: daily testing DataFrame with 'Sample_Date' plus the case,
        positivity-rate and tests columns referenced below
    :param start_date: only rows with 'Sample_Date' after this are plotted
    :param scale: altair y-axis scale name, e.g. 'linear' or 'log'
    :return: whatever plot_points_average_and_trend returns (chart object)
    """
    return plot_points_average_and_trend(
        [
            # Panel 1: daily positives (points) with 7-day mean (line)
            {
                'points': df[(df['Sample_Date'] > start_date)].set_index(['Sample_Date'])['INDIVIDUALS TESTED POSITIVE'],
                'line': df[(df['Sample_Date'] > start_date)].set_index(['Sample_Date'])['New cases 7-day rolling mean'],
                'colour': '#076543',
                'date_col': 'Sample_Date',
                'x_title': 'Specimen Date',
                'y_title': 'New cases',
                'scales': [scale],
                'height': 225
            },
            # Panel 2: daily positivity rate with rolling rate
            {
                'points': df[(df['Sample_Date'] > start_date)].set_index(['Sample_Date'])['pos_rate'],
                'line': df[(df['Sample_Date'] > start_date)].set_index(['Sample_Date'])['rolling_pos_rate'],
                'colour': '#076543',
                'date_col': 'Sample_Date',
                'x_title': 'Specimen Date',
                'y_title': 'Positivity rate',
                'y_format': '%',
                'scales': [scale],
                'height': 225
            },
            # Panel 3: individuals tested with 7-day rolling total
            {
                'points': df[(df['Sample_Date'] > start_date)].set_index('Sample_Date')['ALL INDIVIDUALS TESTED'],
                'line': df[(df['Sample_Date'] > start_date)].set_index('Sample_Date')['ROLLING 7 DAY INDIVIDUALS TESTED'],
                'colour': '#076543',
                'date_col': 'Sample_Date',
                'x_title': 'Specimen Date',
                'y_title': 'Tests',
                'scales': [scale],
                'height': 225
            },
        ],
        '%s COVID-19 %s reported on %s' %(
            'NI',
            'testing statistics',
            datetime.datetime.today().strftime('%A %-d %B %Y'),
        ),
        [
            'All data from DoH daily data',
            'Last two days may be revised upwards due to reporting delays',
            'https://twitter.com/ni_covid19_data on %s' %datetime.datetime.now().date().strftime('%A %-d %B %Y'),
        ]
    )
def plot_hospital_stats(adm_dis_7d, inpatients, icu, start_date, scale='linear'):
    """Build the three-panel NI hospital chart (admissions/discharges, inpatients, ICU).

    :param adm_dis_7d: long-format DataFrame ('Date', 'variable', 'value')
        of 7-day-average admissions and discharges
    :param inpatients: DataFrame with 'Date' and
        'Number of Confirmed COVID Inpatients'
    :param icu: DataFrame with 'Date' and 'Confirmed COVID Occupied'
    :param start_date: only rows with 'Date' after this are plotted
    :param scale: altair y-axis scale name, e.g. 'linear' or 'log'
    :return: whatever plot_points_average_and_trend returns (chart object)
    """
    return plot_points_average_and_trend(
        [
            # Panel 1: admissions vs discharges, coloured by series
            {
                'points': None,
                'line': adm_dis_7d[(adm_dis_7d['Date'] > start_date)].set_index(['Date','variable'])['value'],
                'colour': 'variable',
                'date_col': 'Date',
                'x_title': 'Date',
                'y_title': 'Number of people (7-day average)',
                'scales': [scale],
                'height': 225
            },
            # Panel 2: confirmed COVID inpatients
            {
                'points': None,
                'line': inpatients[(inpatients['Date'] > start_date)].set_index(['Date'])['Number of Confirmed COVID Inpatients'],
                'colour': 'red',
                'date_col': 'Date',
                'x_title': 'Date',
                'y_title': 'Inpatients (with confirmed COVID-19)',
                'scales': [scale],
                'height': 225
            },
            # Panel 3: ICU beds occupied by confirmed COVID patients
            {
                'points': None,
                'line': icu[(icu['Date'] > start_date)].set_index('Date')['Confirmed COVID Occupied'],
                'colour': 'black',
                'date_col': 'Date',
                'x_title': 'Date',
                'y_title': 'ICU Beds COVID Occupied',
                'scales': [scale],
                'height': 225
            },
        ],
        '%s COVID-19 %s (%s scale) reported on %s' %(
            'NI',
            'hospital admissions, discharges, inpatients and ICU',
            scale,
            datetime.datetime.today().strftime('%A %-d %B %Y'),
        ),
        [
            'Hospital data from DoH daily data',
            'Last two days (five for admissions) may be revised upwards',
            'https://twitter.com/ni_covid19_data on %s' %datetime.datetime.now().date().strftime('%A %-d %B %Y'),
        ]
    )
def lambda_handler(event, context):
# Get the secret
sm = boto3.client('secretsmanager')
secretobj = sm.get_secret_value(SecretId='ni-covid-tweets')
secret = json.loads(secretobj['SecretString'])
# Get the index
s3 = boto3.client('s3')
status = S3_scraper_index(s3, secret['bucketname'], secret['doh-dd-index'])
index = status.get_dict()
tweets = []
# Download the most recently updated Excel file
for change in event:
obj = s3.get_object(Bucket=secret['bucketname'],Key=change['keyname'])['Body']
stream = io.BytesIO(obj.read())
# Load test data and add extra fields
df = pandas.read_excel(stream,engine='openpyxl',sheet_name='Summary Tests')
df['pos_rate'] = df['INDIVIDUALS TESTED POSITIVE']/df['ALL INDIVIDUALS TESTED']
df['rolling_pos_rate'] = df['ROLLING 7 DAY POSITIVE TESTS']/df['ROLLING 7 DAY INDIVIDUALS TESTED']
df['printdate']=df['Sample_Date'].dt.strftime('%-d %B %Y')
df['rolling_7d_change'] = (df['ROLLING 7 DAY POSITIVE TESTS'] - df['ROLLING 7 DAY POSITIVE TESTS'].shift(7)) * 7
df['New cases 7-day rolling mean'] = df['INDIVIDUALS TESTED POSITIVE'].rolling(7, center=True).mean()
df.set_index('Sample_Date', inplace=True)
newind = pandas.date_range(start=df.index.min(), end=df.index.max())
df = df.reindex(newind)
df.index.name = 'Sample_Date'
df.reset_index(inplace=True)
df['Rolling cases per 100k'] = 100000 * (df['New cases 7-day rolling mean'] / 1893667)
df = create_model(df,'Rolling cases per 100k','Sample_Date')
# Get the latest dates with values for tests and rolling
latest = df.iloc[df['Sample_Date'].idxmax()]
latest_7d = df.iloc[df[df['ROLLING 7 DAY POSITIVE TESTS'].notna()]['Sample_Date'].idxmax()]
latest_model = df.iloc[df[df['Rolling cases per 100k model_daily_change'].notna()]['Sample_Date'].idxmax()]
last_but1_model = df.iloc[df[(df['Rolling cases per 100k model_daily_change'].notna()) & (df['Sample_Date'] != latest_model['Sample_Date'])]['Sample_Date'].idxmax()]
# Summary stats to allow 'X registered in last 24 hours' info
deaths = load_ni_time_series(stream,'Deaths','Date of Death','Number of Deaths')
admissions = load_ni_time_series(stream,'Admissions','Admission Date','Number of Admissions',True)
discharges = load_ni_time_series(stream,'Discharges','Discharge Date','Number of Discharges')
inpatients = load_ni_time_series(stream,'Inpatients','Inpatients at Midnight','Number of Confirmed COVID Inpatients',False,'Sex','All')
inpatients.rename(columns={'Inpatients at Midnight': 'Date'}, inplace=True)
icu = load_ni_time_series(stream,'ICU','Date','Confirmed COVID Occupied')
totals = {
'ind_tested': int(df['ALL INDIVIDUALS TESTED'].sum()),
'ind_positive': int(df['INDIVIDUALS TESTED POSITIVE'].sum()),
'deaths': int(deaths['Number of Deaths'].sum()),
'admissions': int(admissions['Number of Admissions'].sum()),
'discharges': int(discharges['Number of Discharges'].sum())
}
print(totals)
latest_adm_model = admissions.iloc[admissions[admissions['Number of Admissions 7-day rolling mean model_daily_change'].notna()]['Admission Date'].idxmax()]
adm_dis = admissions.merge(discharges, how='inner', left_on='Admission Date', right_on='Discharge Date', validate='1:1')
adm_dis.drop(columns=['Discharge Date'], inplace = True)
adm_dis.rename(columns={'Admission Date': 'Date'}, inplace = True)
adm_dis['Inpatients'] = adm_dis['Number of Admissions 7-day rolling mean'].cumsum() - adm_dis['Number of Discharges 7-day rolling mean'].cumsum()
adm_dis_7d = adm_dis.rename(columns={'Number of Admissions 7-day rolling mean': 'Admissions','Number of Discharges 7-day rolling mean': 'Discharges'})[['Date','Admissions','Discharges']]
adm_dis_7d = adm_dis_7d.melt(id_vars='Date')
# Age band data
age_bands = pandas.read_excel(stream,engine='openpyxl',sheet_name='Individuals 7 Days - 5yr Age')
age_bands['Total_Tests'] = age_bands['Positive_Tests'] + age_bands['Negative_Tests'] + age_bands['Indeterminate_Tests']
age_bands = age_bands.groupby('Age_Band_5yr').sum()[['Positive_Tests','Total_Tests']].reset_index()
age_bands['Positivity_Rate'] = age_bands['Positive_Tests'] / age_bands['Total_Tests']
age_bands['Band Start'] = age_bands['Age_Band_5yr'].str.extract('Aged (\d+)').astype(float)
age_bands['Band End'] = age_bands['Age_Band_5yr'].str.extract('Aged \d+ - (\d+)').astype(float)
age_bands['Date'] = df['Sample_Date'].max()
# Get the age bands datastore contents from S3
s3dir = change['keyname'].split('/',maxsplit=1)[0]
agebands_keyname = '%s/agebands.csv' %s3dir
datastore = get_s3_csv_or_empty_df(s3, secret['bucketname'], agebands_keyname, ['Date'])
# Remove any data already recorded for the current day
datastore['Date'] = pandas.to_datetime(datastore['Date'])
if (change.get('notweet', False) is False) and (change.get('tweet', True) is True):
datastore = datastore[datastore['Date'] != df['Sample_Date'].max()]
# Append the new data
datastore = pandas.concat([datastore, age_bands])
# Send back to S3, if we are in tweet mode
push_csv_to_s3(datastore, s3, secret['bucketname'], agebands_keyname)
# Plot the case reports and 7-day average
driver = get_chrome_driver()
plots = []
if driver is not None:
today_str = datetime.datetime.now().date().strftime('%Y-%m-%d')
p = plot_key_ni_stats_date_range(df, admissions, deaths, latest['Sample_Date'] - pandas.to_timedelta(42, unit='d'), latest['Sample_Date'], ['linear','log'])
plots = output_plot(p, plots, driver, 'ni-cases-%s.png' % today_str)
if len(plots) > 0:
p = plot_hospital_stats(adm_dis_7d, inpatients, icu, latest['Sample_Date'] - pandas.to_timedelta(42, unit='d'))
plots = output_plot(p, plots, driver, 'ni-hospitals-%s.png' % today_str)
if len(plots) > 1:
toplot = datastore[datastore['Date'] >= (datastore['Date'].max() + pandas.DateOffset(days=-42))]
toplot['Date'] = pandas.to_datetime(toplot['Date'])
newind = pandas.date_range(start=toplot['Date'].max() + pandas.DateOffset(days=-42), end=toplot['Date'].max())
alldates = pandas.Series(newind)
alldates.name = 'Date'
toplot = toplot.merge(alldates, how='outer', left_on='Date', right_on='Date')
toplot['X'] = toplot['Date'].dt.strftime('%e %b')
toplot['Most Recent Positive Tests'] = toplot['Positive_Tests'].where(toplot['Date'] == toplot['Date'].max()).apply(lambda x: f"{x:n}" if not pandas.isna(x) else "")
toplot['Age_Band_5yr'].fillna('Not Known', inplace=True)
bands = toplot.groupby(['Age_Band_5yr','Band Start','Band End'], dropna=False).size().reset_index()[['Age_Band_5yr','Band Start','Band End']]
bands = bands[bands['Age_Band_5yr']!='Not Known']
bands.fillna(90, inplace=True)
bands['Band End'] = bands['Band End'].astype(int)
bands['Band Start'] = bands['Band Start'].astype(int)
bands['Year'] = bands.apply(lambda x: range(x['Band Start'], x['Band End']+1), axis='columns')
bands = bands.explode('Year').reset_index()
pops = get_ni_pop_pyramid()
pops = pops[pops['Year']==2020].groupby(['Age Band']).sum()['Population']
bands = bands.merge(pops, how='inner', validate='1:1', right_index=True, left_on='Year')
bands = bands.groupby('Age_Band_5yr').sum()['Population']
toplot = toplot.merge(bands, how='left', on='Age_Band_5yr')
toplot['Positive per 100k'] = (100000 * toplot['Positive_Tests']) / toplot['Population']
toplot['Most Recent Positive per 100k'] = toplot['Positive per 100k'].where(toplot['Date'] == toplot['Date'].max()).apply(lambda x: f"{int(x):n}" if not pandas.isna(x) else "")
heatmap2 = plot_heatmap(toplot,'X','Date','Date','Age_Band_5yr','Band Start','Age Band','Positive per 100k','Positive Tests per 100k')
p = altair.vconcat(
altair.layer(
heatmap2.properties(
height=450,
width=800,
title='NI COVID-19 7-day Positive Tests by Age Band per 100k people (%s to %s)' %(toplot['Date'].min().strftime('%-d %B %Y'),toplot['Date'].max().strftime('%-d %B %Y')),
),
heatmap2.mark_text(
align='right',
baseline='middle',
dx=43
).encode(
text = altair.Text('Most Recent Positive per 100k'),
color = altair.value('black')
)
)
).properties(
title=altair.TitleParams(
['Data from DoH daily downloads',
'Numbers to right of chart show most recent value',
'https://twitter.com/ni_covid19_data on %s' %datetime.datetime.now().strftime('%A %-d %B %Y')],
baseline='bottom',
orient='bottom',
anchor='end',
fontWeight='normal',
fontSize=10,
dy=10
),
)
plots = output_plot(p, plots, driver, 'ni-cases-age-bands-%s.png' % | |
<reponame>tristan-j-wood/gap-train-1
import numpy as np
from ase.calculators.dftb import Dftb
from gaptrain.utils import work_in_tmp_dir
from gaptrain.log import logger
from gaptrain.exceptions import MethodFailed, GAPFailed
from gaptrain.gtconfig import GTConfig
from subprocess import Popen, PIPE
import os
ha_to_ev = 27.2114
a0_to_ang = 0.52917829614246
def set_threads(n_cores):
    """Set the number of OpenMP/MKL threads to use.

    :param n_cores: (int | None) number of cores; falls back to
                    GTConfig.n_cores when None
    :return: None
    """
    n_cores = GTConfig.n_cores if n_cores is None else n_cores
    logger.info(f'Using {n_cores} cores')

    os.environ['OMP_NUM_THREADS'] = str(n_cores)
    # Fix: the MKL variable is MKL_NUM_THREADS; 'MLK_NUM_THREADS' was a typo,
    # so the MKL thread cap was never actually applied
    os.environ['MKL_NUM_THREADS'] = str(n_cores)

    return None
class DFTB(Dftb):
    """
    DFTB+ installed from the binaries downloaded from:
    https://www.dftbplus.org/download/dftb-stable/
    sk-files from:
    http://www.dftb.org/parameters/download/3ob/3ob-3-1-cc/
    """

    def read_fermi_levels(self):
        """Read Fermi levels, tolerating the ASE parser's failure.

        The ASE calculator doesn't quite work: its implementation can fail
        an internal assertion on otherwise-usable output, so that specific
        error is deliberately swallowed here.
        """
        try:
            super().read_fermi_levels()
        except AssertionError:
            # Best-effort: keep whatever was parsed before the assertion
            pass

        return None
@work_in_tmp_dir()
def run_autode(configuration, max_force=None, method=None, n_cores=1):
    """
    Run an orca or xtb gradient calculation and set the configuration's
    energy (eV), forces (eV/Å) and partial charges from the result.

    --------------------------------------------------------------------------
    :param configuration: (gaptrain.configurations.Configuration)
    :param max_force: (float) or None. Must be None: optimisation is not
                      implemented for this driver
    :param method: (autode.wrappers.base.ElectronicStructureMethod)
    :param n_cores: (int) number of cores for the calculation
    :return: (gaptrain.configurations.Configuration) the same configuration,
             updated in place
    :raises ValueError: if ORCA is requested without GTConfig.orca_keywords
    """
    from autode.species import Species
    from autode.calculation import Calculation
    from autode.exceptions import CouldNotGetProperty

    if method.name == 'orca' and GTConfig.orca_keywords is None:
        raise ValueError("For ORCA training GTConfig.orca_keywords must be"
                         " set. e.g. "
                         "GradientKeywords(['PBE', 'def2-SVP', 'EnGrad'])")

    # Optimisation is not implemented; a method is required to run
    assert max_force is None and method is not None

    species = Species(name=configuration.name,
                      atoms=configuration.atoms,
                      charge=configuration.charge,
                      mult=configuration.mult)

    # Allow for an ORCA calculation to have non-default keywords.. not the
    # cleanest implementation..
    kwds = GTConfig.orca_keywords if method.name == 'orca' else method.keywords.grad
    calc = Calculation(name='tmp',
                       molecule=species,
                       method=method,
                       keywords=kwds,
                       n_cores=n_cores)
    calc.run()

    # Convert Hartree -> eV using the module-level ha_to_ev constant
    # (a redundant local redefinition of it was removed)
    try:
        configuration.forces = -ha_to_ev * calc.get_gradients()

    except CouldNotGetProperty:
        logger.error('Failed to set forces')

    configuration.energy = ha_to_ev * calc.get_energy()
    configuration.partial_charges = calc.get_atomic_charges()

    return configuration
@work_in_tmp_dir(kept_exts=['.traj'])
def run_gpaw(configuration, max_force):
    """Run a periodic DFT calculation using GPAW. Will set configuration.energy
    and configuration.forces as their DFT calculated values at the 400eV/PBE
    level of theory

    --------------------------------------------------------------------------
    :param configuration: (gaptrain.configurations.Configuration)
    :param max_force: (float) or None. If given, BFGS-minimise the geometry
        until the maximum force falls below this value (eV/Å — TODO confirm
        units against ASE's fmax convention)
    :return: (gaptrain.configurations.Configuration) the same configuration,
        updated in place
    :raises AssertionError: if $GPAW_SETUP_PATH is unset or empty
    """
    from gpaw import GPAW, PW
    from ase.optimize import BFGS

    # GPAW needs the path to its PAW setup files; fail early if unset
    if ('GPAW_SETUP_PATH' not in os.environ.keys()
            or os.environ['GPAW_SETUP_PATH'] == ''):
        raise AssertionError('$GPAW_SETUP_PATH needs to be set')

    ase_atoms = configuration.ase_atoms()

    # Plane-wave basis at a 400 eV cutoff with the PBE functional
    dft = GPAW(mode=PW(400),
               basis='dzp',
               charge=configuration.charge,
               xc='PBE',
               txt=None)

    ase_atoms.set_calculator(dft)

    # Optional geometry minimisation before the single-point evaluation
    if max_force is not None:
        minimisation = BFGS(ase_atoms)
        minimisation.run(fmax=float(max_force))
        set_configuration_atoms_from_ase(configuration, ase_atoms)

    configuration.energy = ase_atoms.get_potential_energy()
    configuration.forces = ase_atoms.get_forces()

    return configuration
@work_in_tmp_dir(kept_exts=['.traj'], copied_exts=['.xml'])
def run_gap(configuration, max_force, gap, traj_name=None):
    """
    Run a GAP calculation using quippy as the driver which is a wrapper around
    the F90 QUIP code used to evaluate forces and energies using a GAP

    --------------------------------------------------------------------------
    :param configuration: (gaptrain.configurations.Configuration)
    :param max_force: (float) or None. If given, minimise with BFGS until the
                      maximum force falls below this value
    :param gap: (gaptrain.gap.GAP)
    :param traj_name: (str) or None. Name of an ASE trajectory file to which
                      the minimisation path is written
    :return: (gaptrain.configurations.Configuration)
    :raises GAPFailed: If the quippy subprocess did not produce an energy
    """
    configuration.save(filename='config.xyz')
    a, b, c = configuration.box.size

    # Energy minimisation section to the file
    min_section = ''
    if max_force is not None:
        if traj_name is not None:
            min_section = (f'traj = Trajectory(\'{traj_name}\', \'w\', '
                           f' system)\n'
                           'dyn = BFGS(system)\n'
                           'dyn.attach(traj.write, interval=1)\n'
                           f'dyn.run(fmax={float(max_force)})')
        else:
            min_section = ('dyn = BFGS(system)\n'
                           f'dyn.run(fmax={float(max_force)})')

    # Print a Python script to execute quippy - likely not installed in the
    # current interpreter..
    with open('gap.py', 'w') as quippy_script:
        print('import quippy',
              'import numpy as np',
              'from ase.io import read, write',
              'from ase.optimize import BFGS',
              'from ase.io.trajectory import Trajectory',
              'system = read("config.xyz")',
              f'system.cell = [{a}, {b}, {c}]',
              'system.pbc = True',
              'system.center()',
              f'{gap.ase_gap_potential_str()}',
              'system.set_calculator(pot)',
              f'{min_section}',
              'np.savetxt("energy.txt",\n'
              ' np.array([system.get_potential_energy()]))',
              'np.savetxt("forces.txt", system.get_forces())',
              'write("config.xyz", system)',
              sep='\n', file=quippy_script)

    # Run the process. communicate() drains the stdout/stderr pipes while
    # waiting - plain wait() can deadlock if the child fills an OS pipe
    # buffer. Also avoid naming the local 'subprocess' (shadows the module)
    proc = Popen(GTConfig.quippy_gap_command + ['gap.py'],
                 shell=False, stdout=PIPE, stderr=PIPE)
    proc.communicate()

    # Grab the energy from the output after unsetting it
    try:
        configuration.load(filename='config.xyz')
        configuration.energy = np.loadtxt('energy.txt')

    except IOError:
        raise GAPFailed('Failed to calculate energy with the GAP')

    # Grab the final forces from the numpy array
    configuration.forces = np.loadtxt('forces.txt')

    return configuration
@work_in_tmp_dir(kept_exts=['.traj'])
def run_dftb(configuration, max_force, traj_name=None):
    """Run periodic DFTB+ on this configuration. Will set configuration.energy
    and configuration.forces as their calculated values at the TB-DFT level

    --------------------------------------------------------------------------
    :param configuration: (gaptrain.configurations.Configuration)
    :param max_force: (float) or None
    :param traj_name: (str) or None
    :raises MethodFailed: If DFTB+ could not produce an energy
    """
    from ase.optimize import BFGS

    atoms = configuration.ase_atoms()
    tb_calc = DFTB(atoms=atoms,
                   kpts=(1, 1, 1),
                   Hamiltonian_Charge=configuration.charge)
    atoms.set_calculator(tb_calc)

    try:
        configuration.energy = atoms.get_potential_energy()

        if max_force is not None:
            optimiser = BFGS(atoms, trajectory=traj_name)
            optimiser.run(fmax=float(max_force))
            configuration.n_opt_steps = optimiser.get_number_of_steps()
            # Copy the (possibly relaxed) coordinates back
            set_configuration_atoms_from_ase(configuration, atoms)

    except ValueError:
        raise MethodFailed('DFTB+ failed to generate an energy')

    configuration.forces = atoms.get_forces()
    configuration.partial_charges = atoms.get_charges()

    # Return self to allow for multiprocessing
    return configuration
@work_in_tmp_dir(kept_exts=['.traj'])
def run_cp2k(configuration, max_force):
    """Run periodic CP2K on this configuration. Will set configuration.energy
    and configuration.forces as their calculated values at the DFT level.

    --------------------------------------------------------------------------
    :param configuration: (gaptrain.configurations.Configuration)
    :param max_force: (float) or None. Must be None - geometry optimisation
                      with CP2K is not implemented
    :raises RuntimeError: If $CP2K_BASIS_FOLDER is unset or CP2K produced no
                          output file
    :raises NotImplementedError: If the configuration contains elements other
                                 than O and H
    """
    # Geometry optimisation with CP2K is not implemented
    assert max_force is None

    if 'CP2K_BASIS_FOLDER' not in os.environ:
        raise RuntimeError('Could not execute CP2K. Set the environment '
                           'variable for the directory containing basis sets '
                           '$CP2K_BASIS_FOLDER')

    # The generated input file hard-codes basis sets/potentials for O and H
    if set([atom.label for atom in configuration.atoms]) != {'O', 'H'}:
        raise NotImplementedError('CP2K input files only built for O/H '
                                  'containing configurations')

    basis_dir = os.environ['CP2K_BASIS_FOLDER']
    # File paths below are built by concatenation, so ensure a trailing slash
    if not basis_dir.endswith('/'):
        basis_dir += '/'

    configuration.save(filename='init.xyz')
    a, b, c = configuration.box.size

    # Write a single-point energy+forces input file. The XC section is a
    # revPBE0-TC-D3 hybrid setup with ADMM to speed up the HF exchange
    with open('cp2k.inp', 'w') as inp_file:
        print("&GLOBAL",
              " PROJECT name",
              " RUN_TYPE ENERGY_FORCE",
              " PRINT_LEVEL LOW",
              "&END GLOBAL",
              "",
              "&FORCE_EVAL",
              " &DFT",
              f" BASIS_SET_FILE_NAME {basis_dir}GTH_BASIS_SETS",
              f" BASIS_SET_FILE_NAME {basis_dir}BASIS_ADMM",
              f" POTENTIAL_FILE_NAME {basis_dir}POTENTIAL",
              " &MGRID",
              " CUTOFF 400",
              " &END MGRID",
              " &SCF",
              " SCF_GUESS ATOMIC",
              " MAX_SCF 20",
              " EPS_SCF 5.0E-7",
              " &OT",
              " MINIMIZER DIIS",
              " PRECONDITIONER FULL_ALL",
              " &END OT",
              " &OUTER_SCF",
              " MAX_SCF 20",
              " EPS_SCF 5.0E-7",
              " &END OUTER_SCF",
              " &END SCF",
              " &QS",
              " EPS_DEFAULT 1.0E-12",
              " EPS_PGF_ORB 1.0E-14",
              " EXTRAPOLATION_ORDER 5",
              " &END QS",
              " &XC # revPBE0-TC-D3",
              " &XC_FUNCTIONAL",
              " &PBE",
              " PARAMETRIZATION REVPBE",
              " SCALE_X 0.75",
              " SCALE_C 1.0",
              " &END",
              " &END XC_FUNCTIONAL",
              " &HF",
              " FRACTION 0.25",
              " &SCREENING",
              " EPS_SCHWARZ 1.0E-6",
              " SCREEN_ON_INITIAL_P FALSE",
              " &END",
              " &MEMORY",
              " MAX_MEMORY 37000",
              " EPS_STORAGE_SCALING 0.1",
              " &END",
              " &INTERACTION_POTENTIAL",
              " POTENTIAL_TYPE TRUNCATED",
              " CUTOFF_RADIUS 6.0",
              f" T_C_G_DATA {basis_dir}t_c_g.dat",
              " &END",
              " &HF_INFO",
              " &END HF_INFO",
              " &END",
              " &VDW_POTENTIAL",
              " POTENTIAL_TYPE PAIR_POTENTIAL",
              " &PAIR_POTENTIAL",
              " TYPE DFTD3",
              " R_CUTOFF 15",
              " LONG_RANGE_CORRECTION TRUE",
              " REFERENCE_FUNCTIONAL revPBE0",
              f" PARAMETER_FILE_NAME {basis_dir}dftd3.dat",
              " &END",
              " &END",
              " &XC_GRID",
              " XC_DERIV SPLINE2",
              " &END",
              " &END XC",
              " &AUXILIARY_DENSITY_MATRIX_METHOD",
              " METHOD BASIS_PROJECTION",
              " ADMM_PURIFICATION_METHOD MO_DIAG",
              " &END AUXILIARY_DENSITY_MATRIX_METHOD",
              " &END DFT",
              " &SUBSYS",
              " &TOPOLOGY",
              " COORD_FILE_NAME init.xyz",
              " COORD_FILE_FORMAT XYZ",
              " CONN_FILE_FORMAT GENERATE",
              " &END TOPOLOGY",
              " &CELL",
              f" ABC [angstrom] {a:.2f} {b:.2f} {c:.2f}",
              " &END CELL",
              " &KIND H",
              " BASIS_SET TZV2P-GTH",
              " BASIS_SET AUX_FIT cpFIT3",
              " POTENTIAL GTH-PBE-q1",
              " &END KIND",
              " &KIND O",
              " BASIS_SET TZV2P-GTH",
              " BASIS_SET AUX_FIT cpFIT3",
              " POTENTIAL GTH-PBE-q6",
              " &END KIND",
              " &END SUBSYS",
              " &PRINT",
              " &FORCES ON",
              " &END FORCES",
              " &END PRINT",
              "&END FORCE_EVAL",
              "",
              sep='\n', file=inp_file)

    # Run the calculation
    calc = Popen(GTConfig.cp2k_command + ['-o', 'cp2k.out', 'cp2k.inp'],
                 shell=False)
    calc.communicate()

    if not os.path.exists('cp2k.out'):
        raise RuntimeError('CP2K failed')

    # Parse the energy and the first block of atomic forces from the output
    set_energy_forces_cp2k_out(configuration, out_filename='cp2k.out')

    return configuration
def get_gp_var_quip_out(configuration, out_filename='quip.out'):
    """
    Given a QUIP output file extract the numpy array of atomic variances for
    each set of atoms in the output file

    :param configuration: (gt.Configuration) Only used for its number of atoms
    :param out_filename: (str) Name of the QUIP output file to parse
    :return: (list(np.ndarray)) One per-atom variance array per xyz frame
    :raises RuntimeError: If the expected lines could not be parsed
    """
    # Only 'AT' prefixed lines hold the echoed xyz data. Use a context
    # manager so the file handle is closed (it previously leaked)
    with open(out_filename, 'r') as out_file:
        out_lines = [line for line in out_file if line.startswith('AT')]

    # The first frame starts at the atom-count line of the first xyz block
    first_line_idx = None
    for i, line in enumerate(out_lines):
        try:
            if int(line.split()[-1]) == len(configuration.atoms):
                first_line_idx = i
                break

        except ValueError:
            continue

    if first_line_idx is None:
        raise RuntimeError('Could not extract the first line')

    gp_vars = []
    n_atoms = len(configuration.atoms)

    # NOTE(review): the frame stride below (n_atoms+4) disagrees with the
    # per-frame offset (n_atoms+2) - confirm against the QUIP output format
    # if more than one frame is ever parsed
    for i, line in enumerate(out_lines[first_line_idx:][::n_atoms+4]):
        atom_vars = []

        # Go through each xyz section and grab the predicted atomic variance,
        # the 4th-from-last whitespace separated column
        first_line = first_line_idx + i*(n_atoms+2) + 2
        for xyz_line in out_lines[first_line:first_line+n_atoms]:
            try:
                atom_var = float(xyz_line.split()[-4])
                atom_vars.append(atom_var)

            except (ValueError, IndexError):
                raise RuntimeError('Could not extract the atomic var')

        gp_vars.append(np.array(atom_vars))

    return gp_vars
def set_energy_forces_cp2k_out(configuration, out_filename='cp2k.out'):
"""
Set the energy and forces of a configuration from a CP2K output file
:param configuration: (gt.Configuration)
:param out_filename: (str)
"""
n_atoms = len(configuration.atoms)
forces = []
out_lines = open(out_filename, 'r').readlines()
for i, line in enumerate(out_lines):
"""
Total energy: -17.23430883483457
"""
if 'Total energy:' in line:
# Convert from Ha to eV
configuration.energy = ha_to_ev * float(line.split()[-1])
# And grab the first set of atomic forces
if 'ATOMIC FORCES' in line:
logger.info('Found CP2K forces')
"""
Format e.g.:
ATOMIC FORCES in [a.u.]
# Atom Kind Element X Y Z
1 1 O 0.02872261 0.00136975 0.02168759
2 2 H -0.00988376 0.02251862 -0.01740272
3 2 H -0.01791165 -0.02390685 -0.00393702
"""
for f_line in out_lines[i+3:i+3+n_atoms]:
fx, fy, fz = | |
"""The Latticework AmberOS component."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from datetime import timedelta
import logging
from typing import Any
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.debounce import Debouncer
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_HOST,
CONF_ENTITY_ID,
CONF_FILENAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SSL,
CONF_TIMEOUT,
CONF_USERNAME
)
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from latticework_amberos.latticework_amberos import LatticeworkAmberOS
from latticework_amberos.exceptions import (
AmberOSLoginInvalidException,
AmberOSFileIndexNotEnable,
AmberOSFileIndexNotFound
)
from latticework_amberos.api.amberos_information_api import AmberOSInformation
from latticework_amberos.api.amberos_network_api import AmberOSNetwork
from latticework_amberos.api.amberos_system_api import AmberOSSystem
from latticework_amberos.api.amberos_security_api import AmberOSSecurity
from latticework_amberos.api.amberos_storage_api import AmberOSStorage
from latticework_amberos.api.amberos_update_api import AmberOSUpdate
from latticework_amberos.api.amberos_utilization_api import AmberOSUtilization
from latticework_amberos.api.amberos_backup_api import AmberOSBackup
from latticework_amberos.api.amberos_cast_api import AmberOSCast
from .const import (
ATTRIBUTION,
AMBEROS_API,
CONF_DEVICE_TOKEN,
CONF_REASON,
CONF_SERIAL,
COORDINATOR_CAST,
COORDINATOR_CENTRAL,
DEFAULT_CAST_SCAN_INTERVAL,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
PLATFORMS,
SERVICE_CAST_PLAY,
SERVICE_REBOOT,
SERVICE_SHUTDOWN,
SERVICES,
SYSTEM_LOADED,
UNDO_UPDATE_LISTENER,
AmberOSSensorEntityDescription,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Latticework AmberOS sensors.

    Creates the API wrapper, stores it in ``hass.data``, registers the
    options-update listener, the services and two data-update coordinators
    (a fast one for cast state, a slower central one), then forwards the
    entry to the sensor platforms. Returns False when login fails.
    """
    api = AmberOSApi(hass, entry)
    try:
        await api.async_setup()
    except AmberOSLoginInvalidException:
        # Without a valid login none of the coordinators below can work -
        # abort the setup instead of continuing with a broken API object
        _LOGGER.error("AmberOSLoginInvalidException")
        return False

    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][entry.unique_id] = {
        UNDO_UPDATE_LISTENER: entry.add_update_listener(_async_update_listener),
        AMBEROS_API: api,
        SYSTEM_LOADED: True,
    }

    # Services
    await _async_setup_services(hass)

    async def async_coordinator_update_data_cast() -> None:
        """Fetch all device and sensor data from api."""
        try:
            await api.async_cast_update()
        except Exception as err:
            raise UpdateFailed(f"Error communicating with API: {err}") from err
        return None

    hass.data[DOMAIN][entry.unique_id][COORDINATOR_CAST] = AmberOSCastCoordinator(
        hass,
        _LOGGER,
        name=f"{entry.unique_id}_cast",
        update_method=async_coordinator_update_data_cast,
        update_interval=timedelta(seconds=DEFAULT_CAST_SCAN_INTERVAL),
    )

    async def async_coordinator_update_data_central() -> None:
        """Fetch all device and sensor data from api."""
        try:
            await api.async_update()
        except Exception as err:
            raise UpdateFailed(f"Error communicating with API: {err}") from err
        return None

    hass.data[DOMAIN][entry.unique_id][COORDINATOR_CENTRAL] = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name=f"{entry.unique_id}_central",
        update_method=async_coordinator_update_data_central,
        update_interval=timedelta(
            minutes=entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
        ),
    )

    hass.config_entries.async_setup_platforms(entry, PLATFORMS)

    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a Latticework AmberOS config entry and its platforms."""
    if not await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
        return False

    # Undo the options listener, log out from the device, then drop the
    # entry data (pop last so a failed logout keeps the entry reachable)
    entry_data = hass.data[DOMAIN][entry.unique_id]
    entry_data[UNDO_UPDATE_LISTENER]()
    await entry_data[AMBEROS_API].async_unload()
    hass.data[DOMAIN].pop(entry.unique_id)

    return True
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Handle options update by reloading the config entry."""
    await hass.config_entries.async_reload(entry.entry_id)
async def _async_setup_services(hass: HomeAssistant) -> None:
    """Register the reboot/shutdown/cast-play services with one handler."""

    async def service_handler(call: ServiceCall) -> None:
        """Handle a service call by resolving the target device, then
        dispatching on ``call.service``."""
        serial = call.data.get(CONF_SERIAL)
        reason = call.data.get(CONF_REASON)
        amberos_devices = hass.data[DOMAIN]
        filepath = call.data.get(CONF_FILENAME)
        entity_id = call.data.get(CONF_ENTITY_ID)

        amberos_device = None
        if serial:
            amberos_device = amberos_devices.get(serial)
        elif len(amberos_devices) == 1:
            # Single device configured - target it implicitly
            amberos_device = next(iter(amberos_devices.values()))
            serial = next(iter(amberos_devices))
        else:
            if call.service == SERVICE_CAST_PLAY:
                # Route the cast to the device whose hostname appears in the
                # target entity id. Guard against a missing entity_id, which
                # would otherwise raise TypeError on the subscription below
                for _, device in amberos_devices.items():
                    amberos_api = device[AMBEROS_API]
                    if (entity_id
                            and amberos_api.network.hostname.replace("-", "_")
                            in entity_id[0]):
                        await amberos_api.async_cast_play(f"/share/{filepath}")
                        return
            else:
                _LOGGER.error(
                    "More than one AmberOS configured, must specify one of serials %s",
                    sorted(amberos_devices),
                )
                return

        if not amberos_device:
            _LOGGER.error("AmberOS with specified serial %s not found", serial)
            return

        if call.service == SERVICE_CAST_PLAY:
            if not filepath:
                # Log the offending filename (previously logged the serial)
                _LOGGER.error(
                    "AmberOS with specified filename %s not found", filepath
                )
                return

        _LOGGER.debug("%s AmberOS with serial %s", call.service, serial)
        amberos_api = amberos_device[AMBEROS_API]
        # Flag the device as busy/unloaded while the service executes
        amberos_device[SYSTEM_LOADED] = False
        if call.service == SERVICE_REBOOT:
            await amberos_api.async_reboot()
        elif call.service == SERVICE_SHUTDOWN:
            await amberos_api.async_shutdown(reason)
        elif call.service == SERVICE_CAST_PLAY:
            await amberos_api.async_cast_play(filepath)

    for service in SERVICES:
        hass.services.async_register(DOMAIN, service, service_handler)
class AmberOSApi:
    """Class to interface with Latticework AmberOS API.

    Wraps the ``LatticeworkAmberOS`` client and exposes its sub-APIs
    (information, network, security, storage, backup, system, upgrade,
    utilisation, cast). Entities subscribe to the API keys they need so
    that only the required endpoints are polled on each update.
    """

    def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:
        """Initialize the API wrapper class."""
        self._hass = hass
        self._entry = entry
        # Base URL, used as the device configuration link
        if entry.data.get(CONF_SSL):
            self.config_url = f"https://{entry.data[CONF_HOST]}"
        else:
            self.config_url = f"http://{entry.data[CONF_HOST]}"
        self.initialized = False

        # AmberOS APIs - populated by async_setup/_fetch_device_configuration
        self.amberos: LatticeworkAmberOS = None
        self.information: AmberOSInformation = None
        self.network: AmberOSNetwork = None
        self.security: AmberOSSecurity = None
        self.storage: AmberOSStorage = None
        self.backup: AmberOSBackup = None
        self.system: AmberOSSystem = None
        self.upgrade: AmberOSUpdate = None
        self.utilisation: AmberOSUtilization = None
        self.cast: AmberOSCast = None

        # Should we fetch them - api_key -> set of subscribed entity ids
        self._fetching_entities: dict[str, set[str]] = {}
        self._with_information = True
        self._with_security = True
        self._with_storage = True
        self._with_backup = True
        self._with_system = True
        self._with_upgrade = True
        self._with_utilisation = True
        self._with_cast = True

    async def async_setup(self) -> None:
        """Start interacting with the AmberOS."""
        self.amberos = LatticeworkAmberOS(
            self._entry.data[CONF_HOST],
            self._entry.data[CONF_PORT],
            self._entry.data[CONF_USERNAME],
            self._entry.data[CONF_PASSWORD],
            self._entry.data[CONF_SSL],
            timeout=self._entry.options.get(CONF_TIMEOUT),
            device_token=self._entry.data.get(CONF_DEVICE_TOKEN),
        )
        # login is blocking -> run in the executor
        await self._hass.async_add_executor_job(self.amberos.login)

        self._async_setup_api_requests()

        await self._hass.async_add_executor_job(self._fetch_device_configuration)
        await self.async_update()
        self.initialized = True

    @callback
    def subscribe(self, api_key: str, unique_id: str) -> Callable[[], None]:
        """Subscribe an entity to API fetches; returns an unsubscribe callback."""
        _LOGGER.debug("Subscribe new entity: %s", unique_id)
        if api_key not in self._fetching_entities:
            self._fetching_entities[api_key] = set()
        self._fetching_entities[api_key].add(unique_id)

        @callback
        def unsubscribe() -> None:
            """Unsubscribe an entity from API fetches (when disable)."""
            _LOGGER.debug("Unsubscribe entity: %s", unique_id)
            self._fetching_entities[api_key].remove(unique_id)
            if len(self._fetching_entities[api_key]) == 0:
                self._fetching_entities.pop(api_key)

        return unsubscribe

    @callback
    def _async_setup_api_requests(self) -> None:
        """Determine if we should fetch each API, if one entity needs it."""
        # Entities not added yet, fetch all
        if not self._fetching_entities:
            _LOGGER.debug(
                "Entities not added yet, fetch all for '%s'", self._entry.unique_id
            )
            return

        # Determine if we should fetch an API
        # NOTE(review): unlike the other flags below, _with_system is derived
        # from the device's available APIs, not from entity subscriptions -
        # confirm this asymmetry is intentional
        self._with_system = bool(self.amberos.apis.get(AmberOSSystem.API_KEY))
        self._with_security = bool(
            self._fetching_entities.get(AmberOSSecurity.API_KEY)
        )
        self._with_storage = bool(self._fetching_entities.get(AmberOSStorage.API_KEY))
        self._with_upgrade = bool(self._fetching_entities.get(AmberOSUpdate.API_KEY))
        self._with_utilisation = bool(
            self._fetching_entities.get(AmberOSUtilization.API_KEY)
        )
        self._with_information = bool(
            self._fetching_entities.get(AmberOSInformation.API_KEY)
        )
        self._with_backup = bool(
            self._fetching_entities.get(AmberOSBackup.API_KEY)
        )
        self._with_cast = bool(
            self._fetching_entities.get(AmberOSCast.API_KEY)
        )

        # Reset not used API, information is not reset since it's used in device_info
        if not self._with_security:
            _LOGGER.debug(
                "Disable security api from being updated for '%s'",
                self._entry.unique_id,
            )
            self.amberos.reset(self.security)
            self.security = None

        if not self._with_storage:
            _LOGGER.debug(
                "Disable storage api from being updated for '%s'", self._entry.unique_id
            )
            self.amberos.reset(self.storage)
            self.storage = None

        if not self._with_system:
            _LOGGER.debug(
                "Disable system api from being updated for '%s'", self._entry.unique_id
            )
            self.amberos.reset(self.system)
            self.system = None

        if not self._with_upgrade:
            _LOGGER.debug(
                "Disable upgrade api from being updated for '%s'", self._entry.unique_id
            )
            self.amberos.reset(self.upgrade)
            self.upgrade = None

        if not self._with_utilisation:
            _LOGGER.debug(
                "Disable utilisation api from being updated for '%s'",
                self._entry.unique_id,
            )
            self.amberos.reset(self.utilisation)
            self.utilisation = None

        if not self._with_backup:
            _LOGGER.debug(
                "Disable backup api from being updated for '%s'",
                self._entry.unique_id,
            )
            self.amberos.reset(self.backup)
            self.backup = None

        if not self._with_cast:
            _LOGGER.debug(
                "Disable cast api from being updated for '%s'",
                self._entry.unique_id,
            )
            self.amberos.reset(self.cast)
            self.cast = None

    def _fetch_device_configuration(self) -> None:
        """Fetch initial device config (blocking; run in the executor)."""
        # Information and network are always fetched - used for device_info
        self.information = self.amberos.information
        self.information.update()

        self.network = self.amberos.network
        self.network.update()

        if self._with_security:
            _LOGGER.debug("Enable security api updates for '%s'", self._entry.unique_id)
            self.security = self.amberos.security

        if self._with_storage:
            _LOGGER.debug("Enable storage api updates for '%s'", self._entry.unique_id)
            self.storage = self.amberos.storage

        if self._with_upgrade:
            _LOGGER.debug("Enable upgrade api updates for '%s'", self._entry.unique_id)
            self.upgrade = self.amberos.upgrade

        if self._with_system:
            _LOGGER.debug("Enable system api updates for '%s'", self._entry.unique_id)
            self.system = self.amberos.system

        if self._with_utilisation:
            _LOGGER.debug(
                "Enable utilisation api updates for '%s'", self._entry.unique_id
            )
            self.utilisation = self.amberos.utilisation

        if self._with_backup:
            _LOGGER.debug("Enable backup api updates for '%s'", self._entry.unique_id)
            self.backup = self.amberos.backup

        if self._with_cast:
            _LOGGER.debug("Enable cast api updates for '%s'", self._entry.unique_id)
            self.cast = self.amberos.cast

    async def async_reboot(self) -> None:
        """Reboot AmberOS."""
        try:
            await self._hass.async_add_executor_job(self.system.reboot)
        except AmberOSLoginInvalidException as err:
            _LOGGER.error(
                "Reboot of '%s' not possible, please try again later",
                self._entry.unique_id,
            )
            _LOGGER.debug("Exception:%s", err)

    async def async_shutdown(self, reason=None) -> None:
        """Shutdown AmberOS.

        :param reason: Optional reason string forwarded to the device.
        """
        try:
            await self._hass.async_add_executor_job(self.system.shutdown, reason)
        except AmberOSLoginInvalidException as err:
            _LOGGER.error(
                "Shutdown of '%s' not possible, please try again later",
                self._entry.unique_id,
            )
            _LOGGER.debug("Exception:%s", err)

    async def async_cast_play(self, filename) -> None:
        """AmberOS Cast Play.

        :param filename: Path of the media file on the device to play.
        """
        try:
            # Use the HA installation uuid as the cast session id
            sid = self._hass.data.get("core.uuid", "0")
            await self._hass.async_add_executor_job(
                self.cast.play_media, sid, "", filename, False)
        except AmberOSLoginInvalidException as err:
            _LOGGER.error(
                "Cast play of '%s' not possible, please try again later",
                self._entry.unique_id,
            )
            _LOGGER.debug("Exception:%s", err)
        except AmberOSFileIndexNotEnable as err:
            _LOGGER.error(
                "File Index is not enabled, please enable it"
            )
            _LOGGER.debug("Exception:%s", err)
        except AmberOSFileIndexNotFound as err:
            _LOGGER.error(
                "Cast play on which '%s' is not found, please try again later",
                filename,
            )
            _LOGGER.debug("Exception:%s", err)

    async def async_unload(self) -> None:
        """Stop interacting with the AmberOS and prepare for removal from hass."""
        try:
            await self._hass.async_add_executor_job(self.amberos.logout)
        except AmberOSLoginInvalidException as err:
            _LOGGER.debug(
                "Logout from '%s' not possible:%s", self._entry.unique_id, err
            )

    async def async_update(self, now: timedelta | None = None) -> None:
        """Update function for updating API information."""
        _LOGGER.debug("Start data update for '%s'", self._entry.unique_id)
        self._async_setup_api_requests()

        try:
            await self._hass.async_add_executor_job(
                self.amberos.update, self._with_information
            )
        except AmberOSLoginInvalidException:
            # During initial setup, propagate so async_setup_entry can abort
            if not self.initialized:
                raise
            _LOGGER.warning(
                "Connection error during update, fallback by reloading the entry"
            )
            _LOGGER.debug(
                "Connection error during update of '%s'",
                self._entry.unique_id,
            )
            await self._hass.config_entries.async_reload(self._entry.entry_id)
            return

    async def async_cast_update(self, now: timedelta | None = None) -> None:
        """Update function for updating API cast information."""
        _LOGGER.debug("Start data update for '%s'", self._entry.unique_id)

        try:
            await self._hass.async_add_executor_job(
                self.amberos.cast_update, self._with_cast
            )
        except AmberOSLoginInvalidException:
            # During initial setup, propagate so async_setup_entry can abort
            if not self.initialized:
                raise
            _LOGGER.warning(
                "Connection error during update, fallback by reloading the entry"
            )
            _LOGGER.debug(
                "Connection error during update of '%s'",
                self._entry.unique_id,
            )
            await self._hass.config_entries.async_reload(self._entry.entry_id)
            return
class LwAmberOSBaseEntity(CoordinatorEntity):
"""Representation of a Latticewokr AmberOS base entry."""
entity_description: AmberOSSensorEntityDescription
unique_id: str
def __init__(
self,
api: AmberOSApi,
coordinator: DataUpdateCoordinator[dict[str, dict[str, Any]]],
description: AmberOSSensorEntityDescription,
) -> None:
"""Initialize the Latticework | |
hist[0][np.argmax(hist[1])]
self.preview3DViewer.volumeviewer.setLevels([max, hist[0][-1]])
def onManualCenter(self, active):
"""
Activates the manual center portion of the ProjectionViewer.
This is connected to the corresponding toolbar signal
Parameters
----------
active : bool
Boolean specifying to activate or not. True activate, False deactivate
"""
if active:
self.viewstack.setCurrentWidget(self.projectionViewer)
self.projectionViewer.hideMBIR()
self.projectionViewer.showCenterDetection()
else:
self.projectionViewer.hideCenterDetection()
def onMBIR(self, active):
"""
Slot to activate MBIR slurm generation menu. Not currently in use.
"""
if active:
self.viewstack.setCurrentWidget(self.projectionViewer)
self.projectionViewer.hideCenterDetection()
self.projectionViewer.showMBIR()
else:
self.projectionViewer.hideMBIR()
    def onROIselection(self):
        """
        Shows a rectangular roi to select portion of data to reconstruct.
        (Not implemented yet.) Takes no arguments; always activates the
        ROI selection on the projection viewer.
        """
        self.viewstack.setCurrentWidget(self.projectionViewer)
        self.projectionViewer.addROIselection()
class MBIRViewer(QtGui.QWidget):
"""
Widget to generate .slurm files used in NERSC-based MBIR
"""
def __init__(self, data, path, *args, **kwargs):
super(MBIRViewer, self).__init__(*args, **kwargs)
self.mdata = data.header
if path is list:
paths = path[0]
else:
paths = path
self.path = paths
self.data = data
self.center = 0
self.cor_detection_funcs = ['Phase Correlation', 'Vo', 'Nelder-Mead']
self.runButton = QtGui.QPushButton(parent=self)
self.runButton.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("gui/icons_34.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.runButton.setIcon(icon)
# self.runButton.setToolTip("Submit MBIR job to NERSC")
self.runButton.setToolTip("Generate slurm file")
self.cor_widget = QtGui.QWidget() #parent widget for center of rotation input
# set up widget for user choice of manual or auto COR detection
self.cor_Holder = QtGui.QGroupBox('Center of Rotation', parent = self.cor_widget)
manual_cor = QtGui.QRadioButton('Manually input center of rotation')
manual_cor.clicked.connect(self.manualCOR)
auto_cor = QtGui.QRadioButton('Auto-detect center of rotation')
auto_cor.clicked.connect(self.autoCOR)
manual_cor.setChecked(True)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(manual_cor)
vbox.addWidget(auto_cor)
self.cor_Holder.setLayout(vbox)
# series of widgets for manual COR input
self.cor_Value = QtGui.QStackedWidget(parent = self.cor_widget)
self.manual_tab = QtGui.QWidget()
self.val_box = QtGui.QDoubleSpinBox(parent = self.manual_tab)
self.val_box.setRange(0,10000)
self.val_box.setDecimals(1)
self.val_box.setValue(int(data.shape[1])/2)
text_label = QtGui.QLabel('Center of Rotation: ', parent = self.manual_tab)
text_layout = QtGui.QHBoxLayout()
text_layout.addWidget(text_label)
text_layout.addWidget(self.val_box)
self.manual_tab.setLayout(text_layout)
self.auto_tab = QtGui.QWidget()
self.auto_tab_layout = QtGui.QVBoxLayout()
self.cor_function = functionwidgets.FunctionWidget(name="Center Detection", subname="Phase Correlation",
package=reconpkg.packages[config.names["Phase Correlation"][1]])
self.cor_params = pg.parametertree.Parameter.create(name=self.cor_function.name,
children=config.parameters[self.cor_function.subfunc_name], type='group')
self.cor_param_tree = pg.parametertree.ParameterTree()
self.cor_param_tree.setMinimumHeight(200)
self.cor_param_tree.setMinimumWidth(200)
self.cor_param_tree.setParameters(self.cor_params,showTop = False)
for key, val in self.cor_function.param_dict.iteritems():
if key in [p.name() for p in self.cor_params.children()]:
self.cor_params.child(key).setValue(val)
self.cor_params.child(key).setDefault(val)
self.cor_method_box = QtGui.QComboBox()
self.cor_method_box.currentIndexChanged.connect(self.changeCORfunction)
for item in self.cor_detection_funcs:
self.cor_method_box.addItem(item)
cor_method_label = QtGui.QLabel('COR detection function: ')
cor_method_layout = QtGui.QHBoxLayout()
cor_method_layout.addWidget(cor_method_label)
cor_method_layout.addWidget(self.cor_method_box)
self.auto_tab_layout.addLayout(cor_method_layout)
self.auto_tab_layout.addWidget(self.cor_param_tree)
self.auto_tab.setLayout(self.auto_tab_layout)
# set up COR stackwidget
self.cor_Value.addWidget(self.manual_tab)
self.cor_Value.addWidget(self.auto_tab)
# set up COR widget
v = QtGui.QVBoxLayout()
v.addWidget(self.cor_Holder)
v.addWidget(self.cor_Value)
self.cor_widget.setLayout(v)
self.runButton.clicked.connect(self.write_slurm)
self.mbirParams = pg.parametertree.ParameterTree()
self.mbirParams.setMinimumHeight(230)
params = [{'name': 'Dataset path', 'type': 'str'},
{'name': 'Z start', 'type': 'int', 'value': 0, 'default': 0},
{'name': 'Z num elts', 'type': 'int', 'value': int(data.shape[-1]) ,
'default': int(data.shape[-1])},
{'name': 'Smoothness', 'type': 'float', 'value': 0.15, 'default': 0.15},
{'name': 'Zinger thresh', 'type': 'float', 'value': 5, 'default': 5},
{'name': 'View subsample factor', 'type': 'int', 'value': 2, 'default': 2},
{'name': 'Output folder', 'type':'str', 'value':'Results', 'default': 'Results'}]
self.mbir_params = pg.parametertree.Parameter.create(name='MBIR Parameters', type='group', children=params)
self.mbirParams.setParameters(self.mbir_params,showTop=False)
right_menu = QtGui.QSplitter(self)
right_menu.setOrientation(QtCore.Qt.Vertical)
button_holder = QtGui.QStackedWidget()
button_holder.addWidget(self.runButton)
right_menu.addWidget(button_holder)
right_menu.addWidget(self.cor_widget)
container = QtGui.QWidget()
container_layout = QtGui.QVBoxLayout()
container_layout.addWidget(right_menu)
container.setLayout(container_layout)
left_menu = QtGui.QSplitter(self)
left_menu.addWidget(self.mbirParams)
left_menu.addWidget(container)
h = QtGui.QHBoxLayout()
h.setContentsMargins(0, 0, 0, 0)
h.addWidget(left_menu)
self.setLayout(h)
    def changeCORfunction(self, index):
        """
        Changes COR auto-detect function used based on index of combobox

        Rebuilds the function widget and its parameter tree for the newly
        selected detection function, then swaps the tree into the auto tab.

        :param index: (int) index of the selected entry in cor_method_box
        """
        subname = self.cor_method_box.itemText(index)
        # Remove the old parameter tree before building the replacement
        self.auto_tab_layout.removeWidget(self.cor_param_tree)
        self.cor_function = functionwidgets.FunctionWidget(name="Center Detection", subname=subname,
                                                           package=reconpkg.packages[config.names[subname][1]])
        self.cor_params = pg.parametertree.Parameter.create(name=self.cor_function.name,
                                                            children=config.parameters[self.cor_function.subfunc_name], type='group')
        self.cor_param_tree = pg.parametertree.ParameterTree()
        self.cor_param_tree.setMinimumHeight(200)
        self.cor_param_tree.setMinimumWidth(200)
        self.cor_param_tree.setParameters(self.cor_params, showTop=False)
        # Seed the tree with the function's current parameter values
        for key, val in self.cor_function.param_dict.iteritems():
            if key in [p.name() for p in self.cor_params.children()]:
                self.cor_params.child(key).setValue(val)
                self.cor_params.child(key).setDefault(val)
        self.auto_tab_layout.addWidget(self.cor_param_tree)
        self.auto_tab.setLayout(self.auto_tab_layout)
    def manualCOR(self):
        """Slot to receive signal when manual COR detection is chosen;
        shows the manual-entry tab."""
        self.cor_Value.setCurrentWidget(self.manual_tab)
    def autoCOR(self):
        """Slot to receive signal when automatic COR detection is chosen;
        shows the auto-detection tab."""
        self.cor_Value.setCurrentWidget(self.auto_tab)
def loadCOR(self):
"""
Get dataset COR, either automatically or from user-entered value
:return:
"""
widget = self.cor_Value.currentWidget()
if widget is self.manual_tab:
return self.val_box.value()
else:
if self.parentWidget():
return self.find_COR(self.cor_function.subfunc_name)
else:
return -1
def find_COR(self, cor_function):
"""Auto-detect COr based on 'cor_function' parameter"""
if not cor_function in self.cor_detection_funcs:
return -1
else:
if cor_function == 'Phase Correlation':
proj1, proj2 = map(self.data.fabimage.__getitem__, (0,-1))
kwargs = {'proj1' : proj1, 'proj2' : proj2}
elif cor_function == 'Vo':
kwargs = {'tomo' : np.ascontiguousarray(self.data.fabimage[:, :, :])}
elif cor_function == 'Nelder-Mead':
kwargs = {'tomo' : np.ascontiguousarray(self.data.fabimage[:, :, :]),
'theta' : tomopy.angles(int(self.data.shape[0]),ang1=90,ang2=270)}
else:
return -1
for child in self.cor_params.children():
kwargs[child.name()] = child.value()
val = self.cor_function._function(**kwargs)
return val[0] if val is list else val
def write_slurm(self):
"""
A 'slurm' file is a job to run on nersc
"""
import os.path
msg.showMessage("Generating slurm file...", timeout=0)
self.center = self.loadCOR()
if not self.center > 0 or self.center > self.data.shape[0]:
msg.showMessage('Invalid center of rotation')
pass
else:
views = int(self.data[0]) - 1
file_name = self.path.split("/")[-1].split(".")[0]
nodes = int(np.ceil(self.mbir_params.child('Z num elts').value()/ float(24)))
output = os.path.join('/',self.mbir_params.child('Output folder').value(), file_name + '_mbir')
try:
group = self.mdata['archdir'].split("\\")[-1]
px_size = float(self.mdata['pzdist'])*1000
except KeyError:
msg.showMessage('Insufficient metadata to write slurm file.', timeout=0)
if group != file_name:
group_hdf5 = "{}/{}".format(group, file_name)
else:
group_hdf5 = file_name
slurm = '#!/bin/tcsh\n#SBATCH -p regular\n#SBATCH -N {}\n'.format(nodes)
slurm += '#SBATCH -t 4:00:00\n#SBATCH -J {}\n#SBATCH -e {}.err\n#SBATCH -o {}.out\n\n'.format(file_name, file_name, file_name)
slurm += 'setenv OMP_NUM_THREADS 24\nsetenv CRAY_ROOTFS DSL\nmodule load PrgEnv-intel\n'
slurm += 'module load python/2.7.3\nmodule load h5py\nmodule load pil\nmodule load mpi4py\n\n'
slurm += 'mkdir $SCRATCH/LaunchFolder\nmkdir $SCRATCH/Results\n\n'
slurm += 'python XT_MBIR_3D.py --setup_launch_folder --run_reconstruction --Edison'
slurm += ' --input_hdf5 {}/{}.h5'.format(self.mbir_params.child('Dataset path').value(), file_name)
slurm += ' --group_hdf5 /{}'.format(group_hdf5)
slurm += ' --code_launch_folder $SCRATCH/LaunchFolder/'
slurm += ' --output_hdf5 $SCRATCH/Results/{}_mbir/ --x_width {}'.format(file_name, int(self.data.shape[0]))
slurm += ' --recon_x_width {} --num_dark {}'.format(str(int(self.data.shape[0])), str(len(self.data.fabimage.darks)))
slurm += ' --num_bright {} --z_numElts {}'.format(str(self.data.fabimage.flats),self.mbir_params.child('Z num elts').value())
slurm += ' --z_start {} --num_views {}'.format(self.mbir_params.child('Z start').value(), views)
slurm += ' --pix_size {} --rot_center {}'.format(px_size, self.center)
slurm += ' --smoothness {} --zinger_thresh {}'.format(self.mbir_params.child('Smoothness').value(),
self.mbir_params.child('Zinger thresh').value())
slurm += ' --Variance_Est 1 --num_threads 24 --num_nodes {} '.format(nodes)
slurm += '--view_subsmpl_fact {}'.format(self.mbir_params.child('View subsample factor').value())
parent_folder = self.path.split(self.path.split('/')[-1])[0]
write = os.path.join(parent_folder, '{}.slurm'.format(file_name))
with open(write, 'w') as job:
job.write(slurm)
msg.showMessage("Done.", timeout=0)
class ProjectionViewer(QtGui.QWidget):
"""
Class that holds a stack viewer, an ROImageOverlay and a few widgets to allow manual center detection
Attributes
----------
stackViewer : StackViewer
widgets.StackViewer used to display the data
data : loader.StackImage
Image data
imageoverlay_roi : widgets.ROIImageOverlay
Widget used in the cor_widget for manual center detection
selection_roi : pyqtgragh.ROI
ROI for selecting region to reconstruct
cor_widget : QtGui.QWidget
Widget used in manual center detection
setCenterButton : QtGui.QToolButton
Button for setting center value from cor_widget to reconstruction function in pipeline
roi_histogram : pyqtgraph.HistogramLUTWidget
Histogram for imageoverlay
mbir_viewer : MBIRViewer
Menu for generating slurm files for NERSC-based MBIR jobs
cor_box : QtGui.QStackWidget
Widget for holding COR - related widgets
cor_widget : QtGui.QWidget
Widget for holding manual COR-related widgets
auto_cor_widget : QtGui.QWidget
Widget for holding automatic COR-related widgets
Signals
-------
sigCenterChanged(float)
emits float with new center value
Parameters
----------
data : pipeline.loader.StackImage
Raw tomography data as a StackImage
view_label : str
String to show in QLabel lower right hand corner. Where the current index is displayed
center : float
center of rotation value
args
Additional arguments
kwargs
Additional keyword arguments
"""
sigCenterChanged = QtCore.Signal(float)
sigCORChanged = QtCore.Signal(bool)
sigROIWidgetChanged = QtCore.Signal(pg.ROI)
def __init__(self, data, toolbar=None, view_label=None, center=None, paths=None, *args, **kwargs):
super(ProjectionViewer, self).__init__(*args, **kwargs)
self.setMinimumHeight(200)
self.stackViewer = StackViewer(data, view_label=view_label)
self.toolbar = toolbar
self.imageItem = self.stackViewer.imageItem
self.data = self.stackViewer.data
self.normalized = False
self.imgoverlay_roi = ROImageOverlay(self.data, self.imageItem, [0, 0], parent=self.stackViewer.view)
self.imageItem.sigImageChanged.connect(self.imgoverlay_roi.updateImage)
self.stackViewer.view.addItem(self.imgoverlay_roi)
self.roi_histogram = pg.HistogramLUTWidget(image=self.imgoverlay_roi.imageItem, parent=self.stackViewer)
self.roi_histogram.vb.enableAutoRange(self.roi_histogram.vb.YAxis, False) #disable autoscaling for histogram
self.mbir_viewer = MBIRViewer(self.data, path = self.parentWidget().path, parent=self)
# roi to select region of interest
self.selection_roi = None
self.stackViewer.ui.gridLayout.addWidget(self.roi_histogram, 0, 3, 1, 2)
self.stackViewer.keyPressEvent = self.keyPressEvent
self.cor_widget = QtGui.QWidget(self)
self.auto_cor_widget = functionwidgets.CORSelectionWidget(parent=self)
self.cor_widget.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
self.auto_cor_widget.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
self.cor_widget.setMinimumHeight(50)
self.auto_cor_widget.setMinimumHeight(50)
self.cor_box = QtGui.QStackedWidget(self)
self.cor_box.addWidget(self.auto_cor_widget)
self.cor_box.addWidget(self.cor_widget)
self.cor_button_holder = QtGui.QGroupBox(parent = self)
h = QtGui.QHBoxLayout()
self.manual_cor_button = QtGui.QRadioButton('Manually input center of rotation')
self.manual_cor_button.clicked.connect(self.manualCOR)
self.auto_cor_button = QtGui.QRadioButton('Auto-detect center of rotation')
self.auto_cor_button.clicked.connect(self.autoCOR)
self.auto_cor_button.setChecked(True)
write_cor = QtGui.QPushButton('Write COR to metadata')
write_cor.clicked.connect(self.writeCOR)
h.addWidget(self.auto_cor_button)
h.addWidget(self.manual_cor_button)
h.addWidget(write_cor)
self.cor_button_holder.setLayout(h)
# push button for overlay widget's histogram range selection
self.setButton = histDialogButton('Set', parent=self)
self.setButton.connectToHistWidget(self.roi_histogram)
self.stackViewer.ui.gridLayout.addWidget(self.setButton, 1, 3, 1, 2)
clabel = QtGui.QLabel('Rotation Center:')
olabel = QtGui.QLabel('Offset:')
self.centerBox = QtGui.QDoubleSpinBox(parent=self.cor_widget) #QtGui.QLabel(parent=self.cor_widget)
self.centerBox.setDecimals(1)
self.setCenterButton = QtGui.QToolButton()
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("xicam/gui/icons_45.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.setCenterButton.setIcon(icon)
self.setCenterButton.setToolTip('Set center in pipeline')
originBox = QtGui.QLabel(parent=self.cor_widget)
originBox.setText('x={} y={}'.format(0, 0))
center = center if center is not None else data.shape[1]/2.0
self.centerBox.setValue(center) #setText(str(center))
h1 = QtGui.QHBoxLayout()
h1.setAlignment(QtCore.Qt.AlignLeft)
h1.setContentsMargins(0, 0, 0, 0)
h1.addWidget(clabel)
h1.addWidget(self.centerBox)
h1.addWidget(self.setCenterButton)
h1.addWidget(olabel)
h1.addWidget(originBox)
# hide center button since | |
Assert that the returned page shows the page submitted.
self.assertContains(
response,
'Submitted',
status_code=200,
html=False,
)
# Assert that the returned page shows the guardian name.
self.assertContains(
response,
'Guardian Student',
status_code=200,
html=False
)
# Assert that the student slip has actually been submitted.
permission_slip.refresh_from_db()
self.assertEqual(permission_slip.guardian_signature, 'Test Parent Submission')
self.assertTrue(permission_slip.guardian_signature_date)
    def test_slip_view_POST_second_parent_slip(self):
        """A parent submission from a different parent should update the slip."""
        # First get guardian1's slip and fill it out.
        student1 = models.Student.objects.get(person_id='202300001')
        guardian1 = models.Guardian.objects.get(person_id='2001')
        trip = models.FieldTrip.objects.get(id=1)
        permission_slip = models.PermissionSlip.objects.get(
            field_trip=trip,
            student=student1
        )
        slip_link = models.PermissionSlipLink.objects.get(
            permission_slip=permission_slip,
            guardian=guardian1
        )
        slip_url = reverse('permission slip', kwargs={'slip_id': slip_link.link_id})
        # GET first so the view hands us a CSRF token for the POST.
        csrf_token = self.client.get(slip_url).context.get('csrf_token')
        # First submission (guardian1); the response itself is not checked,
        # only the side effect on the stored slip matters here.
        response = self.client.post(
            slip_url,
            {
                'name': 'Test Guardian 1 Submission',
                'electronic_consent': True,
                'csrf_token': csrf_token
            },
        )
        permission_slip.refresh_from_db()
        initial_sig_date = permission_slip.guardian_signature_date
        # At this point the slip should be filled out by guardian1. Lets change
        # that to guardian3 and assert that the database gets that update.
        # First, let's wait a couple of seconds to ensure the time/date is
        # different, as we'll be testing to make sure the database changes that
        # stored/logged value.
        sleep(2)
        guardian3 = models.Guardian.objects.get(person_id='2003')
        slip_link = models.PermissionSlipLink.objects.get(
            permission_slip=permission_slip,
            guardian=guardian3
        )
        slip_url = reverse('permission slip', kwargs={'slip_id': slip_link.link_id})
        csrf_token = self.client.get(slip_url).context.get('csrf_token')
        signature = 'Test Guardian 3 Submission'
        # Second submission (guardian3) must overwrite guardian1's signature.
        response = self.client.post(
            slip_url,
            {
                'name': signature,
                'electronic_consent': True,
                'csrf_token': csrf_token
            },
        )
        # Assert response is correct
        self.assertContains(
            response,
            'OtherGuardian Student',
            status_code=200,
            html=False,
        )
        # Assert database was updated: new signature text and a *newer*
        # signature timestamp than the first submission.
        permission_slip.refresh_from_db()
        final_sig_date = permission_slip.guardian_signature_date
        self.assertEqual(permission_slip.guardian_signature, signature)
        self.assertNotEqual(initial_sig_date, final_sig_date)
def test_slip_view_reject_invalid_parent_submission_ec(self):
"""Parent submissions with no electronic_consent must be rejected."""
# We're going to get guardian1's trip1 permission slip.
student1 = models.Student.objects.get(person_id='202300001')
guardian1 = models.Guardian.objects.get(person_id='2001')
trip = models.FieldTrip.objects.get(id=1)
permission_slip = models.PermissionSlip.objects.get(
field_trip=trip,
student=student1
)
slip_link = models.PermissionSlipLink.objects.get(
permission_slip=permission_slip,
guardian=guardian1
)
slip_url_id = slip_link.link_id
slip_url = reverse('permission slip', kwargs={'slip_id': slip_url_id})
initial_student_sig = permission_slip.student_signature
initial_student_sig_date = permission_slip.student_signature_date
csrf_token = self.client.get(slip_url).context.get('csrf_token')
response = self.client.post(
slip_url,
{
'name': '<NAME> Submission', # NOTE: This is not blank
'electronic_consent': False, # NOTE: This is blank
'csrf_token': csrf_token
},
)
# Assert that the returned page is an error.
self.assertEqual(response.status_code, 400)
# Assert that the permission_slip has not been updated.
permission_slip.refresh_from_db()
final_student_sig = permission_slip.student_signature
final_student_sig_date = permission_slip.student_signature_date
self.assertEqual(initial_student_sig, final_student_sig)
self.assertEqual(initial_student_sig_date, final_student_sig_date)
def test_slip_view_reject_invalid_parent_submission_sig(self):
"""Parent submissions with no name/signature must be rejected."""
# We're going to get guardian1's trip1 permission slip.
student1 = models.Student.objects.get(person_id='202300001')
guardian1 = models.Guardian.objects.get(person_id='2001')
trip = models.FieldTrip.objects.get(id=1)
permission_slip = models.PermissionSlip.objects.get(
field_trip=trip,
student=student1
)
slip_link = models.PermissionSlipLink.objects.get(
permission_slip=permission_slip,
guardian=guardian1
)
slip_url_id = slip_link.link_id
slip_url = reverse('permission slip', kwargs={'slip_id': slip_url_id})
initial_student_sig = permission_slip.student_signature
initial_student_sig_date = permission_slip.student_signature_date
csrf_token = self.client.get(slip_url).context.get('csrf_token')
response = self.client.post(
slip_url,
{
'name': '', # NOTE: This is blank
'electronic_consent': True, # NOTE: This is not blank
'csrf_token': csrf_token
},
)
# Assert that the returned page is an error.
self.assertEqual(response.status_code, 400)
# Assert that the permission_slip has not been updated.
permission_slip.refresh_from_db()
final_student_sig = permission_slip.student_signature
final_student_sig_date = permission_slip.student_signature_date
self.assertEqual(initial_student_sig, final_student_sig)
self.assertEqual(initial_student_sig_date, final_student_sig_date)
    def test_completed_slip_shows_completed(self):
        """Completed slips should show a green badge in the upper right."""
        # We're going to get guardian1 and student1s' trip1 permission slip.
        student1 = models.Student.objects.get(person_id='202300001')
        guardian1 = models.Guardian.objects.get(person_id='2001')
        trip = models.FieldTrip.objects.get(id=1)
        permission_slip = models.PermissionSlip.objects.get(
            field_trip=trip,
            student=student1
        )
        slip_link = models.PermissionSlipLink.objects.get(
            permission_slip=permission_slip,
            guardian=guardian1
        )
        slip_url = reverse('permission slip', kwargs={'slip_id': slip_link.link_id})
        csrf_token = self.client.get(slip_url).context.get('csrf_token')
        # Step 1: the guardian signs their copy of the slip.
        self.client.post(
            slip_url,
            {
                'name': '<NAME>',
                'electronic_consent': True,
                'csrf_token': csrf_token
            },
        )
        # Step 2: the student signs via their own link, completing the slip.
        slip_link = models.PermissionSlipLink.objects.get(
            permission_slip=permission_slip,
            student=student1
        )
        slip_url = reverse('permission slip', kwargs={'slip_id': slip_link.link_id})
        csrf_token = self.client.get(slip_url).context.get('csrf_token')
        response = self.client.post(
            slip_url,
            {
                'name': '<NAME>',
                'electronic_consent': True,
                'csrf_token': csrf_token
            },
        )
        # With both signatures present the page must render the green
        # "Complete" badge markup.
        self.assertContains(
            response,
            '<h3 class="float-right"><span class="badge badge-success">Complete</span></h3>',
            status_code=200,
            html=False,
        )
class TripListViewTest(ViewTest):
    """Exercise the trip_list view: URL mapping, auth, and trip visibility."""

    def test_trip_list_view_exists(self):
        """Tests to ensure the trip list view exists."""
        # NOTE(review): the list view is apparently exposed as 'index';
        # confirm this is intentional rather than a stale attribute name.
        view_present = hasattr(views, 'index')
        self.assertTrue(view_present)

    def test_trip_list_view_mapped_correctly(self):
        """The trip_list view should be mapped to /trip/"""
        self.assertEqual('/trip/', reverse('trip list'))

    def test_trip_list_hidden_view_mapped_correctly(self):
        """trip_list view should also have a pointer from /archive/"""
        self.assertEqual('/archive/', reverse('trip archive'))

    def test_trip_list_login_redirect(self):
        """trip_list should redirect anonymous users to the login page"""
        list_url = reverse('trip list')
        self.check_view_redirect(list_url, '/login?next={0}'.format(list_url))

    def test_trip_list_hidden_login_redirect(self):
        """trip_list archive should redirect anonymous users to the login page"""
        archive_url = reverse('trip archive')
        self.check_view_redirect(archive_url,
                                 '/login?next={0}'.format(archive_url))

    def test_trip_list_get_admin(self):
        """trip list should include all trips when admin logged in"""
        self.client.force_login(self.admin_user)
        response = self.client.get(reverse('trip list'))
        for visible_trip in ('Test Trip', 'Trip 2'):
            self.assertContains(response, visible_trip,
                                status_code=200, html=False)
        # Archived trips never appear in the active list.
        self.assertNotContains(response, 'Trip 3 Hidden',
                               status_code=200, html=False)

    def test_trip_list_get_teacher(self):
        """trip list should include only trips teacher is on when teacher logged in"""
        self.client.force_login(self.teacher_user)
        response = self.client.get(reverse('trip list'))
        self.assertContains(response, 'Test Trip',
                            status_code=200, html=False)
        for hidden_trip in ('Trip 2', 'Trip 3 Hidden'):
            self.assertNotContains(response, hidden_trip,
                                   status_code=200, html=False)

    def test_trip_list_archive_get_admin(self):
        """trip list archive should only return (all) hidden trips"""
        self.client.force_login(self.admin_user)
        response = self.client.get(reverse('trip archive'))
        for active_trip in ('Test Trip', 'Trip 2'):
            self.assertNotContains(response, active_trip,
                                   status_code=200, html=False)
        self.assertContains(response, 'Trip 3 Hidden',
                            status_code=200, html=False)

    def test_trip_list_archive_get_teacher(self):
        """teachers are not permitted to view archive"""
        self.client.force_login(self.teacher_user)
        response = self.client.get(reverse('trip archive'))
        self.assertEqual(403, response.status_code)
class TripDetailTest(ViewTest):
"""Tests the trip_detail view"""
def test_trip_detail_exists(self):
"""The test_trip view should exist"""
self.assertTrue(hasattr(views, 'trip_detail'))
def test_trip_detail_mapping(self):
"""trip_detail should map to /trip/<int:trip_id>/"""
self.assertEqual(reverse('trip detail', kwargs={'trip_id': 1}), '/trip/1/')
def test_trip_redirect_anonymous(self):
"""should redirect anonymous users to /login?next=/trip/"""
url = reverse('trip detail', kwargs={'trip_id': 1})
self.check_view_redirect(url, '/login?next={0}'.format(url))
def test_trip_detail_404_on_nonexistent_trip(self):
"""should return 404 when nonexistent trip requested"""
self.client.force_login(self.admin_user)
url = reverse('trip detail', kwargs={'trip_id': 99})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.__class__.__name__, 'HttpResponseNotFound')
def test_trip_detail_403_notadmin_notcoordinator(self):
"""should return 403 when not admin and not faculty coordinator for trip"""
self.client.force_login(self.teacher_user)
url = reverse('trip detail', kwargs={'trip_id': 2})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.__class__.__name__, 'HttpResponseForbidden')
def test_trip_detail_archived_trips_admin_readonly(self):
"""should return archived trips as readonly even for admin users"""
self.client.force_login(self.admin_user)
url = reverse('trip detail', kwargs={'trip_id': 3})
response = self.client.get(url)
self.assertTrue(response.context['form'].read_only)
def test_trip_detail_archived_trips_notadmin_readonly(self):
"""should return archived trips as readonly"""
self.client.force_login(self.teacher_user)
url = reverse('trip detail', kwargs={'trip_id': 3})
response = self.client.get(url)
self.assertTrue(response.context['form'].read_only)
def test_trip_detail_approved_notadmin_readonly(self):
"""should return readonly when trip approved and user non admin"""
trip1 = models.FieldTrip.objects.get(id=1)
trip1.approve()
url = reverse('trip detail', kwargs={'trip_id': 1})
self.client.force_login(self.teacher_user)
response = self.client.get(url)
self.assertTrue(response.context['form'].read_only)
def test_trip_detail_approved_admin_fillable(self):
"""should return fillable when trip approved and user admin"""
trip1 = models.FieldTrip.objects.get(id=1)
trip1.approve()
url = reverse('trip detail', kwargs={'trip_id': 1})
self.client.force_login(self.admin_user)
response = self.client.get(url)
self.assertFalse(response.context['form'].read_only)
def test_trip_detail_released_notadmin_readonly(self):
"""should return readonly when trip released and user non admin"""
trip1 = models.FieldTrip.objects.get(id=1)
trip1.approve()
trip1.release()
url = reverse('trip detail', kwargs={'trip_id': 1})
self.client.force_login(self.teacher_user)
response = self.client.get(url)
self.assertTrue(response.context['form'].read_only)
def test_trip_detail_released_admin_fillable(self):
"""should return fillable when trip released and user admin"""
trip1 = models.FieldTrip.objects.get(id=1)
trip1.approve()
trip1.release()
url = reverse('trip detail', kwargs={'trip_id': 1})
self.client.force_login(self.admin_user)
response = self.client.get(url)
self.assertFalse(response.context['form'].read_only)
def test_trip_detail_new_trip_mapped(self):
"""new trip /trip/new should be mapped"""
self.assertEqual(reverse('new field trip'), '/trip/new/')
def test_trip_detail_new_trip_uses_trip_detail(self):
"""/trip/new should use the trip_detail method/template"""
self.client.force_login(self.teacher_user)
response = self.client.get(reverse('new field trip'))
self.assertTemplateUsed(response, 'paperlesspermission/trip_detail.html')
def test_trip_detail_GET_redirected_new_trip_anonymous(self):
"""should return a login redirect when anonymous user requests /trip/new"""
expected_url = '/login?next={0}'.format(reverse('new field trip'))
self.check_view_redirect(reverse('new field trip'), expected_url)
def test_trip_detail_GET_200_new_trip_loggedin(self):
"""should return fillable when trip new and user logged in"""
self.client.force_login(self.teacher_user)
url = reverse('new field trip')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertFalse(response.context['form'].read_only)
def test_trip_detail_POST_400_on_invalid_data(self):
"""should return 400 on invalid data POST"""
self.client.force_login(self.teacher_user)
url = reverse('trip detail', kwargs={'trip_id': 1})
csrf_token = self.client.get(url).context.get('csrf_token')
response = self.client.post(url, {'csrf_token': csrf_token})
self.assertEqual(response.status_code, 400)
self.assertEqual(response.__class__.__name__, 'HttpResponseBadRequest')
def test_trip_detail_POST_403_on_readonly(self):
"""should return 403 on 'authorized' POST to readonly form"""
self.client.force_login(self.teacher_user)
trip1 = models.FieldTrip.objects.get(id=1)
trip1.approve() # trip is now readonly for teacher_user
url = reverse('trip detail', kwargs={'trip_id': 1})
csrf_token = self.client.get(url).context.get('csrf_token')
response = self.client.post(url, {'csrf_token': csrf_token})
self.assertEqual(response.status_code, 403)
self.assertEqual(response.__class__.__name__, 'HttpResponseForbidden')
def test_trip_detail_POST_302_on_valid(self):
"""should return redirect to /trip/ on authorized, valid data"""
self.client.force_login(self.teacher_user)
url = reverse('trip detail', kwargs={'trip_id': 1})
csrf_token = self.client.get(url).context.get('csrf_token')
post_body = {
'csrf_token' : csrf_token,
'name' : 'Updated Trip Name',
'due_date' : '05/05/2020',
'group_name' : 'Updated Group Name',
'location' : 'Updated Location',
'start_date' : '06/06/2020',
'dropoff_time' : '10:10',
'dropoff_location' : 'Updated Dropoff',
'end_date' : '06/07/2020',
'pickup_time' : '11:11',
'pickup_location' : 'Updated Pickup',
'faculty' : models.Faculty.objects.get(person_id='1000001').id,
'students' : models.Student.objects.get(person_id='202300001').id,
}
response = self.client.post(url, post_body)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.__class__.__name__, 'HttpResponseRedirect')
self.assertEqual(response.url, '/trip')
def test_trip_detail_POST_302_on_new(self):
"""should return redirect to /trip on authorized, valid data to /trip/new"""
self.client.force_login(self.teacher_user)
url = reverse('trip detail', kwargs={'trip_id': 1})
csrf_token = self.client.get(url).context.get('csrf_token')
post_body = {
'csrf_token' : csrf_token,
| |
+ 1, l, int(spec_matrix[z, c, l])))
del spec_matrix
return spectrum
def _fpgrowth_filter(concept, winlen, max_c, min_neu):
"""
Filter for selecting closed frequent items set with a minimum number of
neurons and a maximum number of occurrences and first spike in the first
bin position
"""
keep_concepts = len(
np.unique(np.array(
concept[0]) // winlen)) >= min_neu and concept[1] <= max_c and min(
np.array(concept[0]) % winlen) == 0
return keep_concepts
def _rereference_to_last_spike(transactions, winlen):
"""
Converts transactions from the default format
neu_idx * winlen + bin_idx (relative to window start)
into the format
neu_idx * winlen + bin_idx (relative to last spike)
"""
len_transactions = len(transactions)
neurons = np.zeros(len_transactions, dtype=int)
bins = np.zeros(len_transactions, dtype=int)
# extract neuron and bin indices
for idx, attribute in enumerate(transactions):
neurons[idx] = attribute // winlen
bins[idx] = attribute % winlen
# rereference bins to last spike
bins = bins.max() - bins
# calculate converted transactions
converted_transactions = neurons * winlen + bins
return converted_transactions
def _filter_for_moving_window_subsets(concepts, winlen):
    """
    Since we're using a moving window subpatterns starting from
    subsequent spikes after the first pattern spike will also be found.
    This filter removes them if they do not occur on their own in
    addition to the occurrences explained by their superset.
    Uses a reverse map with a set representation.

    Accepts concepts either in fca format (objects with .intent/.extent)
    or in fim.fpgrowth format (tuples of (items, support)); sorts the
    input list in place by decreasing support.
    """
    # don't do anything if the input list is empty
    if not len(concepts):
        return concepts
    if hasattr(concepts[0], 'intent'):
        # fca format
        # sort the concepts by (decreasing) support
        concepts.sort(key=lambda c: -len(c.extent))
        support = np.array([len(c.extent) for c in concepts])
        # convert transactions relative to last pattern spike
        converted_transactions = [_rereference_to_last_spike(c.intent,
                                                             winlen=winlen)
                                  for c in concepts]
    else:
        # fim.fpgrowth format
        # sort the concepts by (decreasing) support
        concepts.sort(key=lambda c: -c[1])
        support = np.array([c[1] for c in concepts])
        # convert transactions relative to last pattern spike
        converted_transactions = [_rereference_to_last_spike(c[0],
                                                             winlen=winlen)
                                  for c in concepts]
    output = []
    # A proper (moving-window) subset has the same support as its superset,
    # so candidates only need to be compared within one support group.
    for current_support in np.unique(support):
        support_indices = np.nonzero(support == current_support)[0]
        # construct reverse map: attribute -> set of concepts (within this
        # support group) whose transaction contains that attribute
        reverse_map = defaultdict(set)
        for map_idx, i in enumerate(support_indices):
            for window_bin in converted_transactions[i]:
                reverse_map[window_bin].add(map_idx)
        for i in support_indices:
            # concepts sharing *all* attributes of concept i; if the only
            # such concept is i itself, i is not a shifted subset of another
            intersection = reduce(
                operator.and_,
                (reverse_map[window_bin]
                 for window_bin in converted_transactions[i]))
            if len(intersection) == 1:
                output.append(concepts[i])
    return output
def _fast_fca(context, min_c=2, min_z=2, max_z=None,
              max_c=None, report='a', winlen=1, min_neu=1):
    """
    Find concepts of the context with the fast-fca algorithm.
    Parameters
    ----------
    context : list
        List of tuples containing one object and one the correspondent
        attribute
    min_c: int
        minimum support of an item set
        Default: 2
    min_z: int
        minimum number of items per item set
        Default: 2
    max_z: None/int
        maximum number of items per item set. If max_z==None no maximal
        size required
        Default: None
    max_c: None/int
        maximum support per item set. If max_c==None no maximal
        support required
        Default: None
    report: str
        'a': all the mined patterns
        '#': pattern spectrum using as signature the pair:
            (number of spikes, number of occurrence)
        '3d#': pattern spectrum using as signature the triplets:
            (number of spikes, number of occurrence, difference between the
            times of the last and the first spike of the pattern)
        Default: 'a'
    The following parameters are specific to Massive parallel SpikeTrains
    winlen: int (positive)
        The size (number of bins) of the sliding window used for the
        analysis. The maximal length of a pattern (delay between first and
        last spike) is then given by winlen*binsize
        Default: 1
    min_neu: int (positive)
        Minimum number of neurons in a sequence to considered a
        potential pattern.
        Default: 1
    Returns
    --------
    If report == 'a':
        All the pattern candidates (concepts) found in the data. Each
        pattern is represented as a tuple containing
        (spike IDs, discrete times (window position)
        of the occurrences of the pattern). The spike IDs are defined as:
        spike_id=neuron_id*bin_id; with neuron_id in [0, len(data)] and
        bin_id in [0, winlen].
    If report == '#':
        The pattern spectrum is represented as a list of triplets each
        formed by:
        (pattern size, number of occurrences, number of patterns)
    If report == '3d#':
        The pattern spectrum is represented as a list of quadruplets each
        formed by:
        (pattern size, number of occurrences, difference between last
        and first spike of the pattern, number of patterns)
    """
    # Initializing outputs
    concepts = []
    # Check parameters
    if min_neu < 1:
        raise AttributeError('min_neu must be an integer >=1')
    # By default set maximum number of attributes
    if max_z is None:
        max_z = len(context)
    # By default set maximum number of data to number of bins
    if max_c is None:
        max_c = len(context)
    # spec_matrix accumulates pattern counts indexed by (size-1, occ-1[, dur])
    if report == '#':
        spec_matrix = np.zeros((max_z, max_c))
    if report == '3d#':
        spec_matrix = np.zeros((max_z, max_c, winlen))
    spectrum = []
    # Mining the data with fast fca algorithm
    fca_out = fast_fca.FormalConcepts(context)
    fca_out.computeLattice()
    fca_concepts = fca_out.concepts
    # Applying min/max conditions and removing shifted-window duplicates
    fca_concepts = list(filter(
        lambda c: _fca_filter(
            c, winlen, min_c, min_z, max_c, max_z, min_neu), fca_concepts))
    fca_concepts = _filter_for_moving_window_subsets(fca_concepts, winlen)
    for fca_concept in fca_concepts:
        intent = tuple(fca_concept.intent)
        extent = tuple(fca_concept.extent)
        concepts.append((intent, extent))
        # computing spectrum
        if report == '#':
            spec_matrix[len(intent) - 1, len(extent) - 1] += 1
        if report == '3d#':
            # third axis: pattern duration = largest bin offset in the window
            spec_matrix[len(intent) - 1, len(extent) - 1, max(
                np.array(intent) % winlen)] += 1
    if report == 'a':
        return concepts
    del concepts
    # returning spectrum (only the non-empty signature cells)
    if report == '#':
        for (z, c) in np.transpose(np.where(spec_matrix != 0)):
            spectrum.append((z + 1, c + 1, int(spec_matrix[z, c])))
    if report == '3d#':
        for (z, c, l) in np.transpose(np.where(spec_matrix != 0)):
            spectrum.append(
                (z + 1, c + 1, l, int(spec_matrix[z, c, l])))
    del spec_matrix
    return spectrum
def _fca_filter(concept, winlen, min_c, min_z, max_c, max_z, min_neu):
"""
Filter to select concepts with minimum/maximum number of spikes and
occurrences and first spike in the first bin position
"""
intent = tuple(concept.intent)
extent = tuple(concept.extent)
keep_concepts = len(intent) >= min_z and len(extent) >= min_c and len(
intent) <= max_z and len(extent) <= max_c and len(
np.unique(np.array(intent) // winlen)) >= min_neu and min(
np.array(intent) % winlen) == 0
return keep_concepts
def pvalue_spectrum(data, binsize, winlen, dither, n_surr, min_spikes=2,
min_occ=2, max_spikes=None, max_occ=None, min_neu=1,
spectrum='#'):
"""
Compute the p-value spectrum of pattern signatures extracted from
surrogates of parallel spike trains, under the null hypothesis of
independent spiking.
* n_surr surrogates are obtained from each spike train by spike dithering
* pattern candidates (concepts) are collected from each surrogate data
* the signatures (number of spikes, number of occurrences) of all patterns
are computed, and their occurrence probability estimated by their
occurrence frequency (p-value spectrum)
Parameters
----------
data: list of neo.SpikeTrains
List containing the parallel spike trains to analyze
binsize: Quantity
The time precision used to discretize the data (binning).
winlen: int (positive)
The size (number of bins) of the sliding window used for the analysis.
The maximal length of a pattern (delay between first and last spike) is
then given by winlen*binsize
dither: Quantity
Amount of spike time dithering for creating the surrogates for
filtering the pattern spectrum. A spike at time t is placed randomly
within ]t-dither, t+dither[ (see also
elephant.spike_train_surrogates.dither_spikes).
Default: 15*pq.s
n_surr: int
Number of surrogates to generate to compute the p-value spectrum.
This number should be large (n_surr>=1000 is recommended for 100
spike trains in *sts*). If n_surr is 0, then the p-value spectrum is
not computed.
Default: 0
min_spikes: int (positive)
Minimum number of spikes of a sequence to be considered a pattern.
Default: 2
min_occ: int (positive)
Minimum number of occurrences of a sequence to be considered as a
pattern.
Default: 2
max_spikes: int (positive)
Maximum number of spikes of a sequence to be considered a pattern. If
None no maximal number of spikes is considered.
Default: None
max_occ: int (positive)
Maximum number of occurrences of a sequence to be considered as a
pattern. If None, no maximal number of occurrences is considered.
Default: None
min_neu: int (positive)
Minimum number of neurons in a sequence to considered a pattern.
Default: 1
spectrum: str
Defines the signature of the patterns, it can assume values:
'#': pattern spectrum using the as signature the pair:
(number of spikes, number of occurrence)
'3d#': pattern spectrum using the | |
<reponame>namnamir/21-Card-Game<gh_stars>0
import deck
import players
from input import get_text as get_text
from input import get_number as get_number
import config
from colored import fg, bg, attr
from terminaltables import SingleTable
play = {}
status = ['Stand', 'Hit', 'Bust', 'Split']
# print the hand
def print_hand(player, Flag):
for i in range(1, sorted(play[player].keys())[-1] + 1):
deck.print_card(play[player][i][0], 'Print')
if Flag == 'Print_Sum':
deck.print_sum(play[player][0])
# update the bet
def bet(player_name, previous_bet):
# define the messages; change them in config.py
input_txt = config.Messages.BET_INPUT_TXT
success_msg = config.Messages.BET_SUCCESS_TXT
error_msg1 = config.Messages.BET_ERROR_TXT1
error_msg2 = config.Messages.ERROR_TXT
# set the new bet
return get_number('Bet', player_name, input_txt, success_msg, error_msg1,
error_msg2, previous_bet)
# ask the desired value of Ace from the user
def is_ace(player_name, player, key, ace_type):
if player[key][0][0] == ace_type:
input_txt = config.Messages.ACE_INPUT_TXT
success_msg = config.Messages.ACE_SUCCESS_TXT
error_msg1 = config.Messages.ACE_ERROR_TXT
error_msg2 = config.Messages.ERROR_TXT
bet = 0
player[key][0][0] = get_number('Ace', player_name, input_txt,
success_msg, error_msg1, error_msg2,
bet)
player[0] = sum_up(player_name, player)
# # print the hand after defining the value of the Ace
# print(config.Messages.CARD_DETAILS_TXT4.format(fg(3), bg(234), player,
# attr(0)))
# for i in range(1, sorted(player.keys())[-1] + 1):
# deck.print_card(player[i][0])
# deck.print_sum(player[0])
# print(player)
# sum up the value of cards
def sum_up(player_name, player):
sum_value = 0
for key in list(player.keys()):
if key == 0:
continue
sum_value += deck.ranks[player[key][0][0]]
return sum_value
# define the winner and print the final table
def print_statistics(player, Flag):
table = [['Player Name', 'Cards', 'Sum', 'Bet', 'Status']]
temp2 = {} # for collecting sums
temp3 = [] # for collecting equal sums
equal_val = 0
for name in player:
temp1 = [] # for forming rows of the table
last_card = sorted(play[name].keys())[-1]
a = ''
# set a dic {name:sum} to find the winner
temp2[name] = play[name][0]
temp1.append(name) # name
for x in range(1, last_card + 1):
a += deck.print_card(play[name][x][0], None)
if x < last_card:
a += ' - '
temp1.append(a) # hand
temp1.append(play[name][0]) # sum
temp1.append(play[name][last_card][1]) # bet
if temp1[2] > 21: # status
temp1.append('Busted') # status
elif temp1[2] == 21:
temp1.append('Won') # status
else:
temp1.append('Lost') # status
table.append(temp1)
# set the statuses in case all are busted before the Dealer start playing
if Flag == 'All_Busted':
table[1][1] = '-'
table[1][2] = '-'
table[1][4] = 'Won'
print(config.Messages.ALL_BUSTED_TXT.format(fg(3), bg(234), attr(0)))
else:
# sort descendingly the results and remove the busted players
temp2 = list(reversed(sorted(temp2.items(), key=lambda x: x[1])))
for i in range(0, len(temp2)):
if temp2[i][1] > 21:
temp2.pop(i)
continue
# if 2 or more players have the same sum
try:
# to be sure that the last item is taken into account
if i == len(temp2) - 1 and temp2[i][1] == equal_val:
temp3.append(temp2[i][0])
# if first items (>= 2) are eqals
elif i < len(temp2) and temp2[i][1] == temp2[i+1][1]:
temp3.append(temp2[i][0])
equal_val = temp2[i][1]
except IndexError:
pass
# if the Dealer and others have the highest score the Dealer wins
if temp2[0][0] and 'Dealer' in temp3:
for item in table:
if item[0] in temp3:
if item[0] != 'Dealer':
item[4] = 'Lost'
else:
item[4] = 'Won'
print(config.Messages.WON_TXT2.format(attr(5), fg(15), bg(160),
'Dealer', attr(0)))
# if more than one wins, change the status of all to 'Won'
elif temp2[0][0] in temp3 and len(temp3) > 1:
for name in temp3:
for row in table:
if row[0] == name:
row[4] = 'Won'
print(config.Messages.WON_TXT3.format(attr(5), fg(15),
bg(160), name,
attr(0)))
break
# if none of the above, the winner is the first member of temp2
else:
for row in table:
if row[0] == temp2[0][0]:
row[4] = 'Won'
break
print(config.Messages.WON_TXT2.format(attr(5), fg(15), bg(160),
temp2[0][0], attr(0)))
# print the statisctics
table = SingleTable(table)
print(table.table)
# get the second, third and more card for each player
def play_game(player, j, Flag):
play[player][0] = sum_up(player, play[player])
while play[player][0] <= 21 and play[player][j-1][2] == 'Hit':
# initiate the round
play[player][j] = [None, 0, None]
# get a new card
play[player][j][0] = deck.deck.pop()
# print the new card and check if it is the ace or not
print(config.Messages.CARD_DETAILS_TXT2.format(fg(3), bg(234),
player, j, attr(0)))
deck.print_card(play[player][j][0], 'Print')
is_ace(player, play[player], j, 'Ace')
""" check if split is eligible
if it is exactly the scond hand && the last to chars before the
last one of the name of the palyer is not __ && the palayer is the
Dealer && two first cards have the same value
"""
if j == 2 and player[-3:-1] != '__' and player != 'Dealer' and \
play[player][1][0][0] == play[player][2][0][0]:
# ask the user if she wants to split
input_txt = config.Messages.CHOICE_TYPE_TXT2
error_msg = config.Messages.CHOICE_ERROR_TXT
answer = get_text(player, input_txt, error_msg,
['y', 'yes', 'n', 'no'])
# create two hands for the player and remove the old one
if answer.lower() in ['y', 'yes']:
# create the first part of the splited hand
play[player + '__1'] = play[player]
play[player + '__1'].pop(2)
# create the second part of the splited hand
play[player + '__2'] = play[player + '__1']
play[player + '__2'][1][0] = play[player][1][0]
# update sum
play[player + '__1'][0] = int(play[player][0] / 2)
play[player + '__2'][0] = int(play[player][0] / 2)
# delete the old hand
play.pop(player)
""" update the list of names
it doesn't remove the old name from the list,
it will be removed later in the calling loop.
"""
k = players.players_name.index(player)
players.players_name.insert(k + 1, player + '__1')
players.players_name.insert(k + 2, player + '__2')
break
"""if it's Dealer's turn, there are some rules:
The Dealer must hit when it has a total of 16 points or less
and must stand with a total of 17 points or more.
"""
play['Dealer'][0] = sum_up('Dealer', play['Dealer'])
if player == 'Dealer' and play['Dealer'][0] <= 16:
play[player][j][2] = 'Hit'
j += 1
continue
elif player == 'Dealer' and play['Dealer'][0] >= 17:
if play['Dealer'][0] == 21:
play[player][j][2] = 'Won'
elif play['Dealer'][0] > 21:
play[player][j][2] = 'Busted'
else:
play[player][j][2] = 'Stand'
break
# check if the hand is 21
play[player][0] = sum_up(player, play[player])
if play[player][0] == 21:
# update the bet of the new round as the previous one
play[player][j][1] = play[player][j-1][1]
print(config.Messages.WON_TXT1.format(attr(5), fg(15), bg(160),
player,
play[player][j-1][1],
attr(0)))
break
# if the player is not the Dealer
if j >= 2 and play[player][0] < 21:
# ask the player if she wants to increase the bet
input_txt = config.Messages.CHOICE_BET_TXT
error_msg = config.Messages.CHOICE_ERROR_TXT
answer = get_text(player, input_txt, error_msg,
['y', 'yes', 'n', 'no'])
if answer.lower() in ['y', 'yes']:
play[player][j][1] = bet(player, play[player][j-1][1])
bet_flag = 'Increased' # the bet is increased
# ask the player if she wants to hit or stand
input_txt = config.Messages.CHOICE_TYPE_TXT1
error_msg = config.Messages.CHOICE_ERROR_TXT
answer = get_text(player, input_txt, error_msg,
['h', 'hit', 's', 'stand'])
if answer.lower() in ['h', 'hit']:
play[player][j][2] = 'Hit'
j += 1
continue
else:
play[player][j][2] = 'Stand'
break
"""update the bet of the new round as the previous one if
it is not already increased by the player
"""
if bet_flag != 'Increased':
play[player][j][1] = play[player][j-1][1]
# check if the hand is busted
elif play[player][0] > 21:
# check if the player likes to change the value of Aces
for z in range(1, len(play[player])):
item = play[player][z][0][0]
if item in ['Ace', 'Ace1', 'Ace11']:
print(config.Messages.BUSTED_C_TXT.format(attr(5), fg(15),
bg(160), player,
play[player][j-1][1],
attr(0)))
print_hand(player, 'Print_Sum')
break
is_ace(player, play[player], z, item)
play[player][0] = sum_up(player, play[player])
continue
# update the bet of the new round as the previous one
play[player][j][1] = play[player][j-1][1]
play[player][j][2] = 'Bust'
# print the message as the busted hand
print(config.Messages.BUSTED_TXT.format(attr(1), attr(5), fg(1),
bg(15), player,
play[player][j][1]))
print_hand(player, 'Print_Sum')
break
# print the value of each card + the sum of them
print(config.Messages.CARD_DETAILS_TXT3.format(fg(3), bg(234),
player, j, attr(0)))
for i in range(1, sorted(play[player].keys())[-1] + 1):
deck.print_card(play[player][i][0], 'Print')
deck.print_sum(play[player][0])
j += 1
print('\n<<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>>')
print(config.Messages.CARD_DETAILS_TXT5.format(fg(3), bg(234),
player, attr(0)))
print_hand(player, 'Print_Sum')
print('<<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>><<>>\n')
player = players.players_name
# get the first card
for name in player:
play[name] = {0: 0, 1: [deck.deck.pop(), 0, 'Hit']}
is_ace(player, play[name], 1, 'Ace')
play[name][0] = sum_up(name, play[name])
# place the initial bet; except for the Dealer
print(config.Messages.CARD_DETAILS_TXT1.format(fg(3), bg(234), name, attr(0)))
# print the initial hand of each player and ask for the first bet
for i in range(1, len(player)):
print_hand(player[i], None)
play[player[i]][1][1] = bet(player[i], 0)
# create a temporary array which helps with splited hands
temp1 = player.copy()
temp1.remove('Dealer')
while temp1:
play_game(temp1[0], 2, 'init')
temp1.remove(temp1[0])
# remove the duplicated names
for item in player:
if | |
import unittest
from unittest.mock import patch
from gym_powerworld.envs import voltage_control_env
# noinspection PyProtectedMember
from gym_powerworld.envs.voltage_control_env import LOSS, \
MinLoadBelowMinGenError, MaxLoadAboveMaxGenError, OutOfScenariosError, \
MIN_V, MAX_V, MIN_V_SCALED, MAX_V_SCALED, _scale_voltages
import os
import pandas as pd
import numpy as np
import numpy.testing as np_test
import logging
import warnings
from esa import SAW, PowerWorldError
from gym.spaces import Discrete
import shutil
# Get full path to this directory.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# Cases are within this directory.
CASE_DIR = os.path.join(THIS_DIR, 'cases')
# IEEE 14 bus
DIR_14 = os.path.join(CASE_DIR, 'ieee_14')
PWB_14 = os.path.join(DIR_14, 'IEEE 14 bus.pwb')
AXD_14 = os.path.join(DIR_14, 'IEEE 14 bus.axd')
CONTOUR = os.path.join(DIR_14, 'contour.axd')
# Case with 3 gens modeled as condensers:
PWB_14_CONDENSERS = os.path.join(DIR_14, 'IEEE 14 bus condensers.pwb')
# Case with min and max MW limits on all 5 generators.
PWB_14_LIMITS = os.path.join(DIR_14, 'IEEE 14 bus limits.pwb')
# IL 200
PWB_200 = os.path.join(CASE_DIR, 'il_200', 'ACTIVSg200.pwb')
# TX 2000
PWB_2000 = os.path.join(CASE_DIR, 'tx_2000',
'ACTIVSg2000_AUG-09-2018_Ride_mod.PWB')
# Define some constants related to the IEEE 14 bus case.
N_GENS_14 = 5
N_LOADS_14 = 11
LOAD_MW_14 = 259.0
# noinspection DuplicatedCode
class DiscreteVoltageControlEnv14BusTestCase(unittest.TestCase):
"""Test initializing the environment with the 14 bus model."""
@classmethod
def setUpClass(cls) -> None:
# Initialize the environment. Then, we'll use individual test
# methods to test various attributes, methods, etc.
# Define inputs to the constructor.
cls.num_scenarios = 1000
cls.max_load_factor = 2
cls.min_load_factor = 0.5
cls.min_load_pf = 0.8
cls.lead_pf_probability = 0.1
cls.load_on_probability = 0.8
cls.num_gen_voltage_bins = 9
cls.gen_voltage_range = (0.9, 1.1)
cls.seed = 18
cls.log_level = logging.INFO
cls.dtype = np.float32
cls.log_buffer = 100
cls.env = voltage_control_env.DiscreteVoltageControlEnv(
pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
max_load_factor=cls.max_load_factor,
min_load_factor=cls.min_load_factor,
min_load_pf=cls.min_load_pf,
lead_pf_probability=cls.lead_pf_probability,
load_on_probability=cls.load_on_probability,
num_gen_voltage_bins=cls.num_gen_voltage_bins,
gen_voltage_range=cls.gen_voltage_range,
seed=cls.seed,
log_level=logging.INFO,
dtype=cls.dtype,
log_buffer=cls.log_buffer
)
# For easy comparison with the original case, get a fresh SAW
# object. Do not make any changes to this, use only "get" type
# methods.
cls.saw = SAW(PWB_14, early_bind=True)
# noinspection PyUnresolvedReferences
@classmethod
def tearDownClass(cls) -> None:
cls.saw.exit()
cls.env.close()
def test_branches_to_open(self):
"""Ensure branches_to_open is the right shape and is in the
appropriate range.
"""
self.assertIsNotNone(self.env.branches_to_open)
self.assertEqual((self.num_scenarios,),
self.env.branches_to_open.shape)
self.assertTrue(self.env.branches_to_open.min() >= 0)
self.assertTrue(
self.env.branches_to_open.max()
< self.env.branch_init_data.shape[0])
def test_saw_load_state(self):
"""Ensure that calling saw.LoadState() works (testing that
saw.SaveState() has already been called).
"""
# NOTE: This changes the state of self.env.saw, which can cause
# issues in other tests.
self.assertIsNone(self.env.saw.LoadState())
def test_gen_key_fields(self):
"""Ensure the gen key fields are correct. Hard coding style."""
self.assertListEqual(['BusNum', 'GenID'], self.env.gen_key_fields)
def test_gen_init_fields(self):
self.assertListEqual(
self.env.gen_key_fields + self.env.GEN_INIT_FIELDS,
self.env.gen_init_fields)
def test_gen_obs_fields(self):
self.assertListEqual(self.env.gen_key_fields + self.env.GEN_OBS_FIELDS,
self.env.gen_obs_fields)
def test_gen_init_data(self):
self.assertIsInstance(self.env.gen_init_data, pd.DataFrame)
self.assertListEqual(self.env.gen_init_fields,
self.env.gen_init_data.columns.tolist())
def test_num_gens(self):
# 15 bus case has 5 generators.
self.assertEqual(5, self.env.num_gens)
def test_zero_negative_gen_mw_limits(self):
"""Ensure the _zero_negative_gen_mw_limits function works as
intended.
"""
# First, ensure it has been called.
self.assertTrue((self.env.gen_init_data['GenMWMin'] >= 0).all())
# Now, patch gen_init_data and saw and call the function.
gen_copy = self.env.gen_init_data.copy(deep=True)
gen_copy['GenMWMin'] = -10
# I wanted to use self.assertLogs, but that has trouble working
# with nested context managers...
with patch.object(self.env, '_gen_init_data', new=gen_copy):
with patch.object(self.env, 'saw') as p:
self.env._zero_negative_gen_mw_limits()
# The gen_copy should have had its GenMWMin values zeroed out.
self.assertTrue((gen_copy['GenMWMin'] == 0).all())
# change_parameters_multiple_element_df should have been
# called.
p.change_and_confirm_params_multiple_element.assert_called_once()
# Ensure the change was reflected in PowerWorld.
gens = self.env.saw.GetParametersMultipleElement(
'gen', ['BusNum', 'GenID', 'GenMWMin'])
self.assertTrue((gens['GenMWMin'] == 0).all())
# Finally, (this could have been done first, but oh well), make
# sure that the case started with negative GenMWMin values.
gens_orig = self.saw.GetParametersMultipleElement(
'gen', ['BusNum', 'GenID', 'GenMWMin'])
self.assertTrue((gens_orig['GenMWMin'] < 0).any())
def test_gen_mw_capacity(self):
# The generators are all set to a ridiculous maximum of 10 GW.
self.assertEqual(5 * 10000.0, self.env.gen_mw_capacity)
def test_gen_mvar_produce_capacity(self):
self.assertEqual(50. + 40. + 24. + 24.,
round(self.env.gen_mvar_produce_capacity, 2))
def test_gen_mvar_consume_capacity(self):
self.assertEqual(-40. - 6. - 6.,
round(self.env.gen_mvar_consume_capacity, 2))
def test_load_key_fields(self):
# Hard coding!
self.assertListEqual(self.env.load_key_fields, ['BusNum', 'LoadID'])
def test_load_init_fields(self):
self.assertListEqual(self.env.load_init_fields,
self.env.load_key_fields
+ self.env.LOAD_INIT_FIELDS)
def test_load_obs_fields(self):
self.assertListEqual(
self.env.load_obs_fields,
self.env.load_key_fields + self.env.LOAD_OBS_FIELDS)
def test_load_init_data(self):
self.assertIsInstance(self.env.load_init_data, pd.DataFrame)
self.assertListEqual(self.env.load_init_data.columns.tolist(),
self.env.load_init_fields)
def test_num_loads(self):
self.assertEqual(11, self.env.num_loads)
def test_zero_i_z_loads(self):
"""Patch the environment's load_init_data and ensure the method is
working properly.
"""
data = self.env.load_init_data.copy(deep=True)
data[voltage_control_env.LOAD_I_Z] = 1
with patch.object(self.env, '_load_init_data', new=data):
with patch.object(self.env, 'saw') as p:
self.env._zero_i_z_loads()
self.assertTrue((data[voltage_control_env.LOAD_I_Z] == 0).all().all())
p.change_and_confirm_params_multiple_element.assert_called_once()
def test_bus_key_fields(self):
self.assertListEqual(['BusNum'], self.env.bus_key_fields)
def test_bus_obs_fields(self):
self.assertListEqual(self.env.bus_key_fields + self.env.BUS_OBS_FIELDS,
self.env.bus_obs_fields)
def test_bus_init_data(self):
self.assertIsInstance(self.env.bus_init_data, pd.DataFrame)
self.assertListEqual(self.env.bus_init_fields,
self.env.bus_init_data.columns.tolist())
def test_num_buses(self):
self.assertEqual(14, self.env.num_buses)
def test_max_load_mw(self):
# System loading obtained from PowerWorld's Case Summary
# dialogue.
self.assertEqual(round(self.env.max_load_mw, 2),
self.max_load_factor * LOAD_MW_14)
def test_check_max_load_exception(self):
"""Ensure that an exception is thrown if maximum loading exceeds
maximum generation.
"""
with patch.object(self.env, 'max_load_mw', 10):
with patch.object(self.env, 'gen_mw_capacity', 9.9):
with self.assertRaisesRegex(MaxLoadAboveMaxGenError,
'The given max_load'):
self.env._check_max_load(2)
def test_check_max_load_warning(self):
"""Ensure we get a warning if the generation is in excess of
2x maximum load.
"""
with self.assertLogs(logger=self.env.log, level='WARNING'):
self.env._check_max_load(2)
def test_min_load_mw(self):
# System loading obtained from PowerWorld's Case Summary
# dialogue.
self.assertEqual(round(self.env.min_load_mw, 2),
self.min_load_factor * LOAD_MW_14)
def test_check_min_load(self):
# Get generator data.
gens = self.env.gen_init_data.copy(deep=True)
# Increase all minimum generation.
gens['GenMWMin'] = 10
# Patch:
with patch.object(self.env, '_gen_init_data', gens):
with patch.object(self.env, 'min_load_mw', 9.9):
with self.assertRaisesRegex(MinLoadBelowMinGenError,
'The given min_load'):
self.env._check_min_load(2)
def test_total_load_mw(self):
# Ensure it's 1D.
self.assertEqual(len(self.env.total_load_mw.shape), 1)
# Check shape.
self.assertEqual(self.env.total_load_mw.shape[0],
self.env.num_scenarios)
# Ensure all loads are less than the maximum.
np_test.assert_array_less(self.env.total_load_mw, self.env.max_load_mw)
# Ensure all loads are greater than the minimum.
np_test.assert_array_less(self.env.min_load_mw, self.env.total_load_mw)
def test_loads_mw(self):
# Check shape
self.assertEqual(self.env.loads_mw.shape,
(self.num_scenarios, self.env.num_loads))
# Ensure the individual loads match total loading.
np_test.assert_allclose(self.env.loads_mw.sum(axis=1),
self.env.total_load_mw, rtol=1e-6)
def test_loads_mvar(self):
# Check shape.
self.assertEqual(self.env.loads_mvar.shape,
(self.num_scenarios, self.env.num_loads))
# Ensure that portion of negative var loads (leading power
# factor) is close to the lead_pf_probability.
neg_portion = (self.env.loads_mvar < 0).sum().sum() \
/ (self.num_scenarios * self.env.num_loads)
# Ensure we're within 0.75 * prob and 1.25 * prob. This seems
# reasonable.
self.assertLessEqual(neg_portion, 1.25 * self.lead_pf_probability)
self.assertGreaterEqual(neg_portion, 0.75 * self.lead_pf_probability)
def test_load_power_factors(self):
"""Ensure all loads have a power factor greater than the min."""
# Ensure all power factors are valid. pf = P / |S|
s_mag = np.sqrt(np.square(self.env.loads_mw)
+ np.square(self.env.loads_mvar))
# Suppress numpy warnings - we'll be replacing NaNs.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
pf = self.env.loads_mw / s_mag
# For sake of testing, set loads with 0 power to have a
# power factor of 1.
pf[np.isnan(pf)] = 1
np_test.assert_array_less(self.min_load_pf, pf)
def test_loads_on_match_probability(self):
"""Ensure the proportion of loads which are on matches the
load_on_probability to a reasonable tolerance.
"""
# First, ensure the zeros match up between loads_mw and loads_mvar.
mw_0 = self.env.loads_mw == 0
np.testing.assert_array_equal(mw_0, self.env.loads_mvar == 0)
# Now, ensure the total portion of loads that are "on" is close
# to the load_on_probability.
# noinspection PyUnresolvedReferences
portion = (~mw_0).sum().sum() \
/ (self.num_scenarios * self.env.num_loads)
# Ensure we're within 0.75 * prob and 1.25 * prob. This seems
# reasonable.
self.assertLessEqual(portion, 1.25 * self.load_on_probability)
self.assertGreaterEqual(portion, 0.75 * self.load_on_probability)
def test_gen_mw(self):
# Start with shape.
self.assertEqual(self.env.gen_mw.shape,
(self.num_scenarios, self.env.num_gens))
# Ensure total generation is close to total load plus losses.
np_test.assert_allclose(self.env.gen_mw.sum(axis=1),
self.env.total_load_mw * (1 + LOSS), rtol=1e-6)
# TODO: Since the generators in this case have ridiculously high
# maximums, I'm not going to bother testing that all gens are
# within their bounds. When we move to a more realistic case,
# e.g. the Texas 2000 bus case, we need to test that.
#
# # Ensure generator outputs are within bounds.
# for gen_idx, row in enumerate(env.gen_init_data.itertuples()):
# gen_output = env.gen_mw[:, gen_idx]
# # noinspection PyUnresolvedReferences
# self.assertTrue((gen_output <= row.GenMWMax).all())
# # noinspection PyUnresolvedReferences
# self.assertTrue((gen_output >= row.GenMWMin).all())
def test_gen_v(self):
# Shape.
self.assertEqual(self.env.gen_v.shape,
(self.env.num_scenarios, self.env.num_gens))
# Values.
self.assertTrue(
((self.env.gen_v >= self.gen_voltage_range[0]).all()
and
(self.env.gen_v <= self.gen_voltage_range[1]).all()
)
)
def test_action_space(self):
self.assertIsInstance(self.env.action_space, Discrete)
# Plus 1 because no-op action
self.assertEqual(self.env.action_space.n,
self.env.num_gens * self.num_gen_voltage_bins + 1)
def test_gen_bins(self):
# Hard coding!
np.testing.assert_allclose(
np.array([0.9, 0.925, 0.95, 0.975, 1.0, 1.025, 1.05, 1.075, 1.1]),
self.env.gen_bins)
def test_gen_action_array(self):
# Minus 1 because no-op action.
self.assertEqual(self.env.action_space.n - 1,
self.env.gen_action_array.shape[0])
self.assertEqual(2, self.env.gen_action_array.shape[1])
# Initialize array for comparison. Again, -1 due to no-op.
a = np.zeros(shape=(self.env.action_space.n - 1, 2), dtype=int)
# Put generator bus numbers in column 0. No need to worry about
# multiple generators at the same bus for this case.
a[:, 0] = np.array(
self.env.gen_init_data['BusNum'].tolist()
* self.num_gen_voltage_bins)
# Write a crappy, simple, loop to put the indices of the
# generator voltage levels in.
b = []
for i in range(self.num_gen_voltage_bins):
for _ in range(self.env.num_gens):
b.append(i)
a[:, 1] = np.array(b)
np.testing.assert_array_equal(a, self.env.gen_action_array)
def test_num_obs(self):
"""Ensure | |
#python
'''
This file serves to declutter plasmid_map_viewer.py by adding detailed functionality, specifically on adding visuals
to the Plasmid Circle for specific parts (promoter, terminator, CDS, etc).
Consider: https://sbolstandard.org/wp-content/uploads/2017/04/SBOL-Visual-2.1.pdf
'''
import math
import logging
"""
Inputs:
js_info: (dict)
circle_radius: (int) Radius size of circle in javascript (eg 300)
circle_line_width: (int) Thickness of line in plasmid
center_coordinates: (list) Each internal part is an int [x,y]
pointer_len: (int) Length of pointer
pointer_thick: (int) Thickness of pointer
text_size: (int) Size of text
title_text_size: (int) Size of title text
js_feat: (dict)
percentage: (float)
name: (str)
color: (str)
start_bp: (int) Start in plasmid in terms of base pairs.
end_bp: (int) End in plasmid in terms of base pairs.
start_circle: (list) [x,y] for starting point on canvas.
end_circle: (list) [x,y] for ending point on canvas.
bp_len: (int) Length in base pairs.
midpoint: (list) [x,y] midpoint location of feature
pointer_direction: (str) 'out' or 'in'.
typ: The type of entity
Outputs:
js_str: (An internal javascript string representing the visuals of this object.)
"""
def make_sbol_visuals_js(js_feat, js_info, gb_info):
js_str = ''
typ = js_feat['typ']
if typ == "promoter":
js_str += make_promoter_visual(js_feat, js_info)
elif typ == "terminator":
js_str += make_terminator_visual(js_feat, js_info)
elif typ == "rbs":
js_str += make_rbs_visual(js_feat, js_info)
elif typ == "cds":
js_str += make_cds_visual(js_feat, js_info)
elif typ == "scar":
js_str += make_scar_visual(js_feat, js_info)
else:
logging.critical("\n Could not recognize typ: " + typ + "\n")
js_str += ""
return js_str
"""
Inputs:
js_info: (dict)
circle_radius: (int) Radius size of circle in javascript (eg 200)
circle_line_width: (int) Thickness of line in plasmid
center_coordinates: (list) Each internal part is an int [x,y]
pointer_len: (int) Length of pointer
pointer_thick: (int) Thickness of pointer
text_size: (int) Size of text
promoter_info: (dict)
percent_start: (float) Number between [0,100] that represents how deep into the angle the promoter symbol starts.
20 if not given.
prcnt: (float) [necessary if no pixels] percent increase
pixels: (float) [necessary if no prcnt] pixel length increase
line_width: (float) [optional] line width of promoter arrow
arrow_angle: (float) [optional] (in degrees) angle between middle line of arrow and the two flags. 35 if not given. [10,80]
flag_length: (int) [optional] length of arrow flags in pixels
js_feat: (dict)
percentage: (float)
name: (str)
color: (str)
start_bp: (int) Start in plasmid in terms of base pairs.
end_bp: (int) End in plasmid in terms of base pairs.
start_circle: (list) [x,y] for starting point on canvas.
end_circle: (list) [x,y] for ending point on canvas.
bp_len: (int) Length in base pairs.
midpoint: (list) [x,y] midpoint location of feature
pointer_direction: (str) 'out' or 'in'.
typ: The type of entity
Outputs:
js_str: (An internal javascript string representing the visuals of this object.)
"""
def make_promoter_visual(js_feat, js_info):
logging.debug("Making Promoter.")
if 'promoter_info' in js_info:
promoter_info = js_info['promoter_info']
else:
logging.critical("DID NOT FIND PROMOTER INFO: ")
logging.critical(js_info)
return ""
js_str = "//Promoter Symbol:\n"
js_str += "ctx.strokeStyle = 'black'; "
"""
The promoter will start at the 20% point of the region.
"""
radius = js_info['circle_radius']
cc = js_info['center_coordinates']
percent_angle_to_promoter = promoter_info['percent_start']
relative_angle_to_start = (js_feat['percentage'] * (percent_angle_to_promoter/100))*(math.pi * 2)
starting_angle = get_angle_from_point(js_feat['start_circle'],cc)
promoter_symbol_start_angle = starting_angle + relative_angle_to_start
starting_coordinates = [cc[0] + radius*(math.cos(promoter_symbol_start_angle)),cc[1] + radius*(math.sin(promoter_symbol_start_angle))]
if 'prcnt' in promoter_info:
end_line_coordinates = line_extension_coordinates(cc,starting_coordinates,"prcnt", promoter_info['prcnt'])
elif 'pixels' in promoter_info:
end_line_coordinates = line_extension_coordinates(cc,starting_coordinates,"pixels", promoter_info['pixels'])
else:
raise Exception("Neither pixels nor percent provided in promoter line extension info dict.")
if 'line_width' in promoter_info:
line_width = promoter_info['line_width']
else:
line_width = 3
if 'arrow_angle' in promoter_info:
arrow_angle = promoter_info['arrow_angle']
if arrow_angle > 80 or arrow_angle < 10:
raise Exception("Angle of flags of arrow must be between 10 and 80 degrees.")
else:
arrow_angle = 35.0
if 'flag_length' in promoter_info:
flag_length = promoter_info['flag_length']
else:
flag_length = 10
js_str += 'ctx.lineWidth = ' + str(line_width) + '; '
js_str += 'ctx.beginPath(); '
js_str += 'ctx.moveTo('+ str(starting_coordinates[0]) + ',' + str(starting_coordinates[1]) + '); '
js_str += 'ctx.lineTo('+ str(end_line_coordinates[0]) + ',' + str(end_line_coordinates[1]) + '); '
js_str += 'ctx.stroke(); '
#Now we must draw an arrow- first we use an extended radius to create an arc around the promoter area.
#Then we make a small arrow at the end of the promoter region.
big_radius = math.sqrt(((end_line_coordinates[0] - cc[0])**2) + ((end_line_coordinates[1] - cc[0])**2))
ending_angle = get_angle_from_point(js_feat['end_circle'],cc)
#We draw the promoter symbol arc on the canvas:
js_str += 'ctx.beginPath();'
js_str += 'ctx.arc(' + str(cc[0]) + ',' + str(cc[1]) + ',' + str(big_radius) + ',' + str(promoter_symbol_start_angle)
js_str += ',' + str(ending_angle) + ');'
js_str += 'ctx.stroke(); \n'
#The ending point for the promoter symbol arc:
p_symbol_end = [cc[0] + big_radius*(math.cos(ending_angle)),cc[1] + big_radius*(math.sin(ending_angle))]
#We make the javascript text for the arrow flags
js_str += make_arrow(cc ,p_symbol_end, arrow_angle, flag_length, line_width)
js_str += "\n\n"
return js_str
"""
Inputs:
js_info: (dict)
circle_radius: (int) Radius size of circle in javascript (eg 200)
circle_line_width: (int) Thickness of line in plasmid
center_coordinates: (list) Each internal part is an int [x,y]
pointer_len: (int) Length of pointer
pointer_thick: (int) Thickness of pointer
text_size: (int) Size of text
terminator_info: (dict)
percent_center: (float) between [0,100] indicating where center of T is in region.
prcnt: (float) [necessary if no pixels] percent increase from center to outer edge of Terminator Symbol
pixels: (float) [necessary if no prcnt] pixel length increase from circle point to outer edge of Terminator Symbol
base_width: (float) [optional] Width of inner part of the "T".
base_height: (float) Height of inner part of the "T"
top_width: (float) width of the top part of the T.
top_height: (float) height of the top part of the T.
internal_color: (str) [optional] Coloring of the inside of the rbs circle.
border_color: (str) Color of the border of the T.
border_width: (float) width of the border of the T.
js_feat: (dict)
percentage: (float)
name: (str)
color: (str)
start_bp: (int) Start in plasmid in terms of base pairs.
end_bp: (int) End in plasmid in terms of base pairs.
start_circle: (list) [x,y] for starting point on canvas.
end_circle: (list) [x,y] for ending point on canvas.
bp_len: (int) Length in base pairs.
midpoint: (list) [x,y] midpoint location of feature
pointer_direction: (str) 'out' or 'in'.
typ: The type of entity
Outputs:
js_str: (An internal javascript string representing the visuals of this object.)
"""
def make_terminator_visual(js_feat, js_info):
"""
The terminator symbol will look like a capital T with filled in color light red: "#EA6062"
We will use two rectangles and two thin black lines to outline it (size 2)
There are 8 variables - 8 points that symbolize the T, each is a list of floats.
We calculate those and then build the rectangle using them.
"""
logging.debug("Making terminator")
if "terminator_info" in js_info:
terminator_info = js_info["terminator_info"]
else:
logging.critical("Terminator info not found in js_info.")
return ""
radius = js_info['circle_radius']
cc = js_info['center_coordinates']
js_str = "//Terminator Symbol: \n"
"""
First, we find the 8 variables.
"""
percent_center = terminator_info["percent_center"]
relative_angle_to_t_center = (js_feat['percentage'] * (percent_center/100))*(math.pi * 2)
starting_angle = get_angle_from_point(js_feat['start_circle'],cc)
terminator_symbol_center_angle = starting_angle + relative_angle_to_t_center
"""
We start using names of variables specific to the terminator symbol shape.
alpha represents the center of the base of the T on the circle's circumference.
beta represents the extension of the line from center to alpha where the base of T intersects the top section of the T.
gamma represents the extension of the same line from center to alpha and beta to the top of the T.
"""
alpha = [cc[0] + radius*(math.cos(terminator_symbol_center_angle)),cc[1] + radius*(math.sin(terminator_symbol_center_angle))]
logging.debug("ALPHA:")
logging.debug(alpha)
if "base_height" in terminator_info:
beta = line_extension_coordinates(cc,alpha,"pixels", terminator_info["base_height"])
logging.debug("BETA:")
logging.debug(beta)
else:
raise Exception("base_height must be included in the terminator info in the config file.")
if "top_height" in terminator_info:
gamma = line_extension_coordinates(cc,beta,"pixels", terminator_info["top_height"])
logging.debug("GAMMA:")
logging.debug(gamma)
else:
raise Exception("top_height must be included in the terminator info in the config file.")
"""
We start finding the variables on the edge of the T that represent it.
var_a is one base, var_b is intersection from a-base to a-side of top, var_c is a-side bottom corner of top,
var_d is top a-side corner of top, var_h is base opposite to var_a, var_e is top h-side corner of top,
var_f | |
<reponame>dangervon/neutron<gh_stars>0
# Copyright (c) 2015 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import types
import uuid
from keystoneauth1 import exceptions as ks_exc
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api.definitions import port_resource_request
from neutron_lib.api.definitions import port_resource_request_groups
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import qos as qos_apidef
from neutron_lib.api.definitions import qos_bw_limit_direction
from neutron_lib.api.definitions import qos_bw_minimum_ingress
from neutron_lib.api.definitions import qos_default
from neutron_lib.api.definitions import qos_port_network_policy
from neutron_lib.api.definitions import qos_pps_minimum_rule
from neutron_lib.api.definitions import qos_pps_minimum_rule_alias
from neutron_lib.api.definitions import qos_pps_rule
from neutron_lib.api.definitions import qos_rule_type_details
from neutron_lib.api.definitions import qos_rule_type_filter
from neutron_lib.api.definitions import qos_rules_alias
from neutron_lib.callbacks import events as callbacks_events
from neutron_lib.callbacks import registry as callbacks_registry
from neutron_lib.callbacks import resources as callbacks_resources
from neutron_lib import constants as nl_constants
from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib.db import resource_extend
from neutron_lib import exceptions as lib_exc
from neutron_lib.exceptions import placement as place_exc
from neutron_lib.exceptions import qos as qos_exc
from neutron_lib.placement import client as pl_client
from neutron_lib.placement import utils as pl_utils
from neutron_lib.services.qos import constants as qos_consts
import os_resource_classes as orc
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
from neutron.db import db_base_plugin_common
from neutron.exceptions import qos as neutron_qos_exc
from neutron.extensions import qos
from neutron.objects import base as base_obj
from neutron.objects import network as network_object
from neutron.objects import ports as ports_object
from neutron.objects.qos import policy as policy_object
from neutron.objects.qos import qos_policy_validator as checker
from neutron.objects.qos import rule as rule_object
from neutron.objects.qos import rule_type as rule_type_object
from neutron.services.qos.drivers import manager
LOG = logging.getLogger(__name__)
# TODO(przszc): Move this function to n-lib
def update_qos_allocation(self, consumer_uuid, alloc_diff):
    """Update allocation for QoS minimum bandwidth consumer

    :param consumer_uuid: The uuid of the consumer, in case of bound port
                          owned by a VM, the VM uuid.
    :param alloc_diff: A dict which contains RP UUIDs as keys and
                       corresponding fields to update for the allocation
                       under the given resource provider.
    :raises PlacementAllocationRemoved: if the consumer has no allocation.
    :raises PlacementAllocationRpNotExists: if a resource provider named in
        ``alloc_diff`` is absent from the consumer's allocation.
    :raises PlacementAllocationGenerationConflict: if every retry hit a
        concurrent-update conflict in Placement.
    """
    # Placement guards allocations with a generation number; on a
    # 'concurrent_update' conflict we re-read and retry a bounded number
    # of times instead of failing immediately.
    for i in range(pl_client.GENERATION_CONFLICT_RETRIES):
        body = self.list_allocations(consumer_uuid)
        if not body['allocations']:
            # Allocation disappeared entirely (e.g. the consumer was
            # deleted concurrently): nothing left to adjust.
            raise place_exc.PlacementAllocationRemoved(consumer=consumer_uuid)
        # Count new values based on the diff in alloc_diff
        for rp_uuid, diff in alloc_diff.items():
            if rp_uuid not in body['allocations']:
                raise place_exc.PlacementAllocationRpNotExists(
                    resource_provider=rp_uuid, consumer=consumer_uuid)
            for drctn, value in diff.items():
                # 'value' may be negative (release) or positive (claim);
                # missing resource classes start from 0.
                orig_value = (body['allocations'][rp_uuid][
                    'resources'].get(drctn, 0))
                new_value = orig_value + value
                if new_value > 0:
                    body['allocations'][rp_uuid]['resources'][
                        drctn] = new_value
                else:
                    # Remove the resource class if the new value is 0
                    resources = body['allocations'][rp_uuid]['resources']
                    resources.pop(drctn, None)
        # Remove RPs without any resources
        body['allocations'] = {
            rp: alloc for rp, alloc in body['allocations'].items()
            if alloc.get('resources')}
        try:
            # Update allocations has no return body, but leave the loop
            return self.update_allocation(consumer_uuid, body)
        except ks_exc.Conflict as e:
            resp = e.response.json()
            if resp['errors'][0]['code'] == 'placement.concurrent_update':
                continue
            raise
    raise place_exc.PlacementAllocationGenerationConflict(
        consumer=consumer_uuid)
@resource_extend.has_resource_extenders
class QoSPlugin(qos.QoSPluginBase):
"""Implementation of the Neutron QoS Service Plugin.
This class implements a Quality of Service plugin that provides quality of
service parameters over ports and networks.
"""
# API extension aliases implemented by this service plugin; neutron
# advertises these in its extension list.
supported_extension_aliases = [
    qos_apidef.ALIAS,
    qos_bw_limit_direction.ALIAS,
    qos_default.ALIAS,
    qos_rule_type_details.ALIAS,
    qos_rule_type_filter.ALIAS,
    port_resource_request.ALIAS,
    port_resource_request_groups.ALIAS,
    qos_bw_minimum_ingress.ALIAS,
    qos_rules_alias.ALIAS,
    qos_port_network_policy.ALIAS,
    qos_pps_rule.ALIAS,
    qos_pps_minimum_rule.ALIAS,
    qos_pps_minimum_rule_alias.ALIAS,
]
# Pagination, sorting and filter validation are handled natively by this
# plugin rather than emulated by the API layer.
__native_pagination_support = True
__native_sorting_support = True
__filter_validation_support = True
def __init__(self):
    """Set up the driver manager, the Placement client and port/network
    lifecycle callbacks used to validate and track QoS policies."""
    super(QoSPlugin, self).__init__()
    self.driver_manager = manager.QosServiceDriverManager()
    self._placement_client = pl_client.PlacementAPIClient(cfg.CONF)
    # Graft the module-level update_qos_allocation() helper onto the client
    # instance until it is moved to neutron-lib (see TODO above).
    self._placement_client.update_qos_allocation = types.MethodType(
        update_qos_allocation, self._placement_client)
    # Validate the QoS policy of a port before its creation is committed.
    callbacks_registry.subscribe(
        self._validate_create_port_callback,
        callbacks_resources.PORT,
        callbacks_events.PRECOMMIT_CREATE)
    # Adjust Placement allocations when a bound port's policy changes.
    callbacks_registry.subscribe(
        self._check_port_for_placement_allocation_change,
        callbacks_resources.PORT,
        callbacks_events.BEFORE_UPDATE)
    # Validate policy changes on port update before commit.
    callbacks_registry.subscribe(
        self._validate_update_port_callback,
        callbacks_resources.PORT,
        callbacks_events.PRECOMMIT_UPDATE)
    # Validate network-level policy changes before commit.
    callbacks_registry.subscribe(
        self._validate_update_network_callback,
        callbacks_resources.NETWORK,
        callbacks_events.PRECOMMIT_UPDATE)
    # Validate the policy assigned to a network at creation time.
    callbacks_registry.subscribe(
        self._validate_create_network_callback,
        callbacks_resources.NETWORK,
        callbacks_events.PRECOMMIT_CREATE)
    # Re-check Placement allocations after a network policy update.
    callbacks_registry.subscribe(
        self._check_network_for_placement_allocation_change,
        callbacks_resources.NETWORK,
        callbacks_events.AFTER_UPDATE)
@staticmethod
@resource_extend.extends([port_def.COLLECTION_NAME])
def _extend_port_resource_request(port_res, port_db):
    """Add resource request to a port.

    ``port_db`` may be either a versioned object or a DB model; the bound
    QoS policy id is resolved accordingly (the port-level policy wins over
    the network-level one in both cases).
    """
    if isinstance(port_db, ports_object.Port):
        qos_id = port_db.qos_policy_id or port_db.qos_network_policy_id
    else:
        # DB model: resolve the policy id through the binding relations.
        qos_id = None
        if port_db.get('qos_policy_binding'):
            qos_id = port_db.qos_policy_binding.policy_id
        elif port_db.get('qos_network_policy_binding'):
            qos_id = port_db.qos_network_policy_binding.policy_id
    port_res['resource_request'] = None
    if not qos_id:
        return port_res
    if port_res.get('bulk'):
        # Bulk listing: only stash the lookup keys here; the expensive
        # request-group computation is done once per batch by
        # _extend_port_resource_request_bulk(), which pops these keys.
        port_res['resource_request'] = {
            'qos_id': qos_id,
            'network_id': port_db.network_id,
            'vnic_type': port_res[portbindings.VNIC_TYPE],
            'port_id': port_db.id,
        }
        return port_res
    min_bw_request_group = QoSPlugin._get_min_bw_request_group(
        qos_id, port_db.id, port_res[portbindings.VNIC_TYPE],
        port_db.network_id)
    min_pps_request_group = QoSPlugin._get_min_pps_request_group(
        qos_id, port_db.id, port_res[portbindings.VNIC_TYPE])
    port_res['resource_request'] = (
        QoSPlugin._get_resource_request(min_bw_request_group,
                                        min_pps_request_group))
    return port_res
@staticmethod
def _get_resource_request(min_bw_request_group, min_pps_request_group):
    """Assemble a port ``resource_request`` from the per-rule-type groups.

    Empty groups are dropped; ``None`` is returned when neither rule type
    produced a request group.
    """
    groups = [grp for grp in (min_bw_request_group, min_pps_request_group)
              if grp]
    if not groups:
        return None
    return {
        'request_groups': groups,
        'same_subtree': [grp['id'] for grp in groups],
    }
@staticmethod
def _get_min_bw_resources(min_bw_rules):
    """Translate minimum-bandwidth rules into Placement resource amounts."""
    # NOTE(ralonsoh): we should move this translation dict to n-lib.
    direction_to_class = {
        nl_constants.INGRESS_DIRECTION:
            orc.NET_BW_IGR_KILOBIT_PER_SEC,
        nl_constants.EGRESS_DIRECTION:
            orc.NET_BW_EGR_KILOBIT_PER_SEC,
    }
    return {direction_to_class[rule.direction]: rule.min_kbps
            for rule in min_bw_rules}
@staticmethod
def _get_min_bw_request_group(qos_policy_id, port_id, vnic_type,
                              network_id, min_bw_rules=None,
                              segments=None):
    """Build the Placement request group for minimum-bandwidth rules.

    Rules and segments are fetched from the DB when not supplied by the
    caller (the bulk path passes pre-fetched, cached values).
    Returns an empty dict when no resources or traits can be derived.
    """
    if not min_bw_rules:
        min_bw_rules = rule_object.QosMinimumBandwidthRule.get_objects(
            context.get_admin_context(), qos_policy_id=qos_policy_id)
    if not segments:
        segments = network_object.NetworkSegment.get_objects(
            context.get_admin_context(), network_id=network_id)
    resources = QoSPlugin._get_min_bw_resources(min_bw_rules)
    traits = QoSPlugin._get_min_bw_traits(vnic_type, segments)
    if not (resources and traits):
        return {}
    return {
        'id': str(pl_utils.resource_request_group_uuid(
            uuid.UUID(port_id), min_bw_rules)),
        'required': traits,
        'resources': resources,
    }
@staticmethod
def _get_min_pps_request_group(qos_policy_id, port_id, vnic_type,
                               min_pps_rules=None):
    """Build the Placement request group for minimum-packet-rate rules.

    Rules are fetched from the DB when not supplied by the caller.
    Returns an empty dict when no resources or traits can be derived.
    """
    if not min_pps_rules:
        min_pps_rules = rule_object.QosMinimumPacketRateRule.get_objects(
            context.get_admin_context(),
            qos_policy_id=qos_policy_id)
    resources = QoSPlugin._get_min_pps_resources(min_pps_rules)
    traits = [pl_utils.vnic_type_trait(vnic_type)]
    if not (resources and traits):
        return {}
    return {
        'id': str(pl_utils.resource_request_group_uuid(
            uuid.UUID(port_id), min_pps_rules)),
        'required': traits,
        'resources': resources,
    }
@staticmethod
def _get_min_pps_resources(min_pps_rules):
    """Translate minimum-packet-rate rules into Placement resource amounts."""
    direction_to_class = {
        nl_constants.INGRESS_DIRECTION:
            orc.NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC,
        nl_constants.EGRESS_DIRECTION:
            orc.NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC,
        nl_constants.ANY_DIRECTION:
            orc.NET_PACKET_RATE_KILOPACKET_PER_SEC,
    }
    return {direction_to_class[rule.direction]: rule.min_kpps
            for rule in min_pps_rules}
@staticmethod
def _get_min_bw_traits(vnic_type, segments):
    """Return the Placement traits required for minimum-bandwidth requests.

    :param vnic_type: VNIC type of the port, mapped to a vnic-type trait.
    :param segments: network segments; only the first one is considered.
    :returns: ``[physnet_trait, vnic_trait]``, or ``[]`` when no physical
              network can be determined from the first segment.
    """
    # TODO(lajoskatona): Change to handle all segments when any traits
    # support will be available. See Placement spec:
    # https://review.opendev.org/565730
    # Fix: guard the empty segment list; 'segments[0]' alone raised
    # IndexError even though a falsy first segment was already handled.
    first_segment = segments[0] if segments else None
    if not first_segment or not first_segment.physical_network:
        return []
    physnet_trait = pl_utils.physnet_trait(
        first_segment.physical_network)
    # NOTE(ralonsoh): we should not rely on the current execution order of
    # the port extending functions. Although here we have
    # port_res[VNIC_TYPE], we should retrieve this value from the port DB
    # object instead.
    vnic_trait = pl_utils.vnic_type_trait(vnic_type)
    return [physnet_trait, vnic_trait]
@staticmethod
@resource_extend.extends([port_def.COLLECTION_NAME_BULK])
def _extend_port_resource_request_bulk(ports_res, noop):
    """Add resource request to a list of ports.

    Consumes the lookup keys stashed per port by
    ``_extend_port_resource_request`` (bulk branch) and replaces them with
    the final resource request. Per-policy rules and per-network segments
    are cached so each is fetched from the DB at most once per batch.
    """
    min_bw_rules = dict()
    min_pps_rules = dict()
    net_segments = dict()
    for port_res in ports_res:
        if port_res.get('resource_request') is None:
            continue
        # Pop the stashed keys so only the computed request remains in
        # the API response.
        qos_id = port_res['resource_request'].pop('qos_id', None)
        if not qos_id:
            port_res['resource_request'] = None
            continue
        net_id = port_res['resource_request'].pop('network_id')
        vnic_type = port_res['resource_request'].pop('vnic_type')
        port_id = port_res['resource_request'].pop('port_id')
        if qos_id not in min_bw_rules:
            rules = rule_object.QosMinimumBandwidthRule.get_objects(
                context.get_admin_context(), qos_policy_id=qos_id)
            min_bw_rules[qos_id] = rules
        if net_id not in net_segments:
            segments = network_object.NetworkSegment.get_objects(
                context.get_admin_context(),
                network_id=net_id)
            net_segments[net_id] = segments
        min_bw_request_group = QoSPlugin._get_min_bw_request_group(
            qos_id, port_id, vnic_type, net_id,
            min_bw_rules[qos_id], net_segments[net_id])
        if qos_id not in min_pps_rules:
            rules = rule_object.QosMinimumPacketRateRule.get_objects(
                context.get_admin_context(), qos_policy_id=qos_id)
            min_pps_rules[qos_id] = rules
        min_pps_request_group = QoSPlugin._get_min_pps_request_group(
            qos_id, port_id, vnic_type, min_pps_rules[qos_id])
        port_res['resource_request'] = (
            QoSPlugin._get_resource_request(min_bw_request_group,
                                            min_pps_request_group))
    return ports_res
def _get_ports_with_policy(self, context, policy):
    """Return every port affected by *policy*, bound directly or via its
    network (network-level binding only counts when the port has no
    policy of its own)."""
    net_ports = ports_object.Port.get_objects(
        context, network_id=policy.get_bound_networks())
    # A port-level policy overrides the network-level one.
    net_ports = [p for p in net_ports if p.qos_policy_id is None]
    direct_ports = ports_object.Port.get_objects(
        context, id=policy.get_bound_ports())
    # De-duplicate: a port may show up in both result sets.
    return list({*direct_ports, *net_ports})
def _validate_create_port_callback(self, resource, event, trigger,
                                   payload=None):
    """PRECOMMIT_CREATE handler: validate the QoS policy bound (directly
    or through the network) to a freshly created port."""
    ctx = payload.context
    port = ports_object.Port.get_object(ctx, id=payload.resource_id)
    policy_id = port.qos_policy_id or port.qos_network_policy_id
    if policy_id is None:
        return
    policy = policy_object.QosPolicy.get_object(
        ctx.elevated(), id=policy_id)
    self.validate_policy_for_port(ctx, policy, port)
def _check_port_for_placement_allocation_change(self, resource, event,
                                                trigger, payload):
    """BEFORE_UPDATE handler: adjust Placement allocation when the QoS
    policy of a compute-bound port changes."""
    context = payload.context
    orig_port = payload.states[0]
    port = payload.latest_state
    original_policy_id = (orig_port.get(qos_consts.QOS_POLICY_ID) or
                          orig_port.get(qos_consts.QOS_NETWORK_POLICY_ID))
    if (qos_consts.QOS_POLICY_ID not in port and
        qos_consts.QOS_NETWORK_POLICY_ID not in port):
        # The update request does not touch any QoS field: nothing to do.
        return
    policy_id = (port.get(qos_consts.QOS_POLICY_ID) or
                 port.get(qos_consts.QOS_NETWORK_POLICY_ID))
    if policy_id == original_policy_id:
        # Policy unchanged, allocation stays as-is.
        return
    # Do this only for compute bound ports
    if (nl_constants.DEVICE_OWNER_COMPUTE_PREFIX in
        orig_port['device_owner']):
        # Either policy id may be None here (policy added or removed);
        # get_object then yields None and the helper handles it.
        original_policy = policy_object.QosPolicy.get_object(
            context.elevated(), id=original_policy_id)
        policy = policy_object.QosPolicy.get_object(
            context.elevated(), id=policy_id)
        self._change_placement_allocation(original_policy, policy,
                                          orig_port, port)
def _translate_rule_for_placement(self, rule):
    """Convert a minimum-bandwidth or minimum-packet-rate rule into a
    Placement resource dict ({resource class: amount}); unknown rule
    types yield an empty dict."""
    direction = rule.get('direction')
    if isinstance(rule, rule_object.QosMinimumBandwidthRule):
        # TODO(lajoskatona): move this to neutron-lib, see similar
        # dict @l125.
        resource_class = (
            orc.NET_BW_EGR_KILOBIT_PER_SEC if direction == 'egress'
            else orc.NET_BW_IGR_KILOBIT_PER_SEC)
        return {resource_class: rule.get('min_kbps')}
    if isinstance(rule, rule_object.QosMinimumPacketRateRule):
        # TODO(przszc): move this to neutron-lib, see similar
        # dict @l268.
        direction_to_class = {
            nl_constants.INGRESS_DIRECTION:
                orc.NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC,
            nl_constants.EGRESS_DIRECTION:
                orc.NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC,
            nl_constants.ANY_DIRECTION:
                orc.NET_PACKET_RATE_KILOPACKET_PER_SEC,
        }
        return {direction_to_class[direction]: rule.get('min_kpps')}
    return {}
def _prepare_allocation_needs(self, original_port, rule_type_to_rp_map,
original_rules, desired_rules):
alloc_diff = {}
for rule in original_rules:
translated_rule = self._translate_rule_for_placement(rule)
# NOTE(przszc): Updating Placement resource allocation relies on
# calculating a difference between current allocation and desired
# one. If we want to release resources we need to get a negative
# value of the original allocation.
translated_rule = {rc: v * -1 for rc, v in | |
validating any provided loading and
runtime contexts, and considering all Workflow steps if applicable, or the single application otherwise.
:param package_dict: package content representation as a json dictionary.
:param package_name: name to use to create the package file.
:param data_source: identifier of the data source to map to specific ADES, or map to localhost if ``None``.
:param only_dump_file: specify if the ``CWLFactoryCallable`` should be validated and returned.
:param tmp_dir: location of the temporary directory to dump files (deleted on exit).
:param loading_context: cwltool context used to create the cwl package (required if ``only_dump_file=False``)
:param runtime_context: cwltool context used to execute the cwl package (required if ``only_dump_file=False``)
:param process_offering: JSON body of the process description payload (used as I/O hint ordering)
:returns:
If ``only_dump_file`` is ``True``: ``None``.
Otherwise, tuple of:
- instance of ``CWLFactoryCallable``
- package type (``PROCESS_WORKFLOW`` or ``PROCESS_APPLICATION``)
- mapping of each step ID with their package name that must be run
.. warning::
Specified :paramref:`tmp_dir` will be deleted on exit.
"""
tmp_dir = tmp_dir or tempfile.mkdtemp()
tmp_json_cwl = os.path.join(tmp_dir, package_name)
# for workflows, retrieve each 'sub-package' file
package_type = _get_package_type(package_dict)
workflow_steps = get_package_workflow_steps(package_dict)
step_packages = {}
for step in workflow_steps:
# generate sub-package file and update workflow step to point to it
step_process_url = get_process_location(step["reference"], data_source)
package_body, package_name = _get_process_package(step_process_url)
_load_package_content(package_body, package_name, tmp_dir=tmp_dir,
data_source=data_source, only_dump_file=True)
package_dict["steps"][step["name"]]["run"] = package_name
step_packages[step["name"]] = package_name
# fix I/O to preserve ordering from dump/load, and normalize them to consistent list of objects
process_offering_hint = process_offering or {}
package_input_hint = process_offering_hint.get("inputs", [])
package_output_hint = process_offering_hint.get("outputs", [])
package_dict["inputs"] = normalize_ordered_io(package_dict["inputs"], order_hints=package_input_hint)
package_dict["outputs"] = normalize_ordered_io(package_dict["outputs"], order_hints=package_output_hint)
with open(tmp_json_cwl, "w") as f:
json.dump(package_dict, f)
if only_dump_file:
return
factory = CWLFactory(loading_context=loading_context, runtime_context=runtime_context)
package = factory.make(tmp_json_cwl) # type: CWLFactoryCallable
shutil.rmtree(tmp_dir)
return package, package_type, step_packages
def _merge_package_inputs_outputs(wps_inputs_defs,     # type: Union[List[ANY_IO_Type], Dict[str, ANY_IO_Type]]
                                  cwl_inputs_list,     # type: List[WPS_Input_Type]
                                  wps_outputs_defs,    # type: Union[List[ANY_IO_Type], Dict[str, ANY_IO_Type]]
                                  cwl_outputs_list,    # type: List[WPS_Output_Type]
                                  ):                   # type: (...) -> Tuple[List[JSON_IO_Type], List[JSON_IO_Type]]
    """
    Merges corresponding metadata of I/O definitions from :term:`CWL` and :term:`WPS` sources.

    The resulting definitions are used for process creation and are returned by ``GetCapabilities``
    and ``DescribeProcess``, combining the `WPS` payload (request ``POST``) with the `CWL` file contents.

    .. note::
        Parameters :paramref:`cwl_inputs_list` and :paramref:`cwl_outputs_list` are expected to be
        in :term:`WPS`-like format (i.e.: :term:`CWL` I/O converted to corresponding :term:`WPS` I/O objects).

    :param wps_inputs_defs: list or mapping of provided :term:`WPS` input definitions.
    :param cwl_inputs_list: processed list of :term:`CWL` inputs from the :term:`Application Package`.
    :param wps_outputs_defs: list or mapping of provided :term:`WPS` output definitions.
    :param cwl_outputs_list: processed list of :term:`CWL` inputs from the :term:`Application Package`.
    :returns:
        Tuple of (inputs, outputs) lists of I/O with merged contents between :term:`CWL` and :term:`WPS`.
    """
    merged_io = []
    for wps_io_defs, cwl_io_list, io_select in (
            (wps_inputs_defs, cwl_inputs_list, WPS_INPUT),
            (wps_outputs_defs, cwl_outputs_list, WPS_OUTPUT),
    ):
        # normalize mapping-style WPS definitions into an ordered list of objects
        if isinstance(wps_io_defs, dict):
            wps_io_defs = normalize_ordered_io(wps_io_defs)
        merged = merge_package_io(wps_io_defs, cwl_io_list, io_select)
        merged_io.append([wps2json_io(io_def) for io_def in merged])
    return merged_io[0], merged_io[1]
def _get_package_io(package_factory, io_select, as_json):
    # type: (CWLFactoryCallable, str, bool) -> List[PKG_IO_Type]
    """
    Retrieves I/O definitions from a validated :class:`CWLFactoryCallable`.

    .. seealso::
        Factory can be obtained with validation using :func:`_load_package_content`.

    :param package_factory: `CWL` factory that contains I/O references to the package definition.
    :param io_select: either :data:`WPS_INPUT` or :data:`WPS_OUTPUT` according to what needs to be processed.
    :param as_json: toggle to specific the desired output type.
    :returns: I/O format depends on value :paramref:`as_json`.
        If ``True``, converts the I/O definitions into `JSON` representation.
        If ``False``, converts the I/O definitions into `WPS` objects.
    """
    io_attrib_map = {
        WPS_INPUT: "inputs_record_schema",
        WPS_OUTPUT: "outputs_record_schema",
    }
    if io_select not in io_attrib_map:
        raise PackageTypeError("Unknown I/O selection: '{}'.".format(io_select))
    cwl_package_io = getattr(package_factory.t, io_attrib_map[io_select])
    wps_package_io = [cwl2wps_io(io_item, io_select)
                      for io_item in cwl_package_io["fields"]]
    if as_json:
        return [wps2json_io(io) for io in wps_package_io]
    return wps_package_io
def _get_package_inputs_outputs(package_factory,    # type: CWLFactoryCallable
                                as_json=False,      # type: bool
                                ):                  # type: (...) -> Tuple[List[PKG_IO_Type], List[PKG_IO_Type]]
    """
    Generates `WPS-like` ``(inputs, outputs)`` tuple using parsed CWL package definitions.
    """
    inputs = _get_package_io(package_factory, io_select=WPS_INPUT, as_json=as_json)
    outputs = _get_package_io(package_factory, io_select=WPS_OUTPUT, as_json=as_json)
    return inputs, outputs
def _update_package_metadata(wps_package_metadata, cwl_package_package):
# type: (JSON, CWL) -> None
"""
Updates the package `WPS` metadata dictionary from extractable `CWL` package definition.
"""
wps_package_metadata["title"] = wps_package_metadata.get("title", cwl_package_package.get("label", ""))
wps_package_metadata["abstract"] = wps_package_metadata.get("abstract", cwl_package_package.get("doc", ""))
if (
"$schemas" in cwl_package_package
and isinstance(cwl_package_package["$schemas"], list)
and "$namespaces" in cwl_package_package
and isinstance(cwl_package_package["$namespaces"], dict)
):
metadata = wps_package_metadata.get("metadata", [])
namespaces_inv = {v: k for k, v in cwl_package_package["$namespaces"]}
for schema in cwl_package_package["$schemas"]:
for namespace_url in namespaces_inv:
if schema.startswith(namespace_url):
metadata.append({"title": namespaces_inv[namespace_url], "href": schema})
wps_package_metadata["metadata"] = metadata
if "s:keywords" in cwl_package_package and isinstance(cwl_package_package["s:keywords"], list):
wps_package_metadata["keywords"] = list(
set(wps_package_metadata.get("keywords", [])) | set(cwl_package_package.get("s:keywords", []))
)
def _generate_process_with_cwl_from_reference(reference):
    # type: (str) -> Tuple[CWL, JSON]
    """
    Resolves the ``reference`` type (`CWL`, `WPS-1`, `WPS-2`, `WPS-3`) and generates a `CWL` ``package`` from it.

    Additionally provides minimal process details retrieved from the ``reference``.
    The number of details obtained from the process will depend on available parameters from its description as well
    as the number of metadata that can be mapped between it and the generated `CWL` package.

    .. note::
        ``cwl_package`` remains ``None`` when the remote response is neither
        XML nor a recognizable JSON process description.
    """
    cwl_package = None
    process_info = dict()
    # match against direct CWL reference
    reference_path, reference_ext = os.path.splitext(reference)
    reference_name = os.path.split(reference_path)[-1]
    if reference_ext.replace(".", "") in PACKAGE_EXTENSIONS:
        cwl_package = _load_package_file(reference)
        process_info = {"identifier": reference_name}
    # match against WPS-1/2 reference
    else:
        settings = get_settings()
        # retry a few times before giving up on the remote reference
        response = request_extra("GET", reference, retries=3, settings=settings)
        if response.status_code != HTTPOk.code:
            raise HTTPServiceUnavailable("Couldn't obtain a valid response from [{}]. Service response: [{} {}]"
                                         .format(reference, response.status_code, response.reason))
        content_type = get_header("Content-Type", response.headers)
        if any(ct in content_type for ct in CONTENT_TYPE_ANY_XML):
            # attempt to retrieve a WPS-1 ProcessDescription definition
            cwl_package, process_info = xml_wps2cwl(response, settings)
        elif any(ct in content_type for ct in [CONTENT_TYPE_APP_JSON]):
            payload = response.json()
            # attempt to retrieve a WPS-3 Process definition, owsContext is expected in body
            # OLD schema nests everything under 'process', OGC schema provides everything at the root
            if "process" in payload or "owsContext" in payload:
                process_info = payload.get("process", payload)
                ows_ref = process_info.get("owsContext", {}).get("offering", {}).get("content", {}).get("href")
                cwl_package = _load_package_file(ows_ref)
            # if somehow the CWL was referenced without an extension, handle it here
            # also handle parsed WPS-3 process description also with a reference
            elif "cwlVersion" in payload:
                cwl_package = _load_package_file(reference)
                process_info = {"identifier": reference_name}
    return cwl_package, process_info
def get_application_requirement(package):
    # type: (CWL) -> Dict[str, Any]
    """
    Retrieve the principal requirement that allows mapping to the appropriate process implementation.

    Obtains the first item in `CWL` package ``requirements`` or ``hints`` that corresponds to a `Weaver`-specific
    application type as defined in :py:data:`CWL_REQUIREMENT_APP_TYPES`.

    :returns: dictionary that minimally has ``class`` field, and optionally other parameters from that requirement.
    """
    # package can define requirements and/or hints,
    # if it's an application, only one CWL_REQUIREMENT_APP_TYPES is allowed,
    # workflow can have multiple, but they are not explicitly handled
    all_hints = (_get_package_requirements_as_class_list(package.get("requirements", {})) +
                 _get_package_requirements_as_class_list(package.get("hints", {})))
    app_hints = [hint for hint in all_hints
                 if any(hint["class"].endswith(app_type)
                        for app_type in CWL_REQUIREMENT_APP_TYPES)]
    if len(app_hints) > 1:
        raise ValueError("Package 'requirements' and/or 'hints' define too many conflicting values: {}, "
                         "only one permitted amongst {}.".format(list(app_hints), list(CWL_REQUIREMENT_APP_TYPES)))
    cwl_supported_reqs = list(CWL_REQUIREMENT_APP_TYPES) + [CWL_REQUIREMENT_INIT_WORKDIR]
    if not all(hint.get("class") in cwl_supported_reqs for hint in all_hints):
        raise PackageTypeError("Invalid requirement, the requirements supported are {0}".format(cwl_supported_reqs))
    return app_hints[0] if app_hints else {"class": ""}
def check_package_instance_compatible(package):
# type: (CWL) -> Optional[str]
"""
Verifies if an :term:`Application Package` definition is valid for the employed `Weaver` instance configuration.
Given that the :term:`CWL` is invalid for the active application, explains the reason why that package `always`
require remote execution.
When a package can sometimes be executed locally (:term:`ADES`) or remotely (:term:`EMS`) depending on the instance
configuration, such as in the case of a :data:`CWL_REQUIREMENT_APP_DOCKER`, return ``None``. This function instead
detects cases where a remote server is mandatory without ambiguity related to the current `Weaver` instance,
regardless whether remote should be an :term:`ADES` or a remote :term:`Provider` (:term:`WPS` or :term:`ESGF-CWT`).
:param package: CWL definition for the process.
:returns: reason message if must be executed remotely or ``None`` if it *could* be executed | |
#!/usr/bin/env python3
"""scan2pdf
Usage:
scan2pdf -L
scan2pdf --create-configuration DEVICE [CONFIG]
scan2pdf [--debug] [-C CONFIG] DEVICE TARGET
Options:
-L, --list-devices show available scanner devices
DEVICE device to use for scanning
TARGET target filename for scan
CONFIG configuration file
-C CONFIG, --configuration CONFIG configuration options in JSON format
--debug print debug information on error
--create-configuration create a configuration file with defaults
"""
import sys
import json
import os
import os.path
import ctypes
import re
import io
from copy import deepcopy
from collections import OrderedDict
from docopt import docopt
import pyinsane.abstract as pyinsane
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, PageBreak
from reportlab.lib.pagesizes import inch, cm, A4
from reportlab.platypus import Image as LabImage
class Error(Exception):
    """
    Generic error with an end-user readable message.

    Any extra keyword arguments are attached to the instance as attributes,
    e.g. ``Error("msg", inner=exc)`` exposes ``err.inner``.
    """
    def __init__(self, message, *args, **kwargs):
        super().__init__(*args)
        self.message = message
        self.__dict__.update(kwargs)

    def __str__(self):
        return self.message
def main():
    """Parse the command line and dispatch to the matching sub-command."""
    global __doc__
    args = docopt(__doc__, version='scan2pdf 0.1.0')
    try:
        if args['--list-devices']:
            main_list_devices(args)
        elif args['--create-configuration']:
            main_create_configuration(args)
        else:
            main_scan(args)
    except Error as ex:
        print('Error:', ex.message, file=sys.stderr)
        # With --debug, re-raise the wrapped low-level exception instead
        # of exiting, so the full traceback is visible.
        if args['--debug'] and hasattr(ex, 'inner'):
            raise ex.inner
        else:
            sys.exit(-1)
def main_list_devices(cmdline):
    """Print the name of every scanner device SANE reports, one per line."""
    try:
        devices = pyinsane.get_devices()
    except Exception as ex:
        raise Error('Unable to list devices. Is sane installed?', inner=ex)
    if not devices:
        raise Error('no devices found')
    for device in devices:
        print(device.name)
def main_create_configuration(cmdline):
    """Dump a commented, ready-to-edit configuration file for DEVICE.

    Every settable SANE option is listed with its valid values and its
    current value; output goes to CONFIG if given, otherwise to stdout.

    Raises:
        Error: if the device cannot be opened, its options cannot be
            read, or the output file cannot be written.
    """
    device = pyinsane.Scanner(name=cmdline['DEVICE'])
    try:
        device._open()
    except Exception as ex:
        raise Error('Unable to open device "%s"' % device.name, inner=ex)

    def iter_options():
        # Yield every raw SANE option; option 0 holds the option count.
        try:
            nb_options = pyinsane.rawapi.sane_get_option_value(pyinsane.sane_dev_handle[1], 0)
            for opt_idx in range(1, nb_options):
                opt_desc = pyinsane.rawapi.sane_get_option_descriptor(pyinsane.sane_dev_handle[1], opt_idx)
                opt = pyinsane.ScannerOption.build_from_rawapi(device, opt_idx, opt_desc)
                yield opt
        except Exception as ex:
            raise Error('Unable to retrieve options for device "%s"' % device.name, inner=ex)

    def iter_filtered_options():
        # Yield only usable, settable options; a group header is yielded
        # lazily just before its first surviving option, so empty groups
        # never appear in the output.
        grp = None
        for opt in iter_options():
            # group
            if opt.val_type == pyinsane.rawapi.SaneValueType.GROUP:
                grp = opt
                continue
            # if both of these are set, option is invalid
            if (opt.capabilities._SaneFlags__flags & pyinsane.SaneCapabilities.SOFT_SELECT) and (opt.capabilities._SaneFlags__flags & pyinsane.SaneCapabilities.HARD_SELECT):
                continue
            # invalid to select but not detect
            if (opt.capabilities._SaneFlags__flags & pyinsane.SaneCapabilities.SOFT_SELECT) and not (opt.capabilities._SaneFlags__flags & pyinsane.SaneCapabilities.SOFT_DETECT):
                continue
            # standard allows this, though it makes little sense
            # if (opt.capabilities._SaneFlags__flags & pyinsane.SaneCapabilities.HARD_SELECT) and not (opt.capabilities._SaneFlags__flags & pyinsane.SaneCapabilities.SOFT_DETECT):
            #     continue
            # if one of these three is not set, option is useless, skip it
            if not (opt.capabilities._SaneFlags__flags & (pyinsane.SaneCapabilities.SOFT_SELECT | pyinsane.SaneCapabilities.HARD_SELECT | pyinsane.SaneCapabilities.SOFT_DETECT)):
                continue
            # only worry about settable values
            if not (opt.capabilities._SaneFlags__flags & pyinsane.SaneCapabilities.SOFT_SELECT):
                continue
            # yield group with first valid option
            if grp is not None:
                yield grp
                grp_idx, grp_desc, grp = None, None, None
            # yield option
            yield opt

    def iter_config():
        # Yield the configuration file line by line.
        yield '# configuration options for %s' % device.name
        yield ''
        for opt in iter_filtered_options():
            # group
            if opt.val_type == pyinsane.rawapi.SaneValueType.GROUP:
                yield '[%(name)s]' % dict(
                    name=opt.name
                )
                continue
            # option: title and description as comments
            yield '# %(title)s' % dict(
                title=opt.title
            )
            yield '# %(desc)s' % dict(
                desc=opt.desc
            )
            # Build a human-readable description of the valid values.
            valid = ''
            if opt.val_type == pyinsane.rawapi.SaneValueType.BOOL:
                valid = 'yes|no'
            elif opt.val_type != pyinsane.rawapi.SaneValueType.BUTTON:
                valid = ''
                if opt.constraint_type == pyinsane.rawapi.SaneConstraintType.NONE:
                    if opt.val_type == pyinsane.rawapi.SaneValueType.INT:
                        valid = '<int>'
                    elif opt.val_type == pyinsane.rawapi.SaneValueType.FIXED:
                        valid = '<float>'
                    elif opt.val_type == pyinsane.rawapi.SaneValueType.STRING:
                        valid = '<string>'
                    # size > sizeof(int) means the option holds a vector of values
                    if opt.val_type != pyinsane.rawapi.SaneValueType.STRING and opt.size > ctypes.sizeof(ctypes.c_int):
                        valid = valid + ',...'
                elif opt.constraint_type == pyinsane.rawapi.SaneConstraintType.RANGE:
                    # ToDo: see scanimage. might need to adjust x and y
                    valid_from, valid_to, valid_step = opt.constraint
                    valid_unit = get_unit(opt.unit._SaneEnum__value)
                    if opt.val_type != pyinsane.rawapi.SaneValueType.STRING and opt.size > ctypes.sizeof(ctypes.c_int):
                        valid_extra = ',...'
                    else:
                        valid_extra = ''
                    if opt.val_type == pyinsane.rawapi.SaneValueType.INT:
                        valid = '%(from_)d..%(to)d%(unit)s%(extra)s (in steps of %(step)d)' % dict(
                            from_=valid_from,
                            to=valid_to,
                            extra=valid_extra,
                            step=valid_step,
                            unit=valid_unit
                        )
                    else:
                        # FIXED values are SANE 16.16 fixed point; unfix() converts.
                        valid = '%(from_)g..%(to)g%(unit)s%(extra)s (in steps of %(step)g)' % dict(
                            from_=unfix(valid_from),
                            to=unfix(valid_to),
                            extra=valid_extra,
                            step=unfix(valid_step),
                            unit=valid_unit
                        )
                elif opt.constraint_type == pyinsane.rawapi.SaneConstraintType.WORD_LIST:
                    if opt.val_type == pyinsane.rawapi.SaneValueType.INT:
                        valid_words = ('%d' % word for word in opt.constraint)
                    else:
                        valid_words = ('%g' % unfix(word) for word in opt.constraint)
                    valid = '|'.join(valid_words)
                    if opt.val_type != pyinsane.rawapi.SaneValueType.STRING and opt.size > ctypes.sizeof(ctypes.c_int):
                        valid = valid + ',...'
                elif opt.constraint_type == pyinsane.rawapi.SaneConstraintType.STRING_LIST:
                    valid = '|'.join('%r' % string for string in opt.constraint)
            if opt.capabilities._SaneFlags__flags & pyinsane.SaneCapabilities.AUTOMATIC:
                valid = 'auto|' + valid
            flags = []
            if (opt.capabilities._SaneFlags__flags & pyinsane.SaneCapabilities.INACTIVE):
                flags.append('[inactive]')
            if (opt.capabilities._SaneFlags__flags & pyinsane.SaneCapabilities.HARD_SELECT):
                flags.append('[hardware]')
            if not (opt.capabilities._SaneFlags__flags & pyinsane.SaneCapabilities.SOFT_SELECT) and (opt.capabilities._SaneFlags__flags & pyinsane.SaneCapabilities.SOFT_DETECT):
                flags.append('[read-only]')
            if len(flags) != 0:
                flags = ' ' + ' '.join(flags)
            else:
                flags = ''
            yield '# %(name)s = %(valid)s%(flags)s' % dict(
                name=opt.name,
                valid=valid,
                flags=flags
            )
            # Emit the current value as an active setting where it can be
            # read (single-word or string options that are not inactive).
            if opt.val_type == pyinsane.rawapi.SaneValueType.STRING or opt.size == ctypes.sizeof(ctypes.c_int):
                if not (opt.capabilities._SaneFlags__flags & pyinsane.SaneCapabilities.INACTIVE):
                    value = ''
                    if opt.val_type == pyinsane.rawapi.SaneValueType.BOOL:
                        # Fixed: previously tested opt.val_type (a constant in
                        # this branch) instead of the option's current value.
                        value = 'yes' if opt.value else 'no'
                    elif opt.val_type == pyinsane.rawapi.SaneValueType.INT:
                        # ToDo: see scanimage
                        value = '%d' % opt.value
                    elif opt.val_type == pyinsane.rawapi.SaneValueType.FIXED:
                        # ToDo: see scanimage
                        value = '%g' % unfix(opt.value)
                    elif opt.val_type == pyinsane.rawapi.SaneValueType.STRING:
                        value = '%r' % opt.value
                    yield '%(name)s = %(value)s' % dict(
                        name=opt.name,
                        value=value
                    )
                else:
                    yield '# %(name)s = ' % dict(
                        name=opt.name
                    )
            else:
                yield '# %(name)s = ' % dict(
                    name=opt.name
                )
            yield ''

    def get_unit(value):
        # Map a SaneUnit enum value to a human-readable suffix.
        try:
            return ['', 'pixel', 'bit', 'mm', 'dpi', '%', 'µs'][value]
        except IndexError as ex:
            return ''

    def unfix(value):
        # Convert a SANE 16.16 fixed-point word to a float.
        return float(value) / (1 << 16)

    if cmdline['CONFIG'] is not None:
        filename = cmdline['CONFIG']
        try:
            fp = open(filename, 'wt')
        except Exception as ex:
            raise Error('Unable to write configuration file "%s"' % filename, inner=ex)
        with fp:
            for line in iter_config():
                print(line, file=fp)
    else:
        for line in iter_config():
            print(line)
def main_scan(cmdline):
    """Scan all available pages from DEVICE and write them to TARGET as a PDF.

    Raises:
        Error: if the device cannot be opened, configuration fails, or
            there is nothing to scan.
    """
    device = pyinsane.Scanner(name=cmdline['DEVICE'])
    try:
        device._open()
    except Exception as ex:
        raise Error('Unable to open device "%s"' % device.name, inner=ex)
    # Apply user configuration (no-op when --configuration is absent).
    apply_configuration(cmdline, device)
    def iter_scan():
        # Generator yielding one finished PIL image per scanned page.
        try:
            session = device.scan(multiple=True)
        except StopIteration:
            # NOTE(review): pyinsane appears to signal "no pages available"
            # (e.g. empty document feeder) via StopIteration — confirm.
            raise Error('Nothing to scan')
        while True:
            try:
                session.scan.read()
            except EOFError:
                # EOFError from read() marks the end of one page; the
                # completed image is the last entry of session.images.
                img = session.images[-1]
                # Set DPI if possible
                if 'dpi' not in img.info and 'resolution' in device.options:
                    img.info['dpi'] = (device.options['resolution'].value, device.options['resolution'].value)
                yield img
            except StopIteration:
                # StopIteration marks the end of the whole scan session.
                return
    images2pdf(iter_scan(), cmdline['TARGET'])
def apply_configuration(cmdline, device):
    """Apply settings from the configuration file (if any) to *device*.

    The file format is the one produced by --create-configuration:
    '#' comments, '[group]' headers (ignored) and 'name = value' lines.

    Raises:
        Error: on an unreadable file, a syntax error, an unknown option
            name, an invalid value, or a value the backend rejects.
    """
    filename = cmdline['--configuration']
    if filename is None:
        return  # no configuration requested
    def iter_settings():
        # Yield (name, value) string pairs parsed from the file.
        try:
            fp = open(filename, 'r')
        except Exception as ex:
            raise Error('Unable to read configuration file "%s"' % filename, inner=ex)
        re_comment = re.compile(r'^#.*$')
        re_empty = re.compile(r'^\s*$')
        re_group = re.compile(r'^\s*\[(?P<group>.*)\]\s*$')
        re_option = re.compile(r'^\s*(?P<name>.*?)\s*=\s*(?P<value>.*?)\s*$')
        with fp:
            for iline, line in enumerate(fp):
                # comment
                if re_comment.match(line):
                    continue
                # empty
                if re_empty.match(line):
                    continue
                # group headers carry no settings; skip them
                match = re_group.match(line)
                if match:
                    continue
                # option
                match = re_option.match(line)
                if match:
                    yield (match.group('name'), match.group('value'))
                    continue
                raise Error('Invalid syntax on line %d of configuration file "%s"' % (iline + 1, filename))
    re_value_string = re.compile(r'(?P<quote>\"|\')(?P<value>.*)(?P=quote)')
    for name, value in iter_settings():
        if name not in device.options:
            raise Error('Unknown option "%s" in configuration file "%s"' % (name, filename))
        option = device.options[name]
        if value.lower() == 'auto' and (option.capabilities._SaneFlags__flags & pyinsane.SaneCapabilities.AUTOMATIC):
            pass  # let the backend pick the value
        elif option.val_type == pyinsane.rawapi.SaneValueType.BOOL:
            value = value.lower()
            if value == 'yes':
                value = True
            elif value == 'no':
                value = False
            else:
                raise Error('invalid value for option "%s" in configuration file "%s"' % (name, filename))
        elif option.val_type == pyinsane.rawapi.SaneValueType.INT:
            try:
                value = int(value)
            except ValueError as ex:
                raise Error('invalid value for option "%s" in configuration file "%s"' % (name, filename), inner=ex)
        elif option.val_type == pyinsane.rawapi.SaneValueType.FIXED:
            try:
                value = float(value)
            except ValueError as ex:
                raise Error('invalid value for option "%s" in configuration file "%s"' % (name, filename), inner=ex)
            value = int(value*(1 << 16))  # SANE 16.16 fixed point
        elif option.val_type == pyinsane.rawapi.SaneValueType.STRING:
            match = re_value_string.match(value)
            if match is None:
                # Fixed: previously passed inner=ex here although no exception
                # exists in this branch (would raise NameError at runtime).
                raise Error('invalid value for option "%s" in configuration file "%s"' % (name, filename))
            value = match.group('value')
            value = bytes(value, 'utf-8').decode('unicode_escape')
        else:
            continue  # BUTTON/GROUP options cannot be set from a file
        try:
            option.value = value
        except Exception as ex:
            # Fixed: the message previously contained a literal "%s" because
            # the % name formatting was missing.
            raise Error('unable to set option "%s"' % name, inner=ex)
def pil2lab(img):
    """Wrap a PIL image in a reportlab flowable, scaled to physical size.

    The image's DPI metadata (defaulting to 1 dpi per axis when absent)
    converts pixel dimensions into points via reportlab's ``inch`` unit.
    """
    if 'dpi' in img.info:
        dpiw, dpih = img.info['dpi']
    else:
        dpiw, dpih = (1, 1)
    stream = io.BytesIO()
    img.save(stream, 'TIFF')
    stream.seek(0)
    width_pt = float(img.width) / dpiw * inch
    height_pt = float(img.height) / dpih * inch
    return LabImage(stream, width=width_pt, height=height_pt)
def images2pdf(images, filename):
images = iter(images)
try:
img = next(images)
except StopIteration:
raise Error('Nothing scanned')
img = pil2lab(img)
doc = SimpleDocTemplate(
filename,
pagesize=(
img._width + 1.5 * cm,
img._height + 2.0 * cm
),
showBoundary=1,
leftMargin=0.5 * cm, rightMargin=0.5 * cm, topMargin=0.5 * cm, | |
from src.PhaseIdentification.common import *
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn_extra.cluster import KMedoids
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.mixture import GaussianMixture
import seaborn as sns
class PhaseIdentification(Feeder):
"""
A PhaseIdentification object is formed by performing one of the phase identification methods on the feeder object
It contains most notably an array with the found phase labels by the method
"""
def __init__(self, feeder, error_class=ErrorClass(0)):
"""
Initialize the PhaseIdentification object by reading out the data from JSON files in the specified directory.
feeder = attach feeder object or feederID
include_three_phase = put on True if you want to include 3 phase customers in your analysis, 3 phase customers
will be regarded as 3 single phase customers
measurement_error = std of the amount of noise added to the voltage (p.u.)
length = number of data samples used, the first samples are used
"""
self.__dict__ = feeder.__dict__.copy()
size_v = (np.size(self.voltage_features,0), np.size(self.voltage_features,1))
size_p = (np.size(self.load_features,0), np.size(self.load_features,1))
self.voltage_features = self.voltage_features + np.random.normal(0,error_class.get_voltage_noise(), size_v)
self.load_features = self.load_features + np.random.normal(0,error_class.get_load_noise(), size_p)
self._score = np.nan
def hierarchal_clustering(self, n_clusters=3, normalized=True, criterion='avg_silhouette'):
"""
Method that assigns phase labels to PhaseIdebtification object obtained by performing hierarchal clustering of the specified featureset
By default the features will be normalized first. By scaling the features to have a mean of 0 and unit variance.
(More info: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)
"""
if normalized:
scaler = StandardScaler()
data = scaler.fit_transform(self.voltage_features)
else:
data = self.voltage_features
labels = AgglomerativeClustering(n_clusters).fit(data).labels_
if criterion == 'global_silhouette':
score = global_silhouette_criterion(data, labels)
if criterion == 'avg_silhouette':
score = silhouette_score(data, labels)
self._algorithm = 'hierarchal clustering'
self._n_repeats = 1
self.partial_phase_labels = labels + 1
self.match_labels()
def k_means_clustering(self, n_clusters=3, normalized=True, n_repeats=1, criterion='avg_silhouette',length=24*20):
"""
Method that assigns phase labels to PhaseIdentification object obtained by performing K-means++ on the specified feeder.
A number of repetitions can be specified, the best result according to the specified criterion will be returned
By default the features will be normalized first. By scaling the features to have a mean of 0 and unit variance.
(More info: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)
"""
if normalized == True:
scaler = StandardScaler()
data = scaler.fit_transform(self.voltage_features)
else:
data = self.voltage_features
self.voltage_features = np.array([x[0:length] for x in self.voltage_features])
if criterion == 'avg_silhouette':
best_cluster_labels = np.zeros(np.size(data, 0))
score = -1
for i in range(0, n_repeats):
i_cluster_labels = KMeans(n_clusters).fit(data).labels_
i_silhouette_avg = silhouette_score(data, i_cluster_labels)
if i_silhouette_avg > score:
score = i_silhouette_avg
best_cluster_labels = i_cluster_labels
if criterion == 'global_silhouette':
best_cluster_labels = np.zeros(np.size(data, 0))
score = -1
for i in range(0, n_repeats):
i_cluster_labels = KMeans(n_clusters).fit(data).labels_
i_silhouette_global = global_silhouette_criterion(data, i_cluster_labels)
if i_silhouette_global > score:
score = i_silhouette_global
best_cluster_labels = i_cluster_labels
self._algorithm = 'k-means++'
self._n_repeats = n_repeats
self.partial_phase_labels = best_cluster_labels + 1
self.match_labels()
def k_medoids_clustering(self, n_clusters=3, normalized=True, n_repeats=1, criterion='avg_silhouette'):
"""
Method that assigns phase labels to PhaseIdentification object obtained by performing K-medoids++ on the specified feeder.
A number of repetitions can be specified, the best result according to the specified criterion will be returned
By default the features will be normalized first. By scaling the features to have a mean of 0 and unit variance.
(More info: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)
"""
if normalized == True:
scaler = StandardScaler()
data = scaler.fit_transform(self.voltage_features)
else:
data = self.voltage_features
if criterion == 'avg_silhouette':
best_cluster_labels = np.zeros(np.size(data, 0))
score = -1
for i in range(0, n_repeats):
i_cluster_labels = KMedoids(n_clusters, init='k-medoids++').fit(data).labels_
i_silhouette_avg = silhouette_score(data, i_cluster_labels)
if i_silhouette_avg > score:
score = i_silhouette_avg
best_cluster_labels = i_cluster_labels
if criterion == 'global_silhouette':
best_cluster_labels = np.zeros(np.size(data, 0))
score = -1
for i in range(0, n_repeats):
i_cluster_labels = KMedoids(n_clusters, init='k-medoids++').fit(data).labels_
i_silhouette_global = global_silhouette_criterion(data, i_cluster_labels)
if i_silhouette_global > score:
score = i_silhouette_global
best_cluster_labels = i_cluster_labels
self._algorithm = 'k-medoids++'
self._n_repeats = n_repeats
self.partial_phase_labels = best_cluster_labels + 1
self.match_labels()
def gaussian_mixture_model(self, n_clusters=3, normalized=True, n_repeats=1):
"""
Method that assigns phase labels to PhaseIdentification object obtained by performing K-means++ on the specified feeder.
A number of repetitions can be specified, the best result according to the average silhouette score will be
returned
By default the features will be normalized first. By scaling the features to have a mean of 0 and unit variance.
(More info: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)
"""
if normalized == True:
scaler = StandardScaler()
data = scaler.fit_transform(self.voltage_features)
else:
data = self.voltage_features
best_cluster_labels = np.zeros(np.size(data, 0))
score = -1
reg_values = np.linspace(0.001, .1, num=n_repeats)
for i in range(0, n_repeats):
i_cluster_labels = np.array(
GaussianMixture(n_components=n_clusters, reg_covar=reg_values[i]).fit_predict(data))
i_silhouette_avg = silhouette_score(data, i_cluster_labels)
if i_silhouette_avg > score:
score = i_silhouette_avg
best_cluster_labels = i_cluster_labels
self._algorithm = 'Gaussian mixture model'
self._n_repeats = n_repeats
self.partial_phase_labels = best_cluster_labels + 1
self.match_labels()
return Cluster(best_cluster_labels, 'Gaussian mixture model', normalized, n_repeats, 'avg_silhouette', score)
def get_reference_3phase_customer(self):
id_ = np.array(self.device_IDs)
try:
id_3 = self.multiphase_IDs[0]
except IndexError:
raise ValueError("No 3 phase reference found")
else:
profiles = self.voltage_features
profiles = profiles[id_ == id_3]
labels = self.phase_labels
labels = labels[id_ == id_3]
return labels, profiles
def voltage_correlation(self,length=24*20):
"""
Voltage correlation method to perform phase identification, correlates voltage of each customer to voltage profile
of 3 phase reference customer and assigns the phase according to which phase it is highest correlated to.
"""
labels, profiles = self.get_reference_3phase_customer()
phase_labels = []
scores = []
for device in self.voltage_features:
corr = 0
label = np.nan
for phase in range(0, 3):
n_corr = sum((profiles[phase]-np.mean(profiles[phase])) * (device-np.mean(device)))\
/(np.std(profiles[phase])*np.std(device))
if n_corr > corr:
corr = n_corr
label = labels[phase]
phase_labels += [label]
scores += [corr]
self._algorithm = 'voltage_correlation'
self._n_repeats = 1
self.partial_phase_labels = phase_labels
def voltage_correlation_transfo_ref(self,length=24*20, min_corr=-np.inf):
"""
Voltage correlation method that perform phase identification using collected voltage data of the reference transformer
"""
labels = [1,2,3]
profiles = self.voltage_features_transfo
phase_labels = []
scores = []
for device in self.voltage_features:
corr = min_corr
label = 0
for phase in range(0, 3):
std_transfo = np.std(profiles[phase][0:length])
std_customer = np.std(device[0:length])
n_corr = sum((profiles[phase][0:length] - np.mean(profiles[phase][0:length])) * (device[0:length] - np.mean(device[0:length]))) \
/ (std_transfo * std_customer)
if n_corr >= corr:
corr = n_corr
label = labels[phase]
phase_labels += [label]
scores += [corr]
if label == 0:
print('Phase could not be allocated, Corr: ', n_corr, ' std transfo: ',std_transfo,' std consumer: ',std_customer)
self._algorithm = 'voltage_correlation'
self._n_repeats = 1
self.partial_phase_labels = phase_labels
self._algorithm = 'Voltage correlation with transformer ref'
def plot_voltage_correlation_transfo_ref(self,length=24*20, min_corr=-np.inf):
"""
Voltage correlation method that perform phase identification using collected voltage data of the reference transformer
"""
labels = [1,2,3]
profiles = self.voltage_features_transfo
phase_labels = []
scores = []
for device in self.voltage_features:
corr = []
label = 0
for phase in range(0, 3):
std_transfo = np.std(profiles[phase][0:length])
std_customer = np.std(device[0:length])
corr += [sum((profiles[phase][0:length] - np.mean(profiles[phase][0:length])) * (device[0:length] - np.mean(device[0:length]))) \
/ (std_transfo * std_customer)]
phase_labels += [np.argmax(corr)+1]
scores.append(np.sort(corr))
scores = np.array(scores)
self._algorithm = 'voltage_correlation'
self._n_repeats = 1
self.partial_phase_labels = phase_labels
self._algorithm = 'Voltage correlation with transformer ref'
return scores
def accuracy(self):
correct_labels = self.partial_phase_labels
labels = self.phase_labels
if len(labels) != len(correct_labels):
raise IndexError("Phase labels not of same length")
c = 0.0
for i in range(0, len(labels)):
if labels[i] == correct_labels[i]:
c = c + 1.0
return c / len(labels)
def find_wrong_IDs(self):
correct_labels = self.phase_labels
labels = self.partial_phase_labels
id_s = self.device_IDs
wrong_ids = []
if len(labels) != len(correct_labels):
raise IndexError("Phase labels not of same length")
for i in range(0, len(labels)):
if labels[i] != correct_labels[i]:
wrong_ids += [id_s[i]]
return np.array(wrong_ids)
    def match_labels(self):
        """Search permutations of the cluster labels {1,2,3} for the one that
        maximises accuracy() against the true phase labels, and keep it.

        Clustering assigns arbitrary cluster ids, so the mapping from
        cluster id to physical phase must be recovered. Cyclic rotations
        (x % 3 + 1) walk through three permutations; at iteration i == 3
        labels 1 and 2 are swapped in place so the following rotations
        cover the remaining (odd) permutations.
        """
        best_labels = self.partial_phase_labels
        best_acc = 0.0
        for i in range(0, 7):
            # Accuracy of the permutation currently stored on the object.
            acc = self.accuracy()
            labels = self.partial_phase_labels
            if acc > best_acc:
                best_acc = acc
                best_labels = labels
                # NOTE(review): best_labels aliases the current label sequence
                # and may be mutated in place by the swap below — TODO confirm.
            if i == 3:
                # Swap labels 1 and 2 (in place) to reach the odd permutations.
                for j in range(0, len(labels)):
                    if labels[j] == 1:
                        labels[j] = 2
                    elif labels[j] == 2:
                        labels[j] = 1
                self.partial_phase_labels = np.array(labels)
            else:
                # Cyclic rotation 1 -> 2 -> 3 -> 1 of all labels.
                self.partial_phase_labels = (list(map(lambda x: x % 3 + 1, labels)))
        self.partial_phase_labels = np.array(best_labels)
def plot_voltages(self, length=48, x_axis=None, y_axis=None):
"""
"""
voltage_data = self.voltage_features
plt.figure(figsize=(8, 6))
markers = ["s", "o", "D", ">", "<", "v", "+"]
x = np.arange(0, length)
for i in range(1, 4):
color = plt.cm.viridis((float(i))/3)
for j, line in enumerate(voltage_data):
if self.partial_phase_labels[j] == i:
plt.plot(x, line[0:length], color=color, alpha=0.85)
plt.xlabel(x_axis)
plt.ylabel(y_axis)
plt.title(self._algorithm)
plt.show()
def add_noise(self, error=0, data="voltage",inplace=True):
""""
Method to add noise to existing Feeder object
"""
if inplace:
if data == "voltage":
voltage_features | |
result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_row failed: unknown result");
def get_row_as_arrays(self, ns, name, row):
"""
Alternative interface using array as cell
Parameters:
- ns
- name
- row
"""
self.send_get_row_as_arrays(ns, name, row)
return self.recv_get_row_as_arrays()
def send_get_row_as_arrays(self, ns, name, row):
self._oprot.writeMessageBegin('get_row_as_arrays', TMessageType.CALL, self._seqid)
args = get_row_as_arrays_args()
args.ns = ns
args.name = name
args.row = row
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_row_as_arrays(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_row_as_arrays_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_row_as_arrays failed: unknown result");
def get_row_serialized(self, ns, table_name, row):
"""
Alternative interface returning buffer of serialized cells
Parameters:
- ns
- table_name
- row
"""
self.send_get_row_serialized(ns, table_name, row)
return self.recv_get_row_serialized()
def send_get_row_serialized(self, ns, table_name, row):
self._oprot.writeMessageBegin('get_row_serialized', TMessageType.CALL, self._seqid)
args = get_row_serialized_args()
args.ns = ns
args.table_name = table_name
args.row = row
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_row_serialized(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_row_serialized_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_row_serialized failed: unknown result");
def get_cell(self, ns, table_name, row, column):
"""
Get a cell (convenience method for random access a cell)
@param ns - namespace id
@param table_name - table name
@param row - row key
@param column - column name
@return value (byte sequence)
Parameters:
- ns
- table_name
- row
- column
"""
self.send_get_cell(ns, table_name, row, column)
return self.recv_get_cell()
def send_get_cell(self, ns, table_name, row, column):
self._oprot.writeMessageBegin('get_cell', TMessageType.CALL, self._seqid)
args = get_cell_args()
args.ns = ns
args.table_name = table_name
args.row = row
args.column = column
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_cell(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_cell_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_cell failed: unknown result");
def get_cells(self, ns, table_name, scan_spec):
"""
Get cells (convenience method for access small amount of cells)
@param ns - namespace id
@param table_name - table name
@param scan_spec - scan specification
@return a list of cells (a cell with no row key set is assumed to have
the same row key as the previous cell)
Parameters:
- ns
- table_name
- scan_spec
"""
self.send_get_cells(ns, table_name, scan_spec)
return self.recv_get_cells()
def send_get_cells(self, ns, table_name, scan_spec):
self._oprot.writeMessageBegin('get_cells', TMessageType.CALL, self._seqid)
args = get_cells_args()
args.ns = ns
args.table_name = table_name
args.scan_spec = scan_spec
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_cells(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_cells_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_cells failed: unknown result");
def get_cells_as_arrays(self, ns, name, scan_spec):
"""
Alternative interface using array as cell
Parameters:
- ns
- name
- scan_spec
"""
self.send_get_cells_as_arrays(ns, name, scan_spec)
return self.recv_get_cells_as_arrays()
def send_get_cells_as_arrays(self, ns, name, scan_spec):
self._oprot.writeMessageBegin('get_cells_as_arrays', TMessageType.CALL, self._seqid)
args = get_cells_as_arrays_args()
args.ns = ns
args.name = name
args.scan_spec = scan_spec
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_cells_as_arrays(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_cells_as_arrays_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_cells_as_arrays failed: unknown result");
def get_cells_serialized(self, ns, name, scan_spec):
"""
Alternative interface returning buffer of serialized cells
Parameters:
- ns
- name
- scan_spec
"""
self.send_get_cells_serialized(ns, name, scan_spec)
return self.recv_get_cells_serialized()
def send_get_cells_serialized(self, ns, name, scan_spec):
self._oprot.writeMessageBegin('get_cells_serialized', TMessageType.CALL, self._seqid)
args = get_cells_serialized_args()
args.ns = ns
args.name = name
args.scan_spec = scan_spec
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_cells_serialized(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_cells_serialized_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.e != None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_cells_serialized failed: unknown result");
def refresh_shared_mutator(self, ns, table_name, mutate_spec):
"""
Create a shared mutator with specified MutateSpec.
Delete and recreate it if the mutator exists.
@param ns - namespace id
@param table_name - table name
@param mutate_spec - mutator specification
Parameters:
- ns
- table_name
- mutate_spec
"""
self.send_refresh_shared_mutator(ns, table_name, mutate_spec)
self.recv_refresh_shared_mutator()
def send_refresh_shared_mutator(self, ns, table_name, mutate_spec):
self._oprot.writeMessageBegin('refresh_shared_mutator', TMessageType.CALL, self._seqid)
args = refresh_shared_mutator_args()
args.ns = ns
args.table_name = table_name
args.mutate_spec = mutate_spec
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_refresh_shared_mutator(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = refresh_shared_mutator_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e != None:
raise result.e
return
def offer_cells(self, ns, table_name, mutate_spec, cells):
"""
Open a shared periodic mutator which causes cells to be written asyncronously.
Users beware: calling this method merely writes
cells to a local buffer and does not guarantee that the cells have been persisted.
If you want guaranteed durability, use the open_mutator+set_cells* interface instead.
@param ns - namespace id
@param table_name - table name
@param mutate_spec - mutator specification
@param cells - set of cells to be written
Parameters:
- ns
- table_name
- mutate_spec
- cells
"""
self.send_offer_cells(ns, table_name, mutate_spec, cells)
self.recv_offer_cells()
def send_offer_cells(self, ns, table_name, mutate_spec, cells):
self._oprot.writeMessageBegin('offer_cells', TMessageType.CALL, self._seqid)
args = offer_cells_args()
args.ns = ns
args.table_name = table_name
args.mutate_spec = mutate_spec
args.cells = cells
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_offer_cells(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = offer_cells_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e != None:
raise result.e
return
def offer_cells_as_arrays(self, ns, table_name, mutate_spec, cells):
"""
Alternative to offer_cell interface using array as cell
Parameters:
- ns
- table_name
- mutate_spec
- cells
"""
self.send_offer_cells_as_arrays(ns, table_name, mutate_spec, cells)
self.recv_offer_cells_as_arrays()
def send_offer_cells_as_arrays(self, ns, table_name, mutate_spec, cells):
self._oprot.writeMessageBegin('offer_cells_as_arrays', TMessageType.CALL, self._seqid)
args = offer_cells_as_arrays_args()
args.ns = ns
args.table_name = table_name
args.mutate_spec = mutate_spec
args.cells = cells
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_offer_cells_as_arrays(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = offer_cells_as_arrays_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e != None:
raise result.e
return
def offer_cell(self, ns, table_name, mutate_spec, cell):
"""
Open a shared periodic mutator which causes cells to be written asyncronously.
Users beware: calling this method merely writes
cells to a local buffer and does not guarantee that the cells have been persisted.
If you want guaranteed durability, use the open_mutator+set_cells* interface instead.
@param ns - namespace id
@param table_name - table name
@param mutate_spec - mutator specification
@param cell - cell to be written
Parameters:
- ns
- table_name
- mutate_spec
- cell
"""
self.send_offer_cell(ns, table_name, mutate_spec, cell)
self.recv_offer_cell()
def send_offer_cell(self, ns, table_name, mutate_spec, cell):
    """Serialize an offer_cell call onto the output protocol without
    waiting for the reply (the matching recv_* reads it)."""
    self._oprot.writeMessageBegin('offer_cell', TMessageType.CALL, self._seqid)
    request = offer_cell_args()
    request.ns = ns
    request.table_name = table_name
    request.mutate_spec = mutate_spec
    request.cell = cell
    request.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_offer_cell(self):
    """Read the server's reply to an offer_cell call.

    Returns:
      None on success.

    Raises:
      TApplicationException: if the server reported a framework-level error.
      Exception: the application exception carried in ``result.e``, if set.
    """
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = offer_cell_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: comparisons to None use "is not", never "!=".
    if result.e is not None:
        raise result.e
    return
def offer_cell_as_array(self, ns, table_name, mutate_spec, cell):
    """Array-based alternative to the offer_cell interface.

    Parameters:
    - ns: namespace id
    - table_name: table name
    - mutate_spec: mutator specification
    - cell: cell to be written, encoded as an array
    """
    # Serialize the request, then block until the reply has been consumed.
    self.send_offer_cell_as_array(ns, table_name, mutate_spec, cell)
    return self.recv_offer_cell_as_array()
def send_offer_cell_as_array(self, ns, table_name, mutate_spec, cell):
    """Serialize an offer_cell_as_array call onto the output protocol
    without waiting for the reply (the matching recv_* reads it)."""
    self._oprot.writeMessageBegin('offer_cell_as_array', TMessageType.CALL, self._seqid)
    request = offer_cell_as_array_args()
    request.ns = ns
    request.table_name = table_name
    request.mutate_spec = mutate_spec
    request.cell = cell
    request.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_offer_cell_as_array(self):
    """Read the server's reply to an offer_cell_as_array call.

    Returns:
      None on success.

    Raises:
      TApplicationException: if the server reported a framework-level error.
      Exception: the application exception carried in ``result.e``, if set.
    """
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = offer_cell_as_array_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # PEP 8: comparisons to None use "is not", never "!=".
    if result.e is not None:
        raise result.e
    return
def open_mutator(self, ns, table_name, flags, flush_interval):
    """Open a table mutator and return its id.

    Parameters:
    - ns: namespace id
    - table_name: table name
    - flags: mutator flags
    - flush_interval: auto-flush interval in milliseconds; 0 disables it

    Returns:
      The id of the newly opened mutator.
    """
    # Serialize the request, then block until the reply arrives.
    self.send_open_mutator(ns, table_name, flags, flush_interval)
    mutator_id = self.recv_open_mutator()
    return mutator_id
def send_open_mutator(self, ns, table_name, flags, flush_interval):
self._oprot.writeMessageBegin('open_mutator', TMessageType.CALL, self._seqid)
args = open_mutator_args()
args.ns = ns
args.table_name = table_name
args.flags | |
# Source repository: abderrahmane8/ByRequests (1-10 stars)
# UPDATED 2018-10-04
# Pool of Chrome User-Agent strings (desktop Windows/Linux/macOS plus Android
# mobile variants) — presumably sampled at random when building request
# headers; confirm against the consumer of this module.
chrome_ua = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36",
    "Mozilla/5.0 (Windows NT 5.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 4.4.2; XMP-6250 Build/HAWK) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/30.0.0.0 Safari/537.36 ADAPI/2.0 (UUID:9e7df0ed-2a5c-4a19-bec7-2cc54800f99d) RK3188-ADAPI/1.2.84.533 (MODEL:XMP-6250)",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 6.0.1; SM-G532G Build/MMB29T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.83 Mobile Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 6.0; vivo 1713 Build/MRA58K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.124 Mobile Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.63 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 7.1; Mi A1 Build/N2G47H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.83 Mobile Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.65 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36",
    "Mozilla/5.0 (Windows NT 5.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 6.0.1; vivo 1603 Build/MMB29M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.83 Mobile Safari/537.36",
    "Mozilla/5.0 (Linux; Android 6.0.1; CPH1607 Build/MMB29M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/63.0.3239.111 Mobile Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 6.0.1; Redmi 4A Build/MMB29M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/60.0.3112.116 Mobile Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.104 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 6.0; vivo 1606 Build/MMB29M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.124 Mobile Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 7.1; vivo 1716 Build/N2G47H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.98 Mobile Safari/537.36"
]
# Pool of Firefox User-Agent strings (Windows and Linux desktop variants) —
# presumably sampled at random when building request headers; confirm
# against the consumer of this module.
firefox_ua = [
    "Mozilla/5.0 (Windows NT 5.1; rv:7.0.1) Gecko/20100101 Firefox/7.0.1",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:44.0) Gecko/20100101 Firefox/44.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0",
    "Mozilla/5.0 (Windows NT 6.0; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0",
    "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0",
    "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0",
    "Mozilla/5.0 (Windows NT 5.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:48.0) Gecko/20100101 Firefox/48.0",
    "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0",
    "Mozilla/5.0 (Windows NT 5.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0",
    "Mozilla/5.0 (Windows NT 6.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0",
    "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:59.0) Gecko/20100101 Firefox/59.0",
    "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:48.0) Gecko/20100101 Firefox/48.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0",
    "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:44.0) Gecko/20100101 Firefox/44.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0",
    "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:59.0) Gecko/20100101 Firefox/59.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0",
    "Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)",
    "Mozilla/5.0 (Windows NT 6.1; rv:45.0) Gecko/20100101 Firefox/45.0",
    "Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0",
    "Mozilla/5.0 (Windows NT 5.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0",
    "Mozilla/5.0 (Windows NT 6.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0",
    "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0",
    "Mozilla/5.0 (Windows NT 5.1; rv:52.0) Gecko/20100101 Firefox/52.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0",
    "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0"
]
safari_ua = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.21 (KHTML, like Gecko) Mwendo/1.1.5 Safari/537.21",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/601.7.7 (KHTML, like Gecko) Version/9.1.2 Safari/601.7.7",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13F69 Safari/601.1",
"Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13G36 Safari/601.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1.2 Safari/605.1.15",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8",
"Mozilla/5.0 (iPad; CPU OS 10_2_1 like Mac OS X) AppleWebKit/602.4.6 (KHTML, like Gecko) Version/10.0 Mobile/14D27 Safari/602.1",
"Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 Safari/605.1.15",
"Mozilla/5.0 (iPhone; CPU iPhone OS 10_2_1 like Mac OS X) AppleWebKit/602.4.6 (KHTML, like Gecko) Version/10.0 Mobile/14D27 Safari/602.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.4",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/604.5.6 (KHTML, like Gecko) Version/11.0.3 Safari/604.5.6",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/601.4.4 (KHTML, like Gecko) Version/9.0.3 Safari/601.4.4",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.1 Safari/603.1.30",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/602.4.8 (KHTML, like Gecko) Version/10.0.3 Safari/602.4.8",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/601.5.17 (KHTML, like Gecko) Version/9.1 Safari/601.5.17",
"Mozilla/5.0 (iPhone; CPU iPhone OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E188a Safari/601.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/601.6.17 (KHTML, like Gecko) Version/9.1.1 Safari/601.6.17",
"Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.0 Mobile/14G60 Safari/602.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/604.4.7 (KHTML, like Gecko) Version/11.0.2 Safari/604.4.7",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14",
"Mozilla/5.0 (iPhone; CPU iPhone OS 8_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12B410 Safari/600.1.4",
"Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.0 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13F69 Safari/601.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Safari/604.1.38",
"Mozilla/5.0 (Macintosh; Intel | |
Ohio",3714),
("Mount Pleasant village, Ohio",420),
("Mount Repose CDP, Ohio",4446),
("Mount Sterling village, Ohio",1988),
("Mount Vernon city, Ohio",16669),
("Mount Victory village, Ohio",590),
("Mowrystown village, Ohio",353),
("Mulberry CDP, Ohio",3772),
("Munroe Falls city, Ohio",5065),
("Murray City village, Ohio",401),
("Mutual village, Ohio",141),
("Napoleon city, Ohio",8676),
("Nashville village, Ohio",167),
("Navarre village, Ohio",1797),
("Neapolis CDP, Ohio",287),
("Neffs CDP, Ohio",623),
("Negley CDP, Ohio",165),
("Nellie village, Ohio",101),
("Nelsonville city, Ohio",4990),
("Nettle Lake CDP, Ohio",254),
("Nevada village, Ohio",841),
("Neville village, Ohio",76),
("New Albany city, Ohio",10896),
("New Alexandria village, Ohio",286),
("Newark city, Ohio",49070),
("New Athens village, Ohio",304),
("New Baltimore CDP, Ohio",920),
("New Bavaria village, Ohio",112),
("New Bloomington village, Ohio",473),
("New Boston village, Ohio",2375),
("New Bremen village, Ohio",3318),
("Newburgh Heights village, Ohio",1778),
("New Burlington CDP, Ohio",4819),
("New California CDP, Ohio",1738),
("New Carlisle city, Ohio",5622),
("Newcomerstown village, Ohio",3883),
("New Concord village, Ohio",2627),
("New Franklin city, Ohio",14178),
("New Hampshire CDP, Ohio",250),
("New Haven CDP (Hamilton County), Ohio",507),
("New Haven CDP (Huron County), Ohio",270),
("New Holland village, Ohio",848),
("New Knoxville village, Ohio",965),
("New Lebanon village, Ohio",4243),
("New Lexington village, Ohio",5189),
("New London village, Ohio",2458),
("New Madison village, Ohio",906),
("New Marshfield CDP, Ohio",369),
("New Miami village, Ohio",2416),
("New Middletown village, Ohio",1490),
("New Paris village, Ohio",1692),
("New Philadelphia city, Ohio",17432),
("New Pittsburg CDP, Ohio",411),
("Newport CDP (Shelby County), Ohio",152),
("Newport CDP (Washington County), Ohio",867),
("New Richmond village, Ohio",2666),
("New Riegel village, Ohio",266),
("New Straitsville village, Ohio",650),
("Newton Falls village, Ohio",4564),
("Newtonsville village, Ohio",374),
("Newtown village, Ohio",2658),
("New Vienna village, Ohio",1214),
("New Washington village, Ohio",1120),
("New Waterford village, Ohio",1190),
("New Weston village, Ohio",98),
("Ney village, Ohio",226),
("Niles city, Ohio",18606),
("North Baltimore village, Ohio",3316),
("North Bend village, Ohio",843),
("Northbrook CDP, Ohio",10832),
("North Canton city, Ohio",17362),
("North College Hill city, Ohio",9307),
("North Fairfield village, Ohio",486),
("Northfield village, Ohio",3677),
("Northgate CDP, Ohio",7099),
("North Hampton village, Ohio",439),
("North Kingsville village, Ohio",2816),
("North Lawrence CDP, Ohio",221),
("North Lewisburg village, Ohio",1607),
("North Madison CDP, Ohio",8573),
("North Olmsted city, Ohio",31888),
("North Perry village, Ohio",701),
("North Randall village, Ohio",1023),
("Northridge CDP, Ohio",7343),
("North Ridgeville city, Ohio",32954),
("North Robinson village, Ohio",354),
("North Royalton city, Ohio",30322),
("North Star village, Ohio",259),
("Northwood city, Ohio",5383),
("North Zanesville CDP, Ohio",2390),
("Norton city, Ohio",12047),
("Norwalk city, Ohio",16881),
("Norwich village, Ohio",117),
("Norwood city, Ohio",19796),
("Oak Harbor village, Ohio",2724),
("Oak Hill village, Ohio",1691),
("Oakwood village (Cuyahoga County), Ohio",3692),
("Oakwood city, Ohio",9030),
("Oakwood village (Paulding County), Ohio",644),
("Oberlin city, Ohio",8299),
("Obetz village, Ohio",4740),
("Oceola CDP, Ohio",96),
("Octa village, Ohio",41),
("Ohio City village, Ohio",833),
("Olde West Chester CDP, Ohio",357),
("Old Fort CDP, Ohio",189),
("Old Washington village, Ohio",506),
("Olmsted Falls city, Ohio",8912),
("Ontario city, Ohio",6100),
("Orange village, Ohio",3285),
("Orangeville village, Ohio",173),
("Oregon city, Ohio",20012),
("Orient CDP, Ohio",254),
("Orrville city, Ohio",8239),
("Orwell village, Ohio",1583),
("Osgood village, Ohio",239),
("Ostrander village, Ohio",955),
("Ottawa village, Ohio",4377),
("Ottawa Hills village, Ohio",4436),
("Ottoville village, Ohio",816),
("Otway village, Ohio",57),
("Owensville village, Ohio",946),
("Oxford city, Ohio",22612),
("Painesville city, Ohio",19828),
("Palestine village, Ohio",242),
("Pancoastburg CDP, Ohio",144),
("Pandora village, Ohio",1048),
("Park Layne CDP, Ohio",4082),
("Parkman CDP, Ohio",123),
("Parma city, Ohio",79559),
("Parma Heights city, Ohio",20162),
("Parral village, Ohio",207),
("Pataskala city, Ohio",15465),
("Patterson village, Ohio",152),
("Paulding village, Ohio",3465),
("Payne village, Ohio",1197),
("Peebles village, Ohio",1851),
("Pemberville village, Ohio",1590),
("Peninsula village, Ohio",632),
("Pepper Pike city, Ohio",6242),
("Perry village, Ohio",1646),
("Perry Heights CDP, Ohio",8581),
("Perrysburg city, Ohio",21430),
("Perrysville village, Ohio",758),
("Pettisville CDP, Ohio",455),
("Pheasant Run CDP, Ohio",1337),
("Phillipsburg village, Ohio",445),
("Philo village, Ohio",1148),
("Pickerington city, Ohio",20148),
("Pigeon Creek CDP, Ohio",721),
("Piketon village, Ohio",2137),
("Pioneer village, Ohio",1650),
("Piqua city, Ohio",20941),
("Pitsburg village, Ohio",371),
("Plain City village, Ohio",4421),
("Plainfield village, Ohio",132),
("Plainville CDP, Ohio",182),
("Pleasant City village, Ohio",430),
("Pleasant Grove CDP, Ohio",1682),
("Pleasant Hill village, Ohio",1305),
("Pleasant Hills CDP, Ohio",779),
("Pleasant Plain village, Ohio",156),
("Pleasant Run CDP, Ohio",4460),
("Pleasant Run Farm CDP, Ohio",4653),
("Pleasantville village, Ohio",1065),
("Plumwood CDP, Ohio",365),
("Plymouth village, Ohio",1600),
("Poland village, Ohio",2582),
("Polk village, Ohio",347),
("Pomeroy village, Ohio",1786),
("Portage village, Ohio",492),
("Portage Lakes CDP, Ohio",7139),
("Port Clinton city, Ohio",5939),
("Port Jefferson village, Ohio",492),
("Portsmouth city, Ohio",20366),
("Port Washington village, Ohio",447),
("Port William village, Ohio",226),
("Potsdam village, Ohio",328),
("Pottery Addition CDP, Ohio",201),
("Powell city, Ohio",12909),
("Powhatan Point village, Ohio",1524),
("Proctorville village, Ohio",574),
("Prospect village, Ohio",975),
("Pulaski CDP, Ohio",104),
("Put-in-Bay village, Ohio",68),
("Quaker City village, Ohio",376),
("Quincy village, Ohio",605),
("Racine village, Ohio",704),
("Radnor CDP, Ohio",159),
("Rarden village, Ohio",192),
("Ravenna city, Ohio",11552),
("Rawson village, Ohio",504),
("Rayland village, Ohio",422),
("Raymond CDP, Ohio",156),
("Reading city, Ohio",10286),
("Reminderville village, Ohio",4113),
("Remington CDP, Ohio",273),
("Rendville village, Ohio",27),
("Reno CDP, Ohio",1138),
("Republic village, Ohio",625),
("Reynoldsburg city, Ohio",37644),
("Richfield village, Ohio",3647),
("Richmond village, Ohio",390),
("Richmond Dale CDP, Ohio",367),
("Richmond Heights city, Ohio",10449),
("Richville CDP, Ohio",3298),
("Richwood village, Ohio",2381),
("Ridgeville Corners CDP, Ohio",525),
("Ridgeway village, Ohio",334),
("Rio Grande village, Ohio",842),
("Ripley village, Ohio",1786),
("Risingsun village, Ohio",635),
("Rittman city, Ohio",6541),
("Riverlea village, Ohio",541),
("Riverside city, Ohio",25087),
("Roaming Shores village, Ohio",1494),
("Robertsville CDP, Ohio",241),
("Rochester village, Ohio",168),
("Rockbridge CDP, Ohio",27),
("Rock Creek village, Ohio",403),
("Rockford village, Ohio",941),
("Rocky Fork Point CDP, Ohio",681),
("Rocky Ridge village, Ohio",521),
("Rocky River city, Ohio",20310),
("Rogers village, Ohio",263),
("Rome village, Ohio",67),
("Rose Farm CDP, Ohio",247),
("Rosemount CDP, Ohio",1993),
("Roseville village, Ohio",2262),
("Rosewood CDP, Ohio",192),
("Ross CDP, Ohio",3526),
("Rossburg village, Ohio",201),
("Rossford city, Ohio",6507),
("Rossmoyne CDP, Ohio",1744),
("Roswell village, Ohio",226),
("Rudolph CDP, Ohio",579),
("Rushsylvania village, Ohio",582),
("Rushville village, Ohio",397),
("Russells Point village, Ohio",1158),
("Russellville village, Ohio",827),
("Russia village, Ohio",724),
("Rutland village, Ohio",453),
("Sabina village, Ohio",2454),
("St. Bernard village, Ohio",4355),
("St. Clairsville city, Ohio",5071),
("St. Henry village, Ohio",2503),
("St. Johns CDP, Ohio",158),
("St. Louisville village, Ohio",500),
("St. Martin CDP, Ohio",265),
("St. Marys city, Ohio",8145),
("St. Paris village, Ohio",2059),
("Salem city, Ohio",11870),
("Salem Heights CDP, Ohio",3277),
("Salesville CDP, Ohio",116),
("Salineville village, Ohio",1223),
("Sandusky city, Ohio",25012),
("Sandyville CDP, Ohio",290),
("Sarahsville village, Ohio",133),
("Sardinia village, Ohio",1360),
("Sardis CDP, Ohio",659),
("Savannah village, Ohio",367),
("Sawyerwood CDP, Ohio",1331),
("Scio village, Ohio",952),
("Sciotodale CDP, Ohio",1057),
("Scott village, Ohio",286),
("Seaman village, Ohio",1117),
("Sebring village, Ohio",4263),
("Senecaville village, Ohio",336),
("Seven Hills city, Ohio",11690),
("Seven Mile village, Ohio",828),
("Seville village, Ohio",2598),
("Shadyside village, Ohio",3623),
("Shaker Heights city, Ohio",27620),
("Sharonville city, Ohio",13942),
("Shawnee CDP, Ohio",598),
("Shawnee village, Ohio",449),
("Shawnee Hills village, Ohio",785),
("Shawnee Hills CDP, Ohio",2254),
("Sheffield village, Ohio",4126),
("Sheffield Lake city, Ohio",9011),
("Shelby city, Ohio",8809),
("Sherrodsville village, Ohio",292),
("Sherwood village, Ohio",854),
("Sherwood CDP, Ohio",3448),
("Shiloh village, Ohio",663),
("Shreve village, Ohio",1627),
("Sidney city, Ohio",20639),
("Silver Lake village, Ohio",2511),
("Silverton village, Ohio",4767),
("Sinking Spring village, Ohio",191),
("Sixteen Mile Stand CDP, Ohio",2804),
("Skyline Acres CDP, Ohio",2522),
("Smithfield village, Ohio",787),
("Smithville village, Ohio",1346),
("Solon city, Ohio",23038),
("Somerset village, Ohio",1673),
("Somerville CDP, Ohio",244),
("South Amherst village, Ohio",1807),
("South Bloomfield village, Ohio",2116),
("South Canal CDP, Ohio",894),
("South Charleston village, Ohio",1937),
("South Euclid city, Ohio",21688),
("South Lebanon village, Ohio",4622),
("South Point village, Ohio",3609),
("South Russell village, Ohio",3801),
("South Salem village, Ohio",278),
("South Solon village, Ohio",309),
("South Vienna village, Ohio",585),
("South Webster village, Ohio",680),
("South Zanesville village, Ohio",2253),
("Sparta village, Ohio",161),
("Spencer village, Ohio",594),
("Spencerville village, Ohio",2367),
("Springboro city, Ohio",18042),
("Springdale city, Ohio",11228),
("Springfield city, Ohio",59305),
("Spring Valley village, Ohio",535),
("Stafford village, Ohio",112),
("Sterling CDP, Ohio",406),
("Steubenville city, Ohio",18095),
("Stewart CDP, Ohio",327),
("Stockdale CDP, Ohio",19),
("Stockport village, Ohio",745),
("Stone Creek village, Ohio",187),
("Stony Prairie CDP, Ohio",1307),
("Stony Ridge CDP, Ohio",432),
("Stoutsville village, Ohio",655),
("Stow city, Ohio",34796),
("Strasburg village, Ohio",2814),
("Stratton village, Ohio",363),
("Streetsboro city, Ohio",16395),
("Strongsville city, Ohio",44752),
("Struthers city, Ohio",10325),
("Stryker village, Ohio",1170),
("Sugar Bush Knolls village, Ohio",196),
("Sugarcreek village, Ohio",2220),
("Sugar Grove village, Ohio",381),
("Sulphur Springs CDP, Ohio",127),
("Summerfield village, Ohio",224),
("Summerside CDP, Ohio",4692),
("Summitville village, Ohio",98),
("Sunbury village, Ohio",5280),
("Swanton village, Ohio",3637),
("Sycamore village, Ohio",1031),
("Sylvania city, Ohio",18928),
("Syracuse village, Ohio",787),
("Tallmadge city, Ohio",17581),
("Tarlton village, Ohio",259),
("Taylor Creek CDP, Ohio",3336),
("Tedrow CDP, Ohio",116),
("Terrace Park village, Ohio",2350),
("The Plains CDP, Ohio",3001),
("The Village of Indian Hill city, Ohio",5876),
("Thornport CDP, Ohio",964),
("Thornville village, Ohio",1083),
("Thurston village, Ohio",627),
("Tiffin city, Ohio",17599),
("Tiltonsville village, Ohio",1385),
("Timberlake village, Ohio",751),
("Tipp City city, Ohio",9912),
("Tippecanoe CDP, Ohio",90),
("Tiro village, Ohio",193),
("Toledo city, Ohio",278193),
("Tontogany village, Ohio",520),
("Toronto city, Ohio",5298),
("Tremont City village, Ohio",319),
("Trenton city, Ohio",12755),
("Trimble village, Ohio",532),
("Trinway CDP, Ohio",216),
("Trotwood city, Ohio",24361),
("Troy city, Ohio",25894),
("Tuppers Plains CDP, Ohio",372),
("Turpin Hills CDP, Ohio",5351),
("Tuscarawas village, Ohio",1461),
("Twinsburg city, Ohio",18915),
("Twinsburg Heights CDP, Ohio",735),
("Uhrichsville city, Ohio",5370),
("Union city, Ohio",6555),
("Union City village, Ohio",1755),
("Uniontown CDP, Ohio",3250),
("Unionville Center village, Ohio",369),
("Uniopolis CDP, Ohio",222),
("University Heights city, Ohio",13138),
("Upper Arlington city, Ohio",35223),
("Upper Sandusky city, Ohio",6757),
("Urbana city, Ohio",11434),
("Urbancrest village, Ohio",1085),
("Utica village, Ohio",1801),
("Valley City CDP, Ohio",552),
("Valley Hi village, Ohio",175),
("Valley View village, Ohio",2013),
("Valleyview village, Ohio",526),
("Van Buren village, Ohio",448),
("Vandalia city, Ohio",15080),
("Vanlue village, Ohio",283),
("Van Wert city, Ohio",11077),
("Vaughnsville CDP, Ohio",203),
("Venedocia village, Ohio",142),
("Vermilion city, Ohio",10226),
("Verona village, Ohio",446),
("Versailles village, Ohio",2676),
("Vickery CDP, Ohio",47),
("Vienna Center CDP, Ohio",623),
("Vincent CDP, Ohio",242),
("Vinton village, Ohio",279),
("Wadsworth city, Ohio",23155),
("Waite Hill village, Ohio",399),
("Wakeman village, Ohio",1218),
("Walbridge village, Ohio",3110),
("Waldo village, Ohio",327),
("Walnut Creek CDP, Ohio",1271),
("Walton Hills village, Ohio",2215),
("Wapakoneta city, Ohio",9732),
("Warren city, Ohio",39668),
("Warrensville Heights city, Ohio",13258),
("Warsaw village, Ohio",605),
("Washington Court House city, Ohio",14135),
("Washingtonville village, Ohio",983),
("Waterford CDP, Ohio",651),
("Waterville city, Ohio",5484),
("Wauseon city, Ohio",7531),
("Waverly village, Ohio",4294),
("Wayne village, Ohio",1047),
("Wayne Lakes village, Ohio",641),
("Waynesburg village, Ohio",822),
("Waynesfield village, Ohio",722),
("Waynesville village, Ohio",3038),
("Wellington village, Ohio",5249),
("Wellston city, Ohio",5540),
("Wellsville village, Ohio",3393),
("West Alexandria village, Ohio",1603),
("West Carrollton city, Ohio",12954),
("West Elkton village, Ohio",185),
("Westerville city, Ohio",39242),
("West Farmington village, Ohio",434),
("Westfield Center village, Ohio",1191),
("West Hill CDP, Ohio",2045),
("West Jefferson village, Ohio",4343),
("West Lafayette village, Ohio",2445),
("Westlake city, Ohio",32378),
("West Leipsic village, Ohio",223),
("West Liberty village, Ohio",1696),
("West Manchester village, Ohio",436),
("West Mansfield village, Ohio",688),
("West Millgrove village, Ohio",128),
("West Milton village, Ohio",4728),
("Westminster CDP, Ohio",544),
("Weston village, Ohio",1543),
("West Portsmouth CDP, Ohio",2795),
("West Rushville village, Ohio",251),
("West Salem village, Ohio",1562),
("West Union village, Ohio",3231),
("West Unity village, Ohio",1749),
("Wetherington CDP, Ohio",1283),
("Wharton village, Ohio",346),
("Wheelersburg CDP, Ohio",5996),
("Whitehall city, Ohio",18874),
("Whitehouse village, Ohio",4713),
("White Oak CDP, Ohio",19900),
("Whites Landing CDP, Ohio",478),
("Wickliffe city, Ohio",12733),
("Wightmans Grove CDP, Ohio",47),
("Wilberforce CDP, Ohio",2291),
("Wilkesville village, Ohio",178),
("Willard city, Ohio",6060),
("Williamsburg village, Ohio",2535),
("Williamsdale CDP, Ohio",1076),
("Williamsport village, Ohio",955),
("Williston CDP, Ohio",407),
("Willoughby city, Ohio",22770),
("Willoughby Hills city, Ohio",9526),
("Willowick city, Ohio",14160),
("Willshire village, Ohio",399),
("Wilmington city, Ohio",12403),
("Wilmot village, Ohio",329),
("Wilson village, Ohio",136),
("Winchester village, Ohio",1065),
("Windham village, Ohio",1988),
("Winesburg CDP, Ohio",279),
("Wintersville village, Ohio",4482),
("Withamsville CDP, Ohio",7788),
("Wolfhurst CDP, Ohio",1232),
("Woodlawn village, Ohio",3319),
("Woodmere village, Ohio",786),
("Woodsfield village, Ohio",2371),
("Woodstock village, Ohio",301),
("Woodville village, Ohio",2118),
("Wooster city, Ohio",26778),
("Worthington city, Ohio",14705),
("Wren village, Ohio",110),
("Wright-Patterson AFB CDP, Ohio",2579),
("Wyoming city, Ohio",8535),
("Xenia city, Ohio",26364),
("Yankee Lake village, Ohio",87),
("Yellow Springs village, Ohio",3858),
("Yorkshire village, Ohio",83),
("Yorkville village, Ohio",1049),
("Youngstown city, Ohio",64734),
("Zaleski village, Ohio",285),
("Zanesfield village, Ohio",303),
("Zanesville city, Ohio",25376),
("Zoar village, Ohio",169),
("Achille town, Oklahoma",359),
("Ada city, Oklahoma",17276),
("Adair town, Oklahoma",873),
("Addington town, Oklahoma",141),
("Afton town, Oklahoma",1189),
("Agra town, Oklahoma",294),
("Akins CDP, Oklahoma",558),
("Albany CDP, Oklahoma",113),
("Albion town, Oklahoma",93),
("Alderson town, Oklahoma",205),
("Alex town, Oklahoma",546),
("Aline town, Oklahoma",149),
("Allen town, Oklahoma",870),
("Altus city, Oklahoma",19034),
("Alva city, Oklahoma",5154),
("Amber town, Oklahoma",423),
("Ames town, Oklahoma",264),
("Amorita town, Oklahoma",34),
("Anadarko city, Oklahoma",6664),
("Antlers city, Oklahoma",2594),
("Apache town, Oklahoma",1415),
("Arapaho town, Oklahoma",897),
("Arcadia town, Oklahoma",252),
("Ardmore city, Oklahoma",24892),
("Arkoma town, Oklahoma",1872),
("Armstrong town, Oklahoma",107),
("Arnett town, Oklahoma",585),
("Arpelar CDP, Oklahoma",321),
("Asher town, Oklahoma",368),
("Ashland town, Oklahoma",35),
("Atoka | |
# Source repository: google-cloud-sdk-unofficial/google-cloud-sdk
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build and deploy to Google Kubernetes Engine command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os.path
import uuid
from apitools.base.py import encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.cloudbuild import logs as cb_logs
from googlecloudsdk.api_lib.cloudbuild import snapshot
from googlecloudsdk.api_lib.storage import storage_api
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exceptions
from googlecloudsdk.command_lib.builds import staging_bucket_util
from googlecloudsdk.command_lib.builds.deploy import build_util
from googlecloudsdk.command_lib.builds.deploy import git
from googlecloudsdk.command_lib.cloudbuild import execution
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.resource import resource_transform
from googlecloudsdk.core.util import times
import six
# File extensions recognized as pre-packaged source archives (as opposed to a
# source directory). Usage is outside this chunk — presumably checked when
# staging source; TODO confirm against _StageSource.
_ALLOWED_SOURCE_EXT = ['.zip', '.tgz', '.gz']
class FailedDeployException(core_exceptions.Error):
  """Raised when a build/deploy finishes in a non-success status."""

  def __init__(self, build):
    message = (
        'failed to build or deploy: build {id} completed with status "{status}"'
        .format(id=build.id, status=build.status))
    super(FailedDeployException, self).__init__(message)
class DeployGKE(base.Command):
"""Build and deploy to a target Google Kubernetes Engine cluster."""
  @staticmethod
  def Args(parser):
    """Register flags for this command.

    Args:
      parser: An argparse.ArgumentParser-like object. It is mocked out in order
        to capture some information, but behaves like an ArgumentParser.
    """
    # Positional SOURCE and --no-source are mutually exclusive: either upload
    # source (directory or archive) with the build, or run with no source.
    source = parser.add_mutually_exclusive_group()
    source.add_argument(
        'source',
        nargs='?',
        default='.',  # By default, the current directory is used.
        help='Location of the source and configs to build and deploy. '
        'The location can be a directory on a local disk or a '
        'gzipped archive file (.tar.gz) in Google Cloud Storage.')
    source.add_argument(
        '--no-source',
        action='store_true',
        help='Specify that no source should be uploaded with this build.')
    # --tag (build a new image from source) and --image (deploy an existing
    # image) are mutually exclusive ways to name the container image.
    docker = parser.add_mutually_exclusive_group(
        help="""
        Image to use to build and/or deploy.

        To build an image with a default tag, omit these flags. The resulting
        tag will be in the format 'gcr.io/[PROJECT_ID]/[IMAGE]/[TAG], where
        [PROJECT_ID] is your project ID, [IMAGE] is the value provided by
        `--app-name`, if provided, else it is the name of the provided source
        directory, and [TAG] is the value provided by `--app-version`, if
        provided, else it is the commit SHA of your provided source.
        """)
    docker.add_argument(
        '--tag',
        help="""
        Tag to use with a 'docker build' image creation. Cloud Build runs a
        remote 'docker build -t $TAG .' command, where $TAG is the tag provided
        by this flag. The tag must be in the gcr.io/* or *.gcr.io/* namespaces.
        If you specify a tag in this command, your source must include a
        Dockerfile. For instructions on building using a Dockerfile see
        https://cloud.google.com/cloud-build/docs/quickstart-docker.
        """)
    docker.add_argument(
        '--image',
        help='Existing container image to deploy. If set, Cloud Build deploys '
        'the container image to the target Kubernetes cluster. The image must '
        'be in the gcr.io/* or *.gcr.io/* namespaces.')
    parser.add_argument(
        '--gcs-staging-dir',
        help="""
        Path to the Google Cloud Storage subdirectory into which to copy the
        source and configs (suggested base and expanded Kubernetes YAML files)
        that are used to stage and deploy your app. If the bucket in this path
        doesn't exist, Cloud Build creates it.

        If this field is not set, the source and configs are written to
        ```gs://[PROJECT_ID]_cloudbuild/deploy```, where source is written to
        the 'source' sub-directory and configs are written to the 'config'
        sub-directory.
        """)
    parser.add_argument(
        '--app-name',
        help='If specified, the following label is added to the Kubernetes '
        "manifests: 'app.kubernetes.io/name: APP_NAME'. Defaults to the "
        'container image name provided by `--image` or `--tag` without the tag, '
        "e.g. 'my-app' for 'gcr.io/my-project/my-app:1.0.0'.")
    parser.add_argument(
        '--app-version',
        help='If specified, the following label is added to the Kubernetes '
        "manifests: 'app.kubernetes.io/version: APP_VERSION'. Defaults to the "
        'container image tag provided by `--image` or `--tag`. If no image tag '
        'is provided and `SOURCE` is a valid git repository, defaults to the '
        'short revision hash of the HEAD commit.')
    # Target cluster identification; both are mandatory.
    parser.add_argument(
        '--cluster',
        help='Name of the target cluster to deploy to.',
        required=True)
    parser.add_argument(
        '--location',
        help='Region or zone of the target cluster to deploy to.',
        required=True)
    parser.add_argument(
        '--namespace',
        help='Namespace of the target cluster to deploy to. If this field is '
        "not set, the 'default' namespace is used.")
    parser.add_argument(
        '--config',
        help="""
        Path to the Kubernetes YAML, or directory containing multiple
        Kubernetes YAML files, used to deploy the container image. The path is
        relative to the repository root provided by [SOURCE]. The files must
        reference the provided container image or tag.

        If this field is not set, a default Deployment config and Horizontal
        Pod Autoscaler config are used to deploy the image.
        """)
    # --timeout is stored into the builds/timeout property rather than read
    # from the namespace directly (see properties.VALUES.builds.timeout).
    parser.add_argument(
        '--timeout',
        help='Maximum time a build is run before it times out. For example, '
        '"2h15m5s" is 2 hours, 15 minutes, and 5 seconds. If you '
        'do not specify a unit, seconds is assumed. Overrides the default '
        'builds/timeout property value for this command invocation.',
        action=actions.StoreProperty(properties.VALUES.builds.timeout),
    )
    parser.add_argument(
        '--expose',
        type=int,
        help='Port that the deployed application listens on. If set, a '
        "Kubernetes Service of type 'LoadBalancer' is created with a "
        'single TCP port mapping that exposes this port.')
    base.ASYNC_FLAG.AddToParser(parser)
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.

    Raises:
      FailedDeployException: If the build is completed and not 'SUCCESS'.
    """
    # --- Validate the source / --no-source combination. ---
    if not args.source and not args.no_source:
      raise c_exceptions.InvalidArgumentException(
          '--no-source', 'To omit source, use the --no-source flag.')
    if args.no_source:
      if args.tag:
        raise c_exceptions.RequiredArgumentException(
            'SOURCE',
            'Source is required to build container image.'
        )
      if args.config:
        raise c_exceptions.RequiredArgumentException(
            'SOURCE',
            'Source is required when specifying --config because it is a '
            'relative path in the source directory.')
    # Deploying a prebuilt image with no custom config needs no source upload.
    do_build_and_push = args.image is None
    if not do_build_and_push and not args.config:
      args.no_source = True
    image = self._DetermineImageFromArgs(args)
    # Determine app_name
    if args.app_name:
      app_name = args.app_name
    else:
      app_name = self._ImageName(image)
    # Determine app_version
    app_version = None
    # A digest reference ('@') never carries a tag; otherwise ':' marks one.
    image_has_tag = '@' not in image and ':' in image
    if args.app_version:
      app_version = args.app_version
    elif image_has_tag:
      app_version = image.split(':')[-1]  # Set version to tag
    elif args.source:
      # Fall back to the git HEAD revision for a clean git checkout.
      if git.IsGithubRepository(
          args.source) and not git.HasPendingChanges(args.source):
        commit_sha = git.GetGitHeadRevision(args.source)
        if commit_sha:
          app_version = commit_sha
    # Validate expose
    if args.expose and args.expose < 0:
      raise c_exceptions.InvalidArgumentException('--expose',
                                                  'port number is invalid')
    # Determine gcs_staging_dir_bucket and gcs_staging_dir_object
    if args.gcs_staging_dir is None:
      gcs_staging_dir_bucket = staging_bucket_util.GetDefaultStagingBucket()
      gcs_staging_dir_object = 'deploy'
    else:
      try:
        # Try bucket/object first; fall back to a bare bucket path.
        gcs_staging_dir_ref = resources.REGISTRY.Parse(
            args.gcs_staging_dir, collection='storage.objects')
        gcs_staging_dir_object = gcs_staging_dir_ref.object
      except resources.WrongResourceCollectionException:
        gcs_staging_dir_ref = resources.REGISTRY.Parse(
            args.gcs_staging_dir, collection='storage.buckets')
        gcs_staging_dir_object = None
      gcs_staging_dir_bucket = gcs_staging_dir_ref.bucket
    gcs_client = storage_api.StorageClient()
    try:
      # Only require ownership when we picked the default bucket ourselves.
      gcs_client.CreateBucketIfNotExists(
          gcs_staging_dir_bucket,
          check_ownership=args.gcs_staging_dir is None)
    except storage_api.BucketInWrongProjectError:
      # If we're using the default bucket but it already exists in a different
      # project, then it could belong to a malicious attacker (b/33046325).
      raise c_exceptions.RequiredArgumentException(
          '--gcs-staging-dir',
          'A bucket with name {} already exists and is owned by '
          'another project. Specify a bucket using '
          '--gcs-staging-dir.'.format(gcs_staging_dir_bucket))
    if gcs_staging_dir_object:
      gcs_config_staging_path = '{}/{}/config'.format(
          gcs_staging_dir_bucket, gcs_staging_dir_object)
    else:
      gcs_config_staging_path = gcs_staging_dir_bucket
    if not args.no_source:
      staged_source = self._StageSource(args.source, gcs_staging_dir_bucket,
                                        gcs_staging_dir_object)
    else:
      staged_source = None
    messages = cloudbuild_util.GetMessagesModule()
    # Assemble the Cloud Build config that builds/pushes and deploys.
    build_config = build_util.CreateBuild(
        messages,
        build_timeout=properties.VALUES.builds.timeout.Get(),
        build_and_push=do_build_and_push,
        staged_source=staged_source,
        image=image,
        dockerfile_path='Dockerfile',
        app_name=app_name,
        app_version=app_version,
        config_path=args.config,
        namespace=args.namespace,
        expose_port=args.expose,
        gcs_config_staging_path=gcs_config_staging_path,
        cluster=args.cluster,
        location=args.location,
        build_tags=([] if not args.app_name else [args.app_name]))
    client = cloudbuild_util.GetClientInstance()
    self._SubmitBuild(
        client, messages, build_config, gcs_config_staging_path,
        args.config is None, args.async_)
def _DetermineImageFromArgs(self, args):
"""Gets the image to use for the build, given the user args.
Args:
args: argsparse object from the DeployGKE command.
Returns:
Full image string representation.
"""
if args.tag:
if (properties.VALUES.builds.check_tag.GetBool() and
'gcr.io/' not in args.tag):
raise c_exceptions.InvalidArgumentException(
'--tag',
'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.')
return args.tag
elif args.image:
if (properties.VALUES.builds.check_tag.GetBool() and
'gcr.io/' not in args.image):
raise c_exceptions.InvalidArgumentException(
'--image',
'Image value must be in the gcr.io/* or *.gcr.io/* namespace.')
return args.image
else: # Default tag
if args.app_name:
default_name = args.app_name
elif os.path.isdir(args.source): # I.e., the source is | |
use_stdin:
write('\n'.join(diff))
return exit_code, diff
def compute_unified_diff(filename, content2, **kwargs):
    # type: (str, bytes, **Any) -> Tuple[int, Iterable[str]]
    """Compute a unified diff between the cached content of filename and content2.

    Returns (exit_code, diff): exit_code is OK on success and ERROR when the
    diff could not be computed, in which case diff is empty. Extra keyword
    arguments are forwarded to difflib.unified_diff; the number of context
    lines 'n' defaults to zero unless given by the caller.
    """
    diff = ()  # type: Iterable[str]
    exit_code = ERROR
    kw = kwargs.copy()
    if 'n' not in kwargs:
        # zero context lines
        kw['n'] = 0
    try:
        content1 = get_cached_file(filename)
        if PY3:
            c1 = unistr(content1)
            c2 = unistr(content2)
        else:
            c1 = content1
            c2 = content2
        diff = difflib.unified_diff(c1.splitlines(True), c2.splitlines(True), **kw)
        exit_code = OK
    except Exception:
        # Best effort: report ERROR with an empty diff on any failure.
        # BUG FIX: the original used `finally: return exit_code, diff`, which
        # discards *every* in-flight exception, including KeyboardInterrupt
        # and SystemExit (flake8-bugbear B012). Catching Exception keeps the
        # best-effort behavior while letting process-control exceptions pass.
        pass
    return exit_code, diff
# ---------------------------------------------------------------------
# Spare the user from specifying a formatter by finding a suitable one.
def formatterclass(fmtname):
    # type: (str) -> Any
    """Map an executable name (e.g. 'yapf') or a version string (e.g.
    'clang-format version 3.9.0 (trunk)') to the matching formatter class.

    The first matching prefix wins; returns None when nothing matches.
    """
    lowered = fmtname.lower()
    prefix_to_class = [
        ('clang-format', ClangFormatter),
        ('LLVM', ClangFormatter),
        ('yapf', YapfFormatter),
        ('uncrustify', UncrustifyFormatter),
        ('indent', IndentFormatter),
        ('gindent', IndentFormatter),
        ('GNU indent', IndentFormatter),
        ('astyle', ArtisticStyleFormatter),
        ('Artistic Style', ArtisticStyleFormatter),
        ('tidy', HtmlTidyFormatter),
        ('HTML Tidy', HtmlTidyFormatter),
        ('scalariform', ScalariformFormatter),
        ('scalafmt', ScalafmtFormatter),
        ('rfmt', RfmtFormatter),
        ('rustfmt', RustfmtFormatter),
    ]
    matches = (cls for prefix, cls in prefix_to_class
               if lowered.startswith(prefix.lower()))
    return next(matches, None)
class FormatterFailedError(OSError):
    """Raised when a formatter executable could not be run (see formatter_version)."""
    pass
def formatter_version(fmtpath):
    # type: (str) -> str
    """Returns the formatter version, e.g. 'yapf 0.8.2'.

    Raises:
        FormatterFailedError: if the formatter executable cannot be run.
    """
    exeresult = run_executable(fmtpath, ['--version'])
    if exeresult.error is not None:
        raise FormatterFailedError(exeresult.error)
    version_string = unistr(exeresult.stdout).strip()
    # Keep the raw stdout around: rustfmt's '--help' branch below reuses it.
    orig_version_string = version_string
    if not version_string:
        # Some tools report their version on stderr instead of stdout.
        version_string = unistr(exeresult.stderr).strip()
        if version_string.startswith('indent'):
            version_string = 'indent'
        elif version_string.startswith('usage: rfmt'):
            version_string = 'rfmt'
        else:
            version_string = ''
    if not version_string or re.match(r'^\d', version_string):
        # Call for help if we have no or only the version string without the formatter name.
        exeresult = run_executable(fmtpath, ['--help'])
        if exeresult.error is not None:
            raise FormatterFailedError(exeresult.error)
        version_string = unistr(exeresult.stdout).strip()
        if 'rustfmt' in version_string:
            # rustfmt prints only a bare version number; prepend the name.
            version_string = 'rustfmt ' + orig_version_string
        else:
            version_string = ''
    return version_string
def choose_formatters(filenames, report=True):
    # type: (List[str], bool) -> List[str]
    """Returns a list of formatters that can be found in the users PATH that support
    every extension of the filenames.
    """
    candidates = formatters_for_files(filenames)
    if not candidates:
        if report:
            reporterror('Error: based on your file types none of the supported '
                        'formatters match your file types, please specify '
                        '--formatter')
        return candidates
    if report:
        iprint(INFO_USER, 'The following formatters support your '
               'file types: %s' % ', '.join(candidates))
    found = installed_formatters(candidates)
    if report:
        if not found:
            reporterror('Error: no suitable formatter could be found in your '
                        'PATH, please specify --formatter')
        else:
            iprint(INFO_USER, 'We found the following in your PATH:')
            # Highlight the first hit (the one that will be used) in green.
            for position, fmtpath in enumerate(found):
                highlight = green if position == 0 else yellow
                iprint(INFO_USER, highlight(' %s' % fmtpath))
    return found
def formatters_for_files(filenames):
    # type: (List[str]) -> List[str]
    """Returns a list of formatter names that support every extension of these filenames.
    """
    extensions = set()  # type: Set[str]
    for name in filenames:
        stem, suffix = os.path.splitext(name)
        suffix = suffix.lower()
        if not suffix and stem.startswith('.'):
            # Recognize extension-only filenames as well.
            suffix = stem.lower()
        extensions.add(suffix)
    # An empty extension set (no filenames) matches every formatter.
    return [fmt for fmt, fmtexts in SUPPORTED_EXTS
            if not extensions or extensions.issubset(set(fmtexts.split()))]
def installed_formatters(formatters):
    # type: (List[str]) -> List[str]
    """Returns a list of absolute filenames of the formatters that can
    be found in the users PATH.
    """
    found = []
    for shortname in formatters:
        fmtclass = formatterclass(shortname)  # type: CodeFormatter
        located = (which(exe) for exe in fmtclass.executable_names())
        found.extend(p for p in located if p is not None)
    return found
# ---------------------------------------------------------------------
@contextmanager
def pagercontext(usepager):
    # type: (bool) -> Generator
    """Yields a file object that is connected with 'more' or 'less' when this is possible
    and requested.

    Yields None when no pager is used (pager disabled, Windows, or stdout is
    not a tty); callers must handle both cases.
    """
    fp = None
    if not usepager or WINOS or not sys.stdout.isatty():
        pagercmdargs = []  # type: List[str]
    else:
        pagercmdargs = systempager_cmdargs()
    stty_settings = ''
    proc = None
    try:
        if pagercmdargs:
            if is_executable(STTY_CMD):
                # Remember the current terminal settings so they can be
                # restored should the pager be interrupted below.
                sproc = subprocess.Popen([STTY_CMD, '-g'], stdout=subprocess.PIPE)
                stty_settings, _ = sproc.communicate()
                if sproc.returncode == 0:
                    stty_settings = unistr(stty_settings.strip())
            proc = subprocess.Popen(pagercmdargs, stdin=subprocess.PIPE)
            assert proc is not None
            fp = proc.stdin
        yield fp
        # Wait for the pager to finish consuming its stdin.
        if proc is not None:
            proc.communicate()
    except KeyboardInterrupt:
        # Terminate the pager process
        if proc is not None:
            proc.terminate()
        # Restore stty settings otherwise the terminal might no longer echo input.
        if stty_settings:
            subprocess.Popen([STTY_CMD, stty_settings]).communicate()
        raise
def whatstyle(args, parser):
    # type: (argparse.Namespace, argparse.ArgumentParser) -> int
    """Parses the command line and executes the according actions.
    """
    global COLOR_SUPPORT
    global STDERR_OUTPUT
    global NO_PROGRESS
    global LOGFILE
    global LOGSPLITDIR
    global MESSAGE_CATEGORY_FILES
    # Transfer logging and terminal options into the module-level globals.
    if args.logfile:
        LOGFILE = args.logfile
    if args.logsplitdir:
        LOGSPLITDIR = args.logsplitdir
        MESSAGE_CATEGORY_FILES = {}
    if args.colors == 'on':
        COLOR_SUPPORT = True
    elif args.colors == 'off':
        COLOR_SUPPORT = False
    # BUG FIX: this previously read the undefined name `args_debug`, which
    # raised NameError on every invocation; the debug categories are carried
    # in args.debug (cf. args.logfile / args.colors above).
    if DEBUG_OPTIONS in args.debug:
        # Show the options that differ from the parser defaults, highlighted.
        defaultargs = parser.parse_args([])
        parts = []
        for key, defaultvalue in sorted(defaultargs.__dict__.items()):
            value = getattr(args, key)
            if isinstance(value, list):
                value = '[' + ', '.join(value) + ']'
            if isinstance(defaultvalue, list):
                defaultvalue = '[' + ', '.join(defaultvalue) + ']'
            if value != defaultvalue:
                key = green(key)
                value = yellow(value)
            parts.append('%s: %s' % (key, value))
        dprint(DEBUG_OPTIONS, ', '.join(parts))
    filenames = args.filename
    NO_PROGRESS = args.noprogress
    if args.cachepath:
        cachepath = args.cachepath
    else:
        cachepath = default_cachepath()
    if args.usage_examples:
        # Only display the module docstring (usage examples) and quit.
        with pagercontext(not args.nopager) as fp:
            outline(__doc__, fp=fp)
        return OK
    # Pick a cache backend: sqlite on disk or in memory, else directory-based.
    cache = None
    if sqlite3:
        if args.cache == 'disk':
            cache = Cache(cachepath + '.sqlite')
        elif args.cache == 'memory':
            cache = Cache(':memory:')
    if args.cache == 'dirs' or (args.cache == 'disk' and not sqlite3):
        cache = HexCache(cachepath)
    if args.deletecache and cache:
        return cache.deletecache()
    if args.stderr:
        STDERR_OUTPUT = True
    if args.stdindiff:
        if len(filenames) != 1:
            parser.error('--stdindiff needs exactly one filename')
        exit_code, diff = unified_diff(filenames[0])
        return exit_code
    fmtpath = args.formatter
    if fmtpath is None:
        # No formatter specified: choose one that matches the file types.
        inpath = choose_formatters(filenames, report=not args.supported)
        if args.supported:
            for path in inpath:
                outline(path)
            return OK
        if not inpath:
            return ERROR
        fmtpath = inpath[0]
    if fmtpath is None:
        return ERROR
    fmtpath = exename(fmtpath)
    # Validate filename / mode / --references combinations.
    if not filenames:
        if args.references:
            parser.error('When using --references please specify at least two files')
        else:
            parser.error('Please specify at least one source file')
    if args.mode == MODE_STYLEDIFF and not args.references:
        parser.error('When using mode stylediff you need to use --references as well')
    if args.references and len(filenames) % 2 != 0:
        parser.error('--references only works with an even number of '
                     'files, e.g. file1 reffile1 file2 reffile2 ...')
    # Check if all files can be read
    for filename in filenames:
        try:
            get_cached_file(filename)
        except IOError as exc:
            reporterror('Error: unable to read file "%s": %s' % (filename, exc.strerror))
            return ERROR
    if args.savemetric and not args.output:
        reporterror('Error: please specify --output filename with --savemetric')
        return ERROR
    try:
        try:
            version_string = formatter_version(fmtpath)
        except FormatterFailedError as exc:
            reporterror("Error: Unable to run formatter: %s" % str(exc))
            return ERROR
        # Take the first usable diff tool (for/break/else picks element 0).
        for difftool in diff_tools(args.difftool):
            break
        else:
            reporterror('Error: We have not found one usable diff tool')
            return ERROR
        fmtclass = formatterclass(version_string)
        if fmtclass is None:
            reporterror('Unknown formatter type %s %s' % (fmtpath, version_string))
            supported = ', '.join([fmt for fmt, _ in SUPPORTED_EXTS])
            reporterror('Currently only these formatters are supported: %s' % supported)
            return ERROR
        formatter = fmtclass(fmtpath, cache=cache)
        iprint(INFO_USER, '-----------------------\nRunning whatstyle ' + __version__)
        iprint(INFO_USER, 'Using formatter %s (%s)' % (green(formatter.exe), version_string))
        diffcmd = ' '.join([difftool[1]] + difftool[2])
        iprint(INFO_USER, 'Using this for comparing files: %s' % diffcmd)
        formatter.use_startstyle(args.startstyle)
        formatter.allow_encoding_change = args.allow_encoding_change
        formatter.keeptempfiles = args.keeptempfiles
        formatter.register_options()
        ignoreopts = args.ignoreopts.split(',') if args.ignoreopts else []
        params = ParameterSet(formatter, difftool, args.mode, args.sourcefactor,
                              args.variantsfactor, args.references, args.maxrounds,
                              ignoreopts, args.bestofround, args.concat, args.concurrent)
        result = find_style(params, filenames, language=args.language)
        exit_code = handle_results(args,
                                   formatter,
                                   filenames,
                                   args.mode,
                                   args.references,
                                   ignoreopts,
                                   result,
                                   args.diff,
                                   args.uncondensed,
                                   output=args.output,
                                   savemetric=args.savemetric)
    finally:
        # Always release the cache, even when find_style raises.
        if cache is not None:
            cache.close()
    return exit_code
def show_diffs(formatter, # type: CodeFormatter
filenames, # type: List[str]
style, # type: Style
ansi, # type: bool
ansihtml, # type: bool
html, # type: bool
nopager, # type: bool
numlines, # type: int
wrapcolumn=0, # type: int
linenumbers=False, # type: bool
enc='utf-8' # type: str
):
# type: (...) -> None
"""Show the differences between the current and reformatted sources.
"""
if not ansi and not html:
if supports_color():
ansi = True
else:
html = True
pairs = []
for filename in filenames:
sourcedata = get_cached_file(filename)
content = formatter.formatcode(style, sourcedata, filename=filename)
pairs.append((sourcedata, content))
unifilenames = '\n'.join([unifilename(f) for f in filenames]) + '\n'
htmldiffer = HtmlMultiDiff(tabsize=4, wrapcolumn=wrapcolumn)
table = htmldiffer.table_from_pairs(pairs,
enc,
fromdesc='',
todesc='',
context=True,
numlines=numlines)
headerhtml = '<pre>\n' + unifilenames + '</pre>'
customhtml = make_custom_html(htmldiffer, headerhtml, [table], enc=enc)
htmldata = unescape_ill_surrencode(customhtml, | |
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
# pylint: disable=line-too-long, too-many-lines
helps['cognitiveservices'] = """
type: group
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
"""
helps['cognitiveservices account'] = """
type: group
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
"""
helps['cognitiveservices account create'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
parameters:
- name: --kind
populator-commands:
- az cognitiveservices account list-kinds
- name: --sku
populator-commands:
- az cognitiveservices account list-skus
examples:
- name: Create an S0 face API Cognitive Services account in West Europe without confirmation required.
text: az cognitiveservices account create -n myresource -g myResourceGroup --kind Face --sku S0 -l WestEurope --yes
- name: Manage Azure Cognitive Services accounts. (autogenerated)
text: az cognitiveservices account create --kind Face --location WestEurope --name myresource --resource-group myResourceGroup --sku S0 --subscription MySubscription --yes
crafted: true
"""
helps['cognitiveservices account delete'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Delete account.
text: az cognitiveservices account delete --name myresource-luis -g cognitive-services-resource-group
"""
helps['cognitiveservices account keys'] = """
type: group
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
"""
helps['cognitiveservices account keys list'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Get current resource keys.
text: az cognitiveservices account keys list --name myresource -g cognitive-services-resource-group
- name: Manage Azure Cognitive Services accounts. (autogenerated)
text: az cognitiveservices account keys list --name myresource --resource-group cognitive-services-resource-group --subscription MySubscription
crafted: true
"""
helps['cognitiveservices account keys regenerate'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Get new keys for resource.
text: az cognitiveservices account keys regenerate --name myresource -g cognitive-services-resource-group --key-name key1
"""
helps['cognitiveservices account list'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: List all the Cognitive Services accounts in a resource group.
text: az cognitiveservices account list -g MyResourceGroup
"""
# Help entry for `az cognitiveservices account list-skus`.
# FIX: removed the doubled word "when when" in two parameter descriptions.
helps['cognitiveservices account list-skus'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
parameters:
  - name: --name -n
    long-summary: |
        --kind and --location will be ignored when --name is specified.
        --resource-group is required when --name is specified.
  - name: --resource-group -g
    long-summary: |
        --resource-group is used when --name is specified. In other cases it will be ignored.
  - name: --kind
    populator-commands:
      - az cognitiveservices account list-kinds
examples:
  - name: Show SKUs.
    text: az cognitiveservices account list-skus --kind Face --location westus
"""
helps['cognitiveservices account network-rule'] = """
type: group
short-summary: Manage network rules.
"""
helps['cognitiveservices account network-rule add'] = """
type: command
short-summary: Add a network rule.
long-summary: >
Rules can be created for an IPv4 address, address range (CIDR format), or a virtual network subnet.
examples:
- name: Create a rule to allow a specific address-range.
text: az cognitiveservices account network-rule add -g myRg --name MyAccount --ip-address 23.45.1.0/24
- name: Create a rule to allow access for a subnet.
text: az cognitiveservices account network-rule add -g myRg --name MyAccount --vnet myvnet --subnet mysubnet
"""
helps['cognitiveservices account network-rule list'] = """
type: command
short-summary: List network rules.
examples:
- name: List network rules.
text: az cognitiveservices account network-rule list --name MyAccount --resource-group MyResourceGroup
crafted: true
"""
helps['cognitiveservices account network-rule remove'] = """
type: command
short-summary: Remove a network rule.
examples:
- name: Remove a network rule.
text: az cognitiveservices account network-rule remove --name MyAccount --resource-group MyResourceGroup --subnet mysubnet
crafted: true
- name: Remove a network rule.
text: az cognitiveservices account network-rule remove --name MyAccount --ip-address 172.16.31.10/24 --resource-group MyResourceGroup
crafted: true
"""
helps['cognitiveservices account show'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Show account information.
text: az cognitiveservices account show --name myresource --resource-group cognitive-services-resource-group
"""
helps['cognitiveservices account update'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
parameters:
- name: --sku
populator-commands:
- az cognitiveservices account list-skus
examples:
- name: Update sku and tags.
text: az cognitiveservices account update --name myresource -g cognitive-services-resource-group --sku S0 --tags external-app=chatbot-HR azure-web-app-bot=HR-external azure-app-service=HR-external-app-service
"""
helps['cognitiveservices list'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: List all the Cognitive Services accounts in a resource group.
text: az cognitiveservices list -g MyResourceGroup
"""
helps['cognitiveservices account identity'] = """
type: group
short-summary: Manage identity of Cognitive Services accounts.
"""
# Help entry for `az cognitiveservices account identity assign`.
# FIX: corrected grammar in the user-facing long-summary
# ("An system assigned identity will be generate" -> "A system-assigned
# identity will be generated").
helps['cognitiveservices account identity assign'] = """
type: command
short-summary: Assign an identity of a Cognitive Services account.
long-summary: Assign an identity object of a Cognitive Services account. A system-assigned identity will be generated and assigned to the account.
examples:
  - name: Assign an identity of Cognitive Services accounts.
    text: az cognitiveservices account identity assign --name myresource --resource-group cognitive-services-resource-group
"""
helps['cognitiveservices account identity show'] = """
type: command
short-summary: Show the identity of a Cognitive Services account.
long-summary: Show the identity object of a Cognitive Services account, empty object might be returned if the account has no assigned identity.
examples:
- name: Show the identity of Cognitive Services accounts.
text: az cognitiveservices account identity show --name myresource --resource-group cognitive-services-resource-group
"""
helps['cognitiveservices account identity remove'] = """
type: command
short-summary: Remove the identity from a Cognitive Services account.
long-summary: Remove the identity (if exists) from a Cognitive Services account.
examples:
- name: Remove the identity from a Cognitive Services account.
text: az cognitiveservices account identity remove --name myresource --resource-group cognitive-services-resource-group
"""
helps['cognitiveservices account list-deleted'] = """
type: command
short-summary: List soft-deleted Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: List all the Cognitive Services accounts in a subscription.
text: az cognitiveservices account list-deleted
"""
helps['cognitiveservices account show-deleted'] = """
type: command
short-summary: Show a soft-deleted Azure Cognitive Services account.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. | |
#
# Copyright (c) 2018, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# Load a Platform Interface Manager JSON database.
#
import os
import sys
import json
class jsondb(object):
#
# The constructor loads a database and returns it as a dictionary in db.
#
def __init__(self, fname=None,
db_search_path=None, db_category=None,
quiet=False):
self.file_path = None
self.file_name = None
self.quiet = quiet
self.db = {}
self.db_category = db_category
if (not fname):
self.__errorExit("jsondb() expects a database path")
if (db_category not in ['AFU', 'platform', 'platform-params']):
self.__errorExit("Illegal db_category ({0})".format(db_category))
self.__loadDb(fname, db_search_path)
#
# Canonicalize and check the database
#
def canonicalize(self):
if (self.db_category == 'platform-params'):
self.__canonicalizePlatformDefaultsDb()
else:
self.__canonicalizeDb()
#
# Dump the database (for debugging)
#
def dump(self, fname):
try:
with open(fname, "w") as f:
json.dump(self.db, f, indent=4, sort_keys=True)
except IOError:
self.__errorExit("Failed to open {0} for writing.".format(fname))
def __errorExit(self, msg):
sys.stderr.write("\nError: " + msg + "\n")
sys.exit(1)
#
# Return a dictionary derived from a JSON file, using a search path.
#
    def __loadDb(self, fname, db_search_path):
        """Locate, load and stage-1-canonicalize the database into self.db.

        fname may be a direct path to a JSON file; otherwise it is treated
        as a database name and <fname>.json is searched for in each
        directory of db_search_path, in order.
        """
        if (os.path.isfile(fname)):
            json_fname = fname
        else:
            # Find the DB in a directory using the search path.
            # NOTE(review): this assumes db_search_path is iterable -- a None
            # search path would raise TypeError here.  Confirm callers always
            # pass a list when fname is not a direct file path.
            json_fname = None
            for db_dir in db_search_path:
                fn = os.path.join(db_dir, fname + ".json")
                if (os.path.isfile(fn)):
                    json_fname = fn
                    break
            if (not json_fname):
                # Use a friendlier category name in the error message.
                c = self.db_category
                if (c == 'AFU'):
                    c = 'AFU top-level interface'
                self.__errorExit(
                    "Failed to find {0} JSON file for {1}".format(c, fname))
        if (not self.quiet):
            print("Loading {0} database: {1}".format(
                self.db_category, json_fname))
        self.file_path = json_fname
        # Base name of the file without directory or .json extension.
        self.file_name = os.path.splitext(os.path.basename(json_fname))[0]
        db = self.__loadJsonDbWithIncludes(json_fname)
        # Store the file path in the dictionary and in the class
        db['file_path'] = self.file_path
        db['file_name'] = self.file_name
        # First pass canonicalization guarantees that module ports
        # are ready for merging with parents.
        db = self.__canonicalizeStg1Db(db)
        # Does the database have a parent with more data?
        if ('parent' in db):
            if (not self.quiet):
                print("  Loading parent database: {0}".format(db['parent']))
            # Load parents recursively (a parent may itself have a parent).
            db_parent = jsondb(db['parent'], db_search_path, self.db_category,
                               self.quiet).db
            # Merge this database on top of its parent; child entries win.
            if (self.db_category == 'platform'):
                db = self.__mergeDbs(db_parent, db, 'module-ports-offered')
            elif (self.db_category == 'AFU'):
                db = self.__mergeDbs(db_parent, db, 'module-ports')
            else:
                self.__errorExit(
                    ("'parent' keys are not supported in {0} " +
                     "databases ({1})").format(self.db_category, json_fname))
        self.db = db
#
# Load a JSON file and detect include directives within it, replacing the
# include with the contents of the included JSON sub-file.
#
def __loadJsonDbWithIncludes(self, json_fname, parent_fname=None):
try:
f = open(json_fname)
except IOError:
if (parent_fname):
self.__errorExit(
"Failed to open file {0}, included by {1}".format(
json_fname, parent_fname))
else:
self.__errorExit("Failed to open file {0}".format(json_fname))
db = json.load(f)
f.close()
db = self.__replaceJsonIncludes(db, json_fname)
return db
#
# Do a recursive walk of a loaded JSON database, looking for dictionaries
# with the key "...". Treat the values of "..." keys as a relative path
# of another JSON file. The included JSON file replaces the contents of
# the "..." dictionary.
#
def __replaceJsonIncludes(self, db, json_fname):
if (not isinstance(db, dict) and not isinstance(db, list)):
return db
for k, v in db.items():
if (k == '...'):
path = os.path.join(os.path.dirname(json_fname), v)
return self.__loadJsonDbWithIncludes(path, json_fname)
elif isinstance(v, dict):
db[k] = self.__replaceJsonIncludes(v, json_fname)
elif isinstance(v, list):
for i, e in enumerate(v):
db[k][i] = self.__replaceJsonIncludes(e, json_fname)
return db
#
# Merge parent and child databases by overwriting parent
# fields with updates from the child.
#
# Note: for module-ports and module-ports-offered,
# the child completely overwrites an entry. Namely, for
# AFUs if both the parent and child have a local-memory
# class then the parent's local-memory descriptor is deleted
# and replaced with the child's. For platform databases,
# the same is true, but for class/interface pairs.
#
def __mergeDbs(self, db, db_child, module_port_key):
# Copy everything from the child that isn't a module ports.
# Ports are special. They will be checked by class.
for k in list(db_child.keys()):
if (k != module_port_key):
db[k] = db_child[k]
if (module_port_key not in db):
# No parent module ports
if (module_port_key in db_child):
db[module_port_key] = db_child[module_port_key]
elif (module_port_key in db_child):
# Both databases have module ports. Overwrite any parent entries
# with matching classes.
for k in list(db_child[module_port_key].keys()):
db[module_port_key][k] = db_child[module_port_key][k]
return db
#
# First canonicalization pass over a database. This pass runs before
# parent databases are imported, so many fields may be missing.
#
def __canonicalizeStg1Db(self, db):
if (not isinstance(db, dict)):
self.__errorExit("{0} interface JSON is not a dictionary!".format(
self.db_category))
fname = self.file_path
# Convert module ports lists to dictionaries.
for ports_key in ['module-ports', 'module-ports-offered']:
if (ports_key in db):
port_dict = dict()
for port in db[ports_key]:
# Module ports must be dictionaries
if (not isinstance(port, dict)):
self.__errorExit(
"{0} in {1} must be dictionaries ({2})".format(
ports_key, fname, port))
# Check for mandatory keys
for key in ['class', 'interface']:
if (key not in port):
self.__errorExit(
"module port {0} is missing {1} in {2}".format(
port, key, fname))
# For AFU module-ports the key is just the class, since
# classes must be unique. Platforms may offer more than
# one instance of a class, so their keys are
# class/instance.
k = port['class']
if (ports_key == 'module-ports-offered'):
k = k + '/' + port['interface']
# No duplicate keys allowed!
if k in port_dict:
self.__errorExit(
("multiple instances of module port key " +
"'{0}' in {1}").format(k, fname))
port_dict[k] = port
db[ports_key] = port_dict
return db
#
# Validate an interface database and add some default fields to
# avoid having to check whether they are present.
#
def __canonicalizeDb(self):
db = self.db
fname = self.file_path
# Differences between platform and AFU db
keys_expected = ['version', 'platform-name', 'module-ports-offered']
ports_key = 'module-ports-offered'
if (self.db_category == 'AFU'):
keys_expected = ['version', 'module-name', 'module-ports']
ports_key = 'module-ports'
for key in keys_expected:
if (key not in db):
self.__errorExit("{0} entry missing in {1}".format(key, fname))
if (db['version'] != 1):
self.__errorExit(
("Unsupported {0} interface dictionary version " +
"{1} ({2})").format(self.db_category, db['version'], fname))
# Add empty global list of preprocessor variables to define
# if not present.
if ('define' not in db):
db['define'] = []
# Make sure AFU has a 'platform-shim-module-name'
if (self.db_category == 'AFU'):
if ('platform-shim-module-name' not in db):
db['platform-shim-module-name'] = None
if (not db['platform-shim-module-name']):
# No platform shim supported for this top-level module class.
# Fake the shim name to keep the platform happy but record
# that there is no shim.
db['platform-shim-supported'] = False
db['platform-shim-module-name'] = db['module-name']
else:
db['platform-shim-supported'] = True
# Walk the module ports list
classes_seen = dict()
for port in list(db[ports_key].values()):
# Default optional is False
if ('optional' not in port):
port['optional'] = False
if (port['class'] in classes_seen):
if (self.db_category == 'AFU'):
# AFU's can have only a single instance of a class
self.__errorExit(
("multiple instances of module port class " +
"'{0}' in {1}").format(port['class'], fname))
| |
of the SMTP credential.
:param UpdateSmtpCredentialDetails update_smtp_credential_details: (required)
Request object for updating a SMTP credential.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.SmtpCredentialSummary`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/users/{userId}/smtpCredentials/{smtpCredentialId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_smtp_credential got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id,
"smtpCredentialId": smtp_credential_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_smtp_credential_details,
response_type="SmtpCredentialSummary")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_smtp_credential_details,
response_type="SmtpCredentialSummary")
def update_swift_password(self, user_id, swift_password_id, update_swift_password_details, **kwargs):
"""
**Deprecated. Use :func:`update_auth_token` instead.**
Updates the specified Swift password's description.
:param str user_id: (required)
The OCID of the user.
:param str swift_password_id: (required)
The OCID of the Swift password.
:param UpdateSwiftPasswordDetails update_swift_password_details: (required)
Request object for updating a Swift password.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.SwiftPassword`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/users/{userId}/swiftPasswords/{swiftPasswordId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_swift_password got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id,
"swiftPasswordId": <PASSWORD>_password_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_swift_password_details,
response_type="SwiftPassword")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_swift_password_details,
response_type="SwiftPassword")
def update_tag(self, tag_namespace_id, tag_name, update_tag_details, **kwargs):
"""
Updates the specified tag definition.
Setting `validator` determines the value type. Tags can use either a static value or a
list of possible values. Static values are entered by a user applying the tag to a resource.
Lists are created by you and the user must apply a value from the list. On update, any values
in a list that were previously set do not change, but new values must pass validation. Values
already applied to a resource do not change.
You cannot remove list values that appear in a TagDefault. To remove a list value that
appears in a TagDefault, first update the TagDefault to use a different value.
:param str tag_namespace_id: (required)
The OCID of the tag namespace.
:param str tag_name: (required)
The name of the tag.
:param UpdateTagDetails update_tag_details: (required)
Request object for updating a tag.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.Tag`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/tagNamespaces/{tagNamespaceId}/tags/{tagName}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_tag got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"tagNamespaceId": tag_namespace_id,
"tagName": tag_name
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_tag_details,
response_type="Tag")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_tag_details,
response_type="Tag")
def update_tag_default(self, tag_default_id, update_tag_default_details, **kwargs):
"""
Updates the specified tag default. If you specify that a value is required, a value is set
        during resource creation (either by the user creating the resource or another tag default).
If no value is set, resource creation is blocked.
* If the `isRequired` flag is set to \"true\", the value is set during resource creation.
* If the `isRequired` flag is set to \"false\", the value you enter is set during resource creation.
:param str tag_default_id: (required)
The OCID of the tag default.
:param UpdateTagDefaultDetails update_tag_default_details: (required)
Request object for updating a tag default.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If you need to contact Oracle about a
particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation | |
trainers=2)
self.run_params = {
'sync_mode': 'async',
'cpu_num': 2,
'num_threads': 2,
'slice_var_up': False,
'enable_dc_asgd': False,
'split_method': True,
'runtime_split_send_recv': True,
'geo_sgd': True,
'wait_port': True,
'use_hierarchical_allreduce': False,
'push_nums': 25
}
self.test_ctr_2ps_2tr_async_2thread_Fslice_Fdc_Tsm_Tsr_Tgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
self.run_params)
train_data_list1 = self.get_result(
self._model_file, update_method='pserver')
train_data_list2 = self.get_result(
self._model_file, update_method='pserver')
# 判断两个list输出是否为2
assert len(train_data_list1) == 2
assert len(train_data_list2) == 2
# 两个train的loss值存在微小差距
self.check_data(
train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
#loss值与预期相符
self.check_data(
train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    @run_by_freq(freq="MONTH")
    def test_ctr_2ps_2tr_async_2thread_Tslice_Tdc_Tsm_Tsr_Tgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_async_2thread_Tslice_Tdc_Tsm_Tsr_Tgeo_Twp_Fha_pn25."""
        # Re-initialize the harness: 2 parameter servers, 2 trainers.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        # NOTE(review): the method name encodes 'Tdc' but 'enable_dc_asgd'
        # below is False -- confirm which one is intended.
        self.run_params = {
            'sync_mode': 'async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': True,
            'enable_dc_asgd': False,
            'split_method': True,
            'runtime_split_send_recv': True,
            'geo_sgd': True,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring (as JSON) so the
        # external reporting harness can record the configuration.
        self.test_ctr_2ps_2tr_async_2thread_Tslice_Tdc_Tsm_Tsr_Tgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        # Run the same distributed training job twice.
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each run must yield exactly two result lists.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The two runs' losses must agree within tolerance.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline within tolerance.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    @run_by_freq(freq="MONTH")
    def test_ctr_2ps_2tr_async_2thread_Fslice_Tdc_Tsm_Tsr_Tgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_async_2thread_Fslice_Tdc_Tsm_Tsr_Tgeo_Twp_Fha_pn25."""
        # Re-initialize the harness: 2 parameter servers, 2 trainers.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        # NOTE(review): the method name encodes 'Tdc' but 'enable_dc_asgd'
        # below is False -- confirm which one is intended.
        self.run_params = {
            'sync_mode': 'async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': False,
            'enable_dc_asgd': False,
            'split_method': True,
            'runtime_split_send_recv': True,
            'geo_sgd': True,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring (as JSON) so the
        # external reporting harness can record the configuration.
        self.test_ctr_2ps_2tr_async_2thread_Fslice_Tdc_Tsm_Tsr_Tgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        # Run the same distributed training job twice.
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each run must yield exactly two result lists.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The two runs' losses must agree within tolerance.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline within tolerance.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    @run_by_freq(freq="MONTH")
    def test_ctr_2ps_2tr_async_2thread_Tslice_Fdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_async_2thread_Tslice_Fdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25."""
        # Re-initialize the harness: 2 parameter servers, 2 trainers.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        self.run_params = {
            'sync_mode': 'async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': True,
            'enable_dc_asgd': False,
            'split_method': False,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring (as JSON) so the
        # external reporting harness can record the configuration.
        self.test_ctr_2ps_2tr_async_2thread_Tslice_Fdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        # Run the same distributed training job twice.
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each run must yield exactly two result lists.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The two runs' losses must agree within tolerance.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline within tolerance.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    def test_ctr_2ps_2tr_async_2thread_Fslice_Fdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_async_2thread_Fslice_Fdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25."""
        # Re-initialize the harness: 2 parameter servers, 2 trainers.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        self.run_params = {
            'sync_mode': 'async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': False,
            'enable_dc_asgd': False,
            'split_method': False,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring (as JSON) so the
        # external reporting harness can record the configuration.
        self.test_ctr_2ps_2tr_async_2thread_Fslice_Fdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        # Run the same distributed training job twice.
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each run must yield exactly two result lists.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The two runs' losses must agree within tolerance.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline within tolerance.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    def test_ctr_2ps_2tr_async_2thread_Tslice_Tdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_async_2thread_Tslice_Tdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25."""
        # Re-initialize the harness: 2 parameter servers, 2 trainers.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        # NOTE(review): the method name encodes 'Tdc' but 'enable_dc_asgd'
        # below is False (making this run identical to the Fdc variant) --
        # confirm which one is intended.
        self.run_params = {
            'sync_mode': 'async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': True,
            'enable_dc_asgd': False,
            'split_method': False,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring (as JSON) so the
        # external reporting harness can record the configuration.
        self.test_ctr_2ps_2tr_async_2thread_Tslice_Tdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        # Run the same distributed training job twice.
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each run must yield exactly two result lists.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The two runs' losses must agree within tolerance.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline within tolerance.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    @run_by_freq(freq="MONTH")
    def test_ctr_2ps_2tr_async_2thread_Fslice_Tdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_async_2thread_Fslice_Tdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25."""
        # Re-initialize the harness: 2 parameter servers, 2 trainers.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        # NOTE(review): the method name encodes 'Tdc' but 'enable_dc_asgd'
        # below is False -- confirm which one is intended.
        self.run_params = {
            'sync_mode': 'async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': False,
            'enable_dc_asgd': False,
            'split_method': False,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring (as JSON) so the
        # external reporting harness can record the configuration.
        self.test_ctr_2ps_2tr_async_2thread_Fslice_Tdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        # Run the same distributed training job twice.
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each run must yield exactly two result lists.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The two runs' losses must agree within tolerance.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline within tolerance.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    def test_ctr_2ps_2tr_async_2thread_Tslice_Fdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_async_2thread_Tslice_Fdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25."""
        # Re-initialize the harness: 2 parameter servers, 2 trainers.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        self.run_params = {
            'sync_mode': 'async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': True,
            'enable_dc_asgd': False,
            'split_method': True,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring (as JSON) so the
        # external reporting harness can record the configuration.
        self.test_ctr_2ps_2tr_async_2thread_Tslice_Fdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        # Run the same distributed training job twice.
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each run must yield exactly two result lists.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The two runs' losses must agree within tolerance.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline within tolerance.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    @run_by_freq(freq="MONTH")
    def test_ctr_2ps_2tr_async_2thread_Fslice_Fdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_async_2thread_Fslice_Fdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25."""
        # Re-initialize the harness: 2 parameter servers, 2 trainers.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        self.run_params = {
            'sync_mode': 'async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': False,
            'enable_dc_asgd': False,
            'split_method': True,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring (as JSON) so the
        # external reporting harness can record the configuration.
        self.test_ctr_2ps_2tr_async_2thread_Fslice_Fdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        # Run the same distributed training job twice.
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each run must yield exactly two result lists.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The two runs' losses must agree within tolerance.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline within tolerance.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    def test_ctr_2ps_2tr_async_2thread_Tslice_Tdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_async_2thread_Tslice_Tdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25."""
        # Re-initialize the harness: 2 parameter servers, 2 trainers.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        # NOTE(review): the method name encodes 'Tdc' but 'enable_dc_asgd'
        # below is False -- confirm which one is intended.
        self.run_params = {
            'sync_mode': 'async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': True,
            'enable_dc_asgd': False,
            'split_method': True,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring (as JSON) so the
        # external reporting harness can record the configuration.
        self.test_ctr_2ps_2tr_async_2thread_Tslice_Tdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        # Run the same distributed training job twice.
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each run must yield exactly two result lists.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The two runs' losses must agree within tolerance.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # NOTE(review): unlike the sibling tests, the single-CPU baseline
        # comparison is disabled here -- confirm this is intentional.
        # self.check_data(train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    def test_ctr_2ps_2tr_async_2thread_Fslice_Tdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_async_2thread_Fslice_Tdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25."""
        # Re-initialize the harness: 2 parameter servers, 2 trainers.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        # NOTE(review): the method name encodes 'Tdc' but 'enable_dc_asgd'
        # below is False -- confirm which one is intended.
        self.run_params = {
            'sync_mode': 'async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': False,
            'enable_dc_asgd': False,
            'split_method': True,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring (as JSON) so the
        # external reporting harness can record the configuration.
        self.test_ctr_2ps_2tr_async_2thread_Fslice_Tdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        # Run the same distributed training job twice.
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each run must yield exactly two result lists.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The two runs' losses must agree within tolerance.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline within tolerance.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    def test_ctr_2ps_2tr_async_2thread_Tslice_Fdc_Fsm_Tsr_Tgeo_Twp_Tha_pn25(
            self):
        """test_ctr_2ps_2tr_async_2thread_Tslice_Fdc_Fsm_Tsr_Tgeo_Twp_Tha_pn25."""
        # Re-initialize the harness: 2 parameter servers, 2 trainers.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        self.run_params = {
            'sync_mode': 'async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': True,
            'enable_dc_asgd': False,
            'split_method': False,
            'runtime_split_send_recv': True,
            'geo_sgd': True,
            'wait_port': True,
            'use_hierarchical_allreduce': True,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring (as JSON) so the
        # external reporting harness can record the configuration.
        self.test_ctr_2ps_2tr_async_2thread_Tslice_Fdc_Fsm_Tsr_Tgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        # Run the same distributed training job twice.
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each run must yield exactly two result lists.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The two runs' losses must agree within tolerance.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline within tolerance.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    def test_ctr_2ps_2tr_async_2thread_Fslice_Fdc_Fsm_Tsr_Tgeo_Twp_Tha_pn25(
            self):
        """test_ctr_2ps_2tr_async_2thread_Fslice_Fdc_Fsm_Tsr_Tgeo_Twp_Tha_pn25."""
        # Re-initialize the harness: 2 parameter servers, 2 trainers.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        self.run_params = {
            'sync_mode': 'async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': False,
            'enable_dc_asgd': False,
            'split_method': False,
            'runtime_split_send_recv': True,
            'geo_sgd': True,
            'wait_port': True,
            'use_hierarchical_allreduce': True,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring (as JSON) so the
        # external reporting harness can record the configuration.
        self.test_ctr_2ps_2tr_async_2thread_Fslice_Fdc_Fsm_Tsr_Tgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        # Run the same distributed training job twice.
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each run must yield exactly two result lists.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The two runs' losses must agree within tolerance.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline within tolerance.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
@run_by_freq(freq="MONTH")
def test_ctr_2ps_2tr_async_2thread_Tslice_Tdc_Fsm_Tsr_Tgeo_Twp_Tha_pn25(
self):
"""test_ctr_2ps_2tr_async_2thread_Tslice_Tdc_Fsm_Tsr_Tgeo_Twp_Tha_pn25."""
TestFleetBase.__init__(self, pservers=2, trainers=2)
self.run_params = {
'sync_mode': 'async',
'cpu_num': 2,
'num_threads': 2,
'slice_var_up': True,
'enable_dc_asgd': False,
'split_method': False,
'runtime_split_send_recv': True,
'geo_sgd': True,
'wait_port': True,
'use_hierarchical_allreduce': True,
'push_nums': 25
}
self.test_ctr_2ps_2tr_async_2thread_Tslice_Tdc_Fsm_Tsr_Tgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(
self.run_params)
train_data_list1 = self.get_result(
self._model_file, update_method='pserver')
train_data_list2 = self.get_result(
self._model_file, update_method='pserver')
# 判断两个list输出是否为2
assert len(train_data_list1) == 2
assert len(train_data_list2) == 2
# 两个train的loss值存在微小差距
self.check_data(
train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
#loss值与预期相符
self.check_data(
train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
def test_ctr_2ps_2tr_async_2thread_Fslice_Tdc_Fsm_Tsr_Tgeo_Twp_Tha_pn25(
self):
"""test_ctr_2ps_2tr_async_2thread_Fslice_Tdc_Fsm_Tsr_Tgeo_Twp_Tha_pn25."""
TestFleetBase.__init__(self, pservers=2, trainers=2)
self.run_params = {
'sync_mode': 'async',
'cpu_num': 2,
'num_threads': 2,
'slice_var_up': False,
'enable_dc_asgd': False,
'split_method': False,
'runtime_split_send_recv': True,
'geo_sgd': True,
'wait_port': True,
'use_hierarchical_allreduce': True,
'push_nums': 25
}
self.test_ctr_2ps_2tr_async_2thread_Fslice_Tdc_Fsm_Tsr_Tgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(
self.run_params)
train_data_list1 = self.get_result(
self._model_file, update_method='pserver')
train_data_list2 = self.get_result(
self._model_file, update_method='pserver')
# 判断两个list输出是否为2
assert len(train_data_list1) == 2
assert len(train_data_list2) == 2
# 两个train的loss值存在微小差距
self.check_data(
train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
#loss值与预期相符
self.check_data(
train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
@run_by_freq(freq="MONTH")
def test_ctr_2ps_2tr_async_2thread_Tslice_Fdc_Tsm_Tsr_Tgeo_Twp_Tha_pn25(
self):
"""test_ctr_2ps_2tr_async_2thread_Tslice_Fdc_Tsm_Tsr_Tgeo_Twp_Tha_pn25."""
TestFleetBase.__init__(self, pservers=2, trainers=2)
self.run_params = {
'sync_mode': 'async',
'cpu_num': 2,
'num_threads': 2,
'slice_var_up': True,
'enable_dc_asgd': False,
'split_method': True,
'runtime_split_send_recv': True,
'geo_sgd': True,
'wait_port': True,
'use_hierarchical_allreduce': True,
'push_nums': 25
}
self.test_ctr_2ps_2tr_async_2thread_Tslice_Fdc_Tsm_Tsr_Tgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(
self.run_params)
train_data_list1 = self.get_result(
self._model_file, update_method='pserver')
train_data_list2 = self.get_result(
self._model_file, update_method='pserver')
# 判断两个list输出是否为2
assert len(train_data_list1) == 2
assert len(train_data_list2) == 2
# 两个train的loss值存在微小差距
self.check_data(
train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
#loss值与预期相符
self.check_data(
train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
def test_ctr_2ps_2tr_async_2thread_Fslice_Fdc_Tsm_Tsr_Tgeo_Twp_Tha_pn25(
self):
"""test_ctr_2ps_2tr_async_2thread_Fslice_Fdc_Tsm_Tsr_Tgeo_Twp_Tha_pn25."""
TestFleetBase.__init__(self, pservers=2, trainers=2)
self.run_params = {
'sync_mode': 'async',
'cpu_num': 2,
'num_threads': 2,
'slice_var_up': False,
'enable_dc_asgd': False,
'split_method': True,
'runtime_split_send_recv': True,
'geo_sgd': True,
'wait_port': True,
'use_hierarchical_allreduce': True,
'push_nums': 25
}
self.test_ctr_2ps_2tr_async_2thread_Fslice_Fdc_Tsm_Tsr_Tgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(
self.run_params)
train_data_list1 = self.get_result(
self._model_file, update_method='pserver')
train_data_list2 = self.get_result(
self._model_file, update_method='pserver')
# 判断两个list输出是否为2
assert len(train_data_list1) == 2
assert len(train_data_list2) == 2
# 两个train的loss值存在微小差距
self.check_data(
train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
#loss值与预期相符
self.check_data(
train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
#def test_ctr_2ps_2tr_async_2thread_Tslice_Tdc_Tsm_Tsr_Tgeo_Twp_Tha_pn25(self):
# """
# test_ctr_2ps_2tr_async_2thread_Tslice_Tdc_Tsm_Tsr_Tgeo_Twp_Tha_pn25
# """
# TestFleetBase.__init__(self, pservers=2, trainers=2)
# self.run_params = {'sync_mode':'async','cpu_num':2,'num_threads':2,
# 'slice_var_up':True,'enable_dc_asgd':False, 'split_method':True,
# 'runtime_split_send_recv':True,'geo_sgd':True,'wait_port':True,
# 'use_hierarchical_allreduce':True,'push_nums':25}
# self.test_ctr_2ps_2tr_async_2thread_Tslice_Tdc_Tsm_Tsr_Tgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(self.run_params)
# train_data_list1 = self.get_result(self._model_file, update_method='pserver')
# train_data_list2 = self.get_result(self._model_file, update_method='pserver')
# # 判断两个list输出是否为2
# assert len(train_data_list1) == 2
# assert len(train_data_list2) == 2
# # 两个train的loss值存在微小差距
# self.check_data(train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
# #loss值与预期相符
# self.check_data(train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
def test_ctr_2ps_2tr_async_2thread_Fslice_Tdc_Tsm_Tsr_Tgeo_Twp_Tha_pn25(
self):
"""test_ctr_2ps_2tr_async_2thread_Fslice_Tdc_Tsm_Tsr_Tgeo_Twp_Tha_pn25."""
TestFleetBase.__init__(self, pservers=2, trainers=2)
self.run_params = {
'sync_mode': 'async',
'cpu_num': 2,
'num_threads': 2,
'slice_var_up': False,
'enable_dc_asgd': False,
'split_method': True,
'runtime_split_send_recv': True,
'geo_sgd': True,
'wait_port': True,
'use_hierarchical_allreduce': True,
'push_nums': 25
}
self.test_ctr_2ps_2tr_async_2thread_Fslice_Tdc_Tsm_Tsr_Tgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(
self.run_params)
train_data_list1 = self.get_result(
self._model_file, update_method='pserver')
train_data_list2 = self.get_result(
self._model_file, update_method='pserver')
# 判断两个list输出是否为2
assert len(train_data_list1) == 2
assert len(train_data_list2) == 2
# 两个train的loss值存在微小差距
self.check_data(
| |
"GMP",
"RKSW" : "",
"FMCZ" : "DZA",
"FMCV" : "AJN",
"GOGS" : "CSK",
"YSHK" : "MJK",
"FMCN" : "YVA",
"FMCH" : "HAH",
"FMCI" : "NWA",
"GOGG" : "ZIG",
"DNIM" : "QOW",
"DNIL" : "ILR",
"DNIB" : "IBA",
"ZSCN" : "KHN",
"ZSCG" : "CZX",
"KPLN" : "PLN",
"BIKR" : "SAK",
"LRCV" : "CRA",
"OAUZ" : "UND",
"BIKF" : "KEF",
"SKEJ" : "EJA",
"LRCS" : "CSB",
"HTZA" : "ZNZ",
"YNRV" : "RVT",
"EHVB" : "LID",
"WBSB" : "BWN",
"SERO" : "",
"LXGB" : "GIB",
"MMIA" : "CLQ",
"SBFI" : "IGU",
"MMIM" : "ISJ",
"SBFL" : "FLN",
"MMIO" : "SLW",
"SBFN" : "FEN",
"TAPA" : "ANU",
"FAEA" : "ELL",
"SBFU" : "",
"MMIT" : "",
"HLZA" : "",
"EFTP" : "TMP",
"EFTS" : "",
"SBFZ" : "FOR",
"EFTU" : "TKU",
"MKJS" : "MBJ",
"MKJP" : "KIN",
"FAEO" : "",
"YROM" : "RMA",
"FWLK" : "LIX",
"SNBR" : "BRA",
"KDRT" : "DRT",
"FNGI" : "VPE",
"KDRI" : "DRI",
"KDRO" : "DRO",
"KDRM" : "DRM",
"KA39" : "A39",
"FVBM" : "",
"YUCX" : "",
"ZPDQ" : "DIG",
"KGVL" : "GVL",
"VEGT" : "GAU",
"UTTT" : "TAS",
"KGVQ" : "GVQ",
"GFBO" : "KBS",
"GFBN" : "BTE",
"KGVT" : "GVT",
"OMAL" : "AAN",
"EKTD" : "",
"EKTS" : "TED",
"OPMJ" : "MJD",
"CYZX" : "YZX",
"OPMI" : "",
"CYZR" : "YZR",
"CYZP" : "YZP",
"OPMA" : "",
"CYZV" : "YZV",
"CYZW" : "YZW",
"CYZT" : "YZT",
"CYZU" : "YZU",
"CYZH" : "YZH",
"OPMR" : "",
"CYZF" : "YZF",
"CYZG" : "YZG",
"CYZD" : "YZD",
"CYZE" : "YZE",
"LTBE" : "BTZ",
"SUSO" : "STY",
"LDLO" : "LSZ",
"SCTN" : "WCH",
"ZPBS" : "BSD",
"KLHM" : "LHM",
"KIDL" : "IDL",
"KLHX" : "LHX",
"KAHN" : "AHN",
"UHOO" : "OHO",
"KLHW" : "",
"VOSM" : "",
"KM94" : "M94",
"VEJS" : "IXW",
"VEJT" : "JRH",
"VEJH" : "",
"FVWN" : "WKM",
"GSUM" : "",
"K5B2" : "5B2",
"VTCN" : "NNT",
"KYUM" : "YUM",
"SADF" : "",
"HSOB" : "EBD",
"SADJ" : "",
"SADL" : "LPG",
"SADM" : "",
"HMCY" : "",
"SADP" : "",
"EGJJ" : "JER",
"VNPK" : "PKR",
"VNPL" : "PPL",
"VTCH" : "HGN",
"ENRK" : "",
"LPOV" : "",
"YCUN" : "",
"ATWN" : "",
"LEST" : "SCQ",
"LESU" : "LEU",
"LESA" : "SLM",
"LESB" : "",
"LESL" : "",
"LESO" : "EAS",
"LESJ" : "",
"GUKU" : "KSI",
"KTKI" : "TKI",
"KGST" : "XEG",
"KGSP" : "GSP",
"GCTS" : "TFS",
"YBIE" : "BEU",
"LFSX" : "",
"LFSR" : "RHE",
"LFSQ" : "BOR",
"LFSP" : "",
"LFST" : "SXB",
"LFSI" : "",
"LFSH" : "",
"LFSO" : "",
"LFSN" : "ENC",
"LFSM" : "",
"LFSL" : "BVE",
"LFSC" : "",
"LFSB" : "MLH",
"LFSG" : "EPL",
"LFSF" : "MZM",
"LFSD" : "DIJ",
"HEMK" : "HMB",
"HEMM" : "MUH",
"HEMA" : "RMF",
"UKCW" : "VSG",
"SVJC" : "LSP",
"UKCM" : "MPW",
"UKCC" : "DOK",
"SVJM" : "",
"SPZA" : "NZA",
"SPZO" : "CUZ",
"HEAT" : "ATZ",
"YELD" : "ELC",
"HAAX" : "AXU",
"KPWK" : "PWK",
"KPWM" : "PWM",
"HAAL" : "",
"HAAM" : "AMH",
"HAAB" : "ADD",
"KPWT" : "PWT",
"EGUB" : "",
"ENOL" : "OLA",
"EGUN" : "MHZ",
"EGUL" : "",
"EGUW" : "",
"ENOV" : "HOV",
"EGUY" : "",
"KNUC" : "",
"KHIB" : "HIB",
"KHIF" : "HIF",
"OEKK" : "HBT",
"KNUW" : "NUW",
"KNUQ" : "NUQ",
"KCWI" : "CWI",
"PSKA" : "KIA",
"FSSB" : "BDI",
"FSSC" : "",
"ZJSY" : "SYX",
"CYQB" : "YQB",
"KNLC" : "NLC",
"NWWD" : "KNQ",
"NWWE" : "ILP",
"KDMA" : "DMA",
"NWWA" : "TGJ",
"NWWC" : "BMY",
"NWWL" : "LIF",
"NWWM" : "GEA",
"MDPC" : "PUJ",
"XUDS" : "",
"NWWK" : "KOC",
"NWWU" : "TOU",
"NWWV" : "UVE",
"NWWW" : "NOU",
"EBFS" : "",
"NWWR" : "MEE",
"GGOV" : "OXB",
"MDPP" : "POP",
"EDTY" : "",
"EDTK" : "",
"EDTN" : "",
"EDTM" : "",
"EDTL" : "LHA",
"EDTG" : "",
"EDTF" : "",
"EDTD" : "ZQL",
"GFYE" : "WYE",
"UEVD" : "",
"LHOY" : "",
"MGQC" : "AQB",
"PTKK" : "TKK",
"ZSAM" : "XMN",
"YMTG" : "MGB",
"ZSAQ" : "AQG",
"YMTI" : "ONG",
"KMIC" : "MIC",
"KMIB" : "MIB",
"KMIA" : "MIA",
"RCLG" : "TXG",
"NIUE" : "IUE",
"EBCV" : "",
"LGPL" : "JTY",
"SPEP" : "",
"SPEQ" : "",
"SPEO" : "CHM",
"LGPZ" : "PVK",
"SOGS" : "",
"FYTM" : "TSB",
"SKCZ" : "CZU",
"SKCU" : "CAQ",
"BIES" : "BIE",
"SKCO" : "TCO",
"SKCL" : "CLO",
"SKCC" : "CUC",
"SKCA" : "CPB",
"BIEG" : "EGS",
"KEVV" : "EVV",
"KEVW" : "EVW",
"SKCD" : "COG",
"FZMA" : "BKY",
"MMGM" : "GYM",
"MMGL" : "GDL",
"MKTP" : "KTP",
"FAGY" : "",
"FAGT" : "",
"FAGR" : "",
"MPEJ" : "ONX",
"FAGM" : "QRA",
"HLTD" : "LTD",
"FAGG" : "GRJ",
"SBHT" : "ATM",
"MMGR" : "GUB",
"NGTS" : "TSU",
"NGTR" : "AIS",
"LIRZ" : "PEG",
"LIRS" : "GRS",
"LIRQ" : "FLR",
"LIRP" : "PSA",
"LIRV" : "",
"LIRU" : "",
"SGPJ" : "PJC",
"LIRJ" : "EBA",
"LIRI" : "QSR",
"KAEX" : "AEX",
"LIRN" : "NAP",
"LIRM" : "",
"LIRL" : "QLT",
"KDPA" : "DPA",
"LIRA" : "CIA",
"LIRG" : "",
"LIRF" : "FCO",
"LIRE" : "",
"CFJ2" : "",
"KA50" : "A50",
"DIMN" : "MJC",
"FQPB" : "POL",
"KGTU" : "GTU",
"KGTR" : "GTR",
"KGTF" : "GTF",
"KGTB" : "GTB",
"NGTB" : "AEA",
"EKVJ" : "STA",
"KCKC" : "GRM",
"EKVH" : "",
"LKLN" : "",
"EKVG" : "FAE",
"EKVD" : "",
"OPKC" : "KHI",
"OPKD" : "HDD",
"OPKH" : "KDD",
"VIPT" : "PGH",
"VIPL" : "",
"VIPK" : "IXP",
"YSWG" : "WGA",
"KLVS" : "LVS",
"KAJR" : "",
"KLVM" : "LVM",
"KUNI" : "",
"KLVK" : "LVK",
"NSFA" : "APW",
"NSFI" : "FGI",
"NSFQ" : "FTI",
"VYXG" : "",
"YYND" : "YUE",
"LEUT" : "LES",
"VLHS" : "OUI",
"RCYU" : "HUN",
"KSVC" : "SVC",
"FNLK" : "LBZ",
"VNRC" : "RHP",
"VAKP" : "KLH",
"VNRT" : "RUM",
"KDUC" : "DUC",
"LJSG" : "",
"SMBN" : "ABN",
"PWAK" : "AWK",
"MUKW" : "",
"CLTN" : "",
"SLZI" : "",
"SSCK" : "CCI",
"YBOU" : "BQL",
"YBOI" : "GIC",
"YBOK" : "OKY",
"ZBXH" : "XIL",
"LKSZ" : "LKS",
"MRFI" : "",
"LFQA" : "",
"LFQC" : "",
"LFQB" : "QYR",
"LFQE" : "",
"KK83" : "K83",
"LFQG" : "NVS",
"LFQF" : "",
"LFQI" : "",
"LFQJ" : "",
"LFQM" : "",
"LFQQ" : "LIL",
"LFQP" : "",
"LFQT" : "",
"LFQW" : "",
"LFQV" : "",
"UAAA" : "ALA",
"KIWA" : "AZA",
"KTIK" : "TIK",
"KTIW" : "TIW",
"KIWS" : "IWS",
"EHKD" : "DHR",
"ZGHA" : "CSX",
"KPUW" : "PUW",
"KPUB" : "PUB",
"KPUC" : "PUC",
"MHPL" : "PEU",
"ENML" : "MOL",
"EGWC" : "",
"HAGM" : "GMB",
"HAGN" : "GDQ",
"HAGO" : "GDE",
"HAGR" : "GOR",
"HTAR" : "ARK",
"EGWU" : "NHT",
"ENMS" : "MJF",
"KHOP" : "HOP",
"KHOT" : "HOT",
"KHOU" : "HOU",
"KHON" : "HON",
"KHOB" : "HOB",
"FAZR" : "",
"KJKA" : "JKA",
"OIIK" : "",
"OIII" : "THR",
"NCAI" : "AIT",
"OIIM" : "",
"OIIC" : "",
"OIIA" : "",
"SKLM" : "",
"OIIG" : "",
"OIIE" : "IKA",
"OIID" : "",
"KDKX" : "DKX",
"FZJH" : "IRP",
"NCAT" : "AIU",
"EPBY" : "BZG",
"VDPP" : "PNH",
"EPBA" : "",
"EPBC" : "",
"PACV" : "CDV",
"VTST" : "TST",
"EDVE" : "BWE",
"VTSR" : "",
"PACR" : "IRC",
"KBQK" : "BQK",
"EDVI" : "",
"EDVH" : "",
"EDVK" : "KSF",
"EDVM" : "",
"PACY" : "CYT",
"PACZ" : "CZF",
"PACD" : "CDB",
"VTSF" : "NST",
"VTSE" : "CJM",
"EDVR" : "",
"VTSC" : "NAW",
"VTSB" : "URT",
"VTSA" : "",
"PACL" : "Z84",
"VTSN" : "",
"VTSM" : "USM",
"VTSK" : "PAN",
"PACI" : "CIK",
"VTSH" : "",
"ZSOF" : "HFE",
"KMKL" : "MKL",
"KMKO" : "MKO",
"KMKC" : "MKC",
"KMKE" : "MKE",
"KMKG" : "MKG",
"KMKY" : "MRK",
"USSK" : "",
"LTCT" : "IGD",
"BUCH" : "BUH",
"LGRP" | |
== (4, 4, 3)
v1 = heatmaps_drawn[0, 1]
v2 = heatmaps_drawn[0, 0]
v3 = heatmaps_drawn[1, 1]
for y, x in [(0, 1), (0, 2), (1, 0), (1, 3), (2, 0), (2, 3), (3, 1), (3, 2)]:
assert np.allclose(heatmaps_drawn[y, x], v1)
for y, x in [(0, 0), (0, 3), (3, 0), (3, 3)]:
assert np.allclose(heatmaps_drawn[y, x], v2)
for y, x in [(1, 1), (1, 2), (2, 1), (2, 2)]:
assert np.allclose(heatmaps_drawn[y, x], v3)
# size differs from heatmap array size
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
heatmaps_drawn = heatmaps.draw(size=(4, 4))[0]
assert heatmaps_drawn.shape == (4, 4, 3)
v1 = heatmaps_drawn[0, 0]
v2 = heatmaps_drawn[0, -1]
for y in range(4):
for x in range(2):
assert np.allclose(heatmaps_drawn[y, x], v1)
for y in range(4):
for x in range(2, 4):
assert np.allclose(heatmaps_drawn[y, x], v2)
def test_HeatmapsOnImage_draw_on_image():
    """Blend a 2x2 heatmap onto an image, checking both resize directions."""
    arr = np.float32([
        [0.0, 1.0],
        [0.0, 1.0]
    ])
    hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
    # Case 1: the heatmap is resized up to the 4x4 image (default resize).
    img = np.uint8([
        [0, 0, 0, 255],
        [0, 0, 0, 255],
        [0, 0, 0, 255],
        [0, 0, 0, 255]
    ])
    img = np.tile(img[..., np.newaxis], (1, 1, 3))
    drawn = hm.draw_on_image(img, alpha=0.5, cmap=None)[0]
    assert drawn.shape == (4, 4, 3)
    assert np.all(drawn[0:4, 0:2, :] == 0)
    # Blended values may round to either neighbour.
    assert np.all(drawn[0:4, 2:3, :] == 128) or np.all(drawn[0:4, 2:3, :] == 127)
    assert np.all(drawn[0:4, 3:4, :] == 255) or np.all(drawn[0:4, 3:4, :] == 254)
    # Case 2: the image is resized down to the 2x2 heatmap (resize="image").
    img = np.uint8([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0]
    ])
    img = np.tile(img[..., np.newaxis], (1, 1, 3))
    drawn = hm.draw_on_image(img, alpha=0.5, resize="image", cmap=None)[0]
    assert drawn.shape == (2, 2, 3)
    assert np.all(drawn[0:2, 0, :] == 0)
    assert np.all(drawn[0:2, 1, :] == 128) or np.all(drawn[0:2, 1, :] == 127)
def test_HeatmapsOnImage_pad():
    """Pad a 2x2 heatmap on all four sides: default cval, custom cval, edge mode."""
    heatmaps_arr = np.float32([
        [0.0, 1.0],
        [0.0, 1.0]
    ])
    heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
    # Default padding fills with 0.0; original content ends up at rows 1-2,
    # cols 4-5 of the padded array.
    heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4)
    assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
    assert np.allclose(
        heatmaps_padded.arr_0to1[:, :, 0],
        np.float32([
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        ])
    )
    # Explicit constant value fills the border with 0.5 instead.
    heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4, cval=0.5)
    assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
    assert np.allclose(
        heatmaps_padded.arr_0to1[:, :, 0],
        np.float32([
            [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
            [0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5],
            [0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5],
            [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
            [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
            [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
        ])
    )
    # Edge mode replicates the nearest heatmap cell into the border.
    heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4, mode="edge")
    assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
    assert np.allclose(
        heatmaps_padded.arr_0to1[:, :, 0],
        np.float32([
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0]
        ])
    )
def test_HeatmapsOnImage_avg_pool():
    """2x2 average pooling halves each side; cells become block means."""
    arr = np.float32([
        [0.0, 0.0, 0.5, 1.0],
        [0.0, 0.0, 0.5, 1.0],
        [0.0, 0.0, 0.5, 1.0],
        [0.0, 0.0, 0.5, 1.0]
    ])
    hm = ia.HeatmapsOnImage(arr, shape=(4, 4, 3))
    pooled = hm.avg_pool(2)
    assert pooled.arr_0to1.shape == (2, 2, 1)
    # Right column: mean of (0.5, 1.0) per 2x2 block -> 0.75.
    expected = np.float32([[0.0, 0.75],
                           [0.0, 0.75]])
    assert np.allclose(pooled.arr_0to1[:, :, 0], expected)
def test_HeatmapsOnImage_max_pool():
    """2x2 max pooling halves each side; cells become block maxima."""
    arr = np.float32([
        [0.0, 0.0, 0.5, 1.0],
        [0.0, 0.0, 0.5, 1.0],
        [0.0, 0.0, 0.5, 1.0],
        [0.0, 0.0, 0.5, 1.0]
    ])
    hm = ia.HeatmapsOnImage(arr, shape=(4, 4, 3))
    pooled = hm.max_pool(2)
    assert pooled.arr_0to1.shape == (2, 2, 1)
    # Right column: max of (0.5, 1.0) per 2x2 block -> 1.0.
    expected = np.float32([[0.0, 1.0],
                           [0.0, 1.0]])
    assert np.allclose(pooled.arr_0to1[:, :, 0], expected)
def test_HeatmapsOnImage_scale():
    """Resize a (1, 2) heatmap via scale(), by target size and by factor."""
    # Scale to an explicit (height, width) target.
    arr = np.float32([
        [0.0, 1.0]
    ])
    hm = ia.HeatmapsOnImage(arr, shape=(4, 4, 3))
    scaled = hm.scale(size=(4, 4), interpolation="nearest")
    assert scaled.arr_0to1.shape == (4, 4, 1)
    assert scaled.arr_0to1.dtype.type == np.float32
    expected = np.float32([
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0]
    ])
    assert np.allclose(scaled.arr_0to1[:, :, 0], expected)
    # Scale by a factor: (1, 2) * 2.0 -> (2, 4).
    arr = np.float32([
        [0.0, 1.0]
    ])
    hm = ia.HeatmapsOnImage(arr, shape=(4, 4, 3))
    scaled = hm.scale(size=2.0, interpolation="nearest")
    assert scaled.arr_0to1.shape == (2, 4, 1)
    assert scaled.arr_0to1.dtype.type == np.float32
    expected = np.float32([
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0]
    ])
    assert np.allclose(scaled.arr_0to1[:, :, 0], expected)
def test_BatchLoader():
    """BatchLoader must deliver every generated batch and terminate cleanly.

    Exercised for 1 and 2 workers, both threaded and process-based, many
    times over to catch rare race conditions.
    """
    def _load_func():
        # Generator producing 20 trivial batches.
        for _ in sm.xrange(20):
            yield ia.Batch(images=np.zeros((2, 4, 4, 3), dtype=np.uint8))

    def _drain(loader, expected_count):
        # Pull batches off the loader's queue until it reports completion, or
        # a safety counter trips so a broken loader cannot hang the test.
        loaded = []
        counter = 0
        while (not loader.all_finished() or not loader.queue.empty()
               or len(loaded) < expected_count) and counter < 1000:
            try:
                loaded.append(loader.queue.get(timeout=0.001))
            except Exception:
                # queue.Empty on timeout -- keep polling.
                pass
            counter += 1
        return loaded, counter

    # TODO these loops somehow require a `or len(loaded) < 20*nb_workers` on Travis, but not
    # locally. (On Travis, usually one batch is missing, i.e. probably still in the queue.)
    # That shouldn't be neccessary due to loader.all_finished(), but something breaks here.
    # queue.close() works on Tavis py2, but not py3 as it raises an `OSError: handle is closed`.
    for nb_workers in [1, 2]:
        # repeat these tests many times to catch rarer race conditions
        for _ in sm.xrange(50):
            for threaded, kind in [(True, "threads"),
                                   (False, "background processes")]:
                loader = ia.BatchLoader(_load_func, queue_size=2,
                                        nb_workers=nb_workers,
                                        threaded=threaded)
                loaded, counter = _drain(loader, 20 * nb_workers)
                assert len(loaded) == 20 * nb_workers, (
                    "Expected %d to be loaded by %s, got %d for %d workers "
                    "at counter %d." % (20 * nb_workers, kind, len(loaded),
                                        nb_workers, counter))
                # FIX: the original asserted `loader.all_finished` -- the
                # bound method object, which is always truthy -- so the check
                # never actually ran. It must CALL the method.
                loader = ia.BatchLoader(_load_func, queue_size=200,
                                        nb_workers=nb_workers,
                                        threaded=threaded)
                loader.terminate()
                assert loader.all_finished()
def test_Noop():
    """iaa.Noop must leave images and keypoints completely untouched."""
    reseed()
    images = create_random_images((16, 70, 50, 3))
    keypoints = create_random_keypoints((16, 70, 50, 3), 4)
    aug = iaa.Noop()
    aug_det = aug.to_deterministic()
    # Both the stochastic and deterministic augmenters are identity ops
    # on images ...
    for augmenter in (aug, aug_det):
        observed = augmenter.augment_images(images)
        assert np.array_equal(observed, images)
    # ... and on keypoints.
    for augmenter in (aug, aug_det):
        observed = augmenter.augment_keypoints(keypoints)
        assert keypoints_equal(observed, keypoints)
    # Noop exposes no parameters.
    assert iaa.Noop().get_parameters() == []
def test_Lambda():
    """iaa.Lambda must route images, heatmaps and keypoints through the user callbacks."""
    reseed()
    base_img = np.array([[0, 0, 1],
                         [0, 0, 1],
                         [0, 1, 1]], dtype=np.uint8)
    base_img = base_img[:, :, np.newaxis]
    images = np.array([base_img])
    images_list = [base_img]
    # Expected image output: func_images below adds 1 everywhere.
    images_aug = images + 1
    images_aug_list = [image + 1 for image in images_list]
    heatmaps_arr = np.float32([[0.0, 0.0, 1.0],
                               [0.0, 0.0, 1.0],
                               [0.0, 1.0, 1.0]])
    # Expected heatmap output: func_heatmaps adds 0.5 to the top-left cell.
    heatmaps_arr_aug = np.float32([[0.5, 0.0, 1.0],
                                   [0.0, 0.0, 1.0],
                                   [0.0, 1.0, 1.0]])
    heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
    keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
                                      ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
    # Expected keypoint output: func_keypoints shifts x by +1 modulo 3.
    keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=2, y=1),
                                          ia.Keypoint(x=0, y=2)], shape=base_img.shape)]
    def func_images(images, random_state, parents, hooks):
        # Must handle both list-of-arrays and single-array inputs.
        if isinstance(images, list):
            images = [image + 1 for image in images]
        else:
            images = images + 1
        return images
    def func_heatmaps(heatmaps, random_state, parents, hooks):
        heatmaps[0].arr_0to1[0, 0] += 0.5
        return heatmaps
    def func_keypoints(keypoints_on_images, random_state, parents, hooks):
        for keypoints_on_image in keypoints_on_images:
            for kp in keypoints_on_image.keypoints:
                kp.x = (kp.x + 1) % 3
        return keypoints_on_images
    aug = iaa.Lambda(func_images, func_heatmaps, func_keypoints)
    aug_det = aug.to_deterministic()
    # check once that the augmenter can handle lists correctly
    observed = aug.augment_images(images_list)
    expected = images_aug_list
    assert array_equal_lists(observed, expected)
    observed = aug_det.augment_images(images_list)
    expected = images_aug_list
    assert array_equal_lists(observed, expected)
    # Repeat the array-input checks several times; Lambda is deterministic
    # here, so every pass must give identical results.
    for _ in sm.xrange(10):
        observed = aug.augment_images(images)
        expected = images_aug
        assert np.array_equal(observed, expected)
        observed = aug_det.augment_images(images)
        expected = images_aug
        assert np.array_equal(observed, expected)
        observed = aug.augment_heatmaps([heatmaps])[0]
        assert observed.shape == (3, 3, 3)
        assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
        assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
        assert np.allclose(observed.get_arr(), heatmaps_arr_aug)
        observed = aug_det.augment_heatmaps([heatmaps])[0]
        assert observed.shape == (3, 3, 3)
        assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
        assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
        assert np.allclose(observed.get_arr(), heatmaps_arr_aug)
        observed = aug.augment_keypoints(keypoints)
        expected = keypoints_aug
        assert keypoints_equal(observed, expected)
        observed = aug_det.augment_keypoints(keypoints)
        expected = keypoints_aug
        assert keypoints_equal(observed, expected)
def test_AssertLambda():
reseed()
base_img = np.array([[0, 0, 1],
[0, 0, 1],
| |
access_type='online',
response_type='token'
)
authorize_url = flow.step1_get_authorize_url()
parsed = urllib.parse.urlparse(authorize_url)
q = urllib.parse.parse_qs(parsed[4])
self.assertEqual('client_id+1', q['client_id'][0])
self.assertEqual('token', q['response_type'][0])
self.assertEqual('foo', q['scope'][0])
self.assertEqual(client.OOB_CALLBACK_URN, q['redirect_uri'][0])
self.assertEqual('online', q['access_type'][0])
def test__oauth2_web_server_flow_params(self):
params = client._oauth2_web_server_flow_params({})
self.assertEqual(params['access_type'], 'offline')
self.assertEqual(params['response_type'], 'code')
params = client._oauth2_web_server_flow_params({
'approval_prompt': 'force'})
self.assertEqual(params['prompt'], 'consent')
self.assertNotIn('approval_prompt', params)
params = client._oauth2_web_server_flow_params({
'approval_prompt': 'other'})
self.assertEqual(params['approval_prompt'], 'other')
    @mock.patch('oauth2client.client.logger')
    def test_step1_get_authorize_url_redirect_override(self, logger):
        """Overriding redirect_uri in step1 updates the flow and warns once."""
        flow = client.OAuth2WebServerFlow('client_id+1', scope='foo',
                                          redirect_uri=client.OOB_CALLBACK_URN)
        alt_redirect = 'foo:bar'
        self.assertEqual(flow.redirect_uri, client.OOB_CALLBACK_URN)
        result = flow.step1_get_authorize_url(redirect_uri=alt_redirect)
        # Make sure the redirect value was updated.
        self.assertEqual(flow.redirect_uri, alt_redirect)
        # Expected query string: flow defaults plus the overridden redirect.
        query_params = {
            'client_id': flow.client_id,
            'redirect_uri': alt_redirect,
            'scope': flow.scope,
            'access_type': 'offline',
            'response_type': 'code',
        }
        expected = client._update_query_params(flow.auth_uri, query_params)
        assertUrisEqual(self, expected, result)
        # Check stubs.
        # Supplying redirect_uri here is a deprecated path; exactly one
        # warning must have been logged.
        self.assertEqual(logger.warning.call_count, 1)
def test_step1_get_authorize_url_without_redirect(self):
flow = client.OAuth2WebServerFlow('client_id+1', scope='foo',
redirect_uri=None)
with self.assertRaises(ValueError):
flow.step1_get_authorize_url(redirect_uri=None)
    def test_step1_get_authorize_url_without_login_hint(self):
        # NOTE(review): despite the name, this test PASSES a login_hint and
        # verifies it is propagated into the authorize URL -- the name looks
        # inverted (likely should be "..._with_login_hint"); confirm before
        # renaming, since test discovery keys on the name.
        login_hint = 'There are wascally wabbits nearby'
        flow = client.OAuth2WebServerFlow('client_id+1', scope='foo',
                                          redirect_uri=client.OOB_CALLBACK_URN,
                                          login_hint=login_hint)
        result = flow.step1_get_authorize_url()
        # Expected query string: all defaults plus the login_hint.
        query_params = {
            'client_id': flow.client_id,
            'login_hint': login_hint,
            'redirect_uri': client.OOB_CALLBACK_URN,
            'scope': flow.scope,
            'access_type': 'offline',
            'response_type': 'code',
        }
        expected = client._update_query_params(flow.auth_uri, query_params)
        assertUrisEqual(self, expected, result)
def test_step1_get_device_and_user_codes_wo_device_uri(self):
flow = client.OAuth2WebServerFlow('CID', scope='foo', device_uri=None)
with self.assertRaises(ValueError):
flow.step1_get_device_and_user_codes()
    def _step1_get_device_and_user_codes_helper(
            self, extra_headers=None, user_agent=None, default_http=False,
            content=None):
        """Drive step1_get_device_and_user_codes against a mocked HTTP layer.

        Args:
            extra_headers: optional dict merged into the expected request
                headers (e.g. a user-agent header).
            user_agent: optional user agent configured on the flow.
            default_http: when True, patch httplib2.Http so the flow builds
                its own HTTP client instead of receiving one.
            content: optional raw response body; defaults to a valid JSON
                device-code payload.
        """
        flow = client.OAuth2WebServerFlow('CID', scope='foo',
                                          user_agent=user_agent)
        # Canned device-flow values returned by the fake server.
        device_code = 'bfc06756-062e-430f-9f0f-460ca44724e5'
        user_code = '5faf2780-fc83-11e5-9bc2-00c2c63e5792'
        ver_url = 'http://foo.bar'
        if content is None:
            content = json.dumps({
                'device_code': device_code,
                'user_code': user_code,
                'verification_url': ver_url,
            })
        http = HttpMockSequence([
            ({'status': http_client.OK}, content),
        ])
        if default_http:
            with mock.patch('httplib2.Http', return_value=http):
                result = flow.step1_get_device_and_user_codes()
        else:
            result = flow.step1_get_device_and_user_codes(http=http)
        # interval and user_code_expiry are absent from the payload -> None.
        expected = client.DeviceFlowInfo(
            device_code, user_code, None, ver_url, None)
        self.assertEqual(result, expected)
        # Exactly one request, form-encoded, aimed at the device endpoint.
        self.assertEqual(len(http.requests), 1)
        self.assertEqual(
            http.requests[0]['uri'], oauth2client.GOOGLE_DEVICE_URI)
        body = http.requests[0]['body']
        self.assertEqual(urllib.parse.parse_qs(body),
                         {'client_id': [flow.client_id],
                          'scope': [flow.scope]})
        headers = {'content-type': 'application/x-www-form-urlencoded'}
        if extra_headers is not None:
            headers.update(extra_headers)
        self.assertEqual(http.requests[0]['headers'], headers)
def test_step1_get_device_and_user_codes(self):
self._step1_get_device_and_user_codes_helper()
def test_step1_get_device_and_user_codes_w_user_agent(self):
user_agent = 'spiderman'
extra_headers = {'user-agent': user_agent}
self._step1_get_device_and_user_codes_helper(
user_agent=user_agent, extra_headers=extra_headers)
def test_step1_get_device_and_user_codes_w_default_http(self):
self._step1_get_device_and_user_codes_helper(default_http=True)
def test_step1_get_device_and_user_codes_bad_payload(self):
non_json_content = b'{'
with self.assertRaises(client.OAuth2DeviceCodeError):
self._step1_get_device_and_user_codes_helper(
content=non_json_content)
def _step1_get_device_and_user_codes_fail_helper(self, status,
content, error_msg):
flow = client.OAuth2WebServerFlow('CID', scope='foo')
http = HttpMockSequence([
({'status': status}, content),
])
with self.assertRaises(client.OAuth2DeviceCodeError) as exc_manager:
flow.step1_get_device_and_user_codes(http=http)
self.assertEqual(exc_manager.exception.args, (error_msg,))
def test_step1_get_device_and_user_codes_non_json_failure(self):
status = int(http_client.BAD_REQUEST)
content = 'Nope not JSON.'
error_msg = 'Invalid response {0}.'.format(status)
self._step1_get_device_and_user_codes_fail_helper(status, content,
error_msg)
def test_step1_get_device_and_user_codes_basic_failure(self):
status = int(http_client.INTERNAL_SERVER_ERROR)
content = b'{}'
error_msg = 'Invalid response {0}.'.format(status)
self._step1_get_device_and_user_codes_fail_helper(status, content,
error_msg)
def test_step1_get_device_and_user_codes_failure_w_json_error(self):
status = int(http_client.BAD_GATEWAY)
base_error = 'ZOMG user codes failure.'
content = json.dumps({'error': base_error})
error_msg = 'Invalid response {0}. Error: {1}'.format(status,
base_error)
self._step1_get_device_and_user_codes_fail_helper(status, content,
error_msg)
def test_step2_exchange_no_input(self):
flow = client.OAuth2WebServerFlow('client_id+1', scope='foo')
with self.assertRaises(ValueError):
flow.step2_exchange()
def test_step2_exchange_code_and_device_flow(self):
flow = client.OAuth2WebServerFlow('client_id+1', scope='foo')
with self.assertRaises(ValueError):
flow.step2_exchange(code='code', device_flow_info='dfi')
def test_scope_is_required(self):
with self.assertRaises(TypeError):
client.OAuth2WebServerFlow('client_id+1')
def test_exchange_failure(self):
http = HttpMockSequence([
({'status': '400'}, b'{"error":"invalid_request"}'),
])
with self.assertRaises(client.FlowExchangeError):
self.flow.step2_exchange(code='some random code', http=http)
def test_urlencoded_exchange_failure(self):
http = HttpMockSequence([
({'status': '400'}, b'error=invalid_request'),
])
with self.assertRaisesRegexp(client.FlowExchangeError,
'invalid_request'):
self.flow.step2_exchange(code='some random code', http=http)
def test_exchange_failure_with_json_error(self):
# Some providers have 'error' attribute as a JSON object
# in place of regular string.
# This test makes sure no strange object-to-string coversion
# exceptions are being raised instead of FlowExchangeError.
payload = (b'{'
b' "error": {'
b' "message": "Error validating verification code.",'
b' "type": "OAuthException"'
b' }'
b'}')
http = HttpMockSequence([({'status': '400'}, payload)])
with self.assertRaises(client.FlowExchangeError):
self.flow.step2_exchange(code='some random code', http=http)
def _exchange_success_test_helper(self, code=None, device_flow_info=None):
payload = (b'{'
b' "access_token":"<PASSWORD>",'
b' "expires_in":3600,'
b' "refresh_token":"<PASSWORD>"'
b'}')
http = HttpMockSequence([({'status': '200'}, payload)])
credentials = self.flow.step2_exchange(
code=code, device_flow_info=device_flow_info, http=http)
self.assertEqual('SlAV32hkKG', credentials.access_token)
self.assertNotEqual(None, credentials.token_expiry)
self.assertEqual('8<PASSWORD>tZp8', credentials.refresh_token)
self.assertEqual('dummy_revoke_uri', credentials.revoke_uri)
self.assertEqual(set(['foo']), credentials.scopes)
def test_exchange_success(self):
self._exchange_success_test_helper(code='some random code')
def test_exchange_success_with_device_flow_info(self):
device_flow_info = client.DeviceFlowInfo(
'some random code', None, None, None, None)
self._exchange_success_test_helper(device_flow_info=device_flow_info)
def test_exchange_success_binary_code(self):
binary_code = b'some random code'
access_token = '<PASSWORD>'
expires_in = '3600'
refresh_token = '<PASSWORD>'
revoke_uri = 'dummy_revoke_uri'
payload = ('{'
' "access_token":"' + access_token + '",'
' "expires_in":' + expires_in + ','
' "refresh_token":"' + refresh_token + '"'
'}')
http = HttpMockSequence(
[({'status': '200'}, _helpers._to_bytes(payload))])
credentials = self.flow.step2_exchange(code=binary_code, http=http)
self.assertEqual(access_token, credentials.access_token)
self.assertIsNotNone(credentials.token_expiry)
self.assertEqual(refresh_token, credentials.refresh_token)
self.assertEqual(revoke_uri, credentials.revoke_uri)
self.assertEqual(set(['foo']), credentials.scopes)
def test_exchange_dictlike(self):
class FakeDict(object):
def __init__(self, d):
self.d = d
def __getitem__(self, name):
return self.d[name]
def __contains__(self, name):
return name in self.d
code = 'some random code'
not_a_dict = FakeDict({'code': code})
payload = (b'{'
b' "access_token":"<PASSWORD>hkKG",'
b' "expires_in":3600,'
b' "refresh_token":"<PASSWORD>"'
b'}')
http = HttpMockSequence([({'status': '200'}, payload)])
credentials = self.flow.step2_exchange(code=not_a_dict, http=http)
self.assertEqual('SlAV32hkKG', credentials.access_token)
self.assertNotEqual(None, credentials.token_expiry)
self.assertEqual('<PASSWORD>', credentials.refresh_token)
self.assertEqual('dummy_revoke_uri', credentials.revoke_uri)
self.assertEqual(set(['foo']), credentials.scopes)
request_code = urllib.parse.parse_qs(
http.requests[0]['body'])['code'][0]
self.assertEqual(code, request_code)
def test_exchange_using_authorization_header(self):
auth_header = 'Basic Y2xpZW50X2lkKzE6c2Vjexc_managerV0KzE=',
flow = client.OAuth2WebServerFlow(
client_id='client_id+1',
authorization_header=auth_header,
scope='foo',
redirect_uri=client.OOB_CALLBACK_URN,
user_agent='unittest-sample/1.0',
revoke_uri='dummy_revoke_uri',
)
http = HttpMockSequence([
({'status': '200'}, b'access_token=SlAV32hkKG'),
])
credentials = flow.step2_exchange(code='some random code', http=http)
self.assertEqual('SlAV32hkKG', credentials.access_token)
test_request = http.requests[0]
# Did we pass the Authorization header?
self.assertEqual(test_request['headers']['Authorization'], auth_header)
# Did we omit client_secret from POST body?
self.assertTrue('client_secret' not in test_request['body'])
def test_urlencoded_exchange_success(self):
http = HttpMockSequence([
({'status': '200'}, b'access_token=SlAV32hkKG&expires_in=3600'),
])
credentials = self.flow.step2_exchange(code='some random code',
http=http)
self.assertEqual('SlAV32hkKG', credentials.access_token)
self.assertNotEqual(None, credentials.token_expiry)
def test_urlencoded_expires_param(self):
http = HttpMockSequence([
# Note the 'expires=3600' where you'd normally
# have if named 'expires_in'
({'status': '200'}, b'access_token=Sl<PASSWORD>&expires=3600'),
])
credentials = self.flow.step2_exchange(code='some random code',
http=http)
self.assertNotEqual(None, credentials.token_expiry)
def test_exchange_no_expires_in(self):
payload = (b'{'
b' "access_token":"<PASSWORD>",'
b' "refresh_token":"<PASSWORD>"'
b'}')
http = HttpMockSequence([({'status': '200'}, payload)])
credentials = self.flow.step2_exchange(code='some random code',
http=http)
self.assertEqual(None, credentials.token_expiry)
def test_urlencoded_exchange_no_expires_in(self):
http = HttpMockSequence([
# This might be redundant but just to make sure
# urlencoded access_token gets parsed correctly
({'status': '200'}, b'access_token=<PASSWORD>'),
])
credentials = self.flow.step2_exchange(code='some random code',
http=http)
self.assertEqual(None, credentials.token_expiry)
def test_exchange_fails_if_no_code(self):
payload = (b'{'
b' "access_token":"<PASSWORD>",'
b' "refresh_token":"<PASSWORD>"'
b'}')
http = HttpMockSequence([({'status': '200'}, payload)])
code = {'error': 'thou shall not pass'}
with self.assertRaisesRegexp(
client.FlowExchangeError, 'shall not pass'):
self.flow.step2_exchange(code=code, http=http)
def test_exchange_id_token_fail(self):
payload = (b'{'
b' "access_token":"<PASSWORD>",'
b' "refresh_token":"<PASSWORD>",'
b' "id_token": "<PASSWORD>"'
b'}')
http = HttpMockSequence([({'status': '200'}, payload)])
with self.assertRaises(client.VerifyJwtTokenError):
self.flow.step2_exchange(code='some random code', http=http)
def test_exchange_id_token(self):
body = {'foo': 'bar'}
body_json = json.dumps(body).encode('ascii')
payload = base64.urlsafe_b64encode(body_json).strip(b'=')
jwt = (base64.urlsafe_b64encode(b'stuff') + b'.' + payload + b'.' +
base64.urlsafe_b64encode(b'signature'))
payload = (b'{'
b' "access_token":"<PASSWORD>",'
b' "refresh_token":"<PASSWORD>",'
b' "id_token": "' + jwt + b'"'
b'}')
http = HttpMockSequence([({'status': '200'}, payload)])
credentials = self.flow.step2_exchange(code='some random code',
http=http)
self.assertEqual(credentials.id_token, body)
class FlowFromCachedClientsecrets(unittest2.TestCase):
    """Tests for building OAuth2 flows from (possibly cached) client secrets."""
    def test_flow_from_clientsecrets_cached(self):
        """Secrets already present in the cache are used to build the flow."""
        cache_mock = CacheMock()
        load_and_cache('client_secrets.json', 'some_secrets', cache_mock)
        flow = client.flow_from_clientsecrets(
            'some_secrets', '', redirect_uri='oob', cache=cache_mock)
        self.assertEqual('foo_client_secret', flow.client_secret)
    @mock.patch('oauth2client.clientsecrets.loadfile')
    def _flow_from_clientsecrets_success_helper(self, loadfile_mock,
                                                device_uri=None,
                                                revoke_uri=None):
        """Shared success-path check; optionally forwards device/revoke URIs.

        ``loadfile_mock`` is injected by the decorator; ``device_uri`` and
        ``revoke_uri`` must be passed by keyword by the callers below.
        """
        client_type = clientsecrets.TYPE_WEB
        client_info = {
            'auth_uri': 'auth_uri',
            'token_uri': 'token_uri',
            'client_id': 'client_id',
            'client_secret': 'client_secret',
        }
        if revoke_uri is not None:
            client_info['revoke_uri'] = revoke_uri
        loadfile_mock.return_value = client_type, client_info
        # Sentinels: loadfile is mocked, so these are never dereferenced.
        filename = object()
        scope = ['baz']
        cache = object()
        if device_uri is not None:
            result = client.flow_from_clientsecrets(
                filename, scope, cache=cache, device_uri=device_uri)
            self.assertEqual(result.device_uri, device_uri)
        else:
            result = client.flow_from_clientsecrets(
                filename, scope, cache=cache)
        self.assertIsInstance(result, client.OAuth2WebServerFlow)
        loadfile_mock.assert_called_once_with(filename, cache=cache)
    def test_flow_from_clientsecrets_success(self):
        """Default success path: neither device_uri nor revoke_uri supplied."""
        self._flow_from_clientsecrets_success_helper()
    def test_flow_from_clientsecrets_success_w_device_uri(self):
        """Success path with an explicit device_uri."""
        device_uri = 'http://device.uri'
        self._flow_from_clientsecrets_success_helper(device_uri=device_uri)
    def test_flow_from_clientsecrets_success_w_revoke_uri(self):
        """Success path with a revoke_uri present in the client info."""
        revoke_uri = 'http://revoke.uri'
        self._flow_from_clientsecrets_success_helper(revoke_uri=revoke_uri)
    @mock.patch('oauth2client.clientsecrets.loadfile',
                side_effect=clientsecrets.InvalidClientSecretsError)
    def test_flow_from_clientsecrets_invalid(self, loadfile_mock):
        """Invalid secrets with message=None re-raise the loader error."""
        filename = object()
        cache = object()
        with self.assertRaises(clientsecrets.InvalidClientSecretsError):
            client.flow_from_clientsecrets(
                filename, None, cache=cache, message=None)
        loadfile_mock.assert_called_once_with(filename, cache=cache)
    @mock.patch('oauth2client.clientsecrets.loadfile',
                side_effect=clientsecrets.InvalidClientSecretsError)
    @mock.patch('sys.exit')
    def test_flow_from_clientsecrets_invalid_w_msg(self, sys_exit,
                                                   loadfile_mock):
        """With a message set, invalid secrets call sys.exit(message)."""
        # NOTE: stacked @mock.patch decorators apply bottom-up, so the
        # sys.exit mock is the first injected argument.
        filename = object()
        cache = object()
        message = 'hi mom'
        client.flow_from_clientsecrets(
            filename, None, cache=cache, message=message)
        sys_exit.assert_called_once_with(message)
        loadfile_mock.assert_called_once_with(filename, cache=cache)
    @mock.patch('oauth2client.clientsecrets.loadfile',
                side_effect=clientsecrets.InvalidClientSecretsError('foobar'))
    @mock.patch('sys.exit')
    def test_flow_from_clientsecrets_invalid_w_msg_and_text(self, sys_exit,
                                                            loadfile_mock):
        """The exit message combines the loader's error text with the caller's."""
        filename = object()
        cache = object()
        message = 'hi mom'
        expected = ('The client secrets were invalid: '
                    '\n{0}\n{1}'.format('foobar', 'hi mom'))
        client.flow_from_clientsecrets(
            filename, None, cache=cache, message=message)
        sys_exit.assert_called_once_with(expected)
        loadfile_mock.assert_called_once_with(filename, cache=cache)
    @mock.patch('oauth2client.clientsecrets.loadfile')
    def test_flow_from_clientsecrets_unknown_flow(self, loadfile_mock):
        """An unrecognized client type raises UnknownClientSecretsFlowError."""
        client_type = 'UNKNOWN'
        loadfile_mock.return_value = client_type, None
        filename = object()
        cache = object()
        err_msg = ('This OAuth 2.0 flow is unsupported: '
                   '{0!r}'.format(client_type))
        with self.assertRaisesRegexp(client.UnknownClientSecretsFlowError,
                                     err_msg):
            client.flow_from_clientsecrets(filename, None, cache=cache)
        loadfile_mock.assert_called_once_with(filename, cache=cache)
class CredentialsFromCodeTests(unittest2.TestCase):
    """Tests for exchanging a raw authorization code for credentials."""
    def setUp(self):
        # Common OAuth client fixture values shared by every test below.
        self.client_id = 'client_id_abc'
        self.client_secret = 'secret_use_code'
        self.scope = 'foo'
        self.code = '12345abcde'
        self.redirect_uri = 'postmessage'
    def test_exchange_code_for_token(self):
        """credentials_from_code returns credentials on a 200 JSON response."""
        token = '<KEY>'
        payload = json.dumps({'access_token': token, 'expires_in': 3600})
        http = HttpMockSequence([
            ({'status': '200'}, payload.encode('utf-8')),
        ])
        credentials = client.credentials_from_code(
            self.client_id, self.client_secret, self.scope,
            self.code, http=http, redirect_uri=self.redirect_uri)
        self.assertEqual(credentials.access_token, token)
        self.assertNotEqual(None, credentials.token_expiry)
        self.assertEqual(set(['foo']), credentials.scopes)
    def test_exchange_code_for_token_fail(self):
        """A 400 error response raises FlowExchangeError."""
        http = HttpMockSequence([
            ({'status': '400'}, b'{"error":"invalid_request"}'),
        ])
        with self.assertRaises(client.FlowExchangeError):
            client.credentials_from_code(
                self.client_id, self.client_secret, self.scope,
                self.code, http=http, redirect_uri=self.redirect_uri)
    def test_exchange_code_and_file_for_token(self):
        """Exchange using client secrets loaded from a file on disk."""
        payload = (b'{'
                   b' "access_token":"<KEY>",'
                   b' "expires_in":3600'
                   b'}')
        http = HttpMockSequence([({'status': '200'}, payload)])
        credentials = client.credentials_from_clientsecrets_and_code(
            datafile('client_secrets.json'), self.scope,
            self.code, http=http)
        self.assertEqual(credentials.access_token, 'asdfgh<PASSWORD>')
        self.assertNotEqual(None, credentials.token_expiry)
        self.assertEqual(set(['foo']), credentials.scopes)
    def test_exchange_code_and_cached_file_for_token(self):
        """Exchange using client secrets served from a cache."""
        http = HttpMockSequence([
            ({'status': '200'}, b'{ "access_token":"asdf<PASSWORD>"}'),
        ])
        cache_mock = CacheMock()
        load_and_cache('client_secrets.json', 'some_secrets', cache_mock)
        credentials = client.credentials_from_clientsecrets_and_code(
            'some_secrets', self.scope,
            self.code, http=http, cache=cache_mock)
        self.assertEqual(credentials.access_token, 'asdfgh<PASSWORD>')
        self.assertEqual(set(['foo']), credentials.scopes)
    def test_exchange_code_and_file_for_token_fail(self):
        """A 400 response during a file-based exchange raises FlowExchangeError."""
        http = HttpMockSequence([
            ({'status': '400'}, b'{"error":"invalid_request"}'),
        ])
        with self.assertRaises(client.FlowExchangeError):
            client.credentials_from_clientsecrets_and_code(
                datafile('client_secrets.json'), self.scope,
                self.code, http=http)
class Test__save_private_file(unittest2.TestCase):
| |
<gh_stars>10-100
"""Test structure prediction module"""
import itertools
import json
import logging
import os
import pickle
import unittest
from contextlib import contextmanager
from operator import itemgetter
from random import sample
import numpy as np
import pandas as pd
import pymatgen
from pandas.testing import assert_frame_equal, assert_series_equal
from pymatgen.analysis.structure_prediction.substitution_probability import SubstitutionProbability
import smact
from smact import Species
from smact.structure_prediction.database import StructureDB
from smact.structure_prediction.mutation import CationMutator
from smact.structure_prediction.prediction import StructurePredictor
from smact.structure_prediction.structure import SmactStructure
# Directory of static test fixture files, located next to this test module.
files_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "files")
# Pickled SmactStructure fixture used for round-trip comparisons.
TEST_STRUCT = os.path.join(files_dir, "test_struct")
# POSCAR text generated from the same structure as TEST_STRUCT.
TEST_POSCAR = os.path.join(files_dir, "test_poscar.txt")
# Serialized pymatgen Structure (JSON dict) for conversion tests.
TEST_PY_STRUCT = os.path.join(files_dir, "pymatgen_structure.json")
# Lambda substitution tables in JSON and CSV form for CationMutator tests.
TEST_LAMBDA_JSON = os.path.join(files_dir, "test_lambda_tab.json")
TEST_LAMBDA_CSV = os.path.join(files_dir, "test_lambda_tab.csv")
# Pickled Materials Project query results used by the database tests.
TEST_MP_DATA = os.path.join(files_dir, "mp_data")
# Database file and table name used by the predictor tests.
TEST_PREDICTOR_DB = os.path.join(files_dir, "test_predictor.db")
TEST_PREDICTOR_TABLE = "TEST"
def generate_test_structure(comp: str) -> bool:
    """Regenerate the pickled-structure and POSCAR fixtures for *comp*.

    Reads ``files/<comp>.txt``, pickles the resulting structure to
    ``TEST_STRUCT`` and writes its POSCAR representation to ``TEST_POSCAR``.
    """
    structure = SmactStructure.from_file(os.path.join(files_dir, f"{comp}.txt"))
    with open(TEST_STRUCT, 'wb') as pickle_file:
        pickle.dump(structure, pickle_file)
    with open(TEST_POSCAR, 'w') as poscar_file:
        poscar_file.write(structure.as_poscar())
    return True
@contextmanager
def ignore_warnings(logger: logging.Logger):
    """Temporarily raise *logger*'s level to ERROR, silencing warnings.

    Yields the previous effective level. BUG FIX: the original did not wrap
    the ``yield`` in try/finally, so an exception raised inside the managed
    block left the logger stuck at ERROR; the level is now always restored.
    (The original ``-> int`` annotation was also wrong for a generator-based
    context manager and has been dropped.)
    """
    log_lvl_buff = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)
    try:
        yield log_lvl_buff
    finally:
        # Restore the caller's level even if the managed block raised.
        logger.setLevel(log_lvl_buff)
class StructureTest(unittest.TestCase):
    """`SmactStructure` testing."""
    # Mapping of formula -> list of (symbol, charge, stoichiometry) tuples.
    TEST_SPECIES = {
        "CaTiO3": [('Ca', 2, 1), ('Ti', 4, 1), ('O', -2, 3)],
        "NaCl": [('Na', 1, 1), ('Cl', -1, 1)],
        "Fe": [('Fe', 0, 1)],
    }
    def assertStructAlmostEqual(self, s1: SmactStructure, s2: SmactStructure, places: int = 7):
        """Assert that two SmactStructures are almost equal.
        Almost equality dependent on how many decimal places the site coordinates
        are equal to.
        """
        # These attributes must match exactly, not just approximately.
        must_equal = ["species", "lattice_param"]
        for cond in must_equal:
            self.assertEqual(getattr(s1, cond), getattr(s2, cond))
        self.assertTrue(np.array_equal(s1.lattice_mat, s2.lattice_mat))
        self.assertEqual(list(s1.sites.keys()), list(s2.sites.keys()))
        # Site coordinates need only agree to `places` decimal places.
        for si, sj in zip(s1.sites, s2.sites):
            for c1, c2 in zip(si, sj):
                self.assertAlmostEqual(c1, c2, places=places)
    def test_as_poscar(self):
        """Test POSCAR generation."""
        for comp in self.TEST_SPECIES.keys():
            with self.subTest(comp=comp):
                comp_file = os.path.join(files_dir, f"{comp}.txt")
                with open(comp_file, "r") as f:
                    struct = SmactStructure.from_file(comp_file)
                    # Round trip: file -> structure -> POSCAR text.
                    self.assertEqual(struct.as_poscar(), f.read())
    @staticmethod
    def _gen_empty_structure(species):
        """Generate an empty set of arguments for `SmactStructure` testing."""
        lattice_mat = np.array([[0] * 3] * 3)
        # Species may be (symbol, charge, stoich) tuples or (Species, stoich).
        if isinstance(species[0][0], str):
            species_strs = [
                "{ele}{charge}{sign}".format(
                    ele=spec[0],
                    charge=abs(spec[1]),
                    sign='+' if spec[1] >= 0 else '-', ) for spec in species
            ]
        else:
            species_strs = [
                "{ele}{charge}{sign}".format(
                    ele=spec[0].symbol,
                    charge=abs(spec[0].oxidation),
                    sign='+' if spec[0].oxidation >= 0 else '-',
                ) for spec in species
            ]
        # One empty coordinate list per species string.
        sites = {spec: [[]] for spec in species_strs}
        return species, lattice_mat, sites
    def test_from_py_struct(self):
        """Test generation of SmactStructure from a pymatgen Structure."""
        with open(TEST_PY_STRUCT, 'r') as f:
            d = json.load(f)
            py_structure = pymatgen.core.Structure.from_dict(d)
        # Conversion emits log warnings that are irrelevant to this test.
        with ignore_warnings(smact.structure_prediction.logger):
            s1 = SmactStructure.from_py_struct(py_structure)
        s2 = SmactStructure.from_file(os.path.join(files_dir, "CaTiO3.txt"))
        self.assertStructAlmostEqual(s1, s2)
    def test_has_species(self):
        """Test determining whether a species is in a `SmactStructure`."""
        s1 = SmactStructure(*self._gen_empty_structure([('Ba', 2, 2), ('O', -2, 1), ('F', -1, 2)]))
        self.assertTrue(s1.has_species(('Ba', 2)))
        self.assertFalse(s1.has_species(('Ba', 3)))
        self.assertFalse(s1.has_species(('Ca', 2)))
    def test_smactStruc_comp_key(self):
        """Test generation of a composition key for `SmactStructure`s."""
        s1 = SmactStructure(*self._gen_empty_structure([('Ba', 2, 2), ('O', -2, 1), ('F', -1, 2)]))
        s2 = SmactStructure(*self._gen_empty_structure([('Fe', 2, 1), ('Fe', 3, 2), ('O', -2, 4)]))
        Ba = Species('Ba', 2)
        O = Species('O', -2)
        F = Species('F', -1)
        Fe2 = Species('Fe', 2)
        Fe3 = Species('Fe', 3)
        s3 = SmactStructure(*self._gen_empty_structure([(Ba, 2), (O, 1), (F, 2)]))
        s4 = SmactStructure(*self._gen_empty_structure([(Fe2, 1), (Fe3, 2), (O, 4)]))
        # Expected composition keys; tuple and Species input must agree.
        Ba_2OF_2 = "Ba_2_2+F_2_1-O_1_2-"
        Fe_3O_4 = "Fe_2_3+Fe_1_2+O_4_2-"
        self.assertEqual(s1.composition(), Ba_2OF_2)
        self.assertEqual(s2.composition(), Fe_3O_4)
        self.assertEqual(s3.composition(), Ba_2OF_2)
        self.assertEqual(s4.composition(), Fe_3O_4)
    def test_smactStruc_from_file(self):
        """Test the `from_file` method of `SmactStructure`."""
        with open(TEST_STRUCT, 'rb') as f:
            s1 = pickle.load(f)
        s2 = SmactStructure.from_file(TEST_POSCAR)
        self.assertEqual(s1, s2)
    def test_equality(self):
        """Test equality determination of `SmactStructure`."""
        struct_files = [os.path.join(files_dir, f"{x}.txt") for x in ["CaTiO3", "NaCl"]]
        CaTiO3 = SmactStructure.from_file(struct_files[0])
        NaCl = SmactStructure.from_file(struct_files[1])
        with self.subTest(msg="Testing equality of same object."):
            self.assertEqual(CaTiO3, CaTiO3)
        with self.subTest(msg="Testing inequality of different types."):
            self.assertNotEqual(CaTiO3, "CaTiO3")
        with self.subTest(msg="Testing inequality of different objects."):
            self.assertNotEqual(CaTiO3, NaCl)
    def test_ele_stoics(self):
        """Test acquiring element stoichiometries."""
        s1 = SmactStructure(*self._gen_empty_structure([('Fe', 2, 1), ('Fe', 3, 2), ('O', -2, 4)]))
        # Charge states of the same element are summed per element symbol.
        s1_stoics = {'Fe': 3, 'O': 4}
        s2 = SmactStructure(*self._gen_empty_structure([('Ba', 2, 2), ('O', -2, 1), ('F', -1, 2)]))
        s2_stoics = {'Ba': 2, 'O': 1, 'F': 2}
        for test, expected in [(s1, s1_stoics), (s2, s2_stoics)]:
            with self.subTest(species=test.species):
                self.assertEqual(SmactStructure._get_ele_stoics(test.species), expected)
    @unittest.skipUnless(os.environ.get("MPI_KEY"), "requires MPI key to be set.")
    def test_from_mp(self):
        """Test downloading structures from materialsproject.org."""
        # TODO Needs ensuring that the structure query gets the same
        # structure as we have downloaded.
        api_key = os.environ.get("MPI_KEY")
        for comp, species in self.TEST_SPECIES.items():
            with self.subTest(comp=comp):
                comp_file = os.path.join(files_dir, f"{comp}.txt")
                local_struct = SmactStructure.from_file(comp_file)
                mp_struct = SmactStructure.from_mp(species, api_key)
                self.assertEqual(local_struct, mp_struct)
class StructureDBTest(unittest.TestCase):
    """Test StructureDB interface."""
    # Temporary on-disk database and table names used by the tests below.
    TEST_DB = os.path.join(files_dir, "test_db.tmp")
    TEST_TABLE = "Structures"
    TEST_MP_TABLE = "Structures1"
    @classmethod
    def tearDownClass(cls):
        """Remove database files."""
        if os.path.exists(cls.TEST_DB):
            os.remove(cls.TEST_DB)
    def test_db_interface(self):
        """Test interfacing with database.

        Kept as a single ordered method: each subTest depends on the
        database state left behind by the previous one.
        """
        with self.subTest(msg="Instantiating database."):
            self.db = StructureDB(self.TEST_DB)
        with self.subTest(msg="Adding table."):
            try:
                self.db.add_table(self.TEST_TABLE)
            except Exception as e:
                self.fail(e)
        struct_file = os.path.join(files_dir, "CaTiO3.txt")
        struct = SmactStructure.from_file(struct_file)
        with self.subTest(msg="Adding structure to table."):
            try:
                self.db.add_struct(struct, self.TEST_TABLE)
            except Exception as e:
                self.fail(e)
        with self.subTest(msg="Getting structure from table."):
            struct_list = self.db.get_structs(struct.composition(), self.TEST_TABLE)
            self.assertEqual(len(struct_list), 1)
            self.assertEqual(struct_list[0], struct)
        struct_files = [os.path.join(files_dir, f"{x}.txt") for x in ["NaCl", "Fe"]]
        structs = [SmactStructure.from_file(fname) for fname in struct_files]
        with self.subTest(msg="Adding multiple structures to table."):
            try:
                self.db.add_structs(structs, self.TEST_TABLE)
            except Exception as e:
                self.fail(e)
        # Each query below pairs with the expected result at the same index.
        test_with_species_args = [
            [("Na", 1)],
            [("Cl", -1)],
            [("Na", 1), ("Cl", -1)],
            [("Cl", -1), ("Na", 1)],
            [("Cl", -1)],
            [("Na", 1), ("Cl", 1)],
            [("O", -2)],
            [("Ca", 2), ("Ti", 4), ("O", -2)],
        ]
        test_with_species_exp = [
            [structs[0]],
            [structs[0]],
            [structs[0]],
            [structs[0]],
            [structs[0]],
            [],
            [struct],
            [struct],
        ]
        for spec, expected in zip(test_with_species_args, test_with_species_exp):
            with self.subTest(msg=f"Retrieving species with {spec}"):
                self.assertEqual(self.db.get_with_species(spec, self.TEST_TABLE), expected)
        with open(TEST_MP_DATA, "rb") as f:
            mp_data = pickle.load(f)
        with self.subTest(msg="Testing adding downloaded MP structures."):
            added: int = self.db.add_mp_icsd(self.TEST_MP_TABLE, mp_data)
            self.assertEqual(added, 3)
class CationMutatorTest(unittest.TestCase):
    """Test the CationMutator class."""
    @classmethod
    def setUpClass(cls):
        """Set up the test initial structure and mutator."""
        cls.test_struct = SmactStructure.from_file(TEST_POSCAR)
        cls.test_mutator = CationMutator.from_json(lambda_json=TEST_LAMBDA_JSON)
        # Mutator backed by the default (pymatgen) table, constant alpha.
        cls.test_pymatgen_mutator = CationMutator.from_json(
            lambda_json=None, alpha=lambda x, y: -5
        )
        # 5 random test species -> 15 unordered pairs (with replacement)
        cls.test_species = sample(cls.test_pymatgen_mutator.specs, 5)
        cls.test_pairs = list(itertools.combinations_with_replacement(cls.test_species, 2))
        # Reference implementation to compare probabilities against.
        cls.pymatgen_sp = SubstitutionProbability(lambda_table=None, alpha=-5)
    def test_lambda_tab_pop(self):
        """Test if lambda table is populated correctly."""
        lambda_dat = [
            [-5.0, 0.5, -5.0],
            [0.5, -5.0, 0.3],
            [-5.0, 0.3, -5.0], ]
        labels = ["A", "B", "C"]
        exp_lambda = pd.DataFrame(lambda_dat, index=labels, columns=labels)
        assert_frame_equal(
            self.test_mutator.lambda_tab,
            exp_lambda,
            check_names=False, )
    def test_partition_func_Z(self):
        """Test the partition function for the whole table."""
        # 2e^0.5 + 2e^0.3 + 5e^{-5} \approx 6.0308499
        self.assertAlmostEqual(self.test_mutator.Z, 6.0308499)
    def test_pymatgen_lambda_import(self):
        """Test importing pymatgen lambda table."""
        self.assertIsInstance(self.test_pymatgen_mutator.lambda_tab, pd.DataFrame)
    def test_lambda_interface(self):
        """Test getting lambda values."""
        test_cases = [itertools.permutations(x) for x in [("A", "B"), ("A", "C"), ("B", "C")]]
        expected = [0.5, -5.0, 0.3]
        # Both argument orders of each pair must give the same lambda.
        for test_case, expectation in zip(test_cases, expected):
            for spec_comb in test_case:
                s1, s2 = spec_comb
                with self.subTest(s1=s1, s2=s2):
                    self.assertEqual(self.test_mutator.get_lambda(s1, s2), expectation)
    def test_ion_mutation(self):
        """Test mutating an ion of a SmactStructure."""
        ca_file = os.path.join(files_dir, "CaTiO3.txt")
        ba_file = os.path.join(files_dir, "BaTiO3.txt")
        CaTiO3 = SmactStructure.from_file(ca_file)
        BaTiO3 = SmactStructure.from_file(ba_file)
        with self.subTest(s1="CaTiO3", s2="BaTiO3"):
            mutation = self.test_mutator._mutate_structure(CaTiO3, "Ca2+", "Ba2+")
            self.assertEqual(mutation, BaTiO3)
        na_file = os.path.join(files_dir, "NaCl.txt")
        NaCl = SmactStructure.from_file(na_file)
        # The Na1+ -> Na2+ mutation is expected to be rejected with ValueError.
        with self.subTest(s1="Na1+Cl1-", s2="Na2+Cl1-"):
            with self.assertRaises(ValueError):
                self.test_mutator._mutate_structure(NaCl, "Na1+", "Na2+")
        # TODO Confirm functionality with more complex substitutions
    def test_sub_prob(self):
        """Test determining substitution probabilities."""
        for s1, s2 in self.test_pairs:
            with self.subTest(s1=s1, s2=s2):
                self.assertAlmostEqual(
                    self.pymatgen_sp.prob(s1, s2),
                    self.test_pymatgen_mutator.sub_prob(s1, s2), )
    def test_cond_sub_probs(self):
        """Test determining conditional substitution probabilities for a row."""
        for s1 in ["A", "B", "C"]:
            with self.subTest(s=s1):
                cond_sub_probs_test = self.test_mutator.cond_sub_probs(s1)
                vals = [
                    (s1, s2, self.test_mutator.cond_sub_prob(s1, s2)) for s2 in ["A", "B", "C"]
                ]
                test_df = pd.DataFrame(vals)
                test_df: pd.DataFrame = test_df.pivot(index=0, columns=1, values=2)
                # Slice to convert to series
                assert_series_equal(cond_sub_probs_test, test_df.iloc[0])
    def test_cond_sub_prob(self):
        """Test determining conditional substitution probabilities."""
        for s1, s2 in self.test_pairs:
            with self.subTest(s1=s1, s2=s2):
                self.assertAlmostEqual(
                    self.pymatgen_sp.cond_prob(s1, s2),
                    self.test_pymatgen_mutator.cond_sub_prob(s1, s2),
                )
    def test_pair_corr(self):
        """Test determining pair correlations against the reference."""
        for s1, s2 in self.test_pairs:
            with self.subTest(s1=s1, s2=s2):
                self.assertAlmostEqual(
                    self.pymatgen_sp.pair_corr(s1, s2),
                    self.test_pymatgen_mutator.pair_corr(s1, s2),
                )
    def test_from_df(self):
        """Test creating a CationMutator from an existing DataFrame."""
        lambda_df = pd.read_csv(TEST_LAMBDA_CSV, index_col=0)
        csv_test = CationMutator(lambda_df=lambda_df)
        # CSV- and JSON-sourced tables must be identical.
        assert_frame_equal(
            csv_test.lambda_tab,
            self.test_mutator.lambda_tab,
            check_names=False, )
    def test_complete_cond_probs(self):
        """Test getting all conditional probabilities."""
        pairs = itertools.product(["A", "B", "C"], repeat=2)
        vals = [(
            s1,
            s2,
            self.test_mutator.cond_sub_prob(s1, s2), ) for s1, s2 in pairs]
        cond_probs = pd.DataFrame(vals)
        cond_probs = cond_probs.pivot(index=0, columns=1, values=2)
        assert_frame_equal(self.test_mutator.complete_cond_probs(), cond_probs)
    def test_complete_sub_probs(self):
        """Test getting all probabilities."""
        pairs = itertools.product(["A", "B", "C"], repeat=2)
        vals = [(
            s1,
            s2,
            self.test_mutator.sub_prob(s1, s2), ) for s1, s2 in pairs]
        sub_probs = pd.DataFrame(vals)
        sub_probs = sub_probs.pivot(index=0, columns=1, values=2)
        assert_frame_equal(self.test_mutator.complete_sub_probs(), sub_probs)
    def test_complete_pair_corrs(self):
        """Test getting all pair correlations."""
        pairs = itertools.product(["A", "B", "C"], repeat=2)
        vals = [(
            s1,
            s2,
            self.test_mutator.pair_corr(s1, s2), ) for s1, s2 in pairs]
        pair_corrs = pd.DataFrame(vals)
        pair_corrs = pair_corrs.pivot(index=0, columns=1, values=2)
        assert_frame_equal(self.test_mutator.complete_pair_corrs(), pair_corrs)
class PredictorTest(unittest.TestCase):
"""Testing for the StructurePredictor wrapper."""
@classmethod
| |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['BucketObject']
class BucketObject(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
acl: Optional[pulumi.Input[str]] = None,
bucket: Optional[pulumi.Input[str]] = None,
cache_control: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_base64: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_language: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
force_destroy: Optional[pulumi.Input[bool]] = None,
key: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
object_lock_legal_hold_status: Optional[pulumi.Input[str]] = None,
object_lock_mode: Optional[pulumi.Input[str]] = None,
object_lock_retain_until_date: Optional[pulumi.Input[str]] = None,
server_side_encryption: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,
storage_class: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
website_redirect: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a S3 bucket object resource.
## Example Usage
### Encrypting with KMS Key
```python
import pulumi
import pulumi_aws as aws
examplekms = aws.kms.Key("examplekms",
description="KMS key 1",
deletion_window_in_days=7)
examplebucket = aws.s3.Bucket("examplebucket", acl="private")
examplebucket_object = aws.s3.BucketObject("examplebucketObject",
key="someobject",
bucket=examplebucket.id,
source=pulumi.FileAsset("index.html"),
kms_key_id=examplekms.arn)
```
### Server Side Encryption with S3 Default Master Key
```python
import pulumi
import pulumi_aws as aws
examplebucket = aws.s3.Bucket("examplebucket", acl="private")
examplebucket_object = aws.s3.BucketObject("examplebucketObject",
key="someobject",
bucket=examplebucket.id,
source=pulumi.FileAsset("index.html"),
server_side_encryption="aws:kms")
```
### Server Side Encryption with AWS-Managed Key
```python
import pulumi
import pulumi_aws as aws
examplebucket = aws.s3.Bucket("examplebucket", acl="private")
examplebucket_object = aws.s3.BucketObject("examplebucketObject",
key="someobject",
bucket=examplebucket.id,
source=pulumi.FileAsset("index.html"),
server_side_encryption="AES256")
```
### S3 Object Lock
```python
import pulumi
import pulumi_aws as aws
examplebucket = aws.s3.Bucket("examplebucket",
acl="private",
versioning=aws.s3.BucketVersioningArgs(
enabled=True,
),
object_lock_configuration=aws.s3.BucketObjectLockConfigurationArgs(
object_lock_enabled="Enabled",
))
examplebucket_object = aws.s3.BucketObject("examplebucketObject",
key="someobject",
bucket=examplebucket.id,
source=pulumi.FileAsset("important.txt"),
object_lock_legal_hold_status="ON",
object_lock_mode="GOVERNANCE",
object_lock_retain_until_date="2021-12-31T23:59:60Z",
force_destroy=True)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] acl: The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Defaults to "private".
:param pulumi.Input[str] bucket: The name of the bucket to put the file in. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.
:param pulumi.Input[str] cache_control: Specifies caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
:param pulumi.Input[str] content: Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
:param pulumi.Input[str] content_base64: Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.
:param pulumi.Input[str] content_disposition: Specifies presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.
:param pulumi.Input[str] content_encoding: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.
:param pulumi.Input[str] content_language: The language the content is in e.g. en-US or en-GB.
:param pulumi.Input[str] content_type: A standard MIME type describing the format of the object data, e.g. application/octet-stream. All Valid MIME Types are valid for this input.
:param pulumi.Input[str] etag: Used to trigger updates. The only meaningful value is `${filemd5("path/to/file")}` (this provider 0.11.12 or later) or `${md5(file("path/to/file"))}` (this provider 0.11.11 or earlier).
This attribute is not compatible with KMS encryption, `kms_key_id` or `server_side_encryption = "aws:kms"`.
:param pulumi.Input[bool] force_destroy: Allow the object to be deleted by removing any legal hold on any object version.
Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.
:param pulumi.Input[str] key: The name of the object once it is in the bucket.
:param pulumi.Input[str] kms_key_id: Specifies the AWS KMS Key ARN to use for object encryption.
This value is a fully qualified **ARN** of the KMS Key. If using `kms.Key`,
use the exported `arn` attribute:
`kms_key_id = "${aws_kms_key.foo.arn}"`
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: A map of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase label are currently supported by the AWS Go API).
:param pulumi.Input[str] object_lock_legal_hold_status: The [legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`.
:param pulumi.Input[str] object_lock_mode: The object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`.
:param pulumi.Input[str] object_lock_retain_until_date: The date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods).
:param pulumi.Input[str] server_side_encryption: Specifies server-side encryption of the object in S3. Valid values are "`AES256`" and "`aws:kms`".
:param pulumi.Input[Union[pulumi.Asset, pulumi.Archive]] source: The path to a file that will be read and uploaded as raw bytes for the object content.
:param pulumi.Input[str] storage_class: Specifies the desired [Storage Class](http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
for the object. Can be either "`STANDARD`", "`REDUCED_REDUNDANCY`", "`ONEZONE_IA`", "`INTELLIGENT_TIERING`", "`GLACIER`", "`DEEP_ARCHIVE`", or "`STANDARD_IA`". Defaults to "`STANDARD`".
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the object.
:param pulumi.Input[str] website_redirect: Specifies a target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['acl'] = acl
if bucket is None:
raise TypeError("Missing required property 'bucket'")
__props__['bucket'] = bucket
__props__['cache_control'] = cache_control
__props__['content'] = content
__props__['content_base64'] = content_base64
__props__['content_disposition'] = content_disposition
__props__['content_encoding'] = content_encoding
__props__['content_language'] = content_language
__props__['content_type'] = content_type
__props__['etag'] = etag
__props__['force_destroy'] = force_destroy
__props__['key'] = key
__props__['kms_key_id'] = kms_key_id
__props__['metadata'] = metadata
__props__['object_lock_legal_hold_status'] = object_lock_legal_hold_status
__props__['object_lock_mode'] = object_lock_mode
__props__['object_lock_retain_until_date'] = object_lock_retain_until_date
__props__['server_side_encryption'] = server_side_encryption
__props__['source'] = source
__props__['storage_class'] = storage_class
__props__['tags'] = tags
__props__['website_redirect'] = website_redirect
__props__['version_id'] = None
super(BucketObject, __self__).__init__(
'aws:s3/bucketObject:BucketObject',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
acl: Optional[pulumi.Input[str]] = None,
bucket: Optional[pulumi.Input[str]] = None,
cache_control: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_base64: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_language: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
force_destroy: Optional[pulumi.Input[bool]] = None,
key: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
object_lock_legal_hold_status: Optional[pulumi.Input[str]] = None,
object_lock_mode: Optional[pulumi.Input[str]] = None,
object_lock_retain_until_date: Optional[pulumi.Input[str]] = None,
server_side_encryption: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,
storage_class: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version_id: Optional[pulumi.Input[str]] = None,
website_redirect: Optional[pulumi.Input[str]] = None) -> 'BucketObject':
"""
Get an existing BucketObject resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] acl: The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Defaults to "private".
:param pulumi.Input[str] bucket: The name of the bucket to put the file in. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.
:param pulumi.Input[str] cache_control: Specifies caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
:param pulumi.Input[str] content: Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
:param pulumi.Input[str] content_base64: Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.
:param pulumi.Input[str] content_disposition: Specifies presentational information for the object. | |
[u'f'] ,
u'圪' : [u'g'] ,
u'螵' : [u'p'] ,
u'昴' : [u'm'] ,
u'媷' : [u'r'] ,
u'㔾' : [u'j'] ,
u'槁' : [u'k', u'g'] ,
u'譂' : [u'c'] ,
u'燑' : [u't'] ,
u'鍒' : [u'r'] ,
u'噔' : [u'd'] ,
u'蛟' : [u'j'] ,
u'敞' : [u'c'] ,
u'姡' : [u'k'] ,
u'㑨' : [u'y'] ,
u'棫' : [u'y'] ,
u'詬' : [u'h', u'g'] ,
u'䵮' : [u'y'] ,
u'烻' : [u'y', u's'] ,
u'鉼' : [u'b'] ,
u'啾' : [u'j'] ,
u'撈' : [u'l'] ,
u'鬍' : [u'h'] ,
u'帏' : [u'w'] ,
u'視' : [u's'] ,
u'洙' : [u'z'] ,
u'䲘' : [u'k', u'w', u'g'] ,
u'㰣' : [u'z'] ,
u'醦' : [u's'] ,
u'甩' : [u's'] ,
u'咨' : [u'z'] ,
u'掲' : [u'q', u'j'] ,
u'騷' : [u'x', u's'] ,
u'崹' : [u't'] ,
u'裀' : [u'y'] ,
u'汃' : [u'p', u'b'] ,
u'䯂' : [u'j'] ,
u'郐' : [u'k'] ,
u'瑓' : [u'l'] ,
u'叒' : [u'r'] ,
u'拜' : [u'b'] ,
u'饡' : [u'z'] ,
u'屣' : [u'x'] ,
u'歭' : [u'c'] ,
u'䫬' : [u'z'] ,
u'獽' : [u'r'] ,
u'勼' : [u'j'] ,
u'䢁' : [u'y'] ,
u'莇' : [u'z'] ,
u'簎' : [u'z'] ,
u'傑' : [u'j'] ,
u'䐞' : [u'r'] ,
u'綣' : [u'q'] ,
u'㺥' : [u'c', u't'] ,
u'鼤' : [u'w'] ,
u'愰' : [u'h'] ,
u'䖳' : [u'z'] ,
u'鲹' : [u's'] ,
u'䥀' : [u'c'] ,
u'滅' : [u'm'] ,
u'聆' : [u'l'] ,
u'児' : [u'e'] ,
u'盕' : [u'f'] ,
u'臛' : [u'h'] ,
u'穢' : [u'h'] ,
u'廥' : [u'k'] ,
u'㽤' : [u'j'] ,
u'䉲' : [u'b', u'm'] ,
u'篷' : [u'p'] ,
u'㳹' : [u'w'] ,
u'鵸' : [u'q'] ,
u'蠁' : [u'x'] ,
u'澄' : [u'c', u'd'] ,
u'夋' : [u'q'] ,
u'逑' : [u'q'] ,
u'瞔' : [u'z'] ,
u'躚' : [u'x'] ,
u'戝' : [u'z'] ,
u'徤' : [u'j'] ,
u'險' : [u'y', u'x', u'j'] ,
u'蔳' : [u'q'] ,
u'碶' : [u'q'] ,
u'刽' : [u'g'] ,
u'鯌' : [u'g'] ,
u'罏' : [u'l'] ,
u'淘' : [u't'] ,
u'䝟' : [u'y'] ,
u'鹥' : [u'y'] ,
u'痨' : [u'l'] ,
u'㛪' : [u'y'] ,
u'賮' : [u'j'] ,
u'恱' : [u'y'] ,
u'巸' : [u'y'] ,
u'链' : [u'l'] ,
u'躃' : [u'b'] ,
u'椂' : [u'l'] ,
u'䶅' : [u'h'] ,
u'隓' : [u'h'] ,
u'喕' : [u'm'] ,
u'撟' : [u'j'] ,
u'騠' : [u't'] ,
u'夢' : [u'm'] ,
u'㞩' : [u'l'] ,
u'覭' : [u'm'] ,
u'栬' : [u'z'] ,
u'䲯' : [u'j'] ,
u'㬶' : [u'h'] ,
u'醽' : [u'l'] ,
u'瀼' : [u'r'] ,
u'咿' : [u'y'] ,
u'柉' : [u'f'] ,
u'蕊' : [u'r', u'j'] ,
u'塌' : [u't'] ,
u'裗' : [u'l'] ,
u'歖' : [u'x'] ,
u'俙' : [u'x'] ,
u'㩠' : [u'p'] ,
u'郧' : [u'y'] ,
u'獦' : [u'g'] ,
u'埩' : [u'z'] ,
u'曳' : [u'y'] ,
u'葴' : [u'q', u'x', u'z'] ,
u'孶' : [u'z'] ,
u'檀' : [u's', u't'] ,
u'贅' : [u'z'] ,
u'䀇' : [u'q', u'g'] ,
u'犐' : [u'k'] ,
u'锕' : [u'a'] ,
u'螞' : [u'm'] ,
u'笡' : [u'q'] ,
u'媠' : [u't', u'd'] ,
u'谯' : [u'q'] ,
u'䌱' : [u'l'] ,
u'綺' : [u'q', u'y'] ,
u'鐿' : [u'y'] ,
u'蛈' : [u't'] ,
u'穋' : [u'l'] ,
u'䗊' : [u'c', u't'] ,
u'㕕' : [u'h'] ,
u'轙' : [u'y'] ,
u'䉛' : [u'y'] ,
u'糤' : [u's'] ,
u'革' : [u'j', u'g'] ,
u'臲' : [u'n'] ,
u'敵' : [u'd'] ,
u'䓴' : [u'r'] ,
u'㑿' : [u'z', u't'] ,
u'戆' : [u'z', u'g'] ,
u'䚉' : [u'q', u'x'] ,
u'鶏' : [u'j'] ,
u'䨖' : [u'j'] ,
u'掛' : [u'g'] ,
u'脜' : [u'r'] ,
u'刦' : [u'j'] ,
u'䮫' : [u'l'] ,
u'花' : [u'h'] ,
u'缸' : [u'h', u'g'] ,
u'去' : [u'q'] ,
u'㰺' : [u'x'] ,
u'䝈' : [u'e'] ,
u'磍' : [u'x'] ,
u'㷏' : [u'y'] ,
u'鹎' : [u'b'] ,
u'汚' : [u'y', u'w'] ,
u'䃝' : [u'q'] ,
u'瑪' : [u'm'] ,
u'淯' : [u'y'] ,
u'荰' : [u'd'] ,
u'屺' : [u'q'] ,
u'痿' : [u'w'] ,
u'伃' : [u'y'] ,
u'蘉' : [u'm'] ,
u'禌' : [u'z'] ,
u'㺎' : [u'r'] ,
u'圓' : [u'y'] ,
u'䆜' : [u'y'] ,
u'颢' : [u'h'] ,
u'簥' : [u'q', u'j'] ,
u'謫' : [u'z'] ,
u'溮' : [u's'] ,
u'䐵' : [u'h'] ,
u'皾' : [u'd'] ,
u'跄' : [u'q'] ,
u'慇' : [u'y'] ,
u'廎' : [u'q'] ,
u'闔' : [u'h', u'g'] ,
u'䥗' : [u'k'] ,
u'聝' : [u'g'] ,
u'篠' : [u'x'] ,
u'㣢' : [u't'] ,
u'內' : [u'n'] ,
u'䏰' : [u'r'] ,
u'髶' : [u'r'] ,
u'繹' : [u'y', u's'] ,
u'鲋' : [u'f'] ,
u'挊' : [u'n'] ,
u'徍' : [u'w'] ,
u'癩' : [u'l'] ,
u'溗' : [u's'] ,
u'蠘' : [u'j'] ,
u'㶡' : [u'h', u'z', u'd'] ,
u'皧' : [u'a'] ,
u'逨' : [u'l'] ,
u'匪' : [u'f'] ,
u'戴' : [u'd'] ,
u'康' : [u'k'] ,
u'淁' : [u'q'] ,
u'轂' : [u'g'] ,
u'䩄' : [u't'] ,
u'痑' : [u'd'] ,
u'青' : [u'q', u'j'] ,
u'剔' : [u't'] ,
u'苟' : [u'g'] ,
u'慞' : [u'z'] ,
u'巡' : [u'x'] ,
u'泫' : [u'x', u'j'] ,
u'蹬' : [u'd'] ,
u'䥮' : [u'z'] ,
u'瓻' : [u'c'] ,
u'陼' : [u'z', u'd'] ,
u'兾' : [u'j'] ,
u'悈' : [u'j'] ,
u'鼍' : [u't'] ,
u'娏' : [u'm'] ,
u'㞒' : [u'x'] ,
u'趖' : [u's'] ,
u'㠣' : [u'l'] ,
u'閦' : [u'c'] ,
u'焩' : [u'p'] ,
u'储' : [u'c'] ,
u'枲' : [u'x'] ,
u'鸷' : [u'z'] ,
u'夹' : [u'j'] ,
u'賀' : [u'h'] ,
u'桃' : [u't'] ,
u'係' : [u'x'] ,
u'铐' : [u'k'] ,
u'灓' : [u'l'] ,
u'埒' : [u'l'] ,
u'曜' : [u'y'] ,
u'鵡' : [u'w'] ,
u'塣' : [u'c'] ,
u'㗦' : [u'l'] ,
u'潭' : [u'y', u'x', u't', u'd'] ,
u'们' : [u'm'] ,
u'㹷' : [u'x', u's'] ,
u'鯺' : [u'z'] ,
u'睽' : [u'k', u'j'] ,
u'囼' : [u't'] ,
u'䲁' : [u'w'] ,
u'螇' : [u'x'] ,
u'砎' : [u'j'] ,
u'咑' : [u'q', u'j'] ,
u'鬤' : [u'r'] ,
u'攰' : [u'g'] ,
u'䆳' : [u'q', u's'] ,
u'颹' : [u'w'] ,
u'䵀' : [u's'] ,
u'櫅' : [u'j'] ,
u'葆' : [u'b'] ,
u'啐' : [u'c'] ,
u'狕' : [u'a'] ,
u'藛' : [u'x'] ,
u'繢' : [u'h'] ,
u'嫥' : [u'z'] ,
u'㭤' : [u'n'] ,
u'翷' : [u'l'] ,
u'饸' : [u'h', u'j'] ,
u'谁' : [u's'] ,
u'殄' : [u't'] ,
u'崋' : [u'h'] ,
u'鐑' : [u'j'] ,
u'玔' : [u'q', u'c'] ,
u'誚' : [u'q'] ,
u'昝' : [u'c', u'z'] ,
u'室' : [u's'] ,
u'銪' : [u'y'] ,
u'中' : [u'z'] ,
u'粶' : [u'l'] ,
u'㦸' : [u'j'] ,
u'嘽' : [u'c', u't'] ,
u'䓆' : [u'z'] ,
u'筏' : [u'f'] ,
u'様' : [u'y'] ,
u'䍟' : [u'z'] ,
u'驥' : [u'j'] ,
u'燨' : [u'x'] ,
u'裮' : [u'c'] ,
u'摱' : [u'm'] ,
u'姸' : [u'y'] ,
u'郾' : [u'y'] ,
u'誃' : [u'y', u'c'] ,
u'洂' : [u'y'] ,
u'䦅' : [u's', u'z'] ,
u'㰌' : [u'q'] ,
u'銓' : [u'q'] ,
u'甒' : [u'w'] ,
u'冕' : [u'm'] ,
u'悟' : [u'w'] ,
u'鸠' : [u'j'] ,
u'崢' : [u'z'] ,
u'趭' : [u'y', u'j'] ,
u'氬' : [u'y'] ,
u'䢯' : [u'l'] ,
u'㼶' : [u'y'] ,
u'閽' : [u'h'] ,
u'琼' : [u'q'] ,
u'傿' : [u'y'] ,
u'揉' : [u'r'] ,
u'腊' : [u'x', u'l'] ,
u'屌' : [u'd'] ,
u'賗' : [u'c'] ,
u'潖' : [u'p'] ,
u'䯙' : [u'p'] ,
u'㹠' : [u't'] ,
u'铧' : [u'h'] ,
u'睦' : [u'm'] ,
u'叩' : [u'k'] ,
u'拳' : [u'q'] ,
u'聴' : [u't'] ,
u'彶' : [u'j'] ,
u'㗽' : [u'x'] ,
u'満' : [u'm'] ,
u'褅' : [u't'] ,
u'䐇' : [u'c'] ,
u'皐' : [u'g'] ,
u'鄕' : [u'x'] ,
u'莞' : [u'w', u'g'] ,
u'缡' : [u'l'] ,
u'庠' : [u'x'] ,
u'蠯' : [u'b'] ,
u'䜱' : [u'm'] ,
u'禺' : [u'y', u'o'] ,
u'逿' : [u'd', u't'] ,
u'苈' : [u'l'] ,
u'繋' : [u'x'] ,
u'譙' : [u'q'] ,
u'䙛' : [u'g'] ,
u'磤' : [u'y'] ,
u'鍩' : [u't', u'n'] ,
u'慵' : [u'y'] ,
u'昆' : [u'h', u'k'] ,
u'䊉' : [u's'] ,
u'馏' : [u'l'] ,
u'世' : [u's'] ,
u'枛' : [u'z'] ,
u'蔜' : [u'a'] ,
u'嘦' : [u'j'] ,
u'侫' : [u'n'] ,
u'蚱' : [u'z'] ,
u'笸' : [u'p'] ,
u'垻' : [u'b'] ,
u'㠺' : [u'm'] ,
u'䍈' : [u'p'] ,
u'糍' : [u'c', u'z'] ,
u'驎' : [u'l'] ,
u'桚' : [u'z'] ,
u'䓝' : [u'm'] ,
u'鯣' : [u'y'] ,
u'灪' : [u'y'] ,
u'槯' : [u'c'] ,
u'蝰' : [u'h', u'k'] ,
u'塺' : [u'm'] ,
u'燿' : [u'y', u's'] ,
u'䬃' : [u's'] ,
u'舉' : [u'j'] ,
u'綌' : [u'x'] ,
u'㪎' : [u's'] ,
u'匓' : [u'j'] ,
u'䖜' : [u'y'] ,
u'鲢' : [u'l'] ,
u'砥' : [u'z', u'd'] ,
u'㔧' : [u'x', u'l'] ,
u'輫' : [u'p'] ,
u'檮' : [u't', u'd'] ,
u'䀵' : [u's'] ,
u'犾' : [u'y'] ,
u'规' : [u'g'] ,
u'敇' : [u'c'] ,
u'嫎' : [u'p'] ,
u'釔' : [u'y'] ,
u'䵗' : [u'f'] ,
u'葝' : [u'j'] ,
u'翠' : [u'c'] ,
u'啧' : [u'z'] ,
u'䟰' : [u'j'] ,
u'黶' : [u'y'] ,
u'穹' : [u'q', u'k'] ,
u'㝻' : [u'j'] ,
u'肋' : [u'j', u'l'] ,
u'缊' : [u'y', u'w'] ,
u'犗' : [u'j'] ,
u'鐘' : [u'z'] ,
u'圚' : [u'h'] ,
u'檧' : [u's'] ,
u'谨' : [u'j'] ,
u'伪' : [u'w'] ,
u'㔮' : [u'n'] ,
u'縴' : [u'q'] ,
u'燁' : [u'y'] ,
u'噄' : [u'c'] ,
u'槑' : [u'm'] ,
u'譒' : [u'b'] ,
u'乔' : [u'q'] ,
u'㑘' : [u'j'] ,
u'黟' : [u'y'] | |
<filename>tencentcloud/cii/v20210408/models.py
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class CompareMetricsData(AbstractModel):
    """Structured-comparison metric (accuracy / recall) data.

    Note: every field may be returned as null, meaning no valid value
    was available.
    """

    def __init__(self):
        r"""
        :param ShortStructAccuracy: Accuracy for short-document structuring.
        :type ShortStructAccuracy: str
        :param ShortStructRecall: Recall for short-document structuring.
        :type ShortStructRecall: str
        :param LongStructAccuracy: Accuracy for long-document structuring.
        :type LongStructAccuracy: str
        :param LongStructRecall: Recall for long-document structuring.
        :type LongStructRecall: str
        :param LongContentAccuracy: Accuracy for long-document content extraction.
        :type LongContentAccuracy: str
        :param LongContentRecall: Recall for long-document content extraction.
        :type LongContentRecall: str
        """
        self.ShortStructAccuracy = None
        self.ShortStructRecall = None
        self.LongStructAccuracy = None
        self.LongStructRecall = None
        self.LongContentAccuracy = None
        self.LongContentRecall = None

    def _deserialize(self, params):
        """Fill this model's fields from the response dict *params*."""
        self.ShortStructAccuracy = params.get("ShortStructAccuracy")
        self.ShortStructRecall = params.get("ShortStructRecall")
        self.LongStructAccuracy = params.get("LongStructAccuracy")
        self.LongStructRecall = params.get("LongStructRecall")
        self.LongContentAccuracy = params.get("LongContentAccuracy")
        self.LongContentRecall = params.get("LongContentRecall")
        # Warn about keys the server sent that this model does not define.
        # (Fixes the misspelled local "memeber_set" and the "fileds" typo in
        # the warning message; the remove-loop becomes a set difference.)
        unused = set(params.keys()) - set(vars(self).keys())
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class CreateStructureTaskInfo(AbstractModel):
    """Per-report sub-task information for creating a structuring task."""

    def __init__(self):
        r"""
        :param TaskType: Task type.
        :type TaskType: str
        :param FileList: Ordered list of uploaded report file URLs. Leave as
        an empty array when ImageList is used instead.
        :type FileList: list of str
        :param CustomerId: Customer ID.
        :type CustomerId: str
        :param CustomerName: Customer name.
        :type CustomerName: str
        :param ImageList: Ordered list of report image contents, each
        base64-encoded.
        :type ImageList: list of str
        :param Year: Report year.
        :type Year: str
        """
        self.TaskType = None
        self.FileList = None
        self.CustomerId = None
        self.CustomerName = None
        self.ImageList = None
        self.Year = None

    def _deserialize(self, params):
        """Fill this model's fields from the dict *params*."""
        self.TaskType = params.get("TaskType")
        self.FileList = params.get("FileList")
        self.CustomerId = params.get("CustomerId")
        self.CustomerName = params.get("CustomerName")
        self.ImageList = params.get("ImageList")
        self.Year = params.get("Year")
        # Warn about keys this model does not define (fixes the misspelled
        # local "memeber_set" and the "fileds" typo in the warning message).
        unused = set(params.keys()) - set(vars(self).keys())
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class CreateStructureTaskRequest(AbstractModel):
    """CreateStructureTask request structure."""

    def __init__(self):
        r"""
        :param ServiceType: Service type. ``Structured``: structuring only;
        ``Underwrite``: structuring plus underwriting.
        :type ServiceType: str
        :param TaskInfos: Details of the recognition sub-tasks; several
        reports may be uploaded per task, one sub-task each.
        :type TaskInfos: list of CreateStructureTaskInfo
        :param PolicyId: Policy number.
        :type PolicyId: str
        :param TriggerType: Underwriting trigger mode. ``Auto``: automatic;
        ``Manual``: manual.
        :type TriggerType: str
        :param InsuranceTypes: Insurance types; required for medical-exam
        report tasks. CriticalDiseaseInsurance: critical illness;
        LifeInsurance: life; AccidentInsurance: accident.
        :type InsuranceTypes: list of str
        :param CallbackUrl: Callback URL that receives the result via POST.
        :type CallbackUrl: str
        """
        self.ServiceType = None
        self.TaskInfos = None
        self.PolicyId = None
        self.TriggerType = None
        self.InsuranceTypes = None
        self.CallbackUrl = None

    def _deserialize(self, params):
        """Fill this model's fields from *params*, parsing nested sub-tasks."""
        self.ServiceType = params.get("ServiceType")
        if params.get("TaskInfos") is not None:
            self.TaskInfos = []
            for item in params.get("TaskInfos"):
                obj = CreateStructureTaskInfo()
                obj._deserialize(item)
                self.TaskInfos.append(obj)
        self.PolicyId = params.get("PolicyId")
        self.TriggerType = params.get("TriggerType")
        self.InsuranceTypes = params.get("InsuranceTypes")
        self.CallbackUrl = params.get("CallbackUrl")
        # Warn about keys this model does not define (fixes the misspelled
        # local "memeber_set" and the "fileds" typo in the warning message).
        unused = set(params.keys()) - set(vars(self).keys())
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class CreateStructureTaskResponse(AbstractModel):
    """CreateStructureTask response structure."""

    def __init__(self):
        r"""
        :param MainTaskId: ID of the created main task; use it to query results.
        :type MainTaskId: str
        :param RequestId: Unique request ID, returned with every request.
        Provide it when reporting a problem.
        :type RequestId: str
        """
        self.MainTaskId = None
        self.RequestId = None

    def _deserialize(self, params):
        """Copy the recognized response fields out of *params*."""
        for field in ("MainTaskId", "RequestId"):
            setattr(self, field, params.get(field))
class CreateStructureTaskTestRequest(AbstractModel):
    """CreateStructureTaskTest request structure."""

    def __init__(self):
        r"""
        :param ServiceType: Service type. ``Structured``: structuring only;
        ``Underwrite``: structuring plus underwriting.
        :type ServiceType: str
        :param TaskInfos: Details of the recognition sub-tasks; several
        reports may be uploaded per task, one sub-task each.
        :type TaskInfos: list of CreateStructureTaskInfo
        :param PolicyId: Policy number.
        :type PolicyId: str
        :param TriggerType: Underwriting trigger mode. ``Auto``: automatic;
        ``Manual``: manual.
        :type TriggerType: str
        :param InsuranceTypes: Insurance types; required for medical-exam
        report tasks. CriticalDiseaseInsurance: critical illness;
        LifeInsurance: life; AccidentInsurance: accident.
        :type InsuranceTypes: list of str
        :param CallbackUrl: Callback URL that receives the result via POST.
        :type CallbackUrl: str
        """
        self.ServiceType = None
        self.TaskInfos = None
        self.PolicyId = None
        self.TriggerType = None
        self.InsuranceTypes = None
        self.CallbackUrl = None

    def _deserialize(self, params):
        """Fill this model's fields from *params*, parsing nested sub-tasks."""
        self.ServiceType = params.get("ServiceType")
        if params.get("TaskInfos") is not None:
            self.TaskInfos = []
            for item in params.get("TaskInfos"):
                obj = CreateStructureTaskInfo()
                obj._deserialize(item)
                self.TaskInfos.append(obj)
        self.PolicyId = params.get("PolicyId")
        self.TriggerType = params.get("TriggerType")
        self.InsuranceTypes = params.get("InsuranceTypes")
        self.CallbackUrl = params.get("CallbackUrl")
        # Warn about keys this model does not define (fixes the misspelled
        # local "memeber_set" and the "fileds" typo in the warning message).
        unused = set(params.keys()) - set(vars(self).keys())
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class CreateStructureTaskTestResponse(AbstractModel):
    """CreateStructureTaskTest response structure."""

    def __init__(self):
        r"""
        :param MainTaskId: ID of the created main task; use it to query results.
        :type MainTaskId: str
        :param RequestId: Unique request ID, returned with every request.
        Provide it when reporting a problem.
        :type RequestId: str
        """
        self.MainTaskId = None
        self.RequestId = None

    def _deserialize(self, params):
        """Copy the recognized response fields out of *params*."""
        for field in ("MainTaskId", "RequestId"):
            setattr(self, field, params.get(field))
class DescribeStructCompareDataRequest(AbstractModel):
    """DescribeStructCompareData request structure."""

    def __init__(self):
        r"""
        :param MainTaskId: Main task ID.
        :type MainTaskId: str
        :param SubTaskId: Sub-task ID.
        :type SubTaskId: str
        """
        self.MainTaskId = None
        self.SubTaskId = None

    def _deserialize(self, params):
        """Fill this model's fields from the dict *params*."""
        self.MainTaskId = params.get("MainTaskId")
        self.SubTaskId = params.get("SubTaskId")
        # Warn about keys this model does not define (fixes the misspelled
        # local "memeber_set" and the "fileds" typo in the warning message).
        unused = set(params.keys()) - set(vars(self).keys())
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeStructCompareDataResponse(AbstractModel):
    """DescribeStructCompareData response structure."""

    def __init__(self):
        r"""
        :param PolicyId: Policy number.
        :type PolicyId: str
        :param MainTaskId: Main task ID.
        :type MainTaskId: str
        :param CustomerId: Customer ID. May be returned as null.
        :type CustomerId: str
        :param CustomerName: Customer name. May be returned as null.
        :type CustomerName: str
        :param ReviewTime: Manual review time.
        :type ReviewTime: str
        :param MachineResult: Algorithm (machine) recognition result.
        :type MachineResult: str
        :param ManualResult: Manual review result.
        :type ManualResult: str
        :param Metrics: Structured-comparison metric data.
        :type Metrics: :class:`tencentcloud.cii.v20210408.models.CompareMetricsData`
        :param NewItems: Newly added items.
        :type NewItems: str
        :param ModifyItems: Modified items.
        :type ModifyItems: str
        :param SubTaskId: Sub-task ID.
        :type SubTaskId: str
        :param AllTasks: All sub-tasks.
        :type AllTasks: list of ReviewDataTaskInfo
        :param TaskType: Task type.
        :type TaskType: str
        :param RequestId: Unique request ID, returned with every request.
        Provide it when reporting a problem.
        :type RequestId: str
        """
        self.PolicyId = None
        self.MainTaskId = None
        self.CustomerId = None
        self.CustomerName = None
        self.ReviewTime = None
        self.MachineResult = None
        self.ManualResult = None
        self.Metrics = None
        self.NewItems = None
        self.ModifyItems = None
        self.SubTaskId = None
        self.AllTasks = None
        self.TaskType = None
        self.RequestId = None

    def _deserialize(self, params):
        """Copy scalar fields, then parse the nested Metrics and AllTasks."""
        scalar_fields = (
            "PolicyId", "MainTaskId", "CustomerId", "CustomerName",
            "ReviewTime", "MachineResult", "ManualResult", "NewItems",
            "ModifyItems", "SubTaskId", "TaskType", "RequestId")
        for field in scalar_fields:
            setattr(self, field, params.get(field))
        if params.get("Metrics") is not None:
            metrics = CompareMetricsData()
            metrics._deserialize(params.get("Metrics"))
            self.Metrics = metrics
        if params.get("AllTasks") is not None:
            tasks = []
            for item in params.get("AllTasks"):
                task = ReviewDataTaskInfo()
                task._deserialize(item)
                tasks.append(task)
            self.AllTasks = tasks
class DescribeStructureResultRequest(AbstractModel):
    """DescribeStructureResult request structure."""

    def __init__(self):
        r"""
        :param MainTaskId: Main task ID returned when the task was created.
        :type MainTaskId: str
        """
        self.MainTaskId = None

    def _deserialize(self, params):
        """Fill this model's fields from the dict *params*."""
        self.MainTaskId = params.get("MainTaskId")
        # Warn about keys this model does not define (fixes the misspelled
        # local "memeber_set" and the "fileds" typo in the warning message).
        unused = set(params.keys()) - set(vars(self).keys())
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeStructureResultResponse(AbstractModel):
    """DescribeStructureResult response structure."""

    def __init__(self):
        r"""
        :param Status: Result status: 0 = success, 1 = result not generated
        yet, 2 = result generation failed.
        :type Status: int
        :param Results: Structured results.
        :type Results: list of StructureResultObject
        :param RequestId: Unique request ID, returned with every request.
        Provide it when reporting a problem.
        :type RequestId: str
        """
        self.Status = None
        self.Results = None
        self.RequestId = None

    def _deserialize(self, params):
        """Copy the status fields and parse the nested result objects."""
        self.Status = params.get("Status")
        raw_results = params.get("Results")
        if raw_results is not None:
            parsed = []
            for item in raw_results:
                result = StructureResultObject()
                result._deserialize(item)
                parsed.append(result)
            self.Results = parsed
        self.RequestId = params.get("RequestId")
class DescribeStructureTaskResultRequest(AbstractModel):
    """DescribeStructureTaskResult request structure."""

    def __init__(self):
        r"""
        :param MainTaskId: ID of the structuring task.
        :type MainTaskId: str
        """
        self.MainTaskId = None

    def _deserialize(self, params):
        """Fill this model's fields from the dict *params*."""
        self.MainTaskId = params.get("MainTaskId")
        # Warn about keys this model does not define (fixes the misspelled
        # local "memeber_set" and the "fileds" typo in the warning message).
        unused = set(params.keys()) - set(vars(self).keys())
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeStructureTaskResultResponse(AbstractModel):
    """DescribeStructureTaskResult response structure."""

    def __init__(self):
        r"""
        :param Status: Result status: 0 = success, 1 = result not generated
        yet, 2 = result generation failed.
        :type Status: int
        :param Results: Structured recognition results, one element per input
        image, in the same order as the request's ImageList or FileList.
        May be returned as null.
        :type Results: list of ResultObject
        :param RequestId: Unique request ID, returned with every request.
        Provide it when reporting a problem.
        :type RequestId: str
        """
        self.Status = None
        self.Results = None
        self.RequestId = None

    def _deserialize(self, params):
        """Copy the status fields and parse the per-image result objects."""
        self.Status = params.get("Status")
        raw_results = params.get("Results")
        if raw_results is not None:
            parsed = []
            for item in raw_results:
                result = ResultObject()
                result._deserialize(item)
                parsed.append(result)
            self.Results = parsed
        self.RequestId = params.get("RequestId")
class DescribeStructureTaskResultTestRequest(AbstractModel):
    """DescribeStructureTaskResultTest request structure."""

    def __init__(self):
        r"""
        :param MainTaskId: ID of the structuring task.
        :type MainTaskId: str
        """
        self.MainTaskId = None

    def _deserialize(self, params):
        """Fill this model's fields from the dict *params*."""
        self.MainTaskId = params.get("MainTaskId")
        # Warn about keys this model does not define (fixes the misspelled
        # local "memeber_set" and the "fileds" typo in the warning message).
        unused = set(params.keys()) - set(vars(self).keys())
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeStructureTaskResultTestResponse(AbstractModel):
    """DescribeStructureTaskResultTest response structure."""

    def __init__(self):
        r"""
        :param Status: Result status: 0 = success, 1 = result not generated
        yet, 2 = result generation failed.
        :type Status: int
        :param Results: Structured recognition results, one element per input
        image, in the same order as the request's ImageList or FileList.
        May be returned as null.
        :type Results: list of ResultObject
        :param RequestId: Unique request ID, returned with every request.
        Provide it when reporting a problem.
        :type RequestId: str
        """
        self.Status = None
        self.Results = None
        self.RequestId = None

    def _deserialize(self, params):
        """Copy the status fields and parse the per-image result objects."""
        self.Status = params.get("Status")
        raw_results = params.get("Results")
        if raw_results is not None:
            parsed = []
            for item in raw_results:
                result = ResultObject()
                result._deserialize(item)
                parsed.append(result)
            self.Results = parsed
        self.RequestId = params.get("RequestId")
class ResultObject(AbstractModel):
"""用于返回结构化任务结果
"""
def __init__(self):
r"""
:param Quality: 图片质量分
:type Quality: float
:param StructureResult: 由结构化算法结构化json转换的字符串,具体协议参见算法结构化结果协议
:type StructureResult: str
"""
self.Quality = None
self.StructureResult = None
def _deserialize(self, params):
self.Quality = params.get("Quality")
self.StructureResult = params.get("StructureResult")
memeber_set = set(params.keys())
| |
<filename>tool/tests/clusterfuzz/reproducers_test.py
"""Test the reproducers."""
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import mock
from clusterfuzz import common
from clusterfuzz import output_transformer
from clusterfuzz import reproducers
from error import error
from tests import libs
from test_libs import helpers
def create_reproducer(klass):
  """Build a reproducer of type *klass* wired up with stub collaborators."""
  # Stub binary provider pointing at fake build artifacts.
  provider = mock.Mock(symbolizer_path='/path/to/symbolizer')
  provider.get_binary_path.return_value = '/fake/build_dir/test_binary'
  provider.get_build_dir_path.return_value = '/fake/build_dir'

  # Minimal testcase stub carrying the attributes reproducers read.
  fake_testcase = mock.Mock(
      gestures=None,
      stacktrace_lines=[{'content': 'line'}],
      job_type='job_type',
      reproduction_args='--original',
      android_package_name='package')
  fake_testcase.get_testcase_path.return_value = '/fake/testcase_dir/testcase'

  result = klass(
      definition=mock.Mock(),
      binary_provider=provider,
      testcase=fake_testcase,
      sanitizer='UBSAN',
      options=libs.make_options(target_args='--test'))
  result.args = '--always-opt'
  result.environment = {}
  result.source_directory = '/fake/source_dir'
  result.original_testcase_path = '/fake/original_testcase_dir/testcase'
  return result
class SetUpSymbolizersSuppressionsTest(helpers.ExtendedTestCase):
  """Tests the set_up_symbolizers_suppressions method."""

  def setUp(self):
    # Fake filesystem so resource files can be created without touching disk;
    # get_resource is patched so resource paths resolve inside the fake tree.
    self.setup_fake_filesystem()
    helpers.patch(self, ['clusterfuzz.common.get_resource'])

  def test_set_up_correct_env(self):
    """Ensures all the setup methods work correctly."""
    root_path = '/fake'
    # Resource files that set_up_symbolizers_suppressions is expected to find.
    self.fs.CreateFile('/fake/resources/llvm-symbolizer', contents='t')
    self.fs.CreateFile(
        '/fake/resources/suppressions/lsan_suppressions.txt', contents='t')
    self.fs.CreateFile(
        '/fake/resources/suppressions/ubsan_suppressions.txt', contents='t')

    # Resolve any get_resource(...) call into the fake resource tree.
    def get(_, *paths):
      return os.path.join(root_path, *paths)

    self.mock.get_resource.side_effect = get

    self.binary_provider = mock.Mock()
    self.definition = mock.Mock()
    self.testcase = mock.Mock(
        gestures=None,
        stacktrace_lines=[{
            'content': 'line'
        }],
        job_type='job_type',
        reproduction_args='--orig')
    self.reproducer = reproducers.BaseReproducer(
        self.definition,
        self.binary_provider,
        self.testcase,
        'UBSAN',
        libs.make_options(target_args='--test'))

    # Pre-existing env entries deliberately carry wrong symbolizer and
    # suppression paths; the method under test must rewrite them.
    self.reproducer.environment = {
        'UBSAN_OPTIONS': ('external_symbolizer_path=/not/correct/path:other_'
                          'option=1:suppressions=/not/correct/path:'
                          'coverage_dir=test'),
        'CFI_OPTIONS': ('external_symbolizer_path=/not/correct/path:other_'
                        'option=1:suppressions=/not/correct/path'),
        'LSAN_OPTIONS':
            'other=0:suppressions=not/correct/path:option=1'
    }
    self.reproducer.set_up_symbolizers_suppressions()
    result = self.reproducer.environment
    # Parse the *_OPTIONS strings back into dicts so the comparison below is
    # independent of the (unspecified) key order in the serialized form.
    for i in result:
      if '_OPTIONS' in i:
        result[i] = reproducers.deserialize_sanitizer_options(result[i])
    self.assertEqual(
        result, {
            'UBSAN_OPTIONS': {
                'external_symbolizer_path':
                    '%s/resources/llvm-symbolizer' % root_path,
                'other_option':
                    '1',
                'suppressions': (
                    '%s/resources/suppressions/ubsan_suppressions.txt' %
                    root_path)
            },
            'CFI_OPTIONS': {
                'external_symbolizer_path':
                    '%s/resources/llvm-symbolizer' % root_path,
                'other_option':
                    '1',
                'suppressions': (
                    '%s/resources/suppressions/ubsan_suppressions.txt' %
                    root_path)
            },
            'LSAN_OPTIONS': {
                'other':
                    '0',
                'suppressions': (
                    '%s/resources/suppressions/lsan_suppressions.txt' %
                    root_path),
                'option':
                    '1'
            },
            'UBSAN_SYMBOLIZER_PATH':
                '%s/resources/llvm-symbolizer' % root_path,
            'DISPLAY':
                ':0.0'
        })
class SanitizerOptionsSerializerTest(helpers.ExtendedTestCase):
  """Round-trip tests for the sanitizer-options (de)serializer."""

  def test_serialize(self):
    """A dict of options becomes a ':'-joined key=value string."""
    options = {
        'suppressions': '/a/b/c/d/suppresions.txt',
        'option': '1',
        'symbolizer': 'abcde/llvm-symbolizer'
    }
    expected = ('suppressions=/a/b/c/d/suppresions.txt:option=1'
                ':symbolizer=abcde/llvm-symbolizer')
    self.assertEqual(expected,
                     reproducers.serialize_sanitizer_options(options))

  def test_deserialize(self):
    """A ':'-joined key=value string parses back into a dict."""
    serialized = ('suppressions=/a/b/c/d/suppresions.txt:option=1'
                  ':symbolizer=abcde/llvm-symbolizer')
    expected = {
        'suppressions': '/a/b/c/d/suppresions.txt',
        'option': '1',
        'symbolizer': 'abcde/llvm-symbolizer'
    }
    self.assertEqual(expected,
                     reproducers.deserialize_sanitizer_options(serialized))
class ReproduceCrashTest(helpers.ExtendedTestCase):
  """Tests the reproduce_crash method."""

  def setUp(self):
    """Stub out process execution, stdin, the X display and symbolization."""
    self.setup_fake_filesystem()
    helpers.patch(self, [
        'clusterfuzz.common.start_execute',
        'clusterfuzz.common.wait_execute',
        'clusterfuzz.common.execute',
        'clusterfuzz.common.UserStdin',
        'clusterfuzz.reproducers.LinuxChromeJobReproducer.run_gestures',
        'clusterfuzz.reproducers.LinuxChromeJobReproducer.get_testcase_path',
        'clusterfuzz.reproducers.Xvfb.__enter__',
        'clusterfuzz.reproducers.Xvfb.__exit__',
        'clusterfuzz.reproducers.symbolize',
        'clusterfuzz.common.get_resource',
    ])
    self.mock.get_resource.return_value = (
        '/chrome/source/folder/llvm-symbolizer')
    # Executions succeed (exit code 0) with canned output, and symbolization
    # returns a fixed marker so tests can assert it was applied.
    self.mock.wait_execute.return_value = (0, 'lines')
    self.mock.symbolize.return_value = 'symbolized'
    self.app_directory = '/chrome/source/folder'
    self.testcase_path = os.path.expanduser(
        os.path.join('~', '.clusterfuzz', '1234_testcase', 'testcase.js'))
    self.mock.get_testcase_path.return_value = self.testcase_path
    self.definition = mock.Mock()

  def test_base(self):
    """Test base's reproduce_crash."""
    mocked_testcase = mock.Mock(
        id=1234,
        reproduction_args='--repro',
        environment={'ASAN_OPTIONS': 'test-asan'},
        gestures=None,
        stacktrace_lines=[{
            'content': 'line'
        }],
        job_type='job_type')
    mocked_testcase.get_testcase_path.return_value = self.testcase_path
    mocked_provider = mock.Mock(
        symbolizer_path='%s/llvm-symbolizer' % self.app_directory)
    mocked_provider.get_binary_path.return_value = '%s/d8' % self.app_directory
    mocked_provider.get_build_dir_path.return_value = self.app_directory
    reproducer = reproducers.BaseReproducer(
        self.definition,
        mocked_provider,
        mocked_testcase,
        'UBSAN',
        libs.make_options(target_args='--test'))
    reproducer.setup_args()
    reproducer.reproduce_crash()
    # The binary is executed once, with the extra target args and the
    # testcase path appended and the testcase's environment passed through.
    self.assert_exact_calls(self.mock.execute, [
        mock.call(
            '/chrome/source/folder/d8',
            '--repro --test %s' % self.testcase_path,
            '/chrome/source/folder',
            env={'ASAN_OPTIONS': 'test-asan'},
            exit_on_error=False,
            timeout=30,
            stdout_transformer=mock.ANY,
            redirect_stderr_to_stdout=True,
            stdin=self.mock.UserStdin.return_value,
            read_buffer_length=1)
    ])

  def test_base_with_env_args(self):
    """Test base's reproduce_crash with environment args."""
    # %APP_DIR% and %TESTCASE% placeholders in reproduction_args should be
    # substituted with the build dir and testcase path respectively.
    mocked_testcase = mock.Mock(
        id=1234,
        reproduction_args='--app-dir=%APP_DIR% --testcase=%TESTCASE%',
        environment={'ASAN_OPTIONS': 'test-asan'},
        gestures=None,
        stacktrace_lines=[{
            'content': 'line'
        }],
        job_type='job_type')
    mocked_testcase.get_testcase_path.return_value = self.testcase_path
    mocked_provider = mock.Mock(
        symbolizer_path='%s/llvm-symbolizer' % self.app_directory)
    mocked_provider.get_binary_path.return_value = '%s/d8' % self.app_directory
    mocked_provider.get_build_dir_path.return_value = self.app_directory
    reproducer = reproducers.BaseReproducer(
        self.definition,
        mocked_provider,
        mocked_testcase,
        'UBSAN',
        libs.make_options(target_args='--test'))
    reproducer.setup_args()
    reproducer.reproduce_crash()
    self.assert_exact_calls(self.mock.execute, [
        mock.call(
            '/chrome/source/folder/d8',
            '--app-dir=%s --testcase=%s --test' %
            (self.app_directory, self.testcase_path),
            '/chrome/source/folder',
            env={'ASAN_OPTIONS': 'test-asan'},
            exit_on_error=False,
            timeout=30,
            stdout_transformer=mock.ANY,
            redirect_stderr_to_stdout=True,
            stdin=self.mock.UserStdin.return_value,
            read_buffer_length=1)
    ])

  def test_chromium(self):
    """Test chromium's reproduce_crash."""
    self.mock.start_execute.return_value = mock.Mock()
    # Xvfb.__enter__ provides the DISPLAY value used by the reproducer.
    self.mock.__enter__.return_value = ':display'
    mocked_testcase = mock.Mock(
        id=1234,
        reproduction_args='--repro',
        environment={'ASAN_OPTIONS': 'test-asan'},
        gestures=None,
        stacktrace_lines=[{
            'content': 'line'
        }],
        job_type='job_type')
    mocked_testcase.get_testcase_path.return_value = self.testcase_path
    mocked_provider = mock.Mock(
        symbolizer_path='%s/llvm-symbolizer' % self.app_directory)
    mocked_provider.get_binary_path.return_value = '%s/d8' % self.app_directory
    mocked_provider.get_build_dir_path.return_value = self.app_directory
    reproducer = reproducers.LinuxChromeJobReproducer(
        self.definition,
        mocked_provider,
        mocked_testcase,
        'UBSAN',
        libs.make_options(target_args='--test'))
    reproducer.gestures = ['gesture,1', 'gesture,2']
    reproducer.setup_args()
    err, text = reproducer.reproduce_crash()
    # Output comes back through the symbolize stub; exit code is from
    # wait_execute's canned (0, 'lines') return.
    self.assertEqual(err, 0)
    self.assertEqual(text, 'symbolized')
    self.assert_exact_calls(self.mock.start_execute, [
        mock.call(
            '/chrome/source/folder/d8',
            '--repro --test %s' % self.testcase_path,
            '/chrome/source/folder',
            env={
                'DISPLAY': ':display',
                'ASAN_OPTIONS': 'test-asan',
            },
            redirect_stderr_to_stdout=True,
            stdin=self.mock.UserStdin.return_value)
    ])
    self.assert_exact_calls(self.mock.wait_execute, [
        mock.call(
            self.mock.start_execute.return_value,
            exit_on_error=False,
            timeout=30,
            stdout_transformer=mock.ANY,
            read_buffer_length=1)
    ])
    # Gestures run against the started process on the stubbed display.
    self.assert_exact_calls(self.mock.run_gestures, [
        mock.call(reproducer, self.mock.start_execute.return_value, ':display')
    ])
class SetupArgsTest(helpers.ExtendedTestCase):
  """Test setup_args."""

  def setUp(self):
    """Patch arg editing, testcase-path lookup, and gdb arg rewriting."""
    helpers.patch(self, [
        'clusterfuzz.common.edit_if_needed',
        'clusterfuzz.reproducers.LinuxChromeJobReproducer.get_testcase_path',
        'clusterfuzz.reproducers.update_for_gdb_if_needed',
    ])
    self.testcase = mock.Mock(
        id=1234,
        reproduction_args='--repro',
        environment={'ASAN_OPTIONS': 'test-asan'},
        gestures=None,
        stacktrace_lines=[{
            'content': 'line'
        }],
        job_type='job_type')
    self.testcase_path = os.path.expanduser(
        os.path.join('~', '.clusterfuzz', '1234_testcase', 'testcase.js'))
    self.mock.get_testcase_path.return_value = self.testcase_path
    self.provider = mock.Mock(
        symbolizer_path='/chrome/source/folder/llvm-symbolizer')
    self.provider.get_binary_path.return_value = '/chrome/source/folder/d8'
    self.provider.get_build_dir_path.return_value = '/chrome/source/folder'
    self.definition = mock.Mock()
    # Make the patched helpers pass their inputs through unchanged so the
    # assertions below observe setup_args' own transformations.
    self.mock.update_for_gdb_if_needed.side_effect = (
        lambda binary_path, args, timeout, should_enable_gdb: (binary_path, args, timeout)
    )
    self.mock.edit_if_needed.side_effect = (
        lambda content, prefix, comment, should_edit: content)

  def test_disable_xvfb(self):
    """Test disable xvfb."""
    reproducer = reproducers.LinuxChromeJobReproducer(
        self.definition, self.provider, self.testcase, 'UBSAN',
        libs.make_options(
            disable_xvfb=True,
            target_args='--test --disable-gl-drawing-for-tests'))
    # %TESTCASE_FILE_URL% should be replaced with the local testcase path,
    # and --disable-gl-drawing-for-tests dropped when xvfb is disabled.
    reproducer.args = '--repro %TESTCASE_FILE_URL%'
    reproducer.setup_args()
    self.assertEqual('--repro %s --test' % self.testcase_path, reproducer.args)
    self.mock.update_for_gdb_if_needed.assert_called_once_with(
        reproducer.binary_path, reproducer.args, reproducer.timeout,
        reproducer.options.enable_debug)
    self.mock.edit_if_needed.assert_called_once_with(
        reproducer.args,
        prefix=mock.ANY,
        comment=mock.ANY,
        should_edit=reproducer.options.edit_mode)

  def test_enable_xvfb(self):
    """Test enable xvfb and edit args."""
    reproducer = reproducers.LinuxChromeJobReproducer(
        self.definition, self.provider, self.testcase, 'UBSAN',
        libs.make_options(target_args='--test', edit_mode=True))
    reproducer.setup_args()
    # With no placeholder, the testcase path is appended after the args.
    self.assertEqual('--repro --test %s' % self.testcase_path, reproducer.args)
    self.mock.update_for_gdb_if_needed.assert_called_once_with(
        reproducer.binary_path, reproducer.args, reproducer.timeout,
        reproducer.options.enable_debug)
    self.mock.edit_if_needed.assert_called_once_with(
        reproducer.args,
        prefix=mock.ANY,
        comment=mock.ANY,
        should_edit=reproducer.options.edit_mode)
class LinuxChromeJobReproducerTest(helpers.ExtendedTestCase):
  """Tests the extra functions of LinuxUbsanChromeReproducer."""

  def setUp(self):
    """Stub base pre-build steps, user-data-dir and layout-test helpers."""
    helpers.patch(self, [
        'clusterfuzz.reproducers.BaseReproducer.pre_build_steps',
        'clusterfuzz.reproducers.ensure_user_data_dir_if_needed',
        'clusterfuzz.reproducers.update_testcase_path_in_layout_test',
        'clusterfuzz.common.get_resource',
        'pyfakefs.fake_filesystem.FakeFilesystem.RenameObject',
    ])
    self.mock.get_resource.return_value = 'llvm'
    # Append a marker so tests can verify the user-data-dir handling ran
    # on the given args.
    self.mock.ensure_user_data_dir_if_needed.side_effect = (
        lambda args, require_user_data_dir: args + ' --test-user-data-dir')
    self.reproducer = create_reproducer(reproducers.LinuxChromeJobReproducer)
    self.reproducer.definition.require_user_data_dir = False
    self.reproducer.original_testcase_path = '/fake/LayoutTests/testcase'

  def test_reproduce_crash(self):
    """Ensures pre-build steps run correctly."""
    # NOTE(review): despite its name, this exercises pre_build_steps.
    self.reproducer.pre_build_steps()
    self.assert_exact_calls(self.mock.pre_build_steps,
                            [mock.call(self.reproducer)])
    self.assertEqual(self.reproducer.args, '--always-opt --test-user-data-dir')
    self.mock.ensure_user_data_dir_if_needed.assert_called_once_with(
        '--always-opt', False)

  def test_get_testcase_path(self):
    """Tests get_testcase_path."""
    # The layout-test rewriter's result is returned verbatim.
    self.mock.update_testcase_path_in_layout_test.return_value = 'new-path'
    self.assertEqual('new-path', self.reproducer.get_testcase_path())
    self.mock.update_testcase_path_in_layout_test.assert_called_once_with(
        self.reproducer.testcase.get_testcase_path(),
        self.reproducer.original_testcase_path,
        self.reproducer.source_directory, self.reproducer.testcase.created_at)
class XdotoolCommandTest(helpers.ExtendedTestCase):
  """Tests the xdotool_command method."""

  def setUp(self):
    helpers.patch(
        self, ['clusterfuzz.common.execute', 'clusterfuzz.common.BlockStdin'])
    self.reproducer = create_reproducer(reproducers.LinuxChromeJobReproducer)

  def test_call(self):
    """xdotool is executed with the command and the target DISPLAY."""
    display = ':2753'
    self.reproducer.xdotool_command('command to run', display)
    expected_call = mock.call(
        'xdotool',
        'command to run',
        '.',
        env={'DISPLAY': display},
        stdin=self.mock.BlockStdin.return_value)
    self.assert_exact_calls(self.mock.execute, [expected_call])
class FindWindowsForProcessTest(helpers.ExtendedTestCase):
  """Tests the find_windows_for_process method."""

  def setUp(self):
    helpers.patch(self, [
        'clusterfuzz.reproducers.LinuxChromeJobReproducer.get_process_ids',
        'clusterfuzz.common.execute', 'time.sleep'
    ])
    self.reproducer = create_reproducer(reproducers.LinuxChromeJobReproducer)

  def test_no_pids(self):
    """No window lookup is attempted when there are no PIDs."""
    self.mock.get_process_ids.return_value = []
    self.reproducer.find_windows_for_process(1234, ':45434')
    self.assert_n_calls(0, [self.mock.execute])

  def test_dedup_pids(self):
    """Window ids from several PIDs are merged, deduplicated and filtered."""
    self.mock.get_process_ids.return_value = [1234, 5678]
    # '567' appears in both outputs and 'abcd' is non-numeric; both should
    # collapse/drop in the resulting set.
    self.mock.execute.side_effect = [(0, '234\n567\nabcd\n890'),
                                     (0, '123\n567\n345')]
    windows = self.reproducer.find_windows_for_process(1234, ':45434')
    self.assertEqual(windows, {'234', '567', '890', '123', '345'})
    self.assert_exact_calls(self.mock.sleep, [mock.call(30)])
class GetProcessIdsTest(helpers.ExtendedTestCase):
  """Tests the get_process_ids method."""

  def setUp(self):
    helpers.patch(self, ['psutil.Process', 'psutil.pid_exists'])
    self.reproducer = create_reproducer(reproducers.LinuxChromeJobReproducer)

  def test_process_not_running(self):
    """An empty list is returned, without querying psutil, for a dead PID."""
    self.mock.pid_exists.return_value = False
    self.assertEqual(self.reproducer.get_process_ids(1234), [])
    self.assert_n_calls(0, [self.mock.Process])

  def test_psutil_working(self):
    """The parent PID is returned followed by all child PIDs."""
    self.mock.pid_exists.return_value = True
    process = mock.Mock()
    process.children.return_value = [mock.Mock(pid=123), mock.Mock(pid=456)]
    self.mock.Process.return_value = process
    self.assertEqual(self.reproducer.get_process_ids(1234), [1234, 123, 456])

  def _raise(self, _):
    # Helper side_effect: simulate psutil blowing up.
    raise Exception('Oops')

  def test_exception_handling(self):
    """Exceptions raised by psutil propagate to the caller."""
    self.mock.Process.side_effect = self._raise
    with self.assertRaises(Exception):
      self.reproducer.get_process_ids(1234)
class RunGesturesTest(helpers.ExtendedTestCase):
  """Tests the run_gestures method."""

  def setUp(self):
    """Stub gesture timing, window discovery and the xdotool helpers."""
    helpers.patch(self, [
        'time.sleep',
        ('clusterfuzz.reproducers.LinuxChromeJobReproducer.get_gesture_start_'
         'time'),
        ('clusterfuzz.reproducers.LinuxChromeJobReproducer.find_windows_for'
         '_process'),
        'clusterfuzz.reproducers.LinuxChromeJobReproducer.xdotool_command',
        'clusterfuzz.reproducers.LinuxChromeJobReproducer.execute_gesture'
    ])
    self.reproducer = create_reproducer(reproducers.LinuxChromeJobReproducer)
    self.mock.get_gesture_start_time.return_value = 5
    self.mock.find_windows_for_process.return_value = ['123']
    self.reproducer.gestures = [
        'windowsize,2', "type,'ValeM1khbW4Gt!'", 'Trigger:2'
    ]
    self.reproducer.gesture_start_time = 5

  def test_execute_gestures(self):
    """The found window is activated and gestures run after the delay."""
    self.reproducer.run_gestures(mock.Mock(pid=1234), ':display')
    self.assert_exact_calls(
        self.mock.xdotool_command,
        [mock.call(self.reproducer, 'windowactivate --sync 123', ':display')])
    self.assert_exact_calls(self.mock.sleep, [mock.call(5)])
class GetGestureStartTimeTest(helpers.ExtendedTestCase):
  """Test the get_gesture_start_time method."""

  def setUp(self):
    self.reproducer = create_reproducer(reproducers.LinuxChromeJobReproducer)

  def test_with_trigger(self):
    """A Trigger gesture supplies the start time."""
    self.reproducer.gestures = [
        'windowsize,2', "type,'ValeM1khbW4Gt!'", 'Trigger:2'
    ]
    self.assertEqual(self.reproducer.get_gesture_start_time(), 2)

  def test_no_trigger(self):
    """Without a Trigger gesture the default start time (5) is returned."""
    self.reproducer.gestures = ['windowsize,2', "type,'ValeM1khbW4Gt!'"]
    self.assertEqual(self.reproducer.get_gesture_start_time(), 5)
class ExecuteGestureTest(helpers.ExtendedTestCase):
  """Test the execute_gesture method."""

  def setUp(self):
    helpers.patch(
        self,
        ['clusterfuzz.reproducers.LinuxChromeJobReproducer.xdotool_command'])
    self.reproducer = create_reproducer(reproducers.LinuxChromeJobReproducer)
    self.reproducer.gestures = ['windowsize,2', "type,'ValeM1khbW4Gt!'"]

  def test_call_execute_gesture(self):
    """Each gesture string parses into the matching xdotool invocation."""
    window_id = '12345'
    for gesture in self.reproducer.gestures:
      self.reproducer.execute_gesture(gesture, window_id, ':display')
    self.assert_exact_calls(self.mock.xdotool_command, [
        mock.call(self.reproducer, 'windowsize 12345 2', ':display'),
        mock.call(self.reproducer, "type -- 'ValeM1khbW4Gt!'", ':display')
    ])
class XvfbTest(helpers.ExtendedTestCase):
"""Used to test the Xvfb context manager."""
def setUp(self):
  """Patch the xvfbwrapper wrapper, blackbox's Popen and time.sleep."""
  helpers.patch(self, ['xvfbwrapper.Xvfb', 'subprocess.Popen', 'time.sleep'])
def test_correct_oserror_exception(self):
  """Ensures the correct exception is raised when Xvfb is not found."""

  def _fail_popen(*_args, **_kwargs):
    # Mimic the exact failure mode of launching a missing binary.
    raise OSError('[Errno 2] No such file or directory')

  self.mock.Popen.side_effect = _fail_popen
  self.mock.Xvfb.return_value = mock.Mock(
      xvfb_cmd=['not_display', ':display'])
  with self.assertRaises(error.NotInstalledError):
    with reproducers.Xvfb(False) as display_name:
      self.assertNotEqual(display_name, ':display')
  # Nothing to clean up: blackbox never started.
  self.assert_n_calls(0, [
      self.mock.Popen.return_value.kill, self.mock.sleep,
      self.mock.Xvfb.return_value.stop
  ])
def test_incorrect_oserror_exception(self):
  """Ensures OSError raises when message is not Errno 2."""
  self.mock.Xvfb.return_value = mock.Mock(
      xvfb_cmd=['not_display', ':display'])
  # A bare OSError (no Errno 2 message) must propagate unchanged.
  self.mock.Popen.side_effect = OSError
  with self.assertRaises(OSError):
    with reproducers.Xvfb(False) as display_name:
      self.assertNotEqual(display_name, ':display')
  self.assert_n_calls(0, [
      self.mock.Popen.return_value.kill, self.mock.sleep,
      self.mock.Xvfb.return_value.stop
  ])
def test_start_stop_blackbox(self):
"""Tests that the context manager starts/stops xvfbwrapper and blackbox."""
self.mock.Xvfb.return_value = mock.Mock(
xvfb_cmd=['not_display', ':display'])
with reproducers.Xvfb(False) as display_name:
self.assertEqual(display_name, ':display')
self.assert_exact_calls(self.mock.Xvfb,
[mock.call(width=1280, height=1024)])
self.assert_exact_calls(self.mock.Xvfb.return_value.start, [mock.call()])
self.assert_exact_calls(self.mock.Xvfb.return_value.stop,
[mock.call.stop()])
self.assert_exact_calls(
self.mock.Popen, [mock.call(['blackbox'], env={
'DISPLAY': ':display'
})])
self.assert_exact_calls(self.mock.Popen.return_value.kill, | |
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.252667,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.45786,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.069908,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.257597,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.39967,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.19912,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.321173,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.162117,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.682411,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.166461,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.84012,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0755062,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00835199,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0856676,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0617681,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.161174,
'Execution Unit/Register Files/Runtime Dynamic': 0.07012,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.197958,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.464346,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.87985,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000792122,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000792122,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000699709,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000276214,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000887303,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00317126,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00724564,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0593792,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.77703,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.156433,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.201679,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.17885,
'Instruction Fetch Unit/Runtime Dynamic': 0.427907,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.034022,
'L2/Runtime Dynamic': 0.00755801,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.85623,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.784692,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0523822,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0523822,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.10359,
'Load Store Unit/Runtime Dynamic': 1.09541,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.129166,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.258331,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0458413,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0463346,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.234842,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0256967,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.469698,
'Memory Management Unit/Runtime Dynamic': 0.0720314,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 18.2158,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.198623,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0114009,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0981942,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming | |
# main imports
import numpy as np
import sys
# image transform imports
from PIL import Image
from skimage import color
from sklearn.decomposition import FastICA
from sklearn.decomposition import IncrementalPCA
from sklearn.decomposition import TruncatedSVD
from numpy.linalg import svd as lin_svd
from scipy.signal import medfilt2d, wiener, cwt
import pywt
import cv2
from ipfml.processing import transform, compression, segmentation
from ipfml.filters import convolution, kernels
from ipfml import utils
# modules and config imports
sys.path.insert(0, '') # trick to enable import of main folder module
import custom_config as cfg
from modules.utils import data as dt
def get_image_features(data_type, block):
"""
Method which returns the data type expected
"""
if data_type == 'lab':
block_file_path = '/tmp/lab_img.png'
block.save(block_file_path)
data = transform.get_LAB_L_SVD_s(Image.open(block_file_path))
if data_type == 'mscn':
img_mscn_revisited = transform.rgb_to_mscn(block)
# save tmp as img
img_output = Image.fromarray(img_mscn_revisited.astype('uint8'), 'L')
mscn_revisited_file_path = '/tmp/mscn_revisited_img.png'
img_output.save(mscn_revisited_file_path)
img_block = Image.open(mscn_revisited_file_path)
# extract from temp image
data = compression.get_SVD_s(img_block)
"""if data_type == 'mscn':
img_gray = np.array(color.rgb2gray(np.asarray(block))*255, 'uint8')
img_mscn = transform.calculate_mscn_coefficients(img_gray, 7)
img_mscn_norm = transform.normalize_2D_arr(img_mscn)
img_mscn_gray = np.array(img_mscn_norm*255, 'uint8')
data = compression.get_SVD_s(img_mscn_gray)
"""
if data_type == 'low_bits_6':
low_bits_6 = transform.rgb_to_LAB_L_low_bits(block, 6)
data = compression.get_SVD_s(low_bits_6)
if data_type == 'low_bits_5':
low_bits_5 = transform.rgb_to_LAB_L_low_bits(block, 5)
data = compression.get_SVD_s(low_bits_5)
if data_type == 'low_bits_4':
low_bits_4 = transform.rgb_to_LAB_L_low_bits(block, 4)
data = compression.get_SVD_s(low_bits_4)
if data_type == 'low_bits_3':
low_bits_3 = transform.rgb_to_LAB_L_low_bits(block, 3)
data = compression.get_SVD_s(low_bits_3)
if data_type == 'low_bits_2':
low_bits_2 = transform.rgb_to_LAB_L_low_bits(block, 2)
data = compression.get_SVD_s(low_bits_2)
if data_type == 'low_bits_4_shifted_2':
data = compression.get_SVD_s(transform.rgb_to_LAB_L_bits(block, (3, 6)))
if data_type == 'sub_blocks_stats':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 4), int(height / 4)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
# get information we want from svd
data.append(np.mean(l_svd_data))
data.append(np.median(l_svd_data))
data.append(np.percentile(l_svd_data, 25))
data.append(np.percentile(l_svd_data, 75))
data.append(np.var(l_svd_data))
area_under_curve = utils.integral_area_trapz(l_svd_data, dx=100)
data.append(area_under_curve)
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'sub_blocks_stats_reduced':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 4), int(height / 4)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
# get information we want from svd
data.append(np.mean(l_svd_data))
data.append(np.median(l_svd_data))
data.append(np.percentile(l_svd_data, 25))
data.append(np.percentile(l_svd_data, 75))
data.append(np.var(l_svd_data))
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'sub_blocks_area':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 8), int(height / 8)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
data.append(area_under_curve)
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'sub_blocks_area_normed':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 8), int(height / 8)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
l_svd_data = utils.normalize_arr(l_svd_data)
area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
data.append(area_under_curve)
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'mscn_var_4':
data = _get_mscn_variance(block, (100, 100))
if data_type == 'mscn_var_16':
data = _get_mscn_variance(block, (50, 50))
if data_type == 'mscn_var_64':
data = _get_mscn_variance(block, (25, 25))
if data_type == 'mscn_var_16_max':
data = _get_mscn_variance(block, (50, 50))
data = np.asarray(data)
size = int(len(data) / 4)
indices = data.argsort()[-size:][::-1]
data = data[indices]
if data_type == 'mscn_var_64_max':
data = _get_mscn_variance(block, (25, 25))
data = np.asarray(data)
size = int(len(data) / 4)
indices = data.argsort()[-size:][::-1]
data = data[indices]
if data_type == 'ica_diff':
current_image = transform.get_LAB_L(block)
ica = FastICA(n_components=50)
ica.fit(current_image)
image_ica = ica.fit_transform(current_image)
image_restored = ica.inverse_transform(image_ica)
final_image = utils.normalize_2D_arr(image_restored)
final_image = np.array(final_image * 255, 'uint8')
sv_values = utils.normalize_arr(compression.get_SVD_s(current_image))
ica_sv_values = utils.normalize_arr(compression.get_SVD_s(final_image))
data = abs(np.array(sv_values) - np.array(ica_sv_values))
if data_type == 'svd_trunc_diff':
current_image = transform.get_LAB_L(block)
svd = TruncatedSVD(n_components=30, n_iter=100, random_state=42)
transformed_image = svd.fit_transform(current_image)
restored_image = svd.inverse_transform(transformed_image)
reduced_image = (current_image - restored_image)
U, s, V = compression.get_SVD(reduced_image)
data = s
if data_type == 'ipca_diff':
current_image = transform.get_LAB_L(block)
transformer = IncrementalPCA(n_components=20, batch_size=25)
transformed_image = transformer.fit_transform(current_image)
restored_image = transformer.inverse_transform(transformed_image)
reduced_image = (current_image - restored_image)
U, s, V = compression.get_SVD(reduced_image)
data = s
if data_type == 'svd_reconstruct':
reconstructed_interval = (90, 200)
begin, end = reconstructed_interval
lab_img = transform.get_LAB_L(block)
lab_img = np.array(lab_img, 'uint8')
U, s, V = lin_svd(lab_img, full_matrices=True)
smat = np.zeros((end-begin, end-begin), dtype=complex)
smat[:, :] = np.diag(s[begin:end])
output_img = np.dot(U[:, begin:end], np.dot(smat, V[begin:end, :]))
output_img = np.array(output_img, 'uint8')
data = compression.get_SVD_s(output_img)
if 'sv_std_filters' in data_type:
# convert into lab by default to apply filters
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
# Apply list of filter on arr
images.append(medfilt2d(arr, [3, 3]))
images.append(medfilt2d(arr, [5, 5]))
images.append(wiener(arr, [3, 3]))
images.append(wiener(arr, [5, 5]))
# By default computation of current block image
s_arr = compression.get_SVD_s(arr)
sv_vector = [s_arr]
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_array = np.array(sv_vector)
_, length = sv_array.shape
sv_std = []
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
data = s_arr[indices]
# with the use of wavelet
if 'wave_sv_std_filters' in data_type:
# convert into lab by default to apply filters
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
# Apply list of filter on arr
images.append(medfilt2d(arr, [3, 3]))
# By default computation of current block image
s_arr = compression.get_SVD_s(arr)
sv_vector = [s_arr]
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_array = np.array(sv_vector)
_, length = sv_array.shape
sv_std = []
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
data = s_arr[indices]
# with the use of wavelet
if 'sv_std_filters_full' in data_type:
# convert into lab by default to apply filters
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
# Apply list of filter on arr
kernel = np.ones((3,3),np.float32)/9
images.append(cv2.filter2D(arr,-1,kernel))
kernel = np.ones((5,5),np.float32)/25
images.append(cv2.filter2D(arr,-1,kernel))
images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))
images.append(cv2.GaussianBlur(arr, (3, 3), 1))
images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 1))
images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))
images.append(medfilt2d(arr, [3, 3]))
images.append(medfilt2d(arr, [5, 5]))
images.append(wiener(arr, [3, 3]))
images.append(wiener(arr, [5, 5]))
wave = w2d(arr, 'db1', 2)
images.append(np.array(wave, 'float64'))
# By default computation of current block image
s_arr = compression.get_SVD_s(arr)
sv_vector = [s_arr]
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_array = np.array(sv_vector)
_, length = sv_array.shape
sv_std = []
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
data = s_arr[indices]
if 'sv_entropy_std_filters' in data_type:
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
kernel = np.ones((3,3),np.float32)/9
images.append(cv2.filter2D(arr,-1,kernel))
kernel = np.ones((5,5),np.float32)/25
images.append(cv2.filter2D(arr,-1,kernel))
images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))
images.append(cv2.GaussianBlur(arr, (3, 3), 1))
images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 1))
images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))
images.append(medfilt2d(arr, [3, 3]))
images.append(medfilt2d(arr, [5, 5]))
images.append(wiener(arr, [3, 3]))
images.append(wiener(arr, [5, 5]))
wave = w2d(arr, 'db1', 2)
images.append(np.array(wave, 'float64'))
sv_vector = []
sv_entropy_list = []
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_entropy = [utils.get_entropy_contribution_of_i(s, id_sv) for id_sv, sv in enumerate(s)]
sv_entropy_list.append(sv_entropy)
sv_std = | |
foo, bar, and sus will all treat their return values safely*/"""
elif suffix == "callee":
comm_suffix = """
/*In this test, foo and bar will treat their return values safely, but sus will
not, through invalid pointer arithmetic, an unsafe cast, etc*/"""
elif suffix == "caller":
comm_suffix = """
/*In this test, foo and sus will treat their return values safely, but bar will
not, through invalid pointer arithmetic, an unsafe cast, etc.*/"""
elif suffix == "both":
comm_suffix = """
/*In this test, foo will treat its return value safely, but sus and bar will
not, through invalid pointer arithmetic, an unsafe cast, etc.*/"""
comm_dec = """
/******************************************************************************/
"""
comment = ''.join([
comm_dec, comm_general, comm_prefix, comm_proto, comm_suffix, comm_dec
])
file = open(name, "r")
noallfile = open(cnameNOALL, "r")
allfile = open(cnameALL, "r")
# gather all the lines
lines = str(file.read()).split("\n")
noall = str(noallfile.read()).split("\n")
yeall = str(allfile.read()).split("\n")
file.close()
noallfile.close()
allfile.close()
os.system("rm {} {}".format(cnameNOALL, cnameALL))
    # Prior to the introduction of lower bound inference and automatic lower
    # bounds generation, the initial and converted code should have had the
    # same number of lines. Generated lower bounds introduce new lines,
    # so this is no longer the case.
# assert len(lines) == len(noall) == len(yeall), "fix file " + name
if proto == "multi":
file2 = open(name2, "r")
noallfile2 = open(cname2NOALL, "r")
allfile2 = open(cname2ALL, "r")
# gather all the lines
lines2 = str(file2.read()).split("\n")
noall2 = str(noallfile2.read()).split("\n")
yeall2 = str(allfile2.read()).split("\n")
file2.close()
noallfile2.close()
allfile2.close()
os.system("rm {} {}".format(cname2NOALL, cname2ALL))
        # See earlier comment on why this is disabled.
# assert len(lines2) == len(noall2) == len(yeall2), "fix file " + name
def runtime_cname(s):
assert s.startswith("tmp.")
return "%t." + s[len("tmp."):]
cnameNOALL = runtime_cname(cnameNOALL)
cnameALL = runtime_cname(cnameALL)
cname2NOALL = runtime_cname(cname2NOALL)
cname2ALL = runtime_cname(cname2ALL)
# our keywords that indicate we should add an annotation
keywords = "int char struct double float".split(" ")
ckeywords = "_Ptr _Array_ptr _Nt_array_ptr _Checked _Unchecked".split(" ")
keywords_re = re.compile("\\b(" + "|".join(keywords) + ")\\b")
ckeywords_re = re.compile("\\b(" + "|".join(ckeywords) + ")\\b")
in_extern = False
ye_offset = 0
for i in range(0, len(lines)):
line = lines[i]
noline = noall[i]
yeline = yeall[i + ye_offset]
if "extern" in line:
in_extern = True
if (not in_extern and
((keywords_re.search(line) and line.find("*") != -1) or
ckeywords_re.search(noline) or ckeywords_re.search(yeline))):
indentation = re.search("^\s*", noline).group(0)
# Hack to match how clang-format will indent the CHECK comments,
# even though it would arguably be more logical to leave them at the
# same indentation as the code they refer to.
if noline.endswith("{"):
indentation += " "
if noline == yeline:
lines[i] += "\n" + indentation + "//CHECK: " + noline.lstrip()
else:
lines[i] += ("\n" + indentation + "//CHECK_NOALL: " + noline.lstrip())
lines[i] += ("\n" + indentation + "//CHECK_ALL: " + yeline.lstrip())
# This is a hack needed to properly updated tests where an array
# variable declaration has been duplicated to allow for generating
# fresh lower bound.
if i + ye_offset + 1 < len(yeall):
yeline_next = yeall[i + ye_offset + 1]
if "= __3c_" in yeline_next and "> __3c_" in yeline:
lines[i] += ("\n" + indentation + "//CHECK_ALL: " + yeline_next.lstrip())
ye_offset += 1
if ";" in line:
in_extern = False
if proto == "multi":
in_extern = False
ye_offset = 0
for i in range(0, len(lines2)):
line = lines2[i]
noline = noall2[i]
yeline = yeall2[i + ye_offset]
if "extern" in line:
in_extern = True
if (not in_extern and
((keywords_re.search(line) and line.find("*") != -1) or
ckeywords_re.search(noline) or ckeywords_re.search(yeline))):
indentation = re.search("^\s*", noline).group(0)
if noline.endswith("{"):
indentation += " "
if noline == yeline:
lines2[i] += ("\n" + indentation + "//CHECK: " +
noline.lstrip())
else:
lines2[i] += ("\n" + indentation + "//CHECK_NOALL: " +
noline.lstrip())
lines2[i] += ("\n" + indentation + "//CHECK_ALL: " +
yeline.lstrip())
# See above comment for why this hack is necessary.
if i + ye_offset + 1 < len(yeall2):
yeline_next = yeall2[i + ye_offset + 1]
if "= __3c_" in yeline_next and "> __3c_" in yeline:
lines2[i] += ("\n" + indentation + "//CHECK_ALL: " + yeline_next.lstrip())
ye_offset += 1
if ";" in line:
in_extern = False
run = f"""\
// RUN: rm -rf %t*
// RUN: 3c -base-dir=%S -alltypes -addcr %s -- | FileCheck -match-full-lines -check-prefixes="CHECK_ALL","CHECK" %s
// RUN: 3c -base-dir=%S -addcr %s -- | FileCheck -match-full-lines -check-prefixes="CHECK_NOALL","CHECK" %s
// RUN: 3c -base-dir=%S -addcr %s -- | %clang -c -fcheckedc-extension -x c -o /dev/null -
// RUN: 3c -base-dir=%S -alltypes -output-dir=%t.checked %s --
// RUN: 3c -base-dir=%t.checked -alltypes %t.checked/{name} -- | diff %t.checked/{name} -\
"""
if proto == "multi":
cname = "%t.checked/" + name
cname2 = "%t.checked/" + name2
cnameALLtwice1 = "%t.convert_again/" + name
cnameALLtwice2 = "%t.convert_again/" + name2
run = f"""\
// RUN: rm -rf %t*
// RUN: 3c -base-dir=%S -addcr -alltypes -output-dir=%t.checkedALL %s %S/{name2} --
// RUN: 3c -base-dir=%S -addcr -output-dir=%t.checkedNOALL %s %S/{name2} --
// RUN: %clang -working-directory=%t.checkedNOALL -c {name} {name2}
// RUN: FileCheck -match-full-lines -check-prefixes="CHECK_NOALL","CHECK" --input-file {cnameNOALL} %s
// RUN: FileCheck -match-full-lines -check-prefixes="CHECK_ALL","CHECK" --input-file {cnameALL} %s
// RUN: 3c -base-dir=%S -alltypes -output-dir=%t.checked %S/{name2} %s --
// RUN: 3c -base-dir=%t.checked -alltypes -output-dir=%t.convert_again {cname} {cname2} --
// RUN: test ! -f {cnameALLtwice1}
// RUN: test ! -f {cnameALLtwice2}\
"""
cnameNOALL2 = "%t.checkedNOALL2/" + name
cnameALL2 = "%t.checkedALL2/" + name
cname2NOALL2 = "%t.checkedNOALL2/" + name2
cname2ALL2 = "%t.checkedALL2/" + name2
# uncomment the following lines if we ever decide we want to generate
# buggy tests that don't compile
# if bug_generated:
# cname21 = prefix + suffix + proto + "1_BUG.checked2.c"
# cname22 = prefix + suffix + proto + "2_BUG.checked2.c"
run2 = f"""\
// RUN: rm -rf %t*
// RUN: 3c -base-dir=%S -addcr -alltypes -output-dir=%t.checkedALL2 %S/{name} %s --
// RUN: 3c -base-dir=%S -addcr -output-dir=%t.checkedNOALL2 %S/{name} %s --
// RUN: %clang -working-directory=%t.checkedNOALL2 -c {name} {name2}
// RUN: FileCheck -match-full-lines -check-prefixes="CHECK_NOALL","CHECK" --input-file {cname2NOALL2} %s
// RUN: FileCheck -match-full-lines -check-prefixes="CHECK_ALL","CHECK" --input-file {cname2ALL2} %s
// RUN: 3c -base-dir=%S -alltypes -output-dir=%t.checked %S/{name} %s --
// RUN: 3c -base-dir=%t.checked -alltypes -output-dir=%t.convert_again {cname} {cname2} --
// RUN: test ! -f {cnameALLtwice1}
// RUN: test ! -f {cnameALLtwice2}\
"""
file = open(name, "w+")
file.write(run + comment + "\n".join(lines))
file.close()
if proto == "multi":
file = open(name2, "w+")
file.write(run2 + comment + "\n".join(lines2))
file.close()
return
def annot_gen_smart(prefix, proto, suffix):
# generate the body of the file
[susproto, sus, foo, bar] = method_gen(prefix, proto, suffix)
name = prefix + proto + suffix + ".c"
cnameNOALL = "tmp.checkedNOALL/" + name
cnameALL = "tmp.checkedALL/" + name
name2 = name
cname2NOALL = cnameNOALL
cname2ALL = cnameALL
if proto == "multi":
name = prefix + suffix + proto + "1.c"
name2 = prefix + suffix + proto + "2.c"
cnameNOALL = "tmp.checkedNOALL/" + name
cnameALL = "tmp.checkedALL/" + name
cname2NOALL = "tmp.checkedNOALL/" + name2
cname2ALL = "tmp.checkedALL/" + name2
if proto == "proto":
test = header + definitions + susproto + foo + bar + sus
elif proto == "multi":
test = header + definitions2 + susproto + foo + bar
else:
test = header + definitions + sus + foo + bar
# write the main file
file = open(name, "w+")
file.write(test)
file.close()
# generate the second file if a multi example
if proto == "multi":
test2 = header + definitions2 + sus
file = open(name2, "w+")
file.write(test2)
file.close()
# run the porting tool on the file(s)
if proto == "multi":
os.system(
"{}3c -alltypes -addcr -output-dir=tmp.checkedALL {} {} --".format(
bin_path, name, name2))
os.system("{}3c -addcr -output-dir=tmp.checkedNOALL {} {} --".format(
bin_path, name, name2))
else:
os.system(
"{}3c -alltypes -addcr -output-dir=tmp.checkedALL {} --".format(
bin_path, name))
os.system("{}3c -addcr -output-dir=tmp.checkedNOALL {} --".format(
bin_path, name))
# compile the files and if it doesn't compile, then let's indicate that a
# bug was generated for this file
bug_generated = False
if proto != "multi":
# Avoid leaving an object file in the working directory.
out = subprocess.Popen(
['{}clang'.format(bin_path), '-c', '-o', '/dev/null', cnameNOALL],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = out.communicate()
stdout = str(stdout)
if "error:" in stdout:
bug_generated = True
# name = prefix + proto + suffix + "_BUG.c"
else:
# In this case, since there are two source files, clang will give an
# error if we | |
<reponame>CoderHuo/asn1tools
import unittest
from .utils import Asn1ToolsBaseTest
import asn1tools
import sys
from copy import deepcopy
import string
sys.path.append('tests/files')
sys.path.append('tests/files/3gpp')
from rrc_8_6_0 import EXPECTED as RRC_8_6_0
from lpp_14_3_0 import EXPECTED as LPP_14_3_0
from x691_a2 import EXPECTED as X691_A2
from x691_a3 import EXPECTED as X691_A3
from x691_a4 import EXPECTED as X691_A4
class Asn1ToolsUPerTest(Asn1ToolsBaseTest):
maxDiff = None
def test_foo(self):
foo = asn1tools.compile_files('tests/files/foo.asn', 'uper')
self.assertEqual(len(foo.types), 2)
self.assertTrue(foo.types['Question'] is not None)
self.assertTrue(foo.types['Answer'] is not None)
self.assertEqual(len(foo.modules), 1)
self.assertTrue(foo.modules['Foo'] is not None)
# Question.
encoded = foo.encode('Question', {'id': 1, 'question': 'Is 1+1=3?'})
self.assertEqual(encoded, b'\x01\x01\x09\x93\xcd\x03\x15\x6c\x5e\xb3\x7e')
decoded = foo.decode('Question', encoded)
self.assertEqual(decoded, {'id': 1, 'question': 'Is 1+1=3?'})
# Answer.
encoded = foo.encode('Answer', {'id': 1, 'answer': False})
self.assertEqual(encoded, b'\x01\x01\x00')
decoded = foo.decode('Answer', encoded)
self.assertEqual(decoded, {'id': 1, 'answer': False})
# Encode a question with missing field 'id'.
with self.assertRaises(asn1tools.EncodeError) as cm:
encoded = foo.encode('Question', {'question': 'Is 1+1=3?'})
self.assertEqual(
str(cm.exception),
"Sequence member 'id' not found in {'question': 'Is 1+1=3?'}.")
def test_decode_length(self):
foo = asn1tools.compile_files('tests/files/foo.asn', 'uper')
with self.assertRaises(asn1tools.DecodeError) as cm:
foo.decode_length(b'')
self.assertEqual(str(cm.exception),
': Decode length is not supported for this codec.')
def test_versions(self):
foo = asn1tools.compile_files('tests/files/versions.asn', 'uper')
# Encode as V1, decode as V1, V2 and V3
decoded_v1 = {
'userName': 'myUserName',
'password': '<PASSWORD>',
'accountNumber': 54224445
}
encoded_v1 = foo.encode('V1', decoded_v1)
self.assertEqual(foo.decode('V1', encoded_v1), decoded_v1)
self.assertEqual(foo.decode('V2', encoded_v1), decoded_v1)
self.assertEqual(foo.decode('V3', encoded_v1), decoded_v1)
# Encode as V2, decode as V1, V2 and V3
decoded_v2 = {
'userName': 'myUserName',
'password': '<PASSWORD>',
'accountNumber': 54224445,
'minutesLastLoggedIn': 5
}
encoded_v2 = foo.encode('V2', decoded_v2)
self.assertEqual(foo.decode('V1', encoded_v2), decoded_v1)
self.assertEqual(foo.decode('V2', encoded_v2), decoded_v2)
self.assertEqual(foo.decode('V3', encoded_v2), decoded_v2)
# Encode as V3, decode as V1, V2 and V3
decoded_v3 = {
'userName': 'myUserName',
'password': '<PASSWORD>',
'accountNumber': 54224445,
'minutesLastLoggedIn': 5,
'certificate': None,
'thumb': None
}
encoded_v3 = foo.encode('V3', decoded_v3)
self.assertEqual(foo.decode('V1', encoded_v3), decoded_v1)
self.assertEqual(foo.decode('V2', encoded_v3), decoded_v2)
self.assertEqual(foo.decode('V3', encoded_v3), decoded_v3)
    def test_x691_a1(self):
        """X.691 Annex A.1: unaligned PER encoding of a PersonnelRecord.

        The encoded octets are the reference example from the standard;
        the helper checks encode and decode in both directions.
        """
        a1 = asn1tools.compile_files('tests/files/x691_a1.asn', 'uper')
        decoded = {
            'name': {
                'givenName': 'John',
                'initial': 'P',
                'familyName': 'Smith'
            },
            'title': 'Director',
            'number': 51,
            'dateOfHire': '19710917',
            'nameOfSpouse': {
                'givenName': 'Mary',
                'initial': 'T',
                'familyName': 'Smith'
            },
            'children': [
                {
                    'name': {
                        'givenName': 'Ralph',
                        'initial': 'T',
                        'familyName': 'Smith'
                    },
                    'dateOfBirth': '19571111'
                },
                {
                    'name': {
                        'givenName': 'Susan',
                        'initial': 'B',
                        'familyName': 'Jones'
                    },
                    'dateOfBirth': '19590717'
                }
            ]
        }
        # Reference octets from X.691 Annex A.1.
        encoded = (
            b'\x82\x4a\xdf\xa3\x70\x0d\x00\x5a\x7b\x74\xf4\xd0\x02\x66\x11\x13'
            b'\x4f\x2c\xb8\xfa\x6f\xe4\x10\xc5\xcb\x76\x2c\x1c\xb1\x6e\x09\x37'
            b'\x0f\x2f\x20\x35\x01\x69\xed\xd3\xd3\x40\x10\x2d\x2c\x3b\x38\x68'
            b'\x01\xa8\x0b\x4f\x6e\x9e\x9a\x02\x18\xb9\x6a\xdd\x8b\x16\x2c\x41'
            b'\x69\xf5\xe7\x87\x70\x0c\x20\x59\x5b\xf7\x65\xe6\x10\xc5\xcb\x57'
            b'\x2c\x1b\xb1\x6e'
        )
        self.assert_encode_decode(a1, 'PersonnelRecord', decoded, encoded)
    def test_x691_a2(self):
        """X.691 Annex A.2: same PersonnelRecord value as A.1, but compiled
        from a pre-parsed specification dict (constrained character strings
        change the encoding relative to A.1).
        """
        # deepcopy: compile_dict mutates the specification in place.
        a2 = asn1tools.compile_dict(deepcopy(X691_A2), 'uper')
        decoded = {
            'name': {
                'givenName': 'John',
                'initial': 'P',
                'familyName': 'Smith'
            },
            'title': 'Director',
            'number': 51,
            'dateOfHire': '19710917',
            'nameOfSpouse': {
                'givenName': 'Mary',
                'initial': 'T',
                'familyName': 'Smith'
            },
            'children': [
                {
                    'name': {
                        'givenName': 'Ralph',
                        'initial': 'T',
                        'familyName': 'Smith'
                    },
                    'dateOfBirth': '19571111'
                },
                {
                    'name': {
                        'givenName': 'Susan',
                        'initial': 'B',
                        'familyName': 'Jones'
                    },
                    'dateOfBirth': '19590717'
                }
            ]
        }
        # Reference octets from X.691 Annex A.2.
        encoded = (
            b'\x86\x5d\x51\xd2\x88\x8a\x51\x25\xf1\x80\x99\x84\x44\xd3\xcb\x2e'
            b'\x3e\x9b\xf9\x0c\xb8\x84\x8b\x86\x73\x96\xe8\xa8\x8a\x51\x25\xf1'
            b'\x81\x08\x9b\x93\xd7\x1a\xa2\x29\x44\x97\xc6\x32\xae\x22\x22\x22'
            b'\x98\x5c\xe5\x21\x88\x5d\x54\xc1\x70\xca\xc8\x38\xb8'
        )
        self.assert_encode_decode(a2, 'PersonnelRecord', decoded, encoded)
    def test_x691_a3(self):
        """X.691 Annex A.3: extensible PersonnelRecord.

        Same value as A.2 except the second child carries the extension
        addition 'sex', exercising the extension-bit encoding.
        """
        # deepcopy: compile_dict mutates the specification in place.
        a3 = asn1tools.compile_dict(deepcopy(X691_A3), 'uper')
        decoded = {
            'name': {
                'givenName': 'John',
                'initial': 'P',
                'familyName': 'Smith'
            },
            'title': 'Director',
            'number': 51,
            'dateOfHire': '19710917',
            'nameOfSpouse': {
                'givenName': 'Mary',
                'initial': 'T',
                'familyName': 'Smith'
            },
            'children': [
                {
                    'name': {
                        'givenName': 'Ralph',
                        'initial': 'T',
                        'familyName': 'Smith'
                    },
                    'dateOfBirth': '19571111'
                },
                {
                    'name': {
                        'givenName': 'Susan',
                        'initial': 'B',
                        'familyName': 'Jones'
                    },
                    'dateOfBirth': '19590717',
                    'sex': 'female'
                }
            ]
        }
        # Reference octets from X.691 Annex A.3.
        encoded = (
            b'\x40\xcb\xaa\x3a\x51\x08\xa5\x12\x5f\x18\x03\x30\x88\x9a\x79\x65'
            b'\xc7\xd3\x7f\x20\xcb\x88\x48\xb8\x19\xce\x5b\xa2\xa1\x14\xa2\x4b'
            b'\xe3\x01\x13\x72\x7a\xe3\x54\x22\x94\x49\x7c\x61\x95\x71\x11\x18'
            b'\x22\x98\x5c\xe5\x21\x84\x2e\xaa\x60\xb8\x32\xb2\x0e\x2e\x02\x02'
            b'\x80'
        )
        self.assert_encode_decode(a3, 'PersonnelRecord', decoded, encoded)
def test_x691_a4(self):
a4 = asn1tools.compile_dict(deepcopy(X691_A4), 'uper')
decoded = {
'a': 253,
'b': True,
'c': ('e', True),
'g': '123',
'h': True
}
encoded = (
b'\x9e\x00\x06\x00\x04\x0a\x46\x90'
)
self.assert_encode_decode(a4, 'Ax', decoded, encoded)
    def test_rrc_8_6_0(self):
        """Round-trip seven 3GPP LTE RRC (release 8.6.0) messages through the
        UPER codec, pinning each message's exact reference octets.

        The specification is compiled from a pre-parsed dict; deepcopy is
        needed because compilation mutates the dict in place.
        """
        rrc = asn1tools.compile_dict(deepcopy(RRC_8_6_0), 'uper')
        # Message 1: paging with systemInfoModification set.
        decoded = {
            'message': (
                'c1',
                (
                    'paging',
                    {
                        'systemInfoModification': 'true',
                        'nonCriticalExtension': {
                        }
                    }
                )
            )
        }
        encoded = b'\x28'
        self.assert_encode_decode(rrc, 'PCCH-Message', decoded, encoded)
        # Message 2: empty paging message.
        decoded = {
            'message': (
                'c1',
                ('paging', {})
            )
        }
        encoded = b'\x00'
        self.assert_encode_decode(rrc, 'PCCH-Message', decoded, encoded)
        # Message 3: MIB with fixed-size BIT STRING members, given as
        # (bytes, number-of-bits) tuples.
        decoded = {
            'message': {
                'dl-Bandwidth': 'n6',
                'phich-Config': {
                    'phich-Duration': 'normal',
                    'phich-Resource': 'half'
                },
                'systemFrameNumber': (b'\x12', 8),
                'spare': (b'\x34\x40', 10)
            }
        }
        encoded = b'\x04\x48\xd1'
        self.assert_encode_decode(rrc, 'BCCH-BCH-Message', decoded, encoded)
        # Message #4: systemInformation carrying SIB2 through SIB11.
        decoded = {
            'message': (
                'c1',
                (
                    'systemInformation',
                    {
                        'criticalExtensions': (
                            'systemInformation-r8',
                            {
                                'sib-TypeAndInfo': [
                                    (
                                        'sib2',
                                        {
                                            'ac-BarringInfo': {
                                                'ac-BarringForEmergency': True,
                                                'ac-BarringForMO-Data': {
                                                    'ac-BarringFactor': 'p95',
                                                    'ac-BarringTime': 's128',
                                                    'ac-BarringForSpecialAC': (b'\xf0', 5)
                                                }
                                            },
                                            'radioResourceConfigCommon': {
                                                'rach-ConfigCommon': {
                                                    'preambleInfo': {
                                                        'numberOfRA-Preambles': 'n24',
                                                        'preamblesGroupAConfig': {
                                                            'sizeOfRA-PreamblesGroupA': 'n28',
                                                            'messageSizeGroupA': 'b144',
                                                            'messagePowerOffsetGroupB': 'minusinfinity'
                                                        }
                                                    },
                                                    'powerRampingParameters': {
                                                        'powerRampingStep': 'dB0',
                                                        'preambleInitialReceivedTargetPower': 'dBm-102'
                                                    },
                                                    'ra-SupervisionInfo': {
                                                        'preambleTransMax': 'n8',
                                                        'ra-ResponseWindowSize': 'sf6',
                                                        'mac-ContentionResolutionTimer': 'sf48'
                                                    },
                                                    'maxHARQ-Msg3Tx': 8
                                                },
                                                'bcch-Config': {
                                                    'modificationPeriodCoeff': 'n2'
                                                },
                                                'pcch-Config': {
                                                    'defaultPagingCycle': 'rf256',
                                                    'nB': 'twoT'
                                                },
                                                'prach-Config': {
                                                    'rootSequenceIndex': 836,
                                                    'prach-ConfigInfo': {
                                                        'prach-ConfigIndex': 33,
                                                        'highSpeedFlag': False,
                                                        'zeroCorrelationZoneConfig': 10,
                                                        'prach-FreqOffset': 64
                                                    }
                                                },
                                                'pdsch-ConfigCommon': {
                                                    'referenceSignalPower': -60,
                                                    'p-b': 2
                                                },
                                                'pusch-ConfigCommon': {
                                                    'pusch-ConfigBasic': {
                                                        'n-SB': 1,
                                                        'hoppingMode': 'interSubFrame',
                                                        'pusch-HoppingOffset': 10,
                                                        'enable64QAM': False
                                                    },
                                                    'ul-ReferenceSignalsPUSCH': {
                                                        'groupHoppingEnabled': True,
                                                        'groupAssignmentPUSCH': 22,
                                                        'sequenceHoppingEnabled': False,
                                                        'cyclicShift': 5
                                                    }
                                                },
                                                'pucch-ConfigCommon': {
                                                    'deltaPUCCH-Shift': 'ds1',
                                                    'nRB-CQI': 98,
                                                    'nCS-AN': 4,
                                                    'n1PUCCH-AN': 2047
                                                },
                                                'soundingRS-UL-ConfigCommon': (
                                                    'setup',
                                                    {
                                                        'srs-BandwidthConfig': 'bw0',
                                                        'srs-SubframeConfig': 'sc4',
                                                        'ackNackSRS-SimultaneousTransmission': True
                                                    }),
                                                'uplinkPowerControlCommon': {
                                                    'p0-NominalPUSCH': -126,
                                                    'alpha': 'al0',
                                                    'p0-NominalPUCCH': -127,
                                                    'deltaFList-PUCCH': {
                                                        'deltaF-PUCCH-Format1': 'deltaF-2',
                                                        'deltaF-PUCCH-Format1b': 'deltaF1',
                                                        'deltaF-PUCCH-Format2': 'deltaF0',
                                                        'deltaF-PUCCH-Format2a': 'deltaF-2',
                                                        'deltaF-PUCCH-Format2b': 'deltaF0'
                                                    },
                                                    'deltaPreambleMsg3': -1
                                                },
                                                'ul-CyclicPrefixLength': 'len1'
                                            },
                                            'ue-TimersAndConstants': {
                                                't300': 'ms100',
                                                't301': 'ms200',
                                                't310': 'ms50',
                                                'n310': 'n2',
                                                't311': 'ms30000',
                                                'n311': 'n2'
                                            },
                                            'freqInfo': {
                                                'additionalSpectrumEmission': 3
                                            },
                                            'timeAlignmentTimerCommon': 'sf500'
                                        }
                                    ),
                                    (
                                        'sib3',
                                        {
                                            'cellReselectionInfoCommon': {
                                                'q-Hyst': 'dB0',
                                                'speedStateReselectionPars': {
                                                    'mobilityStateParameters': {
                                                        't-Evaluation': 's180',
                                                        't-HystNormal': 's180',
                                                        'n-CellChangeMedium': 1,
                                                        'n-CellChangeHigh': 16
                                                    },
                                                    'q-HystSF': {
                                                        'sf-Medium': 'dB-6',
                                                        'sf-High': 'dB-4'
                                                    }
                                                }
                                            },
                                            'cellReselectionServingFreqInfo': {
                                                'threshServingLow': 7,
                                                'cellReselectionPriority': 3
                                            },
                                            'intraFreqCellReselectionInfo': {
                                                'q-RxLevMin': -33,
                                                's-IntraSearch': 0,
                                                'presenceAntennaPort1': False,
                                                'neighCellConfig': (b'\x80', 2),
                                                't-ReselectionEUTRA': 4
                                            }
                                        }
                                    ),
                                    (
                                        'sib4',
                                        {
                                        }
                                    ),
                                    (
                                        'sib5',
                                        {
                                            'interFreqCarrierFreqList': [
                                                {
                                                    'dl-CarrierFreq': 1,
                                                    'q-RxLevMin': -45,
                                                    't-ReselectionEUTRA': 0,
                                                    'threshX-High': 31,
                                                    'threshX-Low': 29,
                                                    'allowedMeasBandwidth': 'mbw6',
                                                    'presenceAntennaPort1': True,
                                                    'neighCellConfig': (b'\x00', 2),
                                                    'q-OffsetFreq': 'dB0'
                                                }
                                            ]
                                        }
                                    ),
                                    (
                                        'sib6',
                                        {
                                            't-ReselectionUTRA': 3
                                        }
                                    ),
                                    (
                                        'sib7',
                                        {
                                            't-ReselectionGERAN': 3
                                        }
                                    ),
                                    (
                                        'sib8',
                                        {
                                            'parameters1XRTT': {
                                                'longCodeState1XRTT': (b'\x01#Eg\x89\x00', 42)
                                            }
                                        }
                                    ),
                                    (
                                        'sib9',
                                        {
                                            'hnb-Name': b'4'
                                        }
                                    ),
                                    (
                                        'sib10',
                                        {
                                            'messageIdentifier': (b'#4', 16),
                                            'serialNumber': (b'\x124', 16),
                                            'warningType': b'2\x12'
                                        }
                                    ),
                                    (
                                        'sib11',
                                        {
                                            'messageIdentifier': (b'g\x88', 16),
                                            'serialNumber': (b'T5', 16),
                                            'warningMessageSegmentType': 'notLastSegment',
                                            'warningMessageSegmentNumber': 19,
                                            'warningMessageSegment': b'\x12'
                                        }
                                    )
                                ]
                            }
                        )
                    }
                )
            )
        }
        encoded = (
            b'\x04\x81\x3f\xbe\x2a\x64\x12\xb2\xf3\x3a\x24\x2a\x80\x02\x02\x9b'
            b'\x29\x8a\x7f\xf8\x24\x00\x00\x11\x00\x24\xe2\x08\x05\x06\xc3\xc4'
            b'\x76\x92\x81\x41\x00\xc0\x00\x00\x0b\x23\xfd\x10\x80\xca\x19\x82'
            b'\x80\x48\xd1\x59\xe2\x43\xa0\x1a\x20\x23\x34\x12\x34\x32\x12\x48'
            b'\xcf\x10\xa8\x6a\x4c\x04\x48'
        )
        self.assert_encode_decode(rrc, 'BCCH-DL-SCH-Message', decoded, encoded)
        # Message 5: counterCheck with a future critical extension.
        decoded = {
            'message': (
                'c1',
                (
                    'counterCheck', {
                        'rrc-TransactionIdentifier': 0,
                        'criticalExtensions': (
                            'criticalExtensionsFuture',
                            {
                            }
                        )
                    }
                )
            )
        }
        encoded = b'\x41'
        self.assert_encode_decode(rrc, 'DL-DCCH-Message', decoded, encoded)
        # Message 6: counterCheck-r8 with maximum COUNT MSB values.
        decoded = {
            'message': (
                'c1',
                (
                    'counterCheck',
                    {
                        'rrc-TransactionIdentifier': 0,
                        'criticalExtensions': (
                            'c1',
                            (
                                'counterCheck-r8',
                                {
                                    'drb-CountMSB-InfoList': [
                                        {
                                            'drb-Identity': 32,
                                            'countMSB-Uplink': 33554431,
                                            'countMSB-Downlink': 33554431
                                        }
                                    ],
                                    'nonCriticalExtension': {
                                    }
                                }
                            )
                        )
                    }
                )
            )
        }
        encoded = b'\x40\x21\xff\xff\xff\xff\xff\xff\xfc'
        self.assert_encode_decode(rrc, 'DL-DCCH-Message', decoded, encoded)
        # Message 7: counterCheckResponse with an empty count-info list.
        decoded = {
            'message': (
                'c1',
                (
                    'counterCheckResponse',
                    {
                        'rrc-TransactionIdentifier': 0,
                        'criticalExtensions': (
                            'counterCheckResponse-r8',
                            {
                                'drb-CountInfoList': [
                                ],
                                'nonCriticalExtension': {
                                }
                            }
                        )
                    }
                )
            )
        }
        encoded = b'\x50\x80'
        self.assert_encode_decode(rrc, 'UL-DCCH-Message', decoded, encoded)
def test_lpp_14_3_0(self):
lpp = asn1tools.compile_dict(deepcopy(LPP_14_3_0), 'uper')
# Message 1.
decoded = {
'transactionID': {
'initiator': 'targetDevice',
'transactionNumber': 254
},
'endTransaction': True,
'lpp-MessageBody': (
'c1',
(
'provideAssistanceData',
{
'criticalExtensions': (
'c1',
(
'spare1',
None
)
)
}
)
)
}
encoded = b'\x93\xfd\x1b'
self.assert_encode_decode(lpp, 'LPP-Message', decoded, encoded)
# Message 2.
decoded = {
'transactionID': {
'initiator': 'targetDevice',
'transactionNumber': 254
},
'endTransaction': True,
'lpp-MessageBody': (
'c1',
(
'requestCapabilities',
{
'criticalExtensions': (
'c1',
(
'requestCapabilities-r9',
{
'bt-RequestCapabilities-r13':{
}
}
)
)
}
)
)
}
encoded = b'\x93\xfd\x00\x80\x04\x04\x40'
self.assert_encode_decode(lpp, 'LPP-Message', decoded, encoded)
# Message 3.
decoded = {
'transactionID': {
'initiator': 'targetDevice',
'transactionNumber': 255
},
'endTransaction': False,
'lpp-MessageBody': (
'c1',
(
'requestCapabilities',
{
'criticalExtensions': (
'c1',
(
'requestCapabilities-r9',
{
'epdu-RequestCapabilities': [
{
'ePDU-Identifier': {
'ePDU-ID': 256
},
'ePDU-Body': b''
}
],
'tbs-RequestCapabilities-r13': {
},
'bt-RequestCapabilities-r13':{
}
}
)
)
}
)
)
}
encoded = b'\x93\xfe\x00\x84\x0f\xf0\x00\x10\x15\x00'
self.assert_encode_decode(lpp, 'LPP-Message', decoded, encoded)
# Message 4.
decoded = {
'transactionID': {
'initiator': 'targetDevice',
'transactionNumber': 0
},
'endTransaction': False,
'lpp-MessageBody': (
'c1',
(
'provideLocationInformation',
{
'criticalExtensions': (
'c1',
(
'provideLocationInformation-r9',
{
'ecid-ProvideLocationInformation': {
'ecid-SignalMeasurementInformation': | |
values are the full section names.
layout : dict
Layout of the header. Similar to `header`, but instead of the
lines, contains the layout of the line.
comments : dict
Comments in the LAS header. The keys are the linenumber of the
comment and the values are the comments themselves.
See Also
--------
_getlinelayout : Obtain the line layout.
"""
global _VERBOSE
header = OrderedDict()
sectionnames = {}
comments = {}
layout = {}
currentsection = None
linecount = 0
for line in headerlines:
if not line:
continue
elif line.lstrip().startswith('#'):
comments[linecount] = line.split('\n')[0]
elif line.lstrip().startswith('~'):
currentsection = []
sectionname = line.split('\n')[0]
sectionkey = sectionname.split('~')[1][0].upper()
header[sectionkey] = currentsection
sectionnames[sectionkey] = sectionname
else:
currentsection.append(line.split('\n')[0])
linecount += 1
for sectionkey, lines in header.items():
try:
section = OrderedDict()
sectionlayout = {}
for line in lines:
parsedline, linelayout = LASReader._parseline(line, True)
# if parsedline['MNEM'] in section:
# print "Curva repetida:", parsedline['MNEM'] # TODO: Fazer algo
# section[parsedline['MNEM']] = parsedline
# sectionlayout[parsedline['MNEM']] = linelayout
# TODO: Melhorar e ver se funcionou
old_mnem = parsedline['MNEM']
new_mnem = old_mnem
count = 0
while new_mnem in section:
count += 1
new_mnem = old_mnem + '_{:0>4}'.format(count)
if _VERBOSE and count:
print ("Nome de curva repetido:", old_mnem)
print ("Substituindo por:", new_mnem)
parsedline['MNEM'] = new_mnem
section[new_mnem] = parsedline
sectionlayout[new_mnem] = linelayout
if not section:
header[sectionkey] = ''
else:
header[sectionkey] = section
layout[sectionkey] = sectionlayout
except:
header[sectionkey] = '\n'.join(lines)
if (not withsectionnames) and (not withlayout) and (not withcomments):
return header
else:
returns = (header,)
if withsectionnames:
returns += (sectionnames,)
if withlayout:
returns += (layout,)
if withcomments:
returns += (comments,)
return returns
@staticmethod
def _getdatalines(fileobject):
"""
Obtain the LAS ASCII section lines from a file object.
Parameters
----------
fileobject : file-like object
The file object from which the data lines will be obtained.
Returns
-------
datalines : list
A list containing the lines that belong to a LAS file ASCII
section.
"""
fileobject.seek(0)
line = fileobject.readline()
while not line.lstrip().startswith('~A'):
line = fileobject.readline()
datalines = fileobject.readlines()
return datalines
@staticmethod
def _getflatdata(datalines):
"""
Obtain a flat `numpy.ndarray` from a list of data lines.
Concatenate the lines; split the resulting string, convert each element
to float and convert to a `numpy.ndarray`.
Parameters
----------
datalines : list
A list containing the lines that belong to a LAS file ASCII
section.
Returns
-------
flatdata : numpy.ndarray
A flat (i.e. one-dimensional) array containing data from
`datalines`.
"""
flatdata = np.asarray([float(a) for a in ' '.join(datalines).split()])
return flatdata
@staticmethod
def _reshapeflatdata(flatdata, ncurves):
"""
Reshape the flat data into a 2-dimensional data.
The reshaped data will have the same number of elements as `flatdata`
and first dimension with length `ncurves`. This way, `data[0]` will
be the data from the first curve in the file.
Parameters
----------
flatdata : numpy.ndarray
A flat (i.e. one-dimensional) array containing data from a LAS
file.
ncurves : int
Number of existing curves in the same file
Returns
-------
data : numpy.ndarray
Reshaped data with first dimension lenght equal to `ncurves`
"""
data = np.reshape(flatdata, (-1, ncurves)).T
return data
@staticmethod
def _replacenullvalues(data, nullvalue, copy=False):
"""
Replace null values in an array with `np.nan`.
Parameters
----------
data : np.ndarray
Array containing null values to be replaced.
nullvalue : float
The value that will be replaced by `np.nan`.
copy : bool, optional
Whether the operation will be performed in a copy of the data or
in the data itself.
Returns
-------
newdata : np.ndarray
A array with `nullvalue` replaced with `np.nan`.
"""
if copy:
newdata = np.copy(data)
else:
newdata = data
where = (newdata == nullvalue)
newdata[where] = np.nan
return newdata
@staticmethod
def _reorderdata(data, copy=False):
"""
Reorder the data so that the first line is in ascending order.
This method suposes that the first line of `data` is already sorted
in descending order. It will invert the order of the rows in the array,
i.e. the last row will become the first, the second last will become
the second and so on.
Parameters
----------
data : np.ndarray
The array that will be reordered.
copy : bool, optional
Whether the operation will be performed in a copy of the data or
in the data itself.
Returns
-------
newdata : np.ndarray
A array with the rows in reverse order.
"""
if copy:
newdata = np.copy(data)
else:
newdata = data
return newdata[:, ::-1]
    def read(self):
        """
        Read the file.

        Parses the header sections and the '~A' data section, fills
        `header`, `headersectionnames`, `headerlayout`, `headercomments`
        and `data`, and reverses the sample order when the step is
        negative so that the index curve ends up ascending.

        Notes
        -----
        When creating a `LASReader` object the file is not read immediately.
        This method must be called after the creation. Once it is called, all
        of the instance's attributes will be read from the file and the file
        will be closed.
        """
        fileobject = builtins.open(self.filename, 'r')
        headerlines = LASReader._getheaderlines(fileobject)
        datalines = LASReader._getdatalines(fileobject)
        fileobject.close()
        # Header is parsed together with section names, layout and comments.
        self.header, self.headersectionnames, self.headerlayout, self.headercomments = LASReader._getheader(headerlines, True, True, True)
        # "C" = Curve section (one entry per curve); "W" = Well section.
        ncurves = len(self.header["C"])
        nullvalue = float(self.header["W"]["NULL"]["DATA"])
        stepvalue = float(self.header["W"]["STEP"]["DATA"])
        flattendata = LASReader._getflatdata(datalines)
        nandata = LASReader._replacenullvalues(flattendata, nullvalue)
        self.data = LASReader._reshapeflatdata(nandata, ncurves)
        # An absent/zero STEP is recomputed from the first two index samples.
        if (stepvalue == nullvalue) or (stepvalue == 0.0):
            stepvalue = self.data[0][1] - self.data[0][0]
        # Negative step means descending index: flip to ascending order.
        if stepvalue < 0:
            self.data = LASReader._reorderdata(self.data)
class LASWriter(LASFile):
    """
    A specialization of `LASFile` for writing files.

    Notes
    -----
    When creating a `LASWriter` object the file is not written immediately.
    The `header` and `data` attributes must be defined before calling the
    `write` method. The other attributes (`headersectionnames`,
    `headerlayout` and `headercomments`) are optional.

    No verification is done to guarantee that the header and data are
    compatible (i.e. have the same number of curves and the same depth range).
    There are two methods that can be used for this: `correctwellsection` and
    `correctcurvesection`.

    In order to get a better layout for the header, the method
    `getprettyheaderlayout` may be used.

    Examples
    --------
    >>> lasfile = LASWriter("filename.las")
    >>> lasfile.header = existing_header
    >>> lasfile.data = existing_data
    >>> lasfile.write()
    """
    # Default margin/alignment styles for the three free-width LAS line parts.
    # Note: 'allign' (sic) is the spelling the layout code expects.
    DEFAULTMNEMSTYLE = {'leftmargin': 1, 'rightmargin': 0, 'allign': 'left'}
    DEFAULTDATASTYLE = {'leftmargin': 1, 'rightmargin': 1, 'allign': 'left'}
    # NOTE(review): 'righmargin' below is likely a typo for 'rightmargin'
    # (compare the two styles above) — verify against `_getspaces` before
    # changing, since consumers may look the key up by this exact name.
    DEFAULTDESCSTYLE = {'leftmargin': 1, 'righmargin': 0, 'allign': 'left'}
    DEFAULTUNIFORMSECTIONS = True
    # Whitespace widths for the six gaps of a LAS line (see LASLINEPATTERN).
    MINIMALLINELAYOUT = [0,0,1,0,0,0]
    # Gap slots {0[i]} interleave with the MNEM.UNIT DATA : DESC structure.
    LASLINEPATTERN = "{0[0]}{MNEM}{0[1]}.{UNIT}{0[2]}{DATA}{0[3]}:{0[4]}{DESC}{0[5]}"
    def __init__(self, filename):
        """Store `filename`; nothing is written until `write` is called."""
        super(LASWriter, self).__init__(filename)
@staticmethod
def _composeline(parsedline, linelayout=None):
"""
Turn a LAS parsed line into a one-string-line.
Parameters
----------
parsedline : dict
A LAS parsed line, i.e. a dict with keys "MNEM", "UNIT", "DATA"
and "DESC" which values are the respective LAS line parts.
linelayout : list, optional
A list containing 6 ints, each representing the number of
whitespaces in a portion of the LAS line. If not provided a minimal
layout will be used.
Returns
-------
line : str
A line composed using the `parsedline` parts and `linelayout`
whitespaces.
Examples
--------
>>> parsedline = {'DATA': '', 'DESC': 'MEASURED DEPTH',
'MNEM': 'DEPTH','UNIT': 'M'}
>>> layout = [2, 0, 7, 0, 1, 2]
>>> LASWriter._composeline(parsedline, linelayout)
' DEPTH.M : MEASURED DEPTH '
"""
if not linelayout:
linelayout = LASWriter.MINIMALLINELAYOUT
line = LASWriter.LASLINEPATTERN.format([" "*n for n in linelayout], **parsedline)
return line
@staticmethod
def _getspaces(style, spaces):
"""
Return the number of left and right whitespaces in a LAS line element.
Here LAS line element refers to either the MNEM, DATA or DESCRIPTION
part of a LAS line (the UNIT part cannot have whitespaces). The
distribution of whitespaces will be done according to `style`.
Parameters
----------
style : dict
A dictionary contaning the style parameters of a LAS line
element. The possible style parameters are 'allign',
'leftmargin' and 'rightmargin'. All of them are optional. If
'allign' is not provided, the other parameters are not used.
'allign' can be 'left', 'center' or 'right' and describes the
allignment of the LAS line element. 'leftmargin' and
'rightmargin' are the number of extra whitespaces to the left
or to the right of the line element, respectively.
spaces : int
Number of whitespaces to | |
<filename>LittleGarden_Code/Backend/app.py
# pylint: skip-file
from repositories.DataRepository import DataRepository
from flask import Flask, request, jsonify
from flask_socketio import SocketIO
from flask_cors import CORS
from helpers.sensors.MeasureHelper import MeasureHelper
from helpers.devices_id.DeviceHelper import DeviceHelper
from helpers.lcd.IPaddressHelper import IPaddressHelper
from helpers.lcd.LCD_PCF import LCD_PCF
from helpers.ActionHelper import ActionHelper
from datetime import datetime, timedelta
import time
import threading
from RPi import GPIO
# Hardware setup (GPIO, sensors, actuators, LCD).
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
# Sensor sampling helper on GPIO pin 19 — assumed BCM numbering (setmode above).
measureHelper = MeasureHelper(19)
# Last sampled water/light readings; updated by add_measurement() and used
# by create_measurement() to drive the actuators.
last_water_value = 0
last_light_value = 0
ipHelper = IPaddressHelper()
# Actuator helper: pins 21 and 20 — presumably pump and LED; verify wiring.
actionHelper = ActionHelper(21, 20)
# I2C/PCF-backed LCD wiring (BCM pin numbers) and device address.
SDA = 27
SCL = 22
E = 6
RS = 5
address = 56
lcd = LCD_PCF(SDA, SCL, E, RS, address, False, False)
lcd.displayOn = True
lcd.cursorOn = False
lcd.cursorBlink = False
lcd.init_LCD()
# Backend and frontend setup (Flask + Socket.IO with open CORS).
endpoint = '/api/v1'
app = Flask(__name__)
app.config['SECRET_KEY'] = 'Hier mag je om het even wat schrijven, zolang het maar geheim blijft en een string is'
socketio = SocketIO(app, cors_allowed_origins="*")
CORS(app)
# API ENDPOINTS
# ---------------------------------------------------------------------------------------------------------------------------------------------------
@app.route('/')
def hallo():
    """Root endpoint: confirms the server is running (message is in Dutch)."""
    message = "Server is running, er zijn momenteel geen API endpoints beschikbaar."
    return message
#region METHODS
#**********************************************************************************************************************************************************
def get_dict_eenheden():
    """Map each sensor device id to its unit ('eenheid') string."""
    return {
        device_id: get_device_by_id(device_id)["eenheid"]
        for device_id in DeviceHelper.get_ids_sensors()
    }
@app.route(endpoint + '/ip', methods=['GET'])
def get_ip_address():
    """Return the host's IP addresses, or 404 when none were found."""
    if request.method == 'GET':
        print('REQUEST ip address')
        ip_array = ipHelper.get_ip_address()
        if ip_array is None:
            print("ERROR: No ips send to frontend")
            return jsonify("ERROR: No ips send to frontend"), 404
        return jsonify(ip_array=ip_array), 200
@app.route(endpoint + '/date', methods=['GET'])
def route_setup_period():
    """Return the default measurement period (yesterday until now)."""
    if request.method == 'GET':
        print('REQUEST period dates')
        dates = get_default_period()
        if dates is None:
            print("ERROR: GET period failed")
            return jsonify("ERROR: GET period failed"), 404
        return jsonify(dates=dates), 200
def get_default_period():
    """Return the default 24h period ending now, as ISO date/time strings.

    The returned dict has keys 'dateStart', 'timeStart', 'dateEnd' and
    'timeEnd'; both time fields carry the same clock time, the dates span
    yesterday to today.
    """
    end = datetime.today()
    start = end - timedelta(days=1)
    clock = end.strftime("%H:%M:%S")
    return {
        "dateStart": start.strftime("%Y-%m-%d"),
        "timeStart": clock,
        "dateEnd": end.strftime("%Y-%m-%d"),
        "timeEnd": clock,
    }
#endregion
#region Devices
#**********************************************************************************************************************************************************
def get_device_by_id(id):
    """Fetch a single device record from the database by its id."""
    return DataRepository.read_device_by_id(id)
@app.route(endpoint + '/devices/type/<measureType>', methods=['GET'])
def get_devices_by_type(measureType):
    """Return all devices of the given measure type.

    Bug fix: the route variable is <measureType>, but the parameter was
    named `type` (also shadowing the builtin) while the body referenced
    the undefined name `measureType` — so Flask raised a TypeError on
    every request. The parameter now matches the route variable.
    """
    if request.method == 'GET':
        print('GET Device by type')
        response = DataRepository.read_device_by_type(measureType)
        return jsonify(devices=response), 200
#endregion
#region MEASUREMENTS
#**********************************************************************************************************************************************************
#region MEASUREMENTS_JSON
#-------------------------------------------------------------------------------------
def get_measure_json_latest(measureType):
    """Build a JSON-ready dict for the newest measurement of `measureType`.

    Returns None (after logging) when the database has no measurement for
    the sensor linked to this measure type.
    """
    deviceId = DeviceHelper.get_sensor_for_measureType(measureType)
    #print(f"get_measure_json_latest - {deviceId}")
    response = DataRepository.read_metingen_last_by_device(deviceId)
    measurement = None
    if response is not None:
        # DB columns are Dutch: waarde=value, commentaar=comment,
        # datum=date, gemiddelde=average, eenheid=unit.
        value = response["waarde"]
        comment = response["commentaar"]
        date = str(response["datum"])
        average = response["gemiddelde"]
        device = get_device_by_id(deviceId)
        unit = device["eenheid"]
        measurement = {"measureType" : measureType, "value" : value,"comment" : comment,"date" : date, "average" : average, "unit" : unit}
    else:
        print(f"No measure to json - type: {measureType}")
    return measurement
def get_measure_json_list(measureType, periodStart = None, periodEnd = None):
    """Collect measurements of `measureType` grouped per day within a period.

    When either boundary is missing, the default period (yesterday..now)
    is used. Returns a tuple (payload, status) where status is
    0 = data found, 1 = empty result, 2 = failure (final return appears
    unreachable, since an empty list already returns status 1).
    """
    deviceId = DeviceHelper.get_sensor_for_measureType(measureType)
    dict_units = get_dict_eenheden()
    if periodStart is None or periodEnd is None:
        # Fall back to the default 24h window.
        json_period = get_default_period()
        dateStart = json_period["dateStart"]
        timeStart = json_period["timeStart"]
        periodStart = f"{dateStart} {timeStart}"
        dateEnd = json_period["dateEnd"]
        timeEnd = json_period["timeEnd"]
        periodEnd = f"{dateEnd} {timeEnd}"
        print(f"{periodStart} - {periodEnd}")
    # Normalize both boundaries to minute precision for the DB query.
    periodStart_datetime = datetime.strptime(str(periodStart), "%Y-%m-%d %H:%M:%S")
    periodStart = periodStart_datetime.strftime("%Y-%m-%d %H:%M")
    #"%Y-%m-%d %H:%M"
    periodEnd_datetime = datetime.strptime(str(periodEnd), "%Y-%m-%d %H:%M:%S")
    periodEnd = periodEnd_datetime.strftime("%Y-%m-%d %H:%M")
    dates = DataRepository.read_dates_in_period(periodStart, periodEnd)
    print(f"from till {periodStart} - {periodEnd} => {len(dates)} dates found")
    print(f" ")
    # Multi-day periods get day+time chart labels; single days only times.
    hasMoreDays = False
    if (periodEnd_datetime - periodStart_datetime).days > 1:
        hasMoreDays = True
    measurement_list = []
    if dates is not None:
        for dates_obj in dates:
            date = dates_obj["datum_distinct"]
            #print(date)
            response = DataRepository.read_metingen_by_date_and_device(date, deviceId)
            if response is not None:
                date_string = str(date)
                measurement_arr = []
                for response_obj in response:
                    timestamp = str(response_obj["datum"])
                    timestamp_datetime = datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S")
                    timestamp_string = str(timestamp_datetime.strftime("%H:%M"))
                    label = str(timestamp_datetime.strftime("%d/%m %H:%M"))
                    if hasMoreDays == False:
                        label = str(timestamp_datetime.strftime("%H:%M"))
                        label += "h"
                    measurement = {
                        "measureType" : measureType,
                        "value" : response_obj["waarde"],
                        "comment" : response_obj["commentaar"],
                        "timestamp" : timestamp_string,
                        "label" : label,
                        "unit" : dict_units[deviceId]
                    }
                    measurement_arr.append(measurement)
                if len(measurement_arr) > 0:
                    list_obj = {"day": date, "measurements": measurement_arr, "measureType" : measureType}
                    measurement_list.append(list_obj)
    if len(measurement_list) == 0:
        return measurement_list, 1
    elif measurement_list is not None and len(measurement_list) > 0:
        return measurement_list, 0
    return print('ERROR: return a list of measurements FAILED'), 2
#endregion
@app.route(endpoint + '/measurements/latest', methods=['GET'])
def route_measurements_latest_list():
    """Return the most recent measurement for every sensor type, or 404
    when any single type fails to resolve."""
    if request.method == 'GET':
        print("")
        print('REQUEST last measurement')
        json_list = {}
        ERROR = None
        for measureType in DeviceHelper.get_type_sensors():
            measurement = get_measure_json_latest(measureType)
            if measurement is None:
                ERROR = jsonify(f"ERROR: Get latest {measureType} failed")
                print(f"get {measureType} - FAILED")
            else:
                json_list[measureType] = measurement
                print(f"get {measureType} - SUCCESS")
        if ERROR is not None:
            return ERROR, 404
        return jsonify(measurements=json_list), 200
@app.route(endpoint + '/measurements/period/<measureType>', methods=['PUT'])
def get_measurements_period(measureType):
    """Return measurements of `measureType` inside a requested period.

    The request body may carry 'periodStart'/'periodEnd' strings in
    '%Y-%m-%d %H:%M:%S' format; when absent or malformed the default
    period is applied inside get_measure_json_list.
    """
    if request.method == 'PUT':
        print("")
        print('REQUEST period measurements')
        periodStart = None
        periodEnd = None
        try:
            period_data = DataRepository.json_or_formdata(request)
            start = period_data["periodStart"]
            periodStart = str(datetime.strptime(start, "%Y-%m-%d %H:%M:%S"))
            end = period_data["periodEnd"]
            periodEnd = str(datetime.strptime(end, "%Y-%m-%d %H:%M:%S"))
            print(f'{periodStart} - {periodEnd}')
        except (KeyError, TypeError, ValueError):
            # Narrowed from a bare `except:`: only missing keys or malformed
            # date strings mean "no valid dates given"; anything else (e.g.
            # a DB failure) should surface instead of being silently hidden.
            print("No valid dates given")
        measurent_list, message = get_measure_json_list(measureType, periodStart, periodEnd)
        if message == 0 or message == 1:
            return jsonify(measurements=measurent_list), 200
        else:
            return jsonify("ERROR: Get period measurements FAILED"), 404
@app.route(endpoint + '/measurements', methods=['POST'])
def route_create_measurement():
    """Trigger a full sensor sweep and persist the readings."""
    print("")
    print("CREATE measurements")
    if request.method == 'POST':
        if create_measurement() == 0:
            return jsonify(status="CREATE measure SUCCES"), 201
        return jsonify(status="CREATE measure FAILED"), 404
def create_measurement():
    """Sample all sensors and, on full success, re-evaluate the actuators.

    Returns the number of failed sensor writes (0 means success). The
    globals `last_water_value`/`last_light_value` are refreshed by
    add_measurement() during take_measurements().
    """
    result = take_measurements()
    if result == 0:
        global last_water_value
        global last_light_value
        # Drive pump/LED based on the values just sampled.
        toggle_actuator("water", last_water_value)
        toggle_actuator("light", last_light_value)
    return result
def take_measurements():
    """Sample every sensor once and store each reading.

    Returns the number of readings that failed to persist (0 = all ok).
    """
    snapshot = measureHelper.create_meting_json()
    date = datetime.strptime(snapshot["date"], '%d/%m/%Y %H:%M:%S')
    failures = 0
    for measurement in snapshot["measurements"]:
        if not add_measurement(measurement, date):
            failures += 1
    return failures
def add_measurement(json, date):
    """Persist one sensor reading and cache water/light values globally.

    `json` carries Dutch keys: waarde=value, commentaar=comment, type=
    measure type. Returns True when the insert produced a row id.
    """
    waarde = json["waarde"]
    comment = json["commentaar"]
    measureType = json["type"]
    deviceId = DeviceHelper.get_sensor_for_measureType(measureType)
    nieuwId = DataRepository.create_meting(deviceId, waarde, comment, date)
    print("")
    print(f"Sensor measure")
    print(f"-deviceId: {deviceId} - {measureType}")
    print(f"-value: {waarde}")
    print(f"-comment: {comment}")
    print(f"-date: {date}")
    print(f"-result - data ID - {nieuwId}")
    print("")
    print(f'DATABASE')
    if nieuwId is not None:
        # Cache the latest water/light readings for the actuator logic in
        # create_measurement().
        if measureType == "water":
            global last_water_value
            last_water_value = waarde
        if measureType == "light":
            global last_light_value
            last_light_value = waarde
        print(f'- NEW {measureType} measurement-sensor success')
        print("")
        return True
    else:
        print(f'- NEW {measureType} measurement-sensor failed')
        print("")
        return False
#endregion
#region Settings
#**********************************************************************************************************************************************************
@app.route(endpoint + '/settings/<measureType>', methods=['GET','PUT'])
def route_settings(measureType):
    """GET returns the min/max settings for a type; PUT updates one setting."""
    if request.method == 'GET':
        return get_response_settings(measureType)
    if request.method == 'PUT':
        payload = DataRepository.json_or_formdata(request)
        return put_settings(measureType, payload)
def get_response_settings(measureType):
    """Build the HTTP response for a settings GET request.

    Bug fix: the failure branch returned the undefined name `ERROR`,
    raising a NameError (HTTP 500) whenever the settings were missing;
    it now returns a proper JSON error with status 404.
    """
    json = get_settings(measureType)
    if json is not None:
        return jsonify(settings=json), 200
    return jsonify(f"ERROR: GET settings for {measureType} failed"), 404
def get_settings(measureType):
    """Read the min/max boundary settings for a measure type.

    Returns a dict with 'measureType', 'unit', 'settingMax' and
    'settingMin', or None when either boundary is missing in the DB.
    """
    deviceId = DeviceHelper.get_sensor_for_measureType(measureType)
    print(f"Request settings - {measureType} - {deviceId}")
    settingMax = DataRepository.read_setting_max_by_deviceId(deviceId)
    settingMin = DataRepository.read_setting_min_by_deviceId(deviceId)
    device = DataRepository.read_device_by_id(deviceId)
    unit = device["eenheid"]
    json = {"measureType" : measureType, "unit" : unit}
    # 'waarde' is the Dutch DB column for the setting's value.
    if settingMax is not None:
        json["settingMax"] = settingMax["waarde"]
    if settingMin is not None:
        json["settingMin"] = settingMin["waarde"]
    # Both boundaries are required; a partial result is treated as missing.
    if settingMin is not None and settingMax is not None:
        return json
    else:
        return None
def put_settings(measureType, data):
    """Update one boundary setting (min or max) for a measure type."""
    print(f'Put setting for {measureType}')
    deviceId = DeviceHelper.get_sensor_for_measureType(measureType)
    response = DataRepository.update_settings_by_deviceId(
        deviceId, data["settingType"], data["value"])
    if response is None:
        return jsonify(status="update settings failed", row_count=data), 404
    return jsonify(status="update settings success", row_count=data), 201
#endregion
#region SOCKET IO
#**********************************************************************************************************************************************************
@socketio.on('connect')
def initial_connection():
    # Greet every new socket client and ask the frontend to refresh its page.
    print('A new client connect')
    socketio.emit('B2F_update_page', {'status': "UPDATE page"})
    #measureHelper.create_meting_json()
@socketio.on("F2B_get_actuator")
def socket_get_actuator(json):
print('Correct actuator UI')
isPumpActive = 0
if actionHelper.is_actuator_active("water"):
isPumpActive = 1
socketio.emit('B2F_actuator_toggled', {'measureType': "water", "status" : isPumpActive})
isLedActive = 0
if actionHelper.is_actuator_active("light"):
isLedActive = 1
socketio.emit('B2F_actuator_toggled', {'measureType': "light", "status" : isLedActive})
@socketio.on("F2B_toggle_actuator")
def socket_toggle_actuator(json):
measureType = json['measureType']
value = measureHelper.read_sensor(measureType)["waarde"]
toggle_actuator(measureType, value, True)
def toggle_actuator(measureType, value, isForced = False):
    """Switch an actuator based on a sensor value and its min/max settings.

    Above max: switch off. Below min: switch on and start a monitor timer.
    Inside the range: only a forced (manual) request flips the state.
    NOTE(review): `monitor_active_actuator` is defined elsewhere in this
    file; `wasActive`/`toggled` are assigned but never read — confirm
    whether they were meant to drive further logic.
    """
    #get settings
    settings = get_settings(measureType)
    if settings is None: #stop if no settings were given
        return
    settingMax = settings["settingMax"]
    settingMin = settings["settingMin"]
    print("")
    print(f"SETTINGS - {measureType} COMPARE value {value} with range {settingMin} - {settingMax}")
    wasActive = actionHelper.is_actuator_active(measureType)
    if value >= settingMax:
        print(f"{measureType} MAXIMUM REACHED")
        toggled = set_actuator(measureType, 0)
    #-----------------------------------------------------------------------------
    elif value < settingMin:
        print(f"{measureType} BELOW MINIMUM")
        toggled = set_actuator(measureType, 1)
        isActive = actionHelper.is_actuator_active(measureType)
        if isActive == True: #if actuator is active, start monitor loop
            sensordId = DeviceHelper.get_sensor_for_measureType(measureType)
            threading.Timer(1, monitor_active_actuator, args=(measureType,sensordId)).start()
    #-----------------------------------------------------------------------------
    elif isForced == True and value >= settingMin and value < settingMax:
        # Manual toggle: invert the current state and log it as 'manual'.
        status = actionHelper.is_actuator_active(measureType)
        status = not status
        print(f"toggle_actuator - {status}")
        comment = f"manual"
        toggled = set_actuator(measureType, status, comment)
        isActive = actionHelper.is_actuator_active(measureType)
        if isActive == True: #if actuator is active, start monitor loop
            sensordId = DeviceHelper.get_sensor_for_measureType(measureType)
            threading.Timer(1, monitor_active_actuator, args=(measureType,sensordId)).start()
def set_actuator(measureType, status, message = "automatic"):
print(f"set_actuator - {status}")
if measureType is not None and status is not None:
#get data
status = actionHelper.set_active(measureType, status)
now = datetime.now()
date = str(now.strftime("%Y-%m-%d %H:%M:%S"))
comment = message
deviceId = DeviceHelper.get_actuator_for_measureType(measureType)
#print(f"Actuator measure - device ID - {device6Id}")
#check state
isActive = 0
if(status == True):
isActive = 1
#add do database
nieuwId = DataRepository.create_meting(deviceId, isActive, comment, date)
print("")
print(f"Actuator measure")
print(f"-deviceId: {deviceId} - {measureType}")
print(f"-isActive: {isActive}")
print(f"-comment: {comment}")
print(f"-date: {date}")
| |
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for kernel point cloud convolutions"""
import tensorflow as tf
from pylib.pc.utils import _flatten_features
from pylib.pc import PointCloud
from pylib.pc import Grid
from pylib.pc import Neighborhood
from pylib.pc import KDEMode
from pylib.pc.custom_ops import basis_proj
from pylib.pc.layers.utils import _format_output, spherical_kernel_points, \
random_rotation
def _linear_weighting(values, sigma):
  """ Linear (triangular) kernel weights for KP Conv.

  Weights decay linearly from 1 at distance 0 to 0 at distance `sigma`
  and are clamped to 0 beyond it.

  Args:
    values: A `float` `Tensor` of shape `[K, M]`, the normalized distances to
      the kernel points, in `[0,1]`.
    sigma: A `float`, the normalized influence distance of the kernel points.

  Returns:
    A `float` `Tensor` of shape `[K, M]`.
  """
  normalized = values / sigma
  return tf.nn.relu(1 - normalized)
def _gaussian_weighting(values, sigma):
  """ Gaussian kernel weights for KP Conv.

  The influence distance is mapped to a Gaussian whose standard deviation
  is `sigma / 3`, so the weight is ~0 at distance `sigma`.

  Args:
    values: A `float` `Tensor` of shape `[K, M]`, the normalized distances to
      the kernel points, in `[0,1]`.
    sigma: A `float`, the normalized influence distance of the kernel points.

  Returns:
    A `float` `Tensor` of shape `[K, M]`.
  """
  effective_sigma = sigma / 3
  return tf.exp(-(values / effective_sigma)**2)
# Dispatch table: maps the `kp_interpolation` constructor argument of KPConv
# to the corresponding kernel-weighting function.
kernel_interpolation = {'linear': _linear_weighting,
                        'gaussian': _gaussian_weighting}
class KPConv(tf.Module):
  """ A Kernel Point Convolution for 3D point clouds.

  Based on the paper [KPConv: Flexible and Deformable Convolution for Point
  Clouds. Thomas et al., 2019](https://arxiv.org/abs/1904.08889).

  Note: To use this layer for point clouds with arbitrary dimension `D`,
  pass initial kernel points of dimension `D` using `custom_kernel_points`.

  Args:
    num_features_in: An `int`, `C_in`, the number of features per input point.
    num_features_out: An `int`, `C_out`, the number of features to compute.
    num_kernel_points: An ìnt`, the number of points for representing the
      kernel, default is `15`. (optional)
    num_dims: An `int`, the dimensionality of the point cloud. Defaults to `3`.
      (optional)
    deformable: A 'bool', indicating whether to use rigid or deformable kernel
      points, default is `False`. (optional)
    kp_interpolation: A `string`, either `'linear'`(default) or `'gaussian'`.
      (optional)
    custom_kernel_points: A `float` `Tensor` of shape `[K, D]`, to pass custom
      kernel points. (optional)
    initializer_weights: A `tf.initializer` for the weights,
      default `GlorotNormal`. (optional)

  Raises:
    ValueError, if no custom kernel points are passed for dimension not equal
    to 3.
  """
  def __init__(self,
               num_features_in,
               num_features_out,
               num_kernel_points=15,
               num_dims=3,
               deformable=False,
               kp_interpolation='linear',
               custom_kernel_points=None,
               initializer_weights=None,
               name=None):
    """ Initializes kernel points and trainable weights; see class docstring
    for the argument descriptions.
    """
    super().__init__(name=name)
    self._num_features_in = num_features_in
    self._num_features_out = num_features_out
    self._num_kernel_points = num_kernel_points
    self._deformable = deformable
    # Weighting function selected from the module-level dispatch table.
    self._weighting = kernel_interpolation[kp_interpolation]
    self._num_dims = num_dims
    if name is None:
      self._name = 'KPConv'
    else:
      self._name = name
    if num_dims != 3 and custom_kernel_points is None:
      raise ValueError(
          "For dimension not 3 custom kernel points must be provided!")
    # Initialize kernel points: a random-rotated spherical arrangement for
    # 3D, or the user-provided points for arbitrary dimensions.
    if custom_kernel_points is None:
      self._kernel_points = spherical_kernel_points(num_kernel_points,
                                                    rotate=True)
    else:
      self._kernel_points = tf.convert_to_tensor(value=custom_kernel_points,
                                                 dtype=tf.float32)
    # Reposition the points at radius 0.75.
    self._kernel_points = self._kernel_points * 0.75
    # Initialize trainable variables (default initializer: GlorotNormal).
    if initializer_weights is None:
      initializer_weights = tf.initializers.GlorotNormal
    weights_init_obj = initializer_weights()
    if deformable:
      # Extra weights that predict per-kernel-point offsets (deformable KPConv).
      self._kernel_offsets_weights = \
          tf.Variable(
              weights_init_obj(shape=[
                  self._num_kernel_points * self._num_features_in,
                  self._num_kernel_points * self._num_dims],
                  dtype=tf.float32),
              trainable=True,
              name=self._name + "/weights_deformable")
      self._get_offsets = self._kernel_offsets
    else:
      def _zero(*args, **kwargs):
        """ Replaces `_get_offsets` with zeros for rigid KPConv.
        """
        return tf.constant(0.0, dtype=tf.float32)
      self._get_offsets = _zero
    # Main convolution weights: [K * C_in, C_out].
    self._weights = \
        tf.Variable(
            weights_init_obj(shape=[
                self._num_kernel_points * self._num_features_in,
                self._num_features_out],
                dtype=tf.float32),
            trainable=True,
            name=self._name + "/conv_weights")
  def _kp_conv(self,
               kernel_input,
               neighborhood,
               features):
    """ Method to compute a kernel point convolution using linear interpolation
    of the kernel weights.

    Note: In the following
      `D` is the dimensionality of the points cloud (=3)
      `M` is the number of neighbor pairs
      'C1`is the number of input features
      `C2` is the number of output features
      `N1' is the number of input points
      `N2' is the number of ouput points

    Relies on `self._sigma` being set beforehand (done in `__call__`).

    Args:
      kernel_inputs: A `float` `Tensor` of shape `[M, D]`, the input to the
        kernel, i.e. the distances between neighbor pairs.
      neighborhood: A `Neighborhood` instance.
      features: A `float` `Tensor` of shape `[N1, C1]`, the input features.

    Returns:
      A `float` `Tensor` of shape `[N2, C2]`, the output features.
    """
    # neighbor pairs ids
    neighbors = neighborhood._original_neigh_ids
    # Offsets are zero for rigid KPConv, learned for deformable KPConv.
    kernel_offsets = self._get_offsets(kernel_input, neighborhood, features)
    # Distance of every neighbor pair to every (possibly offset) kernel point.
    points_diff = tf.expand_dims(kernel_input, 1) - \
        (tf.expand_dims(self._kernel_points, 0) + kernel_offsets)
    points_dist = tf.linalg.norm(points_diff, axis=2)
    kernel_weights = self._weighting(points_dist, self._sigma)
    # Pad zeros to fullfil requirements of the basis_proj custom op,
    # 8, 16, 32, or 64 basis are allowed. Exact sizes 8/16/32/64 need no
    # padding. NOTE(review): values above 64 are passed through unpadded —
    # presumably rejected by basis_proj; confirm the op's contract.
    if self._num_kernel_points < 8:
      kernel_weights = tf.pad(kernel_weights,
                              [[0, 0], [0, 8 - self._num_kernel_points]])
    elif self._num_kernel_points > 8 and self._num_kernel_points < 16:
      kernel_weights = tf.pad(kernel_weights,
                              [[0, 0], [0, 16 - self._num_kernel_points]])
    elif self._num_kernel_points > 16 and self._num_kernel_points < 32:
      kernel_weights = tf.pad(kernel_weights,
                              [[0, 0], [0, 32 - self._num_kernel_points]])
    elif self._num_kernel_points > 32 and self._num_kernel_points < 64:
      kernel_weights = tf.pad(kernel_weights,
                              [[0, 0], [0, 64 - self._num_kernel_points]])
    # save values for regularization loss computation
    self._cur_point_dist = points_dist
    self._cur_neighbors = neighbors
    # Compute the projection to the samples.
    weighted_features = basis_proj(
        kernel_weights,
        features,
        neighborhood)
    # remove padding
    weighted_features = weighted_features[:, :, 0:self._num_kernel_points]
    #Compute convolution - hidden layer to output (linear)
    convolution_result = tf.matmul(
        tf.reshape(weighted_features,
                   [-1, self._num_features_in * self._num_kernel_points]),
        self._weights)
    return convolution_result
def __call__(self,
features,
point_cloud_in: PointCloud,
point_cloud_out: PointCloud,
conv_radius,
neighborhood=None,
kernel_influence_dist=None,
return_sorted=False,
return_padded=False,
name=None):
""" Computes the Kernel Point Convolution between two point clouds.
Note:
In the following, `A1` to `An` are optional batch dimensions.
`C_in` is the number of input features.
`C_out` is the number of output features.
Args:
features: A `float` `Tensor` of shape `[N1, C_in]` or
`[A1, ..., An,V, C_in]`.
point_cloud_in: A 'PointCloud' instance, on which the features are
defined.
point_cloud_out: A `PointCloud` instance, on which the output
features are defined.
conv_radius: A `float`, the convolution radius.
neighborhood: A `Neighborhood` instance, defining the neighborhood
with centers from `point_cloud_out` and neighbors in `point_cloud_in`.
If `None` it is computed internally. (optional)
kernel_influence_dist = A `float`, the influence distance of the kernel
points. If `None` uses `conv_radius / 2.5`, as suggested in Section 3.3
of the paper. (optional)
return_sorted: A `boolean`, if `True` the output tensor is sorted
according to the batch_ids. (optional)
return_padded: A `bool`, if 'True' the output tensor is sorted and
zero padded. (optional)
Returns:
A `float` `Tensor` of shape
`[N2, C_out]`, if `return_padded` is `False`
or
`[A1, ..., An, V_out, C_out]`, if `return_padded` is `True`.
"""
features = tf.cast(tf.convert_to_tensor(value=features),
dtype=tf.float32)
features = _flatten_features(features, point_cloud_in)
self._num_output_points = point_cloud_out._points.shape[0]
if kernel_influence_dist is None:
# normalized
self._sigma = tf.constant(1.0)
else:
self._sigma = tf.convert_to_tensor(
value=kernel_influence_dist / conv_radius, dtype=tf.float32)
#Create the radii tensor.
radii_tensor = tf.cast(tf.repeat([conv_radius], self._num_dims),
dtype=tf.float32)
if neighborhood is None:
#Compute the grid
grid = Grid(point_cloud_in, radii_tensor)
#Compute the neighborhoods
neigh = Neighborhood(grid, radii_tensor, point_cloud_out)
else:
neigh = neighborhood
#Compute kernel inputs.
neigh_point_coords = tf.gather(
point_cloud_in._points, neigh._original_neigh_ids[:, 0])
center_point_coords = tf.gather(
point_cloud_out._points, neigh._original_neigh_ids[:, 1])
points_diff = (neigh_point_coords - center_point_coords) / \
tf.reshape(radii_tensor, [1, self._num_dims])
#Compute Monte-Carlo convolution
convolution_result = self._kp_conv(points_diff, neigh, features)
return _format_output(convolution_result,
point_cloud_out,
return_sorted,
return_padded)
def _kernel_offsets(self,
kernel_input,
neighborhood,
features):
""" Method to compute the kernel offsets for deformable KPConv
using a rigid KPConv.
As described in Section 3.2 of [KPConv: Flexible and Deformable Convolution
for Point Clouds. Thomas et al., 2019](https://arxiv.org/abs/1904.08889).
Note: In the following
`D` is the dimensionality of the point cloud (=3)
`M` is the number of neighbor pairs
'C1`is the number of input features
`N1' is the number of input points
`N2' is the number of ouput points
`K` is the number of kernel points
Args:
kernel_inputs: A `float` `Tensor` of shape `[M, D]`, the input to the
kernel, i.e. the distances between neighbor pairs.
neighborhood: A `Neighborhood` instance.
features: A `float` `Tensor` of shape `[N1, C1]`, the input features.
Returns:
A `float` `Tensor` of shape `[K, M, D]`, the offsets.
"""
# neighbor pairs ids
neighbors = neighborhood._original_neigh_ids
# kernel weights from distances, shape [M, K]
points_diff = tf.expand_dims(kernel_input, 1) - \
tf.expand_dims(self._kernel_points, 0)
points_dist = tf.linalg.norm(points_diff, | |
#
# Defines data that is consumed by the header2whatever hooks/templates
# to modify the generated files
#
import enum
from typing import Dict, List, Tuple, Optional
from pydantic import validator
from .util import Model, _generating_documentation
class ParamData(Model):
    """
    Various ways to modify parameters.

    Typically specified via the ``param_override`` mechanism of
    :class:`FunctionData`.
    """

    #: Set parameter name to this
    name: Optional[str] = None

    #: Change C++ type emitted
    x_type: Optional[str] = None

    #: Default value for parameter
    default: Optional[str] = None

    #: Disables a default cast caused by ``default_arg_cast``
    disable_type_caster_default_cast: bool = False

    #: Force this to be an 'out' parameter
    #:
    #: .. seealso:: :ref:`autowrap_out_params`
    #:
    force_out: bool = False

    #: Force an array size
    array_size: Optional[int] = None

    #: Ignore this parameter
    ignore: bool = False
class BufferType(str, enum.Enum):
    """Kind of access a wrapped function requires of a python buffer parameter."""

    #: The buffer must indicate that it is readable (such as bytes, or bytearray)
    IN = "in"

    #: The buffer must indicate that it is writeable (such as a bytearray)
    OUT = "out"

    #: The buffer must indicate that it is readable or writeable (such as a bytearray)
    INOUT = "inout"
class BufferData(Model):
    """Exposes a C++ pointer + length parameter pair as a python buffer."""

    #: Indicates what type of python buffer is required
    type: BufferType

    #: Name of C++ parameter that the buffer will use
    src: str

    #: Name of the C++ length parameter. An out-only parameter, it will be set
    #: to the size of the python buffer, and will be returned so the caller can
    #: determine how many bytes were written
    len: str

    #: If specified, the minimum size of the python buffer
    minsz: Optional[int] = None
class ReturnValuePolicy(enum.Enum):
    """
    See `pybind11 documentation <https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies>`_
    for what each of these values mean.

    The member values correspond directly to pybind11's
    ``return_value_policy`` enumerators.
    """

    TAKE_OWNERSHIP = "take_ownership"
    COPY = "copy"
    MOVE = "move"
    REFERENCE = "reference"
    REFERENCE_INTERNAL = "reference_internal"
    AUTOMATIC = "automatic"
    AUTOMATIC_REFERENCE = "automatic_reference"
class FunctionData(Model):
    """
    Customize the way the autogenerator binds a function.

    .. code-block:: yaml

       functions:
         # for non-overloaded functions, just specify the name + customizations
         name_of_non_overloaded_fn:
           # add customizations for function here

         # For overloaded functions, specify the name, but each overload
         # separately
         my_overloaded_fn:
           overloads:
             int, int:
               # customizations for `my_overloaded_fn(int, int)`
             int, int, int:
               # customizations for `my_overloaded_fn(int, int, int)`
    """

    #: If True, don't wrap this
    ignore: bool = False

    #: If True, don't wrap this, but provide a pure virtual implementation
    ignore_pure: bool = False

    #: Generate this in an `#ifdef`
    ifdef: Optional[str] = None
    #: Generate this in an `#ifndef`
    ifndef: Optional[str] = None

    #: Use this code instead of the generated code
    cpp_code: Optional[str] = None

    #: Docstring for the function, will attempt to convert Doxygen docs if omitted
    doc: Optional[str] = None

    #: Text to append to the (autoconverted) docstring for the function
    doc_append: Optional[str] = None

    #: If True, prepends an underscore to the python name
    internal: bool = False

    #: Use this to set the name of the function as exposed to python
    rename: Optional[str] = None

    #: Mechanism to override individual parameters
    param_override: Dict[str, ParamData] = {}

    #: If specified, put the function in a sub.pack.age
    subpackage: Optional[str] = None

    #: By default, robotpy-build will release the GIL whenever a wrapped
    #: function is called.
    no_release_gil: Optional[bool] = None

    #: Buffer protocol specifications for specific parameters
    #: (see :class:`BufferData`)
    buffers: List[BufferData] = []

    #: Per-overload customizations; keys are the comma-separated C++
    #: parameter types shown in the class docstring example above
    overloads: Dict[str, "FunctionData"] = {}

    #: Adds py::keep_alive<x,y> to the function. Overrides automatic
    #: keepalive support, which retains references passed to constructors.
    #: https://pybind11.readthedocs.io/en/stable/advanced/functions.html#keep-alive
    keepalive: Optional[List[Tuple[int, int]]] = None

    #: https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies
    return_value_policy: ReturnValuePolicy = ReturnValuePolicy.AUTOMATIC

    #: If this is a function template, this is a list of instantiations
    #: that you wish to provide. This is a list of lists, where the inner
    #: list is the template parameters for that function
    template_impls: Optional[List[List[str]]] = None

    #: Specify a transformation lambda to be used when this virtual function
    #: is called from C++. This inline code should be a lambda that has the same
    #: arguments as the original C++ virtual function, except the first argument
    #: will be a py::function with the python overload
    #:
    #: cpp_code should also be specified for this to be useful
    #:
    #: For example, to transform a function that takes an iostream into a function
    #: that returns a string:
    #:
    #: .. code-block:: yaml
    #:
    #:    cpp_code: |
    #:      [](MyClass* self) {
    #:        return "string";
    #:      }
    #:    virtual_xform: |
    #:      [](py::function fn, MyClass* self, std::iostream &is) {
    #:         std::string d = py::cast(fn());
    #:         is << d;
    #:      }
    #:
    virtual_xform: Optional[str] = None

    @validator("overloads", pre=True)
    def validate_overloads(cls, value):
        """Normalize overload entries: a key given in YAML with no
        customizations (a None value) becomes a default FunctionData."""
        for k, v in value.items():
            if v is None:
                value[k] = FunctionData()
        return value
# Resolve the self-referential "FunctionData" string annotation used by
# FunctionData.overloads; skipped while generating documentation
if not _generating_documentation:
    FunctionData.update_forward_refs()
class PropAccess(enum.Enum):
    """How python code may access a wrapped attribute/property."""

    #: Determine read/read-write automatically:
    #:
    #: * If a struct/union, default to readwrite
    #: * If a class, default to readwrite if a basic type that isn't a
    #:   reference, otherwise default to readonly
    AUTOMATIC = "auto"

    #: Allow python users access to the value, but ensure it can't
    #: change. This is useful for properties that are defined directly
    #: in the class
    READONLY = "readonly"

    #: Allows python users to read/write the value
    READWRITE = "readwrite"
class PropData(Model):
    """Customize how an attribute is bound as a python property."""

    #: If set to True, this property is not made available to python
    ignore: bool = False

    #: Set the python name of this property to the specified string
    #: (explicit ``= None`` default added for consistency with every other
    #: Optional field in this file; pydantic v1 already treats a bare
    #: ``Optional[...]`` annotation as defaulting to None, so behavior is
    #: unchanged, but the explicit default is also v2-safe)
    rename: Optional[str] = None

    #: Python code access to this property
    access: PropAccess = PropAccess.AUTOMATIC

    #: Docstring for the property (only available on class properties)
    doc: Optional[str] = None

    #: Text to append to the (autoconverted) docstring
    doc_append: Optional[str] = None
class EnumValue(Model):
    """Customization for a single enumerator (used by :class:`EnumData`)."""

    #: If set to True, this property is not made available to python
    ignore: bool = False

    #: Set the python name of this enum value to the specified string
    rename: Optional[str] = None

    #: Docstring for the enum value
    doc: Optional[str] = None

    #: Text to append to the (autoconverted) docstring
    doc_append: Optional[str] = None
class EnumData(Model):
    """Customize the way the autogenerator binds an enum."""

    #: Set your own docstring for the enum
    doc: Optional[str] = None

    #: Text to append to the (autoconverted) docstring
    doc_append: Optional[str] = None

    #: If set to True, this property is not made available to python
    ignore: bool = False

    #: Set the python name of this enum to the specified string
    rename: Optional[str] = None

    #: NOTE(review): undocumented -- presumably a common prefix that is
    #: stripped from the enumerator names when generating the python names;
    #: confirm against the generator before relying on this
    value_prefix: Optional[str] = None

    #: If specified, put the enum in a sub.pack.age (ignored for
    #: enums that are part of classes)
    subpackage: Optional[str] = None

    #: Per-enumerator customizations, keyed by C++ enumerator name
    values: Dict[str, EnumValue] = {}
class ClassData(Model):
#: Docstring for the class
doc: Optional[str] = None
#: Text to append to the (autoconverted) docstring
doc_append: Optional[str] = None
ignore: bool = False
ignored_bases: List[str] = []
#: Specify fully qualified names for the bases
base_qualnames: Dict[str, str] = {}
attributes: Dict[str, PropData] = {}
enums: Dict[str, EnumData] = {}
methods: Dict[str, FunctionData] = {}
is_polymorphic: bool = False
force_no_trampoline: bool = False
force_no_default_constructor: bool = False
#: pybind11 will detect multiple inheritance automatically if a
#: class directly derives from multiple classes. However,
#: If the class derives from classes that participate in multiple
#: inheritance, pybind11 won't detect it automatically, so this
#: flag is needed.
force_multiple_inheritance: bool = False
#: If there are circular dependencies, this will help you resolve them
#: manually. TODO: make it so we don't need this
force_depends: List[str] = []
#: Use this to bring in type casters for a particular type that may have
#: been hidden (for example, with a typedef or definition in another file),
#: instead of explicitly including the header. This should be the full
#: namespace of the type.
force_type_casters: List[str] = []
#: If the object shouldn't be deleted by pybind11, use this. Disables
#: implicit constructors.
nodelete: bool = False
#: Set the python name of the class to this
rename: Optional[str] = None
#: This is deprecated and has no effect
shared_ptr: bool = True
#: If specified, put the class in a sub.pack.age. Ignored
#: for functions attached to a class. When template parameters
#: | |
<gh_stars>0
# Подсчитать, сколько было выделено памяти под переменные
# в ранее разработанных программах в рамках первых трех уроков.
# Проанализировать результат и определить программы с наиболее
# эффективным использованием памяти.
# Примечание: Для анализа возьмите любые 1-3 ваших программы
# или несколько вариантов кода для одной и той же задачи.
# Результаты анализа вставьте в виде комментариев к коду.
# Также укажите в комментариях версию Python и разрядность вашей ОС.
# Python 3.7.0 on MacOS 10.14.2 64-bit
import sys
import random
import math
def show_size(x, level=0):
    """Recursively print the type, shallow size and value of ``x``.

    Each nesting level is indented by one extra tab. Mappings are expanded
    into their keys and values; other iterables into their elements.
    Strings are treated as scalars so they are not split per character.
    """
    print('\t' * level, f'type = {type(x)}, size = {sys.getsizeof(x)}, object = {x}')
    if not hasattr(x, '__iter__'):
        return
    if hasattr(x, 'items'):
        # mapping: descend into both keys and values
        for key, value in x.items():
            show_size(key, level + 1)
            show_size(value, level + 1)
    elif not isinstance(x, str):
        # any other iterable (list, tuple, set, ...)
        for element in x:
            show_size(element, level + 1)
# В диапазоне натуральных чисел от 2 до 99 определить,
# сколько из них кратны любому из чисел в диапазоне от 2 до 9.
def my_count(array1, array2):
    """Zero out (in place) every element of ``array1`` that is divisible by
    any number in ``array2``, then count the zeroed slots.

    Args:
        array1: list of ints to test; modified in place.
        array2: list of candidate divisors.

    Returns:
        ``[cnt, array1, array2, i, j, k]`` -- the count of zeroed elements,
        both (possibly modified) lists, and the final loop variables. The
        loop variables are returned on purpose so the caller can inspect
        their memory footprint with ``show_size``.

    Note: the original version wrote ``array1[i - 2] = 0``, which is only
    correct when ``array1`` is exactly ``list(range(2, 100))`` (value ==
    index + 2). Zeroing by position makes it work for any input list while
    producing the identical result for the original task input.
    """
    cnt = 0
    for idx in range(len(array1)):
        i = array1[idx]
        # No `break` after a hit: `j` must finish as the last candidate
        # divisor, since it is part of the returned list.
        for j in array2:
            if i % j == 0:
                array1[idx] = 0
    for k in range(len(array1)):
        if array1[k] == 0:
            cnt += 1
    return [cnt, array1, array2, i, j, k]
# Task input: the natural numbers 2..99 and the candidate divisors 2..9
array1 = list(range(2, 100))
array2 = list(range(2, 10))
# print(show_size(my_count(array1, array2)))
# type = <class 'list'>, size = 112, object = [77, [0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 13, 0, 0, 0, 17, 0, 19, 0, 0, 0, 23, 0, 0, 0, 0, 0, 29, 0, 31, 0, 0, 0, 0, 0, 37, 0, 0, 0, 41, 0, 43, 0, 0, 0, 47, 0, 0, 0, 0, 0, 53, 0, 0, 0, 0, 0, 59, 0, 61, 0, 0, 0, 0, 0, 67, 0, 0, 0, 71, 0, 73, 0, 0, 0, 0, 0, 79, 0, 0, 0, 83, 0, 0, 0, 0, 0, 89, 0, 0, 0, 0, 0, 0, 0, 97, 0, 0], [2, 3, 4, 5, 6, 7, 8, 9], 99, 9, 97]
# type = <class 'int'>, size = 28, object = 77
# type = <class 'list'>, size = 992, object = [0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 13, 0, 0, 0, 17, 0, 19, 0, 0, 0, 23, 0, 0, 0, 0, 0, 29, 0, 31, 0, 0, 0, 0, 0, 37, 0, 0, 0, 41, 0, 43, 0, 0, 0, 47, 0, 0, 0, 0, 0, 53, 0, 0, 0, 0, 0, 59, 0, 61, 0, 0, 0, 0, 0, 67, 0, 0, 0, 71, 0, 73, 0, 0, 0, 0, 0, 79, 0, 0, 0, 83, 0, 0, 0, 0, 0, 89, 0, 0, 0, 0, 0, 0, 0, 97, 0, 0]
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 11
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 13
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 17
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 19
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 23
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 29
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 31
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 37
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 41
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 43
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 47
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 53
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 59
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 61
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 67
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 71
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 28, object = 73
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size = 24, object = 0
# type = <class 'int'>, size | |
Bay city, Texas",4083),
("Natalia city, Texas",1198),
("Navarro town, Texas",193),
("Navasota city, Texas",7432),
("Nazareth city, Texas",300),
("Nederland city, Texas",17371),
("Needville city, Texas",3063),
("Nesbitt town, Texas",117),
("Netos CDP, Texas",0),
("Nevada city, Texas",814),
("Newark city, Texas",1326),
("New Berlin city, Texas",689),
("New Boston city, Texas",4694),
("New Braunfels city, Texas",74587),
("Newcastle city, Texas",584),
("New Chapel Hill city, Texas",873),
("New Deal town, Texas",776),
("New Fairview city, Texas",1273),
("New Falcon CDP, Texas",330),
("New Home city, Texas",743),
("New Hope town, Texas",643),
("New London city, Texas",1037),
("New Summerfield city, Texas",800),
("Newton city, Texas",2165),
("New Waverly city, Texas",1067),
("Neylandville town, Texas",76),
("Niederwald city, Texas",505),
("Nina CDP, Texas",20),
("Nixon city, Texas",2987),
("Nocona city, Texas",2960),
("Nocona Hills CDP, Texas",770),
("Nolanville city, Texas",5330),
("Nome city, Texas",405),
("Noonday city, Texas",514),
("Nordheim city, Texas",282),
("Normangee town, Texas",760),
("Normanna CDP, Texas",138),
("North Alamo CDP, Texas",4447),
("North Cleveland city, Texas",235),
("North Escobares CDP, Texas",109),
("Northlake town, Texas",2524),
("North Pearsall CDP, Texas",1211),
("North Richland Hills city, Texas",69800),
("North San Pedro CDP, Texas",890),
("Novice city, Texas",44),
("Oak Grove town, Texas",779),
("Oakhurst CDP, Texas",378),
("Oak Island CDP, Texas",379),
("Oak Leaf city, Texas",1486),
("Oak Point city, Texas",3878),
("Oak Ridge town (Cooke County), Texas",177),
("Oak Ridge town (Kaufman County), Texas",529),
("Oak Ridge North city, Texas",3136),
("Oak Trail Shores CDP, Texas",2981),
("Oak Valley town, Texas",430),
("Oakwood town, Texas",765),
("O'Brien city, Texas",101),
("Odem city, Texas",2410),
("Odessa city, Texas",118582),
("O'Donnell city, Texas",880),
("Oglesby city, Texas",736),
("Oilton CDP, Texas",271),
("Old River-Winfree city, Texas",2130),
("Olivarez CDP, Texas",4246),
("<NAME>utierrez CDP, Texas",25),
("Olmito CDP, Texas",1243),
("Olmito and Olmito CDP, Texas",336),
("Olmos Park city, Texas",1999),
("Olney city, Texas",3138),
("Olton city, Texas",2005),
("Omaha city, Texas",880),
("Onalaska city, Texas",2714),
("Opdyke West town, Texas",232),
("Orange city, Texas",18901),
("Orange Grove city, Texas",1712),
("Orason CDP, Texas",32),
("Orchard city, Texas",253),
("Ore City city, Texas",1109),
("Overton city, Texas",2741),
("Ovilla city, Texas",3800),
("Owl Ranch CDP, Texas",112),
("Oyster Creek city, Texas",1278),
("Ozona CDP, Texas",2943),
("Pablo Pena CDP, Texas",16),
("Paducah town, Texas",1325),
("Paint Rock town, Texas",411),
("Paisano Park CDP, Texas",86),
("Palacios city, Texas",4629),
("Palestine city, Texas",18237),
("Palisades village, Texas",299),
("Palmer town, Texas",2053),
("Palmhurst city, Texas",2698),
("Palm Valley city, Texas",1706),
("Palmview city, Texas",5740),
("Palmview South CDP, Texas",5352),
("Palo Blanco CDP, Texas",80),
("Paloma Creek CDP, Texas",3340),
("Paloma Creek South CDP, Texas",6624),
("Palo Pinto CDP, Texas",161),
("Pampa city, Texas",17793),
("Panhandle town, Texas",2728),
("Panorama Village city, Texas",3179),
("Pantego town, Texas",2532),
("Paradise city, Texas",492),
("Paris city, Texas",24842),
("Parker city, Texas",4534),
("Pasadena city, Texas",154193),
("Pattison city, Texas",478),
("Patton Village city, Texas",1491),
("Pawnee CDP, Texas",151),
("Payne Springs town, Texas",789),
("Pearland city, Texas",117867),
("Pearsall city, Texas",10252),
("Pecan Acres CDP, Texas",5670),
("Pecan Gap city, Texas",166),
("Pecan Grove CDP, Texas",16651),
("Pecan Hill city, Texas",744),
("Pecan Plantation CDP, Texas",5987),
("Pecos city, Texas",9814),
("Pelican Bay city, Texas",1620),
("Pena CDP, Texas",70),
("Penelope town, Texas",160),
("Penitas city, Texas",4834),
("Perezville CDP, Texas",4612),
("Perrin CDP, Texas",256),
("Perryton city, Texas",9167),
("Petersburg city, Texas",1039),
("Petrolia city, Texas",720),
("Petronila city, Texas",145),
("Pettus CDP, Texas",386),
("Pflugerville city, Texas",59757),
("Pharr city, Texas",77835),
("Pilot Point city, Texas",4292),
("Pine Forest city, Texas",709),
("Pine Harbor CDP, Texas",908),
("Pinehurst CDP, Texas",6157),
("Pinehurst city, Texas",2289),
("Pine Island town, Texas",1121),
("Pineland city, Texas",616),
("Pinewood Estates CDP, Texas",1863),
("Piney Point Village city, Texas",3419),
("Pittsburg city, Texas",4634),
("Placedo CDP, Texas",575),
("Plains town, Texas",1180),
("Plainview city, Texas",20714),
("Plano city, Texas",284579),
("Plantersville city, Texas",528),
("Pleak village, Texas",1475),
("Pleasant Hill CDP, Texas",744),
("Pleasanton city, Texas",10450),
("Pleasant Valley town, Texas",323),
("Plum Grove city, Texas",588),
("Point city, Texas",830),
("Point Blank city, Texas",748),
("Point Comfort city, Texas",757),
("Point Venture village, Texas",1194),
("Ponder town, Texas",1973),
("Port Aransas city, Texas",4060),
("Port Arthur city, Texas",55249),
("Porter Heights CDP, Texas",1206),
("Port Isabel city, Texas",5057),
("Portland city, Texas",16865),
("Port Lavaca city, Texas",12303),
("Port Mansfield CDP, Texas",88),
("Port Neches city, Texas",12853),
("Port O'Connor CDP, Texas",971),
("Post city, Texas",5369),
("Post Oak Bend City town, Texas",420),
("Poteet city, Texas",3429),
("Poth town, Texas",2151),
("Potosi CDP, Texas",2925),
("Pottsboro town, Texas",2452),
("Powderly CDP, Texas",1021),
("Powell town, Texas",93),
("Poynor town, Texas",302),
("Prado Verde CDP, Texas",243),
("Prairie View city, Texas",6337),
("Premont city, Texas",2603),
("Presidio city, Texas",4067),
("Preston CDP, Texas",2148),
("Primera town, Texas",4742),
("Princeton city, Texas",9765),
("Progreso city, Texas",5905),
("Progreso Lakes city, Texas",292),
("Prosper town, Texas",19103),
("Providence Village town, Texas",7053),
("Pueblo East CDP, Texas",0),
("Pueblo Nuevo CDP, Texas",679),
("Putnam town, Texas",78),
("Pyote town, Texas",126),
("Quail CDP, Texas",10),
("Quail Creek CDP, Texas",1633),
("Quanah city, Texas",2557),
("Queen City city, Texas",1634),
("Quemado CDP, Texas",169),
("Quesada CDP, Texas",0),
("Quinlan city, Texas",1549),
("Quintana town, Texas",14),
("Quitaque city, Texas",447),
("Quitman city, Texas",2272),
("Radar Base CDP, Texas",283),
("Rafael Pena CDP, Texas",0),
("Ralls city, Texas",1711),
("Ramireno CDP, Texas",69),
("Ramirez-Perez CDP, Texas",49),
("Ramos CDP, Texas",406),
("Ranchette Estates CDP, Texas",207),
("Ranchitos del Norte CDP, Texas",1083),
("Ranchitos East CDP, Texas",513),
("Ranchitos Las Lomas CDP, Texas",308),
("Rancho Alegre CDP, Texas",1448),
("Rancho Banquete CDP, Texas",838),
("Rancho Chico CDP, Texas",548),
("Ranchos Penitas West CDP, Texas",1240),
("Rancho Viejo town, Texas",2832),
("Rancho Viejo CDP, Texas",148),
("Randolph AFB CDP, Texas",1178),
("Ranger city, Texas",2863),
("Rangerville village, Texas",47),
("Rankin city, Texas",940),
("Ransom Canyon town, Texas",1056),
("Ratamosa CDP, Texas",266),
("Ravenna city, Texas",193),
("Raymondville city, Texas",11051),
("Realitos CDP, Texas",280),
("Redfield CDP, Texas",395),
("Redford CDP, Texas",105),
("Redland CDP, Texas",1429),
("Red Lick city, Texas",1069),
("Red Oak city, Texas",12366),
("Redwater city, Texas",1152),
("Redwood CDP, Texas",3429),
("Refugio town, Texas",2847),
("<NAME> CDP, Texas",106),
("Reid Hope King CDP, Texas",1081),
("Reklaw city, Texas",456),
("Relampago CDP, Texas",325),
("Rendon CDP, Texas",13616),
("Reno city (Lamar County), Texas",3272),
("Reno city (Parker and Tarrant Counties), Texas",2811),
("Retreat town, Texas",440),
("Rhome city, Texas",1737),
("Ricardo CDP, Texas",544),
("Rice city, Texas",917),
("Richardson city, Texas",114402),
("Richland town, Texas",205),
("Richland Hills city, Texas",8052),
("Richland Springs town, Texas",281),
("Richmond city, Texas",12084),
("Richwood city, Texas",3851),
("Riesel city, Texas",1073),
("Rio Bravo city, Texas",4791),
("Rio Grande City city, Texas",14415),
("Rio Hondo city, Texas",2607),
("Rio Vista city, Texas",1039),
("Rising Star town, Texas",957),
("Rivereno CDP, Texas",19),
("River Oaks city, Texas",7683),
("Riverside city, Texas",475),
("Riviera CDP, Texas",648),
("Roanoke city, Texas",7899),
("Roaring Springs town, Texas",186),
("Robert Lee city, Texas",1134),
("Robinson city, Texas",11569),
("Robstown city, Texas",11494),
("Roby city, Texas",687),
("Rochester town, Texas",365),
("Rockdale city, Texas",5554),
("Rockport city, Texas",10957),
("Rocksprings town, Texas",1401),
("Rockwall city, Texas",43157),
("Rocky Mound town, Texas",81),
("Rogers town, Texas",1216),
("Rollingwood city, Texas",1569),
("Roma city, Texas",11363),
("Roman Forest city, Texas",1555),
("Ropesville city, Texas",300),
("Roscoe city, Texas",1378),
("Rosebud city, Texas",1437),
("Rose City city, Texas",419),
("Rose Hill Acres city, Texas",455),
("Rosenberg city, Texas",36509),
("Rosharon CDP, Texas",1580),
("Rosita CDP, Texas",3471),
("Ross city, Texas",262),
("Rosser village, Texas",249),
("Rotan city, Texas",1345),
("Round Mountain town, Texas",230),
("Round Rock city, Texas",120157),
("Round Top town, Texas",90),
("Rowlett city, Texas",62127),
("Roxton city, Texas",677),
("Royse City city, Texas",11746),
("Rule town, Texas",597),
("Runaway Bay city, Texas",1784),
("Runge town, Texas",1040),
("Rusk city, Texas",5563),
("Sabinal city, Texas",1655),
("Sachse city, Texas",25133),
("Sadler city, Texas",651),
("Saginaw city, Texas",22873),
("St. Hedwig town, Texas",1941),
("St. Jo city, Texas",930),
("St. Paul town, Texas",1017),
("St. Paul CDP, Texas",790),
("Salado village, Texas",2897),
("Salineño CDP, Texas",129),
("Salineño North CDP, Texas",62),
("Sam<NAME> CDP, Texas",44),
("Samnorwood CDP, Texas",37),
("<NAME> CDP, Texas",764),
("San Angelo city, Texas",99634),
("San Antonio city, Texas",1521),
("San Augustine city, Texas",1815),
("San Benito city, Texas",24466),
("San Carlos CDP, Texas",3659),
("San Carlos I CDP, Texas",63),
("San Carlos II CDP, Texas",456),
("Sanctuary town, Texas",305),
("Sanderson CDP, Texas",759),
("Sandia CDP, Texas",600),
("San Diego city, Texas",4109),
("Sandoval CDP, Texas",0),
("Sand Springs CDP, Texas",846),
("Sandy Hollow-Escondidas CDP, Texas",301),
("Sandy Oaks city, Texas",4396),
("Sandy Point city, Texas",114),
("San Elizario city, Texas",9197),
("San Felipe town, Texas",809),
("San Fernando CDP, Texas",39),
("Sanford town, Texas",151),
("Sanger city, Texas",8023),
("San Isidro CDP, Texas",189),
("San Juan city, Texas",36740),
("San Juan CDP, Texas",119),
("San Leanna village, Texas",563),
("San Leon CDP, Texas",5106),
("San Marcos city, Texas",61751),
("San Patricio city, Texas",368),
("San Pedro CDP, Texas",334),
("San Perlita city, Texas",760),
("San Saba city, Texas",3116),
("Sansom Park city, Texas",5250),
("Santa Anna town, Texas",1174),
("Santa Anna CDP, Texas",9),
("Santa Clara city, Texas",769),
("Santa Cruz CDP, Texas",17),
("Santa Fe city, Texas",13241),
("Santa Maria CDP, Texas",732),
("Santa Monica CDP, Texas",15),
("Santa Rosa town, Texas",2830),
("Santa Rosa CDP, Texas",234),
("Santel CDP, Texas",10),
("San Ygnacio CDP, Texas",547),
("Sarita CDP, Texas",511),
("Savannah CDP, Texas",6224),
("Savoy city, Texas",824),
("Scenic Oaks CDP, Texas",6106),
("Schertz city, Texas",38991),
("Schulenburg city, Texas",2893),
("Scissors CDP, Texas",3564),
("Scotland city, Texas",471),
("Scottsville city, Texas",389),
("Scurry town, Texas",505),
("Seabrook city, Texas",13670),
("Seadrift city, Texas",1446),
("Seagoville city, Texas",16357),
("Seagraves city, Texas",2774),
("Sealy city, Texas",6441),
("Sebastian CDP, Texas",1772),
("Seco Mines CDP, Texas",459),
("Seguin city, Texas",28357),
("Selma city, Texas",9895),
("Seminole city, Texas",7460),
("Serenada CDP, Texas",1502),
("Seth Ward CDP, Texas",2054),
("Seven Oaks city, Texas",74),
("Seven Points city, Texas",1305),
("Seymour city, Texas",2844),
("Shadybrook CDP, Texas",1871),
("Shady Hollow CDP, Texas",4272),
("Shady Shores town, Texas",2828),
("Shallowater city, Texas",2535),
("Shamrock city, Texas",1934),
("Shavano Park city, Texas",3667),
("Sheldon CDP, Texas",2116),
("Shenandoah city, Texas",2817),
("Shepherd city, Texas",3296),
("Sherman city, Texas",41149),
("Sherwood Shores CDP, Texas",1386),
("Shiner city, Texas",2139),
("Shoreacres city, Texas",1630),
("Sienna Plantation CDP, Texas",16996),
("Sierra Blanca CDP, Texas",705),
("Siesta Acres CDP, Texas",2265),
("Siesta Shores CDP, Texas",1477),
("Silsbee city, Texas",6640),
("Silverton city, Texas",709),
("Simonton city, Texas",640),
("Sinton city, Texas",5556),
("Skellytown town, Texas",485),
("Skidmore CDP, Texas",1168),
("Slaton city, Texas",5995),
("Smiley city, Texas",608),
("Smithville city, Texas",4256),
("Smyer town, Texas",605),
("Snook city, Texas",576),
("Snyder city, Texas",11456),
("Socorro city, Texas",33923),
("Solis CDP, Texas",451),
("Somerset city, Texas",1720),
("Somerville city, Texas",1594),
("Sonora city, Texas",3330),
("Sonterra CDP, Texas",2963),
("Sour Lake city, Texas",1998),
("South Alamo CDP, Texas",3731),
("South Fork Estates CDP, Texas",58),
("South Houston city, Texas",17627),
("Southlake city, Texas",30840),
("South La Paloma CDP, Texas",360),
("Southmayd city, Texas",1007),
("South Mountain town, Texas",432),
("South Padre Island town, Texas",2839),
("South Point CDP, Texas",1598),
("Southside Place city, Texas",1596),
("South Toledo Bend CDP, Texas",476),
("Spade CDP, Texas",120),
("Sparks CDP, Texas",3935),
("Spearman city, Texas",3250),
("Splendora city, Texas",1850),
("Spofford city, Texas",240),
("Spring CDP, Texas",58756),
("Spring Branch city, Texas",36),
("Spring Gardens CDP, Texas",643),
("Springlake town, Texas",72),
("Springtown city, Texas",2850),
("Spring Valley Village city, Texas",4239),
("Spur city, Texas",1089),
("Stafford city, Texas",18110),
("Stagecoach town, Texas",653),
("Stamford city, Texas",2930),
("Stanton city, Texas",2937),
("Staples city, Texas",230),
("Star Harbor city, Texas",443),
("Stephenville city, Texas",20396),
("Sterling City city, Texas",994),
("Stinnett city, Texas",1214),
("Stockdale city, Texas",1295),
("Stonewall CDP, Texas",626),
("Stowell CDP, Texas",1291),
("Stratford city, Texas",2108),
("Strawn city, Texas",680),
("Streetman town, Texas",295),
("Study Butte CDP, Texas",196),
("Sudan city, Texas",942),
("Sugar Land city, Texas",118182),
("Sullivan City city, Texas",4141),
("Sulphur Springs city, Texas",15948),
("Sundown city, Texas",1222),
("Sunnyvale town, Texas",6284),
("Sunray city, Texas",1976),
("Sunrise Beach Village city, Texas",943),
("Sunset CDP (Montague County), Texas",485),
("Sunset CDP (Starr County), Texas",14),
("Sunset Acres CDP, Texas",0),
("Sunset Valley city, Texas",547),
("Sun Valley city, Texas",72),
("Surfside Beach city, Texas",593),
("Sweeny city, Texas",3739),
("Sweetwater city, Texas",10701),
("Taft city, Texas",3012),
("Taft Southwest CDP, Texas",980),
("Tahoka city, Texas",2602),
("Talco city, Texas",816),
("Talty town, Texas",2302),
("Tanquecitos South Acres CDP, Texas",113),
("Tanquecitos South Acres II CDP, Texas",0),
("Tatum city, Texas",1081),
("Taylor city, Texas",16811),
("Taylor Lake Village city, Texas",3668),
("Taylor Landing city, Texas",251),
("Teague city, Texas",3533),
("Tehuacana town, Texas",205),
("Temple city, Texas",73143),
("Tenaha town, Texas",1402),
("Terlingua CDP, Texas",82),
("Terrell city, Texas",17378),
("Terrell Hills city, Texas",5330),
("Texarkana city, Texas",37277),
("Texas City city, Texas",47902),
("Texhoma city, Texas",350),
("Texline town, Texas",386),
("The Colony city, Texas",42215),
("The Hills village, Texas",2559),
("The Woodlands CDP, Texas",109843),
("Thompsons town, Texas",212),
("Thompsonville CDP, Texas",198),
("Thorndale city, Texas",1252),
("Thornton town, Texas",665),
("Thorntonville town, Texas",601),
("Thrall city, Texas",779),
("Three Rivers city, Texas",1780),
("Throckmorton town, Texas",732),
("Thunderbird Bay CDP, Texas",546),
("Tierra Bonita CDP, Texas",112),
("Tierra Dorada CDP, Texas",0),
("Tierra Grande CDP, Texas",466),
("Tierra Verde CDP, Texas",428),
("Tiki Island village, Texas",963),
("Tilden CDP, Texas",246),
("Timbercreek Canyon village, Texas",471),
("Timberwood | |
<gh_stars>0
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import itertools
import logging
import os.path
import re
import subprocess
import time
import unittest
from dulwich import diff_tree
from dulwich import objects
import fixtures
import mock
from testtools.content import text_content
from reno import config
from reno import create
from reno import scanner
from reno.tests import base
from reno import utils
_SETUP_TEMPLATE = """
import setuptools
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
"""
_CFG_TEMPLATE = """
[metadata]
name = testpkg
summary = Test Package
[files]
packages =
testpkg
"""
class GPGKeyFixture(fixtures.Fixture):
    """Creates a GPG key for testing.

    It's recommended that this be used in concert with a unique home
    directory.
    """

    def setUp(self):
        super(GPGKeyFixture, self).setUp()
        tempdir = self.useFixture(fixtures.TempDir())
        # Parse "gpg (GnuPG) X.Y.Z" from the --version output.  The
        # previous pattern used the character class [\d+], which matches
        # exactly ONE character, so multi-digit version components were
        # truncated (e.g. "2.0.22" parsed as (2, 0, 2)); (\d+) captures
        # each full number.  Raw string avoids invalid-escape warnings.
        gnupg_version_re = re.compile(r'^gpg\s.*\s(\d+)\.(\d+)\.(\d+)')
        gnupg_version = utils.check_output(['gpg', '--version'],
                                           cwd=tempdir.path)
        for line in gnupg_version.split('\n'):
            gnupg_version = gnupg_version_re.match(line)
            if gnupg_version:
                gnupg_version = (int(gnupg_version.group(1)),
                                 int(gnupg_version.group(2)),
                                 int(gnupg_version.group(3)))
                break
        else:
            # No line matched: assume an ancient/unknown gpg.
            if gnupg_version is None:
                gnupg_version = (0, 0, 0)
        config_file = tempdir.path + '/key-config'
        # Context manager guarantees the handle is closed even on error
        # (replaces the old explicit try/finally).
        with open(config_file, 'wt') as f:
            if gnupg_version[0] == 2 and gnupg_version[1] >= 1:
                # GnuPG >= 2.1 understands these unattended-generation
                # directives; older versions would reject them.
                f.write("""
%no-protection
%transient-key
""")
            f.write("""
%no-ask-passphrase
Key-Type: RSA
Name-Real: Example Key
Name-Comment: N/A
Name-Email: <EMAIL>
Expire-Date: 2d
Preferences: (setpref)
%commit
""")
        # Note that --quick-random (--debug-quick-random in GnuPG 2.x)
        # does not have a corresponding preferences file setting and
        # must be passed explicitly on the command line instead
        if gnupg_version[0] == 1:
            gnupg_random = '--quick-random'
        elif gnupg_version[0] >= 2:
            gnupg_random = '--debug-quick-random'
        else:
            gnupg_random = ''
        cmd = ['gpg', '--gen-key', '--batch']
        if gnupg_random:
            cmd.append(gnupg_random)
        cmd.append(config_file)
        subprocess.check_call(
            cmd,
            cwd=tempdir.path,
            # Direct stderr to its own pipe, from which we don't read,
            # to quiet the commands.
            stderr=subprocess.PIPE,
        )
class GitRepoFixture(fixtures.Fixture):
    """Fixture providing a git repository with signed-commit support."""

    logger = logging.getLogger('git')

    def __init__(self, reporoot):
        # Remember where the repository lives before base-class setup.
        self.reporoot = reporoot
        super(GitRepoFixture, self).__init__()

    def setUp(self):
        super(GitRepoFixture, self).setUp()
        # A throwaway GPG key is required so signed ('-s') tags work.
        self.useFixture(GPGKeyFixture())
        os.makedirs(self.reporoot)
        self.git('init', '.')
        self.git('config', '--local', 'user.email', '<EMAIL>')
        self.git('config', '--local', 'user.name', 'reno developer')
        self.git('config', '--local', 'user.signingkey',
                 '<EMAIL>')

    def git(self, *args):
        """Run one git command inside the repository and return its output."""
        self.logger.debug('$ git %s', ' '.join(args))
        out = utils.check_output(
            ['git'] + list(args),
            cwd=self.reporoot,
        )
        self.logger.debug(out)
        return out

    def commit(self, message='commit message'):
        """Stage everything and record a new commit."""
        for cmd in (('add', '.'),
                    ('commit', '-m', message),
                    ('show', '--pretty=format:%H')):
            self.git(*cmd)
        time.sleep(0.1)  # force a delay between commits

    def add_file(self, name):
        """Create *name* with boilerplate contents and commit it."""
        target = os.path.join(self.reporoot, name)
        with open(target, 'w') as fh:
            fh.write('adding %s\n' % name)
        self.commit('add %s' % name)
class Base(base.TestCase):
    """Shared fixture for scanner tests.

    Builds a temporary git repository (with GPG signing), a reno
    configuration pointing at it, and helpers for adding release-note
    files and a minimal Python package.
    """

    logger = logging.getLogger('test')

    def _add_notes_file(self, slug='slug', commit=True, legacy=False,
                        contents='i-am-also-a-template'):
        """Create a release-note file and return its repo-relative path.

        :param slug: human-readable part of the note file name
        :param commit: when True (default) commit the file immediately;
            when False only create it, so a later ``commit()`` (whose
            ``git add .`` stages everything) picks it up.  Previously
            this flag was accepted but ignored, so callers passing
            ``commit=False`` still got an immediate commit.
        :param legacy: use the old ``<uuid>-<slug>`` naming convention
            instead of the current ``<slug>-<uuid>``
        :param contents: text written into the note file
        """
        n = self.get_note_num()
        if legacy:
            basename = '%016x-%s.yaml' % (n, slug)
        else:
            basename = '%s-%016x.yaml' % (slug, n)
        filename = os.path.join(self.reporoot, 'releasenotes', 'notes',
                                basename)
        create._make_note_file(filename, contents)
        if commit:
            self.repo.commit('add %s' % basename)
        return os.path.join('releasenotes', 'notes', basename)

    def _make_python_package(self):
        """Add a minimal pbr-style Python package and commit it."""
        setup_name = os.path.join(self.reporoot, 'setup.py')
        with open(setup_name, 'w') as f:
            f.write(_SETUP_TEMPLATE)
        cfg_name = os.path.join(self.reporoot, 'setup.cfg')
        with open(cfg_name, 'w') as f:
            f.write(_CFG_TEMPLATE)
        pkgdir = os.path.join(self.reporoot, 'testpkg')
        os.makedirs(pkgdir)
        init = os.path.join(pkgdir, '__init__.py')
        with open(init, 'w') as f:
            # NOTE(review): this writes plain text (not valid Python) into
            # __init__.py; the file apparently only needs to exist for
            # project detection and is never imported — confirm.
            f.write("Test package")
        self.repo.commit('add test package')

    def setUp(self):
        super(Base, self).setUp()
        self.fake_logger = self.useFixture(
            fixtures.FakeLogger(
                format='%(levelname)8s %(name)s %(message)s',
                level=logging.DEBUG,
                nuke_handlers=True,
            )
        )
        # Older git does not have config --local, so create a temporary home
        # directory to permit using git config --global without stepping on
        # developer configuration.
        self.useFixture(fixtures.TempHomeDir())
        self.useFixture(fixtures.NestedTempfile())
        self.temp_dir = self.useFixture(fixtures.TempDir()).path
        self.reporoot = os.path.join(self.temp_dir, 'reporoot')
        self.repo = self.useFixture(GitRepoFixture(self.reporoot))
        self.c = config.Config(self.reporoot)
        # Monotonically increasing counter gives each note a unique hex id.
        self._counter = itertools.count(1)
        self.get_note_num = lambda: next(self._counter)
class BasicTest(Base):
    def test_non_python_no_tags(self):
        # Not a Python project and no tags: notes land under version 0.0.0.
        filename = self._add_notes_file()
        self.scanner = scanner.Scanner(self.c)
        raw_results = self.scanner.get_notes_by_version()
        # Keep only the filenames; drop the SHA paired with each note.
        results = {
            k: [f for (f, n) in v]
            for (k, v) in raw_results.items()
        }
        self.assertEqual(
            {'0.0.0': [filename]},
            results,
        )

    def test_python_no_tags(self):
        # A Python package but still no tags: version defaults to 0.0.0.
        self._make_python_package()
        filename = self._add_notes_file()
        self.scanner = scanner.Scanner(self.c)
        raw_results = self.scanner.get_notes_by_version()
        results = {
            k: [f for (f, n) in v]
            for (k, v) in raw_results.items()
        }
        self.assertEqual(
            {'0.0.0': [filename]},
            results,
        )

    def test_note_before_tag(self):
        # A note committed before the tag belongs to that tagged version.
        filename = self._add_notes_file()
        self.repo.add_file('not-a-release-note.txt')
        self.repo.git('tag', '-s', '-m', 'first tag', '1.0.0')
        self.scanner = scanner.Scanner(self.c)
        raw_results = self.scanner.get_notes_by_version()
        results = {
            k: [f for (f, n) in v]
            for (k, v) in raw_results.items()
        }
        self.assertEqual(
            {'1.0.0': [filename]},
            results,
        )

    def test_note_commit_tagged(self):
        # The tag points directly at the note's commit.
        filename = self._add_notes_file()
        self.repo.git('tag', '-s', '-m', 'first tag', '1.0.0')
        self.scanner = scanner.Scanner(self.c)
        raw_results = self.scanner.get_notes_by_version()
        results = {
            k: [f for (f, n) in v]
            for (k, v) in raw_results.items()
        }
        self.assertEqual(
            {'1.0.0': [filename]},
            results,
        )

    def test_note_commit_after_tag(self):
        # A note added after the tag shows up under the dev version '1.0.0-1'.
        self._make_python_package()
        self.repo.git('tag', '-s', '-m', 'first tag', '1.0.0')
        filename = self._add_notes_file()
        self.scanner = scanner.Scanner(self.c)
        raw_results = self.scanner.get_notes_by_version()
        results = {
            k: [f for (f, n) in v]
            for (k, v) in raw_results.items()
        }
        self.assertEqual(
            {'1.0.0-1': [filename]},
            results,
        )

    def test_other_commit_after_tag(self):
        # Non-note commits after the tag do not create a new version entry.
        filename = self._add_notes_file()
        self.repo.add_file('ignore-1.txt')
        self.repo.git('tag', '-s', '-m', 'first tag', '1.0.0')
        self.repo.add_file('ignore-2.txt')
        self.scanner = scanner.Scanner(self.c)
        raw_results = self.scanner.get_notes_by_version()
        results = {
            k: [f for (f, n) in v]
            for (k, v) in raw_results.items()
        }
        self.assertEqual(
            {'1.0.0': [filename]},
            results,
        )

    def test_multiple_notes_after_tag(self):
        # Two notes in two commits after the tag: both grouped under '1.0.0-2'.
        self._make_python_package()
        self.repo.git('tag', '-s', '-m', 'first tag', '1.0.0')
        f1 = self._add_notes_file()
        f2 = self._add_notes_file()
        self.scanner = scanner.Scanner(self.c)
        raw_results = self.scanner.get_notes_by_version()
        results = {
            k: [f for (f, n) in v]
            for (k, v) in raw_results.items()
        }
        self.assertEqual(
            {'1.0.0-2': [f1, f2]},
            results,
        )

    def test_multiple_notes_within_tag(self):
        # Two notes inside the tagged range are both attributed to the tag.
        self._make_python_package()
        # NOTE(review): commit=False is accepted but ignored by
        # _add_notes_file as written, so f1 is committed separately here
        # rather than being staged into f2's commit — confirm intent.
        f1 = self._add_notes_file(commit=False)
        f2 = self._add_notes_file()
        self.repo.git('tag', '-s', '-m', 'first tag', '1.0.0')
        self.scanner = scanner.Scanner(self.c)
        raw_results = self.scanner.get_notes_by_version()
        results = {
            k: [f for (f, n) in v]
            for (k, v) in raw_results.items()
        }
        self.assertEqual(
            {'1.0.0': [f1, f2]},
            results,
        )

    def test_multiple_tags(self):
        # Notes are split between a tagged version and the following dev
        # version.
        self._make_python_package()
        self.repo.git('tag', '-s', '-m', 'first tag', '1.0.0')
        f1 = self._add_notes_file()
        self.repo.git('tag', '-s', '-m', 'first tag', '2.0.0')
        f2 = self._add_notes_file()
        self.scanner = scanner.Scanner(self.c)
        raw_results = self.scanner.get_notes_by_version()
        results = {
            k: [f for (f, n) in v]
            for (k, v) in raw_results.items()
        }
        self.assertEqual(
            {'2.0.0': [f1],
             '2.0.0-1': [f2],
             },
            results,
        )

    def test_rename_file(self):
        # Renaming a note keeps it associated with its original version.
        self._make_python_package()
        self.repo.git('tag', '-s', '-m', 'first tag', '1.0.0')
        f1 = self._add_notes_file('slug1')
        self.repo.git('tag', '-s', '-m', 'first tag', '2.0.0')
        f2 = f1.replace('slug1', 'slug2')
        self.repo.git('mv', f1, f2)
        self.repo.commit('rename note file')
        self.scanner = scanner.Scanner(self.c)
        raw_results = self.scanner.get_notes_by_version()
        results = {
            k: [f for (f, n) in v]
            for (k, v) in raw_results.items()
        }
        self.assertEqual(
            {'2.0.0': [f2],
             },
            results,
        )

    def test_rename_file_sort_earlier(self):
        # Renaming to a name that sorts earlier must not change its version.
        self._make_python_package()
        self.repo.git('tag', '-s', '-m', 'first tag', '1.0.0')
        f1 = self._add_notes_file('slug1')
        self.repo.git('tag', '-s', '-m', 'first tag', '2.0.0')
        f2 = f1.replace('slug1', 'slug0')
        self.repo.git('mv', f1, f2)
        self.repo.commit('rename note file')
        self.scanner = scanner.Scanner(self.c)
        raw_results = self.scanner.get_notes_by_version()
        results = {
            k: [f for (f, n) in v]
            for (k, v) in raw_results.items()
        }
        self.assertEqual(
            {'2.0.0': [f2],
             },
            results,
        )

    def test_edit_file(self):
        # Editing a note after the tag keeps it under its original version.
        self._make_python_package()
        self.repo.git('tag', '-s', '-m', 'first tag', '1.0.0')
        f1 = self._add_notes_file()
        self.repo.git('tag', '-s', '-m', 'first tag', '2.0.0')
        with open(os.path.join(self.reporoot, f1), 'w') as f:
            f.write('---\npreamble: new contents for file')
        self.repo.commit('edit note file')
        self.scanner = scanner.Scanner(self.c)
        raw_results = self.scanner.get_notes_by_version()
        results = {
            k: [f for (f, n) in v]
            for (k, v) in raw_results.items()
        }
        self.assertEqual(
            {'2.0.0': [f1],
             },
            results,
        )

    def test_legacy_file(self):
        # A note using the legacy '<uuid>-<slug>' naming is tracked across
        # a rename.
        self._make_python_package()
        self.repo.git('tag', '-s', '-m', 'first tag', '1.0.0')
        f1 = self._add_notes_file('slug1', legacy=True)
        self.repo.git('tag', '-s', '-m', 'first tag', '2.0.0')
        f2 = f1.replace('slug1', 'slug2')
        self.repo.git('mv', f1, f2)
        self.repo.commit('rename note file')
        self.scanner = scanner.Scanner(self.c)
        raw_results = self.scanner.get_notes_by_version()
        results = {
            k: [f for (f, n) in v]
            for (k, v) in raw_results.items()
        }
        self.assertEqual(
            {'2.0.0': [f2],
             },
            results,
        )

    def test_rename_legacy_file_to_new(self):
        # Migrating a legacy note name to the new convention preserves
        # its version association.
        self._make_python_package()
        self.repo.git('tag', '-s', '-m', 'first tag', '1.0.0')
        f1 = self._add_notes_file('slug1', legacy=True)
        self.repo.git('tag', '-s', '-m', 'first tag', '2.0.0')
        # Rename the file with the new convention of placing the UUID
        # after the slug instead of before.
        f2 = f1.replace('0000000000000001-slug1',
                        'slug1-0000000000000001')
        self.repo.git('mv', f1, f2)
        self.repo.commit('rename note file')
        self.scanner = scanner.Scanner(self.c)
        raw_results = self.scanner.get_notes_by_version()
        results = {
            k: [f for (f, n) in v]
            for (k, v) in raw_results.items()
        }
        self.assertEqual(
            {'2.0.0': [f2],
             },
            results,
        )

    def test_limit_by_earliest_version(self):
        # earliest_version cuts off versions older than the configured
        # floor; the 1.0.0 note is excluded from the results.
        self._make_python_package()
        self._add_notes_file()
        self.repo.git('tag', '-s', '-m', 'first tag', '1.0.0')
        f2 = self._add_notes_file()
        self.repo.git('tag', '-s', '-m', 'middle tag', '2.0.0')
        f3 = self._add_notes_file()
        self.repo.git('tag', '-s', '-m', 'last tag', '3.0.0')
        self.c.override(
            earliest_version='2.0.0',
        )
        self.scanner = scanner.Scanner(self.c)
        raw_results = self.scanner.get_notes_by_version()
        results = {
            k: [f for (f, n) in v]
            for (k, v) in raw_results.items()
        }
        self.assertEqual(
            {'2.0.0': [f2],
             '3.0.0': [f3],
             },
            results,
        )
def | |
proxy Configuration describe a connection to an external proxy server which provides internet access cluster members. It's applied for package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_private_proxy_config(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_private_proxy_config_with_http_info(name, **kwargs)
else:
(data) = self.get_private_proxy_config_with_http_info(name, **kwargs)
return data
def get_private_proxy_config_with_http_info(self, name, **kwargs):
"""
retrieve a private proxy configuration by name
An proxy Configuration describe a connection to an external proxy server which provides internet access cluster members. It's applied for package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_private_proxy_config_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_private_proxy_config" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_private_proxy_config`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/proxyconfigs/user/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProxyConfigResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_privates_proxy_config(self, **kwargs):
"""
retrieve private proxy configurations
An proxy Configuration describe a connection to an external proxy server which provides internet access cluster members. It's applied for package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_privates_proxy_config(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[ProxyConfigResponse]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_privates_proxy_config_with_http_info(**kwargs)
else:
(data) = self.get_privates_proxy_config_with_http_info(**kwargs)
return data
def get_privates_proxy_config_with_http_info(self, **kwargs):
"""
retrieve private proxy configurations
An proxy Configuration describe a connection to an external proxy server which provides internet access cluster members. It's applied for package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_privates_proxy_config_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[ProxyConfigResponse]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_privates_proxy_config" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/proxyconfigs/user', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[ProxyConfigResponse]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_proxy_config(self, id, **kwargs):
"""
retrieve proxy configuration by id
An proxy Configuration describe a connection to an external proxy server which provides internet access cluster members. It's applied for package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_proxy_config(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_proxy_config_with_http_info(id, **kwargs)
else:
(data) = self.get_proxy_config_with_http_info(id, **kwargs)
return data
def get_proxy_config_with_http_info(self, id, **kwargs):
"""
retrieve proxy configuration by id
An proxy Configuration describe a connection to an external proxy server which provides internet access cluster members. It's applied for package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_proxy_config_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_proxy_config" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_proxy_config`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/proxyconfigs/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProxyConfigResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_public_proxy_config(self, name, **kwargs):
"""
retrieve a public or private (owned) proxy configuration by name
An proxy Configuration describe a connection to an external proxy server which provides internet access cluster members. It's applied for package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_public_proxy_config(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_public_proxy_config_with_http_info(name, **kwargs)
else:
(data) = self.get_public_proxy_config_with_http_info(name, **kwargs)
return data
def get_public_proxy_config_with_http_info(self, name, **kwargs):
"""
retrieve a public or private (owned) proxy configuration by name
An proxy Configuration describe a connection to an external proxy server which provides internet access cluster members. It's applied for package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_public_proxy_config_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_public_proxy_config" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_public_proxy_config`")
collection_formats = {}
| |
<reponame>fleurgaudfernau/Deformetrica_multiscale
import os
import sys
# sys.path.append(os.path.dirname(os.path.abspath(__file__)) + os.path.sep + '../../../')
import torch
import math
import xml.etree.ElementTree as et
from xml.dom.minidom import parseString
from numpy.random import poisson, exponential, normal
import matplotlib.pyplot as plt
import warnings
from deformetrica import get_model_options
from api.deformetrica import Deformetrica
from core.models.longitudinal_atlas import LongitudinalAtlas
from core.models.clustered_longitudinal_atlas import ClusteredLongitudinalAtlas
from in_out.dataset_functions import create_template_metadata
from in_out.xml_parameters import XmlParameters
from core.observations.datasets.longitudinal_dataset import LongitudinalDataset
from launch.estimate_longitudinal_metric_model import instantiate_longitudinal_metric_model
from in_out.deformable_object_reader import DeformableObjectReader
from in_out.dataset_functions import create_dataset
from in_out.array_readers_and_writers import *
def add_gaussian_noise_to_vtk_file(global_output_dir, filename, obj_type, noise_std):
    """Load a deformable object, jitter every point with Gaussian noise of
    standard deviation *noise_std*, and rewrite it under the same basename
    inside *global_output_dir*.
    """
    shape = DeformableObjectReader().create_object(filename, obj_type)
    shape.update()
    perturbation = normal(0.0, noise_std, size=shape.points.shape)
    shape.set_points(shape.points + perturbation)
    shape.write(global_output_dir, os.path.basename(filename))
def main(arg, model_xml_path, number_of_subjects, mean_number_of_visits_minus_two, mean_observation_time_window, global_add_noise, classes):
"""
Basic info printing.
"""
print('')
print('##############################')
print('##### PyDeformetrica 1.0 #####')
print('##############################')
print('')
"""
Read command line, create output directory, read the model xml file.
"""
sample_index = 1
sample_folder = 'sample_' + str(sample_index)
while os.path.isdir(sample_folder):
sample_index += 1
sample_folder = '/Users/local_vianneydebavelaere/Documents/Thèse/Python/Results/starmen_for_simu/test/sample' + str(sample_index)
os.mkdir(sample_folder)
global_output_dir = sample_folder
xml_parameters = XmlParameters()
xml_parameters._read_model_xml(model_xml_path)
template_specifications = xml_parameters.template_specifications
nb_classes = np.max(classes) + 1
model_options = get_model_options(xml_parameters)
model_options['tensor_scalar_type'] = torch.DoubleTensor
model_options['tensor_integer_type'] = torch.LongTensor
global_dimension = model_options['dimension']
# deformetrica = Deformetrica()
# (template_specifications, model_options, _) = deformetrica.further_initialization(
# xml_parameters.model_type, xml_parameters.template_specifications, get_model_options(xml_parameters))
if xml_parameters.model_type == 'ClusteredLongitudinalAtlas'.lower():
"""
Instantiate the model.
"""
# if np.min(model.get_noise_variance()) < 0:
# model.set_noise_variance(np.array([0.0]))
#path = '/Users/local_vianneydebavelaere/Documents/Thèse/Python/deformetrica-last/sandbox/longitudinal_atlas/landmark/3d/hippocampi'
#xml_parameters.read_all_xmls(path + '/model.xml', path + '/data_set_93subjects.xml',
# path + '/optimization_parameters.xml', path + '/output/')
#nb_subjects = 93
"""
Draw random visit ages and create a degenerated dataset object.
"""
onset_ages = np.zeros(number_of_subjects)
accelerations = np.zeros([3,number_of_subjects])
visit_ages = []
for i in range(number_of_subjects):
number_of_visits = 2 + poisson(mean_number_of_visits_minus_two)
# observation_time_window = exponential(mean_observation_time_window)/10
observation_time_window = 5
time_between_two_consecutive_visits_before = observation_time_window / float(number_of_visits)
time_between_two_consecutive_visits_after = observation_time_window / float(number_of_visits)
#age_at_baseline = normal(68, math.sqrt(model.get_time_parameters_variance(0)[2,2])) \
# - 0.5 * observation_time_window
if classes[i] == 0: age_at_baseline = 70
else: age_at_baseline = 70
ages = [age_at_baseline - (j+1) * time_between_two_consecutive_visits_before for j in range(number_of_visits)]
for j in range(number_of_visits):
ages.append(age_at_baseline + (j+1) * time_between_two_consecutive_visits_after)
nb_visits = np.random.randint(5,10,1)
ages = np.sort(ages)
ages = np.linspace(60,83,nb_visits)
visit_ages.append(ages)
#visit_ages = [np.linspace(60,80,20)]
mini = min(visit_ages[0])
maxi = max(visit_ages[0])
for k in range(1, visit_ages.__len__()):
if min(visit_ages[k]) < mini: mini = min(visit_ages[k])
if max(visit_ages[k]) > maxi: maxi = max(visit_ages[k])
model = ClusteredLongitudinalAtlas(template_specifications, min_times=mini, max_times=maxi, **model_options)
subject_ids = ['s' + str(i) for i in range(number_of_subjects)]
dataset = LongitudinalDataset(subject_ids, times=visit_ages)
# dataset = LongitudinalDataset(['0'], times=[[0,1]])
print('>> %d subjects will be generated, with %.2f visits on average, covering an average period of %.2f years.'
% (number_of_subjects, float(dataset.total_number_of_observations) / float(number_of_subjects),
np.mean(np.array([ages[-1] - ages[0] for ages in dataset.times]))))
"""
Generate individual RER.
"""
# Complementary xml parameters.
# tmin = xml_parameters.tmin
# tmax = xml_parameters.tmax
tmin = 0
tmax = 1
if tmin == float('inf'):
tmin *= -1
if tmax == - float('inf'):
tmax *= -1
sources_mean = 0.0
sources_std = 0.2
if xml_parameters.initial_sources_mean is not None:
sources_mean = read_2D_array(xml_parameters.initial_sources_mean)
if xml_parameters.initial_sources_std is not None:
sources_std = read_2D_array(xml_parameters.initial_sources_std)
sources = np.zeros((number_of_subjects, model.number_of_sources)) + sources_mean
#dir = '/Users/local_vianneydebavelaere/Documents/Thèse/Python/Results/output_93subjects/'
#classes = open(dir + 'LongitudinalAtlas__EstimatedParameters__Classes.txt').read().replace('\n', ' ').split(
# ' ')[:-1]
# min_age = np.zeros(number_of_subjects)
# max_age = np.zeros(number_of_subjects)
A = 0.05*np.eye(3)
A[-1,-1] = 1
model.individual_random_effects['time_parameters'][0].set_covariance(A)
# model.individual_random_effects['time_parameters'][1].set_covariance(A)
model.individual_random_effects['time_parameters'][0].set_mean(np.array([0,0,70]))
# model.individual_random_effects['time_parameters'][1].set_mean(np.array([0,0,70]))
#onset_ages = open(dir + 'LongitudinalAtlas__EstimatedParameters__OnsetAges.txt').read().replace('\n', ',').split(',')[:-1]
#rupture_time = []
#rupture_time.append(float(open(dir + 'LongitudinalAtlas__EstimatedParameters__RuptureTime_classe0.txt').read()))
#rupture_time.append(float(open(dir + 'LongitudinalAtlas__EstimatedParameters__RuptureTime_classe1.txt').read()))
#accelerations = open(dir + 'LongitudinalAtlas__EstimatedParameters__Accelerations.txt').read().replace('\n',
# ' ').split(
# ' ')[:-1]
#accelerations2 = open(dir + 'LongitudinalAtlas__EstimatedParameters__Accelerations2.txt').read().replace('\n',
# ' ').split(
# ' ')[:-1]
#sources = open(dir + 'LongitudinalAtlas__EstimatedParameters__Sources.txt').read().replace('\n',
# ',').split(',')[
# :-1]
i = 0
sources_std = 1
while i in range(number_of_subjects):
# [accelerations[0, i], accelerations[1, i], onset_ages[i]] = model.individual_random_effects['time_parameters'][0].sample()
[accelerations[0, i], accelerations[1, i], accelerations[2,i], onset_ages[i]] = [0,0,0,0]
sources[i] = model.individual_random_effects['sources'].sample() * sources_std/4
i += 1
# min_age[i] = tR[classes[i]] - np.exp(accelerations[i]) * (-visit_ages[i][0] + tR[classes[i]] + onset_ages[i])
# max_age[i] = tR[classes[i]] + np.exp(accelerations2[i]) * (visit_ages[i][-1] - tR[classes[i]] - onset_ages[i])
# if visit_ages[i][0] < onset_ages[i] and visit_ages[i][-1] > onset_ages[i]:
# i += 1
dataset.times = visit_ages
model.name = 'SimulatedData'
model.set_rupture_time(65,0)
model.set_rupture_time(75,1)
# sources = np.zeros([1, 32])
model.set_modulation_matrix(np.ones([12,4]), 0)
# model.set_modulation_matrix(np.ones([12,4]), 1)
# sources[0,2] = -0.5
# sources[0,14] = -0.5
individual_RER = {}
# individual_RER['sources'] = sources
# individual_RER['onset_ages'] = np.array([70.])
# individual_RER['accelerations'] = np.array([1.])
# individual_RER['accelerations2'] = np.array([1.])
# individual_RER['classes'] = np.array([0])
individual_RER['sources'] = sources
individual_RER['onset_ages'] = onset_ages
individual_RER['accelerations'] = np.transpose(accelerations)
individual_RER['classes'] = classes
"""
Call the write method of the model.
"""
momenta = []
mom = [None]*model.nb_classes
# for l in range(model.nb_tot_component):
# momenta.append(np.random.normal(0,0.01,model.get_control_points(0).size).reshape(np.shape(model.get_control_points(0))))
# momenta.append([0,1]*model.get_control_points(0).size/2)
momenta.append([[0,1],[0,0], [0,0], [0,0], [0,0], [0,0]])
momenta.append([[0,0],[0,1], [0,0], [0,0], [0,0], [0,0]])
momenta.append([[0,0],[0,-1], [0,0], [0,0], [0,1], [0,0]])
for k in range(model.nb_classes):
mom[k] = []
for l in model.num_component[k]:
mom[k].append(momenta[l])
model.set_momenta(0.1*np.array(mom[k]),k)
cp = model.get_control_points(0)
cp[4,:] = [1.5,0.7]
model.set_control_points(cp,0)
cp = model.get_control_points(0)
width_x = np.abs(cp[0, 0] - cp[-1, 0]) / 8
width_y = np.abs(cp[0, 1] - cp[-1, 1]) / 8
x, y = np.meshgrid(np.linspace(cp[0, 0] - 3 * width_x, cp[-1, 0] + 3 * width_x, 15),
np.linspace(cp[0, 1] - 3 * width_y, cp[-1, 1] + 3 * width_y, 15))
test = np.array([x, y])
intensity = np.zeros([2, 15, 15])
momenta = model.get_momenta(0, 0)
for i in range(15):
for j in range(15):
for k in range(int(cp.size / 2)):
intensity[:, i, j] += np.exp(-np.linalg.norm(test[:, i, j] - cp[k, :]) / 0.8 ** 2) * np.array(
momenta[k])
plt.quiver(x, y, intensity[0, :, :], intensity[1, :, :], scale=17, color='blue')
cpx = cp[:, 0]
cpy = cp[:, 1]
momx = np.array(momenta)[:, 0]
momy = np.array(momenta)[:, 1]
plt.quiver(cpx, cpy, momx, momy, scale=17, color='red')
plt.scatter(cp[:, 0], cp[:, 1], color='red')
template = model.get_template_data(0)['landmark_points']
template_closed = np.zeros([template.shape[0] + 1, 2])
template_closed[:-1, :] = template
template_closed[-1, :] = template[0, :]
plt.plot(template_closed[:, 0], template_closed[:, 1], color='black')
plt.axis([-2, 2.5, -2, 2.5])
plt.show()
model.write(dataset, None, individual_RER, global_output_dir, update_fixed_effects=False, write_residuals=False)
# template_fin = model.spatiotemporal_reference_frame[0].get_template_points_exponential_parameters(1,np.zeros(sources.shape))
if global_dimension == 2:
cmd_replace = 'sed -i -- s/POLYGONS/LINES/g ' + global_output_dir + '/*Reconstruction*'
cmd_delete = 'rm ' + global_output_dir + '/*--'
cmd = cmd_replace + ' && ' + cmd_delete
os.system(cmd) # Quite time-consuming.
"""
Optionally add gaussian noise to the generated samples.
"""
#model.set_noise_variance(np.array(10))
if global_add_noise:
assert np.min(model.get_noise_variance()) > 0, 'Invalid noise variance.'
objects_type = [elt['deformable_object_type'] for elt in xml_parameters.template_specifications.values()]
for i in range(number_of_subjects):
for j, age in enumerate(dataset.times[i]):
for k, (obj_type, obj_name, obj_extension, obj_noise) in enumerate(zip(
objects_type, model.objects_name, model.objects_name_extension,
model.get_noise_variance())):
filename = sample_folder + '/SimulatedData__Reconstruction__%s__subject_s%d__tp_%d__age_%.2f%s' \
% (obj_name, i, j, age, obj_extension)
add_gaussian_noise_to_vtk_file(global_output_dir, filename, obj_type, math.sqrt(obj_noise))
if global_dimension == 2:
cmd_replace = 'sed -i -- s/POLYGONS/LINES/g ' + global_output_dir + '/*Reconstruction*'
cmd_delete = 'rm ' + global_output_dir + '/*--'
cmd = cmd_replace + ' && ' + cmd_delete
os.system(cmd) # Quite time-consuming.
"""
Create and save the dataset xml file.
"""
dataset_xml = et.Element('data-set')
dataset_xml.set('deformetrica-min-version', "3.0.0")
for i in range(number_of_subjects):
subject_id = 'sub-' + str(i)
subject_xml = et.SubElement(dataset_xml, 'subject')
subject_xml.set('id', subject_id)
for j, age in enumerate(dataset.times[i]):
visit_id = 'ses-' + str(j)
visit_xml = et.SubElement(subject_xml, 'visit')
visit_xml.set('id', visit_id)
age_xml = et.SubElement(visit_xml, 'age')
age_xml.text = '%.2f' % age
for k, (obj_name, obj_extension) in enumerate(zip(model.objects_name, model.objects_name_extension)):
filename_xml = et.SubElement(visit_xml, 'filename')
filename_xml.text = sample_folder + '/SimulatedData__Reconstruction__%s__subject_s%d__tp_%d__age_%.2f%s' \
% (obj_name, i, j, age, obj_extension)
filename_xml.set('object_id', obj_name)
dataset_xml_path = sample_folder + 'data_set__sample_' + str(sample_index) + '.xml'
doc = parseString((et.tostring(dataset_xml).decode('utf-8').replace('\n', '').replace('\t', ''))).toprettyxml()
np.savetxt(dataset_xml_path, [doc], fmt='%s')
"""
Create a dataset object from the xml, and compute the residuals.
"""
xml_parameters._read_dataset_xml(dataset_xml_path)
dataset = create_dataset(xml_parameters.template_specifications,
visit_ages=xml_parameters.visit_ages,
dataset_filenames=xml_parameters.dataset_filenames,
subject_ids=xml_parameters.subject_ids,
dimension=global_dimension)
# if global_add_noise:
# control_points, backward_momenta, forward_momenta, modulation_matrix, rupture_time = model._fixed_effects_to_torch_tensors(False)
# sources, onset_ages, accelerations, accelerations2 = model._individual_RER_to_torch_tensors(individual_RER, False)
# template_points, template_data = model._template_to_torch_tensors(False)
# absolute_times, tmin, tmax = model._compute_absolute_times(dataset.times, onset_ages, accelerations, accelerations2, rupture_time)
# model._update_spatiotemporal_reference_frame(
# template_points, control_points, backward_momenta, forward_momenta, modulation_matrix,
# tmin, tmax)
# residuals = model._compute_residuals(dataset, template_data, absolute_times, sources)
#
# residuals_list = [[[residuals_i_j_k.detach().cpu().numpy() for residuals_i_j_k in residuals_i_j]
# for residuals_i_j in residuals_i] for residuals_i in residuals]
# write_3D_list(residuals_list, global_output_dir, model.name + "__EstimatedParameters__Residuals.txt")
#
# # Print empirical noise if relevant.
# assert np.min(model.get_noise_variance()) > 0, 'Invalid noise variance.'
# objects_empirical_noise_std = np.zeros((len(residuals_list[0][0])))
# for i in range(len(residuals_list)):
# for j in range(len(residuals_list[i])):
# for k in range(len(residuals_list[i][j])):
# objects_empirical_noise_std[k] += residuals_list[i][j][k]
# for k in range(len(residuals_list[0][0])):
# objects_empirical_noise_std[k] = \
# math.sqrt(objects_empirical_noise_std[k]
# / float(dataset.total_number_of_observations * model.objects_noise_dimension[k]))
# print('>> Empirical noise std for | |
],
[ nan, 12000, 4000, nan, 4.61, nan ],
[ nan, 15000, 5000, nan, 8.27, nan ],
[ nan, 18000, 6000, nan, 13.19, nan ],
[ nan, 21000, 7000, nan, 19.71, nan ],
[ nan, 24000, 8000, nan, 28.21, nan ],
[ nan, 27000, 9000, nan, 38.61, nan ],
[ nan, 100, 300, nan, 0.00, nan ],
[ nan, 200, 600, nan, 0.02, nan ],
[ nan, 300, 900, nan, 0.04, nan ],
[ nan, 400, 1200, nan, 0.06, nan ],
[ nan, 500, 1500, nan, 0.08, nan ],
[ nan, 600, 1800, nan, 0.11, nan ],
[ nan, 700, 2100, nan, 0.14, nan ],
[ nan, 800, 2400, nan, 0.18, nan ],
[ nan, 900, 2700, nan, 0.22, nan ],
[ nan, 1000, 3000, nan, 0.27, nan ],
[ nan, 2000, 6000, nan, 1.05, nan ],
[ nan, 3000, 9000, nan, 2.63, nan ],
[ nan, 4000, 12000, nan, 5.01, nan ],
[ nan, 5000, 15000, nan, 9.11, nan ],
[ nan, 6000, 18000, nan, 14.50, nan ],
[ nan, 7000, 21000, nan, 21.97, nan ],
[ nan, 8000, 24000, nan, 30.87, nan ],
[ nan, 9000, 27000, nan, 42.54, nan ],
[ nan, 10000, 100, nan, 0.01, nan ],
[ nan, 20000, 200, nan, 0.04, nan ],
[ nan, 30000, 300, nan, 0.09, nan ],
[ nan, 40000, 400, nan, 0.21, nan ],
[ nan, 50000, 500, nan, 0.33, nan ],
[ nan, 60000, 600, nan, 0.49, nan ],
[ nan, 70000, 700, nan, 0.70, nan ],
[ nan, 80000, 800, nan, 0.96, nan ],
[ nan, 90000, 900, nan, 1.31, nan ],
[ nan, 100000, 1000, nan, 1.65, nan ],
[ nan, 200000, 2000, nan, 9.10, nan ],
[ nan, 100, 10000, nan, 0.01, nan ],
[ nan, 200, 20000, nan, 0.05, nan ],
[ nan, 300, 30000, nan, 0.13, nan ],
[ nan, 400, 40000, nan, 0.25, nan ],
[ nan, 500, 50000, nan, 0.44, nan ],
[ nan, 600, 60000, nan, 0.68, nan ],
[ nan, 700, 70000, nan, 1.06, nan ],
[ nan, 800, 80000, nan, 1.29, nan ],
[ nan, 900, 90000, nan, 1.61, nan ],
[ nan, 1000, 100000, nan, 2.02, nan ],
[ nan, 2000, 200000, nan, 12.42, nan ],
])
# numactl --interleave=all ./testing_sgesdd -US -VS -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000 -N 300,100 -N 600,200 -N 900,300 -N 1200,400 -N 1500,500 -N 1800,600 -N 2100,700 -N 2400,800 -N 2700,900 -N 3000,1000 -N 6000,2000 -N 9000,3000 -N 12000,4000 -N 15000,5000 -N 18000,6000 -N 21000,7000 -N 24000,8000 -N 27000,9000 -N 100,300 -N 200,600 -N 300,900 -N 400,1200 -N 500,1500 -N 600,1800 -N 700,2100 -N 800,2400 -N 900,2700 -N 1000,3000 -N 2000,6000 -N 3000,9000 -N 4000,12000 -N 5000,15000 -N 6000,18000 -N 7000,21000 -N 8000,24000 -N 9000,27000 -N 10000,100 -N 20000,200 -N 30000,300 -N 40000,400 -N 50000,500 -N 60000,600 -N 70000,700 -N 80000,800 -N 90000,900 -N 100000,1000 -N 200000,2000 -N 100,10000 -N 200,20000 -N 300,30000 -N 400,40000 -N 500,50000 -N 600,60000 -N 700,70000 -N 800,80000 -N 900,90000 -N 1000,100000 -N 2000,200000
sgesdd_US = array([
[ nan, 10, 10, nan, 0.00, nan ],
[ nan, 20, 20, nan, 0.00, nan ],
[ nan, 30, 30, nan, 0.00, nan ],
[ nan, 40, 40, nan, 0.00, nan ],
[ nan, 50, 50, nan, 0.00, nan ],
[ nan, 60, 60, nan, 0.00, nan ],
[ nan, 70, 70, nan, 0.00, nan ],
[ nan, 80, 80, nan, 0.00, nan ],
[ nan, 90, 90, nan, 0.00, nan ],
[ nan, 100, 100, nan, 0.00, nan ],
[ nan, 200, 200, nan, 0.02, nan ],
[ nan, 300, 300, nan, 0.04, nan ],
[ nan, 400, 400, nan, 0.06, nan ],
[ nan, 500, 500, nan, 0.09, nan ],
[ nan, 600, 600, nan, 0.12, nan ],
[ nan, 700, 700, nan, 0.16, nan ],
[ nan, 800, 800, nan, 0.21, nan ],
[ nan, 900, 900, nan, 0.27, nan ],
[ nan, 1000, 1000, nan, 0.32, nan ],
[ nan, 2000, 2000, nan, 1.23, nan ],
[ nan, 3000, 3000, nan, 2.88, nan ],
[ nan, 4000, 4000, nan, 5.40, nan ],
[ nan, 5000, 5000, nan, 9.84, nan ],
[ nan, 6000, 6000, nan, 14.44, nan ],
[ nan, 7000, 7000, nan, 0.00, nan ],
[ nan, 8000, 8000, nan, 29.12, nan ],
[ nan, 9000, 9000, nan, 38.77, nan ],
[ nan, 10000, 10000, nan, 50.51, nan ],
[ nan, 12000, 12000, nan, 79.90, nan ],
[ nan, 14000, 14000, nan, 0.00, nan ],
[ nan, 16000, 16000, nan, 170.35, nan ],
[ nan, 18000, 18000, nan, 234.33, nan ],
[ nan, 20000, 20000, nan, 310.26, nan ],
[ nan, 300, 100, nan, 0.01, nan ],
[ nan, 600, 200, nan, 0.02, nan ],
[ nan, 900, 300, nan, 0.04, nan ],
[ nan, 1200, 400, nan, 0.07, nan ],
[ nan, 1500, 500, nan, 0.10, nan ],
[ nan, 1800, 600, nan, 0.15, nan ],
[ nan, 2100, 700, nan, 0.20, nan ],
[ nan, 2400, 800, nan, 0.24, nan ],
[ nan, 2700, 900, nan, 0.33, nan ],
[ nan, 3000, 1000, nan, 0.41, nan ],
[ nan, 6000, 2000, nan, 1.77, nan ],
[ nan, 9000, 3000, nan, 4.21, nan ],
[ nan, 12000, 4000, nan, 8.15, nan ],
[ nan, 15000, 5000, nan, 14.27, nan ],
[ nan, 18000, 6000, nan, 23.12, nan ],
[ nan, 21000, 7000, nan, 0.00, nan ],
[ nan, 24000, 8000, nan, 48.00, nan ],
[ nan, 27000, 9000, nan, 64.77, nan ],
[ nan, 100, 300, nan, 0.01, nan ],
[ nan, 200, 600, nan, 0.02, nan ],
[ nan, 300, 900, nan, 0.05, nan ],
[ nan, 400, 1200, nan, 0.08, nan ],
[ nan, 500, 1500, nan, 0.12, nan ],
[ nan, 600, 1800, nan, 0.17, nan ],
[ nan, 700, 2100, nan, 0.22, nan ],
[ nan, 800, 2400, nan, 0.28, nan ],
[ nan, 900, 2700, nan, 0.36, nan ],
[ nan, 1000, 3000, nan, 0.45, nan ],
[ nan, 2000, 6000, nan, 1.77, nan ],
[ nan, 3000, 9000, nan, 4.40, nan ],
[ nan, 4000, 12000, nan, 8.43, nan ],
[ nan, 5000, 15000, nan, 14.94, nan ],
[ nan, 6000, 18000, nan, 25.70, nan ],
[ nan, 7000, 21000, nan, 0.00, nan ],
[ nan, 8000, 24000, nan, 50.43, nan ],
[ nan, 9000, 27000, nan, 68.50, nan ],
[ nan, 10000, 100, nan, 0.02, nan ],
[ nan, 20000, 200, nan, 0.10, nan ],
[ nan, 30000, 300, nan, 0.18, nan ],
[ nan, 40000, 400, nan, 0.34, nan ],
[ nan, 50000, 500, nan, 0.76, nan ],
[ nan, 60000, 600, nan, 0.99, nan ],
[ nan, 70000, 700, nan, 1.30, nan ],
[ nan, 80000, 800, nan, 1.63, nan ],
[ nan, 90000, 900, nan, 2.18, nan ],
[ nan, 100000, 1000, nan, 3.48, nan ],
[ nan, 200000, 2000, nan, 18.28, nan ],
[ nan, 100, 10000, nan, 0.04, nan ],
[ nan, 200, 20000, nan, 0.18, nan ],
[ nan, 300, 30000, nan, 0.34, nan ],
[ nan, 400, 40000, nan, 0.50, nan ],
[ nan, 500, 50000, nan, 1.85, nan ],
[ nan, 600, 60000, nan, 2.18, nan ],
[ nan, 700, 70000, nan, 2.55, nan ],
[ nan, 800, 80000, nan, 2.72, nan ],
[ nan, 900, 90000, nan, 3.10, nan ],
[ nan, 1000, 100000, nan, 6.57, nan ],
[ nan, 2000, 200000, nan, 26.70, nan ],
])
# ------------------------------------------------------------
# file: v1.6.1/cuda7.0-k40c/sgetrf.txt
# numactl --interleave=all ./testing_sgetrf -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000
sgetrf = array([
[ 10, 10, nan, nan, 0.03, 0.00, nan ],
[ 20, 20, nan, nan, 0.08, 0.00, nan ],
[ 30, 30, nan, nan, 0.45, 0.00, nan ],
[ 40, 40, nan, nan, 0.66, 0.00, nan ],
[ 50, 50, nan, nan, 1.61, 0.00, nan ],
[ 60, 60, nan, nan, 2.50, 0.00, nan ],
[ 70, 70, nan, nan, 2.10, 0.00, nan ],
[ 80, 80, nan, nan, 3.39, 0.00, nan ],
[ 90, 90, nan, nan, 3.33, 0.00, nan ],
[ 100, 100, nan, nan, 4.60, 0.00, nan ],
[ 200, 200, nan, nan, 16.24, 0.00, nan ],
[ 300, 300, nan, nan, 10.97, 0.00, nan ],
[ 400, 400, nan, nan, 21.24, 0.00, nan ],
[ 500, 500, nan, nan, 31.72, 0.00, nan ],
[ 600, 600, nan, nan, 41.11, 0.00, nan ],
[ 700, 700, nan, nan, 54.26, 0.00, nan ],
[ 800, 800, nan, nan, 65.93, 0.01, nan ],
[ 900, 900, nan, nan, 78.59, 0.01, nan ],
[ 1000, 1000, nan, nan, 94.05, 0.01, nan ],
[ 2000, 2000, nan, nan, 240.35, 0.02, nan ],
[ 3000, 3000, nan, nan, 403.41, | |
<reponame>dvssajay/Semantic-Segmentation-of-Urban-Scene-Images-Using-Recurrent-Neural-Networks
# coding: utf-8
# In[1]:
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import numpy as np
import sys, time, os, warnings
from skimage.segmentation import mark_boundaries
import matplotlib.pylab as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
from torchvision.datasets import Cityscapes
import segmentation_models_pytorch as smp
from torchvision.transforms.functional import to_tensor, to_pil_image
from PIL import Image
# In[3]:
from albumentations import (HorizontalFlip,Compose,Resize,Normalize)
# The usual ImageNet channel statistics, used to normalise RGB inputs.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
# Target resolution for training/eval: 256 x 512 (Cityscapes 2:1 aspect ratio).
h,w=256,512
# Training pipeline: resize, random horizontal flip, then normalise.
# albumentations applies the spatial transforms jointly to image and mask.
transform_train = Compose([ Resize(h,w),
                HorizontalFlip(p=0.5),
                Normalize(mean=mean,std=std)])
# Validation pipeline: deterministic — resize and normalise only.
transform_val = Compose( [ Resize(h,w),
                Normalize(mean=mean,std=std)])
# In[4]:
class myCityscapes(Cityscapes):
    """Cityscapes dataset variant that applies a joint albumentations transform.

    Overrides ``__getitem__`` so the image and its label mask are transformed
    together (resize/flip/normalise), returning a float image tensor and a
    ``torch.long`` mask tensor.
    """

    def __getitem__(self, index):
        # Load the RGB frame from the parent class's file list.
        image = Image.open(self.images[index]).convert('RGB')
        targets = []
        for i, t in enumerate(self.target_type):
            if t == 'polygon':
                # Polygon annotations are stored as JSON, not images.
                target = self._load_json(self.targets[index][i])
            else:
                target = Image.open(self.targets[index][i])
            targets.append(target)
        target = tuple(targets) if len(targets) > 1 else targets[0]
        if self.transforms is not None:
            # albumentations-style call: returns one dict holding the
            # transformed 'image' and 'mask'.
            # NOTE(review): assumes a single target type; a tuple target
            # would break np.array(target) as a mask — confirm.
            sample= self.transforms(image=np.array(image), mask=np.array(target))
            # sample = self.transform(**sample)
            img = sample['image']
            target = sample['mask']
        #img, mask = self.transforms(np.array(image),np.array(target))
        # NOTE(review): if self.transforms is None, `img` is never bound and
        # the next line raises NameError — confirm transforms is always set.
        img = to_tensor(img)
        mask = torch.from_numpy(target).type(torch.long)
        return img, mask
# In[5]:
train_ds = myCityscapes("./", split='train', mode='fine', target_type='semantic', transforms=transform_train, target_transform=None)
#transforms=None)
test = myCityscapes("./", split='val', mode='fine', target_type='semantic', transforms=transform_val, target_transform=None)
# transforms=None)
# In[6]:
train , val = torch.utils.data.random_split(train_ds, [2000,975])
# In[7]:
len(train)
# In[8]:
len(val)
# In[9]:
len(test)
# In[10]:
#number of classes presented in data
# Fixed seed so the per-class colours are reproducible across runs.
np.random.seed(0)
num_classes=35
# Random RGB triple per class.  randint's upper bound is exclusive, so each
# channel is 0 or 1 — only 8 distinct colours exist for 36 classes, so
# duplicates are guaranteed.
# NOTE(review): possibly intended randint(0, 256) for a full palette — confirm.
COLORS = np.random.randint(0, 2, size=(num_classes+1, 3),dtype="uint8")
# In[11]:
def show_img_target(img, target):
    """Overlay per-class segmentation boundaries on *img* and display it.

    *img* may be a tensor (converted to PIL first) or an array-like image;
    *target* is the matching label mask.  Uses the module-level
    ``num_classes`` and ``COLORS`` palette.
    """
    if torch.is_tensor(img):
        img = to_pil_image(img)
        target = target.numpy()
    # Draw the boundary of every class region in that class's colour.
    for class_id in range(num_classes):
        class_region = target == class_id
        img = mark_boundaries(
            np.array(img),
            class_region,
            outline_color=COLORS[class_id],
            color=COLORS[class_id],
        )
    plt.imshow(img)
# In[12]:
def re_normalize(x, mean=mean, std=std):
    """Undo per-channel normalisation on a copy of *x*.

    For each channel c the result is ``x[c] * std[c] + mean[c]``; the input
    tensor is left untouched.  Defaults come from the module-level mean/std.
    """
    restored = x.clone()
    for channel, (m, s) in enumerate(zip(mean, std)):
        restored[channel] = restored[channel] * s + m
    return restored
# In[13]:
#sample from training data
img, mask = train[3]
print(img.shape, img.type(),torch.max(img))
print(mask.shape, mask.type(),torch.max(mask))
# In[14]:
plt.figure(figsize=(20,20))
img_r= re_normalize(img)
plt.subplot(1, 3, 1)
plt.imshow(to_pil_image(img_r))
plt.subplot(1, 3, 2)
plt.imshow(mask)
plt.subplot(1, 3, 3)
show_img_target(img_r, mask)
# In[15]:
#sample from validation data
img, mask = val[0]
print(img.shape, img.type(),torch.max(img))
print(mask.shape, mask.type(),torch.max(mask))
# In[16]:
plt.figure(figsize=(20,20))
img_r= re_normalize(img)
plt.subplot(1, 3, 1)
plt.imshow(to_pil_image(img_r))
plt.subplot(1, 3, 2)
plt.imshow(mask)
plt.subplot(1, 3, 3)
show_img_target(img_r, mask)
# In[17]:
#sample from validation data
img, mask = test[0]
print(img.shape, img.type(),torch.max(img))
print(mask.shape, mask.type(),torch.max(mask))
# In[18]:
plt.figure(figsize=(20,20))
img_r= re_normalize(img)
plt.subplot(1, 3, 1)
plt.imshow(to_pil_image(img_r))
plt.subplot(1, 3, 2)
plt.imshow(mask)
plt.subplot(1, 3, 3)
show_img_target(img_r, mask)
# In[19]:
#defining Dataloaders
from torch.utils.data import DataLoader
train_dl = DataLoader(train, batch_size=3, shuffle=True)
val_dl = DataLoader(val, batch_size=3, shuffle=False)
test_dl =DataLoader(test, batch_size=1, shuffle=False)
# In[20]:
class single_conv(nn.Module):
    """One 3x3 conv -> BatchNorm -> ReLU stage; spatial size is preserved."""

    def __init__(self, ch_in, ch_out):
        super(single_conv, self).__init__()
        layers = [
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
# In[21]:
class conv_block(nn.Module):
    """Two stacked 3x3 conv -> BatchNorm -> ReLU stages (U-Net "double conv")."""

    def __init__(self, ch_in, ch_out):
        super(conv_block, self).__init__()

        def _stage(c_in, c_out):
            # One conv/norm/activation triple; padding=1 keeps H and W.
            return [
                nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1, bias=True),
                nn.BatchNorm2d(c_out),
                nn.ReLU(inplace=True),
            ]

        self.conv = nn.Sequential(*_stage(ch_in, ch_out), *_stage(ch_out, ch_out))

    def forward(self, x):
        return self.conv(x)
class up_conv(nn.Module):
    """Upsample by 2x (nn.Upsample default mode), then conv -> BatchNorm -> ReLU."""

    def __init__(self, ch_in, ch_out):
        super(up_conv, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.up(x)
# In[22]:
class U_Net(nn.Module):
    """Standard U-Net: 5-level encoder/decoder with skip connections.

    Input is (N, img_ch, H, W); output is per-pixel class scores of shape
    (N, output_ch, H, W).  H and W must survive four 2x2 max-pools, i.e.
    be divisible by 16.  Attribute names are kept stable so saved
    state_dicts remain loadable.
    """

    def __init__(self, img_ch=3, output_ch=35):
        super(U_Net, self).__init__()
        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

        # Encoder: channel width doubles at every level.
        self.Conv1 = conv_block(ch_in=img_ch, ch_out=64)
        self.Conv2 = conv_block(ch_in=64, ch_out=128)
        self.Conv3 = conv_block(ch_in=128, ch_out=256)
        self.Conv4 = conv_block(ch_in=256, ch_out=512)
        self.Conv5 = conv_block(ch_in=512, ch_out=1024)

        # Decoder: upsample, concatenate the skip, then double-conv.
        self.Up5 = up_conv(ch_in=1024, ch_out=512)
        self.Up_conv5 = conv_block(ch_in=1024, ch_out=512)
        self.Up4 = up_conv(ch_in=512, ch_out=256)
        self.Up_conv4 = conv_block(ch_in=512, ch_out=256)
        self.Up3 = up_conv(ch_in=256, ch_out=128)
        self.Up_conv3 = conv_block(ch_in=256, ch_out=128)
        self.Up2 = up_conv(ch_in=128, ch_out=64)
        self.Up_conv2 = conv_block(ch_in=128, ch_out=64)

        # 1x1 conv maps the final 64 features to class scores.
        self.Conv_1x1 = nn.Conv2d(64, output_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # Encoder path: keep each level's features for the skip connections.
        enc1 = self.Conv1(x)
        enc2 = self.Conv2(self.Maxpool(enc1))
        enc3 = self.Conv3(self.Maxpool(enc2))
        enc4 = self.Conv4(self.Maxpool(enc3))
        enc5 = self.Conv5(self.Maxpool(enc4))

        # Decoder path: upsample, concat the matching encoder output, refine.
        dec = self.Up5(enc5)
        dec = self.Up_conv5(torch.cat((enc4, dec), dim=1))
        dec = self.Up4(dec)
        dec = self.Up_conv4(torch.cat((enc3, dec), dim=1))
        dec = self.Up3(dec)
        dec = self.Up_conv3(torch.cat((enc2, dec), dim=1))
        dec = self.Up2(dec)
        dec = self.Up_conv2(torch.cat((enc1, dec), dim=1))

        return self.Conv_1x1(dec)
# In[23]:
model = U_Net()
# In[24]:
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
model=model.to(device)
# In[25]:
criterion = nn.CrossEntropyLoss(reduction="sum")
from torch import optim
opt = optim.Adam(model.parameters(), lr=1e-6)
# In[26]:
def loss_batch(loss_func, output, target, opt=None):
    """Compute the loss for one batch; if *opt* is given, take a gradient step.

    Returns ``(loss_value, None)`` — the second slot is a placeholder for a
    per-batch metric that is not computed here.
    """
    loss = loss_func(output, target)
    training = opt is not None
    if training:
        # Standard update: clear stale grads, backprop, apply the step.
        opt.zero_grad()
        loss.backward()
        opt.step()
    return loss.item(), None
# In[27]:
from torch.optim.lr_scheduler import ReduceLROnPlateau
lr_scheduler = ReduceLROnPlateau(opt, mode='min',factor=0.5, patience=20,verbose=1)
# In[28]:
def get_lr(opt):
    """Return the learning rate of the optimizer's first param group (None if empty)."""
    groups = opt.param_groups
    return groups[0]['lr'] if groups else None
current_lr=get_lr(opt)
print('current lr={}'.format(current_lr))
# In[29]:
def loss_epoch(model, loss_func, dataset_dl, sanity_check=False, opt=None):
    """Run *model* over every batch of *dataset_dl* and return the mean loss.

    If *opt* is given, loss_batch takes a gradient step per batch (training);
    otherwise the pass is evaluation-only.  With ``sanity_check`` the loop
    stops after the first batch (the divisor is still the full dataset size).
    Batches are moved to the module-level ``device``.  Returns ``(loss, None)``.
    """
    total = 0.0
    n_samples = len(dataset_dl.dataset)
    for batch_x, batch_y in dataset_dl:
        batch_x = batch_x.to(device)
        batch_y = batch_y.to(device)
        batch_loss, _ = loss_batch(loss_func, model(batch_x), batch_y, opt)
        total += batch_loss
        if sanity_check is True:
            break
    return total / float(n_samples), None
# In[30]:
import copy
def train_val(model, params):
    """Training loop with per-epoch validation and best-checkpoint tracking.

    *params* is a dict with keys "num_epochs", "loss_func", "optimizer",
    "train_dl", "val_dl", "sanity_check", "lr_scheduler" (stepped on the
    validation loss each epoch) and "path2weights" (checkpoint file path).
    Whenever the validation loss improves, the weights are deep-copied in
    memory and saved to disk; whenever the scheduler lowers the learning
    rate, the best weights so far are reloaded before continuing.
    Returns the model loaded with the best weights and the loss history.
    """
    num_epochs=params["num_epochs"]
    loss_func=params["loss_func"]
    opt=params["optimizer"]
    train_dl=params["train_dl"]
    val_dl=params["val_dl"]
    sanity_check=params["sanity_check"]
    lr_scheduler=params["lr_scheduler"]
    path2weights=params["path2weights"]
    loss_history={
        "train": [],
        "val": []}
    best_model_wts = copy.deepcopy(model.state_dict())
    best_loss=float('inf')
    for epoch in range(num_epochs):
        current_lr=get_lr(opt)
        print('Epoch {}/{}, current lr={}'.format(epoch, num_epochs - 1, current_lr))
        # Training pass: loss_epoch takes gradient steps because opt is passed.
        model.train()
        train_loss, _ = loss_epoch(model,loss_func,train_dl,sanity_check,opt)
        loss_history["train"].append(train_loss)
        # Validation pass: no optimizer, no autograd graph.
        model.eval()
        with torch.no_grad():
            val_loss, _ = loss_epoch(model,loss_func,val_dl,sanity_check)
            loss_history["val"].append(val_loss)
        if val_loss < best_loss:
            # New best: snapshot the weights in memory and on disk.
            best_loss = val_loss
            best_model_wts = copy.deepcopy(model.state_dict())
            torch.save(model.state_dict(), path2weights)
            print("Copied best model weights!")
        # Scheduler monitors the validation loss (e.g. ReduceLROnPlateau).
        lr_scheduler.step(val_loss)
        if current_lr != get_lr(opt):
            # The LR just dropped: restart training from the best weights.
            print("Loading best model weights!")
            model.load_state_dict(best_model_wts)
        print("train loss: %.6f" %(train_loss))
        print("val loss: %.6f" %(val_loss))
        print("-"*10)
    model.load_state_dict(best_model_wts)
    return model, loss_history
# In[31]:
start = time.time()
import os
path2models= "./ARnet_Experiments/Unet_Baseline3"
# makedirs creates missing parents too; the original os.mkdir would fail
# if "./ARnet_Experiments" did not exist yet.
os.makedirs(path2models, exist_ok=True)
params_train={
    "num_epochs": 150,
    "optimizer": opt,
    "loss_func": criterion,
    "train_dl": train_dl,
    "val_dl": val_dl,
    "sanity_check": False,
    # BUG FIX: plain string "+" produced ".../Unet_Baseline3weights.pt"
    # (no path separator), dropping the checkpoint outside the intended
    # directory.  os.path.join inserts the separator correctly.
    "path2weights": os.path.join(path2models, "weights.pt"),}
model,loss_hist=train_val(model,params_train)
end = time.time()
print("TIME TOOK {:3.2f}MIN".format((end - start )/60))
# In[ ]:
torch.save(model.state_dict(), "./ARnet_Experiments/Unet_BaselineM3")
# In[ ]:
num_epochs=params_train["num_epochs"]
plt.figure(figsize=(30,30))
plt.title("Train-Val Loss")
plt.plot(range(1,num_epochs+1),loss_hist["train"],label="train")
plt.plot(range(1,num_epochs+1),loss_hist["val"],label="val")
plt.ylabel("Loss")
plt.xlabel("Training Epochs")
plt.legend()
plt.show()
# In[31]:
model.load_state_dict(torch.load("./ARnet_Experiments/Unet_BaselineM3"))
model.eval()
# In[32]:
from torch.autograd import Variable
# In[33]:
start = time.time()
# BUG FIX: the original called loss_epoch(model, criterion, test_dl, opt),
# passing `opt` as the 4th POSITIONAL argument — which is `sanity_check`,
# not `opt`.  A truthy value there made the loop stop after the first batch
# while still dividing by the full dataset size, so the reported test loss
# was wrong.  Evaluation needs no optimizer at all; no_grad also avoids
# building an autograd graph.
with torch.no_grad():
    test_loss, _ = loss_epoch(model, criterion, test_dl)
end = time.time()
print("TIME TOOK {:3.2f}MIN".format((end - start )/60))
# In[34]:
print(test_loss)
# In[35]:
out_dl =DataLoader(test, batch_size=5, shuffle=False)
# In[36]:
len_data=len(out_dl.dataset)
# In[37]:
print(len_data)
# In[38]:
for xb, yb in out_dl:
xb=xb.to(device)
yb=yb.to(device)
output=model(xb)
break
# In[39]:
pred = torch.argmax(output, dim=1)
# In[42]:
pred[0][0][0]
# In[43]:
color = [[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0],[111, 74, 0],[81, 0, 81],[128, 64, 128],[244, 35, 232],[250, 170, 160],[230, 150, 140],[70, 70, 70],[102, 102, 156],[190, 153, 153],[180, 165, 180],[150, 100, 100],[150, 120, 90],[153, 153, 153],[153, 153, 153],[250, 170, 30],[220, 220, 0],[107, 142, 35],[152, 251, 152],[70, 130, 180],[220, 20, 60],[255, 0, 0],[0, 0, 142],[0, 0, 70],[0, 60, 100],[0, 0, 90],[0, 0, 110],[0, 80, 100],[0, 0, 230],[119, 11, 32],[0, 0, 142]]
# In[55]:
color[5]
# In[40]:
mapc = np.array([[0. , 0. , 0. ],
[0. , 0. , 0. ],
[0. , 0. , 0. ],
[0. , 0. , 0. ],
[0.07843137, 0.07843137, 0.07843137],
[0.43529412, 0.29019608, 0. ],
[0.31764706, 0. , 0.31764706],
[0.50196078, 0.25098039, 0.50196078],
[0.95686275, 0.1372549 , 0.90980392],
[0.98039216, 0.66666667, 0.62745098],
[0.90196078, 0.58823529, 0.54901961],
[0.2745098 , 0.2745098 , 0.2745098 ],
[0.4 , 0.4 , 0.61176471],
[0.74509804, 0.6 , 0.6 ],
[0.70588235, 0.64705882, 0.70588235],
[0.58823529, 0.39215686, 0.39215686],
[0.58823529, 0.47058824, 0.35294118],
[0.6 , 0.6 , 0.6 ],
[0.6 , 0.6 , 0.6 ],
[0.98039216, 0.66666667, 0.11764706],
[0.8627451 , 0.8627451 , 0. ],
[0.41960784, 0.55686275, 0.1372549 ],
[0.59607843, 0.98431373, 0.59607843],
[0.2745098 , 0.50980392, 0.70588235],
[0.8627451 , 0.07843137, 0.23529412],
[1. , 0. , 0. ],
[0. , 0. , 0.55686275],
[0. , 0. , 0.2745098 ],
[0. , 0.23529412, 0.39215686],
[0. , 0. , 0.35294118],
[0. , 0. , 0.43137255],
[0. , 0.31372549, 0.39215686],
[0. , 0. , 0.90196078],
[0.46666667, 0.04313725, 0.1254902 ],
[0. , 0. , 0.55686275]])
# In[47]:
import numpy as np
# In[52]:
o =np.empty([256,512,3])
# In[53]:
o[0][0]= np.array([2,2,2])
# In[54]:
o[0][0]
# In[41]:
def color_img(mask):
    """Convert an integer label mask to an RGB image via the ``mapc`` palette.

    Each label value indexes a row of the module-level ``mapc`` array, so a
    (H, W) mask becomes a (H, W, 3) float image.  Accepts a torch tensor
    (on any device) or any array-like.  The original looped over every
    pixel in Python and was hard-coded to 256x512 masks; NumPy fancy
    indexing performs the same lookup in one vectorized step for any shape.
    """
    if torch.is_tensor(mask):
        mask = mask.cpu().numpy()
    return mapc[np.asarray(mask)]
# In[65]:
plt.imshow(color_img(pred[1]))
# In[43]:
img_r= re_normalize(xb[0].cpu())
plt.imshow(to_pil_image(img_r))
plt.savefig('Inp1.png', dpi = 300)
# In[44]:
plt.imshow(color_img(yb[0].cpu()))
plt.savefig('Gt1.png', dpi = 300)
# In[45]:
plt.imshow(color_img(pred[0].cpu()))
plt.savefig('Unet1.png', dpi = 300)
# In[46]:
img_r= re_normalize(xb[1].cpu())
plt.imshow(to_pil_image(img_r))
plt.savefig('Inp2.png', dpi = 300)
# In[47]:
plt.imshow(color_img(yb[1].cpu()))
plt.savefig('Gt2.png', dpi = 300)
# In[48]:
plt.imshow(color_img(pred[1].cpu()))
plt.savefig('Unet2.png', dpi = 300)
# In[49]:
img_r= re_normalize(xb[2].cpu())
plt.imshow(to_pil_image(img_r))
plt.savefig('Inp3.png', dpi = 300)
# In[50]:
plt.imshow(color_img(yb[2].cpu()))
plt.savefig('Gt3.png', dpi = 300)
# In[51]:
plt.imshow(color_img(pred[2].cpu()))
plt.savefig('Unet3.png', dpi = 300)
# In[52]:
img_r= re_normalize(xb[3].cpu())
plt.imshow(to_pil_image(img_r))
plt.savefig('Inp4.png', dpi = 300)
# In[53]:
plt.imshow(color_img(yb[3].cpu()))
plt.savefig('Gt4.png', dpi = 300)
# In[54]:
plt.imshow(color_img(pred[3].cpu()))
plt.savefig('Unet4.png', dpi = 300)
# In[55]:
img_r= re_normalize(xb[4].cpu())
plt.imshow(to_pil_image(img_r))
plt.savefig('inp5.png', dpi = 300)
# In[56]:
plt.imshow(color_img(yb[4].cpu()))
plt.savefig('Gt5.png', dpi = 300)
# In[57]:
plt.imshow(color_img(pred[4].cpu()))
plt.savefig('unet5.png', dpi = 300)
# In[46]:
o_dl =DataLoader(test, batch_size=1, shuffle=False)
# In[47]:
SMOOTH = 1e-6
def iou_pytorch(outputs, labels):
    """Per-sample smoothed IoU for batched boolean masks of shape (BATCH, H, W).

    Returns ``(thresholded, iou)``: ``iou`` is intersection-over-union per
    sample, and ``thresholded`` maps IoU values in [0.5, 1.0] linearly onto
    the grid {0, 0.1, ..., 1.0} via ceil (0 for anything below 0.5).
    """
    # Bitwise & / | require bool or integer masks; sum over the two
    # spatial dimensions, keeping the batch dimension.
    intersection = (outputs & labels).float().sum((1, 2))
    union = (outputs | labels).float().sum((1, 2))
    # The smoothing term avoids a 0/0 division when both masks are empty.
    iou = (intersection + SMOOTH) / (union + SMOOTH)
    thresholded = torch.clamp(20 * (iou - 0.5), 0, 10).ceil() / 10
    return thresholded, iou
# In[48]:
import pandas as pd
# In[49]:
# Previously exported per-batch results, reloaded here for inspection.
# NOTE(review): 'file' shadows the (Python 2) builtin name.
file = pd.read_csv("./Results_B1.csv")
# In[50]:
file.head()
# In[51]:
# Per-picture results table.
pix = pd.read_csv("./Res-pic.csv")
# In[52]:
pix.head()
# In[53]:
# Scratch cells left over from the interactive notebook session.
x = torch.randn(35)
# In[54]:
x = 66.66
# In[55]:
round(x)
# In[56]:
# Accumulators for the per-image evaluation loop that follows.
iou_sum = torch.zeros(1)
t_sum = torch.zeros(1)
acc = torch.zeros(1)
counter = 0
for xb, yb in o_dl:
xb=xb.to(device)
yb=yb.to(device)
output=model(xb)
pixel = criterion(output,yb)
pred | |
1 + 2*5^2 + 5^3
The floating point case is similar to the fixed modulus type
in that elements do not track their own precision. However, relative
precision is truncated with each operation rather than absolute precision.
In contrast, the lattice type tracks precision using lattices
and automatic differentiation. It is rather slow but provides sharp
(often optimal) results regarding precision.
We refer to the documentation of the function :func:`ZpLC` for a
small demonstration of the capabilities of this precision model.
PRINTING:
There are many different ways to print `p`-adic elements. The
way elements of a given ring print is controlled by options
passed in at the creation of the ring. There are five basic
printing modes (series, val-unit, terse, digits and bars), as
well as various options that either hide some information in
the print representation or sometimes make print
representations more compact. Note that the printing options
affect whether different `p`-adic fields are considered equal.
1. **series**: elements are displayed as series in `p`.::
sage: R = Zp(5, print_mode='series'); a = R(70700); a
3*5^2 + 3*5^4 + 2*5^5 + 4*5^6 + O(5^22)
sage: b = R(-70700); b
2*5^2 + 4*5^3 + 5^4 + 2*5^5 + 4*5^7 + 4*5^8 + 4*5^9 + 4*5^10 + 4*5^11 + 4*5^12 + 4*5^13 + 4*5^14 + 4*5^15 + 4*5^16 + 4*5^17 + 4*5^18 + 4*5^19 + 4*5^20 + 4*5^21 + O(5^22)
*print_pos* controls whether negatives can be used in the
coefficients of powers of `p`.::
sage: S = Zp(5, print_mode='series', print_pos=False); a = S(70700); a
-2*5^2 + 5^3 - 2*5^4 - 2*5^5 + 5^7 + O(5^22)
sage: b = S(-70700); b
2*5^2 - 5^3 + 2*5^4 + 2*5^5 - 5^7 + O(5^22)
*print_max_terms* limits the number of terms that appear.::
sage: T = Zp(5, print_mode='series', print_max_terms=4); b = R(-70700); b
2*5^2 + 4*5^3 + 5^4 + 2*5^5 + ... + O(5^22)
*names* affects how the prime is printed.::
sage: U.<p> = Zp(5); p
p + O(p^21)
*show_prec* determines how the precision is printed.
It can be either 'none' (or equivalently ``False``), 'bigoh'
(or equivalently ``True``).
The default is ``False`` for the ``'floating-point'`` and
``'fixed-mod'`` types and ``True`` for all other types.
sage: Zp(5, show_prec=False)(6)
1 + 5
*print_sep* and *print_alphabet* have no effect.
Note that print options affect equality::
sage: R == S, R == T, R == U, S == T, S == U, T == U
(False, False, False, False, False, False)
2. **val-unit**: elements are displayed as `p^k u`::
sage: R = Zp(5, print_mode='val-unit'); a = R(70700); a
5^2 * 2828 + O(5^22)
sage: b = R(-707*5); b
5 * 95367431639918 + O(5^21)
*print_pos* controls whether to use a balanced representation or
not.::
sage: S = Zp(5, print_mode='val-unit', print_pos=False); b = S(-70700); b
5^2 * (-2828) + O(5^22)
*names* affects how the prime is printed.::
sage: T = Zp(5, print_mode='val-unit', names='pi'); a = T(70700); a
pi^2 * 2828 + O(pi^22)
*show_prec* determines how the precision is printed.
It can be either 'none' (or equivalently ``False``), 'bigoh'
(or equivalently ``True``).
The default is ``False`` for the ``'floating-point'`` and
``'fixed-mod'`` types and ``True`` for all other types.
sage: Zp(5, print_mode='val-unit', show_prec=False)(30)
5 * 6
*print_max_terms*, *print_sep* and *print_alphabet* have no effect.
Equality again depends on the printing options::
sage: R == S, R == T, S == T
(False, False, False)
3. **terse**: elements are displayed as an integer in base 10::
sage: R = Zp(5, print_mode='terse'); a = R(70700); a
70700 + O(5^22)
sage: b = R(-70700); b
2384185790944925 + O(5^22)
*print_pos* controls whether to use a balanced representation or not.::
sage: S = Zp(5, print_mode='terse', print_pos=False); b = S(-70700); b
-70700 + O(5^22)
*name* affects how the name is printed. Note that this interacts
with the choice of shorter string for denominators.::
sage: T.<unif> = Zp(5, print_mode='terse'); c = T(-707); c
95367431639918 + O(unif^20)
*show_prec* determines how the precision is printed.
It can be either 'none' (or equivalently ``False``), 'bigoh'
(or equivalently ``True``).
The default is ``False`` for the ``'floating-point'`` and
``'fixed-mod'`` types and ``True`` for all other types.
sage: Zp(5, print_mode='terse', show_prec=False)(30)
30
*print_max_terms*, *print_sep* and *print_alphabet* have no effect.
Equality depends on printing options::
sage: R == S, R == T, S == T
(False, False, False)
4. **digits**: elements are displayed as a string of base `p` digits
Restriction: you can only use the digits printing mode for small
primes. Namely, `p` must be less than the length of the alphabet
tuple (default alphabet has length 62).::
sage: R = Zp(5, print_mode='digits'); a = R(70700); repr(a)
'...4230300'
sage: b = R(-70700); repr(b)
'...4444444444444440214200'
Note that it's not possible to read off the precision from the
representation in this mode.
*print_max_terms* limits the number of digits that are printed.::
sage: S = Zp(5, print_max_terms=4); S(-70700)
2*5^2 + 4*5^3 + 5^4 + 2*5^5 + ... + O(5^22)
*print_alphabet* controls the symbols used to substitute for digits
greater than 9. Defaults to
('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z')::
sage: T = Zp(5, print_mode='digits', print_alphabet=('1','2','3','4','5')); repr(T(-70700))
'...5555555555555551325311'
*show_prec* determines how the precision is printed.
It can be either 'none' (or equivalently ``False``), 'dots'
(or equivalently ``True``) or 'bigoh'.
The default is ``False`` for the ``'floating-point'`` and
``'fixed-mod'`` types and ``True`` for all other types.
sage: repr(Zp(5, 2, print_mode='digits', show_prec=True)(6))
'...11'
sage: repr(Zp(5, 2, print_mode='digits', show_prec='bigoh')(6))
'11 + O(5^2)'
*print_pos*, *name* and *print_sep* have no effect.
Equality depends on printing options::
sage: R == S, R == T, S == T
(False, False, False)
5. **bars**: elements are displayed as a string of base `p` digits
with separators
sage: R = Zp(5, print_mode='bars'); a = R(70700); repr(a)
'...4|2|3|0|3|0|0'
sage: b = R(-70700); repr(b)
'...4|4|4|4|4|4|4|4|4|4|4|4|4|4|4|0|2|1|4|2|0|0'
Again, note that it's not possible to read off the precision from
the representation in this mode.
*print_pos* controls whether the digits can be negative.::
sage: S = Zp(5, print_mode='bars',print_pos=False); b = S(-70700); repr(b)
'...-1|0|2|2|-1|2|0|0'
*print_max_terms* limits the number of digits that are printed.::
sage: T = Zp(5, print_max_terms=4); T(-70700)
2*5^2 + 4*5^3 + 5^4 + 2*5^5 + ... + O(5^22)
*print_sep* controls the separation character.::
sage: U = Zp(5, print_mode='bars', print_sep=']['); a = U(70700); repr(a)
'...4][2][3][0][3][0][0'
*show_prec* determines how the precision is printed.
It can be either 'none' (or equivalently ``False``), 'dots'
(or equivalently ``True``) or 'bigoh'.
The default is ``False`` for the ``'floating-point'`` and
``'fixed-mod'`` types and ``True`` for all other types.
sage: repr(Zp(5, 2, print_mode='bars', show_prec=True)(6))
'...1|1'
sage: repr(Zp(5, 2, print_mode='bars', show_prec=False)(6))
'1|1'
*name* and *print_alphabet* have no effect.
Equality depends on printing options::
sage: R == S, R == T, R == U, S == T, S == U, T == U
(False, False, False, False, False, False)
EXAMPLES:
We allow non-prime `p`, but only if ``check = False``. Note that some
features will not work.::
sage: K = Zp(15, check=False); a = K(999); a
9 + 6*15 + 4*15^2 + O(15^20)
We create rings with various parameters::
sage: Zp(7)
7-adic Ring with capped relative precision 20
sage: Zp(9)
Traceback (most recent call last):
...
ValueError: p must be prime
sage: Zp(17, 5)
17-adic Ring with capped relative precision 5
sage: Zp(17, 5)(-1)
16 + 16*17 + 16*17^2 + 16*17^3 + 16*17^4 + O(17^5)
It works even with a fairly huge cap::
sage: Zp(next_prime(10^50), 100000)
100000000000000000000000000000000000000000000000151-adic Ring with capped relative precision 100000
We create each type of ring::
sage: Zp(7, 20, 'capped-rel')
7-adic Ring with capped relative precision 20
sage: Zp(7, 20, 'fixed-mod')
7-adic Ring of fixed | |
<reponame>djangowebstudio/emma<gh_stars>0
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Create every table of the ``interface`` app.

    Auto-generated South schema migration: each ``db.create_table`` is
    followed by the matching ``db.send_create_signal`` so South registers
    the model. Keep the statement order intact.
    """
    # Adding model 'Image'
    db.create_table('interface_image', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('image_LNID', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True)),
        ('image_path', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('image_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('image_real_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('image_real_path', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('date_created', self.gf('django.db.models.fields.DateTimeField')(null=True)),
        ('date_modified', self.gf('django.db.models.fields.DateTimeField')(null=True)),
        ('date_entered', self.gf('django.db.models.fields.DateTimeField')(null=True)),
        ('image_category', self.gf('django.db.models.fields.CharField')(max_length=50)),
        ('image_pages', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ('group_status', self.gf('django.db.models.fields.CharField')(max_length=8, blank=True)),
        ('ts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('interface', ['Image'])
    # Adding model 'Category'
    db.create_table('interface_category', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['interface.Image'])),
        ('image_LNID', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('image_cat', self.gf('django.db.models.fields.CharField')(max_length=255)),
    ))
    db.send_create_signal('interface', ['Category'])
    # Adding model 'Group'
    db.create_table('interface_group', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('image_LNID', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True)),
        ('image_group', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('image_pages', self.gf('django.db.models.fields.IntegerField')(default=0)),
    ))
    db.send_create_signal('interface', ['Group'])
    # Adding model 'AlbumClass'
    db.create_table('interface_albumclass', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('ts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('interface', ['AlbumClass'])
    # Adding model 'Album'
    db.create_table('interface_album', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('album_identifier', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True)),
        ('album_name', self.gf('django.db.models.fields.CharField')(default='untitled album', max_length=255)),
        ('album_pages', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ('document', self.gf('django.db.models.fields.files.FileField')(max_length=255, blank=True)),
        ('ts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('interface', ['Album'])
    # Adding M2M table for field image on 'Album'
    db.create_table('interface_album_image', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('album', models.ForeignKey(orm['interface.album'], null=False)),
        ('image', models.ForeignKey(orm['interface.image'], null=False))
    ))
    db.create_unique('interface_album_image', ['album_id', 'image_id'])
    # Adding model 'MDAll'
    db.create_table('interface_mdall', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['interface.Image'])),
        ('image_LNID', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('MDall', self.gf('django.db.models.fields.TextField')(blank=True)),
    ))
    db.send_create_signal('interface', ['MDAll'])
    # Adding model 'Copyright'
    db.create_table('interface_copyright', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['interface.Image'])),
        ('image_LNID', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('copyright', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
        ('copyright_terms', self.gf('django.db.models.fields.TextField')(blank=True)),
    ))
    db.send_create_signal('interface', ['Copyright'])
    # Adding model 'Order'
    db.create_table('interface_order', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['interface.Image'])),
        ('image_LNID', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('resolution', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('client', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('clientImage', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True)),
        ('group_name', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('album_identifier', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
        ('ts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ('status', self.gf('django.db.models.fields.SmallIntegerField')(null=True)),
    ))
    db.send_create_signal('interface', ['Order'])
    # Adding model 'Keyword'
    db.create_table('interface_keyword', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['interface.Image'])),
        ('image_LNID', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True)),
        ('image_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('subject', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('keywords', self.gf('django.db.models.fields.TextField')(blank=True)),
        ('cright', self.gf('django.db.models.fields.NullBooleanField')(null=True)),
        ('profile', self.gf('django.db.models.fields.NullBooleanField')(null=True)),
        ('source', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('image_path', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
        ('ts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('interface', ['Keyword'])
    # Adding model 'Metadata'
    db.create_table('interface_metadata', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['interface.Image'])),
        ('keyword', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['interface.Keyword'])),
        ('image_LNID', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True)),
        ('file_type', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('mime_type', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('source', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('caption_writer', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('subject', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('keywords', self.gf('django.db.models.fields.TextField')(blank=True)),
        ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
        ('location', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('city', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('provincestate', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('country', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('instructions', self.gf('django.db.models.fields.TextField')(blank=True)),
        ('title', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('creator_tool', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('creator', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('author', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('credit', self.gf('django.db.models.fields.TextField')(blank=True)),
        ('datetimeoriginal', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
        ('orientation', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ('softdate', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('copyright', self.gf('django.db.models.fields.NullBooleanField')(null=True)),
        ('profile', self.gf('django.db.models.fields.NullBooleanField')(null=True)),
        ('headline', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('album', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('documentname', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('document', self.gf('django.db.models.fields.files.FileField')(max_length=255, blank=True)),
        ('MDall', self.gf('django.db.models.fields.TextField')(blank=True)),
        ('ts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('interface', ['Metadata'])
    # Adding model 'Favorite'
    db.create_table('interface_favorite', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['interface.Image'])),
        ('user', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('image_LNID', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('album_name', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('album_identifier', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('tag', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('ts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
    ))
    db.send_create_signal('interface', ['Favorite'])
    # Adding model 'KeywordCount'
    db.create_table('interface_keywordcount', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('keyword', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True)),
        ('count', self.gf('django.db.models.fields.IntegerField')(null=True)),
        ('ts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
    ))
    db.send_create_signal('interface', ['KeywordCount'])
    # Adding model 'Author'
    db.create_table('interface_author', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['interface.Image'])),
        ('author', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('ts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ('notes', self.gf('django.db.models.fields.TextField')()),
    ))
    db.send_create_signal('interface', ['Author'])
    # Adding model 'ImageCount'
    db.create_table('interface_imagecount', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('count', self.gf('django.db.models.fields.IntegerField')(default=0)),
    ))
    db.send_create_signal('interface', ['ImageCount'])
    # Adding model 'User'
    db.create_table('interface_user', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('user', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ('search', self.gf('django.db.models.fields.CharField')(default='simple', max_length=255)),
        ('pagesize', self.gf('django.db.models.fields.IntegerField')(default=8)),
        ('order', self.gf('django.db.models.fields.NullBooleanField')(null=True)),
        ('setting1', self.gf('django.db.models.fields.NullBooleanField')(null=True)),
        ('setting2', self.gf('django.db.models.fields.NullBooleanField')(null=True)),
        ('setting3', self.gf('django.db.models.fields.NullBooleanField')(null=True)),
        ('setting4', self.gf('django.db.models.fields.NullBooleanField')(null=True)),
        ('setting5', self.gf('django.db.models.fields.NullBooleanField')(null=True)),
        ('setting6', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
        ('setting7', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
        ('setting8', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
        ('setting9', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
        ('setting10', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
        ('setstr1', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('setstr2', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('setstr3', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('setstr4', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('setstr5', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('ts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('interface', ['User'])
    # Adding model 'Contract'
    db.create_table('interface_contract', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('user', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ('contract', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ('username', self.gf('django.db.models.fields.CharField')(max_length=255)),
    ))
    db.send_create_signal('interface', ['Contract'])
    # Adding model 'Query'
    db.create_table('interface_query', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('user', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ('mode', self.gf('django.db.models.fields.CharField')(max_length=50)),
        ('query', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('ts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('interface', ['Query'])
def backwards(self, orm):
    """Reverse the migration: drop every table created in ``forwards``."""
    # Deleting model 'Image'
    db.delete_table('interface_image')
    # Deleting model 'Category'
    db.delete_table('interface_category')
    # Deleting model 'Group'
    db.delete_table('interface_group')
    # Deleting model 'AlbumClass'
    db.delete_table('interface_albumclass')
    # Deleting model 'Album'
    db.delete_table('interface_album')
    # Removing M2M table for field image on 'Album'
    db.delete_table('interface_album_image')
    # Deleting model 'MDAll'
    db.delete_table('interface_mdall')
    # Deleting model 'Copyright'
    db.delete_table('interface_copyright')
    # Deleting model 'Order'
    db.delete_table('interface_order')
    # Deleting model 'Keyword'
    db.delete_table('interface_keyword')
    # Deleting model 'Metadata'
    db.delete_table('interface_metadata')
    # Deleting model 'Favorite'
    db.delete_table('interface_favorite')
    # Deleting model 'KeywordCount'
    db.delete_table('interface_keywordcount')
    # Deleting model 'Author'
    db.delete_table('interface_author')
    # Deleting model 'ImageCount'
    db.delete_table('interface_imagecount')
    # Deleting model 'User'
    db.delete_table('interface_user')
    # Deleting model 'Contract'
    db.delete_table('interface_contract')
    # Deleting model 'Query'
    db.delete_table('interface_query')
models = {
'interface.album': {
'Meta': {'object_name': 'Album'},
'album_identifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True'}),
'album_name': ('django.db.models.fields.CharField', [], {'default': "'untitled album'", 'max_length': '255'}),
'album_pages': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'document': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['interface.Image']"}),
'ts': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'interface.albumclass': {
'Meta': {'object_name': 'AlbumClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ts': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'interface.author': {
'Meta': {'object_name': 'Author'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['interface.Image']"}),
'notes': ('django.db.models.fields.TextField', [], {}),
'ts': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'interface.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['interface.Image']"}),
'image_LNID': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'image_cat': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'interface.contract': {
'Meta': {'object_name': 'Contract'},
'contract': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'interface.copyright': {
'Meta': {'object_name': 'Copyright'},
'copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'copyright_terms': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['interface.Image']"}),
'image_LNID': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'interface.favorite': {
'Meta': {'object_name': 'Favorite'},
'album_identifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'album_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['interface.Image']"}),
'image_LNID': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ts': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'interface.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_LNID': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True'}),
'image_group': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'image_pages': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'interface.image': {
'Meta': {'object_name': 'Image'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_entered': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'group_status': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_LNID': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True'}),
'image_category': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'image_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'image_pages': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'image_path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'image_real_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'image_real_path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ts': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'interface.imagecount': {
'Meta': {'object_name': 'ImageCount'},
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'interface.keyword': {
'Meta': {'object_name': 'Keyword'},
'cright': ('django.db.models.fields.NullBooleanField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['interface.Image']"}),
'image_LNID': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True'}),
'image_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'image_path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'keywords': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'profile': ('django.db.models.fields.NullBooleanField', [], {'null': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ts': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'interface.keywordcount': {
'Meta': {'object_name': 'KeywordCount'},
'count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ts': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'interface.mdall': {
'MDall': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'Meta': {'object_name': 'MDAll'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['interface.Image']"}),
'image_LNID': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'interface.metadata': {
'MDall': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'Meta': {'object_name': 'Metadata'},
'album': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'caption_writer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'copyright': ('django.db.models.fields.NullBooleanField', [], {'null': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'creator': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'creator_tool': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'credit': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'datetimeoriginal': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'document': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'blank': 'True'}),
'documentname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'file_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'headline': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['interface.Image']"}),
'image_LNID': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'to': | |
~np.isnan(cor_matrix[psv_i])
mask_j = ~np.isnan(cor_matrix[psv_j])
mask = np.logical_and(mask_i, mask_j)
mask_size = np.sum(mask)
if not mask_size:
if np.sum(mask_i) > np.sum(mask_j):
use_psvs[psv_j] = False
else:
use_psvs[psv_i] = False
break
else:
dist_matrix[psv_i, psv_j] = dist_matrix[psv_j, psv_i] = \
scipy.spatial.distance.pdist((cor_matrix[psv_i, mask], cor_matrix[psv_j, mask])) / mask_size
all_usable = np.array([psv_info.psv_ix for psv_info in psv_infos if psv_info.in_em])
N_CLUSTERS = 2
MIN_CLUSTER_SIZE = 5
psv_ixs = np.where(use_psvs)[0]
if len(psv_ixs) < MIN_CLUSTER_SIZE * 2:
return [all_usable]
condensed_dist = scipy.spatial.distance.squareform(dist_matrix[psv_ixs[:, None], psv_ixs])
linkage = hierarchy.linkage(condensed_dist, method='complete')
clusters = hierarchy.fcluster(linkage, 2, criterion='maxclust')
cluster_sizes = np.bincount(clusters)
res = [all_usable]
if len(cluster_sizes) <= 1 or np.max(cluster_sizes) < MIN_CLUSTER_SIZE:
return res
added_all = False
for cluster, count in enumerate(cluster_sizes):
if count < MIN_CLUSTER_SIZE:
continue
res.append(psv_ixs[clusters == cluster])
if len(clusters) - count < MIN_CLUSTER_SIZE:
res[0] = None
if res[0] is None:
del res[0]
return res
# Record of one EM clustering attempt: cluster index, number of PSVs deemed
# reliable, their information content, the model likelihood, per-PSV f-values
# and per-sample genotype probabilities.
_BestCluster = collections.namedtuple('_BestCluster',
    'cluster_i n_reliable info_content likelihood psv_f_values sample_gt_probs')

# Minimum number of copies (n_copies = cn // 2, see find_reliable_psvs) for
# which reliable-PSV detection is attempted; smaller groups are skipped.
_SMALLEST_COPY_NUM = 2
def write_headers(out, samples, args):
    """Write the header line (plus explanatory comments) of every output table.

    Args:
        out: object exposing ``checked_write(file_key, *lines)``.
        samples: iterable of sample names, appended as trailing columns.
        args: namespace providing ``min_samples``, ``max_ref_cn`` and ``pscn_bound``.
    """
    sample_columns = '\t'.join(samples) + '\n'
    out.checked_write(
        'use_psv_sample',
        '# Each element stores two booleans: first shows if the PSV has good observations at the sample,\n',
        '# Second shows if the sample has the reference copy number at the PSV.\n',
        '# If PSV has less than {} "good" samples, it is not used.\n'.format(args.min_samples),
        'region_group\tpsv\tgood\t' + sample_columns)

    # One "copyN" column per repeat copy, bounded by both reference-CN limits.
    max_ref_cn = min(args.max_ref_cn, args.pscn_bound[0])
    copy_columns = '\t'.join('copy{}'.format(i) for i in range(1, max_ref_cn // 2 + 1)) + '\n'
    out.checked_write('interm_psv_f_values',
        'region_group\tcluster\titeration\tpsv\tinfo_content\t' + copy_columns)
    out.checked_write('psv_f_values',
        'region_group\tpsv\tn_samples\tuse_in_em\tinfo_content\t' + copy_columns)
    out.checked_write('em_likelihoods',
        'region_group\tcluster\titeration\ttime\tlikelihood\tn_reliable\treliable_info\n')
    out.checked_write('em_sample_gts',
        'region_group\tcluster\titeration\tgenotype\tprior\t' + sample_columns)
    out.checked_write('paralog_cn', 'region_group\tsample\tregion1\tgenotypes\tmarginal_probs\n')
    out.checked_write('gene_conversion',
        '#chrom\tstart\tend\tsample\tregion_group\tmain_gt\treplacement_gt\tqual\tn_psvs\n')
def create_psv_infos(psvs, region_group, n_samples, genome):
    """Return one ``_PsvInfo`` per PSV, indexed by its position within *psvs*."""
    return [
        _PsvInfo(index, variant, region_group, n_samples, genome)
        for index, variant in enumerate(psvs)
    ]
def find_reliable_psvs(region_group_extra, samples, genome, out, min_samples,
        reliable_threshold, max_agcn):
    """Run the EM algorithm to estimate PSV f-values and identify reliable PSVs.

    Several PSV clusters are tried as EM initializations; the run with the best
    likelihood wins and its f-values are stored via
    ``region_group_extra.set_f_values``. Intermediate and final tables are
    written through ``out``. Returns ``None``.
    """
    # ===== Setting up variables =====
    psvs = region_group_extra.psvs
    n_psvs = len(psvs)
    region_group = region_group_extra.region_group
    group_name = region_group.name
    cn = region_group.cn
    n_copies = cn // 2
    n_samples = len(samples)
    # Nothing to do without PSVs, or when the copy number is out of the supported range.
    if not n_psvs or n_copies < _SMALLEST_COPY_NUM or n_copies > max_agcn // 2:
        return
    # ===== Selecting a set of PSVs used in the EM algorithm =====
    _select_psv_sample_pairs(region_group_extra, samples, out.use_psv_sample, min_samples)
    psv_infos = region_group_extra.psv_infos
    region_sequence = region_group.region1.get_sequence(genome)
    for psv_info in psv_infos:
        psv_info.check_complicated_pos(region_group.region1, region_sequence)
        psv_info.create_em_psv_gt_probs()
    if not any(psv_info.in_em for psv_info in psv_infos):
        return
    timer_start = perf_counter()
    _calculate_psv_info_content(group_name, psv_infos, min_samples, out.psv_filtering)
    _filter_close_psvs(psv_infos, out.psv_filtering, close_psv_dist=100)
    em_psv_ixs = np.array([psv_info.psv_ix for psv_info in psv_infos if psv_info.in_em])
    if not len(em_psv_ixs):
        return
    common.log(' Searching for reliable PSVs')
    sample_genotypes = variants_.all_gt_counts(n_copies, cn)
    sample_genotypes_str = [','.join(map(str, gt)) for gt in sample_genotypes]
    sample_genotype_priors = _define_sample_gt_priors(n_copies, sample_genotypes)
    # ===== EM iterations, try several clusters =====
    best_cluster = None
    psv_clusters = _cluster_psvs(psv_infos, region_group_extra.psv_read_counts, n_samples)
    for cluster_i, cluster in enumerate(psv_clusters, start=1):
        # Initialization: f = 0.5 everywhere, 0.9 for the PSVs seeding this cluster.
        psv_f_values = np.full((n_psvs, n_copies), 0.5)
        psv_f_values[cluster] = 0.9
        total_lik = -np.inf
        for iteration in range(1, 101):
            if iteration > 1:
                # M-step: re-estimate f-values from the current genotype probabilities.
                psv_f_values, _ = _m_step(sample_gt_probs, psv_infos, em_psv_ixs, psv_f_values)
            for psv_ix in em_psv_ixs:
                psv_info = psv_infos[psv_ix]
                out.interm_psv_f_values.write('{}\t{}\t{}\t{}:{}\t{:.6f}\t{}\n'.format(group_name, cluster_i,
                    iteration, psv_info.chrom, psv_info.start + 1, psv_info.info_content,
                    '\t'.join(map('{:.6f}'.format, psv_f_values[psv_ix]))))
            old_lik = total_lik
            # E-step: recompute per-sample genotype probabilities and the total likelihood.
            total_lik, sample_gt_probs = _e_step(psv_infos, psv_f_values, sample_genotype_priors, n_samples)
            for gt_i, gt in enumerate(sample_genotypes_str):
                out.em_sample_gts.write('{}\t{}\t{}\t{}\t{:.3f}\t'.format(group_name, cluster_i, iteration, gt,
                    sample_genotype_priors[gt_i] / common.LOG10))
                out.em_sample_gts.write('\t'.join(map('{:.3f}'.format, sample_gt_probs[:, gt_i] / common.LOG10)))
                out.em_sample_gts.write('\n')
            common.log(' Cluster {}, iteration {:2}, EM likelihood: {:,.3f}'.format(
                cluster_i, iteration, total_lik / common.LOG10))
            # f-values may contain NaN; silence the invalid-comparison warning.
            with np.errstate(invalid='ignore'):
                curr_reliable = np.where(np.all(psv_f_values >= reliable_threshold, axis=1))[0]
            n_reliable = len(curr_reliable)
            mean_info_content = np.mean([psv_infos[i].info_content for i in curr_reliable]) if n_reliable else 0.0
            out.em_likelihoods.write('{}\t{}\t{}\t{}\t{:.7f}\t{}\t{:.6f}\n'.format(group_name, cluster_i, iteration,
                str(timedelta(seconds=perf_counter() - timer_start))[:-5], total_lik / common.LOG10,
                n_reliable, mean_info_content))
            # Converged once the likelihood improvement drops below 0.01.
            if total_lik < old_lik + 0.01:
                break
        if best_cluster is None or best_cluster.likelihood < total_lik:
            best_cluster = _BestCluster(cluster_i, n_reliable, mean_info_content, total_lik,
                psv_f_values, sample_gt_probs)
    # ===== Save results from the best cluster =====
    psv_f_values = best_cluster.psv_f_values
    sample_gt_probs = best_cluster.sample_gt_probs
    if len(psv_clusters) > 1:
        common.log(' === Best cluster {}: likelihood {:.3f}, {} reliable PSVs with mean information content {:.3f}'
            .format(best_cluster.cluster_i, best_cluster.likelihood / common.LOG10,
                best_cluster.n_reliable, best_cluster.info_content))
    if best_cluster.n_reliable and best_cluster.info_content < 0.8:
        common.log('WARN: Many reliable PSVs have low information content.')
    # Estimate f-values once for PSVs excluded from the EM but with usable samples,
    # so the output table covers every PSV.
    discarded_psvs = np.array([psv_info.psv_ix for psv_info in psv_infos
        if not psv_info.in_em and psv_info.n_used_samples > 0])
    if len(discarded_psvs):
        oth_f_values, _ = _m_step(sample_gt_probs, psv_infos, discarded_psvs, np.full((n_psvs, n_copies), 0.5))
        psv_f_values[discarded_psvs, :] = oth_f_values[discarded_psvs, :]
    for psv_info in psv_infos:
        out.psv_f_values.write('{}\t{}:{}\t{}\t{}\t{:.6f}\t{}\n'.format(group_name, psv_info.chrom,
            psv_info.start + 1, psv_info.n_used_samples, 'T' if psv_info.in_em else 'F', psv_info.info_content,
            '\t'.join(map('{:.6f}'.format, psv_f_values[psv_info.psv_ix]))))
    region_group_extra.set_f_values(psv_f_values)
def _single_sample_e_step(sample_id, sample_cn, psv_infos, reliable_psv_ixs):
"""
Returns sample genotype probabilities.
"""
n_sample_genotypes = len(psv_infos[reliable_psv_ixs[0]].support_matrix[sample_id])
res = np.zeros(n_sample_genotypes)
for psv_ix in reliable_psv_ixs:
psv_info = psv_infos[psv_ix]
assert sample_cn == psv_info.sample_cns[sample_id]
res += psv_info.support_matrix[sample_id]
return res
def calculate_marginal_probs(genotypes, gt_probs, n_copies, cn):
    """Compute per-copy marginal CN probabilities from genotype probabilities.

    Note: ``gt_probs`` is normalized IN PLACE (its log-sum-exp is subtracted).

    Returns:
        - marginal probabilities of shape (n_copies, cn + 1): entry [c, k] is
          the log-probability that copy ``c`` carries copy number ``k``;
        - the most likely paralog-specific CN per copy (length n_copies);
        - the Phred-scaled quality of that CN per copy (length n_copies).
    """
    gt_probs -= logsumexp(gt_probs)
    marginal_probs = np.full((n_copies, cn + 1), -np.nan)
    for copy_ix in range(n_copies):
        for copy_cn in range(cn + 1):
            # Aggregate over all genotypes assigning copy_cn to this copy.
            matching = [i for i, gt in enumerate(genotypes) if gt[copy_ix] == copy_cn]
            marginal_probs[copy_ix, copy_cn] = logsumexp(gt_probs[matching])
    paralog_cn = np.zeros(n_copies, dtype=np.int8)
    paralog_qual = np.zeros(n_copies)
    for copy_ix in range(n_copies):
        best = np.argmax(marginal_probs[copy_ix])
        paralog_cn[copy_ix] = best
        paralog_qual[copy_ix] = common.phred_qual(marginal_probs[copy_ix], best)
    return marginal_probs, paralog_cn, paralog_qual
def paralog_cn_str(paralog_cn, paralog_qual, min_qual_value=5):
    """Format paralog-specific copy numbers together with their qualities.

    Copies whose quality falls below ``min_qual_value`` are masked: their CN is
    rendered as ``'?'`` and their quality reported as 0. All reported qualities
    are floored to integers.

    Returns:
        tuple: (comma-separated CN string, tuple of integer qualities,
        flag telling whether at least one copy passed the threshold).
    """
    known = [qual >= min_qual_value for qual in paralog_qual]
    cn_text = ','.join(str(cn) if ok else '?' for cn, ok in zip(paralog_cn, known))
    int_quals = tuple(int(qual) if ok else 0 for qual, ok in zip(paralog_qual, known))
    return cn_text, int_quals, any(known)
def _add_paralog_filter(results, filt):
for res in results:
res.paralog_filter.add(filt)
class GeneConversionHmm(cn_hmm.HmmModel):
    """HMM over consecutive PSVs used to detect gene-conversion segments.

    States are sample genotypes; ``best_gt`` is the genotype that best fits the
    sample overall. Rows other than ``best_gt`` keep probability ``stay_prob``
    on the diagonal and send the remainder back to ``best_gt``.
    """
    def __init__(self, best_gt, n_gts, n_observations, stay_prob=0.99, initial_best_prob=0.5):
        # The model is always run for a single sample at a time.
        n_samples = 1
        super().__init__(n_samples, n_gts, n_observations, max_state_dist=n_gts * 2)
        # Log-transition matrix; unset transitions stay at -inf (prob 0).
        transition = np.full((n_gts, n_gts), -np.inf)
        stay_log = np.log(stay_prob)
        single_trans = np.log1p(-stay_prob)
        mult_trans = np.log((1 - stay_prob) / (n_gts - 1))
        for state in range(n_gts):
            if state == best_gt:
                # NOTE(review): this fills the WHOLE best_gt row with mult_trans,
                # including its diagonal (so best_gt has no stay_log self-transition) --
                # confirm this asymmetry is intended.
                transition[state] = mult_trans
            else:
                # From any other state: go back to best_gt with prob 1 - stay_prob,
                # otherwise stay put.
                transition[state, best_gt] = single_trans
                transition[state, state] = stay_log
        self.set_transition(transition)
        # Initial distribution: best_gt gets initial_best_prob, the rest share the remainder.
        initial = np.full(n_gts, np.log((1 - initial_best_prob) / (n_gts - 1)))
        initial[best_gt] = np.log(initial_best_prob)
        self.set_initial(initial)
# One detected gene-conversion event: coordinates, the genotype it replaces,
# the replacement genotype, Phred quality and the number of supporting PSVs.
GeneConversion = collections.namedtuple(
    'GeneConversion', ('start', 'end', 'main_gt', 'replacement_gt', 'qual', 'n_psvs'))
def _detect_gene_conversion(sample_id, genotypes_str, sample_gt_probs, psv_infos, semirel_psv_ixs):
    """Search for gene-conversion segments in one sample.

    Runs an HMM over the semi-reliable PSVs (states = sample genotypes) and
    reports Viterbi segments whose state differs from the overall best
    genotype. Returns a list of ``GeneConversion`` tuples.
    """
    n_psvs = len(semirel_psv_ixs)
    n_genotypes = len(genotypes_str)
    best_gt = np.argmax(sample_gt_probs)
    model = GeneConversionHmm(best_gt, n_genotypes, n_psvs)
    # Emission matrix shape: (1 HMM sample, genotypes, observations/PSVs).
    emission_matrix = np.zeros((1, n_genotypes, n_psvs))
    HMM_SAMPLE_ID = 0
    for i, psv_ix in enumerate(semirel_psv_ixs):
        emission_matrix[HMM_SAMPLE_ID, :, i] = psv_infos[psv_ix].support_matrix[sample_id]
    model.set_emission_matrices(emission_matrix)
    prob, states_vec = model.viterbi(HMM_SAMPLE_ID)
    # Run before the path_likelihood calls below (presumably required by them).
    model.run_forward_backward()
    res = []
    for segment in cn_hmm.get_simple_path(states_vec):
        # Skip segments in the best genotype and single-PSV segments.
        if segment.state == best_gt or segment.end_ix == segment.start_ix + 1:
            continue
        # Compare the candidate segment against the same span under the best genotype.
        segment0 = cn_hmm.SimpleSegment(segment.start_ix, segment.end_ix, best_gt)
        probs = np.array((
            model.path_likelihood(HMM_SAMPLE_ID, (segment0,)),
            model.path_likelihood(HMM_SAMPLE_ID, (segment,))))
        probs -= logsumexp(probs)
        qual = common.phred_qual(probs, best_ix=1)
        start_psv = psv_infos[semirel_psv_ixs[segment.start_ix]].psv
        end_psv = psv_infos[semirel_psv_ixs[segment.end_ix - 1]].psv
        res.append(GeneConversion(start_psv.start, end_psv.start + len(end_psv.ref),
            genotypes_str[best_gt], genotypes_str[segment.state], qual, segment.end_ix - segment.start_ix))
    return res
def _create_sample_results_from_agcn(sample_id, region_group_extra):
    """Create one ``ResultEntry`` per constant-agCN region of a sample.

    Returns:
        - list of ``ResultEntry`` objects;
        - list of half-open ``[start, end]`` index ranges grouping consecutive
          entries that share the same predicted copy number.
    """
    group_name = region_group_extra.region_group.name
    window_searcher = region_group_extra.group_windows_searcher
    hmm_window_searcher = region_group_extra.hmm_windows_searcher
    psv_searcher = region_group_extra.psv_searcher

    sample_results = []
    linked_ranges = []
    for const_region in region_group_extra.sample_const_regions[sample_id]:
        entry = ResultEntry(sample_id, const_region)
        entry.info['group'] = group_name
        entry.info.update(const_region.info)

        start = const_region.region1.start
        end = const_region.region1.end
        entry.info['n_windows'] = window_searcher.overlap_size(start, end)
        entry.info['hmm_windows'] = hmm_window_searcher.overlap_size(start, end)
        psv_a, psv_b = psv_searcher.contained_ixs(start, end)
        entry.info['n_psvs'] = psv_b - psv_a
        entry.info['rel_psvs'] = np.sum(region_group_extra.psv_is_reliable[psv_a:psv_b])

        ix = len(sample_results)
        if sample_results and sample_results[-1].pred_cn == entry.pred_cn:
            # Same predicted CN as the previous entry: extend the open range.
            linked_ranges[-1][1] = ix + 1
        else:
            linked_ranges.append([ix, ix + 1])
        sample_results.append(entry)
    return sample_results, linked_ranges
def _genotypes_str(sample_genotypes, genotypes_str_cache):
"""
Returns
- string representations of sample genotypes,
- string representations of various marginal probabilities in form (0??, 1??, ..., ?0?, ...).
"""
n_copies = len(sample_genotypes[0])
sample_cn = sum(sample_genotypes[0])
if sample_cn in genotypes_str_cache:
return genotypes_str_cache[sample_cn]
sample_genotypes_str = [','.join(map(str, gt)) for gt in sample_genotypes]
marginal_str = []
if n_copies > 2:
gt_str = ['?'] * n_copies
for copy in range(n_copies):
for curr_copy_cn in range(sample_cn + 1):
gt_str[copy] = str(curr_copy_cn)
marginal_str.append(''.join(gt_str))
gt_str[copy] = '?'
res = (sample_genotypes_str, marginal_str)
genotypes_str_cache[sample_cn] = res
return res
def _single_sample_pscn(sample_id, sample_name, sample_results, linked_ranges, region_group_extra, genome,
out, genotypes_str_cache, max_genotypes):
# ====== Defining useful variables ======
psv_infos = region_group_extra.psv_infos
n_psvs = len(psv_infos)
region_group = region_group_extra.region_group
group_name = region_group.name
n_copies = region_group.cn // 2
outp = out.paralog_cn
region_chrom = region_group.region1.chrom_name(genome)
psv_searcher = region_group_extra.psv_searcher
# ====== Calculate psCN for a set of consecutive regions with the same agCN ======
for link_ix, (start_ix, end_ix) in enumerate(linked_ranges):
# ===== Check if psCN can be calculated =====
curr_results = sample_results[start_ix:end_ix]
if not curr_results[0].sample_const_region.cn_is_known:
_add_paralog_filter(curr_results, Filter.UncertainCN)
continue
psv_ixs = | |
<gh_stars>1-10
import taichi as ti
import numpy as np
from functools import reduce
# from sph_base import SPHBase
# ti.init(arch=ti.cpu)
# Use GPU for higher peformance if available
ti.init(arch=ti.gpu, device_memory_GB=4, packed=True)
# Neighbor-search grid sizing follows the TA's reference code.
# res = (720,720)
res = (512,512)
dim = 2
assert dim > 1
screen_to_world_ratio = 50
bound = np.array(res) / screen_to_world_ratio
print(bound)
# Material
material_boundary = 0
particle_radius = 0.05 # particle radius
particle_diameter = 2 * particle_radius
support_radius = particle_radius * 4.0 # support radius
m_V = 0.8 * particle_diameter ** dim
particle_max_num = 2 ** 15
particle_max_num_per_cell = 100
particle_max_num_neighbor = 200
particle_num = ti.field(int, shape=())
# gravity = -98.0 # gravity
viscosity = 0.05 # viscosity
density_0 = 1000.0 # reference density
mass = m_V * density_0
dt =3e-4
exponent = 7.0
stiffness = 50.0
# Per-particle state fields
x = ti.Vector.field(dim, dtype=float)
v = ti.Vector.field(dim, dtype=float)
d_velocity = ti.Vector.field(dim, dtype=float)
density = ti.field(dtype=float)
pressure = ti.field(dtype=float)
material = ti.field(dtype=int)
color = ti.field(dtype=int)
particle_neighbors = ti.field(int)
particle_neighbors_num = ti.field(int)
particles_node = ti.root.dense(ti.i, particle_max_num)
particles_node.place(x,v,d_velocity, density, pressure, material, color,particle_neighbors_num)
# Grid related properties
grid_size = support_radius
grid_num = np.ceil(np.array(res) / grid_size).astype(int)
print(grid_num)
grid_particles_num = ti.field(int)
grid_particles = ti.field(int)
padding = grid_size
particle_node = particles_node.dense(ti.j, particle_max_num_neighbor)
particle_node.place(particle_neighbors)
index = ti.ij if dim == 2 else ti.ijk
grid_node = ti.root.dense(index, grid_num)
grid_node.place(grid_particles_num)
cell_index = ti.k if dim == 2 else ti.l
cell_node = grid_node.dense(cell_index, particle_max_num_per_cell)
cell_node.place(grid_particles)
# ========================================
#
# boundary particle
# Circular rigid bodies: per-circle state fields
circular_max_num=1000
circular_num= ti.field(int, shape=())
circular_node = ti.root.dense(ti.i, circular_max_num)
c_x=ti.Vector.field(dim, dtype=float)
c_v=ti.Vector.field(dim, dtype=float)
c_f=ti.Vector.field(dim, dtype=float)
c_r=ti.field(float)
c_m=ti.field(float)
fixed = ti.field(int)
circular_node.place(c_x,c_v,c_f,c_r,c_m,fixed)
Young_modulus=2000000
# Spring rest lengths between circle pairs (0 means "no spring").
rest_length = ti.field(dtype=float, shape=(circular_max_num, circular_max_num))
Young_modulus_spring=921000
dashpot_damping=300# spring dashpot damper
离墙距离=0.2# wall offset: boundary particles must not sit too close to the real walls, or they stop working -- possibly a grid issue?
@ti.func
def cubic_kernel( r_norm):
    # Cubic-spline smoothing kernel W(r) with support radius h = support_radius.
    # The normalization constant k depends on the spatial dimension.
    res = ti.cast(0.0, ti.f32)
    h = support_radius
    # value of cubic spline smoothing kernel
    k = 1.0
    if dim == 1:
        k = 4 / 3
    elif dim == 2:
        k = 40 / 7 / np.pi
    elif dim == 3:
        k = 8 / np.pi
    k /= h ** dim
    q = r_norm / h
    if q <= 1.0:
        if q <= 0.5:
            q2 = q * q
            q3 = q2 * q
            res = k * (6.0 * q3 - 6.0 * q2 + 1)
        else:
            res = k * 2 * ti.pow(1 - q, 3.0)
    # res keeps the ti.f32 type it was initialized with
    return res
@ti.func
def cubic_kernel_derivative( r):
    # Gradient of the cubic-spline kernel with respect to r (a dim-vector).
    h = support_radius
    # derivative of cubic spline smoothing kernel
    k = 1.0
    if dim == 1:
        k = 4 / 3
    elif dim == 2:
        k = 40 / 7 / np.pi
    elif dim == 3:
        k = 8 / np.pi
    k = 6. * k / h ** dim
    r_norm = r.norm()
    q = r_norm / h
    res = ti.Vector([0.0 for _ in range(dim)])
    # Guard against division by zero for (nearly) coincident particles.
    if r_norm > 1e-5 and q <= 1.0:
        grad_q = r / (r_norm * h)
        if q <= 0.5:
            res = k * q * (3.0 * q - 2.0) * grad_q
        else:
            factor = 1.0 - q
            res = k * (-factor * factor) * grad_q
    return res
@ti.func
def viscosity_force( p_i, p_j, r):
    # Compute the viscosity force contribution of neighbor p_j on particle p_i,
    # where r = x_i - x_j.
    v_xy = (v[p_i] -
        v[p_j]).dot(r)
    res = 2 * (dim + 2) * viscosity * (mass / (density[p_j])) * v_xy / (
        r.norm()**2 + 0.01 * support_radius**2) * cubic_kernel_derivative(r)
    return res
@ti.func
def pressure_force( p_i, p_j, r):
    # Compute the pressure force contribution, Symmetric Formula
    # (r = x_i - x_j).
    res = -density_0 * m_V * (pressure[p_i] / density[p_i] ** 2
        + pressure[p_j] / density[p_j] ** 2) \
        * cubic_kernel_derivative(r)
    return res
@ti.func
def simulate_collisions( p_i, vec, d):
    # Push particle p_i back by distance d along unit normal vec and reflect
    # the normal component of its velocity.
    # Collision factor, assume roughly (1-c_f)*velocity loss after collision
    c_f = 0.3
    x[p_i] += vec * d
    v[p_i] -= (1.0 + c_f) * v[p_i].dot(vec) * vec
@ti.kernel
def solve():
    # One simulation substep: grid rebuild, neighbor search, density/pressure,
    # force accumulation, integration, wall clamping, circle coupling and
    # circle state update.
    # --- Rebuild the background grid used for neighbor search ---
    # initialize_particle_system()
    for p in range(particle_num[None]):
        cell = (x[p] / grid_size).cast(int)
        offset = grid_particles_num[cell].atomic_add(1)
        grid_particles[cell, offset] = p
    # --- Neighbor search (grid walk borrowed from the TA's reference code) ---
    # search_neighbors()
    for i in range(particle_num[None]):
        # (seems unnecessary?) Skip boundary particles
        # if material[i] == 0:
        #     continue
        center_cell = (x[i] / grid_size).cast(int)
        cnt = 0
        for offset in ti.grouped(ti.ndrange(*((-1, 2),) * dim)):
            if cnt >= particle_max_num_neighbor:
                break
            cell = center_cell + offset
            flag = True
            for d in ti.static(range(dim)):
                flag = flag and (0 <= cell[d] < grid_num[d])
            if not flag:
                break
            for j in range(grid_particles_num[cell]):
                p_j = grid_particles[cell, j]
                distance = (x[i] - x[p_j]).norm()
                if i != p_j and distance < support_radius:
                    particle_neighbors[i, cnt] = p_j
                    cnt += 1
        particle_neighbors_num[i] = cnt
    # --- Compute densities from the neighbors ---
    # compute_densities()
    for i in range(particle_num[None]):
        x_i = x[i]
        density[i] = 0.0# reset density
        for j in range(particle_neighbors_num[i]):
            p_j = particle_neighbors[i, j]
            x_j = x[p_j]
            密度权重=1# density weight; for now keeps particles near circles from drifting toward the boundary
            if(material[i]==2):
                密度权重=6
            density[i] += 密度权重*m_V * cubic_kernel((x_i - x_j).norm())
        density[i] *= density_0
    边界压力权重=1
    # 边界粘性权重=1  (boundary viscosity weight)
    # 边界系数_exponent=exponent  (boundary exponent)
    # --- Compute pressure from density ---
    # compute_pressure_forces()
    for i in range(particle_num[None]):# could be merged into the loop above
        density[i] = ti.max(density[i], density_0)
        # if(material[i]==2):
        #     边界系数_exponent=7
        # pressure[i] = stiffness * (ti.pow(density[i] / density_0, 边界系数_exponent) - 1.0)
        pressure[i] = stiffness * (ti.pow(density[i] / density_0,exponent) - 1.0)
    # --- Gravity, pressure force and viscosity force ---
    # compute_non_pressure_forces()
    for i in range(particle_num[None]):
        if material[i] == 2:
            continue
        x_i = x[i]
        dv = ti.Vector([0 ,-280])# gravity
        if(material[i]==3):
            dv[1]*=-1
        for j in range(particle_neighbors_num[i]):# accumulate each neighbor's contribution
            p_j = particle_neighbors[i, j]
            if(material[p_j]==2):
                # boundary pressure weight
                边界压力权重=4
                # 边界粘性权重=1
            x_j = x[p_j]
            # pressure force
            dv += 边界压力权重*pressure_force(i, p_j, x_i-x_j)
            # viscosity force
            # dv += 边界粘性权重*viscosity_force(i, p_j, x_i - x_j)
            dv += viscosity_force(i, p_j, x_i - x_j)
        d_velocity[i] = dv
    # --- Symplectic Euler state update ---
    for i in range(particle_num[None]):
        if material[i] == 2:
            continue
        # if d_velocity[i].norm()>100:d_velocity[i]*=0.1  # coupling often creates fast particles; capping acceleration might help but may cost performance -- test later
        v[i] += dt * d_velocity[i]
        if v[i].norm()>200:v[i]*=0.2# same as above
        x[i] += dt * v[i]
    # --- Keep the crude box boundary to clamp positions ---
    # Particles can serve as boundaries too, but fast particles may tunnel through them,
    # so positions are still clamped here.
    for i in range(particle_num[None]):
        if material[i] ==2:
            continue
        pos = x[i]
        # 离墙距离2 is slightly wider than the particle boundary so particles do not
        # get stuck on it; the commented v+=... pushes were meant to free stuck
        # particles and are no longer needed.
        离墙距离2=离墙距离+0.01
        if pos[0] < 离墙距离2:
            # print("a")
            x[i][0]+=-1.2*(pos[0] - 离墙距离2)
            # v[i][0]+=4
        if pos[0] > bound[0] - 离墙距离2:
            # print("y")
            x[i][0]-=-1.2*( bound[0] - 离墙距离2-pos[0])
            # v[i][0]-=4
        if pos[1] > bound[1] - 离墙距离2:
            # print("s")
            x[i][1]-=-1.2*(bound[1] - 离墙距离2-pos[1])
            # v[i][1]-=4
        if pos[1] < 离墙距离2:
            # print("x")
            x[i][1]+=-1.2*(pos[1] - 离墙距离2)
            # v[i][1]+=4
    # --- Circle <-> particle coupling ---
    for i in range(circular_num[None]):
        pos1 = c_x[i]
        质量比例=particle_radius/c_r[i]# mass ratio: really the area (volume) ratio, via the radii
        for j in range(particle_num[None]):
            direction_vector=pos1-x[j]
            direction_vector_length=ti.sqrt(direction_vector[0]**2+direction_vector[1]**2)
            if (direction_vector_length<=c_r[i]+particle_radius):
                # if(material[j]==1):  # interact only with fluid particles, since this scheme is imperfect
                # Tangential acceleration approximating friction: n is perpendicular to the
                # direction vector, v_rel is the relative velocity projected onto n.
                n=ti.Vector([direction_vector[1],-direction_vector[0]])
                v_rel = (c_v[i] - v[j]).dot(n)
                v[j]-=v_rel*n*dt*7
                c_v[i]+=v_rel*n*dt*7
                if(material[j]==1):x[j]-=direction_vector*direction_vector_length*0.05# nudge the particle out slightly to soften the impact; makes nearby water jitter once the object moves
                elastic_force=2000*(direction_vector/direction_vector_length)*(c_r[i]+particle_radius-direction_vector_length)
                v[j]-=elastic_force# dt can be dropped because of the Young's-modulus-sized factor
                c_v[i]+=elastic_force*质量比例*0.3# keep the particles' influence on the circle small
        # --- Circle <-> circle collisions and link springs ---
        # for i in range(circular_num[None]):  # the outermost loop can be shared
        for j in range(i+1,circular_num[None]):
            direction_vector=c_x[j]-pos1 # direction_vector=c_x[j]-c_x[i], with pos1 substituted
            d = (direction_vector).normalized() # unit vector between the two circles
            if rest_length[i, j] == 0: # is there a spring between this pair?
                direction_vector_length=ti.sqrt(direction_vector[0]**2+direction_vector[1]**2)
                if (direction_vector_length<=c_r[i] + c_r[j]):
                    elastic_force=Young_modulus*(direction_vector/direction_vector_length)*(c_r[i]+c_r[j]-direction_vector_length)
                    elastic_damping = (c_v[i] - c_v[j]).dot(direction_vector/direction_vector_length)
                    c_v[i] += -elastic_damping*10 * (direction_vector/direction_vector_length)*dt
                    c_v[j] -= -elastic_damping*10 * (direction_vector/direction_vector_length)*dt
                    c_v[i]-=elastic_force*dt
                    c_v[j]+=elastic_force*dt
            else:
                # Springs must accumulate into forces: the dashpot term depends on v,
                # so velocities cannot be updated directly here.
                # c_v[i] += Young_modulus_spring*(direction_vector.norm()/rest_length[j, i]-1)*d*dt
                c_f[i] += Young_modulus_spring*(direction_vector.norm()/rest_length[j, i]-1)*d
                # c_v[j] += -Young_modulus_spring*(direction_vector.norm()/rest_length[j, i]-1)*d*dt
                c_f[j] += -Young_modulus_spring*(direction_vector.norm()/rest_length[j, i]-1)*d
                v_rel = (c_v[j] - c_v[i]).dot(d)
                # c_v[i] += v_rel*dashpot_damping* d*dt
                c_f[i] += v_rel*dashpot_damping* d
                # c_v[j] += -v_rel*dashpot_damping* d*dt
                c_f[j] += -v_rel*dashpot_damping* d
    # --- Semi-implicit Euler update of the circles ---
    for i in range(circular_num[None]):
        # The fixed/free distinction matters: particles change circle velocities and the
        # update uses velocity-dependent terms, so fixed circles must zero v and f
        # immediately, or the springs misbehave.
        if fixed[i]==0:
            c_v[i]+=c_f[i]*dt
            c_v[i]*=0.995
            c_f[i]=[0,-2800]# reset the force (to gravity) after use
            c_x[i] += c_v[i]*dt
        else:
            c_v[i]=[0,0]
            c_f[i]=[0,0]
    # --- Circle/wall collisions ---
    # Could be dropped once particle boundaries are validated; reuses the simplified
    # homework formula (1000 stood for the Young's modulus; with this file's large
    # modulus the *dt factor is omitted).
    for i in range(circular_num[None]):
        if(c_x[i][0]<c_r[i]):c_v[i][0]+=(1000*(c_r[i]-c_x[i][0])-0.1*c_v[i][0])
        # NOTE(review): the damping term below uses c_v[i][0] on the y axis --
        # likely a typo for c_v[i][1]; confirm.
        if(c_x[i][1]<c_r[i]):c_v[i][1]+=(1000*(c_r[i]-c_x[i][1])-0.1*c_v[i][0])
        if(c_x[i][0]+c_r[i]>bound[0]):c_v[i][0]+=(1000*(bound[0]-c_x[i][0]-c_r[i])-0.1*c_v[i][0])
        if(c_x[i][1]+c_r[i]>bound[1]):c_v[i][1]+=(1000*(bound[1]-c_x[i][1]-c_r[i])-0.1*c_v[i][1])
def substep():
    # Reset per-cell particle counters and neighbor lists, then run one solver step.
    grid_particles_num.fill(0)
    particle_neighbors.fill(-1)
    solve()
@ti.kernel
def add_particle(posx:ti.f32,posy:ti.f32, vx:ti.f32,vy:ti.f32, material1:ti.i32, color1:ti.i32):
    # Append one particle with the given position, velocity and material.
    # color1 == 0 selects a default color by material:
    #   3 -> green, 2 -> gray (boundary), 1 -> light blue (fluid).
    color_=color1
    if(color1==0):
        if(material1==3):color_=0x00cc33
        if(material1==2):color_=0x696969
        if(material1==1):color_=0x87CEFA
    num =particle_num[None]
    x[num]= [posx,posy]
    v[num]= [vx,vy]
    density[num] = 1000
    material[num] = material1
    color[num] = color_
    particle_num[None] += 1
# Number of particles making up the static walls (set by build_boundary).
作为边界的的粒子=0
def build_boundary():
    # Stamp boundary particles (material 2) along the four walls, inset by
    # 离墙距离 (wall offset) from the actual simulation box.
    # left wall
    # 离墙距离=0.15
    边界粒度=36# particles per world unit along each wall
    边界数量=380# particle count per wall
    for i in range(边界数量):
        add_particle(离墙距离,i/边界粒度,0,0,2,0)
    # bottom wall
    for i in range(边界数量):
        add_particle(i/边界粒度,离墙距离,0,0,2,0)
    # top wall
    for i in range(边界数量):
        add_particle(i/边界粒度,res[0] /screen_to_world_ratio-离墙距离,0,0,2,0)
    # right wall
    for i in range(边界数量):
        add_particle(res[0] /screen_to_world_ratio-离墙距离,i/边界粒度,0,0,2,0)
    global 作为边界的的粒子
    作为边界的的粒子=particle_num[None]
    print(作为边界的的粒子)
# Behavior degrades near the boundary -- a grid effect?
# Density weighting lets a single thin layer of particles act as the boundary.
# With particles as boundaries everywhere, the circle/wall checks could be removed.
# [start, end) particle indices of the most recently stamped boundary line.
上一个粒子画的线=[0,0]
def p_bondary(pos1_,pos2_,dxdy):
    # Approximate a straight boundary between two points by stamping boundary
    # particles along the segment.
    # dxdy: stamping step size
    # Swap the endpoints so drawing always proceeds from the lower point upward.
    if (pos1_[1] <=pos2_[1]):
        pos1=pos1_
        pos2=pos2_
    else:
        pos1=pos2_
        pos2=pos1_
    print(pos1)
    print(pos2)
    # slope k from the coordinate differences
    k=(pos2[1]-pos1[1])/(pos2[0]-pos1[0])
    print("k:",k)
    dx=dy=dxdy# default: one full step on each axis
    if k<0:
        if(k>-1):
            dx*=-1
            dy=k*dxdy*-1# dy must stay positive
        else:
            dx=(1/k)*dxdy
    else:
        if(k>=1):
            dx=(1/k)*dxdy
        else:
            dy=k*dxdy
    print("dx,dy:",dx,dy)
    posx=posy=0
    global 上一个粒子画的线
    上一个粒子画的线[0]=particle_num[None]
    if(k<0):
        while(1):
            add_particle((pos1[0]+posx),(pos1[1]+posy),0,0,2,0)
            if(pos1[0]+posx>pos2[0]):posx+=dx# the step direction depends on the slope sign
            if(pos1[1]+posy<pos2[1]):posy+=dy
            # if(pos1[1]+posy>pos2[1]):break
            if(pos1[0]+posx<pos2[0]):break
            print((posx,posy))
            # print(pos1[0]+posx)
            # add_particle((pos1[0]+posx),(pos1[1]+posy),0,0,2,0x956333)
    else:
        while(1):
            add_particle((pos1[0]+posx),(pos1[1]+posy),0,0,2,0)
            if(pos1[0]+posx<pos2[0]):posx+=dx# the step direction depends on the slope sign
            if(pos1[1]+posy<pos2[1]):posy+=dy
            # if(pos1[1]+posy>pos2[1]):break
            if(pos1[0]+posx>pos2[0]):break
            print((pos1[0]+posx,pos1[1]+posy))
            # add_particle((pos1[0]+posx),(pos1[1]+posy),0,0,2,0x956333)
    上一个粒子画的线[1]=particle_num[None]
    # print(上一个粒子画的线[1]-上一个粒子画的线[0])
def 边界粒子变流体(上一个粒子画的线):
    # Turn boundary particles (material 2) back into fluid (material 1).
    # With the default [0, 0] range: convert every boundary particle added
    # after the initial walls; otherwise convert only the [start, end) range.
    if 上一个粒子画的线[0]==0 and 上一个粒子画的线[1]==0 :
        for i in range(作为边界的的粒子,particle_num[None]):
            if(material[i]==2):
                material[i] = 1
    else:
        for i in range(上一个粒子画的线[0],上一个粒子画的线[1]):
            material[i] = 1
# @ti.kernel
def 范围边界变流体(pos_xx: ti.f32, pos_yy: ti.f32,搜索半径: ti.f32):
    # Convert boundary particles within 搜索半径 (search radius) of the given
    # point (e.g. the mouse position) into fluid particles.
    pos=ti.Vector([pos_xx,pos_yy])
    for i in range(particle_num[None]):
        if material[i]== 2:
            dpos=pos-x[i]
            d = (dpos).norm() # distance from the point to the particle
            if(d<=搜索半径):
                material[i]=1
                color[i]=0x87CEFA
    print("sss")
def add_particle_cube(pos, size, material, color_):
    """Fill an axis-aligned rectangle with particles.

    Args:
        pos: lower-left corner (world units).
        size: (width, height) in world units; sampled at 10 points per unit.
        material: particle material id, forwarded to ``add_particle``.
        color_: color forwarded to ``add_particle`` (0 picks the material default).

    NOTE(review): points are counted at 10 per unit but spaced 1/18 apart, so
    the filled region is denser/smaller than *size* -- confirm this is intended.
    """
    # int(...) replaces the C-style (int)(...) casts; a stray dead `pass`
    # statement inside the inner loop was removed.
    nx = int(size[0] * 10)
    ny = int(size[1] * 10)
    for i in range(nx):
        for j in range(ny):
            add_particle(pos[0] + i / 18, pos[1] + j / 18, 0, 0, material, color_)
def 一个水枪(水枪位置,水枪速度,material):
    # Water jet: emit a column of four particles just above 水枪位置
    # (jet position), all moving with velocity 水枪速度.
    add_particle(水枪位置[0],水枪位置[1]+0.05, 水枪速度[0],水枪速度[1],material,0)
    add_particle(水枪位置[0],水枪位置[1]+0.1, 水枪速度[0],水枪速度[1],material,0)
    add_particle(水枪位置[0],水枪位置[1]+0.15, 水枪速度[0],水枪速度[1],material,0)
    add_particle(水枪位置[0],水枪位置[1]+0.2, 水枪速度[0],水枪速度[1],material,0)
@ti.kernel
def delete_particle(num1:ti.i32):
if(particle_num[None]>作为边界的的粒子):
num2 =particle_num[None]
particle_num[None]-=num1
for i in range(num2-num1,num2):
x[i]=[0,0]
v[i]=[0,0]
d_velocity[i]=[0,0]
pressure[i]= 0
density[i] = | |
ignored. Default: ``True`` in group chats and ``False`` in private chats.
"""
self._quote(kwargs)
return self.bot.send_message(self.chat_id, *args, **kwargs)
def reply_photo(self, *args, **kwargs):
    """Shortcut for ``bot.send_photo(update.message.chat_id, *args, **kwargs)``.

    Keyword Args:
        quote (Optional[bool]): If ``True``, the photo is sent as an actual
            reply to this message. Ignored when ``reply_to_message_id`` is
            passed in ``kwargs``. Default: ``True`` in group chats and
            ``False`` in private chats.

    Returns:
        :class:`telegram.Message`: On success, the message that was posted.
    """
    self._quote(kwargs)
    send = self.bot.send_photo
    return send(self.chat_id, *args, **kwargs)
def reply_audio(self, *args, **kwargs):
    """Shortcut for ``bot.send_audio(update.message.chat_id, *args, **kwargs)``.

    Keyword Args:
        quote (Optional[bool]): If ``True``, the audio is sent as an actual
            reply to this message. Ignored when ``reply_to_message_id`` is
            passed in ``kwargs``. Default: ``True`` in group chats and
            ``False`` in private chats.

    Returns:
        :class:`telegram.Message`: On success, the message that was posted.
    """
    self._quote(kwargs)
    send = self.bot.send_audio
    return send(self.chat_id, *args, **kwargs)
def reply_document(self, *args, **kwargs):
    """Shortcut for ``bot.send_document(update.message.chat_id, *args, **kwargs)``.

    Keyword Args:
        quote (Optional[bool]): If ``True``, the document is sent as an actual
            reply to this message. Ignored when ``reply_to_message_id`` is
            passed in ``kwargs``. Default: ``True`` in group chats and
            ``False`` in private chats.

    Returns:
        :class:`telegram.Message`: On success, the message that was posted.
    """
    self._quote(kwargs)
    send = self.bot.send_document
    return send(self.chat_id, *args, **kwargs)
def reply_sticker(self, *args, **kwargs):
    """Shortcut for ``bot.send_sticker(update.message.chat_id, *args, **kwargs)``.

    Keyword Args:
        quote (Optional[bool]): If ``True``, the sticker is sent as an actual
            reply to this message. Ignored when ``reply_to_message_id`` is
            passed in ``kwargs``. Default: ``True`` in group chats and
            ``False`` in private chats.

    Returns:
        :class:`telegram.Message`: On success, the message that was posted.
    """
    self._quote(kwargs)
    send = self.bot.send_sticker
    return send(self.chat_id, *args, **kwargs)
def reply_video(self, *args, **kwargs):
    """Shortcut for ``bot.send_video(update.message.chat_id, *args, **kwargs)``.

    Keyword Args:
        quote (Optional[bool]): If ``True``, the video is sent as an actual
            reply to this message. Ignored when ``reply_to_message_id`` is
            passed in ``kwargs``. Default: ``True`` in group chats and
            ``False`` in private chats.

    Returns:
        :class:`telegram.Message`: On success, the message that was posted.
    """
    self._quote(kwargs)
    send = self.bot.send_video
    return send(self.chat_id, *args, **kwargs)
def reply_video_note(self, *args, **kwargs):
    """Shortcut for ``bot.send_video_note(update.message.chat_id, *args, **kwargs)``.

    Keyword Args:
        quote (Optional[bool]): If ``True``, the video note is sent as an
            actual reply to this message. Ignored when ``reply_to_message_id``
            is passed in ``kwargs``. Default: ``True`` in group chats and
            ``False`` in private chats.

    Returns:
        :class:`telegram.Message`: On success, the message that was posted.
    """
    self._quote(kwargs)
    send = self.bot.send_video_note
    return send(self.chat_id, *args, **kwargs)
def reply_voice(self, *args, **kwargs):
    """Shortcut for ``bot.send_voice(update.message.chat_id, *args, **kwargs)``.

    Keyword Args:
        quote (Optional[bool]): If ``True``, the voice note is sent as an
            actual reply to this message. Ignored when ``reply_to_message_id``
            is passed in ``kwargs``. Default: ``True`` in group chats and
            ``False`` in private chats.

    Returns:
        :class:`telegram.Message`: On success, the message that was posted.
    """
    self._quote(kwargs)
    send = self.bot.send_voice
    return send(self.chat_id, *args, **kwargs)
def reply_location(self, *args, **kwargs):
    """Shortcut for ``bot.send_location(update.message.chat_id, *args, **kwargs)``.

    Keyword Args:
        quote (Optional[bool]): If ``True``, the location is sent as an actual
            reply to this message. Ignored when ``reply_to_message_id`` is
            passed in ``kwargs``. Default: ``True`` in group chats and
            ``False`` in private chats.

    Returns:
        :class:`telegram.Message`: On success, the message that was posted.
    """
    self._quote(kwargs)
    send = self.bot.send_location
    return send(self.chat_id, *args, **kwargs)
def reply_venue(self, *args, **kwargs):
    """Shortcut for ``bot.send_venue(update.message.chat_id, *args, **kwargs)``.

    Keyword Args:
        quote (Optional[bool]): If ``True``, the venue is sent as an actual
            reply to this message. Ignored when ``reply_to_message_id`` is
            passed in ``kwargs``. Default: ``True`` in group chats and
            ``False`` in private chats.

    Returns:
        :class:`telegram.Message`: On success, the message that was posted.
    """
    self._quote(kwargs)
    send = self.bot.send_venue
    return send(self.chat_id, *args, **kwargs)
def reply_contact(self, *args, **kwargs):
    """Shortcut for ``bot.send_contact(update.message.chat_id, *args, **kwargs)``.

    Keyword Args:
        quote (Optional[bool]): If ``True``, the contact is sent as an actual
            reply to this message. Ignored when ``reply_to_message_id`` is
            passed in ``kwargs``. Default: ``True`` in group chats and
            ``False`` in private chats.

    Returns:
        :class:`telegram.Message`: On success, the message that was posted.
    """
    self._quote(kwargs)
    send = self.bot.send_contact
    return send(self.chat_id, *args, **kwargs)
def forward(self, chat_id, disable_notification=False):
"""Shortcut for
>>> bot.forward_message(chat_id=chat_id,
... from_chat_id=update.message.chat_id,
... disable_notification=disable_notification,
... message_id=update.message.message_id)
Returns:
:class:`telegram.Message`: On success, instance representing the message forwarded.
"""
return self.bot.forward_message(
chat_id=chat_id,
from_chat_id=self.chat_id,
disable_notification=disable_notification,
message_id=self.message_id)
def edit_text(self, *args, **kwargs):
"""
Shortcut for
>>> bot.edit_message_text(chat_id=message.chat_id,
... message_id=message.message_id,
... *args, **kwargs)
Note:
You can only edit messages that the bot sent itself,
therefore this method can only be used on the
return value of the ``bot.send_*`` family of methods.
"""
return self.bot.edit_message_text(
chat_id=self.chat_id, message_id=self.message_id, *args, **kwargs)
def edit_caption(self, *args, **kwargs):
"""
Shortcut for
>>> bot.edit_message_caption(chat_id=message.chat_id,
... message_id=message.message_id,
... *args, **kwargs)
Note:
You can only edit messages that the bot sent itself,
therefore this method can only be used on the
return value of the ``bot.send_*`` family of methods.
"""
return self.bot.edit_message_caption(
chat_id=self.chat_id, message_id=self.message_id, *args, **kwargs)
def edit_reply_markup(self, *args, **kwargs):
"""
Shortcut for
>>> bot.edit_message_reply_markup(chat_id=message.chat_id,
... message_id=message.message_id,
... *args, **kwargs)
Note:
You can only edit messages that the bot sent itself,
therefore this method can only be used on the
return value of the ``bot.send_*`` family of methods.
"""
return self.bot.edit_message_reply_markup(
chat_id=self.chat_id, message_id=self.message_id, *args, **kwargs)
def delete(self, *args, **kwargs):
"""
Shortcut for
>>> bot.delete_message(chat_id=message.chat_id,
... message_id=message.message_id,
... *args, **kwargs)
Returns:
bool: On success, `True` is returned.
"""
return self.bot.delete_message(
chat_id=self.chat_id, message_id=self.message_id, *args, **kwargs)
def parse_entity(self, entity):
"""
Returns the text from a given :class:`telegram.MessageEntity`.
Note:
This method is present because Telegram calculates the offset and length in
UTF-16 codepoint pairs, which some versions of Python don't handle automatically.
(That is, you can't just slice ``Message.text`` with the offset and length.)
Args:
entity (telegram.MessageEntity): The entity to extract the text from. It must be an
entity that belongs to this message.
Returns:
str: The text of the given entity
"""
# Is it a narrow build, if so we don't need to convert
if sys.maxunicode == 0xffff:
return self.text[entity.offset:entity.offset + entity.length]
else:
entity_text = self.text.encode('utf-16-le')
entity_text = entity_text[entity.offset * 2:(entity.offset + entity.length) * 2]
return entity_text.decode('utf-16-le')
def parse_entities(self, types=None):
"""
Returns a ``dict`` that maps :class:`telegram.MessageEntity` to ``str``.
It contains entities from this message filtered by their ``type`` attribute as the key, and
the text that each entity belongs to as the value of the ``dict``.
Note:
This method should always be used instead of the ``entities`` attribute, since it
calculates the correct substring from the message text based on UTF-16 codepoints.
See ``get_entity_text`` for more info.
Args:
types (Optional[list]): List of ``telegram.MessageEntity`` types as strings. If the
``type`` attribute of an entity is contained in this list, it will be returned.
Defaults to a list of all types. All types can be found as constants in
:class:`telegram.MessageEntity`.
Returns:
dict[:class:`telegram.MessageEntity`, ``str``]: A dictionary of entities mapped to the
text that belongs to them, calculated based on UTF-16 codepoints.
"""
if types is None:
types = MessageEntity.ALL_TYPES
return {
entity: self.parse_entity(entity)
for entity in self.entities if entity.type in types
}
@property
def text_html(self):
"""
Creates an html-formatted string from the markup entities found in the message
(uses ``parse_entities``).
Use this if you want to retrieve the original string sent by the bot, as opposed to the
plain text with corresponding markup entities.
Returns:
str
"""
entities = self.parse_entities()
message_text = self.text
if not sys.maxunicode == 0xffff:
message_text = message_text.encode('utf-16-le')
markdown_text = ''
last_offset = 0
for entity, text in sorted(entities.items(), key=(lambda item: item[0].offset)):
text = escape_html(text)
if entity.type == MessageEntity.TEXT_LINK:
insert = '<a href="{}">{}</a>'.format(entity.url, text)
elif entity.type == MessageEntity.BOLD:
insert = '<b>' + text + '</b>'
elif entity.type == MessageEntity.ITALIC:
insert = '<i>' + text + '</i>'
elif entity.type == MessageEntity.CODE:
insert = '<code>' + text + '</code>'
elif entity.type == MessageEntity.PRE:
insert = '<pre>' + text + '</pre>'
else:
insert = text
if sys.maxunicode == 0xffff:
markdown_text += escape_html(message_text[last_offset:entity.offset]) + insert
else:
| |
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
# Copyright (c) 2019 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
"""Code for http interface"""
import json
from datetime import datetime
from flask import Flask, request
from flask_restplus import Api, Resource, fields, abort
from dcae_cli._version import __version__
from dcae_cli.commands import util
from dcae_cli.util.logger import get_logger
from dcae_cli.util.exc import DcaeException
from dcae_cli.util import config as cli_config
from dcae_cli.catalog.exc import MissingEntry, CatalogError, DuplicateEntry, FrozenEntry, ForbiddenRequest
from dcae_cli.catalog.mock.catalog import MockCatalog
# Module-level wiring: Flask app, flask_restplus Api, and the request/response
# models shared by the resource classes defined below.
_log = get_logger("http")
_app = Flask(__name__)
# Try to bundle as many errors together
# https://flask-restplus.readthedocs.io/en/stable/parsing.html#error-handling
_app.config['BUNDLE_ERRORS'] = True
# Root API object: all routes and the Swagger UI live under /onboarding.
_api = Api(_app, version=__version__, title="DCAE Onboarding HTTP API", description=""
        , contact="<EMAIL>", default_mediatype="application/json"
        , prefix="/onboarding", doc="/onboarding", default="onboarding"
        )
# URL where the full Component Spec JSON schema is published (used in doc text).
compSpecPath = cli_config.get_server_url() + cli_config.get_path_component_spec()
# Request body for component POST/PUT: owner id plus the raw spec object.
component_fields_request = _api.schema_model('Component Spec',
    {'properties': {'owner': {'type': 'string'},
                    'spec': {'type': 'object', \
                    'description': 'The Component Spec schema is here -> ' + compSpecPath}
                   }
    })
# Response shape for one component in a listing (no spec body).
component_fields_get = _api.model('component fields', {
    'id': fields.String(required=True, description='. . . . ID of the component'),
    'name': fields.String(required=True, description='. . . . Name of the component'),
    'version': fields.String(required=True, description='. . . . Version of the component'),
    'owner': fields.String(required=True, description='. . . . ID of who added the component'),
    'whenAdded': fields.DateTime(required=True, dt_format='iso8601', description='. . . . When component was added to the Catalog'),
    'modified': fields.DateTime(required=True, dt_format='iso8601', description='. . . . When component was last modified'),
    'status': fields.String(required=True, description='. . . . Status of the component'),
    'description': fields.String(required=True, description='. . . . Description of the component'),
    'componentType': fields.String(required=True, description='. . . . only "docker"'),
    'componentUrl': fields.String(required=True, description='. . . . Url to the Component Specification')
})
# Envelope for GET /components.
components_get = _api.model('Component List', {'components': fields.List(fields.Nested(component_fields_get))})
# Single-component response: listing fields plus the full spec document.
component_fields_by_id = _api.inherit('component fields by id', component_fields_get, {
    'spec': fields.Raw(required=True, description='The Component Specification (json)')
})
# Response for POST /components: just the self-URL of the new component.
component_post = _api.model('Component post', {'componentUrl': fields.String(required=True, description='. . . . Url to the Component Specification')})
# URL where the full Data Format JSON schema is published (used in doc text).
dataformatPath = cli_config.get_server_url() + cli_config.get_path_data_format()
# Request body for data-format POST/PUT: owner id plus the raw spec object.
dataformat_fields_request = _api.schema_model('Data Format Spec',
    {'properties': {'owner': {'type': 'string'},
                    'spec': {'type': 'object', \
                    'description': 'The Data Format Spec schema is here -> ' + dataformatPath}
                   }
    })
# Response shape for one data format in a listing (no spec body).
dataformat_fields_get = _api.model('dataformat fields', {
    'id': fields.String(required=True, description='. . . . ID of the data format'),
    'name': fields.String(required=True, description='. . . . Name of the data format'),
    'version': fields.String(required=True, description='. . . . Version of the data format'),
    'owner': fields.String(required=True, description='. . . . ID of who added the data format'),
    'whenAdded': fields.DateTime(required=True, dt_format='iso8601', description='. . . . When data format was added to the Catalog'),
    'modified': fields.DateTime(required=True, dt_format='iso8601', description='. . . . When data format was last modified'),
    'status': fields.String(required=True, description='. . . . Status of the data format'),
    'description': fields.String(required=True, description='. . . . Description of the data format'),
    'dataFormatUrl': fields.String(required=True, description='. . . . Url to the Data Format Specification')
})
# Envelope for GET /dataformats.
dataformats_get = _api.model('Data Format List', {'dataFormats': fields.List(fields.Nested(dataformat_fields_get))})
# Single data-format response: listing fields plus the full spec document.
dataformat_fields_by_id = _api.inherit('dataformat fields by id', dataformat_fields_get, {
    'spec': fields.Raw(required=True, description='The Data Format Specification (json)')
})
# Response for POST /dataformats: just the self-URL of the new data format.
dataformat_post = _api.model('Data Format post', {'dataFormatUrl': fields.String(required=True, description='. . . . Url to the Data Format Specification')})
# Request body for PATCH: owner plus the desired status transition.
patch_fields = _api.model('Patch Spec', {'owner': fields.String(required=True, description='User ID'),
                          'status': fields.String(required=True, enum=['published', 'revoked'], \
                                    description='. . . . .[published] is the only status change supported right now')
                         } )
# Generic error envelope used by the 4xx responses below.
error_message = _api.model('Error message', {'message': fields.String(description='. . . . .Details about the unsuccessful API request')})
# Query-string parser for GET /components filtering.
parser_components = _api.parser()
parser_components.add_argument("name", type=str, trim=True,
    location="args", help="Name of component to filter for")
parser_components.add_argument("version", type=str, trim=True,
    location="args", help="Version of component to filter for")
################
## Component ##
################
@_api.route("/components", endpoint="resource_components")
class Components(Resource):
    """Collection resource for Components in the onboarding catalog."""

    @_api.doc("get_components", description="Get list of Components in the catalog")
    @_api.marshal_with(components_get)
    @_api.response(200, 'Success, Components retrieved')
    @_api.response(500, 'Internal Server Error')
    @_api.expect(parser_components)
    def get(self):
        """List catalog components, optionally filtered by the ``name`` and
        ``version`` query arguments.

        Returns:
            tuple: ({"components": [..]}, 200)
        """
        only_latest = False
        only_published = False

        args = parser_components.parse_args()
        mockCat = MockCatalog()
        comps = mockCat.list_components(latest=only_latest, only_published=only_published)

        def format_record_component(obj):
            """Return a copy of the record with camelCase keys and ISO-8601 dates."""
            def format_value(v):
                # Datetimes are not JSON serializable; emit ISO-8601 strings.
                # isinstance (vs type ==) also covers datetime subclasses.
                if isinstance(v, datetime):
                    return v.isoformat()
                return v

            def to_camel_case(snake_str):
                components = snake_str.split('_')
                # We capitalize the first letter of each component except the first one
                # with the 'title' method and join them together.
                return components[0] + ''.join(x.title() for x in components[1:])

            return {to_camel_case(k): format_value(v) for k, v in obj.items()}

        def add_self_url(comp):
            # Self-link so clients can fetch the full spec for this component.
            comp["componentUrl"] = fields.Url("resource_component", absolute=True) \
                .output(None, {"component_id": comp["id"]})
            return comp

        def add_status(comp):
            # "whenRevoked" and "whenPublished" are used to get status
            comp["status"] = util.get_status_string_camel(comp)
            return comp

        def should_keep(comp):
            """Apply the name/version query-arg filters to one record."""
            # `is None` (identity) is the idiomatic test for an absent query arg.
            ok_name = args["name"] is None or args["name"] == comp["name"]
            ok_version = args["version"] is None or args["version"] == comp["version"]
            return ok_name and ok_version

        comps = [add_self_url(add_status(format_record_component(comp)))
                 for comp in comps if should_keep(comp)]

        return {"components": comps}, 200

    @_api.doc("post_component", description="Add a Component to the Catalog", body=component_fields_request)
    @_api.marshal_with(component_post)
    @_api.response(200, 'Success, Component added')
    @_api.response(400, 'Bad Request', model=error_message)
    @_api.response(409, 'Component already exists', model=error_message)
    @_api.response(500, 'Internal Server Error')
    @_api.expect(component_fields_request)
    def post(self):
        """Add a new component spec to the catalog.

        Expects a JSON body with ``owner`` and ``spec``; the spec must contain
        a ``self`` section providing ``name`` and ``version``.

        Returns:
            tuple: ({"componentUrl": url}, 200) on success; aborts with
            400/409 (JSON error message) otherwise.
        """
        resp = None
        try:
            http_body = request.get_json()
            user = http_body['owner']
            spec = http_body['spec']

            try:
                name = spec['self']['name']
                version = spec['self']['version']
            except Exception:
                raise DcaeException("(Component) Spec needs to have a 'self' section with 'name' and 'version'")

            mockCat = MockCatalog()
            # Pass False to do an add vs update
            mockCat.add_component(user, spec, False)

            component_id = mockCat.get_component_id(name, version)
            componentUrl = fields.Url("resource_component", absolute=True) \
                .output(None, {"component_id": component_id})
            resp = {"componentUrl": componentUrl}
        except KeyError as e:
            abort(code=400, message="Request field missing: {}".format(e))
        except DuplicateEntry as e:
            resp = e.message.replace("name:version", name + ":" + version)
            # We abort flask_restplus so our error message will override "marshal_with()" in response body
            abort(code=409, message=resp)
        except (CatalogError, DcaeException) as e:
            # str(e): abort's message must be serializable text, not an
            # exception object, or the error response itself fails to render.
            abort(code=400, message=str(e))

        return resp, 200
######################
## Component by ID ##
######################
@_api.route("/components/<string:component_id>", endpoint="resource_component")
class Component(Resource):
    """Resource for a single Component, addressed by its catalog id."""

    @_api.doc("get_component", description="Get a Component")
    @_api.marshal_with(component_fields_by_id)
    @_api.response(200, 'Success, Component retrieved')
    @_api.response(404, 'Component not found in Catalog', model=error_message)
    @_api.response(500, 'Internal Server Error')
    def get(self, component_id):
        """Fetch one component record, including its full spec (parsed JSON)."""
        resp = None
        try:
            mockCat = MockCatalog()
            comp = mockCat.get_component_by_id(component_id)
            status = util.get_status_string(comp)

            resp = { "id": comp["id"]
                , "name": comp['name']
                , "version": comp['version']
                , "whenAdded": comp['when_added'].isoformat()
                , "modified": comp["modified"].isoformat()
                , "owner": comp["owner"]
                , "description": comp['description']
                , "componentType": comp['component_type']
                , "spec": json.loads(comp["spec"])
                , "componentUrl": fields.Url("resource_component", absolute=True)
                    .output(None, {"component_id": comp["id"]})
                , "status": status
                }
        except MissingEntry as e:
            # str(e): abort's message must be serializable text, not an
            # exception object, or the error response itself fails to render.
            abort(code=404, message=str(e))

        return resp, 200

    @_api.doc("put_component", description="Replace a Component Spec in the Catalog", body=component_fields_request)
    @_api.response(200, 'Success, Component replaced')
    @_api.response(400, 'Bad Request', model=error_message)
    @_api.response(404, 'Component not found in Catalog', model=error_message)
    @_api.response(500, 'Internal Server Error')
    @_api.expect(component_fields_request)
    def put(self, component_id):
        """Replace an existing component's spec.

        Expects a JSON body with ``owner`` and ``spec``.
        """
        resp = None
        try:
            http_body = request.get_json()
            user = http_body['owner']
            spec = http_body['spec']

            mockCat = MockCatalog()
            # Pass True to do an update vs add
            mockCat.add_component(user, spec, True)
        except KeyError as e:
            # Missing 'owner'/'spec' is a client error, not a server error.
            abort(code=400, message="Request field missing: {}".format(e))
        except MissingEntry as e:
            abort(code=404, message=str(e))
        except (FrozenEntry, CatalogError, DcaeException) as e:
            abort(code=400, message=str(e))

        return resp, 200

    @_api.doc("patch_component", description="Update a Component's status in the Catalog", body=patch_fields)
    @_api.response(200, 'Success, Component status updated')
    @_api.response(400, 'Bad Request', model=error_message)
    @_api.response(403, 'Forbidden Request', model=error_message)
    @_api.response(404, 'Component not found in Catalog', model=error_message)
    @_api.response(500, 'Internal Server Error')
    @_api.expect(patch_fields)
    def patch(self, component_id):
        """Change a component's status.

        Only the 'published' transition is supported; 'revoked' is recognized
        but rejected as not yet implemented.
        """
        resp = None
        try:
            http_body = request.get_json()
            user = http_body['owner']
            field = http_body['status']

            if field not in ['published', 'revoked']:
                raise DcaeException("Unknown status in request: '{}'".format(field))
            if field == 'revoked':
                raise DcaeException("This status is not supported yet: '{}'".format(field))

            mockCat = MockCatalog()
            comp = mockCat.get_component_by_id(component_id)
            comp_name = comp['name']
            comp_version = comp['version']
            mockCat.publish_component(user, comp_name, comp_version)
        except KeyError as e:
            # Missing 'owner'/'status' is a client error, not a server error.
            abort(code=400, message="Request field missing: {}".format(e))
        except MissingEntry as e:
            abort(code=404, message=str(e))
        except ForbiddenRequest as e:
            abort(code=403, message=str(e))
        except (CatalogError, DcaeException) as e:
            abort(code=400, message=str(e))

        return resp, 200
###################
## Data Format ##
###################
@_api.route("/dataformats", endpoint="resource_formats")
class DataFormats(Resource):
"""Data Format resource"""
@_api.doc("get_dataformats", description="Get list of Data Formats in the catalog")
@_api.marshal_with(dataformats_get)
@_api.response(200, 'Success, Data Formats retrieved')
@_api.response(500, 'Internal Server Error')
def get(self):
only_latest = False
only_published = False
mockCat = MockCatalog()
formats = mockCat.list_formats(latest=only_latest, only_published=only_published)
def format_record_dataformat(obj):
def format_value(v):
if type(v) == datetime:
return v.isoformat()
else:
return v
def to_camel_case(snake_str):
components = snake_str.split('_')
# We capitalize the first letter of each component except the first one
# with the 'title' method and join them together.
return components[0] + ''.join(x.title() for x in components[1:])
| |
you; your;
yours;
nimma
ೕವ
her; him; his; it; its; one; she; their; them;
they; we; you; your;
nivu
ಅದರ
her; his; its;
adara
ಅವಳ
her; him; his; it; its; she; their; them; they;
you; your;
avalu
ಅವಳನು
her; him; his; it; its; she; their; you; your;
avalannu
itself
ತನೇ
thannanne
ಸತಃ
even; itself; self;
svathh
j
jacket
ಕವಚ
kavacha
job
ಕೇ
function; job; office; task;
kacheri
ಾನ
job; location; place; position; post; seat;
setting; site; situation; status;
sthhaana
job
ೆಲಸ
act; action; assignment; effect; function;
impression; job; operate; task; work;
kelasa
ಉೋಗ
employment; job; profession;
udyeuga
ಪಾಗ
assignment; job; task;
parithyaaga
join
ಕಟು
join; link; tie;
kattu
ಸಂಪಕ
combine; connect; connection; join; link;
samparka
joint
ಸಂ
samdhi
joke
ಾಸ
haasya
judge
ಾಾೕಶ
nyaayaadhisha
judgment
ೕಾನ
thirmaana
juice
ರಸ
rasa
jump
ಾೆ
haarike
junior
ಯ
kiriya
jury
ೕಪಾರರ
thirpugaarara
just
ೇವಲ
just; simple; simply; unit;
kevala
ಾಯಾದ
fair; just; right;
nyaayavaada
justify
ಸಾರುವ
sariyaagiruva
k
keep
catch; handle; keep; retain;
hidi
ಸಂರಸು
hold; keep; maintain; receive; retain;
samrakshhisu
ಪೆ
expand; extend; get; hold; increase; keep;
maintain; receive;
padeyiri
ಮನರಂಜೆ
entertainment; hold; keep; maintain;
manaramjane
key
ೕೈ
kilike
ಗುಂ
button; key;
gumdi
kill
ೊಲು
keullu
kind
ೆಡ
bedagi
king
ಾಜ
raaja
kiss
ಮುತು
muththu
kitchen
ಅೆ
ಮೆ
adige mane
knee
ಮಂ
mamdi
knife
ಾಕು
chaaku
know
ಾನ
know; knowledge; known;
jnjaana
ರುವ
know; knowledge;
thilidiruva
knowledge
ರುವ
know; knowledge;
thilidiruva
ಾನ
knowledge; science;
vijnjaana
ಾನ
know; knowledge; known;
jnjaana
known
ಾನ
know; knowledge; known;
jnjaana
ೊರುವ
geuththiruva
l
lab
ಪೕಾಲಯ
prayeugaalaya
lack
ೊರೆ
lack; miss; want;
keurathe
ೈರುಾಜ
lack; miss; want;
geruhaajari
ladder
ಪ
ladder; scale;
chippu
ಏ
eni
lady
ಮೆ
lady; queen;
mahile
ಾ
lady; queen;
raani
lake
ಸೋವರ
sareuvara
land
ೇಶ
country; ground; land; region;
desha
ಭೂ
bottom; dirt; earth; floor; ground; land; soil;
world;
bhumi
landscape
ಭೂದೃಶ
bhudrrishya
language
ಾೆ
language; speech; tongue;
bhaashhe
ಪದ
language; speech; term; word;
pada
large
large
ದೂರದ
away; broad; course; far; large; long; off;
path; remote; road; way; wide; widely;
duradalli
ದೂರದ
away; broad; far; large; long; off; remote;
wide; widely;
durada
ಅಗಲಾದ
away; broad; far; large; long; remote; wide;
widely;
agalavaada
ೊಡ
big; brilliant; grand; great; high; large; tall;
deudda
last
ೊೆಯ
continue; final; finally; last; latter; stand;
ultimately;
keuneya
later
ನಂತರ
after; at; back; behind; later; on; so; then; to;
towards;
namthara
latter
ೊೆಯ
continue; final; finally; last; latter; stand;
ultimately;
keuneya
ೕಯ
dvithiya
laugh
ನಗುತ
naguththa
ಾಸ
haasa
law
ಾನೂನು
law; right;
kaanunu
ಬಲ
law; right;
bala
lawyer
ವೕಲ
vakila
lay
ಇಡು
idu
ಪ
lay; place; put;
put
ೋಗು
lay; place; put;
seugu
layer
ಪದರ
film; fold; layer;
padara
lead
ನೆೊಂಡು
direct; guide; lead; manage;
nadesikeumdu
leader
ಾಯಕ
head; leader;
naayaka
ಪಾನ
boss; employer; head; leader;
pradhaana
ತೆ
boss; employer; head; leader;
thale
leadership
ೇಶನ
direction; leadership; management; way;
nirdeshana
ಾಯಕತ
naayakathva
leading
ಕೆೊಯುವ
karedeuyyuva
league
ಸಂಘ
samgha
learn
ಕ
learn; study;
kali
ಾಠ
learn; lesson; teach;
paatha
least
ಸಲ
brief; few; least; little; short; somewhat;
svalpa
leather
ಚಮ
coat; hide; leather; skin;
charma
leave
ಅನುಮ
leave; permission; permit;
anumathi
ಪರಾನ
leave; permission; permit;
paravaanagi
ಾಯು
saayu
lecture
ಉಪಾಸ
upanyaasa
left
ಟು
left; loose; quit;
bittu
leg
ೆ
foot; leg;
leg
ಅ
base; basis; foot; leg;
adi
ಮುಂಾಲು
base; basis; foot; leg;
mumgaalu
length
ಉದ
udda
ಾಾವ
kaalaavadhi
less
ಕಯ
kadimeya
lesson
ಾಠ
learn; lesson; teach;
paatha
let
ಾೆ
engage; hire; let; rent;
baadige
ಸವಲತು
admit; allow; let; permit;
savalaththu
ಅನುಮ
ೊಡು
admit; allow; let; permit;
anumathi keudu
letter
ಅರ
akshhara
level
ದುೊಸಲು
level; smooth;
medugeulisalu
ವಸಗೃಹ
flat; level; map; plane;
vasathigrriha
lie
ಸುಳ
sullu
life
ೕವನ
life; living;
jivana
life
ಬದುಕು
exist; life; live;
baduku
lift
ಎತು
lift; raise;
eththu
ಎತುಗ
elevator; lift;
eththuga
ಏಸು
elevator; lift; raise;
erisu
light
ೆರವೊ
bright; clear; clearly; light; obvious;
theravugeulisi
ಪಾಶಾನ
bright; clear; light;
prakaashamaana
ೆಳಗು
belagu
ಸಷಾ
bright; clear; clearly; light; obviously;
spashhtavaagi
like
ಸದೃಶ
like; same; similar; similarly;
sadrrisha
ಾ
as; data; here; how; information; like; so;
than; that; then; there; thus; what; which;
maahithi
ಧಸು
appreciate; estimate; like; prize; rate; value;
nirdharisu
ಅಂಾಜು
appreciate; estimate; like; prize; rate; value;
aamdaaju
ಸಮ
equal; even; like; same; similar;
sama
likely
ಬಹುಶಃ
likely; maybe; possibly; probably;
bahushh
ಸಂಭವೕಯ
likely; possible; potential;
sambhavaniya
ಸಂಾವ
likely; probably;
sambhaavya
limit
ಗ
border; limit;
gadi
ೕಾ
simaa
limited
ಕ
limited; little; low; lower; poor; reduce; small;
kadime
ೕತ
simitha
line
ಾವ
haavu
ಾಲು
line; row;
saalu
link
ಕಟು
join; link; tie;
kattu
ೊಂ
connection; link; tie;
keumdi
ಸಂಪಕ
combine; connect; connection; join; link;
samparka
lip
ೆ
border; edge; lip; side;
meune
lip
ತು
border; edge; lip; side;
thuti
ಬೆ
border; edge; lip; side;
bale
list
ಪ
patti
listen
ಆಸು
hear; listen;
aalisu
literally
ಅರಶಃ
akshharashh
literature
ಾತ
saahithya
little
ಕ
limited; little; low; lower; poor; reduce; small;
kadime
ಸಣ
little; minor; small;
sanna
ಸಲ
brief; few; least; little; short; somewhat;
svalpa
ಹುಡುಗ
boy; little; small;
huduga
live
ಾಯರತ
alive; busy; live; living;
kaaryaniratha
ರಕೕರುವ
rakthahiruva
ೕವಂತ
alive; live; living;
jivamtha
ಬದುಕು
exist; life; live;
baduku
living
ಾಯರತ
alive; busy; live; living;
kaaryaniratha
ೕವಂತ
alive; live; living;
jivamtha
ೕವನ
life; living;
jivana
load
ೋ
charge; load;
leud
ಪಾಸ
charge; load;
prayaasa
loan
ಾಲ
blame; credit; debt; loan;
saala
ೆ
credit; loan;
kredit
ಕಡೊಡು
credit; loan;
kadakeudu
local
ಸೕಯ
sthhaliya
ಸೕಯಾ
sthhaliyavaagi
location
ಸಳ
location; place; position; seat; setting; site;
situation; spot;
sthhala
ಾನ
job; location; place; position; post; seat;
setting; site; situation; status;
sthhaana
location
ೈ
location; place; seat; site; spot;
set
lock
ತೂಬು
thubu
log
ಾಖೆ
document; log; paper; record; register;
daakhale
logical
ಾಕಾ
consistent; logical;
thaarkikavaagi
ಾಕ
consistent; logical;
thaarkika
lonely
ಒಂಾ
alone; lonely; only; single;
oamtiyaagi
ಪೇಕಾದ
prathyekavaada
ಾತ
alone; lonely; merely; only; single; unique;
maathra
long
ದೂರದ
away; broad; far; large; long; off; remote;
wide; widely;
durada
ದೂರದ
away; broad; course; far; large; long; off;
path; remote; road; way; wide; widely;
duradalli
ೕಘ
dirgha
ಅಗಲಾದ
away; broad; far; large; long; remote; wide;
widely;
agalavaada
look
ಾಣು
appear; look; seem;
kaanu
ೈಗಾರ
look; see; watch;
kegadiyaara
ೋಟ
consider; look; see; view; watch;
neuta
ೋಟವನು
air; appearance; aspect; look;
neutavannu
ೋಡಲು
consider; look; see; view; watch;
neudalu
ೋಡು
look; mark; note; notice; see; watch;
neudu
loose
ಪತಸು
loose; quit;
parithyajisu
ಟು
left; loose; quit;
bittu
loss
ನಷ
fault; loss;
nashhta
lost
ಕೆದು
kaledu
lot
ಪಾಣ
amount; game; ground; lot; number; part;
piece; quantity; ratio; relation; relationship;
share;
pramaana
ಾಲು
divide; game; lot; part; piece; separate;
share;
paalu
lot
ಅದೃಷ
accident; chance; fortune; lot; occasion;
opportunity; skill;
adrrishhta
ಆಟ
game; lot; match; part; piece; play; share;
sport;
aata
ಾಗ
bit; concern; game; lot; part; piece; role;
roll; room; share; slice;
bhaaga
ಗುಂಪ
amount; band; bunch; ground; group; lot;
quantity;
gumpu
loud
ಉಚ
ಸರದ
loud; sound;
uchcha svarada
ಬಲಾದ
hard; loud; powerful;
balavaada
ಶಾ
hard; loud; powerful;
shakthishaali
love
priye
low
ಕ
limited; little; low; lower; poor; reduce; small;
kadime
lower
ಕ
limited; little; low; lower; poor; reduce; small;
kadime
luck
ಸುಖ
chance; fortune; luck;
sukha
lunch
ಆಾರ
bread; diet; dinner; eat; food; lunch; meal;
aahaara
ಊಟ
dinner; eat; food; lunch; meal;
uta
ನು
dinner; eat; food; lunch; meal;
thinnu
ೋಜನ
dinner; lunch;
bheujana
m
machine
ಯಂತ
engine; machine;
yamthra
mad
ಹುಚುದ
crazy; mad;
huchchuhidida
ಹುೆದ
crazy; mad;
huchchedda
ಪಾಶನ
crazy; mad;
prajnjaashunya
ೇ
crazy; mad;
kreji
magazine
ಯತಾಕ
niyathakaalika
mail
ೊಯ
keuriyar
main
ಮುಖ
mukhya
mainly
ಮುಖಾ
especially; mainly; primarily;
mukhyavaagi
maintain
ಮನರಂಜೆ
entertainment; hold; keep; maintain;
manaramjane
ಸಂರಸು
hold; keep; maintain; receive; retain;
samrakshhisu
ಪೆ
expand; extend; get; hold; increase; keep;
maintain; receive;
padeyiri
ವಸಲು
maintain; retain;
nirvahisalu
maintenance
ಸಂರೆ
samrakshhane
major
ಪಮುಖ
important; major;
pramukha
make
ಮುೆ
make; mark;
mudre
ಉತ
create; generate; make; produce;
uthpaththi
ಏನು
anything; do; make; some; something;
somewhat; what; which;
enu
ಾಡು
act; do; make; operate; serve;
maadu
ಾಡುವದು
make; manufacturing; preparation;
maaduvudu
male
ಮರದ ಗೂಟ
cap; male;
marada guta
ಪರುಷೕಗ
purushhayeugya
ಪರುಷ
purushha
ಗಂಡು
gamdu
man
ಮನುಷ
human; husband; man; person;
manushhya
ವ
human; individual; man; person;
vyakthi
manage
ಆಡತ
administration; manage; management; rule;
aadalitha
ನೆೊಂಡು
direct; guide; lead; manage;
nadesikeumdu
ವ
nirvahisi
management
ಆಡತ
administration; manage; management; rule;
aadalitha
ೇಶನ
direction; leadership; management; way;
nirdeshana
manager
ೇಶಕ
director; manager;
nirdeshaka
manner
ಾ
away; course; manner; method; mode;
mood; off; path; road; track; way;
daari
manner
ಾನ
approach; manner; method;
vidhaana
manufacturer
ತಾರಕ
thayaaraka
ಾಪಕ
nirmaapaka
manufacturing
ಾಡುವದು
make; manufacturing; preparation;
maaduvudu
many
ಅೇಕ
many; much; numerous;
aneka
ಹಲವ
many; several;
halavu
map
ಭೂಪಟ
card; design; map; menu; plan; project;
ticket;
bhupata
ನು
card; map; menu;
menu
ೕಜೆ
design; draft; map; plan; plane; project;
scheme;
yeujane
ಾಸ
design; draft; map; plan; project; scheme;
vinyaasa
ವಸಗೃಹ
flat; level; map; plane;
vasathigrriha
march
ಸರಹದು
march; run; step; walk;
sarahaddu
ೋಗುವದು
go; march; run; walk;
heuguvudu
ರ
march; run; walk;
ran
ನೆ
course; march; speed; step; walk;
nade
mark
ೆ
character; mark; sign; signal;
chihne
ೋಡು
look; mark; note; notice; see; watch;
neudu
ಮೆ
majje
ಮುೆ
make; mark;
mudre
ೆಾ
sensaarship
ಗುರುತು
act; action; effect; impression; indicate;
mark;
guruthu
market
ಾರುಕೆ
maarukatte
ಬಾ
bajaar
marriage
ಮದುೆ
marriage; wedding;
maduve
ಾಹ
marriage; wedding;
vivaaha
married
ಮದುೆಾದ
maduveyaada
marry
ಮದುೆಾಗು
maduveyaagu
master
ಾಂಯ
champion; master;
chaampiyan
ಾಾಪಕ
master; professor; teacher;
praadhyaapaka
match
ಆಟ
game; lot; match; part; piece; play; share;
sport;
aata
mate
ಸಹೋ
sahadyeugi
ಒಡಾ
buddy; mate;
odanaadi
material
ವಸು
affair; against; business; material; matter;
object; opposite; stuff; subject; substance;
theme; thing; topic; towards;
vasthu
ಷಯ
affair; business; case; content; deal;
material; matter; object; shop; stuff; subject;
substance; theme; thing; topic; trade;
vishhaya
ಮೂಲವಸು
material; matter;
mulavasthu
ಬೆ
batte
ಅಂಾಂಶ
material; stuff; web;
aamgaamsha
ದವ
material; stuff;
dravya
matter
ವವಾರ
affair; business; case; deal; matter; shop;
trade;
vyavahaara
ಷಯ
affair; business; case; content; deal;
material; matter; object; shop; stuff; subject;
substance; theme; thing; topic; trade;
vishhaya
ವಸು
affair; against; business; material; matter;
object; opposite; stuff; subject; substance;
theme; thing; topic; towards;
vasthu
ಪಸುವದು
examination; matter; question; request;
prashnisuvudu
ಾಯ
act; action; affair; business; case; deal;
matter; operate; task;
kaarya
ಪೆ
consult; examine; matter; problem;
question; request;
prashne
ಮೂಲವಸು
material; matter;
mulavasthu
maximum
ಅತಕ
athyadhika
may
ಾರುಣ
may; youth;
thaarunya
maybe
ಬಹುಶಃ
likely; maybe; possibly; probably;
bahushh
me
ನನೆ
i; me;
nanage
ಾನು
i; me;
naanu
meal
ಆಾರ
bread; diet; dinner; eat; food; lunch; meal;
aahaara
ಊಟ
dinner; eat; food; lunch; meal;
uta
ನು
dinner; eat; food; lunch; meal;
thinnu
ಟು
hittu
meaning
ಅಥದ
importance; meaning; mind; sense;
significance; spirit; understanding;
arthhadalli
ಾಮುಖ
extent; importance; meaning; sense;
significance;
praamukhya
ಅಥ
importance; include; meaning; mind;
realize; see; sense; significance;
understand; understanding;
arthha
measurement
ಾಪನ
bar; beat; measurement;
maapana
meat
ಾಂಸ
maamsa
media
ಾಧಮ
maadhyama
medical
ೈದೕಯ
vedyakiya
medicine
ಔಷಧ
aushhadha
ಔಷ
aushhadhi
medium
ಅಂಗ
aamga
ಮಧಮ
madhyama
ಸಾಸ
average; medium;
saraasari
meeting
ಸೆ
meeting; session;
sabhe
member
ಸದಸ
sadasya
membership
ಸದಸತ
sadasyathva
memory
ೆನಪ
nenapu
ಸರೆ
smarane
mental
ಾನಕ
maanasika
ಾನಕಾ
mental; spiritual;
maanasikavaagi
mention
ನಮೂಸು
mention; note;
namudisu
menu
ಭೂಪಟ
card; design; map; menu; plan; project;
ticket;
bhupata
ನು
card; map; menu;
menu
merely
ಾತ
alone; lonely; merely; only; single; unique;
maathra
mess
ೊಳಕು
dirt; mess;
keulaku
message
ಾಯಾರ
raayabhaara
metal
ೋಹದ
leuhada
method
ಾನ
approach; manner; method;
vidhaana
ಾಗ
away; course; distance; method; off; path;
road; way;
maarga
ಾ
away; course; manner; method; mode;
mood; off; path; road; track; way;
daari
ಾಷ
form; method; mode; mood; shape; way;
phyaashhan
middle
ೇಂದ
farm; middle; station; stop;
kemdra
midnight
ಮಧಾ
madhyaraathri
might
ಅಾರ
can; force; might; power; strength;
adhikaara
ಅಕಬಲ
adhikabala
milk
ಾಲು
haalu
mind
ಅಥ
importance; include; meaning; mind;
realize; see; sense; significance;
understand; understanding;
arthha
ಅಥದ
importance; meaning; mind; sense;
significance; spirit; understanding;
arthhadalli
ಮನೆ
mind; spirit;
manassige
mine
ಗ
mine; my;
gani
ನನ
mine; my; myself;
nanna
minimum
ಕಷ
kanishhtha
minor
ಸಣ
little; minor; small;
sanna
minute
ಷ
nimishha
mirror
ಪಫಸು
prathiphalisu
ಕನ
kannadi
miss
ಕೆ
kanye
miss
ೊರೆ
lack; miss; want;
keurathe
ೈರುಾಜ
lack; miss; want;
geruhaajari
mission
ೕಗ
niyeuga
mistake
ಪಾದ
error; fault; mistake;
pramaada
ೋಷ
error; fault; mistake;
deushha
ಕಪಟ
error; mistake;
kapata
ಭ
error; mistake;
bhrame
mix
ೆೆ
mix; rain; shower; touch;
beresi
ಶಣ
mix; mixture;
mishrana
mixed
ತ
mishritha
mixture
ಶಣ
mix; mixture;
mishrana
mobile
ಚರ
chara
mode
ಾಷ
form; method; mode; mood; shape; way;
phyaashhan
ೕ
meud
ಾ
away; course; manner; method; mode;
mood; off; path; road; track; way;
daari
model
ಾದ
agent; design; example; instance; model;
pattern; proof; sample; test;
maadari
moment
ಣ
kshhana
money
ಹಣ
currency; money;
hana
monitor
ಯಂಸಲು
check; control; monitor;
niyamthrisalu
ೕಾರೆ
control; monitor;
melvichaarane
month
ಂಗಳ
thimgalu
mood
ತ
chiththa
ಹುಾೆ
huchchaatike
ಮನಃ
manhsthhithi
ಾ
away; course; manner; method; mode;
mood; off; path; road; track; way;
daari
mood
ಾಷ
form; method; mode; mood; shape; way;
phyaashhan
more
ಮತಷು
also; more; moreover; plus;
maththashhtu
ೆಚು
more; much; plus;
hechchu
moreover
ಮತಷು
also; more; moreover; plus;
maththashhtu
ೕಾ
also; moreover; too;
melaagi
ಸಹ
actually; also; despite; even; moreover; still;
too;
saha
morning
ೆೆ
morning; tomorrow;
beligge
ಾಂ
morning; tomorrow;
maarnimg
mortgage
ಅಡಾನ
adamaana
most
ಅತಂತ
most; much; quite; very;
athyamtha
mostly
ೆಾ
hechchaagi
mother
ಾ
thaayi
motor
ೕಾ
meutaar
ಎಂ
engine; motor;
eamjin
mountain
ಪವತಗಳ
parvathagalu
ಪವತ
parvatha
mouse
ೌ
maus
mouth
ಾ
baayi
ಮೂ
bill; mouth;
muthi
move
ಸಸಲು
move; shift;
sarisalu
ಚಲೆಯ
chalaneya
ಚಳವ
campaign; move;
chaluvali
movie
ಚಲನತ
film; movie;
chalanachithra
much
ಅೇಕ
many; much; numerous;
aneka
ೆಚು
more; much; plus;
hechchu
much
ಅತಂತ
most; much; quite; very;
athyamtha
mud
ಮಣು
bottom; dirt; earth; floor; ground; mud; soil;
mannu
muscle
ಾಯು
snaayu
music
ಸಂೕತ
concert; music;
samgitha
must
ಮ
mast
ಕತವ
duty; must; obligation; owe;
karthavya
my
ನನ
mine; my; myself;
nanna
ಗ
mine; my;
gani
myself
ನನ
mine; my; myself;
nanna
n
nail
ೆ
meule
name
ೆಸರು
hesaru
narrow
ಾಗು
narrow; tight;
kiridaagu
nasty
ಅಸಹಕರ
asahyakara
ಅಸಹಾದ
nasty; ugly;
asahyavaada
ಅೕಲಾದ
nasty; ugly;
ashlilavaada
nation
ಾಷ
nation; people;
raashhtra
ಜನರು
nation; people;
janaru
national
ೇೕಯ
national; native;
deshiya
native
ಜನದ
native; natural;
janmasiddha
ೇೕಯ
national; native;
deshiya
ಮೂಲಾ
mulanivaasi
natural
ಜನದ
native; natural;
janmasiddha
naturally
ೋ
keurs
ೈಸಕಾ
nesargikavaagi
nature
ನಡೆ
character; nature;
nadathe
ಪಕೃ
prakrrithi
near
ಸತ
sannihitha
ಹರದ
close; near;
haththirada
ೕೆ
about; above; across; after; against; around;
at; by; during; for; in; inside; into; near; of;
on; over; round; to; towards; up; upon;
upstairs; with; within;
mele
ಮುಂನ
following; near; next;
mumdina
ಾಲದ
at; during; for; in; into; near; on; whereas;
while; with; within;
kaaladalli
ಅನುಸಸುವ
following; near; next;
anusarisuva
nearby
ಸೕಪದ
samipada
nearly
ಕುತು
about; above; across; almost; around; at;
nearly; of; on; over; round;
kurithu
ಕಟಾ
closely; nearly;
nikatavaagi
ಬಹುೇಕ
about; almost; around; nearly;
bahutheka
neat
ಾಜೂಾದ
naajukaada
ಶುಾದ
clean; clear; neat; pure;
shuchiyaada
necessarily
ಅಗತಾ
agathyavaagi
necessary
ಅಗತ
important; necessary; need; require; want;
agathya
ಅಾಯ
anivaarya
ಅಗತಾದ
agathyavaada
neck
ಕತು
collar; neck; throat;
kaththu
ಕುೆಯ
collar; neck; throat;
kuththigeya
need
ಅಗತ
important; necessary; need; require; want;
agathya
ಅವಶಕೆ
ask; demand; desire; need; require;
requirement; want; wish;
avashyakathe
negative
ಋಾತಕ
rrinaathmaka
negotiate
ಾಸು
saadhisu
negotiation
ಸಂಾನ
samdhaana
neither
neither
ಮೇನೂ
ಇಲ
neither; nor;
maththenu illa
ಾವದೂ
ಅಲ
yaavudu alla
ಾವದೂ
ಇಲ
neither; no; none;
yaavudu illa
ಇಲ
as; here; neither; no; none; not; that; there;
illa
nerve
ನರ
nara
nervous
ಅರಾದ
asthhiravaada
net
ವಳ
nivvala
ಾಲ
net; network;
jaalari
ಾಲಬಂಧ
net; network;
jaalabamdha
ಎೆ
net; stretch;
ele
ನೂಲುಹು
nuluhuri
network
ಾಲಬಂಧ
net; network;
jaalabamdha
ಾಲ
net; network;
jaalari
never
ಎಂಗೂ
ever; never;
eamdigu
new
ಾಾ
cool; fresh; new; recent;
thaajaa
ೊಸ
new; recent;
heusa
news
ಸು
suddi
next
ಅನುಸಸುವ
following; near; next;
anusarisuva
ಮುಂನ
following; near; next;
mumdina
nice
ಮುಾದ
beautiful; cute; fair; fine; nice; pretty;
muddaada
ನಯೕಲ
friendly; nice;
vinayashila
ಯ
dear; expensive; nice; valuable;
priya
ಆಾದಕರ
nice; pleasant;
aahlaadakara
ಆಾಮಾಯಕ
comfortable; nice;
aaraamadaayaka
ದುಾ
dear; expensive; nice; valuable;
dubaari
night
ಾ
raathri
no
ಅಲ
no; not;
alla
no
ಇಲ
as; here; neither; no; none; not; that; there;
illa
ಾವದೂ
ಇಲ
neither; no; none;
yaavudu illa
nobody
ಾವನೂ
ಇಲ
yaavanu illa
noise
ಸದು
noise; sound;
saddu
none
ಾವದೂ
ಇಲ
neither; no; none;
yaavudu illa
ಇಲ
as; here; neither; no; none; not; that; there;
illa
ಏನೂ ಇಲ
none; nothing;
enu illa
nor
ಮೇನೂ
ಇಲ
neither; nor;
maththenu illa
normal
ಾಾರಣ
saadhaarana
normally
ಾಾನಾ
common; normally; ordinary; usual; usually;
saamaanyavaagi
north
ಉತರ
answer; north; reply; respond; response;
uththara
nose
ಮೂಗು
mugu
not
ಇಲ
as; here; neither; no; none; not; that; there;
illa
ಅಲ
no; not;
alla
note
ೋಡು
look; mark; note; notice; see; watch;
neudu
ನಮೂಸು
mention; note;
namudisu
nothing
ಏನೂ ಇಲ
none; nothing;
enu illa
ಶನ
shunya
notice
ೋಡು
look; mark; note; notice; see; watch;
neudu
ಪಕಟೆ
ad; advice; notice;
prakatane
novel
ಮೂಲ
novel; original; originally; primary; source;
spring;
mula
ಅಪವ
apurva
now
ಈಗ
iga
nowhere
ಇಲದ
ಾಗ
illada jaaga
number
ಪಾಣ
amount; game; ground; lot; number; part;
piece; quantity; ratio; relation; relationship;
share;
pramaana
ೆಾಾರ
figure; number;
lekkaachaara
ಸಂೆ
figure; number;
samkhye
numerous
ಅೇಕ
many; much; numerous;
aneka
nurse
ಾ
daadi
o
object
ಉೇಸುವದು
end; goal; intention; object; purpose;
uddeshisuvudu
ವಸು
affair; against; business; material; matter;
object; opposite; stuff; subject; substance;
theme; thing; topic; towards;
vasthu
ಉೇಶ
end; goal; intention; object; objective;
purpose;
uddesha
ಷಯ
affair; business; case; content; deal;
material; matter; object; shop; stuff; subject;
substance; theme; thing; topic; trade;
vishhaya
ಗು
end; goal; intention; object; purpose; target;
guri
objective
ಉೇಶ
end; goal; intention; object; objective;
purpose;
uddesha
obligation
ಕಟುಾಡು
duty; obligation;
kattupaadu
ಕತವ
duty; must; obligation; owe;
karthavya
obtain
ಗಸಲು
galisalu
obvious
ೆರವೊ
bright; clear; clearly; light; obvious;
theravugeulisi
ಪಕಟ
express; obvious;
prakata
obviously
ಸಷಾ
bright; clear; clearly; light; obviously;
spashhtavaagi
ಪಕಟಾ
prakatavaagi
occasion
ಅದೃಷ
accident; chance; fortune; lot; occasion;
opportunity; skill;
adrrishhta
ಅವಾಶ
chance; occasion; opportunity;
avakaasha
ಾರಣ
cause; challenge; occasion; reason;
kaarana
ಸಂದಭ
cause; chance; context; occasion;
opportunity; reason;
samdarbha
occur
ಸಂಭಸುವ
appear; happen; occur;
sambhavisuva
ಆಗು
appear; happen; occur;
aagu
odd
ತಾದ
vichithravaada
ಪರೕಯ
curious; foreign; odd; strange; unusual;
weird;
parakiya
of
ಆ
at; by; for; from; in; into; of; off; out; outside;
over; to; towards; up;
aaph
ಇಂದ
about; against; at; by; for; from; in; inside;
into; of; off; on; out; outside; over; round;
since; through; to; towards; up; upon; within;
iamda
ಕುತು
about; above; across; almost; around; at;
nearly; of; on; over; round;
kurithu
ೆ
about; after; against; around; at; by; for; in;
inside; into; of; on; round; to; towards; up;
upon; within;
ge
ಮೂಲಕ
above; across; at; by; from; of; on; over;
through;
mulaka
ೕೆ
about; above; across; after; against; around;
at; by; during; for; in; inside; into; near; of;
on; over; round; to; towards; up; upon;
upstairs; with; within;
mele
off
ಆ
at; by; for; from; in; into; of; | |
in provisioned_vms:
vms.append(self.info(vm['name'], vm=vm, ignore_volumes=True, floating_ips=floating_ips))
return sorted(vms, key=lambda x: x['name'])
def console(self, name, tunnel=False, web=False):
    """Open (or return) the web VNC console url of VM *name*.

    :param name: name of the VM
    :param tunnel: accepted for interface compatibility; unused here
    :param web: when True, return the url instead of opening a browser
    :return: the url when web is True, otherwise None
    """
    try:
        vm = self._get_vm(name)
    except ApiException as exc:
        error("Unable to retrieve VM %s. %s" % (name, exc))
        return None
    if vm is None:
        error("VM %s not found" % name)
        return None
    try:
        # url = self.conn.create_instance_console_access_token(
        # instance_id=vm['id'], console_type='serial').result['href']
        url = "https://cloud.ibm.com/vpc-ext/compute/vs/%s~%s/vnc" % (self.region, vm['id'])
    except ApiException as exc:
        error("Unable to retrieve console access. %s" % exc)
        return None
    if web:
        return url
    containerized = os.path.exists("/i_am_a_container")
    if self.debug or containerized:
        # inside a container we cannot open a browser; print the url instead
        pprint("Open the following url:\n%s" % url if containerized else url)
    else:
        pprint("Opening url: %s" % url)
        webbrowser.open(url, new=2, autoraise=True)
    return None
def serialconsole(self, name, web=False):
    """Open (or return) the web serial console url of VM *name*.

    :param name: name of the VM
    :param web: when True, return the url instead of opening a browser
    :return: the url when web is True, otherwise None
    """
    try:
        vm = self._get_vm(name)
    except ApiException as exc:
        error("Unable to retrieve VM %s. %s" % (name, exc))
        return None
    if vm is None:
        error("VM %s not found" % name)
        return None
    try:
        url = "https://cloud.ibm.com/vpc-ext/compute/vs/%s~%s/serial" % (self.region, vm['id'])
    except ApiException as exc:
        error("Unable to retrieve console access. %s" % exc)
        return None
    if web:
        return url
    containerized = os.path.exists("/i_am_a_container")
    if self.debug or containerized:
        # inside a container we cannot open a browser; print the url instead
        pprint("Open the following url:\n%s" % url if containerized else url)
    else:
        pprint("Opening url: %s" % url)
        webbrowser.open(url, new=2, autoraise=True)
    return None
# Collect a yaml-friendly dict describing VM *name*: status, ip, flavor,
# cpus/memory, image, metadata tags, NICs and (optionally) disks.
# NOTE(review): fields=[] is a mutable default argument; it is never mutated
# here so it is harmless, but consider fields=None.
def info(self, name, output='plain', fields=[], values=False, vm=None, ignore_volumes=False, floating_ips=None,
debug=False):
yamlinfo = {}
# callers may pass the vm dict directly (e.g. list()) to avoid a second lookup
if vm is None:
try:
vm = self._get_vm(name)
if vm is None:
error('VM %s not found' % name)
return yamlinfo
except ApiException as exc:
error('Unable to retrieve VM %s. %s' % (name, exc))
return yamlinfo
state = vm['status']
# floating_ips may be precomputed by the caller; otherwise build a map of
# available floating ips keyed by their target (NIC) id
if floating_ips is None:
try:
floating_ips = {x['target']['id']: x for x in
self.conn.list_floating_ips().result['floating_ips'] if x['status'] == 'available'}
except ApiException as exc:
error('Unable to retrieve floating ips. %s' % exc)
return yamlinfo
ips = []
for network in vm['network_interfaces']:
if network['id'] not in floating_ips:
continue
ips.append(floating_ips[network['id']]['address'])
ip = ','.join(ips)
# zone = vm['zone']['name']
image = vm['image']['name']
yamlinfo['profile'] = vm['profile']['name']
yamlinfo['name'] = name
yamlinfo['status'] = state
# yamlinfo['region'] = self.region
# yamlinfo['zone'] = zone
yamlinfo['ip'] = ip
# yamlinfo['bandwidth'] = vm['bandwidth']
yamlinfo['flavor'] = vm['profile']['name']
yamlinfo['cpus'] = vm['vcpu']['count']
yamlinfo['memory'] = vm['memory']
yamlinfo['image'] = image
yamlinfo['user'] = common.get_user(image)
yamlinfo['creationdate'] = vm['created_at']
yamlinfo['id'] = vm['id']
# yamlinfo['resource_group'] = vm['resource_group']['name']
# yamlinfo['resource_type'] = vm['resource_type']
# yamlinfo['startable'] = vm['startable']
# yamlinfo['vpc'] = vm['vpc']['name']
# NOTE(review): 'profile' was set to the instance profile name above and is
# overwritten with '' here (presumably so the metadata tag loop below can
# fill it in) — confirm this is intentional.
yamlinfo['profile'] = ''
yamlinfo['plan'] = ''
# metadata is stored as "key:value" user tags on the instance CRN
tag_list = self.global_tagging_service.list_tags(attached_to=vm['crn']).get_result().items()
for entry in tag_list:
if entry[0] != 'items':
continue
tags = entry[1]
for tag in tags:
tagname = tag['name']
if tagname.count(':') == 1:
key, value = tagname.split(':')
if key in METADATA_FIELDS:
yamlinfo[key] = value
# NOTE(review): this break stops tag processing early — confirm whether all
# metadata tags (not just the first) are expected to be recorded.
break
nets = []
for interface in vm['network_interfaces']:
network = interface['subnet']['name']
device = interface['name']
private_ip = interface['primary_ipv4_address']
# NOTE(review): 'type' is populated with the private ip, not a NIC type
nets.append({'device': device, 'net': network, 'type': private_ip, 'mac': 'N/A'})
yamlinfo['private_ip'] = private_ip
if nets:
yamlinfo['nets'] = nets
# yamlinfo['primary_network_interface'] = vm['primary_network_interface']['name']
disks = []
# volume lookups are an extra API round-trip; list() skips them
if ignore_volumes is False:
try:
volumes = self._get_volumes()
except ApiException as exc:
error("Unable to retrieve volume information. %s" % exc)
return yamlinfo
for attachment in vm['volume_attachments']:
devname = attachment['volume']['name']
if devname in volumes:
volume = volumes[devname]
disksize = volume['capacity']
drivertype = volume['profile']['name']
diskformat = 'N/A'
path = 'N/A'
disks.append({'device': devname, 'size': disksize, 'format': diskformat, 'type': drivertype,
'path': path})
if disks:
yamlinfo['disks'] = disks
if debug:
# expose the raw API object for troubleshooting
yamlinfo['debug'] = vm
return yamlinfo
def ip(self, name):
    """Return the available floating IPs of VM *name*, comma-joined.

    Returns an empty string when the VM is missing or has no floating ip.
    """
    addresses = []
    try:
        vm = self._get_vm(name)
        if vm is None:
            error('VM %s not found' % name)
            return ""
        for nic in vm['network_interfaces']:
            result = self.conn.list_instance_network_interface_floating_ips(vm['id'], nic['id']).result
            for fip in result['floating_ips']:
                if fip['status'] == 'available':
                    addresses.append(fip['address'])
    except ApiException as exc:
        error("Unable to retrieve IP for %s. %s" % (name, exc))
    return ','.join(addresses)
def internalip(self, name):
    """Return the primary private IPv4 address of VM *name*, or None.

    :param name: name of the VM
    :return: ip string, or None when the VM is missing or has no primary NIC
    """
    try:
        vm = self._get_vm(name)
    except ApiException:
        return None
    # Bug fix: _get_vm returns None for a missing VM (see the sibling
    # methods' None checks); the original code then raised TypeError on
    # the membership test below instead of returning None.
    if vm is None or 'primary_network_interface' not in vm:
        return None
    return vm['primary_network_interface']['primary_ipv4_address']
def volumes(self, iso=False):
    """Return the usable image names, sorted case-insensitively.

    Windows images and images that are neither available nor deprecated
    are skipped. On API error the names gathered so far are returned.
    """
    names = []
    try:
        for image in self.conn.list_images().result['images']:
            usable = image['status'] in ['available', 'deprecated']
            is_windows = image['operating_system']['name'].startswith('windows')
            if usable and not is_windows:
                names.append(image['name'])
    except ApiException as exc:
        error("Unable to retrieve volume information. %s" % exc)
        return names
    return sorted(names, key=str.lower)
# Delete VM *name*: detach and delete its floating ips, delete the instance,
# and clean up its DNS record when a 'domain' tag (and no external dnsclient)
# is present. Returns a {'result': ...} dict in kcli provider style.
def delete(self, name, snapshots=False):
conn = self.conn
try:
vm = self._get_vm(name)
if vm is None:
return {'result': 'failure', 'reason': 'VM %s not found' % name}
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to retrieve VM %s. %s' % (name, exc)}
# tag lookup is best-effort: failure only disables the DNS cleanup below
tags = []
try:
tags = self.global_tagging_service.list_tags(attached_to=vm['crn']).result['items']
except Exception as exc:
error('Unable to retrieve tags. %s' % exc)
dnsclient, domain = None, None
# metadata is encoded as "key:value" user tags
for tag in tags:
tagname = tag['name']
if tagname.count(':') == 1:
key, value = tagname.split(':')
if key == 'domain':
domain = value
if key == 'dnsclient':
dnsclient = value
try:
# release every floating ip attached to any NIC before deleting the VM
for network in vm['network_interfaces']:
response = conn.list_instance_network_interface_floating_ips(instance_id=vm['id'],
network_interface_id=network['id']).result
if len(response['floating_ips']) == 0:
continue
for floating_ip in response['floating_ips']:
conn.remove_instance_network_interface_floating_ip(id=floating_ip['id'],
instance_id=vm['id'],
network_interface_id=network['id'])
conn.delete_floating_ip(id=floating_ip['id'])
except ApiException as exc:
return {'result': 'failure',
'reason': 'Unable to remove floating IPs for VM %s. %s' % (name, exc)}
try:
conn.delete_instance(id=vm['id'])
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to delete VM. %s' % exc}
# a dnsclient tag means another provider owns the record; skip cleanup then
if domain is not None and dnsclient is None:
self.delete_dns(name, domain, name)
return {'result': 'success'}
def dnsinfo(self, name):
    """Return the (dnsclient, domain) metadata tags of VM *name*.

    Either element is None when the corresponding tag is absent or the
    VM/tags cannot be retrieved.
    """
    dnsclient, domain = None, None
    try:
        vm = self._get_vm(name)
    except ApiException as exc:
        error('Unable to retrieve VM. %s' % exc)
        return dnsclient, domain
    if vm is None:
        error('VM %s not found' % name)
        return dnsclient, domain
    try:
        tags = self.global_tagging_service.list_tags(attached_to=vm['crn']).result['items']
    except ApiException as exc:
        error('Unable to retrieve tags. %s' % exc)
        return None, None
    # metadata is encoded as "key:value" user tags
    for tag in tags:
        parts = tag['name'].split(':')
        if len(parts) != 2:
            continue
        key, value = parts
        if key == 'dnsclient':
            dnsclient = value
        elif key == 'domain':
            domain = value
    return dnsclient, domain
def clone(self, old, new, full=False, start=False):
    """Cloning is not supported on this provider."""
    print("not implemented")
# Attach a "metatype:metavalue" user tag to VM *name*.
# NOTE(review): the *append* parameter is never used here — confirm whether
# non-append mode should detach an existing "metatype:*" tag first.
# NOTE(review): returns None on success / VM-lookup failure but a
# {'result': 'failure'} dict when attach_tag fails — inconsistent contract.
def update_metadata(self, name, metatype, metavalue, append=False):
try:
vm = self._get_vm(name)
if vm is None:
error('VM %s not found' % name)
return
except ApiException as exc:
error('Unable to retrieve VM %s. %s' % (name, exc))
return
# tags attach to the instance CRN
resource_model = {'resource_id': vm['crn']}
tag_names = ["%s:%s" % (metatype, metavalue)]
try:
self.global_tagging_service.attach_tag(resources=[resource_model],
tag_names=tag_names, tag_type='user').get_result()
except ApiException as exc:
return {'result': 'failure', 'reason': 'Unable to attach tags. %s' % exc}
def update_memory(self, name, memory):
    """Changing memory is not supported on this provider."""
    print("not implemented")
def update_cpus(self, name, numcpus):
    """Changing cpu count is not supported on this provider."""
    print("not implemented")
def update_start(self, name, start=True):
    """Changing the autostart flag is not supported on this provider."""
    print("not implemented")
def update_information(self, name, information):
    """Store *information* as an 'information' metadata tag on VM *name*."""
    self.update_metadata(name, 'information', information)
def update_iso(self, name, iso):
    """Attaching an iso is not supported on this provider."""
    print("not implemented")
def update_flavor(self, name, flavor):
    """Resize VM *name* to instance profile *flavor* (VM must be stopped).

    :return: {'result': 'success'} or {'result': 'failure', 'reason': ...}
    """
    try:
        vm = self._get_vm(name)
    except ApiException as exc:
        return {'result': 'failure', 'reason': 'Unable to retrieve VM %s. %s' % (name, exc)}
    if vm is None:
        return {'result': 'failure', 'reason': 'VM %s not found' % name}
    if vm['status'] != 'stopped':
        return {'result': 'failure', 'reason': 'VM %s must be stopped' % name}
    try:
        available_profiles = self._get_profiles()
    except ApiException as exc:
        return {'result': 'failure', 'reason': 'Unable to retrieve flavors. %s' % exc}
    if flavor not in available_profiles:
        return {'result': 'failure', 'reason': 'Flavor %s not found' % flavor}
    try:
        profile_identity = vpc_v1.InstancePatchProfileInstanceProfileIdentityByName(name=flavor)
        self.conn.update_instance(id=vm['id'],
                                  instance_patch=vpc_v1.InstancePatch(profile=profile_identity))
    except ApiException as exc:
        return {'result': 'failure', 'reason': 'Unable to update instance. %s' % exc}
    return {'result': 'success'}
def create_disk(self, name, size, pool=None, thin=True, image=None):
    """Creating standalone disks is not supported on this provider."""
    print("not implemented")
def add_disk(self, name, size, pool=None, thin=True, image=None,
             shareable=False, existing=None, interface='virtio'):
    """Attaching disks is not supported on this provider."""
    print("not implemented")
def delete_disk(self, name, diskname, pool=None, novm=False):
    """Deleting disks is not supported on this provider."""
    print("not implemented")
def list_disks(self):
    """Listing disks is not supported; an empty mapping is returned."""
    print("not implemented")
    return {}
def add_nic(self, name, network):
    """Attach a new NIC on subnet *network* to VM *name*.

    Errors are reported via error(); nothing is returned.
    """
    try:
        vm = self._get_vm(name)
    except ApiException as exc:
        error('Unable to retrieve VM %s. %s' % (name, exc))
        return
    if vm is None:
        error('VM %s not found' % name)
        return
    try:
        subnet = self._get_subnet(network)
    except ApiException as exc:
        error('Unable to retrieve network information. %s' % exc)
        return
    if subnet is None:
        error('Network %s not found' % network)
        return
    try:
        # TODO: better name. Follow ethX scheme.
        self.conn.create_instance_network_interface(
            instance_id=vm['id'],
            subnet=vpc_v1.SubnetIdentityById(id=subnet['id']),
            allow_ip_spoofing=False
        )
    except ApiException as exc:
        error('Unable to create NIC. %s' % exc)
def delete_nic(self, name, interface):
try:
vm = self._get_vm(name)
if vm is None:
error('VM %s not found' % name)
return
except ApiException as exc:
error('Unable to retrieve VM %s. %s' % (name, exc))
try:
for network in vm['network_interfaces']:
if network['name'] == interface:
response = self.conn.delete_instance_network_interface(instance_id=vm['id'],
id=network['id'])
if response.status_code != 204:
error('Unexpected status code received: %d' % response.status_code)
except ApiException | |
:param feature_names (string): part of filename that all feature files have in common
:param label_names (string): part of filename that all label files have in common
:param tt_split_ratio (float): split ratio of training and testing data files (value between 0. and 1.)
:param train_shard_size (int): approximate tfrecord shard sizes for training data (in MB)
:param test_shard_size (int): approximate tfrecord shard sizes for testing data (in MB)
:param delete_converted (bool): whether to delete .npy shard folders that were already converted to .tfrecords
:param debug (bool): switch between normal and debug mode
"""
# 01_prepare_data params
self.audio_folder = os.path.normpath(if_str(audio_folder, "audio_folder"))
self.transcript_folder = os.path.normpath(if_str(transcript_folder, "transcript_folder"))
self.save_folder = os.path.normpath(if_str(save_folder, "save_folder"))
self.dataset = if_str(dataset, "dataset").lower()
if feature_type.upper() in self.__feature_types:
self.feature_type = feature_type.upper()
else:
raise AttributeError(f"feature_type must be one of: {self.__feature_types}")
if label_type.lower() in self.__label_types:
self.label_type = label_type.lower()
else:
raise AttributeError(f"label_type must be one of: {self.__label_types}")
self.digitize_numbers = if_bool(digitize_numbers, "digitize_numbers")
self.repeated = if_bool(repeated, "repeated")
self.energy = if_bool(energy, "energy")
if (isinstance(deltas, (list, tuple))
and len(deltas) == 2
and isinstance(deltas[0], int)
and isinstance(deltas[0], int)):
self.deltas = deltas
else:
raise AttributeError(f"deltas must be length 2 tuple/list with int values inside it")
self.nbanks = if_int(nbanks, "nbanks")
self.filter_nan = if_bool(filter_nan, "filter_nan")
self.sort = if_bool(sort, "sort")
self.oral_max_duration = if_float(oral_max_duration, "oral_max_duration")
self.speeds = speeds if [if_float(s) for s in speeds] else self.__speeds
self.debug = if_bool(debug, "debug")
self.bigrams = True if label_type == self.__label_types[1] else False
self.full_save_path = os.path.join(self.save_folder,
f'{self.dataset.upper()}_{self.feature_type}_{self.label_type}'
f'_{self.nbanks}_banks{"_DEBUG" if self.debug else ""}/')
self.__framestride = None
self.digit_counter = defaultdict(lambda: 0)
# 02_feature_length_range params
self.min_frame_length = if_int(min_frame_length)
self.max_frame_length = if_int(max_frame_length)
self.mode = mode if if_str(mode) in self.__modes else self.__modes[0]
self.delete_unused = if_bool(delete_unused)
self.feature_names = if_str(feature_names)
self.label_names = if_str(label_names)
# 03_sort_data params
self.tt_split_ratio = if_float(tt_split_ratio) # TODO: range between 0. and 1.
self.train_shard_size = train_shard_size
self.test_shard_size = test_shard_size
# 04_numpy_to_tfrecord
self.delete_converted = if_bool(delete_converted)
# for data_config.json
self._num_features = None
self._time_info_dict = dict()
self._data_config_dict = dict()
@staticmethod
def _get_file_paths(audio_folder, transcript_folder):
audio_files = [os.path.splitext(f) for f in os.listdir(audio_folder)
if os.path.isfile(os.path.join(audio_folder, f))]
transcript_files = [os.path.splitext(f) for f in os.listdir(transcript_folder)
if os.path.isfile(os.path.join(transcript_folder, f))]
files = []
for file1, file2 in zip(audio_files, transcript_files):
err_message = "{} =/= {}".format(file1[0], file2[0])
assert file1[0] == file2[0], err_message
files.append((f"{audio_folder}/{file1[0]}{file1[1]}", f"{transcript_folder}/{file2[0]}{file2[1]}"))
return files
@staticmethod
def _get_file_names(files):
return [os.path.splitext(os.path.split(file[0])[1])[0] for file in files]
# 01_prepare_data.py
# For every speed perturbation: load each audio/transcript pair, apply the
# SoX speed transform, extract features, save features and labels to disk,
# verify save/load round-trips, and finally (optionally) rename all files
# sorted by feature length. Also records the feature dimensionality.
def prepare_data(self, files):
# collects (cepstrum_path, label_path, frame_count) for the sort step below
cepstra_length_list = []
oral_max_duration = self.oral_max_duration
speeds = self.speeds
file_names = self._get_file_names(files)
for speed in speeds:
self.digit_counter.clear()
LOGGER.info(f"Create audio_transormer for speed {speed}")
audio_transformer = (AudioEffectsChain().speed(speed))
# each speed gets its own subfolder of full_save_path
save_path = os.path.join(self.full_save_path, f"{speed}/")
LOGGER.debug(f"Current save_path: {save_path}")
for i, file in enumerate(files):
if self.dataset == "pdtsc":
pdtsc = PDTSCLoader([file[0]], [file[1]], self.digitize_numbers, self.bigrams, self.repeated)
labels = pdtsc.transcripts_to_labels() # list of lists of 1D numpy arrays
labels = labels[0] # flatten label list
audio_list, fs = pdtsc.load_audio()
audio = audio_list[0]
fs = fs[0]
LOGGER.debug(
f"Loaded PDTSC with fs {fs} from:\n \t audio_path: {file[0]}\n \t transcript_path: {file[1]}")
elif self.dataset == "oral":
oral = OralLoader([file[0]], [file[1]], self.digitize_numbers, self.bigrams, self.repeated)
label_dict = oral.transcripts_to_labels(
oral_max_duration) # Dict['file_name':Tuple[sents_list, starts_list, ends_list]]
audio_dict, fs_dict = oral.load_audio() # Dicts['file_name']
labels = label_dict[file_names[i]]
audio = audio_dict[file_names[i]]
fs = fs_dict[file_names[i]]
LOGGER.debug(
f"Loaded ORAL with fs {fs} from:\n \t audio_path: {file[0]}\n \t transcript_path: {file[1]}")
elif self.dataset == "cv":
# NOTE(review): the "cv" branch is unfinished — it falls through with
# audio/labels still bound to the previous iteration's values.
cv = CommonVoiceLoader([file])
# TODO: continue this wild ride later
else:
raise ValueError("'dataset' argument must be either 'pdtsc' or 'oral'")
full_save_path = os.path.join(save_path, file_names[i])
LOGGER.info(f"\tApplying SoX transormation on audio from {full_save_path}")
# apply the speed perturbation in place, utterance by utterance
for ii in range(len(audio)):
LOGGER.debug(f"\t\t input.shape: {audio[ii].shape}")
audio[ii] = audio_transformer(audio[ii])
LOGGER.debug(f"\t\t output.shape: {audio[ii].shape}")
LOGGER.info(f"\tApplying FeatureExtractor on audio")
feat_ext = FeatureExtractor(audio, fs, feature_type=self.feature_type, energy=self.energy,
deltas=self.deltas, nbanks=self.nbanks)
cepstra = feat_ext.transform_data() # list of 2D arrays
self.__framestride = feat_ext.framestride
# filter out cepstra which are containing nan values
if self.filter_nan:
LOGGER.info(f"\tFiltering out NaN values")
# boolean list where False marks cepstra in which there is at least one nan value present
mask_nan = [not np.isnan(cepstrum).any() for cepstrum in cepstra]
# mask out cepstra and their corresponding labels with nan values
cepstra = list(compress(cepstra, mask_nan))
labels = list(compress(labels, mask_nan))
# SAVE Cepstra to files (features)
LOGGER.info(f"\tSaving cepstra to files")
FeatureExtractor.save_cepstra(cepstra, full_save_path, exist_ok=True)
LOGGER.debug(f"\t\tfull_save_path: {full_save_path}")
# SAVE Transcripts to files (labels)
LOGGER.info(f"\tSaving transcripts to files")
if self.dataset == 'pdtsc':
pdtsc.save_labels([labels], save_path, exist_ok=True)
elif self.dataset == 'oral':
label_dict[file_names[i]] = labels
oral.save_labels(label_dict, save_path, exist_ok=True)
else:
raise ValueError("'dataset' argument must be either 'pdtsc' or 'oral'")
# reload what was just written and compare value-for-value
LOGGER.info(f"\tChecking SAVE/LOAD consistency")
loaded_cepstra, loaded_cepstra_paths = FeatureExtractor.load_cepstra(full_save_path)
loaded_labels, loaded_label_paths = DataLoader.load_labels(full_save_path)
# flatten the lists
loaded_cepstra, loaded_cepstra_paths, loaded_labels, loaded_label_paths = (loaded_cepstra[0],
loaded_cepstra_paths[0],
loaded_labels[0],
loaded_label_paths[0])
for j in range(len(cepstra)):
if np.any(np.not_equal(cepstra[j], loaded_cepstra[j])):
raise UserWarning("Saved and loaded cepstra are not value consistent.")
if self.dataset == 'pdtsc':
if np.any(np.not_equal(labels[j], loaded_labels[j])):
raise UserWarning("Saved and loaded labels are not value consistent.")
elif self.dataset == 'oral':
if np.any(np.not_equal(labels[j][0], loaded_labels[j])):
raise UserWarning("Saved and loaded labels are not value consistent.")
# add (cepstrum_path, label_path, cepstrum_length) tuple into collective list for sorting
cepstra_length_list.append(
(loaded_cepstra_paths[j], loaded_label_paths[j], loaded_cepstra[j].shape[0]))
LOGGER.debug(f'files from {file_names[i]} transformed and saved into {os.path.abspath(save_path)}.')
# print number digitization results
if self.digitize_numbers:
LOGGER.debug(f"Digits in file: {file}")
if "pdtsc" in self.dataset:
counter = pdtsc.dt.count_nonzero()
elif "oral" in self.dataset:
counter = oral.dt.count_nonzero()
elif "cv" in self.dataset:
counter = cv.dt.count_nonzero()
for k, v in counter.items():
self.digit_counter[k] += v
LOGGER.info(f"\t dc: {self.digit_counter}")
# sort cepstra and labels by time length (number of frames)
if self.sort:
LOGGER.info(f"Sorting cepstra and labels by time length (number of frames)")
sort_indices = np.argsort(
[c[2] for c in cepstra_length_list]) # indices which sort the lists by cepstra length
cepstra_length_list = [cepstra_length_list[i] for i in sort_indices] # sort the cepstra list
# zero-pad indices so lexicographic file order matches numeric order
num_digits = len(str(len(cepstra_length_list)))
for idx, file in enumerate(cepstra_length_list):
cepstrum_path, label_path, _ = file
os.rename(cepstrum_path, "{0}/cepstrum-{1:0{2}d}.npy".format(save_path, idx, num_digits))
os.rename(label_path, "{0}/transcript-{1:0{2}d}.npy".format(save_path, idx, num_digits))
# remove the now-empty per-utterance subfolders
subfolders = next(os.walk(save_path))[1]
for folder in subfolders:
try:
os.rmdir(os.path.join(save_path, folder))
except OSError:
LOGGER.warning("Folder {} is not empty! Can't delete.".format(os.path.join(save_path, folder)))
LOGGER.info(f"Save the number of features in cepstra.")
# NOTE(review): relies on cepstra from the last processed file; assumes at
# least one file was processed and all cepstra share one feature dimension.
self._num_features = cepstra[0].shape[1]
LOGGER.debug(f"_num_features: {self._num_features}")
# 02_feature_length_range.py
def feature_length_range(self):
""" Check individual files (features and their labels) in load_dir and copy/move those which satisfy the condition:
min_frame_length <= feature_frame_len <= max_frame_length
:param load_dir: folder from which to load features and their labels
:param min_frame_length: lower bound of the feature frame length condition
:param max_frame_length: upper bound of the feature frame length condition
:param mode: 'copy'/'move' - condition satisfying files are copied/moved from load_dir to save_dir
:param feature_names: sequence of symbols that can be used as common identifier for feature files
:param label_names: sequence of symbols that can be used as common identifier for label files
:return: None
"""
# normalize the save directory path
save_path = f"{self.full_save_path[:-1]}_min_{self.min_frame_length}_max_{self.max_frame_length}/"
folder_structure_gen = os.walk(self.full_save_path) # ('path_to_current_folder', [subfolders], ['files', ...])
for folder in folder_structure_gen:
path, subfolders, files = folder
feat_file_names = [f for f in files if self.feature_names in f]
label_file_names = [f for f in files if self.label_names in f]
frames_accepted = 0
frames_rejected = 0
num_feats = len(feat_file_names)
num_labels = len(label_file_names)
assert num_feats == num_labels, 'There is {} feature files and {} label files (must be same).'.format(
num_feats, num_labels)
rel_path = os.path.relpath(path,
self.full_save_path) # relative position of current subdirectory in regards to load_dir
save_full_path = os.path.normpath(os.path.join(save_path, rel_path)) # folder/subfolder to which save files in save_dir
# time_info_dict initialization
for speed in self.speeds:
if str(speed) in rel_path:
cur_speed = str(speed)
if cur_speed not in self._time_info_dict.keys():
self._time_info_dict[cur_speed] = {"accepted": 0.,
"rejected": 0.}
LOGGER.debug(f"Initialized time_info dict for {cur_speed}.")
break
else:
cur_speed = None
LOGGER.debug("No speed folder found.")
# make subdirectories in save_dir
os.makedirs(save_full_path, exist_ok=True)
for i in range(num_feats):
feat_load_path = os.path.join(path, feat_file_names[i])
label_load_path = os.path.join(path, label_file_names[i])
feat_save_path = os.path.join(save_full_path, feat_file_names[i])
label_save_path = os.path.join(save_full_path, label_file_names[i])
feat, _ = FeatureExtractor.load_cepstra(feat_load_path)
feat_frame_len = feat[0][0].shape[0]
if self.min_frame_length <= feat_frame_len <= self.max_frame_length:
# TODO: calculate cummulative feature time length
frames_accepted += feat_frame_len
if self.mode == 'copy':
shutil.copy2(feat_load_path, feat_save_path)
LOGGER.debug("Copied {} to {}".format(feat_load_path, feat_save_path))
shutil.copy2(label_load_path, label_save_path)
LOGGER.debug("Copied {} to {}".format(label_load_path, label_save_path))
elif self.mode == 'move':
os.rename(feat_load_path, feat_save_path)
LOGGER.debug("Moved {} to {}".format(feat_load_path, feat_save_path))
os.rename(label_load_path, label_save_path)
LOGGER.debug("Moved {} to {}".format(label_load_path, label_save_path))
else:
raise ValueError("argument mode must be either 'copy' or 'move'")
else:
frames_rejected += feat_frame_len
# generate dict file with time lengths for each speed
| |
<filename>server/analysis/preprocessing.py
#
# OtterTune - preprocessing.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
from abc import ABCMeta, abstractmethod
from itertools import chain, combinations, combinations_with_replacement
import numpy as np
from sklearn.preprocessing import MinMaxScaler as SklearnMinMaxScaler
from .util import is_numeric_matrix, is_lexical_matrix
# ==========================================================
# Preprocessing Base Class
# ==========================================================
class Preprocess(object, metaclass=ABCMeta):
    """Abstract base class for matrix preprocessing steps.

    Subclasses implement a sklearn-like fit/transform/inverse_transform
    interface.
    """

    @abstractmethod
    def fit(self, matrix):
        """Learn the parameters needed to transform *matrix*."""
        pass

    @abstractmethod
    def transform(self, matrix, copy=True):
        """Return the transformed matrix."""
        pass

    def fit_transform(self, matrix, copy=True):
        """Fit on *matrix*, then transform it.

        Bug fix: *copy* is now forwarded to transform() instead of being
        hard-coded to True, so callers asking for in-place work get it.
        """
        self.fit(matrix)
        return self.transform(matrix, copy=copy)

    @abstractmethod
    def inverse_transform(self, matrix, copy=True):
        """Undo the transform; not every subclass supports this."""
        pass
# ==========================================================
# Bin by Deciles
# ==========================================================
class Bin(Preprocess):
    """Bin matrix entries into deciles labelled bin_start .. bin_start+9.

    axis=None bins the whole matrix at once, axis=0 bins each column
    independently, axis=1 each row.
    """

    def __init__(self, bin_start, axis=None):
        if axis not in (None, 0, 1):
            raise NotImplementedError("Axis={} is not yet implemented".format(axis))
        self.deciles_ = None
        self.bin_start_ = bin_start
        self.axis_ = axis

    def fit(self, matrix):
        """Compute the decile boundaries along the configured axis."""
        if self.axis_ is None:
            self.deciles_ = get_deciles(matrix, self.axis_)
        else:
            # per-column (axis 0) or per-row (axis 1) boundaries
            vectors = matrix.T if self.axis_ == 0 else matrix
            self.deciles_ = [get_deciles(vec, axis=None) for vec in vectors]
        return self

    def transform(self, matrix, copy=True):
        """Replace every entry by its decile label; shape is preserved."""
        assert self.deciles_ is not None
        if self.axis_ is None:
            res = bin_by_decile(matrix, self.deciles_,
                                self.bin_start_, self.axis_)
        elif self.axis_ == 0:
            binned_cols = [bin_by_decile(col, dec, self.bin_start_, axis=None)
                           for col, dec in zip(matrix.T, self.deciles_)]
            res = np.vstack(binned_cols).T
        else:  # axis_ == 1 (guaranteed by __init__)
            binned_rows = [bin_by_decile(row, dec, self.bin_start_, axis=None)
                           for row, dec in zip(matrix, self.deciles_)]
            res = np.vstack(binned_rows)
        assert res.shape == matrix.shape
        return res

    def inverse_transform(self, matrix, copy=True):
        raise NotImplementedError("This method is not supported")
def get_deciles(matrix, axis=None):
    """Return the ten decile boundaries (10th..100th percentiles) of ``matrix``.

    The top boundary is replaced by +inf so that, when used with
    ``bin_by_decile``, every value falls into some bin.

    Args:
        matrix: non-empty numpy array (any shape; flattened by percentile).
        axis: must be None; per-axis deciles are not implemented here.

    Returns:
        1-d array of length 10, ascending, ending in +inf.
    """
    if axis is not None:
        raise NotImplementedError("Axis is not yet implemented")
    assert matrix.ndim > 0
    assert matrix.size > 0
    decile_range = np.arange(10, 101, 10)
    deciles = np.percentile(matrix, decile_range, axis=axis)
    # Fix: ``np.Inf`` was a deprecated alias removed in NumPy 2.0;
    # ``np.inf`` is the canonical spelling and behaves identically.
    deciles[-1] = np.inf
    return deciles
def bin_by_decile(matrix, deciles, bin_start, axis=None):
    """Map each entry of ``matrix`` to its decile bin.

    An entry whose value is <= ``deciles[i]`` (for the smallest such ``i``)
    becomes ``i + bin_start``; entries above every boundary stay 0.
    """
    if axis is not None:
        raise NotImplementedError("Axis is not yet implemented")
    assert matrix.ndim > 0
    assert matrix.size > 0
    assert deciles is not None
    assert len(deciles) == 10
    binned = np.zeros_like(matrix)
    # Walk the boundaries from largest to smallest so the smallest
    # matching decile index is the one that sticks.
    for bin_index, boundary in reversed(list(enumerate(deciles))):
        binned[matrix <= boundary] = bin_index + bin_start
    return binned
# ==========================================================
# Shuffle Indices
# ==========================================================
class Shuffler(Preprocess):
    """Randomly permute the rows and/or columns of a labeled matrix.

    The matrix argument is expected to expose ``data`` (2-d array),
    ``rowlabels``, ``columnlabels`` and ``copy()`` — assumed from usage;
    confirm against the project's Matrix type.
    """

    def __init__(self, shuffle_rows=True, shuffle_columns=False,
                 row_indices=None, column_indices=None, seed=0):
        self.shuffle_rows_ = shuffle_rows
        self.shuffle_columns_ = shuffle_columns
        self.row_indices_ = row_indices
        self.column_indices_ = column_indices
        # Seed the global RNG so permutations drawn in fit() are reproducible.
        np.random.seed(seed)
        self.fitted_ = False

    def fit(self, matrix):
        """Draw the row/column permutations (unless supplied explicitly)."""
        if self.shuffle_rows_ and self.row_indices_ is None:
            self.row_indices_ = get_shuffle_indices(matrix.data.shape[0])
        if self.shuffle_columns_ and self.column_indices_ is None:
            self.column_indices_ = get_shuffle_indices(matrix.data.shape[1])
        self.fitted_ = True

    def transform(self, matrix, copy=True):
        """Apply the stored permutations to data and labels."""
        if not self.fitted_:
            raise Exception("The fit() function must be called before transform()")
        result = matrix.copy() if copy else matrix
        if self.shuffle_rows_:
            result.data = result.data[self.row_indices_]
            result.rowlabels = result.rowlabels[self.row_indices_]
        if self.shuffle_columns_:
            result.data = result.data[:, self.column_indices_]
            result.columnlabels = result.columnlabels[self.column_indices_]
        return result

    def inverse_transform(self, matrix, copy=True):
        """Undo the permutations by applying their argsort inverses."""
        result = matrix.copy() if copy else matrix
        if self.shuffle_rows_:
            undo_rows = np.argsort(self.row_indices_)
            result.data = result.data[undo_rows]
            result.rowlabels = result.rowlabels[undo_rows]
        if self.shuffle_columns_:
            undo_columns = np.argsort(self.column_indices_)
            result.data = result.data[:, undo_columns]
            result.columnlabels = result.columnlabels[undo_columns]
        return result
def get_shuffle_indices(size, seed=None):
    """Return a random permutation of ``range(size)``.

    ``size`` may be an int (returns one permutation array) or an iterable
    of ints (returns a list of permutation arrays, one per dimension).
    Passing ``seed`` reseeds numpy's global RNG first.
    """
    if seed is not None:
        assert isinstance(seed, int)
        np.random.seed(seed)
    if isinstance(size, int):
        return np.random.choice(size, size, replace=False)
    return [np.random.choice(dim, dim, replace=False) for dim in size]
# ==========================================================
# Polynomial Features
# ==========================================================
class PolynomialFeatures(Preprocess):
    """Compute the polynomial features of the input array.
    This code was copied and modified from sklearn's
    implementation.

    Each output feature is the product of a combination of input features
    up to ``degree``; for lexical (string) matrices the "product" is the
    concatenation of the strings (joined with '*' for degree >= 2 terms).
    """

    def __init__(self, degree=2, interaction_only=False, include_bias=True):
        # Maximum polynomial degree of the generated features.
        self.degree_ = degree
        # If True, only products of distinct features (no powers of one feature).
        self.interaction_only_ = interaction_only
        # If True, a constant (degree-0) feature is included first.
        self.include_bias_ = include_bias
        # Both set by fit().
        self.n_input_features_ = None
        self.n_output_features_ = None

    # @property
    # def powers_(self):
    #     combinations = self._combinations(self.n_input_features_, self.degree_,
    #                                       self.interaction_only_,
    #                                       self.include_bias_)
    #     return np.vstack(np.bincount(c, minlength=self.n_input_features_)
    #                      for c in combinations)

    @staticmethod
    def _combinations(n_features, degree, interaction_only, include_bias):
        # Yields one index tuple per output feature; the tuple lists the
        # input-feature indices whose product forms that output feature.
        comb = (combinations if interaction_only else combinations_with_replacement)
        start = int(not include_bias)
        return chain.from_iterable(comb(list(range(n_features)), i)
                                   for i in range(start, degree + 1))

    def fit(self, matrix):
        """Record the input width and count the output features."""
        assert matrix.ndim == 2
        assert matrix.size > 0
        _, n_features = matrix.shape
        combos = self._combinations(n_features, self.degree_,
                                    self.interaction_only_,
                                    self.include_bias_)
        self.n_input_features_ = matrix.shape[1]
        self.n_output_features_ = sum(1 for _ in combos)
        return self

    def transform(self, matrix, copy=True):
        """Transform data to polynomial features

        Parameters
        ----------
        matrix : array-like, shape [n_samples, n_features]
            The data to transform, row by row.

        Returns
        -------
        poly_matrix : np.ndarray shape [n_samples, NP]
            The matrix of features, where NP is the number of polynomial
            features generated from the combination of inputs.
        """
        assert matrix.ndim == 2
        assert matrix.size > 0
        n_samples, n_features = matrix.shape
        if n_features != self.n_input_features_:
            raise ValueError("X shape does not match training shape")
        is_numeric_type = is_numeric_matrix(matrix)
        is_lexical_type = is_lexical_matrix(matrix)
        if is_lexical_type:
            strs = matrix.reshape((matrix.size,))
            maxlen = max([len(s) for s in strs])
            # Fixed-width bytes dtype: room for two maxlen strings plus a
            # 1-char separator. NOTE(review): appears to assume degree <= 2;
            # degree-3 products could overflow this width — confirm.
            dtype = "S{}".format(maxlen * 2 + 1)
        else:
            dtype = matrix.dtype
        # allocate output data
        poly_matrix = np.empty((n_samples, self.n_output_features_), dtype=dtype)
        combos = self._combinations(n_features, self.degree_,
                                    self.interaction_only_,
                                    self.include_bias_)
        for i, c in enumerate(combos):
            if is_numeric_type:
                # Numeric case: product of the selected columns.
                poly_matrix[:, i] = matrix[:, c].prod(1)
            elif is_lexical_type:
                # Lexical case: concatenate the selected strings; degree >= 2
                # terms (past the first n_poly1_feats columns) use '*' between them.
                n_poly1_feats = n_features + int(self.include_bias_)
                if i >= n_poly1_feats:
                    x = "*".join(np.squeeze(matrix[:, c]).tolist())
                else:
                    x = "".join(np.squeeze(matrix[:, c]).tolist())
                poly_matrix[:, i] = x
            else:
                raise TypeError("Unsupported matrix type {}".format(matrix.dtype))
        return poly_matrix

    def inverse_transform(self, matrix, copy=True):
        raise NotImplementedError("This method is not supported")
# ==========================================================
# Dummy Encoding
# ==========================================================
class DummyEncoder(Preprocess):
    """One-hot ("dummy") encode the categorical columns of a matrix.

    Wraps sklearn's OneHotEncoder and also maintains the new column labels
    produced by the expansion (``cat_label____k`` for each category value).
    NOTE(review): relies on the legacy OneHotEncoder API
    (``n_values``/``categorical_features``), which was removed in
    scikit-learn 0.22 — requires an older sklearn or a port.
    """

    def __init__(self, n_values, categorical_features, cat_columnlabels, noncat_columnlabels):
        from sklearn.preprocessing import OneHotEncoder
        if not isinstance(n_values, np.ndarray):
            n_values = np.array(n_values)
        if not isinstance(categorical_features, np.ndarray):
            categorical_features = np.array(categorical_features)
        # assert categorical_features.size > 0
        assert categorical_features.shape == n_values.shape
        # Binary features should not be dummy-encoded (they are already 0/1).
        for nv in n_values:
            if nv <= 2:
                raise Exception("Categorical features must have 3+ labels")
        # Number of distinct values per categorical column.
        self.n_values = n_values
        self.cat_columnlabels = cat_columnlabels
        self.noncat_columnlabels = noncat_columnlabels
        self.encoder = OneHotEncoder(
            n_values=n_values, categorical_features=categorical_features, sparse=False)
        # Set by fit(): labels of the expanded matrix.
        self.new_labels = None
        # Column indices of the categorical features in the ORIGINAL matrix.
        self.cat_idxs_old = categorical_features

    def fit(self, matrix):
        """Fit the underlying encoder and build the expanded column labels."""
        self.encoder.fit(matrix)
        # determine new columnlabels
        # categorical variables are done in order specified by categorical_features
        new_labels = []
        for i, cat_label in enumerate(self.cat_columnlabels):
            low = self.encoder.feature_indices_[i]
            high = self.encoder.feature_indices_[i + 1]
            for j in range(low, high):
                # eg the categorical variable named cat_var with 5 possible values
                # turns into 0/1 variables named cat_var____0, ..., cat_var____4
                new_labels.append(cat_label + "____" + str(j - low))
        # according to sklearn documentation,
        # "non-categorical features are always stacked to the right of the matrix"
        # by observation, it looks like the non-categorical features' relative order is preserved
        # BUT: there is no guarantee made about that behavior!
        # We either trust OneHotEncoder to be sensible, or look for some other way
        new_labels += self.noncat_columnlabels
        self.new_labels = new_labels

    def transform(self, matrix, copy=True):
        # actually transform the matrix
        matrix_encoded = self.encoder.transform(matrix)
        return matrix_encoded

    def fit_transform(self, matrix, copy=True):
        self.fit(matrix)
        return self.transform(matrix)

    def inverse_transform(self, matrix, copy=True):
        """Rebuild the pre-encoding matrix: each categorical column gets the
        index of its active dummy; non-categorical columns are copied back."""
        n_values = self.encoder.n_values_
        # Original column count = non-categorical count + categorical count.
        n_features = matrix.shape[-1] - self.encoder.feature_indices_[-1] + len(n_values)
        # Dummy columns occupy [0, feature_indices_[-1]); the rest are non-categorical.
        noncat_start_idx = self.encoder.feature_indices_[-1]
        inverted_matrix = np.empty((matrix.shape[0], n_features))
        cat_idx = 0
        noncat_idx = 0
        for i in range(n_features):
            if i in self.cat_idxs_old:
                new_col = np.ones((matrix.shape[0],))
                start_idx = self.encoder.feature_indices_[cat_idx]
                # The category value is the offset of the dummy column that is 1.
                for j in range(n_values[cat_idx]):
                    col = matrix[:, start_idx + j]
                    new_col[col == 1] = j
                cat_idx += 1
            else:
                new_col = np.array(matrix[:, noncat_start_idx + noncat_idx])
                noncat_idx += 1
            inverted_matrix[:, i] = new_col
        return inverted_matrix

    def total_dummies(self):
        # Total number of dummy columns produced by the encoding.
        return sum(self.n_values)
def consolidate_columnlabels(columnlabels):
    """Collapse dummy-encoded labels back to their original column names.

    Labels of the form ``name____<k>`` (produced by DummyEncoder) are
    replaced by a single ``name`` entry, keeping first-seen order;
    all other labels pass through unchanged.
    """
    import re
    # Pattern recognizing labels created by the dummy encoder.
    dummy_pattern = re.compile(r'(.*)____\d+')
    consolidated = []
    seen_categories = set()  # avoid emitting a category name twice
    for label in columnlabels:
        match = dummy_pattern.match(label)
        if match is None:
            # Non-categorical label: keep as-is.
            consolidated.append(label)
            continue
        base_name = match.group(1)
        if base_name not in seen_categories:
            seen_categories.add(base_name)
            consolidated.append(base_name)
    return consolidated
def fix_scaler(scaler, encoder, params):
    """Overwrite a fitted scaler's statistics for dummy-encoded columns.

    For every categorical parameter (spanning ``nvals`` one-hot columns,
    or a single column when boolean) the fitted mean/variance are replaced
    with the analytic values mean = nvals * p and var = mean * (1 - p)
    with p = 0.5; non-categorical columns keep their fitted statistics.
    NOTE(review): per-column Bernoulli stats would be p and p*(1-p);
    this scales the mean by the category count — confirm intent.
    """
    success_prob = 0.5
    means = scaler.mean_
    variances = scaler.var_
    n_values = encoder.n_values
    cat_start_idxs = encoder.xform_start_indices
    col = 0
    cat_count = 0
    for param in params:
        if not param.iscategorical:
            # Non-categorical column: leave its fitted statistics alone.
            col += 1
            continue
        if param.isboolean:
            # Booleans occupy one column and are not dummy-expanded.
            width = 1
        else:
            assert cat_start_idxs[cat_count] == col
            width = n_values[cat_count]
            cat_count += 1
        dummy_mean = width * success_prob
        dummy_var = dummy_mean * (1 - success_prob)
        means[col: col + width] = dummy_mean
        variances[col: col + width] = dummy_var
        col += width
    scaler.mean_ = means
    scaler.var_ = variances
    scaler.scale_ = np.sqrt(variances)
def get_min_max(params, encoder=None):
if encoder is not None:
num_cat_feats = encoder.n_values.size
nfeats = len(params) - num_cat_feats + np.sum(encoder.n_values)
n_values = encoder.n_values
cat_start_idxs = encoder.xform_start_indices
else:
num_cat_feats = 0
nfeats = len(params)
n_values = np.array([])
cat_start_idxs = np.array([])
mins = np.empty((nfeats,))
maxs = np.empty((nfeats,))
current_idx = 0
cat_idx = 0
for param in params:
if param.iscategorical:
if param.isboolean:
nvals = 1
else:
assert cat_start_idxs[cat_idx] == current_idx
nvals = n_values[cat_idx]
cat_idx += | |
[self.output]
self.sample_counter = self.sample_counter + 1
# In the train mode we want to continue appending the outputs by calling forward function
# The outputs will be saved internally in the network instance as a list
# Otherwise we want to transform outputs list to a tensor and return it
return self.output
def return_outputs_history(self):
    # Stack the per-step output tensors accumulated in self.outputs
    # (appended during forward calls) along dim 1 — presumably producing
    # a (batch, time, features) tensor; confirm against forward().
    return torch.stack(self.outputs, 1)
import pandas as pd
def load_data(args, filepath=None, inputs_list=None, outputs_list=None):
    """Load experiment CSV file(s) and split them into features/targets.

    For each file, features are all rows except the last and targets are
    all rows except the first (i.e. targets are features shifted one step
    ahead), restricted to ``inputs_list``/``outputs_list`` columns.

    Args:
        args: namespace providing defaults (``val_file_name``,
            ``inputs_list``, ``outputs_list``) and the ``cheat_dt`` flag.
        filepath: a path or a list of paths; defaults to ``args.val_file_name``.
        inputs_list: feature column names; defaults to ``args.inputs_list``.
        outputs_list: target column names; defaults to ``args.outputs_list``.

    Returns:
        ``(features, targets)`` numpy arrays for a single path, or
        ``(all_features, all_targets)`` lists of arrays for a list of paths.
    """
    if filepath is None:
        filepath = args.val_file_name
    if inputs_list is None:
        inputs_list = args.inputs_list
    if outputs_list is None:
        outputs_list = args.outputs_list
    # Fix: use isinstance instead of ``type(x) == list`` and remember the
    # answer once so the return shape always matches the input shape.
    multiple_files = isinstance(filepath, list)
    filepaths = filepath if multiple_files else [filepath]
    all_features = []
    all_targets = []
    for one_filepath in filepaths:
        # Load dataframe (lines starting with '#' are metadata comments).
        print('loading data from ' + str(one_filepath))
        print('')
        df = pd.read_csv(one_filepath, comment='#')
        if args.cheat_dt:
            # Shift dt so each row sees the NEXT step's dt; drop the last
            # row, which has no successor.
            df['dt'] = df['dt'].shift(-1)
            df = df[:-1]
        # Features = rows [0, n-1); targets = rows [1, n): one-step-ahead pairs.
        inputs = copy.deepcopy(df)
        outputs = copy.deepcopy(df)
        inputs.drop(inputs.tail(1).index, inplace=True)  # Drop last row
        outputs.drop(outputs.head(1).index, inplace=True)
        inputs.reset_index(inplace=True)  # Reset index
        outputs.reset_index(inplace=True)
        # Column selection also drops the 'index' column added above.
        inputs = inputs[inputs_list]
        outputs = outputs[outputs_list]
        features = np.array(inputs)
        targets = np.array(outputs)
        all_features.append(features)
        all_targets.append(targets)
    if multiple_files:
        return all_features, all_targets
    return features, targets
class Dataset(data.Dataset):
    """Sliding-window dataset over one array or a list of arrays.

    Each sample is a contiguous window of ``seq_len`` feature rows paired
    with the corresponding window of labels.
    """

    def __init__(self, df, labels, args, seq_len=None):
        'Initialization'
        self.data = df
        self.labels = labels
        self.args = args
        self.seq_len = None
        self.df_lengths = []
        self.df_lengths_cs = []
        self.number_of_samples = 0
        self.reset_seq_len(seq_len=seq_len)

    def reset_seq_len(self, seq_len=None):
        """
        This method should be used if the user wants to change the seq_len without creating new Dataset
        Please remember that one can reset it again to come back to old configuration

        :param seq_len: Gives new user defined seq_len. Call empty to come back to default.
        """
        self.seq_len = self.args.seq_len if seq_len is None else seq_len
        self.df_lengths = []
        self.df_lengths_cs = []
        if type(self.data) == list:
            # Per-dataset usable window counts plus their running total,
            # used by __getitem__ to locate the right dataset for an index.
            for data_set in self.data:
                usable = data_set.shape[0] - self.seq_len
                self.df_lengths.append(usable)
                previous_total = self.df_lengths_cs[-1] if self.df_lengths_cs else 0
                self.df_lengths_cs.append(previous_total + usable)
            self.number_of_samples = self.df_lengths_cs[-1]
        else:
            self.number_of_samples = self.data.shape[0] - self.seq_len

    def __len__(self):
        'Total number of samples'
        return self.number_of_samples

    def __getitem__(self, idx):
        if type(self.data) == list:
            # Find the first dataset whose cumulative length exceeds idx,
            # then convert idx to an offset local to that dataset.
            idx_data_set = next(i for i, v in enumerate(self.df_lengths_cs) if v > idx)
            if idx_data_set > 0:
                idx -= self.df_lengths_cs[idx_data_set - 1]
            window = slice(idx, idx + self.seq_len)
            return self.data[idx_data_set][window, :], self.labels[idx_data_set][window]
        window = slice(idx, idx + self.seq_len)
        return self.data[window, :], self.labels[window]
def plot_results(net,
                 args,
                 dataset=None,
                 filepath=None,
                 inputs_list=None,
                 outputs_list=None,
                 closed_loop_list=None,
                 seq_len=None,
                 warm_up_len=None,
                 closed_loop_enabled=False,
                 comment='',
                 rnn_full_name=None,
                 save=False,
                 close_loop_idx=150):
    """
    This function accepts RNN instance, arguments and CartPole instance.
    It runs one random experiment with CartPole,
    inputs the data into RNN and check how well RNN predicts CartPole state one time step ahead of time

    In detail: it loads (or copies) an evaluation dataset, warms the RNN up,
    feeds it one experiment step by step (optionally feeding its own outputs
    back as inputs after ``close_loop_idx`` steps when ``closed_loop_enabled``),
    then plots predicted vs. ground-truth trajectories and optionally saves
    the figure under ./save_plots/.
    """
    # Resolve every unspecified argument from ``args`` defaults.
    if filepath is None:
        filepath = args.val_file_name
    if type(filepath) == list:
        filepath = filepath[0]
    if warm_up_len is None:
        warm_up_len = args.warm_up_len
    if seq_len is None:
        seq_len = args.seq_len
    if inputs_list is None:
        inputs_list = args.inputs_list
    if inputs_list is None:
        raise ValueError('RNN inputs not provided!')
    if outputs_list is None:
        outputs_list = args.outputs_list
    if outputs_list is None:
        raise ValueError('RNN outputs not provided!')
    if closed_loop_enabled and (closed_loop_list is None):
        closed_loop_list = args.close_loop_for
        if closed_loop_list is None:
            raise ValueError('RNN closed-loop-inputs not provided!')
    # normalization_info = NORMALIZATION_INFO
    # # Here in contrary to ghoast car implementation I have
    # # rnn_input[name] /= normalization_info.iloc[0][column]
    # # and not
    # # rnn_input.iloc[0][column] /= normalization_info.iloc[0][column]
    # # It is because rnn_input is just row (type = Series) and not the whole DataFrame (type = DataFrame)
    #
    # def denormalize_output(output_series):
    #     for name in output_series.index:
    #         if normalization_info.iloc[0][name] is not None:
    #             output_series[name] *= normalization_info.iloc[0][name]
    #     return output_series
    #
    # # Reset the internal state of RNN cells, clear the output memory, etc.
    net.reset()
    net.eval()
    device = get_device()
    # Build the evaluation dataset: either load from file or reuse the
    # provided one (deep-copied so its seq_len can be changed safely).
    if dataset is None:
        dev_features, dev_targets = load_data(args, filepath, inputs_list=inputs_list, outputs_list=outputs_list)
        dev_set = Dataset(dev_features, dev_targets, args, seq_len=seq_len)
    else:
        dev_set = copy.deepcopy(dataset)
        dev_set.reset_seq_len(seq_len=seq_len)
    # Format the experiment data: take the first window as the experiment.
    features, targets = dev_set[0]
    features_pd = pd.DataFrame(data=features, columns=inputs_list)
    targets_pd = pd.DataFrame(data=targets, columns=outputs_list)
    #FIXME: Add denormalization by uncommenting the next line
    # targets_pd = pd.DataFrame(data=targets, columns=outputs_list).apply(denormalize_output, axis=1)
    rnn_outputs = pd.DataFrame(columns=outputs_list)
    rnn_output = None
    # Warm up: repeatedly feed the first input row so the hidden state settles.
    warm_up_idx = 0
    rnn_input_0 = copy.deepcopy(features_pd.iloc[0])
    # Does not bring anything. Why? 0-state shouldn't have zero internal state due to biases...
    while warm_up_idx < warm_up_len:
        rnn_input = rnn_input_0
        rnn_input = np.squeeze(rnn_input.to_numpy())
        # Shape to (batch=1, time=1, features) for the network.
        rnn_input = torch.from_numpy(rnn_input).float().unsqueeze(0).unsqueeze(0).to(device)
        net(rnn_input=rnn_input)
        warm_up_idx += 1
    # Discard outputs accumulated during warm-up.
    net.outputs = []
    net.sample_counter = 0
    close_the_loop = False
    idx_cl = 0
    # Run the experiment one time step at a time.
    for index, row in features_pd.iterrows():
        rnn_input = copy.deepcopy(row)
        if idx_cl == close_loop_idx:
            close_the_loop = True
        # In closed-loop mode, overwrite selected inputs with the network's
        # previous (normalized) outputs instead of the recorded values.
        if closed_loop_enabled and close_the_loop and (rnn_output is not None):
            rnn_input[closed_loop_list] = normalized_rnn_output[closed_loop_list]
        rnn_input = np.squeeze(rnn_input.to_numpy())
        rnn_input = torch.from_numpy(rnn_input).float().unsqueeze(0).unsqueeze(0).to(device)
        normalized_rnn_output = net(rnn_input=rnn_input)
        normalized_rnn_output = list(np.squeeze(normalized_rnn_output.detach().cpu().numpy()))
        normalized_rnn_output = pd.Series(data=normalized_rnn_output, index=outputs_list)
        rnn_output = copy.deepcopy(normalized_rnn_output)
        #FIXME : Enable denormalization
        # denormalize_output(rnn_output)
        rnn_outputs = rnn_outputs.append(rnn_output, ignore_index=True)
        idx_cl += 1
    # # If RNN was given sin and cos of body angle calculate back the body angle
    # if ('body_angle.cos' in rnn_outputs) and ('body_angle.sin' in rnn_outputs) and ('body_angle_deg' not in rnn_outputs):
    #     rnn_outputs['body_angle_deg'] = rnn_outputs.apply(SinCos2Angle_wrapper, axis=1)
    # if ('body_angle.cos' in targets_pd) and ('body_angle.sin' in targets_pd) and ('body_angle_deg' not in targets_pd):
    #     targets_pd['body_angle_deg'] = targets_pd.apply(SinCos2Angle_wrapper, axis=1)
    #
    # # Get the time or # samples axes
    experiment_length = seq_len
    # Prefer explicit time, then cumulative dt, else plain sample numbers.
    if 'time' in features_pd.columns:
        t = features_pd['time'].to_numpy()
        time_axis = t
        time_axis_string = 'Time [s]'
    elif 'dt' in features_pd.columns:
        dt = features_pd['dt'].to_numpy()
        t = np.cumsum(dt)
        time_axis = t
        time_axis_string = 'Time [s]'
    else:
        samples = np.arange(0, experiment_length)
        time_axis = samples
        time_axis_string = 'Sample number'
    number_of_plots = 0
    if ('s.angle' in targets_pd) and ('s.angle' in rnn_outputs) and ('s.position' in targets_pd) and ('s.position' in rnn_outputs):
        x_target = targets_pd['s.angle'].to_numpy()
        y_target = targets_pd['s.position'].to_numpy()
        x_output = rnn_outputs['s.angle'].to_numpy()
        y_output = rnn_outputs['s.position'].to_numpy()
        number_of_plots += 1
    #FIXME: For number of plots = 1, TypeError: 'AxesSubplot' object is not subscriptable
    number_of_plots = 2
    #
    # if ('body_angle_deg' in targets_pd) and ('body_angle_deg' in rnn_outputs):
    #     body_angle_target = targets_pd['body_angle_deg'].to_numpy()
    #     body_angle_output = rnn_outputs['body_angle_deg'].to_numpy()
    #     number_of_plots += 1
    #
    # if ('velocity_m_per_sec.x' in targets_pd) and ('velocity_m_per_sec.x' in rnn_outputs) and ('velocity_m_per_sec.y' in targets_pd) and ('velocity_m_per_sec.y' in rnn_outputs):
    #     vel_x_target = targets_pd['velocity_m_per_sec.x'].to_numpy()
    #     vel_y_target = targets_pd['velocity_m_per_sec.y'].to_numpy()
    #     vel_x_output = rnn_outputs['velocity_m_per_sec.x'].to_numpy()
    #     vel_y_output = rnn_outputs['velocity_m_per_sec.y'].to_numpy()
    #     speed_target = np.sqrt((vel_x_target**2)+(vel_y_target**2))
    #     speed_output = np.sqrt((vel_x_output ** 2) + (vel_y_output ** 2))
    #     number_of_plots += 1
    #
    # # Create a figure instance
    fig, axs = plt.subplots(number_of_plots, 1, figsize=(18, 10))  #, sharex=True) # share x axis so zoom zooms all plots
    plt.subplots_adjust(hspace=0.4)
    start_idx = 0
    # Trajectory plot: angle vs (flipped) position, ground truth vs prediction.
    axs[0].set_title(comment, fontsize=20)
    axs[0].set_ylabel("Position", fontsize=18)
    axs[0].plot(x_target, pixels2meters(SCREEN_HEIGHT_PIXELS)-y_target, 'k:', markersize=12, label='Ground Truth')
    axs[0].plot(x_output, pixels2meters(SCREEN_HEIGHT_PIXELS)-y_output, 'b', markersize=12, label='Predicted position')
    axs[0].plot(x_target[start_idx], pixels2meters(SCREEN_HEIGHT_PIXELS)-y_target[start_idx], 'g.', markersize=16, label='Start')
    axs[0].plot(x_output[start_idx], pixels2meters(SCREEN_HEIGHT_PIXELS)-y_output[start_idx], 'g.', markersize=16)
    axs[0].plot(x_target[-1], pixels2meters(SCREEN_HEIGHT_PIXELS)-y_target[-1], 'r.', markersize=16, label='End')
    axs[0].plot(x_output[-1], pixels2meters(SCREEN_HEIGHT_PIXELS)-y_output[-1], 'r.', markersize=16)
    if closed_loop_enabled:
        # Mark the step where the network starts consuming its own outputs.
        axs[0].plot(x_target[close_loop_idx], pixels2meters(SCREEN_HEIGHT_PIXELS)-y_target[close_loop_idx], '.', color='darkorange', markersize=16, label='connect output->input')
        axs[0].plot(x_output[close_loop_idx], pixels2meters(SCREEN_HEIGHT_PIXELS)-y_output[close_loop_idx], '.', color='darkorange', markersize=16)
    axs[0].tick_params(axis='both', which='major', labelsize=16)
    axs[0].set_xlabel('Angle', fontsize=18)
    axs[0].legend()
    #
    #
    #
    # axs[1].set_ylabel("Body angle (deg)", fontsize=18)
    # axs[1].plot(time_axis, body_angle_target, 'k:', markersize=12, label='Ground Truth')
    # axs[1].plot(time_axis, body_angle_output, 'b', markersize=12, label='Predicted speed')
    #
    # axs[1].plot(time_axis[start_idx], body_angle_target[start_idx], 'g.', markersize=16, label='Start')
    # axs[1].plot(time_axis[start_idx], body_angle_output[start_idx], 'g.', markersize=16)
    # axs[1].plot(time_axis[-1], body_angle_target[-1], 'r.', markersize=16, label='End')
    # axs[1].plot(time_axis[-1], body_angle_output[-1], 'r.', markersize=16)
    # if closed_loop_enabled:
    #     axs[1].plot(time_axis[close_loop_idx], body_angle_target[close_loop_idx], '.', color='darkorange', markersize=16, label='Connect output->input')
    #     axs[1].plot(time_axis[close_loop_idx], body_angle_output[close_loop_idx], '.', color='darkorange', markersize=16)
    #
    # axs[1].tick_params(axis='both', which='major', labelsize=16)
    #
    # axs[1].set_xlabel(time_axis_string, fontsize=18)
    #
    # axs[1].legend()
    #
    #
    # axs[2].set_ylabel("Speed (m/s)", fontsize=18)
    # axs[2].plot(time_axis, speed_target, 'k:', markersize=12, label='Ground Truth')
    # axs[2].plot(time_axis, speed_output, 'b', markersize=12, label='Predicted speed')
    #
    # axs[2].plot(time_axis[start_idx], speed_target[start_idx], 'g.', markersize=16, label='Start')
    # axs[2].plot(time_axis[start_idx], speed_output[start_idx], 'g.', markersize=16)
    # axs[2].plot(time_axis[-1], speed_target[-1], 'r.', markersize=16, label='End')
    # axs[2].plot(time_axis[-1], speed_output[-1], 'r.', markersize=16)
    # if closed_loop_enabled:
    #     axs[2].plot(time_axis[close_loop_idx], speed_target[close_loop_idx], '.', color='darkorange', markersize=16, label='Connect output->input')
    #     axs[2].plot(time_axis[close_loop_idx], speed_output[close_loop_idx], '.', color='darkorange', markersize=16)
    #
    # axs[2].tick_params(axis='both', which='major', labelsize=16)
    #
    # axs[2].set_xlabel(time_axis_string, fontsize=18)
    # axs[2].legend()
    #
    # plt.ioff()
    # # plt.show()
    # plt.pause(1)
    #
    # # Make name settable and with time-date stemp
    # # Save figure to png
    if save:
        # Make folders if not yet exist
        try:
            os.makedirs('save_plots')
        except FileExistsError:
            pass
        dateTimeObj = datetime.now()
        timestampStr = dateTimeObj.strftime("%d%b%Y_%H%M%S")
        # Prefer the model name for the file; fall back to a timestamp.
        if rnn_full_name is not None:
            fig.savefig('./save_plots/'+rnn_full_name+'.png')
        else:
            fig.savefig('./save_plots/'+timestampStr + '.png')
# FIXME: M_PER_PIXEL was imported from globals in the l2race case; hardcoded for now.
M_PER_PIXEL = 0.10
SCREEN_HEIGHT_PIXELS = 768
SCREEN_WIDTH_PIXELS = 1024


def pixels2meters(x_map: float):
    """Convert a value from map units (pixels) to physical units (meters).

    Suitable for converting position, velocity or acceleration alike.

    :param x_map: value in map units (pixels, not necessarily integer)
    :return: the same quantity expressed in meters
    """
    return x_map * M_PER_PIXEL
def meters2pixels(x_track: float):
"""
The function converts a value in the map units (pixels) to the physical units (meters).
In contrast to get_position_on_map() it DOES NOT round the | |
<gh_stars>1-10
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
# v2: replace with src/awkward/_v2/_connect/numba/arrayview.py
import operator
import numba
import numba.core.typing
import numba.core.typing.ctypes_utils
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
########## for code that's built up from strings
def code_to_function(code, function_name, externals=None, debug=False):
    """Compile a source string and return the function it defines.

    ``externals`` (a mapping) seeds the namespace the code executes in;
    ``debug`` echoes the generated source to stdout before compiling.
    """
    if debug:
        print("################### " + function_name)  # noqa: T201
        print(code)  # noqa: T201
    scope = dict(externals) if externals is not None else {}
    exec(code, scope)
    return scope[function_name]
########## Lookup
class Lookup:
    """Flatten an awkward layout tree into parallel pointer tables.

    ``tolookup`` fills ``positions``/``sharedptrs``/``arrays``; the tables
    are then converted to intp arrays of raw addresses so jitted code can
    navigate the layout without Python objects.
    """

    def __init__(self, layout):
        positions = []
        sharedptrs = []
        arrays = []
        # Recursively populate the three parallel lists from the layout tree.
        tolookup(layout, positions, sharedptrs, arrays)
        assert len(positions) == len(sharedptrs)

        # Replace an array entry by the index of the identical array in
        # ``arrays``; plain integer entries pass through unchanged.
        def find(x):
            for i, array in enumerate(arrays):
                if x is array:
                    return i
            assert isinstance(x, int)
            return x
        self.original_positions = positions
        self.positions = [find(x) for x in positions]
        # Keep Python references alive so the raw pointers below stay valid.
        self.sharedptrs_hold = sharedptrs
        self.arrays = arrays

        # Raw data address for an array entry; integers pass through.
        def arrayptr(x):
            if isinstance(x, int):
                return x
            else:
                return x.ctypes.data

        # Encode a shared pointer: None -> -1, 0 -> 0, else its address.
        def sharedptr(x):
            if x is None:
                return -1
            elif x == 0:
                return 0
            else:
                return x.ptr()
        self.nplike = ak.nplike.of(layout)
        self.arrayptrs = self.nplike.array(
            [arrayptr(x) for x in positions], dtype=np.intp
        )
        self.sharedptrs = self.nplike.array(
            [sharedptr(x) for x in sharedptrs], dtype=np.intp
        )

    def _view_as_array(self):
        # Debug helper: one row per entry, columns (index, arrayptr, sharedptr).
        return self.nplike.vstack(
            [self.nplike.arange(len(self.arrayptrs)), self.arrayptrs, self.sharedptrs]
        ).T
def tolookup(layout, positions, sharedptrs, arrays):
    """Dispatch ``layout`` to the matching numba layout type's lookup builder.

    Content (``ak.layout.*``) instances go to ``tolookup``; Form
    (``ak.forms.*``) instances go to ``form_tolookup``. The first matching
    entry wins, mirroring the original isinstance chain order.
    """
    numba_layout = ak._connect._numba.layout
    dispatch = [
        (ak.layout.NumpyArray, numba_layout.NumpyArrayType.tolookup),
        (ak.forms.NumpyForm, numba_layout.NumpyArrayType.form_tolookup),
        (ak.layout.RegularArray, numba_layout.RegularArrayType.tolookup),
        (ak.forms.RegularForm, numba_layout.RegularArrayType.form_tolookup),
        (
            (
                ak.layout.ListArray32,
                ak.layout.ListArrayU32,
                ak.layout.ListArray64,
                ak.layout.ListOffsetArray32,
                ak.layout.ListOffsetArrayU32,
                ak.layout.ListOffsetArray64,
            ),
            numba_layout.ListArrayType.tolookup,
        ),
        (
            (ak.forms.ListForm, ak.forms.ListOffsetForm),
            numba_layout.ListArrayType.form_tolookup,
        ),
        (
            (
                ak.layout.IndexedArray32,
                ak.layout.IndexedArrayU32,
                ak.layout.IndexedArray64,
            ),
            numba_layout.IndexedArrayType.tolookup,
        ),
        (ak.forms.IndexedForm, numba_layout.IndexedArrayType.form_tolookup),
        (
            (ak.layout.IndexedOptionArray32, ak.layout.IndexedOptionArray64),
            numba_layout.IndexedOptionArrayType.tolookup,
        ),
        (ak.forms.IndexedOptionForm, numba_layout.IndexedOptionArrayType.form_tolookup),
        (ak.layout.ByteMaskedArray, numba_layout.ByteMaskedArrayType.tolookup),
        (ak.forms.ByteMaskedForm, numba_layout.ByteMaskedArrayType.form_tolookup),
        (ak.layout.BitMaskedArray, numba_layout.BitMaskedArrayType.tolookup),
        (ak.forms.BitMaskedForm, numba_layout.BitMaskedArrayType.form_tolookup),
        (ak.layout.UnmaskedArray, numba_layout.UnmaskedArrayType.tolookup),
        (ak.forms.UnmaskedForm, numba_layout.UnmaskedArrayType.form_tolookup),
        (ak.layout.RecordArray, numba_layout.RecordArrayType.tolookup),
        (ak.forms.RecordForm, numba_layout.RecordArrayType.form_tolookup),
        (ak.layout.Record, numba_layout.RecordType.tolookup),
        (
            (
                ak.layout.UnionArray8_32,
                ak.layout.UnionArray8_U32,
                ak.layout.UnionArray8_64,
            ),
            numba_layout.UnionArrayType.tolookup,
        ),
        (ak.forms.UnionForm, numba_layout.UnionArrayType.form_tolookup),
        (ak.layout.VirtualArray, numba_layout.VirtualArrayType.tolookup),
        (ak.forms.VirtualForm, numba_layout.VirtualArrayType.form_tolookup),
    ]
    for matched_types, handler in dispatch:
        if isinstance(layout, matched_types):
            return handler(layout, positions, sharedptrs, arrays)
    raise AssertionError(
        f"unrecognized Content or Form type: {type(layout)}"
        + ak._util.exception_suffix(__file__)
    )
def tonumbatype(form):
    """Return the numba layout type corresponding to an awkward Form.

    The table preserves the original isinstance-chain order; the first
    matching Form class wins.
    """
    numba_layout = ak._connect._numba.layout
    form_to_type = [
        (ak.forms.NumpyForm, numba_layout.NumpyArrayType),
        (ak.forms.RegularForm, numba_layout.RegularArrayType),
        ((ak.forms.ListForm, ak.forms.ListOffsetForm), numba_layout.ListArrayType),
        (ak.forms.IndexedForm, numba_layout.IndexedArrayType),
        (ak.forms.IndexedOptionForm, numba_layout.IndexedOptionArrayType),
        (ak.forms.ByteMaskedForm, numba_layout.ByteMaskedArrayType),
        (ak.forms.BitMaskedForm, numba_layout.BitMaskedArrayType),
        (ak.forms.UnmaskedForm, numba_layout.UnmaskedArrayType),
        (ak.forms.RecordForm, numba_layout.RecordArrayType),
        (ak.forms.UnionForm, numba_layout.UnionArrayType),
        (ak.forms.VirtualForm, numba_layout.VirtualArrayType),
    ]
    for form_classes, numba_type in form_to_type:
        if isinstance(form, form_classes):
            return numba_type.from_form(form)
    raise AssertionError(
        f"unrecognized Form type: {type(form)}"
        + ak._util.exception_suffix(__file__)
    )
@numba.extending.typeof_impl.register(Lookup)
def typeof_Lookup(obj, c):
    # Tell Numba's type inference that every Lookup instance has the
    # (singleton) LookupType when it crosses into jitted code.
    return LookupType()
class LookupType(numba.types.Type):
    """Numba type for Lookup: two 1-d C-contiguous intp pointer tables."""

    # Shared element type for both the arrayptrs and sharedptrs members.
    arraytype = numba.types.Array(numba.intp, 1, "C")

    def __init__(self):
        super().__init__(name="ak.LookupType()")
@numba.extending.register_model(LookupType)
class LookupModel(numba.core.datamodel.models.StructModel):
    """Native data model: a struct of the two intp arrays of a Lookup."""

    def __init__(self, dmm, fe_type):
        members = [("arrayptrs", fe_type.arraytype), ("sharedptrs", fe_type.arraytype)]
        super().__init__(dmm, fe_type, members)
@numba.extending.unbox(LookupType)
def unbox_Lookup(lookuptype, lookupobj, c):
    """Unbox a Python Lookup into its native struct.

    Fetches the two array attributes off the Python object, unboxes each
    into the corresponding struct member, then releases the temporary
    attribute references.
    """
    arrayptrs_obj = c.pyapi.object_getattr_string(lookupobj, "arrayptrs")
    sharedptrs_obj = c.pyapi.object_getattr_string(lookupobj, "sharedptrs")
    proxyout = c.context.make_helper(c.builder, lookuptype)
    proxyout.arrayptrs = c.pyapi.to_native_value(
        lookuptype.arraytype, arrayptrs_obj
    ).value
    proxyout.sharedptrs = c.pyapi.to_native_value(
        lookuptype.arraytype, sharedptrs_obj
    ).value
    # Drop the attribute references obtained above; the native arrays now
    # hold their own view of the data.
    c.pyapi.decref(arrayptrs_obj)
    c.pyapi.decref(sharedptrs_obj)
    is_error = numba.core.cgutils.is_not_null(c.builder, c.pyapi.err_occurred())
    return numba.extending.NativeValue(proxyout._getvalue(), is_error)
########## ArrayView
class ArrayView:
    """Pure-Python proxy that lets an Awkward Array cross the Numba boundary.

    Records the layout's Numba type, the behavior dict, a Lookup of
    flattened buffer pointers, and the [start, stop) window plus any
    projected fields.
    """

    @classmethod
    def fromarray(cls, array):
        """Build an ArrayView (or PartitionedView for partitioned input)
        from a high-level array.

        :raises ValueError: if a partitioned array's partitions do not all
            lower to the same Numba type, or it has no partitions at all.
        """
        behavior = ak._util.behaviorof(array)
        layout = ak.operations.convert.to_layout(
            array,
            allow_record=False,
            allow_other=False,
            numpytype=(np.number, bool, np.bool_),
        )
        # Unwrap lazy slice wrappers so we view the materialized array.
        while isinstance(layout, ak.layout.VirtualArray) and isinstance(
            layout.generator, ak.layout.SliceGenerator
        ):
            layout = layout.array
        layout = ak.operations.convert.regularize_numpyarray(
            layout, allow_empty=False, highlevel=False
        )
        if isinstance(layout, ak.partition.PartitionedArray):
            # All partitions must lower to the same Numba type.
            numba_type = None
            for part in layout.partitions:
                if numba_type is None:
                    numba_type = ak._connect._numba.layout.typeof(part)
                elif numba_type != ak._connect._numba.layout.typeof(part):
                    raise ValueError(
                        "partitioned arrays can only be used in Numba if all "
                        "partitions have the same numba_type"
                        + ak._util.exception_suffix(__file__)
                    )
            if numba_type is None:
                # Previously this fell through to a NameError on the leaked
                # loop variable; make the degenerate case explicit.
                raise ValueError(
                    "partitioned array has no partitions"
                    + ak._util.exception_suffix(__file__)
                )
            # Reuse the type computed in the loop instead of re-deriving it
            # from the leaked loop variable `part`.
            return PartitionedView(
                numba_type,
                behavior,
                [Lookup(x) for x in layout.partitions],
                ak.nplike.of(layout).asarray(layout.stops, dtype=np.intp),
                0,
                len(layout),
                (),
            )
        else:
            return ArrayView(
                ak._connect._numba.layout.typeof(layout),
                behavior,
                Lookup(layout),
                0,
                0,
                len(layout),
                (),
            )

    def __init__(self, type, behavior, lookup, pos, start, stop, fields):
        self.type = type
        self.behavior = behavior
        self.lookup = lookup
        self.pos = pos
        self.start = start
        self.stop = stop
        self.fields = fields

    def toarray(self):
        """Rebuild the high-level array for this view's [start, stop) window."""
        layout = self.type.tolayout(self.lookup, self.pos, self.fields)
        sliced = layout.getitem_range_nowrap(self.start, self.stop)
        return ak._util.wrap(sliced, self.behavior)
@numba.extending.typeof_impl.register(ArrayView)
def typeof_ArrayView(obj, c):
    # The Numba type of a view is parameterized by its layout type,
    # behavior dict, and projected fields.
    return ArrayViewType(obj.type, obj.behavior, obj.fields)
def wrap(type, viewtype, fields):
    """Derive an ArrayViewType for `type`, inheriting the behavior of
    `viewtype` and its fields too unless `fields` overrides them."""
    effective_fields = viewtype.fields if fields is None else fields
    return ArrayViewType(type, viewtype.behavior, effective_fields)
class ArrayViewType(numba.types.IterableType, numba.types.Sized):
    """Numba type of an ArrayView.

    The type name encodes layout type, behavior, and fields, so views
    that differ in any of these are distinct Numba types.
    """

    def __init__(self, type, behavior, fields):
        super().__init__(
            name="ak.ArrayView({}, {}, {})".format(
                type.name,
                ak._connect._numba.repr_behavior(behavior),
                repr(fields),
            )
        )
        self.type = type
        self.behavior = behavior
        self.fields = fields

    @property
    def iterator_type(self):
        # Required by IterableType: iterating a view produces IteratorType
        # (defined elsewhere in this module).
        return IteratorType(self)


@numba.extending.register_model(ArrayViewType)
class ArrayViewModel(numba.core.datamodel.models.StructModel):
    """Native struct backing an ArrayView: window bounds, the raw pointer
    tables of its Lookup, and the original Python Lookup object (kept so
    the view can be boxed back to Python)."""

    def __init__(self, dmm, fe_type):
        members = [
            ("pos", numba.intp),
            ("start", numba.intp),
            ("stop", numba.intp),
            ("arrayptrs", numba.types.CPointer(numba.intp)),
            ("sharedptrs", numba.types.CPointer(numba.intp)),
            ("pylookup", numba.types.pyobject),
        ]
        super().__init__(dmm, fe_type, members)
@numba.core.imputils.lower_constant(ArrayViewType)
def lower_const_Array(context, builder, viewtype, array):
    # A constant ak.Array is lowered through its prebuilt _numbaview.
    return lower_const_view(context, builder, viewtype, array._numbaview)


def lower_const_view(context, builder, viewtype, view):
    """Embed a compile-time-constant ArrayView into the generated code."""
    lookup = view.lookup
    arrayptrs = lookup.arrayptrs
    sharedptrs = lookup.sharedptrs
    pos = view.pos
    start = view.start
    stop = view.stop
    # Freeze the two pointer tables as constant arrays in the module.
    arrayptrs_val = context.make_constant_array(
        builder, numba.typeof(arrayptrs), arrayptrs
    )
    sharedptrs_val = context.make_constant_array(
        builder, numba.typeof(sharedptrs), sharedptrs
    )
    proxyout = context.make_helper(builder, viewtype)
    proxyout.pos = context.get_constant(numba.intp, pos)
    proxyout.start = context.get_constant(numba.intp, start)
    proxyout.stop = context.get_constant(numba.intp, stop)
    proxyout.arrayptrs = context.make_helper(
        builder, numba.typeof(arrayptrs), arrayptrs_val
    ).data
    proxyout.sharedptrs = context.make_helper(
        builder, numba.typeof(sharedptrs), sharedptrs_val
    ).data
    # Store the Lookup's raw address for later boxing.  NOTE(review): this
    # assumes `lookup` (via `view`) stays alive as long as the compiled
    # function -- confirm ownership upstream.
    proxyout.pylookup = context.add_dynamic_addr(
        builder, id(lookup), info=str(type(lookup))
    )
    return proxyout._getvalue()
@numba.extending.unbox(ArrayViewType)
def unbox_Array(viewtype, arrayobj, c):
    """Unbox an ak.Array by delegating to its prebuilt _numbaview."""
    view_obj = c.pyapi.object_getattr_string(arrayobj, "_numbaview")
    out = unbox_ArrayView(viewtype, view_obj, c)
    c.pyapi.decref(view_obj)
    return out


def unbox_ArrayView(viewtype, view_obj, c):
    """Unbox an ArrayView object into the native struct of ArrayViewModel."""
    lookup_obj = c.pyapi.object_getattr_string(view_obj, "lookup")
    pos_obj = c.pyapi.object_getattr_string(view_obj, "pos")
    start_obj = c.pyapi.object_getattr_string(view_obj, "start")
    stop_obj = c.pyapi.object_getattr_string(view_obj, "stop")
    lookup_val = c.pyapi.to_native_value(LookupType(), lookup_obj).value
    lookup_proxy = c.context.make_helper(c.builder, LookupType(), lookup_val)
    proxyout = c.context.make_helper(c.builder, viewtype)
    proxyout.pos = c.pyapi.number_as_ssize_t(pos_obj)
    proxyout.start = c.pyapi.number_as_ssize_t(start_obj)
    proxyout.stop = c.pyapi.number_as_ssize_t(stop_obj)
    # Only the raw data pointers of the Lookup's two arrays are stored.
    proxyout.arrayptrs = c.context.make_helper(
        c.builder, LookupType.arraytype, lookup_proxy.arrayptrs
    ).data
    proxyout.sharedptrs = c.context.make_helper(
        c.builder, LookupType.arraytype, lookup_proxy.sharedptrs
    ).data
    # pylookup holds the object without an extra incref (it is decref'd
    # just below) -- NOTE(review): relies on the caller keeping the view
    # alive for the duration of the native call; confirm.
    proxyout.pylookup = lookup_obj
    c.pyapi.decref(lookup_obj)
    c.pyapi.decref(pos_obj)
    c.pyapi.decref(start_obj)
    c.pyapi.decref(stop_obj)
    if c.context.enable_nrt:
        c.context.nrt.decref(c.builder, LookupType(), lookup_val)
    is_error = numba.core.cgutils.is_not_null(c.builder, c.pyapi.err_occurred())
    return numba.extending.NativeValue(proxyout._getvalue(), is_error)
@numba.extending.box(ArrayViewType)
def box_Array(viewtype, viewval, c):
    """Box a native view as a high-level array via ArrayView.toarray()."""
    arrayview_obj = box_ArrayView(viewtype, viewval, c)
    out = c.pyapi.call_method(arrayview_obj, "toarray", ())
    c.pyapi.decref(arrayview_obj)
    return out
def dict2serializable(obj):
    """Flatten a dict into a picklable tuple of (key, value) pairs.

    None (meaning "no behavior") passes through unchanged.
    """
    return None if obj is None else tuple(obj.items())


def serializable2dict(obj):
    """Inverse of dict2serializable: rebuild the dict from item pairs.

    None passes through unchanged.
    """
    return None if obj is None else dict(obj)
def box_ArrayView(viewtype, viewval, c):
    """Box a native view struct back into a Python ArrayView.

    The behavior dict is serialized as a tuple of items at compile time
    (dict2serializable) and rebuilt at box time (serializable2dict).
    """
    serializable2dict_obj = c.pyapi.unserialize(
        c.pyapi.serialize_object(serializable2dict)
    )
    behavior2_obj = c.pyapi.unserialize(
        c.pyapi.serialize_object(dict2serializable(viewtype.behavior))
    )
    behavior_obj = c.pyapi.call_function_objargs(
        serializable2dict_obj, (behavior2_obj,)
    )
    ArrayView_obj = c.pyapi.unserialize(c.pyapi.serialize_object(ArrayView))
    type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(viewtype.type))
    fields_obj = c.pyapi.unserialize(c.pyapi.serialize_object(viewtype.fields))
    proxyin = c.context.make_helper(c.builder, viewtype, viewval)
    pos_obj = c.pyapi.long_from_ssize_t(proxyin.pos)
    start_obj = c.pyapi.long_from_ssize_t(proxyin.start)
    stop_obj = c.pyapi.long_from_ssize_t(proxyin.stop)
    lookup_obj = proxyin.pylookup
    out = c.pyapi.call_function_objargs(
        ArrayView_obj,
        (type_obj, behavior_obj, lookup_obj, pos_obj, start_obj, stop_obj, fields_obj),
    )
    # Release every temporary except lookup_obj, which was never
    # incref'd here (it is the struct's borrowed pointer).
    c.pyapi.decref(serializable2dict_obj)
    c.pyapi.decref(behavior2_obj)
    c.pyapi.decref(behavior_obj)
    c.pyapi.decref(ArrayView_obj)
    c.pyapi.decref(type_obj)
    c.pyapi.decref(fields_obj)
    c.pyapi.decref(pos_obj)
    c.pyapi.decref(start_obj)
    c.pyapi.decref(stop_obj)
    return out
@numba.core.typing.templates.infer_global(len)
class type_len(numba.core.typing.templates.AbstractTemplate):
    """Typing: len(ArrayView) -> intp.

    Returning None (implicitly) for other argument types leaves the
    remaining len() overloads untouched.
    """

    def generic(self, args, kwargs):
        if len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], ArrayViewType):
            return numba.intp(args[0])


@numba.extending.lower_builtin(len, ArrayViewType)
def lower_len(context, builder, sig, args):
    # len(view) == stop - start
    proxyin = context.make_helper(builder, sig.args[0], args[0])
    return builder.sub(proxyin.stop, proxyin.start)
@numba.core.typing.templates.infer_global(operator.getitem)
class type_getitem(numba.core.typing.templates.AbstractTemplate):
    """Typing for view[where].

    Supports an integer index, a step-less slice, or a compile-time
    constant string (field projection); anything else is a TypeError at
    compile time.  The result type is delegated to the layout type.
    """

    def generic(self, args, kwargs):
        if len(args) == 2 and len(kwargs) == 0 and isinstance(args[0], ArrayViewType):
            viewtype, wheretype = args
            if isinstance(wheretype, numba.types.Integer):
                return viewtype.type.getitem_at_check(viewtype)(viewtype, wheretype)
            elif (
                isinstance(wheretype, numba.types.SliceType) and not wheretype.has_step
            ):
                return viewtype.type.getitem_range(viewtype)(viewtype, wheretype)
            elif isinstance(wheretype, numba.types.StringLiteral):
                return viewtype.type.getitem_field(viewtype, wheretype.literal_value)(
                    viewtype, wheretype
                )
            else:
                raise TypeError(
                    "only an integer, start:stop range, or a *constant* "
                    "field name string may be used as ak.Array "
                    "slices in compiled code" + ak._util.exception_suffix(__file__)
                )
@numba.extending.lower_builtin(operator.getitem, ArrayViewType, numba.types.Integer)
def lower_getitem_at(context, builder, sig, args):
    """Lowering for view[int]; the actual code generation is delegated to
    the layout type (the two trailing True flags are positional options
    of lower_getitem_at_check -- wrap/check behavior, defined there)."""
    rettype, (viewtype, wheretype) = sig.return_type, sig.args
    viewval, whereval = args
    viewproxy = context.make_helper(builder, viewtype, viewval)
    return viewtype.type.lower_getitem_at_check(
        context,
        builder,
        rettype,
        viewtype,
        viewval,
        viewproxy,
        wheretype,
        whereval,
        True,
        True,
    )


@numba.extending.lower_builtin(operator.getitem, ArrayViewType, numba.types.slice2_type)
def lower_getitem_range(context, builder, sig, args):
    """Lowering for view[start:stop] (slice without a step)."""
    rettype, (viewtype, wheretype) = sig.return_type, sig.args
    viewval, whereval = args
    viewproxy = context.make_helper(builder, viewtype, viewval)
    whereproxy = context.make_helper(builder, wheretype, whereval)
    return viewtype.type.lower_getitem_range(
        context,
        builder,
        rettype,
        viewtype,
        viewval,
        viewproxy,
        whereproxy.start,
        whereproxy.stop,
        True,
    )
@numba.extending.lower_builtin(
    operator.getitem, ArrayViewType, numba.types.StringLiteral
)
def lower_getitem_field(context, builder, sig, args):
    """Lowering for view["fieldname"] with a compile-time-constant name."""
    _, (viewtype, wheretype) = sig.return_type, sig.args
    viewval, whereval = args
    return viewtype.type.lower_getitem_field(
        context, builder, viewtype, viewval, wheretype.literal_value
    )
@numba.core.typing.templates.infer_getattr
class type_getattr(numba.core.typing.templates.AttributeTemplate):
key = ArrayViewType
def | |
if not java_threading.currentVMThread == "main_no_init":
java_threading.currentVMThread.STATE =make_String("WAITING", loader)
temp = java_threading.currentVMThread
interp.interp_lock.release()
# if there is an other thread it will get the
# exc. control here!
interp.interp_lock.acquire()
java_threading.currentVMThread = temp
if not java_threading.currentVMThread == "main_no_init":
java_threading.currentVMThread.STATE =make_String("RUNNABLE", loader)
def VMThread_interrupted(locals, loader, cls, method):
    """VMThread.interrupted(): test-and-clear the current thread's
    interrupt flag, returning its previous value (like the Java API)."""
    import java_threading
    current = java_threading.currentVMThread
    was_interrupted = current.isInterrupted
    current.isInterrupted = False
    return was_interrupted
def VMThread_countStackFrame(locals, loader, cls, method):
    """Unimplemented native hook.

    Raises NotImplementedError; the previous `raise NotImplemented(...)`
    failed with a TypeError because NotImplemented is a sentinel value,
    not a callable exception class.
    """
    raise NotImplementedError("Hook Method")
# where does that come from?
# FIXME: MAYBE buggy -- this wraps the *current* loader, which is not
# necessarily the loader of the walked calling class.
def VMStackWalker_getClassLoader(locals, loader, cls, method):
    return ClassLoaderref(loader.getclass("java/lang/ClassLoader"), True, loader)
# TODO: this is wrong -- it must return an array of the classes on the
# call stack; here the loader's record object is returned directly.
def VMStackWalker_getClassContext(locals, loader, cls, method):
    classref = loader.called_classes
    assert isinstance(classref, Classref)
    return classref
def VMStackWalker_getCallingClass(locals, loader, cls, method):
    """Return the Classref of the caller's caller (index 1 on the
    recorded call stack), or None when the stack is too shallow."""
    try:
        caller = loader.called_classes.arrayref[1]
    except IndexError:
        return None
    assert isinstance(caller, Classref)
    return caller
def VMStackWalker_getCallingClassLoader(locals, loader, cls, method):
    """Return a ClassLoaderref wrapping the class loader of the caller's
    caller, or None when the recorded call stack is too shallow."""
    try:
        caller = loader.called_classes.arrayref[1]
    except IndexError:
        return None
    assert isinstance(caller, Classref)
    return ClassLoaderref(
        loader.getclass("java/lang/ClassLoader"), True, caller.classLoader)
def VMSystemProperties_preInit(locals, loader, cls, method):
    """Seed the java.util.Properties object with baseline system
    properties by invoking its setProperty(String, String) by hand.

    TODO: add the remaining system properties; the XXX-marked entries
    below are Linux defaults and need Windows counterparts.
    """
    # locals slot 0 holds the Properties instance being initialized.
    objectref = locals.get(0,"ref")
    assert objectref.jcls.__name__ == "java/util/Properties"
    acls = objectref.jcls.cls
    const = acls.constant_pool
    classNameIndex = const[acls.this_class].name_index
    clsName = const[classNameIndex]
    assert clsName == "java/util/Properties"
    # Mangled name of setProperty(String, String) -> Object in this VM's
    # internal method-naming scheme.
    real_name = "setProperty_reference_java__lang__String___reference_java__lang__String___reference_java__lang__Object"
    method_info = objectref.jcls.methods[unicode(real_name)]
    descr = descriptor(const[method_info.descriptor_index])
    invoke_setProperty(loader, acls, method_info, descr, objectref,"java.vm.name", "NoName")
    invoke_setProperty(loader, acls, method_info, descr, objectref,"java.vm.version", "1")
    invoke_setProperty(loader, acls, method_info, descr, objectref,"java.io.tmpdir", "/tmp") # default temp file path; XXX Windows
    invoke_setProperty(loader, acls, method_info, descr, objectref,"os.name", "Linux") # XXX Windows
    invoke_setProperty(loader, acls, method_info, descr, objectref,"file.separator", "/") # XXX Windows
    invoke_setProperty(loader, acls, method_info, descr, objectref,"path.separator", ":") # XXX Windows
    invoke_setProperty(loader, acls, method_info, descr, objectref,"line.separator", "\n") # XXX Windows
def VMSystemProperties_postInit(locals, loader, cls, method):
    # Unimplemented native hook.  NotImplementedError replaces the former
    # `raise NotImplemented(...)`, which raised TypeError because the
    # NotImplemented sentinel is not callable.
    raise NotImplementedError("Hook Method")
# replaced gnu/classpath by sun/misc
def Unsafe_objectFieldOffset(locals, loader, cls, method):
    """Unsafe.objectFieldOffset(Field): resolve the VMField behind the
    reflected Field (its "f" slot) and return its numeric "id" as the
    field offset."""
    field_ref = locals.get(1, "ref")
    vmfield_ref = field_ref.fields.get(u"f", "ref")
    return vmfield_ref.fields.get(u"id", "long")
# Unimplemented sun.misc.Unsafe hooks.  These previously did
# `raise NotImplemented(...)`, which fails with "TypeError:
# 'NotImplementedType' object is not callable"; the intended exception
# is NotImplementedError.
# replaced gnu/classpath by sun/misc
def Unsafe_compareAndSwap(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
# replaced gnu/classpath by sun/misc
def Unsafe_put(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
# replaced gnu/classpath by sun/misc
def Unsafe_get(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
# replaced gnu/classpath by sun/misc
def Unsafe_arrayBaseOffset(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
# replaced gnu/classpath by sun/misc
def Unsafe_park(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMath_sin(locals, loader, cls, method):
    """java.lang.Math hooks: each pulls its double operand(s) out of the
    locals frame and delegates to Python's math module.  A double takes
    two local slots, so a second operand lives at slot index 2."""
    operand = locals.get(0, "double")
    return math.sin(operand)
def VMMath_cos(locals, loader, cls, method):
    operand = locals.get(0, "double")
    return math.cos(operand)
def VMMath_tan(locals, loader, cls, method):
    operand = locals.get(0, "double")
    return math.tan(operand)
def VMMath_asin(locals, loader, cls, method):
    operand = locals.get(0, "double")
    return math.asin(operand)
def VMMath_acos(locals, loader, cls, method):
    operand = locals.get(0, "double")
    return math.acos(operand)
def VMMath_atan(locals, loader, cls, method):
    operand = locals.get(0, "double")
    return math.atan(operand)
def VMMath_atan2(locals, loader, cls, method):
    # second double operand starts at slot 2
    y = locals.get(0, "double")
    x = locals.get(2, "double")
    return math.atan2(y, x)
def VMMath_exp(locals, loader, cls, method):
    operand = locals.get(0, "double")
    return math.exp(operand)
def VMMath_log(locals, loader, cls, method):
    operand = locals.get(0, "double")
    return math.log(operand)
def VMMath_sqrt(locals, loader, cls, method):
    operand = locals.get(0, "double")
    return math.sqrt(operand)
def VMMath_pow(locals, loader, cls, method):
    # second double operand starts at slot 2
    base = locals.get(0, "double")
    exponent = locals.get(2, "double")
    return math.pow(base, exponent)
def VMMath_floor(locals, loader, cls, method):
    operand = locals.get(0, "double")
    return math.floor(operand)
def VMMath_ceil(locals, loader, cls, method):
    operand = locals.get(0, "double")
    return math.ceil(operand)
def VMMath_cosh(locals, loader, cls, method):
    operand = locals.get(0, "double")
    return math.cosh(operand)
def VMMath_sinh(locals, loader, cls, method):
    operand = locals.get(0, "double")
    return math.sinh(operand)
def VMMath_tanh(locals, loader, cls, method):
    operand = locals.get(0, "double")
    return math.tanh(operand)
def VMMath_log10(locals, loader, cls, method):
    operand = locals.get(0, "double")
    return math.log10(operand)
def VMMath_hypot(locals, loader, cls, method):
    leg_a = locals.get(0, "double")
    leg_b = locals.get(2, "double")
    return math.hypot(leg_a, leg_b)
def VMMath_log1p(locals, loader, cls, method):
    operand = locals.get(0, "double")
    return math.log1p(operand)
def VMMath_expm1(locals, loader, cls, method):
    """Math.expm1(x) = e**x - 1, accurate for small x.

    The previous code computed math.exp(x - 1.0), which is a different
    function entirely (expm1(0) must be 0, not e**-1); math.expm1 is
    both correct and numerically stable near zero.
    """
    return math.expm1(locals.get(0, "double"))
def VMMath_IEEEremainder(locals, loader, cls, method):
    # NOTE(review): likely wrong -- Java's Math.IEEEremainder rounds the
    # quotient to the nearest (even) integer, while math.fmod truncates
    # it toward zero, so e.g. (5.0, 3.0) gives -1.0 under IEEEremainder
    # but 2.0 under fmod.  math.remainder would be the correct call, but
    # it requires Python >= 3.7 and this file still uses Python 2 idioms
    # (unicode()); confirm and port a round-half-even remainder.
    return math.fmod(locals.get(0, "double"), locals.get(2, "double"))
def VMMath_cbrt(locals, loader, cls, method):
    """Math.cbrt: real cube root, defined for negative inputs too.

    The previous math.pow(value, 1.0/3) raised ValueError for negative
    values (fractional power of a negative base); take the root of the
    magnitude and restore the sign instead.
    """
    value = locals.get(0, "double")
    return math.copysign(abs(value) ** (1.0 / 3.0), value)
def VMMath_rint(locals, loader, cls, method):
    """Math.rint: round to the nearest integral value, ties to even.

    The previous implementation rounded every tie (x.5) upward, but the
    Java spec (IEEE 754 roundTiesToEven) requires rint(0.5) == 0.0 and
    rint(2.5) == 2.0.  Also avoids shadowing the builtins min/max.
    """
    value = locals.get(0, "double")
    lower = math.floor(value)
    upper = math.ceil(value)
    if lower == upper:
        # already integral
        return value
    fraction = value - lower
    if fraction < 0.5:
        return lower
    if fraction > 0.5:
        return upper
    # exact tie: pick the even neighbour
    return lower if math.fmod(lower, 2.0) == 0.0 else upper
# Unimplemented native hooks for java.io.File, AccessController, and
# InetAddress.  These previously did `raise NotImplemented(...)`, which
# fails with "TypeError: 'NotImplementedType' object is not callable"
# because NotImplemented is a comparison sentinel, not an exception;
# the intended exception is NotImplementedError.
def VMFile_lastModified(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_setReadOnly(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_create(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_list(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_renameTo(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_length(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_exists(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_delete(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_setLastModified(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_mkdir(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_isFile(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_canWrite(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_canRead(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_isDirectory(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_canWriteDirectory(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_listRoots(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_isHidden(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_getName(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMFile_getCanonicalForm(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMAccessController_pushContext(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMAccessController_popContext(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMAccessController_getContext(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMAccessController_getStack(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMInetAddress_getLocalHostname(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMInetAddress_lookupInaddrAny(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMInetAddress_getHostByAddr(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMInetAddress_getHostByName(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
# Unimplemented native hooks for DirectByteBuffer, Channels,
# Instrumentation, and the management (MXBean) interfaces.  As above,
# `raise NotImplemented(...)` raised TypeError (the NotImplemented
# sentinel is not callable); NotImplementedError is the intended
# exception.  A verbatim duplicate definition of
# VMThreadMXBeanImpl_getAllThreadIds has also been removed.
def VMDirectByteBuffer_init(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMDirectByteBuffer_allocate(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMDirectByteBuffer_free(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMDirectByteBuffer_get(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMDirectByteBuffer_put(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMDirectByteBuffer_adjustAddress(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMDirectByteBuffer_shiftDown(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMChannels_createStream(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMChannels_newInputStream(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMChannels_newOutputStream(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMInstrumentationImpl_isRedefineClassesSupported(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMInstrumentationImpl_redefineClasses(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMInstrumentationImpl_getAllLoadedClass(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMInstrumentationImpl_getInitiatedClass(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMInstrumentationImpl_getObjectSize(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMRuntimeMXBeanImpl_getInputArguments(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMRuntimeMXBeanImpl_getName(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMRuntimeMXBeanImpl_getStartTime(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMClassLoadingMXBeanImpl_getLoadedClassCount(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMClassLoadingMXBeanImpl_getUnloadedClassCount(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMClassLoadingMXBeanImpl_isVerbose(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMClassLoadingMXBeanImpl_setVerbose(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_findDeadlockedThreads(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_findMonitorDeadlockedThreads(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_getAllThreads(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_getAllThreadIds(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_getCurrentThreadCpuTime(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_getCurrentThreadUserTime(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_getDaemonThreadCount(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_getLockInfo(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_getMonitorInfo(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_getPeakThreadCount(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_resetPeakThreadCount(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_getThreadCount(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_getThreadCpuTime(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_getThreadUserTime(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_getThreadInfoForId(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMThreadMXBeanImpl_getTotalStartedThreadCount(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryMXBeanImpl_getHeapMemoryUsage(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryMXBeanImpl_getNonHeapMemoryUsage(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryMXBeanImpl_getObjectPendingFinalizationCount(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryMXBeanImpl_isVerbose(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryMXBeanImpl_setVerbose(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMCompilationMXBeanImpl_getTotalCompilationTime(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryPoolMXBeanImpl_getCollectionUsage(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryPoolMXBeanImpl_getCollectionUsageThreshold(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryPoolMXBeanImpl_getCollectionUsageThresholdCount(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryPoolMXBeanImpl_getMemoryManagerNames(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryPoolMXBeanImpl_getPeakUsage(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryPoolMXBeanImpl_getType(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryPoolMXBeanImpl_getUsage(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryPoolMXBeanImpl_getUsageThreshold(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryPoolMXBeanImpl_getUsageThresholdCount(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryPoolMXBeanImpl_isValid(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryPoolMXBeanImpl_resetPeakUsage(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryPoolMXBeanImpl_setCollectionUsageThreshold(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryPoolMXBeanImpl_setUsageThreshold(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryManagerMXBeanImpl_getMemoryPoolNames(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMMemoryManagerMXBeanImpl_isValid(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMGarbageCollectorMXBeanImpl_getCollectionCount(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMGarbageCollectorMXBeanImpl_getCollectionTime(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMManagementFactory_getMemoryPoolNames(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMManagementFactory_getMemoryManagerNames(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
def VMManagementFactory_getGarbageCollectorNames(locals, loader, cls, method):
    raise NotImplementedError("Hook Method")
# Typed VMField setters: each delegates to vmfield_set_helper (defined
# elsewhere in this file) with the matching JVM type tag.  All return
# None; the write happens through the helper.
def VMField_setInt(locals, loader, cls, method):
    vmfield_set_helper(locals, "int")
def VMField_setByte(locals, loader, cls, method):
    vmfield_set_helper(locals, "byte")
def VMField_setShort(locals, loader, cls, method):
    vmfield_set_helper(locals, "short")
def VMField_setLong(locals, loader, cls, method):
    vmfield_set_helper(locals, "long")
def VMField_setFloat(locals, loader, cls, method):
    vmfield_set_helper(locals, "float")
def VMField_setDouble(locals, loader, cls, method):
    vmfield_set_helper(locals, "double")
def VMField_setBoolean(locals, loader, cls, method):
    vmfield_set_helper(locals, "boolean")
def VMField_setChar(locals, loader, cls, method):
    vmfield_set_helper(locals, "char")
def VMField_set(locals, loader, cls, method):
    # generic (reference-typed) setter
    vmfield_set_helper(locals, "ref")
# Typed VMField getters: each delegates to vmfield_get_helper (defined
# elsewhere in this file) with the matching JVM type tag and returns
# the field's value.
def VMField_getInt(locals, loader, cls, method):
    return vmfield_get_helper(locals, "int")
def VMField_getByte(locals, loader, cls, method):
    return vmfield_get_helper(locals, "byte")
def VMField_getShort(locals, loader, cls, method):
    return vmfield_get_helper(locals, "short")
def VMField_getLong(locals, loader, cls, method):
    return vmfield_get_helper(locals, "long")
def VMField_getFloat(locals, loader, cls, method):
    return vmfield_get_helper(locals, "float")
def VMField_getDouble(locals, loader, cls, method):
    return vmfield_get_helper(locals, "double")
def VMField_getBoolean(locals, loader, cls, method):
    return vmfield_get_helper(locals, "boolean")
def VMField_getChar(locals, loader, cls, method):
    return vmfield_get_helper(locals, "char")
def VMField_get(locals, loader, | |
transform = transforms.Compose(
[transforms.RandomResizedCrop(size=224, scale=scale),
transforms.RandomHorizontalFlip(),
get_color_distortion(s=color_distortion),
get_gaussian_blur(ks=23),
transforms.ToTensor()])
else:
transform = transforms.Compose(
[transforms.Resize(size=256),
transforms.CenterCrop(size=224),
transforms.ToTensor()])
if normalize:
transform = transforms.Compose(
[transform,
transforms.Normalize(
(0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))])
    def init_transform(root, samples, class_to_idx, seed,
                       keep_file=keep_file,
                       training=training):
        """Select which samples keep their labels at the start of training.

        Returns (new_targets, new_samples) as numpy arrays.  If a
        keep_file exists (and we are training), it lists the images whose
        labels are retained; otherwise labels are kept i.i.d. with
        probability 1 - unlabel_prob (closure variable from the enclosing
        function -- not visible here, confirm upstream) using a torch
        generator seeded with ``seed`` for reproducibility.
        """
        new_targets, new_samples = [], []
        if training and (keep_file is not None) and os.path.exists(keep_file):
            logger.info(f'Using {keep_file}')
            with open(keep_file, 'r') as rfile:
                for line in rfile:
                    # lines look like "<class>_<rest-of-filename>"
                    class_name = line.split('_')[0]
                    target = class_to_idx[class_name]
                    # strip the trailing newline
                    img = line.split('\n')[0]
                    new_samples.append(
                        (os.path.join(root, class_name, img),
                         target))
                    new_targets.append(target)
        else:
            logger.info('flipping coin to keep labels')
            g = torch.Generator()
            g.manual_seed(seed)
            for sample in samples:
                # keep the label only when the Bernoulli draw is 0
                if torch.bernoulli(torch.tensor(unlabel_prob), generator=g) == 0:
                    target = sample[1]
                    new_samples.append((sample[0], target))
                    new_targets.append(target)
        return np.array(new_targets), np.array(new_samples)
return transform, init_transform
def make_multicrop_transform(
    dataset_name,
    num_crops,
    size,
    crop_scale,
    normalize,
    color_distortion
):
    """Build the multi-crop augmentation pipeline for a dataset.

    :param dataset_name: identifier containing 'imagenet' or 'cifar10'
    :param num_crops: number of crops to take per image
    :param size: output side length of each crop
    :param crop_scale: (min, max) area fraction for RandomResizedCrop
    :param normalize: whether to append channel normalization
    :param color_distortion: color-jitter strength
    :return: (num_crops, transform) tuple
    :raises ValueError: for an unsupported dataset_name (previously this
        silently returned None, deferring the failure to the caller)
    """
    if 'imagenet' in dataset_name:
        return _make_multicrop_imgnt_transforms(
            num_crops=num_crops,
            size=size,
            scale=crop_scale,
            normalize=normalize,
            color_distortion=color_distortion)
    elif 'cifar10' in dataset_name:
        return _make_multicrop_cifar10_transforms(
            num_crops=num_crops,
            size=size,
            scale=crop_scale,
            normalize=normalize,
            color_distortion=color_distortion)
    raise ValueError(f'unsupported dataset for multicrop: "{dataset_name}"')
def _make_multicrop_cifar10_transforms(
    num_crops,
    size=18,
    scale=(0.3, 0.75),
    normalize=False,
    color_distortion=0.5
):
    """Build the CIFAR-10 multi-crop transform.

    Pipeline: random resized crop -> horizontal flip -> color distortion
    (jitter / solarize / equalize) -> tensor [-> normalize].
    Returns a (num_crops, transform) tuple.
    """
    def get_color_distortion(s=1.0):
        # s is the strength of the color distortion.  Use the module
        # logger instead of print() for consistency with the rest of the
        # file (see logger.debug/info calls elsewhere).
        logger.debug('_make_multicrop_cifar10_transforms distortion strength %s', s)
        color_jitter = transforms.ColorJitter(0.8*s, 0.8*s, 0.8*s, 0.2*s)
        rnd_color_jitter = transforms.RandomApply([color_jitter], p=0.8)

        def Solarize(img):
            # random solarize threshold in [0, 256)
            v = np.random.uniform(0, 256)
            return PIL.ImageOps.solarize(img, v)
        solarize = transforms.Lambda(Solarize)
        rnd_solarize = transforms.RandomApply([solarize], p=0.2)

        def Equalize(img):
            return PIL.ImageOps.equalize(img)
        equalize = transforms.Lambda(Equalize)
        rnd_equalize = transforms.RandomApply([equalize], p=0.2)

        color_distort = transforms.Compose([
            rnd_color_jitter,
            rnd_solarize,
            rnd_equalize])
        return color_distort

    transform = transforms.Compose(
        [transforms.RandomResizedCrop(size=size, scale=scale),
         transforms.RandomHorizontalFlip(),
         get_color_distortion(s=color_distortion),
         transforms.ToTensor()])
    if normalize:
        transform = transforms.Compose(
            [transform,
             transforms.Normalize(
                 (0.4914, 0.4822, 0.4465),
                 (0.2023, 0.1994, 0.2010))])
    return (num_crops, transform)
def _make_multicrop_imgnt_transforms(
    num_crops,
    size=96,
    scale=(0.05, 0.14),
    normalize=False,
    color_distortion=1.0,
):
    """Build the ImageNet multi-crop transform.

    Pipeline: random resized crop -> horizontal flip -> SimCLR-style
    color distortion -> random Gaussian blur -> tensor [-> normalize].
    Returns a (num_crops, transform) tuple.
    """
    def _color_distortion(s=1.0):
        # jitter with strength s, then random grayscale
        jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
        return transforms.Compose([
            transforms.RandomApply([jitter], p=0.8),
            transforms.RandomGrayscale(p=0.2),
        ])

    def _gaussian_blur(ks=25):
        # NOTE: ks is accepted but unused (kept for call-site
        # compatibility); blur strength comes from the random radius.
        def blur(img):
            radius_lo, radius_hi = 0.1, 2.0
            return img.filter(ImageFilter.GaussianBlur(
                radius=np.random.uniform(radius_lo, radius_hi)))
        return transforms.RandomApply([transforms.Lambda(blur)], p=0.5)

    logger.debug('making multicrop transforms')
    transform = transforms.Compose([
        transforms.RandomResizedCrop(size=size, scale=scale),
        transforms.RandomHorizontalFlip(),
        _color_distortion(s=color_distortion),
        _gaussian_blur(ks=23),
        transforms.ToTensor(),
    ])
    if normalize:
        transform = transforms.Compose([
            transform,
            transforms.Normalize(
                (0.485, 0.456, 0.406),
                (0.229, 0.224, 0.225)),
        ])
    return (num_crops, transform)
class ClassStratifiedSampler(torch.utils.data.Sampler):
    def __init__(
        self,
        data_source,
        world_size,
        rank,
        batch_size=1,
        classes_per_batch=10,
        epochs=1,
        seed=0,
        unique_classes=False
    ):
        """
        ClassStratifiedSampler

        Batch-sampler that samples 'batch-size' images from subset of randomly
        chosen classes e.g., if classes a,b,c are randomly sampled,
        the sampler returns
            torch.cat([a,b,c], [a,b,c], ..., [a,b,c], dim=0)
        where a,b,c, are images from classes a,b,c respectively.
        Sampler, samples images WITH REPLACEMENT (i.e., not epoch-based)

        :param data_source: dataset of type "TransImageNet" or "TransCIFAR10'
        :param world_size: total number of workers in network
        :param rank: local rank in network
        :param batch_size: num. images to load from each class
        :param classes_per_batch: num. classes to randomly sample for batch
        :param epochs: num consecutive epochs thru data_source before gen.reset
        :param seed: common seed across workers for subsampling classes
        :param unique_classes: true ==> each worker samples a distinct set of classes; false ==> all workers sample the same classes
        """
        super(ClassStratifiedSampler, self).__init__(data_source)
        self.data_source = data_source
        self.rank = rank
        self.world_size = world_size
        self.cpb = classes_per_batch
        self.unique_cpb = unique_classes
        self.batch_size = batch_size
        # data_source must expose `.classes` (list of class names) and
        # `.target_indices` (per-class sample indices; see _get_local_samplers).
        self.num_classes = len(data_source.classes)
        self.epochs = epochs
        self.outer_epoch = 0
        # When all workers share the same classes, each epoch walks the full
        # class permutation in cpb-sized windows, so cpb must divide evenly.
        if not self.unique_cpb:
            assert self.num_classes % self.cpb == 0
        self.base_seed = seed  # instance seed (fixed; used for data shuffling)
        self.seed = seed  # subsample sampler seed (advances with each new class permutation)
    def set_epoch(self, epoch):
        # Called by the training loop so shuffling differs across outer epochs.
        self.outer_epoch = epoch
    def set_inner_epochs(self, epochs):
        # Number of consecutive passes through data_source per __iter__ call.
        self.epochs = epochs
    def _next_perm(self):
        # Draw a fresh class permutation; incrementing the shared seed keeps
        # all workers' permutations in lock-step.
        self.seed += 1
        g = torch.Generator()
        g.manual_seed(self.seed)
        self._perm = torch.randperm(self.num_classes, generator=g)
    def _get_perm_ssi(self):
        # Current window of `cpb` class ids from the permutation, starting at
        # the subsample-start-index (_ssi).
        start = self._ssi
        end = self._ssi + self.cpb
        subsample = self._perm[start:end]
        return subsample
    def _next_ssi(self):
        # Advance the class window; wrap around (and re-permute) when the
        # permutation is exhausted.
        if not self.unique_cpb:
            self._ssi = (self._ssi + self.cpb) % self.num_classes
            if self._ssi == 0:
                self._next_perm()
        else:
            # Each worker owns a disjoint stride of the permutation.
            self._ssi += self.cpb * self.world_size
            max_end = self._ssi + self.cpb * (self.world_size - self.rank)
            if max_end > self.num_classes:
                self._ssi = self.rank * self.cpb
                self._next_perm()
    def _get_local_samplers(self, epoch):
        """ Generate samplers for local data set in given epoch """
        # Seed combines base seed, inner epoch, rank and outer epoch so each
        # (worker, epoch) pair shuffles its shard differently but reproducibly.
        seed = int(self.base_seed + epoch
                   + self.epochs * self.rank
                   + self.outer_epoch * self.epochs * self.world_size)
        g = torch.Generator()
        g.manual_seed(seed)
        samplers = []
        for t in range(self.num_classes):
            t_indices = np.array(self.data_source.target_indices[t])
            if not self.unique_cpb:
                # Shared classes: shard each class's indices across workers.
                i_size = len(t_indices) // self.world_size
                if i_size > 0:
                    t_indices = t_indices[self.rank*i_size:(self.rank+1)*i_size]
            if len(t_indices) > 1:
                t_indices = t_indices[torch.randperm(len(t_indices), generator=g)]
            samplers.append(iter(t_indices))
        return samplers
    def _subsample_samplers(self, samplers):
        """ Subsample a small set of samplers from all class-samplers """
        subsample = self._get_perm_ssi()
        subsampled_samplers = []
        for i in subsample:
            subsampled_samplers.append(samplers[i])
        self._next_ssi()
        # zip yields one index per selected class at a time, so consecutive
        # next() calls produce class-stratified groups of size cpb.
        return zip(*subsampled_samplers)
    def __iter__(self):
        self._ssi = self.rank*self.cpb if self.unique_cpb else 0
        self._next_perm()
        # -- iterations per epoch (extract batch-size samples from each class)
        ipe = (self.num_classes // self.cpb if not self.unique_cpb
               else self.num_classes // (self.cpb * self.world_size)) * self.batch_size
        for epoch in range(self.epochs):
            # -- shuffle class order
            samplers = self._get_local_samplers(epoch)
            subsampled_samplers = self._subsample_samplers(samplers)
            counter, batch = 0, []
            for i in range(ipe):
                batch += list(next(subsampled_samplers))
                counter += 1
                if counter == self.batch_size:
                    yield batch
                    counter, batch = 0, []
                    # Pick the next window of classes unless we just emitted
                    # the final batch of this epoch.
                    if i + 1 < ipe:
                        subsampled_samplers = self._subsample_samplers(samplers)
    def __len__(self):
        # Number of batches yielded over all inner epochs (0 if batch_size=0).
        if self.batch_size == 0:
            return 0
        ipe = (self.num_classes // self.cpb if not self.unique_cpb
               else self.num_classes // (self.cpb * self.world_size))
        return self.epochs * ipe
class ImageNet(torchvision.datasets.ImageFolder):
    def __init__(
        self,
        root,
        image_folder='imagenet_full_size/061417/',
        tar_folder='imagenet_full_size/',
        tar_file='imagenet_full_size-061417.tar',
        train=True,
        transform=None,
        target_transform=None,
        job_id=None,
        local_rank=None,
        copy_data=True
    ):
        """
        ImageNet

        Dataset wrapper (can copy data locally to machine)

        :param root: root network directory for ImageNet data
        :param image_folder: path to images inside root network directory
        :param tar_file: zipped image_folder inside root network directory
        :param train: whether to load train data (or validation)
        :param transform: data-augmentations (applied in data-loader)
        :param target_transform: target-transform to apply in data-loader
        :param job_id: scheduler job-id used to create dir on local machine
        :param copy_data: whether to copy data from network file locally
        """
        split = 'train/' if train else 'val/'
        local_path = None
        if copy_data:
            logger.info('copying data locally')
            local_path = copy_imgnt_locally(
                root=root,
                suffix=split,
                image_folder=image_folder,
                tar_folder=tar_folder,
                tar_file=tar_file,
                job_id=job_id,
                local_rank=local_rank)
        # Fall back to the network path when copying was disabled or failed.
        if local_path is None:
            local_path = os.path.join(root, image_folder, split)
        logger.info(f'data-path {local_path}')
        super(ImageNet, self).__init__(
            root=local_path,
            transform=transform,
            target_transform=target_transform)
        logger.info('Initialized ImageNet')
class TransImageNet(ImageNet):
    def __init__(
        self,
        dataset,
        supervised=False,
        supervised_views=1,
        init_transform=None,
        multicrop_transform=(0, None),
        seed=0
    ):
        """
        TransImageNet

        Dataset that can apply transforms to images on initialization and can
        return multiple transformed copies of the same image in each call
        to __getitem__

        :param dataset: already-initialized ImageNet dataset to wrap
        :param supervised: whether to subsample a labeled split
        :param supervised_views: num. transformed views per labeled image
        :param init_transform: callable producing (targets, samples) for the
            labeled subset (used only when supervised=True)
        :param multicrop_transform: tuple (num_crops, transform) for extra crops
        :param seed: seed forwarded to init_transform for reproducibility
        """
        # NOTE(review): super().__init__() is deliberately not called; this
        # class delegates storage/loading to the wrapped `dataset`.
        self.dataset = dataset
        self.supervised = supervised
        self.supervised_views = supervised_views
        self.multicrop_transform = multicrop_transform
        self.targets, self.samples = dataset.targets, dataset.samples
        if self.supervised:
            self.targets, self.samples = init_transform(
                dataset.root,
                dataset.samples,
                dataset.class_to_idx,
                seed)
            logger.debug(f'num-labeled {len(self.samples)}')
            mint = None
            self.target_indices = []
            # BUGFIX: coerce targets to an ndarray before the elementwise
            # comparison below. A plain Python list compared to an int yields
            # a single scalar False, which made np.argwhere return an empty
            # result and left every per-class index list empty.
            targets = np.asarray(self.targets)
            for t in range(len(dataset.classes)):
                # NOTE: np.squeeze collapses a single match to a 0-d array,
                # so `indices` is an int (not a list) for singleton classes —
                # preserved from the original behavior.
                indices = np.squeeze(np.argwhere(
                    targets == t)).tolist()
                self.target_indices.append(indices)
                mint = len(indices) if mint is None else min(mint, len(indices))
                logger.debug(f'num-labeled target {t} {len(indices)}')
            logger.debug(f'min. labeled indices {mint}')
    @property
    def classes(self):
        """Class names of the wrapped dataset."""
        return self.dataset.classes
    def __getitem__(self, index):
        """Return transformed view(s) of the image at `index` plus its target.

        Supervised: (view_1, ..., view_k, target) with k = supervised_views.
        Unsupervised: (img_1, img_2[, mc_1, ..., mc_n], target).
        """
        target = self.targets[index]
        path = self.samples[index][0]
        img = self.dataset.loader(path)
        if self.dataset.target_transform is not None:
            target = self.dataset.target_transform(target)
        if self.dataset.transform is not None:
            if self.supervised:
                return (*[self.dataset.transform(img) for _ in range(self.supervised_views)], target)
            else:
                img_1 = self.dataset.transform(img)
                img_2 = self.dataset.transform(img)
                multicrop, mc_transform = self.multicrop_transform
                if multicrop > 0 and mc_transform is not None:
                    mc_imgs = [mc_transform(img) for _ in range(int(multicrop))]
                    return (img_1, img_2, *mc_imgs, target)
                return img_1, img_2, target
        return img, target
class TransCIFAR10(torchvision.datasets.CIFAR10):
def __init__(
self,
root,
image_folder='cifar-pytorch/11222017/',
tar_file='cifar-10-python.tar.gz',
copy_data=False,
train=True,
transform=None,
target_transform=None,
init_transform=None,
supervised=True,
multicrop_transform=(0, None),
supervised_views=1
):
data_path = None
if copy_data:
logger.info('copying data locally')
data_path = copy_cifar10_locally(
root=root,
image_folder=image_folder,
tar_file=tar_file)
if (not copy_data) or (data_path is None):
data_path = os.path.join(root, image_folder)
logger.info(f'data-path {data_path}')
super().__init__(data_path, train, transform, target_transform, False)
self.supervised_views = supervised_views
self.multicrop_transform = multicrop_transform
self.supervised = supervised
if self.supervised:
self.targets, self.data = init_transform(self.targets, self.data)
logger.info(f'num-labeled {len(self.data)}')
mint = None
self.target_indices = []
for t in range(len(self.classes)):
indices = np.squeeze(np.argwhere(self.targets == t)).tolist()
self.target_indices.append(indices)
mint = len(indices) if mint is None else min(mint, len(indices))
logger.info(f'num-labeled target {t} {len(indices)}')
logger.info(f'min. labeled indices {mint}')
def __getitem__(self, index):
img, target = self.data[index], self.targets[index]
img = Image.fromarray(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.transform is not None:
if self.supervised:
return (*[self.transform(img) for _ in range(self.supervised_views)], target)
else:
img_1 = self.transform(img)
img_2 = self.transform(img)
multicrop, mc_transform = self.multicrop_transform
if multicrop > 0 and mc_transform is not None:
mc_imgs = [mc_transform(img) for _ in range(int(multicrop))]
return (img_1, img_2, | |
import codecs
import pickle
import optparse
import os
import re
import sys
from collections import OrderedDict
import numpy as np
models_path = "./models"
eval_path = "./evaluation"
eval_temp = os.path.join(eval_path, "temp")
# TODO: Move this to a better configurational structure
eval_logs_dir = os.path.join(eval_temp, "eval_logs")
# Create all working directories up front. exist_ok=True both removes the
# check-then-create race of the previous os.path.exists() guards and creates
# intermediate directories in one call.
for _required_dir in (eval_temp, models_path, eval_logs_dir):
    os.makedirs(_required_dir, exist_ok=True)
eval_script = os.path.join(eval_path, "conlleval-runner.sh")
class RegexpTokenizer():
    """Regex-based word tokenizer.

    Alternatives are tried in order: contraction (e.g. "can't"), plain word,
    dollar amount, and finally any run of non-whitespace characters.
    """
    pattern = r"\w+[']\w+|\w+|\$[\d\.]+|\S+"
    flags = re.UNICODE | re.MULTILINE | re.DOTALL
    regexp = None

    def __init__(self):
        # Compile once per instance; shadows the class-level placeholder.
        self.regexp = re.compile(self.pattern, self.flags)

    def tokenize(self, sentence):
        """Return the list of tokens found in *sentence* (possibly empty)."""
        return self.regexp.findall(sentence)
# Shared module-level tokenizer instance used by tokenize_sentences_string.
tokenizer = RegexpTokenizer()
def tokenize_sentences_string(sentences_string):
    """
    Tokenize a newline-separated block of sentences.

    :type sentences_string: str
    :return: list with one token list per input line
    """
    return [tokenizer.tokenize(line) for line in sentences_string.split("\n")]
def lock_file(f):
    """Block until an exclusive flock is held on file object *f*.

    Polls with a non-blocking lock attempt every 0.1s; re-raises any
    IOError other than EAGAIN. Returns True once the lock is acquired.
    """
    import errno
    import fcntl
    import time
    while True:
        try:
            fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return True
        except IOError as e:
            if e.errno != errno.EAGAIN:
                # Unrelated I/O failure: propagate to the caller.
                raise
            # Lock held elsewhere: back off briefly and retry.
            time.sleep(0.1)
def unlock_file(f):
    """Release a flock previously acquired on file object *f*."""
    import fcntl
    fcntl.flock(f, fcntl.LOCK_UN)
def create_a_model_subpath(models_path):
    """Compute the next free model subpath and its numeric id.

    Reads the model-paths database and increments the id embedded in the
    last registered entry ("model-%08d"); ids start at 0 for an empty
    database.

    :return: tuple (model_subpath, new_id)
    """
    registered = read_model_paths_database(models_path)
    if registered:
        last_id = int(registered[-1][0].split("-")[1])
    else:
        last_id = -1
    new_id = last_id + 1
    return os.path.join(models_path, "model-%08d" % new_id), new_id
def add_a_model_path_to_the_model_paths_database(models_path, model_subpath, model_params_string):
    """Append a (subpath, params) record to the model-paths database.

    The write happens under an exclusive flock so concurrent jobs do not
    interleave their records.
    """
    # 'with' guarantees the handle is closed (dropping the flock with it)
    # even if the write raises; the original leaked the handle on error.
    with codecs.open(os.path.join(models_path, "model_paths_database.dat"), "a+") as f:
        lock_file(f)
        try:
            f.write("%s %s\n" % (model_subpath, model_params_string))
        finally:
            unlock_file(f)
def read_model_paths_database(models_path):
    """Read all records from the model-paths database.

    Each line is split on whitespace into [subpath, params...]; entries are
    returned sorted by subpath.

    :return: sorted list of entries, or [] if the database cannot be opened.
    """
    try:
        # 'with' + try/finally ensure the file is closed and the flock
        # released even when reading fails (the original leaked both).
        with codecs.open(os.path.join(models_path, "model_paths_database.dat"), "r") as f:
            lock_file(f)
            try:
                lines = f.readlines()
            finally:
                unlock_file(f)
    except IOError:
        # Missing/unreadable database is treated as empty.
        return []
    return sorted([line.strip().split() for line in lines if len(line.strip()) > 0],
                  key=lambda x: x[0])
def get_model_subpath(parameters):
    """Return the most recent registered subpath whose parameter string
    matches *parameters*, or None if there is no match."""
    wanted = get_name(parameters)
    # Walk newest-first so the latest matching model wins.
    for subpath, params_string in reversed(read_model_paths_database("models")):
        if params_string == wanted:
            return subpath
def get_name(parameters):
    """
    Generate a model name from its parameters.

    String values containing '/' are reduced to their final path component,
    commas inside values are removed (',' separates key=value pairs), and
    characters unsafe in file names are stripped from the result.
    """
    items = []
    for k, v in list(parameters.items()):
        # The original checked `type(v) is str` twice (a py2 str/unicode
        # leftover); isinstance covers both cases cleanly.
        if isinstance(v, str) and "/" in v:
            # Keep only the part after the last '/' (was an obscure
            # triple-reverse slice).
            items.append((k, v.rsplit("/", 1)[-1]))
        else:
            items.append((k, v))
    name = ",".join(["%s=%s" % (k, str(v).replace(",", "")) for k, v in items])
    # Raw string: the original "\/" was an invalid escape sequence.
    return "".join(ch for ch in name if ch not in r"\/:*?<>|")
def create_dico(item_list):
    """
    Create a dictionary of items from a list of list of items.

    Each key maps to the number of times it appears across all sublists.
    (A plain dict is returned so missing keys still raise KeyError.)
    """
    assert type(item_list) is list
    dico = {}
    for items in item_list:
        for item in items:
            dico[item] = dico.get(item, 0) + 1
    return dico
def create_mapping(dico):
    """
    Create a mapping (item to ID / ID to item) from a dictionary.
    Items are ordered by decreasing frequency, ties broken by item value.
    """
    ranked = sorted(dico.items(), key=lambda kv: (-kv[1], kv[0]))
    id_to_item = dict(enumerate(item for item, _ in ranked))
    item_to_id = {item: idx for idx, item in id_to_item.items()}
    return item_to_id, id_to_item
def zero_digits(s):
    """
    Replace every digit in a string by a zero.
    """
    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (DeprecationWarning in modern Python, SyntaxError in future versions).
    return re.sub(r'\d', '0', s)
def iob2(tags):
    """
    Check that tags have a valid IOB format.
    Tags in IOB1 format are converted (in place) to IOB2.
    Returns False as soon as an invalid tag is encountered.
    """
    for i, tag in enumerate(tags):
        if tag == 'O':
            continue
        parts = tag.split('-')
        if len(parts) != 2 or parts[0] not in ('I', 'B'):
            return False
        if parts[0] == 'B':
            continue
        # parts[0] == 'I': promote to 'B' when this tag starts a new chunk
        # (sentence start, after 'O', or after a chunk of a different type).
        if i == 0 or tags[i - 1] == 'O' or tags[i - 1][1:] != tag[1:]:
            tags[i] = 'B' + tag[1:]
    return True
def iob_iobes(tags):
    """
    IOB -> IOBES

    Single-token chunks become S-, chunk-final I- tags become E-;
    everything else passes through unchanged.
    """
    def _continued(i):
        # True when the next tag continues the current chunk with an I- tag.
        return i + 1 < len(tags) and tags[i + 1].split('-')[0] == 'I'

    converted = []
    for i, tag in enumerate(tags):
        if tag == 'O':
            converted.append(tag)
            continue
        prefix = tag.split('-')[0]
        if prefix == 'B':
            converted.append(tag if _continued(i) else tag.replace('B-', 'S-'))
        elif prefix == 'I':
            converted.append(tag if _continued(i) else tag.replace('I-', 'E-'))
        else:
            raise Exception('Invalid IOB format!')
    return converted
def iobes_iob(tags):
    """
    IOBES -> IOB

    S- maps back to B-, E- maps back to I-; B-, I- and O pass through.
    Raises Exception for any other prefix.
    """
    converted = []
    for tag in tags:
        prefix = tag.split('-')[0]
        if prefix in ('B', 'I', 'O'):
            converted.append(tag)
        elif prefix == 'S':
            converted.append(tag.replace('S-', 'B-'))
        elif prefix == 'E':
            converted.append(tag.replace('E-', 'I-'))
        else:
            raise Exception('Invalid format!')
    return converted
def insert_singletons(words, singletons, p=0.5):
    """
    Replace singletons by the unknown word (index 0) with a probability p.
    """
    # The RNG is consulted only for words that actually are singletons,
    # matching the original draw-per-singleton behavior.
    return [0 if (word in singletons and np.random.uniform() < p) else word
            for word in words]
def pad_word_chars(words):
    """
    Pad the characters of the words in a sentence.

    Input:
        - list of lists of ints (list of words, a word being a list of char indexes)
    Output:
        - padded list of lists of ints
        - padded list of lists of ints (where chars are reversed)
        - list of ints corresponding to the index of the last character of each word
    """
    width = max(len(word) for word in words)
    forward, backward, last_pos = [], [], []
    for word in words:
        pad = [0] * (width - len(word))
        forward.append(word + pad)
        backward.append(word[::-1] + pad)
        last_pos.append(len(word) - 1)
    return forward, backward, last_pos
def create_input(data, parameters, add_label, singletons=None):
    """
    Take sentence data and return an input for
    the training or the evaluation function.

    The returned list contains, in order and depending on `parameters`:
    word ids, forward char ids, (reversed char ids + last-char positions),
    capitalization features, and optionally the gold tags.
    """
    words = data['words']
    if singletons is not None:
        words = insert_singletons(words, singletons)
    char_for, char_rev, char_pos = pad_word_chars(data['chars'])
    inputs = []  # renamed from 'input' to avoid shadowing the builtin
    if parameters['word_dim']:
        inputs.append(words)
    if parameters['char_dim']:
        inputs.append(char_for)
    if parameters['ch_b']:
        inputs.append(char_rev)
        inputs.append(char_pos)
    if parameters['cap_dim']:
        inputs.append(data['caps'])
    if add_label:
        inputs.append(data['tags'])
    return inputs
def read_args(evaluation=False, args_as_a_list=sys.argv[1:], for_xnlp=False):
optparser = optparse.OptionParser()
if for_xnlp:
optparser.add_option(
"-r", "--reload", default="0",
type='int', help="Reload the last saved model"
)
optparser.add_option(
"--model_path", default="",
type='str', help="Model path must be given when a reload is requested"
)
optparser.add_option(
"--model_epoch_path", default="",
type='str', help="Model epoch path must be given when a reload is requested"
)
else:
for label in ["ner", "md"]:
optparser.add_option(
"--{label}_train_file".format(label=label), default="",
help="Train set location"
)
optparser.add_option(
"--{label}_dev_file".format(label=label), default="",
help="Dev set location"
)
optparser.add_option(
"--{label}_test_file".format(label=label), default="",
help="Test set location"
)
optparser.add_option(
"--lang_name", default="turkish",
help="langugage name"
)
optparser.add_option(
"--alt_dataset_group", default="none",
help="alternative dataset group selector"
)
optparser.add_option(
"--use_golden_morpho_analysis_in_word_representation", default=False, action="store_true",
help="use golden morpho analysis when representing words"
)
optparser.add_option(
"-s", "--tag_scheme", default="iobes",
help="Tagging scheme (IOB or IOBES)"
)
optparser.add_option(
"-l", "--lower", default="0",
type='int', help="Lowercase words (this will not affect character inputs)"
)
optparser.add_option(
"-z", "--zeros", default="0",
type='int', help="Replace digits with 0"
)
optparser.add_option(
"-c", "--char_dim", default="25",
type='int', help="Char embedding dimension"
)
optparser.add_option(
"-C", "--char_lstm_dim", default="25",
type='int', help="Char LSTM hidden layer size"
)
optparser.add_option(
"-b", "--char_bidirect", default="1",
type='int', help="Use a bidirectional LSTM for chars"
)
# morpho_tag section
optparser.add_option(
"--morpho_tag_dim", default="100",
type='int', help="Morpho tag embedding dimension"
)
optparser.add_option(
"--morpho_tag_lstm_dim", default="100",
type='int', help="Morpho tag LSTM hidden layer size"
)
optparser.add_option(
"--morpho_tag_bidirect", default="1",
type='int', help="Use a bidirectional LSTM for morpho tags"
)
optparser.add_option(
"--morpho_tag_type", default="char",
help="Mode of morphological tag extraction"
)
optparser.add_option(
"--morpho-tag-column-index", default="1",
type='int', help="the index of the column which contains the morphological tags in the conll format"
)
optparser.add_option(
"--integration_mode", default="0",
type='int', help="integration mode"
)
optparser.add_option(
"--active_models", default="0",
type='int', help="active models: 0: NER, 1: MD, 2: JOINT"
)
optparser.add_option(
"--multilayer", default="0",
type='int', help="use a multilayered sentence level Bi-LSTM"
)
optparser.add_option(
"--shortcut_connections", default="0",
type='int', help="use shortcut connections in the multilayered scheme"
)
optparser.add_option(
"--tying_method", default="",
help="tying method"
)
optparser.add_option(
"-w", "--word_dim", default="100",
type='int', help="Token embedding dimension"
)
optparser.add_option(
"-W", "--word_lstm_dim", default="100",
type='int', help="Token LSTM hidden layer size"
)
optparser.add_option(
"-B", "--word_bidirect", default="1",
type='int', help="Use a bidirectional LSTM for words"
)
optparser.add_option(
"-p", "--pre_emb", default="",
help="Location of pretrained embeddings"
)
optparser.add_option(
"-A", "--all_emb", default="0",
type='int', help="Load all embeddings"
)
optparser.add_option(
"-a", "--cap_dim", default="0",
type='int', help="Capitalization feature dimension (0 to disable)"
)
optparser.add_option(
"-f", "--crf", default="1",
type='int', help="Use CRF (0 to disable)"
)
optparser.add_option(
"-D", "--dropout", default="0.5",
type='float', help="Droupout on the input (0 = no dropout)"
)
optparser.add_option(
"-L", "--lr_method", default="adam-alpha_float@0.005",
help="Learning method (SGD, Adadelta, Adam..)"
)
optparser.add_option(
"--disable_sparse_updates", default=True, action="store_false",
dest="sparse_updates_enabled",
help="Sparse updates enabled"
)
optparser.add_option(
"-r", "--reload", default="0",
type='int', help="Reload the last saved model"
)
optparser.add_option(
"--model_path", default="",
type='str', help="Model path must be given when a reload is requested"
)
optparser.add_option(
"--model_epoch_path", default="",
type='str', help="Model epoch path must be given when a reload is requested"
)
optparser.add_option(
"--skip-testing", default="0",
type='int',
help="Skip the evaluation on test set (because dev and test sets are the same and thus testing is irrelevant)"
)
optparser.add_option(
"--predict-and-exit-filename", default="",
help="Used with '--reload 1', the loaded model is used for predicting on | |
<filename>thred/models/thred/thred_model.py<gh_stars>100-1000
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import variable_scope
from .. import attention_helper
from ..base import AbstractModel
from ..topic_aware import taware_layer, taware_decoder
from thred.util import log, vocab, rnn_factory
from thred.util.device import DeviceManager, RoundRobin
class TopicAwareHierarchicalSeq2SeqModel(AbstractModel):
"""Topic-Aware Hierarchical Sequence-to-sequence model
"""
    def __init__(self,
                 mode,
                 num_turns,
                 iterator,
                 params,
                 rev_vocab_table=None,
                 scope=None,
                 log_trainables=True):
        """Build the train/eval/inference graph for the THRED model.

        :param mode: one of tf.contrib.learn.ModeKeys (TRAIN / EVAL / INFER).
        :param num_turns: total dialogue turns; the final turn is the decoding
            target, so num_turns - 1 turns are encoded.
        :param iterator: batched data iterator exposing sources, targets,
            sequence lengths and topic words.
        :param params: hyper-parameter namespace (vocab, cells, optimizer, ...).
        :param rev_vocab_table: id->word lookup table (used in INFER mode only).
        :param scope: optional variable-scope name (default 'thred_graph').
        :param log_trainables: whether to log every trainable variable.
        """
        log.print_out("# creating %s graph ..." % mode)
        self.dtype = tf.float32
        self.mode = mode
        # Last turn is the target; only the preceding turns are encoded.
        self.num_turns = num_turns - 1
        self.device_manager = DeviceManager()
        self.round_robin = RoundRobin(self.device_manager)
        self.num_gpus = min(params.num_gpus, self.device_manager.num_available_gpus())
        log.print_out("# number of gpus %d" % self.num_gpus)
        self.iterator = iterator
        with tf.variable_scope(scope or 'thred_graph', dtype=self.dtype):
            self.init_embeddings(params.vocab_file, params.vocab_pkl, scope=scope)
            encoder_keep_prob, decoder_keep_prob = self.get_keep_probs(mode, params)
            # Context-level dropout is applied only while training.
            if mode == tf.contrib.learn.ModeKeys.TRAIN:
                context_keep_prob = 1.0 - params.context_dropout_rate
            else:
                context_keep_prob = 1.0
            with tf.variable_scope(scope or "build_network"):
                with tf.variable_scope("decoder/output_projection") as output_scope:
                    # Topic-boosted projection mixes topic-vocab logits into
                    # the output layer; otherwise a plain dense projection.
                    if params.boost_topic_gen_prob:
                        self.output_layer = taware_layer.JointDenseLayer(
                            params.vocab_size, params.topic_vocab_size,
                            scope=output_scope, name="output_projection")
                    else:
                        self.output_layer = layers_core.Dense(
                            params.vocab_size,
                            use_bias=False, name="output_projection")
            self.batch_size = tf.size(self.iterator.source_sequence_lengths[0])
            # Two devices: one for the context RNN, one for the decoder.
            devices = self.round_robin.assign(2, base=self.num_gpus - 1)
            encoder_results, encoder_state = self.__build_encoder(params, encoder_keep_prob)
            context_outputs, context_state = self.__build_context(params, encoder_results, encoder_state,
                                                                  context_keep_prob, devices[0])
            self.global_step = tf.Variable(0, trainable=False)
            # Scheduled sampling: feed model samples back in with a
            # step-dependent probability (TRAIN only).
            self.use_scheduled_sampling = False
            if mode == tf.contrib.learn.ModeKeys.TRAIN:
                self.sampling_probability = tf.constant(params.scheduled_sampling_prob)
                self.sampling_probability = self._get_sampling_probability(params, self.global_step,
                                                                           self.sampling_probability)
                self.use_scheduled_sampling = params.scheduled_sampling_prob > 0
            elif mode == tf.contrib.learn.ModeKeys.EVAL:
                self.sampling_probability = tf.constant(0.0)
            logits, sample_ids, _ = self.__build_decoder(params, context_outputs, context_state,
                                                         decoder_keep_prob, devices[1])
            if mode != tf.contrib.learn.ModeKeys.INFER:
                with tf.device(self.device_manager.tail_gpu()):
                    loss = self.__compute_loss(logits)
            else:
                loss, losses = None, None
            if mode == tf.contrib.learn.ModeKeys.TRAIN:
                self.train_loss = loss
                # Total source + target tokens, used for words-per-second stats.
                self.word_count = sum(
                    [tf.reduce_sum(self.iterator.source_sequence_lengths[t]) for t in range(self.num_turns)]) + \
                                  tf.reduce_sum(self.iterator.target_sequence_length)
            elif mode == tf.contrib.learn.ModeKeys.EVAL:
                self.eval_loss = loss
            elif mode == tf.contrib.learn.ModeKeys.INFER:
                self.sample_words = rev_vocab_table.lookup(tf.to_int64(sample_ids))
            if mode != tf.contrib.learn.ModeKeys.INFER:
                ## Count the number of predicted words for compute ppl.
                self.predict_count = tf.reduce_sum(self.iterator.target_sequence_length)
        trainables = tf.trainable_variables()
        if mode == tf.contrib.learn.ModeKeys.TRAIN:
            self.learning_rate = tf.constant(params.learning_rate)
            # decay
            self.learning_rate = self._get_learning_rate_decay(params, self.global_step, self.learning_rate)
            # Optimizer
            if params.optimizer.lower() == "sgd":
                opt = tf.train.GradientDescentOptimizer(self.learning_rate)
                tf.summary.scalar("lr", self.learning_rate)
            elif params.optimizer.lower() == "adam":
                opt = tf.train.AdamOptimizer(self.learning_rate)
                tf.summary.scalar("lr", self.learning_rate)
            else:
                raise ValueError('Unknown optimizer: ' + params.optimizer)
            # Gradients, clipped by global norm before being applied.
            gradients = tf.gradients(
                self.train_loss,
                trainables,
                colocate_gradients_with_ops=True)
            clipped_grads, grad_norm = tf.clip_by_global_norm(gradients, params.max_gradient_norm)
            grad_norm_summary = [tf.summary.scalar("grad_norm", grad_norm)]
            grad_norm_summary.append(
                tf.summary.scalar("clipped_gradient", tf.global_norm(clipped_grads)))
            self.grad_norm = grad_norm
            self.update = opt.apply_gradients(
                zip(clipped_grads, trainables), global_step=self.global_step)
            # Summary
            self.train_summary = tf.summary.merge([
                tf.summary.scalar("lr", self.learning_rate),
                tf.summary.scalar("train_loss", self.train_loss),
            ] + grad_norm_summary)
        if mode == tf.contrib.learn.ModeKeys.INFER:
            self.infer_logits, self.sample_id = logits, sample_ids
            self.infer_summary = tf.no_op()
        # Saver
        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=2)
        # Print trainable variables
        if log_trainables:
            log.print_out("# Trainable variables")
            for trainable in trainables:
                log.print_out("  %s, %s, %s" % (trainable.name, str(trainable.get_shape()),
                                                trainable.op.device))
    def __build_encoder(self, params, keep_prob):
        """Encode every source turn with a shared (uni- or bi-directional) RNN.

        Returns (encoder_results, next_initial_state) where encoder_results is
        a list of (encoder_outputs, summary_vector) per turn and
        next_initial_state is the first turn's final state, reshaped to match
        the context RNN's direction layout.
        """
        encoder_cell = {}
        if params.encoder_type == "uni":
            encoder_cell['uni'] = rnn_factory.create_cell(params.cell_type, params.hidden_units,
                                                          use_residual=params.residual,
                                                          num_layers=params.num_layers,
                                                          input_keep_prob=keep_prob)
        elif params.encoder_type == "bi":
            # Bidirectional: split the layer budget between fw and bw stacks.
            num_bi_layers = int(params.num_layers / 2)
            encoder_cell['fw'] = rnn_factory.create_cell(params.cell_type, params.hidden_units,
                                                         use_residual=params.residual,
                                                         num_layers=num_bi_layers,
                                                         input_keep_prob=keep_prob)
            encoder_cell['bw'] = rnn_factory.create_cell(params.cell_type, params.hidden_units,
                                                         use_residual=params.residual,
                                                         num_layers=num_bi_layers,
                                                         input_keep_prob=keep_prob)
        else:
            raise ValueError("Unknown encoder type: '%s'" % params.encoder_type)
        encoding_devices = self.round_robin.assign(self.num_turns)
        encoder_results, next_initial_state = [], None
        for t in range(self.num_turns):
            with variable_scope.variable_scope("encoder") as scope:
                # Share encoder weights across turns.
                if t > 0:
                    scope.reuse_variables()
                with tf.device(encoding_devices[t]):
                    encoder_embedded_inputs = tf.nn.embedding_lookup(params=self.embeddings,
                                                                     ids=self.iterator.sources[t])
                    if params.encoder_type == "bi":
                        encoder_outputs, states = tf.nn.bidirectional_dynamic_rnn(
                            encoder_cell['fw'],
                            encoder_cell['bw'],
                            inputs=encoder_embedded_inputs,
                            dtype=self.dtype,
                            sequence_length=self.iterator.source_sequence_lengths[t],
                            swap_memory=True)
                        fw_state, bw_state = states
                        num_bi_layers = int(params.num_layers / 2)
                        # The first turn's final state seeds the context RNN;
                        # its layout depends on the context RNN's direction.
                        if t == 0:
                            if params.context_type == "uni":
                                next_initial_state = self._merge_bidirectional_states(num_bi_layers, fw_state, bw_state)
                            else:
                                if num_bi_layers > 1:
                                    initial_state_fw, initial_state_bw = [], []
                                    for layer_id in range(num_bi_layers):
                                        initial_state_fw.append(fw_state[layer_id])
                                        initial_state_bw.append(bw_state[layer_id])
                                    next_initial_state = (tuple(initial_state_fw), tuple(initial_state_bw))
                                else:
                                    next_initial_state = (fw_state, bw_state)
                        # Per-turn summary vector: concatenation of the last
                        # fw/bw layer states, fed into the context RNN.
                        if num_bi_layers > 1:
                            next_input = tf.concat([fw_state[-1], bw_state[-1]], axis=1)
                        else:
                            next_input = tf.concat([fw_state, bw_state], axis=1)
                    else:
                        encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
                            encoder_cell['uni'],
                            inputs=encoder_embedded_inputs,
                            sequence_length=self.iterator.source_sequence_lengths[t],
                            dtype=self.dtype,
                            swap_memory=True,
                            scope=scope)
                        if t == 0:
                            if params.context_type == "uni":
                                next_initial_state = encoder_state
                            else:
                                # Split interleaved layers into fw/bw halves
                                # for a bidirectional context RNN.
                                num_bi_layers = int(params.num_layers / 2)
                                initial_state_fw, initial_state_bw = [], []
                                for layer_id in range(num_bi_layers):
                                    initial_state_fw.append(encoder_state[2 * layer_id])
                                    initial_state_bw.append(encoder_state[2 * layer_id + 1])
                                next_initial_state = (tuple(initial_state_fw), tuple(initial_state_bw))
                        # Per-turn summary: top-layer final state.
                        if params.num_layers > 1:
                            next_input = encoder_state[-1]
                        else:
                            next_input = encoder_state
                    # msg_attn_mechanism = attention_helper.create_attention_mechanism(
                    #     params.attention_type,
                    #     params.hidden_units,
                    #     encoder_outputs,
                    #     self.iterator.source_sequence_lengths[t])
                    encoder_results.append((encoder_outputs, next_input))
        return encoder_results, next_initial_state
def _merge_bidirectional_states(self, num_bi_layers, fw_state, bw_state):
if num_bi_layers > 1:
merged_state = []
for layer_id in range(num_bi_layers):
merged_state.append(fw_state[layer_id])
merged_state.append(bw_state[layer_id])
merged_state = tuple(merged_state)
else:
merged_state = (fw_state, bw_state)
return merged_state
    def __build_context(self, params, encoder_results, initial_state, keep_prob, device):
        """Run the context RNN over the per-turn summary vectors.

        The inputs are the turn summaries produced by __build_encoder, stacked
        time-major (one "time step" per turn). Returns (context_outputs,
        context_state) used to initialize and attend from the decoder.
        """
        with variable_scope.variable_scope("context") as scope:
            with tf.device(device):
                # One context step per encoded turn, for every batch element.
                context_seq_length = tf.fill([self.batch_size], self.num_turns)
                # Time-major stack of turn summaries: [num_turns, batch, dim].
                context_inputs = tf.stack([state for _, state in encoder_results], axis=0)
                # message_attention = attention_helper.create_attention_mechanism(params.attention_type,
                #                                                                 params.hidden_units,
                #                                                                 context_inputs)
                if params.context_type == "uni":
                    cell = rnn_factory.create_cell(params.cell_type, params.hidden_units,
                                                   use_residual=params.residual, num_layers=params.num_layers,
                                                   input_keep_prob=keep_prob)
                    # cell = tf.contrib.seq2seq.AttentionWrapper(
                    #     cell,
                    #     msg_attn_mechanism,
                    #     attention_layer_size=params.hidden_units,
                    #     alignment_history=False,
                    #     output_attention=True,
                    #     name="message_attention")
                    context_outputs, context_state = tf.nn.dynamic_rnn(cell,
                                                                       initial_state=initial_state,
                                                                       inputs=context_inputs,
                                                                       sequence_length=context_seq_length,
                                                                       time_major=True,
                                                                       dtype=self.dtype,
                                                                       swap_memory=True)
                    return context_outputs, context_state
                elif params.context_type == "bi":
                    num_bi_layers = int(params.num_layers / 2)
                    # fw/bw stacks are placed on GPUs from opposite ends of
                    # the device list to balance load.
                    fw_cell = rnn_factory.create_cell(params.cell_type, params.hidden_units, num_bi_layers,
                                                      use_residual=params.residual,
                                                      input_keep_prob=keep_prob,
                                                      devices=self.round_robin.assign(num_bi_layers))
                    bw_cell = rnn_factory.create_cell(params.cell_type, params.hidden_units, num_bi_layers,
                                                      use_residual=params.residual,
                                                      input_keep_prob=keep_prob,
                                                      devices=self.round_robin.assign(num_bi_layers,
                                                                                      self.device_manager.num_available_gpus() - 1))
                    # initial_state is a (fw, bw) pair here (see __build_encoder).
                    context_outputs, context_state = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell,
                                                                                     context_inputs,
                                                                                     initial_state_fw=initial_state[0],
                                                                                     initial_state_bw=initial_state[1],
                                                                                     sequence_length=context_seq_length,
                                                                                     time_major=True,
                                                                                     dtype=scope.dtype,
                                                                                     swap_memory=True)
                    fw_state, bw_state = context_state
                    fw_output, bw_output = context_outputs
                    # Concatenate fw/bw outputs along the feature axis.
                    context_outputs = tf.concat([fw_output, bw_output], axis=-1)
                    return context_outputs, self._merge_bidirectional_states(num_bi_layers, fw_state, bw_state)
                else:
                    raise ValueError("Unknown context type: %s" % params.context_type)
def __build_decoder_cell(self, params, context_outputs, context_state, input_keep_prob):
    """Create the decoder cell plus its initial state.

    The decoder is a stacked RNN wrapped with two attention mechanisms:
    one over the context-encoder outputs and one over the topic-word
    embeddings ("joint attention").

    Args:
        params: hyperparameter object (cell type, layers, beam width, ...).
        context_outputs: time-major outputs of the context encoder.
        context_state: final state of the context encoder.
        input_keep_prob: dropout keep probability for cell inputs.

    Returns:
        (cell, decoder_initial_state) ready for a seq2seq decoder.
    """
    cell = rnn_factory.create_cell(params.cell_type, params.hidden_units,
                                   use_residual=params.residual, num_layers=params.num_layers,
                                   input_keep_prob=input_keep_prob,
                                   devices=self.round_robin.assign(params.num_layers))
    # Each topic-word embedding is concatenated with the final context
    # state so topical attention can condition on the dialogue context.
    topical_embeddings = tf.nn.embedding_lookup(self.embeddings, self.iterator.topic)
    max_topic_length = tf.reduce_max(self.iterator.topic_sequence_length)
    # For a multi-layer encoder, use the top layer's state.
    expanded_context_state = tf.tile(tf.expand_dims(context_state[-1] if params.num_layers > 1 else context_state, axis=1),
                                     [1, max_topic_length, 1])
    topical_embeddings = tf.concat([expanded_context_state, topical_embeddings], axis=2)
    context_sequence_length = tf.fill([self.batch_size], self.num_turns)
    # The context encoder ran time-major; attention memories must be batch-major.
    batch_majored_context_outputs = tf.transpose(context_outputs, [1, 0, 2])
    if self.mode == tf.contrib.learn.ModeKeys.INFER and params.beam_width > 0:
        # Beam search: replicate every batch entry beam_width times.
        batch_size = self.batch_size * params.beam_width
        decoder_initial_state = tf.contrib.seq2seq.tile_batch(context_state, multiplier=params.beam_width)
        memory = tf.contrib.seq2seq.tile_batch(batch_majored_context_outputs, multiplier=params.beam_width)
        topical_embeddings = tf.contrib.seq2seq.tile_batch(topical_embeddings, multiplier=params.beam_width)
        context_sequence_length = tf.contrib.seq2seq.tile_batch(
            context_sequence_length, multiplier=params.beam_width)
        topic_sequence_length = tf.contrib.seq2seq.tile_batch(
            self.iterator.topic_sequence_length, multiplier=params.beam_width)
    else:
        batch_size = self.batch_size
        decoder_initial_state = context_state
        memory = batch_majored_context_outputs
        topic_sequence_length = self.iterator.topic_sequence_length
    context_attention = attention_helper.create_attention_mechanism(params.attention_type,
                                                                    params.hidden_units,
                                                                    memory, context_sequence_length)
    topical_attention = attention_helper.create_attention_mechanism(params.attention_type,
                                                                    params.hidden_units,
                                                                    topical_embeddings, topic_sequence_length)
    # Alignment history is only kept for greedy/sampling inference; it is
    # not used in training and is incompatible with beam search.
    alignment_history = self.mode == tf.contrib.learn.ModeKeys.INFER and params.beam_width == 0
    cell = tf.contrib.seq2seq.AttentionWrapper(
        cell,
        attention_mechanism=(context_attention, topical_attention),
        attention_layer_size=(params.hidden_units, params.hidden_units),
        alignment_history=alignment_history,
        output_attention=True,
        name="joint_attention")
    # Seed the wrapper's zero state with the (possibly tiled) context state.
    decoder_initial_state = cell.zero_state(batch_size, self.dtype).clone(cell_state=decoder_initial_state)
    return cell, decoder_initial_state
def __build_decoder(self, params, context_outputs, context_state,
                    keep_prob, device):
    """Build the training or inference decoder graph.

    Args:
        params: hyperparameter object.
        context_outputs: time-major context-encoder outputs.
        context_state: final context-encoder state.
        keep_prob: dropout keep probability.
        device: device string the decoder ops are pinned to.

    Returns:
        (logits, sample_ids, final_decoder_state). With beam search,
        logits is a no-op placeholder and sample_ids holds the beams.
    """
    iterator = self.iterator
    with variable_scope.variable_scope("decoder") as scope:
        with tf.device(device):
            cell, initial_state = self.__build_decoder_cell(params, context_outputs, context_state, keep_prob)
            if self.mode != tf.contrib.learn.ModeKeys.INFER:
                # decoder_emp_inp: [max_time, batch_size, num_units]
                decoder_emb_inp = tf.nn.embedding_lookup(self.embeddings, iterator.target_input)
                # Helper: scheduled sampling mixes model samples into the
                # teacher-forced inputs with self.sampling_probability.
                if self.use_scheduled_sampling:
                    helper = tf.contrib.seq2seq.ScheduledEmbeddingTrainingHelper(
                        decoder_emb_inp, iterator.target_sequence_length, self.embeddings,
                        self.sampling_probability)
                else:
                    helper = tf.contrib.seq2seq.TrainingHelper(decoder_emb_inp, iterator.target_sequence_length)
                # Decoder
                my_decoder = taware_decoder.ConservativeBasicDecoder(
                    cell,
                    helper,
                    initial_state,
                    self.output_layer)
                # Dynamic decoding
                outputs, final_decoder_state, _ = tf.contrib.seq2seq.dynamic_decode(my_decoder,
                                                                                    swap_memory=True,
                                                                                    scope=scope)
                sample_ids = outputs.sample_id
                logits = outputs.rnn_output
                # Note: there's a subtle difference here between train and inference.
                # We could have set output_layer when create my_decoder
                # and shared more code between train and inference.
                # We chose to apply the output_layer to all timesteps for speed:
                # 10% improvements for small models & 20% for larger ones.
                # If memory is a concern, we should apply output_layer per timestep.
            ### Inference
            else:
                beam_width = params.beam_width
                start_tokens = tf.fill([self.batch_size], vocab.SOS_ID)
                end_token = vocab.EOS_ID
                # Cap decode length relative to the longest encoder input.
                maximum_iterations = self._get_decoder_max_iterations(params)
                if beam_width > 0:
                    # initial_state = tf.contrib.seq2seq.tile_batch(context_outputs[-1],
                    #                                               multiplier=params.beam_width)
                    my_decoder = taware_decoder.ConservativeBeamSearchDecoder(
                        cell, self.embeddings, start_tokens, end_token,
                        initial_state=initial_state,
                        beam_width=beam_width,
                        output_layer=self.output_layer,
                        length_penalty_weight=params.length_penalty_weight)
                else:
                    # Helper: sample from the softmax when a temperature is
                    # configured, otherwise decode greedily.
                    if params.sampling_temperature > 0.0:
                        helper = tf.contrib.seq2seq.SampleEmbeddingHelper(
                            self.embeddings, start_tokens, end_token,
                            softmax_temperature=params.sampling_temperature,
                            seed=None)
                    else:
                        helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(self.embeddings, start_tokens, end_token)
                    # Decoder
                    my_decoder = taware_decoder.ConservativeBasicDecoder(
                        cell,
                        helper,
                        initial_state,
                        output_layer=self.output_layer  # applied per timestep
                    )
                # Dynamic decoding
                outputs, final_decoder_state, _ = tf.contrib.seq2seq.dynamic_decode(my_decoder,
                                                                                    maximum_iterations=maximum_iterations,
                                                                                    swap_memory=True,
                                                                                    scope=scope)
                if beam_width > 0:
                    # Beam search outputs carry no per-step logits.
                    logits = tf.no_op()
                    sample_ids = outputs.predicted_ids
                else:
                    logits = outputs.rnn_output
                    sample_ids = outputs.sample_id
    return logits, sample_ids, final_decoder_state
def _get_decoder_max_iterations(self, params):
    """Derive the decode-length cap from the longest encoder input.

    Takes the maximum source length over every turn in the batch and
    scales it by ``params.decoding_length_factor``.
    """
    per_turn_maxes = [
        tf.reduce_max(self.iterator.source_sequence_lengths[t])
        for t in range(self.num_turns)
    ]
    longest_source = tf.reduce_max(tf.stack(per_turn_maxes))
    return tf.to_int32(
        tf.round(tf.to_float(longest_source) * params.decoding_length_factor))
def __compute_loss(self, logits):
    """Masked cross-entropy over target tokens, averaged per batch example."""
    targets = self.iterator.target_output
    token_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=targets, logits=logits)
    # Prefer the static time dimension; fall back to the dynamic shape.
    time_steps = targets.shape[1].value or tf.shape(targets)[1]
    # Zero out positions beyond each sequence's true length.
    mask = tf.sequence_mask(self.iterator.target_sequence_length,
                            time_steps, dtype=self.dtype)
    return tf.reduce_sum(token_loss * mask) / tf.to_float(self.batch_size)
def train(self, sess):
    """Run one optimization step and return the training metrics."""
    assert self.mode == tf.contrib.learn.ModeKeys.TRAIN
    fetches = [
        self.update,
        self.train_loss,
        self.predict_count,
        self.train_summary,
        self.global_step,
        self.word_count,
        self.batch_size,
        self.grad_norm,
        self.learning_rate,
    ]
    return sess.run(fetches)
def eval(self, sess):
    """Compute evaluation loss and counts for the current batch."""
    assert self.mode == tf.contrib.learn.ModeKeys.EVAL
    fetches = [self.eval_loss, self.predict_count, self.batch_size]
    return sess.run(fetches)
def infer(self, sess):
    """Run inference and return logits, summary, sample ids and words."""
    assert self.mode == tf.contrib.learn.ModeKeys.INFER
    fetches = [self.infer_logits, self.infer_summary,
               self.sample_id, self.sample_words]
    return sess.run(fetches)
def decode(self, sess):
_, infer_summary, _, sample_words = self.infer(sess)
if | |
time slot
"""
print ("\n**** test_schedule_premept_active_task ****")
# used by cancel_schedules
agentid = 'new_agent'
taskid = 'task_high_priority2'
cancel_schedules.append({'agentid': agentid, 'taskid': taskid})
# add low prority task as well since it won't get cancelled till
# end of grace time
cancel_schedules.append(
{'agentid': TEST_AGENT, 'taskid': 'task_low_priority2'})
publish_agent.callback = MagicMock(name="callback")
publish_agent.callback.reset_mock()
# subscribe to schedule response topic
publish_agent.vip.pubsub.subscribe(peer='pubsub',
prefix=topics.ACTUATOR_SCHEDULE_RESULT,
callback=publish_agent.callback).get()
start = str(datetime.now())
end = str(datetime.now() + timedelta(seconds=15))
msg = [
['fakedriver1', start, end]
]
result = publish_agent.vip.rpc.call(
PLATFORM_ACTUATOR,
REQUEST_NEW_SCHEDULE,
TEST_AGENT,
'task_low_priority2',
'LOW_PREEMPT',
msg).get(timeout=10)
# expected result {'info': u'', 'data': {}, 'result': SUCCESS}
print result
assert result['result'] == SUCCESS
# wait for above call's success response to publish_agent.callback method
gevent.sleep(1)
publish_agent.callback.reset_mock()
result = publish_agent.vip.rpc.call(
PLATFORM_ACTUATOR,
REQUEST_NEW_SCHEDULE,
agentid,
taskid,
'HIGH',
msg).get(timeout=10)
assert result['result'] == SUCCESS
# wait for 2 callbacks - success msg for task_high_priority and preempt
# msg for task_low_priority
gevent.sleep(6)
print ('call args list:', publish_agent.callback.call_args_list)
assert publish_agent.callback.call_count == 2
# Grab the args of callback and verify
call_args1 = publish_agent.callback.call_args_list[0][0]
call_args2 = publish_agent.callback.call_args_list[1][0]
assert call_args1[1] == PLATFORM_ACTUATOR
assert call_args1[3] == topics.ACTUATOR_SCHEDULE_RESULT
# initialize 0 to schedule response and 1 to cancel response
schedule_header = call_args1[4]
schedule_message = call_args1[5]
print ("call args of 1 ", publish_agent.callback.call_args_list[1])
cancel_header = call_args2[4]
cancel_message = call_args2[5]
# check if order is reversed: 0 is cancelresponse and 1 is new schedule
if call_args1[4]['type'] == 'CANCEL_SCHEDULE':
assert call_args2[4]['type'] == 'NEW_SCHEDULE'
cancel_header = call_args1[4]
cancel_message = call_args1[5]
schedule_header = call_args2[4]
schedule_message = call_args2[5]
else:
assert call_args1[4]['type'] == 'NEW_SCHEDULE'
assert call_args2[4]['type'] == 'CANCEL_SCHEDULE'
# values remain as initialized above if/else
assert schedule_header['type'] == 'NEW_SCHEDULE'
assert schedule_header['taskID'] == taskid
assert schedule_header['requesterID'] == agentid
assert schedule_message['result'] == SUCCESS
assert cancel_header['taskID'] == 'task_low_priority2'
assert cancel_message['data']['agentID'] == agentid
assert cancel_message['data']['taskID'] == taskid
assert cancel_message['result'] == 'PREEMPTED'
@pytest.mark.actuator
def test_schedule_premept_active_task_gracetime(publish_agent,
                                                cancel_schedules):
    """
    Test schedule preemption of an actively running task with priority
    LOW_PREEMPT by a higher priority task from a different agent. Then try
    setting a point before the end of grace time of the lower priority task;
    the set operation should fail with a LockError.
    :param publish_agent: fixture invoked to setup all agents necessary and
    returns an instance of Agent object used for publishing
    :param cancel_schedules: fixture used to cancel the schedule at the end
    of test so that other tests can use the same device and time slot
    """
    print ("\n**** test_schedule_premept_active_task_gracetime ****")
    # used by cancel_schedules
    agentid = 'new_agent'
    taskid = 'task_high_priority3'
    cancel_schedules.append({'agentid': agentid, 'taskid': taskid})
    # add low prority task as well since it won't get cancelled till
    # end of grace time
    cancel_schedules.append(
        {'agentid': TEST_AGENT, 'taskid': 'task_low_priority3'})
    publish_agent.callback = MagicMock(name="callback")
    publish_agent.callback.reset_mock()
    # subscribe to schedule response topic
    publish_agent.vip.pubsub.subscribe(peer='pubsub',
                                       prefix=topics.ACTUATOR_SCHEDULE_RESULT,
                                       callback=publish_agent.callback).get()
    # schedule an immediately-active window so the task starts running now
    start = str(datetime.now())
    end = str(datetime.now() + timedelta(seconds=20))
    msg = [
        ['fakedriver1', start, end]
    ]
    result = publish_agent.vip.rpc.call(
        PLATFORM_ACTUATOR,
        REQUEST_NEW_SCHEDULE,
        TEST_AGENT,
        'task_low_priority3',
        'LOW_PREEMPT',
        msg).get(timeout=10)
    # expected result {'info': u'', 'data': {}, 'result': SUCCESS}
    print result
    assert result['result'] == SUCCESS
    # wait for above call's success response to publish_agent.callback method
    gevent.sleep(1)
    publish_agent.callback.reset_mock()
    # same window, higher priority, different agent -> should preempt
    result = publish_agent.vip.rpc.call(
        PLATFORM_ACTUATOR,
        REQUEST_NEW_SCHEDULE,
        agentid,
        taskid,
        'HIGH',
        msg).get(timeout=10)
    assert result['result'] == SUCCESS
    # wait for 2 callbacks - success msg for task_high_priority and preempt
    # msg for task_low_priority
    gevent.sleep(6)
    print ('call args list:', publish_agent.callback.call_args_list)
    assert publish_agent.callback.call_count == 2
    # Grab the args of callback and verify
    call_args1 = publish_agent.callback.call_args_list[0][0]
    call_args2 = publish_agent.callback.call_args_list[1][0]
    assert call_args1[1] == PLATFORM_ACTUATOR
    assert call_args1[3] == topics.ACTUATOR_SCHEDULE_RESULT
    # initialize 0 to schedule response and 1 to cancel response
    schedule_header = call_args1[4]
    schedule_message = call_args1[5]
    cancel_header = call_args2[4]
    cancel_message = call_args2[5]
    # check if order is reversed: 0 is cancelresponse and 1 is new schedule
    if call_args1[4]['type'] == 'CANCEL_SCHEDULE':
        assert call_args2[4]['type'] == 'NEW_SCHEDULE'
        cancel_header = call_args1[4]
        cancel_message = call_args1[5]
        schedule_header = call_args2[4]
        schedule_message = call_args2[5]
    else:
        assert call_args1[4]['type'] == 'NEW_SCHEDULE'
        assert call_args2[4]['type'] == 'CANCEL_SCHEDULE'
    # values remain as initialized above if/else
    assert schedule_header['type'] == 'NEW_SCHEDULE'
    assert schedule_header['taskID'] == taskid
    assert schedule_header['requesterID'] == agentid
    assert schedule_message['result'] == SUCCESS
    assert cancel_header['taskID'] == 'task_low_priority3'
    assert cancel_message['data']['agentID'] == agentid
    assert cancel_message['data']['taskID'] == taskid
    assert cancel_message['result'] == 'PREEMPTED'
    # High priority task's schedule request should succeed but it should not
    # be able to start write to the device till active task's (
    # 'task_low_priority3') grace time is over
    try:
        result = publish_agent.vip.rpc.call(
            PLATFORM_ACTUATOR,  # Target agent
            'set_point',  # Method
            agentid,  # Requestor
            'fakedriver1/SampleWritableFloat1',  # Point to set
            2.5  # New value
        ).get(timeout=10)
        pytest.fail('Expecting LockError. Code returned: {}'.format(result))
    except RemoteError as e:
        assert e.exc_info['exc_type'] == 'actuator.agent.LockError'
        assert e.message == 'caller ({}) does not have this lock'.format(
            agentid)
@pytest.mark.actuator
def test_schedule_premept_error_active_task(publish_agent, cancel_schedules):
    """
    Test error response for schedule request.
    Test schedule preemption of an actively running task with priority LOW by
    a higher priority task from the same agent. It should fail as the
    LOW (non-preemptable) priority task's time window is active.
    :param publish_agent: fixture invoked to setup all agents necessary and
    returns an instance of Agent object used for publishing
    :param cancel_schedules: fixture used to cancel the schedule at the end
    of test so that other tests can use the same device and time slot
    """
    print ("\n**** test_schedule_premept_error_active_task ****")
    # used by cancel_schedules
    agentid = TEST_AGENT
    taskid = 'task_low_priority3'
    cancel_schedules.append({'agentid': agentid, 'taskid': taskid})
    publish_agent.callback = MagicMock(name="callback")
    publish_agent.callback.reset_mock()
    # subscribe to schedule response topic
    publish_agent.vip.pubsub.subscribe(peer='pubsub',
                                       prefix=topics.ACTUATOR_SCHEDULE_RESULT,
                                       callback=publish_agent.callback).get()
    # an immediately-active window; PRIORITY_LOW cannot be preempted
    start = str(datetime.now())
    end = str(datetime.now() + timedelta(seconds=10))
    msg = [
        ['fakedriver1', start, end]
    ]
    result = publish_agent.vip.rpc.call(
        PLATFORM_ACTUATOR,
        REQUEST_NEW_SCHEDULE,
        agentid,
        taskid,
        PRIORITY_LOW,
        msg).get(timeout=10)
    # expected result {'info': u'', 'data': {}, 'result': SUCCESS}
    print result
    assert result['result'] == SUCCESS
    # wait for above call's success response to publish_agent.callback method
    gevent.sleep(1)
    publish_agent.callback.reset_mock()
    # the HIGH request must be rejected: the LOW task is already active
    result = publish_agent.vip.rpc.call(
        PLATFORM_ACTUATOR,
        REQUEST_NEW_SCHEDULE,
        agentid,
        'failed_high_priority_task',
        'HIGH',
        msg).get(timeout=10)
    assert result['result'] == FAILURE
    assert result['info'] == 'CONFLICTS_WITH_EXISTING_SCHEDULES'
    # the conflicting task is reported back in the error payload
    assert result['data'][TEST_AGENT].keys()[0] == taskid
@pytest.mark.actuator
def test_schedule_premept_future_task(publish_agent, cancel_schedules):
    """
    Test schedule preemption of a future task with priority LOW by a higher
    priority task from a different agent. The new schedule should succeed
    and the low priority task should receive a PREEMPTED cancellation.
    :param publish_agent: fixture invoked to setup all agents necessary and
    returns an instance of Agent object used for publishing
    :param cancel_schedules: fixture used to cancel the schedule at the end
    of test so that other tests can use the same device and time slot
    """
    print ("\n**** test_schedule_premept_future_task ****")
    # used by cancel_schedules
    agentid = 'new_agent'
    taskid = 'task_high_priority4'
    cancel_schedules.append({'agentid': agentid, 'taskid': taskid})
    # add low prority task as well since it won't get cancelled till end of
    # grace time
    cancel_schedules.append(
        {'agentid': TEST_AGENT, 'taskid': 'task_low_priority4'})
    publish_agent.callback = MagicMock(name="callback")
    publish_agent.callback.reset_mock()
    # subscribe to schedule response topic
    publish_agent.vip.pubsub.subscribe(peer='pubsub',
                                       prefix=topics.ACTUATOR_SCHEDULE_RESULT,
                                       callback=publish_agent.callback).get()
    # a window starting in the future, so the LOW task is not yet active
    start = str(datetime.now() + timedelta(seconds=10))
    end = str(datetime.now() + timedelta(seconds=20))
    msg = [
        ['fakedriver2', start, end]
    ]
    result = publish_agent.vip.rpc.call(
        PLATFORM_ACTUATOR,
        REQUEST_NEW_SCHEDULE,
        TEST_AGENT,
        'task_low_priority4',
        'LOW',
        msg).get(timeout=10)
    # expected result {'info': u'', 'data': {}, 'result': SUCCESS}
    print result
    assert result['result'] == SUCCESS
    # wait for above call's success response to publish_agent.callback method
    gevent.sleep(1)
    publish_agent.callback.reset_mock()
    # same window, higher priority, different agent -> should preempt
    result = publish_agent.vip.rpc.call(
        PLATFORM_ACTUATOR,
        REQUEST_NEW_SCHEDULE,
        agentid,
        taskid,
        'HIGH',
        msg).get(timeout=10)
    assert result['result'] == SUCCESS
    # wait for 2 callbacks - success msg for task_high_priority and preempt
    # msg for task_low_priority
    gevent.sleep(6)
    print ('call args list:', publish_agent.callback.call_args_list)
    assert publish_agent.callback.call_count == 2
    # Grab the args of callback and verify
    call_args1 = publish_agent.callback.call_args_list[0][0]
    call_args2 = publish_agent.callback.call_args_list[1][0]
    assert call_args1[1] == PLATFORM_ACTUATOR
    assert call_args1[3] == topics.ACTUATOR_SCHEDULE_RESULT
    # initialize 0 to schedule response and 1 to cancel response
    schedule_header = call_args1[4]
    schedule_message = call_args1[5]
    print ("call args of 1 ", publish_agent.callback.call_args_list[1])
    cancel_header = call_args2[4]
    cancel_message = call_args2[5]
    # check if order is reversed: 0 is cancelresponse and 1 is new schedule
    if call_args1[4]['type'] == 'CANCEL_SCHEDULE':
        assert call_args2[4]['type'] == 'NEW_SCHEDULE'
        cancel_header = call_args1[4]
        cancel_message = call_args1[5]
        schedule_header = call_args2[4]
        schedule_message = call_args2[5]
    else:
        assert call_args1[4]['type'] == 'NEW_SCHEDULE'
        assert call_args2[4]['type'] == 'CANCEL_SCHEDULE'
    # values remain as initialized above if/else
    assert schedule_header['type'] == 'NEW_SCHEDULE'
    assert schedule_header['taskID'] == taskid
    assert schedule_header['requesterID'] == agentid
    assert schedule_message['result'] == SUCCESS
    assert cancel_header['taskID'] == 'task_low_priority4'
    assert cancel_message['data']['agentID'] == agentid
    assert cancel_message['data']['taskID'] == taskid
    assert cancel_message['result'] == 'PREEMPTED'
@pytest.mark.actuator
def test_schedule_conflict_self(publish_agent):
"""
Test error response for schedule request. Test schedule with conflicting
| |
<gh_stars>0
import asyncio
import errno
import logging
import queue
from socket import error as SocketError
import time
import pybalboa.messages as messages
# Default TCP port of the Balboa wifi module.
BALBOA_DEFAULT_PORT = 4257
# Every protocol message is framed by a start and an end marker byte.
M_START = 0x7e
M_END = 0x7e
# Button/item codes used in BMTS_CONTROL_REQ messages (byte 5 of the frame).
C_PUMP1 = 0x04
C_PUMP2 = 0x05
C_PUMP3 = 0x06
C_PUMP4 = 0x07
C_PUMP5 = 0x08
C_PUMP6 = 0x09
C_LIGHT1 = 0x11
C_LIGHT2 = 0x12
C_MISTER = 0x0e
C_AUX1 = 0x16
C_AUX2 = 0x17
C_BLOWER = 0x0c
C_TEMPRANGE = 0x50
C_HEATMODE = 0x51
MAX_PUMPS = 6
# Number of known Balboa message types (indexes into mtypes below).
NROF_BMT = 14
(BMTR_STATUS_UPDATE,
 BMTR_FILTER_CONFIG,
 BMTS_CONFIG_REQ,
 BMTR_CONFIG_RESP,
 BMTS_FILTER_REQ,
 BMTS_CONTROL_REQ,
 BMTS_SET_TEMP,
 BMTS_SET_TIME,
 BMTS_SET_WIFI,
 BMTS_PANEL_REQ,
 BMTS_SET_TSCALE,
 BMTR_PANEL_RESP,
 BMTR_PANEL_NOCLUE1,
 BMTR_PANEL_NOCLUE2) = range(0, NROF_BMT)
# Three-byte message-type headers, indexed by the BMT* constants above.
# NOTE(review): BMTS_FILTER_REQ and BMTS_PANEL_REQ share the same header
# bytes — presumably intentional; verify against the protocol notes.
mtypes = [
    [0xFF, 0xAF, 0x13],  # BMTR_STATUS_UPDATE
    [0x0A, 0xBF, 0x23],  # BMTR_FILTER_CONFIG
    [0x0A, 0xBF, 0x04],  # BMTS_CONFIG_REQ
    [0x0A, 0XBF, 0x94],  # BMTR_CONFIG_RESP
    [0x0A, 0xBF, 0x22],  # BMTS_FILTER_REQ
    [0x0A, 0xBF, 0x11],  # BMTS_CONTROL_REQ
    [0x0A, 0xBF, 0x20],  # BMTS_SET_TEMP
    [0x0A, 0xBF, 0x21],  # BMTS_SET_TIME
    [0x0A, 0xBF, 0x92],  # BMTS_SET_WIFI
    [0x0A, 0xBF, 0x22],  # BMTS_PANEL_REQ
    [0x0A, 0XBF, 0x27],  # BMTS_SET_TSCALE
    [0x0A, 0xBF, 0x2E],  # BMTR_PANEL_RESP
    [0x0A, 0xBF, 0x24],  # BMTR_PANEL_NOCLUE1
    [0x0A, 0XBF, 0x25],  # BMTR_PANEL_NOCLUE2
]
# Human-readable names for the numeric status fields, indexed by value.
text_heatmode = ["Ready", "Ready in Rest", "Rest"]
text_heatstate = ["Idle", "Heating", "Heat Waiting"]
text_tscale = ["Fahrenheit", "Celsius"]
text_timescale = ["12h", "24h"]
text_pump = ["Off", "Low", "High"]
text_temprange = ["High", "Low"]
text_blower = ["Off", "Low", "Medium", "High"]
text_switch = ["Off", "On"]
text_filter = ["Off", "Cycle 1", "Cycle 2", "Cycle 1 and 2"]
class BalboaSpaWifi:
def __init__(self, hostname, port=BALBOA_DEFAULT_PORT):
    """Set up client state for the spa at *hostname*:*port*.

    No I/O happens here; call connect() to open the TCP connection.
    """
    # API Constants
    self.TSCALE_C = 1
    self.TSCALE_F = 0
    self.HEATMODE_READY = 0
    self.HEATMODE_RNR = 1  # Ready in Rest
    self.HEATMODE_REST = 2
    self.TIMESCALE_12H = 0
    self.TIMESCALE_24H = 1
    self.PUMP_OFF = 0
    self.PUMP_LOW = 1
    self.PUMP_HIGH = 2
    self.TEMPRANGE_LOW = 0
    self.TEMPRANGE_HIGH = 1
    # Set-temperature limits indexed as [temprange][tempscale],
    # i.e. [low/high range][Fahrenheit/Celsius].
    self.tmin = [
        [50.0, 10.0],
        [80.0, 26.0],
    ]
    self.tmax = [
        [80.0, 26.0],
        [104.0, 40.0],
    ]
    self.BLOWER_OFF = 0
    self.BLOWER_LOW = 1
    self.BLOWER_MEDIUM = 2
    self.BLOWER_HIGH = 3
    self.FILTER_OFF = 0
    self.FILTER_1 = 1
    self.FILTER_2 = 2
    self.FILTER_1_2 = 3
    self.OFF = 0
    self.ON = 1
    self.HEATSTATE_IDLE = 0
    self.HEATSTATE_HEATING = 1
    self.HEATSTATE_HEAT_WAITING = 2
    # Internal states
    self.host = hostname
    self.port = port
    self.reader = None  # asyncio StreamReader once connected
    self.writer = None  # asyncio StreamWriter once connected
    self.connected = False
    self.config_loaded = False
    # *_array entries describe which devices the spa reports having;
    # *_status entries hold their last known state.
    self.pump_array = [0, 0, 0, 0, 0, 0]
    self.light_array = [0, 0]
    self.circ_pump = 0
    self.blower = 0
    self.mister = 0
    self.aux_array = [0, 0]
    self.tempscale = self.TSCALE_F
    self.priming = 0
    self.timescale = 0
    self.curtemp = 0.0
    self.settemp = 0.0
    self.heatmode = 0
    self.heatstate = 0
    self.temprange = 0
    self.pump_status = [0, 0, 0, 0, 0, 0]
    self.circ_pump_status = 0
    self.light_status = [0, 0]
    self.mister_status = 0
    self.blower_status = 0
    self.aux_status = [0, 0]
    self.lastupd = 0  # timestamp of the last status update
    self.sleep_time = 60
    self.macaddr = 'Unknown'
    self.time_hour = 0
    self.time_minute = 0
    self.filter_mode = 0
    self.prior_status = None
    self.new_data_cb = None  # optional awaitable invoked on new data
    self.model_name = 'Unknown'
    self.sw_vers = 'Unknown'
    self.cfg_sig = 'Unknown'
    self.setup = 0
    self.ssid = 'Unknown'
    self.log = logging.getLogger(__name__)
async def connect(self):
    """Connect to the spa.

    Returns True on success, False if the connection could not be
    established.
    """
    try:
        self.reader, self.writer = await asyncio.open_connection(self.host,
                                                                 self.port)
    # FIX: open_connection can also raise other OSErrors (DNS failure,
    # network unreachable, timeout at OS level); ConnectionRefusedError
    # is an OSError subclass, so catching OSError covers it too.
    except (asyncio.TimeoutError, OSError):
        self.log.error("Cannot connect to spa at {0}:{1}".format(self.host,
                                                                 self.port))
        return False
    self.connected = True
    return True
async def disconnect(self):
    """ Stop talking to the spa.

    Clears the connected flag first so other coroutines stop using the
    stream, then closes the writer and waits for the close to finish.
    """
    self.log.info("Disconnect requested")
    self.connected = False
    self.writer.close()
    await self.writer.wait_closed()
async def int_new_data_cb(self):
    """Dispatch to the user-registered new-data callback, if any.

    Bound via self.new_data_cb; a no-op when no callback is set.
    """
    callback = self.new_data_cb
    if callback is not None:
        await callback()
async def send_config_req(self):
    """ Ask the spa for its config. """
    if not self.connected:
        return
    # Frame: start marker, length, 3-byte message type, payload, end marker.
    message = bytearray([M_START, 5])
    message.extend(mtypes[BMTS_CONFIG_REQ])
    message.append(0x77)  # known value
    message.append(M_END)
    self.writer.write(message)
    await self.writer.drain()
async def send_panel_req(self, ba, bb):
    """ Send a panel request carrying two payload bytes.
    0001020304 0506070809101112
    0,1 - 7E0B0ABF2E 0A0001500000BF7E
    2,0 - 7E1A0ABF24 64DC140042503230303047310451800C6B010A0200F97E
    4,0 - 7E0E0ABF25 120432635068290341197E
    """
    if not self.connected:
        return
    # CRC covers the length byte through the payload (not the framing).
    body = bytearray([8])
    body.extend(mtypes[BMTS_PANEL_REQ])
    body.extend([ba, 0, bb])
    frame = bytearray([M_START]) + body
    frame.append(messages.Message.crc(body))
    frame.append(M_END)
    self.writer.write(frame)
    await self.writer.drain()
async def send_temp_change(self, newtemp):
    """ Change the set temp to newtemp.

    Rejects values outside the current temprange/tempscale boundaries.
    """
    if not self.connected:
        return
    # Check if the temp is valid for the heatmode
    low = self.tmin[self.temprange][self.tempscale]
    high = self.tmax[self.temprange][self.tempscale]
    if not (low <= newtemp <= high):
        self.log.error("Attempt to set temp outside of boundary of heatmode")
        return
    # Celsius values are transmitted doubled (half-degree resolution).
    if self.tempscale == self.TSCALE_C:
        newtemp *= 2.0
    body = bytearray([6])
    body.extend(mtypes[BMTS_SET_TEMP])
    body.append(int(round(newtemp)))
    frame = bytearray([M_START]) + body
    frame.append(messages.Message.crc(body))
    frame.append(M_END)
    self.writer.write(frame)
    await self.writer.drain()
async def change_light(self, light, newstate):
    """ Change light #light to newstate.

    The light control is a toggle button, so nothing is sent when the
    light is already in the requested state.
    """
    if not self.connected:
        return
    # we don't have 3 lights!
    if light > 1:
        return
    # we don't have THIS light
    if not self.light_array[light]:
        return
    # this is a toggle switch, not on/off
    if self.light_status[light] == newstate:
        return
    # Setup the basic things we know
    data = bytearray(9)
    data[0] = M_START
    data[1] = 7
    data[2] = mtypes[BMTS_CONTROL_REQ][0]
    data[3] = mtypes[BMTS_CONTROL_REQ][1]
    data[4] = mtypes[BMTS_CONTROL_REQ][2]
    data[5] = C_LIGHT1 if light == 0 else C_LIGHT2
    data[6] = 0x00  # who knows?
    # FIX: the CRC must cover bytes 1..6 only (length through payload),
    # as in every other control message; the old data[1:] also hashed
    # the still-zero CRC and end-marker slots.
    data[7] = messages.Message.crc(data[1:7])
    data[8] = M_END
    self.writer.write(data)
    await self.writer.drain()
async def change_pump(self, pump, newstate):
    """ Change pump #pump to newstate.

    Pump controls are toggle buttons; a two-speed pump cycles
    Off -> Low -> High -> Off, so reaching the target state can take up
    to two presses, one second apart.
    """
    if not self.connected:
        return
    # FIX: pumps are indexed 0..MAX_PUMPS-1; the old check
    # `pump > MAX_PUMPS` let index 6 through and crashed on pump_array.
    if pump >= MAX_PUMPS:
        return
    # we don't have THIS pump
    if not self.pump_array[pump]:
        return
    # this is a toggle switch, not on/off
    if self.pump_status[pump] == newstate:
        return
    # what we know:
    data = bytearray(9)
    data[0] = M_START
    data[1] = 7
    data[2] = mtypes[BMTS_CONTROL_REQ][0]
    data[3] = mtypes[BMTS_CONTROL_REQ][1]
    data[4] = mtypes[BMTS_CONTROL_REQ][2]
    data[6] = 0x00  # who knows?
    data[8] = M_END
    # calculate how many times to push the button
    if self.pump_array[pump] == 2:
        for presses in range(1, 2 + 1):
            if newstate == ((self.pump_status[pump] + presses) % 3):
                break
    else:
        presses = 1
    # now push the button until we hit the desired state
    for _ in range(presses):
        # 4 is 0, 5 is 2, presume 6 is 3?
        data[5] = C_PUMP1 + pump
        data[7] = messages.Message.crc(data[1:7])
        self.writer.write(data)
        await self.writer.drain()
        await asyncio.sleep(1.0)
async def change_heatmode(self, newmode):
    """ Change the spa's heatmode to newmode.

    Heat mode is a tri-state (Ready / Ready-in-Rest / Rest) driven by a
    single toggle button; the button is pressed only when the requested
    transition is actually reachable from the current mode.
    """
    if not self.connected:
        return
    # check for sanity
    if newmode > 2:
        return
    # this is a toggle switch, not on/off
    if self.heatmode == newmode:
        return
    # what we know:
    data = bytearray(9)
    data[0] = M_START
    data[1] = 7
    data[2] = mtypes[BMTS_CONTROL_REQ][0]
    data[3] = mtypes[BMTS_CONTROL_REQ][1]
    data[4] = mtypes[BMTS_CONTROL_REQ][2]
    data[5] = C_HEATMODE
    data[6] = 0x00  # who knows?
    data[7] = messages.Message.crc(data[1:7])
    data[8] = M_END
    # You can't put the spa in REST, it can BE in rest, but you cannot
    # force it into rest. It's a tri-state, but a binary switch.
    # calculate how many times to push the button
    if newmode == self.HEATMODE_READY:
        if (self.heatmode == self.HEATMODE_REST or
                self.heatmode == self.HEATMODE_RNR):
            self.writer.write(data)
            await self.writer.drain()
            await asyncio.sleep(0.5)
    if newmode == self.HEATMODE_REST or newmode == self.HEATMODE_RNR:
        if self.heatmode == self.HEATMODE_READY:
            self.writer.write(data)
            await self.writer.drain()
            await asyncio.sleep(0.5)
async def change_temprange(self, newmode):
    """ Change the spa's temprange to newmode.

    The temperature range control is a toggle button, so nothing is
    sent when the spa is already in the requested range.
    """
    if not self.connected:
        return
    # check for sanity
    if newmode > 1:
        return
    # this is a toggle switch, not on/off
    if self.temprange == newmode:
        return
    # what we know:
    data = bytearray(9)
    data[0] = M_START
    data[1] = 7
    data[2] = mtypes[BMTS_CONTROL_REQ][0]
    data[3] = mtypes[BMTS_CONTROL_REQ][1]
    data[4] = mtypes[BMTS_CONTROL_REQ][2]
    data[5] = C_TEMPRANGE
    data[6] = 0x00  # who knows?
    data[7] = messages.Message.crc(data[1:7])
    data[8] = M_END
    # FIX: the message was built but never sent, so the temprange
    # button press never reached the spa (every sibling method writes).
    self.writer.write(data)
    await self.writer.drain()
async def change_aux(self, aux, newstate):
    """ Change aux #aux to newstate.

    Aux controls are toggle buttons, so nothing is sent when the
    channel is already in the requested state.
    """
    if not self.connected:
        return
    # only two aux channels exist (indices 0 and 1)
    if aux > 1:
        return
    # this spa does not report having that aux channel
    if not self.aux_array[aux]:
        return
    # toggle button: already in the requested state
    if self.aux_status[aux] == newstate:
        return
    # Body = length byte, message type, button code, constant byte;
    # the CRC covers exactly these bytes.
    body = bytearray([7])
    body.extend(mtypes[BMTS_CONTROL_REQ])
    body.append(C_AUX1 if aux == 0 else C_AUX2)
    body.append(0x00)  # who knows?
    frame = bytearray([M_START]) + body
    frame.append(messages.Message.crc(body))
    frame.append(M_END)
    self.writer.write(frame)
    await self.writer.drain()
async def change_mister(self, newmode):
""" Change the spa's mister to newmode. """
if not self.connected:
return
# | |
<filename>tests/test_basic.py
import pytest
from parsita import *
def test_literals():
    """lit() matches exact strings and reports precise failure positions."""
    class TestParsers(GeneralParsers):
        a = lit("a")
        bb = lit("bb")

    assert TestParsers.a.parse("a") == Success("a")
    assert TestParsers.bb.parse("bb") == Success("bb")
    assert TestParsers.bb.parse("bbb") == Failure("Expected end of source but found b at index 2")
    assert TestParsers.bb.parse("aa") == Failure("Expected b but found a at index 0")
    # The metaclass names each parser after its class attribute.
    assert str(TestParsers.a) == "a = 'a'"
    assert str(TestParsers.bb) == "bb = 'bb'"
def test_multiple_literals():
    """lit() with several strings accepts any one of them."""
    class TestParsers(GeneralParsers):
        ab = lit("a", "b")

    assert TestParsers.ab.parse("a") == Success("a")
    assert TestParsers.ab.parse("b") == Success("b")
def test_or_die():
    """or_die() unwraps a Success and raises ParseError on Failure."""
    class TestParsers(GeneralParsers):
        a = lit("a")
        bb = lit("bb")

    assert TestParsers.a.parse("a").or_die() == "a"
    with pytest.raises(ParseError, match="Expected b but found a at index 0"):
        TestParsers.bb.parse("aa").or_die()
def test_predicate():
    """pred() matches one element satisfying a predicate function."""
    class TestParsers(GeneralParsers):
        a = pred(any1, lambda x: x in ("A", "a"), "letter A")
        d = pred(any1, str.isdigit, "digit")

    assert TestParsers.a.parse("a") == Success("a")
    assert TestParsers.a.parse("A") == Success("A")
    assert TestParsers.d.parse("2") == Success("2")
    assert TestParsers.d.parse("23") == Failure("Expected end of source but found 3 at index 1")
    assert TestParsers.d.parse("a") == Failure("Expected digit but found a at index 0")
    # The description string appears in both the repr and error messages.
    assert str(TestParsers.a) == "a = pred(any1, letter A)"
def test_forward_declaration():
    """A parser may reference a name defined later in the class body."""
    class TestParsers(GeneralParsers):
        a = b  # resolved by the metaclass once b is defined below
        b = lit("b")

    assert TestParsers.a.parse("b") == Success("b")
    assert TestParsers.a.parse("ab") == Failure("Expected b but found a at index 0")
def test_forward_expression():
    """Forward references also work inside | and & expressions."""
    class TestParsers(GeneralParsers):
        a = lit("a")
        ca = c | a
        da = d & a
        c = lit("c")
        d = lit("d")

    assert TestParsers.ca.parse("c") == Success("c")
    assert TestParsers.ca.parse("a") == Success("a")
    assert TestParsers.da.parse("da") == Success(["d", "a"])
    assert str(TestParsers.ca) == "ca = c | a"
    assert str(TestParsers.da) == "da = d & a"
def test_manual_forward():
    """fwd() declares a parser explicitly and defines it afterwards."""
    class TestParsers(GeneralParsers):
        b = fwd()
        a = "a" & b
        b.define("b" & opt(a))

    assert TestParsers.a.parse("ab") == Success(["a", ["b", []]])
    assert TestParsers.a.parse("abab") == Success(["a", ["b", [["a", ["b", []]]]]])
def test_manual_forward_mutual():
    """Two fwd() parsers can be mutually recursive."""
    class TestParsers(GeneralParsers):
        a = fwd()
        b = fwd()
        a.define("a" & b)
        b.define("b" & opt(a))

    assert TestParsers.a.parse("ab") == Success(["a", ["b", []]])
    assert TestParsers.a.parse("abab") == Success(["a", ["b", [["a", ["b", []]]]]])
def test_multiple_references():
    """One forward name may be referenced by several parsers."""
    class TestParsers(GeneralParsers):
        a = lit("a")
        cora = c | a
        canda = c & a
        c = "c"

    assert TestParsers.cora.parse("c") == Success("c")
    assert TestParsers.cora.parse("a") == Success("a")
    assert TestParsers.canda.parse("ca") == Success(["c", "a"])
    # A bare string stays a quoted literal in the repr.
    assert str(TestParsers.cora) == "cora = 'c' | a"
    assert str(TestParsers.canda) == "canda = 'c' & a"
def test_optional():
    """opt() wraps a match in a list; its absence is still checked at EOF."""
    class TestParsers(GeneralParsers):
        a = lit("a")
        b = opt(a)

    assert TestParsers.b.parse("a") == Success(["a"])
    assert TestParsers.b.parse("c") == Failure("Expected a or end of source but found c at index 0")
    assert str(TestParsers.b) == "b = opt(a)"
def test_optional_longer():
    """When opt()'s inner parser partially matches, its error is reported."""
    class TestParsers(GeneralParsers):
        a = lit("ab")
        b = opt(a)

    assert TestParsers.b.parse("ab") == Success(["ab"])
    assert TestParsers.b.parse("ac") == Failure("Expected b but found c at index 1")
    assert str(TestParsers.b) == "b = opt(a)"
def test_optional_literal():
    """A bare string handed to opt() is promoted to a literal parser and
    rendered quoted by str()."""
    class TestParsers(GeneralParsers):
        b = opt("ab")

    optional = TestParsers.b
    assert optional.parse("ab") == Success(["ab"])
    assert optional.parse("ac") == Failure("Expected b but found c at index 1")
    assert str(optional) == "b = opt('ab')"
def test_alternative():
    """``|`` succeeds with whichever alternative matches and merges the
    expectations of both branches into failure messages."""
    class TestParsers(GeneralParsers):
        a = lit("a")
        b = lit("b")
        c = lit("cd")
        ab = a | b
        bc = b | c

    a_or_b = TestParsers.ab
    b_or_c = TestParsers.bc
    assert a_or_b.parse("a") == Success("a")
    assert a_or_b.parse("b") == Success("b")
    assert a_or_b.parse("c") == Failure("Expected a or b but found c at index 0")
    assert b_or_c.parse("cd") == Success("cd")
    assert b_or_c.parse("ce") == Failure("Expected d but found e at index 1")
    assert str(b_or_c) == "bc = b | c"
def test_multiple():
    """Alternation behaves the same however the operands are associated."""
    class TestParsers(GeneralParsers):
        a = lit("aaaa")
        b = lit("bbb")
        c = lit("cc")
        d = lit("d")
        back = a | (b | c | d)
        front = (a | b | c) | d
        both = (a | b) | c | d

    for parser in [TestParsers.back, TestParsers.front, TestParsers.both]:
        assert parser.parse("aaaa") == Success("aaaa")
        assert parser.parse("cc") == Success("cc")
        assert parser.parse("bbc") == Failure("Expected b but found c at index 2")
        assert parser.parse("bbba") == Failure("Expected end of source but found a at index 3")
    # BUG FIX: these three checks were previously written as bare tuples
    # (``str(...), "..."``) which evaluate to no-ops, so the flattened string
    # representations were never actually asserted.
    assert str(TestParsers.back) == "back = a | b | c | d"
    assert str(TestParsers.front) == "front = a | b | c | d"
    assert str(TestParsers.both) == "both = a | b | c | d"
def test_right_or():
    """A bare string on the left of ``|`` is promoted to a literal parser
    by the parser on the right."""
    class TestParsers(GeneralParsers):
        ab = "a" | lit("b")

    result = TestParsers.ab.parse("a")
    assert result == Success("a")
def test_multiple_messages_duplicate():
    """When both branches of an alternation fail with the same expectation,
    the message is reported only once."""
    class TestParsers(GeneralParsers):
        a = lit("a")
        ab = a & "b"
        ac = a & "c"
        either = ab | ac

    outcome = TestParsers.either.parse("cc")
    assert outcome == Failure("Expected a but found c at index 0")
def test_longest():
    """str() of a longest() parser lists each alternative, quoting bare
    string literals."""
    class TestParsers(GeneralParsers):
        a = lit("a")
        either = longest(a, "b")

    rendered = str(TestParsers.either)
    assert rendered == "either = longest(a, 'b')"
def test_sequential():
    """Chained ``&`` parsers flatten into a single result list, and a failure
    is reported at the first non-matching element."""
    class TestParsers(GeneralParsers):
        a = lit("a")
        b = lit("b")
        c = lit("cd")
        ab = a & b
        bc = b & c
        abc = a & b & c

    pair_ab = TestParsers.ab
    pair_bc = TestParsers.bc
    triple = TestParsers.abc
    assert pair_ab.parse("ab") == Success(["a", "b"])
    assert pair_bc.parse("bcd") == Success(["b", "cd"])
    assert triple.parse("abcd") == Success(["a", "b", "cd"])
    assert triple.parse("abc") == Failure("Expected d but found end of source")
    assert triple.parse("abf") == Failure("Expected c but found f at index 2")
    assert str(triple) == "abc = a & b & c"
def test_discard_left():
    # ``>>`` discards the left result and keeps the right. Note ``ac = a >> c``
    # references ``c`` before its assignment — a deliberate forward reference,
    # so do not reorder the class body.
    class TestParsers(GeneralParsers):
        a = lit("a")
        b = lit("b")
        ab = a >> b
        ac = a >> c
        c = lit("c")

    assert TestParsers.ab.parse("ab") == Success("b")
    assert TestParsers.ac.parse("ac") == Success("c")
    assert str(TestParsers.ac) == "ac = a >> c"
def test_discard_right():
    # ``<<`` keeps the left result and discards the right. As in
    # test_discard_left, ``ac = a << c`` forward-references ``c``; keep the
    # statement order as written.
    class TestParsers(GeneralParsers):
        a = lit("a")
        b = lit("b")
        ab = a << b
        ac = a << c
        c = lit("c")

    assert TestParsers.ab.parse("ab") == Success("a")
    assert TestParsers.ac.parse("ac") == Success("a")
    # The discarded right side must still match.
    assert TestParsers.ac.parse("aa") == Failure("Expected c but found a at index 1")
    assert str(TestParsers.ac) == "ac = a << c"
def test_discard_bare_literals():
    """``>>`` and ``<<`` promote a bare string operand on either side."""
    class TestParsers(GeneralParsers):
        a = lit("a")
        b = "b"
        rshift = a >> b
        rrshift = b >> a
        lshift = a << b
        rlshift = b << a

    cases = [
        (TestParsers.rshift, "ab", "b"),
        (TestParsers.rrshift, "ba", "a"),
        (TestParsers.lshift, "ab", "a"),
        (TestParsers.rlshift, "ba", "b"),
    ]
    for parser, text, kept in cases:
        assert parser.parse(text) == Success(kept)
def test_repeated():
    """rep1 requires at least one match; rep also accepts zero matches."""
    class TestParsers(GeneralParsers):
        bs = rep1("b")
        cs = rep("c")

    one_or_more = TestParsers.bs
    zero_or_more = TestParsers.cs
    assert one_or_more.parse("bbbb") == Success(["b"] * 4)
    assert one_or_more.parse("b") == Success(["b"])
    assert one_or_more.parse("") == Failure("Expected b but found end of source")
    assert one_or_more.parse("bbbc") == Failure("Expected b or end of source but found c at index 3")
    assert zero_or_more.parse("ccc") == Success(["c"] * 3)
    assert zero_or_more.parse("c") == Success(["c"])
    assert zero_or_more.parse("") == Success([])
    assert zero_or_more.parse("cccb") == Failure("Expected c or end of source but found b at index 3")
    assert str(one_or_more) == "bs = rep1('b')"
    assert str(zero_or_more) == "cs = rep('c')"
def test_repeated_with_bounds():
    """rep() honors its min/max bounds on the repetition count."""
    source = "bbbb"
    four_bs = Success(["b"] * 4)
    # Four repetitions satisfy each of these bounds.
    assert rep("b", min=2).parse(source) == four_bs
    assert rep("b", max=5).parse(source) == four_bs
    assert rep("b", min=3, max=5).parse(source) == four_bs
    assert rep("b", min=4, max=4).parse(source) == four_bs
    # Four repetitions violate these bounds.
    assert isinstance(rep("b", min=5).parse(source), Failure)
    assert isinstance(rep("b", max=3).parse(source), Failure)
def test_repeated_longer():
    """Repetition of a multi-character literal reports failures at the exact
    character where the literal stopped matching."""
    class TestParsers(GeneralParsers):
        bf = rep1("bf")
        cf = rep("cf")

    one_or_more = TestParsers.bf
    zero_or_more = TestParsers.cf
    assert one_or_more.parse("bfbf") == Success(["bf"] * 2)
    assert one_or_more.parse("bf") == Success(["bf"])
    assert one_or_more.parse("") == Failure("Expected b but found end of source")
    assert one_or_more.parse("bfbc") == Failure("Expected f but found c at index 3")
    assert zero_or_more.parse("cfcfcf") == Success(["cf"] * 3)
    assert zero_or_more.parse("cf") == Success(["cf"])
    assert zero_or_more.parse("") == Success([])
    assert zero_or_more.parse("cfcb") == Failure("Expected f but found b at index 3")
    assert str(one_or_more) == "bf = rep1('bf')"
    assert str(zero_or_more) == "cf = rep('cf')"
def test_repeated_separated():
    """Separated repetition keeps the element results and drops separators."""
    class TestParsers(GeneralParsers):
        bs = rep1sep("b", ",")
        cs = repsep("c", ",")

    one_or_more = TestParsers.bs
    zero_or_more = TestParsers.cs
    assert one_or_more.parse("b,b,b") == Success(["b"] * 3)
    assert one_or_more.parse("b") == Success(["b"])
    assert one_or_more.parse("") == Failure("Expected b but found end of source")
    assert zero_or_more.parse("c,c,c") == Success(["c"] * 3)
    assert zero_or_more.parse("c") == Success(["c"])
    assert zero_or_more.parse("") == Success([])
    assert str(one_or_more) == "bs = rep1sep('b', ',')"
    assert str(zero_or_more) == "cs = repsep('c', ',')"
def test_repeated_separated_nonliteral():
    """The separator can be an arbitrary parser — here an optional comma, so
    elements may be adjacent or comma-separated."""
    class TestParsers(GeneralParsers):
        bs = rep1sep("b", opt(","))
        cs = repsep("c", opt(","))

    one_or_more = TestParsers.bs
    zero_or_more = TestParsers.cs
    assert one_or_more.parse("b,bb") == Success(["b"] * 3)
    assert one_or_more.parse("b") == Success(["b"])
    assert one_or_more.parse("") == Failure("Expected b but found end of source")
    assert zero_or_more.parse("cc,c") == Success(["c"] * 3)
    assert zero_or_more.parse("c") == Success(["c"])
    assert zero_or_more.parse("") == Success([])
    assert str(one_or_more) == "bs = rep1sep('b', opt(','))"
    assert str(zero_or_more) == "cs = repsep('c', opt(','))"
@pytest.mark.timeout(2)
def test_infinite_recursion_protection():
class TestParsers(GeneralParsers):
bad_rep = rep(opt("a"))
bad_rep1 = rep1(opt("a"))
bad_repsep = repsep(opt("a"), opt(":"))
bad_rep1sep = rep1sep(opt("a"), opt(":"))
# Recursion happens in middle of stream
for parser in (TestParsers.bad_rep, TestParsers.bad_rep1, TestParsers.bad_repsep, TestParsers.bad_rep1sep):
with pytest.raises(
RuntimeError,
match="Infinite recursion detected in "
r"bad_rep1?(sep)? = rep1?(sep)?\(opt\('a'\)(, opt\(':'\))?\); "
"empty string was matched and will be matched forever at index 2 before b",
):
parser.parse("aab")
# Recursion happens at end of stream
for parser in (TestParsers.bad_rep, TestParsers.bad_rep1, TestParsers.bad_repsep, TestParsers.bad_rep1sep):
with pytest.raises(
RuntimeError,
match="Infinite recursion detected in | |
11, 12, 13, 14, 15, 16, EOS, 0,
0, 0, 0, 0, 0
],
[
4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0
]],
dtype=np.int32),
np.array(
[[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.float32))
expected_output2 = MassOutput(
np.array([[
3, 3, 3, 3, 3, 3, 3, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0, 0
], [
3, 3, 3, 3, 3, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
], [
4, 5, 6, 7, 3, 3, 3, 3, 3, 3, 3, 15, 16, 2, 0, 0, 0, 0, 0, 0
], [4, 5, 3, 3, 3, 3, 3, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.int32),
np.array([[
1, 4, 5, 6, 7, 8, 9, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0
], [
1, 4, 5, 6, 7, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
], [3, 3, 3, 3, 7, 8, 9, 10, 11, 12, 13, 3, 3, 3, 0, 0, 0, 0, 0, 0
], [3, 3, 5, 6, 7, 8, 9, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.int32),
np.array([[
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0,
0
], [4, 5, 6, 7, 8, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0,
0, 0, 0, 0
],
[
4, 5, 6, 7, 8, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0
]],
dtype=np.int32),
np.array(
[[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.float32))
match_idx = FindResultFromList(result,
[expected_output1, expected_output2])
self.assertIsNotNone(match_idx,
'{} is not a valid result'.format(result))
def testSegmented(self):
ids = np.array(
[[4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0],
[4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0],
[4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.int32)
weights = np.array(
[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.float32)
actual_seq_len = np.array([14, 10, 14, 10], dtype=np.int32)
g = tf.Graph()
with g.as_default():
(src_ids, tgt_ids, tgt_labels, tgt_weights) = ops.mass(
ids,
weights,
actual_seq_len,
mask_id=3,
mask_ratio=0.5,
mask_minlen=1,
span_len=3,
keep_prob=0,
rand_prob=0,
mask_prob=1,
mask_target=True,
vocab_size=9)
with self.session(graph=g) as sess:
(src_ids, tgt_ids, tgt_labels, tgt_weights) = sess.run([
src_ids,
tgt_ids,
tgt_labels,
tgt_weights,
])
result = MassOutput(src_ids, tgt_ids, tgt_labels, tgt_weights)
expected_output1 = MassOutput(
np.array([[
4, 3, 3, 3, 3, 3, 3, 11, 12, 13, 14, 15, 16, 3, 0, 0, 0, 0, 0, 0
], [
4, 3, 3, 7, 8, 9, 10, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
], [4, 5, 6, 7, 8, 3, 3, 3, 12, 3, 14, 3, 3, 3, 0, 0, 0, 0, 0, 0],
[
3, 3, 6, 3, 3, 3, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0
]],
dtype=np.int32),
np.array([[
3, 4, 5, 6, 7, 8, 9, 3, 3, 3, 3, 3, 3, 16, 0, 0, 0, 0, 0, 0
], [
3, 4, 5, 3, 3, 3, 3, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
], [
3, 3, 3, 3, 3, 8, 9, 10, 3, 12, 3, 14, 15, 16, 0, 0, 0, 0, 0, 0
], [BOS, 4, 3, 6, 7, 8, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.int32),
np.array([[
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0,
0, 0
], [
4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
],
[
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0,
0, 0, 0, 0, 0
],
[
4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0
]],
dtype=np.int32),
np.array(
[[0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.float32))
expected_output2 = MassOutput(
np.array([[
4, 5, 6, 3, 3, 3, 10, 3, 3, 3, 14, 15, 16, 3, 0, 0, 0, 0, 0, 0
], [
3, 3, 6, 7, 8, 9, 10, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
], [4, 3, 3, 3, 8, 9, 10, 11, 12, 3, 3, 3, 16, 3, 0, 0, 0, 0, 0, 0
], [4, 5, 3, 3, 8, 9, 3, 3, 3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.int32),
np.array([[
3, 3, 3, 6, 7, 8, 3, 10, 11, 12, 3, 3, 3, 16, 0, 0, 0, 0, 0, 0
], [
1, 4, 3, 3, 3, 3, 3, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
], [
3, 4, 5, 6, 3, 3, 3, 3, 3, 12, 13, 14, 3, 16, 0, 0, 0, 0, 0, 0
], [3, 3, 5, 6, 3, 3, 9, 10, 11, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.int32),
np.array([[
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0,
0
], [4, 5, 6, 7, 8, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0,
| |
from yaglm.config.base_penalty import PenaltyConfig, WithPenSeqConfig, \
WithFlavorPenSeqConfig, ElasticNetConfig, \
SeparableSumConfig, InfimalSumConfig, OverlappingSumConfig
from yaglm.pen_max.ridge import get_ridge_pen_max
from yaglm.pen_max.lasso import get_lasso_pen_max
from yaglm.autoassign import autoassign
class NoPenalty(PenaltyConfig):
    """Penalty configuration representing the absence of any penalty."""
class Ridge(WithPenSeqConfig):
    """
    Ridge penalty with optional weights.

    pen_val * 0.5 * ||coef||_2^2

    pen_val * 0.5 * sum_j weights_j coef_j^2

    Parameters
    ----------
    pen_val: float
        The penalty parameter value.

    weights: None, array-like
        (Optional) Weights for each term in the penalty.
    """
    @autoassign
    def __init__(self, pen_val=1, weights=None): pass

    def get_pen_val_max(self, X, y, loss, fit_intercept=True,
                        sample_weight=None, init_data=None):
        """
        Largest penalty value to use as the top of a tuning sequence,
        delegated to get_ridge_pen_max. Note ``init_data`` is accepted for
        interface compatibility but not used here.
        """
        # targ_ubd=1 / norm_by_dim=True are fixed choices for how
        # get_ridge_pen_max scales its bound — see that function for details.
        return get_ridge_pen_max(X=X, y=y, loss=loss,
                                 weights=self.weights,
                                 fit_intercept=fit_intercept,
                                 sample_weight=sample_weight,
                                 targ_ubd=1,
                                 norm_by_dim=True)
class GeneralizedRidge(WithPenSeqConfig):
    """
    Generalized ridge penalty with a matrix transform i.e.

    pen_val * 0.5 * ||mat @ coef||_2^2

    Parameters
    ----------
    pen_val: float
        The penalty parameter value.

    mat: None, array-like (K, n_features)
        The matrix transform.
    """
    # NOTE(review): unlike Ridge, no get_pen_val_max override is provided —
    # presumably the base class default applies; confirm.
    @autoassign
    def __init__(self, pen_val=1, mat=None): pass
class Lasso(WithFlavorPenSeqConfig):
    """
    Entrywise non-smooth penalty e.g. a lasso, adaptive lasso or entrywise SCAD.

    pen_val * ||coef||_1

    pen_val * sum_j weights_j |coef_j|

    sum_j non-convex_{pen_val}(coef_j)

    Parameters
    ----------
    pen_val: float
        The penalty parameter value.

    weights: None, array-like
        (Optional) Weights for each term in the penalty.

    flavor: None, PenaltyFlavor
        (Optional) Which flavor of the penalty to use e.g. adaptive, non-convex direct, or non-convex LLA.
    """
    @autoassign
    def __init__(self, pen_val=1, weights=None, flavor=None): pass

    def _get_vanilla_pen_val_max(self, X, y, loss, fit_intercept=True,
                                 sample_weight=None):
        # Pen-val max for the plain (unflavored) lasso; flavor adjustments
        # are handled by the WithFlavorPenSeqConfig base class.
        return get_lasso_pen_max(X=X, y=y, loss=loss,
                                 weights=self.weights,
                                 fit_intercept=fit_intercept,
                                 sample_weight=sample_weight)
# TODO: add default weights
# TODO: add default to infimal option for overlapping
class GroupLasso(WithFlavorPenSeqConfig):
    """
    Group penalty e.g. group lasso, adaptive group lasso, or group non-convex.

    pen_val * sum_{g in groups} weights_g ||coef_g||_2

    sum_{g in groups} non-convex_{pen_val}(||coef_g||_2)

    Parameters
    ----------
    groups: list, None
        Indices of the groups. If None, then all features are put in a single group.

    pen_val: float
        The penalty parameter value.

    weights: None, array-like
        (Optional) Weights for each term in the penalty.

    flavor: None, PenaltyFlavor
        (Optional) Which flavor of the penalty to use e.g. adaptive, non-convex direct, or non-convex LLA.

    References
    ----------
    <NAME>. and <NAME>., 2006. Model selection and estimation in regression with grouped variables. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 68(1), pp.49-67.

    <NAME>. and <NAME>., 2015. Group descent algorithms for nonconvex penalized linear and logistic regression models with grouped predictors. Statistics and computing, 25(2), pp.173-187.
    """
    @autoassign
    def __init__(self, groups=None, pen_val=1, weights=None, flavor=None): pass

    def _get_vanilla_pen_val_max(self, X, y, loss, fit_intercept=True,
                                 sample_weight=None):
        # Same helper as the entrywise Lasso, but with the group structure
        # forwarded so the max is computed over group norms.
        return get_lasso_pen_max(X=X, y=y, loss=loss,
                                 weights=self.weights,
                                 groups=self.groups,
                                 fit_intercept=fit_intercept,
                                 sample_weight=sample_weight)
# TODO: add flavor
# TODO: add weights -- should we have both entrywise and group?
# perhaps default gives group lasso weights
class ExclusiveGroupLasso(WithPenSeqConfig):
    """
    Exclusive group lasso.

    pen_val * sum_{g in groups} ||coef_g||_1^2

    Parameters
    ----------
    groups: list, None
        Indices of the groups. If None, then all features are put in a single group.

    pen_val: float
        The penalty parameter value.

    References
    ----------
    <NAME>. and <NAME>., 2017. Within group variable selection through the exclusive lasso. Electronic Journal of Statistics, 11(2), pp.4220-4257.

    <NAME>., <NAME>. and <NAME>., 2010, March. Exclusive lasso for multi-task feature selection. In Proceedings of the thirteenth international conference on artificial intelligence and statistics (pp. 988-995). JMLR Workshop and Conference Proceedings.
    """
    # NOTE(review): no weights/flavor support yet (see TODOs above the class)
    # and no pen-val-max override is provided.
    @autoassign
    def __init__(self, groups=None, pen_val=1): pass
class MultiTaskLasso(WithFlavorPenSeqConfig):
    """
    The multi-task lasso (including adaptive and non-convex flavors) for multiple response coefficients.

    pen_val * sum_{j=1}^{n_features} weights_j ||coef(j, :)||_2

    sum_{j=1}^{n_features} non_convex_{pen_val}(||coef(j, :)||_2)

    Parameters
    ----------
    pen_val: float
        The penalty parameter value.

    weights: None, array-like shape (n_features, )
        (Optional) Weights for each feature.

    flavor: None, PenaltyFlavor
        (Optional) Which flavor of the penalty to use e.g. adaptive, non-convex direct, or non-convex LLA.
    """
    @autoassign
    def __init__(self, pen_val=1, weights=None, flavor=None): pass

    def _get_vanilla_pen_val_max(self, X, y, loss, fit_intercept=True,
                                 sample_weight=None):
        # multi_task=True makes the helper treat each row of the coefficient
        # matrix as one penalized unit.
        return get_lasso_pen_max(X=X, y=y, loss=loss,
                                 weights=self.weights,
                                 multi_task=True,
                                 fit_intercept=fit_intercept,
                                 sample_weight=sample_weight)
class NuclearNorm(WithFlavorPenSeqConfig):
    """
    Nuclear norm, adaptive nuclear norm or non-convex nuclear norm.

    pen_val * ||coef||_*

    pen_val * sum_{j} w_j sigma_j(coef)

    sum_{j} non-convex_{pen_val}(sigma_j(coef))

    Parameters
    ----------
    pen_val: float
        The penalty parameter value.

    weights: None, array-like
        (Optional) Weights for each term in the penalty.

    flavor: None, PenaltyFlavor
        (Optional) Which flavor of the penalty to use e.g. adaptive, non-convex direct, or non-convex LLA.
    """
    @autoassign
    def __init__(self, pen_val=1, weights=None, flavor=None): pass

    def _get_vanilla_pen_val_max(self, X, y, loss, fit_intercept=True,
                                 sample_weight=None):
        # nuc=True makes the helper compute the max over singular values
        # rather than entrywise coefficients.
        return get_lasso_pen_max(X=X, y=y, loss=loss,
                                 weights=self.weights,
                                 nuc=True,
                                 fit_intercept=fit_intercept,
                                 sample_weight=sample_weight)
class FusedLasso(WithFlavorPenSeqConfig):
    """
    The graph fused lasso also known as graph trend filtering. The fused lasso (i.e. total-variation 1 penalty) is a special case when the graph is a chain graph. This penalty includes higher order trend filtering and can represent the adaptive and non-convex versions of the graph fused lasso.

    Note this is NOT the sparse fused lasso.

    The standard fused lasso (chain graph) is given by:

    pen_val * sum_{j=1}^{n_features - 1} |coef_{j+1} - coef_j|

    The graph fused lasso is given by:

    pen_val * sum_{(ij) in edgelist} w_{(ij)} |coef_i - coef_j|

    sum_{(ij) in edgelist} non-convex_{pen_val}(|coef_i - coef_j|)

    Parameters
    ----------
    pen_val: float
        The penalty parameter value.

    edgelist: str, array-like (n_edges, 2)
        The graph's edgelist. If edgelist='chain' then this is the TV-1 penalty.

    order: int
        The order of the trend filtering difference.

    weights: None, array-like
        (Optional) Weights for each edge. If edgelist='chain', this should be length n_features - 1.

    flavor: None, PenaltyFlavor
        (Optional) Which flavor of the penalty to use e.g. adaptive, non-convex direct, or non-convex LLA.

    References
    ----------
    <NAME>., <NAME>., <NAME>. and <NAME>., 2015, February. Trend filtering on graphs. In Artificial Intelligence and Statistics (pp. 1042-1050). PMLR.

    <NAME>. and <NAME>., 2011. The solution path of the generalized lasso. The annals of statistics, 39(3), pp.1335-1371.

    <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2005. Sparsity and smoothness via the fused lasso. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 67(1), pp.91-108.
    """
    @autoassign
    def __init__(self, pen_val=1, edgelist='chain', order=1,
                 weights=None, flavor=None): pass
class GeneralizedLasso(WithFlavorPenSeqConfig):
    """
    The generalized lasso including the adaptive and non-convex versions.

    pen_val * ||mat @ coef||_1

    sum_{r=1}^{p} non-convex_{pen_val}(|mat(r, :).T @ coef|)

    Parameters
    ----------
    pen_val: float
        The penalty parameter value.

    mat: array-like, shape (p, n_features)
        The transformation matrix.

    weights: None, array-like
        (Optional) Weights for each term in mat @ coef.

    flavor: None, PenaltyFlavor
        (Optional) Which flavor of the penalty to use e.g. adaptive, non-convex direct, or non-convex LLA.

    References
    ----------
    <NAME>. and <NAME>., 2011. The solution path of the generalized lasso. The annals of statistics, 39(3), pp.1335-1371.

    <NAME>. and <NAME>., 2019. The generalized lasso problem and uniqueness. Electronic Journal of Statistics, 13(2), pp.2307-2347.
    """
    @autoassign
    def __init__(self, pen_val=1, mat=None, weights=None, flavor=None): pass
########################
# ElasticNet penalties #
########################
# TODO: maybe add ridge weights?
class ElasticNet(ElasticNetConfig):
    """
    The elastic net penalty: a mix of a lasso and a ridge term,

    pen_val * mix_val * ||coef||_1 + 0.5 * pen_val * (1 - mix_val) * ||coef||_2^2

    The lasso part may carry weights or a flavor (the ridge part only
    weights). The non-convex elastic net replaces the L1 term:

    non-convex_{pen_val * mix_val}(coef) + 0.5 * pen_val * (1 - mix_val) * ||coef||_2^2

    Parameters
    ----------
    pen_val: float
        The overall penalty strength.

    mix_val: float
        The lasso/ridge mixing value, between 0 and 1.

    lasso_weights: None, array-like
        (Optional) Weights for the lasso term.

    lasso_flavor: None, FlavorConfig
        (Optional) Flavor for the lasso term.

    ridge_weights: None, array-like
        (Optional) Weights for the ridge term.
    """
    @autoassign
    def __init__(self, pen_val=1, mix_val=0.5,
                 lasso_weights=None, lasso_flavor=None,
                 ridge_weights=None): pass

    def _get_sum_configs(self):
        """Return the (lasso, ridge) sub-configs with the split strengths."""
        lasso_strength = self.pen_val * self.mix_val
        ridge_strength = self.pen_val * (1 - self.mix_val)
        return (Lasso(pen_val=lasso_strength,
                      weights=self.lasso_weights,
                      flavor=self.lasso_flavor),
                Ridge(pen_val=ridge_strength,
                      weights=self.ridge_weights))

    def get_sum_names(self):
        """Names of the summands, in the same order as _get_sum_configs."""
        return ['lasso', 'ridge']
# TODO: add default weights
# TODO: add default to infimal for overlapping
# TODO: should we allow ridge weights?
class GroupElasticNet(ElasticNetConfig):
"""
Represents the group ElasticNet penalty
pen_val * mix_val * gruop_lasso(coef; groups) + pen_val * (1 - mix_val) * ridge(coef)
non_convex-group_{pen_val * mix_val}(coef; groups) + pen_val * (1 - mix_val) * ridge(coef)
Parameters
----------
groups: list, None
Indices of the groups. If None, then all features are put in a single group.
pen_val: float
The penalty strength.
mix_val: float
The mixing value between 0 and 1.
lasso_weights: None, array-like
(Optional) Weights for the Lasso.
lasso_flavor: None, FlavorConfig
(Optional) Flavor for the lasso penalty.
ridge_weights: None, array-like
(Optional) Weights for the ridge penalty.
"""
@autoassign
def __init__(self, groups=None,
pen_val=1, mix_val=0.5,
lasso_weights=None, lasso_flavor=None,
ridge_weights=None): pass
def _get_sum_configs(self):
lasso_config = GroupLasso(groups=self.groups,
pen_val=self.pen_val * self.mix_val,
weights=self.lasso_weights,
flavor=self.lasso_flavor)
ridge_config | |
NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 3.3.2 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
# 5. MDT SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 3.3.2 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 3.3.2, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
# 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.
# 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between MDT and Licensee. This License Agreement does not grant permission to use MDT trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party.
# 8. By copying, installing or otherwise using matplotlib 3.3.2, Licensee agrees to be bound by the terms and conditions of this License Agreement.
# Copyright (c) 2005-2020, NumPy Developers.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the NumPy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# MIT License
# Copyright (c) <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The Python Imaging Library (PIL) is
# Copyright © 1997-2011 by Secret Labs AB
# Copyright © 1995-2011 by <NAME>
# Pillow is the friendly PIL fork. It is
# Copyright © 2010-2020 by <NAME> and contributors
# Like PIL, Pillow is licensed under the open source PIL Software License:
# By obtaining, using, and/or copying this software and/or its associated
# documentation, you agree that you have read, understood, and will comply
# with the following terms and conditions:
# Permission to use, copy, modify, and distribute this software and its
# associated documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appears in all copies, and that
# both that copyright notice and this permission notice appear in supporting
# documentation, and that the name of Secret Labs AB or the author not be
# used in advertising or publicity pertaining to distribution of the software
# without specific, written prior permission.
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
# IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# Copyright (c) 2014, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the PyAutoGUI nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Copyright (c) 2015, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the PyAutoGUI nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Copyright (c) 2010-2020, PyInstaller Development Team
# Copyright (c) 2005-2009, <NAME>
# Based on previous work under copyright (c) 2002 McMillan Enterprises, Inc.
# PyInstaller is licensed under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2 of the License,
# or (at your option) any later | |
_try_set(tokenvalue, color("<local> ", "blue"), lambda: format_py_obj(_resolve_identifier(f.f_locals, token)))
tokenvalue = _try_set(tokenvalue, color("<global> ", "blue"), lambda: format_py_obj(_resolve_identifier(f.f_globals, token)))
tokenvalue = _try_set(tokenvalue, color("<builtin> ", "blue"), lambda: format_py_obj(_resolve_identifier(f.f_builtins, token)))
tokenvalue = tokenvalue or color("<not found>", "blue")
prefix = ' %s ' % color(".", "blue", bold=True).join(token) + color("= ", "blue", bold=True)
output(prefix, tokenvalue)
alreadyPrintedLocals.add(token)
if len(alreadyPrintedLocals) == 0:
output(color(" no locals", "blue"))
else:
output(color(' -- code not available --', "blue"))
if isframe(_tb):
_tb = _tb.f_back
elif isstacksummary(_tb):
_tb = StackSummary.from_list(_tb[1:])
if not _tb:
_tb = None
else:
_tb = _tb.tb_next
n += 1
except Exception as e:
output(color("ERROR: cannot get more detailed exception info because:", "red", bold=True))
import traceback
for l in traceback.format_exc().split("\n"):
output(" " + l)
return out
def print_tb(tb, file=None, **kwargs):
    """
    Renders a traceback via :func:`format_tb` and writes it to a stream.

    :param tb: traceback object (or frame / StackSummary; see format_tb)
    :param file: output stream; defaults to ``sys.stderr``
    :param kwargs: forwarded unchanged to :func:`format_tb`
    """
    stream = sys.stderr if file is None else file
    for line in format_tb(tb=tb, **kwargs):
        stream.write(line)
    stream.flush()
def better_exchook(etype, value, tb, debugshell=False, autodebugshell=True, file=None, with_color=None):
    """
    Replacement for sys.excepthook: prints the extended traceback (including
    relevant variable values) followed by the exception line itself, and can
    optionally drop into a debug shell at the exception's context.
    :param etype: exception type
    :param value: exception value
    :param tb: traceback
    :param bool debugshell: spawn a debug shell at the context of the exception
    :param bool autodebugshell: if env DEBUG is an integer != 0, it will spawn a debug shell
    :param file: the output stream where we will print the traceback and exception information
    :param bool|None with_color: whether to use ANSI escape codes for colored output
    """
    if file is None:
        file = sys.stderr
    def output(ln): file.write(ln + "\n")
    color = Color(enable=with_color)
    output(color("EXCEPTION", "red", bold=True))
    # Filled in by print_tb/format_tb; handed to the debug shell below so it
    # starts with the variables of the failing frames.
    allLocals,allGlobals = {},{}
    if tb is not None:
        print_tb(tb, allLocals=allLocals, allGlobals=allGlobals, file=file, withTitle=True, with_color=color.enable)
    else:
        output(color("better_exchook: traceback unknown", "red"))
    import types
    def _some_str(value):
        # str() of arbitrary user objects may itself raise; never let that escape.
        try: return str(value)
        except Exception: return '<unprintable %s object>' % type(value).__name__
    def _format_final_exc_line(etype, value):
        # Renders "<type>: <value>", or just "<type>" when there is no value text.
        valuestr = _some_str(value)
        if value is None or not valuestr:
            line = color("%s" % etype, "red")
        else:
            line = color("%s" % etype, "red") + ": %s" % (valuestr,)
        return line
    # etype is normally an exception class, but raw instances, old-style
    # Python 2 string exceptions (types.InstanceType branch) or None can also
    # arrive here; those are printed as-is instead of via __name__.
    if (isinstance(etype, BaseException) or
            (hasattr(types, "InstanceType") and isinstance(etype, types.InstanceType)) or
            etype is None or type(etype) is str):
        output(_format_final_exc_line(etype, value))
    else:
        output(_format_final_exc_line(etype.__name__, value))
    if autodebugshell:
        # DEBUG env var (integer, non-zero) forces the debug shell; any parse
        # failure deliberately leaves debugshell unchanged.
        try: debugshell = int(os.environ["DEBUG"]) != 0
        except Exception: pass
    if debugshell:
        output("---------- DEBUG SHELL -----------")
        debug_shell(user_ns=allLocals, user_global_ns=allGlobals, traceback=tb)
    file.flush()
def dump_all_thread_tracebacks(exclude_thread_ids=None, file=None):
    """
    Prints the traceback of all threads.
    :param set[int]|list[int]|None exclude_thread_ids: thread idents to exclude
    :param file: output stream; defaults to ``sys.stdout``
    """
    if exclude_thread_ids is None:
        exclude_thread_ids = []
    if not file:
        file = sys.stdout
    import threading
    if hasattr(sys, "_current_frames"):
        print("", file=file)
        threads = {t.ident: t for t in threading.enumerate()}
        for tid, stack in sys._current_frames().items():
            if tid in exclude_thread_ids:
                continue
            # This is a bug in earlier Python versions.
            # http://bugs.python.org/issue17094
            # Note that this leaves out all threads not created via the threading module.
            if tid not in threads:
                continue
            tags = []
            thread = threads.get(tid)
            if thread:
                assert isinstance(thread, threading.Thread)
                # current_thread()/main_thread() replace the deprecated
                # currentThread() and the private _MainThread isinstance check.
                if thread is threading.current_thread():
                    tags += ["current"]
                if thread is threading.main_thread():
                    tags += ["main"]
                tags += [str(thread)]
            else:
                tags += ["unknown with id %i" % tid]
            print("Thread %s:" % ", ".join(tags), file=file)
            print_tb(stack, file=file)
            print("", file=file)
        print("That were all threads.", file=file)
    else:
        print("Does not have sys._current_frames, cannot get thread tracebacks.", file=file)
def get_current_frame():
    """
    :return: the caller's frame object (this function's own call is excluded)
    :rtype: types.FrameType
    Prefers sys._getframe; otherwise raises and catches a dummy exception to
    reach a frame via the resulting traceback.
    """
    getframe = getattr(sys, "_getframe", None)
    if getframe is not None:
        return getframe(1)
    # Fallback: an exception's traceback carries the raising frame; its
    # f_back is our caller.
    try:
        raise ZeroDivisionError
    except ZeroDivisionError:
        return sys.exc_info()[2].tb_frame.f_back
def iter_traceback(tb=None, enforce_most_recent_call_first=False):
    """
    Iterates a traceback of various formats:
      - traceback (types.TracebackType)
      - frame object (types.FrameType)
      - stack summary (traceback.StackSummary)
    :param types.TracebackType|types.FrameType|StackSummary|None tb: traceback. if None, will use sys._getframe
    :param bool enforce_most_recent_call_first:
        Frame or stack summary: most recent call first (top of the stack is the first entry in the result)
        Traceback: most recent call last
        If True, and we get a traceback, will unroll and reverse, such that we have always the most recent call first.
    :return: yields the frames (types.FrameType or DummyFrame)
    :rtype: list[types.FrameType|DummyFrame]
    """
    if tb is None:
        tb = get_current_frame()
    def is_stack_summary(_tb):
        return isinstance(_tb, StackSummary)
    is_frame = inspect.isframe
    is_traceback = inspect.istraceback
    assert is_traceback(tb) or is_frame(tb) or is_stack_summary(tb)
    # Frame or stack summary: most recent call first
    # Traceback: most recent call last
    if is_traceback(tb) and enforce_most_recent_call_first:
        # A traceback natively yields oldest-call-first; materialize and
        # reverse so the most recent call comes first.
        frames = list(iter_traceback(tb))
        for frame in frames[::-1]:
            yield frame
        return
    _tb = tb
    while _tb is not None:
        # Extract the frame for the current entry, depending on the format.
        if is_frame(_tb):
            frame = _tb
        elif is_stack_summary(_tb):
            if isinstance(_tb[0], ExtendedFrameSummary):
                # Our extended summaries keep a reference to the live frame.
                frame = _tb[0].tb_frame
            else:
                # Plain FrameSummary has no live frame; substitute a DummyFrame.
                frame = DummyFrame.from_frame_summary(_tb[0])
        else:
            frame = _tb.tb_frame
        yield frame
        # Advance to the next entry, again depending on the format.
        if is_frame(_tb):
            _tb = _tb.f_back
        elif is_stack_summary(_tb):
            _tb = StackSummary.from_list(_tb[1:])
            if not _tb:
                _tb = None
        else:
            _tb = _tb.tb_next
class ExtendedFrameSummary(FrameSummary):
    """A FrameSummary which additionally keeps a reference to the live frame
    object, so that variable values can still be inspected later."""

    def __init__(self, frame, **kwargs):
        # Remember the real frame; everything else is standard FrameSummary.
        self.tb_frame = frame
        super(ExtendedFrameSummary, self).__init__(**kwargs)
class DummyFrame:
    """
    Stand-in that exposes the combined attribute surface of a frame object
    and a code object, for places where only a FrameSummary is available.
    """

    @classmethod
    def from_frame_summary(cls, f):
        """
        :param FrameSummary f:
        :rtype: DummyFrame
        """
        return cls(filename=f.filename, lineno=f.lineno, name=f.name, f_locals=f.locals)

    def __init__(self, filename, lineno, name, f_locals=None, f_globals=None, f_builtins=None):
        # True iff any variable scope was explicitly provided.
        self.have_vars_available = not (
            f_locals is None and f_globals is None and f_builtins is None)
        # Frame-style and code-style aliases for the same values.
        self.lineno = self.tb_lineno = self.f_lineno = lineno
        self.f_code = self  # act as our own code object
        self.filename = self.co_filename = filename
        self.name = self.co_name = name
        self.f_locals = f_locals if f_locals else {}
        self.f_globals = f_globals if f_globals else {}
        self.f_builtins = f_builtins if f_builtins else {}
def _StackSummary_extract(frame_gen, limit=None, lookup_lines=True, capture_locals=False):
    """Create a StackSummary from a traceback or stack object.
    Greatly simplified replacement for the original StackSummary.extract():
    we always want the frame itself captured, which the stdlib version never
    keeps (it deliberately avoids holding frame references), so we wrap each
    entry in an ExtendedFrameSummary instead.
    :param frame_gen: A generator that yields (frame, lineno) tuples to
        include in the stack.
    :param limit: ignored here (kept for signature compatibility).
    :param lookup_lines: ignored here; line lookup is always deferred.
    :param capture_locals: ignored here.
    """
    summary = StackSummary()
    for frame, lineno in frame_gen:
        code = frame.f_code
        summary.append(ExtendedFrameSummary(
            frame=frame, filename=code.co_filename, lineno=lineno,
            name=code.co_name, lookup_line=False))
    return summary
def install():
    """
    Replaces sys.excepthook by our better_exchook, so every uncaught
    exception is reported with the extended traceback.
    """
    sys.excepthook = better_exchook
def replace_traceback_format_tb():
    """
    Replaces these functions from the traceback module by our own:
    - traceback.format_tb
    - traceback.StackSummary.format
    - traceback.StackSummary.extract
    Note that this kind of monkey patching might not be safe under all circumstances
    and is not officially supported by Python.
    """
    import traceback
    traceback.format_tb = format_tb
    # StackSummary is only patched when present (older Pythons lack it), so
    # that the stdlib's own formatting paths also go through our formatter.
    if hasattr(traceback, "StackSummary"):
        traceback.StackSummary.format = format_tb
        traceback.StackSummary.extract = _StackSummary_extract
# ------------------------------------------------
# Test/demo code starts here.
def test_is_source_code_missing_open_brackets():
    """Self-test: detecting unbalanced closing brackets in partial source."""
    assert is_source_code_missing_open_brackets("a") is False
    assert is_source_code_missing_open_brackets("a)") is True
    assert is_source_code_missing_open_brackets("fn()") is False
    assert is_source_code_missing_open_brackets("fn().b()") is False
    assert is_source_code_missing_open_brackets("fn().b()[0]") is False
    assert is_source_code_missing_open_brackets("fn({a[0]: 'b'}).b()[0]") is False
    assert is_source_code_missing_open_brackets("a[0]: 'b'}).b()[0]") is True
def test_add_indent_lines():
    """Self-test: prefixing multi-line strings with a hanging indent."""
    assert add_indent_lines("foo ", " bar") == "foo  bar"
    assert add_indent_lines("foo ", " bar\n baz") == "foo  bar\n     baz"
def test_get_same_indent_prefix():
    """Self-test: common leading-whitespace prefix of a list of lines."""
    assert get_same_indent_prefix(["a", "b"]) == ""
    assert get_same_indent_prefix(["  a"]) == "  "
    assert get_same_indent_prefix(["  a", "    b"]) == "  "
def test_remove_indent_lines():
    """Self-test: stripping the common indent from a multi-line string."""
    assert remove_indent_lines("  a\n  b") == "a\nb"
    assert remove_indent_lines("  a\n b") == "a\nb"
    assert remove_indent_lines("\ta\n\t b") == "a\n b"
if __name__ == "__main__":
if sys.argv[1:] == ["test"]:
for k, v in sorted(globals().items()):
if not k.startswith("test_"): continue
print("running: %s()" % k)
v()
print("ok.")
sys.exit()
elif sys.argv[1:] == ["debug_shell"]:
debug_shell(locals(), globals())
sys.exit()
elif sys.argv[1:] == ["debug_shell_exception"]:
try:
raise Exception("demo exception")
except Exception:
better_exchook(*sys.exc_info(), debugshell=True)
sys.exit()
elif sys.argv[1:]:
print("Usage: %s (test|...)" % sys.argv[0])
sys.exit(1)
# some examples
# this code produces this output: https://gist.github.com/922622
try:
x = {1:2, "a":"b"}
def f():
y = "foo"
# noinspection PyUnresolvedReferences,PyStatementEffect
x, 42, sys.stdin.__class__, sys.exc_info, y, z
f()
except Exception:
better_exchook(*sys.exc_info())
try:
f = lambda x: None
# noinspection PyUnresolvedReferences,PyUnboundLocalVariable,PyArgumentList
f(x, y)
except Exception:
better_exchook(*sys.exc_info())
try:
# noinspection PyArgumentList
(lambda x: None)(__name__,
42) # multiline
except Exception:
better_exchook(*sys.exc_info())
try:
class Obj:
def __repr__(self):
return (
"<Obj multi-\n" +
" line repr>")
obj = Obj()
assert not obj
except Exception:
better_exchook(*sys.exc_info())
# use this to overwrite the global exception handler
sys.excepthook = | |
# Source repository: Willy5s/Pirates-Online-Rewritten
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesgui import GuiPanel, RedeemCodeGUI
from pirates.piratesgui import GuiButton, DialogButton
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.pirate import DynamicHuman
from pirates.piratesgui.TabBar import LeftTab, TabBar
from direct.interval.IntervalGlobal import *
from pirates.makeapirate import TattooGlobals
from pirates.piratesgui.BorderFrame import BorderFrame
from pirates.uberdog.UberDogGlobals import InventoryType
from otp.otpbase import OTPGlobals
from otp.otpgui import OTPDialog
from pirates.piratesgui import PDialog
from direct.task import Task
import random
from math import sin
from math import cos
from math import pi
from pirates.piratesbase import Freebooter
from pirates.inventory import ItemGlobals, DropGlobals
from pirates.inventory.InventoryGlobals import *
from pirates.uberdog.TradableInventoryBase import InvItem
from pirates.pirate import TitleGlobals
# Tattoo placement zones (slot indices on the avatar).
ZONE1 = 0
ZONE2 = 1
ZONE3 = 2
ZONE4 = 3
ZONE5 = 4
ZONE6 = 5
ZONE7 = 6
ZONE8 = 7
# Field indices of a per-zone tattoo record.
TYPE = 0
OFFSETX = 1
OFFSETY = 2
SCALE = 3
ROTATE = 4
COLOR = 5
# Zones the store UI exposes, paired with their localized display names.
TattooZones = [
    [
        ZONE1, PLocalizer.TattooChest], [ZONE2, PLocalizer.TattooLeftArm], [ZONE3, PLocalizer.TattooRightArm], [ZONE4, PLocalizer.TattooFace]]
# Camera preset ids for previewing each body area.
CHEST_CAMERA = 0
LARM_CAMERA = 1
RARM_CAMERA = 2
FACE_CAMERA = 3
BODY_CAMERA = 4
# Store transaction modes.
BUYING = 0
SELLING = 1
class TattooStoreTab(LeftTab):
    """One selectable tab in the tattoo store's left-hand tab bar."""

    def __init__(self, tabBar, name, **kw):
        # Panel art and sizing defaults; callers may override any via kw.
        optiondefs = (
            ('modelName', 'general_frame_d', None), ('borderScale', 0.38, None), ('bgBuffer', 0.15, None))
        self.defineoptions(kw, optiondefs)
        LeftTab.__init__(self, tabBar, name, **kw)
        self.initialiseoptions(TattooStoreTab)
        # (removed a pointless explicit `return None` — __init__ always
        # returns None implicitly)
class TattooStoreTabBar(TabBar):
    """Vertical tab bar for the tattoo store; pulls the active tab forward."""

    def refreshTabs(self):
        # Lay all tabs out along the left edge under the background parent
        # (inactive look), stacked downwards by index.
        for x, name in enumerate(self.tabOrder):
            tab = self.tabs[name]
            tab.reparentTo(self.bParent)
            tab.setPos(-0.07, 0, 1.1 - 0.1 * (x + self.offset))
            # Fixed: was `(tab.setScale(...),)` — a throwaway one-tuple
            # around the call (decompiler artifact); the plain call suffices.
            tab.setScale(0.2, 1, 0.2)
        # Clamp the active index, then re-parent the active tab to the
        # foreground and enlarge it slightly.
        self.activeIndex = max(0, min(self.activeIndex, len(self.tabOrder) - 1))
        if len(self.tabOrder):
            name = self.tabOrder[self.activeIndex]
            tab = self.tabs[name]
            tab.reparentTo(self.fParent)
            tab.setX(-0.08)
            tab.setScale(0.2, 1, 0.22)

    def makeTab(self, name, **kw):
        # Factory hook invoked by the TabBar base class.
        return TattooStoreTab(self, name, **kw)
class TattooStoreCartList(DirectScrolledFrame):
    """
    Scrollable list used as the store's cart (one instance for purchases,
    one for sales). Each entry is a clickable row; clicking a row removes
    it from the cart again.
    """

    def __init__(self, parent, width, height, itemWidth, itemHeight):
        # parent is the owning TattooStoreGUI; its pvpMode/mode are consulted.
        self.width = width + PiratesGuiGlobals.ScrollbarSize
        self.listItemHeight = itemHeight
        self.listItemWidth = itemWidth
        self.height = height
        self._parent = parent
        self.pvpMode = parent.pvpMode
        charGui = loader.loadModel('models/gui/char_gui')
        DirectScrolledFrame.__init__(self, relief=None, state=DGG.NORMAL, manageScrollBars=0, autoHideScrollBars=1, frameSize=(0, self.width, 0, self.height), canvasSize=(0, self.width - 0.05, 0.025, self.height - 0.025), verticalScroll_relief=None, verticalScroll_image=charGui.find('**/chargui_slider_small'), verticalScroll_frameSize=(0, PiratesGuiGlobals.ScrollbarSize, 0, self.height), verticalScroll_image_scale=(self.height + 0.05, 1, 0.75), verticalScroll_image_hpr=(0, 0, 90), verticalScroll_image_pos=(self.width - PiratesGuiGlobals.ScrollbarSize * 0.5 - 0.004, 0, self.height * 0.5), verticalScroll_image_color=(0.61, 0.6, 0.6, 1), verticalScroll_thumb_image=(charGui.find('**/chargui_slider_node'), charGui.find('**/chargui_slider_node_down'), charGui.find('**/chargui_slider_node_over')), verticalScroll_thumb_relief=None, verticalScroll_thumb_image_scale=0.25, verticalScroll_resizeThumb=0, horizontalScroll_relief=None, sortOrder=5)
        self.initialiseoptions(TattooStoreCartList)
        # Strip the default inc/dec arrow buttons; only the thumb is used,
        # and the horizontal bar is hidden entirely.
        self.verticalScroll.incButton.destroy()
        self.verticalScroll.decButton.destroy()
        self.horizontalScroll.incButton.destroy()
        self.horizontalScroll.decButton.destroy()
        self.horizontalScroll.hide()
        # panels: the row widgets currently shown; purchases: their data tuples.
        self.panels = []
        self.purchases = []
        self.itemColor = Vec4(0.2, 0.2, 0.2, 1.0)
        charGui.removeNode()
        return

    def destroy(self):
        # Tear down all row widgets before destroying the frame itself.
        self.ignoreAll()
        for panel in self.panels:
            panel.destroy()
        del self.panels
        del self.purchases
        DirectScrolledFrame.destroy(self)

    def setItemColor(self, color):
        # Text color applied to rows (new rows, and rows on hover-out).
        self.itemColor = color

    def repackPanels(self):
        # Re-stack all rows vertically and resize the scroll canvas to fit.
        z = self.listItemHeight
        i = 0
        for i in range(len(self.panels)):
            self.panels[i].setPos(0.01, 0, -z * (i + 1))
            # NOTE: 'origionalPos' (sic) — keep the existing spelling.
            self.panels[i].origionalPos = self.panels[i].getPos(render2d)
        self['canvasSize'] = (
            0, self.listItemWidth - 0.09, -z * (i + 1), 0)

    def addPanel(self, data, repack=1):
        # data carries (…, uid, tattooId, …); the row shows name and price.
        uid = data[1]
        tattooId = data[2]
        itemCost = ItemGlobals.getGoldCost(uid)
        itemText = PLocalizer.getItemName(uid)
        if self._parent.mode == 1:
            # Selling mode: apply the sell-back multiplier to the price.
            itemCost = int(itemCost * ItemGlobals.GOLD_SALE_MULTIPLIER)
        # Truncate the name so name + price fit on one row.
        maxLength = 23 - len(str(itemCost))
        isDisabled = 0
        panel = DirectButton(parent=self, relief=None, text=itemText[:maxLength], text_fg=self.itemColor, text_align=TextNode.ALeft, text_scale=PiratesGuiGlobals.TextScaleMed, text_shadow=PiratesGuiGlobals.TextShadow, text_pos=(0.06, 0.0), command=self.removePanel, extraArgs=[data])
        panel.costLabel = DirectLabel(parent=panel, relief=None, text=str(itemCost), text_fg=self.itemColor, text_align=TextNode.ARight, text_scale=PiratesGuiGlobals.TextScaleMed, text_shadow=PiratesGuiGlobals.TextShadow, text_pos=(0.45, 0.0), image=self._parent.CoinImage, image_scale=0.15, image_pos=(0.48, 0.0, 0.014))
        panel.bind(DGG.ENTER, self.highlightStart, extraArgs=[panel])
        panel.bind(DGG.EXIT, self.highlightStop, extraArgs=[panel])
        panel.data = data
        panel.price = itemCost
        panel.reparentTo(self.getCanvas())
        self.panels.append(panel)
        self.purchases.append(data)
        if repack:
            self.repackPanels()
        return

    def highlightStart(self, item, event=None):
        # Hover-in: switch the row to the highlight color.
        item['text_fg'] = PiratesGuiGlobals.TextFG6
        item.costLabel['text_fg'] = PiratesGuiGlobals.TextFG6

    def highlightStop(self, item, event=None):
        # Hover-out: restore the regular row color.
        item['text_fg'] = self.itemColor
        item.costLabel['text_fg'] = self.itemColor

    def removePanel(self, data, repack=1):
        # Remove the row matching `data` and tell the parent GUI so the
        # corresponding store button is re-enabled and totals refresh.
        for panel in self.panels:
            if panel.data == data:
                self._parent.updateButton(data, 1)
                self.panels.remove(panel)
                self.purchases.remove(data)
                panel.destroy()
        if repack:
            self.repackPanels()
        self._parent.updateBalance()
        return

    def hasPanel(self, data):
        # True iff a row with exactly this data tuple is in the cart.
        for panel in self.panels:
            if panel.data == data:
                return True
        return False

    def removeAllPanels(self):
        # Empty the cart entirely (no per-row parent notifications here).
        for panel in self.panels:
            panel.destroy()
        self.panels = []
        self.purchases = []
        self.repackPanels()

    def show(self):
        DirectScrolledFrame.show(self)

    def hide(self):
        DirectScrolledFrame.hide(self)

    def getItemQuantity(self, itemId):
        # NOTE(review): addPanel treats data[1] as the item uid and data[2]
        # as the tattoo id, but this sums data[1] keyed on data[0] — verify
        # the shape of `data` at the call sites; this may be a latent bug.
        counter = 0
        for panel in self.panels:
            if panel.data[0] == itemId:
                counter += panel.data[1]
        return counter
class TattooStoreGUI(DirectFrame):
    """
    Main GUI panel for the tattoo-shop NPC interaction: store/wardrobe tabs,
    a purchase/sell cart, and a live preview pirate model.
    """
    notify = directNotify.newCategory('TattooStoreGUI')
    # Overall panel dimensions, derived from the shared inventory-item metrics.
    width = (PiratesGuiGlobals.InventoryItemGuiWidth + PiratesGuiGlobals.ScrollbarSize + 0.06) * 2
    height = 1.5
    columnWidth = PiratesGuiGlobals.InventoryItemGuiWidth + PiratesGuiGlobals.ScrollbarSize + 0.05
    # NOTE(review): mutable class-level list is shared by all instances —
    # confirm that is intended.
    holidayIdList = []
    def __init__(self, npc, shopId, **kw):
        """
        Build the complete store UI.
        :param npc: the tattoo-shop NPC the player is interacting with
        :param shopId: shop variant; PRIVATEER_TATTOOS enables pvpMode
        :param kw: DirectFrame keyword options
        """
        # NOTE(review): 'framSize' looks like a typo for 'frameSize'; as
        # written the option is defined but likely never sizes the frame —
        # confirm before changing, since layout may depend on it.
        optiondefs = (('relief', None, None), ('framSize', (0, self.width, 0, self.height), None), ('sortOrder', 20, None))
        self.defineoptions(kw, optiondefs)
        DirectFrame.__init__(self, None, **kw)
        self.initialiseoptions(TattooStoreGUI)
        # --- plain state defaults -------------------------------------------
        self.pirate = None
        self.camIval = None
        self.buttons = []
        self.buttonIndex = 0
        self.tattooAmount = 0
        self.currentPage = None
        self.buttonsPerPage = 4
        self.prevIdx = 0
        self.mode = BUYING
        self.redeemCodeGUI = None
        # --- icons and models ------------------------------------------------
        gui = loader.loadModel('models/gui/toplevel_gui')
        self.CoinImage = gui.find('**/treasure_w_coin*')
        # NOTE(review): skullModel is loaded but never used in this method.
        skullModel = loader.loadModel('models/gui/avatar_chooser_rope')
        self.ParchmentIcon = gui.find('**/main_gui_quest_scroll')
        self.TattooIcons = loader.loadModel('models/textureCards/tattooIcons')
        self.ShirtIcon = loader.loadModel('models/gui/char_gui').find('**/chargui_cloth')
        self.LockIcon = gui.find('**/pir_t_gui_gen_key_subscriber')
        self.questIcon = loader.loadModel('models/gui/compass_main').find('**/icon_objective_grey')
        self.backTabParent = self.attachNewNode('backTabs', sort=0)
        self.panel = GuiPanel.GuiPanel(None, self.width, self.height, parent=self, showClose=False)
        self.setPos(0.0, 0, -0.75)
        self.balance = 0
        self.npc = npc
        self.rootTitle = PLocalizer.TattooShop
        self.model = loader.loadModel('models/gui/gui_shop_tailor')
        self.model.reparentTo(self.panel)
        self.model.setPos(0.625, 0.0, 1.05)
        self.model.setScale(0.337, 0.0, 0.327)
        self.paid = Freebooter.getPaidStatus(localAvatar.getDoId())
        self.shopId = shopId
        self.pvpMode = 0
        if shopId == PiratesGlobals.PRIVATEER_TATTOOS:
            self.pvpMode = 1
        # Stop any in-flight camera interval before we capture camera state.
        if localAvatar.gameFSM.camIval is not None:
            if localAvatar.gameFSM.camIval.isPlaying():
                localAvatar.gameFSM.camIval.finish()
        self.initialCamPos = camera.getPos()
        self.initialCamHpr = camera.getHpr()
        self.initialPirateH = 0
        # --- cart frame with buy/sell parchments and the two cart lists ------
        self.cartWidth = self.columnWidth - 0.1
        self.cartHeight = self.height - 0.25
        self.cartFrame = DirectFrame(parent=self.panel, relief=None, frameSize=(0, self.cartWidth, 0, self.cartHeight))
        self.cartFrame.setPos(self.columnWidth + 0.025, 0, 0.08)
        self.buyParchment = DirectFrame(parent=self.cartFrame, relief=None, text=PLocalizer.TailorPurchase, text_fg=PiratesGuiGlobals.TextFG1, text_align=TextNode.ACenter, text_scale=PiratesGuiGlobals.TextScaleLarge, text_pos=(0.0, 0.2), text_shadow=PiratesGuiGlobals.TextShadow, textMayChange=0, image=self.ParchmentIcon, image_scale=(0.24, 0.0, 0.3), image_pos=(0.0, 0.0, 0.0), pos=(0.3, 0.0, 0.92))
        self.sellParchment = DirectFrame(parent=self.cartFrame, relief=None, text=PLocalizer.TailorSelling, text_fg=PiratesGuiGlobals.TextFG1, text_align=TextNode.ACenter, text_scale=PiratesGuiGlobals.TextScaleLarge, text_pos=(0.0, 0.2), text_shadow=PiratesGuiGlobals.TextShadow, textMayChange=0, image=self.ParchmentIcon, image_scale=(0.24, 0.0, 0.3), image_pos=(0.0, 0.0, 0.0), pos=(0.3, 0.0, 0.48))
        self.purchaseInventory = TattooStoreCartList(self, self.cartWidth, self.cartHeight - 0.95, self.cartWidth, self.cartHeight / 20.0)
        self.purchaseInventory.reparentTo(self.cartFrame)
        self.purchaseInventory.setPos(0.0, 0.0, 0.76)
        self.sellInventory = TattooStoreCartList(self, self.cartWidth, self.cartHeight - 0.95, self.cartWidth, self.cartHeight / 20.0)
        self.sellInventory.reparentTo(self.cartFrame)
        self.sellInventory.setPos(0.0, 0.0, 0.31)
        self.frontTabParent = self.panel.attachNewNode('frontTab', sort=2)
        self.currentWardrobe = []
        # --- running totals and the player's gold ----------------------------
        yourMoney = PLocalizer.YourMoney
        currencyIcon = self.CoinImage
        self.balanceTitle = DirectFrame(parent=self.cartFrame, relief=None, text=PLocalizer.Total, text_fg=PiratesGuiGlobals.TextFG2, text_align=TextNode.ALeft, text_scale=PiratesGuiGlobals.TextScaleLarge, text_pos=(0.0, 0.0), text_shadow=PiratesGuiGlobals.TextShadow, pos=(0.09,0, 0.225))
        self.balanceValue = DirectFrame(parent=self.cartFrame, relief=None, text=str(self.balance), text_fg=PiratesGuiGlobals.TextFG2, text_align=TextNode.ARight, text_scale=PiratesGuiGlobals.TextScaleLarge, text_pos=(-0.055, 0.0), text_shadow=PiratesGuiGlobals.TextShadow, textMayChange=1, image=currencyIcon, image_scale=0.15, image_pos=(-0.025, 0, 0.015), pos=(self.cartWidth, 0, 0.225))
        self.myGoldTitle = DirectFrame(parent=self.cartFrame, relief=None, text=yourMoney, text_fg=PiratesGuiGlobals.TextFG2, text_align=TextNode.ALeft, text_scale=PiratesGuiGlobals.TextScaleLarge, text_pos=(0.0, 0.0), text_shadow=PiratesGuiGlobals.TextShadow, pos=(0.09, 0, 0.155))
        self.myGold = DirectFrame(parent=self.cartFrame, relief=None, text=str(self.getMoney()), text_fg=PiratesGuiGlobals.TextFG2, text_align=TextNode.ARight, text_scale=PiratesGuiGlobals.TextScaleLarge, text_pos=(-0.055, 0.0), text_shadow=PiratesGuiGlobals.TextShadow, textMayChange=1, image=currencyIcon, image_scale=0.15, image_pos=(-0.025, 0, 0.015), pos=(self.cartWidth, 0, 0.155))
        # --- action buttons ---------------------------------------------------
        self.commitButton = DialogButton.DialogButton(command=self.handleCommitPurchase, parent=self.cartFrame, text=PLocalizer.PurchaseCommit, text_fg=PiratesGuiGlobals.TextFG2, text_pos=(0.02, -PiratesGuiGlobals.TextScaleLarge * 0.25), text_scale=PiratesGuiGlobals.TextScaleLarge, text_shadow=PiratesGuiGlobals.TextShadow, buttonStyle=DialogButton.DialogButton.YES)
        self.commitButton.setPos(self.cartWidth / 2, 0, 0.005)
        self.closeButton = DialogButton.DialogButton(command=self.closePanel, parent=self.cartFrame, text=PLocalizer.lClose, text_fg=PiratesGuiGlobals.TextFG2, text_pos=(0.02, -PiratesGuiGlobals.TextScaleLarge * 0.25), text_scale=PiratesGuiGlobals.TextScaleLarge, text_shadow=PiratesGuiGlobals.TextShadow, buttonStyle=DialogButton.DialogButton.NO)
        self.closeButton.setPos(self.cartWidth / 2 - 0.55, 0, 0.005)
        self.redeemCodeButton = DialogButton.DialogButton(command=self.showRedeemCodeGUI, parent=self.cartFrame, text=PLocalizer.ShopRedeem, text_fg=PiratesGuiGlobals.TextFG2, text_scale=PiratesGuiGlobals.TextScaleLarge, text_shadow=PiratesGuiGlobals.TextShadow)
        self.redeemCodeButton.setPos(-0.015, 0, 0.005)
        self.storeButton = DialogButton.DialogButton(command=self.changeMode, state=DGG.DISABLED, parent=self.cartFrame, text=PLocalizer.InteractStore, text_fg=PiratesGuiGlobals.TextFG2, text_scale=PiratesGuiGlobals.TextScaleLarge, text_shadow=PiratesGuiGlobals.TextShadow, image_color=Vec4(0.7, 0.95, 0.7, 1.0), scale=0.9, extraArgs=[0])
        self.storeButton.setPos(-0.4, 0.0, 1.15)
        self.wardrobeButton = DialogButton.DialogButton(command=self.changeMode, state=DGG.NORMAL, parent=self.cartFrame, text=PLocalizer.TailorWardrobe, text_fg=PiratesGuiGlobals.TextFG2, text_scale=PiratesGuiGlobals.TextScaleLarge, text_shadow=PiratesGuiGlobals.TextShadow, image_color=Vec4(0.95, 0.7, 0.7, 1.0), scale=0.9, extraArgs=[1])
        self.wardrobeButton.setPos(-0.18, 0.0, 1.15)
        # --- pagination controls ---------------------------------------------
        tGui = loader.loadModel('models/gui/triangle')
        triangle = (tGui.find('**/triangle'), tGui.find('**/triangle_down'), tGui.find('**/triangle_over'))
        self.nextPageButton = DirectButton(parent=self.panel, relief=None, state=DGG.DISABLED, image=triangle, image_scale=0.065, pos=(0.54, 0.0, 0.175), rolloverSound=None, command=self.nextPage)
        self.prevPageButton = DirectButton(parent=self.panel, relief=None, state=DGG.DISABLED, image=triangle, image_scale=-0.065, pos=(0.16, 0.0, 0.175), rolloverSound=None, command=self.previousPage)
        self.pageNumber = DirectFrame(parent=self.panel, relief=None, text='', text_fg=PiratesGuiGlobals.TextFG2, text_align=TextNode.ACenter, text_scale=PiratesGuiGlobals.TextScaleLarge, text_pos=(0.0, 0.0), text_shadow=PiratesGuiGlobals.TextShadow, pos=(0.35, 0, 0.1625))
        self.titleLabel = DirectLabel(parent=self, relief=None, text='', text_fg=PiratesGuiGlobals.TextFG1, text_align=TextNode.ACenter, text_scale=PiratesGuiGlobals.TextScaleLarge * 1.3, text_shadow=PiratesGuiGlobals.TextShadow, pos=(0.62, 0.0, 1.33))
        self.titleLabel.setBin('gui-fixed', 1)
        # --- preview pirate and rotation slider ------------------------------
        self.createPirate()
        charGui = loader.loadModel('models/gui/char_gui')
        self.rotateSlider = DirectSlider(parent=base.a2dBottomLeft, relief=None, command=self.rotatePirate, image=charGui.find('**/chargui_slider_small'), image_scale=(2.15, 2.15, 1.5), thumb_relief=None, thumb_image=(charGui.find('**/chargui_slider_node'), charGui.find('**/chargui_slider_node_down'), charGui.find('**/chargui_slider_node_over')), pos=(0.8, 0.0, 0.09), text_align=TextNode.ACenter, text_scale=(0.1, 0.1), text_pos=(0.0, 0.1), text_fg=PiratesGuiGlobals.TextFG1, scale=0.43, text=PLocalizer.RotateSlider, value=0.5, sortOrder=-1)
        self.rotateSlider['extraArgs'] = [self.rotateSlider]
        self.rotateSliderOrigin = 0.5
        # --- event hooks and final bookkeeping -------------------------------
        self.accept('mouse1', self._startMouseReadTask)
        self.accept('mouse1-up', self._stopMouseReadTask)
        self.alertDialog = None
        localAvatar.guiMgr.chatPanel.show()
        localAvatar.guiMgr.chatPanel.startFadeTextIval()
        self.accept(localAvatar.uniqueName('tattooUpdate'), self.reloadPirateDNA)
        # Temporarily hide the tracked-quest label; restored in destroy().
        self.showQuestLabel = False
        if not localAvatar.guiMgr.trackedQuestLabel.isHidden():
            localAvatar.guiMgr.hideTrackedQuestInfo()
            self.showQuestLabel = True
        self.equipRequests = {ZONE1: None,ZONE2: None,ZONE3: None,ZONE4: None,ZONE5: None,ZONE6: None,ZONE7: None,ZONE8: None}
        self.initTabs()
        self.updateBalance()
        self.lastRun = 0
        return
def showRedeemCodeGUI(self):
if self.redeemCodeGUI:
self.redeemCodeGUI.showCode()
else:
self.redeemCodeGUI = RedeemCodeGUI.RedeemCodeGUI(self)
def rotatePirate(self, slider):
if self.pirate and slider:
value = slider.getValue()
if value != self.rotateSliderOrigin:
diff = value - self.rotateSliderOrigin
h = diff * 360.0 + self.pirate.getH()
self.pirate.setH(h)
self.rotateSliderOrigin = value
    def destroy(self):
        """Tear down GUI widgets, loaded assets, and restore global UI state."""
        DirectFrame.destroy(self)
        self._stopMouseReadTask()
        # Stop any running camera interval and reset the camera orientation.
        if self.camIval:
            self.camIval.finish()
            self.camIval = None
            camera.setHpr(0, 0, 0)
        self.rotateSlider.destroy()
        self.unloadPirate()
        # Release every loaded model/icon node (each guarded and nulled so a
        # repeated destroy() is harmless).
        if self.model:
            self.model.removeNode()
            self.model = None
        if self.CoinImage:
            self.CoinImage.removeNode()
            self.CoinImage = None
        if self.ParchmentIcon:
            self.ParchmentIcon.removeNode()
            self.ParchmentIcon = None
        if self.ShirtIcon:
            self.ShirtIcon.removeNode()
            self.ShirtIcon = None
        if self.alertDialog:
            self.alertDialog.destroy()
        if self.TattooIcons:
            self.TattooIcons.removeNode()
            self.TattooIcons = None
        if self.LockIcon:
            self.LockIcon.removeNode()
            self.LockIcon = None
        if self.questIcon:
            self.questIcon.removeNode()
            self.questIcon = None
        if self.redeemCodeGUI:
            self.redeemCodeGUI.destroy()
        # Restore the tracked-quest label if we hid it in __init__.
        if len(localAvatar.guiMgr.trackedQuestLabel['text']):
            if self.showQuestLabel:
                localAvatar.guiMgr.showTrackedQuestInfo()
        localAvatar.guiMgr.chatPanel.hide()
        return
    def createPirate(self):
        """Create (once) and pose the preview pirate next to the shop NPC."""
        if self.pirate is None:
            # Build a preview human from the local avatar's DNA.
            self.pirate = DynamicHuman.DynamicHuman()
            self.pirate.setDNAString(localAvatar.style)
            self.pirate.generateHuman(localAvatar.style.gender)
            self.pirate.model.setupSelectionChoices('DEFAULT')
        self.pirate.mixingEnabled = True
        self.pirate.enableBlend()
        self.pirate.loop('idle')
        self.pirate.useLOD(2000)
        # Place the pirate a bit in front of the NPC using a throwaway node,
        # then face it towards the NPC.
        dummy = self.npc.attachNewNode('dummy')
        dummy.setPos(self.npc.getPos() - Vec3(0, 7, 0))
        parent = self.npc.getParent()
        self.pirate.reparentTo(parent)
        pos = dummy.getPos()
        hpr = dummy.getHpr()
        self.pirate.setPos(pos)
        self.pirate.setHpr(hpr)
        self.pirate.lookAt(self.npc)
        dummy.detachNode()
        self.initialPirateH = self.pirate.getH()
        # Strip upper-body clothing so tattoos are visible on the preview.
        self.pirate.style.setClothesShirt(0)
        self.pirate.style.setClothesCoat(0)
        self.pirate.style.setClothesVest(0)
        self.pirate.model.handleClothesHiding()
        self.pirate.show()
        # Hide the real avatar while the preview model is on screen.
        localAvatar.stash()
        return
| |
# Source repository: kiuthed/qutip
# -*- coding: utf-8 -*-
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, <NAME> and <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
#
# This module was initially contributed by <NAME>.
#
"""
This module implements transformations between superoperator representations,
including supermatrix, Kraus, Choi and Chi (process) matrix formalisms.
"""
__all__ = ['super_to_choi', 'choi_to_super', 'choi_to_kraus', 'kraus_to_choi',
'kraus_to_super', 'choi_to_chi', 'chi_to_choi', 'to_choi',
'to_chi', 'to_super', 'to_kraus']
# Python Standard Library
from itertools import starmap, product
# NumPy/SciPy
from numpy.core.multiarray import array, zeros
from numpy.core.shape_base import hstack
from numpy.matrixlib.defmatrix import matrix
from numpy import sqrt
from scipy.linalg import eig
# Other QuTiP functions and classes
from qutip.superoperator import vec2mat, spre, spost, operator_to_vector
from qutip.operators import identity, sigmax, sigmay, sigmaz
from qutip.tensor import tensor
from qutip.qobj import Qobj
# SPECIFIC SUPEROPERATORS -----------------------------------------------------
def _dep_super(pe):
    """
    Superoperator (supermatrix form) for single-qubit depolarization with
    parameter ``pe``.
    TODO: if this is going into production (hopefully it isn't) then check
    CPTP, expand to arbitrary dimensional systems, etc.
    """
    half = pe / 2.
    mat = array([
        [1. - half, 0., 0., half],
        [0., 1. - pe, 0., 0.],
        [0., 0., 1. - pe, 0.],
        [half, 0., 0., 1. - half],
    ])
    return Qobj(dims=[[[2], [2]], [[2], [2]]], inpt=mat)
def _dep_choi(pe):
    """
    Choi matrix for single-qubit depolarization with parameter ``pe``.
    TODO: if this is going into production (hopefully it isn't) then check
    CPTP, expand to arbitrary dimensional systems, etc.
    """
    half = pe / 2.
    mat = array([
        [1. - half, 0., 0., 1. - pe],
        [0., half, 0., 0.],
        [0., 0., half, 0.],
        [1. - pe, 0., 0., 1. - half],
    ])
    return Qobj(dims=[[[2], [2]], [[2], [2]]], inpt=mat, superrep='choi')
# CHANGE OF BASIS FUNCTIONS ---------------------------------------------------
# These functions find change of basis matrices, and are useful in converting
# between (for instance) Choi and chi matrices. At some point, these should
# probably be moved out to another module.
_SINGLE_QUBIT_PAULI_BASIS = (identity(2), sigmax(), sigmay(), sigmaz())
def _pauli_basis(nq=1):
    """Return the change-of-basis matrix (as a Qobj) whose columns are
    the conjugate-transposed vectorizations of the nq-qubit Pauli
    operators (all tensor products of I, X, Y, Z).
    """
    # NOTE: This is slow as can be.
    # TODO: Make this sparse. CSR format was causing problems for the [idx, :]
    # slicing below.
    B = zeros((4 ** nq, 4 ** nq), dtype=complex)
    # Superoperator-style dims for an nq-qubit map.
    dims = [[[2] * nq] * 2] * 2
    for idx, op in enumerate(starmap(tensor,
                                     product(_SINGLE_QUBIT_PAULI_BASIS,
                                             repeat=nq))):
        # Column idx holds vec(op)^dagger as a dense row of values.
        B[:, idx] = operator_to_vector(op).dag().data.todense()
    return Qobj(B, dims=dims)
# PRIVATE CONVERSION FUNCTIONS ------------------------------------------------
# These functions handle the main work of converting between representations,
# and are exposed below by other functions that add postconditions about types.
#
# TODO: handle type='kraus' as a three-index Qobj, rather than as a list?
def _super_tofrom_choi(q_oper):
    """
    We exploit that the basis transformation between Choi and supermatrix
    representations squares to the identity, so that if we munge Qobj.type,
    we can use the same function.
    Since this function doesn't respect :attr:`Qobj.type`, we mark it as
    private; only those functions which wrap this in a way so as to preserve
    type should be called externally.
    """
    data = q_oper.data.toarray()
    # The matrix is (d*d) x (d*d); recover the subsystem dimension d.
    sqrt_shape = int(sqrt(data.shape[0]))
    # View the matrix as a rank-4 tensor, swap the outermost index pair
    # (this permutation is its own inverse, hence "tofrom"), then flatten
    # back to the original square shape.
    return Qobj(dims=q_oper.dims,
                inpt=data.reshape([sqrt_shape] * 4).
                transpose(3, 1, 2, 0).reshape(q_oper.data.shape))
def super_to_choi(q_oper):
    # TODO: deprecate and make private in favor of to_choi,
    # which looks at Qobj.type to determine the right conversion function.
    """
    Takes a superoperator to a Choi matrix
    TODO: Sanitize input, incorporate as method on Qobj if type=='super'
    """
    choi = _super_tofrom_choi(q_oper)
    choi.superrep = 'choi'
    return choi
def choi_to_super(q_oper):
    # TODO: deprecate and make private in favor of to_super,
    # which looks at Qobj.type to determine the right conversion function.
    """
    Takes a Choi matrix to a superoperator
    TODO: Sanitize input, Abstract-ify application of channels to states
    """
    # The same basis permutation converts in both directions; only the
    # superrep tag differs.
    result = super_to_choi(q_oper)
    result.superrep = 'super'
    return result
def choi_to_kraus(q_oper):
    """
    Takes a Choi matrix and returns a list of Kraus operators.
    TODO: Create a new class structure for quantum channels, perhaps as a
    strict sub-class of Qobj.
    """
    # Eigendecompose the Choi matrix; each eigenpair yields one Kraus op.
    vals, vecs = eig(q_oper.data.todense())
    # eig returns eigenvectors as columns; zip(*vecs) iterates the columns.
    vecs = list(map(array, zip(*vecs)))
    # NOTE(review): sqrt(vals[j]) assumes a positive-semidefinite Choi
    # matrix (a CP map); negative eigenvalues would give complex weights —
    # confirm inputs are CP before relying on this.
    return list(map(lambda x: Qobj(inpt=x),
                    [sqrt(vals[j]) * vec2mat(vecs[j])
                     for j in range(len(vals))]))
def kraus_to_choi(kraus_list):
    """
    Takes a list of Kraus operators and returns the Choi matrix for the channel
    represented by the Kraus operators in `kraus_list`
    """
    # Dense matrix form of every Kraus operator.
    kraus_mat_list = list(map(lambda x: matrix(x.data.todense()), kraus_list))
    op_len = len(kraus_mat_list[0])
    op_rng = range(op_len)
    # Block (c_ix, r_ix) accumulates sum_K K[:, c_ix] * K^dagger[r_ix, :]
    # (outer products of Kraus columns with conjugated rows).
    choi_blocks = array([[sum([op[:, c_ix] * array([op.H[r_ix, :]])
                               for op in kraus_mat_list])
                          for r_ix in op_rng]
                         for c_ix in op_rng])
    # Two hstack passes flatten the 2-D grid of blocks into one matrix.
    return Qobj(inpt=hstack(hstack(choi_blocks)),
                dims=[kraus_list[0].dims, kraus_list[0].dims], type='super',
                superrep='choi')
def kraus_to_super(kraus_list):
    """
    Converts a list of Kraus operators and returns a super operator.
    """
    # Compose the two existing conversions: Kraus -> Choi -> supermatrix.
    choi = kraus_to_choi(kraus_list)
    return choi_to_super(choi)
def choi_to_chi(q_oper):
    """
    Converts a Choi matrix to a Chi matrix in the Pauli basis.
    NOTE: this is only supported for qubits right now. Need to extend to
    Heisenberg-Weyl for other subsystem dimensions.
    """
    # Number of qubits, read off the subsystem dims of the map.
    nq = len(q_oper.dims[0][0])
    B = _pauli_basis(nq)
    # Tag the basis matrix as Choi-typed so the products below are
    # consistent with q_oper's representation.
    B.superrep = 'choi'
    return Qobj(B * q_oper * B.dag(), superrep='chi')
def chi_to_choi(q_oper):
    """
    Converts a Chi matrix in the Pauli basis to a Choi matrix.
    NOTE: this is only supported for qubits right now. Need to extend to
    Heisenberg-Weyl for other subsystem dimensions.
    """
    # Number of qubits, read off the subsystem dims of the map.
    nq = len(q_oper.dims[0][0])
    B = _pauli_basis(nq)
    # The Chi matrix has tr(chi) == d², so we need to divide out
    # by that to get back to the Choi form.
    return Qobj((B.dag() * q_oper * B) / q_oper.shape[0], superrep='choi')
# PUBLIC CONVERSION FUNCTIONS -------------------------------------------------
# These functions handle superoperator conversions in a way that preserves the
# correctness of Qobj.type, and in a way that automatically branches based on
# the input Qobj.type.
def to_choi(q_oper):
    """
    Converts a Qobj representing a quantum map to the Choi representation,
    such that the trace of the returned operator is equal to the dimension
    of the system.
    Parameters
    ----------
    q_oper : Qobj
        Superoperator to be converted to Choi representation.
    Returns
    -------
    choi : Qobj
        A quantum object representing the same map as ``q_oper``, such that
        ``choi.superrep == "choi"``.
    Raises
    ------
    TypeError: if the given quantum object is not a map, or cannot be converted
        to Choi representation.
    """
    if q_oper.type == 'super':
        if q_oper.superrep == 'choi':
            return q_oper
        if q_oper.superrep == 'super':
            return super_to_choi(q_oper)
        if q_oper.superrep == 'chi':
            return chi_to_choi(q_oper)
        else:
            raise TypeError(q_oper.superrep)
    elif q_oper.type == 'oper':
        # An operator U is promoted to the map rho -> U rho U^dagger first.
        return super_to_choi(spre(q_oper) * spost(q_oper.dag()))
    else:
        raise TypeError(
            # Bug fix: the message previously formatted "{0.choi}", an
            # attribute Qobj does not have, so building the error raised
            # AttributeError instead of the intended TypeError.
            "Conversion of Qobj with type = {0.type} "
            "and superrep = {0.superrep} to Choi not supported.".format(q_oper)
        )
def to_chi(q_oper):
"""
Converts a Qobj representing a quantum map to a representation as a chi
(process) matrix in the Pauli basis, such that the trace of the returned
operator is equal to the dimension of the system.
Parameters
----------
q_oper : Qobj
Superoperator to be converted to Choi representation.
Returns
-------
choi : Qobj
A quantum object representing the same map as ``q_oper``, such that
``choi.superrep == "choi"``.
Raises
------
TypeError: if the given quantum object is not a map, or cannot be converted
to Choi representation.
"""
if q_oper.type | |
<filename>kaldi_io/LDA_LPLDA.py<gh_stars>10-100
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
from scipy import linalg
from sklearn.utils.multiclass import unique_labels
from sklearn.utils import check_array, check_X_y
from sklearn.utils.validation import check_is_fitted
import LDA
import sys
import kaldi_io
## ==========================================================================
## author : <NAME>
## descrption : local pairwise linear discriminant analysis
## revised from sklearn
## created : 20180613
## revised :
##
## <NAME>, +86-13426228839, <EMAIL>
## Aurora Lab, Department of Electronic Engineering, Tsinghua University
## ==========================================================================
__all__ = ['LocalPairwiseTrainedLinearDiscriminantAnalysis']
def _cov(X):
"""Estimate covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
s = np.cov(X, rowvar=0, bias = 1)
return s
def _similarity_function(mean_vec, vecs):
# dot_kernel = np.array([np.dot(mean_vec, vecs) for i in range(0,len(vecs))])
# return dot_kernel
mean_vec_norm = mean_vec / np.sqrt(np.sum(mean_vec ** 2))
vecs_norm = vecs / np.sqrt(np.sum(vecs ** 2, axis=1))[:, np.newaxis]
cosine_kernel = np.array([np.dot(mean_vec_norm, vecs_norm[i]) for i in range(len(vecs_norm))])
return cosine_kernel
def _class_means_and_neighbor_means(X, y, k1, k2):
    """Compute class means and neighbor means
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values.
    k1: within_between_ratio
    k2: nearest_neighbor_ratio
    Returns
    -------
    means : array-like, shape (n_features,)
        Class means and neighbor means
    """
    means = []
    neighbor_means = []
    classes = np.unique(y)
    samples = np.size(y)
    for group in classes:
        # Samples in the current class, and everything outside it.
        Xg = X[y == group, :]
        Xg_count = Xg.shape[0]
        Xg_mean = Xg.mean(0)
        Xn = X[y != group, :]
        Xg_similarity = _similarity_function(Xg_mean, Xg)
        # The least-similar in-class sample sets the similarity threshold.
        Xg_similarity_min = min(Xg_similarity)
        Xn_similarity = _similarity_function(Xg_mean, Xn)
        # Out-of-class samples closer to the class mean than the worst
        # in-class sample are counted as "nearby".
        Xn_neighbor_count = len(Xn_similarity[Xn_similarity > Xg_similarity_min])
        # Neighborhood size: at least k1 * class size, or k2 times the
        # nearby count, capped at the number of remaining samples.
        Xn_neighbor_count = int(max(k1 * Xg_count, k2 * Xn_neighbor_count))
        Xn_neighbor_count = min(Xn_neighbor_count, samples - Xg_count)
        # Indices of out-of-class samples, most-similar first.
        Xn_label = np.argsort(Xn_similarity)
        Xn_label = Xn_label[::-1]
        Xg_neighbor = np.array([Xn[Xn_label[i]] for i in range(Xn_neighbor_count)])
        Xg_neighbor_mean = Xg_neighbor.mean(0)
        means.append(Xg_mean)
        neighbor_means.append(Xg_neighbor_mean)
    return np.array(means), np.array(neighbor_means)
def _class_cov(X, y):
    """Compute the average within-class covariance matrix.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values.
    Returns
    -------
    cov : array-like, shape (n_features, n_features)
        Class covariance matrix (unweighted average over classes).
    """
    per_class_covs = [np.atleast_2d(_cov(X[y == label, :]))
                      for label in np.unique(y)]
    return np.average(per_class_covs, axis=0)
def _local_pairwise_cov(class_mean, neighbor_mean):
    """Estimate the local pairwise (between-class) scatter matrix.
    Parameters
    ----------
    class_mean : array-like, shape (n_classes, n_features)
        each class mean
    neighbor_mean : array-like, shape (n_classes, n_features)
        each class neighbor mean
    Returns
    -------
    s : array, shape (n_features, n_features)
        Average covariance of each (class mean, neighbor mean) pair.
    """
    pair_covs = []
    for idx in range(len(class_mean)):
        # Covariance of the two-point cloud {class mean, neighbor mean}.
        pair = np.vstack((class_mean[idx], neighbor_mean[idx]))
        pair_covs.append(np.atleast_2d(_cov(pair)))
    return np.average(pair_covs, axis=0)
class LocalPairwiseLinearDiscriminantAnalysis:
    """Local pairwise LDA (LPLDA).

    Like classical LDA, but the between-class scatter is estimated locally:
    for each class a "neighbor mean" is computed from the most similar
    out-of-class samples, and the scatter of (class mean, neighbor mean)
    pairs replaces the global between-class scatter.
    """
    def __init__(self, n_components=None, within_between_ratio=10.0,
                 nearest_neighbor_ratio=1.2):
        # Output dimensionality; capped at n_classes - 1 in fit().
        self.n_components = n_components
        # k1: lower bound on neighborhood size relative to class size.
        self.within_between_ratio = within_between_ratio
        # k2: scaling of the count of similar out-of-class samples.
        self.nearest_neighbor_ratio = nearest_neighbor_ratio
    def _solve_eigen(self, X, y):
        """Eigenvalue solver.
        The eigenvalue solver computes the optimal solution of the Rayleigh
        coefficient (basically the ratio of between class scatter to within
        class scatter). This solver supports both classification and
        dimensionality reduction (with optional shrinkage).
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        Notes
        -----
        This solver is based on [1]_, section 3.8.3, pp. 121-124.
        References
        ----------
        .. [1] <NAME>, <NAME>, <NAME>. Pattern Classification
           (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
           0-471-05669-3.
        """
        self.means_, self.neighbor_means_ = _class_means_and_neighbor_means(
            X, y, self.within_between_ratio, self.nearest_neighbor_ratio)
        Sw = _class_cov(X, y)  # within class cov
        Sb = _local_pairwise_cov(self.means_, self.neighbor_means_)
        # Generalized symmetric eigenproblem: Sb v = w Sw v.
        evals, evecs = linalg.eigh(Sb, Sw)
        evecs = evecs[:, np.argsort(evals)[::-1]]  # sort eigenvectors
        self.scalings_ = np.asarray(evecs)
    def fit(self, X, y):
        """Fit Local Pairwise Trained Linear Discriminant Analysis
        model according to the given training data and parameters.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array, shape (n_samples,)
            Target values.
        """
        X, y = check_X_y(np.asarray(X), np.asarray(y.reshape(-1)), ensure_min_samples=2)
        self.classes_ = unique_labels(y)
        # Get the maximum number of components
        if self.n_components is None:
            self.n_components = len(self.classes_) - 1
        else:
            self.n_components = min(len(self.classes_) - 1, self.n_components)
        self._solve_eigen(X, y)
        return self
    def transform(self, X):
        """Project data to maximize class separation.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Transformed data.
        """
        check_is_fitted(self, ['scalings_'], all_or_any=any)
        X = check_array(X)
        # Project onto all eigenvectors, then keep the leading components.
        X_new = np.dot(X, self.scalings_)
        return X_new[:, :self.n_components]
def read_kaldi_scp_flt(kaldi_scp):
    """Read a kaldi scp of float vectors into a {key: vector} dict."""
    vectors = {}
    for key, vec in kaldi_io.read_vec_flt_scp(kaldi_scp):  # binary
        vectors[key] = vec
    return vectors
def load_spk2utt(filename):
    """Parse a kaldi spk2utt file into {spkid: [uttid, ...]}.

    Each line has the form "<spkid> <uttid1> <uttid2> ...". The process
    exits with a non-zero status if a speaker id appears more than once.
    """
    spk2utt = {}
    with open(filename, "r") as fp:
        for line in fp:
            line_split = line.strip().split(" ")
            spkid = line_split[0]
            if spkid in spk2utt:
                # Bug fix: the original passed spkid as a second print
                # argument, so the %s placeholder was never interpolated
                # (it printed a tuple). Also use sys.exit instead of the
                # site-provided exit() helper.
                print("load spk2utt failed, spkid is not uniq, %s" % spkid)
                sys.exit(-1)
            spk2utt[spkid] = line_split[1:]
    return spk2utt
def get_lambda_ids_and_vecs(lambda_xvec, min_utts = 6):
    """Flatten {spkid: [vec, ...]} into parallel (ids, vecs) lists,
    keeping only speakers with at least ``min_utts`` vectors."""
    ids = []
    vecs = []
    for spkid, spk_vecs in lambda_xvec.items():
        if len(spk_vecs) < min_utts:
            continue
        for vec in spk_vecs:
            ids.append(spkid)
            vecs.append(vec)
    return ids, vecs
def label_str_to_int(label_str):
    """Map labels to 1-based integer ids in first-seen order."""
    seen = {}
    encoded = []
    for label in label_str:
        if label not in seen:
            seen[label] = len(seen) + 1
        encoded.append(seen[label])
    return np.array(encoded)
def train_lda(ids, vecs, lda_dim):
    """Center ``vecs``, fit plain LDA, and return
    (ids, transformed vecs, transformed global mean, transform matrix).
    """
    ## compute and sub mean
    m = np.mean(vecs, axis=0)
    vecs = vecs - m
    ## lplda
    lda = LDA.LinearDiscriminantAnalysis(n_components=lda_dim)
    lda.fit(np.asarray(vecs), np.asarray(ids))
    ## compute mean
    dim = len(m)
    # Projected global mean — needed later to build kaldi's affine offset.
    m_trans = lda.transform(np.reshape(m, (1, dim)))
    ## compute lda trans
    vecs_trans = lda.transform(vecs)
    ## transform matrix
    lda_trans = lda.scalings_.T[:lda_dim, :]
    return ids, vecs_trans, m_trans, lda_trans
def train_lplda(ids, vecs, lplda_dim):
    """Center ``vecs``, fit LPLDA, and return
    (ids, transformed vecs, transformed global mean, transform matrix).
    """
    # Remove the global mean before fitting.
    global_mean = np.mean(vecs, axis=0)
    centered = vecs - global_mean
    model = LocalPairwiseLinearDiscriminantAnalysis(n_components=lplda_dim)
    model.fit(np.asarray(centered), np.asarray(ids))
    # Projected global mean — needed later to build kaldi's affine offset.
    dim = len(global_mean)
    m_trans = model.transform(np.reshape(global_mean, (1, dim)))
    vecs_trans = model.transform(centered)
    lda_trans = model.scalings_.T[:lplda_dim, :]
    return ids, vecs_trans, m_trans, lda_trans
def lda_lplda_kaldi_wrapper(lda_dim, lplda_dim, kaldi_scp, kaldi_utt2spk, lda_transform):
    """Train LDA followed by LPLDA on kaldi vectors and write the combined
    transform to ``lda_transform`` as a kaldi matrix of shape
    [lplda_dim, dim + 1]; the extra last column carries the negated
    projected mean (kaldi affine-transform convention).
    """
    data = read_kaldi_scp_flt(kaldi_scp)
    spk2utt = load_spk2utt(kaldi_utt2spk)
    train_vecs = {}
    for spkid in spk2utt.keys():
        train_vecs[spkid] = []
        uttid_uniq = []
        for uttid in spk2utt[spkid]:
            uttid_uniq.append(uttid)
        # De-duplicate utterance ids (sorted for determinism).
        uttid_uniq = sorted(set(uttid_uniq))
        for uttid in uttid_uniq:
            if uttid in data.keys():
                train_vecs[spkid].append(data[uttid])
    ## get ids, vecs
    ids, vecs = get_lambda_ids_and_vecs(train_vecs)
    int_ids = label_str_to_int(ids)
    dim = len(vecs[0])
    print ("lda lplda, ", len(vecs), len(vecs[0]))
    ## train lda,lplda
    int_ids, lda_trans_vecs, lda_trans_m, lda_trans_mat = train_lda(int_ids, vecs, lda_dim)
    int_ids, lplda_trans_vecs, lplda_trans_m, lplda_trans_mat = train_lplda(int_ids, lda_trans_vecs, lplda_dim)
    del lplda_trans_vecs, lplda_trans_m
    # copy to kaldi format
    transform = np.zeros([lplda_dim, dim + 1], float)
    # Compose the two projections into one matrix, and project the LDA-space
    # mean through the LPLDA transform for the offset column.
    lda_lplda_trans = np.dot(lplda_trans_mat, lda_trans_mat)
    lda_lplda_m = np.dot(lplda_trans_mat, np.reshape(lda_trans_m, (lda_dim, 1)))
    # m_trans = np.dot(lda_trans, m)
    for r in range(lplda_dim):
        for c in range(dim):
            transform[r][c] = lda_lplda_trans[r][c]
        transform[r][dim] = -1.0 * lda_lplda_m[r]
    ## save lda transform
    kaldi_io.write_mat(lda_transform, transform)
    return
def lplda_kaldi_wrapper(lda_dim, kaldi_scp, kaldi_utt2spk, lda_transform):
    """Train a single LPLDA on kaldi vectors and write the transform to
    ``lda_transform`` as a kaldi matrix of shape [lda_dim, dim + 1]; the
    extra last column carries the negated projected mean.
    """
    data = read_kaldi_scp_flt(kaldi_scp)
    spk2utt = load_spk2utt(kaldi_utt2spk)
    train_vecs = {}
    for spkid in spk2utt.keys():
        train_vecs[spkid] = []
        uttid_uniq = []
        for uttid in spk2utt[spkid]:
            uttid_uniq.append(uttid)
        # De-duplicate utterance ids (sorted for determinism).
        uttid_uniq = sorted(set(uttid_uniq))
        for uttid in uttid_uniq:
            if uttid in data.keys():
                train_vecs[spkid].append(data[uttid])
    ## get ids, vecs
    ids, vecs = get_lambda_ids_and_vecs(train_vecs)
    int_ids = label_str_to_int(ids)
    print ("lplda, ", len(vecs), len(vecs[0]))
    ## compute and sub mean
    m = np.mean(vecs, axis=0)
    vecs = vecs - m
    ## lplda
    lda = LocalPairwiseLinearDiscriminantAnalysis(n_components=lda_dim)
    lda.fit(np.asarray(vecs), np.asarray(int_ids))
    ## compute mean
    dim = len(m)
    # Projected global mean — becomes the affine offset column below.
    transform_m = lda.transform(np.reshape(m, (1, dim)))
    # copy to kaldi format
    transform = np.zeros([lda_dim, dim + 1], float)
    lda_trans = lda.scalings_.T[:lda_dim, :]
    # m_trans = np.dot(lda_trans, m)
    for r in range(lda_dim):
        for c in range(dim):
            transform[r][c] = lda_trans[r][c]
        transform[r][dim] = -1.0 * transform_m[0][r]
    ## save lda transform
    kaldi_io.write_mat(lda_transform, transform)
    return
if __name__ == '__main__':
    # Usage: <script> lda_dim lplda_dim kaldi_scp kaldi_utt2spk kaldi_lda_transform
    if len(sys.argv) != 6:
        print ("%s lda_dim lplda_dim kaldi_scp kaldi_utt2spk kaldi_lda_transform\n" % sys.argv[0])
        # Bug fix: the original wrote "sys.exit" without parentheses — a
        # bare attribute reference that never exits — so the argv indexing
        # below crashed with IndexError on bad usage.
        sys.exit(1)
    lda_dim = int(sys.argv[1])
    lplda_dim = int(sys.argv[2])
    kaldi_scp = sys.argv[3]
    kaldi_utt2spk = sys.argv[4]
    lda_transform = sys.argv[5]
    lda_lplda_kaldi_wrapper(lda_dim, lplda_dim, kaldi_scp, kaldi_utt2spk, lda_transform)
    # Equivalent kaldi recipe:
    # ivector-compute-lda --total-covariance-factor=0.0 --dim=$lda_dim \
    #   "ark:ivector-subtract-global-mean scp:$nnet_dir/xvectors_$name/xvector.scp ark:- |" \
    #   ark:$data/$name/utt2spk $nnet_dir/xvectors_$name/transform.mat
from database.models import UserProfile, CommunityMember, EventAttendee, RealEstateUnit, Location, UserActionRel, Vendor, Action, Data, Community
from _main_.utils.massenergize_errors import MassEnergizeAPIError, InvalidResourceError, ServerError, CustomMassenergizeError, NotAuthorizedError
from _main_.utils.massenergize_response import MassenergizeResponse
from _main_.utils.context import Context
from django.db.models import F
from sentry_sdk import capture_message
from .utils import get_community, get_user, get_user_or_die, get_community_or_die, get_admin_communities, remove_dups, find_reu_community, split_location_string, check_location
import json
def _get_or_create_reu_location(args, user=None):
  """Build (or fetch) a Location for a real-estate unit from request args.

  Accepts either a JSON 'address' payload or a legacy comma-separated
  'location' string; validates the parsed fields with check_location and
  raises Exception with the validation message when invalid.
  """
  unit_type=args.pop('unit_type', None)
  location=args.pop('location', None)
  # this address location now will contain the parsed address
  address = args.pop('address', None)
  if address:
    # address passed as a JSON string
    address = json.loads(address)
    street = address.get('street', '')
    unit_number = address.get('unit_number', '')
    zipcode = address.get('zipcode', '')
    city = address.get('city', '')
    county = address.get('county', '')
    state = address.get('state', '')
    country = address.get('country','US')
  else:
    # Legacy: get address from location string
    loc_parts = split_location_string(location)
    street = unit_number = city = county = state = zipcode = None
    # Bug fix: country was previously assigned only inside the branch
    # below, so short location strings raised NameError at check_location.
    country = 'US'
    if len(loc_parts)>= 4:
      street = loc_parts[0]
      unit_number = ''
      city = loc_parts[1]
      county = ''
      state = loc_parts[2]
      zipcode = loc_parts[3]
  # check location is valid
  location_type, valid = check_location(street, unit_number, city, state, zipcode, county, country)
  if not valid:
    print(location_type)
    raise Exception(location_type)
  reuloc, created = Location.objects.get_or_create(
    location_type = location_type,
    street = street,
    unit_number = unit_number,
    zipcode = zipcode,
    city = city,
    county = county,
    state = state,
    country = country
  )
  # Guard the debug message: zipcode may be None (legacy path) and the
  # caller may not supply a user.
  who = user.preferred_name if user else "unknown user"
  if created:
    print("Location with zipcode " + str(zipcode) + " created for user " + who)
  else:
    print("Location with zipcode " + str(zipcode) + " found for user " + who)
  return reuloc
class UserStore:
  def __init__(self):
    # Human-readable identifier for this store (used for diagnostics).
    self.name = "UserProfile Store/DB"
def _has_access(self, context: Context, user_id=None, email=None):
"""
Checks to make sure if the user has access to the user profile they want to
access
"""
if (not user_id and not email):
return False
if not context.user_is_logged_in:
return False
if context.user_is_admin():
# TODO: update this to only super admins. Do specific checks for
# community admins to make sure user is in their community first
return True
if user_id and (context.user_id == user_id):
return True
if email and (context.user_email == email):
return True
return False
  def get_user_info(self, context: Context, args) -> (dict, MassEnergizeAPIError):
    """Fetch one user profile; returns (user, None) or (None, error)."""
    try:
      #email = args.get('email', None)
      #user_id = args.get('user_id', None)
      # if not self._has_access(context, user_id, email):
      #   return None, CustomMassenergizeError("permission_denied")
      user = get_user_or_die(context, args)
      return user, None
    except Exception as e:
      capture_message(str(e), level="error")
      return None, CustomMassenergizeError(str(e))
def remove_household(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
household_id = args.get('household_id', None) or args.get('household_id', None)
if not household_id:
return None, CustomMassenergizeError("Please provide household_id")
return RealEstateUnit.objects.get(pk=household_id).delete(), None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
  def add_household(self, context: Context, args) -> (dict, MassEnergizeAPIError):
    """Create a real-estate unit for the requesting user, resolve its
    location, attach a matching community when found, and link it to the
    user. Returns (unit, None) or (None, error).
    """
    try:
      user = get_user_or_die(context, args)
      name = args.pop('name', None)
      unit_type=args.pop('unit_type', None)
      reuloc = _get_or_create_reu_location(args, user)
      reu = RealEstateUnit.objects.create(name=name, unit_type=unit_type)
      reu.address = reuloc
      # Infer the owning community from the unit's location, if possible.
      community = find_reu_community(reu)
      if community: reu.community = community
      reu.save()
      user.real_estate_units.add(reu)
      user.save()
      return reu, None
    except Exception as e:
      capture_message(str(e), level="error")
      return None, CustomMassenergizeError(str(e))
def edit_household(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
user = get_user_or_die(context, args)
name = args.pop('name', None)
household_id = args.get('household_id', None)
if not household_id:
return None, CustomMassenergizeError("Please provide household_id")
reuloc = _get_or_create_reu_location(args, user)
reu = RealEstateUnit.objects.get(pk=household_id)
reu.name = name
reu.unit_type = unit_type
reu.address = reuloc
verbose = False
community = find_reu_community(reu, verbose)
if community:
if verbose: print("Updating the REU with zipcode " + zipcode + " to the community " + community.name)
reu.community = community
reu.save()
return reu, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
def list_households(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
user = get_user_or_die(context, args)
return user.real_estate_units.all(), None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
  def list_users(self, community_id) -> (list, MassEnergizeAPIError):
    """List all user profiles in a community; returns ([], None) when the
    community cannot be found (the lookup error is only printed)."""
    community,err = get_community(community_id)
    if not community:
      print(err)
      return [], None
    return community.userprofile_set.all(), None
def list_events_for_user(self, context: Context, args) -> (list, MassEnergizeAPIError):
try:
user = get_user_or_die(context, args)
if not user:
return []
return EventAttendee.objects.filter(attendee=user), None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
  def create_user(self, context: Context, args) -> (dict, MassEnergizeAPIError):
    """Sign a user up: reuse or create the profile for this email, ensure
    community membership, and give first-time community members a default
    'Home' household. Returns ({user, community}, None) or (None, error).
    """
    try:
      email = args.get('email', None)
      community = get_community_or_die(context, args)
      # allow home address to be passed in
      location = args.pop('location', '')
      if not email:
        return None, CustomMassenergizeError("email required for sign up")
      # Reuse an existing profile with this email if one exists.
      user = UserProfile.objects.filter(email=email).first()
      if not user:
        new_user: UserProfile = UserProfile.objects.create(
          full_name = args.get('full_name'),
          preferred_name = args.get('preferred_name', None),
          email = args.get('email'),
          is_vendor = args.get('is_vendor', False),
          accepts_terms_and_conditions = args.pop('accepts_terms_and_conditions', False)
        )
      else:
        new_user: UserProfile = user
      community_member_exists = CommunityMember.objects.filter(user=new_user, community=community).exists()
      if not community_member_exists:
        # add them as a member to community
        CommunityMember.objects.create(user=new_user, community=community)
        #create their first household
        household = RealEstateUnit.objects.create(name="Home", unit_type="residential", community=community, location=location)
        new_user.real_estate_units.add(household)
      res = {
        "user": new_user,
        "community": community
      }
      return res, None
    except Exception as e:
      capture_message(str(e), level="error")
      return None, CustomMassenergizeError(e)
  def update_user(self, context: Context, user_id, args) -> (dict, MassEnergizeAPIError):
    """Update fields on a user profile; only the user themselves or an
    admin may do so. Returns (user, None) or (None, error).
    """
    try:
      email = args.get('email', None)
      # user_id = args.get('user_id', None)
      if not self._has_access(context, user_id, email):
        return None, CustomMassenergizeError("permission_denied")
      # NOTE(review): this re-checks permission with a slightly different
      # rule than _has_access (no email match) — confirm which is intended.
      if context.user_is_logged_in and ((context.user_id == user_id) or (context.user_is_admin())):
        user = UserProfile.objects.filter(id=user_id)
        if not user:
          return None, InvalidResourceError()
        user.update(**args)
        return user.first(), None
      else:
        return None, CustomMassenergizeError('permission_denied')
    except Exception as e:
      capture_message(str(e), level="error")
      return None, CustomMassenergizeError(e)
def delete_user(self, context: Context, user_id) -> (dict, MassEnergizeAPIError):
try:
if not user_id:
return None, InvalidResourceError()
#check to make sure the one deleting is an admin
if not context.user_is_admin():
# if they are not an admin make sure they can only delete themselves
if not context.user_id != user_id:
return None, NotAuthorizedError()
users = UserProfile.objects.filter(id=user_id)
users.update(is_deleted=True)
return users.first(), None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
  def list_users_for_community_admin(self, context: Context, community_id) -> (list, MassEnergizeAPIError):
    """List users visible to a community admin. Super admins see everyone;
    with no community id, aggregates members of all communities this admin
    manages (de-duplicated). Returns (users, None) or (None, error).
    """
    try:
      if context.user_is_super_admin:
        return self.list_users_for_super_admin(context)
      elif not context.user_is_community_admin:
        return None, NotAuthorizedError()
      community, err = get_community(community_id)
      if not community and context.user_id:
        # No specific community requested: gather members across every
        # community this admin manages.
        communities, err = get_admin_communities(context)
        comm_ids = [c.id for c in communities]
        users = [cm.user for cm in CommunityMember.objects.filter(community_id__in=comm_ids, user__is_deleted=False)]
        #now remove all duplicates
        users = remove_dups(users)
        return users, None
      elif not community:
        print(err)
        return [], None
      users = [cm.user for cm in CommunityMember.objects.filter(community=community, is_deleted=False, user__is_deleted=False)]
      users = remove_dups(users)
      return users, None
    except Exception as e:
      capture_message(str(e), level="error")
      return None, CustomMassenergizeError(e)
def list_users_for_super_admin(self, context: Context):
try:
if not context.user_is_super_admin:
return None, NotAuthorizedError()
users = UserProfile.objects.filter(is_deleted=False)
return users, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
  def add_action_todo(self, context: Context, args) -> (dict, MassEnergizeAPIError):
    """Mark an action as TODO for a user, attached to a household
    (creating a default one when the user has none) and optionally a
    vendor. An existing relation for the same triple is re-used and its
    status flipped to TODO. Returns (relation, None) or (None, error).
    """
    try:
      user = get_user_or_die(context, args)
      action_id = args.get("action_id", None)
      household_id = args.get("household_id", None)
      vendor_id = args.get("vendor_id", None)
      if not user:
        return None, CustomMassenergizeError("sign_in_required / provide user_id or user_email")
      action: Action = Action.objects.get(id=action_id)
      if not action:
        return None, CustomMassenergizeError("Please provide a valid action_id")
      if household_id:
        household: RealEstateUnit = RealEstateUnit.objects.get(id=household_id)
      else:
        # Default to the user's first household when none is specified.
        household = user.real_estate_units.all().first()
      if not household:
        # NOTE(review): the trailing apostrophe in "...'s Home'" looks like
        # a typo in the default household name — confirm before changing.
        household = RealEstateUnit(name=f"{user.preferred_name}'s Home'")
        household.save()
        user.real_estate_units.add(household)
      if vendor_id:
        vendor = Vendor.objects.get(id=vendor_id) #not required
      #if this already exists as a todo just move it over
      completed = UserActionRel.objects.filter(user=user, real_estate_unit=household, action=action)
      if completed:
        #TODO: update action stats
        completed.update(status="TODO")
        return completed.first(), None
      # create a new one since we didn't find it existed before
      new_user_action_rel = UserActionRel(user=user, action=action, real_estate_unit=household, status="TODO")
      if vendor_id:
        new_user_action_rel.vendor = vendor
      new_user_action_rel.save()
      return new_user_action_rel, None
    except Exception as e:
      capture_message(str(e), level="error")
      import traceback
      traceback.print_exc()
      return None, CustomMassenergizeError(str(e))
def add_action_completed(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
user_id = context.user_id or args.get('user_id')
user_email = context.user_email or args.get('user_email')
action_id = args.get("action_id", None)
household_id = args.get("household_id", None)
vendor_id = args.get("vendor_id", None)
date_completed = args.get("date_completed", None)
# future use
carbon_impact = args.get("carbon_impact", 0)
user = None
if user_id:
user = UserProfile.objects.get(id=user_id)
elif user_email:
user = UserProfile.objects.get(email=user_email)
if not user:
return None, CustomMassenergizeError("sign_in_required / Provide user_id")
action = Action.objects.get(id=action_id)
if not action:
return None, CustomMassenergizeError("Please provide an action_id")
household = RealEstateUnit.objects.get(id=household_id)
if not household:
return None, CustomMassenergizeError("Please provide a household_id")
# update all data points
for t in action.tags.all():
data = Data.objects.filter(community=action.community, tag=t)
if data:
data.update(value=F("value") + 1)
else:
#data for this community, action does not exist so create one
d = Data(tag=t, community=action.community, value=1, name=f"{t.name}")
d.save()
#if this already exists as a todo just move it over
completed = UserActionRel.objects.filter(user=user, real_estate_unit=household, action=action)
if completed:
| |
return self._target_endpoint
    @target_endpoint.setter
    def target_endpoint(self, target_endpoint):
        """Sets the target_endpoint of this QueryJobResp.
        :param target_endpoint: The target_endpoint of this QueryJobResp.
        :type: Endpoint
        """
        # Plain assignment to the private backing field; no validation here.
        self._target_endpoint = target_endpoint
    @property
    def net_type(self):
        """Gets the net_type of this QueryJobResp.
        Network type.
        :return: The net_type of this QueryJobResp.
        :rtype: str
        """
        return self._net_type
    @net_type.setter
    def net_type(self, net_type):
        """Sets the net_type of this QueryJobResp.
        Network type.
        :param net_type: The net_type of this QueryJobResp.
        :type: str
        """
        self._net_type = net_type
    @property
    def failed_reason(self):
        """Gets the failed_reason of this QueryJobResp.
        Failure reason.
        :return: The failed_reason of this QueryJobResp.
        :rtype: str
        """
        return self._failed_reason
    @failed_reason.setter
    def failed_reason(self, failed_reason):
        """Sets the failed_reason of this QueryJobResp.
        Failure reason.
        :param failed_reason: The failed_reason of this QueryJobResp.
        :type: str
        """
        self._failed_reason = failed_reason
    @property
    def inst_info(self):
        """Gets the inst_info of this QueryJobResp.
        Instance information of the job.
        :return: The inst_info of this QueryJobResp.
        :rtype: InstInfo
        """
        return self._inst_info
    @inst_info.setter
    def inst_info(self, inst_info):
        """Sets the inst_info of this QueryJobResp.
        Instance information of the job.
        :param inst_info: The inst_info of this QueryJobResp.
        :type: InstInfo
        """
        self._inst_info = inst_info
    @property
    def actual_start_time(self):
        """Gets the actual_start_time of this QueryJobResp.
        Actual start time, as a timestamp.
        :return: The actual_start_time of this QueryJobResp.
        :rtype: str
        """
        return self._actual_start_time
    @actual_start_time.setter
    def actual_start_time(self, actual_start_time):
        """Sets the actual_start_time of this QueryJobResp.
        Actual start time, as a timestamp.
        :param actual_start_time: The actual_start_time of this QueryJobResp.
        :type: str
        """
        self._actual_start_time = actual_start_time
    @property
    def full_transfer_complete_time(self):
        """Gets the full_transfer_complete_time of this QueryJobResp.
        Completion time of the full (initial) transfer, as a timestamp.
        :return: The full_transfer_complete_time of this QueryJobResp.
        :rtype: str
        """
        return self._full_transfer_complete_time
    @full_transfer_complete_time.setter
    def full_transfer_complete_time(self, full_transfer_complete_time):
        """Sets the full_transfer_complete_time of this QueryJobResp.
        Completion time of the full (initial) transfer, as a timestamp.
        :param full_transfer_complete_time: The full_transfer_complete_time of this QueryJobResp.
        :type: str
        """
        self._full_transfer_complete_time = full_transfer_complete_time
    @property
    def update_time(self):
        """Gets the update_time of this QueryJobResp.
        Last update time, as a timestamp.
        :return: The update_time of this QueryJobResp.
        :rtype: str
        """
        return self._update_time
    @update_time.setter
    def update_time(self, update_time):
        """Sets the update_time of this QueryJobResp.
        Last update time, as a timestamp.
        :param update_time: The update_time of this QueryJobResp.
        :type: str
        """
        self._update_time = update_time
@property
def job_direction(self):
"""Gets the job_direction of this QueryJobResp.
任务方向
:return: The job_direction of this QueryJobResp.
:rtype: str
"""
return self._job_direction
@job_direction.setter
def job_direction(self, job_direction):
"""Sets the job_direction of this QueryJobResp.
任务方向
:param job_direction: The job_direction of this QueryJobResp.
:type: str
"""
self._job_direction = job_direction
@property
def db_use_type(self):
"""Gets the db_use_type of this QueryJobResp.
迁移场景 - migration:实时迁移 - sync:实时同步 - cloudDataGuard:实时灾备
:return: The db_use_type of this QueryJobResp.
:rtype: str
"""
return self._db_use_type
@db_use_type.setter
def db_use_type(self, db_use_type):
"""Sets the db_use_type of this QueryJobResp.
迁移场景 - migration:实时迁移 - sync:实时同步 - cloudDataGuard:实时灾备
:param db_use_type: The db_use_type of this QueryJobResp.
:type: str
"""
self._db_use_type = db_use_type
    @property
    def need_restart(self):
        """Gets the need_restart of this QueryJobResp.

        Whether a restart is required.

        :return: The need_restart of this QueryJobResp.
        :rtype: bool
        """
        return self._need_restart

    @need_restart.setter
    def need_restart(self, need_restart):
        """Sets the need_restart of this QueryJobResp.

        Whether a restart is required.

        :param need_restart: The need_restart of this QueryJobResp.
        :type: bool
        """
        self._need_restart = need_restart

    @property
    def is_target_readonly(self):
        """Gets the is_target_readonly of this QueryJobResp.

        Whether the target instance is restricted to read-only.

        :return: The is_target_readonly of this QueryJobResp.
        :rtype: bool
        """
        return self._is_target_readonly

    @is_target_readonly.setter
    def is_target_readonly(self, is_target_readonly):
        """Sets the is_target_readonly of this QueryJobResp.

        Whether the target instance is restricted to read-only.

        :param is_target_readonly: The is_target_readonly of this QueryJobResp.
        :type: bool
        """
        self._is_target_readonly = is_target_readonly

    @property
    def conflict_policy(self):
        """Gets the conflict_policy of this QueryJobResp.

        Conflict policy: - stop: fail on conflict - overwrite: overwrite on conflict - ignore: ignore conflicts.

        :return: The conflict_policy of this QueryJobResp.
        :rtype: str
        """
        return self._conflict_policy

    @conflict_policy.setter
    def conflict_policy(self, conflict_policy):
        """Sets the conflict_policy of this QueryJobResp.

        Conflict policy: - stop: fail on conflict - overwrite: overwrite on conflict - ignore: ignore conflicts.

        :param conflict_policy: The conflict_policy of this QueryJobResp.
        :type: str
        """
        self._conflict_policy = conflict_policy

    @property
    def filter_ddl_policy(self):
        """Gets the filter_ddl_policy of this QueryJobResp.

        DDL filtering policy: - drop_database: filter drop_database - drop_databasefilter_all: filter all DDL - \"\": no filtering.

        :return: The filter_ddl_policy of this QueryJobResp.
        :rtype: str
        """
        return self._filter_ddl_policy

    @filter_ddl_policy.setter
    def filter_ddl_policy(self, filter_ddl_policy):
        """Sets the filter_ddl_policy of this QueryJobResp.

        DDL filtering policy: - drop_database: filter drop_database - drop_databasefilter_all: filter all DDL - \"\": no filtering.

        :param filter_ddl_policy: The filter_ddl_policy of this QueryJobResp.
        :type: str
        """
        self._filter_ddl_policy = filter_ddl_policy

    @property
    def speed_limit(self):
        """Gets the speed_limit of this QueryJobResp.

        Migration speed limit.

        :return: The speed_limit of this QueryJobResp.
        :rtype: list[SpeedLimitInfo]
        """
        return self._speed_limit

    @speed_limit.setter
    def speed_limit(self, speed_limit):
        """Sets the speed_limit of this QueryJobResp.

        Migration speed limit.

        :param speed_limit: The speed_limit of this QueryJobResp.
        :type: list[SpeedLimitInfo]
        """
        self._speed_limit = speed_limit

    @property
    def schema_type(self):
        """Gets the schema_type of this QueryJobResp.

        Migration scheme: - Replication: primary/standby replication - Tungsten: log parsing - PGBaseBackup: PG backup.

        :return: The schema_type of this QueryJobResp.
        :rtype: str
        """
        return self._schema_type

    @schema_type.setter
    def schema_type(self, schema_type):
        """Sets the schema_type of this QueryJobResp.

        Migration scheme: - Replication: primary/standby replication - Tungsten: log parsing - PGBaseBackup: PG backup.

        :param schema_type: The schema_type of this QueryJobResp.
        :type: str
        """
        self._schema_type = schema_type

    @property
    def node_num(self):
        """Gets the node_num of this QueryJobResp.

        Number of nodes.

        :return: The node_num of this QueryJobResp.
        :rtype: str
        """
        return self._node_num

    @node_num.setter
    def node_num(self, node_num):
        """Sets the node_num of this QueryJobResp.

        Number of nodes.

        :param node_num: The node_num of this QueryJobResp.
        :type: str
        """
        self._node_num = node_num
    @property
    def object_switch(self):
        """Gets the object_switch of this QueryJobResp.

        Object selection switch.

        :return: The object_switch of this QueryJobResp.
        :rtype: bool
        """
        return self._object_switch

    @object_switch.setter
    def object_switch(self, object_switch):
        """Sets the object_switch of this QueryJobResp.

        Object selection switch.

        :param object_switch: The object_switch of this QueryJobResp.
        :type: bool
        """
        self._object_switch = object_switch

    @property
    def master_job_id(self):
        """Gets the master_job_id of this QueryJobResp.

        Master job ID.

        :return: The master_job_id of this QueryJobResp.
        :rtype: str
        """
        return self._master_job_id

    @master_job_id.setter
    def master_job_id(self, master_job_id):
        """Sets the master_job_id of this QueryJobResp.

        Master job ID.

        :param master_job_id: The master_job_id of this QueryJobResp.
        :type: str
        """
        self._master_job_id = master_job_id

    @property
    def full_mode(self):
        """Gets the full_mode of this QueryJobResp.

        Full snapshot mode.

        :return: The full_mode of this QueryJobResp.
        :rtype: str
        """
        return self._full_mode

    @full_mode.setter
    def full_mode(self, full_mode):
        """Sets the full_mode of this QueryJobResp.

        Full snapshot mode.

        :param full_mode: The full_mode of this QueryJobResp.
        :type: str
        """
        self._full_mode = full_mode

    @property
    def struct_trans(self):
        """Gets the struct_trans of this QueryJobResp.

        Whether to migrate the structure.

        :return: The struct_trans of this QueryJobResp.
        :rtype: bool
        """
        return self._struct_trans

    @struct_trans.setter
    def struct_trans(self, struct_trans):
        """Sets the struct_trans of this QueryJobResp.

        Whether to migrate the structure.

        :param struct_trans: The struct_trans of this QueryJobResp.
        :type: bool
        """
        self._struct_trans = struct_trans

    @property
    def index_trans(self):
        """Gets the index_trans of this QueryJobResp.

        Whether to migrate indexes.

        :return: The index_trans of this QueryJobResp.
        :rtype: bool
        """
        return self._index_trans

    @index_trans.setter
    def index_trans(self, index_trans):
        """Sets the index_trans of this QueryJobResp.

        Whether to migrate indexes.

        :param index_trans: The index_trans of this QueryJobResp.
        :type: bool
        """
        self._index_trans = index_trans

    @property
    def replace_definer(self):
        """Gets the replace_definer of this QueryJobResp.

        Whether to replace the definer with the target database user.

        :return: The replace_definer of this QueryJobResp.
        :rtype: bool
        """
        return self._replace_definer

    @replace_definer.setter
    def replace_definer(self, replace_definer):
        """Sets the replace_definer of this QueryJobResp.

        Whether to replace the definer with the target database user.

        :param replace_definer: The replace_definer of this QueryJobResp.
        :type: bool
        """
        self._replace_definer = replace_definer

    @property
    def migrate_user(self):
        """Gets the migrate_user of this QueryJobResp.

        Whether to migrate users.

        :return: The migrate_user of this QueryJobResp.
        :rtype: bool
        """
        return self._migrate_user

    @migrate_user.setter
    def migrate_user(self, migrate_user):
        """Sets the migrate_user of this QueryJobResp.

        Whether to migrate users.

        :param migrate_user: The migrate_user of this QueryJobResp.
        :type: bool
        """
        self._migrate_user = migrate_user

    @property
    def sync_database(self):
        """Gets the sync_database of this QueryJobResp.

        Whether synchronization is at database level.

        :return: The sync_database of this QueryJobResp.
        :rtype: bool
        """
        return self._sync_database

    @sync_database.setter
    def sync_database(self, sync_database):
        """Sets the sync_database of this QueryJobResp.

        Whether synchronization is at database level.

        :param sync_database: The sync_database of this QueryJobResp.
        :type: bool
        """
        self._sync_database = sync_database
    @property
    def error_code(self):
        """Gets the error_code of this QueryJobResp.

        Error code.

        :return: The error_code of this QueryJobResp.
        :rtype: str
        """
        return self._error_code

    @error_code.setter
    def error_code(self, error_code):
        """Sets the error_code of this QueryJobResp.

        Error code.

        :param error_code: The error_code of this QueryJobResp.
        :type: str
        """
        self._error_code = error_code

    @property
    def error_message(self):
        """Gets the error_message of this QueryJobResp.

        Error message.

        :return: The error_message of this QueryJobResp.
        :rtype: str
        """
        return self._error_message

    @error_message.setter
    def error_message(self, error_message):
        """Sets the error_message of this QueryJobResp.

        Error message.

        :param error_message: The error_message of this QueryJobResp.
        :type: str
        """
        self._error_message = error_message

    @property
    def target_root_db(self):
        """Gets the target_root_db of this QueryJobResp.

        :return: The target_root_db of this QueryJobResp.
        :rtype: DefaultRootDb
        """
        return self._target_root_db

    @target_root_db.setter
    def target_root_db(self, target_root_db):
        """Sets the target_root_db of this QueryJobResp.

        :param target_root_db: The target_root_db of this QueryJobResp.
        :type: DefaultRootDb
        """
        self._target_root_db = target_root_db

    @property
    def az_code(self):
        """Gets the az_code of this QueryJobResp.

        AZ where the node resides.

        :return: The az_code of this QueryJobResp.
        :rtype: str
        """
        return self._az_code
@az_code.setter
def az_code(self, az_code):
"""Sets the az_code of this QueryJobResp.
node所在AZ
:param az_code: The az_code of this QueryJobResp.
:type: str
| |
"""
Precisely APIs
Enhance & enrich your data, applications, business processes, and workflows with rich location, information, and identify APIs. # noqa: E501
The version of the OpenAPI document: 11.9.3
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from com.precisely.apis.api_client import ApiClient, Endpoint as _Endpoint
from com.precisely.apis.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from com.precisely.apis.model.error_info import ErrorInfo
from com.precisely.apis.model.parcel_boundary import ParcelBoundary
from com.precisely.apis.model.property_info_address_request import PropertyInfoAddressRequest
from com.precisely.apis.model.property_info_response import PropertyInfoResponse
from com.precisely.apis.model.property_info_responses import PropertyInfoResponses
class PropertyInformationServiceApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.get_parcel_boundary_by_address_endpoint = _Endpoint(
settings={
'response_type': (ParcelBoundary,),
'auth': [
'oAuth2Password'
],
'endpoint_path': '/property/v1/parcelboundary/byaddress',
'operation_id': 'get_parcel_boundary_by_address',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'address',
],
'required': [
'address',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'address':
(str,),
},
'attribute_map': {
'address': 'address',
},
'location_map': {
'address': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'application/xml'
],
'content_type': [],
},
api_client=api_client
)
self.get_parcel_boundary_by_location_endpoint = _Endpoint(
settings={
'response_type': (ParcelBoundary,),
'auth': [
'<PASSWORD>'
],
'endpoint_path': '/property/v1/parcelboundary/bylocation',
'operation_id': 'get_parcel_boundary_by_location',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'longitude',
'latitude',
],
'required': [
'longitude',
'latitude',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'longitude':
(str,),
'latitude':
(str,),
},
'attribute_map': {
'longitude': 'longitude',
'latitude': 'latitude',
},
'location_map': {
'longitude': 'query',
'latitude': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'application/xml'
],
'content_type': [],
},
api_client=api_client
)
self.get_property_attributes_by_address_endpoint = _Endpoint(
settings={
'response_type': (PropertyInfoResponse,),
'auth': [
'<PASSWORD>'
],
'endpoint_path': '/property/v2/attributes/byaddress',
'operation_id': 'get_property_attributes_by_address',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'address',
'attributes',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'address':
(str,),
'attributes':
(str,),
},
'attribute_map': {
'address': 'address',
'attributes': 'attributes',
},
'location_map': {
'address': 'query',
'attributes': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'application/xml'
],
'content_type': [],
},
api_client=api_client
)
self.get_property_attributes_by_address_batch_endpoint = _Endpoint(
settings={
'response_type': (PropertyInfoResponses,),
'auth': [
'<PASSWORD>'
],
'endpoint_path': '/property/v2/attributes/byaddress',
'operation_id': 'get_property_attributes_by_address_batch',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'property_info_address_request',
],
'required': [
'property_info_address_request',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'property_info_address_request':
(PropertyInfoAddressRequest,),
},
'attribute_map': {
},
'location_map': {
'property_info_address_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'application/xml'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
def get_parcel_boundary_by_address(
self,
address,
**kwargs
):
"""Get Parcel Boundary By Address # noqa: E501
Accepts address as input and returns property parcel boundary around that address. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_parcel_boundary_by_address(address, async_req=True)
>>> result = thread.get()
Args:
address (str): free form address text
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ParcelBoundary
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['address'] = \
address
return self.get_parcel_boundary_by_address_endpoint.call_with_http_info(**kwargs)
def get_parcel_boundary_by_location(
self,
longitude,
latitude,
**kwargs
):
"""Get Parcel Boundary By Location # noqa: E501
Accepts latitude/longitude as input and returns property parcel boundary around that location. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_parcel_boundary_by_location(longitude, latitude, async_req=True)
>>> result = thread.get()
Args:
longitude (str): Longitude of Location
latitude (str): Latitude of Location
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ParcelBoundary
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['longitude'] = \
longitude
kwargs['latitude'] = \
latitude
return self.get_parcel_boundary_by_location_endpoint.call_with_http_info(**kwargs)
def get_property_attributes_by_address(
self,
**kwargs
):
"""PropertyV2 Attributes By Address. # noqa: E501
GetPropertyAttributesbyAddress Endpoint will take address as an input and will return key property attributes in response. Optionally user will have the option to filter the attributes and will pay for only returned attributes. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_property_attributes_by_address(async_req=True)
>>> result = thread.get()
Keyword Args:
address (str): free form address text. [optional]
attributes (str): Case-insensitive comma separated values of property attributes. Response will contain only the input attributes.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
PropertyInfoResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_property_attributes_by_address_endpoint.call_with_http_info(**kwargs)
def get_property_attributes_by_address_batch(
self,
property_info_address_request,
**kwargs
):
"""PropertyV2 Attributes By Address Batch. # noqa: E501
GetPropertyAttributesbyAddressBatch | |
import logging
import tempfile
from bzt import TaurusConfigError
from bzt.modules import ConsolidatingAggregator
from bzt.modules.aggregator import DataPoint, KPISet
from bzt.modules.apiritif import ApiritifNoseExecutor
from bzt.modules.apiritif.executor import ApiritifLoadReader, ApiritifFuncReader
from tests import RESOURCES_DIR, ExecutorTestCase
from tests.mocks import EngineEmul
class TestApiritifScriptGeneration(ExecutorTestCase):
EXECUTOR = ApiritifNoseExecutor
    def test_transactions(self):
        """Nested 'transaction' blocks must survive codegen (checked against a fixture script)."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "requests": [
                        "url_0",
                        {"transaction": "t_1", "do": [
                            "url_1.0",
                            {"url": "url_1.1", "headers": {"o": "ne", "t": "wo"}}]},
                        {"transaction": "t_2", "do": [
                            {"url": "url_2.0", "think-time": 2},
                            {"transaction": "t_22", "do": [
                                {"url": "url_22.0", "think-time": 3}]}]}]}}]})
        self.obj.prepare()
        exp_file = RESOURCES_DIR + "apiritif/test_transactions.py"
        self.assertFilesEqual(exp_file, self.obj.script, python_files=True)

    def test_keepalive_only(self):
        """'keepalive' with no default address still emits a (blank) http target."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "keepalive": True,
                    "requests": [
                        "http://blazedemo.com/"]}}]})
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("target = apiritif.http.target('')", test_script)

    def test_keepalive(self):
        """'keepalive: False' must be forwarded as target.keep_alive(False)."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "http://blazedemo.com",
                    "keepalive": False,
                    "requests": [
                        "/",
                    ]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("target.keep_alive(False)", test_script)

    def test_timeout_default(self):
        """No scenario timeout -> no implicit 30s timeout in the generated script."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "http://blazedemo.com",
                    "requests": [
                        "/",
                    ]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertNotIn("timeout=30.0", test_script)

    def test_timeout(self):
        """Scenario-level timeout goes on the target; a per-request timeout overrides it."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "timeout": "10s",
                    "default-address": "http://blazedemo.com",
                    "requests": [
                        "/?tag=1",
                        {
                            "url": "/?tag=2",
                            "timeout": "2s",
                        }
                    ]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("target.timeout(10.0)", test_script)
        self.assertNotIn("get('/?tag=1', timeout=10.0", test_script)
        self.assertIn("get('/?tag=2', timeout=2.0", test_script)
    def test_timeout_notarget(self):
        """Without a default address the timeout is attached to each request call."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "timeout": "10s",
                    "requests": [
                        "http://blazedemo.com/",
                    ]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("get('http://blazedemo.com/', timeout=10.0", test_script)

    def test_think_time(self):
        """'think-time' must be rendered as a sleep() with the duration in float seconds."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "http://blazedemo.com",
                    "requests": [
                        {
                            "url": "/?tag=2",
                            "think-time": "1s500ms",
                        }
                    ]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("sleep(1.5)", test_script)

    def test_methods(self):
        """Each HTTP method in the scenario maps to the matching apiritif call."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "http://blazedemo.com",
                    "requests": [
                        {"url": "/?tag=get",
                         "method": "GET"},
                        {"url": "/?tag=post",
                         "method": "POST"},
                        {"url": "/?tag=put",
                         "method": "PUT"},
                        {"url": "/?tag=patch",
                         "method": "PATCH"},
                        {"url": "/?tag=head",
                         "method": "HEAD"},
                        {"url": "/?tag=delete",
                         "method": "DELETE"},
                    ]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("get('/?tag=get'", test_script)
        self.assertIn("post('/?tag=post'", test_script)
        self.assertIn("put('/?tag=put'", test_script)
        self.assertIn("patch('/?tag=patch'", test_script)
        self.assertIn("head('/?tag=head'", test_script)
        self.assertIn("delete('/?tag=delete'", test_script)

    def test_default_address_path_prefix(self):
        """'default-address' and 'base-path' are emitted as target settings."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "https://a.blazemeter.com",
                    "base-path": "/api/latest",
                    "requests": [
                        "/user",
                    ]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("target('https://a.blazemeter.com')", test_script)
        self.assertIn("target.base_path('/api/latest')", test_script)

    def test_headers(self):
        """Both scenario-level and request-level headers appear in the script."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "http://blazedemo.com",
                    "headers": {"X-Foo": "foo"},
                    "requests": [{
                        "url": "/",
                        "headers": {"X-Bar": "bar"}
                    }]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("'X-Foo': 'foo'", test_script)
        self.assertIn("'X-Bar': 'bar'", test_script)
    def test_follow_redirects_default(self):
        """Redirects are enabled once on the target, not repeated per request."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "http://blazedemo.com",
                    "requests": [{
                        "url": "/",
                    }]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("target.allow_redirects(True)", test_script)
        self.assertNotIn("allow_redirects=True", test_script)

    def test_follow_redirects(self):
        """'follow-redirects: False' becomes allow_redirects=False on the request."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "http://blazedemo.com",
                    "requests": [{
                        "url": "/",
                        "follow-redirects": False,
                    }]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("allow_redirects=False", test_script)

    def test_body_params(self):
        """A dict body without a JSON content type is sent as form params=."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "http://blazedemo.com",
                    "requests": [{
                        "url": "/",
                        "body": {
                            "foo": "bar",
                        },
                    }]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("params={\n 'foo': 'bar',\n }", test_script)

    def test_body_json(self):
        """A dict body with Content-Type application/json is sent as json=."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "http://blazedemo.com",
                    "requests": [{
                        "url": "/",
                        "headers": {
                            "Content-Type": "application/json",
                        },
                        "body": {
                            "foo": "bar",
                        },
                    }]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("json={\n 'foo': 'bar',\n }", test_script)

    def test_body_string(self):
        """A string body is passed verbatim as data=."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "http://blazedemo.com",
                    "requests": [{
                        "url": "/",
                        "body": "MY PERFECT BODY"
                    }]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("data='MY PERFECT BODY'", test_script)

    def test_body_unknown(self):
        """A body that is neither str nor dict must raise TaurusConfigError at prepare()."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "http://blazedemo.com",
                    "requests": [{
                        "url": "/",
                        "body": 123
                    }]
                }
            }]
        })
        self.assertRaises(TaurusConfigError, self.obj.prepare)
    def test_plain_assertions(self):
        """Bare assertion strings default to regex-in-body checks."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "http://blazedemo.com",
                    "requests": [{
                        "url": "/",
                        "assert": [
                            "Welcome", "Simple Travel Agency"
                        ]
                    }]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("response.assert_regex_in_body('Welcome')", test_script)
        self.assertIn("response.assert_regex_in_body('Simple Travel Agency')", test_script)

    def test_plain_assertion_kinds(self):
        """Every combination of contains/regexp/not/subject maps to the right assert_* call."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "http://blazedemo.com",
                    "requests": [{
                        "url": "/",
                        "assert": [
                            {"contains": ["1"], "regexp": False, "not": False},
                            {"contains": ["2"], "regexp": False, "not": True},
                            {"contains": ["3"], "regexp": True, "not": False},
                            {"contains": ["4"], "regexp": True, "not": True},
                            {"contains": ["5"], "regexp": False, "not": False, "subject": "headers"},
                            {"contains": ["6"], "regexp": False, "not": True, "subject": "headers"},
                            {"contains": ["7"], "regexp": True, "not": False, "subject": "headers"},
                            {"contains": ["8"], "regexp": True, "not": True, "subject": "headers"},
                            # NOTE(review): duplicate of the previous entry — possibly unintentional
                            {"contains": ["8"], "regexp": True, "not": True, "subject": "headers"},
                            {"contains": ["9"], "not": False, "subject": "http-code"},
                            {"contains": ["10"], "not": True, "subject": "http-code"},
                        ]
                    }]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("assert_in_body('1')", test_script)
        self.assertIn("assert_not_in_body('2')", test_script)
        self.assertIn("assert_regex_in_body('3')", test_script)
        self.assertIn("assert_regex_not_in_body('4')", test_script)
        self.assertIn("assert_in_headers('5')", test_script)
        self.assertIn("assert_not_in_headers('6')", test_script)
        self.assertIn("assert_regex_in_headers('7')", test_script)
        self.assertIn("assert_regex_not_in_headers('8')", test_script)
        self.assertIn("assert_status_code('9')", test_script)
        self.assertIn("assert_not_status_code('10')", test_script)

    def test_jsonpath_assertions(self):
        """A bare 'assert-jsonpath' string becomes assert_jsonpath with no expected value."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "https://api.github.com",
                    "requests": [{
                        "url": "/",
                        "assert-jsonpath": [
                            "$.foo.bar"
                        ]
                    }]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("assert_jsonpath('$.foo.bar', expected_value=None)", test_script)

    def test_jsonpath_assertions_kinds(self):
        """'invert' and 'expected-value' options select the jsonpath assertion variant."""
        self.configure({
            "execution": [{
                "test-mode": "apiritif",
                "scenario": {
                    "default-address": "https://api.github.com",
                    "requests": [{
                        "url": "/",
                        "assert-jsonpath": [
                            {"jsonpath": "$.1", "invert": False},
                            {"jsonpath": "$.2", "invert": True},
                            {"jsonpath": "$.3", "expected-value": "value"},
                        ]
                    }]
                }
            }]
        })
        self.obj.prepare()
        with open(self.obj.script) as fds:
            test_script = fds.read()
        self.assertIn("assert_jsonpath('$.1', expected_value=None)", test_script)
        self.assertIn("assert_not_jsonpath('$.2', expected_value=None)", test_script)
        self.assertIn("assert_jsonpath('$.3', expected_value='value')", test_script)
def test_xpath_assertions(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "https://api.github.com",
"requests": [{
"url": "/",
"assert-xpath": [
"//head/title"
]
}]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("assert_xpath('//head/title', parser_type='html', validate=False)", test_script)
def test_xpath_assertions_kinds(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "https://api.github.com",
"requests": [{
"url": "/",
"assert-xpath": [
{"xpath": "//1", "invert": False},
{"xpath": "//2", "invert": True},
{"xpath": "//3", "validate-xml": True},
{"xpath": "//4", "validate-xml": False, "use-tolerant-parser": False},
]
}]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("assert_xpath('//1', parser_type='html', validate=False)", test_script)
self.assertIn("assert_not_xpath('//2', parser_type='html', validate=False)", test_script)
self.assertIn("assert_xpath('//3', parser_type='html', validate=True)", test_script)
self.assertIn("assert_xpath('//4', parser_type='xml', validate=False)", test_script)
def test_complex_codegen(self):
    """ This test serves code review purposes, to make changes more visible """
    self.obj.engine.config.load([RESOURCES_DIR + 'apiritif/test_codegen.yml'])
    self.configure(self.obj.engine.config['execution'][0])
    self.obj.settings['verbose'] = True
    self.obj.prepare()
    expected_script = RESOURCES_DIR + 'apiritif/test_codegen.py'
    # import shutil; shutil.copy2(self.obj.script, expected_script) # keep this comment to ease updates
    self.assertFilesEqual(expected_script, self.obj.script, python_files=True)
def test_jmeter_functions_time(self):
    """__time() with and without a format string maps to apiritif.format_date()."""
    requests = [
        "/?time=${__time()}",
        "/?time=${__time(MM/dd/yy)}",
    ]
    self.configure({"execution": [{
        "test-mode": "apiritif",
        "scenario": {"default-address": "http://blazedemo.com", "requests": requests},
    }]})
    self.obj.prepare()
    with open(self.obj.script) as generated:
        content = generated.read()
    self.obj.log.info(content)
    self.assertIn("'/?time={}'.format(apiritif.format_date())", content)
    self.assertIn("'/?time={}'.format(apiritif.format_date('MM/dd/yy'))", content)
def test_jmeter_functions_random(self):
    """__Random(min, max) maps to apiritif.random_uniform()."""
    self.configure({"execution": [{
        "test-mode": "apiritif",
        "scenario": {
            "default-address": "http://blazedemo.com",
            "requests": ["/?random=${__Random(1, 10)}"],
        },
    }]})
    self.obj.prepare()
    with open(self.obj.script) as generated:
        content = generated.read()
    self.obj.log.info(content)
    self.assertIn("'/?random={}'.format(apiritif.random_uniform(1, 10))", content)
def test_jmeter_functions_random_string(self):
    """__RandomString with and without an alphabet maps to apiritif.random_string()."""
    requests = [
        "/?rs=${__RandomString(3)}",
        "/?rs=${__RandomString(4,abcdef)}",
    ]
    self.configure({"execution": [{
        "test-mode": "apiritif",
        "scenario": {"default-address": "http://blazedemo.com", "requests": requests},
    }]})
    self.obj.prepare()
    with open(self.obj.script) as generated:
        content = generated.read()
    self.obj.log.info(content)
    self.assertIn("'/?rs={}'.format(apiritif.random_string(3))", content)
    self.assertIn("'/?rs={}'.format(apiritif.random_string(4, 'abcdef'))", content)
def test_jmeter_functions_base64_encode(self):
    """__base64Encode in a header value maps to base64_encode()."""
    scenario = {
        "default-address": "http://blazedemo.com",
        "headers": {
            "Authorization": "Basic ${__base64Encode(user:pass)}",
        },
        "requests": ["/"],
    }
    self.configure({"execution": [{"test-mode": "apiritif", "scenario": scenario}]})
    self.obj.prepare()
    with open(self.obj.script) as generated:
        content = generated.read()
    self.obj.log.info(content)
    self.assertIn("base64_encode('user:pass')", content)
def test_jmeter_functions_base64_decode(self):
    """__base64Decode in a header value maps to base64_decode()."""
    scenario = {
        "default-address": "http://blazedemo.com",
        "headers": {
            "Additional": "${__base64Decode(dGVzdCBzdHJpbmc=)}",
        },
        "requests": ["/"],
    }
    self.configure({"execution": [{"test-mode": "apiritif", "scenario": scenario}]})
    self.obj.prepare()
    with open(self.obj.script) as generated:
        content = generated.read()
    self.obj.log.info(content)
    self.assertIn("base64_decode('dGVzdCBzdHJpbmc=')", content)
def test_jmeter_functions_urlencode(self):
    """__urlencode in a URL maps to encode_url()."""
    self.configure({"execution": [{
        "test-mode": "apiritif",
        "scenario": {
            "default-address": "http://blazedemo.com",
            "requests": ["/${__urlencode(Foo Bar Baz)}"],
        },
    }]})
    self.obj.prepare()
    with open(self.obj.script) as generated:
        content = generated.read()
    self.obj.log.info(content)
    self.assertIn("encode_url('Foo Bar Baz')", content)
def test_jmeter_functions_uuid(self):
    """__UUID() in a URL maps to uuid()."""
    self.configure({"execution": [{
        "test-mode": "apiritif",
        "scenario": {
            "default-address": "http://blazedemo.com",
            "requests": ["/${__UUID()}"],
        },
    }]})
    self.obj.prepare()
    with open(self.obj.script) as generated:
        content = generated.read()
    self.obj.log.info(content)
    self.assertIn("uuid()", content)
def test_load_reader(self):
    """Reader yields nothing for an empty file and aggregates registered JTL files."""
    reader = ApiritifLoadReader(self.obj.log)
    # a freshly-registered empty file produces no datapoints
    with tempfile.NamedTemporaryFile() as empty:
        reader.register_file(empty.name)
    points = list(reader.datapoints(True))
    self.assertEqual(len(points), 0)
    self.assertFalse(reader.read_records)
    jtl = RESOURCES_DIR + "jmeter/jtl/tranctl.jtl"
    reader.register_file(jtl)
    points = list(reader.datapoints(True))
    self.assertEqual(len(points), 1)
    # already-consumed data is not re-emitted on the next poll
    points = list(reader.datapoints(True))
    self.assertEqual(len(points), 0)
    reader.register_file(jtl)
    reader.register_file(jtl)
    points = list(reader.datapoints(True))
    self.assertTrue(reader.read_records)
    self.assertEqual(len(points), 1)
def test_load_reader_real2(self):
reader1 = ApiritifLoadReader(self.obj.log)
reader1.engine = EngineEmul()
reader1.register_file(RESOURCES_DIR + "jmeter/jtl/apiritif-results/apiritif-0.csv")
reader1.register_file(RESOURCES_DIR + "jmeter/jtl/apiritif-results/apiritif-1.csv")
reader2 = | |
[
"#e41a1c",
"#377eb8",
"#4daf4a",
"#984ea3",
"#ff7f00"
],
6: [
"#e41a1c",
"#377eb8",
"#4daf4a",
"#984ea3",
"#ff7f00",
"#ffff33"
],
7: [
"#e41a1c",
"#377eb8",
"#4daf4a",
"#984ea3",
"#ff7f00",
"#ffff33",
"#a65628"
],
8: [
"#e41a1c",
"#377eb8",
"#4daf4a",
"#984ea3",
"#ff7f00",
"#ffff33",
"#a65628",
"#f781bf"
],
9: [
"#e41a1c",
"#377eb8",
"#4daf4a",
"#984ea3",
"#ff7f00",
"#ffff33",
"#a65628",
"#f781bf",
"#999999"
]
},
"bokeh_Set2": {
3: [
"#66c2a5",
"#fc8d62",
"#8da0cb"
],
4: [
"#66c2a5",
"#fc8d62",
"#8da0cb",
"#e78ac3"
],
5: [
"#66c2a5",
"#fc8d62",
"#8da0cb",
"#e78ac3",
"#a6d854"
],
6: [
"#66c2a5",
"#fc8d62",
"#8da0cb",
"#e78ac3",
"#a6d854",
"#ffd92f"
],
7: [
"#66c2a5",
"#fc8d62",
"#8da0cb",
"#e78ac3",
"#a6d854",
"#ffd92f",
"#e5c494"
],
8: [
"#66c2a5",
"#fc8d62",
"#8da0cb",
"#e78ac3",
"#a6d854",
"#ffd92f",
"#e5c494",
"#b3b3b3"
]
},
"bokeh_Set3": {
3: [
"#8dd3c7",
"#ffffb3",
"#bebada"
],
4: [
"#8dd3c7",
"#ffffb3",
"#bebada",
"#fb8072"
],
5: [
"#8dd3c7",
"#ffffb3",
"#bebada",
"#fb8072",
"#80b1d3"
],
6: [
"#8dd3c7",
"#ffffb3",
"#bebada",
"#fb8072",
"#80b1d3",
"#fdb462"
],
7: [
"#8dd3c7",
"#ffffb3",
"#bebada",
"#fb8072",
"#80b1d3",
"#fdb462",
"#b3de69"
],
8: [
"#8dd3c7",
"#ffffb3",
"#bebada",
"#fb8072",
"#80b1d3",
"#fdb462",
"#b3de69",
"#fccde5"
],
9: [
"#8dd3c7",
"#ffffb3",
"#bebada",
"#fb8072",
"#80b1d3",
"#fdb462",
"#b3de69",
"#fccde5",
"#d9d9d9"
],
10: [
"#8dd3c7",
"#ffffb3",
"#bebada",
"#fb8072",
"#80b1d3",
"#fdb462",
"#b3de69",
"#fccde5",
"#d9d9d9",
"#bc80bd"
],
11: [
"#8dd3c7",
"#ffffb3",
"#bebada",
"#fb8072",
"#80b1d3",
"#fdb462",
"#b3de69",
"#fccde5",
"#d9d9d9",
"#bc80bd",
"#ccebc5"
],
12: [
"#8dd3c7",
"#ffffb3",
"#bebada",
"#fb8072",
"#80b1d3",
"#fdb462",
"#b3de69",
"#fccde5",
"#d9d9d9",
"#bc80bd",
"#ccebc5",
"#ffed6f"
]
},
"bokeh_YlGn": {
3: [
"#31a354",
"#addd8e",
"#f7fcb9"
],
4: [
"#238443",
"#78c679",
"#c2e699",
"#ffffcc"
],
5: [
"#006837",
"#31a354",
"#78c679",
"#c2e699",
"#ffffcc"
],
6: [
"#006837",
"#31a354",
"#78c679",
"#addd8e",
"#d9f0a3",
"#ffffcc"
],
7: [
"#005a32",
"#238443",
"#41ab5d",
"#78c679",
"#addd8e",
"#d9f0a3",
"#ffffcc"
],
8: [
"#005a32",
"#238443",
"#41ab5d",
"#78c679",
"#addd8e",
"#d9f0a3",
"#f7fcb9",
"#ffffe5"
],
9: [
"#004529",
"#006837",
"#238443",
"#41ab5d",
"#78c679",
"#addd8e",
"#d9f0a3",
"#f7fcb9",
"#ffffe5"
]
},
"bokeh_YlGnBu": {
3: [
"#2c7fb8",
"#7fcdbb",
"#edf8b1"
],
4: [
"#225ea8",
"#41b6c4",
"#a1dab4",
"#ffffcc"
],
5: [
"#253494",
"#2c7fb8",
"#41b6c4",
"#a1dab4",
"#ffffcc"
],
6: [
"#253494",
"#2c7fb8",
"#41b6c4",
"#7fcdbb",
"#c7e9b4",
"#ffffcc"
],
7: [
"#0c2c84",
"#225ea8",
"#1d91c0",
"#41b6c4",
"#7fcdbb",
"#c7e9b4",
"#ffffcc"
],
8: [
"#0c2c84",
"#225ea8",
"#1d91c0",
"#41b6c4",
"#7fcdbb",
"#c7e9b4",
"#edf8b1",
"#ffffd9"
],
9: [
"#081d58",
"#253494",
"#225ea8",
"#1d91c0",
"#41b6c4",
"#7fcdbb",
"#c7e9b4",
"#edf8b1",
"#ffffd9"
]
},
"bokeh_GnBu": {
3: [
"#43a2ca",
"#a8ddb5",
"#e0f3db"
],
4: [
"#2b8cbe",
"#7bccc4",
"#bae4bc",
"#f0f9e8"
],
5: [
"#0868ac",
"#43a2ca",
"#7bccc4",
"#bae4bc",
"#f0f9e8"
],
6: [
"#0868ac",
"#43a2ca",
"#7bccc4",
"#a8ddb5",
"#ccebc5",
"#f0f9e8"
],
7: [
"#08589e",
"#2b8cbe",
"#4eb3d3",
"#7bccc4",
"#a8ddb5",
"#ccebc5",
"#f0f9e8"
],
8: [
"#08589e",
"#2b8cbe",
"#4eb3d3",
"#7bccc4",
"#a8ddb5",
"#ccebc5",
"#e0f3db",
"#f7fcf0"
],
9: [
"#084081",
"#0868ac",
"#2b8cbe",
"#4eb3d3",
"#7bccc4",
"#a8ddb5",
"#ccebc5",
"#e0f3db",
"#f7fcf0"
]
},
"bokeh_BuGn": {
3: [
"#2ca25f",
"#99d8c9",
"#e5f5f9"
],
4: [
"#238b45",
"#66c2a4",
"#b2e2e2",
"#edf8fb"
],
5: [
"#006d2c",
"#2ca25f",
"#66c2a4",
"#b2e2e2",
"#edf8fb"
],
6: [
"#006d2c",
"#2ca25f",
"#66c2a4",
"#99d8c9",
"#ccece6",
"#edf8fb"
],
7: [
"#005824",
"#238b45",
"#41ae76",
"#66c2a4",
"#99d8c9",
"#ccece6",
"#edf8fb"
],
8: [
"#005824",
"#238b45",
"#41ae76",
"#66c2a4",
"#99d8c9",
"#ccece6",
"#e5f5f9",
"#f7fcfd"
],
9: [
"#00441b",
"#006d2c",
"#238b45",
"#41ae76",
"#66c2a4",
"#99d8c9",
"#ccece6",
"#e5f5f9",
"#f7fcfd"
]
},
"bokeh_PuBuGn": {
3: [
"#1c9099",
"#a6bddb",
"#ece2f0"
],
4: [
"#02818a",
"#67a9cf",
"#bdc9e1",
"#f6eff7"
],
5: [
"#016c59",
"#1c9099",
"#67a9cf",
"#bdc9e1",
"#f6eff7"
],
6: [
"#016c59",
"#1c9099",
"#67a9cf",
"#a6bddb",
"#d0d1e6",
"#f6eff7"
],
7: [
"#016450",
"#02818a",
"#3690c0",
"#67a9cf",
"#a6bddb",
"#d0d1e6",
"#f6eff7"
],
8: [
"#016450",
"#02818a",
"#3690c0",
"#67a9cf",
"#a6bddb",
"#d0d1e6",
"#ece2f0",
"#fff7fb"
],
9: [
"#014636",
"#016c59",
"#02818a",
"#3690c0",
"#67a9cf",
"#a6bddb",
"#d0d1e6",
"#ece2f0",
"#fff7fb"
]
},
"bokeh_PuBu": {
3: [
"#2b8cbe",
"#a6bddb",
"#ece7f2"
],
4: [
"#0570b0",
"#74a9cf",
"#bdc9e1",
"#f1eef6"
],
5: [
"#045a8d",
"#2b8cbe",
"#74a9cf",
"#bdc9e1",
"#f1eef6"
],
6: [
"#045a8d",
"#2b8cbe",
"#74a9cf",
"#a6bddb",
"#d0d1e6",
"#f1eef6"
],
7: [
"#034e7b",
"#0570b0",
"#3690c0",
"#74a9cf",
"#a6bddb",
"#d0d1e6",
"#f1eef6"
],
8: [
"#034e7b",
"#0570b0",
"#3690c0",
"#74a9cf",
"#a6bddb",
"#d0d1e6",
"#ece7f2",
"#fff7fb"
],
9: [
"#023858",
"#045a8d",
"#0570b0",
"#3690c0",
"#74a9cf",
"#a6bddb",
"#d0d1e6",
"#ece7f2",
"#fff7fb"
]
},
"bokeh_BuPu": {
3: [
"#8856a7",
"#9ebcda",
"#e0ecf4"
],
4: [
"#88419d",
"#8c96c6",
"#b3cde3",
"#edf8fb"
],
5: [
"#810f7c",
"#8856a7",
"#8c96c6",
"#b3cde3",
"#edf8fb"
],
6: [
"#810f7c",
"#8856a7",
"#8c96c6",
"#9ebcda",
"#bfd3e6",
"#edf8fb"
],
7: [
"#6e016b",
"#88419d",
"#8c6bb1",
"#8c96c6",
"#9ebcda",
"#bfd3e6",
"#edf8fb"
],
8: [
"#6e016b",
"#88419d",
"#8c6bb1",
"#8c96c6",
"#9ebcda",
"#bfd3e6",
"#e0ecf4",
"#f7fcfd"
],
9: [
"#4d004b",
"#810f7c",
"#88419d",
"#8c6bb1",
"#8c96c6",
"#9ebcda",
"#bfd3e6",
"#e0ecf4",
"#f7fcfd"
]
},
"bokeh_RdPu": {
3: [
"#c51b8a",
"#fa9fb5",
"#fde0dd"
],
4: [
"#ae017e",
"#f768a1",
"#fbb4b9",
"#feebe2"
],
5: [
"#7a0177",
"#c51b8a",
"#f768a1",
"#fbb4b9",
"#feebe2"
],
6: [
"#7a0177",
"#c51b8a",
"#f768a1",
"#fa9fb5",
"#fcc5c0",
"#feebe2"
],
7: [
"#7a0177",
"#ae017e",
"#dd3497",
"#f768a1",
"#fa9fb5",
"#fcc5c0",
"#feebe2"
],
8: [
"#7a0177",
"#ae017e",
"#dd3497",
"#f768a1",
"#fa9fb5",
"#fcc5c0",
"#fde0dd",
"#fff7f3"
],
9: [
"#49006a",
"#7a0177",
"#ae017e",
"#dd3497",
"#f768a1",
"#fa9fb5",
"#fcc5c0",
"#fde0dd",
"#fff7f3"
]
},
"bokeh_PuRd": {
3: [
"#dd1c77",
"#c994c7",
"#e7e1ef"
],
4: [
"#ce1256",
"#df65b0",
"#d7b5d8",
"#f1eef6"
],
5: [
"#980043",
"#dd1c77",
"#df65b0",
"#d7b5d8",
"#f1eef6"
],
6: [
"#980043",
"#dd1c77",
"#df65b0",
"#c994c7",
"#d4b9da",
"#f1eef6"
],
7: [
"#91003f",
"#ce1256",
"#e7298a",
"#df65b0",
"#c994c7",
"#d4b9da",
"#f1eef6"
],
8: [
"#91003f",
"#ce1256",
"#e7298a",
"#df65b0",
"#c994c7",
"#d4b9da",
"#e7e1ef",
"#f7f4f9"
],
9: [
"#67001f",
"#980043",
"#ce1256",
"#e7298a",
"#df65b0",
"#c994c7",
"#d4b9da",
"#e7e1ef",
"#f7f4f9"
]
},
"bokeh_OrRd": {
3: [
"#e34a33",
"#fdbb84",
"#fee8c8"
],
4: [
"#d7301f",
"#fc8d59",
"#fdcc8a",
"#fef0d9"
],
5: [
"#b30000",
"#e34a33",
"#fc8d59",
"#fdcc8a",
"#fef0d9"
],
6: [
"#b30000",
"#e34a33",
"#fc8d59",
"#fdbb84",
"#fdd49e",
"#fef0d9"
],
7: [
"#990000",
"#d7301f",
"#ef6548",
"#fc8d59",
"#fdbb84",
"#fdd49e",
"#fef0d9"
],
8: [
"#990000",
"#d7301f",
"#ef6548",
"#fc8d59",
"#fdbb84",
"#fdd49e",
"#fee8c8",
"#fff7ec"
],
9: [
"#7f0000",
"#b30000",
"#d7301f",
"#ef6548",
"#fc8d59",
"#fdbb84",
"#fdd49e",
"#fee8c8",
"#fff7ec"
]
},
"bokeh_YlOrRd": {
3: [
"#f03b20",
"#feb24c",
"#ffeda0"
],
4: [
"#e31a1c",
"#fd8d3c",
"#fecc5c",
"#ffffb2"
],
5: [
"#bd0026",
"#f03b20",
"#fd8d3c",
"#fecc5c",
"#ffffb2"
],
6: [
"#bd0026",
"#f03b20",
"#fd8d3c",
"#feb24c",
"#fed976",
"#ffffb2"
],
7: [
"#b10026",
"#e31a1c",
"#fc4e2a",
"#fd8d3c",
"#feb24c",
"#fed976",
"#ffffb2"
],
8: [
"#b10026",
"#e31a1c",
"#fc4e2a",
"#fd8d3c",
"#feb24c",
"#fed976",
"#ffeda0",
"#ffffcc"
],
9: [
"#800026",
"#bd0026",
"#e31a1c",
"#fc4e2a",
"#fd8d3c",
"#feb24c",
"#fed976",
"#ffeda0",
"#ffffcc"
]
},
"bokeh_YlOrBr": {
3: [
"#d95f0e",
"#fec44f",
"#fff7bc"
],
4: [
"#cc4c02",
"#fe9929",
"#fed98e",
"#ffffd4"
],
5: [
"#993404",
"#d95f0e",
"#fe9929",
"#fed98e",
"#ffffd4"
],
6: [
"#993404",
"#d95f0e",
"#fe9929",
"#fec44f",
"#fee391",
"#ffffd4"
],
7: [
"#8c2d04",
"#cc4c02",
"#ec7014",
"#fe9929",
"#fec44f",
"#fee391",
"#ffffd4"
],
8: [
"#8c2d04",
"#cc4c02",
"#ec7014",
"#fe9929",
"#fec44f",
"#fee391",
"#fff7bc",
"#ffffe5"
],
9: [
"#662506",
"#993404",
"#cc4c02",
"#ec7014",
"#fe9929",
"#fec44f",
"#fee391",
"#fff7bc",
"#ffffe5"
]
},
"bokeh_Purples": {
3: [
"#756bb1",
"#bcbddc",
"#efedf5"
],
4: [
"#6a51a3",
"#9e9ac8",
"#cbc9e2",
"#f2f0f7"
],
5: [
"#54278f",
"#756bb1",
"#9e9ac8",
"#cbc9e2",
"#f2f0f7"
],
6: [
"#54278f",
"#756bb1",
"#9e9ac8",
"#bcbddc",
"#dadaeb",
"#f2f0f7"
],
7: [
"#4a1486",
"#6a51a3",
"#807dba",
"#9e9ac8",
"#bcbddc",
"#dadaeb",
"#f2f0f7"
],
8: [
"#4a1486",
"#6a51a3",
"#807dba",
"#9e9ac8",
"#bcbddc",
"#dadaeb",
"#efedf5",
"#fcfbfd"
],
9: [
"#3f007d",
"#54278f",
"#6a51a3",
"#807dba",
"#9e9ac8",
"#bcbddc",
"#dadaeb",
"#efedf5",
"#fcfbfd"
]
},
"bokeh_Blues": {
3: [
"#3182bd",
"#9ecae1",
"#deebf7"
],
4: [
"#2171b5",
"#6baed6",
"#bdd7e7",
"#eff3ff"
],
5: [
"#08519c",
"#3182bd",
"#6baed6",
"#bdd7e7",
"#eff3ff"
],
6: [
"#08519c",
"#3182bd",
"#6baed6",
"#9ecae1",
"#c6dbef",
"#eff3ff"
],
7: [
"#084594",
"#2171b5",
"#4292c6",
"#6baed6",
"#9ecae1",
"#c6dbef",
"#eff3ff"
],
8: [
"#084594",
"#2171b5",
"#4292c6",
"#6baed6",
"#9ecae1",
"#c6dbef",
"#deebf7",
"#f7fbff"
],
9: [
"#08306b",
"#08519c",
"#2171b5",
"#4292c6",
"#6baed6",
"#9ecae1",
"#c6dbef",
"#deebf7",
"#f7fbff"
]
},
"bokeh_Greens": {
3: [
"#31a354",
"#a1d99b",
"#e5f5e0"
],
4: [
"#238b45",
"#74c476",
"#bae4b3",
"#edf8e9"
],
5: [
"#006d2c",
"#31a354",
"#74c476",
"#bae4b3",
"#edf8e9"
],
6: [
"#006d2c",
"#31a354",
"#74c476",
"#a1d99b",
"#c7e9c0",
"#edf8e9"
],
7: [
"#005a32",
"#238b45",
"#41ab5d",
"#74c476",
"#a1d99b",
"#c7e9c0",
"#edf8e9"
],
8: [
"#005a32",
"#238b45",
"#41ab5d",
"#74c476",
"#a1d99b",
"#c7e9c0",
"#e5f5e0",
"#f7fcf5"
],
9: [
"#00441b",
"#006d2c",
"#238b45",
"#41ab5d",
"#74c476",
"#a1d99b",
"#c7e9c0",
"#e5f5e0",
"#f7fcf5"
]
},
"bokeh_Oranges": {
3: [
"#e6550d",
"#fdae6b",
"#fee6ce"
],
4: [
"#d94701",
"#fd8d3c",
"#fdbe85",
"#feedde"
],
5: [
"#a63603",
"#e6550d",
"#fd8d3c",
"#fdbe85",
"#feedde"
],
6: [
"#a63603",
"#e6550d",
"#fd8d3c",
"#fdae6b",
"#fdd0a2",
"#feedde"
],
7: [
"#8c2d04",
"#d94801",
"#f16913",
"#fd8d3c",
"#fdae6b",
"#fdd0a2",
"#feedde"
],
8: [
"#8c2d04",
"#d94801",
"#f16913",
"#fd8d3c",
"#fdae6b",
"#fdd0a2",
"#fee6ce",
"#fff5eb"
],
9: [
"#7f2704",
"#a63603",
"#d94801",
"#f16913",
"#fd8d3c",
"#fdae6b",
"#fdd0a2",
"#fee6ce",
"#fff5eb"
]
},
"bokeh_Reds": {
3: [
"#de2d26",
"#fc9272",
"#fee0d2"
],
4: [
"#cb181d",
"#fb6a4a",
| |
sum
table3['listbar'] = 'bar' # default aggregation is list
table3['bars'] = 'bar', strjoin(', ')
ieq(expect2, table3)
def test_aggregate_empty():
    """Aggregating an empty table yields only the output header row."""
    empty = (('foo', 'bar'),)
    aggs = OrderedDict([('minbar', ('bar', min)),
                        ('maxbar', ('bar', max)),
                        ('sumbar', ('bar', sum))])
    result = aggregate(empty, 'foo', aggs)
    ieq((('key', 'minbar', 'maxbar', 'sumbar'),), result)
def test_rangeaggregate_simple():
    """Bin numeric values into fixed-width ranges and aggregate per bin."""
    source = (('foo', 'bar'),
              ('a', 3),
              ('a', 7),
              ('b', 2),
              ('b', 1),
              ('b', 9),
              ('c', 4),
              ('d', 3))
    # simplest signature - aggregate whole rows
    counted = rangeaggregate(source, 'bar', 2, len)
    want_counts = (('key', 'value'),
                   ((1, 3), 2),
                   ((3, 5), 3),
                   ((5, 7), 0),
                   ((7, 9), 1),
                   ((9, 11), 1))
    ieq(want_counts, counted)
    ieq(want_counts, counted)  # verify we can iterate twice
    # next simplest signature - aggregate a single field
    listed = rangeaggregate(source, 'bar', 2, list, 'foo')
    want_lists = (('key', 'value'),
                  ((1, 3), ['b', 'b']),
                  ((3, 5), ['a', 'd', 'c']),
                  ((5, 7), []),
                  ((7, 9), ['a']),
                  ((9, 11), ['b']))
    ieq(want_lists, listed)
    # keyword-argument spelling of the same aggregation
    listed_kw = rangeaggregate(source, key='bar', width=2, aggregation=list, value='foo')
    ieq(want_lists, listed_kw)
def test_rangeaggregate_minmax():
    """minv/maxv control the first bin start and clip or open the last bin."""
    source = (('foo', 'bar'),
              ('a', 3),
              ('a', 7),
              ('b', 2),
              ('b', 1),
              ('b', 9),
              ('c', 4),
              ('d', 3))
    # explicit minimum value for the first bin
    from_zero = rangeaggregate(source, 'bar', 2, len, minv=0)
    ieq((('key', 'value'),
         ((0, 2), 1),
         ((2, 4), 3),
         ((4, 6), 1),
         ((6, 8), 1),
         ((8, 10), 1)), from_zero)
    # both min and max values given
    clipped = rangeaggregate(source, 'bar', 2, len, minv=2, maxv=6)
    ieq((('key', 'value'),
         ((2, 4), 3),
         ((4, 6), 1)), clipped)
    # the last bin stays open when maxv is specified
    open_end = rangeaggregate(source, 'bar', 2, len, maxv=9)
    ieq((('key', 'value'),
         ((1, 3), 2),
         ((3, 5), 3),
         ((5, 7), 0),
         ((7, 9), 2)), open_end)
    # bins beyond the data are emitted empty when maxv is large
    beyond = rangeaggregate(source, 'bar', 2, len, minv=10, maxv=14)
    ieq((('key', 'value'),
         ((10, 12), 0),
         ((12, 14), 0)), beyond)
def test_rangeaggregate_empty():
    """Range aggregation over an empty table: header only, unless bounds force bins."""
    empty = (('foo', 'bar'),)
    header_only = (('key', 'value'),)
    ieq(header_only, rangeaggregate(empty, 'bar', 2, len))
    ieq(header_only, rangeaggregate(empty, 'bar', 2, len, minv=0))
    # explicit bounds still produce (empty) bins
    bounded = rangeaggregate(empty, 'bar', 2, len, minv=0, maxv=4)
    ieq((('key', 'value'),
         ((0, 2), 0),
         ((2, 4), 0)), bounded)
def test_rangeaggregate_multifield():
    """Multiple aggregations per bin via dict arg, suffix notation and list arg."""
    source = (('foo', 'bar'),
              ('a', 3),
              ('a', 7),
              ('b', 2),
              ('b', 1),
              ('b', 9),
              ('c', 4),
              ('d', 3))
    want = (('key', 'foocount', 'foojoin', 'foolist'),
            ((1, 3), 2, 'bb', ['b', 'b']),
            ((3, 5), 3, 'adc', ['a', 'd', 'c']),
            ((5, 7), 0, '', []),
            ((7, 9), 1, 'a', ['a']),
            ((9, 11), 1, 'b', ['b']))
    # dict arg
    aggs = OrderedDict()
    aggs['foocount'] = len
    aggs['foojoin'] = 'foo', strjoin('')
    aggs['foolist'] = 'foo'  # default is list
    ieq(want, rangeaggregate(source, 'bar', 2, aggs))
    # suffix notation
    suffixed = rangeaggregate(source, 'bar', 2)
    suffixed['foocount'] = len
    suffixed['foojoin'] = 'foo', strjoin('')
    suffixed['foolist'] = 'foo'  # default is list
    ieq(want, suffixed)
    # list arg
    pairs = [('foocount', len),
             ('foojoin', 'foo', strjoin('')),
             ('foolist', 'foo', list)]
    ieq(want, rangeaggregate(source, 'bar', 2, pairs))
def test_rangeaggregate_multifield_2():
    """Suffix-notation range aggregation with two output fields."""
    source = (('foo', 'bar'),
              ('aa', 3),
              ('aa', 7),
              ('bb', 2),
              ('bb', 1),
              ('bb', 9),
              ('cc', 4),
              ('dd', 3))
    result = rangeaggregate(source, 'bar', 2)
    result['foocount'] = len
    result['foolist'] = 'foo'  # default is list
    want = (('key', 'foocount', 'foolist'),
            ((1, 3), 2, ['bb', 'bb']),
            ((3, 5), 3, ['aa', 'dd', 'cc']),
            ((5, 7), 0, []),
            ((7, 9), 1, ['aa']),
            ((9, 11), 1, ['bb']))
    ieq(want, result)
def test_rangecounts():
    """rangecounts tallies rows per fixed-width bin, honouring minv/maxv."""
    source = (('foo', 'bar'),
              ('a', 3),
              ('a', 7),
              ('b', 2),
              ('b', 1),
              ('b', 9),
              ('c', 4),
              ('d', 3))
    plain = rangecounts(source, 'bar', width=2)
    want_plain = (('key', 'value'),
                  ((1, 3), 2),
                  ((3, 5), 3),
                  ((5, 7), 0),
                  ((7, 9), 1),
                  ((9, 11), 1))
    ieq(want_plain, plain)
    ieq(want_plain, plain)  # can iterate twice
    from_zero = rangecounts(source, 'bar', width=2, minv=0)
    ieq((('key', 'value'),
         ((0, 2), 1),
         ((2, 4), 3),
         ((4, 6), 1),
         ((6, 8), 1),
         ((8, 10), 1)), from_zero)
    clipped = rangecounts(source, 'bar', width=2, minv=2, maxv=6)
    ieq((('key', 'value'),
         ((2, 4), 3),
         ((4, 6), 1)), clipped)
    # N.B., last bin is open if maxv is specified
    open_end = rangecounts(source, 'bar', width=2, maxv=9)
    ieq((('key', 'value'),
         ((1, 3), 2),
         ((3, 5), 3),
         ((5, 7), 0),
         ((7, 9), 2)), open_end)
def test_rowmap():
    """rowmap applies a row-transforming function; rows too short for it are dropped."""
    people = (('id', 'sex', 'age', 'height', 'weight'),
              (1, 'male', 16, 1.45, 62.0),
              (2, 'female', 19, 1.34, 55.4),
              (3, 'female', 17, 1.78, 74.4),
              (4, 'male', 21, 1.33, 45.2),
              (5, '-', 25, 1.65, 51.9))

    def mapper(row):
        genders = {'male': 'M', 'female': 'F'}
        return [row[0],
                genders.get(row[1], row[1]),
                row[2] * 12,
                row[4] / row[3] ** 2]

    mapped = rowmap(people, mapper, fields=['subject_id', 'gender', 'age_months', 'bmi'])
    want = (('subject_id', 'gender', 'age_months', 'bmi'),
            (1, 'M', 16*12, 62.0/1.45**2),
            (2, 'F', 19*12, 55.4/1.34**2),
            (3, 'F', 17*12, 74.4/1.78**2),
            (4, 'M', 21*12, 45.2/1.33**2),
            (5, '-', 25*12, 51.9/1.65**2))
    ieq(want, mapped)
    ieq(want, mapped)  # can iteratate twice?
    # a short row (missing weight) does not appear in the output
    truncated = (('id', 'sex', 'age', 'height', 'weight'),
                 (1, 'male', 16, 1.45, 62.0),
                 (2, 'female', 19, 1.34, 55.4),
                 (3, 'female', 17, 1.78, 74.4),
                 (4, 'male', 21, 1.33, 45.2),
                 (5, '-', 25, 1.65))
    want_short = (('subject_id', 'gender', 'age_months', 'bmi'),
                  (1, 'M', 16*12, 62.0/1.45**2),
                  (2, 'F', 19*12, 55.4/1.34**2),
                  (3, 'F', 17*12, 74.4/1.78**2),
                  (4, 'M', 21*12, 45.2/1.33**2))
    ieq(want_short, rowmap(truncated, mapper, fields=['subject_id', 'gender', 'age_months', 'bmi']))
def test_rowmap_empty():
    """rowmap over an empty table produces only the new header."""
    empty = (('id', 'sex', 'age', 'height', 'weight'),)

    def mapper(row):
        genders = {'male': 'M', 'female': 'F'}
        return [row[0],
                genders.get(row[1], row[1]),
                row[2] * 12,
                row[4] / row[3] ** 2]

    actual = rowmap(empty, mapper, fields=['subject_id', 'gender', 'age_months', 'bmi'])
    ieq((('subject_id', 'gender', 'age_months', 'bmi'),), actual)
def test_recordmap():
    """recordmap applies a record-style (key-access) mapper; short rows are dropped."""
    people = (('id', 'sex', 'age', 'height', 'weight'),
              (1, 'male', 16, 1.45, 62.0),
              (2, 'female', 19, 1.34, 55.4),
              (3, 'female', 17, 1.78, 74.4),
              (4, 'male', 21, 1.33, 45.2),
              (5, '-', 25, 1.65, 51.9))

    def mapper(rec):
        genders = {'male': 'M', 'female': 'F'}
        return [rec['id'],
                genders.get(rec['sex'], rec['sex']),
                rec['age'] * 12,
                rec['weight'] / rec['height'] ** 2]

    # NOTE(review): rowmap is deliberately used with a record-style mapper here;
    # presumably petl rows also support key access - confirm against petl docs
    actual = rowmap(people, mapper, fields=['subject_id', 'gender', 'age_months', 'bmi'])
    want = (('subject_id', 'gender', 'age_months', 'bmi'),
            (1, 'M', 16*12, 62.0/1.45**2),
            (2, 'F', 19*12, 55.4/1.34**2),
            (3, 'F', 17*12, 74.4/1.78**2),
            (4, 'M', 21*12, 45.2/1.33**2),
            (5, '-', 25*12, 51.9/1.65**2))
    ieq(want, actual)
    ieq(want, actual)  # can iteratate twice?
    # test short rows
    truncated = (('id', 'sex', 'age', 'height', 'weight'),
                 (1, 'male', 16, 1.45, 62.0),
                 (2, 'female', 19, 1.34, 55.4),
                 (3, 'female', 17, 1.78, 74.4),
                 (4, 'male', 21, 1.33, 45.2),
                 (5, '-', 25, 1.65))
    want_short = (('subject_id', 'gender', 'age_months', 'bmi'),
                  (1, 'M', 16*12, 62.0/1.45**2),
                  (2, 'F', 19*12, 55.4/1.34**2),
                  (3, 'F', 17*12, 74.4/1.78**2),
                  (4, 'M', 21*12, 45.2/1.33**2))
    ieq(want_short, recordmap(truncated, mapper, fields=['subject_id', 'gender', 'age_months', 'bmi']))
def test_rowmapmany():
    """rowmapmany fans each row out into several (id, variable, value) rows;
    rows too short for a yield stop producing further values."""
    people = (('id', 'sex', 'age', 'height', 'weight'),
              (1, 'male', 16, 1.45, 62.0),
              (2, 'female', 19, 1.34, 55.4),
              (3, '-', 17, 1.78, 74.4),
              (4, 'male', 21, 1.33))

    def generate(row):
        genders = {'male': 'M', 'female': 'F'}
        yield [row[0], 'gender', genders.get(row[1], row[1])]
        yield [row[0], 'age_months', row[2] * 12]
        yield [row[0], 'bmi', row[4] / row[3] ** 2]

    actual = rowmapmany(people, generate, fields=['subject_id', 'variable', 'value'])
    want = (('subject_id', 'variable', 'value'),
            (1, 'gender', 'M'),
            (1, 'age_months', 16*12),
            (1, 'bmi', 62.0/1.45**2),
            (2, 'gender', 'F'),
            (2, 'age_months', 19*12),
            (2, 'bmi', 55.4/1.34**2),
            (3, 'gender', '-'),
            (3, 'age_months', 17*12),
            (3, 'bmi', 74.4/1.78**2),
            (4, 'gender', 'M'),
            (4, 'age_months', 21*12))
    ieq(want, actual)
    ieq(want, actual)  # can iteratate twice?
def test_recordmapmany():
table = (('id', 'sex', 'age', 'height', 'weight'),
(1, 'male', 16, 1.45, 62.0),
(2, 'female', 19, 1.34, 55.4),
(3, '-', 17, 1.78, 74.4),
(4, 'male', 21, 1.33))
def rowgenerator(rec):
transmf = {'male': 'M', 'female': 'F'}
yield [rec['id'], 'gender', transmf[rec['sex']] if rec['sex'] in transmf else rec['sex']]
yield [rec['id'], 'age_months', rec['age'] * 12]
yield [rec['id'], 'bmi', rec['weight'] / rec['height'] ** 2]
actual = rowmapmany(table, rowgenerator, fields=['subject_id', 'variable', 'value'])
expect = (('subject_id', 'variable', 'value'),
(1, 'gender', 'M'),
(1, | |
# exclude multi hits
exd=0 # exclude duplicates
conc=0 # if paired-end reads are present, use only concordant pairs
mq=0 # take mapping quality into account
rmnuc=0 # trim bases upstream/downstream of each read; tied to rmp and rmpv
blatr=0 # apply the BLAT correction
blatfolder=''
rmsh=0 # remove substitutions in homopolymeric stretches of length >= homo
vnuc=3 # minimum number of bases supporting the variation
mmf=0.1 # minimum frequency of the variation
exms=0 # exclude multiple substitutions
exss=0 # exclude intronic positions within nss nucleotides of splice sites
nss=4 # intronic bases to explore for each splice site
splicefile='' #'splicesites.hg18.sorted.txt'
ftail='l' # pvalue tail
custsub=0 # use custom distribution
custfile='' # custom distribution file
sigsites=0 # select significant sites
test = 'bh' # select statistical test
usubs=[x+y for x in 'ACGT' for y in 'ACGT' if x!=y] # use these substitutions [default all]
sval=0.05 # significant value
annfile='' # use annotation file for strand correction and features
sortann=0 # sort annotation file
uann=0 # use annotation
exfile='' # use annotations to exclude positions
expos=0 # set to 1 when the -K exclusion file exists
unchange1=1 # strand-inference flag, driven by the -s option (values 0/1/2/12)
unchange2=0 # strand-inference flag, driven by the -s option (values 0/1/2/12)
noheader = 0 # set by -H; presumably suppresses the output header - confirm downstream
# Parse getopt-style (option, argument) pairs; each branch overrides one of
# the defaults declared above.  Unknown options are a hard failure.
for o, a in opts:
    if o == "-h":
        usage()
        sys.exit()
    elif o == "-H": noheader=1
    elif o == "-i": bamfile=a
    elif o == "-f": fastafile=a
    elif o == "-b":
        custfile=a
        custsub=1
    #elif o == "-G":
    # getstrand=0
    # annfile=a
    elif o == "-k":
        # -k accepts either a file of region names (one per line) or a
        # comma-separated list
        if os.path.exists(a):
            f=open(a)
            nochrs=[x.strip() for x in f if x.strip()!='']
            f.close()
        else: nochrs=[x for x in a.split(',') if x.strip()!='']
    elif o == "-t": NCPU=int(a)
    elif o == "-F": infolder=a
    elif o == "-o": outfolder_=a
    elif o == "-c": MINCOV=int(a)
    # elif o == "-Q": QVAL=int(a)
    elif o == "-q": MQUAL=int(a)
    elif o == "-m": MAPQ=int(a)
    elif o == "-O": homo=int(a)
    elif o == "-V": sval=float(a)
    elif o == "-x": strconf=float(a)
    elif o == "-g":
        if a=='2': useconf=1
    elif o == "-s":
        # 1 / 0 / 2 / 12 select which mate(s) keep their original strand
        getstrand=1
        if int(a)==1: unchange1,unchange2=1,0
        elif int(a)==0: unchange1,unchange2=0,0
        elif int(a)==2: unchange1,unchange2=0,1
        elif int(a)==12: unchange1,unchange2=1,1
    elif o == "-U": usubs=[x.upper() for x in a.split(',') if x.strip()!='']
    elif o == "-e": exh=1
    elif o == "-l": sigsites=1
    elif o == "-d": exd=1
    elif o == "-p": conc=1
    elif o == "-I": sortbam=1
    elif o == "-X": sortann=1
    elif o == "-w": test=a.lower()
    elif o == "-u": mq=1
    elif o == "-T":
        # expects an "up-down" pair like "5-5"; malformed input disables trimming
        rmpv = a
        try:
            rmp = [int(x) for x in rmpv.split('-')]
            rmnuc=1
        except: rmnuc=0
    elif o == "-B":
        blatfolder=a
        if os.path.exists(blatfolder): blatr=1
    elif o == "-S": corrstr=1
    elif o == "-W": rmsh=1
    elif o == "-a": ftail=a
    elif o == "-v": vnuc=int(a)
    elif o == "-n": mmf=float(a)
    elif o == "-E": exms=1
    elif o == "-P":
        splicefile=a
        if os.path.exists(splicefile): exss=1
    elif o == "-K":
        exfile=a
        if os.path.exists(exfile): expos=1
    elif o == "-r": nss=int(a)
    elif o == "-G":
        annfile=a
        uann=1
    else:
        print o
        assert False, "Unhandled Option"
#######
# Build the run-parameter log.  Each line mirrors one command-line option.
# Fixes: three labels were wrong (sigsites is set by -l, not -g; the test
# is set by -w, not -x; rmsh is set by -W, not -S) and two typos are
# corrected ("homoplymeric", "concardant").
# NOTE(review): the -T line reads rmp[0]/rmp[1]; rmp must have a default
# defined earlier when -T is absent - confirm above this chunk.
commandLine=' '.join(sys.argv[1:])
script_time=time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))
params=[]
#Input parameters
params.append('REDItoolDenovo version %s\n' %(version))
params.append('User command line: %s\n' %(commandLine))
params.append('Analysis ID: %s\n' %(pid))
params.append('Analysis time: %s\n' %(script_time))
params.append('-i --> BAM file: %s\n' %(bamfile))
params.append('-f --> Reference file: %s\n' %(fastafile))
params.append('-I --> Sort input BAM file: %i\n' %(sortbam))
params.append('-k --> Regions to exclude: %s\n' %(','.join(nochrs)))
params.append('-t --> Number of working threads: %i\n' %(NCPU))
params.append('-o --> Output folder: %s\n' %(outfolder_))
params.append('-F --> Infolder folder: %s\n' %(infolder))
params.append('-b --> Use input distribution file: %i - %s\n' %(custsub,custfile))
params.append('-a --> Fisher tail: %s\n' %(ftail))
params.append('-c --> Min. per base coverage: %i\n' %(MINCOV))
#params.append('-Q --> FastQ offset value: %i\n' %(QVAL))
params.append('-q --> Min. per base quality: %i\n' %(MQUAL))
params.append('-m --> Min. mapping quality: %i\n' %(MAPQ))
params.append('-O --> Min. homopolymeric length: %i\n' %(homo))
params.append('-s --> Infer strand: %i - %i-%i\n' %(getstrand,unchange1,unchange2))
params.append('-g --> Use confidence: %i\n' %(useconf))
params.append('-x --> Strand confidence: %.2f\n' %(strconf))
params.append('-S --> Strand correction : %i\n' %(corrstr))
params.append('-G --> GFF annotation to infer strand: %s\n' %(annfile))
params.append('-X --> Sort annotation files: %i\n' %(sortann))
params.append('-K --> File with positions to exclude: %s\n' %(exfile))
params.append('-e --> Exclude multi hits: %i\n' %(exh))
params.append('-d --> Exclude duplicates: %i\n' %(exd))
params.append('-l --> Select significant sites: %i\n' %(sigsites))
params.append('-V --> Significant value: %.2f\n' %(sval))
params.append('-w --> Statistical test: %s\n' %(test))
params.append('-U --> Use specific substitutions: %s\n' %(','.join(usubs)))
params.append('-p --> Use paired concordant reads only: %i\n' %(conc))
params.append('-u --> Consider mapping quality: %i\n' %(mq))
params.append('-T --> Trim x bases up and y bases down per read: %i - %i-%i\n' %(rmnuc,rmp[0],rmp[1]))
params.append('-B --> Blat folder for correction: %s\n' %(blatfolder))
params.append('-W --> Remove substitutions in homopolymeric regions: %i\n' %(rmsh))
params.append('-v --> Min. num. of reads supporting the variation: %i\n' %(vnuc))
params.append('-n --> Min. editing frequency: %.2f\n' %(mmf))
params.append('-E --> Exclude positions with multiple changes: %i\n' %(exms))
params.append('-P --> File containing splice sites annotations: %s\n' %(splicefile))
params.append('-r --> Num. of bases near splice sites to explore: %i\n' %(nss))
#######
def pid_exists(pid):
    """Check whether pid exists in the current process table.

    Signal 0 probes the process without affecting it.  EPERM means the
    process exists but belongs to another user; any other OSError (ESRCH)
    means it does not.  Negative pids are rejected outright.

    Fix: 'except OSError, e' (Python-2-only syntax) replaced with
    'except OSError as e', which is valid on Python 2.6+ and Python 3.
    """
    if pid < 0:
        return False
    try:
        os.kill(pid, 0)
    except OSError as e:
        return e.errno == errno.EPERM
    else:
        return True
def get_no(pvalue, siglevel, ngenes):  # No Correction
    """Select entries without multiple-testing correction.

    pvalue: list of tuples whose first element is the p-value.
    Returns (kept entries in original order, their count, siglevel).
    ngenes is accepted for signature parity with get_b/get_bh but unused.
    """
    kept = [entry for entry in pvalue if entry[0] <= siglevel]
    return kept, len(kept), siglevel
def get_b(pvalue, siglevel, ngenes):  # Bonferroni
    """Bonferroni correction: keep entries with p * ngenes <= siglevel.

    Sorts `pvalue` in place (ascending).  Returns (kept entries, count,
    smallest corrected p among the kept entries, or 1.0 if none passed).
    """
    pvalue.sort()
    kept = []
    min_corrected = 1.0
    for entry in pvalue:
        corrected = entry[0] * ngenes
        if corrected <= siglevel:
            kept.append(entry)
            if corrected < min_corrected:
                min_corrected = corrected
    return kept, len(kept), min_corrected
def get_bh(pvalue, siglevel, ngenes):  # B-H
    """Benjamini-Hochberg selection: keep entries whose FDR
    (p * ngenes / rank) is <= siglevel, ranking over the sorted p-values.

    Sorts `pvalue` in place.  Returns (kept entries, count, raw p-value of
    the last kept entry, or 0 if none passed).
    """
    pvalue.sort()
    kept = []
    last_p = 0
    passed = 0
    for rank, entry in enumerate(pvalue, 1):
        fdr = (entry[0] * ngenes) / rank
        if fdr <= siglevel:
            kept.append(entry)
            last_p = entry[0]
            passed += 1
    return kept, passed, last_p
def getTail(pp):
    """Return the requested tail p-value from a Fisher test result.

    Module global `ftail` selects left ('l'), right ('r') or two-tailed
    ('t'); module global `exfisher` selects between the attribute-based and
    method-based fisher APIs.  Returns None for an unrecognised `ftail`.
    """
    if exfisher:
        if ftail == 'l': return pp.left_tail
        if ftail == 'r': return pp.right_tail
        if ftail == 't': return pp.two_tail
    else:
        if ftail == 'l': return pp.left_tail_p()
        if ftail == 'r': return pp.right_tail_p()
        if ftail == 't': return pp.two_tail_p()
def getDicSS(dicp):
    """Complement a substitution-frequency dict: for every true substitution
    (two-character key whose characters differ, e.g. 'AG') map it to
    1 - frequency; identity keys like 'AA' are dropped."""
    return dict((key, 1 - dicp[key]) for key in dicp if key[0] != key[1])
def getFreads(bases):
    """Turn an [A, C, G, T] count list into a base -> count dict."""
    return {'A': bases[0], 'C': bases[1], 'G': bases[2], 'T': bases[3]}
def getSub(ref,fread,dics):
    """Fisher-test each observed substitution at one position against the
    expected substitution distribution and return the best p-value as a str.

    ref   : reference base (any case)
    fread : dict {'A','C','G','T'} -> read counts at this position
    dics  : expected substitution frequencies keyed like 'AG'
    Uses module globals exfisher / FishersExactTest / pvalue for the test
    implementation and getTail for tail selection.  Returns '1.0' when no
    substitution is observed.
    """
    #fread={A,C,G,T}
    nref=fread[ref.upper()]
    # observed substitutions: (change, reads supporting ref, reads supporting variant)
    sub=[(ref.upper()+i,nref,fread[i]) for i in fread if i!=ref.upper() and fread[i]!=0]
    allsub=' '.join([x[0] for x in sub])
    # list like [('AT', 50, 10), ('AG', 50, 2)]
    res=[] #[(int(dics[i[0]]*(i[1]+i[2])),((i[1]+i[2])-exp1),pvalue(i[1],i[2],int(dics[i[0]]*(i[1]+i[2])),((i[1]+i[2])-exp1))) for i in sub]
    for i in sub:
        obs1=i[1]
        obs2=i[2]
        # expected ref/variant split given the background substitution frequency
        exp1=int(dics[i[0]]*(i[1]+i[2]))
        exp2=((i[1]+i[2]) - exp1)
        if not exfisher: pval=FishersExactTest([[exp1,exp2],[obs1,obs2]])
        else: pval=pvalue(obs1,obs2,exp1,exp2)
        pval=getTail(pval)
        res.append((i[0],obs1,obs2,exp1,exp2,str(pval)))
    if len(res)==1: return res[0][5] #,allsub,fread
    elif len(res) > 1:
        # several substitutions observed: report the most significant one
        rr=[float(x[-1]) for x in res]
        idx=rr.index(min(rr))
        return res[idx][5] #,allsub,fread
    else: return '1.0' #,0,0
def BaseCount(seq,ref):
	"""Count A/C/G/T in `seq` and detect substitutions against `ref`.

	Returns (coverage, [A,C,G,T] counts, detected substitutions as a
	space-joined string or '-', frequency of the top substitution
	formatted '%.2f'). Detection thresholds come from module globals
	`vnuc` (min supporting reads) and `mmf` (min mismatch frequency).

	Fixes: Py2-only dict.has_key replaced by `in` (works on Py2 and Py3);
	the loop-invariant `ref in b` test is hoisted out of the loop;
	sort()+reverse() collapsed to sort(reverse=True).
	"""
	b={'A':0,'C':0,'G':0,'T':0}
	for base in seq.upper():
		if base in b: b[base]+=1
	subv=[]
	if ref in b:  # an unknown ref (e.g. 'N') yields no substitutions, as before
		for alt in b:
			if b[alt]!=0 and alt!=ref:
				vv=float(b[alt])/(b[alt]+b[ref])
				subv.append((b[alt],vv,ref+alt))
	subv.sort(reverse=True)
	subs=[s[2] for s in subv if s[0]>=vnuc and s[1]>=mmf]
	freq=0.0
	if len(subs)==0: subs.append('-')
	else: freq=subv[0][1]  # frequency of the most supported substitution
	return sum(b.values()),[b['A'],b['C'],b['G'],b['T']],' '.join(subs),'%.2f'%(freq)
def meanq(v,n):
	"""Return v/n formatted with two decimals; '0.00' when undefined.

	Keeps the original's forgiving behaviour (division by zero or a
	non-numeric `v` yields 0.0) but narrows the bare `except` to the
	exceptions that can actually occur here.
	"""
	try: m=float(v)/n
	except (ZeroDivisionError,TypeError,ValueError): m=0.0
	return '%.2f'%(m)
def rmHomo(sequp,seqdw,gh,ref):
	"""Flag positions lying inside a homopolymer of length >= gh.

	Counts the run of consecutive `ref` bases touching the position: the
	run at the end of `sequp` (upstream flank) plus the run at the start
	of `seqdw` (downstream flank) plus the position itself. Returns 1 if
	the total reaches `gh`, else 0; two empty flanks short-circuit to 0.
	"""
	if not sequp and not seqdw:
		return 0
	def leading_run(flank):
		n=0
		for base in flank:
			if base!=ref: break
			n+=1
		return n
	total=leading_run(seqdw)+leading_run(sequp[::-1])+1
	return 1 if total>=gh else 0
def prop(tot,va):
	"""Return va/tot as a float, or 0.0 when the ratio is undefined.

	Preserves the original's tolerance of tot == 0 and non-numeric input
	but narrows the bare `except` to the exceptions that can occur.
	"""
	try: return float(va)/tot
	except (ZeroDivisionError,TypeError,ValueError): return 0.0
def vstand(strand):
	"""Infer a consensus strand from a string of '+', '-' and '*' marks.

	Uses module globals: when `useconf` is true, a strand wins if its
	share of the (+,-) calls reaches `strconf`, otherwise '*' is
	returned. Without `useconf`, a (+,-) tie with no '*' marks gives
	'+', else the most frequent mark wins (max over (count, mark)).
	Returns '*' immediately when no '+' or '-' marks are present.
	"""
	counts=[(strand.count(sym),sym) for sym in '+-*']
	plus,minus,star=counts
	if plus[0]==0 and minus[0]==0: return '*'
	if useconf:
		total=plus[0]+minus[0]
		if prop(total,plus[0])>=strconf: return '+'
		if prop(total,minus[0])>=strconf: return '-'
		return '*'
	if plus[0]==minus[0] and star[0]==0: return '+'
	return max(counts)[1]
def comp(s):
	"""Return the complement of a DNA string, upper-cased.

	Characters outside A/C/G/T map to 'N'. Fixes: Py2-only dict.has_key
	replaced by dict.get (works on Py2.7 and Py3), and the quadratic
	string `+=` loop replaced by a single str.join.
	"""
	pairs={'A':'T','T':'A','C':'G','G':'C'}
	return ''.join(pairs.get(base,'N') for base in s.upper())
def whereis(program):
	"""Return 1 if `program` exists as a non-directory file on PATH, else 0.

	Fixes: the candidate path is built once per PATH entry (it was built
	twice), and the separator uses os.pathsep instead of a hard-coded ':'
	so the lookup also works on Windows (identical on POSIX).
	"""
	for path in os.environ.get('PATH', '').split(os.pathsep):
		candidate=os.path.join(path, program)
		if os.path.exists(candidate) and not os.path.isdir(candidate):
			return 1
	return 0
def vstrand(lista):
	"""Collapse a list of strand marks to a single code.

	Returns '1' when every mark is '+', '0' when every mark is '-',
	and '2' for an empty or mixed list.
	"""
	if not lista: return '2'
	n=len(lista)
	if lista.count('+')==n: return '1'
	if lista.count('-')==n: return '0'
	return '2'
def normByStrand(seq_,strand_,squal_,mystrand_):
	"""Keep only the reads matching the inferred strand.

	`mystrand_` '0' selects '-', anything else selects '+'. The three
	inputs are parallel (base string, strand string, quality list);
	returns (filtered bases, summed quality, filtered quality list).
	"""
	wanted='-' if mystrand_=='0' else '+'
	seq=''
	squal=[]
	for base,st,q in zip(seq_,strand_,squal_):
		if st==wanted:
			seq+=base
			squal.append(q)
	return seq,sum(squal),squal
def normByBlat(seq_,strand_,squal_,blatc_):
	"""Keep only the reads whose Blat flag is '1'.

	The four inputs are parallel (bases, strands, qualities, flags);
	returns (bases, summed quality, quality list, strand marks) for the
	entries where the flag is '1'.
	"""
	seq,strand='',''
	squal=[]
	for base,st,q,flag in zip(seq_,strand_,squal_,blatc_):
		if flag=='1':
			seq+=base
			strand+=st
			squal.append(q)
	return seq,sum(squal),squal,strand
def testBlat(blc):
if blc.count('1') > blc.count('0'): return 1
return 0
#######
# Top-level setup: validate/prepare input files, resolve regions, create
# the output folder, and preload optional Blat filtering data. Relies on
# option globals (pid, bamfile, fastafile, sortbam, custsub, uann, expos,
# blatr, ...) parsed earlier in the script.
script_time=time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))
sys.stderr.write("Script time --> START: %s\n"%(script_time))
sys.stderr.write("Analysis ID: %s\n"%(pid))
if not os.path.exists(bamfile):
	usage()
	sys.exit('BAM file %s not found.' %(bamfile))
if sortbam:
	# Sort in place: keep the original as *_old and swap in the sorted file.
	sys.stderr.write('Sorting BAM file.\n')
	pysam.sort(bamfile,'sorted_%s'%(pid))
	os.rename(bamfile,bamfile+'_old')
	os.rename('sorted_%s.bam'%(pid),bamfile)
	sys.stderr.write('Indexing BAM file.\n')
	pysam.index(bamfile)
if not os.path.exists(bamfile+'.bai') and not sortbam:
	sys.stderr.write('Indexing BAM file.\n')
	pysam.index(bamfile)
if not os.path.exists(fastafile):
	usage()
	sys.exit('Fasta file %s not found.' %(fastafile))
if not os.path.exists(fastafile+'.fai'):
	sys.stderr.write('Indexing Fasta file.\n')
	pysam.faidx(fastafile)
if custsub:
	if not os.path.exists(custfile):
		usage()
		sys.exit('Substitution file %s not found.' %(custfile))
#####################
# check reference names
# rrefs: regions with mapped/unmapped reads (idxstats cols 3+4 > 0) -> length.
rrefs={}
ridxinfo=pysam.idxstats(bamfile)
for j in ridxinfo.split('\n'): #MOD
	l=(j.strip()).split('\t')
	if l[0] in ['*', '']: continue #MOD
	if int(l[2])+int(l[3]) > 0: rrefs[l[0]]=int(l[1])
# frefs: sequence names present in the reference FASTA index.
frefs=[]
fidxinfo=open(fastafile+'.fai')
for j in fidxinfo:
	l=(j.strip()).split('\t')
	if l[0]=='': continue
	frefs.append(l[0])
fidxinfo.close()
# Warn about BAM regions missing from the reference FASTA.
rnof=[]
for i in rrefs.keys():
	if i not in frefs: sys.stderr.write('WARNING: Region %s in RNA-Seq not found in reference file.\n' %(i))
#####################
if uann:
	# Annotation mode: strand is taken from the annotation, not inferred.
	getstrand=0
	if not os.path.exists(annfile):
		usage()
		sys.exit('Annotation file %s not found.' %(annfile))
	if sortann:
		if not whereis('grep'): sys.exit('grep command not found.')
		if not whereis('sort'): sys.exit('sort command not found.')
		sys.stderr.write('Sorting annotation file.\n')
		scmd='grep ^"#" %s; grep -v ^"#" %s | sort -k1,1 -k4,4n > %s' %(annfile,annfile,'annotation_%s'%(pid))
		os.system(scmd)
		os.rename(annfile,annfile+'_old')
		os.rename('annotation_%s'%(pid),annfile)
	if not os.path.exists(annfile+'.tbi'):
		sys.stderr.write('Indexing annotation file.\n')
		annfile=pysam.tabix_index(annfile, preset='gff')
if expos:
	# Optional file of positions to exclude, tabix-indexed like the annotation.
	if not os.path.exists(exfile):
		usage()
		sys.exit('File %s not found.' %(exfile))
	if sortann:
		if not whereis('grep'): sys.exit('grep command not found.')
		if not whereis('sort'): sys.exit('sort command not found.')
		sys.stderr.write('Sorting file.\n')
		scmd='grep ^"#" %s; grep -v ^"#" %s | sort -k1,1 -k4,4n > %s' %(exfile,exfile,'exfile_%s'%(pid))
		os.system(scmd)
		os.rename(exfile,exfile+'_old')
		os.rename('exfile_%s'%(pid),exfile)
	if not os.path.exists(exfile+'.tbi'):
		sys.stderr.write('Indexing %s file.\n' %(exfile))
		exfile=pysam.tabix_index(exfile, preset='gff')
# Only 'bh' (Benjamini-Hochberg) and 'bo' (Bonferroni) are valid test names.
if test not in ['bh', 'bo']: test='no'
#mainbam=pysam.Samfile(bamfile,"rb")
#regions=mainbam.references
#mainbam.close()
dicregions=dict(rrefs.items())
chrs=[x for x in dicregions.keys() if x not in nochrs]
sys.stderr.write('Analysis on %i regions.\n' %(len(chrs)))
if infolder!='': outfolder=os.path.join(outfolder_,'denovo_%s_%s' %(infolder,pid))
else: outfolder=os.path.join(outfolder_,'denovo_%s' %(pid))
if not os.path.exists(outfolder):
	splitfolder=os.path.split(outfolder)
	if not os.path.exists(splitfolder[0]): os.mkdir(splitfolder[0])
	os.mkdir(outfolder)
outtable=os.path.join(outfolder,'outTable_%s' %(pid))
outdisto=os.path.join(outfolder,'outSubs_%s' %(pid))
#write command line and input parameters
f=open(os.path.join(outfolder,'parameters.txt'),'w')
f.writelines(params)
f.close()
#######################################
# d maps "<read_name>_<flag>" -> flag for reads rejected by Blat remapping.
d={}
if blatr:
	badblat=blatfolder #os.path.join(blatfolder,'blatseqs_%s.bad'%(chr))
	if os.path.exists(badblat):
		sys.stderr.write('Using Blat mapping for RNAseq...\n')
		f=open(badblat)
		for i in f:
			l=(i.strip()).split()
			d[l[0]+'_'+l[1]]=int(l[1])
		f.close()
		sys.stderr.write('Found %i reads.\n'%(len(d)))
def exploreBAM(myinput):
inputs=myinput.split('$')
chr,bamfile=inputs[0],inputs[1]
outfile=os.path.join(outfolder,'table_%s_%s'%(chr,pid))
outfile2=os.path.join(outfolder,'subs_%s_%s'%(chr,pid))
d,di={},{}
bam=pysam.Samfile(bamfile,"rb")
fasta=pysam.Fastafile(fastafile)
if uann: tabix=pysam.Tabixfile(annfile)
if expos: extabix=pysam.Tabixfile(exfile)
out=open(outfile,'w')
if not custsub:
dsubs=dict([(x+y, 0) for x in 'ACGT' for y in 'ACGT'])
out2=open(outfile2,'w')
#header='Region\tPosition\tReference\tCoverage\tMeanQuality\tBaseCount\tSubs\tFrequency\n'
#out.write(header)
sys.stderr.write('Started analysis on region: %s\n'%(chr))
#if blatr:
# badblat=os.path.join(blatfolder,'blatseqs_%s.bad'%(chr))
# if os.path.exists(badblat):
# sys.stderr.write('Using Blat mapping for region %s\n'%(chr))
# f=open(badblat)
# for i in f:
# l=(i.strip()).split()
# d[l[0]+'_'+l[1]]=int(l[1])
# f.close()
# sys.stderr.write('Found %i reads for region %s\n'%(len(d),chr))
if exss:
if os.path.exists(splicefile):
sys.stderr.write('Loading known splice sites for region %s\n'%(chr))
f=open(splicefile)
for i in f:
l=(i.strip()).split()
if l[0]!=chr: continue
st,tp,cc=l[4],l[3],int(l[1])
if st=='+' and tp=='D':
for j in range(nss): di[cc+(j+1)]=0
if st=='+' and tp=='A':
for j in range(nss): di[cc-(j+1)]=0
if st=='-' and tp=='D':
for j in range(nss): di[cc-(j+1)]=0
if st=='-' and tp=='A':
for j in range(nss): di[cc+(j+1)]=0
f.close()
sys.stderr.write('Loaded %i positions for %s\n'%(len(di),chr))
for pileupcolumn in bam.pileup(chr,stepper='nofilter',max_depth=MAX_DEPTH): #MOD
ref=fasta.fetch(chr,pileupcolumn.reference_pos,pileupcolumn.reference_pos+1).upper() #MOD
seq,qual,strand,squal,blatc='',0,'',[],'' #MOD
if rmsh:
if ((pileupcolumn.reference_pos+1)-homo)-1 < 0: sequp='' #MOD
else: sequp=(fasta.fetch(chr,((pileupcolumn.reference_pos+1)-homo)-1,(pileupcolumn.reference_pos+1)-1)).upper() #MOD
seqdw=(fasta.fetch(chr,pileupcolumn.reference_pos+1,(pileupcolumn.reference_pos+1)+homo)).upper() #MOD
for pileupread in pileupcolumn.pileups: # per ogni base dell'allineamento multiplo
if pileupread.is_del: continue #MOD
if pileupread.alignment.is_qcfail: continue #MOD
if pileupread.alignment.is_supplementary: continue
if pileupread.alignment.has_tag('SA'): continue
#s,q,t,qq=pileupread.alignment.seq[pileupread.qpos].upper(),ord(pileupread.alignment.qual[pileupread.qpos])-QVAL,'*',pileupread.alignment.qual[pileupread.qpos]
# escludi posizioni introniche nei pressi di splice sites
if exss and di.has_key(pileupcolumn.reference_pos+1): continue #MOD
# multiple hit
if exh: #MOD
if pileupread.alignment.is_secondary: continue #MOD
if pileupread.alignment.has_tag('NH'): #MOD
if pileupread.alignment.get_tag('NH') > 1: continue #MOD
# duplicates
if exd | |
<reponame>Jexp97/Bisca_JV_num_01<filename>classes_base.py
###################################################
# Arquivo base para jogos que usem cartas e baralho.
# Autor: <NAME>
# Ano: 2021
###################################################
from random import shuffle
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#import tensorflow as tf
#from tensorflow import keras
import numpy as np
class Carta:
    """A single playing card: suit, face value and game-specific points."""
    def __init__(self, naipe, numero, pontos=0):
        # naipe: suit name, e.g. "PAUS", "OUROS", "COPAS", "ESPADAS" (str)
        # numero: face value "A", "2", ..., "J", "Q", "K" (str)
        # pontos: point value; game dependent, defaults to 0 (int)
        self.__naipe = naipe
        self.__numero = numero
        self.__pontos = pontos
    @property
    def naipe(self):
        """The card's suit."""
        return self.__naipe
    @property
    def numero(self):
        """The card's face value."""
        return self.__numero
    @property
    def pontos(self):
        """The card's point value."""
        return self.__pontos
    def __str__(self):
        """Render as '<numero>.<naipe>', e.g. '3.OUROS'."""
        return '{}.{}'.format(self.__numero, self.__naipe)
class Baralho:
    """A deck of cards based on the French deck.

    Holds Carta objects and serves as a base class for game-specific
    decks; the end of the internal list is the top of the deck.
    """
    def __init__(self):
        # _cartas_no_baralho keeps the deck's card sequence (top = last).
        self._cartas_no_baralho = []
        self._naipes = ("ESPADAS", "PAUS", "COPAS", "OUROS")
        self._numeros = ("A", "2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "Joker")
    def embaralhar(self):
        """Shuffle the deck in place (uses random.shuffle)."""
        shuffle(self._cartas_no_baralho)
    def tirar_uma_carta(self, posicao=None):
        """Remove and return a card: from `posicao` if given, else the top."""
        if posicao is None:
            return self._cartas_no_baralho.pop()
        return self._cartas_no_baralho.pop(posicao)
    def __len__(self):
        """Number of cards currently in the deck."""
        return len(self._cartas_no_baralho)
    def __str__(self):
        """The deck's contents, one card per line, top card first."""
        return ''.join(str(carta) + "\n" for carta in reversed(self._cartas_no_baralho))
    def contar_pontos(self):
        """Sum the point values of every card in the deck."""
        return sum(carta.pontos for carta in self._cartas_no_baralho)
    def adicionar_carta(self, carta):
        """Place `carta` on top of the deck."""
        self._cartas_no_baralho.append(carta)
class BaralhoDeBisca(Baralho):
    """A 40-card Bisca deck (the 'dirty' deck).

    With an odd number of players (n_par_de_jogadores=False) the '2'
    cards are removed, leaving 36 cards.
    """
    def __init__(self, n_par_de_jogadores=True):
        super().__init__()
        self._numeros = ["A", "2", "3", "4", "5", "6", "7", "J", "Q", "K"]
        self._pontos = {"A": 11, "2": 0, "3": 0, "4": 0, "5": 0, "6": 0,
                        "7": 10, "J": 3, "Q": 2, "K": 4}
        if not n_par_de_jogadores:
            # Odd player count: drop the 2s before building the deck.
            self._numeros.remove('2')
        # One card per (suit, value) combination, with Bisca point values.
        self._cartas_no_baralho = [Carta(naipe, numero, self._pontos[numero])
                                   for naipe in self._naipes
                                   for numero in self._numeros]
class Jogador:
    """Base class for players: a name plus a non-negative score."""
    def __init__(self, nome):
        self._nome = nome
        self._pontos = 0
    @property
    def nome(self):
        """The player's name."""
        return self._nome
    @property
    def pontos(self):
        """The player's current score."""
        return self._pontos
    @pontos.setter
    def pontos(self, novos_pontos):
        self._pontos = novos_pontos
    def somar_pontos(self, novos_pontos):
        """Add `novos_pontos` (may be negative); the score floors at 0."""
        total = self._pontos + novos_pontos
        self._pontos = total if total > 0 else 0
class JogadorDeBisca(Jogador):
    # Class used for the (human) Bisca player: a hand of up to three
    # cards plus a pile of won cards that holds the player's points.
    tamanho_max_mao = 3  # maximum number of cards in hand
    def __init__(self, nome):
        super().__init__(nome)
        # List holding the hand's contents (Carta objects).
        self._mao = []
        # A Baralho used as the pile of won (point) cards.
        self._pilha_de_pontos = Baralho()
    def escolhe_carta_para_jogar(self, carta_da_mesa=None ,cartas_jogadas_na_mesa=None):
        # Interactive choice of the card to play; loops until the typed
        # choice is valid for the current hand size and returns it as a
        # string ('1', '2' or '3'). The table arguments are unused here
        # (they exist for API compatibility with AI players).
        while(True):
            # Show the player's name and current hand.
            print(self.nome)
            print(self) # Prints the cards in hand (see __str__)
            if len(self._mao) == 3:
                try:
                    print('Qual carta jogar? (1), (2) ou (3): ', end='')
                    carta_escolhida = input()
                    if (carta_escolhida != '1' and carta_escolhida != '2' and carta_escolhida != '3'):
                        raise
                    # Valid input: leave the loop and return carta_escolhida.
                    break
                except:
                    print('Entrada inválida!')
            elif len(self._mao) == 2:
                try:
                    print('Qual carta jogar? (1) ou (2): ', end='')
                    carta_escolhida = input()
                    if (carta_escolhida != '1' and carta_escolhida != '2'):
                        raise
                    # Valid input: leave the loop and return carta_escolhida.
                    break
                except:
                    print('Entrada inválida!')
            else:
                try:
                    print('Jogue sua última carta pressionando (1): ', end='')
                    carta_escolhida = input()
                    if (carta_escolhida != '1'):
                        raise
                    # Valid input: leave the loop and return carta_escolhida.
                    break
                except:
                    print('Entrada inválida!')
        return carta_escolhida
    def adicionar_carta_na_mao(self, carta):
        # `carta` must be a Carta instance.
        self._mao.append(carta)
    def retirar_carta_da_mao(self, posicao):
        # Remove and return the hand card at 1-based position `posicao`.
        return self._mao.pop(int(posicao) - 1)
    def __str__(self):
        # Return the hand's contents as a single space-separated string.
        conteudo_da_mao = ""
        for carta in self._mao:
            conteudo_da_mao = conteudo_da_mao + (carta.__str__() + " ")
        return conteudo_da_mao
    @property
    def pilha_de_pontos(self):
        # The pile of won cards (a Baralho).
        return self._pilha_de_pontos
    def adicionar_carta_na_pilha (self, carta):
        # Add a won card to the points pile.
        self._pilha_de_pontos.adicionar_carta(carta)
    def __len__(self):
        # Number of cards currently in hand.
        return len(self._mao)
class DummyPlayer(JogadorDeBisca):
    """A baseline player that always plays the first card in its hand."""
    def __init__(self, nome='Dummy'):
        super().__init__(nome)
        # Card value mapped to an ID ordered by card strength.
        self._ID_de_carta = {"2": 0.1, "3": 0.2, "4": 0.3, "5": 0.4, "6": 0.5,
                             "7": 0.6, "J": 0.7, "Q": 0.8, "K": 0.9, "A": 1.0}
        # Suit mapped to a numeric ID.
        self._ID_de_naipe = {"ESPADAS": 0.25, "PAUS": 0.5, "COPAS": 0.75, "OUROS": 1.0}
    def escolhe_carta_para_jogar(self, carta_da_mesa=None, cartas_jogadas_na_mesa=None):
        """Ignore the table state and always pick the first available card (1)."""
        return 1
# O treinamento ainda não está funcionando bem :(
'''
class SmartPlayer(JogadorDeBisca):
# Este jogador tem uma rede neural treinada para escolher uma carta da mão
def __init__(self, nome='Smart'):
super().__init__(nome)
# Substitui o nome da carta por um ID ordenado pela força da carta
self._ID_de_carta = {"2": 0.1, "3": 0.2, "4": 0.3, "5": 0.4, "6": 0.5,
"7": 0.6, "J": 0.7, "Q": 0.8, "K": 0.9, "A": 1.0}
# Substitui o naipe por um ID
self._ID_de_naipe = {"ESPADAS": 0.25, "PAUS": 0.5, "COPAS": 0.75, "OUROS": 1.0}
# Criando a arquitetura da rede neural responsável pelas decisões de qual carta jogar
entradas = keras.Input(shape=(14,))
x = keras.layers.Dense(14, activation='relu', name='first_layer')(entradas)
x = keras.layers.Dense(7, activation='relu', name='second_layer')(x)
x = keras.layers.Dense(4, activation='relu', name='third_layer')(x)
saidas = keras.layers.Dense(3, activation='softmax', name='last_layer')(x)
self._tomador_de_decisao = keras.Model(inputs=entradas, outputs=saidas, name='tomador_de_decisao')
# Compilando o modelo. Ainda não sei se será necessário
self._tomador_de_decisao.compile(optimizer='adam',
loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.metrics.SparseCategoricalAccuracy()])
# Carregando os parâmetros da rede treinada
self._tomador_de_decisao.load_weights('pesos_da_rede_neural_treinada_para_smart_player')
def salvar_pesos_da_rede(self, nome_do_arquivo_de_saida='pesos_salvos'):
# Esta função salva no disco os pesos da rede neural
try:
self._tomador_de_decisao.save_weights(nome_do_arquivo_de_saida)
except:
print('Algo deu errado na hora de salvar os pesos!')
raise
else:
print('Pesos da rede salvo com sucesso!')
def escolhe_carta_para_jogar(self, carta_da_mesa=None, cartas_jogadas_na_mesa=None):
# A partir das cartas na mão, da carta da mesa e das cartas já jogadas (x)
# vai escolher uma carta a ser jogada (y)
# A primeira coisa a se fazer e transformar as entradas em uma lista (numpy)
# que tem a ordem [ID_carta_1, ID_naipe_1, ..., ID_naipe_3, ID_carta_mesa, ID_naipe_mesa,...
# ... ID_carta_ja_jogada_1, ID_naipe_ja_jogado_1, ..., ID_naipe_ja_jogado_3]
#
# Essa lista deve ter um conter 14 itens. Caso não haja alguma informação,
# deve-se preencher com 0 a posição (por exemplo, não existe cartas jogadas ainda).
lista_de_entrada_do_decisor = []
# Preenchendo com o conteúdo da mão
for carta_na_mao in self._mao:
# Adiciona ID_carta
lista_de_entrada_do_decisor.append(self._ID_de_carta[
carta_na_mao.numero
])
# Adiciona ID_naipe
lista_de_entrada_do_decisor.append(self._ID_de_naipe[
carta_na_mao.naipe
])
# Se a mão tinha menos do que 3 cartas, devo preencher o restante das posições
# com 0.
while (len(lista_de_entrada_do_decisor) < 6):
lista_de_entrada_do_decisor.append(0)
# Preenchendo com o conteúdo da carta da mesa
# Lembrando: carta_da_mesa deve ser do tipo Carta
if carta_da_mesa != None:
lista_de_entrada_do_decisor.append(self._ID_de_carta[
carta_da_mesa.numero
])
lista_de_entrada_do_decisor.append(self._ID_de_naipe[
carta_da_mesa.naipe
])
# Preenchendo com o conteúdo das cartas jogadas
# Lembrando: cartas_jogadas_na_mesa deve ser uma lista com itens do tipo Carta
if cartas_jogadas_na_mesa != None:
for carta_ja_jogada in cartas_jogadas_na_mesa[:-1]:
# cartas_jogadas_na_mesa | |
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :mod:`pennylane` :class:`Device` class.
"""
import pytest
import pennylane as qml
from pennylane import Device, DeviceError
from pennylane.qnodes import QuantumFunctionError
from pennylane.wires import Wires
from collections import OrderedDict
mock_device_paulis = ["PauliX", "PauliY", "PauliZ"]
# pylint: disable=abstract-class-instantiated, no-self-use, redefined-outer-name, invalid-name
@pytest.fixture(scope="function")
def mock_device_with_operations(monkeypatch):
    """Yield a factory for a mock Device with non-empty operations.

    Clearing __abstractmethods__ allows the abstract Device class to be
    instantiated; the patches are undone when the fixture is torn down.
    """
    with monkeypatch.context() as m:
        m.setattr(Device, '__abstractmethods__', frozenset())
        m.setattr(Device, 'operations', mock_device_paulis)
        m.setattr(Device, 'observables', mock_device_paulis)
        m.setattr(Device, 'short_name', 'MockDevice')
        def get_device(wires=1):
            return Device(wires=wires)
        yield get_device
@pytest.fixture(scope="function")
def mock_device_with_observables(monkeypatch):
    """Yield a factory for a mock Device with non-empty observables.

    NOTE(review): the body is identical to mock_device_with_operations;
    the separate name documents intent at the use site.
    """
    with monkeypatch.context() as m:
        m.setattr(Device, '__abstractmethods__', frozenset())
        m.setattr(Device, 'operations', mock_device_paulis)
        m.setattr(Device, 'observables', mock_device_paulis)
        m.setattr(Device, 'short_name', 'MockDevice')
        def get_device(wires=1):
            return Device(wires=wires)
        yield get_device
@pytest.fixture(scope="function")
def mock_device_supporting_paulis(monkeypatch):
    """Yield a factory for a mock Device supporting exactly the Paulis.

    NOTE(review): same patches as mock_device_with_operations; kept as a
    distinct fixture for readability in the tests that use it.
    """
    with monkeypatch.context() as m:
        m.setattr(Device, '__abstractmethods__', frozenset())
        m.setattr(Device, 'operations', mock_device_paulis)
        m.setattr(Device, 'observables', mock_device_paulis)
        m.setattr(Device, 'short_name', 'MockDevice')
        def get_device(wires=1):
            return Device(wires=wires)
        yield get_device
@pytest.fixture(scope="function")
def mock_device_supporting_paulis_and_inverse(monkeypatch):
    """Yield a factory for a mock Device supporting Paulis and their
    inverses (via the 'inverse_operations' capability flag)."""
    with monkeypatch.context() as m:
        m.setattr(Device, '__abstractmethods__', frozenset())
        m.setattr(Device, 'operations', mock_device_paulis)
        m.setattr(Device, 'observables', mock_device_paulis)
        m.setattr(Device, 'short_name', 'MockDevice')
        m.setattr(Device, '_capabilities', {"inverse_operations": True})
        def get_device(wires=1):
            return Device(wires=wires)
        yield get_device
@pytest.fixture(scope="function")
def mock_device_supporting_observables_and_inverse(monkeypatch):
    """Yield a factory for a mock Device supporting Paulis + Hermitian
    observables and inverse operations."""
    with monkeypatch.context() as m:
        m.setattr(Device, '__abstractmethods__', frozenset())
        m.setattr(Device, 'operations', mock_device_paulis)
        m.setattr(Device, 'observables', mock_device_paulis + ['Hermitian'])
        m.setattr(Device, 'short_name', 'MockDevice')
        m.setattr(Device, '_capabilities', {"inverse_operations": True})
        def get_device(wires=1):
            return Device(wires=wires)
        yield get_device
# Capability dict shared by the capability-related fixtures and asserted
# against in TestClassmethods.test_capabilities.
mock_device_capabilities = {
    "measurements": "everything",
    "noise_models": ["depolarizing", "bitflip"],
}
@pytest.fixture(scope="function")
def mock_device_with_capabilities(monkeypatch):
    """Yield a factory for a mock Device exposing mock_device_capabilities."""
    with monkeypatch.context() as m:
        m.setattr(Device, '__abstractmethods__', frozenset())
        m.setattr(Device, '_capabilities', mock_device_capabilities)
        def get_device(wires=1):
            return Device(wires=wires)
        yield get_device
@pytest.fixture(scope="function")
def mock_device_with_paulis_and_methods(monkeypatch):
    """Yield a factory for a mock Device supporting Paulis whose abstract
    methods (expval/var/sample/apply) are stubbed with trivial lambdas."""
    with monkeypatch.context() as m:
        m.setattr(Device, '__abstractmethods__', frozenset())
        m.setattr(Device, '_capabilities', mock_device_capabilities)
        m.setattr(Device, 'operations', mock_device_paulis)
        m.setattr(Device, 'observables', mock_device_paulis)
        m.setattr(Device, 'short_name', 'MockDevice')
        m.setattr(Device, 'expval', lambda self, x, y, z: 0)
        m.setattr(Device, 'var', lambda self, x, y, z: 0)
        m.setattr(Device, 'sample', lambda self, x, y, z: 0)
        m.setattr(Device, 'apply', lambda self, x, y, z: None)
        def get_device(wires=1):
            return Device(wires=wires)
        yield get_device
@pytest.fixture(scope="function")
def mock_device(monkeypatch):
    """Yield a factory for a general-purpose mock Device (PauliY/RX/Rot
    operations, PauliZ observable, stubbed abstract methods)."""
    with monkeypatch.context() as m:
        m.setattr(Device, '__abstractmethods__', frozenset())
        m.setattr(Device, '_capabilities', mock_device_capabilities)
        m.setattr(Device, 'operations', ["PauliY", "RX", "Rot"])
        m.setattr(Device, 'observables', ["PauliZ"])
        m.setattr(Device, 'short_name', 'MockDevice')
        m.setattr(Device, 'expval', lambda self, x, y, z: 0)
        m.setattr(Device, 'var', lambda self, x, y, z: 0)
        m.setattr(Device, 'sample', lambda self, x, y, z: 0)
        m.setattr(Device, 'apply', lambda self, x, y, z: None)
        def get_device(wires=1):
            return Device(wires=wires)
        yield get_device
class TestDeviceSupportedLogic:
    """Test the logic associated with the supported operations and observables"""
    # pylint: disable=no-self-use, redefined-outer-name
    def test_supports_operation_argument_types(self, mock_device_with_operations):
        """Checks that device.supports_operation returns the correct result
        when passed both string and Operation class arguments"""
        dev = mock_device_with_operations()
        assert dev.supports_operation("PauliX")
        assert dev.supports_operation(qml.PauliX)
        assert not dev.supports_operation("S")
        assert not dev.supports_operation(qml.CNOT)
    def test_supports_observable_argument_types(self, mock_device_with_observables):
        """Checks that device.supports_observable returns the correct result
        when passed both string and Observable class arguments"""
        dev = mock_device_with_observables()
        assert dev.supports_observable("PauliX")
        assert dev.supports_observable(qml.PauliX)
        assert not dev.supports_observable("Identity")
        assert not dev.supports_observable(qml.Identity)
    # NOTE(review): method name typo 'obeservable' kept — renaming would
    # change the collected test id.
    def test_supports_obeservable_inverse(self, mock_device_supporting_paulis_and_inverse):
        """Checks that '.inv'-suffixed observables are supported only when
        the device declares the inverse_operations capability."""
        dev = mock_device_supporting_paulis_and_inverse()
        assert dev.supports_observable("PauliX.inv")
        assert not dev.supports_observable("Identity.inv")
    def test_supports_obeservable_raise_error_hermitian_inverse(self, mock_device_supporting_observables_and_inverse):
        """Checks that Hermitian is supported but its inverse is not."""
        dev = mock_device_supporting_observables_and_inverse()
        assert dev.supports_observable("PauliX")
        assert dev.supports_observable("PauliX.inv")
        assert dev.supports_observable("Hermitian")
        assert not dev.supports_observable("Hermitian.inv")
    def test_supports_operation_exception(self, mock_device):
        """check that device.supports_operation raises proper errors
        if the argument is of the wrong type"""
        dev = mock_device()
        with pytest.raises(
            ValueError,
            match="The given operation must either be a pennylane.Operation class or a string.",
        ):
            dev.supports_operation(3)
        with pytest.raises(
            ValueError,
            match="The given operation must either be a pennylane.Operation class or a string.",
        ):
            dev.supports_operation(Device)
    def test_supports_observable_exception(self, mock_device):
        """check that device.supports_observable raises proper errors
        if the argument is of the wrong type"""
        dev = mock_device()
        with pytest.raises(
            ValueError,
            match="The given observable must either be a pennylane.Observable class or a string.",
        ):
            dev.supports_observable(3)
        operation = qml.CNOT
        with pytest.raises(
            ValueError,
            match="The given observable must either be a pennylane.Observable class or a string.",
        ):
            dev.supports_observable(operation)
class TestInternalFunctions:
    """Test the internal functions of the abstract Device class"""
    def test_check_validity_on_valid_queue(self, mock_device_supporting_paulis):
        """Tests the function Device.check_validity with valid queue and observables"""
        dev = mock_device_supporting_paulis()
        queue = [
            qml.PauliX(wires=0),
            qml.PauliY(wires=1),
            qml.PauliZ(wires=2),
        ]
        observables = [qml.expval(qml.PauliZ(0))]
        # Raises an error if queue or observables are invalid
        dev.check_validity(queue, observables)
    def test_check_validity_on_valid_queue_with_inverses(self,
                                                         mock_device_supporting_paulis_and_inverse):
        """Tests the function Device.check_validity with a valid queue
        and the inverse of operations"""
        dev = mock_device_supporting_paulis_and_inverse()
        queue = [
            qml.PauliX(wires=0).inv(),
            qml.PauliY(wires=1).inv(),
            qml.PauliZ(wires=2).inv(),
            qml.PauliX(wires=0).inv().inv(),
            qml.PauliY(wires=1).inv().inv(),
            qml.PauliZ(wires=2).inv().inv(),
        ]
        observables = [qml.expval(qml.PauliZ(0))]
        # Raises an error if queue or observables are invalid
        dev.check_validity(queue, observables)
    def test_check_validity_with_not_supported_operation_inverse(self, mock_device_supporting_paulis_and_inverse):
        """Tests the function Device.check_validity with a queue containing
        the inverse of an operation the device does not support"""
        dev = mock_device_supporting_paulis_and_inverse()
        queue = [
            qml.CNOT(wires=[0, 1]).inv(),
        ]
        observables = [qml.expval(qml.PauliZ(0))]
        with pytest.raises(
            DeviceError,
            match="Gate {} not supported on device {}".format("CNOT", 'MockDevice'),
        ):
            dev.check_validity(queue, observables)
    def test_check_validity_on_tensor_support(self, mock_device_supporting_paulis):
        """Tests the function Device.check_validity with tensor support capability"""
        dev = mock_device_supporting_paulis()
        queue = [
            qml.PauliX(wires=0),
            qml.PauliY(wires=1),
            qml.PauliZ(wires=2),
        ]
        observables = [qml.expval(qml.PauliZ(0) @ qml.PauliX(1))]
        # mock device does not support Tensor product
        with pytest.raises(DeviceError, match="Tensor observables not supported"):
            dev.check_validity(queue, observables)
    def test_check_validity_on_invalid_observable_with_tensor_support(self, monkeypatch):
        """Tests the function Device.check_validity with tensor support capability
        but with an invalid observable"""
        queue = [
            qml.PauliX(wires=0),
            qml.PauliY(wires=1),
            qml.PauliZ(wires=2),
        ]
        observables = [qml.expval(qml.PauliZ(0) @ qml.Hadamard(1))]
        D = Device
        with monkeypatch.context() as m:
            # Patch a Device that claims tensor support but lacks Hadamard.
            m.setattr(D, '__abstractmethods__', frozenset())
            m.setattr(D, 'operations', ["PauliX", "PauliY", "PauliZ"])
            m.setattr(D, 'observables', ["PauliX", "PauliY", "PauliZ"])
            m.setattr(D, 'capabilities', lambda self: {"tensor_observables": True})
            m.setattr(D, 'short_name', "Dummy")
            dev = D()
            # mock device supports Tensor products but not hadamard
            with pytest.raises(DeviceError, match="Observable Hadamard not supported"):
                dev.check_validity(queue, observables)
    def test_check_validity_on_invalid_queue(self, mock_device_supporting_paulis):
        """Tests the function Device.check_validity with invalid queue and valid observables"""
        dev = mock_device_supporting_paulis()
        queue = [
            qml.RX(1.0, wires=0),
            qml.PauliY(wires=1),
            qml.PauliZ(wires=2),
        ]
        observables = [qml.expval(qml.PauliZ(0))]
        with pytest.raises(DeviceError, match="Gate RX not supported on device"):
            dev.check_validity(queue, observables)
    def test_check_validity_on_invalid_observable(self, mock_device_supporting_paulis):
        """Tests the function Device.check_validity with valid queue and invalid observables"""
        dev = mock_device_supporting_paulis()
        queue = [
            qml.PauliX(wires=0),
            qml.PauliY(wires=1),
            qml.PauliZ(wires=2),
        ]
        observables = [qml.expval(qml.Hadamard(0))]
        with pytest.raises(DeviceError, match="Observable Hadamard not supported on device"):
            dev.check_validity(queue, observables)
    def test_check_validity_on_invalid_queue_of_inverses(self, mock_device_supporting_paulis_and_inverse):
        """Tests the function Device.check_validity with invalid queue and valid inverses of operations"""
        dev = mock_device_supporting_paulis_and_inverse()
        queue = [
            qml.PauliY(wires=1).inv(),
            qml.PauliZ(wires=2).inv(),
            qml.RX(1.0, wires=0).inv(),
        ]
        observables = [qml.expval(qml.PauliZ(0))]
        with pytest.raises(DeviceError, match="Gate RX not supported on device"):
            dev.check_validity(queue, observables)
    def test_supports_inverse(self, mock_device_supporting_paulis_and_inverse):
        """Tests the function Device.supports_inverse on device which supports inverses"""
        dev = mock_device_supporting_paulis_and_inverse()
        assert dev.check_validity([qml.PauliZ(0).inv()], []) is None
        assert dev.check_validity([], [qml.PauliZ(0).inv()]) is None
    def test_supports_inverse_device_does_not_support_inverses(self, mock_device_supporting_paulis):
        """Tests the function Device.supports_inverse on device which does not support inverses"""
        dev = mock_device_supporting_paulis()
        with pytest.raises(DeviceError, match="The inverse of gates are not supported on device {}".
                           format(dev.short_name)):
            dev.check_validity([qml.PauliZ(0).inv()], [])
        with pytest.raises(DeviceError, match="The inverse of gates are not supported on device {}".
                           format(dev.short_name)):
            dev.check_validity([], [qml.PauliZ(0).inv()])
    def test_args(self, mock_device):
        """Test that the device requires correct arguments"""
        with pytest.raises(qml.DeviceError, match="specified number of shots needs to be at least 1"):
            Device(mock_device, shots=0)
    @pytest.mark.parametrize('wires, expected', [(['a1', 'q', -1, 3], Wires(['a1', 'q', -1, 3])),
                                                 (3, Wires([0, 1, 2])),
                                                 ([3], Wires([3]))])
    def test_wires_property(self, mock_device, wires, expected):
        """Tests that the wires attribute is set correctly."""
        dev = mock_device(wires=wires)
        assert dev.wires == expected
    def test_wire_map_property(self, mock_device):
        """Tests that the wire_map is constructed correctly."""
        dev = mock_device(wires=['a1', 'q', -1, 3])
        expected = OrderedDict([(Wires('a1'), Wires(0)), (Wires('q'), Wires(1)),
                                (Wires(-1), Wires(2)), (Wires(3), Wires(3))])
        assert dev.wire_map == expected
class TestClassmethods:
    """Tests for the classmethods exposed by Device."""

    def test_capabilities(self, mock_device_with_capabilities):
        """The device reports its additional capabilities as a dict."""
        device = mock_device_with_capabilities()
        assert device.capabilities() == mock_device_capabilities
class TestOperations:
"""Tests the logic related to operations"""
def test_shots_setter(self, mock_device):
"""Tests that the property setter of shots changes the number of shots."""
dev = mock_device()
assert dev._shots == 1000
dev.shots = 10
assert dev._shots == 10
@pytest.mark.parametrize("shots", [-10, 0])
def test_shots_setter_error(self, mock_device, shots):
"""Tests that the property setter of shots raises an error | |
import re
import os
import functools
from itertools import chain
from datetime import date, datetime, timedelta
import yaml
from flask import current_app, Markup
from veripress import cache
from veripress.model.models import Page, Post, Widget
from veripress.model.parsers import get_standard_format_name, get_parser
from veripress.helpers import to_list, to_datetime, Pair, traverse_directory
class Storage(object):
    """Abstract base class for content storages.

    A storage knows how to locate, load and filter the posts, custom pages
    and widgets of a site. Subclasses implement the retrieval methods for a
    concrete backend (e.g. ``FileStorage`` for the filesystem).
    """

    def __init__(self):
        """Initialization."""
        # Flipped to True by close(); exposed read-only via ``closed``.
        self._closed = False

    def close(self):
        """
        Close the storage.

        Subclasses should override this to close any file descriptor
        or database connection if necessary.
        """
        self._closed = True

    @property
    def closed(self):
        """
        Read-only property.

        This state should be changed only in 'close' method.
        """
        return self._closed

    @cache.memoize(timeout=1 * 60)
    def fix_relative_url(self, publish_type, rel_url):
        """
        Fix post or page relative url to a standard, uniform format.

        :param publish_type: publish type ('post' or 'page')
        :param rel_url: relative url to fix
        :return: tuple(fixed relative url or file path if exists else None,
                       file exists or not)
        :raise ValueError: unknown publish type
        """
        if publish_type == 'post':
            # Posts never resolve to a physical file here, so the
            # "file exists" flag is always False for them.
            return self.fix_post_relative_url(rel_url), False
        elif publish_type == 'page':
            return self.fix_page_relative_url(rel_url)
        else:
            raise ValueError(
                'Publish type "{}" is not supported'.format(publish_type))

    @staticmethod
    @cache.memoize(timeout=2 * 60 * 60)  # actually, it will never change
    def fix_post_relative_url(rel_url):
        """
        Fix post relative url to a standard, uniform format.

        Possible input:
        - 2016/7/8/my-post
        - 2016/07/08/my-post.html
        - 2016/8/09/my-post/
        - 2016/8/09/my-post/index
        - 2016/8/09/my-post/index.htm
        - 2016/8/09/my-post/index.html

        :param rel_url: relative url to fix
        :return: fixed relative url, or None if cannot recognize
        """
        # Accept 'YYYY/M/D/name' optionally followed by '.html' or a
        # trailing '/index(.htm|.html)' suffix.
        m = re.match(
            r'^(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/'
            r'(?P<post_name>[^/]+?)'
            r'(?:(?:\.html)|(?:/(?P<index>index(?:\.html?)?)?))?$',
            rel_url
        )
        if not m:
            return None
        year, month, day, post_name = m.groups()[:4]
        try:
            # Building a date both validates the components (e.g. rejects
            # month 13) and lets strftime zero-pad them uniformly.
            d = date(year=int(year), month=int(month), day=int(day))
            return '/'.join((d.strftime('%Y/%m/%d'), post_name,
                             'index.html' if m.group('index') else ''))
        except (TypeError, ValueError):
            # the date is invalid
            return None

    @staticmethod
    def fix_page_relative_url(rel_url):
        """
        Fix page relative url to a standard, uniform format.

        Possible input:
        - my-page
        - my-page/
        - my-page/index
        - my-page/index.htm
        - my-page/index.html
        - my-page/specific.file

        NOTE!
        Because custom page has a very strong connection with
        the storage type chosen, this method should be implemented
        in subclasses.

        :param rel_url: relative url to fix
        :return: tuple(fixed relative url or file path if exists else None,
                       file exists or not)
        """
        raise NotImplementedError

    def get_posts(self, include_draft=False, filter_functions=None):
        """
        Get all posts, returns an iterable of Post object.
        """
        raise NotImplementedError

    def get_post(self, rel_url, include_draft=False):
        """
        Get post for given relative url, returns a Post object.
        """
        raise NotImplementedError

    def get_tags(self):
        """
        Get all tags as a list of
        dict_item(tag_name, Pair(count_all, count_published)).
        """
        raise NotImplementedError

    def get_categories(self):
        """
        Get all categories as a list of
        dict_item(category_name, Pair(count_all, count_published)).
        """
        raise NotImplementedError

    def get_pages(self, include_draft=False):
        """
        Get all custom pages, returns an iterable of Page object.
        """
        raise NotImplementedError

    def get_page(self, rel_url, include_draft=False):
        """
        Get custom page for given relative url, returns a Page object.
        """
        raise NotImplementedError

    def get_widgets(self, position=None, include_draft=False):
        """
        Get all widgets, returns an iterable of Widget object.
        """
        raise NotImplementedError

    @staticmethod
    def _filter_result(result, filter_functions=None):
        """
        Filter result with given filter functions.

        :param result: an iterable object
        :param filter_functions: some filter functions
        :return: a filter object (filtered result)
        """
        if filter_functions is not None:
            # Filters are chained lazily; nothing is evaluated until the
            # returned filter object is iterated.
            for filter_func in filter_functions:
                result = filter(filter_func, result)
        return result

    def get_posts_with_limits(self, include_draft=False, **limits):
        """
        Get all posts and filter them as needed.

        :param include_draft: return draft posts or not
        :param limits: other limits to the attrs of the result,
                       should be a dict with string or list values
        :return: an iterable of Post objects
        """
        filter_funcs = []

        for attr in ('title', 'layout', 'author',
                     'email', 'tags', 'categories'):
            if limits.get(attr):
                filter_set = set(to_list(limits.get(attr)))

                # The factory function binds the current filter_set/attr;
                # a bare lambda here would late-bind the loop variables and
                # every filter would end up using the last iteration's values.
                def get_filter_func(filter_set_, attr_):
                    return lambda p: filter_set_.intersection(
                        to_list(getattr(p, attr_)))

                filter_funcs.append(get_filter_func(filter_set, attr))

        for attr in ('created', 'updated'):
            interval = limits.get(attr)
            if isinstance(interval, (list, tuple)) and len(interval) == 2 \
                    and isinstance(interval[0], date) and isinstance(
                        interval[1], date):
                # [start date(time), end date(time)]
                start, end = interval
                start = to_datetime(start)
                if not isinstance(end, datetime):
                    # 'end' is a date,
                    # we should convert it to 00:00:00 of the next day,
                    # so that posts of that day will be included
                    end = datetime.strptime(
                        '%04d-%02d-%02d' % (end.year, end.month, end.day),
                        '%Y-%m-%d')
                    end += timedelta(days=1)

                # Same late-binding consideration as above: capture the
                # current attr/start/end via factory arguments.
                def get_filter_func(attr_, start_dt, end_dt):
                    return lambda p: start_dt <= getattr(p, attr_) < end_dt

                filter_funcs.append(get_filter_func(attr, start, end))

        return self.get_posts(include_draft=include_draft,
                              filter_functions=filter_funcs)

    def search_for(self, query, include_draft=False):
        """
        Search for a query text.

        :param query: keyword to query
        :param include_draft: return draft posts/pages or not
        :return: an iterable object of posts and pages (if allowed).
        """
        query = query.lower()
        if not query:
            return []

        def contains_query_keyword(post_or_page):
            # Match against the title and the rendered, tag-stripped
            # content, both compared case-insensitively.
            contains = query in post_or_page.title.lower() \
                       or query in Markup(
                           get_parser(post_or_page.format).parse_whole(
                               post_or_page.raw_content)
                       ).striptags().lower()
            return contains

        # Pages are only searched when the app config allows it.
        return filter(contains_query_keyword,
                      chain(self.get_posts(include_draft=include_draft),
                            self.get_pages(include_draft=include_draft)
                            if current_app.config[
                                'ALLOW_SEARCH_PAGES'] else []))
class FileStorage(Storage):
    @staticmethod
    @cache.memoize(timeout=1 * 60)
    def fix_page_relative_url(rel_url):
        """
        Fix page relative url to a standard, uniform format.

        Possible input:
        - my-page
        - my-page/
        - my-page/index
        - my-page/index.htm
        - my-page/index.html
        - my-page/specific.file

        :param rel_url: relative url to fix
        :return: tuple(fixed relative url or FILE PATH if exists else None,
                       file exists or not)
        """
        rel_url = rel_url.lstrip('/')  # trim all heading '/'
        endswith_slash = rel_url.endswith('/')
        rel_url = rel_url.rstrip('/') + (
            '/' if endswith_slash else '')  # preserve only one trailing '/'
        if not rel_url or rel_url == '/':
            return None, False

        # Candidate physical path inside the instance 'pages' directory.
        file_path = os.path.join(current_app.instance_path, 'pages',
                                 rel_url.replace('/', os.path.sep))
        if rel_url.endswith('/'):
            # Directory-style url: serve its index.html if one exists.
            index_html_file_path = os.path.join(file_path, 'index.html')
            if os.path.isfile(index_html_file_path):
                # index.html exists
                return index_html_file_path, True
            return rel_url, False
        elif os.path.isfile(file_path):
            ext = os.path.splitext(file_path)[1][1:]
            if get_standard_format_name(ext) is not None:
                # is source of custom page
                if current_app.config['PAGE_SOURCE_ACCESSIBLE']:
                    return file_path, True
                # NOTE(review): when source access is disabled we fall
                # through to the '.html' normalization below, so the raw
                # source is never served -- presumably intentional; confirm.
            else:
                # is other direct files
                return file_path, True
        elif os.path.isdir(file_path):
            # Directory without trailing slash: canonicalize with one.
            return rel_url + '/', False

        # No physical file matched: normalize the last path segment to a
        # '.html' url (e.g. 'my-page' or 'my-page.htm' -> 'my-page.html').
        sp = rel_url.rsplit('/', 1)
        m = re.match(r'(.+)\.html?', sp[-1])
        if m:
            sp[-1] = m.group(1) + '.html'
        else:
            sp[-1] += '.html'
        return '/'.join(sp), False
@staticmethod
@cache.memoize(timeout=1 * 60)
def search_file(search_root, search_filename,
instance_relative_root=False):
"""
Search for a filename in a specific search root dir.
:param search_root: root dir to search
:param search_filename: filename to search (no extension)
:param instance_relative_root: search root is relative to instance path
:return: tuple(full_file_path, extension without heading dot)
"""
if instance_relative_root:
search_root = os.path.join(current_app.instance_path, search_root)
file_path = None
file_ext = None
for file in os.listdir(search_root):
filename, ext = os.path.splitext(file)
if filename == search_filename and ext and ext != '.':
file_path = os.path.join(search_root, filename + ext)
file_ext = ext[1:] # remove heading '.' (dot)
break
return file_path, file_ext
# noinspection PyUnresolvedReferences
search_instance_file = staticmethod(
functools.partial(search_file.__func__, instance_relative_root=True))
@staticmethod
@cache.memoize(timeout=1 * 60)
def read_file(file_path):
"""
Read yaml head and raw body content from a file.
:param file_path: file path
:return: tuple(meta, raw_content)
"""
with open(file_path, 'r', encoding='utf-8') as f:
whole = f.read().strip()
if whole.startswith('---'):
# may has yaml meta info, so we try to split it out
sp = re.split(r'-{3,}', whole.lstrip('-'), maxsplit=1)
if len(sp) == 2:
# do have yaml meta info, so we read it
return yaml.load(sp[0]), sp[1].lstrip()
return {}, whole
@cache.memoize(timeout=2 * 60)
def get_posts(self, include_draft=False, filter_functions=None):
"""
Get all posts from filesystem.
:param include_draft: return draft posts or not
:param filter_functions: filter to apply BEFORE result being sorted
:return: an iterable of Post objects (the first is the latest post)
"""
def posts_generator(path):
"""Loads valid posts one by one in the given path."""
if os.path.isdir(path):
for file in os.listdir(path):
filename, ext = os.path.splitext(file)
format_name = get_standard_format_name(ext[1:])
if format_name is not None and re.match(
r'\d{4}-\d{2}-\d{2}-.+', filename):
# the format is supported and the filename is valid,
# so load this post
post = Post()
post.format = format_name
post.meta, post.raw_content = FileStorage.read_file(
os.path.join(path, file))
post.rel_url = filename.replace('-', '/', 3) + '/'
post.unique_key = '/post/' + post.rel_url
yield post
posts_path = os.path.join(current_app.instance_path, 'posts')
result = filter(lambda p: include_draft or not p.is_draft,
posts_generator(posts_path))
result = self._filter_result(result, filter_functions)
return sorted(result, key=lambda p: p.created, reverse=True)
@cache.memoize(timeout=2 * 60)
def get_post(self, rel_url, include_draft=False):
"""
Get post for given relative url from filesystem.
Possible input:
- 2017/01/01/my-post/
- 2017/01/01/my-post/index.html
:param rel_url: relative url
:param include_draft: return draft post or not
:return: a Post object
"""
raw_rel_url = str(rel_url)
if rel_url.endswith('/index.html'):
rel_url = rel_url.rsplit('/', 1)[
0] + '/' # remove the trailing 'index.html'
post_filename = rel_url[:-1].replace('/', '-')
post_file_path, post_file_ext |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.