text stringlengths 957 885k |
|---|
# coding: utf-8
import re

__author__ = "<NAME>"

# Literal rewrites applied before any other cleanup (order matters).
_SUBSTITUTIONS = [
    ('&', ' AND '),
    ("INTL", 'INTERNATIONAL'),
    ("INTN'L", "INTERNATIONAL"),
]

# Leading tokens stripped once each, in order, from the start of the name.
_PREFIXES = [
    'SAME AS CONSIGNEE', 'AGENCJA CELNA', 'AGENCJA TRANSPORTOWA', 'OOO',
    'BY ORDER OF', 'BY ORDER TO', 'BY ORDER', 'FHU', 'F H U', 'LLC', 'OY',
    'PPHU', 'P P H U', 'ZAO', 'TC', 'TO THE ORDER OF', 'TO THE ORDER BY',
    'TO THE ORDER', 'TO ORDER OF', 'TO ORDER BY', 'TO ORDER', 'AS AGENT OF ',
    'BRANCH OF',
]

# Trailing tokens stripped once each, in order, from the end of the name.
_SUFFIXES = [
    ' CO', 'GROUP CORP', 'HONG KONG', 'HONGKONG', 'POLSKA', ' OY', 'POLAND',
    'CHINA', 'A S', 'S A', 'SP J', 'SHANGHAI', 'DEUTSCHLAND', 'BANGLADESH',
    'BV', 'COMPANY', ' H K', 'MOSCOW', 'S C', 'KOREA', 'LLC', 'GDYNIA',
    'IMP AND EXP', 'IMPORT EXPORT', 'IMPORT AND EXPORT', 'IMP EXP',
    'INTERNATIONAL', 'INTL', 'SHENZHEN', 'CAMBODIA', 'RUS', 'RUSSIA',
    'FINLAND', 'PRC', 'JAPAN',
]

# Everything from the first occurrence of any of these markers onward is
# discarded (legal-form markers, addresses, phone numbers, ...).
_SPLITTERS = (
    ',', 'SP Z O O', 'SP ZOO', 'SP Z OO', 'SP ZO O', 'S Z O O', 'SPOLKA ZOO',
    'LIMITED', 'LTD', ' LLC ', 'P O BOX', 'PO BOX', ' SA ', ' AB ', 'PVT',
    ' PRIVATE ', ' CO ', ' S A ', ' A S ', ' AS ', ' ZAO ', ' UL ', ' INTL ',
    ' SP J ', ' OY ', 'GMBH', 'SPOLKA Z OGRANICZONA ODPOWIEDZIALNOSCIA',
    ' SP K ', 'OOO', '"', 'SPOLKA AKCYJNA', ' TEL ', ' FAX ', ' B V ',
    'S KA Z O O', 'SDN BDH', 'SPOLKA JAWNA', 'S P A', 'SPOLKA Z O O',
    'SDN BHD', ' C O ', 'HQ ', ' INC ', ' ZIP ', 'OYKATRIINANTIE', 'SPZ O O',
    ' AG ', ' SP K ', ' SP KOM ', 'SPOLKA KOMANDYTOWA', ' SP K ', ' SP Z O ',
    'S P Z O O', 'BRANCH OF', 'BRANCHOF', ' AND ', 'SP KOMANDYTOWA', 'POLSKA',
    'S R O', 'STREET', ' STR ', 'OYKOIVUHAANTIE',
)

# Generic business words removed wherever they occur (order matters: e.g.
# 'TRADE' is removed before 'TRADING' would be checked).
_SUBSTRINGS = (
    'LOGISTICS', 'LOGISTIC', 'INTERNATIONAL TRADING', 'INTERNATIONAL TRADE',
    'SERVICE CONTRACT', 'CITY', 'OFFICE', 'INDUSTRIAL', 'MANUFACTURING',
    'INTERNATIONAL', 'SHIPPING', 'FORWARDING', 'SERVICE', 'TRADE', 'IMP EXP',
    'IMPORT EXPORT', 'IMP AND EXP', 'IMPORT AND EXPORT', 'TRADING',
    'INDUSTRY', ' AND ', 'GLOBAL', 'HOLDINGS', 'TRANSPORT', 'ENTERPRISES',
    'SHANGHAI', 'SHENZHEN', 'VIETNAM', 'POLSKA', 'TECHNOLOGY', 'FURNITURE',
    'GROUP', 'CARGO', 'POLAND', 'POLLAND', 'INDUSTRIES', 'ELECTRONICS',
    'SPOLKA KOMANDYTOWA', ' SP K ', 'SP KOMANDYTOWA', 'CORPORATION',
    'SPOLKAKOMANDYTOWA',
)

# Any run of characters outside A-Z, comma, double- or single-quote
# collapses to a single space.
_TO_DELETE = re.compile('[^A-Z,"\']+')
_SPACES = re.compile(' {2,}')
_TO_STRIP = ',"\' '


def process(line):
    """Normalize a raw company-name string to a cleaned upper-case core name.

    Pipeline: upper-case and cut at a literal '2.' marker, apply literal
    substitutions, squash non-letter runs, strip known prefixes, cut at the
    first splitter marker, strip known suffixes, then delete generic business
    words — collapsing repeated spaces between stages.
    """
    text = line.upper().split('2.', 1)[0]
    for old, new in _SUBSTITUTIONS:
        text = text.replace(old, new)
    text = _TO_DELETE.sub(' ', text).lstrip(_TO_STRIP)
    for token in _PREFIXES:
        if text.startswith(token):
            text = text[len(token):]
    text = _SPACES.sub(' ', text.lstrip(_TO_STRIP))
    for token in _SPLITTERS:
        text = text.split(token, 1)[0]
    text = _SPACES.sub(' ', text.rstrip(_TO_STRIP))
    for token in _SUFFIXES:
        if text.endswith(token):
            text = text[:-len(token)]
    text = _SPACES.sub(' ', text.rstrip(_TO_STRIP))
    for token in _SUBSTRINGS:
        text = text.replace(token, '').strip(_TO_STRIP)
    return _SPACES.sub(' ', text)
|
from __future__ import print_function
import math
import torch
import torch.nn as nn
from torch.nn.modules.loss import _Loss
from torch.nn import functional as F
class HMTLoss(nn.Module):
    """Weighted multi-task loss over gender, race and age branches."""

    def __init__(self, weight_g=1, weight_r=1, weight_a=2):
        super(HMTLoss, self).__init__()
        self.weight_g = weight_g
        self.weight_r = weight_r
        self.weight_a = weight_a
        # Gender and race are classification branches; age is regressed
        # with a smooth Huber loss.
        self.g_criterion = nn.CrossEntropyLoss()
        self.r_criterion = nn.CrossEntropyLoss()
        self.a_criterion = SmoothHuberLoss()

    def forward(self, g_pred, g_gt, r_pred, r_gt, a_pred, a_gt):
        """Return the weighted sum of the three branch losses."""
        weighted = (
            self.weight_g * self.g_criterion(g_pred, g_gt),
            self.weight_r * self.r_criterion(r_pred, r_gt),
            self.weight_a * self.a_criterion(a_pred, a_gt),
        )
        return sum(weighted)
def log_cosh_loss(input, target, epsilon=0):
    """Element-wise log-cosh loss: ``log(cosh(target - input) + epsilon)``.

    ``epsilon`` guards the log against a zero argument if ever set > 0.
    """
    diff = target - input
    return torch.log(epsilon + torch.cosh(diff))
class HuberLoss(_Loss):
    """
    Huber Loss
    if |y-\hat{y}| < \delta, return \frac{1}{2}MSE
    else return \delta MAE - \frac{1}{2}\delta ** 2
    """

    def __init__(self, size_average=True, reduce=True, delta=0.1):
        # `size_average`/`reduce` are torch's legacy reduction flags; the
        # `_Loss` base converts them into a `reduction` string internally.
        super(HuberLoss, self).__init__(size_average, reduce)
        self.delta = delta

    def forward(self, input, target):
        # NOTE(review): the regime switch uses the *mean* absolute error over
        # the whole batch, so the loss changes branch globally rather than
        # element-wise as in the textbook Huber definition — confirm intended.
        if F.l1_loss(input, target) < self.delta:
            # Quadratic regime: half the (reduced) MSE.
            return 0.5 * F.mse_loss(input, target, size_average=self.size_average, reduce=self.reduce)
        else:
            # Linear regime: delta * MAE - delta^2 / 2.
            return self.delta * F.l1_loss(input, target, size_average=self.size_average,
                                          reduce=self.reduce) - 0.5 * self.delta * self.delta
class SmoothHuberLoss(_Loss):
    """Smooth Huber loss.

    Element-wise, with ``t = |y - y_hat|``:
        if ``t < delta``: ``log(cosh(t))`` (smooth, quadratic-like near 0)
        else:             ``t``            (linear, robust to outliers)

    The result is reduced according to ``reduction`` ('mean' by default;
    'sum' and 'none' are also supported).

    Bug fixed: the original else-branch passed ``F.l1_loss(input, target)``
    (a scalar *mean* over all elements, broadcast by ``torch.where``) instead
    of the element-wise absolute error the definition specifies; ``reduction``
    was also stored but never used.
    """

    def __init__(self, reduction='mean', delta=0.6):
        super(SmoothHuberLoss, self).__init__()
        self.delta = delta
        self.reduction = reduction

    def forward(self, input, target):
        t = torch.abs(input - target)
        # Element-wise: log-cosh inside the |error| < delta regime, |error|
        # outside it (cosh is symmetric, so log(cosh(t)) == log(cosh(y-yhat))).
        loss = torch.where(t < self.delta, torch.log(torch.cosh(t)), t)
        if self.reduction == 'sum':
            return torch.sum(loss)
        if self.reduction == 'none':
            return loss
        return torch.mean(loss)
class HMTFERLoss(nn.Module):
    """Weighted multi-task loss for facial expression recognition.

    Combines cross-entropy losses of the emotion, age, race and gender
    branches with fixed mixing weights.
    """

    def __init__(self, emotion_branch_w=0.7, age_branch_w=0.1, race_branch_w=0.1, gender_branch_w=0.1):
        super(HMTFERLoss, self).__init__()
        self.emotion_branch_w = emotion_branch_w
        self.age_branch_w = age_branch_w
        self.race_branch_w = race_branch_w
        self.gender_branch_w = gender_branch_w
        # All four branches are classification heads.
        self.emotion_criterion = nn.CrossEntropyLoss()
        self.age_criterion = nn.CrossEntropyLoss()
        self.race_criterion = nn.CrossEntropyLoss()
        self.gender_criterion = nn.CrossEntropyLoss()

    def forward(self, gender_pred, gender_gt, race_pred, race_gt, age_pred, age_gt, emotion_pred, emotion_gt):
        """Return the weighted sum of the four branch cross-entropies."""
        parts = (
            self.emotion_branch_w * self.emotion_criterion(emotion_pred, emotion_gt),
            self.age_branch_w * self.age_criterion(age_pred, age_gt),
            self.race_branch_w * self.race_criterion(race_pred, race_gt),
            self.gender_branch_w * self.gender_criterion(gender_pred, gender_gt),
        )
        return sum(parts)
|
<reponame>shapeshift-legacy/watchtower<gh_stars>0
import os
import json
from django.db.models.signals import pre_save, post_save, pre_delete, post_delete
from django.dispatch import receiver
from tracker.models import Account, Address
from common.services.rabbitmq import RabbitConnection, EXCHANGE_UNCHAINED
from common.services.launchdarkly import is_feature_enabled, UNCHAINED_REGISTRY
# RabbitMQ connection settings for the Unchained registry.
host = os.environ.get('UNCHAINED_RABBIT_HOST')
port = os.environ.get('UNCHAINED_RABBIT_PORT')
user = os.environ.get('UNCHAINED_RABBIT_USER')
password = os.environ.get('UNCHAINED_RABBIT_PASS')
# Default the flag to '' so a missing UNCHAINED_RABBIT_ENABLED variable
# disables the feature instead of raising AttributeError on None.lower().
is_enabled = (
    is_feature_enabled(UNCHAINED_REGISTRY)
    and os.environ.get('UNCHAINED_RABBIT_ENABLED', '').lower() == 'true'
)


class ConnectionError(Exception):
    """Raised when RabbitMQ cannot be reached.

    NOTE(review): shadows the builtin ConnectionError within this module.
    """
    pass
def should_migrate(account):
    """True when an un-migrated account is on a supported network and the
    unchained-registry feature is enabled."""
    supported = _network_to_name(account.network) is not None
    return account.migrated is False and is_enabled and supported
def _network_to_name(network):
network_lookup = {
'ETH': "ethereum"
}
return network_lookup.get(network)
@receiver(pre_delete, sender=Account)
def check_unregister_account(sender, instance, **kwargs):
    """Before deleting an Account, verify RabbitMQ is reachable (fail fast)."""
    if is_enabled is False:
        return
    if _network_to_name(instance.network) is None:
        return
    connection = RabbitConnection(host, port, user, password)._connection
    if not (connection and connection.is_open is True):
        raise ConnectionError(
            "unable to connect to rabbitmq at {}:{}".format(host, port)
        )
@receiver(post_delete, sender=Account)
def unregister_account(sender, instance, **kwargs):
    """After an Account is deleted, publish an unregister message."""
    if is_enabled is False:
        return
    name = _network_to_name(instance.network)
    if name is None:
        return
    payload = {
        'action': 'unregister',
        'client_id': 'axiom',
        'watchtower_meta': {'tracker_account_id': instance.id},
        'registration': {'pubkey': instance.xpub},
    }
    RabbitConnection(host, port, user, password).publish(
        exchange=EXCHANGE_UNCHAINED,
        routing_key='{}.registry'.format(name),
        message_type='unchained.registry',
        body=json.dumps(payload),
    )
@receiver(pre_save, sender=Address)
def check_register_address(sender, instance, **kwargs):
    """Before saving an Address, verify RabbitMQ is reachable (fail fast)."""
    if is_enabled is False:
        return
    if _network_to_name(instance.account.network) is None:
        return
    connection = RabbitConnection(host, port, user, password)._connection
    if not (connection and connection.is_open is True):
        raise ConnectionError(
            "unable to connect to rabbitmq at {}:{}".format(host, port)
        )
@receiver(post_save, sender=Address)
def register_address(sender, instance, created, **kwargs):
    """After an Address is saved, mark its account migrated and publish a
    register message for the address."""
    if is_enabled is False:
        return
    name = _network_to_name(instance.account.network)
    if name is None:
        return
    account = instance.account
    account.migrated = True
    account.save()
    payload = {
        'action': 'register',
        'client_id': 'axiom',
        'watchtower_meta': {
            'tracker_account_id': account.id,
            'tracker_address_ids': {instance.address: instance.id},
        },
        'registration': {
            'addresses': [instance.address],
            'pubkey': account.xpub,
        },
    }
    RabbitConnection(host, port, user, password).publish(
        exchange=EXCHANGE_UNCHAINED,
        routing_key='{}.registry'.format(name),
        message_type='unchained.registry',
        body=json.dumps(payload),
    )
@receiver(pre_delete, sender=Address)
def check_unregister_address(sender, instance, **kwargs):
    """Before deleting an Address, verify RabbitMQ is reachable (fail fast)."""
    if is_enabled is False:
        return
    if _network_to_name(instance.account.network) is None:
        return
    connection = RabbitConnection(host, port, user, password)._connection
    if not (connection and connection.is_open is True):
        raise ConnectionError(
            "unable to connect to rabbitmq at {}:{}".format(host, port)
        )
@receiver(post_delete, sender=Address)
def unregister_address(sender, instance, **kwargs):
    """After an Address is deleted, publish an unregister message."""
    if is_enabled is False:
        return
    name = _network_to_name(instance.account.network)
    if name is None:
        return
    payload = {
        'action': 'unregister',
        'client_id': 'axiom',
        'watchtower_meta': {
            'tracker_account_id': instance.account.id,
            'tracker_address_ids': {instance.address: instance.id},
        },
        'registration': {
            'addresses': [instance.address],
            'pubkey': instance.account.xpub,
        },
    }
    RabbitConnection(host, port, user, password).publish(
        exchange=EXCHANGE_UNCHAINED,
        routing_key='{}.registry'.format(name),
        message_type='unchained.registry',
        body=json.dumps(payload),
    )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Pitch-tracking and tuning estimation"""
import warnings
import numpy as np
from .spectrum import _spectrogram
from . import time_frequency
from .._cache import cache
from .. import util
__all__ = ["estimate_tuning", "pitch_tuning", "piptrack"]
def estimate_tuning(
    y=None, sr=22050, S=None, n_fft=2048, resolution=0.01, bins_per_octave=12, **kwargs
):
    """Estimate the tuning of an audio time series or spectrogram input.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio signal
    sr : number > 0 [scalar]
        audio sampling rate of `y`
    S : np.ndarray [shape=(d, t)] or None
        magnitude or power spectrogram
    n_fft : int > 0 [scalar] or None
        number of FFT bins to use, if `y` is provided
    resolution : float in `(0, 1)`
        resolution of the tuning as a fraction of a bin;
        0.01 corresponds to measurements in cents
    bins_per_octave : int > 0 [scalar]
        how many frequency bins per octave
    kwargs : additional keyword arguments
        passed through to `piptrack`

    Returns
    -------
    tuning : float in `[-0.5, 0.5)`
        estimated tuning deviation (fractions of a bin)

    See Also
    --------
    piptrack : pitch tracking by parabolic interpolation
    """
    pitch, mag = piptrack(y=y, sr=sr, S=S, n_fft=n_fft, **kwargs)
    voiced = pitch > 0
    # Threshold on magnitudes: keep bins at or above the median magnitude of
    # voiced bins; with no voiced bins, keep everything.
    threshold = np.median(mag[voiced]) if voiced.any() else 0.0
    return pitch_tuning(
        pitch[voiced & (mag >= threshold)],
        resolution=resolution,
        bins_per_octave=bins_per_octave,
    )
def pitch_tuning(frequencies, resolution=0.01, bins_per_octave=12):
    """Estimate a tuning offset (fractions of a bin) relative to A440=440.0Hz
    from a collection of detected pitches.

    Parameters
    ----------
    frequencies : array-like, float
        frequencies detected in the signal (see `piptrack`)
    resolution : float in `(0, 1)`
        resolution of the tuning as a fraction of a bin;
        0.01 corresponds to cents
    bins_per_octave : int > 0 [scalar]
        how many frequency bins per octave

    Returns
    -------
    tuning : float in `[-0.5, 0.5)`
        estimated tuning deviation (fractions of a bin)

    See Also
    --------
    estimate_tuning : tuning from time-series or spectrogram input
    """
    frequencies = np.atleast_1d(frequencies)
    # Discard DC / non-positive components before taking logs.
    frequencies = frequencies[frequencies > 0]
    if not np.any(frequencies):
        warnings.warn("Trying to estimate tuning from empty frequency set.")
        return 0.0
    # Fractional deviation of each pitch from its nearest bin.
    octs = time_frequency.hz_to_octs(frequencies)
    residual = np.mod(bins_per_octave * octs, 1.0)
    # A residual of 0.95 is more likely a -0.05 deviation from the next
    # tone up, so fold the upper half down.
    residual[residual >= 0.5] -= 1.0
    n_edges = int(np.ceil(1.0 / resolution)) + 1
    counts, edges = np.histogram(residual, np.linspace(-0.5, 0.5, n_edges))
    # The histogram peak is the consensus tuning.
    return edges[np.argmax(counts)]
@cache(level=30)
def piptrack(
    y=None,
    sr=22050,
    S=None,
    n_fft=2048,
    hop_length=None,
    fmin=150.0,
    fmax=4000.0,
    threshold=0.1,
    win_length=None,
    window="hann",
    center=True,
    pad_mode="reflect",
    ref=None,
):
    """Pitch tracking on thresholded parabolically-interpolated STFT.

    This implementation uses the parabolic interpolation method described by
    [1]_.

    .. [1] https://ccrma.stanford.edu/~jos/sasp/Sinusoidal_Peak_Interpolation.html

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio signal
    sr : number > 0 [scalar]
        audio sampling rate of `y`
    S : np.ndarray [shape=(d, t)] or None
        magnitude or power spectrogram
    n_fft : int > 0 [scalar] or None
        number of FFT bins to use, if `y` is provided
    hop_length : int > 0 [scalar] or None
        number of samples to hop
    threshold : float in `(0, 1)`
        a bin in spectrum `S` is considered a pitch when it is greater than
        `threshold * ref(S)`; by default `ref(S)` is `max(S, axis=0)`
    fmin : float > 0 [scalar]
        lower frequency cutoff
    fmax : float > 0 [scalar]
        upper frequency cutoff
    win_length : int <= n_fft [scalar]
        window length; defaults to `n_fft` when unspecified
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        a window specification as accepted by `filters.get_window`
    center : boolean
        if `True`, frames are centered at `y[t * hop_length]`;
        if `False`, frame `t` begins at `y[t * hop_length]`
    pad_mode : string
        padding mode used at the signal edges when `center=True`
    ref : scalar or callable [default=np.max]
        reference against which `S` is compared; if callable, evaluated as
        `ref(S, axis=0)`

    .. note:: One of `S` or `y` must be provided. If `S` is not given, it is
        computed from `y` with the default parameters of `librosa.core.stft`.

    Returns
    -------
    pitches : np.ndarray [shape=(d, t)]
    magnitudes : np.ndarray [shape=(d, t)]
        `d` is the subset of FFT bins within `fmin` and `fmax`;
        `pitches[f, t]` is the instantaneous frequency at bin `f`, time `t`,
        and `magnitudes[f, t]` the corresponding magnitude. Both are 0 at
        bins of non-maximal magnitude.

    Notes
    -----
    This function caches at level 30.
    """
    # Check that we received an audio time series or STFT
    S, n_fft = _spectrogram(
        y=y,
        S=S,
        n_fft=n_fft,
        hop_length=hop_length,
        win_length=win_length,
        window=window,
        center=center,
        pad_mode=pad_mode,
    )
    # Make sure we're dealing with magnitudes
    S = np.abs(S)
    # Truncate to feasible region
    fmin = np.maximum(fmin, 0)
    fmax = np.minimum(fmax, float(sr) / 2)
    fft_freqs = time_frequency.fft_frequencies(sr=sr, n_fft=n_fft)
    # Do the parabolic interpolation everywhere,
    # then figure out where the peaks are
    # then restrict to the feasible range (fmin:fmax).
    # `avg` is the central difference, `shift` the (negated) second
    # difference of the spectrum along the frequency axis.
    avg = 0.5 * (S[2:] - S[:-2])
    shift = 2 * S[1:-1] - S[2:] - S[:-2]
    # Suppress divide-by-zeros.
    # Points where shift == 0 will never be selected by localmax anyway
    shift = avg / (shift + (np.abs(shift) < util.tiny(shift)))
    # Pad back up to the same shape as S
    avg = np.pad(avg, ([1, 1], [0, 0]), mode="constant")
    shift = np.pad(shift, ([1, 1], [0, 0]), mode="constant")
    # Parabolic correction to the peak magnitude.
    dskew = 0.5 * avg * shift
    # Pre-allocate output
    pitches = np.zeros_like(S)
    mags = np.zeros_like(S)
    # Clip to the viable frequency range
    freq_mask = ((fmin <= fft_freqs) & (fft_freqs < fmax)).reshape((-1, 1))
    # Compute the column-wise local max of S after thresholding
    # Find the argmax coordinates
    if ref is None:
        ref = np.max
    if callable(ref):
        ref_value = threshold * ref(S, axis=0)
    else:
        ref_value = np.abs(ref)
    idx = np.argwhere(freq_mask & util.localmax(S * (S > ref_value)))
    # Store pitch and magnitude: interpolated bin position converted to Hz,
    # and the skew-corrected magnitude.
    pitches[idx[:, 0], idx[:, 1]] = (
        (idx[:, 0] + shift[idx[:, 0], idx[:, 1]]) * float(sr) / n_fft
    )
    mags[idx[:, 0], idx[:, 1]] = S[idx[:, 0], idx[:, 1]] + dskew[idx[:, 0], idx[:, 1]]
    return pitches, mags
|
<filename>reachweb/migrations/0001_initial.py<gh_stars>0
# Generated by Django 2.2.5 on 2019-11-01 13:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the reachweb app: Chv (community
    # health volunteer, one-to-one with the auth user), Location, Patient
    # and Emergencies. Generated migrations are normally left unedited;
    # only comments are added here.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Chv',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('age', models.IntegerField()),
                ('phonenumber', models.CharField(max_length=255)),
                ('profile_picture', models.ImageField(blank=True, default='prof.jpg', upload_to='chv_profiles/')),
                ('location', models.CharField(max_length=200)),
                # 'name' is actually a link to the auth user, not a plain string.
                ('name', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'chv',
                'ordering': ['-name'],
            },
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('county', models.CharField(max_length=200)),
                ('accessibility', models.CharField(choices=[('Great', 'The roads are well passable in all weather conditions'), ('Good', 'The roads are passable in favourable weather conditions'), ('Bad', 'The roads are not passable')], max_length=200)),
            ],
            options={
                'db_table': 'location',
                'ordering': ['-name'],
            },
        ),
        migrations.CreateModel(
            name='Patient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('age', models.IntegerField()),
                ('gender', models.CharField(max_length=200)),
                ('location', models.CharField(choices=[('Juja', 'Gachororo'), ('High Point', 'Sewage'), ('K-road', 'Stage'), ('Gwa-Kairu', 'Estate'), ('Ruiru', 'Kimbo'), ('Kasarani', 'Nairobi')], default='Ruiru', max_length=200)),
                ('time', models.DateTimeField()),
                ('symptoms', models.TextField()),
                # Triage color code used by the UI.
                ('urgency', models.CharField(choices=[('red', 'High severity'), ('yellow', 'Moderate severity'), ('green', 'Low severity'), ('blue', 'Unknown severity')], default='blue', max_length=200)),
                ('action_taken', models.TextField()),
                ('examiner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='chv', to='reachweb.Chv')),
            ],
            options={
                'db_table': 'patient',
                'ordering': ['-name'],
            },
        ),
        migrations.CreateModel(
            name='Emergencies',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(choices=[('Road', 'Road accidents'), ('Fire', 'Fire emergencies'), ('Water', 'Water related accidents'), ('Sickness', 'Sick people emergencies')], default='Sickness', max_length=200)),
                ('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='locale', to='reachweb.Location')),
                ('reported_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reporter', to='reachweb.Chv')),
            ],
            options={
                'db_table': 'emergencies',
                'ordering': ['type'],
            },
        ),
    ]
|
<reponame>PaccMann/paccmann_chemistry<filename>paccmann_chemistry/models/stack_rnn.py<gh_stars>1-10
"""Stack Augmented GRU Implementation."""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from ..utils import get_device
logger = logging.getLogger('stack_gru')
# pylint: disable=not-callable, no-member
class StackGRU(nn.Module):
    """Stack Augmented Gated Recurrent Unit (GRU) class."""

    def __init__(self, params):
        """
        Initialization.

        Reference:
            GRU layers intended to help with the training of VAEs by weakening
            the decoder as proposed in: https://arxiv.org/abs/1511.06349.

        Args:
            params (dict): Hyperparameters.

        Items in params:
            embedding_size (int): The embedding size for the dict tokens
            rnn_cell_size (int): Hidden size of GRU.
            vocab_size (int): Output size of GRU (vocab size).
            stack_width (int): Number of stacks in parallel.
            stack_depth (int): Stack depth.
            n_layers (int): The number of GRU layer.
            dropout (float): Dropout on the output of GRU layers except the
                last layer.
            batch_size (int): Batch size.
            lr (float, optional): Learning rate default 0.01.
            optimizer (str, optional): Choice from OPTIMIZER_FACTORY.
                Defaults to 'adadelta'.
            padding_index (int, optional): Index of the padding token.
                Defaults to 0.
            batch_mode (str): 'padded' or 'packed'; required — a missing key
                passes None to set_batch_mode, which raises TypeError.
        """
        super(StackGRU, self).__init__()
        self.device = get_device()
        self.embedding_size = params['embedding_size']
        self.rnn_cell_size = params['rnn_cell_size']
        self.vocab_size = params['vocab_size']
        self.stack_width = params['stack_width']
        self.stack_depth = params['stack_depth']
        self.n_layers = params['n_layers']
        # Also allocates init_hidden / init_stack buffers for this batch size.
        self._update_batch_size(params['batch_size'])
        self.gru_input = self.embedding_size
        self.use_stack = params.get('use_stack', True)
        # Create the update function conditioned on whether stack is used.
        if self.use_stack:
            # The stack top is concatenated to the embedded input.
            self.gru_input += self.stack_width
            self.update = lambda inp, hidden, stack: self._stack_update(
                inp, hidden, stack
            )
        else:
            # Identity update: input passes through and the stack is untouched.
            self.update = lambda inp, hidden, stack: (inp, stack)
            logger.warning('Attention: No stack will be used')
        # Network
        # Maps the last hidden layer to PUSH/POP/NO-OP probabilities.
        self.stack_controls_layer = nn.Linear(
            in_features=self.rnn_cell_size, out_features=3
        )
        # Maps the last hidden layer to the value pushed onto the stack.
        self.stack_input_layer = nn.Linear(
            in_features=self.rnn_cell_size, out_features=self.stack_width
        )
        if params.get('embedding', 'learned') == 'learned':
            self.embedding = nn.Embedding(
                self.vocab_size,
                self.embedding_size,
                padding_idx=params.get('pad_index', 0)
            )
        elif params.get('embedding', 'learned') == 'one_hot':
            # One-hot mode forces embedding_size == vocab_size.
            self.embedding_size = self.vocab_size
            self.embedding = nn.Embedding(
                self.vocab_size,
                self.embedding_size,
                padding_idx=params.get('pad_index', 0)
            )
            # Plug in one hot-vectors and freeze weights
            # NOTE(review): one_hot yields an integer tensor; load_state_dict
            # into a float Embedding may require a cast — confirm on the
            # torch version in use.
            self.embedding.load_state_dict(
                {
                    'weight':
                        torch.nn.functional.one_hot(
                            torch.arange(self.vocab_size)
                        )
                }
            )
            self.embedding.weight.requires_grad = False
        self.gru = nn.GRU(
            self.gru_input,
            self.rnn_cell_size,
            self.n_layers,
            bidirectional=False,
            dropout=params['dropout']
        )
        self.set_batch_mode(params.get('batch_mode'))
        self._check_params()

    def _update_batch_size(self, batch_size: int) -> None:
        """Updates the batch_size

        Arguments:
            batch_size (int): New batch size
        """
        self.batch_size = batch_size
        self.expected_shape = torch.tensor(
            [self.n_layers, self.batch_size, self.rnn_cell_size]
        )
        # Variable to initialize hidden state and stack
        self.init_hidden = Variable(
            torch.zeros(self.n_layers, self.batch_size,
                        self.rnn_cell_size).to(self.device)
        )
        self.init_stack = Variable(
            torch.zeros(self.batch_size, self.stack_depth,
                        self.stack_width).to(self.device)
        )

    def forward(self, input_token, hidden, stack):
        """
        StackGRU forward function.

        Args:
            input_token (torch.Tensor): LongTensor containing
                indices of the input token of or `[1, batch_size]`.
            hidden (torch.Tensor): Hidden state of size
                `[n_layers, batch_size, rnn_cell_size]`.
            stack (torch.Tensor): Previous step's stack of size
                `[batch_size, stack_depth, stack_width]`.

        Returns:
            (torch.Tensor, torch.Tensor, torch.Tensor): output, hidden, stack.

            Output of size `[batch_size, vocab_size]`.
            Hidden state of size `[1, batch_size, rnn_cell_size]`.
            Stack of size `[batch_size, stack_depth, stack_width]`.
        """
        embedded_input = self.embedding(input_token)
        # NOTE: Only if use_stack is True, this actually updates the stack.
        gru_input, stack = self.update(embedded_input, hidden, stack)
        output, hidden = self.gru(gru_input, hidden)
        return output, hidden, stack

    def _forward_fn(self, input_seq, hidden, stack):
        # Placeholder; replaced by set_batch_mode with a concrete pass.
        raise NotImplementedError

    def _forward_pass_padded(self, *args):
        # Implemented by subclasses working on padded sequences.
        raise NotImplementedError

    def _forward_pass_packed(self, *args):
        # Implemented by subclasses working on packed sequences.
        raise NotImplementedError

    def set_batch_mode(self, mode: str) -> None:
        """Select forward function mode

        Args:
            mode (str): Mode to use. Available modes:
                `padded`, `packed`

        Raises:
            TypeError: if `mode` is not a string (including None).
            NotImplementedError: if `mode` is not a known mode.
        """
        if not isinstance(mode, str):
            raise TypeError('Argument `mode` should be a string.')
        # Case-insensitive: 'padded', 'PADDED', ... all capitalize to 'Padded'.
        mode = mode.capitalize()
        MODES = {
            'Padded': self._forward_pass_padded,
            'Packed': self._forward_pass_packed
        }
        if mode not in MODES:
            raise NotImplementedError(
                f'Unknown mode: {mode}. Available modes: {MODES.keys()}'
            )
        self._forward_fn = MODES[mode]

    def _stack_update(self, embedded_input, hidden, stack):
        """Pre-gru stack update operations"""
        # Controls and pushed value are both derived from the last GRU layer.
        stack_controls = self.stack_controls_layer(hidden[-1, :, :])
        stack_controls = F.softmax(stack_controls, dim=-1)
        stack_input = self.stack_input_layer(hidden[-1, :, :].unsqueeze(0))
        stack_input = torch.tanh(stack_input)
        stack = self.stack_augmentation(
            stack_input.permute(1, 0, 2), stack, stack_controls
        )
        # Concatenate the (possibly updated) stack top to the embedded input.
        stack_top = stack[:, 0, :].unsqueeze(0)
        inp = torch.cat((embedded_input, stack_top), dim=2)
        return inp, stack

    def stack_augmentation(self, input_val, prev_stack, controls):
        """
        Stack update function.

        Args:
            input_val (torch.Tensor): Contributon of the current
                hidden state to be input to the stack.
                Must be of shape `[batch_size, 1, stack_width]`.
            prev_stack (torch.Tensor): The stack from previous
                step. Must be of shape
                `[batch_size, stack_depth, stack_width]`.
            controls (torch.Tensor): Stack controls giving
                probabilities of PUSH, POP or NO-OP for the pushdown
                stack. Must be of shape `[batch_size, 3]`.

        Returns:
            torch.Tensor: Updated stack of shape
                `[batch_size, stack_depth, stack_width]`.
        """
        batch_size = prev_stack.size(0)
        controls = controls.view(-1, 3, 1, 1)
        zeros_at_the_bottom = torch.zeros(batch_size, 1, self.stack_width)
        zeros_at_the_bottom = Variable(zeros_at_the_bottom.to(self.device))
        a_push, a_pop, a_no_op = (
            controls[:, 0], controls[:, 1], controls[:, 2]
        )
        # Soft (differentiable) stack: the new stack is a convex combination
        # of popped, pushed, and unchanged versions of the previous stack.
        stack_down = torch.cat((prev_stack[:, 1:], zeros_at_the_bottom), dim=1)
        stack_up = torch.cat((input_val, prev_stack[:, :-1]), dim=1)
        new_stack = a_no_op * prev_stack
        new_stack = new_stack + a_push * stack_up
        new_stack = new_stack + a_pop * stack_down
        return new_stack

    def _check_params(self):
        """
        Runs size checks on input parameter
        """
        if self.rnn_cell_size < self.embedding_size:
            logger.warning('Refrain from squashing embeddings in RNN cells')
|
from __future__ import division, print_function
from itertools import islice
import numpy as np
from numpy.linalg import norm
import spatial
import sphere
class Snake3D(object):
def __init__(self, contour, image,
step=0.2,
scale=1.0,
tension=1,
stiffness=0.0,
push=0.01,
clip=np.inf,
threshold=None,
iterations=500):
self.vertices = np.array(contour.vertices)
self.faces = np.array(contour.faces)
self.distances = np.zeros(len(contour.vertices))
self.contour = contour
self.image = image
self.step = step
self.scale = scale
self.tension = tension
self.stiffness = stiffness
self.push = push
self.clip = clip
self.average_scale = False
self.tangential_interal_forces = False
self.search = 2 # Number of adjacent vertices to check
self.threshold = threshold
self.iterations = 0
self.max_iterations = iterations
self.neighbor_cache = {}
self.normal_cache = {}
@property
def starting_points(self):
return self.contour.vertices
@property
def travel(self):
return np.array(map(norm, self.starting_points - self.vertices))
@property
def unit_starting_points(self):
return self.contour.unit_vertices
@property
def unit_travel(self):
    """Travel distances expressed as a fraction of the contour radius."""
    return self.travel / self.contour.radius
def neighbors(self, idx, dist, cycles=False):
if (idx,dist) in self.neighbor_cache:
return self.neighbor_cache[idx,dist]
neighbors = self.contour.neighboring_vertices(idx, dist, cycles=cycles)
self.neighbor_cache[idx,dist] = neighbors
return neighbors
def control_value(self, idx, dist, cycles=False):
neighbors = self.neighbors(idx, dist, cycles=cycles)
points = self.vertices[neighbors]
control = points.mean(axis=0)
return control
def nearest_on_surface(self, idx, point):
    """Closest surface point to `point`; caches the face normal for `idx`.

    Returns the (distance, triangle, nearest) tuple from the image lookup.
    """
    closest_data = self.image.approximate_nearest_point(point, steps=self.search)
    _distance, triangle, _nearest = closest_data
    # Remember the normal of the nearest face for later tangential projection.
    self.normal_cache[idx] = spatial.triangle_normal(triangle)
    return closest_data
def internal_forces(self, idx, point):
# Find 1st and 2nd degree neighbors
control_1 = self.control_value(idx, 1)
control_2 = self.control_value(idx, 2)
# Calculate internal energies
elasticity = 2 * (control_1 - point)
rigidity = 2 * (control_2 + 3 * point - 4 * control_1)
scaled_elacticity = self.tension * elasticity
scaled_rigidity = self.stiffness * rigidity
internal = scaled_elacticity + scaled_rigidity
return internal
def external_forces(self, idx, point):
    """Attraction toward the nearest surface point, and that face."""
    _distance, triangle, nearest = self.nearest_on_surface(idx, point)
    return self.scale * (nearest - point), triangle
def adjust_internace_force(self, internal, surface_normal):
# Compute internal energy only tangential to surface
# (e.g. remove attractive aspects of elacticty)
if self.tangential_interal_forces:
internal = spatial.project_to_plane(internal, surface_normal)
return internal
def force(self, idx, point):
    """Total (magnitude-clipped) force on vertex `idx` at `point`."""
    internal = self.internal_forces(idx, point)
    external, _triangle = self.external_forces(idx, point)
    # external_forces has just cached this vertex's surface normal.
    internal = self.adjust_internace_force(internal, self.normal_cache[idx])
    total = external + internal
    magnitude = norm(total)
    if magnitude > self.clip:
        total = self.clip * (total / magnitude)
    return total
def averaged_attractions(self):
    """Mean-centered attraction offsets toward the surface, one per vertex.

    Each vertex's pull direction is kept, but its length is re-centered around
    the mean pull distance (vertices closer than average move backwards).
    """
    pulls = np.zeros(self.vertices.shape)
    for i, pt in enumerate(self.vertices):
        _dist, _tri, nearest = self.nearest_on_surface(i, pt)
        pulls[i] = nearest - pt
    lengths = np.apply_along_axis(norm, 1, pulls)
    units = pulls / lengths.reshape(1, -1).transpose()
    centered = lengths - lengths.mean()
    return units * centered.reshape(1, -1).transpose()
def update_vertex(self, idx, vertex):
    """Move *vertex* one step along its total force direction."""
    return vertex + self.step * self.force(idx, vertex)
def update(self):
    """One relaxation pass: a single external step, then ten internal smoothings.

    Returns None.

    Bug fix (dead code): the original body kept a full per-vertex
    force-integration loop *after* the bare ``return``; it was unreachable,
    superseded by the step/stabilize scheme, and has been removed without
    any change in behavior.
    """
    delta = self.step_external_forces()
    for _ in range(10):
        # Once the snake is very close, only the tangential internal
        # component is applied, so it does not get pulled off the surface.
        self.stabilize_internal_forces(tangent_only=delta < 0.05)
    return
def normalize_distances(self):
    """Shift every vertex by its mean-centered attraction offset."""
    self.vertices = self.vertices + self.averaged_attractions()
def decrease_internal_forces(self):
    """Halve both internal-energy weights (used as the snake nears the surface)."""
    self.tension *= 0.5
    self.stiffness *= 0.5
def step_external_forces(self, step=None):
    """Advance every vertex along its external force; return mean displacement.

    Side effects: clears the normal cache and records per-vertex attraction
    magnitudes in ``self.distances``.
    """
    if step is None:
        step = self.step
    self.normal_cache.clear()
    updated = np.zeros(self.vertices.shape)
    moved = 0.0
    count = len(self.vertices)
    for i, vert in enumerate(self.vertices):
        pull, _tri = self.external_forces(i, vert)
        nxt = vert + step * pull
        self.distances[i] = norm(pull)
        moved += norm(vert - nxt)
        updated[i] = nxt
    self.vertices = updated
    return moved / count
def stabilize_internal_forces(self, iterations=5, step=None, tangent_only=True):
    """Relax internal (smoothing) forces for *iterations* passes.

    Vertices whose recorded surface distance is small move less, via a
    ``(max_distance - d) / max_distance`` weight.  With *tangent_only* the
    internal force is first projected onto the surface tangent plane.

    Bug fix: the *step* parameter was accepted but ignored — the update
    always used ``self.step``.  Since *step* defaults to ``self.step``,
    honoring it is backward-compatible for all existing callers.
    """
    if step is None:
        step = self.step
    for _ in range(iterations):
        try:
            max_distance = self.contour.radius
        except AttributeError:
            # Contour without a radius: fall back to its bounding-box extent.
            small, large = self.contour.extents.transpose()
            max_distance = np.abs(large - small)
        new_vertices = np.zeros(self.vertices.shape)
        for idx, vertex in enumerate(self.vertices):
            internal = self.internal_forces(idx, vertex)
            if tangent_only:
                internal = self.adjust_internace_force(internal, self.normal_cache[idx])
            weight = (max_distance - self.distances[idx]) / max_distance
            new_vertices[idx] = vertex + weight * step * internal
        self.vertices = new_vertices
def run(self):
    """Drive the snake to convergence.

    After an initial placement phase, alternates external steps toward the
    surface with internal smoothing, until the mean displacement per step
    drops below 0.01 or ``self.max_iterations`` is reached.
    """
    # Initialization phase:
    # Place snake in better starting position
    self.normalize_distances()
    self.step_external_forces()
    self.normalize_distances()
    delta = np.inf
    # Primary Refinement
    # Get snake closer to surface while maintaining total internal energy
    while delta > 0.01 and self.iterations < self.max_iterations:
        # Step towards image
        delta = self.step_external_forces()
        # As snake approaches begin decreasing internal energy
        if delta < 0.2:
            self.decrease_internal_forces()
            steps = 5
        else:
            steps = 10
        # Enforce only tangential component of energy when very close
        self.stabilize_internal_forces(steps, tangent_only=delta < 0.1)
        self.iterations += 1
    # Push snake back up to surface if it has "fallen in"
    #self.step_external_forces(1)
@classmethod
def create_for_surface(cls, surf, sphere_iterations=2, *args, **kwargs):
    """Build a snake whose contour is a tessellated sphere slightly larger
    than *surf*, centered on the surface centroid."""
    radius = 1.1 * (surf.extents.ptp() / 2)  # pad the bounding radius slightly
    shell = sphere.Sphere.from_tessellation(radius=radius,
                                            center=surf.centroid,
                                            iterations=sphere_iterations)
    return cls(shell, surf, *args, **kwargs)
@classmethod
def create_for_surface_invert(cls, surf, sphere_iterations=2, *args, **kwargs):
    """Inverted construction: the surface acts as the contour and the
    enclosing tessellated sphere is the target image."""
    radius = 1.1 * (surf.extents.ptp() / 2)  # pad the bounding radius slightly
    shell = sphere.Sphere.from_tessellation(radius=radius,
                                            center=surf.centroid,
                                            iterations=sphere_iterations)
    return cls(surf, shell, *args, **kwargs)
def load_sphmap(stream):
    """ Ad-hoc sphere mapping dump format:
        First Line: [# Vertices, Original Sphere Radius, Original Sphere Center (XYZ)]
        Others: [Shape (distance), Sphere Coords (XYZ),
                 Unit shape (distance), Unit Sphere Coords (XYZ),
                 Surface Coords (XYZ)]

    Returns (mappings, radius, center).

    Bug fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    the builtin ``float`` is the documented equivalent.
    """
    tokens = next(stream).split()
    nV = int(tokens[0])
    radius = float(tokens[1])
    center = np.array(tokens[2:], dtype=float)
    mappings = np.loadtxt(islice(stream, nV), dtype=float)
    return mappings, radius, center
def dump_sphmap(stream, snake):
    """ Ad-hoc sphere mapping dump format:
        First Line: [# Vertices, Original Sphere Radius, Original Sphere Center (XYZ)]
        Others: [Shape (distance), Sphere Coords (XYZ),
                 Unit shape (distance), Unit Sphere Coords (XYZ),
                 Surface Coords (XYZ)]
    """
    rows = zip(snake.travel, snake.starting_points,
               snake.unit_travel, snake.unit_starting_points,
               snake.vertices)
    cx, cy, cz = snake.contour.center
    header = "{0}\t{1}\t{2}\t{3}\t{4}".format(len(snake.vertices),
                                              snake.contour.radius, cx, cy, cz)
    print(header, file=stream)
    for travel, points, unit_travel, unit_points, on_surf in rows:
        fields = [travel]
        fields.extend(points)
        fields.append(unit_travel)
        fields.extend(unit_points)
        fields.extend(on_surf)
        # One fixed-precision column per field, tab separated.
        template = ("{:.4f}\t" * len(fields)).strip()
        print(template.format(*fields), file=stream)
def main(args, stdin=None, stdout=None):
    """CLI driver: read a .vet surface, fit a snake, dump the sphere mapping.

    args: argv-style list.  Non-dash tokens are [input_path, output_path]
    (stdin/stdout when absent); dash tokens are options: '--invert',
    '--iterations=N', '--show-embedding', '--show-travel'.
    """
    import sys
    # Separate option tokens from positional arguments.
    opts = [a for a in args if a.startswith('-')]
    args = [a for a in args if not a.startswith('-')]
    # '--key=value' options become params entries ('--' prefix stripped).
    params = dict((k[2:], v) for k,v in (a.split('=') for a in opts if '=' in a))
    if stdin is None:
        stdin = sys.stdin
    if stdout is None:
        stdout = sys.stdout
    if len(args) > 0:
        with open(args[0]) as f:
            surf = spatial.Surface.from_vet_file(f)
    else:
        surf = spatial.Surface.from_vet_file(stdin)
    iterations = int(params.get('iterations', 2))
    if '--invert' in opts:
        snake = Snake3D.create_for_surface_invert(surf, sphere_iterations=iterations)
    else:
        snake = Snake3D.create_for_surface(surf, sphere_iterations=iterations)
    snake.run()
    if len(args) > 1:
        with open(args[1], 'w') as f:
            dump_sphmap(f, snake)
    else:
        dump_sphmap(stdout, snake)
    if '--show-embedding' in opts:
        show_embedding(snake.image, snake)
    if '--show-travel' in opts:
        show_travel(snake)
def show_embedding(s, m, fig=None, ax=None):
    """Plot the embedding surface *s* (transparent) and snake mesh *m* in 3D.

    Either argument may be None to skip it.  Returns (fig, ax, sph, mol).
    """
    from matplotlib import pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    if fig is None:
        fig = plt.figure()
    if ax is None:
        ax = fig.add_subplot(111, projection='3d')
    else:
        ax.clear()
    ax.set_axis_off()
    ax.axison = False
    if s is not None:
        sph = ax.plot_trisurf(*s.vertices.transpose(), triangles=s.faces)
        sph.set_alpha(.1)
    else:
        sph = None
    if m is not None:
        mol = ax.plot_trisurf(*m.vertices.transpose(), triangles=m.faces)
        mol.set_alpha(.9)
    else:
        mol = None
    plt.show()
    return fig, ax, sph, mol
def show_travel(s, fig=None, ax=None):
    """Render the snake's contour, grey-scaled by per-face normalized travel.

    Returns (fig, ax, sph).
    """
    from matplotlib import pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    if fig is None:
        fig = plt.figure()
    if ax is None:
        ax = fig.add_subplot(111, projection='3d')
    else:
        ax.clear()
    ax.set_axis_off()
    ax.axison = False
    max_travel = s.contour.radius
    values = []
    for face in s.faces:
        travel = s.travel[face].mean()
        values.append((max_travel - travel) / max_travel)
    # Bug fix: `map` returns a lazy iterator on Python 3, which
    # set_facecolors does not reliably accept; build a concrete list.
    colors = [(v, v, v) for v in values]
    sph = ax.plot_trisurf(*s.contour.vertices.transpose(), triangles=s.faces,
                          shade=True)
    sph.set_facecolors(colors)
    plt.show()
    return fig, ax, sph
def demo(path, invert=False, fig=None, ax=None):
    """Interactive demo: fit a snake over the surface in *path*, redrawing
    the mesh after every update step.  Returns (fig, ax)."""
    from matplotlib import pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    if fig is None:
        fig = plt.figure()
    if ax is None:
        ax = fig.add_subplot(111, projection='3d')
    ax.set_axis_off()
    with open(path) as f:
        surf = spatial.Surface.from_vet_file(f)
    if invert:
        snake = Snake3D.create_for_surface_invert(surf, sphere_iterations=2)
    else:
        snake = Snake3D.create_for_surface(surf, sphere_iterations=2)
    # Bug fix: the original called sph.remove() inside a bare `except: pass`
    # to paper over sph being undefined on the first iteration; track the
    # artist explicitly instead of swallowing arbitrary exceptions.
    sph = None
    for i in range(25):
        if sph is not None:
            sph.remove()
        sph = ax.plot_trisurf(*snake.vertices.transpose(), triangles=snake.faces)
        sph.set_alpha(1)
        fig.canvas.draw()
        snake.update()
    sph.remove()
    #s.clip += i/.25
    sph = ax.plot_trisurf(*snake.vertices.transpose(), triangles=snake.faces)
    return fig, ax
if __name__ == '__main__':
    # Script entry point: forward the CLI arguments to main().
    import sys
    main(sys.argv[1:])
# --- end of file section ---
#!/usr/bin/env python
#
# MIT License
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from __future__ import print_function
import sys
import math
try:
import json
except ImportError:
import simplejson as json
try:
import colored
__colored__ = True
except ImportError:
__colored__ = False
def print_c(text, color=None, **kwargs):
    """
    Prints using colors.

    Falls back to a plain print when no color is requested, the `colored`
    package is unavailable, or stdout is not a terminal.
    """
    use_color = color is not None and __colored__ and sys.stdout.isatty()
    if use_color:
        print(colored.fg(color), end='')
        print(text, **kwargs)
        print(colored.attr("reset"), end='')
    else:
        print(text, **kwargs)
    sys.stdout.flush()
def p_json(data, color=None):
    """
    Prints a formatted JSON to the output.

    Pretty-printed (optionally colored) on a terminal, compact otherwise.
    """
    if not sys.stdout.isatty():
        print(json.dumps(data, separators=(',', ':')))
    else:
        print_c(format_json(data), color)
def format_json(data):
    """
    Returns a human-formatted JSON string (sorted keys, 2-space indent).
    """
    pretty = json.dumps(data, sort_keys=True, indent=2, separators=(',', ': '))
    return pretty
def format_timedelta(seconds, lookup=None, sep=', '):
    """
    Formats a timedelta into a human readable expanded format with a precision up to microsecond.

    :param seconds: duration in seconds (may be fractional)
    :param lookup: optional unit table; each row needs 'divider', 'format',
                   'unit', 'units' and 'value' keys.  Defaults to us..years.
                   NOTE: rows are mutated in place.
    :param sep: separator between unit fragments
    :return: e.g. "1 min, 1 sec"; "0s" for a zero duration

    Bug fix: the default table was assigned to a misspelled local
    (``loopkup``), so the *lookup* parameter was ignored and passing a
    custom table raised NameError.
    """
    if lookup is None:
        lookup = [
            {'divider': 1, 'format': '{0:.0f} {1}', 'unit': 'us', 'units': 'us', 'value': None},
            {'divider': 1000, 'format': '{0:.0f} {1}', 'unit': 'ms', 'units': 'ms', 'value': 0},
            {'divider': 1000, 'format': '{0:.0f} {1}', 'unit': 'sec', 'units': 'secs', 'value': 0},
            {'divider': 60, 'format': '{0:.0f} {1}', 'unit': 'min', 'units': 'mins', 'value': 0},
            {'divider': 60, 'format': '{0:.0f} {1}', 'unit': 'hour', 'units': 'hours', 'value': 0},
            {'divider': 24, 'format': '{0:.0f} {1}', 'unit': 'day', 'units': 'days', 'value': 0},
            {'divider': 7, 'format': '{0:.0f} {1}', 'unit': 'week', 'units': 'weeks', 'value': 0},
            {'divider': 4.348214, 'format': '{0:.0f} {1}', 'unit': 'month', 'units': 'months', 'value': 0},
            {'divider': 12, 'format': '{0:.0f} {1}', 'unit': 'year', 'units': 'years', 'value': 0},
        ]
    # Cascade the microsecond total up the table, leaving each row's remainder.
    for i, current in enumerate(lookup):
        if i == 0:
            current.update({'value': round(seconds * 1E+6)})
        else:
            previous = lookup[i - 1]
            current.update({'value': math.floor(previous['value'] / current['divider'])})
            previous.update({'value': previous['value'] - current['value'] * current['divider']})
    # Assemble fragments largest-unit-first (rows iterate smallest-first).
    output = ""
    for entry in lookup:
        if entry['value'] != 0:
            unit = entry['unit'] if entry['value'] == 1 else entry['units']
            fragment = entry['format'].format(entry['value'], unit)
            output = fragment if output == "" else fragment + sep + output
    if output == "":
        return "0s"
    return output
def format_filesize(num, suffix='B'):
    """
    Human-readable file size using binary (Ki/Mi/...) prefixes.
    See: https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    """
    value = num
    for prefix in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, prefix, suffix)
        value /= 1024.0
    return "%.1f%s%s" % (value, 'Yi', suffix)
# vim: ft=python:ts=4:sw=4
# Repository: Hieronymus98/Distributed-Batteryless-Microphone
from inspect import currentframe, getframeinfo
import numpy as np
import matplotlib.pyplot as plt
import json
plt.style.use('seaborn-ticks')
#-----------------------------------------------------------------
###### ####### #
# # # #### ##### # # #### # # #####
# # # # # # # # # # # # # #
###### ##### # # # # # # # ###### #
# # # # # ##### # # # ### # # #
# # # # # # # # # # # # # #
# # # #### # # ####### # #### # # #
# ----------------------------------------------------------------
# Run for RF data (False selects the light-harvesting data paths)
rf_flag = False
# on or off time (True plots on-times, False plots off-times)
ontime_flag = True
# To disable debugging set it to False
# To print all debugging info set the second entry to 0
# To print a specific message set its id
DEBUG = [False, 77]
def print_debug_info(label, msg, id):
    """Print *label* and *msg* when debugging is enabled and the id matches.

    Controlled by the module-level ``DEBUG = [enabled, selected_id]`` switch;
    a selected_id of 0 prints every message.
    """
    if not DEBUG[0]:
        return
    if DEBUG[1] in (0, id):
        print(label)
        print(msg)
def load_cis_data(path):
    """Read one JSON document per line from *path* and return them as a list."""
    records = []
    with open(path) as handle:
        for raw in handle:
            print_debug_info("+++ cis len", len(raw), getframeinfo(currentframe()).lineno)
            records.append(json.loads(raw))
    return records
def get_unlabelled_data(data):
    """Strip the leading label from each row, keeping only the payload column."""
    # Inlines the private column-extraction helper for column index 1.
    return [row[1] for row in data]
def __get_column(data, idx):
    """Extract column *idx* from a row-major list of rows."""
    return [row[idx] for row in data]
def boxplot_color_fontsize(box, color, fs):
    """Apply *color* and line width *fs* to every artist of a boxplot dict."""
    for artists in box.values():
        for artist in artists:
            artist.set_color(color)
            artist.set_linewidth(fs)
def main():
    """Load on/off-time JSON data for one node set, box-plot it, save as EPS.

    Input source and metric are selected by the module flags ``rf_flag``
    and ``ontime_flag``.
    """
    fontSize=16
    # Choose input files for the configured data source.
    if rf_flag:
        ontime_path = '../processed_data/intermittent_nodes_ontimesRF.json'
        offtime_path = '../processed_data/intermittent_nodes_offtimesRF.json'
    else:
        ontime_path = '../processed_data/paper_intermittent_nodes_ontimes680.json'
        offtime_path = '../processed_data/paper_intermittent_nodes_offtimes680.json'
    # Data Layout in a file
    ## ['label', [[1,2],[1,3,4,5,],[]...]]
    ## ['label', [[4l,2],[1],[9,3,4]...]]
    # Get the raw data of cis of 8 nodes
    ontime_data = load_cis_data(ontime_path)
    print_debug_info('+++ on-time raw data', ontime_data, getframeinfo(currentframe()).lineno)
    offtime_data = load_cis_data(offtime_path)
    print_debug_info('+++ off-time raw data', offtime_data, getframeinfo(currentframe()).lineno)
    # Get the data without the labels; index picks the relevant record set.
    unlabelled_ontime_data = get_unlabelled_data(ontime_data)[4 if rf_flag else 2]
    print_debug_info('+++ unlabelled ontime data', unlabelled_ontime_data,getframeinfo(currentframe()).lineno )
    unlabelled_offtime_data = get_unlabelled_data(offtime_data)[4 if rf_flag else 2]
    print_debug_info('+++ unlabelled offtime data', unlabelled_offtime_data, getframeinfo(currentframe()).lineno)
    if rf_flag:
        # Drops the 4th node for RF runs -- presumably a bad capture; confirm.
        del unlabelled_ontime_data[3]
        del unlabelled_offtime_data[3]
    # else:
    #     del unlabelled_ontime_data[0]
    #     del unlabelled_offtime_data[0]
    if ontime_flag:
        data = unlabelled_ontime_data
    else:
        data = unlabelled_offtime_data
    fig = plt.figure(figsize=(8,4))
    color_list = ["#66a61e" , '#e7298a', '#7570b3', '#d95f02', '#1b9e77', '#fc8d59']
    boxs = plt.boxplot(data, showfliers=False)
    boxplot_color_fontsize(boxs,color_list[3], 1.5)
    plt.gca().grid(True, axis='y')
    if ontime_flag:
        plt.ylabel("On time (sec)", fontsize=fontSize)
    else:
        plt.ylabel("Off time (sec)", fontsize=fontSize)
    plt.xlabel("Node ID", fontsize=fontSize)
    plt.yticks(fontsize=fontSize-2)
    plt.xticks(fontsize=fontSize-2)
    plt.tight_layout()
    # Save under a name matching the configured data source and metric.
    if rf_flag:
        if ontime_flag:
            plt.savefig('../../paper/figures/rf_on_time.eps')
        else:
            plt.savefig('../../paper/figures/rf_off_time.eps')
    else:
        if ontime_flag:
            plt.savefig('../../paper/figures/light_on_time.eps')
        else:
            plt.savefig('../../paper/figures/light_off_time.eps')
    plt.show()
if __name__=="__main__":
    main()
# --- end of file section ---
# GitHub stars: 0
# coding=utf-8
""" dummyF.py
"""
from __future__ import print_function
import xml.etree.ElementTree as ET
import sys, re,codecs
dandas = {'hk':'|','slp1':'.','deva':'।'}
ddandas = {'hk':'||','slp1':'..','deva':'॥'}
dandaregexes= {'hk':r'([|]+)', 'slp1':r'([.]+)', 'deva': r'([।॥])'}
def get_L_from_D(line):
    """
    Assume line contains one or more <Dn>
    Return list of all n (as strings)
    """
    return [m.group(1) for m in re.finditer(r'<D([0-9]+)>', line)]
def get_A_from_D(line):
    """
    Assume line contains one or more <An>
    Return list of all n (as strings)
    """
    return [m.group(1) for m in re.finditer(r'<A([0-9]+)>', line)]
def get_D_from_F(line):
    """Parse a leading '<F>n n ...)' reference list; [] when absent."""
    m = re.search(r'^<F>([0-9. ]+)\)', line)
    if m is None:
        return []
    body = re.sub(r'[ .]*$', '', m.group(1))  # drop trailing space/period
    return re.split(r'[ .]+', body)
def get_D_from_V(line):
    """Parse a leading '<V>n. ' verse number; [] when absent."""
    m = re.search(r'^<V>([0-9]+)[.] ', line)
    return [m.group(1)] if m else []
class Entry(object):
    """One logical dictionary entry: its line-groups plus a page reference."""
    def __init__(self, grouplist, page):
        self.groups = grouplist
        self.page = page  # page reference (1.n) where <S> starts
        # Derive the per-group tag sequence and collect L-numbers; a single
        # D-group may carry several <Dx> markers (e.g. '<D145> 146.').
        tags = []
        Ls = []
        for group in self.groups:
            first = group[0]
            m = re.search(r'^<(.*?)>', first)
            if m is None:
                tags.append('X')
                continue
            tag = m.group(1)
            if tag.startswith('D'):
                tags.append('D')
                Ls.extend(get_L_from_D(first))
            else:
                tags.append(tag)
        self.Ls = Ls
        self.tags = tags
def xml_header(xmlroot):
    """Return the XML declaration, DOCTYPE, source comment and opening root
    tag as a list of stripped, non-empty lines."""
    raw = """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE %s SYSTEM "%s.dtd">
<!-- <H> Boehtlingk, Indische Sprüche, 2. Auflage, St. Petersburg 1870 -->
<%s>
""" % (xmlroot, xmlroot, xmlroot)
    return [ln.strip() for ln in raw.splitlines() if ln.strip() != '']
def generate_groups(lines):
    """Yield maximal runs of non-blank lines (groups separated by blanks)."""
    total = len(lines)
    pos = 0
    while pos < total:
        # Skip the blank separator run.
        while lines[pos].strip() == '':
            pos += 1
            if pos == total:
                return
        # Gather the following block of non-blank lines.
        block = []
        while lines[pos].strip() != '':
            block.append(lines[pos])
            pos += 1
            if pos == total:
                break
        yield block
def entrysummary(entry):
    """One-line summary of an entry: its L-number(s), tag sequence and page."""
    label = ','.join(entry.Ls) if entry.Ls else '?'
    return 'L=%s: %s %s' % (label, ','.join(entry.tags), entry.page)
def make_xml_S(group,entry,tranin):
    """Build an <S> (Sanskrit verse) element from a group of raw lines.

    Joins the lines, strips page markers and <br/>, warns when the verse
    does not end in a double danda, then re-splits the text so each output
    line ends with its danda.  *tranin* selects the transliteration scheme
    ('hk', 'slp1' or 'deva') used to look up the danda characters.
    """
    # group is a list of lines
    # get danda and doubledanda for this transliteration
    danda = dandas[tranin]
    ddanda = ddandas[tranin]
    dandaregex = dandaregexes[tranin]
    dbg = True
    outarr = []
    text = ' '.join(group)
    # remove page if any. If it occurs, is it always at the end?
    pb = None
    m = re.search(r' *\[Seite([0-9][.][0-9]+)\] *$',text)
    if m:
        pb = m.group(0)
        text = re.sub(r'\[Seite([0-9][.][0-9]+)\] *$','',text)
        if '[Seite' in text:
            print('make_xml_S WARNING 1 Seite:',entrysummary(entry))
    text = text.rstrip()
    # 09-01-2021. Remove gratuitous <br/> tag
    text = text.replace('<br/>',' ')
    # expect ddanda at end
    # A very few have a third line ending in single danda (D=1026, 1027)
    if not text.endswith(ddanda):
        if text.endswith(danda):
            print('make_xml_S WARNING 2a %s:' % 'danda', entrysummary(entry))
        else:
            print('make_xml_S WARNING 2b %s:' % 'ddanda',entrysummary(entry))
            print('text=',text)
            print()
    # generate lines, with each line ending in | or ||
    # remove initial <S>
    text = re.sub(r'^<S> *','',text)
    # note cases with {# or #} in Sanskrit text.
    # These will need to be reformated
    if '#' in text:
        print('# in S: %s' % entrysummary(entry))
    # reformat lines so danda at end
    #parts = re.split(r'([|]+)',text)
    parts = re.split(dandaregex,text)
    lines = []
    for part in parts:
        if part in (danda,ddanda):
            # A danda token is appended to the preceding text line.
            lines[-1] = lines[-1] + ' ' + part
        else:
            part = part.strip()
            if part != '':
                part = re.sub(r' +',' ',part) # single-space separation
                lines.append(part)
    # reattach pb text to end, if present
    if pb != None:
        lines[-1] = lines[-1] + pb
    outarr.append(' <S>')
    for line in lines:
        outarr.append(' '+line)
    outarr.append(' </S>')
    return outarr
def make_xml_S1(group,entry):
    """Alternative <S> builder: assumes an earlier pass (boesp-1) already
    split the verse lines, so only <S>, [Seite...] page refs and <br/>
    remain to be stripped from each line."""
    # group is a list of lines
    # For S, assume boesp-1 has already formatted the lines
    # except for <S> and [Seite] and <br/>
    group1 = []
    for text in group:
        m = re.search(r'\[Seite([0-9][.][0-9]+)\] *$',text)
        if m:
            pb = m.group(0)  # NOTE(review): captured but never reattached here
            text = re.sub(r'\[Seite([0-9][.][0-9]+)\] *$','',text)
            if '[Seite' in text:
                print('make_xml_S WARNING 1 Seite:',entrysummary(entry))
        text = text.replace('<br/>',' ')
        text = re.sub(r'^<S> *','',text)
        if '#' in text:
            print('# in S: %s' % entrysummary(entry))
        text = text.strip()
        group1.append(text)
    lines = group1
    outarr = []
    outarr.append(' <S>')
    for line in lines:
        outarr.append(' '+line)
    outarr.append(' </S>')
    return outarr
def curly_to_s(text):
    """Convert the '{#...#}' Sanskrit markup delimiters to <s>...</s> tags."""
    return text.replace('{#', '<s>').replace('#}', '</s>')
def make_xml_D(group,entry):
    """Build a <D n="..." a="..."> (translation) element, re-wrapping the
    text to roughly 60-character lines.  n/a attributes come from the
    <Dx>/<Ax> markers on the group's first line."""
    # group is a list of lines
    outarr = []
    text = ' '.join(group)
    Ls = get_L_from_D(group[0])
    As = get_A_from_D(group[0])
    # remove the D and A tags
    text = re.sub(r'<D.*?>','',text)
    text = re.sub(r'<A.*?>','',text)
    # will have <D n="" a="">
    #if '{#' in text:
    #    print('D WARNING {#: %s' %entrysummary(entry))
    # <F> occurs once in D 1475
    text = re.sub(r'<F>','Fussnote ',text)
    text = text.strip()
    text = curly_to_s(text)
    # Greedy wrap: accumulate words until ~60 characters (spaces not counted).
    parts = re.split(r' +',text)
    nc = 0
    lines = []
    words = []
    for part in parts:
        ncp = len(part)
        if (nc + ncp) < 60:
            words.append(part)
            nc = nc + ncp
        else:
            line = ' '.join(words)
            lines.append(line)
            words = [part]
            nc = ncp
    # last line
    if words != []:
        line = ' '.join(words)
        lines.append(line)
    L = ','.join(Ls)
    A = ','.join(As)
    outarr.append(' <D n="%s" a="%s">' % (L,A))
    for line in lines:
        outarr.append(' '+line)
    outarr.append(' </D>')
    return outarr
def make_xml_F(group,entry):
    """Build a <F n="..."> footnote element, re-wrapped to ~60-char lines.

    The n attribute lists the D-numbers parsed from the leading '<F>...)'
    reference; a warning is printed when none can be parsed.
    """
    # group is a list of lines
    outarr = []
    text = ' '.join(group)
    Ds = get_D_from_F(group[0])
    if Ds == []:
        print('make_xml_F: WARNING: %s' %entrysummary(entry))
        print(text)
        print()
    # remove the F tag
    text = re.sub(r'^<F>([0-9. ]+)\)','',text)
    # will have <D n="<attrib>" >
    text = text.strip()
    text = curly_to_s(text)
    # Greedy wrap: accumulate words until ~60 characters (spaces not counted).
    parts = re.split(r' +',text)
    nc = 0
    lines = []
    words = []
    for part in parts:
        ncp = len(part)
        if (nc + ncp) < 60:
            words.append(part)
            nc = nc + ncp
        else:
            line = ' '.join(words)
            lines.append(line)
            words = [part]
            nc = ncp
    # last line
    if words != []:
        line = ' '.join(words)
        lines.append(line)
    attrib = ','.join(Ds)
    outarr.append(' <F n="%s">' % attrib)
    for line in lines:
        outarr.append(' '+line)
    outarr.append(' </F>')
    return outarr
def make_xml_V(group,entry):
    """Build a <V n="..."> variant element, re-wrapped to ~60-char lines.

    The n attribute is the verse number from the leading '<V>n. ' marker;
    a warning is printed when it cannot be parsed.
    """
    # group is a list of lines
    outarr = []
    text = ' '.join(group)
    Ds = get_D_from_V(group[0])
    if Ds == []:
        print('make_xml_V: WARNING: %s' %entrysummary(entry))
    # remove the V tag
    text = re.sub(r'^<V>([0-9]+)[.] ','',text)
    # will have <V n="<attrib>" >
    text = text.strip()
    text = curly_to_s(text)
    # Greedy wrap: accumulate words until ~60 characters (spaces not counted).
    parts = re.split(r' +',text)
    nc = 0
    lines = []
    words = []
    for part in parts:
        ncp = len(part)
        if (nc + ncp) < 60:
            words.append(part)
            nc = nc + ncp
        else:
            line = ' '.join(words)
            lines.append(line)
            words = [part]
            nc = ncp
    # last line
    if words != []:
        line = ' '.join(words)
        lines.append(line)
    attrib = ','.join(Ds) # Ds has just one for V
    outarr.append(' <V n="%s">' % attrib)
    for line in lines:
        outarr.append(' '+line)
    outarr.append(' </V>')
    return outarr
def make_xml_HS(group, entry):
    """Emit a <HS> element.  The printed text is (apparently) always a
    single line, so no 60-character re-wrapping is applied."""
    text = re.sub(r'^<HS>', '', ' '.join(group)).strip()
    # Inline curly_to_s: {#..#} -> <s>..</s>.
    text = text.replace('{#', '<s>').replace('#}', '</s>')
    text = re.sub(r' +', ' ', text)  # collapse runs of spaces
    return [' <HS>', ' ' + text, ' </HS>']
def make_xml_H(group, entry):
    """Emit a <H> header element, re-wrapping the text to ~60-char lines
    (spaces are not counted toward the width)."""
    text = re.sub(r'^<H>', '', ' '.join(group)).strip()
    wrapped = []
    current = []
    width = 0
    for word in re.split(r' +', text):
        if width + len(word) < 60:
            current.append(word)
            width += len(word)
        else:
            wrapped.append(' '.join(current))
            current = [word]
            width = len(word)
    if current:
        wrapped.append(' '.join(current))
    return [' <H>'] + [' ' + ln for ln in wrapped] + [' </H>']
def make_xml_HS2(group, entry):
    """Emit a <HS2> element (only one known instance), re-wrapped to
    ~60-char lines (spaces are not counted toward the width)."""
    text = re.sub(r'^<HS2>', '', ' '.join(group)).strip()
    # Inline curly_to_s: {#..#} -> <s>..</s>.
    text = text.replace('{#', '<s>').replace('#}', '</s>')
    wrapped = []
    current = []
    width = 0
    for word in re.split(r' +', text):
        if width + len(word) < 60:
            current.append(word)
            width += len(word)
        else:
            wrapped.append(' '.join(current))
            current = [word]
            width = len(word)
    if current:
        wrapped.append(' '.join(current))
    return [' <HS2>'] + [' ' + ln for ln in wrapped] + [' </HS2>']
def make_xml_unknown(group, entry):
    """Fallback for unrecognized tags: wrap the raw lines in <X>...</X>."""
    return ['<X>'] + list(group) + ['</X>']
def test_S_prep(a):
    """Normalize strings for diffing: strip ends, collapse space runs.

    NOTE: a debug helper, not a unit test, despite the name.
    """
    # a is array of strings
    b = []
    for x in a:
        x = x.strip()
        x = re.sub(r' +',' ',x)
        b.append(x)
    return b
def test_S(outgroup,outgroup1,entry):
    """Debug helper: diff two <S> renderings, print the diff and exit.

    NOTE: not a unit test despite the name -- it terminates the process.
    """
    #compare two ways to compute the lines for <S>
    # use difflib
    lines = test_S_prep(outgroup)
    lines1 = test_S_prep(outgroup1)
    import difflib
    d = difflib.Differ()
    diff = d.compare(lines,lines1)
    print('\n' .join(diff))
    exit(1)
def entrylines(entry,tranin):
    """Render one Entry as XML lines, dispatching each group on its tag.

    Unknown tags fall through to make_xml_unknown (an <X> wrapper).
    """
    outarr = []
    outarr.append('<entry>')
    text = entrysummary(entry)
    outarr.append(' <info n="%s"/>' %text)
    for igroup,group in enumerate(entry.groups):
        tag = entry.tags[igroup]
        if tag == 'S':
            outgroup = make_xml_S(group,entry,tranin)
            #outgroup = make_xml_S1(group,entry)
            #outgroup1 = make_xml_S1(group,entry)
            #test_S(outgroup,outgroup1,entry)
        elif tag == 'D':
            outgroup = make_xml_D(group,entry)
        elif tag == 'F':
            outgroup = make_xml_F(group,entry)
        elif tag == 'V':
            outgroup = make_xml_V(group,entry)
        elif tag == 'HS':
            outgroup = make_xml_HS(group,entry)
        elif tag == 'HS2':
            outgroup = make_xml_HS2(group,entry)
        elif tag == 'H':
            outgroup = make_xml_H(group,entry)
        else:
            #print('unknown tag:',tag)
            outgroup = make_xml_unknown(group,entry)
        for x in outgroup:
            outarr.append(x)
    outarr.append('</entry>')
    return outarr
def updatepage(entry, page):
    """Return the page number from the first [SeiteV.P] marker found in the
    entry's groups; fall back to *page* when none is present."""
    for group in entry:
        for line in group:
            hit = re.search(r'\[Seite([0-9][.][0-9]+)\]', line)
            if hit:
                return hit.group(1)
    return page
def has_D(entry):
    """True when any group in *entry* opens with a <D...> tag."""
    return any(group[0].startswith('<D') for group in entry)
def generate_entries(lines):
    """Yield Entry objects built from blank-separated groups of *lines*.

    Groups before the '<H> Boehtlingk' header are skipped.  Once an entry
    already holds a <D> group, the next <S>/<HS> group starts a new entry.
    NOTE(review): if the header never appears, `entry` is unbound at the
    final yield -- assumed never to happen for this corpus; confirm.
    """
    ngroup = 0
    #nentry = 0
    firstfound = False
    page = '1.1'
    for group in generate_groups(lines):
        ngroup = ngroup+1
        # skip the groups until a condition is met
        if firstfound:
            if group[0].startswith(('<S>','<HS>')): # 10-17-2021. Added HS
                if (entry != []):
                    if has_D(entry):
                        # we're starting a new entry.
                        # First, finish the prior entry
                        e = Entry(entry,page)
                        page = updatepage(entry,page) # for next entry
                        yield e
                        entry = [group] # start a new group
                    else:
                        # an S or HS appended to entry without a D-group yet
                        entry.append(group)
            else:
                entry.append(group)
        elif group[0].startswith('<H> Boehtlingk'):
            firstfound = True
            entry = []
    # Flush the final accumulated entry.
    yield Entry(entry,page)
def xml_body(entries, tranin):
    """Render every entry to XML lines; print the number of entries seen."""
    body = []
    count = 0
    for entry in entries:
        body.extend(entrylines(entry, tranin))
        count += 1
    print(count, 'entries found')
    return body
def check_L(entries):
    """Check that entry L-numbers increase by exactly one; print suggested
    <DN> line repairs for the first 5 sequencing problems, then stop."""
    # check sequencing of L-values of entries. Print aberrations
    Lprev = None
    nprob = 0
    for ientry,entry in enumerate(entries):
        Ls = entry.Ls
        #print('Ls=',Ls)
        if Ls == []:
            print('ERROR: No L. previous = ',Lprev)
            continue
        L = int(Ls[0])
        if ientry == 0:
            Lprev = L
        elif L != (Lprev + 1):
            print('Sequencing problem at entry %s: %s should be %s'%(ientry+1,L,Lprev+1))
            # sequence comes from <DN>, in first line of second group of entry
            dgroup = entry.groups[1]
            old = dgroup[0]
            dold = '<D%s>' % L
            Lnew = Lprev+1
            dnew = '<D%s>' % Lnew
            new = old.replace(dold,dnew)
            print(old)
            print(new)
            print()
            Lprev = int(Ls[-1])
            nprob = nprob + 1
            if nprob == 5:
                print('quitting after 5 problems')
                return
        else:
            Lprev = int(Ls[-1])
def check_tagsequence(entries):
    """Print how often each distinct tag sequence occurs across entries."""
    counts = {}
    for entry in entries:
        seq = ','.join(entry.tags)
        counts[seq] = counts.get(seq, 0) + 1
    for seq in counts.keys():
        print('%04d %s' % (counts[seq], seq))
def check_tagfreq(entries):
    """Print how often each individual tag occurs across all entries."""
    freq = {}
    for entry in entries:
        for tag in entry.tags:
            freq[tag] = freq.get(tag, 0) + 1
    for tag in freq.keys():
        print('%04d %s' % (freq[tag], tag))
def check_page(entries):
    """Report entries whose page number is neither repeated nor incremented
    by one relative to the previous entry; print a problem count."""
    nprob = 0
    for ientry, entry in enumerate(entries):
        _vol, p = entry.page.split('.')
        if ientry == 0:
            p0 = p
        elif p0 == p:
            pass
        elif int(p) == int(p0) + 1:
            p0 = p
        else:
            print('page problem: ', entrysummary(entry), ' p0=%s, p=%s' % (p0, p))
            nprob += 1
    print('check_page found %s problems' % nprob)
def check_san(entries):
    """Report groups whose '{#' and '#}' Sanskrit delimiters are unbalanced;
    print a problem count."""
    nprob = 0
    for entry in entries:
        for group in entry.groups:
            text = ' '.join(group)
            opens = len(re.findall('{#', text))
            closes = len(re.findall('#}', text))
            if opens != closes:
                print('unbalanced {#..#} ', entrysummary(entry))
                for line in group:
                    print(line)
                print()
                nprob += 1
    print('check_san found %s problems' % nprob)
def statistics(entries):
    """Run consistency checks over the parsed entries.

    NOTE(review): the exit(1) after check_san is a debugging short-circuit;
    every check below it is currently unreachable.
    """
    check_san(entries)
    exit(1)
    check_L(entries)
    # check_tagsequence(entries)
    check_tagfreq(entries)
    check_page(entries)
    #check_san(entries)
def entries_HS_adjust(entries):
    """ HS entries are known to occur at the END of groups
        However, they seem to belong to the NEXT group.
        This routine makes the blanket change of entries thus indicated.
        That is, if an entry ends with one or more HS items, then we
        remove these and put them at the beginning of the next entry.

        Mutates the Entry objects (groups, tags, page) in place.
    """
    dbg = False
    for ientry,entry in enumerate(entries):
        oldtags = entry.tags
        ntags = len(oldtags)
        hsend = ntags - 1
        if 'HS' != oldtags[hsend]:
            continue
        #oldgroups = entry.groups not used
        # Scan backwards to the first index of the trailing HS run.
        while True:
            hsend1 = hsend - 1
            if oldtags[hsend1] == 'HS':
                hsend = hsend1
            else:
                break
        # So when hsend <= idx < ntags, tags[idx] = HS
        idxkeep = [i for i in range(len(entry.groups)) if i < hsend]
        idxdrop = [i for i in range(len(entry.groups)) if hsend <= i]
        groups = [entry.groups[i] for i in idxkeep]
        tags = [entry.tags[i] for i in idxkeep]
        #
        groups1 = [entry.groups[i] for i in idxdrop]
        tags1 = [entry.tags[i] for i in idxdrop]
        # change entry.groups and tags
        entry.groups = groups
        entry.tags = tags
        # now also modify the next entry
        ientry1 = ientry+1
        if ientry1 == len(entries):
            # Nowhere to move the trailing HS run: last entry in the list.
            print('entries_HS_adjust anomaly:',entrysummary(entry))
            continue
        entry1 = entries[ientry+1]
        if dbg: print('old1:',ientry1,entrysummary(entry1))
        entry1.groups = groups1 + entry1.groups
        entry1.tags = tags1 + entry1.tags
        # The page number for the entry1 should be that for entry
        entry1.page = entry.page # only line changed
        entries[ientry1] = entry1
def read_lines(filein):
    """Read *filein* as UTF-8 and return its lines without trailing newlines."""
    with codecs.open(filein, encoding='utf-8', mode='r') as handle:
        lines = [row.rstrip('\r\n') for row in handle]
    print(len(lines), "lines read from", filein)
    return lines
def parse_gtag(gtag):
    """Classify a group tag like '<D12>' into a (type, number) pair.

    The number is a string for D/F/V tags and the int 0 for S/H(S) tags;
    an unrecognized tag aborts the program.
    """
    kind = gtag[1]  # the character after '<'
    if kind == 'D':
        num = re.search(r'<D([0-9]+)>', gtag).group(1)
    elif kind == 'F':
        # F references may be dotted (X.Y.Z); keep only the first component.
        whole = re.search(r'<F>([0-9.]+)\)', gtag).group(1)
        num = whole.split('.')[0]
    elif kind == 'S':
        num = 0
    elif kind == 'H':
        # could be either <H> (header) or <HS>
        num = 0
    elif kind == 'V':
        # V1, V2, V3
        m = re.search('<(V[123])>([0-9]+)[.] ', gtag)
        kind = m.group(1)
        num = m.group(2)
    else:
        print('parse_gtag ERROR: gtag=', gtag)
        exit(1)
    return (kind, num)
def check_gtypes(gtypes):
    """Verify F and V1..V3 references all point at D-numbers present in the
    same entry; print and return False on any mismatch."""
    pairs = [parse_gtag(g) for g in gtypes]
    dnums = {num for kind, num in pairs if kind == 'D'}  # python set
    ok = True
    fnums = {num for kind, num in pairs if kind == 'F'}
    if not fnums.issubset(dnums):
        print('F problem', gtypes)
        ok = False
    for vtag in ['V1', 'V2', 'V3']:
        vnums = {num for kind, num in pairs if kind == vtag}
        if not vnums.issubset(dnums):
            print(vtag, 'problem', gtypes)
            ok = False
    return ok
def checkgroups(entries, fileout):
    """Classify each entry's groups by tag type, validate them, write a report.

    For every entry, the first line of each group is reduced to a one-letter
    type code, expanded via expand_gtype, and the whole sequence is validated
    with check_gtypes.  One comma-joined line per entry goes to `fileout`,
    suffixed with '[gtype problem]' when validation fails.
    """
    nprob = 0  # count of entries that failed validation
    outarr = []
    regexstart = r'^<(.)'
    for entry in entries:
        gtypes = []
        for group in entry.groups:
            line = group[0]  # first line of the group carries the tag
            m = re.search(regexstart, line)
            if m is None:  # bugfix: compare to None with `is`, not `==`
                gtype = '?'
            else:
                gtype = m.group(1)
            gtype1 = expand_gtype(line, gtype)
            gtypes.append(gtype1)
        flag = check_gtypes(gtypes)
        x = ','.join(gtypes)
        if flag:
            outarr.append(x)
        else:
            y = '[gtype problem]'
            outarr.append(x + ' ' + y)
            nprob = nprob + 1
    with codecs.open(fileout, "w", "utf-8") as f:
        for out in outarr:
            f.write(out + '\n')
    print(len(outarr), 'groups written to', fileout)
def markdup(entries, fileout):
    """Report verses ('S' groups) whose first line appears in multiple entries.

    Builds a map from the first line of each S-tagged group to the entry
    indices containing it, writes the Ls labels of every duplicated verse to
    `fileout` (one '; L ; L ...' line per verse), and returns the records.
    (bugfix: removed the dead `firstlines` computation that was never used.)
    """
    n = 0  # total number of S groups encountered
    d = {}  # S-group first line -> list of entry indices containing it
    for ientry, entry in enumerate(entries):
        groups = entry.groups
        for igroup, group in enumerate(groups):
            if entry.tags[igroup] == 'S':
                n = n + 1
                firstline = group[0]
                if firstline not in d:
                    d[firstline] = []
                d[firstline].append(ientry)
    # entry-index lists for verses that occur more than once
    ientry_multiple = [entrylist for entrylist in d.values() if len(entrylist) != 1]
    outrecs = []
    for ientries in ientry_multiple:
        outarr = []
        Ls_arr = [','.join(entries[ientry].Ls) for ientry in ientries]
        s = ' ; '.join(Ls_arr)
        outarr.append('; %s' % s)
        outrecs.append(outarr)
    with codecs.open(fileout, "w", "utf-8") as f:
        for outarr in outrecs:
            for out in outarr:
                f.write(out + '\n')
    print('see output in', fileout)
    print(n, 'S groups')
    print(len(ientry_multiple), 'verses appearing more than once')
    return outrecs
def parse_f(xarr):
    """Expand abbreviated trailing numbers against the first (base) number.

    E.g. ['123', '4', '5'] -> ['123', '124', '125']: each short suffix
    replaces the tail of the base, and the expanded values must count up
    by one from the base (asserted).
    """
    base = xarr[0]
    expanded = [base]
    for offset, suffix in enumerate(xarr[1:], start=1):
        candidate = base[0:-len(suffix)] + suffix
        expanded.append(candidate)
        # sanity: expansion must continue the sequence base, base+1, ...
        assert int(candidate) == int(base) + offset
    return expanded
def addlines(lines):
    """Expand compound '<F>a.b)' blocks by appending one DUMMY block per number.

    A line like '<F>3.4) ...' starts a compound F-group; once its body ends
    (at the first blank line) a placeholder '<F>n) DUMMY· ' block is emitted
    for every expanded number from parse_f.  Simple '<F>n)' lines and all
    other lines pass through unchanged.
    """
    result = []
    inside_fgroup = False
    pending_fvals = None
    ncompound = 0
    for line in lines:
        if inside_fgroup:
            result.append(line)
            if line.strip() == '':  # blank line terminates the F-group body
                # append the extra F blocks for the remaining numbers
                for fval in parse_f(pending_fvals):
                    result.append('<F>%s) DUMMY· ' % fval)
                    result.append(' ')
                ncompound = ncompound + 1
                inside_fgroup = False
            continue
        # Not inside an F-block: look for the start of one
        match = re.search(r'^<F>([0-9.]+)\)', line)
        result.append(line)
        if match is None:
            continue
        parts = match.group(1).split('.')
        if len(parts) > 1:
            # first line of a compound F-group
            pending_fvals = parts
            inside_fgroup = True
    assert inside_fgroup == False
    print(ncompound, 'compound F-blocks handled')
    print(len(result), 'prepared for output')
    return result
def multiF(lines):
    """Scan lines for '<F>number' tags.

    NOTE(review): this function looks unfinished — the match result is never
    stored, so the returned dict is always empty.  The original also called
    re.search without the subject string, raising TypeError on any non-empty
    input; that missing argument is fixed here.  Populating `d` is left to
    whoever completes the feature.
    """
    d = {}
    for line in lines:
        m = re.search(r'<F>[0-9.]+', line)  # bugfix: pass the line to search
    return d
if __name__ == "__main__":
    # Usage: python <script> <input-utf8-file> <output-file>
    infile = sys.argv[1]  # boesp_utf8.txt
    outfile = sys.argv[2]
    source_lines = read_lines(infile)
    prepared = addlines(source_lines)
    with codecs.open(outfile, "w", "utf-8") as handle:
        for text in prepared:
            handle.write(text + '\n')
    print(len(prepared), 'lines written to', outfile)
|
<gh_stars>1-10
"""
Reference. https://gist.github.com/jakemmarsh/8273963
"""
# Toggle to True for verbose tracing from the BST routines.
outputdebug = False
def debug(msg):
    """Print *msg* only when the module-level `outputdebug` flag is set."""
    if not outputdebug:
        return
    print(msg)
class Node(object):
    """A single BST node: a value plus optional left/right children."""
    def __init__(self, val):
        self.val = val
        self.leftChild = None
        self.rightChild = None
    def get(self):
        """Return the stored value."""
        return self.val
    def set(self, val):
        """Overwrite the stored value."""
        self.val = val
    def getChildren(self):
        """Return the non-None children as a list (left child first)."""
        return [child for child in (self.leftChild, self.rightChild)
                if child is not None]
class BST(object):
    """Binary search tree of comparable values (duplicates inserted left).

    Non-root nodes are deleted through their parent inside ``deleteNode``;
    a matching root is handled directly in ``delete`` so that ``self.root``
    can actually be rebound (bugfix: the original rebound a local variable,
    which silently left the root in place).
    """
    def __init__(self):
        self.root = None
    def setRoot(self, val):
        """Replace the root with a fresh node holding ``val``."""
        self.root = Node(val)
    def insert(self, val):
        """Insert ``val``; equal values go into the left subtree."""
        if self.root is None:
            self.setRoot(val)
        else:
            self.insertNode(self.root, val)
    def insertNode(self, currentNode, val):
        """Recursively locate the attachment point below ``currentNode``."""
        if val <= currentNode.val:
            if currentNode.leftChild:
                self.insertNode(currentNode.leftChild, val)
            else:
                currentNode.leftChild = Node(val)
        elif val > currentNode.val:
            if currentNode.rightChild:
                self.insertNode(currentNode.rightChild, val)
            else:
                currentNode.rightChild = Node(val)
    def find(self, val):
        """Return True when ``val`` is present in the tree."""
        return self.findNode(self.root, val)
    def findNode(self, currentNode, val):
        """Recursive helper for ``find``."""
        if currentNode is None:
            return False
        elif val == currentNode.val:
            return True
        elif val < currentNode.val:
            return self.findNode(currentNode.leftChild, val)
        else:
            return self.findNode(currentNode.rightChild, val)
    def delete(self, val):
        """Remove one node holding ``val``; True when a node was removed.

        The root match is handled here so ``self.root`` can be reassigned;
        everything else is delegated to ``deleteNode``.
        """
        if self.root is None:
            return None
        if self.root.val == val:
            rootChildren = self.root.getChildren()
            if len(rootChildren) == 0:
                debug("Root, Non-Child case")
                self.root = None  # bugfix: actually detach the root
            elif len(rootChildren) == 1:
                debug("Root, a Child case")
                self.root = rootChildren[0]  # bugfix: promote the only child
            else:
                debug("Root, two children case")
                successorValue = self.findSuccessorNode(self.root).val
                self.delete(successorValue)
                self.root.val = successorValue
            return True
        return self.deleteNode(self.root, val)
    def findPrecessorNode(self, currentNode):
        """
        Find the in-order predecessor: the largest valued node in the LEFT
        subtree (bugfix: the docstring previously claimed the RIGHT child).
        """
        node = currentNode.leftChild
        # just a sanity check
        if node is not None:
            while node.rightChild is not None:
                debug("RS: traversing: " + str(node.val))
                node = node.rightChild
        return node
    def findSuccessorNode(self, currentNode):
        """
        Find the in-order successor: the smallest valued node in the RIGHT
        subtree.
        """
        node = currentNode.rightChild
        # just a sanity check
        if node is not None:
            while node.leftChild is not None:
                debug("LS: traversing: " + str(node.val))
                node = node.leftChild
        return node
    def deleteNode(self, currentNode, val):
        """Delete ``val`` from the subtree rooted at ``currentNode``.

        Matching children are unlinked through ``currentNode`` (their
        parent).  Returns True when a node was removed, False when ``val``
        is absent from the subtree.
        """
        # bugfix: guard before attribute access — the original dereferenced
        # currentNode.leftChild before its None check and crashed when the
        # search fell off a leaf instead of reporting "not found".
        if currentNode is None:
            return False
        LeftChild = currentNode.leftChild
        RightChild = currentNode.rightChild
        debug(f"current value: {currentNode.val},target: {val}")
        if currentNode.val == val:
            # Only reachable when deleteNode is called directly with a
            # matching node; ``delete`` intercepts a matching root first.
            currentChild = currentNode.getChildren()
            if len(currentChild) == 0:
                debug("Root, Non-Child case")
                currentNode = None
                return True
            elif len(currentChild) == 1:
                debug("Root, a Child case")
                currentNode = currentChild[0]
                return True
            else:
                debug("Root, two children case")
                successorValue = self.findSuccessorNode(currentNode).val
                self.delete(successorValue)
                currentNode.val = successorValue
                return True
        if LeftChild is not None and val == LeftChild.val:
            LeftChildChild = LeftChild.getChildren()
            if len(LeftChildChild) == 0:
                debug("Left, Non-Child case")
                currentNode.leftChild = None
            elif len(LeftChildChild) == 1:
                debug("Left, a Child case")
                currentNode.leftChild = LeftChildChild[0]
            else:
                debug("Left, two children case")
                successorValue = self.findSuccessorNode(LeftChild).val
                self.delete(successorValue)
                currentNode.leftChild.val = successorValue
            return True
        if RightChild is not None and val == RightChild.val:
            RightChildChild = RightChild.getChildren()
            if len(RightChildChild) == 0:
                debug("Right, Non-Child case")
                currentNode.rightChild = None
            elif len(RightChildChild) == 1:
                debug("Right, a Child case")
                currentNode.rightChild = RightChildChild[0]
            else:
                debug("Right, two children case")
                successorValue = self.findSuccessorNode(RightChild).val
                self.delete(successorValue)
                currentNode.rightChild.val = successorValue
            return True
        # Move Child Node
        if val < currentNode.val:
            debug("Go to Left")
            return self.deleteNode(currentNode.leftChild, val)
        else:
            debug("Go to Right")
            return self.deleteNode(currentNode.rightChild, val)
    def traverse(self):
        """Return all values in sorted (in-order) order; [] for an empty tree."""
        return self.traverseNode(self.root)
    def traverseNode(self, currentNode):
        """Recursive in-order walk.

        bugfix: the original dereferenced currentNode.leftChild before its
        None check, so traversing an empty tree raised AttributeError.
        """
        if currentNode is None:
            return []
        result = []
        result.extend(self.traverseNode(currentNode.leftChild))
        result.append(currentNode.val)
        result.extend(self.traverseNode(currentNode.rightChild))
        return result
# Usage example
if __name__ == "__main__":
    tree = BST()
    print("----- Inserting -------")
    # inlist = [5, 2, 12, -4, 3, 21, 19, 25]
    values = [7, 5, 2, 6, 3, 4, 1, 8, 9, 0]
    for value in values:
        tree.insert(value)
    print(tree.traverse())
    import copy
    print("----- Deleting -------")
    scratch = copy.deepcopy(tree)
    del_list = scratch.traverse()
    for value in del_list:
        print(f"delete {value}")
        scratch.delete(value)
        print(scratch.traverse())
    scratch = copy.deepcopy(tree)
|
# COS738 Assignment 3
# <NAME>, 3869003
import numpy as np
from numpy.fft import fft2, ifft2
import tifffile
from PIL import Image
class StegCrypt(object):
    """
    Information Hiding with Data Diffusion using Convolutional
    Encoding for Super-encryption. Hides an image, the 'Plaintext',
    in a chosen camouflage image, the 'Covertext' to produce a
    'Stegotext' image. Also performs decryption of the stegotext
    image using the input covertext as a key.
    Code adapted from:
    <NAME>., <NAME>., <NAME>. and Adolfo,
    C.M., 2017. Information Hiding with Data Diffusion
    Using Convolutional Encoding for Super-Encryption.
    """
    # Hidden utility functions
    def _key_generator(self, channel):
        """Derive a deterministic RNG seed from an image channel.

        The same covertext channel always yields the same key, so the
        receiver can regenerate the noise field used during diffusion.
        """
        channel = channel * 1e10  # amplify small pixel differences before summing
        # 32-bit integer required for random seed in numpy
        key = int(np.floor(np.sqrt(np.sum(channel ** 2)))
                  % 2 ** 32)
        return key
    def _hide_data(self, covertext, sdiffuse, c):
        """Embed the diffused data (scaled by ``c``) into the covertext spectrum."""
        return ifft2(c * fft2(sdiffuse) + fft2(covertext)).real
    def _recover_data(self, covertext, stegotext):
        """Extract the embedded data by spectral subtraction of the covertext."""
        return ifft2(fft2(stegotext) - fft2(covertext)).real
    def _image_diffusion(self, plaintext, covertext):
        """Diffuse the plaintext through the covertext spectrum (max-normalized)."""
        plaintext = fft2(plaintext)
        covertext = fft2(covertext)
        p = np.abs(covertext ** 2)
        p[p == 0] = 1.  # guard against division by zero in empty frequency bins
        diffuse = plaintext * covertext / p
        diffuse = ifft2(diffuse).real
        return diffuse / diffuse.max()
    def _inverse_image_diffusion(self, diffuse, covertext):
        """Invert ``_image_diffusion`` (result is max-normalized)."""
        diffuse = fft2(diffuse)
        covertext = fft2(covertext)
        plaintext = covertext.conj() * diffuse
        plaintext = ifft2(plaintext).real
        return plaintext / plaintext.max()
    def _stochastic_diffusion(self, diffuse, key):
        """Diffuse through a key-seeded random noise field (max-normalized)."""
        np.random.seed(key)
        arr_noise = fft2(np.random.rand(*diffuse.shape))
        p = np.abs(arr_noise ** 2)
        p[p == 0] = 1  # guard against division by zero
        diffuse = fft2(diffuse)
        sdiffuse = diffuse * arr_noise / p
        sdiffuse = ifft2(sdiffuse).real
        return sdiffuse / sdiffuse.max()
    def _inverse_stochastic_diffusion(self, sdiffuse, key):
        """Invert ``_stochastic_diffusion`` using the same seed ``key``."""
        np.random.seed(key)
        noise = fft2(np.random.rand(*sdiffuse.shape))
        sdiffuse = fft2(sdiffuse)
        diffuse = noise.conj() * sdiffuse
        diffuse = ifft2(diffuse).real
        return diffuse / diffuse.max()
    def encrypt(self, plaintext, covertext):
        """
        Hides the plaintext image within the provided covertext image
        Parameters
        ==========
        plaintext : array-like, shape (rows, columns, channels)
            The plaintext image to hide.
            Values must be ranging from 0 to 1
        covertext : array-like, shape (rows, columns, channels)
            The covertext image in which to hide the plaintext.
            Values must be ranging from 0 to 1
        Returns
        =======
        stegotext : array-like, shape (rows, columns, channels)
            The stegotext image
        Raises
        ======
        ValueError
            If inputs are not 2-/3-dimensional or their shapes differ.
        """
        c = 0.0001  # embedding strength: hidden signal sits well below the cover
        # bugfix: raise a real ValueError instead of `Exception(ValueError, msg)`,
        # which raised a bare Exception carrying the class as an argument
        if len(covertext.shape) != 2 and len(covertext.shape) != 3:
            raise ValueError("Input arrays must be 2- or 3-dimensional")
        # Ensure inputs have the same shape
        if not np.array_equal(covertext.shape, plaintext.shape):
            raise ValueError("Covertext and Plaintext shape do not match")
        covertext_2D = False
        if len(covertext.shape) == 2:
            # Promote to single-channel 3-D so one code path serves both cases
            covertext = covertext[:, :, None]
            plaintext = plaintext[:, :, None]
            covertext_2D = True
        # Ensure images are 64-bit floating point
        plaintext = plaintext.astype('float64')
        covertext = covertext.astype('float64')
        stegotext = np.zeros_like(covertext)
        for i in range(plaintext.shape[-1]):
            plaintext_channel = plaintext[:, :, i]
            covertext_channel = covertext[:, :, i]
            key = self._key_generator(covertext_channel)
            # Hide each of the channels
            diff = self._image_diffusion(plaintext_channel, covertext_channel)
            sdiff = self._stochastic_diffusion(diff, key)
            stegotext[:, :, i] = self._hide_data(covertext_channel, sdiff, c)
        if covertext_2D:
            stegotext = stegotext[:, :, 0]
        return stegotext
    def decrypt(self, stegotext, covertext):
        """
        Recovers the plaintext image hidden within the stegotext image,
        using the covertext image as the key.
        (bugfix: docstring previously described encryption — copy/paste error)
        Parameters
        ==========
        stegotext : array-like, shape (rows, columns, channels)
            The stegotext image in which the plaintext
            image is hidden.
        covertext : array-like, shape (rows, columns, channels)
            The covertext image (the key)
            Values must be ranging from 0 to 1
        Returns
        =======
        plaintext : array-like, shape (rows, columns, channels)
            The hidden plaintext image (max-normalized)
        Raises
        ======
        ValueError
            If inputs are not 2-/3-dimensional or their shapes differ.
        """
        if len(covertext.shape) != 2 and len(covertext.shape) != 3:
            raise ValueError("Input arrays must be 2- or 3-dimensional")
        # Ensure inputs have the same shape
        if not np.array_equal(covertext.shape, stegotext.shape):
            raise ValueError("Covertext and Stegotext shape do not match")
        covertext_2D = False
        if len(covertext.shape) == 2:
            covertext = covertext[:, :, None]
            stegotext = stegotext[:, :, None]
            covertext_2D = True
        stegotext = stegotext.astype('float64')
        covertext = covertext.astype('float64')
        plaintext = np.zeros_like(stegotext)
        for i in range(stegotext.shape[-1]):
            covertext_channel = covertext[:, :, i]
            stegotext_channel = stegotext[:, :, i]
            key = self._key_generator(covertext_channel)
            # Recover the plaintext channel
            sdiff = self._recover_data(covertext_channel,
                                       stegotext_channel)
            diff = self._inverse_stochastic_diffusion(sdiff, key)
            plaintext[:, :, i] = \
                self._inverse_image_diffusion(diff, covertext_channel)
        if covertext_2D:  # idiom fix: was `== True`
            plaintext = plaintext[:, :, 0]
        return plaintext
    def save_stegotext_tiff(self, stegotext, filename):
        """
        Save stegotext as tiff file
        Parameters
        ==========
        stegotext : array-like, shape (rows, columns, channels)
            The stegotext to save
        filename : str
            The filename to save the stegotext
        Returns
        =======
        self : object
        """
        # NOTE(review): tifffile.imsave is deprecated in newer tifffile
        # releases in favour of tifffile.imwrite — confirm the pinned version.
        tifffile.imsave(filename, stegotext)
        return self
    def save_image(self, image, filename):
        """
        Save image as jpeg file
        Parameters
        ==========
        image : array-like, shape (rows, columns, channels)
            The image to save
        filename : str
            The filename to save the image
        Returns
        =======
        self : object
        """
        # Rescales Image and converts to uint8 for PIL
        rescale = (255.0 / image.max() * (image - image.min())).astype(np.uint8)
        # Saves Image
        Image.fromarray(rescale).save(filename)
        return self
    def open_stegotext_tiff(self, filename):
        """
        Open a stegotext from a tiff file
        Parameters
        ==========
        filename : str
            The filename of the stegotext image
        Returns
        =======
        stegotext : array-like, shape (rows, columns, channels)
            The stegotext array
        Raises
        ======
        IOError
            If no filename is given or the file is not a float64 tiff.
        """
        # Checks if file name is empty
        if filename == "":
            # bugfix: raise a real IOError instead of Exception(IOError, msg)
            raise IOError("No file selected")
        stegotext = tifffile.imread(filename)
        if stegotext.dtype != 'float64':
            raise IOError("Improperly saved stegotext file")
        return stegotext
    def open_img(self, filename):
        """
        Open an image from a file
        Parameters
        ==========
        filename : str
            The filename of the image
        Returns
        =======
        img_array : array-like, shape (rows, columns, channels)
            The image array, normalized to [0, 1] when needed
        Raises
        ======
        IOError
            If no filename is given.
        """
        # Checks if file name is empty
        if filename == "":
            raise IOError("No file selected")
        # Open Image File
        image = Image.open(filename)
        # Convert Image to array
        img_array = np.array(image).astype(np.float64)
        # Normalizes image array if required (8-bit images -> [0, 1])
        if img_array.max() > 1.0:
            img_array /= 255
        return img_array
# Gigawhat Website Minecraft server util.
# Copyright 2022 Gigawhat Programming Team
# Written by <NAME>, 2020 - 2022.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Minecraft server util.
Gets query and status information from a Minecraft server.
"""
# ------- Libraries and utils -------
from typing import Union
from init import mcserver
# ------- Query class (Gets its information via query) -------
class Query():
    """Server information fetched via the query protocol (mcserver.query()).

    bugfix: methods were defined without ``self`` and broke when called on an
    instance; they are now ``@staticmethod`` so both ``Query.players()`` and
    ``Query().players()`` work.  Data getters are best-effort: they return
    False on any failure instead of raising.
    """
    # ------- Server's operation status (ON = Online, OF = Offline, ST = Starting) -------
    @staticmethod
    def opstat() -> str:
        try:
            mcserver.ping()
            return "ON"
        except ConnectionRefusedError:
            return "OF"
        except OSError:
            # Reachable but not accepting connections yet.
            return "ST"
    # ------- Usernames of players that are currently on the server -------
    @staticmethod
    def players() -> Union[list, bool]:
        try:
            data = mcserver.query()
            resp = []
            for player in data.players.names:
                resp.append(player)
            return resp
        except Exception:
            return False
    # ------- Number of online players on the server -------
    @staticmethod
    def players_online() -> Union[int, bool]:
        try:
            data = mcserver.query()
            return data.players.online
        except Exception:
            return False
    # ------- Number of maximum player slots -------
    @staticmethod
    def max_players() -> Union[int, bool]:
        try:
            data = mcserver.query()
            return data.players.max
        except Exception:
            return False
    # ------- Server's Minecraft version -------
    @staticmethod
    def version() -> Union[str, bool]:
        try:
            data = mcserver.query()
            return data.software.version
        except Exception:
            return False
    # ------- Server's motd (Message of the day) -------
    @staticmethod
    def motd() -> Union[str, bool]:
        try:
            data = mcserver.query()
            return data.motd
        except Exception:
            return False
    # ------- Get information from the raw server response -------
    @staticmethod
    def raw(key: str) -> Union[str, list, int, bool]:
        try:
            data = mcserver.query()
            return data.raw[key]
        except Exception:
            return False
# ------- Status class (Gets its information via status) -------
class Status():
    """Server information fetched via the status protocol (mcserver.status()).

    bugfix: methods were defined without ``self`` and broke when called on an
    instance; they are now ``@staticmethod`` so both ``Status.players()`` and
    ``Status().players()`` work.  Data getters are best-effort: they return
    False on any failure instead of raising.
    """
    # ------- Server's operation status (ON = Online, OF = Offline, ST = Starting) -------
    @staticmethod
    def opstat() -> str:
        try:
            mcserver.ping()
            return "ON"
        except ConnectionRefusedError:
            return "OF"
        except OSError:
            # Reachable but not accepting connections yet.
            return "ST"
    # ------- Username and UUIDs of players that are currently on the server -------
    @staticmethod
    def players() -> Union[list, bool]:
        try:
            data = mcserver.status()
            resp = []
            for player in data.players.sample:
                resp.append({"username": player.name, "uuid": player.id})
            return resp
        except Exception:
            return False
    # ------- Number of online players on the server -------
    @staticmethod
    def players_online() -> Union[int, bool]:
        try:
            data = mcserver.status()
            return data.players.online
        except Exception:
            return False
    # ------- Number of maximum player slots -------
    @staticmethod
    def max_players() -> Union[int, bool]:
        try:
            data = mcserver.status()
            return data.players.max
        except Exception:
            return False
    # ------- Server's Minecraft version -------
    @staticmethod
    def version() -> Union[str, bool]:
        try:
            data = mcserver.status()
            return data.version.name
        except Exception:
            return False
    # ------- Server's description/motd (Message of the day) -------
    @staticmethod
    def motd() -> Union[str, bool]:
        try:
            data = mcserver.status()
            return data.description
        except Exception:
            return False
    # ------- Get information from the raw server response -------
    @staticmethod
    def raw(key: str) -> Union[str, list, int, bool]:
        try:
            data = mcserver.status()
            return data.raw[key]
        except Exception:
            return False
|
from typing import Dict, Type, Any, Union, List, cast, TypeVar
import typing
import abc
import enum
import json
from dataclasses import dataclass, fields, MISSING
from datetime import datetime, timezone
# Raw properties mapping: property key -> string value.
Props = Dict[str, str]
def as_list(data: Union[str, List[str]]) -> List[str]:
    """Normalize *data* to a list of strings, JSON-decoding it when given a str."""
    if not isinstance(data, str):
        return list(data)
    decoded = json.loads(data)
    if isinstance(decoded, list):
        return cast(List[str], decoded)
    return [str(decoded)]
def _find_enum(enum_type: Type[enum.Enum], val: str) -> Any:
for member in enum_type.__members__.values():
if member.value == val:
return member
return None
@dataclass(frozen=True)
class _PyField:
name: str
type: type
required: bool
def encode(self, value: str):
def get_type_origin(t):
# type: ignore
if hasattr(typing, "get_origin"):
return typing.get_origin(t) # type: ignore
else:
return getattr(t, "__origin__", None)
def get_type_args(t):
# >= py3.8
if hasattr(typing, "get_args"):
return typing.get_args(t) # type: ignore
else:
return getattr(t, "__args__")
# print("name=", self.name, "type=", self.type, type(self.type))
if get_type_origin(self.type) == Union:
if datetime in get_type_args(self.type):
# TODO: fix this.
return self.name, datetime.fromtimestamp(int(float(value) * 0.001), timezone.utc)
return self.name, value
if issubclass(self.type, enum.Enum):
return self.name, self.type(value)
return self.name, value
# Type variable bound to Model so from_properties returns the subclass type.
ME = TypeVar("ME", bound="Model")
class InvalidPropertiesError(Exception):
    """Raised by `Model.from_properties(cls, properties)` when given invalid `properties`."""
    def __str__(self):
        base = "Invalid properties"
        # Append the first positional argument as detail when present.
        return f"{base}: {self.args[0]}" if self.args else base
class Model(abc.ABC):
    """
    Base class from which all property models inherit.
    Provides helper methods to load the property model data from a dictionary
    and to get a mapping of all the keys available in the given model.
    """
    def __init__(self, **kwargs):
        pass
    @classmethod
    def _custom_mapping(cls, props: Props, data: Dict[str, Any]):
        # Hook for subclasses to post-process the decoded data before init.
        pass
    @classmethod
    def from_properties(cls: Type[ME], props: Props) -> ME:
        """
        Initialize the model from a dictionary representation.
        Only keys declared by this model (via field metadata "key") are
        consumed; all other entries in *props* are ignored, so several models
        can be fed from the same dictionary without prior filtering.
        Raises InvalidPropertiesError when decoding or construction fails.
        """
        field_map = {}
        for f in fields(cls):
            if "key" not in f.metadata:
                continue
            field_map[f.metadata["key"]] = _PyField(
                name=f.name, type=f.type, required=f.default is MISSING
            )
        data = {}
        for key, val in props.items():
            if key in field_map:
                name, decoded = field_map[key].encode(val)
                data[name] = decoded
        try:
            cls._custom_mapping(props, data)
            return cls(**data)
        except Exception as exc:
            # Handle some common cases to improve error diagnostics
            if isinstance(exc, KeyError):
                msg = f"Missing key: {exc}"
            elif isinstance(exc, json.JSONDecodeError):
                msg = f"Error when decoding '{exc.doc}': {exc}"
            else:
                msg = f"{exc}"
            raise InvalidPropertiesError(msg) from exc
    @classmethod
    def keys(cls):
        """
        :return: a mapping between the model's field names and the property keys
        example:
        ```python
        >>> import dataclasses
        >>> import typing
        >>> from yapapi.properties.base import Model
        >>> @dataclasses.dataclass
        ... class NodeInfo(Model):
        ...     name: typing.Optional[str] = \
        ...             dataclasses.field(default=None, metadata={"key": "golem.node.id.name"})
        ...
        >>> NodeInfo.keys().name
        'golem.node.id.name'
        ```
        """
        class _Keys:
            def __init__(self, iter):
                self.__dict__ = dict(iter)
            def names(self):
                return self.__dict__.keys()
        return _Keys((f.name, f.metadata["key"]) for f in fields(cls))
# Public API of this module.
__all__ = ("Model", "as_list", "Props")
|
<gh_stars>10-100
"""Tests for era5cli utility functions."""
import unittest.mock as mock
import pytest
import era5cli.cli as cli
import era5cli.inputref as ref
def test_parse_args():
    """Test argument parser of cli."""
    # One fully loaded 'hourly' invocation; check every parsed attribute.
    cmdline = ['hourly', '--startyear', '2008',
               '--variables', 'total_precipitation', '--statistics',
               '--endyear', '2008', '--ensemble', '--land']
    parsed = cli._parse_args(cmdline)
    assert parsed.command == 'hourly'
    assert parsed.days == list(range(1, 32))
    assert parsed.endyear == 2008
    assert parsed.ensemble
    assert parsed.format == 'netcdf'
    assert parsed.hours == list(range(0, 24))
    assert parsed.levels == ref.PLEVELS
    assert parsed.months == list(range(1, 13))
    assert parsed.outputprefix == 'era5'
    assert not parsed.merge
    assert parsed.startyear == 2008
    assert parsed.statistics
    assert not parsed.threads
    assert parsed.variables == ['total_precipitation']
    assert parsed.land
    assert not parsed.area
def test_area_argument():
    """Test if area argument is parsed correctly."""
    # Test if area arguments are parsed correctly
    # --area takes exactly four numeric bounds (north, west, south, east).
    argv = ['hourly', '--startyear', '2008',
            '--variables', 'total_precipitation', '--statistics',
            '--endyear', '2008', '--ensemble',
            '--area', '90', '-180', '-90', '180']
    args = cli._parse_args(argv)
    assert args.area == [90, -180, -90, 180]
    # Check that area defaults to None
    argv = ['hourly', '--startyear', '2008',
            '--variables', 'total_precipitation', '--statistics',
            '--endyear', '2008', '--ensemble']
    args = cli._parse_args(argv)
    assert not args.area
    # Requires four values
    # argparse exits (SystemExit) when fewer than four bounds are supplied.
    with pytest.raises(SystemExit):
        argv = ['hourly', '--startyear', '2008',
                '--variables', 'total_precipitation', '--statistics',
                '--endyear', '2008', '--ensemble',
                '--area', '90', '-180', '-90']
        cli._parse_args(argv)
    # A value cannot be missing
    with pytest.raises(SystemExit):
        argv = ['hourly', '--startyear', '2008',
                '--variables', 'total_precipitation', '--statistics',
                '--endyear', '2008', '--ensemble',
                '--area', '90', '-180', '-90', '']
        cli._parse_args(argv)
    # Values must be numeric
    with pytest.raises(SystemExit):
        argv = ['hourly', '--startyear', '2008',
                '--variables', 'total_precipitation', '--statistics',
                '--endyear', '2008', '--ensemble',
                '--area', '90', '-180', '-90', 'E']
        cli._parse_args(argv)
def test_period_args():
    """Test the period specific argument setter with synoptic options."""
    # monthly without --synoptic: hours default to [0]
    argv = ['monthly', '--startyear', '2008',
            '--variables', 'total_precipitation',
            '--endyear', '2008', '--ensemble']
    args = cli._parse_args(argv)
    period_args = cli._set_period_args(args)
    # Period_args consists of (synoptic, statistics, days, hours)
    assert period_args == (None, None, None, [0])
    # --synoptic with explicit hours
    argv = ['monthly', '--startyear', '2008',
            '--variables', 'total_precipitation',
            '--synoptic', '4', '7', '--ensemble']
    args = cli._parse_args(argv)
    period_args = cli._set_period_args(args)
    # Period_args consists of (synoptic, statistics, days, hours)
    assert period_args == (True, None, None, [4, 7])
    # bare --synoptic: all 24 hours are requested
    argv = ['monthly', '--startyear', '2008',
            '--variables', 'total_precipitation',
            '--synoptic', '--ensemble']
    args = cli._parse_args(argv)
    period_args = cli._set_period_args(args)
    # Period_args consists of (synoptic, statistics, days, hours)
    assert period_args == (True, None, None, range(0, 24))
    # test whether the info option does not end up in _set_period_args
    argv = ['info', '2Dvars']
    args = cli._parse_args(argv)
    with pytest.raises(AttributeError):
        assert cli._set_period_args(args)
def test_level_arguments():
    """Test if levels are parsed correctly"""
    cmdline = ['hourly', '--startyear', '2008',
               '--variables', 'geopotential', '--levels', 'surface']
    parsed = cli._parse_args(cmdline)
    assert parsed.levels == ['surface']
    # only numeric values or 'surface' are accepted levels
    cmdline = ['hourly', '--startyear', '2008',
               '--variables', 'geopotential', '--levels', 'somethingelse']
    with pytest.raises(SystemExit):
        cli._parse_args(cmdline)
@mock.patch("era5cli.fetch.Fetch", autospec=True)
def test_main_fetch(fetch):
    """Test if Fetch part of main completes without error."""
    # Fetch is mocked, so _execute only exercises argument validation.
    argv = ['hourly', '--startyear', '2008',
            '--variables', 'total_precipitation', '--statistics',
            '--endyear', '2008', '--ensemble']
    args = cli._parse_args(argv)
    assert cli._execute(args)
    # should give an AssertionError if endyear is before startyear
    argv = ['hourly', '--startyear', '2008',
            '--variables', 'total_precipitation', '--statistics',
            '--endyear', '2007', '--ensemble']
    args = cli._parse_args(argv)
    with pytest.raises(AssertionError):
        assert cli._execute(args)
    # should give an AssertionError if years are out of bounds
    argv = ['hourly', '--startyear', '1950',
            '--variables', 'total_precipitation', '--statistics',
            '--endyear', '2007', '--ensemble']
    args = cli._parse_args(argv)
    with pytest.raises(AssertionError):
        assert cli._execute(args)
    # should give an AssertionError if years are out of bounds
    argv = ['hourly', '--startyear', '1950',
            '--variables', 'total_precipitation', '--statistics',
            '--endyear', '2007', '--ensemble', '--prelimbe']
    args = cli._parse_args(argv)
    with pytest.raises(AssertionError):
        assert cli._execute(args)
    # monthly call without endyear
    argv = ['monthly', '--startyear', '2008',
            '--variables', 'total_precipitation', '--synoptic',
            '--ensemble']
    args = cli._parse_args(argv)
    cli._execute(args)
    # no land available for back extension
    argv = ['monthly', '--startyear', '1980', '--endyear', '1980',
            '--variables', 'total_precipitation', '--synoptic',
            '--ensemble', '--land']
    args = cli._parse_args(argv)
    with pytest.raises(AssertionError):
        cli._execute(args)
@mock.patch("era5cli.info.Info", autospec=True)
def test_main_info(info):
    """Test if Info part of main completes without error."""
    # list-style info request
    info.return_value.infotype = 'list'
    argv = ['info', 'levels']
    args = cli._parse_args(argv)
    cli._execute(args)
    # single-variable info request
    info.return_value.infotype = 'total_precipitation'
    argv = ['info', 'total_precipitation']
    args = cli._parse_args(argv)
    cli._execute(args)
|
<filename>tests/test_faust_processor.py
from utils import *
# Render buffer size in samples; presumably 1 to exercise per-sample
# processing (e.g. automation) — TODO confirm.
BUFFER_SIZE = 1
def test_faust_passthrough():
    """Render audio through a Faust identity dsp; output should match input."""
    DURATION = 5.1
    engine = daw.RenderEngine(SAMPLE_RATE, BUFFER_SIZE)
    data = load_audio_file("assets/575854__yellowtree__d-b-funk-loop.wav", duration=DURATION)
    playback_processor = engine.make_playback_processor("playback", data)
    faust_processor = engine.make_faust_processor("faust")
    # 'process = _, _;' is the stereo identity in Faust.
    assert(faust_processor.set_dsp_string('process = _, _;'))
    assert(faust_processor.compile())
    # print(faust_processor.get_parameters_description())
    graph = [
        (playback_processor, []),
        (faust_processor, ["playback"])
    ]
    assert(engine.load_graph(graph))
    render(engine, file_path='output/test_faust_passthrough.wav')
    audio = engine.get_audio()
    # Todo: the last sample is inaccurate by a little bit
    # So we trim the last sample and compare
    data = data[:,:audio.shape[1]-1]
    audio = audio[:,:audio.shape[1]-1]
    assert(np.allclose(data, audio, atol=1e-07))
def test_faust_sidechain():
    """Have the volume of the drums attenuate the volume of the bass."""
    DURATION = 5.1
    engine = daw.RenderEngine(SAMPLE_RATE, BUFFER_SIZE)
    drums = engine.make_playback_processor("drums",
        load_audio_file("assets/Music Delta - Disco/drums.wav", duration=DURATION))
    bass = engine.make_playback_processor("bass",
        load_audio_file("assets/Music Delta - Disco/bass.wav", duration=DURATION))
    dsp_path = abspath("faust_dsp/sidechain.dsp")
    faust_processor = engine.make_faust_processor("faust")
    faust_processor.set_dsp(dsp_path)
    assert(faust_processor.compile())
    # print(faust_processor.get_parameters_description())
    # Sidechain on: bass goes through the Faust compressor keyed by drums.
    graph = [
        (drums, []),
        (bass, []),
        (faust_processor, [bass.get_name(), drums.get_name()]),
        (engine.make_add_processor("add", [1., 1.]), ["faust", "drums"])
    ]
    assert(engine.load_graph(graph))
    render(engine, file_path='output/test_sidechain_on.wav')
    # Sidechain off: plain sum of bass and drums, for comparison by ear.
    graph = [
        (drums, []),
        (bass, []),
        (engine.make_add_processor("add", [1., 1.]), ["bass", "drums"])
    ]
    assert(engine.load_graph(graph))
    render(engine, file_path='output/test_sidechain_off.wav')
def test_faust_zita_rev1(set_data=False):
    """Render audio through the Faust zita_rev1 reverb dsp without error."""
    engine = daw.RenderEngine(SAMPLE_RATE, BUFFER_SIZE)
    data = load_audio_file("assets/575854__yellowtree__d-b-funk-loop.wav")
    playback_processor = engine.make_playback_processor("playback", data)
    # Optionally exercise the set_data path of the playback processor too.
    if set_data:
        playback_processor.set_data(data)
    dsp_path = abspath("faust_dsp/dm.zita_rev1.dsp")
    faust_processor = engine.make_faust_processor("faust")
    faust_processor.set_dsp(dsp_path)
    assert(faust_processor.set_dsp(dsp_path))
    assert(faust_processor.compile())
    # print(faust_processor.get_parameters_description())
    graph = [
        (playback_processor, []),
        (faust_processor, ["playback"])
    ]
    assert(engine.load_graph(graph))
    render(engine, file_path='output/test_faust_dm.zita_rev1.wav')
def test_faust_automation():
    """Set and automate a Faust filter parameter, then render without error."""
    DURATION = 5.1
    engine = daw.RenderEngine(SAMPLE_RATE, BUFFER_SIZE)
    drums = engine.make_playback_processor("drums",
        load_audio_file("assets/Music Delta - Disco/drums.wav", duration=DURATION))
    other = engine.make_playback_processor("other",
        load_audio_file("assets/Music Delta - Disco/other.wav", duration=DURATION))
    dsp_path = abspath("faust_dsp/two_stereo_inputs_filter.dsp")
    faust_processor = engine.make_faust_processor("faust")
    faust_processor.set_dsp(dsp_path)
    assert(faust_processor.set_dsp(dsp_path))
    assert(faust_processor.compile())
    # print(faust_processor.get_parameters_description())
    faust_processor.set_parameter("/MyEffect/cutoff", 7000.0)  # Change the cutoff frequency.
    assert(faust_processor.get_parameter("/MyEffect/cutoff") == 7000.)
    # or set automation like this
    faust_processor.set_automation("/MyEffect/cutoff", 10000+9000*make_sine(2, DURATION))
    graph = [
        (drums, []),
        (other, []),
        (faust_processor, [drums.get_name(), other.get_name()])
    ]
    assert(engine.load_graph(graph))
    render(engine, file_path='output/test_faust_automation.wav')
|
import numpy as np
from pyapprox.univariate_polynomials.orthonormal_polynomials import \
gauss_quadrature
from pyapprox.univariate_polynomials.orthonormal_recursions import \
jacobi_recurrence, hermite_recurrence
def clenshaw_curtis_rule_growth(level):
    """
    The number of samples in the 1D Clenshaw-Curtis quadrature rule of a given
    level.

    Parameters
    ----------
    level : integer
        The level of the quadrature rule

    Return
    ------
    num_samples_1d : integer
        The number of samples in the quadrature rule
    """
    # Level zero is the single midpoint; each further level doubles the
    # resolution while reusing all previous points (nested rule).
    return 1 if level == 0 else 2 ** level + 1
def clenshaw_curtis_hierarchical_to_nodal_index(level, ll, ii):
    """
    Convert a 1D hierarchical index (ll,ii) to a nodal index for lookup in a
    Clenshaw-Curtis quadrature rule.

    Given a quadrature rule of the specified max level (level)
    with indices [0,1,...,num_indices] this function can be used
    to convert a hierarchical index, e.g. of the constant function
    (poly_index=0), to the quadrature index. This allows one to take
    advantage of nestedness of the quadrature rule and only store the
    quadrature rule for the max level.

    Parameters
    ----------
    level : integer
        The maximum level of the quadrature rule
    ll : integer
        The level of the polynomial index
    ii : integer
        The polynomial index

    Return
    ------
    nodal_index : integer
        The equivalent nodal index of (ll,ii)
    """
    num_indices = clenshaw_curtis_rule_growth(level)
    # mid point. BUG FIX: the original used true division (num_indices/2),
    # which returns a float in Python 3 and only worked because callers cast
    # the result with dtype=int. Floor division yields the integer midpoint.
    if ll == 0:
        return num_indices // 2
    # boundaries
    elif ll == 1:
        if ii == 0:
            return 0
        else:
            return num_indices - 1
    # higher level points
    return (2 * ii + 1) * 2 ** (level - ll)
def clenshaw_curtis_poly_indices_to_quad_rule_indices(level):
    """
    Convert all 1D polynomial indices up to and including a given level
    to their equivalent nodal indices for lookup in a Clenshaw-Curtis
    quadrature rule.

    Parameters
    ----------
    level : integer
        The maximum level of the quadrature rule

    Return
    ------
    quad_rule_indices : np.ndarray (num_indices)
        All the quadrature rule indices, ordered by hierarchical level
    """
    quad_rule_indices = []
    prev_count = 0
    for ll in range(level + 1):
        # New hierarchical indices at this level = growth minus what the
        # coarser levels already contributed.
        total_count = clenshaw_curtis_rule_growth(ll)
        for ii in range(total_count - prev_count):
            quad_rule_indices.append(
                clenshaw_curtis_hierarchical_to_nodal_index(level, ll, ii))
        prev_count = total_count
    return np.asarray(quad_rule_indices, dtype=int)
def clenshaw_curtis_in_polynomial_order(level,
                                        return_weights_for_all_levels=True):
    """
    Return the samples and weights of the Clenshaw-Curtis rule using
    polynomial (hierarchical) ordering: midpoint first, then the two
    boundary points, then the remaining points level by level.

    Parameters
    ----------
    level : integer
        The level of the isotropic sparse grid.
    return_weights_for_all_levels : boolean
        True - return weights [w(0),w(1),...,w(level)]
        False - return w(level)

    Return
    ------
    ordered_samples_1d : np.ndarray (num_samples_1d)
        The reordered samples.
    ordered_weights_1d : np.ndarray (num_samples_1d)
        The reordered weights.
    """
    if not return_weights_for_all_levels:
        x, w = clenshaw_curtis_pts_wts_1D(level)
        quad_indices = clenshaw_curtis_poly_indices_to_quad_rule_indices(level)
        return x[quad_indices], w[quad_indices]
    ordered_weights_1d = []
    for ll in range(level + 1):
        x, w = clenshaw_curtis_pts_wts_1D(ll)
        quad_indices = clenshaw_curtis_poly_indices_to_quad_rule_indices(ll)
        ordered_weights_1d.append(w[quad_indices])
    # Samples come from the final (finest) level of the loop above.
    ordered_samples_1d = x[quad_indices]
    return ordered_samples_1d, ordered_weights_1d
def clenshaw_curtis_pts_wts_1D(level):
    """
    Generate a nested, exponentially-growing Clenshaw-Curtis quadrature rule
    that exactly integrates polynomials of degree 2**level+1 with respect to
    the uniform probability measure on [-1,1].

    Parameters
    ----------
    level : integer
        The level of the nested quadrature rule. The number of samples in the
        quadrature rule will be 2**level+1

    Returns
    -------
    x : np.ndarray(num_samples)
        Quadrature samples
    w : np.ndarray(num_samples)
        Quadrature weights
    """
    # Prefer the compiled Cython implementation; fall back to the pure-Python
    # version when the extension module is unavailable.
    try:
        from pyapprox.cython.univariate_quadrature import \
            clenshaw_curtis_pts_wts_1D_pyx
        return clenshaw_curtis_pts_wts_1D_pyx(level)
    except ImportError:
        print('clenshaw_curtis_pts_wts failed')
        return __clenshaw_curtis_pts_wts_1D(level)
def __clenshaw_curtis_pts_wts_1D(level):
    """Pure-Python fallback for clenshaw_curtis_pts_wts_1D.

    Returns the samples x in [-1, 1] and the weights w of the nested
    Clenshaw-Curtis rule with 2**level+1 points (1 point at level 0),
    normalized for the uniform probability measure (weights sum to 1).
    """
    num_samples = clenshaw_curtis_rule_growth(level)
    # Halve the classical Clenshaw-Curtis weights so they integrate against
    # the uniform *probability* density on [-1,1] instead of dx (see the
    # "use if want do not want to use probability formulation" note in
    # clenshaw_curtis_in_polynomial_order).
    wt_factor = 1./2.
    x = np.empty((num_samples))
    w = np.empty_like(x)
    if (level == 0):
        # single midpoint with full mass
        x[0] = 0.
        w[0] = 1.
    else:
        for jj in range(num_samples):
            if (jj == 0):
                # left boundary point
                x[jj] = -1.
                w[jj] = wt_factor / float(num_samples*(num_samples - 2.))
            elif (jj == num_samples-1):
                # right boundary point
                x[jj] = 1.
                w[jj] = wt_factor / float(num_samples*(num_samples-2.))
            else:
                # interior point: extremum of a Chebyshev polynomial
                x[jj] = -np.cos(np.pi*float(jj)/float(num_samples-1))
                # cosine-series correction term of the classical CC weights
                mysum = 0.0
                for kk in range(1, (num_samples-3)//2+1):
                    mysum += 1. / float(4.*kk*kk-1.) *\
                        np.cos(2.*np.pi*float(kk*jj)/float(num_samples-1.))
                w[jj] = 2./float(num_samples-1.)*(
                    1.-np.cos(np.pi*float(jj)) /
                    float(num_samples*(num_samples - 2.))-2.*(mysum))
                w[jj] *= wt_factor
            # snap tiny floating-point noise at the midpoint to exactly zero
            if (abs(x[jj]) < 2.*np.finfo(float).eps):
                x[jj] = 0.
    return x, w
def gauss_hermite_pts_wts_1D(num_samples):
    """
    Return a Gauss-Hermite quadrature rule that exactly integrates
    polynomials of degree 2*num_samples-1 with respect to the Gaussian
    probability measure 1/sqrt(2*pi)exp(-x**2/2).

    Parameters
    ----------
    num_samples : integer
        The number of samples in the quadrature rule

    Returns
    -------
    x : np.ndarray(num_samples)
        Quadrature samples
    w : np.ndarray(num_samples)
        Quadrature weights
    """
    # rho = 0 selects the classical (non-generalized) Hermite weight.
    recursion_coeffs = hermite_recurrence(num_samples, 0.0, probability=True)
    return gauss_quadrature(recursion_coeffs, num_samples)
def gauss_jacobi_pts_wts_1D(num_samples, alpha_poly, beta_poly):
    """
    Return a Gauss-Jacobi quadrature rule that exactly integrates
    polynomials of degree 2*num_samples-1 with respect to the probability
    density function of Beta random variables on [-1,1]

    C*(1+x)**(beta_poly)*(1-x)**alpha_poly

    where C = 1/(2**(alpha_poly+beta_poly)*beta_fn(beta_poly+1,alpha_poly+1)),
    or equivalently C*(1+x)**(alpha_stat-1)*(1-x)**(beta_stat-1) with
    C = 1/(2**(alpha_stat+beta_stat-2)*beta_fn(alpha_stat,beta_stat)).

    Parameters
    ----------
    num_samples : integer
        The number of samples in the quadrature rule
    alpha_poly : float
        The Jacobi parameter alpha = beta_stat-1
    beta_poly : float
        The Jacobi parameter beta = alpha_stat-1

    Returns
    -------
    x : np.ndarray(num_samples)
        Quadrature samples
    w : np.ndarray(num_samples)
        Quadrature weights
    """
    recursion_coeffs = jacobi_recurrence(
        num_samples, alpha=alpha_poly, beta=beta_poly, probability=True)
    x, w = gauss_quadrature(recursion_coeffs, num_samples)
    return x, w
def leja_growth_rule(level):
    """
    The number of samples in the 1D Leja quadrature rule of a given
    level. Most Leja rules produce two point quadrature rules which
    have zero weight assigned to one point. Avoid this by skipping from
    the one point rule to a 3 point rule and then incrementing by 1.

    Parameters
    ----------
    level : integer
        The level of the quadrature rule

    Return
    ------
    num_samples_1d : integer
        The number of samples in the quadrature rule
    """
    return 1 if level == 0 else level + 2
def constant_increment_growth_rule(increment, level):
    """
    The number of samples in the 1D quadrature rule where the number of
    points grows by a fixed constant at each level.

    Parameters
    ----------
    increment : integer
        The fixed number of points added per level
    level : integer
        The level of the quadrature rule

    Return
    ------
    num_samples_1d : integer
        The number of samples in the quadrature rule
    """
    # Level one always jumps straight to three points, regardless of the
    # increment, mirroring leja_growth_rule's avoidance of two-point rules.
    if level == 1:
        return 3
    return 1 + increment * level
def algebraic_growth(rate, level):
    """Return the sample count level**rate + 1 of an algebraically growing
    1D quadrature rule."""
    return level ** rate + 1
def exponential_growth(level, constant=1):
    """
    The number of samples in an exponentially growing 1D quadrature rule of
    a given level.

    Parameters
    ----------
    level : integer
        The level of the quadrature rule
    constant : integer
        Multiplier applied before subtracting one

    Return
    ------
    num_samples_1d : integer
        The number of samples in the quadrature rule
    """
    return 1 if level == 0 else constant * 2 ** (level + 1) - 1
def exponential_growth_rule(quad_rule, level):
    """Evaluate quad_rule with the exponentially grown sample count for
    the requested level."""
    num_samples = exponential_growth(level)
    return quad_rule(num_samples)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/11/17 15:49
# @Author : Dengsc
# @Site :
# @File : ansible.py
# @Software: PyCharm
import os
import time
import logging
import datetime
from celery import shared_task
from django.conf import settings
from utils.ansible_api_v2.runner import AdHocRunner, PlaybookRunner
from ops.models import (AnsibleLock, AnsiblePlayBookTask,
AnsibleScriptTask,
AnsibleScriptTaskLog,
AnsiblePlayBookTaskLog)
from utils.ansible_api_v2.display import MyDisplay
from ops.tasks.alert import run_alert_task
from utils.redis_api import RedisQueue
logger = logging.getLogger(__name__)
class Ansible(object):
    """Prepare and execute a single Ansible ad-hoc ("script") or playbook
    task, handling per-instance locking, log persistence, result caching
    in Redis, and optional alerting.

    Supported values for ``ansible_type`` are ``'script'`` and
    ``'playbook'``; any other value raises in ``__init__``.
    """
    def __init__(self, ansible_type, task_id, celery_task_id):
        """Load the task record, merge user-supplied overrides, and build
        the runner and log instance.

        Parameters
        ----------
        ansible_type : str
            Either 'script' (ad-hoc module run) or 'playbook'.
        task_id : str
            Primary key of the AnsibleScriptTask/AnsiblePlayBookTask row.
        celery_task_id : str
            The celery request id recorded on the task log.
        """
        self.ansible_type = ansible_type
        self.task_id = task_id
        self.celery_task_id = celery_task_id
        self.task = None
        self.extra_vars = {}
        self.succeed = False
        self.result = None
        # current year and month; logs are bucketed by YYYYMM directory
        yy_mm = datetime.datetime.now().strftime('%Y%m')
        # define log path
        self.log_path = os.path.join(settings.ANSIBLE_BASE_LOG_DIR,
                                     yy_mm, self.task_id)
        # define ansible display
        self.display = MyDisplay(log_id=self.task_id, log_path=self.log_path)
        # get ansible instance
        if self.ansible_type == 'script':
            self.task = AnsibleScriptTask.objects.get(task_id=task_id)
            self.user_raw = self.task.get_json_user_input()
            # user-provided module_args override the instance defaults
            if self.user_raw.get('module_args'):
                self.module_args = self.user_raw.get('module_args')
            else:
                self.module_args = self.task.instance.module_args
        elif self.ansible_type == 'playbook':
            self.task = AnsiblePlayBookTask.objects.get(task_id=self.task_id)
            self.user_raw = self.task.get_json_user_input()
        else:
            raise Exception('Not support ansible type: {0} !'.format(
                self.ansible_type))
        # instance-level extra_vars, overridden by user-supplied ones
        self.extra_vars = self.task.instance.get_json_extra_vars()
        if self.user_raw.get('extra_vars'):
            self.extra_vars.update(self.user_raw.get('extra_vars'))
        self.kwargs = self.task.config.get_json_config()
        self.kwargs['extra_vars'] = [self.extra_vars, ]
        # lock key: one lock per (type, instance) pair, shared by all tasks
        # of the same instance so non-concurrent instances queue up
        self.lock_object_id = '{0}-{1}'.format(self.ansible_type,
                                               self.task.instance.instance_id)
        self.log_instance = self._get_log_instance()
        self.runner = self._get_runner()
    def _get_runner(self):
        """Build the AdHocRunner (script) or PlaybookRunner (playbook)."""
        if self.ansible_type == 'script':
            runner = AdHocRunner(
                module_name=self.task.instance.ansible_module.name,
                module_args=self.module_args,
                hosts=self.task.inventory.get_json_inventory(),
                log_path=self.log_path,
                log_id=self.task_id,
                **self.kwargs
            )
        else:
            runner = PlaybookRunner(
                playbook_path=self.task.instance.file_path.path,
                hosts=self.task.inventory.get_json_inventory(),
                log_path=self.log_path,
                log_id=self.task_id,
                roles_path=self.task.instance.role_path or None,
                **self.kwargs
            )
        return runner
    def run(self):
        """Acquire the lock, run the task, and always run the end-of-task
        bookkeeping (lock release, log update, cache expiry, alerting).

        Returns the runner result, or an error payload/string on failure.
        """
        try:
            if self.runner:
                self._run_before()
                self.succeed, self.result = self.runner.run()
            else:
                self.result = {'error': 'runner is not defined!'}
        except Exception as e:
            self.result = str(e)
            self.display.display(self.result, stderr=True)
        finally:
            self._run_end()
        return self.result
    def _run_before(self):
        """Wait for and take the per-instance lock (unless concurrent runs
        are allowed), then emit the start marker to the task display."""
        if not self.task.instance.concurrent:
            # Busy-wait (10s poll) until no other run of this instance holds
            # the lock row.
            while True:
                lock = AnsibleLock.objects.filter(
                    lock_object_id=self.lock_object_id).first()
                if not lock:
                    break
                self.display.display(
                    'Another same {0} is running, waiting...'.format(
                        self.ansible_type))
                time.sleep(10)
        AnsibleLock(lock_object_id=self.lock_object_id).save()
        self.display.display(settings.ANSIBLE_TASK_START_PREFIX)
    def _release_lock(self):
        """Delete this run's lock row (best-effort) and emit the end marker."""
        try:
            # release lock
            lock = AnsibleLock.objects.filter(
                lock_object_id=self.lock_object_id).first()
            if lock:
                lock.delete()
            self.display.display(settings.ANSIBLE_TASK_END_PREFIX)
        except Exception as e:
            logger.exception(e)
    def _set_redis_expire(self):
        """Put a TTL on the Redis queue holding this task's output."""
        self.redis = RedisQueue(name=self.task_id)
        self.redis.expire(settings.ANSIBLE_RESULT_CACHE_EXPIRE)
    def _get_log_instance(self):
        """Create and save the task-log row for this run; returns None on
        failure (logged, not raised)."""
        log_instance = None
        try:
            # get log instance
            if self.ansible_type == 'script':
                log_instance = AnsibleScriptTaskLog(
                    task=self.task,
                    celery_task_id=self.celery_task_id)
            else:
                log_instance = AnsiblePlayBookTaskLog(
                    task=self.task,
                    celery_task_id=self.celery_task_id)
            log_instance.save()
        except Exception as e:
            logger.exception(e)
        return log_instance
    def _update_task_log(self):
        """Persist the final success flag and result onto the log row."""
        if self.log_instance:
            self.log_instance.succeed = self.succeed
            self.log_instance.completed_log = self.result
            self.log_instance.save()
        else:
            logger.exception('log instance is None!')
    def _send_alert(self):
        """Send a markdown alert if the instance's alert settings match the
        run outcome (alert_succeed / alert_failed)."""
        markdown_template = '### Alert Information\n' + \
                            '- **Ansible Type**: {ansible_type}\n' + \
                            '- **Instance Name**: {instance_name}\n' + \
                            '- **Execute User**: {exec_user}\n' + \
                            '- **Detail Log**: [task log]({full_log})\n' + \
                            '- **Succeed**: {succeed}'
        message = markdown_template.format(
            ansible_type=self.ansible_type,
            instance_name=self.task.instance.name,
            succeed=self.succeed,
            exec_user=self.task.owner,
            full_log='{0}{1}'.format(
                settings.SERVER_BASE_URL,
                self.log_instance.task_log.url)
        )
        # send alert
        if self.task.instance.alert:
            if (self.task.instance.alert_succeed and self.succeed) or \
                    (self.task.instance.alert_failed and not self.succeed):
                run_alert_task(
                    self.task.instance.alert.name,
                    message=message
                )
    def _run_end(self):
        """End-of-run bookkeeping, always executed from run()'s finally."""
        self._release_lock()
        self._update_task_log()
        self._set_redis_expire()
        self._send_alert()
@shared_task
def run_ansible_playbook_task(task_id):
    """Celery entry point: run the playbook task identified by task_id.

    Returns the runner result produced by Ansible.run().
    """
    return Ansible(ansible_type='playbook', task_id=task_id,
                   celery_task_id=run_ansible_playbook_task.request.id
                   ).run()
@shared_task
def run_ansible_script_task(task_id):
    """Celery entry point: run the ad-hoc (script) task identified by
    task_id. Returns the runner result produced by Ansible.run().
    """
    return Ansible(ansible_type='script', task_id=task_id,
                   celery_task_id=run_ansible_script_task.request.id
                   ).run()
|
<reponame>DrewHans555/Kana2Romaji-File-Renamer<gh_stars>0
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import sys
# Module-level statistics accumulated over one run of main().
_num_files_ignored: int = 0   # names that were empty/unchanged after translation
_num_rename_success: int = 0  # successful os.rename calls
_num_rename_failure: int = 0  # failed rename attempts
_filenames_failed = []        # original names of the failed attempts
# Two-character kana digraphs (regular kana + small ya/yu/yo) -> Hepburn romaji.
_KANA_DIGRAPH_DICT = {
    # map digraph hiragana to romaji
    "きゃ": "kya", "きゅ": "kyu", "きょ": "kyo",
    "しゃ": "sha", "しゅ": "shu", "しょ": "sho",
    "ちゃ": "cha", "ちゅ": "chu", "ちょ": "cho",
    "にゃ": "nya", "にゅ": "nyu", "にょ": "nyo",
    "ひゃ": "hya", "ひゅ": "hyu", "ひょ": "hyo",
    "びゃ": "bya", "びゅ": "byu", "びょ": "byo",
    "ぴゃ": "pya", "ぴゅ": "pyu", "ぴょ": "pyo",
    "みゃ": "mya", "みゅ": "myu", "みょ": "myo",
    "りゃ": "rya", "りゅ": "ryu", "りょ": "ryo",
    "ぎゃ": "gya", "ぎゅ": "gyu", "ぎょ": "gyo",
    "じゃ": "ja", "じゅ": "ju", "じょ": "jo",
    "ぢゃ": "ja", "ぢゅ": "ju", "ぢょ": "jo",
    # map digraph katakana to romaji
    "キャ": "kya", "キュ": "kyu", "キョ": "kyo",
    "シャ": "sha", "シュ": "shu", "ショ": "sho",
    "チャ": "cha", "チュ": "chu", "チョ": "cho",
    "ニャ": "nya", "ニュ": "nyu", "ニョ": "nyo",
    "ヒャ": "hya", "ヒュ": "hyu", "ヒョ": "hyo",
    "ビャ": "bya", "ビュ": "byu", "ビョ": "byo",
    "ピャ": "pya", "ピュ": "pyu", "ピョ": "pyo",
    "ミャ": "mya", "ミュ": "myu", "ミョ": "myo",
    "リャ": "rya", "リュ": "ryu", "リョ": "ryo",
    "ギャ": "gya", "ギュ": "gyu", "ギョ": "gyo",
    "ジャ": "ja", "ジュ": "ju", "ジョ": "jo",
    "ヂャ": "ja", "ヂュ": "ju", "ヂョ": "jo",
}
# Single kana characters -> Hepburn romaji (obsolete iteration marks map to "").
_KANA_MONOGRAPH_DICT = {
    # map monograph hiragana to romaji
    "あ": "a", "い": "i", "う": "u", "え": "e", "お": "o",
    "か": "ka", "き": "ki", "く": "ku", "け": "ke", "こ": "ko",
    "が": "ga", "ぎ": "gi", "ぐ": "gu", "げ": "ge", "ご": "go",
    "さ": "sa", "し": "shi", "す": "su", "せ": "se", "そ": "so",
    "ざ": "za", "じ": "ji", "ず": "zu", "ぜ": "ze", "ぞ": "zo",
    "た": "ta", "ち": "chi", "つ": "tsu", "て": "te", "と": "to",
    "だ": "da", "ぢ": "ji", "づ": "zu", "で": "de", "ど": "do",
    "な": "na", "に": "ni", "ぬ": "nu", "ね": "ne", "の": "no",
    "は": "ha", "ひ": "hi", "ふ": "fu", "へ": "he", "ほ": "ho",
    "ば": "ba", "び": "bi", "ぶ": "bu", "べ": "be", "ぼ": "bo",
    "ぱ": "pa", "ぴ": "pi", "ぷ": "pu", "ぺ": "pe", "ぽ": "po",
    "ま": "ma", "み": "mi", "む": "mu", "め": "me", "も": "mo",
    "や": "ya", "ゆ": "yu", "よ": "yo",
    "ら": "ra", "り": "ri", "る": "ru", "れ": "re", "ろ": "ro",
    "わ": "wa", "ゐ": "wi", "ゑ": "we", "を": "wo", "ん": "n",
    # map monograph katakana to romaji
    "ア": "a", "イ": "i", "ウ": "u", "エ": "e", "オ": "o",
    "カ": "ka", "キ": "ki", "ク": "ku", "ケ": "ke", "コ": "ko",
    "ガ": "ga", "ギ": "gi", "グ": "gu", "ゲ": "ge", "ゴ": "go",
    "サ": "sa", "シ": "shi", "ス": "su", "セ": "se", "ソ": "so",
    "ザ": "za", "ジ": "ji", "ズ": "zu", "ゼ": "ze", "ゾ": "zo",
    "タ": "ta", "チ": "chi", "ツ": "tsu", "テ": "te", "ト": "to",
    "ダ": "da", "ヂ": "ji", "ヅ": "zu", "デ": "de", "ド": "do",
    "ナ": "na", "ニ": "ni", "ヌ": "nu", "ネ": "ne", "ノ": "no",
    "ハ": "ha", "ヒ": "hi", "フ": "fu", "ヘ": "he", "ホ": "ho",
    "バ": "ba", "ビ": "bi", "ブ": "bu", "ベ": "be", "ボ": "bo",
    "パ": "pa", "ピ": "pi", "プ": "pu", "ペ": "pe", "ポ": "po",
    "マ": "ma", "ミ": "mi", "ム": "mu", "メ": "me", "モ": "mo",
    "ヤ": "ya", "ユ": "yu", "ヨ": "yo",
    "ラ": "ra", "リ": "ri", "ル": "ru", "レ": "re", "ロ": "ro",
    "ワ": "wa", "ヰ": "wi", "ヱ": "we", "ヲ": "wo", "ン": "n",
    # map obsolete symbols to empty string
    "ゝ": "", "ヽ": "", "ゞ": "", "ヾ": "",
}
def _replace_choonpu(lst: list) -> list:
"""choonpu indicates that the previous vowel is extended"""
return [lst[idx - 1] if str(item) == "ー"
else str(item) for idx, item in enumerate(lst)]
def _replace_sokuon(lst: list) -> list:
"""sokuon indicates that the following consonate is geminated"""
return [lst[idx + 1] if str(item) == "っ" or str(item) == "ッ"
else str(item) for idx, item in enumerate(lst)]
def _replace_digraphs(lst: list) -> list:
    """digraphs pair one regular-sized kana with one small ya/yu/yo kana;
    both characters are replaced by the romaji for the pair."""
    small_kana = ("ゃ", "ゅ", "ょ", "ャ", "ュ", "ョ")
    result = []
    for idx, ch in enumerate(lst):
        if ch in small_kana:
            # Remove the regular-sized kana just appended and emit the
            # romaji letters for the two-character digraph instead.
            result.pop()
            pair = "".join(lst[idx - 1:idx + 1])
            result.extend(list(_KANA_DIGRAPH_DICT[pair]))
        else:
            result.append(ch)
    return result
def _replace_monographs(lst: list) -> list:
    """monographs are single regular-sized kana; each one is replaced by
    the individual letters of its romaji, other items pass through."""
    result = []
    for ch in lst:
        romaji = _KANA_MONOGRAPH_DICT.get(ch)
        if romaji is None:
            result.append(ch)
        else:
            result.extend(romaji)
    return result
def translate_kana_to_romaji(s: str) -> str:
    """translates a hiragana/katakana string to its romaji equivalent"""
    # Order matters: digraphs before monographs, and the choonpu/sokuon
    # passes only work once the kana have been expanded to letters.
    chars = list(s)
    for transform in (_replace_digraphs, _replace_monographs,
                      _replace_choonpu, _replace_sokuon):
        chars = transform(chars)
    return "".join(chars)
def print_detailed_results():
    """Print a summary of the rename run, with per-failure detail when not
    every file was renamed."""
    total = _num_files_ignored + _num_rename_failure + _num_rename_success
    print("Results: " + str(_num_rename_success) + " of "
          + str(total) + " files were renamed successfully.")
    if _num_rename_success == total:
        return
    print(" - " + str(_num_files_ignored) + " ignored files")
    print(" - " + str(_num_rename_failure) + " files failed to rename")
    print("Failed to rename:")
    for name in _filenames_failed:
        print(str(name))
def try_rename(filename: str, new_filename: str):
    """Attempt os.rename, recording the outcome in the module counters.

    BUG FIX: the original used ``except():`` — an *empty tuple* of exception
    types, which matches nothing — so any failed rename crashed the program
    instead of being counted. Catch OSError, the class os.rename raises.
    """
    global _num_rename_failure, _num_rename_success
    try:
        os.rename(filename, new_filename)
        _num_rename_success += 1
    except OSError:
        _filenames_failed.append(filename)
        _num_rename_failure += 1
def valid_new_filename(filename: str, new_filename: str) -> bool:
    """A rename is worthwhile only when the new name is a non-empty string
    that actually differs from the original."""
    if new_filename is None:
        return False
    if new_filename == "":
        return False
    return new_filename != filename
def main(dir_path):
    """Rename every file in dir_path whose kana translate to a different
    romaji name, then print a summary.

    BUG FIX: os.listdir returns bare names, but the original passed them
    straight to try_rename, so renaming only worked when dir_path was the
    current working directory. Join names with dir_path instead.
    """
    global _num_files_ignored
    print("Starting Kana2Romaji File Renamer by DrewHans555...")
    if dir_path is None or dir_path == "":
        dir_path = "."
    for filename in os.listdir(dir_path):
        new_filename = translate_kana_to_romaji(filename)
        if valid_new_filename(filename, new_filename):
            try_rename(os.path.join(dir_path, filename),
                       os.path.join(dir_path, new_filename))
        else:
            _num_files_ignored += 1
    print("...finished!")
    print_detailed_results()
if __name__ == "__main__":
    # BUG FIX: sys.argv[0] is the script's own path, not a user argument;
    # the target directory (if supplied) is sys.argv[1]. Fall back to ""
    # so main() defaults to the current directory.
    main(sys.argv[1] if len(sys.argv) > 1 else "")
|
'''
NOTICE
This module was solely developed under MITRE corporation internal funding project code 10AOH630-CA
Approved for Public Release; Distribution Unlimited. Public Release Case Number 20-1780.
(c) 2020 The MITRE Corporation. ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
'''
import random, math, numpy
def age_model(tm, dt):
    # Advance the TRACE model by dt: roll the dice on every threat and every
    # starting point that is not already active, then bump the model age.
    for name, threat in tm["threats"].items():
        if "on" in threat:
            continue
        # rate 0 means "always available"; otherwise activation follows an
        # exponential distribution with mean "rate".
        if threat["rate"] == 0:
            threat["on"] = True
        elif random.random() < 1 - math.exp(-dt / threat["rate"]):
            threat["on"] = True
    for node in tm["graph"]:
        if "start rate" not in node or "started" in node:
            continue
        if node["start rate"] == 0:
            node["started"] = True
        elif random.random() < 1 - math.exp(-dt / node["start rate"]):
            node["started"] = True
    tm["age"] = tm["age"] + dt
def check_model(tm, stop_at_hit=True, involvement=False):
    """Path check of the model to find MTTC and, optionally, MTTI.

    A spanning tree first finds every node reachable from an entry point
    through currently-realized threats (the stack holds tm["graph"] indices).
    Reached nodes get a "hit" key stamped with the model age; end nodes and
    (when ``involvement`` is True) nodes traced backwards onto an end-to-end
    path additionally get an "involved" key. Returns True once any end node
    has been reached ("hit" present on tm).

    BUG FIXES vs the original:
    - The coincidence gate inspected node ``i`` (the edge source) when
      deciding whether the *target* could be added, even though the
      decrement one line above and its own comment refer to the target;
      it now checks tm["graph"][j["to"]].
    - The involvement sweep computed a reverse index ``i`` but then indexed
      the stack with the forward index ``ix``; it now truly walks backwards
      as its comment describes (same fixed point, faster convergence).
    """
    # Seed the stack with every node that is started or was reached in a
    # previous history; reset coincidence ("AND"-logic) counters for this run.
    stack = []
    for i in range(len(tm["graph"])):
        if "coincidence" in tm["graph"][i]:
            tm["graph"][i]["coincidence hits"] = tm["graph"][i]["coincidence"]
        if "hit" in tm["graph"][i] or "involved" in tm["graph"][i] or "started" in tm["graph"][i]:
            stack.append(i)
            # Being in the stack means the node is by definition "hit"
            if "hit" not in tm["graph"][i]:
                tm["graph"][i]["hit"] = tm["age"]
            # A started end node is by definition "involved"
            if i in tm["end"] and "involved" not in tm["graph"][i]:
                tm["graph"][i]["involved"] = tm["age"]
    # Grow the spanning tree; the for loop also visits items appended inside it.
    for i in stack:
        for j in tm["graph"][i]["edges"]:
            if "on" in tm["threats"][j["threat"]]:
                if j["to"] not in stack:
                    # Coincidence nodes need "coincidence" many active
                    # incoming edges before they count as reached.
                    if "coincidence" in tm["graph"][j["to"]]:
                        tm["graph"][j["to"]]["coincidence hits"] = tm["graph"][j["to"]]["coincidence hits"] - 1
                    if "coincidence hits" not in tm["graph"][j["to"]] or tm["graph"][j["to"]]["coincidence hits"] <= 0:
                        stack.append(j["to"])
                        if "hit" not in tm["graph"][j["to"]]:
                            tm["graph"][j["to"]]["hit"] = tm["age"]
                        # Reaching a target node means the model is "hit"
                        if j["to"] in tm["end"]:
                            if "involved" not in tm["graph"][j["to"]]:
                                tm["graph"][j["to"]]["involved"] = tm["age"]
                            if "hit" not in tm:
                                tm["hit"] = True
                            if stop_at_hit:
                                break
                # If the target is already involved, so is this node; this
                # just makes the involvement sweep below converge faster.
                if "involved" in tm["graph"][j["to"]] and "involved" not in tm["graph"][i]:
                    tm["graph"][i]["involved"] = tm["age"]
        if stop_at_hit and "hit" in tm:
            break
    # Back-propagate involvement from end nodes through the reached set.
    if involvement and not (stop_at_hit and "hit" in tm):
        # Worst case requires len(stack) passes; each pass walks the stack
        # backwards because the spanning tree appended path nodes roughly in
        # forward order.
        for k in stack:
            no_change = True
            for ix in range(len(stack)):
                i = len(stack) - ix - 1
                if "involved" not in tm["graph"][stack[i]]:
                    for j in tm["graph"][stack[i]]["edges"]:
                        # Any active edge into an involved node puts this
                        # node on an end-to-end path too.
                        if "involved" in tm["graph"][j["to"]] and "on" in tm["threats"][j["threat"]]:
                            no_change = False
                            tm["graph"][stack[i]]["involved"] = tm["age"]
                            break
            # Stable pass: nothing more can become involved.
            if no_change:
                break
    if "hit" in tm:
        return True
    else:
        return False
def check_hits(tm):
    # This function is not used, but included as an alternative to the check_model function for MTTC analysis, and can be used to validate those results if desired.
    # This function uses a simple spanning tree to check for any connectivity between any "start" nodes and any "end" nodes, which would signify that the model has a "hit". It isn't technically a completely different approach, but the algorithm is so simple that it is much easier to check by hand that it should be correct.
    # Notably, cursory testing suggests this is ~8x faster than configuring the model to use the networkx built in has_path method, which also would only work from one starting point to one end
    # Seed the stack with all started nodes, stamping their "hit" age.
    stack=[]
    for i in range(len(tm["graph"])):
        if "started" in tm["graph"][i]:
            stack.append(i)
            if "hit" not in tm["graph"][i]:
                tm["graph"][i]["hit"] = tm["age"]
    # Spanning tree: the for loop also visits indices appended inside it.
    for i in stack:
        for j in tm["graph"][i]["edges"]:
            if j["to"] not in stack:
                if "on" in tm["threats"][j["threat"]]:
                    stack.append(j["to"])
                    if "hit" not in tm["graph"][j["to"]]:
                        tm["graph"][j["to"]]["hit"] = tm["age"]
            # NOTE(review): this tests the edge *source* i against tm["end"]
            # once per outgoing edge; check_model tests the *target* j["to"]
            # instead, and an end node with no outgoing edges never triggers
            # this check. Suspected typo for j["to"] — confirm against
            # check_model before relying on this validator.
            if i in tm["end"]:
                tm["hit"] = True
    if "hit" in tm:
        return True
    else:
        return False
def check_involvement(tm):
    # This function is not used, but is included as an alternative to the check_model function for MTTI analysis, and can be used to validate those results if desired.
    # This is an alternative algorithm that provides the mean time to involvement measure
    # This algorith uses a stack to track the path taken through the graph
    # When a dead end is found, it backtracks up the stack
    # It uses the list index of the outgoing edges in the stack to track
    # paths it has gone down before
    # If it reaches a node in the "end" list, it sets the "hit" time for
    # every node in the stack to the current model age, if not already
    # set by an earlier hit
    # Each index of the stack contains a node and the edge index used to
    # leave that node, so that when the algorithm backtracks it knows
    # where it's been before
    # This algorithm uses a boolean called "backtrack" to keep track of
    # whether it needs to backtrack. If there are no valid outgoing edges
    # (edges whose threats are "on" and whose destination isn't in the
    # stack), then backtrack is true and the last node is popped from/
    # the stack.
    # NOTE(review): stack["nodes"] stores tm["graph"][i]["id"] values but is
    # then used to index tm["graph"] directly — this assumes each node's
    # "id" equals its list index; confirm on the model format.
    # Go through all the nodes in the graph
    for i in range(len(tm["graph"])):
        # If a node has been designated as "started" by the age_model function, then start an attack path recursion from it
        if "started" in tm["graph"][i]:
            # Start a fresh stack using the started node
            stack={"nodes":[tm["graph"][i]["id"]],"edges":[]}
            if "hit" not in tm["graph"][stack["nodes"][-1]]:
                tm["graph"][stack["nodes"][-1]]["hit"]=tm["age"]
            # Use a safety loop that allows for lots of backtracking steps, the upper end of the loop is arbitrary, but may be exceeded in very large models
            for safety in range(1000000000):
                # Assumed behavior is to backtrack unless a valid destination node is found
                backtrack = True
                # Iterate through all the outgoing edges from the last node in the stack
                for j in range(len(tm["graph"][stack["nodes"][-1]]["edges"])):
                    # Check to see if you entered this for loop after a backtrack, in which case the last outgoing edge attempted will still be in the edge stack, making the edge stack as long as the node stack
                    if len(stack["edges"])==len(stack["nodes"]):
                        # If the edge the for loop is looking at is earlier in the outgoing edge list than the one currently in the stack, then...
                        if j<=stack["edges"][-1]:
                            # If the one currently in the stack is the last one in the outgoing edge list (or higher as a bug check), then leave the for loop, which will result in a backtrack
                            if stack["edges"][-1] >= len(tm["graph"][stack["nodes"][-1]]["edges"])-1:
                                break
                            else:
                                # Otherwise, set the for loop to the next unchecked outgoing edge
                                stack["edges"][-1]=stack["edges"][-1]+1
                                j = stack["edges"][-1]
                    # If the threat for the outgoing edge being checked is "on", then...
                    if "on" in tm["threats"][tm["graph"][stack["nodes"][-1]]["edges"][j]["threat"]]:
                        # If the destination node is not in the stack, then...
                        if tm["graph"][stack["nodes"][-1]]["edges"][j]["to"] not in stack["nodes"]:
                            # If there's no outgoing edge associated with the last node in the stack, add this outgoing edge to the edge stack
                            if len(stack["edges"])<len(stack["nodes"]):
                                stack["edges"].append(j)
                            # Add the destination node to the node stack
                            stack["nodes"].append(tm["graph"][stack["nodes"][-1]]["edges"][j]["to"])
                            if "hit" not in tm["graph"][stack["nodes"][-1]]:
                                tm["graph"][stack["nodes"][-1]]["hit"]=tm["age"]
                            # Don't backtrack, but do get out of this loop so we can restart the outgoing edge checking process from the next node
                            backtrack = False
                            break
                # Now that you're out of the outgoing edge checking for loop, see if we've just stepped into a node in the "end" list
                if not backtrack:
                    if stack["nodes"][-1] in tm["end"]:
                        # If we have, we can backtrack after we set all the nodes in the stack to have a hit time of now (if they don't already have a hit time)
                        backtrack = True
                        tm["hit"] = True
                        for j in stack["nodes"]:
                            if "involved" not in tm["graph"][j]:
                                tm["graph"][j]["involved"]=tm["age"]
                # If we're backtracking, pop the last node and possibly the last edge from the stack, and if we've run out of stack, exit the safety for loop and go on to find the next starting point (if any)
                if backtrack:
                    stack["nodes"].pop()
                    if len(stack["nodes"])==0:
                        break
                    if len(stack["edges"])>len(stack["nodes"]):
                        stack["edges"].pop()
    # This lets the function act like a simple checker if you don't want to dig into the "hit" values yourself
    if "hit" in tm:
        return True
    else:
        return False
def run_history(tm, t, dt, stop_at_hit=False, involvement=False):
    # Age tm from its current state by time t in steps of dt.
    # stop_at_hit stops the loop as soon as any full path is completed;
    # involvement toggles check_model's MTTI back-propagation pass.
    hits = False
    # A zero-length step realizes any rate-0 threats/starts before checking.
    age_model(tm, 0)
    num_steps = int(t / dt) + 2
    for step in range(num_steps):
        hits = check_model(tm, stop_at_hit=stop_at_hit, involvement=involvement)
        if hits and stop_at_hit:
            break
        age_model(tm, dt)
def reset_history(tm):
    """Clear all per-run state from model tm so a fresh history can start.

    Removes the model-level "hit" flag, zeroes the age, strips the
    "hit"/"involved" markers from every graph node, and strips the "on"
    marker from every threat.
    """
    tm.pop("hit", None)
    tm["age"] = 0.0
    for node in tm["graph"]:
        node.pop("hit", None)
        node.pop("involved", None)
    for threat in tm["threats"].values():
        threat.pop("on", None)
def find_time(tm, p, cc=(20,.05,.05), verbose=False, hunt_depth=5):
    """
    Hunt for the model age at which tm completes a full path with probability p.

    Returns (estimated_time, trials) where trials records every bundle's
    (probability, time) data points plus the total number of histories run.
    """
    # Returns the time when model tm has full paths with probability p
    # "cc" is the convergence criteria in terms of (trials, probability, time), where the results is considered converged when the past cc[0] trials are within the range of cc[1] and cc[2], this setting is a fairly quick low-quality result, something like (200, .001, .001) is pretty high quality
    # The verbose parameter gives lots of detailed output when True
    # The hunt_depth parameter gives the starting number of runs to do per bundle when hunting for a starting value. 5 is a fairly minimal "enough," turning it up might just waste time
    if verbose:
        print ("\nFinding time for probability of " + str(round(p,4)))
    dt = 10
    if verbose:
        print ("Doubling time until getting regular hits")
    trials={"p":[0],"t":[0],"histories":0}
    # Run a loop aging a model a few times at a given age, then doubling that age until the result is regular hits, giving a rough timeframe to start a more detailed hunting algorithm. 100 doublings is enough to cause the code to crash if it doesn't find any valid paths, which is likely a sub-optimal way to check for that issue.
    for i in range(100):
        hits=0
        for j in range(hunt_depth):
            run_history(tm, dt, dt, stop_at_hit=True)
            if "hit" in tm:
                hits=hits+1
            reset_history(tm)
        trials["p"].append(hits/hunt_depth)
        trials["t"].append(dt)
        trials["histories"]=trials["histories"]+hunt_depth
        if hits > hunt_depth*p:
            break
        dt=dt+dt
    """
    Notes on math:
    Casual experimentation for graphs with no coincidence above 1 indicates that cdfs for connectivity over time appear to follow a particular general form. Unfortunately, that form is not trivially inverted without using some unusual functions (Lambert W function, the "product log").
    Consequently, this hunt function treats the curve as a simple exponential curve for the purpose of predicting the next guess. Nevertheless, I felt it was important to explain why that is wrong.
    Compromise time cdfs can be observed to be well-described by a compound exponential distribution of the form:
    cdf[t] = 1-exp(-t*(a*exp(-b*t)))
    Where a and b are some constants describing the shape of the curve.
    We can find coefficients a and b given some points, maybe t_0=0, cdf_0=0; t_n=x1, cdf_n=y1; t_f=x2, cdf_n=y2
    Rearranging the general form:
    a*exp(-b*t) = -t / (ln(1-cdf[t])) = k
    Solving for a:
    a=k*exp(-b*t)
    Inserting knowns, substituting into the two above functions, and solving for a and b:
    b=ln(k1/k2)/(x2-x1)
    =ln((-x1/ln(1-y1))/(-x2/ln(1-y2)))/(x2-x1)
    a=k1*exp(x1*b)
    =(-x1/ln(1-y1))*exp(x1*b)
    From here it is observed that all the sample points must be non-zero to avoid 0/0, thus we need non-zero x1 and x2 values.
    While also not used in this module, the expectation value for any curve can now be readily found via Fubini's thereom:
    E[x]={integral from 0 to inf of (1-F(x))dx} - {integral from -inf to 0 of F(x)dx}
    Given cdf =0 for t<=0, we can ignore the second integral, leaving:
    E[x]={integral from 0 to inf of (exp(-t*(a*exp(-b*t))))dx}
    I cannot find a closed form solution for this, but it can be trivially numerically approximated given a and b.
    Thus, collecting two data points, nominally points with values that are not close to zero or one, can potentially allow for rapid estimation of expectation value for any graph.
    """
    # This provides for the case where a start and end node are directly connected
    # NOTE(review): dt starts at 10 and only grows in the loop above, so this
    # dt==0 guard looks unreachable as written -- confirm the original intent.
    if dt==0:
        return 0, trials
    if verbose:
        print ("Hunting for t")
    # This value is somewhat arbitrarily selected to prevent the automated hunting from
    # jumping "too much" in a single guess
    slow_hunt_growth = 1.2
    # This loop hunts to find a time dt where a series of monte carlo runs consistently results in a hit rate of p
    for i in range(1000):
        if hits == 0:
            # This covers the special case where a previous run loop didn't get any hits, which would cause an error in the math downstream otherwise
            # NOTE(review): the literal 1.2 duplicates the slow_hunt_growth
            # starting value defined above -- possibly meant to stay in sync.
            dt = dt*1.2
        else:
            if hits == hunt_depth:
                # This covers the case where 100% of the trials in the last bundle had completed paths, which is a somewhat non-useful result so we need to dial time down a notch
                dt = dt * (1 - cc[2])
            else:
                # This covers the case where we have a "good" data point to try and predict from. A few different methods are shown here with notes.
                # Using numpy polyfit is fast and stable, but if it doesn't have a bias for 0,0 being on the curve it sometimes gets stuck just a little bit off and never converges
                m, b = numpy.polyfit([0] + trials["p"][-cc[0]:], [0] + trials["t"][-cc[0]:], 1)
                dt = m*p+b
                # Assuming linear between last two test points tends to go unstable and dive into negative values
                #m = (trials["p"][-1]-trials["p"][-2])/(trials["t"][-1]-trials["t"][-2])
                #dt = (p-trials["p"][-1])/m+trials["t"][-1]
                # Assuming exponential tends to be unstable and diverges
                # dt = (dt/math.log(1 - (hits / hunt_depth))) * math.log(1-p)
        # This initializes the next hunt bundle
        hits=0
        if len(trials["p"])>2 and trials["p"][-1]>0:
            slow_hunt_growth = 1+abs(trials["p"][-1]-trials["p"][-2])/trials["p"][-1]
            if slow_hunt_growth > 1.2:
                slow_hunt_growth = 1.2
        hunt_depth=int(hunt_depth*slow_hunt_growth)
        # This runs a bundle of monte carlo runs at a specific model age to get a probability of how often the model has a complete path in that amount of time
        for j in range(int(hunt_depth)):
            run_history(tm, dt, dt, stop_at_hit=True)
            if "hit" in tm:
                hits=hits+1
            reset_history(tm)
        # This documents the last bundle of monte carlo runs
        trials["p"].append(hits/hunt_depth)
        trials["t"].append(dt)
        trials["histories"]=trials["histories"]+hunt_depth
        # This checks convergence status against the specified criteria to see if the current estimated time is "good enough"
        if len(trials["p"])>cc[0]:
            convergence_status = list_range(trials["p"][-cc[0]:]+[p])
            if verbose:
                print ("\r" + " "*40 + "\rp convergence: " + str(round(convergence_status, 4)) + " / " + str(cc[1]), end="")
            if convergence_status<cc[1]:
                convergence_status = list_range([abs((dt-x)/dt) for x in trials["t"][-cc[0]:]+[dt]])
                if verbose:
                    print (" - OK!, t convergence: " + str(round(convergence_status, 4)) + " / " + str(cc[2]), end="")
                if convergence_status<cc[2]:
                    if verbose:
                        print (" - OK!")
                    break
    if verbose:
        print ("Time for p of " + str(round(p, 2)) + " estimated at " + str(round(sum(trials["t"][-cc[0]:])/cc[0],2)) + " days")
    return sum(trials["t"][-cc[0]:])/cc[0], trials
def list_range(trials):
    """Return the spread (maximum minus minimum) of the values in trials."""
    # Same result as the original single-pass scan; the built-ins iterate
    # at C speed, so two passes over a short list are not a concern here.
    return max(trials) - min(trials)
def find_mean(tm, resolution=100, cc=(50,.005,.01), node_details=False, involvement=False, verbose=False, timeframe=-1):
    """
    Estimate the mean time to compromise (and optionally involvement) for
    trace model tm via repeated monte carlo histories.

    Returns (mean, trials) where trials holds the raw per-history samples
    plus summary "mttc"/"mtti" dictionaries used by the plotting code.
    """
    # Finds the mean time to compromise for a given trace model
    # resolution is how many steps to divide the cdf into to characterize it. 100 is pretty high, we've gotten away with fairly consistent results with a resolution as low as 5, particularly if you force a timeframe that's a little higher, like a p=.99 timeframe from find_time.
    # "cc" is the convergence criteria in terms of (trials, probability, time), where the results is considered converged when the past cc[0] trials are within the range of cc[2] (cc[1] does not apply for this)
    # The node_details parameter turns on and off storing data about every node. The purpose of the analysis is typically to get that node-level information, but it does take a little longer to do all the data stuff.
    # The verbose parameter gives lots of detailed output when True
    # Timeframe lets you skip the "find time" hunt that starts this function. That hunt takes time, and the value it picks can change the mean value a little. So if you're doing lots of "find mean" runs, it's handy to run "find time" first and then run "find mean" a few times. Also, it can improve the accuracy of the result if you set the timeframe really high.
    if verbose:
        if involvement:
            print ("\nFinding mean time to involvement")
        else:
            print ("\nFinding mean time to compromise")
    if timeframe==-1:
        if verbose:
            print ("Finding upper bound timeframe for p of " + str(round(1-1/(resolution+1), 4)))
        # Find the upper range time to run histories against
        timeframe = find_time(tm, 1-1/(resolution+1), verbose=True)[0]
    # Set up data structure for results
    trials={"t":[],"mu":[],"histories":0,"timeframe":timeframe,"resolution":resolution}
    if node_details:
        trials["nodes"]=[]
        for i in tm["graph"]:
            trials["nodes"].append({"id":i["id"],"results":{"mttc samples":[],"mtti samples":[]}})
    if verbose:
        print ("Running histories with timeframe of " + str(round(timeframe,2)) + " and dT of " + str(round(timeframe/resolution, 2)))
    # Run histories until the convergence criteria is met or a safety limit is exceeded, the upper end of the loop is arbitrary, and may be exceeded in very large models
    for safety in range(10000):
        # Find one hit age
        reset_history(tm)
        run_history(tm, timeframe, timeframe/resolution, involvement=involvement)
        trials["histories"]=trials["histories"]+1
        # Store the result for the overall graph
        # tm["age"]+1 is a sentinel value: it is popped again below if no end
        # node was actually hit during this history.
        trials["t"].append(tm["age"]+1)
        for i in tm["end"]:
            if "hit" in tm["graph"][i]:
                if tm["graph"][i]["hit"]<trials["t"][-1]:
                    trials["t"][-1]=tm["graph"][i]["hit"]
        if trials["t"][-1]==tm["age"]+1:
            trials["t"].pop()
        # Only recompute the running mean when a new sample was actually kept.
        if len(trials["t"])>len(trials["mu"]):
            trials["mu"].append(fubini(trials["t"],trials["histories"]))
        # Store the per-node results if doing all nodes
        if node_details:
            for i in range(len(tm["graph"])):
                if "hit" in tm["graph"][i]:
                    trials["nodes"][i]["results"]["mttc samples"].append(tm["graph"][i]["hit"])
            if involvement:
                for i in range(len(tm["graph"])):
                    if "involved" in tm["graph"][i]:
                        trials["nodes"][i]["results"]["mtti samples"].append(tm["graph"][i]["involved"])
        # Check current data against convergence criteria
        if len(trials["mu"])>cc[0]:
            convergence_status = list_range([abs((trials["mu"][-1]-x)/trials["mu"][-1]) for x in trials["mu"][-cc[0]:]])
            if verbose:
                print ("\r" + " "*40 + "\rt convergence: " + str(round(convergence_status, 4)) + " / " + str(cc[2]), end="")
            if convergence_status<cc[2]:
                if verbose:
                    print (" - OK!")
                break
        else:
            print ("\r" + str(len(trials["mu"])) + " / " + str(cc[0]) + " initial samples.", end="")
    # Store result data in accessible dictionary
    trials["mttc"] = {}
    trials["mttc"]["mean"]=trials["mu"][-1]
    if involvement:
        trials["mtti"] = {}
        # NOTE(review): the mtti mean is assigned the same running mean as
        # mttc here -- confirm this is intended rather than a separate
        # involvement-based average.
        trials["mtti"]["mean"]=trials["mu"][-1]
    # Add per-node results
    if node_details:
        # Min and max result values are used to generate the color map in the plot module
        trials["mttc"]["max"]=0
        trials["mttc"]["min"]=-1
        for i in trials["nodes"]:
            if len(i["results"]["mttc samples"])>0:
                i["results"]["mttc"]=fubini(i["results"]["mttc samples"], trials["histories"])
                if i["results"]["mttc"]>trials["mttc"]["max"]:
                    trials["mttc"]["max"]=i["results"]["mttc"]
                if trials["mttc"]["min"]==-1 or i["results"]["mttc"]<trials["mttc"]["min"]:
                    trials["mttc"]["min"]=i["results"]["mttc"]
        if trials["mttc"]["max"]==trials["mttc"]["min"]:
            trials["mttc"]["min"]=0
        if involvement:
            trials["mtti"]["max"]=0
            trials["mtti"]["min"]=-1
            for i in trials["nodes"]:
                if len(i["results"]["mtti samples"])>0:
                    i["results"]["mtti"]=fubini(i["results"]["mtti samples"], trials["histories"])
                    if i["results"]["mtti"]>trials["mtti"]["max"]:
                        trials["mtti"]["max"]=i["results"]["mtti"]
                    if trials["mtti"]["min"]==-1 or i["results"]["mtti"]<trials["mtti"]["min"]:
                        trials["mtti"]["min"]=i["results"]["mtti"]
            if trials["mtti"]["max"]==trials["mtti"]["min"]:
                trials["mtti"]["min"]=0
    if verbose:
        print ("Overall mean estimated at " + str(round(trials["mu"][-1],2)) + " days")
    return trials["mu"][-1], trials
def fubini(samples_ref, sample_size=-1):
    """
    Approximate the expectation value of a distribution from an incomplete
    sample list using a discrete version of Fubini's theorem.

    samples_ref -- observed samples (the list is copied, not mutated).
    sample_size -- total number of trials the samples were drawn from;
                   defaults to len(samples_ref) when -1.

    If we had a complete list of samples we could just average them. This
    approach is needed because the samples are incomplete: histories that
    never completed within the simulation timeframe produce no sample. All
    missing samples are known to be greater than the observed ones, so extra
    survival-curve mass is extrapolated for them. In practice this gives much
    more consistent results than averaging the raw samples.
    """
    samples = samples_ref.copy()
    if sample_size == -1:
        sample_size = len(samples)
    samples.sort()
    # points traces the survival curve (1 - cdf): x = sample value, y = P(T > x).
    points = {"x": [0], "y": [1]}
    dy = 1/sample_size
    # Fix: the original tested `samples[x] in points["x"]`, an O(n) list scan
    # inside the loop (O(n^2) overall). A set of already-recorded x values
    # gives the identical result with O(1) membership tests.
    seen = {0}
    for idx in range(len(samples)):
        if samples[idx] in seen:
            # Duplicate sample value: deepen the existing step instead of
            # adding a zero-width one.
            points["y"][-1] = points["y"][-1]-dy
        else:
            seen.add(samples[idx])
            points["x"].append(samples[idx])
            points["y"].append(1-(idx+1)*dy)
    # This is the part that adds "extra mass" for all the samples that went over the simulation timeframe
    if len(samples) < sample_size:
        # Linearly extrapolate where the survival curve would reach zero.
        m, b = numpy.polyfit(points["y"], points["x"], 1)
        if b < points["x"][-1]:
            b = points["x"][-1]
        points["x"].append(b)
        points["y"].append(0)
    # Trapezoidal integration of the survival curve gives E[T].
    expectation_value = 0
    for idx in range(len(points["x"])-1):
        expectation_value = expectation_value + (points["x"][idx+1]-points["x"][idx])*(points["y"][idx]+points["y"][idx+1])*0.5
    return expectation_value
|
import requests
import json
import os
# import related models here
from djangoapp.models import CarDealer, DealerReview
from requests.auth import HTTPBasicAuth
import dotenv
dotenv.load_dotenv()
# Create a `get_request` to make HTTP GET requests
# e.g., response = requests.get(url, params=params, headers={'Content-Type': 'application/json'},
# auth=HTTPBasicAuth('apikey', api_key))
def get_request(url, params=None, api_key=None):
    """Perform an HTTP GET against url with optional query params and IBM
    "apikey" basic auth.

    Returns the response body parsed as JSON (a dict/list), or {} when the
    request fails at the network level or returns a non-2xx status.
    """
    try:
        response = requests.get(url, headers={'Content-Type': 'application/json'},
                                params=params, auth=HTTPBasicAuth("apikey", api_key))
    except requests.exceptions.RequestException:
        # Fix: this was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; only network/HTTP-layer failures belong here.
        print("Network exception occurred")
        return {}
    json_data = {}
    if response.ok:
        # Only parse the body of successful responses; error bodies may not
        # be JSON at all.
        json_data = json.loads(response.text)
    return json_data
# Create a `post_request` to make HTTP POST requests
# e.g., response = requests.post(url, params=kwargs, json=payload)
def post_request(url, params=None, payload=None):
    """Perform an HTTP POST against url, sending payload as the JSON body.

    Returns the raw requests.Response object so callers can inspect it.
    """
    print("POST to {} ".format(url))
    print(f"{payload=}")
    json_headers = {'Content-Type': 'application/json'}
    response = requests.post(url, headers=json_headers, params=params, json=payload)
    status_code = response.status_code
    print(f"With status {status_code}")
    return response
# Create a get_dealers_from_cf method to get dealers from a cloud function
# def get_dealers_from_cf(url, **kwargs):
# - Call get_request() with specified arguments
# - Parse JSON results into a CarDealer object list
def get_dealers_from_cf(url, **kwargs):
    """Fetch all dealers from the cloud function at url and return them as a
    list of CarDealer objects (empty list when the request fails)."""
    results = []
    json_result = get_request(url)
    if json_result:
        # Each document in the response becomes one CarDealer instance.
        for dealer_doc in json_result:
            results.append(CarDealer(
                address=dealer_doc["address"],
                city=dealer_doc["city"],
                full_name=dealer_doc["full_name"],
                id=dealer_doc["id"],
                lat=dealer_doc["lat"],
                long=dealer_doc["long"],
                short_name=dealer_doc["short_name"],
                st=dealer_doc["st"],
                zip=dealer_doc["zip"]
            ))
    return results
# Create a get_dealer_reviews_from_cf method to get reviews by dealer id from a cloud function
# def get_dealer_by_id_from_cf(url, dealerId):
# - Call get_request() with specified arguments
# - Parse JSON results into a DealerView object list
def get_dealer_reviews_from_cf(url, dealerId):
    """Fetch the reviews for one dealer from the cloud function at url and
    return them as a list of DealerReview objects, each annotated with a
    Watson NLU sentiment label (empty list when the request fails)."""
    results = []
    query = {
        "dealerId": dealerId
    }
    json_result = get_request(url, params=query)
    if json_result:
        for review in json_result:
            results.append(DealerReview(
                id=review["_id"],
                name=review.get("name"),
                text=review.get("review"),
                dealer_id=review.get("dealership"),
                car_make=review.get("car_make"),
                car_model=review.get("car_model"),
                car_year=review.get("car_year"),
                did_purchase=review.get("purchase"),
                purchase_date=review.get("purchase_date"),
                sentiment=analyse_review_sentiments(review.get("review"))
            ))
    return results
# Create an `analyze_review_sentiments` method to call Watson NLU and analyze text
# def analyze_review_sentiments(text):
# - Call get_request() with specified arguments
# - Get the returned sentiment label such as Positive or Negative
def analyse_review_sentiments(text: str):
    """Call Watson NLU to analyze text and return its document sentiment
    label (e.g. "positive"/"negative"), "neutral" when the response has no
    sentiment block, or None when the NLU_URL / NLU_API_KEY environment
    variables are missing.
    """
    API_PATH = "/v1/analyze"
    try:
        url = os.environ["NLU_URL"]
        api_key = os.environ["NLU_API_KEY"]
    except KeyError as err:
        print(err)
        return None
    params = {
        "text": text,
        "version": "2021-08-01",
        # Fix: the original passed the set literal {"sentiment"}, which only
        # produced "features=sentiment" because requests happens to iterate
        # sets; the query parameter is a plain comma-separated string.
        "features": "sentiment",
        # NOTE(review): requests encodes this bool as the string "False";
        # confirm the NLU query API accepts that casing.
        "return_analyzed_text": False
    }
    result = get_request(url + API_PATH, api_key=api_key, params=params)
    print(result)
    sentiment = result["sentiment"]["document"]["label"] if result.get("sentiment", None) else "neutral"
    return sentiment
|
<reponame>norips/visual-navigation-agent-pytorch<gh_stars>1-10
import json
import math
import os
import re
import GPUtil
def find_restore_point(checkpoint_path, fail=True):
    """Locate the latest checkpoint matching checkpoint_path.

    When checkpoint_path contains the '{checkpoint}' placeholder, scans the
    containing directory and returns (filename, checkpoint_number) for the
    highest-numbered match. Otherwise returns (checkpoint_path, None) if the
    file exists.

    fail -- raise instead of returning None when nothing is found.
    """
    checkpoint_path = os.path.abspath(checkpoint_path)
    if checkpoint_path.find('{checkpoint}') != -1:
        files = os.listdir(os.path.dirname(checkpoint_path))
        base_name = os.path.basename(checkpoint_path)
        # Fix: the digit group is now a raw string; '(\d+)' in a plain string
        # is an invalid escape sequence (DeprecationWarning, SyntaxWarning in
        # newer Pythons).
        regex = re.escape(base_name).replace(
            re.escape('{checkpoint}'), r'(\d+)')
        points = [(fname, int(match.group(1))) for (fname, match) in (
            (fname, re.match(regex, fname),) for fname in files) if match is not None]
        if len(points) == 0:
            if fail:
                raise Exception('Restore point not found')
            else:
                return None
        # Highest checkpoint number wins.
        (base_name, restore_point) = max(points, key=lambda x: x[1])
        return (base_name, restore_point)
    else:
        if not os.path.exists(checkpoint_path):
            if fail:
                raise Exception('Restore point not found')
            else:
                return None
        return (checkpoint_path, None)
def find_restore_points(checkpoint_path, fail=True):
    """Locate all checkpoints matching checkpoint_path.

    When checkpoint_path contains the '{checkpoint}' placeholder, returns
    (filenames, checkpoint_numbers) as two tuples sorted by checkpoint number
    in descending order. Otherwise returns (checkpoint_path, None) if the
    file exists.

    fail -- raise instead of returning None when nothing is found.
    """
    checkpoint_path = os.path.abspath(checkpoint_path)
    if checkpoint_path.find('{checkpoint}') != -1:
        files = os.listdir(os.path.dirname(checkpoint_path))
        base_name = os.path.basename(checkpoint_path)
        # Fix: raw string for the digit group; '(\d+)' in a plain string is
        # an invalid escape sequence.
        regex = re.escape(base_name).replace(
            re.escape('{checkpoint}'), r'(\d+)')
        points = [(fname, int(match.group(1))) for (fname, match) in (
            (fname, re.match(regex, fname),) for fname in files) if match is not None]
        if len(points) == 0:
            if fail:
                raise Exception('Restore point not found')
            else:
                return None
        # Newest checkpoint first, then split into parallel tuples.
        points = sorted(points, key=lambda x: x[1], reverse=True)
        (base_name, restore_point) = zip(*points)
        return (base_name, restore_point)
    else:
        if not os.path.exists(checkpoint_path):
            if fail:
                raise Exception('Restore point not found')
            else:
                return None
        return (checkpoint_path, None)
def populate_config(config, mode='train', checkpoint=True):
    """Merge the experiment JSON file referenced by config['exp'] into config.

    Copies train/eval parameters into the dict, derives base/log/checkpoint
    paths relative to the experiment file, and selects the task list for the
    given mode ('train' or anything else for eval). Returns the new dict.
    """
    exp_path = config['exp']
    # Fix: use a context manager so the file handle is closed even if
    # json.load raises; the original closed it manually and would leak the
    # handle on a parse error.
    with open(exp_path) as json_file:
        json_dump = json.load(json_file)
    compute_param = json_dump['train_param']
    eval_param = json_dump['eval_param']
    config = {**config, **compute_param}
    config = {**config, **eval_param}
    base_path = os.path.dirname(exp_path) + '/'
    config['base_path'] = base_path
    from datetime import datetime
    # Timestamped log directory so repeated runs do not overwrite each other.
    config['log_path'] = base_path + 'logs/' + \
        datetime.now().strftime('%b%d_%H-%M-%S')
    if checkpoint:
        # '{checkpoint}' is substituted later by the restore-point helpers.
        config['checkpoint_path'] = base_path + 'checkpoints/{checkpoint}.pth'
    config['h5_file_path'] = json_dump['h5_file_path']
    config['total_step'] = int(json_dump['total_step'])
    if mode == 'train':
        config['task_list'] = json_dump['task_list']['train']
    else:
        config['task_list'] = json_dump['task_list']['eval']
    config['saving_period'] = int(json_dump['saving_period'])
    config['max_t'] = int(json_dump['max_t'])
    config['action_size'] = int(json_dump['action_size'])
    config['method'] = json_dump['method']
    return config
def get_first_free_gpu(memory_needed):
    # Return the id of one available GPU with at least `memory_needed` free
    # memory, or None when no GPU qualifies.
    GPUs = GPUtil.getGPUs()
    # maxLoad = 2 Bypass maxLoad filter
    GPUs_available = GPUtil.getAvailability(
        GPUs, maxLoad=2, maxMemory=0.8, memoryFree=memory_needed)
    # getAvailability yields a 0/1 flag per GPU; keep the GPU objects flagged 1.
    GPUs_available = [gpu for i, gpu in enumerate(
        GPUs) if (GPUs_available[i] == 1)]
    if not GPUs_available:
        return None
    # NOTE(review): reverse=True puts the *highest* memoryUtil first, so this
    # returns the most-utilized of the available GPUs (NaN utilization sorts
    # first) -- confirm that is intended rather than the least-utilized one.
    GPUs_available.sort(key=lambda x: float('inf') if math.isnan(
        x.memoryUtil) else x.memoryUtil, reverse=True)
    return GPUs_available[0].id
|
<gh_stars>0
#!/usr/bin/env python3
# AUTHOR:
#
# <NAME>, DANS, NL, <EMAIL>
#
# USAGE
#
# ./selective-harvest.py --help
#
# or
#
# python3 selective-harvest.py --help
#
# will show complete usage information.
#
# Shortest form:
#
# ./selective-harvest.py
#
# assuming that the config file is in config.xml
#
# Usual form
#
# ./selective-harvest.py -w work-directory -c config-file -l logfile
#
# Verbosity can be increased with -v, -vv, -vvv.
#
# All messages go to log file regardless of verbosity.
import sys
import os
import time
import datetime
import re
import argparse
from subprocess import run
import xml.etree.ElementTree as ET
# Global harvester state, initialized by main().
LOG = None                 # open log file handle, or None if unavailable
TIMESTAMP = time.time()    # process start time, used for elapsed-time display
VERBOSE = False            # verbosity level from the -v command line flags
# Base curl invocation; the output path and URL are appended per document.
COMMAND = ('curl', '-s', '-o')
# Extracts the payload between the OAI-PMH <metadata> wrapper tags.
metadataPat = re.compile('<metadata[^>]*>(.*)</metadata>', re.S)
# Extracts the code attribute and message body of an OAI-PMH <error> element.
errorPat = re.compile('''<error.*code=['"]([^'"]*)['"][^>]*>(.*)</error>''', re.S)
def timestamp():
    """Return 'ISO-datetime = elapsed' where elapsed is the human-readable
    time since the process started (module-level TIMESTAMP)."""
    elapsed = time.time() - TIMESTAMP
    if elapsed < 10:
        # Sub-ten-second intervals keep fractional precision.
        rep = "{: 2.2f}s".format(elapsed)
    else:
        secs = int(round(elapsed))
        if secs < 60:
            rep = "{:>2d}s".format(secs)
        elif secs < 3600:
            rep = "{:>2d}m {:>02d}s".format(secs // 60, secs % 60)
        else:
            rep = "{:>2d}h {:>02d}m {:>02d}s".format(
                secs // 3600, (secs % 3600) // 60, secs % 60
            )
    return '{} = {}'.format(datetime.datetime.now().isoformat(), rep)
def _msg(msg, verbosity, newline, time, log, term):
    """
    Print a message msg if the verbosity admits it.
    optionally with newline, optionally with timestamp,
    optionally to a log file including an optional timestamp,
    Writes to log file regardless of verbosity.
    """
    tail = '\n' if newline else ''
    # Terminal output is gated on the global verbosity level.
    if term and VERBOSE > verbosity:
        sys.stderr.write('{}{}'.format(msg, tail))
    # Log output ignores verbosity but requires an open log handle.
    if log and LOG is not None:
        prefix = '{} > '.format(timestamp()) if time else ''
        LOG.write('{}{}{}'.format(prefix, msg, tail))
# Convenience wrappers around _msg at fixed verbosity levels. The *ln
# variants append a newline. shout/shoutln always reach the terminal and
# never the log; error/info/extra write to both by default.
def shout(msg):
    _msg(msg, -100, False, False, False, True)
def shoutln(msg):
    _msg(msg, -100, True, False, False, True)
def error(msg, time=True, log=True, term=True):
    _msg(msg, 0, False, time, log, term)
def errorln(msg, time=True, log=True, term=True):
    _msg(msg, 0, True, time, log, term)
def info(msg, time=True, log=True, term=True):
    _msg(msg, 1, False, time, log, term)
def infoln(msg, time=True, log=True, term=True):
    _msg(msg, 1, True, time, log, term)
def extra(msg, time=True, log=True, term=True):
    _msg(msg, 2, False, time, log, term)
def extraln(msg, time=True, log=True, term=True):
    _msg(msg, 2, True, time, log, term)
def readTasks(configPath, selectRepos):
    """
    Read an XML config file, and convert it to a tasks list.
    Each task specifies a repository with pseudo sets and ids in
    those sets to harvest, plus a location where the harvested
    documents should end up.

    selectRepos -- optional set of repository ids to include; None means all.
    Returns the list of repo-task dicts, or False when the config file is
    missing (callers treat any falsy value as failure).
    """
    if not os.path.exists(configPath):
        errorln('No config file "{}"'.format(configPath))
        return False
    info('Reading config file "{}" ...'.format(configPath))
    tree = ET.parse(configPath)
    infoln('done', time=False)
    repos = []
    root = tree.getroot()
    for rElem in root.iter('repository'):
        repoName = rElem.attrib['id']
        if selectRepos is not None and repoName not in selectRepos:
            infoln('skipping repo "{}"'.format(repoName))
            continue
        repoInfo = {
            'name': repoName,
            'sets': [],
        }
        # Scalar settings; if an element repeats, the last one wins.
        for elem in rElem.findall('baseurl'):
            repoInfo['url'] = elem.text
        for elem in rElem.findall('metadataprefix'):
            repoInfo['meta'] = elem.text
        for elem in rElem.findall('recordpath'):
            repoInfo['dest'] = elem.text
        # Each output-set collects the document ids to harvest into it.
        for elem in rElem.findall('output-set'):
            setInfo = {
                'name': elem.attrib['name'],
                'ids': set(),
            }
            for iElem in elem.findall('id'):
                setInfo['ids'].add(iElem.text)
            repoInfo['sets'].append(setInfo)
        repos.append(repoInfo)
    # Verbose dump of what was read, for -vv and up.
    for repo in repos:
        extraln('{} => {}'.format(
            repo.get('name', 'UNKNOWN REPO'),
            repo.get('dest'),
        ))
        for pset in repo.get('sets', set()):
            extraln('\t{}'.format(
                pset.get('name', 'UNKNOWN SET'),
            ))
            for did in sorted(pset.get('ids', set())):
                extraln('\t\t{}'.format(did))
    return repos
def harvestAll(repoTasks):
    """
    Execute all harvesting tasks.

    Every task is attempted (no short-circuit); returns True only when all
    of them succeeded.
    """
    results = [harvestTask(task) for task in repoTasks]
    return all(results)
def harvestTask(repoTask):
    """
    Execute a single harvesting task.

    Creates the destination directory tree, then fetches every document id
    of every set via curl and strips the OAI-PMH wrapper with deliver().
    Returns True only when every document of every set was harvested.
    """
    taskName = repoTask.get('name', 'UNSPECIFIED')
    infoln('Harvesting from "{}"'.format(taskName))
    dest = repoTask.get('dest', '')
    good = True
    # Make sure the top-level destination exists and is a directory.
    if not os.path.exists(dest):
        try:
            os.makedirs(dest, exist_ok=True)
        except Exception:
            errorln('Cannot create directory "{}"'.format(dest))
            good = False
    else:
        if not os.path.isdir(dest):
            errorln('"{}" is not a directory'.format(dest))
            good = False
    if not good:
        return False
    for repoSet in repoTask.get('sets', []):
        setGood = True
        setName = repoSet.get('name', 'UNSPECIFIED')
        ids = sorted(repoSet.get('ids', []))
        infoln('\tHarvesting "{}" set "{}" with {} documents'.format(
            taskName, setName, len(ids),
        ))
        # Each set gets its own subdirectory under the task destination.
        setDest = '{}/{}'.format(dest, setName)
        if not os.path.exists(setDest):
            try:
                os.makedirs(setDest, exist_ok=True)
            except Exception:
                errorln('Cannot create directory "{}"'.format(setDest))
                setGood = False
        else:
            if not os.path.isdir(setDest):
                errorln('"{}" is not a directory'.format(setDest))
                setGood = False
        if not setGood:
            good = False
            continue
        nError = 0
        for docId in ids:
            docError = None
            repoUrl = repoTask.get('url', '')
            meta = repoTask.get('meta', '')
            msg = '      harvesting "{}" ... '.format(docId)
            info(msg)
            # OAI-PMH GetRecord request for this single document.
            docUrl = '{}?verb=GetRecord&identifier={}&metadataPrefix={}'.format(
                repoUrl,
                docId,
                meta,
            )
            # ':' is not filesystem-safe in the identifier, so replace it.
            docDest = '{}/{}.xml'.format(setDest, docId.replace(':', '-'))
            try:
                run(
                    COMMAND + (docDest, docUrl)
                )
                # deliver() unwraps the OAI-PMH envelope in place and returns
                # an error string on failure.
                error = deliver(docDest)
                if error is not None:
                    docError = error
            except Exception as e:
                docError = e
            # Remove partial/failed downloads so they are not mistaken for data.
            if docError and os.path.exists(docDest):
                os.unlink(docDest)
            if docError:
                setGood = False
                nError += 1
            if docError:
                if VERBOSE <= 1:
                    errorln('{}XX'.format(msg), log=False)
                    errorln('XX', time=False, term=False)
                else:
                    errorln('XX', time=False)
            else:
                infoln('OK', time=False)
            if docError:
                docError = str(docError).rstrip('\n')
                infoln('\t\t\t{}'.format(docError))
        if not setGood:
            good = False
        infoln('\tHarvested "{}" set "{}" {} good, {} missed'.format(
            taskName, setName, len(ids) - nError, nError,
        ))
    return good
def deliver(path):
    """
    harvestTask writes the result of a harvest request to disk as is.
    This function peels the OAI-PMH wrapper off the document, and saves
    the document in the same place.

    Returns None on success, or an error description string.
    """
    with open(path) as fh:
        text = fh.read()
    if '</GetRecord>' in text and '</metadata>' in text:
        # A record came back: extract the payload between the metadata tags.
        match = metadataPat.search(text)
        if not match:
            return 'No metadata found'
        with open(path, 'w') as fh:
            fh.write(match.group(1).strip())
        return None
    if '</error>' in text:
        # The repository answered with an OAI-PMH error element.
        match = errorPat.search(text)
        if not match:
            return 'Could not parse error message'
        return '{code}: {msg}'.format(code=match.group(1), msg=match.group(2))
    return 'No record found and no error message found'
def main():
    """
    Sets up an argument parser.
    Changes to the working directory which is specified in the -w argument.
    Starts appending to the log file.
    Reads the config file which is specified in the -c argument.
    Harvests all repos and document-ids found in the config file,
    unless -r repo is given, in which case only document-ids from
    repo are harvested.
    The harvested documents are stored in a location specified
    per repo in the config file.
    Example config file:
    <config>
        <repository id="dans-easy">
            <baseurl>https://easy.dans.knaw.nl/oai/</baseurl>
            <metadataprefix>oai_dc</metadataprefix>
            <output-set name="theo1">
                <id>oai:easy.dans.knaw.nl:easy-dataset:4215</id>
                <id>oai:easy.dans.knaw.nl:easy-dataset:25037</id>
                <id>oai:easy.dans.knaw.nl:easy-dataset:25037x</id>
            </output-set>
            <output-set name="theo2">
                <id>oai:easy.dans.knaw.nl:easy-dataset:30678</id>
                <id>oai:easy.dans.knaw.nl:easy-dataset:32044</id>
            </output-set>
            <from>2006-11-01T00:00:00Z</from>
            <recordpath>_temp/oai-pmh-harvester/dans</recordpath>
        </repository>
    </config>
    Run ./selective-harvest.py --help to see complete usage information.

    Returns a process exit code: 0 on success, 1 on any failure.
    """
    global VERBOSE
    global LOG
    parser = argparse.ArgumentParser(description='selective harvest arguments')
    parser.add_argument(
        '-c', '--config',
        default='config.xml',
        help='path to config file (xml)',
    )
    parser.add_argument(
        '-l', '--log',
        default='log.txt',
        help='path to log file',
    )
    parser.add_argument(
        '-w', '--workdir',
        default='',
        help=(
            'path to working directory which is'
            ' the starting point of a relative config path'
            ' and the starting point of a relative output path'
        ),
    )
    parser.add_argument(
        '-r', '--repo',
        default='',
        type=str,
        help='only do repos in comma separated list of repo ids',
    )
    parser.add_argument(
        '-v', '--verbose',
        action='count',
        default=0,
        help=(
            'print errors, messages, verbose messages.'
            ' Repeat the option to increase verbosity.'
        ),
    )
    args = parser.parse_args()
    VERBOSE = args.verbose
    # All relative paths below (config, log, destinations) resolve against
    # the working directory.
    workDir = os.path.abspath(args.workdir)
    os.chdir(workDir)
    logPath = os.path.abspath(args.log)
    logDir = os.path.dirname(logPath)
    if not os.path.exists(logDir):
        try:
            os.makedirs(logDir, exist_ok=True)
        except Exception:
            shoutln('Cannot create log directory "{}"'.format(logDir))
    # Logging is best-effort: the harvest continues without a log file.
    try:
        LOG = open(logPath, 'a')
    except Exception:
        LOG = None
        shoutln('Cannot write to log file "{}"'.format(logPath))
    infoln('working in directory "{}"'.format(os.getcwd()))
    repos = None if args.repo == '' else set(args.repo.split(','))
    if repos is None:
        infoln('Harvest all repos found in "{}"'.format(args.config))
    else:
        infoln('Harvest repositories "{}" only'.format('", "'.join(repos)))
    repoTasks = readTasks(args.config, repos)
    if not repoTasks:
        return 1
    if not harvestAll(repoTasks):
        return 1
    return 0
# Fix: guard the entry point so importing this module for its helper
# functions no longer triggers a full harvest run as a side effect.
# Behavior when executed as a script is unchanged: exit with the error
# code only when main() failed.
if __name__ == '__main__':
    returnCode = main()
    if returnCode != 0:
        shoutln('ERROR {}'.format(returnCode))
        sys.exit(returnCode)
|
"""
重要的事情说三遍
这是一份参考协议声明,即后端根据这份协议生成出对应的配置
"""
class FieldType:
    """Canonical string identifiers for every supported field type.

    These values mirror the 'name' entries of the FIELDS table below.
    """
    STRING = 'string'
    TEXT = 'text'
    RICHTEXT = 'richtext'
    BOOL = 'bool'
    INTEGER = 'integer'
    FLOAT = 'float'
    DECIMAL = 'decimal'
    DATE = 'date'
    DATETIME = 'datetime'
    TIME = 'time'
    IMAGE = 'image'
    FILE = 'file'
    # 'ref' is a single reference to another entity; 'mref' is a multi-reference.
    REF = 'ref'
    MREF = 'mref'
    # NOTE(review): DURATION has no corresponding entry in FIELDS -- confirm
    # whether it is still supported.
    DURATION = 'duration'
# Attributes shared by every field type. Each entry declares the attribute's
# name, its value type, whether it must be supplied, and (optionally) its
# default value.
common_attribute = [
    {
        'name': 'required',
        'type': 'bool',
        'required': False,
        'default': False,
    },
    {
        'name': 'name',
        'type': 'string',
        'required': True,
    },
    {
        'name': 'displayName',
        'type': 'string',
        'required': False,
    },
    {
        'name': 'default',
        'type': 'object',
        'required': False,
    },
    {
        'name': 'helpText',
        'type': 'string',
        'required': False,
    },
    {
        'name': 'choices',
        'type': 'array',
        'required': False,
    },
    {
        'name': 'editable',
        'type': 'bool',
        'required': False,
    },
    {
        'name': 'readonly',
        'type': 'bool',
        'required': False,
        'default': False
    }
]
# Field type definitions; each field type has its own attributes and meaning.
# 'attributes' lists type-specific attributes beyond common_attribute.
FIELDS = {
    'String': {
        'name': 'string',
        'displayName': '字符串',
        'attributes': [
            {
                'name': 'maxLength',
                'type': 'integer',
                'required': True,
            },
        ],
    },
    'Text': {
        'name': 'text',
        'displayName': '长文本',
    },
    'RichText': {
        'name': 'richtext',
        'displayName': '富文本',
    },
    'Integer': {
        'name': 'integer',
        'displayName': '整型',
    },
    'Float': {
        'name': 'float',
        'displayName': '浮点型',
    },
    'Decimal': {
        'name': 'decimal',
        # Fix: the key was misspelled 'displanName', inconsistent with every
        # other entry and invisible to consumers reading 'displayName'.
        'displayName': '小数',
    },
    'Bool': {
        'name': 'bool',
        'displayName': '布尔型',
    },
    'Date': {
        'name': 'date',
    },
    'Time': {
        'name': 'time',
    },
    'DateTime': {
        'name': 'datetime',
    },
    'Image': {
        'name': 'image',
    },
    'File': {
        'name': 'file',
    },
    'Ref': {
        'name': 'ref',
        'attributes': [
            {
                'name': 'ref',
                'type': 'string',
                'required': False,
            },
        ],
    },
    'RefMult': {
        'name': 'mref',
        'attributes': [
            {
                'name': 'ref',
                'type': 'string',
                'required': False,
            },
        ],
    },
    'RefOne': {
        'name': 'oref',
        'attributes': [
            {
                'name': 'ref',
                'type': 'string',
                'required': False,
            },
        ],
    },
    'Object': {
        'name': 'object',
    },
    'Array': {
        'name': 'array',
    },
    'TimeStamp': {
        'name': 'timestamp',
        'displayName': '时间戳'
    }
}
|
<filename>src/rl_addons/renderPM/pfm.py
import struct
from types import StringType
class _BUILDER:
    '''Virtual base helper class for structured file scanning'''
    def _get_struct_fmt(self,info):
        '''Build a little-endian struct format string from *info*.

        *info* is a sequence of (format-char, attribute-name, doc) triples;
        only the format characters are used here.
        '''
        fmt = '<'
        for f, _, _ in info:
            fmt += f
        return fmt
    def _scan_from_file(self,f,info):
        '''Read one record described by *info* from file *f* and store each
        unpacked value on self under its attribute name.'''
        fmt = self._get_struct_fmt(info)
        size = struct.calcsize(fmt)
        T = struct.unpack(fmt,f.read(size))
        i = 0
        for _, n, _ in info:
            setattr(self,n,T[i])
            i = i + 1
    def _dump(self,A):
        '''Print each attribute name in *A* together with its value.'''
        for a in A:
            print a, getattr(self,a)
    def _attr_names(self,*I):
        '''Flatten *I* into a flat list of attribute names.

        Each argument is either a plain string (used as-is) or an info
        sequence of (format, name, doc) triples, from which the names
        are taken.
        '''
        A = []
        for i in I:
            if type(i) is StringType:
                A.append(i)
            else:
                A.extend(map(lambda x: x[1],i))
        return A
    def _scanZTStr(self,f,loc):
        '''scan a zero terminated string from the file'''
        f.seek(loc)
        s = ''
        while 1:
            c = f.read(1)
            if c=='\000': break
            s = s+c
        return s
    def _scanN(self,N,fmt,f,loc):
        '''Read *N* items of struct format *fmt* starting at offset *loc*.

        Returns a tuple of unpacked values, or None when *loc* is zero
        (a zero offset marks an absent section in a PFM file).
        '''
        if not loc: return None
        fmt = len(fmt)==1 and ("<%d%c" % (N,fmt)) or ("<"+N*fmt)
        f.seek(loc)
        size = struct.calcsize(fmt)
        return struct.unpack(fmt,f.read(size))
    def _scanNT(self,T,N,fmt,f,loc):
        '''Read *N* records of format *fmt* at offset *loc* and wrap the
        fields of each record in a T(*fields) instance.

        Returns None when *loc* is zero (section absent).
        '''
        if not loc: return None
        n = len(fmt)
        X = []
        i = 0
        S = []
        for x in self._scanN(N,fmt,f,loc):
            S.append(x)
            i = i + 1
            if i==n:
                X.append(S)
                i = 0
                S = []
        return map(lambda x,T=T: T(*x),X)
class KernPair:
    '''Hold the kern adjustment for an ordered pair of characters.

    first/second are the left and right character codes of the pair;
    amount is the kerning adjustment in font units.
    '''
    def __init__(self,first,second,amount):
        self.first = first
        # Fixed: attribute was misspelled 'scond', so .second raised
        # AttributeError on every scanned kerning pair.
        self.second = second
        self.amount = amount
class KernTrack:
    '''A linear track-kerning record scanned from a PFM file.

    Attributes:
        degree     spacing change; negative pulls characters closer
                   together, positive pushes them farther apart.
        minSize    smallest font height (device units) for which linear
                   track kerning applies.
        minAmount  kerning amount used for heights <= minSize.
        maxSize    largest font height (device units) for which linear
                   track kerning applies; between minSize and maxSize the
                   amount interpolates linearly from minAmount to maxAmount.
        maxAmount  kerning amount used for heights >= maxSize.
    '''
    def __init__(self,degree,minSize,minAmount,maxSize,maxAmount):
        self.degree = degree
        self.minSize = minSize
        self.minAmount = minAmount
        self.maxSize = maxSize
        self.maxAmount = maxAmount
class PFM(_BUILDER):
def __init__(self,fn=None):
if fn:
if type(fn) is StringType:
f = open(fn,'rb')
else:
f = fn
self.scan_from_file(f)
if f is not fn: f.close()
'''Class to hold information scanned from a type-1 .pfm file'''
def scan_from_file(self,f):
self._scan_from_file(f,self._header_struct_info)
if self.dfType!=0x81: raise ValueError, "Not a Type-1 Font description"
else: self.WidthTable = None
self._scan_from_file(f,self._extension_struct_info)
if not self.dfExtentTable: raise ValueError, 'dfExtentTable is zero'
if not self.dfExtMetricsOffset: raise ValueError, 'dfExtMetricsOffset is zero'
if self.dfDevice: self.DeviceName = self._scanZTStr(f,self.dfDevice)
else: self.DeviceName = None
if self.dfFace: self.FaceName = self._scanZTStr(f,self.dfFace)
else: self.FaceName = None
f.seek(self.dfExtMetricsOffset)
self._scan_from_file(f, self._extTextMetrics_struct_info)
N = self.dfLastChar - self.dfFirstChar + 1
self.ExtentTable = self._scanN(N,'H',f,self.dfExtentTable)
if self.dfDriverInfo: self.DriverInfo = self._scanZTStr(f,self.dfDriverInfo)
else: self.DriverInfo = None
if self.dfPairKernTable: self.KerningPairs = self._scanNT(KernPair,self.dfPairKernTable,'BBh',f,self.etmKernPairs)
else: self.KerningPairs = []
if self.dfTrackKernTable: self.KerningTracks = self._scanNT(KernTrack,self.dfTrackKernTable,'hhhhh',f,self.etmKernTracks)
else: self.KerningTracks = []
def dump(self):
self._dump(
self._attr_names(
self._header_struct_info,'WidthTable',
self._extension_struct_info,
'DeviceName',
'FaceName',
self._extTextMetrics_struct_info,
'DriverInfo',
))
_header_struct_info = (('H','dfVersion',
'''This field contains the version of the PFM file.
For PFM files that conform to this description
(namely PFM files for Type-1 fonts) the
value of this field is always 0x0100.'''),
('i','dfSize',
'''This field contains the total size of the PFM file in bytes.
Some drivers check this field and compare its value with the size of the PFM
file, and if these two values don't match the font is ignored
(I know this happens e.g. with Adobe PostScript printer drivers). '''),
('60s','dfCopyright',
'''This field contains a null-terminated copyright
string, often from the application that created the
PFM file (this normally isn't the
copyright string for the font file itself).
The unused bytes in this field should be set to zero. '''),
('H','dfType',
'''This field contains the font type. The low-order
byte is a combination of the following values
(only the values being of interest in PFM
files are given):
0x00 (PF_RASTER_TYPE): font is a raster font
0x01 (PF_VECTOR_TYPE): font is a vector font
0x80 (PF_DEVICE_REALIZED): font realized by the device driver
The high-order byte is never used in PFM files, it is always zero.
In PFM files for Type-1 fonts the value in this field is always 0x0081. '''),
('H','dfPoints',
'''This field contains the point size at which this font
looks best. Since this is not relevant for scalable fonts
the field is ignored. The value
of this field should be set to 0x000a (10 pt). '''),
('H','dfVertRes',
'''This field contains the vertical resolution at which the
font was digitized (the value is in dots per inch).
The value of this field should be
set to 0x012C (300 dpi). '''),
('H','dfHorizRes',
'''This field contains the horizontal resolution at which
the font was digitized (the value is in dots per inch).
The value of this field should
be set to 0x012C (300 dpi). '''),
('H','dfAscent',
'''This field contains the distance from the top of a
character definition cell to the baseline of the
typographical font. It is useful for aligning the
baseline of fonts of different heights. '''),
('H','dfInternalLeading',
'''This field contains the amount of leading inside
the bounds set by the dfPixHeight field in the PFMHEADER
structure. Accent marks may occur in this area. '''),
('H','dfExternalLeading',
'''This field contains the amount of extra leading that the
designer requests the application to add between rows. Since this area is
outside the character definition cells, it contains no marks and will not be altered by text outputs. '''),
('B','dfItalic',
'''This field specifies whether this font is an italic
(or oblique) font. The low-order bit is 1 if the flag
is set, all other bits are zero. '''),
('B','dfUnderline',
'''This field specifies whether this font is an underlined
font. The low-order bit is 1 if the flag is set, all other
bits are zero. '''),
('B','dfStrikeOut',
'''This field specifies whether this font is a striked-out font.
The low-order bit is 1 if the flag is set, all other bits are zero. '''),
('H','dfWeight',
'''This field contains the weight of the characters in this font.
The value is on a scale from 0 through 1000, increments are in
steps of 100 each. The values roughly give the number of black
pixel from every 1000 pixels. Typical values are:
0 (FW_DONTCARE): unknown or no information
300 (FW_LIGHT): light font
400 (FW_NORMAL): normal font
700 (FW_BOLD): bold font '''),
('B','dfCharSet',
'''This field specifies the character set used in this font.
It can be one of the following values (probably other values
may be used here as well):
0x00 (ANSI_CHARSET): the font uses the ANSI character set;
this means that the font implements all characters needed for the
current Windows code page (e.g. 1252). In case of a Type-1 font
this font has been created with the encoding StandardEncoding
Note that the code page number itself is not stored in the PFM file.
0x02 (SYMBOL_CHARSET): the font uses a font-specific encoding
which will be used unchanged in displaying an printing text
using this font. In case of a Type-1 font this font has been
created with a font-specific encoding vector. Typical examples are
the Symbol and the ZapfDingbats fonts.
0xFF (OEM_CHARSET): the font uses the OEM character set; this
means that the font implements all characters needed for the
code page 437 used in e.g. MS-DOS command line mode (at least
in some versions of Windows, others might use code page
850 instead). In case of a Type-1 font this font has been created with a font-specific encoding vector. '''),
('H','dfPixWidth',
'''This field contains the width of all characters in the font.
For raster fonts this field contains the width in pixels of every
character bitmap if the font is fixed-pitch, otherwise this field
is zero and the character's widths are specified in the WidthTable
table. For vector fonts this field contains the width of the grid
on which the font was digitized. The value is ignored by PostScript
printer drivers. '''),
('H','dfPixHeight',
'''This field contains the height of all characters in the font.
For raster fonts this field contains the height in scan lines of
every character bitmap. For vector fonts this field contains the
height of the grid on which the font was digitized. The value is
ignored by PostScript printer drivers. '''),
('B','dfPitchAndFamily',
'''This field specifies the font pitch and the font family. The
font pitch specifies whether all characters in the font have the
same pitch (this is called fixed pitch too) or variable pitch.
The font family indicates, in a rather general way, the look of a font.
The least significant bit in this field contains the pitch flag.
If the bit is set the font is variable pitch, otherwise it's fixed pitch. For
Type-1 fonts this flag is set always, even if the Type-1 font is fixed pitch.
The most significant bits of this field specify the font family.
These bits may have one of the following values:
0x00 (FF_DONTCARE): no information
0x10 (FF_ROMAN): serif font, variable pitch
0x20 (FF_SWISS): sans serif font, variable pitch
0x30 (FF_MODERN): fixed pitch, serif or sans serif font
0x40 (FF_SCRIPT): cursive or handwriting font
0x50 (FF_DECORATIVE): novelty fonts '''),
('H','dfAvgWidth',
'''This field contains the average width of the characters in the font.
For a fixed pitch font this is the same as dfPixWidth in the
PFMHEADER structure. For a variable pitch font this is the width
of the character 'X'. '''),
('H','dfMaxWidth',
'''This field contains the maximum width of the characters in the font.
For a fixed pitch font this value is identical to dfAvgWidth in the
PFMHEADER structure. '''),
('B','dfFirstChar',
'''This field specifies the first character code defined by this font.
Width definitions are stored only for the characters actually present
in a font, so this field must be used when calculating indexes into the
WidthTable or the ExtentTable tables. For text fonts this field is
normally set to 0x20 (character space). '''),
('B','dfLastChar',
'''This field specifies the last character code defined by this font.
Together with the dfFirstChar field in the PFMHEADER structure this
field specifies the valid character range for this font. There must
be an entry in the WidthTable or the ExtentTable tables for every
character between these two values (including these values themselves).
For text fonts this field is normally set to 0xFF (maximum
possible value). '''),
('B','dfDefaultChar',
'''This field specifies the default character to be used whenever a
character is used that is outside the range of the dfFirstChar through
dfLastChar fields in the PFMHEADER structure. The character is given
relative to dfFirstChar so that the actual value of the default
character is the sum of dfFirstChar and dfDefaultChar. Ideally, the
default character should be a visible character in the current font,
e.g. a period ('.'). For text fonts this field is normally set to
either 0x00 (character space) or 0x75 (bullet). '''),
('B','dfBreakChar',
'''This field specifies the word-break character. Applications
use this character to separate words when wrapping or justifying lines of
text. The character is given relative to dfFirstChar in the PFMHEADER
structure so that the actual value of the word-break character
is the sum of dfFirstChar and dfBreakChar. For text fonts this
field is normally set to 0x00 (character space). '''),
('H','dfWidthBytes',
'''This field contains the number of bytes in every row of the
font bitmap. The value is always an even quantity so that rows of the
bitmap start on 16 bit boundaries. This field is not used for vector
fonts, it is therefore zero in e.g. PFM files for Type-1 fonts. '''),
('i','dfDevice',
'''This field contains the offset from the beginning of the PFM file
to the DeviceName character buffer. The DeviceName is always
present in PFM files for Type-1 fonts, this field is therefore never zero.'''),
('i','dfFace',
'''This field contains the offset from the beginning of the PFM file
to the FaceName character buffer. The FaceName is always present
in PFM files for Type-1 fonts, this field is therefore never zero. '''),
('i','dfBitsPointer',
'''This field is not used in PFM files, it must be set to zero. '''),
('i','dfBitsOffset',
'''This field is not used in PFM files, it must be set to zero. '''),
)
#'H','WidthTable[]'
#This section is present in a PFM file only when this PFM file describes a
#variable pitch raster font. Since Type-1 fonts aren't raster fonts this
#section never exists in PFM files for Type-1 fonts.'''
#The WidthTable table consists of (dfLastChar - dfFirstChar + 2) entries of type WORD (dfFirstChar and dfLastChar can be found in the
#PFMHEADER structure). Every entry contains the width of the corresponding character, the last entry in this table is extra, it is set to zero.
_extension_struct_info=(
('H','dfSizeFields',
'''This field contains the size (in bytes) of the
PFMEXTENSION structure. The value is always 0x001e. '''),
('I','dfExtMetricsOffset',
'''This field contains the offset from the beginning
of the PFM file to the ExtTextMetrics section.
The ExtTextMetrics section is always present in PFM
files for Type-1 fonts, this field is therefore never
zero. '''),
('I','dfExtentTable',
'''This field contains the offset from the beginning
of the PFM file to the ExtentTable table. This table
is always present in PFM files for Type-1 fonts, this
field is therefore never zero. '''),
('I','dfOriginTable',
'''This field contains the offset from the beginning
of the PFM file to a table containing origin coordinates
for screen fonts. This table is not present in PFM files
for Type-1 fonts, the field must therefore be set to zero. '''),
('I','dfPairKernTable',
'''This field contains the offset from the beginning of
the PFM file to the KerningPairs table. The value must
be zero if the PFM file doesn't contain a KerningPairs
table. '''),
('I','dfTrackKernTable',
'''This field contains the offset from the beginning of
the PFM file to the KerningTracks table. The value must
be zero if the PFM file doesn't contain a kerningTracks
table. '''),
('I','dfDriverInfo',
'''This field contains the offset from the beginning of
the PFM file to the DriverInfo section. This section is
always present in PFM files for Type-1 fonts, this field
is therefore never zero. '''),
('I','dfReserved',
'''This field must be set to zero. '''),
)
#char DeviceName[]
#The DeviceName character buffer is a null-terminated string
#containing the name of the printer driver family. PFM files
#for Type-1 fonts have the string 'PostScript', PFM files for
#PCL fonts have the string 'PCL/HP LaserJet'.
#char FaceName[]
#The FaceName character buffer is a null-terminated string
#containing the name of the font face. In PFM files for Type-1
#fonts this is normally
#the PostScript name of the font without suffixes like
#'-Bold', '-Italic' etc.
_extTextMetrics_struct_info = (('h','etmSize',
'''This field contains the size (in bytes) of the
EXTTEXTMETRIC structure. The value is always 0x0034. '''),
('h','etmPointSize',
'''This field contains the nominal point size of the font
in twips (this is a twentieth of a point or 1/1440 inch).
This is the intended graphics art size of the font, the
actual size may differ slightly depending on the resolution
of the output device. In PFM files for Type-1 fonts this value
should be set to 0x00f0 (240 twips or 12 pt). '''),
('h','etmOrientation',
'''This field contains the orientation of the font.
This value refers to the ability of the font to be
imaged on a page of a given orientation. It
can be one of the following values:
0x0000: any orientation
0x0001: portrait (page width is smaller that its height)
0x0002: landscape (page width is greater than its height)
In PFM files for Type-1 fonts this field is always 0x0000
since a Type-1 font can be arbitrarily rotated. '''),
('h','etmMasterHeight',
'''This field contains the font size in device units for
which the values in the ExtentTable table are exact. Since
Type-1 fonts are by convention defined in a box of 1000 x 1000
units, PFM files for Type-1 fonts have the value 0x03E8 (1000,
the number of units per em) in this field. '''),
('h','etmMinScale',
'''This field contains the minimum valid size for the font in
device units. The minimum valid point size can then be calculated
as follows:
(etmMinScale * points-per-inch) / dfVertRes
The value for 'points-per-inch' is normally 72, the dfVertRes
field can be found in the PFMHEADER structure, it contains the
vertical resolution at which the font was digitized (this
value is in dots per inch).
In PFM files for Type-1 fonts the value should be set to 0x0003. '''),
('h','etmMaxScale',
'''This field contains the maximum valid size for the font in
device units. The maximum valid point size can then be calculated
as follows:
(etmMaxScale * points-per-inch) / dfVertRes
(see also above etmMinScale).
In PFM files for Type-1 fonts the value should be set to 0x03E8 (1000). '''),
('h','etmMasterUnits',
'''This field contains the integer number of units per em
where an em equals etmMasterHeight in the EXTTEXTMETRIC structure.
In other words, the etmMasterHeight value is expressed in font
units rather than device units.
In PFM files for Type-1 fonts the value should be set to
0x03E8 (1000). '''),
('h','etmCapHeight',
'''This field contains the height for uppercase characters
in the font (the value is in font units). Typically, the
character 'H' is used for measurement purposes.
For Type-1 fonts you may find this value in the AFM file. '''),
('h','etmXHeight',
'''This field contains the height for lowercase characters
in the font (the value is in font units). Typically, the
character 'x' is used for measurement purposes.
For Type-1 fonts you may find this value in the AFM file. '''),
('h','etmLowerCaseAscent',
'''This field contains the distance (in font units) that
the ascender of lowercase letters extends above the baseline.
This distance is typically specified for a lowercase character 'd'.
For Type-1 fonts you may find this value in the AFM file. '''),
('h','etmLowerCaseDescent',
'''This field contains the distance (in font units) that
the descender of lowercase letters extends below the baseline.
This distance is typically specified for a lowercase character 'p'.
For Type-1 fonts you may find this value in the AFM file. '''),
('h','etmSlant',
'''This field contains the angle in tenth of degrees clockwise
from the upright version of the font. The value is typically not zero only for
an italic or oblique font.
For Type-1 fonts you may find this value in the AFM file
(search for the entry 'ItalicAngle' and multiply it by 10). '''),
('h','etmSuperScript',
'''This field contains the recommended amount (in font units)
to offset superscript characters from the baseline. This amount
is typically specified by a negative offset. '''),
('h','etmSubScript',
'''This field contains the recommended amount (in font units)
to offset subscript characters from the baseline. This amount
is typically specified by a positive offset. '''),
('h','etmSuperScriptSize',
'''This field contains the recommended size (in font units)
for superscript characters in the font. '''),
('h','etmSubScriptSize',
'''This field contains the recommended size (in font units)
for subscript characters in the font. '''),
('h','etmUnderlineOffset',
'''This field contains the offset (in font units) downward
from the baseline where the top of a single underline bar
should appear.
For Type-1 fonts you may find this value in the AFM file. '''),
('h','etmUnderlineWidth',
'''This field contains the thickness (in font units) of the underline bar.
For Type-1 fonts you may find this value in the AFM file. '''),
('h','etmDoubleUpperUnderlineOffset',
'''This field contains the offset (in font units) downward from
the baseline where the top of the upper, double underline bar should
appear. '''),
('h','etmDoubleLowerUnderlineOffset',
'''This field contains the offset (in font units) downward
from the baseline where the top of the lower, double underline
bar should appear. '''),
('h','etmDoubleUpperUnderlineWidth',
'''This field contains the thickness (in font units) of the
upper, double underline bar. '''),
('h','etmDoubleLowerUnderlineWidth',
'''This field contains the thickness (in font units) of the
lower, double underline bar. '''),
('h','etmStrikeOutOffset',
'''This field contains the offset (in font units) upward from
the baseline where the top of a strikeout bar should appear. '''),
('h','etmStrikeOutWidth',
'''This field contains the thickness (in font units) of the
strikeout bar. '''),
('H','etmKernPairs',
'''This field contains the number of kerning pairs defined
in the KerningPairs table in this PFM file. The number (and
therefore the table) may not be greater than 512. If the PFM
file doesn't contain a KerningPairs table the value is zero. '''),
('H','etmKernTracks',
'''This field contains the number of kerning tracks defined in
the KerningTracks table in this PFM file. The number (and therefore the
table) may not be greater than 16. If the PFM file doesn't contain
a KerningTracks table the value is zero. '''),
)
#'H','ExtentTable[]'
#The ExtentTable table must be present in a PFM file for a Type-1 font,
#it contains the unscaled widths (in 1/1000's of an em) of the characters
#in the font. The table consists of (dfLastChar - dfFirstChar + 1) entries
#of type WORD (dfFirstChar and dfLastChar can be found in the PFMHEADER
#structure). For Type-1 fonts these widths can be found in the AFM file.
#DRIVERINFO DriverInfo
#The DriverInfo section must be present in a PFM file for a Type-1 font,
#in this case it consists of a null-terminated string containing the
#PostScript name of the font.
#PAIRKERN KerningPairs[]
#The KerningPairs table need not be present in a PFM file for a Type-1
#font, if it exists it contains etmKernPairs (from the EXTTEXTMETRIC
#structure) entries. Each of these entries looks as follows:
#B kpFirst This field contains the first (left) character of the kerning pair.
#B kpSecond This field contains the second (right) character of the kerning pair.
#h kpKernAmount This field contains the kerning amount in font units, the value
# is mostly negative.
#KERNTRACK KerningTracks[]
#The KerningTracks table need not be present in a PFM file for a Type-1 font, if it exists it contains etmKernTracks (from the EXTTEXTMETRIC structure) entries. Each of these entries looks as follows:
#h ktDegree This field contains the amount to change the character spacing. Negative values mean closer together, positive values mean farther apart.
#h ktMinSize This field contains the minimum font height (in device units) for which to use linear track kerning.
#h ktMinAmount This field contains the track kerning amount to use for font heights less or equal ktMinSize.
#h ktMaxSize This field contains the maximum font height (in device units) for which to use linear track kerning. For font heights between ktMinSize and ktMaxSize the track kerning amount has to increase linearily from ktMinAmount to ktMaxAmount.
#h ktMaxAmount This field contains the track kerning amount to use for font heights greater or equal ktMaxSize.
if __name__=='__main__':
    # Smoke test: parse and dump every .pfm file shipped with Acrobat 4.0.
    # NOTE(review): Python 2 script with a hard-coded Windows install path;
    # it simply prints nothing when the directory is absent.
    from glob import glob
    for f in glob('/Program Files/Adobe/Acrobat 4.0/resource/font/pfm/*.pfm'):
        print f
        p=PFM(f)
        p.dump()
|
<filename>src/sage/combinat/tableau_tuple.py
r"""
TableauTuples
A :class:`TableauTuple` is a tuple of tableaux. These objects arise naturally
in representation theory of the wreath products of cyclic groups and the
symmetric groups where the standard tableau tuples index bases for the ordinary
irreducible representations. This generalises the well-known fact the ordinary
irreducible representations of the symmetric groups have bases indexed by the
standard tableaux of a given shape. More generally, :class:`TableauTuples`, or
multitableaux, appear in the representation theory of the degenerate and
non-degenerate cyclotomic Hecke algebras and in the crystal theory of the
integral highest weight representations of the affine special linear groups.
A :class:`TableauTuple` is an ordered tuple
`(t^{(1)}, t^{(2)}, \ldots, t^{(l)})` of tableaux. The length of the tuple is
its *level* and the tableaux `t^{(1)}, t^{(2)}, \ldots, t^{(l)}` are the
components of the :class:`TableauTuple`.
A tableaux can be thought of as the labelled diagram of a partition.
Analogously, a :class:`TableauTuple` is the labelled diagram of a
:class:`PartitionTuple`. That is, a :class:`TableauTuple` is a tableau of
:class:`PartitionTuple` shape. As much as possible, :class:`TableauTuples`
behave in exactly the same way as :class:`Tableaux`. There are obvious
differences in that the cells of a partition are ordered pairs `(r, c)`,
where `r` is a row index and `c` a column index, whereas the cells of a
:class:`PartitionTuple` are ordered triples `(k, r, c)`, with `r` and `c` as
before and `k` indexes the component.
Frequently, we will call a :class:`TableauTuple` a tableau, or a tableau of
:class:`PartitionTuple` shape. If the shape of the tableau is known this
should not cause any confusion.
.. WARNING::
In sage the convention is that the `(k, r, c)`-th entry of a tableau tuple
`t` is the entry in row `r`, column `c` and component `k` of the tableau.
This is because it makes much more sense to let ``t[k]`` be component of
the tableau. In particular, we want ``t(k,r,c) == t[k][r][c]``. In the
literature, the cells of a tableau tuple are usually written in the form
`(r, c, k)`, where `r` is the row index, `c` is the column index, and
`k` is the component index.
The same convention applies to the cells of :class:`PartitionTuples`.
.. NOTE::
As with partitions and tableaux, the cells are 0-based. For example, the
(lexicographically) first cell in any non-empty tableau tuple is
``[0,0,0]``.
EXAMPLES::
sage: TableauTuple([[1,2,3],[4,5]])
[[1, 2, 3], [4, 5]]
sage: t = TableauTuple([ [[6,7],[8,9]],[[1,2,3],[4,5]] ]); t
([[6, 7], [8, 9]], [[1, 2, 3], [4, 5]])
sage: t.pp()
6 7 1 2 3
8 9 4 5
sage: t(0,0,1)
7
sage: t(1,0,1)
2
sage: t.shape()
([2, 2], [3, 2])
sage: t.size()
9
sage: t.level()
2
sage: t.components()
[[[6, 7], [8, 9]], [[1, 2, 3], [4, 5]]]
sage: t.entries()
[6, 7, 8, 9, 1, 2, 3, 4, 5]
sage: t.parent()
Tableau tuples
sage: t.category()
Category of elements of Tableau tuples
One reason for implementing :class:`TableauTuples` is to be able to consider
:class:`StandardTableauTuples`. These objects arise in many areas of algebraic
combinatorics. In particular, they index bases for the Specht modules of the
cyclotomic Hecke algebras of type `G(r,1,n)`. A :class:`StandardTableauTuple`
is a tuple of tableaux whose entries are increasing along rows and down columns in each
component and which contain the numbers `1,2, \ldots, n`, where the shape of
the :class:`StandardTableauTuple` is a :class:`PartitionTuple` of `n`.
::
sage: s = StandardTableauTuple([ [[1,2],[3]],[[4,5]]])
sage: s.category()
Category of elements of Standard tableau tuples
sage: t = TableauTuple([ [[1,2],[3]],[[4,5]]])
sage: t.is_standard(), t.is_column_strict(), t.is_row_strict()
(True, True, True)
sage: t.category()
Category of elements of Tableau tuples
sage: s == t
True
sage: s is t
False
sage: s == StandardTableauTuple(t)
True
sage: StandardTableauTuples([ [2,1],[1] ])[:]
[([[1, 2], [3]], [[4]]),
([[1, 3], [2]], [[4]]),
([[1, 2], [4]], [[3]]),
([[1, 3], [4]], [[2]]),
([[2, 3], [4]], [[1]]),
([[1, 4], [2]], [[3]]),
([[1, 4], [3]], [[2]]),
([[2, 4], [3]], [[1]])]
As tableaux (of partition shape) are in natural bijection with 1-tuples of
tableaux all of the :class:`TableauTuple` classes return an ordinary
:class:`Tableau` when given :class:`TableauTuple` of level 1.
::
sage: TableauTuples( level=1 ) is Tableaux()
True
sage: TableauTuple([[1,2,3],[4,5]])
[[1, 2, 3], [4, 5]]
sage: TableauTuple([ [[1,2,3],[4,5]] ])
[[1, 2, 3], [4, 5]]
sage: TableauTuple([[1,2,3],[4,5]]) == Tableau([[1,2,3],[4,5]])
True
There is one situation where a 1-tuple of tableau is not actually a
:class:`Tableau`; tableaux generated by the :func:`StandardTableauTuples()`
iterators must have the correct parents, so in this one case 1-tuples of
tableaux are different from :class:`Tableaux`::
sage: StandardTableauTuples()[:10]
[(),
([[1]]),
([], []),
([[1, 2]]),
([[1], [2]]),
([[1]], []),
([], [[1]]),
([], [], []),
([[1, 2, 3]]),
([[1, 3], [2]])]
AUTHORS:
- <NAME> (2012-10-09): Initial version -- heavily based on
``tableau.py`` by <NAME> (2007) and <NAME> (2011).
- <NAME> (2016-08-11): Row standard tableaux added
Element classes:
* :class:`TableauTuples`
* :class:`StandardTableauTuples`
* :class:`RowStandardTableauTuples`
Factory classes:
* :class:`TableauTuples`
* :class:`StandardTableauTuples`
* :class:`RowStandardTableauTuples`
Parent classes:
* :class:`TableauTuples_all`
* :class:`TableauTuples_level`
* :class:`TableauTuples_size`
* :class:`TableauTuples_level_size`
* :class:`StandardTableauTuples_all`
* :class:`StandardTableauTuples_level`
* :class:`StandardTableauTuples_size`
* :class:`StandardTableauTuples_level_size`
* :class:`StandardTableauTuples_shape`
* :class:`StandardTableaux_residue`
* :class:`StandardTableaux_residue_shape`
* :class:`RowStandardTableauTuples_all`
* :class:`RowStandardTableauTuples_level`
* :class:`RowStandardTableauTuples_size`
* :class:`RowStandardTableauTuples_level_size`
* :class:`RowStandardTableauTuples_shape`
* :class:`RowStandardTableauTuples_residue`
* :class:`RowStandardTableauTuples_residue_shape`
.. SEEALSO::
* :class:`Tableau`
* :class:`StandardTableau`
* :class:`Tableaux`
* :class:`StandardTableaux`
* :class:`Partitions`
* :class:`PartitionTuples`
* :class:`ResidueSequence`
.. TODO::
Implement semistandard tableau tuples as defined in [DJM1998]_.
Much of the combinatorics implemented here is motivated by this and
subsequent papers on the representation theory of these algebras.
"""
#*****************************************************************************
# Copyright (C) 2012,2016 <NAME> <andrew dot mathas at sydney dot edu dot au>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function, absolute_import
from six.moves import range
from six import add_metaclass
from sage.combinat.combinat import CombinatorialElement
from sage.combinat.words.word import Word
from sage.combinat.posets.posets import Poset
from sage.combinat.tableau import (Tableau, Tableaux, Tableaux_size, Tableaux_all,
StandardTableau, RowStandardTableau,
StandardTableaux, StandardTableaux_size,
StandardTableaux_all, StandardTableaux_shape,
RowStandardTableaux, RowStandardTableaux_size,
RowStandardTableaux_all, RowStandardTableaux_shape)
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.categories.sets_cat import Sets
from sage.groups.perm_gps.permgroup import PermutationGroup
from sage.misc.classcall_metaclass import ClasscallMetaclass
from sage.misc.flatten import flatten
from sage.misc.lazy_attribute import lazy_attribute
from sage.misc.misc_c import prod
from sage.misc.prandom import random
from sage.arith.all import factorial
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing
from sage.rings.integer import Integer
from sage.rings.all import NN
from sage.sets.disjoint_union_enumerated_sets import DisjointUnionEnumeratedSets
from sage.sets.family import Family
from sage.sets.positive_integers import PositiveIntegers
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.combinat import permutation
#--------------------------------------------------
# Tableau tuple - element class
#--------------------------------------------------
class TableauTuple(CombinatorialElement):
"""
A class to model a tuple of tableaux.
INPUT:
- ``t`` -- a list or tuple of :class:`Tableau`, a list or tuple of lists
of lists
OUTPUT:
- The Tableau tuple object constructed from ``t``.
    A :class:`TableauTuple` is a tuple of tableaux of shape a
    :class:`PartitionTuple`. These combinatorial objects are useful in
    several areas of algebraic combinatorics. In particular, they are
    important in:
- the representation theory of the complex reflection groups of
type `G(l,1,n)` and the representation theory of the associated
(degenerate and non-degenerate) Hecke algebras. See, for example,
[DJM1998]_
- the crystal theory of (quantum) affine special linear groups and its
integral highest weight modules and their canonical bases. See, for
example, [BK2009]_.
These apparently different and unrelated contexts are, in fact, intimately
related as in characteristic zero the cyclotomic Hecke algebras categorify
the canonical bases of the integral highest weight modules of the quantum
affine special linear groups.
The :meth:`level` of a tableau tuple is the length of the tuples. This
corresponds to the level of the corresponding highest weight module.
    In sage a :class:`TableauTuple` looks and behaves like a real tuple of
    (level 1) :class:`Tableaux`. Many of the operations which are defined
    on :class:`Tableau` extend to :class:`TableauTuples`. Tableau tuples of
    level 1 are just ordinary :class:`Tableau`.
In sage, the entries of :class:`Tableaux` can be very general, including
arbitrarily nested lists, so some lists can be interpreted either as a
tuple of tableaux or simply as tableaux. If it is possible to interpret
the input to :class:`TableauTuple` as a tuple of tableaux then
:class:`TableauTuple` returns the corresponding tuple. Given a 1-tuple of
tableaux the tableau itself is returned.
EXAMPLES::
sage: t = TableauTuple([ [[6,9,10],[11]], [[1,2,3],[4,5]], [[7],[8]] ]); t
([[6, 9, 10], [11]], [[1, 2, 3], [4, 5]], [[7], [8]])
sage: t.level()
3
sage: t.size()
11
sage: t.shape()
([3, 1], [3, 2], [1, 1])
sage: t.is_standard()
True
sage: t.pp() # pretty printing
6 9 10 1 2 3 7
11 4 5 8
sage: t.category()
Category of elements of Tableau tuples
sage: t.parent()
Tableau tuples
sage: s = TableauTuple([ [['a','c','b'],['d','e']],[[(2,1)]]]); s
([['a', 'c', 'b'], ['d', 'e']], [[(2, 1)]])
sage: s.shape()
([3, 2], [1])
sage: s.size()
6
sage: TableauTuple([[],[],[]]) # The empty 3-tuple of tableaux
([], [], [])
sage: TableauTuple([[1,2,3],[4,5]])
[[1, 2, 3], [4, 5]]
sage: TableauTuple([[1,2,3],[4,5]]) == Tableau([[1,2,3],[4,5]])
True
.. SEEALSO::
- :class:`StandardTableauTuple`
- :class:`StandardTableauTuples`
- :class:`StandardTableau`
- :class:`StandardTableaux`
- :class:`TableauTuple`
- :class:`TableauTuples`
- :class:`Tableau`
- :class:`Tableaux`
TESTS::
sage: TableauTuple( [[1,2,3],[4,5]] ).category()
Category of elements of Tableaux
sage: TableauTuple([[[1,2,3],[4,5]]]).category()
Category of elements of Tableaux
sage: TableauTuple([[1],[2,3]])
Traceback (most recent call last):
...
ValueError: A tableau must be a list of iterables.
sage: TestSuite( TableauTuple([ [[1,2],[3,4]], [[1,2],[3,4]] ]) ).run()
sage: TestSuite( TableauTuple([ [[1,2],[3,4]], [], [[1,2],[3,4]] ]) ).run()
sage: TestSuite( TableauTuple([[[1,1],[1]],[[1,1,1]],[[1],[1],[1]],[[1]]]) ).run()
"""
Element = Tableau
@staticmethod
def __classcall_private__(self, t):
r"""
This ensures that a :class:`TableauTuples` is only ever constructed
via an ``element_class()`` call of an appropriate parent.
EXAMPLES::
sage: t=TableauTuple([[[1,1],[1]],[[1,1,1]],[[1],[1],[1]],[[1]]])
sage: t.parent()
Tableau tuples
sage: t.category()
Category of elements of Tableau tuples
sage: type(t)
<class 'sage.combinat.tableau_tuple.TableauTuples_all_with_category.element_class'>
sage: TableauTuples(level=4)(t).parent()
Tableau tuples of level 4
"""
if isinstance(t, (Tableau,TableauTuple)):
return t
# one way or another these two cases need to be treated separately
if t == [] or t == [[]]:
return Tableaux_all().element_class(Tableaux_all(), [])
# The Tableau class is very general in that it allows the entries of a
# tableau to be almost anything, including lists. For this reason we
# first try and interpret t as a tuple of tableaux and if this fails we
# then try to think of t as a tableau.
try:
t = [Tableau(s) for s in t]
except (TypeError,ValueError):
try:
t = [Tableau(t)]
except ValueError:
pass
if len(t) == 1:
return Tableaux_all().element_class(Tableaux_all(), t[0])
else:
return TableauTuples_all().element_class(TableauTuples_all(), t)
raise ValueError( '%s is not a Tableau tuple' % t )
def __init__(self, parent, t, check=True):
r"""
Initializes a tableau.
EXAMPLES::
sage: t = TableauTuples( )([[[1,1],[1]],[[1,1,1]],[[1],[1],[1]]])
sage: s = TableauTuples(3)([[[1,1],[1]],[[1,1,1]],[[1],[1],[1]]])
sage: s == t
True
sage: t.parent()
Tableau tuples
sage: s.parent()
Tableau tuples of level 3
sage: r = TableauTuples()(s); r.parent()
Tableau tuples
sage: s is t # identical tableaux are distinct objects
False
"""
# By calling Tableau we implicitly check that the shape is a PartitionTuple
t = [Tableau(s) for s in t]
CombinatorialElement.__init__(self, parent, t)
self._level = len(self._list)
def _repr_(self):
"""
The string representation of ``self``.
EXAMPLES::
sage: TableauTuple([[]]) # indirect doctest
[]
sage: TableauTuple([[],[]])
([], [])
sage: TableauTuple([[],[],[]])
([], [], [])
sage: TableauTuple([[],[],[],[]])
([], [], [], [])
"""
return self.parent().options._dispatch(self,'_repr_','display')
def _repr_list(self):
"""
Return a string representation of ``self`` as a list.
EXAMPLES::
sage: TableauTuple([[],[],[],[]])._repr_list()
'([], [], [], [])'
"""
return '('+', '.join('%s'%s for s in self)+')'
def _repr_compact(self):
"""
Return a compact string representation of ``self``.
EXAMPLES::
sage: TableauTuple([[],[],[],[]])._repr_compact()
'-|-|-|-'
sage: TableauTuple([[[1,2,3],[4,5]],[],[[6]],[]])._repr_compact()
'1,2,3/4,5|-|6|-'
"""
return '|'.join('%s'%s._repr_compact() for s in self)
    def _repr_diagram(self):
        """
        Return a string representation of ``self`` as an array.

        The components are rendered side by side, with an empty component
        drawn as ``-``. The ``convention`` option of :class:`TableauTuples`
        determines whether rows are listed top-down (English) or
        bottom-up (French).

        EXAMPLES::

            sage: print(TableauTuple([[[2,3]],[[1]],[[4],[5]],[]])._repr_diagram())
              2  3     1     4  -
                             5
            sage: print(TableauTuple([[[2,3]],[],[[4],[5]],[]])._repr_diagram())
              2  3  -  4  -
                       5
            sage: TableauTuples.options(convention='French')
            sage: print(TableauTuple([[[2,3]],[[1]],[[4],[5]],[]])._repr_diagram())
                             5
              2  3     1     4  -
            sage: print(TableauTuple([[[2,3]],[],[[4],[5]],[]])._repr_diagram())
                       5
              2  3  -  4  -
            sage: TableauTuples.options._reset()

        TESTS:

        Check that :trac:`20768` is fixed::

            sage: T = TableauTuple([[[1,2,1],[1],[12345]], [], [[1523,1,2],[1,12341,-2]]])
            sage: T.pp()
              1     2  1  -  1523      1   2
              1                 1  12341  -2
              12345
        """
        # render each component separately and split into display rows
        str_tt = [T._repr_diagram().split('\n') for T in self]
        if TableauTuples.options('convention') == "French":
            # French convention: rows within each component read bottom-up
            for T_str in str_tt:
                T_str.reverse()
        # each component rendering pads all of its lines to a common width,
        # so the first line gives the component's display width
        widths = [len(T_str[0]) for T_str in str_tt]
        # despite the name, num_cols is the number of *display rows* needed,
        # i.e. the maximal number of rows over all components
        num_cols = max(len(T_str) for T_str in str_tt)
        # join the components row by row, padding components that have
        # fewer rows with blanks so the columns stay aligned
        diag = [' '.join(' ' * widths[j] if i >= len(T_str) else
                         "{:<{width}}".format(T_str[i], width=widths[j])
                         for j,T_str in enumerate(str_tt))
                for i in range(num_cols)]
        if TableauTuples.options('convention') == "English":
            return '\n'.join(diag)
        else:
            # French convention: flip the stacking order of the rows
            return '\n'.join(diag[::-1])
def _ascii_art_(self):
"""
TESTS::
sage: ascii_art(TableauTuple([[[2,3]],[],[[4],[5]],[]]))
2 3 - 4 -
5
"""
from sage.typeset.ascii_art import AsciiArt
return AsciiArt(self._repr_diagram().splitlines())
def _latex_(self):
r"""
Returns a LaTeX version of ``self``.
EXAMPLES::
sage: t=TableauTuple([ [[1,2],[3]], [], [[4,5],[6,7]] ])
sage: latex(t) # indirect doctest
\Bigg( {\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{2}c}\cline{1-2}
\lr{1}&\lr{2}\\\cline{1-2}
\lr{3}\\\cline{1-1}
\end{array}$},\emptyset,\raisebox{-.6ex}{$\begin{array}[b]{*{2}c}\cline{1-2}
\lr{4}&\lr{5}\\\cline{1-2}
\lr{6}&\lr{7}\\\cline{1-2}
\end{array}$}
} \Bigg)
sage: TableauTuples.options(convention="french")
sage: latex(t) # indirect doctest
\Bigg( {\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[t]{*{2}c}\cline{1-1}
\lr{3}\\\cline{1-2}
\lr{1}&\lr{2}\\\cline{1-2}
\end{array}$},\emptyset,\raisebox{-.6ex}{$\begin{array}[t]{*{2}c}\cline{1-2}
\lr{6}&\lr{7}\\\cline{1-2}
\lr{4}&\lr{5}\\\cline{1-2}
\end{array}$}
} \Bigg)
sage: TableauTuples.options._reset()
"""
return self.parent().options._dispatch(self,'_latex_','latex')
_latex_list = _repr_list
def _latex_diagram(self):
r"""
Return a LaTeX representation of ``self`` as a Young diagram.
EXAMPLES::
sage: t = TableauTuple([ [[1,2],[3]], [], [[4,5],[6,7]] ])
sage: print(t._latex_diagram())
\Bigg( {\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{2}c}\cline{1-2}
\lr{1}&\lr{2}\\\cline{1-2}
\lr{3}\\\cline{1-1}
\end{array}$},\emptyset,\raisebox{-.6ex}{$\begin{array}[b]{*{2}c}\cline{1-2}
\lr{4}&\lr{5}\\\cline{1-2}
\lr{6}&\lr{7}\\\cline{1-2}
\end{array}$}
} \Bigg)
"""
from sage.combinat.output import tex_from_array_tuple
return r'\Bigg( %s \Bigg)' % tex_from_array_tuple(self)
def components(self):
"""
Return a list of the components of tableau tuple ``self``. The
`components` are the individual :class:`Tableau` which are contained
in the tuple ``self``.
For compatibility with :class:`TableauTuples` of :meth:`level` 1,
:meth:`components` should be used to iterate over the components of
:class:`TableauTuples`.
EXAMPLES::
sage: for t in TableauTuple([[1,2,3],[4,5]]).components(): t.pp()
1 2 3
4 5
sage: for t in TableauTuple([ [[1,2,3],[4,5]], [[6,7],[8,9]] ]).components(): t.pp()
1 2 3
4 5
6 7
8 9
"""
return [t for t in self]
def to_list(self):
"""
Return the list representation of the tableaux tuple ``self``.
EXAMPLES::
sage: TableauTuple([ [[1,2,3],[4,5]], [[6,7],[8,9]] ]).to_list()
[[[1, 2, 3], [4, 5]], [[6, 7], [8, 9]]]
"""
return [t.to_list() for t in self]
def __call__(self, *cell):
r"""
Get a cell in ``self``.
INPUT:
- ``self`` -- a tableau
- ``cell`` -- a triple of integers, tuple, or list specifying a cell
in ``self``
OUTPUT:
- The value in the corresponding cell.
EXAMPLES::
sage: t = TableauTuple([[[1,2,3],[4,5]],[[6,7]],[[8],[9]]])
sage: t(1,0,0)
6
sage: t((1,0,0))
6
sage: t(3,3,3)
Traceback (most recent call last):
...
IndexError: The cell (3, 3, 3) is not contained in the tableau
"""
if isinstance(cell[0], (int, Integer)):
k,r,c = cell[0], cell[1], cell[2]
else:
k,r,c = cell[0]
try:
return self[k][r][c]
except IndexError:
raise IndexError("The cell (%s, %s, %s) is not contained in the tableau"% (k,r,c))
def level(self):
"""
Return the level of the tableau ``self``, which is just the number of
components in the tableau tuple ``self``.
EXAMPLES::
sage: TableauTuple([[[7,8,9]],[],[[1,2,3],[4,5],[6]]]).level()
3
"""
return self._level
def shape(self):
r"""
Returns the :class:`PartitionTuple` which is the shape of the tableau
tuple ``self``.
EXAMPLES::
sage: TableauTuple([[[7,8,9]],[],[[1,2,3],[4,5],[6]]]).shape()
([3], [], [3, 2, 1])
"""
from sage.combinat.partition_tuple import PartitionTuples
P = PartitionTuples()
return P.element_class(P, [t.shape() for t in self])
def size(self):
"""
Returns the size of the tableau tuple ``self``, which is just the
number of boxes, or the size, of the underlying
:class:`PartitionTuple`.
EXAMPLES::
sage: TableauTuple([[[7,8,9]],[],[[1,2,3],[4,5],[6]]]).size()
9
"""
return self.shape().size()
def conjugate(self):
r"""
Return the conjugate of the tableau tuple ``self``.
The conjugate tableau tuple `T'` is the :class:`TableauTuple`
obtained from `T` by reversing the order of the components and
conjugating each component -- that is, swapping the rows and
columns of the all of :class:`Tableau` in `T` (see
:meth:`sage.combinat.tableau.Tableau.conjugate`).
EXAMPLES::
sage: TableauTuple([[[1,2],[3,4]],[[5,6,7],[8]],[[9,10],[11],[12]]]).conjugate()
([[9, 11, 12], [10]], [[5, 8], [6], [7]], [[1, 3], [2, 4]])
"""
conj = [t.conjugate() for t in reversed(self)]
# attempt to return a tableau of the same type
try:
return self.parent()(conj)
except Exception:
try:
return self.parent().element_class(self.parent(), conj)
except Exception:
return Tableau(conj)
def pp(self):
"""
Pretty printing for the tableau tuple ``self``.
EXAMPLES::
sage: TableauTuple([ [[1,2,3],[4,5]], [[1,2,3],[4,5]] ]).pp()
1 2 3 1 2 3
4 5 4 5
sage: TableauTuple([ [[1,2],[3],[4]],[],[[6,7,8],[10,11],[12],[13]]]).pp()
1 2 - 6 7 8
3 10 11
4 12
13
sage: t = TableauTuple([ [[1,2,3],[4,5],[6],[9]], [[1,2,3],[4,5,8]], [[11,12,13],[14]] ])
sage: t.pp()
1 2 3 1 2 3 11 12 13
4 5 4 5 8 14
6
9
sage: TableauTuples.options(convention="french")
sage: t.pp()
9
6
4 5 4 5 8 14
1 2 3 1 2 3 11 12 13
sage: TableauTuples.options._reset()
"""
print(self._repr_diagram())
def to_word_by_row(self):
"""
Returns a word obtained from a row reading of the tableau tuple
``self``.
EXAMPLES::
sage: TableauTuple([[[1,2],[3,4]],[[5,6,7],[8]],[[9,10],[11],[12]]]).to_word_by_row()
word: 12,11,9,10,8,5,6,7,3,4,1,2
"""
w = []
for t in self.components()[::-1]:
for row in reversed(t):
w+=row
return Word(w)
# an alias -- should remove?
to_word=to_word_by_row
def to_word_by_column(self):
"""
Returns the word obtained from a column reading of the tableau tuple
``self``.
EXAMPLES::
sage: TableauTuple([[[1,2],[3,4]],[[5,6,7],[8]],[[9,10],[11],[12]]]).to_word_by_column()
word: 12,11,9,10,8,5,6,7,3,1,4,2
"""
w = []
for t in self.conjugate():
for row in t:
w += row[::-1]
return Word(w)
def to_permutation(self):
"""
Returns a permutation with the entries in the tableau tuple ``self``
which is obtained by ``self`` obtained by reading the entries of the
tableau tuple in order from left to right along the rows, and then
top to bottom, in each component and then left to right along the
components.
EXAMPLES::
sage: TableauTuple([[[1,2],[3,4]],[[5,6,7],[8]],[[9,10],[11],[12]]]).to_permutation()
[12, 11, 9, 10, 8, 5, 6, 7, 3, 4, 1, 2]
"""
return permutation.Permutation(self.to_word_by_row())
    def entries(self):
        """
        Return the list of all entries of ``self``, in the order obtained
        by reading across the rows of each component, with the components
        read from left to right.

        Note that the result is *not* sorted, as the second example below
        shows.

        EXAMPLES::

            sage: TableauTuple([[[1,2],[3,4]],[[5,6,7],[8]],[[9,10],[11],[12]]]).entries()
            [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
            sage: TableauTuple([[[1,2],[3,4]],[[9,10],[11],[12]],[[5,6,7],[8]]]).entries()
            [1, 2, 3, 4, 9, 10, 11, 12, 5, 6, 7, 8]
        """
        # each component's entries() is a tuple; sum concatenates them
        return list(sum((s.entries() for s in self), ()))
def entry(self, l, r, c):
"""
Return the entry of the cell ``(l, r, c)`` in ``self``.
A cell is a tuple ``(l, r, c)`` of coordinates, where ``l`` is the
component index, ``r`` is the row index, and ``c`` is the column index.
EXAMPLES::
sage: t = TableauTuple([[[1,2],[3,4]],[[5,6,7],[8]],[[9,10],[11],[12]]])
sage: t.entry(1, 0, 0)
5
sage: t.entry(1, 1, 1)
Traceback (most recent call last):
...
IndexError: tuple index out of range
"""
return self[l][r][c]
def is_row_strict(self):
"""
Return ``True`` if the tableau ``self`` is row strict and ``False``
otherwise.
A tableau tuple is *row strict* if the entries in each row of each
component are in increasing order, when read from left to right.
EXAMPLES::
sage: TableauTuple([[[5,7],[8]],[[1, 3], [2, 4]],[[6]]]).is_row_strict()
True
sage: TableauTuple([[[1, 2], [2, 4]],[[4,5,6],[7,8]]]).is_row_strict()
True
sage: TableauTuple([[[1]],[[2, 3], [2, 4]]]).is_row_strict()
True
sage: TableauTuple([[[1]],[[2, 2], [4,5]]]).is_row_strict()
False
sage: TableauTuple([[[1,2],[6,7]],[[4,8], [6, 9]],[]]).is_row_strict()
True
"""
return all(t.is_row_strict() for t in self)
def first_row_descent(self):
r"""
Return the first cell of ``self`` that is not row standard.
Cells are ordered left to right along the rows and then top to
bottom. That is, the cell minimal `(k,r,c)` such that the entry in
position `(k,r,c)` is bigger than the entry in position `(k,r,c+1)`.
If there is no such cell then ``None`` is returned - in this
case the tableau is row strict.
OUTPUT:
The cell corresponding to the first row descent or ``None``
if the tableau is row strict.
EXAMPLES::
sage: TableauTuple([[[5,6,7],[1,2]],[[1,3,2],[4]]]).first_row_descent()
(1, 0, 1)
sage: TableauTuple([[[1,2,3],[4]],[[6,7,8],[1,2,3]],[[1,11]]]).first_row_descent() is None
True
"""
for k in range(len(self)):
cell = self[k].first_row_descent()
if cell is not None:
return (k, cell[0], cell[1])
return None
def is_column_strict(self):
"""
Return ``True`` if the tableau ``self`` is column strict and ``False``
otherwise.
A tableau tuple is *column strict* if the entries in each column of
each component are in increasing order, when read from top to bottom.
EXAMPLES::
sage: TableauTuple([[[5,7],[8]],[[1, 3], [2, 4]],[[6]]]).is_column_strict()
True
sage: TableauTuple([[[1, 2], [2, 4]],[[4,5,6],[7,8]]]).is_column_strict()
True
sage: TableauTuple([[[1]],[[2, 3], [2, 4]]]).is_column_strict()
False
sage: TableauTuple([[[1]],[[2, 2], [4,5]]]).is_column_strict()
True
sage: TableauTuple([[[1,2],[6,7]],[[4,8], [6, 9]],[]]).is_column_strict()
True
"""
return all(t.is_column_strict() for t in self)
    def first_column_descent(self):
        r"""
        Return the first cell of ``self`` that is not column standard.

        Cells are ordered left to right along the rows and then top to
        bottom. That is, return the cell `(k,r,c)` with `(k,r,c)` minimal
        such that the entry in position `(k,r,c)` is bigger than the entry
        in position `(k,r+1,c)`. If there is no such cell then ``None``
        is returned - in this case the tableau is column strict.

        OUTPUT:

        The cell corresponding to the first column descent or ``None``
        if the tableau is column strict.

        EXAMPLES::

            sage: TableauTuple([[[3,5,6],[2,4,5]],[[1,4,5],[2,3]]]).first_column_descent()
            (0, 0, 0)
            sage: Tableau([[[1,2,3],[4]],[[5,6,7],[8,9]]]).first_column_descent() is None
            True
        """
        # delegate to the components; translate the first hit into a
        # level-aware coordinate (k, row, column)
        for k in range(len(self)):
            cell=self[k].first_column_descent()
            if cell is not None:
                return (k,cell[0],cell[1])
        return None
def is_standard(self):
r"""
Return ``True`` if the tableau ``self`` is a standard tableau and
``False`` otherwise.
A tableau tuple is *standard* if it is row standard, column standard
and the entries in the tableaux are `1, 2, \ldots, n`, where `n`
is the :meth:`size` of the underlying partition tuple of ``self``.
EXAMPLES::
sage: TableauTuple([[[5,7],[8]],[[1, 3], [2, 4]],[[6]]]).is_standard()
True
sage: TableauTuple([[[1, 2], [2, 4]],[[4,5,6],[7,8]]]).is_standard()
False
sage: TableauTuple([[[1]],[[2, 3], [2, 4]]]).is_standard()
False
sage: TableauTuple([[[1]],[[2, 2], [4,5]]]).is_row_strict()
False
sage: TableauTuple([[[1,2],[6,7]],[[4,8], [6, 9]],[]]).is_standard()
False
"""
entries = sorted(self.entries())
return entries == list(range(1, self.size() + 1)) and self.is_row_strict() and self.is_column_strict()
def reduced_row_word(self):
r"""
Return the lexicographically minimal reduced expression for the
permutation that maps the :meth:`initial_tableau` to ``self``.
This reduced expression is a minimal length coset representative for the
corresponding Young subgroup. In one line notation, the permutation is
obtained by concatenating the rows of the tableau from top to bottom in
each component, and then left to right along the components.
EXAMPLES::
sage: StandardTableauTuple([[[1,2],[3]],[[4,5,6],[7,8],[9]]]).reduced_row_word()
[]
sage: StandardTableauTuple([[[1,2],[3]],[[4,5,6],[7,9],[8]]]).reduced_row_word()
[8]
sage: StandardTableauTuple([[[1,2],[3]],[[4,5,7],[6,9],[8]]]).reduced_row_word()
[6, 8]
sage: StandardTableauTuple([[[1,2],[3]],[[4,5,8],[6,9],[7]]]).reduced_row_word()
[6, 8, 7]
sage: StandardTableauTuple([[[1,2],[3]],[[4,5,9],[6,8],[7]]]).reduced_row_word()
[6, 7, 8, 7]
sage: StandardTableauTuple([[[7,9],[8]],[[1,3,5],[2,6],[4]]]).reduced_row_word()
[2, 3, 2, 1, 4, 3, 2, 5, 4, 3, 6, 5, 4, 3, 2, 7, 6, 5, 8, 7, 6, 5, 4]
"""
from sage.combinat.permutation import Permutation
return Permutation(list(self.entries())).inverse().reduced_word_lexmin()
def reduced_column_word(self):
r"""
Return the lexicographically minimal reduced expression for the
permutation that maps the :meth:`initial_column_tableau` to ``self``.
This reduced expression is a minimal length coset representative for the
corresponding Young subgroup. In one line notation, the permutation is
obtained by concatenating the rows of the tableau from top to bottom in
each component, and then left to right along the components.
EXAMPLES::
sage: StandardTableauTuple([[[7,9],[8]],[[1,4,6],[2,5],[3]]]).reduced_column_word()
[]
sage: StandardTableauTuple([[[7,9],[8]],[[1,3,6],[2,5],[4]]]).reduced_column_word()
[3]
sage: StandardTableauTuple([[[6,9],[8]],[[1,3,7],[2,5],[4]]]).reduced_column_word()
[3, 6]
sage: StandardTableauTuple([[[6,8],[9]],[[1,3,7],[2,5],[4]]]).reduced_column_word()
[3, 6, 8]
sage: StandardTableauTuple([[[5,8],[9]],[[1,3,7],[2,6],[4]]]).reduced_column_word()
[3, 6, 5, 8]
"""
from sage.combinat.permutation import Permutation
return Permutation(list(self.conjugate().entries())).inverse().reduced_word_lexmin()
def cells_containing(self, m):
r"""
Return the list of cells in which the letter ``m`` appears in the
tableau ``self``.
The list is ordered with cells appearing from left to right.
EXAMPLES::
sage: t = TableauTuple([[[4,5]],[[1,1,2,4],[2,4,4],[4]],[[1,3,4],[3,4]]])
sage: t.cells_containing(4)
[(0, 0, 0),
(1, 2, 0),
(1, 1, 1),
(1, 1, 2),
(1, 0, 3),
(2, 1, 1),
(2, 0, 2)]
sage: t.cells_containing(6)
[]
"""
return [(k,r,c) for k in range(len(self)) for (r,c) in self[k].cells_containing(m)]
    def up(self, n=None):
        """
        An iterator for all the :class:`TableauTuple` that can be obtained
        from ``self`` by adding a cell with the label ``n``. If ``n`` is not
        specified then a cell with label ``n`` will be added to the tableau
        tuple, where ``n-1`` is the size of the tableau tuple before any cells
        are added.

        EXAMPLES::

            sage: list(TableauTuple([[[1,2]],[[3]]]).up())
            [([[1, 2, 4]], [[3]]),
             ([[1, 2], [4]], [[3]]),
             ([[1, 2]], [[3, 4]]),
             ([[1, 2]], [[3], [4]])]
        """
        if n is None:
            n = self.size()
        # Go through and add n+1 to the end of each of the rows
        # (We could call shape().addable_cells() but this seems more efficient)
        for k in range(len(self)):
            for row in range(len(self[k])):
                # a cell is addable at the end of a row exactly when the row
                # is strictly shorter than the one above it (or is the top row)
                if row==0 or self.shape()[k][row]<self.shape()[k][row-1]:
                    new_t=self.to_list() # a copy
                    new_t[k][row].append(n+1)
                    yield StandardTableauTuple(new_t)
            # now add node to last row
            new_t=self.to_list() # a copy
            new_t[k].append([n+1])
            yield StandardTableauTuple(new_t)
def row_stabilizer(self):
"""
Return the :class:`PermutationGroup` corresponding to ``self``. That
is, return subgroup of the symmetric group of degree :meth:`size`
which is the row stabilizer of ``self``.
EXAMPLES::
sage: rs = TableauTuple([[[1,2,3],[4,5]],[[6,7]],[[8],[9]]]).row_stabilizer()
sage: rs.order()
24
sage: PermutationGroupElement([(1,3,2),(4,5)]) in rs
True
sage: PermutationGroupElement([(1,4)]) in rs
False
sage: rs.one().domain()
[1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
# Ensure that the permutations involve all elements of the
# tableau, by including the identity permutation on the set [1..n].
n = max(self.entries())
gens = [list(range(1, n + 1))]
for t in self:
for i in range(len(t)):
for j in range(0, len(t[i])-1):
gens.append( (t[i][j], t[i][j+1]) )
return PermutationGroup( gens )
def column_stabilizer(self):
"""
Return the :class:`PermutationGroup` corresponding to ``self``. That
is, return subgroup of the symmetric group of degree :meth:`size`
which is the column stabilizer of ``self``.
EXAMPLES::
sage: cs = TableauTuple([[[1,2,3],[4,5]],[[6,7]],[[8],[9]]]).column_stabilizer()
sage: cs.order()
8
sage: PermutationGroupElement([(1,3,2),(4,5)]) in cs
False
sage: PermutationGroupElement([(1,4)]) in cs
True
"""
return self.conjugate().row_stabilizer()
def charge(self):
r"""
Return the charge of the reading word of ``self``.
See :meth:`~sage.combinat.words.finite_word.FiniteWord_class.charge`
for more information.
EXAMPLES::
sage: TableauTuple([[[4,5]],[[1,1,2,4],[2,4,4],[4]],[[1,3,4],[3,4]]]).charge()
4
"""
return self.to_word_by_row().charge()
    def cocharge(self):
        r"""
        Return the cocharge of the reading word of ``self``.

        See :meth:`~sage.combinat.words.finite_word.FiniteWord_class.cocharge`
        for more information.

        EXAMPLES::

            sage: TableauTuple([[[4,5]],[[1,1,2,4],[2,4,4],[4]],[[1,3,4],[3,4]]]).charge()
            4

        .. NOTE::

            The doctest above calls :meth:`charge` rather than
            ``cocharge`` -- it appears to have been copied from
            :meth:`charge`. It is kept verbatim here; a doctest actually
            exercising ``cocharge`` (with a verified expected value)
            should replace it. -- TODO confirm
        """
        return self.to_word_by_row().cocharge()
    def add_entry(self, cell, m):
        """
        Set the entry in ``cell`` equal to ``m``. If the cell does not exist
        then extend the tableau, otherwise just replace the entry.

        INPUT:

        - ``cell`` -- a triple ``(k, r, c)`` giving the component, row and
          column of the cell; it must either already be a cell of ``self``
          or be an addable cell of its shape
        - ``m`` -- the new entry for the cell

        The result belongs to the same parent as ``self`` when possible,
        otherwise to the most specific parent that accepts it.

        EXAMPLES::

            sage: s = StandardTableauTuple([ [[3,4,7],[6,8]], [[9,13],[12]], [[1,5],[2,11],[10]] ]); s.pp()
              3  4  7     9 13     1  5
              6  8       12        2 11
                                  10
            sage: t = s.add_entry( (0,0,3),14); t.pp(); t.category()
              3  4  7 14     9 13     1  5
              6  8          12        2 11
                                     10
            Category of elements of Standard tableau tuples
            sage: t = s.add_entry( (0,0,3),15); t.pp(); t.category()
              3  4  7 15     9 13     1  5
              6  8          12        2 11
                                     10
            Category of elements of Tableau tuples
            sage: t = s.add_entry( (2,1,2),14); t.pp(); t.category()
            Traceback (most recent call last):
            ...
            IndexError: (2, 1, 2) is not an addable cell of the tableau
        """
        (k,r,c) = cell
        # work on a mutable copy of the underlying lists
        tab = self.to_list()
        try:
            # the easy case: the cell already exists, so overwrite it
            tab[k][r][c] = m
        except IndexError:
            if (k,r,c) in self.shape().addable_cells():
                # as (k,r,c) is an addable cell the following should work
                # so we do not need to trap anything
                if r == len(tab[k]):
                    # the cell starts a new row of component k
                    tab[k].append([])
                tab[k][r].append(m)
            else:
                raise IndexError('%s is not an addable cell of the tableau' % ( (k,r,c),))
        # finally, try and return a tableau belonging to the same category
        try:
            return self.parent()(tab)
        except ValueError:
            try:
                return self.parent().element_class(self.parent(), tab)
            except ValueError:
                return TableauTuple(tab)
def restrict(self, m=None):
"""
Returns the restriction of the standard tableau ``self`` to ``m``.
The restriction is the subtableau of ``self`` whose entries are less
than or equal to ``m``.
By default, ``m`` is one less than the current size.
EXAMPLES::
sage: TableauTuple([[[5]],[[1,2],[3,4]]]).restrict()
([], [[1, 2], [3, 4]])
sage: TableauTuple([[[5]],[[1,2],[3,4]]]).restrict(6)
([[5]], [[1, 2], [3, 4]])
sage: TableauTuple([[[5]],[[1,2],[3,4]]]).restrict(5)
([[5]], [[1, 2], [3, 4]])
sage: TableauTuple([[[5]],[[1,2],[3,4]]]).restrict(4)
([], [[1, 2], [3, 4]])
sage: TableauTuple([[[5]],[[1,2],[3,4]]]).restrict(3)
([], [[1, 2], [3]])
sage: TableauTuple([[[5]],[[1,2],[3,4]]]).restrict(2)
([], [[1, 2]])
sage: TableauTuple([[[5]],[[1,2],[3,4]]]).restrict(1)
([], [[1]])
sage: TableauTuple([[[5]],[[1,2],[3,4]]]).restrict(0)
([], [])
Where possible the restricted tableau belongs to the same category as
the original tableaux::
sage: TableauTuple([[[5]],[[1,2],[3,4]]]).restrict(3).category()
Category of elements of Tableau tuples
sage: TableauTuple([[[5]],[[1,2],[3,4]]]).restrict(3).category()
Category of elements of Tableau tuples
sage: TableauTuples(level=2)([[[5]],[[1,2],[3,4]]]).restrict(3).category()
Category of elements of Tableau tuples of level 2
"""
if m is None: m=self.size()-1
# We are lucky in that currently restriction is defined for arbitrary
# (level one) tableau and not just standard ones. If this ever changes
# we will have to treat the cases where the components restrict to
# empty lists of the form [[]] separately.
tab=[t.restrict(m) for t in self]
try:
return self.parent()(tab)
except ValueError:
try:
return self.parent().Element(tab)
except ValueError:
return TableauTuple(tab)
def symmetric_group_action_on_entries(self, w):
r"""
Return the action of a permutation ``w`` on ``self``.
Consider a standard tableau tuple
`T = (t^{(1)}, t^{(2)}, \ldots t^{(l)})` of size `n`, then the
action of `w \in S_n` is defined by permuting the entries of `T`
(recall they are `1, 2, \ldots, n`). In particular, suppose the entry
at cell `(k, i, j)` is `a`, then the entry becomes `w(a)`. In general,
the resulting tableau tuple `wT` may *not* be standard.
INPUT:
- ``w`` -- a permutation
EXAMPLES::
sage: TableauTuple([[[1,2],[4]],[[3,5]]]).symmetric_group_action_on_entries( Permutation(((4,5))) )
([[1, 2], [5]], [[3, 4]])
sage: TableauTuple([[[1,2],[4]],[[3,5]]]).symmetric_group_action_on_entries( Permutation(((1,2))) )
([[2, 1], [4]], [[3, 5]])
"""
w = w + [i+1 for i in range(len(w), self.size())] #need to ensure that it belongs to Sym_size
try:
return self.parent()([[[w[entry-1] for entry in row] for row in t] for t in self])
except ValueError:
return TableauTuples()([[[w[entry-1] for entry in row] for row in t] for t in self])
def content(self, k, multicharge):
r"""
Return the content ``k`` in ``self``.
The content of `k` in a standard tableau. That is, if
`k` appears in row `r` and column `c` of the tableau, then
we return `c - r + a_k`, where the multicharge is
`(a_1, a_2, \ldots, a_l)` and `l` is the level of the tableau.
The multicharge determines the dominant weight
.. MATH::
\Lambda = \sum_{i=1}^l \Lambda_{a_i}
of the affine special linear group. In the combinatorics, the
multicharge simply offsets the contents in each component so that
the cell `(k, r, c)` has content `a_k + c - r`.
INPUT:
- ``k`` -- an integer in `\{1, 2, \ldots, n\}`
- ``multicharge`` -- a sequence of integers of length `l`
Here `l` is the :meth:`~TableauTuple.level` and `n` is the
:meth:`~TableauTuple.size` of ``self``.
EXAMPLES::
sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).content(3,[0,0])
-1
sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).content(3,[0,1])
0
sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).content(3,[0,2])
1
sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).content(6,[0,2])
Traceback (most recent call last):
...
ValueError: 6 must be contained in the tableaux
"""
for l, tableau in enumerate(self):
for r,row in enumerate(tableau):
try:
return multicharge[l] - r + row.index(k)
except ValueError:
ValueError
raise ValueError('%s must be contained in the tableaux' % k)
def residue(self, k, e, multicharge):
r"""
Return the *residue* of the integer ``k`` in the tableau ``self``.
The *residue* of `k` is `c - r + a_k` in `\ZZ / e\ZZ`, where `k`
appears in row `r` and column `c` of the tableau and
the multicharge is `(a_1, a_2, \ldots, a_l)`.
The multicharge determines the dominant weight
.. MATH::
\sum_{i=1}^l \Lambda_{a_i}
for the affine special linear group. In the combinatorics, it simply
offsets the contents in each component so that the cell `(k, 0, 0)`
has content `a_k`.
INPUT:
- ``k`` -- an integer in `\{1, 2, \ldots, n\}`
- ``e`` -- an integer in `\{0, 2, 3, 4, 5, \ldots\}`
- ``multicharge`` -- a list of integers of length `l`
Here `l` is the :meth:`~TableauTuple.level` and `n` is the
:meth:`~TableauTuple.size` of ``self``.
OUTPUT:
The residue of ``k`` in a standard tableau. That is,
EXAMPLES::
sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).residue(1, 3,[0,0])
0
sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).residue(1, 3,[0,1])
1
sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).residue(1, 3,[0,2])
2
sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).residue(6, 3,[0,2])
Traceback (most recent call last):
...
ValueError: 6 must be contained in the tableaux
"""
for l, tableau in enumerate(self):
for r, row in enumerate(tableau):
try:
return IntegerModRing(e)(multicharge[l] - r + row.index(k))
except ValueError:
pass
raise ValueError('%s must be contained in the tableaux' % k)
#--------------------------------------------------
# Row standard tableau tuple - element class
#--------------------------------------------------
@add_metaclass(ClasscallMetaclass)
class RowStandardTableauTuple(TableauTuple):
    r"""
    A class for row standard tableau tuples of shape a partition tuple.

    A row standard tableau tuple of size `n` is an ordered tuple of row
    standard tableaux (see :class:`RowStandardTableau`), with entries `1, 2,
    \ldots, n` such that, in each component, the entries are in increasing
    order along each row. If the tableau in component `k` has shape
    `\lambda^{(k)}` then `\lambda=(\lambda^{(1)},\ldots,\lambda^{(l)})` is a
    :class:`PartitionTuple`.

    .. NOTE::

        The tableaux appearing in a :class:`RowStandardTableauTuple` are row
        strict, but individually they are not standard tableaux because the
        entries in any single component of a :class:`RowStandardTableauTuple`
        will typically not be in bijection with `\{1, 2, \ldots, n\}`.

    INPUT:

    - ``t`` -- a tableau, a list of (standard) tableau or an equivalent list

    OUTPUT:

    - A :class:`RowStandardTableauTuple` object constructed from ``t``.

    .. NOTE::

        Sage uses the English convention for (tuples of) partitions and
        tableaux: the longer rows are displayed on top. As with
        :class:`PartitionTuple`, in sage the cells, or nodes, of partition
        tuples are 0-based. For example, the (lexicographically) first cell in
        any non-empty partition tuple is `[0,0,0]`. Further, the coordinates
        ``[k,r,c]`` in a :class:`TableauTuple` refer to the component, row and
        column indices, respectively.

    EXAMPLES::

        sage: t = RowStandardTableauTuple([[[4,7],[3]],[[2,6,8],[1,5]],[[9]]]); t
        ([[4, 7], [3]], [[2, 6, 8], [1, 5]], [[9]])
        sage: t.pp()
        4 7 2 6 8 9
        3 1 5
        sage: t.shape()
        ([2, 1], [3, 2], [1])
        sage: t[0].pp() # pretty printing
        4 7
        3
        sage: t.is_row_strict()
        True
        sage: t[0].is_standard()
        False
        sage: RowStandardTableauTuple([[],[],[]]) # An empty tableau tuple
        ([], [], [])
        sage: RowStandardTableauTuple([[[4,5],[6]],[[1,2,3]]]) in StandardTableauTuples()
        True
        sage: RowStandardTableauTuple([[[5,6],[4]],[[1,2,3]]]) in StandardTableauTuples()
        False

    When using code that will generate a lot of tableaux, it is slightly more
    efficient to construct a :class:`RowStandardTableauTuple` from the
    appropriate parent object::

        sage: RST = RowStandardTableauTuples()
        sage: RST([[[4,5],[7]],[[1,2,3],[6,8]],[[9]]])
        ([[4, 5], [7]], [[1, 2, 3], [6, 8]], [[9]])

    .. SEEALSO::

        - :class:`RowTableau`
        - :class:`RowTableaux`
        - :class:`TableauTuples`
        - :class:`TableauTuple`
        - :class:`StandardTableauTuples`
        - :class:`StandardTableauTuple`
        - :class:`RowStandardTableauTuples`

    TESTS::

        sage: RowStandardTableauTuple( [[3, 4, 5],[1, 2]] ).category() # indirect doctest
        Category of elements of Row standard tableaux
        sage: RowStandardTableauTuple([[[3,4,5],[1,2]]]).category() # indirect doctest
        Category of elements of Row standard tableaux
        sage: RowStandardTableauTuples()([[[3,4,5],[1,2]]]).category() # indirect doctest
        Category of elements of Row standard tableaux
        sage: RowStandardTableauTuple([[[1,2,3]],[[1]]])
        Traceback (most recent call last):
        ...
        ValueError: entries must be in bijection with {1,2,...,n}
        sage: RowStandardTableauTuple([[],[[1,2,1]]])
        Traceback (most recent call last):
        ...
        ValueError: tableaux must be row strict
        sage: RowStandardTableauTuple([ [[1,2,4],[6]],[[0,1]],[[10]] ])
        Traceback (most recent call last):
        ...
        ValueError: entries must be in bijection with {1,2,...,n}
        sage: TestSuite( RowStandardTableauTuple([[[3,4,6],[1]],[[2],[5]]]) ).run()
        sage: TestSuite( RowStandardTableauTuple([[[3,4,6],[1]],[], [[2],[5]]]) ).run()
        sage: TestSuite( RowStandardTableauTuple([[[3,4,6],[1]],[[7]], [[2],[5]]]) ).run()
    """
    @staticmethod
    def __classcall_private__(self, t):
        r"""
        This ensures that a :class:`RowStandardTableauTuple` is only constructed
        as an ``element_class()`` call of an appropriate parent.

        EXAMPLES::

            sage: t=RowStandardTableauTuple([[[3,4,6],[1]],[[2],[5]]])
            sage: t.parent()
            Row standard tableau tuples
            sage: t.category()
            Category of elements of Row standard tableau tuples
            sage: type(t)
            <class 'sage.combinat.tableau_tuple.RowStandardTableauTuples_all_with_category.element_class'>
            sage: RowStandardTableauTuples(level=2)(t).parent()
            Row standard tableau tuples of level 2
            sage: RowStandardTableauTuples(level=2,size=6)(t).parent()
            Row standard tableau tuples of level 2 and size 6
        """
        # Elements of the right type are returned unchanged (parents are
        # unique, so no re-wrapping is needed).
        if isinstance(t, (RowStandardTableau, RowStandardTableauTuple)):
            return t

        # The Tableau class is very general in that it allows the entries of a
        # tableau to be almost anything, including lists. For this reason we
        # first try and interpret t as a tuple of tableaux and if this fails we
        # then try to think of t as a tableau.
        try:
            t = [Tableau(s) for s in t]
        except (TypeError,ValueError):
            try:
                t = [RowStandardTableau(t)]
            except ValueError:
                pass

        # A singleton list is a level-one tableau; everything else is built
        # as a genuine tuple.  The parent's element_class constructor does
        # the validity checking (via __init__ below).
        if len(t) == 1:
            P = RowStandardTableaux_all()
            return P.element_class(P, t[0])
        else:
            P = RowStandardTableauTuples_all()
            return P.element_class(P, t)

        # NOTE(review): both branches above return, so this raise is
        # unreachable; invalid input is instead rejected inside
        # element_class() -> __init__.  Kept for parity with upstream.
        raise ValueError('%s is not a row standard tableau tuple' % t)

    def __init__(self, parent, t, check=True):
        r"""
        Initializes a row standard tableau tuple.

        EXAMPLES::

            sage: t = RowStandardTableauTuples()([[[1,4],[2]],[[3]]])
            sage: s = TableauTuples(2)([[[1,4],[2]],[[3]]])
            sage: s == t
            True
            sage: s.parent()
            Tableau tuples of level 2
            sage: r = RowStandardTableauTuples(level=2)(t); r.parent()
            Row standard tableau tuples of level 2
            sage: isinstance(r, RowStandardTableauTuple)
            True
            sage: r in RowStandardTableauTuples()
            True
            sage: r in RowStandardTableauTuples(level=2)
            True
            sage: r in RowStandardTableauTuples(level=3)
            False
        """
        # Morally, a level 1 tableau should never end up here, however, in
        # practice it can because the RowStandardTableauTuples() iterator, for
        # example, generates RowStandardTableauTuples of level 1. These tableaux
        # should have RowStandardTableauTuples as their parent so we have to cope
        # with level 1 tableau after all.
        try:
            t = [Tableau(s) for s in t]
        except (TypeError, ValueError):
            try:
                t = [Tableau(t)]
            except ValueError:
                raise ValueError('not a valid row standard tableau tuple')

        super(RowStandardTableauTuple, self).__init__(parent, t)

        if check:
            # We still have to check that t is row standard.
            if not all(s.is_row_strict() for s in t):
                raise ValueError('tableaux must be row strict')
            # Finally, the more costly check that the entries are {1,2...n}
            # (all component entries pooled, sorted and compared to 1..n).
            entries = sorted(sum((s.entries() for s in t), ()))
            if not entries == list(range(1, len(entries) + 1)):
                raise ValueError('entries must be in bijection with {1,2,...,n}')

    def inverse(self, k):
        """
        Return the cell containing ``k`` in the tableau tuple ``self``.

        The cell is returned as a triple ``(component, row, column)`` of
        0-based indices.

        EXAMPLES::

            sage: RowStandardTableauTuple([[[3,4],[1,2]],[[5,6,7],[8]],[[9,10],[11],[12]]]).inverse(1)
            (0, 1, 0)
            sage: RowStandardTableauTuple([[[3,4],[1,2]],[[5,6,7],[8]],[[9,10],[11],[12]]]).inverse(2)
            (0, 1, 1)
            sage: RowStandardTableauTuple([[[3,4],[1,2]],[[5,6,7],[8]],[[9,10],[11],[12]]]).inverse(3)
            (0, 0, 0)
            sage: RowStandardTableauTuple([[[3,4],[1,2]],[[5,6,7],[8]],[[9,10],[11],[12]]]).inverse(4)
            (0, 0, 1)
            sage: StandardTableauTuple([[[1,2],[3,4]],[[5,6,7],[8]],[[9,10],[11],[12]]]).inverse(1)
            (0, 0, 0)
            sage: StandardTableauTuple([[[1,2],[3,4]],[[5,6,7],[8]],[[9,10],[11],[12]]]).inverse(2)
            (0, 0, 1)
            sage: StandardTableauTuple([[[1,2],[3,4]],[[5,6,7],[8]],[[9,10],[11],[12]]]).inverse(3)
            (0, 1, 0)
            sage: StandardTableauTuple([[[1,2],[3,4]],[[5,6,7],[8]],[[9,10],[11],[12]]]).inverse(12)
            (2, 2, 0)
        """
        # EAFP scan: ``.index`` raises ValueError on rows not containing
        # ``k``; only when every row misses do we report failure.
        for l in range(len(self)):
            for row in range(len(self[l])):
                try:
                    return (l, row, self[l][row].index(k))
                except ValueError:
                    pass
        raise ValueError('%s must be contained in the tableaux' % k)

    def residue_sequence(self, e, multicharge):
        r"""
        Return the :class:`sage.combinat.tableau_residues.ResidueSequence`
        of ``self``.

        INPUT:

        - ``e`` -- integer in `\{0, 2, 3, 4, 5, \ldots\}`
        - ``multicharge`` -- a sequence of integers of length equal
          to the level/length of ``self``

        OUTPUT:

        The :class:`residue sequence
        <sage.combinat.tableau_residues.ResidueSequence>` of the tableau.

        EXAMPLES::

            sage: RowStandardTableauTuple([[[5]],[[3,4],[1,2]]]).residue_sequence(3,[0,0])
            3-residue sequence (2,0,0,1,0) with multicharge (0,0)
            sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).residue_sequence(3,[0,1])
            3-residue sequence (1,2,0,1,0) with multicharge (0,1)
            sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).residue_sequence(3,[0,2])
            3-residue sequence (2,0,1,2,0) with multicharge (0,2)
        """
        # res[i-1] is the (unreduced) content of the cell containing i;
        # ResidueSequence does the reduction modulo e.
        res = [0] * self.size()
        for (k,r,c) in self.shape().cells():
            res[self[k][r][c]-1] = multicharge[k] - r + c
        from sage.combinat.tableau_residues import ResidueSequence
        return ResidueSequence(e, multicharge, res, check=False)

    def degree(self, e, multicharge):
        r"""
        Return the Brundan-Kleshchev-Wang [BKW2011]_ degree of ``self``.

        The *degree* of a tableau is an integer that is defined recursively by
        successively stripping off the number `k`, for `k = n, n-1, \ldots, 1`,
        and at stage adding the count of the number of addable cell of the same
        residue minus the number of removable cells of them same residue as `k`
        and that are below `k` in the diagram.

        Note that even though this degree function was defined by
        Brundan-Kleshchev-Wang [BKW2011]_ the underlying combinatorics
        is much older, going back at least to Misra and Miwa.

        The degrees of the tableau `T` gives the degree of the homogeneous
        basis element of the graded Specht module which is indexed by `T`.

        INPUT:

        - ``e`` -- the *quantum characteristic* ``e``
        - ``multicharge`` -- (default: ``[0]``) the multicharge

        OUTPUT:

        The degree of the tableau ``self``, which is an integer.

        EXAMPLES::

            sage: StandardTableauTuple([[[1]], [], []]).degree(0,(0,0,0))
            2
            sage: StandardTableauTuple([[],[[1]], []]).degree(0,(0,0,0))
            1
            sage: StandardTableauTuple([[], [], [[1]]]).degree(0,(0,0,0))
            0
            sage: StandardTableauTuple([[[1]],[[2]], []]).degree(0,(0,0,0))
            3
            sage: StandardTableauTuple([[[1]], [], [[2]]]).degree(0,(0,0,0))
            2
            sage: StandardTableauTuple([[],[[1]], [[2]]]).degree(0,(0,0,0))
            1
            sage: StandardTableauTuple([[[2]],[[1]], []]).degree(0,(0,0,0))
            1
            sage: StandardTableauTuple([[[2]], [], [[1]]]).degree(0,(0,0,0))
            0
            sage: StandardTableauTuple([[],[[2]], [[1]]]).degree(0,(0,0,0))
            -1
        """
        # Start from the degree of the initial (row reading) tableau of this
        # shape and track the change of degree along a reduced word taking it
        # to ``self``, adjusting whenever the swapped residues are equal
        # (-2) or adjacent (+1, or +2 when e == 2 since then i-1 == i+1).
        shape = self.shape()
        deg = shape._initial_degree(e,multicharge)
        res = shape.initial_tableau().residue_sequence(e, multicharge)
        for r in self.reduced_row_word():
            if res[r] == res[r+1]:
                deg -= 2
            elif res[r] == res[r+1] + 1 or res[r] == res[r+1] - 1:
                # old-style conditional: 2 if e == 2 else 1
                deg += (e == 2 and 2 or 1)
            res = res.swap_residues(r, r+1)
        return deg

    def codegree(self, e, multicharge):
        r"""
        Return the Brundan-Kleshchev-Wang [BKW2011]_ codegree of ``self``.

        The *codegree* of a tableau is an integer that is defined
        recursively by successively stripping off the number `k`, for
        `k = n, n-1, \ldots, 1` and at stage adding the number of addable
        cell of the same residue minus the number of removable cells of
        the same residue as `k` and which are above `k` in the diagram.

        The codegree of the tableau ``self`` gives the degree of "dual"
        homogeneous basis element of the graded Specht module which is
        indexed by ``self``.

        INPUT:

        - ``e`` -- the *quantum characteristic*
        - ``multicharge`` -- the multicharge

        OUTPUT:

        The codegree of the tableau ``self``, which is an integer.

        EXAMPLES::

            sage: StandardTableauTuple([[[1]], [], []]).codegree(0,(0,0,0))
            0
            sage: StandardTableauTuple([[],[[1]], []]).codegree(0,(0,0,0))
            1
            sage: StandardTableauTuple([[], [], [[1]]]).codegree(0,(0,0,0))
            2
            sage: StandardTableauTuple([[[1]],[[2]], []]).codegree(0,(0,0,0))
            -1
            sage: StandardTableauTuple([[[1]], [], [[2]]]).codegree(0,(0,0,0))
            0
            sage: StandardTableauTuple([[],[[1]], [[2]]]).codegree(0,(0,0,0))
            1
            sage: StandardTableauTuple([[[2]],[[1]], []]).codegree(0,(0,0,0))
            1
            sage: StandardTableauTuple([[[2]], [], [[1]]]).codegree(0,(0,0,0))
            2
            sage: StandardTableauTuple([[],[[2]], [[1]]]).codegree(0,(0,0,0))
            3
        """
        if not self:  # the trivial case
            return 0

        # Dual of degree(): work with the conjugate shape and negated
        # multicharge, starting from the initial column tableau, and track
        # the same residue-swap adjustments along a reduced column word.
        conj_shape = self.shape().conjugate()
        codeg = conj_shape._initial_degree(e,tuple(-r for r in multicharge))
        res = self.shape().initial_column_tableau().residue_sequence(e, multicharge)
        for r in self.reduced_column_word():
            if res[r] == res[r+1]:
                codeg -= 2
            elif res[r] == res[r+1] + 1 or res[r] == res[r+1] - 1:
                # old-style conditional: 2 if e == 2 else 1
                codeg += (e == 2 and 2 or 1)
            res = res.swap_residues(r, r+1)
        return codeg
#--------------------------------------------------
# Standard tableau tuple - element class
#--------------------------------------------------
class StandardTableauTuple(RowStandardTableauTuple):
    r"""
    A class to model a standard tableau of shape a partition tuple. This is
    a tuple of standard tableau with entries `1, 2, \ldots, n`, where `n`
    is the size of the underlying partition tuple, such that the entries
    increase along rows and down columns in each component of the tuple.

        sage: s=StandardTableauTuple([[1,2,3],[4,5]])
        sage: t=StandardTableauTuple([[1,2],[3,5],[4]])
        sage: s.dominates(t)
        True
        sage: t.dominates(s)
        False
        sage: StandardTableauTuple([[1,2,3],[4,5]]) in RowStandardTableauTuples()
        True

    The tableaux appearing in a :class:`StandardTableauTuple` are
    both row and column strict, but individually they are not standard
    tableaux because the entries in any single component of a
    :class:`StandardTableauTuple` will typically not be in bijection with
    `\{1, 2, \ldots, n\}`.

    INPUT:

    - ``t`` -- a tableau, a list of (standard) tableau or an equivalent list

    OUTPUT:

    - A :class:`StandardTableauTuple` object constructed from ``t``.

    .. NOTE::

        Sage uses the English convention for (tuples of) partitions and
        tableaux: the longer rows are displayed on top. As with
        :class:`PartitionTuple`, in sage the cells, or nodes, of partition
        tuples are 0-based. For example, the (lexicographically) first cell in
        any non-empty partition tuple is `[0,0,0]`. Further, the coordinates
        ``[k,r,c]`` in a :class:`TableauTuple` refer to the component, row and
        column indices, respectively.

    EXAMPLES::

        sage: t=TableauTuple([ [[1,3,4],[7,9]], [[2,8,11],[6]], [[5,10]] ]); t
        ([[1, 3, 4], [7, 9]], [[2, 8, 11], [6]], [[5, 10]])
        sage: t[0][0][0]
        1
        sage: t[1][1][0]
        6
        sage: t[2][0][0]
        5
        sage: t[2][0][1]
        10
        sage: t = StandardTableauTuple([[[4,5],[7]],[[1,2,3],[6,8]],[[9]]]); t
        ([[4, 5], [7]], [[1, 2, 3], [6, 8]], [[9]])
        sage: t.pp()
        4 5 1 2 3 9
        7 6 8
        sage: t.shape()
        ([2, 1], [3, 2], [1])
        sage: t[0].pp() # pretty printing
        4 5
        7
        sage: t.is_standard()
        True
        sage: t[0].is_standard()
        False
        sage: StandardTableauTuple([[],[],[]]) # An empty tableau tuple
        ([], [], [])

    When using code that will generate a lot of tableaux, it is slightly more
    efficient to construct a :class:`StandardTableauTuple` from the
    appropriate parent object::

        sage: STT = StandardTableauTuples()
        sage: STT([[[4,5],[7]],[[1,2,3],[6,8]],[[9]]])
        ([[4, 5], [7]], [[1, 2, 3], [6, 8]], [[9]])

    .. SEEALSO::

        - :class:`Tableau`
        - :class:`Tableaux`
        - :class:`TableauTuples`
        - :class:`TableauTuple`
        - :class:`StandardTableauTuples`

    TESTS::

        sage: StandardTableauTuple( [[1,2,3],[4,5]] ).category() # indirect doctest
        Category of elements of Standard tableaux
        sage: StandardTableauTuple([[[1,2,3],[4,5]]]).category() # indirect doctest
        Category of elements of Standard tableaux
        sage: StandardTableauTuples()([[[1,2,3],[4,5]]]).category() # indirect doctest
        Category of elements of Standard tableaux
        sage: StandardTableauTuple([[[1,2,3]],[[1]]])
        Traceback (most recent call last):
        ...
        ValueError: entries must be in bijection with {1,2,...,n}
        sage: StandardTableauTuple([[],[[1,2,1]]])
        Traceback (most recent call last):
        ...
        ValueError: tableaux must be row strict
        sage: StandardTableauTuple([ [[1,2,4],[6]],[[0,1]],[[10]] ])
        Traceback (most recent call last):
        ...
        ValueError: entries must be in bijection with {1,2,...,n}
        sage: TestSuite( StandardTableauTuple([[[1,3,4],[6]],[[2],[5]]]) ).run()
        sage: TestSuite( StandardTableauTuple([[[1,3,4],[6]],[], [[2],[5]]]) ).run()
        sage: TestSuite( StandardTableauTuple([[[1,3,4],[6]],[[7]], [[2],[5]]]) ).run()
    """
    @staticmethod
    def __classcall_private__(self, t):
        r"""
        This ensures that a :class:`StandardTableau` is only ever constructed
        as an ``element_class()`` call of an appropriate parent.

        EXAMPLES::

            sage: t=StandardTableauTuple([[[1,3,4],[6]],[[2],[5]]])
            sage: t.parent()
            Standard tableau tuples
            sage: t.category()
            Category of elements of Standard tableau tuples
            sage: type(t)
            <class 'sage.combinat.tableau_tuple.StandardTableauTuples_all_with_category.element_class'>
            sage: StandardTableauTuples(level=2)(t).parent()
            Standard tableau tuples of level 2
            sage: StandardTableauTuples(level=2,size=6)(t).parent()
            Standard tableau tuples of level 2 and size 6
        """
        # Elements of the right type are returned unchanged.
        if isinstance(t, (StandardTableau, StandardTableauTuple)):
            return t

        # The Tableau class is very general in that it allows the entries of a
        # tableau to be almost anything, including lists. For this reason we
        # first try and interpret t as a tuple of tableaux and if this fails we
        # then try to think of t as a tableau.
        try:
            t = [StandardTableau(s) for s in t]
        except (TypeError,ValueError):
            try:
                t = [StandardTableau(t)]
            except ValueError:
                pass

        # A singleton is already a StandardTableau, so it can be returned
        # directly; a longer list is wrapped as a genuine tuple.
        if len(t) == 1:
            return t[0]
        else:
            P = StandardTableauTuples_all()
            return P.element_class(P, t)

        # NOTE(review): unreachable (both branches above return); invalid
        # input is rejected inside element_class() -> __init__ instead.
        raise ValueError('%s is not a standard tableau tuple' % t)

    def __init__(self, parent, t, check=True):
        r"""
        Initializes a standard tableau tuple.

        EXAMPLES::

            sage: t = StandardTableauTuples()([[[1,4],[2]],[[3]]])
            sage: s = TableauTuples(2)([[[1,4],[2]],[[3]]])
            sage: s == t
            True
            sage: s.parent()
            Tableau tuples of level 2
            sage: r = StandardTableauTuples(level=2)(t); r.parent()
            Standard tableau tuples of level 2
            sage: isinstance(r, StandardTableauTuple)
            True
            sage: r in StandardTableauTuples()
            True
            sage: r in StandardTableauTuples(level=2)
            True
            sage: r in StandardTableauTuples(level=3)
            False
        """
        # The check that ``t`` is valid tableau tuple is done by RowStandardTableauTuple
        super(StandardTableauTuple, self).__init__(parent, t, check=check)

        # As StandardTableauTuple inherits from RowStandardTableauTuple t must
        # be row strict and contain 1,2,...,n once each, so we only need to
        if check:
            # check that it is column strict
            if not all(s.is_column_strict() for s in self):
                raise ValueError('tableaux must be column strict')

    def dominates(self, t):
        """
        Return ``True`` if the tableau (tuple) ``self`` dominates the
        tableau ``t``. The two tableaux do not need to be of the same shape.

        EXAMPLES::

            sage: s = StandardTableauTuple([[1,2,3],[4,5]])
            sage: t = StandardTableauTuple([[1,2],[3,5],[4]])
            sage: s.dominates(t)
            True
            sage: t.dominates(s)
            False
        """
        # Dominance of tableaux is shape-dominance of every restriction.
        return all(self.restrict(m).shape().dominates(t.restrict(m).shape())
                   for m in range(1,1+self.size()))

    def to_chain(self):
        """
        Returns the chain of partitions corresponding to the standard
        tableau tuple ``self``.

        EXAMPLES::

            sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).to_chain()
            [([], []),
             ([], [1]),
             ([], [2]),
             ([], [2, 1]),
             ([], [2, 2]),
             ([1], [2, 2])]
        """
        # The k-th entry is the shape after restricting to entries <= k,
        # starting with the empty shape for k = 0.
        n = self.shape().size()
        if n == 0:
            return [self.shape()]
        else:
            return [self.restrict(k).shape() for k in range(n+1)]

    def restrict(self, m=None):
        """
        Returns the restriction of the standard tableau ``self`` to ``m``,
        which defaults to one less than the current :meth:`~TableauTuple.size`.

        EXAMPLES::

            sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).restrict(6)
            ([[5]], [[1, 2], [3, 4]])
            sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).restrict(5)
            ([[5]], [[1, 2], [3, 4]])
            sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).restrict(4)
            ([], [[1, 2], [3, 4]])
            sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).restrict(3)
            ([], [[1, 2], [3]])
            sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).restrict(2)
            ([], [[1, 2]])
            sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).restrict(1)
            ([], [[1]])
            sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).restrict(0)
            ([], [])

        Where possible the restricted tableau belongs to the same category as
        the tableau ``self``::

            sage: TableauTuple([[[5]],[[1,2],[3,4]]]).restrict(3).category()
            Category of elements of Tableau tuples
            sage: StandardTableauTuple([[[5]],[[1,2],[3,4]]]).restrict(3).category()
            Category of elements of Standard tableau tuples
            sage: StandardTableauTuples([[1],[2,2]])([[[5]],[[1,2],[3,4]]]).restrict(3).category()
            Category of elements of Standard tableau tuples
            sage: StandardTableauTuples(level=2)([[[5]],[[1,2],[3,4]]]).restrict(3).category()
            Category of elements of Standard tableau tuples of level 2
        """
        if m is None:
            m = self.size() - 1
        # We are lucky in that currently restriction is defined for arbitrary
        # (level one) tableau and not just standard ones. If this ever changes
        # we will have to treat the cases where the components restrict to
        # empty lists of the form [[]] separately.
        tab = [t.restrict(m) for t in self]
        try:
            # Try to stay in the original parent (e.g. fixed level); fall
            # back to the generic parent when the restriction leaves it.
            return self.parent()(tab)
        except ValueError:
            return StandardTableauTuple(tab)
#--------------------------------------------------
# Tableau tuples - parent classes
#--------------------------------------------------
class TableauTuples(UniqueRepresentation, Parent):
    """
    A factory class for the various classes of tableau tuples.

    INPUT:

    There are three optional arguments:

    - ``shape`` -- determines a :class:`PartitionTuple` which gives the shape
      of the :class:`TableauTuples`
    - ``level`` -- the level of the tableau tuples (positive integer)
    - ``size`` -- the size of the tableau tuples (non-negative integer)

    It is not necessary to use the keywords. If they are not specified then the
    first integer argument specifies the ``level`` and the second the ``size`` of the
    tableaux.

    OUTPUT:

    - The corresponding class of tableau tuples.

    The entries of a tableau can be any sage object. Because of this, no
    enumeration of the set of :class:`TableauTuples` is possible.

    EXAMPLES::

        sage: T3 = TableauTuples(3); T3
        Tableau tuples of level 3
        sage: [['a','b']] in TableauTuples()
        True
        sage: [['a','b']] in TableauTuples(level=3)
        False
        sage: t = TableauTuples(level=3)([[],[[1,1,1]],[]]); t
        ([], [[1, 1, 1]], [])
        sage: t in T3
        True
        sage: t in TableauTuples()
        True
        sage: t in TableauTuples(size=3)
        True
        sage: t in TableauTuples(size=4)
        False
        sage: t in StandardTableauTuples()
        False
        sage: t.parent()
        Tableau tuples of level 3
        sage: t.category()
        Category of elements of Tableau tuples of level 3

    .. SEEALSO::

        - :class:`Tableau`
        - :class:`StandardTableau`
        - :class:`StandardTableauTuples`

    TESTS::

        sage: TableauTuples(0)
        Traceback (most recent call last):
        ...
        ValueError: the level must be a positive integer
        sage: t = TableauTuples(3)([[],[],[[1,2],[3]]])
        sage: t.parent()
        Tableau tuples of level 3
        sage: TableauTuples(t)
        Traceback (most recent call last):
        ...
        ValueError: the level must be a positive integer
        sage: TableauTuples(3)([[1, 1]])
        Traceback (most recent call last):
        ...
        ValueError: [[1, 1]] is not an element of Tableau tuples of level 3
        sage: t0 = Tableau([[1]])
        sage: t1 = TableauTuples()([[1]])
        sage: t2 = TableauTuples()(t1)
        sage: t0 == t1 == t2
        True
        sage: t1 in TableauTuples()
        True
        sage: t1 in TableauTuples(1)
        True
        sage: t1 in TableauTuples(2)
        False
        sage: [[1]] in TableauTuples()
        True
        sage: [] in TableauTuples()
        True
        sage: TableauTuples(level=0)
        Traceback (most recent call last):
        ...
        ValueError: the level must be a positive integer
        sage: TestSuite( TableauTuples() ).run()
        sage: TestSuite( TableauTuples(level=1) ).run()
        sage: TestSuite( TableauTuples(level=2) ).run()
        sage: TestSuite( TableauTuples(level=6) ).run()
        sage: TestSuite( TableauTuples(size=0) ).run()
        sage: TestSuite( TableauTuples(size=1) ).run()
        sage: TestSuite( TableauTuples(size=2) ).run()
        sage: TestSuite( TableauTuples(size=10) ).run()
        sage: TestSuite( TableauTuples(level=1, size=0) ).run()
        sage: TestSuite( TableauTuples(level=1, size=1) ).run()
        sage: TestSuite( TableauTuples(level=1, size=10) ).run()
        sage: TestSuite( TableauTuples(level=2, size=0) ).run()
        sage: TestSuite( TableauTuples(level=2, size=1) ).run()
        sage: TestSuite( TableauTuples(level=2, size=10) ).run()
        sage: TestSuite( TableauTuples(level=6, size=0) ).run()
        sage: TestSuite( TableauTuples(level=6, size=1) ).run()
        sage: TestSuite( TableauTuples(level=6, size=10) ).run()

    Check that :trac:`14145` has been fixed::

        sage: 1 in TableauTuples()
        False
    """
    # Class of the elements produced by these parents.
    Element = TableauTuple
    # Parent used when an element turns out to have level one.
    level_one_parent_class = Tableaux_all  # used in element_constructor
    # Display options are shared with the (level one) Tableaux parents.
    options = Tableaux.options

    @staticmethod
    def __classcall_private__(cls, level=None, size=None):
        r"""
        This is a factory class which returns the appropriate parent based on
        arguments. See the documentation for :class:`TableauTuples` for more
        information.

        EXAMPLES::

            sage: TableauTuples()
            Tableau tuples
            sage: TableauTuples(3)
            Tableau tuples of level 3
            sage: TableauTuples(level=3)
            Tableau tuples of level 3
            sage: TableauTuples(size=3)
            Tableau tuples of size 3
            sage: TableauTuples(4,3)
            Tableau tuples of level 4 and size 3
            sage: TableauTuples(level=4,size=3)
            Tableau tuples of level 4 and size 3
            sage: TableauTuples(size=3,level=4)
            Tableau tuples of level 4 and size 3
        """
        # sanity testing
        if not (level is None or level in PositiveIntegers()):
            raise ValueError( 'the level must be a positive integer' )

        if not (size is None or size in NN):
            raise ValueError( 'the size must be a non-negative integer' )

        # now that the inputs appear to make sense, return the appropriate class
        # (level one collapses to the plain Tableaux parents)
        if level == 1:
            if size is not None:
                return Tableaux_size(size)
            else:
                return Tableaux_all()
        elif level is not None and size is not None:
            return TableauTuples_level_size(level=level, size=size)
        elif level is not None:
            return TableauTuples_level(level=level)
        elif size is not None:
            return TableauTuples_size(size=size)
        else:
            return TableauTuples_all()

    def _element_constructor_(self, t):
        r"""
        Constructs an object from t as an element of ``self``, if possible.

        This is inherited by all :class:`TableauTuples`,
        :class:`RowStandardTableauTuples`, and :class:`StandardTableauTuples`
        classes.

        INPUT:

        - ``t`` -- Data which can be interpreted as a tableau

        OUTPUT:

        - The corresponding tableau object

        EXAMPLES::

            sage: T = TableauTuples(3)
            sage: T([[],[[1,2,1]],[]]) # indirect doctest
            ([], [[1, 2, 1]], [])
            sage: T([[],[[1,2,1]],[]]).parent() is T
            True
            sage: T( StandardTableauTuples(3)([[],[[1, 2, 3]],[]])).parent() is T
            True
            sage: T([[1,2]]) # indirect doctest
            Traceback (most recent call last):
            ...
            ValueError: [[1, 2]] is not an element of Tableau tuples of level 3
        """
        if not t in self:
            raise ValueError("%s is not an element of %s"%(t, self))

        # one way or another these two cases need to be treated separately
        if t == [] or t == [[]]:
            return self.level_one_parent_class().element_class(self.level_one_parent_class(),[])

        # Because Tableaux are considered to be TableauTuples we have to check to
        # see whether t is a Tableau or a TableauTuple in order to work out
        # which class t really belongs to.
        try:
            tab = [Tableau(s) for s in t]
        except (TypeError,ValueError):
            try:
                tab = [Tableau(t)]
            except ValueError:
                pass

        # NOTE(review): if both conversions above fail, ``tab`` is unbound
        # and the next line raises NameError rather than the ValueError
        # below.  The containment test at the top makes this unlikely in
        # practice, but it is worth confirming against upstream.
        if tab in self:
            if len(tab) == 1:
                return self.level_one_parent_class().element_class(self.level_one_parent_class(), tab[0])
            else:
                return self.element_class(self, tab)

        raise ValueError('%s is not an element of %s' % (t, self))

    def __contains__(self, t):
        """
        Containment function of :class:`TableauTuples`.

        EXAMPLES::

            sage: T = TableauTuples()
            sage: [[1,2],[3,4]] in T
            True
            sage: [[1,2],[3]] in T
            True
            sage: [] in T
            True
            sage: [['a','b']] in T
            True
            sage: Tableau([['a']]) in T
            True
            sage: [1,2,3] in T
            False
            sage: [[1],[1,2]] in T
            False
            sage: ([[1,2],[4]],[[2,3],[1],[1]]) in T
            True

        Check that :trac:`14145` is fixed::

            sage: 1 in TableauTuples()
            False
        """
        # A list/tuple is a tableau tuple when every component is a tableau,
        # or when it is itself a (level one) tableau.
        if isinstance(t, (Tableau, TableauTuple)):
            return True
        elif isinstance(t, (tuple, list)):
            return all(s in Tableaux() for s in t) or t in Tableaux()
        else:
            return False

    # defaults for level, size and shape (None = unrestricted)
    _level = None
    _size = None

    def level(self):
        """
        Return the ``level`` of a tableau tuple in ``self``, or ``None`` if
        different tableau tuples in ``self`` can have different sizes. The
        ``level`` of a tableau tuple is just the level of the underlying
        :class:`PartitionTuple`.

        EXAMPLES::

            sage: TableauTuples().level() is None
            True
            sage: TableauTuples(7).level()
            7
        """
        return self._level

    def size(self):
        """
        Return the ``size`` of a tableau tuple in ``self``, or ``None`` if
        different tableau tuples in ``self`` can have different sizes. The
        ``size`` of a tableau tuple is just the size of the underlying
        :class:`PartitionTuple`.

        EXAMPLES::

            sage: TableauTuples(size=14).size()
            14
        """
        return self._size

    def list(self):
        r"""
        If the set of tableau tuples ``self`` is finite then this function
        returns the list of these tableau tuples. If the class is infinite an
        error is returned.

        EXAMPLES::

            sage: StandardTableauTuples([[2,1],[2]]).list()
            [([[1, 2], [3]], [[4, 5]]),
             ([[1, 3], [2]], [[4, 5]]),
             ([[1, 2], [4]], [[3, 5]]),
             ([[1, 3], [4]], [[2, 5]]),
             ([[2, 3], [4]], [[1, 5]]),
             ([[1, 4], [2]], [[3, 5]]),
             ([[1, 4], [3]], [[2, 5]]),
             ([[2, 4], [3]], [[1, 5]]),
             ([[1, 2], [5]], [[3, 4]]),
             ([[1, 3], [5]], [[2, 4]]),
             ([[2, 3], [5]], [[1, 4]]),
             ([[1, 4], [5]], [[2, 3]]),
             ([[2, 4], [5]], [[1, 3]]),
             ([[3, 4], [5]], [[1, 2]]),
             ([[1, 5], [2]], [[3, 4]]),
             ([[1, 5], [3]], [[2, 4]]),
             ([[2, 5], [3]], [[1, 4]]),
             ([[1, 5], [4]], [[2, 3]]),
             ([[2, 5], [4]], [[1, 3]]),
             ([[3, 5], [4]], [[1, 2]])]
        """
        if self.is_finite():
            return [y for y in self]
        else:
            raise NotImplementedError('this is an infinite set of tableaux')
class TableauTuples_all(TableauTuples):
    """
    Parent class of all :class:`TableauTuples`: neither the level nor the
    size of the tuples is restricted.
    """
    def __init__(self):
        r"""
        Initializes the class of all tableaux.

        EXAMPLES::

            sage: TableauTuples()
            Tableau tuples
        """
        super(TableauTuples_all, self).__init__(category=Sets())
        # Neither invariant is fixed for this parent.
        self._size = None
        self._level = None

    def _repr_(self):
        """
        The string representation of a :class:`StandardTableauTuple`.

        EXAMPLES::

            sage: TableauTuples() # indirect doctest
            Tableau tuples
        """
        return "Tableau tuples"

    def an_element(self):
        r"""
        Returns a particular element of the class.

        EXAMPLES::

            sage: TableauTuples().an_element()
            ([[1]], [[2]], [[3]], [[4]], [[5]], [[6]], [[7]])
        """
        # Seven single-cell components containing 1, ..., 7.
        sample = [[[m]] for m in range(1, 8)]
        return self.element_class(self, sample)
class TableauTuples_level(TableauTuples):
    """
    Class of all :class:`TableauTuples` with a fixed ``level`` and arbitrary
    ``size``.
    """
    def __init__(self, level):
        r"""
        Initializes the class of tableaux of level ``level``.

        EXAMPLES::

            sage: TableauTuples(level=4)( [[[1,2],[4]],[],[],[[4,5,6],[7,8]]] )
            ([[1, 2], [4]], [], [], [[4, 5, 6], [7, 8]])
        """
        super(TableauTuples_level, self).__init__(category=Sets())
        self._level = level

    def __contains__(self, t):
        """
        Containment function for :class:`TableauTuples` of a fixed ``level``.

        EXAMPLES::

            sage: T = TableauTuples(3)
            sage: [[[1,2,3]],[[1,2],[3,4]],[[2,4], [1]]] in T
            True
            sage: T([[[1,2,3]],[[1,2],[3,4]],[[2,4], [1]]])
            ([[1, 2, 3]], [[1, 2], [3, 4]], [[2, 4], [1]])
            sage: T(([[1,2,3]],[[1,2],[3,4]],[[2,4], [1]]))
            ([[1, 2, 3]], [[1, 2], [3, 4]], [[2, 4], [1]])
            sage: [[2,4],[1,3]] in T
            False
            sage: [[1],[2],[3]] in T
            False

        Check that :trac:`14145` is fixed::

            sage: 1 in TableauTuples(3)
            False
        """
        # Elements of this parent class just need the matching level.
        if isinstance(t, self.element_class):
            return t.level() == self.level()
        # Anything else must at least look like a tableau tuple or sequence.
        if not (TableauTuples.__contains__(self, t) or isinstance(t, (list, tuple))):
            return False
        # A genuine tuple of tableaux must have exactly ``level`` components;
        # otherwise ``t`` can only be a (level one) tableau itself.
        if all(s in Tableaux() for s in t):
            return self.level() == len(t)
        return self.level() == 1

    def _repr_(self):
        """
        The string representation of a :class:`StandardTableauTuple` of a
        fixed ``level``.

        EXAMPLES::

            sage: TableauTuples(4) # indirect doctest
            Tableau tuples of level 4
        """
        return "Tableau tuples of level %s" % self.level()

    def an_element(self):
        r"""
        Returns a particular element of the class.

        EXAMPLES::

            sage: TableauTuples(3).an_element()
            ([], [], [])
            sage: TableauTuples(5).an_element()
            ([], [], [], [], [])
            sage: T = TableauTuples(0)
            Traceback (most recent call last):
            ...
            ValueError: the level must be a positive integer
        """
        # The tuple of ``level`` empty components.
        empty_components = [[] for _ in range(self.level())]
        return self.element_class(self, empty_components)
class TableauTuples_size(TableauTuples):
    """
    Class of all :class:`TableauTuples` with an arbitrary ``level`` and fixed
    ``size``.
    """
    def __init__(self, size):
        """
        Initializes the class of tableaux of size ``size``.

        EXAMPLES::

            sage: TableauTuples(size=6)
            Tableau tuples of size 6
        """
        super(TableauTuples_size, self).__init__(category=Sets())
        self._size = size

    def __contains__(self, t):
        """
        Containment function for :class:`TableauTuples` of a fixed ``size``.

        EXAMPLES::

            sage: T = TableauTuples(size=3)
            sage: [[2,4], [1]] in T
            True
            sage: [[2,4],[1,3]] in T
            False
            sage: [[1,2,3]] in T
            True
            sage: [[1],[2],[3]] in T
            True
            sage: [[1],[2],[3],[4]] in T
            False

        Check that :trac:`14145` is fixed::

            sage: 1 in TableauTuples(size=3)
            False
        """
        # Elements of this parent class just need the matching size.
        if isinstance(t, self.element_class):
            return self.size() == t.size()
        elif TableauTuples.__contains__(self, t) or isinstance(t, (list, tuple)):
            if all(s in Tableaux() for s in t):
                # a genuine tuple of tableaux: total number of cells
                return sum(sum(map(len, s)) for s in t) == self.size()
            else:
                # a single (level one) tableau given as a list of rows
                return self.size() == sum(map(len, t))
        else:
            return False

    def _repr_(self):
        """
        The string representation of a :class:`StandardTableauTuple` of a
        fixed ``size``.

        EXAMPLES::

            sage: TableauTuples(size=4) # indirect doctest
            Tableau tuples of size 4
        """
        return "Tableau tuples of size %s" % self.size()

    def an_element(self):
        r"""
        Returns a particular element of the class.

        EXAMPLES::

            sage: TableauTuples(size=3).an_element()
            ([], [[1, 2, 3]], [])
            sage: TableauTuples(size=0).an_element()
            ([], [], [])
        """
        if self.size() == 0:
            return self.element_class(self, [[], [], []])
        # ``list(...)`` is required: under Python 3 a bare ``range`` object
        # is not a list, so without it the middle component would contain a
        # ``range`` rather than the row [1, ..., size] shown above.
        return self.element_class(self, [[], [list(range(1, self.size() + 1))], []])
class TableauTuples_level_size(TableauTuples):
    """
    Class of all :class:`TableauTuples` with a fixed ``level`` and a fixed
    ``size``.
    """

    def __init__(self, level, size):
        r"""
        Initializes the class of tableaux of size ``size``.

        EXAMPLES::

            sage: TableauTuples(4,0)
            Tableau tuples of level 4 and size 0
            sage: TableauTuples(4,1)
            Tableau tuples of level 4 and size 1
            sage: TableauTuples(4,2)
            Tableau tuples of level 4 and size 2
            sage: TableauTuples(4,3)
            Tableau tuples of level 4 and size 3
        """
        super(TableauTuples_level_size, self).__init__(category=Sets())
        # both invariants are fixed for every element of this class
        self._level = level
        self._size = size

    def __contains__(self, t):
        """
        Containment function for :class:`TableauTuples` of a fixed ``level``
        and ``size``.

        EXAMPLES::

            sage: T = TableauTuples(3,3)
            sage: [[],[[2,4], [1]],[]] in T
            True
            sage: [[2,4],[1,3]] in T
            False

        Check that :trac:`14145` is fixed::

            sage: 1 in TableauTuples(3,3)
            False
        """
        if isinstance(t, self.element_class):
            return t.level() == self.level() and t.size() == self.size()
    # fall through: t must at least look like a (list of) tableaux
        if not (TableauTuples.__contains__(self, t) or isinstance(t, (list, tuple))):
            return False
        if all(comp in Tableaux() for comp in t):
            # a genuine tuple of tableaux: check both invariants
            entries = sum(sum(map(len, comp)) for comp in t)
            return len(t) == self.level() and entries == self.size()
        # otherwise t can only be a single (level 1) tableau
        return self.level() == 1 and self.size() == sum(map(len, t))

    def _repr_(self):
        """
        The string representation of the :class:`StandardTableauTuples` of
        given level and size.

        EXAMPLES::

            sage: TableauTuples(4,5) # indirect doctest
            Tableau tuples of level 4 and size 5
            sage: TableauTuples(5,4)
            Tableau tuples of level 5 and size 4
            sage: TableauTuples(size=5,level=4)
            Tableau tuples of level 4 and size 5
        """
        return "Tableau tuples of level %s and size %s" % (self.level(), self.size())

    def an_element(self):
        r"""
        Return a particular element of the class.

        EXAMPLES::

            sage: TableauTuples(3,0).an_element()
            ([], [], [])
            sage: TableauTuples(3,1).an_element()
            ([[1]], [], [])
            sage: TableauTuples(3,2).an_element()
            ([[1, 2]], [], [])
        """
        if self.size() == 0:
            return self.element_class(self, [[] for _ in range(self.level())])
        # put every entry into a single row of the first component
        components = [[[m for m in range(1, self.size() + 1)]]]
        components.extend([] for _ in range(self.level() - 1))
        return self.element_class(self, components)
#--------------------------------------------------
# Row standard tableau tuples - parent classes
#--------------------------------------------------
class RowStandardTableauTuples(TableauTuples):
    """
    A factory class for the various classes of tuples of row standard tableau.

    INPUT:

    There are three optional arguments:

    - ``level`` -- the :meth:`~TableauTuples.level` of the tuples of tableaux

    - ``size`` -- the :meth:`~TableauTuples.size` of the tuples of tableaux

    - ``shape`` -- a list or a partition tuple specifying the :meth:`shape` of
      the row standard tableau tuples

    It is not necessary to use the keywords. If they are not used then the
    first integer argument specifies the :meth:`~TableauTuples.level` and
    the second the :meth:`~TableauTuples.size` of the tableau tuples.

    OUTPUT:

    The appropriate subclass of :class:`RowStandardTableauTuples`.

    A tuple of row standard tableau is a tableau whose entries are positive
    integers which increase from left to right along the rows in each component.
    The entries do NOT need to increase from left to right along the components.

    .. NOTE::

        Sage uses the English convention for (tuples of) partitions and
        tableaux: the longer rows are displayed on top. As with
        :class:`PartitionTuple`, in sage the cells, or nodes, of partition
        tuples are 0-based. For example, the (lexicographically) first cell
        in any non-empty partition tuple is `[0,0,0]`.

    EXAMPLES::

        sage: tabs = RowStandardTableauTuples([[2],[1,1]]); tabs
        Row standard tableau tuples of shape ([2], [1, 1])
        sage: tabs.cardinality()
        12
        sage: tabs[:]
        [([[3, 4]], [[2], [1]]),
         ([[2, 4]], [[3], [1]]),
         ([[1, 4]], [[3], [2]]),
         ([[1, 2]], [[4], [3]]),
         ([[1, 3]], [[4], [2]]),
         ([[2, 3]], [[4], [1]]),
         ([[1, 4]], [[2], [3]]),
         ([[1, 3]], [[2], [4]]),
         ([[1, 2]], [[3], [4]]),
         ([[2, 3]], [[1], [4]]),
         ([[2, 4]], [[1], [3]]),
         ([[3, 4]], [[1], [2]])]

        sage: tabs = RowStandardTableauTuples(level=3); tabs
        Row standard tableau tuples of level 3
        sage: tabs[100]
        ([], [], [[2, 3], [1]])

        sage: RowStandardTableauTuples()[0]
        ([])

    TESTS::

        sage: TestSuite( RowStandardTableauTuples() ).run()
        sage: TestSuite( RowStandardTableauTuples(level=1) ).run()
        sage: TestSuite( RowStandardTableauTuples(level=4) ).run()
        sage: TestSuite( RowStandardTableauTuples(size=0) ).run(max_runs=50) # recursion depth exceeded with default max_runs
        sage: TestSuite( RowStandardTableauTuples(size=6) ).run()
        sage: TestSuite( RowStandardTableauTuples(level=1, size=0) ).run()
        sage: TestSuite( RowStandardTableauTuples(level=1, size=0) ).run()
        sage: TestSuite( RowStandardTableauTuples(level=1, size=10) ).run()
        sage: TestSuite( RowStandardTableauTuples(level=4, size=0) ).run()
        sage: TestSuite( RowStandardTableauTuples(level=4, size=0) ).run()
        sage: TestSuite( RowStandardTableauTuples(level=4, size=10) ).run() # long time
        sage: TestSuite( RowStandardTableauTuples(shape=[[1],[3,1],[],[2,1]]) ).run()

    .. SEEALSO::

        - :class:`TableauTuples`
        - :class:`Tableau`
        - :class:`RowStandardTableau`
        - :class:`RowStandardTableauTuples`
    """
    Element = RowStandardTableauTuple
    level_one_parent_class = RowStandardTableaux_all  # used in element_constructor

    @staticmethod
    def __classcall_private__(cls, *args, **kwargs):
        r"""
        This is a factory class which returns the appropriate parent based on
        arguments. See the documentation for :class:`RowStandardTableauTuples`
        for more information.

        EXAMPLES::

            sage: RowStandardTableauTuples()
            Row standard tableau tuples
            sage: RowStandardTableauTuples(4)
            Row standard tableau tuples of level 4
            sage: RowStandardTableauTuples(4,3)
            Row standard tableau tuples of level 4 and size 3
            sage: RowStandardTableauTuples([ [2,1],[1],[1,1,1],[3,2] ])
            Row standard tableau tuples of shape ([2, 1], [1], [1, 1, 1], [3, 2])

        TESTS::

            sage: RowStandardTableauTuples([ [2,1],[1],[1,1,1],[3,2,3] ])
            Traceback (most recent call last):
            ...
            ValueError: the shape must be a partition tuple

            sage: P = PartitionTuples()
            sage: pt = P([[1]]); pt
            ([1])
            sage: RowStandardTableauTuples(pt)
            Row standard tableaux of shape [1]
        """
        from sage.combinat.partition_tuple import PartitionTuple

        # first check the keyword arguments
        level = kwargs.get('level', None)
        shape = kwargs.get('shape', None)
        size = kwargs.get('size', None)

        for key in kwargs:
            if key not in ['level', 'shape', 'size']:
                raise ValueError('%s is not a valid argument for RowStandardTableauTuples' % key)

        # now process the positional arguments
        if args:
            # the first argument could be either the level or the shape
            if isinstance(args[0], (int, Integer)):
                if level is not None:
                    raise ValueError('the level was specified more than once')
                else:
                    level = args[0]
            else:
                if shape is not None:
                    raise ValueError('the shape was specified more than once')
                else:
                    shape = args[0]  # we check that it is a PartitionTuple below

            if len(args) == 2:  # the second positional argument is the size
                # complain whenever the size was also given as a keyword, even
                # if the first positional argument was the shape
                if size is not None:
                    raise ValueError('the size was specified more than once')
                else:
                    size = args[1]
            elif len(args) > 2:
                raise ValueError('too many arguments!')

        # now check that the arguments are consistent
        if level is not None and (not isinstance(level, (int, Integer)) or level < 1):
            raise ValueError('the level must be a positive integer')

        if size is not None and (not isinstance(size, (int, Integer)) or size < 0):
            raise ValueError('the size must be a non-negative integer')

        if shape is not None:
            try:
                shape = PartitionTuple(shape)
            except ValueError:
                raise ValueError('the shape must be a partition tuple')

            # the shape determines both the level and the size, so the other
            # arguments, when given, must agree with it
            if level is None:
                level = shape.level()
            elif level != shape.level():
                raise ValueError('the shape and level must agree')
            if size is None:
                size = shape.size()
            elif size != shape.size():
                raise ValueError('the shape and size must agree')

        # now that the inputs appear to make sense, return the appropriate class
        if level is not None and level <= 1:
            # level 1 is handled by the (row standard) tableaux classes
            if isinstance(shape, PartitionTuple):
                shape = shape[0]
            if shape is not None:
                return RowStandardTableaux_shape(shape)
            elif size is not None:
                return RowStandardTableaux_size(size)
            else:
                return RowStandardTableaux_all()
        elif shape is not None:
            return RowStandardTableauTuples_shape(shape)
        elif level is not None and size is not None:
            return RowStandardTableauTuples_level_size(level, size)
        elif level is not None:
            return RowStandardTableauTuples_level(level)
        elif size is not None:
            return RowStandardTableauTuples_size(size)
        else:
            return RowStandardTableauTuples_all()

    def __getitem__(self, r):
        r"""
        The default implementation of ``__getitem__`` for enumerated sets does
        not allow slices so we override it here.

        EXAMPLES::

            sage: RowStandardTableauTuples()[10:20]
            [([[2, 3], [1]]),
             ([[1, 2], [3]]),
             ([[1, 3], [2]]),
             ([[3], [2], [1]]),
             ([[2], [3], [1]]),
             ([[1], [3], [2]]),
             ([[1], [2], [3]]),
             ([[2], [1], [3]]),
             ([[3], [1], [2]]),
             ([[1, 2]], [])]

        .. TODO::

            Implement slices with step size different from `1` and make this
            a method for enumerate sets.
        """
        if isinstance(r, (int, Integer)):
            return self.unrank(r)
        elif isinstance(r, slice):
            start = 0 if r.start is None else r.start
            stop = r.stop
            if stop is None and not self.is_finite():
                raise ValueError('infinite set')
        else:
            raise ValueError('r must be an integer or a slice')
        count = 0
        tabs = []
        for t in self:
            if count == stop:
                break
            if count >= start:
                tabs.append(t)
            count += 1

        # this is to cope with empty slice endpoints like [:6] or [:]
        if count == stop or stop is None:
            return tabs
        raise IndexError('value out of range')

    def __contains__(self, t):
        """
        Containment function for :class:`RowStandardTableauTuples` of
        arbitrary ``level`` and ``size``.

        EXAMPLES::

            sage: T = RowStandardTableauTuples()
            sage: [[1,3],[2]] in T
            True
            sage: [] in T
            True
            sage: Tableau([[1]]) in T
            True
            sage: RowStandardTableauTuple([[1]]) in T
            True
            sage: [[1,2],[1]] in T
            False
            sage: [[1,1],[5]] in T
            False

        Check that :trac:`14145` is fixed::

            sage: 1 in RowStandardTableauTuples()
            False
        """
        if isinstance(t, (RowStandardTableau, RowStandardTableauTuple)):
            return True
        elif TableauTuples.__contains__(self, t) or isinstance(t, (list, tuple)):
            if all(s in Tableaux() for s in t):
                # t is a tuple of tableaux: the entries must be exactly
                # 1, 2, ..., n and each row must increase left to right
                flatt = sorted(sum((list(row) for s in t for row in s), []))
                return (flatt == list(range(1, len(flatt) + 1))
                        and all(len(s) == 0 or all(row[i] < row[i + 1]
                                                   for row in s for i in range(len(row) - 1))
                                for s in t))
            else:
                # otherwise t can only be a level 1 tableau
                return t in RowStandardTableaux()
        else:
            return False

    # set the default shape
    _shape = None

    def shape(self):
        """
        Return the shape of the set of :class:`RowStandardTableauTuples`, or
        ``None`` if it is not defined.

        EXAMPLES::

            sage: tabs=RowStandardTableauTuples(shape=[[5,2],[3,2],[],[1,1,1],[3]]); tabs
            Row standard tableau tuples of shape ([5, 2], [3, 2], [], [1, 1, 1], [3])
            sage: tabs.shape()
            ([5, 2], [3, 2], [], [1, 1, 1], [3])
            sage: RowStandardTableauTuples().shape() is None
            True
        """
        return self._shape
class RowStandardTableauTuples_all(RowStandardTableauTuples, DisjointUnionEnumeratedSets):
    """
    Default class of all :class:`RowStandardTableauTuples` with an arbitrary
    :meth:`~TableauTuples.level` and :meth:`~TableauTuples.size`.
    """

    def __init__(self):
        r"""
        Initializes the class of all row standard tableaux.

        .. WARNING::

            Input is not checked; please use :class:`RowStandardTableauTuples`
            to ensure the options are properly parsed.

        EXAMPLES::

            sage: RSTT = RowStandardTableauTuples()
            sage: TestSuite(RSTT).run()
        """
        RowStandardTableauTuples.__init__(self)
        from sage.combinat.partition_tuple import PartitionTuples
        # realised as the disjoint union, over all partition tuples, of the
        # row standard tableau tuples of that shape
        DisjointUnionEnumeratedSets.__init__(self,
                Family(PartitionTuples(), RowStandardTableauTuples_shape),
                facade=True, keepkey=False)

    def _repr_(self):
        """
        The string representation of the :class:`RowStandardTableauTuples` of
        arbitrary ``level`` and ``size``.

        EXAMPLES::

            sage: RowStandardTableauTuples()
            Row standard tableau tuples
        """
        return "Row standard tableau tuples"

    def an_element(self):
        r"""
        Return a particular element of the class.

        EXAMPLES::

            sage: RowStandardTableauTuples().an_element()
            ([[4, 5, 6, 7]], [[2, 3]], [[1]])
        """
        # one-row components [[1]], [[2,3]], [[4,5,6,7]] built smallest
        # first and then reversed; list(range(...)) so each row really is
        # a list under Python 3
        return self.element_class(self,
                reversed([[list(range(2**(i - 1), 2**i))] for i in range(1, 4)]))
class RowStandardTableauTuples_level(RowStandardTableauTuples, DisjointUnionEnumeratedSets):
    """
    Class of all :class:`RowStandardTableauTuples` with a fixed ``level``
    and arbitrary ``size``.
    """

    def __init__(self, level):
        r"""
        Initializes the class of row standard tableaux of level
        ``level`` of arbitrary ``size``.

        .. WARNING::

            Input is not checked; please use :class:`RowStandardTableauTuples`
            to ensure the options are properly parsed.

        EXAMPLES::

            sage: RowStandardTableauTuples(3)
            Row standard tableau tuples of level 3

            sage: RowStandardTableauTuples(3)[:10]
            [([], [], []),
             ([[1]], [], []),
             ([], [[1]], []),
             ([], [], [[1]]),
             ([[1, 2]], [], []),
             ([[2], [1]], [], []),
             ([[1], [2]], [], []),
             ([[2]], [[1]], []),
             ([[1]], [[2]], []),
             ([[2]], [], [[1]])]

            sage: RowStandardTableauTuples(3).cardinality()
            +Infinity
        """
        RowStandardTableauTuples.__init__(self)
        from sage.combinat.partition_tuple import PartitionTuples_level
        # realised as the disjoint union, over all partition tuples of this
        # level, of the row standard tableau tuples of that shape
        DisjointUnionEnumeratedSets.__init__(self,
                Family(PartitionTuples_level(level), RowStandardTableauTuples_shape),
                facade=True, keepkey=False)
        self._level = level

    def _repr_(self):
        """
        The string representation of the :class:`RowStandardTableauTuples`
        of fixed ``level``.

        EXAMPLES::

            sage: RowStandardTableauTuples(3) # indirect doctest
            Row standard tableau tuples of level 3
        """
        return 'Row standard tableau tuples of level %s' % self.level()

    def __contains__(self, t):
        """
        Containment function for :class:`RowStandardTableauTuples` of
        fixed ``level``.

        EXAMPLES::

            sage: T = RowStandardTableauTuples(3)
            sage: [[[2,3]],[[1]],[]] in T
            True
            sage: RowStandardTableauTuple([[2, 3], [1]]) in T
            False
            sage: [] in T
            False

        Check that :trac:`14145` is fixed::

            sage: 1 in RowStandardTableauTuples(3)
            False
        """
        if isinstance(t, RowStandardTableauTuple):
            return self.level() == t.level()
        elif RowStandardTableauTuples.__contains__(self, t):
            if all(s in Tableaux() for s in t):
                # a tuple of tableaux: one component per level
                return len(t) == self.level()
            else:
                # otherwise t can only be a level 1 tableau
                return self.level() == 1
        else:
            return False

    def an_element(self):
        r"""
        Return a particular element of the class.

        EXAMPLES::

            sage: RowStandardTableauTuples(2).an_element()
            ([[1]], [[2, 3]])
            sage: RowStandardTableauTuples(3).an_element()
            ([[1]], [[2, 3]], [[4, 5, 6, 7]])
        """
        # component i is the single row [2^(i-1), ..., 2^i - 1];
        # list(range(...)) so each row really is a list under Python 3
        return self.element_class(self,
                [[list(range(2**(i - 1), 2**i))] for i in range(1, self.level() + 1)])
class RowStandardTableauTuples_size(RowStandardTableauTuples, DisjointUnionEnumeratedSets):
    """
    Class of all :class:`RowStandardTableauTuples` with an arbitrary ``level``
    and a fixed ``size``.
    """

    def __init__(self, size):
        r"""
        Initializes the class of row standard tableaux of size ``size`` of
        arbitrary level.

        .. WARNING::

            Input is not checked; please use :class:`RowStandardTableauTuples`
            to ensure the options are properly parsed.

        EXAMPLES::

            sage: RowStandardTableauTuples(size=3) # indirect doctest
            Row standard tableau tuples of size 3

            sage: RowStandardTableauTuples(size=2)[:10]
            [([[1, 2]]),
             ([[2], [1]]),
             ([[1], [2]]),
             ([[1, 2]], []),
             ([[2], [1]], []),
             ([[1], [2]], []),
             ([[2]], [[1]]),
             ([[1]], [[2]]),
             ([], [[1, 2]]),
             ([], [[2], [1]])]

            sage: RowStandardTableauTuples(3).cardinality()
            +Infinity
        """
        RowStandardTableauTuples.__init__(self)
        from sage.combinat.partition_tuple import PartitionTuples_size
        # realised as the disjoint union, over all partition tuples of this
        # size, of the row standard tableau tuples of that shape
        DisjointUnionEnumeratedSets.__init__(self,
                Family(PartitionTuples_size(size), RowStandardTableauTuples_shape),
                facade=True, keepkey=False)
        self._size = size

    def _repr_(self):
        """
        The string representation of the :class:`RowStandardTableauTuples`
        of fixed ``size``.

        EXAMPLES::

            sage: RowStandardTableauTuples(size=3)
            Row standard tableau tuples of size 3
        """
        return "Row standard tableau tuples of size %s" % self.size()

    def __contains__(self, t):
        """
        Containment function for :class:`RowStandardTableauTuples` of fixed
        ``size``.

        EXAMPLES::

            sage: T = RowStandardTableauTuples(size=3)
            sage: ([[1,2]], [], [], [[3]]) in T
            True
            sage: [[[1,2]], [], [], [[5]]] in T
            False
            sage: Tableau([[1]]) in T
            False

            sage: 1 in RowStandardTableauTuples(size=3)
            False
        """
        if isinstance(t, self.element_class):
            return self.size() == t.size()
        elif t in RowStandardTableauTuples():
            if all(s in Tableaux() for s in t):
                # a tuple of tableaux: count the entries in every row of
                # every component
                return sum(sum(map(len, s)) for s in t) == self.size()
            else:
                # otherwise t can only be a level 1 tableau
                return self.size() == sum(map(len, t))
        else:
            return False

    def an_element(self):
        r"""
        Return a particular element of the class.

        EXAMPLES::

            sage: RowStandardTableauTuples(size=2).an_element()
            ([[1]], [[2]], [], [])
            sage: RowStandardTableauTuples(size=4).an_element()
            ([[1]], [[2, 3, 4]], [], [])
        """
        if self.size() == 0:
            return self.element_class(self, [[], [], [], []])
        elif self.size() == 1:
            return self.element_class(self, [[[1]], [], [], []])
        # list(range(...)) so the second component's row really is a list
        # under Python 3
        return self.element_class(self, [[[1]], [list(range(2, self.size() + 1))], [], []])
class RowStandardTableauTuples_level_size(RowStandardTableauTuples, DisjointUnionEnumeratedSets):
    """
    Class of all :class:`RowStandardTableauTuples` with a fixed ``level``
    and a fixed ``size``.
    """

    def __init__(self, level, size):
        r"""
        Initializes the class of row standard tableaux of level ``level``
        and size ``size``.

        .. WARNING::

            Input is not checked; please use :class:`RowStandardTableauTuples`
            to ensure the options are properly parsed.

        EXAMPLES::

            sage: RowStandardTableauTuples(size=4,level=3)
            Row standard tableau tuples of level 3 and size 4
            sage: RowStandardTableauTuples(size=4,level=3) is RowStandardTableauTuples(3,4)
            True

            sage: RowStandardTableauTuples(level=3, size=2)[:]
            [([[1, 2]], [], []),
             ([[2], [1]], [], []),
             ([[1], [2]], [], []),
             ([[2]], [[1]], []),
             ([[1]], [[2]], []),
             ([[2]], [], [[1]]),
             ([[1]], [], [[2]]),
             ([], [[1, 2]], []),
             ([], [[2], [1]], []),
             ([], [[1], [2]], []),
             ([], [[2]], [[1]]),
             ([], [[1]], [[2]]),
             ([], [], [[1, 2]]),
             ([], [], [[2], [1]]),
             ([], [], [[1], [2]])]

            sage: RowStandardTableauTuples(3,2).cardinality()
            15
        """
        RowStandardTableauTuples.__init__(self)
        from sage.combinat.partition_tuple import PartitionTuples_level_size
        # realised as the disjoint union, over all partition tuples of this
        # level and size, of the row standard tableau tuples of that shape
        DisjointUnionEnumeratedSets.__init__(self,
                Family(PartitionTuples_level_size(level, size), RowStandardTableauTuples_shape),
                facade=True, keepkey=False)
        self._level = level
        self._size = size

    def _repr_(self):
        """
        The string representation of the :class:`RowStandardTableauTuples` of
        fixed ``level`` and size.

        EXAMPLES::

            sage: RowStandardTableauTuples(3, 4)
            Row standard tableau tuples of level 3 and size 4
        """
        return "Row standard tableau tuples of level %s and size %s" % (self.level(), self.size())

    def __contains__(self, t):
        """
        Containment function for :class:`RowStandardTableauTuples` of fixed
        ``level`` and size.

        EXAMPLES::

            sage: tabs = RowStandardTableauTuples(level=4, size=4); tabs
            Row standard tableau tuples of level 4 and size 4
            sage: [[[2,4],[1]],[],[[3]],[]] in tabs
            True
            sage: tabs([[[1,2]],[],[[4],[3]],[]]) == RowStandardTableauTuple([[[1,2]],[],[[4],[3]],[]])
            True
            sage: RowStandardTableauTuple([[[2, 3]], [[1]]]) in tabs
            False

        Check that :trac:`14145` is fixed::

            sage: 1 in RowStandardTableauTuples(level=4, size=3)
            False
        """
        if isinstance(t, self.element_class):
            return self.size() == t.size() and self.level() == t.level()
        elif t in RowStandardTableauTuples():
            if all(s in Tableaux() for s in t):
                # a tuple of tableaux: check both invariants
                return len(t) == self.level() and sum(sum(map(len, s)) for s in t) == self.size()
            else:
                # otherwise t can only be a level 1 tableau
                return self.level() == 1 and self.size() == sum(map(len, t))
        else:
            return False

    def an_element(self):
        r"""
        Return a particular element of ``self``.

        EXAMPLES::

            sage: RowStandardTableauTuples(5,size=2).an_element()
            ([], [], [], [], [[1], [2]])
            sage: RowStandardTableauTuples(2,size=4).an_element()
            ([[1]], [[2, 3], [4]])
        """
        # note that this class always has level >= 2, so there is at least
        # one empty component below when size <= 2
        if self.size() == 0:
            return self.element_class(self, [[] for l in range(self.level())])
        elif self.size() == 1:
            return self.element_class(self,
                    [[] for i in range(self.level() - 1)] + [[[1]]])
        elif self.size() == 2:
            return self.element_class(self,
                    [[] for i in range(self.level() - 1)] + [[[1], [2]]])
        # one component [[1]] followed by a component with rows
        # [2,...,size-1] and [size]; list(range(...)) so the first of these
        # rows really is a list under Python 3
        return self.element_class(self,
                [[] for i in range(self.level() - 2)]
                + [[[1]], [list(range(2, self.size())), [self.size()]]])
class RowStandardTableauTuples_shape(RowStandardTableauTuples):
    """
    Class of all :class:`RowStandardTableauTuples` of a fixed shape.
    """

    def __init__(self, shape):
        r"""
        Initializes the class of row standard tableaux of shape ``p``
        and no maximum entry.

        .. WARNING::

            Input is not checked; please use :class:`RowStandardTableauTuples`
            to ensure the options are properly parsed.  In particular,
            ``shape`` is expected to be coercible to a
            :class:`PartitionTuple` (``shape.size()`` below requires it).

        EXAMPLES::

            sage: STT = RowStandardTableauTuples([[2,1],[2,1,1]])
            sage: STT
            Row standard tableau tuples of shape ([2, 1], [2, 1, 1])
            sage: STT.cardinality()
            1260
        """
        super(RowStandardTableauTuples_shape, self).__init__(category=FiniteEnumeratedSets())
        from sage.combinat.partition_tuple import PartitionTuple
        # the shape determines the level and size of every tableau tuple here
        self._shape = PartitionTuple(shape)
        self._level = len(shape)
        self._size = shape.size()

    def __contains__(self, t):
        """
        Containment function of :class:`RowStandardTableauTuples` of
        fixed shape.

        EXAMPLES::

            sage: STT = RowStandardTableauTuples([[2,1],[1]])
            sage: [[[13, 67]], [[14,67]]] in STT
            False
            sage: [[[1, 4],[3]], [[2]]] in STT
            True
            sage: ([[1, 4],[3]], [[2]]) in STT
            True

        Check that :trac:`14145` is fixed::

            sage: 1 in RowStandardTableauTuples([[2,1],[1]])
            False
        """
        if isinstance(t, self.element_class):
            return self.shape() == t.shape()
        elif t in RowStandardTableauTuples():
            if all(s in Tableaux() for s in t):
                # compare the row lengths of t with the shape
                return [[len(_) for _ in s] for s in t] == self.shape()
            else:
                # NOTE(review): this compares a list with an integer, which
                # can never be equal -- presumably the level 1 case was meant
                # to compare the row lengths of t with self.shape(); confirm
                # against upstream before changing
                return list(self.shape()) == sum(map(len, t))
        else:
            return False

    def _repr_(self):
        """
        The string representation of the :class:`RowStandardTableauTuples` of
        fixed shape.

        EXAMPLES::

            sage: RowStandardTableauTuples([[2,1],[],[3,1,1,1]])
            Row standard tableau tuples of shape ([2, 1], [], [3, 1, 1, 1])
        """
        return 'Row standard tableau tuples of shape %s' % self.shape()

    def __iter__(self):
        r"""
        Iterate through the finite class of :class:`RowStandardTableauTuples`
        of a given :class:`PartitionTuple` shape.

        The algorithm below is modelled on, but different than, the
        corresponding iterator for the row standard tableau of partition shape.
        In particular, the tableaux are generated in the reverse order here as
        that is easier (and more useful for applications to graded Specht
        modules).

        EXAMPLES::

            sage: RowStandardTableauTuples([[1],[1],[1]]).list()
            [([[3]], [[2]], [[1]]),
             ([[2]], [[3]], [[1]]),
             ([[1]], [[3]], [[2]]),
             ([[1]], [[2]], [[3]]),
             ([[2]], [[1]], [[3]]),
             ([[3]], [[1]], [[2]])]
            sage: RowStandardTableauTuples([[2,1],[2]]).list()
            [([[4, 5], [2]], [[1, 3]]),
             ([[4, 5], [3]], [[1, 2]]),
             ([[3, 5], [4]], [[1, 2]]),
             ([[3, 4], [5]], [[1, 2]]),
             ([[4, 5], [1]], [[2, 3]]),
             ([[3, 5], [1]], [[2, 4]]),
             ([[2, 5], [1]], [[3, 4]]),
             ([[1, 5], [2]], [[3, 4]]),
             ([[1, 4], [2]], [[3, 5]]),
             ([[1, 3], [2]], [[4, 5]]),
             ([[1, 2], [3]], [[4, 5]]),
             ([[2, 3], [1]], [[4, 5]]),
             ([[2, 4], [1]], [[3, 5]]),
             ([[3, 4], [1]], [[2, 5]]),
             ([[3, 4], [2]], [[1, 5]]),
             ([[2, 4], [3]], [[1, 5]]),
             ([[1, 4], [3]], [[2, 5]]),
             ([[1, 2], [4]], [[3, 5]]),
             ([[1, 3], [4]], [[2, 5]]),
             ([[2, 3], [4]], [[1, 5]]),
             ([[2, 3], [5]], [[1, 4]]),
             ([[1, 3], [5]], [[2, 4]]),
             ([[1, 2], [5]], [[3, 4]]),
             ([[1, 5], [3]], [[2, 4]]),
             ([[1, 5], [4]], [[2, 3]]),
             ([[1, 4], [5]], [[2, 3]]),
             ([[2, 4], [5]], [[1, 3]]),
             ([[2, 5], [4]], [[1, 3]]),
             ([[2, 5], [3]], [[1, 4]]),
             ([[3, 5], [2]], [[1, 4]])]

        TESTS::

            sage: def check(mu):
            ....:     return (RowStandardTableauTuples(mu).cardinality()
            ....:             == len(RowStandardTableauTuples(mu).list()))
            sage: all(check(mu) for mu in PartitionTuples(4,4))
            True
        """
        mu = self.shape()

        # Set up two lists clen and cclen which give the "end points" of
        # the components of mu and the rows of each component, respectively, so
        # that the numbers contained in component c of the initial tableau are
        #    tab[ clen[c]:clen[c+1] ]
        # and the numbers contained in row r of component c are
        #    tab[ clen[c]:clen[c+1] ][ cclen[c][r]: cclen[c][r+1] ]
        # where tab=[1,2,...,n] as above
        relations = []
        clen = [0]*(len(mu)+1)
        cclen = [[0]*(len(mu[c])+1) for c in range(len(mu))]
        for c in range(len(mu)):
            for r in range(len(mu[c])):
                cclen[c][r+1] = cclen[c][r] + mu[c][r]
                # within a row, consecutive positions must increase: record
                # each such covering relation (1-based positions) for the poset
                relations += [(clen[c]+cclen[c][r]+i+1, clen[c]+cclen[c][r]+i+2)
                              for i in range(mu[c][r]-1)]
            clen[c+1] = clen[c] + cclen[c][-1]

        # To generate the row standard tableau tuples we are going to generate
        # them from linearisations of the poset from the rows of the tableau. We
        # will get them as "flattened" tableaux so we need to expand these one
        # line lists back into tableaux. This is done by the following functions.
        def tableau_from_list(tab):
            """
            Converts a list tab=[t_1,...,t_n] into the mu-tableau obtained by
            inserting t_1,..,t_n in order into the rows of mu, from left to right
            in each component and then left to right along the components.
            """
            return self.element_class(self,
                                      [[tab[clen[c]:clen[c+1]][cclen[c][r]:cclen[c][r+1]]
                                        for r in range(len(mu[c]))]
                                       for c in range(len(mu))],
                                      check=False)

        # now run through the linear extensions and return the corresponding tableau
        for lin in Poset((range(1, mu.size()+1), relations)).linear_extensions():
            # invert the permutation so that entry positions become entries
            linear_tab = list(permutation.Permutation(lin).inverse())
            yield tableau_from_list(linear_tab)

    def cardinality(self):
        r"""
        Return the number of row standard tableau tuples of with the same
        shape as the partition tuple ``self``.

        This is just the index of the corresponding Young subgroup in the
        full symmetric group: n! divided by the product of the row lengths'
        factorials.

        EXAMPLES::

            sage: RowStandardTableauTuples([[3,2,1],[]]).cardinality()
            60
            sage: RowStandardTableauTuples([[1],[1],[1]]).cardinality()
            6
            sage: RowStandardTableauTuples([[2,1],[1],[1]]).cardinality()
            60
        """
        mu = self.shape()
        return Integer(factorial(mu.size()) // prod(factorial(row) for nu in mu for row in nu))

    def an_element(self):
        r"""
        Return a particular element of ``self``.

        EXAMPLES::

            sage: RowStandardTableauTuples([[2],[2,1]]).an_element()
            ([[4, 5]], [[1, 3], [2]])
            sage: RowStandardTableauTuples([[10],[],[]]).an_element()
            ([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], [], [])
        """
        c = self.cardinality()
        # old-style conditional expression: index 4 if c > 3, else the last
        # element if c > 1, else the unique element
        return self[c>3 and 4 or (c>1 and -1 or 0)]
class RowStandardTableauTuples_residue(RowStandardTableauTuples):
r"""
Class of all row standard tableau tuples with a fixed residue sequence.
Implicitly, this also specifies the quantum characteristic, multicharge
and hence the level and size of the tableaux.
.. NOTE::
This class is not intended to be called directly, but rather,
it is accessed through the row standard tableaux.
EXAMPLES::
sage: RowStandardTableau([[3,4,5],[1,2]]).residue_sequence(2).row_standard_tableaux()
Row standard tableaux with 2-residue sequence (1,0,0,1,0) and multicharge (0)
sage: RowStandardTableau([[3,4,5],[1,2]]).residue_sequence(3).row_standard_tableaux()
Row standard tableaux with 3-residue sequence (2,0,0,1,2) and multicharge (0)
sage: RowStandardTableauTuple([[[5,6],[7]],[[1,2,3],[4]]]).residue_sequence(2,(0,0)).row_standard_tableaux()
Row standard tableaux with 2-residue sequence (0,1,0,1,0,1,1) and multicharge (0,0)
sage: RowStandardTableauTuple([[[5,6],[7]],[[1,2,3],[4]]]).residue_sequence(3,(0,1)).row_standard_tableaux()
Row standard tableaux with 3-residue sequence (1,2,0,0,0,1,2) and multicharge (0,1)
"""
def __init__(self, residue):
    r"""
    Initialize ``self``.

    INPUT:

    - ``residue`` -- the residue sequence shared by every tableau in this
      class; it implicitly determines the quantum characteristic, the
      multicharge, and hence the level and size of the tableaux

    .. WARNING::

        Input is not checked; please use :class:`RowStandardTableauTuples`
        to ensure the options are properly parsed.

    EXAMPLES::

        sage: tabs = RowStandardTableau([[3,4,5],[1,2]]).residue_sequence(3).row_standard_tableaux()
        sage: TestSuite(tabs).run()
        sage: tabs = RowStandardTableauTuple([[[6],[7]],[[3,4,5],[1,2]]]).residue_sequence(2,(0,0)).row_standard_tableaux()
        sage: TestSuite(tabs).run() # long time
    """
    super(RowStandardTableauTuples_residue, self).__init__(category=FiniteEnumeratedSets())
    self._residue = residue
    # cache the data determined by the residue sequence so that the other
    # methods do not need to go back through ``residue``
    self._quantum_characteristic = residue.quantum_characteristic()
    self._multicharge = residue.multicharge()
    self._level = residue.level()
    self._size = residue.size()
    self._base_ring = residue.base_ring()
def _repr_(self):
    """
    Return the string representation of ``self``.

    EXAMPLES::

        sage: RowStandardTableauTuple([[[4,5],[3]],[[1,2]]]).residue_sequence(3,(0,1)).row_standard_tableaux()
        Row standard tableaux with 3-residue sequence (1,2,2,0,1) and multicharge (0,1)
        sage: StandardTableauTuple([[[1,2],[3]],[[4,5]]]).residue_sequence(3,(0,1)).row_standard_tableaux()
        Row standard tableaux with 3-residue sequence (0,1,2,1,2) and multicharge (0,1)
    """
    # the residue sequence knows how to describe itself; 'and' is the
    # conjunction used between the residues and the multicharge
    return 'Row standard tableaux with %s' % self._residue.__str__('and')
def __contains__(self, t):
    """
    Check containment of ``t`` in ``self``.

    EXAMPLES::

        sage: res = RowStandardTableauTuple([[[4,5],[3]],[[1,2]]]).residue_sequence(3,(0,1))
        sage: tabs = res.row_standard_tableaux(); tabs
        Row standard tableaux with 3-residue sequence (1,2,2,0,1) and multicharge (0,1)
        sage: [[[1,2],[3]],[[4,5]]] in tabs
        False
        sage: [[[4,5],[3]],[[1,2]]] in tabs
        True
        sage: [[[1,2],[4,5]],[[3]]] in tabs
        False
    """
    # coerce list-like input into a genuine row standard tableau tuple first;
    # anything that fails to coerce certainly does not belong here
    if not isinstance(t, self.element_class):
        try:
            t = RowStandardTableauTuple(t)
        except ValueError:
            return False
    res = t.residue_sequence(self._quantum_characteristic, self._multicharge)
    return res == self._residue
def __iter__(self):
    r"""
    Iterate through ``self``.

    We construct this sequence of tableaux recursively, as is easier (and
    more useful for applications to graded Specht modules).

    EXAMPLES::

        sage: R = RowStandardTableauTuple([[[4, 5], [3]],[[1,2]]]).residue_sequence(3, (0,1))
        sage: R.row_standard_tableaux()[:]
        [([[4, 5], [3]], [[1, 2]]),
         ([[4, 5], [2]], [[1, 3]]),
         ([[4], [3], [5]], [[1, 2]]),
         ([[4], [2], [5]], [[1, 3]]),
         ([], [[1, 3], [4, 5], [2]]),
         ([], [[1, 2], [4, 5], [3]]),
         ([], [[1, 3], [4], [2], [5]]),
         ([], [[1, 2], [4], [3], [5]])]

        sage: R = RowStandardTableauTuple([[[2,4],[1]],[[3]]]).residue_sequence(3,(0,1))
        sage: R.row_standard_tableaux()[:]
        [([[2, 4], [1], [3]], []),
         ([[2, 3], [1], [4]], []),
         ([[2, 4], [1]], [[3]]),
         ([[2, 3], [1]], [[4]]),
         ([[2], [1], [4]], [[3]]),
         ([[2], [1], [3]], [[4]]),
         ([], [[4], [2], [1], [3]]),
         ([], [[3], [2], [1], [4]])]
    """
    if self._size == 0:
        # the empty tableau (tuple) is the unique element in this case
        if self._level == 1:
            yield RowStandardTableau([])
        else:
            yield RowStandardTableauTuple([[] for _ in range(self._level)])
        return

    # the only way that I know to generate these tableaux is to test all
    # possible shapes in the same block, which is cheap to test
    from sage.combinat.partition_tuple import PartitionTuples
    target_block = self._residue.block()
    for shape in PartitionTuples(self._level, self._size):
        if shape.block(self._quantum_characteristic, self._multicharge) != target_block:
            continue
        for tab in RowStandardTableauTuples_residue_shape(self._residue, shape):
            if self._level == 1:
                yield tab
            else:
                yield self.element_class(self, tab, check=False)
def quantum_characteristic(self):
r"""
Return the quantum characteristic of ``self``.
EXAMPLES::
sage: RowStandardTableau([[2,3],[1]]).residue_sequence(3,(0,1)).row_standard_tableaux().quantum_characteristic()
3
sage: StandardTableau([[1,2],[3]]).residue_sequence(3,(0,1)).row_standard_tableaux().quantum_characteristic()
3
sage: RowStandardTableauTuple([[[4]],[[2,3],[1]]]).residue_sequence(3,(0,1)).row_standard_tableaux().quantum_characteristic()
3
sage: StandardTableauTuple([[[4]],[[1,3],[2]]]).residue_sequence(3,(0,1)).row_standard_tableaux().quantum_characteristic()
3
"""
return self._quantum_characteristic
def multicharge(self):
r"""
Return the multicharge of ``self``.
EXAMPLES::
sage: RowStandardTableau([[2,3],[1]]).residue_sequence(3,(0,1)).row_standard_tableaux().multicharge()
(0, 1)
sage: StandardTableau([[1,2],[3]]).residue_sequence(3,(0,1)).row_standard_tableaux().multicharge()
(0, 1)
sage: RowStandardTableauTuple([[[4]],[[2,3],[1]]]).residue_sequence(3,(0,1)).row_standard_tableaux().multicharge()
(0, 1)
sage: StandardTableauTuple([[[4]],[[1,3],[2]]]).residue_sequence(3,(0,1)).row_standard_tableaux().multicharge()
(0, 1)
"""
return self._multicharge
def level(self):
r"""
Return the level of ``self``.
EXAMPLES::
sage: RowStandardTableau([[2,3],[1]]).residue_sequence(3,(0,1)).row_standard_tableaux().level()
2
sage: StandardTableau([[1,2],[3]]).residue_sequence(3,(0,1)).row_standard_tableaux().level()
2
sage: RowStandardTableauTuple([[[4]],[[2,3],[1]]]).residue_sequence(3,(0,1)).row_standard_tableaux().level()
2
sage: StandardTableauTuple([[[4]],[[1,3],[2]]]).residue_sequence(3,(0,1)).row_standard_tableaux().level()
2
"""
return self._level
def size(self):
r"""
Return the size of ``self``.
EXAMPLES::
sage: RowStandardTableau([[2,3],[1]]).residue_sequence(3,(0,1)).row_standard_tableaux().size()
3
sage: StandardTableau([[1,2],[3]]).residue_sequence(3,(0,1)).row_standard_tableaux().size()
3
sage: RowStandardTableauTuple([[[4]],[[2,3],[1]]]).residue_sequence(3,(0,1)).row_standard_tableaux().size()
4
sage: StandardTableauTuple([[[4]],[[1,3],[2]]]).residue_sequence(3,(0,1)).row_standard_tableaux().size()
4
"""
return self._size
def residue_sequence(self):
r"""
Return the residue sequence of ``self``.
EXAMPLES::
sage: RowStandardTableau([[2,3],[1]]).residue_sequence(3,(0,1)).row_standard_tableaux().residue_sequence()
3-residue sequence (2,0,1) with multicharge (0,1)
sage: StandardTableau([[1,2],[3]]).residue_sequence(3,(0,1)).row_standard_tableaux().residue_sequence()
3-residue sequence (0,1,2) with multicharge (0,1)
sage: RowStandardTableauTuple([[[4]],[[2,3],[1]]]).residue_sequence(3,(0,1)).row_standard_tableaux().residue_sequence()
3-residue sequence (0,1,2,0) with multicharge (0,1)
sage: StandardTableauTuple([[[4]],[[1,3],[2]]]).residue_sequence(3,(0,1)).row_standard_tableaux().residue_sequence()
3-residue sequence (1,0,2,0) with multicharge (0,1)
"""
return self._residue
def an_element(self):
r"""
Return a particular element of ``self``.
EXAMPLES::
sage: RowStandardTableau([[2,3],[1]]).residue_sequence(3).row_standard_tableaux().an_element()
[[2, 3], [1]]
sage: StandardTableau([[1,3],[2]]).residue_sequence(3).row_standard_tableaux().an_element()
[[1, 3], [2]]
sage: RowStandardTableauTuple([[[4]],[[2,3],[1]]]).residue_sequence(3,(0,1)).row_standard_tableaux().an_element()
sage: StandardTableauTuple([[[4]],[[1,3],[2]]]).residue_sequence(3,(0,1)).row_standard_tableaux().an_element()
([[4], [3], [1], [2]], [])
"""
try:
return self.unrank(0)
except ValueError:
return None
class RowStandardTableauTuples_residue_shape(RowStandardTableauTuples_residue):
    """
    All row standard tableau tuples with a fixed residue and shape.

    INPUT:

    - ``shape`` -- the shape of the partitions or partition tuples
    - ``residue`` -- the residue sequence of the label

    EXAMPLES::

        sage: res = RowStandardTableauTuple([[[3,6],[1]],[[5,7],[4],[2]]]).residue_sequence(3,(0,0))
        sage: tabs = res.row_standard_tableaux([[2,1],[2,1,1]]); tabs
        Row standard (2,1|2,1^2)-tableaux with 3-residue sequence (2,1,0,2,0,1,1) and multicharge (0,0)
        sage: tabs.shape()
        ([2, 1], [2, 1, 1])
        sage: tabs.level()
        2
        sage: tabs[:6]
        [([[5, 7], [4]], [[3, 6], [1], [2]]),
         ([[5, 7], [1]], [[3, 6], [4], [2]]),
         ([[3, 7], [4]], [[5, 6], [1], [2]]),
         ([[3, 7], [1]], [[5, 6], [4], [2]]),
         ([[5, 6], [4]], [[3, 7], [1], [2]]),
         ([[5, 6], [1]], [[3, 7], [4], [2]])]
    """
    def __init__(self, residue, shape):
        r"""
        Initialize ``self``.

        .. WARNING::

            Input is not checked; please use :class:`RowStandardTableauTuples`
            to ensure the options are properly parsed.

        TESTS::

            sage: res = RowStandardTableauTuple([[[1,3]],[[4,5],[2,6]]]).residue_sequence(3,(0,0))
            sage: tabs = res.row_standard_tableaux([[2],[2,2]])
            sage: TestSuite(tabs).run()
        """
        if residue.size() != shape.size():
            # fixed typo: "residue defence" -> "residue sequence"
            raise ValueError('the size of the shape and the length of the residue sequence must coincide!')
        super(RowStandardTableauTuples_residue_shape, self).__init__(residue)
        self._shape = shape

        # The _standard_tableaux attribute below is used to generate the
        # tableaux in this class. The key observation is that any row standard
        # tableau is standard if we stretch it out to a tableau with one row in
        # each component.
        multicharge = residue.multicharge()
        if shape.level() == 1:
            standard_shape = [[r] for r in shape]
            charge = [multicharge[0] - r for r in range(len(shape))]
        else:
            standard_shape = [[r] for mu in shape for r in mu]
            charge = [multicharge[c] - r for c in range(len(shape))
                      for r in range(len(shape[c]))]
        from sage.combinat.tableau_residues import ResidueSequence
        res = ResidueSequence(residue.quantum_characteristic(), charge, residue.residues())
        self._standard_tableaux = res.standard_tableaux(standard_shape)

        # To convert the tableaux in self._standard_tableaux to row standard
        # tableaux we use the list _cumulative_lengths, which keeps track of the
        # cumulative lengths of each component.
        if shape.level() == 1:
            self._cumulative_lengths = [0, len(shape)]
        else:
            self._cumulative_lengths = [0] * (shape.level() + 1)
            for c in range(len(shape)):
                self._cumulative_lengths[c+1] = self._cumulative_lengths[c] + len(shape[c])

    def __contains__(self, t):
        """
        Check containment of ``t`` in ``self``.

        A tableau tuple belongs to ``self`` when it has both the right shape
        and the right residue sequence.

        EXAMPLES::

            sage: tabs = RowStandardTableauTuple([[[1,3]],[[4],[2]]]).residue_sequence(3,(0,1)).row_standard_tableaux([[2],[1,1]])
            sage: [ [[1,2,3,4]], [[]] ] in tabs
            False
            sage: ([[1, 3]], [[4], [2]]) in tabs
            True
        """
        if not isinstance(t, self.element_class):
            try:
                t = RowStandardTableauTuple(t)
            except ValueError:
                return False
        return (t.shape() == self._shape
                and t.residue_sequence(self._quantum_characteristic, self._multicharge)
                == self._residue)

    def _repr_(self):
        """
        Return the string representation of ``self``.

        EXAMPLES::

            sage: RowStandardTableau([[1,3],[2,4]]).residue_sequence(3).row_standard_tableaux([2,2])
            Row standard (2^2)-tableaux with 3-residue sequence (0,2,1,0) and multicharge (0)
        """
        return 'Row standard ({})-tableaux with {}'.format(self._shape._repr_compact_high(),
                                                           self._residue.__str__('and'))

    def __iter__level_one(self):
        r"""
        Iterate through the row standard tableaux in ``self``.

        We construct this sequence of tableaux recursively, as it is easier
        (and more useful for applications to graded Specht modules).

        EXAMPLES::

            sage: RowStandardTableau([[2,4],[1,3]]).residue_sequence(3).row_standard_tableaux([2,2])[:] # indirect doctest
            [[[3, 4], [1, 2]], [[2, 4], [1, 3]]]
        """
        if self._size == 0:
            yield RowStandardTableau([])
            # bug fix: return here so the empty tableau is not yielded a
            # second time by the loop below (mirrors __iter__higher_levels)
            return
        for t in self._standard_tableaux:
            # each component of t is a single row; flatten back to one tableau
            yield RowStandardTableau([s[0] for s in t])

    def __iter__higher_levels(self):
        r"""
        Iterate through the row standard tableaux in ``self``.

        We construct this sequence of tableaux recursively, as it is easier
        (and more useful for applications to graded Specht modules).

        EXAMPLES::

            sage: RowStandardTableauTuple([[[2,4]],[[3,5],[1]]]).residue_sequence(3,[0,1]).row_standard_tableaux([[2],[2,1]])[:] # indirect doctest
            [([[2, 4]], [[3, 5], [1]]),
             ([[1, 4]], [[3, 5], [2]]),
             ([[2, 3]], [[4, 5], [1]]),
             ([[1, 3]], [[4, 5], [2]])]
        """
        if self._size == 0:
            yield self.element_class(self, [[] for l in range(self._level)], check=False) # the empty tableaux
            return
        for t in self._standard_tableaux:
            # regroup the one-row components of t into the original components
            # using the cumulative row counts computed in __init__
            yield self.element_class(self,
                    [ [ t[r][0] for r in range(self._cumulative_lengths[c], self._cumulative_lengths[c+1])]
                      for c in range(self._level)],
                    check=False)

    @lazy_attribute
    def __iter__(self):
        r"""
        Iterate through the row standard tableaux in ``self``.

        We construct this sequence of tableaux recursively, as it is easier
        (and more useful for applications to graded Specht modules).

        This lazy attribute selects the level-one or higher-level iterator
        once and caches the chosen bound method.

        EXAMPLES::

            sage: RowStandardTableau([[2,4],[1,3]]).residue_sequence(3).row_standard_tableaux([1,1,1,1])[:] # indirect doctest
            [[[3], [1], [4], [2]], [[2], [1], [4], [3]]]

            sage: RowStandardTableauTuple([[[2,4]],[[3,5],[1]]]).residue_sequence(3,[0,1]).row_standard_tableaux([[3],[1,1]])[:] # indirect doctest
            [([[2, 4, 5]], [[3], [1]]),
             ([[1, 4, 5]], [[3], [2]]),
             ([[2, 3, 5]], [[4], [1]]),
             ([[1, 3, 5]], [[4], [2]])]
        """
        if self._level == 1:
            return self.__iter__level_one
        else:
            return self.__iter__higher_levels
#--------------------------------------------------
# Standard tableau tuples - parent classes
#--------------------------------------------------
class StandardTableauTuples(RowStandardTableauTuples):
    """
    A factory class for the various classes of tuples of standard tableau.

    INPUT:

    There are three optional arguments:

    - ``level`` -- the :meth:`~TableauTuples.level` of the tuples of tableaux

    - ``size`` -- the :meth:`~TableauTuples.size` of the tuples of tableaux

    - ``shape`` -- a list or a partition tuple specifying the :meth:`shape` of
      the standard tableau tuples

    It is not necessary to use the keywords. If they are not used then the first
    integer argument specifies the :meth:`~TableauTuples.level` and the second
    the :meth:`~TableauTuples.size` of the tableau tuples.

    OUTPUT:

    The appropriate subclass of :class:`StandardTableauTuples`.

    A tuple of standard tableau is a tableau whose entries are positive
    integers which increase from left to right along the rows, and from top to
    bottom down the columns, in each component. The entries do NOT need to
    increase from left to right along the components.

    .. NOTE::

        Sage uses the English convention for (tuples of) partitions and
        tableaux: the longer rows are displayed on top. As with
        :class:`PartitionTuple`, in sage the cells, or nodes, of partition
        tuples are 0-based. For example, the (lexicographically) first cell
        in any non-empty partition tuple is `[0,0,0]`.

    EXAMPLES::

        sage: tabs=StandardTableauTuples([[3],[2,2]]); tabs
        Standard tableau tuples of shape ([3], [2, 2])
        sage: tabs.cardinality()
        70
        sage: tabs[10:16]
        [([[1, 2, 3]], [[4, 6], [5, 7]]),
         ([[1, 2, 4]], [[3, 6], [5, 7]]),
         ([[1, 3, 4]], [[2, 6], [5, 7]]),
         ([[2, 3, 4]], [[1, 6], [5, 7]]),
         ([[1, 2, 5]], [[3, 6], [4, 7]]),
         ([[1, 3, 5]], [[2, 6], [4, 7]])]

        sage: tabs=StandardTableauTuples(level=3); tabs
        Standard tableau tuples of level 3
        sage: tabs[100]
        ([[1, 2], [3]], [], [[4]])

        sage: StandardTableauTuples()[0]
        ()

    TESTS::

        sage: TestSuite( StandardTableauTuples() ).run()
        sage: TestSuite( StandardTableauTuples(level=1) ).run()
        sage: TestSuite( StandardTableauTuples(level=4) ).run()
        sage: TestSuite( StandardTableauTuples(size=0) ).run(max_runs=50) # recursion depth exceeded with default max_runs
        sage: TestSuite( StandardTableauTuples(size=6) ).run()
        sage: TestSuite( StandardTableauTuples(level=1, size=0) ).run()
        sage: TestSuite( StandardTableauTuples(level=1, size=0) ).run()
        sage: TestSuite( StandardTableauTuples(level=1, size=10) ).run()
        sage: TestSuite( StandardTableauTuples(level=4, size=0) ).run()
        sage: TestSuite( StandardTableauTuples(level=4, size=0) ).run()

    .. SEEALSO::

        - :class:`TableauTuples`
        - :class:`Tableau`
        - :class:`StandardTableau`
        - :class:`StandardTableauTuples`
    """
    Element = StandardTableauTuple
    level_one_parent_class = StandardTableaux_all  # used in element_constructor

    @staticmethod
    def __classcall_private__(cls, *args, **kwargs):
        r"""
        This is a factory class which returns the appropriate parent based on
        arguments.

        See the documentation for :class:`StandardTableauTuples`
        for more information.

        EXAMPLES::

            sage: StandardTableauTuples()
            Standard tableau tuples
            sage: StandardTableauTuples(4)
            Standard tableau tuples of level 4
            sage: StandardTableauTuples(4,3)
            Standard tableau tuples of level 4 and size 3
            sage: StandardTableauTuples([ [2,1],[1],[1,1,1],[3,2] ])
            Standard tableau tuples of shape ([2, 1], [1], [1, 1, 1], [3, 2])

        TESTS::

            sage: StandardTableauTuples([ [2,1],[1],[1,1,1],[3,2,3] ])
            Traceback (most recent call last):
            ...
            ValueError: the shape must be a partition tuple

            sage: P = PartitionTuples()
            sage: pt = P([[1]]); pt
            ([1])
            sage: StandardTableauTuples(pt)
            Standard tableaux of shape [1]
        """
        from sage.combinat.partition_tuple import PartitionTuple

        # first check the keyword arguments
        level = kwargs.get('level', None)
        shape = kwargs.get('shape', None)
        size = kwargs.get('size', None)

        for key in kwargs:
            if key not in ['level','shape','size']:
                raise ValueError('%s is not a valid argument for StandardTableauTuples' % key)

        # now process the positional arguments
        if args:
            #the first argument could be either the level or the shape
            if isinstance(args[0], (int, Integer)):
                if level is not None:
                    raise ValueError('the level was specified more than once')
                else:
                    level = args[0]
            else:
                if shape is not None:
                    raise ValueError('the shape was specified more than once')
                else:
                    shape = args[0]   # we check that it is a PartitionTuple below

        if len(args) == 2:  # both the level and size were specified
            if level is not None and size is not None:
                raise ValueError('the level or size was specified more than once')
            else:
                size = args[1]
        elif len(args) > 2:
            # fixed typo: "too man arguments!" -> "too many arguments!"
            raise ValueError('too many arguments!')

        # now check that the arguments are consistent
        if level is not None and (not isinstance(level, (int,Integer)) or level < 1):
            raise ValueError('the level must be a positive integer')

        if size is not None and (not isinstance(size, (int,Integer)) or size < 0):
            raise ValueError('the size must be a non-negative integer')

        if shape is not None:
            try:
                shape = PartitionTuple(shape)
            except ValueError:
                raise ValueError('the shape must be a partition tuple')

            if level is None:
                level=shape.level()
            elif level!=shape.level():
                raise ValueError('the shape and level must agree')
            if size is None:
                size=shape.size()
            elif size!=shape.size():
                raise ValueError('the shape and size must agree')

        # now that the inputs appear to make sense, return the appropriate class
        if level is not None and level <= 1:
            if isinstance(shape, PartitionTuple):
                shape = shape[0]
            if shape is not None:
                return StandardTableaux_shape(shape)
            elif size is not None:
                return StandardTableaux_size(size)
            else:
                return StandardTableaux_all()
        elif shape is not None:
            return StandardTableauTuples_shape(shape)
        elif level is not None and size is not None:
            return StandardTableauTuples_level_size(level,size)
        elif level is not None:
            return StandardTableauTuples_level(level)
        elif size is not None:
            return StandardTableauTuples_size(size)
        else:
            return StandardTableauTuples_all()

    def __getitem__(self, r):
        r"""
        The default implementation of ``__getitem__`` for enumerated sets does
        not allow slices so we override it here.

        EXAMPLES::

            sage: StandardTableauTuples()[10:20]
            [([[1, 2], [3]]),
             ([[1], [2], [3]]),
             ([[1, 2]], []),
             ([[1], [2]], []),
             ([[1]], [[2]]),
             ([[2]], [[1]]),
             ([], [[1, 2]]),
             ([], [[1], [2]]),
             ([[1]], [], []),
             ([], [[1]], [])]

        .. TODO::

            Implement slices with step size different from `1` and make this
            a method for enumerate sets.
        """
        if isinstance(r,(int,Integer)):
            return self.unrank(r)
        elif isinstance(r,slice):
            start = 0 if r.start is None else r.start
            stop = r.stop
            if stop is None and not self.is_finite():
                raise ValueError('infinite set')
        else:
            raise ValueError('r must be an integer or a slice')
        count = 0
        tabs = []
        for t in self:
            if count == stop:
                break
            if count >= start:
                tabs.append(t)
            count += 1

        # this is to cope with empty slice endpoints like [:6] or [:]
        if count == stop or stop is None:
            return tabs
        raise IndexError('value out of range')

    def __contains__(self, t):
        """
        Containment function for :class:`StandardTableauTuples` of arbitrary
        ``level`` and ``size``.

        EXAMPLES::

            sage: T = StandardTableauTuples()
            sage: [[1,3],[2]] in T
            True
            sage: [] in T
            True
            sage: Tableau([[1]]) in T
            True
            sage: StandardTableauTuple([[1]]) in T
            True
            sage: [[1,2],[1]] in T
            False
            sage: [[1,1],[5]] in T
            False

        Check that :trac:`14145` is fixed::

            sage: 1 in StandardTableauTuples()
            False
        """
        if isinstance(t, (StandardTableau, StandardTableauTuple)):
            return True
        elif TableauTuples.__contains__(self, t) or isinstance(t, (list, tuple)):
            if all(s in Tableaux() for s in t):
                # entries must be exactly 1, 2, ..., n and each component must
                # be row and column strict
                flatt=sorted(sum((list(row) for s in t for row in s),[]))
                return flatt==list(range(1,len(flatt)+1)) and all(len(x)==0 or
                  (all(row[i]<row[i+1] for row in x for i in range(len(row)-1))
                     and all(x[r][c]<x[r+1][c] for c in range(len(x[0]))
                             for r in range(len(x)-1) if len(x[r+1])>c)
                  ) for x in t)
            else:
                return t in StandardTableaux()
        else:
            return False

    # set the default shape
    _shape = None

    def shape(self):
        """
        Return the shape of the set of :class:`StandardTableauTuples`, or
        ``None`` if it is not defined.

        EXAMPLES::

            sage: tabs=StandardTableauTuples(shape=[[5,2],[3,2],[],[1,1,1],[3]]); tabs
            Standard tableau tuples of shape ([5, 2], [3, 2], [], [1, 1, 1], [3])
            sage: tabs.shape()
            ([5, 2], [3, 2], [], [1, 1, 1], [3])
            sage: StandardTableauTuples().shape() is None
            True
        """
        return self._shape
class StandardTableauTuples_all(StandardTableauTuples, DisjointUnionEnumeratedSets):
    """
    The parent of all :class:`StandardTableauTuples`, of arbitrary
    :meth:`~TableauTuples.level` and :meth:`~TableauTuples.size`.
    """
    def __init__(self):
        r"""
        Initialize the class of all standard tableau tuples.

        Input is not checked; please use :class:`StandardTableauTuples`
        to ensure the options are properly parsed.

        EXAMPLES::

            sage: StandardTableauTuples()
            Standard tableau tuples
        """
        StandardTableauTuples.__init__(self)
        from sage.combinat.partition_tuple import PartitionTuples
        # realize this parent as the disjoint union, over all partition
        # tuples, of the standard tableau tuples of that shape
        DisjointUnionEnumeratedSets.__init__(self,
                Family(PartitionTuples(), StandardTableauTuples_shape),
                facade=True, keepkey=False)

    def _repr_(self):
        """
        Return the string representation of ``self``.

        EXAMPLES::

            sage: STT = StandardTableauTuples(); STT    # indirect doctest
            Standard tableau tuples
        """
        return "Standard tableau tuples"

    def __iter__(self):
        """
        Iterate through the infinite class of :class:`StandardTableauTuples`
        of arbitrary ``level`` and ``size``.

        Note that because these tableaux should have
        :class:`StandardTableauTuples` as their parent, any tuples of level 1
        will actually be a :class:`StandardTableauTuples` and NOT
        :class:`StandardTableaux`. As such they will have a restricted set
        of methods compared with usual :class:`StandardTableaux`. As they
        were constructed via this iterator this is presumably what is required
        so it should not cause any problems, especially as they are printed
        with brackets around them to alert the user that something is
        different.

        EXAMPLES::

            sage: stt=StandardTableauTuples()
            sage: stt[0:8]
            [(),
             ([[1]]),
             ([], []),
             ([[1, 2]]),
             ([[1], [2]]),
             ([[1]], []),
             ([], [[1]]),
             ([], [], [])]
            sage: stt[5]
            ([[1]], [])
            sage: stt[50]
            ([], [[1, 3], [2]])
            sage: stt[47].parent() is stt
            True
        """
        from sage.combinat.partition_tuple import PartitionTuples
        for mu in PartitionTuples():
            # StandardTableauTuples(mu), rather than the shape class directly,
            # correctly deals with the case when mu is of level 1
            for tab in StandardTableauTuples(mu):
                yield self.element_class(self, tab, check=False)
class StandardTableauTuples_level(StandardTableauTuples, DisjointUnionEnumeratedSets):
    """
    Class of all :class:`StandardTableauTuples` with a fixed ``level``
    and arbitrary ``size``.
    """
    def __init__(self, level):
        r"""
        Initialize the class of standard tableau tuples of level ``level``
        and arbitrary ``size``.

        Input is not checked; please use
        :class:`StandardTableauTuples` to ensure the options are
        properly parsed.

        EXAMPLES::

            sage: StandardTableauTuples(3)
            Standard tableau tuples of level 3
        """
        StandardTableauTuples.__init__(self)
        from sage.combinat.partition_tuple import PartitionTuples_level
        # realize this parent as the disjoint union, over all partition tuples
        # of this level, of the standard tableau tuples of that shape
        DisjointUnionEnumeratedSets.__init__(self,
                Family(PartitionTuples_level(level), StandardTableauTuples_shape),
                facade=True, keepkey=False)
        self._level = level

    def _repr_(self):
        """
        Return the string representation of ``self``.

        EXAMPLES::

            sage: StandardTableauTuples(3)    # indirect doctest
            Standard tableau tuples of level 3
        """
        return 'Standard tableau tuples of level %s' % self.level()

    def __contains__(self, t):
        """
        Containment function for :class:`StandardTableauTuples` of
        fixed ``level``.

        EXAMPLES::

            sage: T = StandardTableauTuples(3)
            sage: [[[1,2]],[[3]],[]] in T
            True
            sage: StandardTableauTuple([[1, 2], [3]]) in T
            False
            sage: [] in T
            False

        Check that :trac:`14145` is fixed::

            sage: 1 in StandardTableauTuples(3)
            False
        """
        if isinstance(t, StandardTableauTuple):
            return self.level() == t.level()
        elif StandardTableauTuples.__contains__(self, t):
            if all(s in Tableaux() for s in t):
                # t is a genuine tuple of tableaux: compare its length
                return len(t)==self.level()
            else:
                # t is a single (level-one) tableau
                return self.level()==1
        else:
            return False

    def __iter__(self):
        """
        Iterate through the infinite class of all
        :class:`StandardTableauTuples` of a fixed ``level``.

        EXAMPLES::

            sage: stt = StandardTableauTuples(3)
            sage: stt[0:8]
            [([], [], []),
             ([[1]], [], []),
             ([], [[1]], []),
             ([], [], [[1]]),
             ([[1, 2]], [], []),
             ([[1], [2]], [], []),
             ([[1]], [[2]], []),
             ([[2]], [[1]], [])]
            sage: stt[50]
            ([], [[1, 2, 3]], [])
            sage: stt[0].parent() is stt
            True
        """
        # Iterate through the PartitionTuples and then the tableaux
        # Note that the level is greater than one so we do not have to treat
        # StandardTableaux separately
        from sage.combinat.partition_tuple import PartitionTuples
        for shape in PartitionTuples(self.level()):
            for t in StandardTableauTuples_shape(shape):
                yield self.element_class(self, t, check=False)

    def an_element(self):
        r"""
        Return a particular element of the class.

        Component `i` (1-based) of the returned tuple consists of the single
        row `[2^{i-1}, \ldots, 2^i - 1]`.

        EXAMPLES::

            sage: StandardTableauTuples(2).an_element()
            ([[1]], [[2, 3]])
            sage: StandardTableauTuples(3).an_element()
            ([[1]], [[2, 3]], [[4, 5, 6, 7]])
        """
        return self.element_class(self, [ [list(range(2**(i-1),2**i))] for i in range(1,self.level()+1)])
class StandardTableauTuples_size(StandardTableauTuples, DisjointUnionEnumeratedSets):
    """
    Class of all :class:`StandardTableauTuples` with an arbitrary ``level``
    and a fixed ``size``.
    """
    def __init__(self, size):
        r"""
        Initialize the class of standard tableau tuples of size ``size`` and
        arbitrary level.

        Input is not checked; please use
        :class:`StandardTableauTuples` to ensure the options are properly
        parsed.

        EXAMPLES::

            sage: StandardTableauTuples(size=3)
            Standard tableau tuples of size 3
        """
        StandardTableauTuples.__init__(self)
        from sage.combinat.partition_tuple import PartitionTuples_size
        # realize this parent as the disjoint union, over all partition tuples
        # of this size, of the standard tableau tuples of that shape
        DisjointUnionEnumeratedSets.__init__(self,
                Family(PartitionTuples_size(size), StandardTableauTuples_shape),
                facade=True, keepkey=False)
        self._size = size

    def _repr_(self):
        """
        Return the string representation of ``self``.

        EXAMPLES::

            sage: StandardTableauTuples(size=3)    # indirect doctest
            Standard tableau tuples of size 3
        """
        return "Standard tableau tuples of size %s" % self.size()

    def __contains__(self, t):
        """
        Containment function for :class:`StandardTableauTuples` of fixed
        ``size``.

        EXAMPLES::

            sage: T = StandardTableauTuples(size=3)
            sage: ([[1,2]], [], [], [[3]]) in T
            True
            sage: [[[1,2]], [], [], [[5]]] in T
            False
            sage: Tableau([[1]]) in T
            False

        Check that :trac:`14145` is fixed::

            sage: 1 in StandardTableauTuples(size=3)
            False
        """
        if isinstance(t, self.element_class):
            return self.size()==t.size()
        elif t in StandardTableauTuples():
            if all(s in Tableaux() for s in t):
                # t is a genuine tuple of tableaux: count the cells in each
                return sum(sum(map(len,s)) for s in t)==self.size()
            else:
                # t is a single (level-one) tableau
                return self.size()==sum(map(len,t))
        else:
            return False

    def __iter__(self):
        """
        Iterate through the infinite class of all
        :class:`StandardTableauTuples` of a fixed ``size``.

        Note that because these tableaux should have
        :class:`StandardTableauTuples` as their parent, any tuples of level 1
        will actually be a :class:`StandardTableauTuples` and NOT
        :class:`StandardTableaux`. As such they will have a restricted set of
        methods compared with usual :class:`StandardTableaux`. As they
        were constructed via this iterator this is presumably what is required
        so it should not cause any problems, especially as they are printed
        with brackets around them to alert the user that something is
        different.

        EXAMPLES::

            sage: stt = StandardTableauTuples(size=3)
            sage: stt[0:8]
            [([[1, 2, 3]]),
             ([[1, 3], [2]]),
             ([[1, 2], [3]]),
             ([[1], [2], [3]]),
             ([[1, 2, 3]], []),
             ([[1, 2], [3]], []),
             ([[1, 3], [2]], []),
             ([[1], [2], [3]], [])]
            sage: stt[50]
            ([[3]], [[1]], [[2]])
            sage: stt[0].parent() is stt
            True
        """
        # Iterate through the PartitionTuples and then the tableaux
        from sage.combinat.partition_tuple import PartitionTuples
        for shape in PartitionTuples(size=self.size()):
            # We use StandardTableauTuples(shape) to correctly deal with the
            # case when the shape is of level 1.
            for t in StandardTableauTuples(shape):
                yield self.element_class(self, t, check=False)

    def an_element(self):
        r"""
        Return a particular element of the class.

        The element produced always has level 4.

        EXAMPLES::

            sage: StandardTableauTuples(size=2).an_element()
            ([[1]], [[2]], [], [])
            sage: StandardTableauTuples(size=4).an_element()
            ([[1]], [[2, 3, 4]], [], [])
        """
        if self.size()==0:
            return self.element_class(self, [[],[],[],[]])
        elif self.size()==1:
            return self.element_class(self, [[[1]],[],[],[]])
        else:
            # first component holds 1, the second holds 2,...,size in one row
            return self.element_class(self, [[[1]],[list(range(2,self.size()+1))],[],[]])
class StandardTableauTuples_level_size(StandardTableauTuples, DisjointUnionEnumeratedSets):
    """
    Class of all :class:`StandardTableauTuples` with a fixed ``level`` and a
    fixed ``size``.
    """
    def __init__(self,level,size):
        r"""
        Initialize the class of standard tableau tuples of level ``level`` and
        size ``size``.

        Input is not checked; please use
        :class:`StandardTableauTuples` to ensure the options are properly
        parsed.

        EXAMPLES::

            sage: StandardTableauTuples(size=4,level=3)
            Standard tableau tuples of level 3 and size 4
            sage: StandardTableauTuples(size=4,level=3) is StandardTableauTuples(3,4)
            True
        """
        StandardTableauTuples.__init__(self)
        from sage.combinat.partition_tuple import PartitionTuples_level_size
        # realize this parent as the disjoint union, over all partition tuples
        # of this level and size, of the standard tableau tuples of that shape
        DisjointUnionEnumeratedSets.__init__(self,
                Family(PartitionTuples_level_size(level, size), StandardTableauTuples_shape),
                facade=True, keepkey=False)
        self._level = level
        self._size = size

    def _repr_(self):
        """
        Return the string representation of ``self``.

        EXAMPLES::

            sage: StandardTableauTuples(3, 4)    # indirect doctest
            Standard tableau tuples of level 3 and size 4
        """
        return "Standard tableau tuples of level %s and size %s"%(self.level(),self.size())

    def __contains__(self, t):
        """
        Containment function for :class:`StandardTableauTuples` of fixed
        ``level`` and size.

        EXAMPLES::

            sage: tabs = StandardTableauTuples(level=4, size=3); tabs
            Standard tableau tuples of level 4 and size 3
            sage: [[[1,2]],[],[[3]],[]] in tabs
            True
            sage: tabs([[[1,2]],[],[[3]],[]]) == StandardTableauTuple([[[1,2]],[],[[3]],[]])
            True
            sage: StandardTableauTuple([[[1, 2]], [[3]]]) in tabs
            False
            sage: Tableau([[1]]) in tabs
            False

        Check that :trac:`14145` is fixed::

            sage: 1 in StandardTableauTuples(level=4, size=3)
            False
        """
        if isinstance(t, self.element_class):
            return self.size()==t.size() and self.level()==t.level()
        elif t in StandardTableauTuples():
            if all(s in Tableaux() for s in t):
                # t is a genuine tuple of tableaux: check both statistics
                return len(t)==self.level() and sum(sum(map(len,s)) for s in t)==self.size()
            else:
                # t is a single (level-one) tableau
                return self.level()==1 and self.size()==sum(map(len,t))
        else:
            return False

    def cardinality(self):
        """
        Return the number of elements in this set of tableaux.

        EXAMPLES::

            sage: StandardTableauTuples(3,2).cardinality()
            12
            sage: StandardTableauTuples(4,6).cardinality()
            31936
        """
        from sage.combinat.partition_tuple import PartitionTuples
        return sum(StandardTableauTuples_shape(shape).cardinality()
                   for shape in PartitionTuples(self.level(), self.size()))

    def __iter__(self):
        """
        Iterate through the finite class of all :class:`StandardTableauTuples`
        of a fixed ``level`` and size.

        Note that the level must be greater than 1 here so we can call
        :class:`StandardTableauTuples_shape` directly.

        EXAMPLES::

            sage: stt = StandardTableauTuples(3,3)
            sage: stt[0:8]
            [([[1, 2, 3]], [], []),
             ([[1, 2], [3]], [], []),
             ([[1, 3], [2]], [], []),
             ([[1], [2], [3]], [], []),
             ([[1, 2]], [[3]], []),
             ([[1, 3]], [[2]], []),
             ([[2, 3]], [[1]], []),
             ([[1], [2]], [[3]], [])]
            sage: stt[40]
            ([], [[2, 3]], [[1]])
            sage: stt[0].parent() is stt
            True
        """
        # Iterate through the PartitionTuples and then the tableaux
        from sage.combinat.partition_tuple import PartitionTuples
        for shape in PartitionTuples(level=self.level(), size=self.size()):
            for t in StandardTableauTuples_shape(shape):
                yield self.element_class(self, t, check=False)

    def an_element(self):
        r"""
        Return a particular element of the class.

        The nonempty components are placed in the last one or two positions;
        ``sum(parts, empties)`` below is list concatenation: it prepends the
        required number of empty components.

        EXAMPLES::

            sage: StandardTableauTuples(5,size=2).an_element()
            ([], [], [], [], [[1], [2]])
            sage: StandardTableauTuples(2,size=4).an_element()
            ([[1]], [[2, 3], [4]])
        """
        if self.size() == 0:
            return self.element_class(self, [[] for l in range(self.level())])
        elif self.size() == 1:
            return self.element_class(self, sum([[[[1]]]],[[] for i in range(self.level()-1)]))
        elif self.size() == 2:
            return self.element_class(self, sum([[[[1],[2]]]],[[] for i in range(self.level()-1)]))
        else:
            # NOTE(review): this branch uses level()-2 empty components, so it
            # presumably assumes level >= 2 -- the factory routes level-one
            # input to StandardTableaux, but confirm no other caller hits this
            return self.element_class(self, sum([[[[1]]],
                       [[list(range(2,self.size())),
                         [self.size()]]]],[[] for i in range(self.level()-2)]))
class StandardTableauTuples_shape(StandardTableauTuples):
"""
Class of all :class:`StandardTableauTuples` of a fixed shape.
"""
    def __init__(self, shape):
        r"""
        Initialize the class of standard tableau tuples of shape ``shape``.

        Input is not checked; please use
        :class:`StandardTableauTuples` to ensure the options are properly
        parsed.

        EXAMPLES::

            sage: STT = StandardTableauTuples([[2,1],[2,1,1]]); STT
            Standard tableau tuples of shape ([2, 1], [2, 1, 1])
            sage: STT.cardinality()
            210
        """
        super(StandardTableauTuples_shape, self).__init__(category=FiniteEnumeratedSets())
        from sage.combinat.partition_tuple import PartitionTuple
        self._shape = PartitionTuple(shape)
        # the level is the number of components of the (raw) input shape
        self._level = len(shape)
        self._size = shape.size()
    def __contains__(self, t):
        """
        Containment function of :class:`StandardTableauTuples` of fixed shape.

        EXAMPLES::

            sage: STT = StandardTableauTuples([[2,1],[1]])
            sage: [[[13, 67]], [[14,67]]] in STT
            False
            sage: [[[1, 4],[3]], [[2]]] in STT
            True
            sage: ([[1, 4],[3]], [[2]]) in STT
            True

        Check that :trac:`14145` is fixed::

            sage: 1 in StandardTableauTuples([[2,1],[1]])
            False
        """
        if isinstance(t, self.element_class):
            return self.shape() == t.shape()
        elif t in StandardTableauTuples():
            if all(s in Tableaux() for s in t):
                # compare the row lengths of t with the shape of self
                return [[len(_) for _ in s] for s in t] == self.shape()
            else:
                # NOTE(review): this branch compares a list with an int, so it
                # always returns False; it appears to be unreachable in normal
                # use because level-one shapes are routed to StandardTableaux
                # by the factory class -- confirm before relying on it
                return list(self.shape()) == sum(map(len,t))
        else:
            return False
def _repr_(self):
"""
The string representation of the :class:`StandardTableauTuples` of
fixed shape.
EXAMPLES::
sage: StandardTableauTuples([[2,1],[],[3,1,1,1]])
Standard tableau tuples of shape ([2, 1], [], [3, 1, 1, 1])
"""
return 'Standard tableau tuples of shape %s' % self.shape()
def __iter__(self):
r"""
Iterate through the finite class of :class:`StandardTableauTuples` of
a given :class:`PartitionTuple` shape.
The algorithm below is modelled on, but different than, the
corresponding iterator for the standard tableau of partition shape. In
particular, the tableaux are generated in the reverse order here as
that is easier (and more useful for applications to graded Specht
modules).
EXAMPLES::
sage: StandardTableauTuples([[1],[1],[1]]).list()
[([[1]], [[2]], [[3]]),
([[2]], [[1]], [[3]]),
([[1]], [[3]], [[2]]),
([[2]], [[3]], [[1]]),
([[3]], [[1]], [[2]]),
([[3]], [[2]], [[1]])]
sage: StandardTableauTuples([[2,1],[2]])[10:20]
[([[2, 3], [5]], [[1, 4]]),
([[1, 4], [5]], [[2, 3]]),
([[2, 4], [5]], [[1, 3]]),
([[3, 4], [5]], [[1, 2]]),
([[1, 5], [2]], [[3, 4]]),
([[1, 5], [3]], [[2, 4]]),
([[2, 5], [3]], [[1, 4]]),
([[1, 5], [4]], [[2, 3]]),
([[2, 5], [4]], [[1, 3]]),
([[3, 5], [4]], [[1, 2]])]
TESTS::
sage: correct_number=lambda mu : StandardTableauTuples(mu).cardinality()==len(StandardTableauTuples(mu).list())
sage: all(correct_number(mu) for mu in PartitionTuples(4,4))
True
"""
mu = self.shape()
n = mu.size()
# To generate the standard tableau tuples we are going to flatten them
# into a list tab which is obtained by reading the tableau along rows.
# The shape of mu gives a unique way of expanding this list into a
# tableau which is done using the function tableau_from_list() below. We
# start with the tableau containing the numbers 1,2,...,n entered in order
# along the rows of each component and then left to right along the
# components. This corresponds to the flat list tab=[1,2,...,n].
tab = list(range(1, n + 1))
# Set up two lists clen and cclen which give the "end points" of
# the components of mu and the rows of each component, respectively, so
# that the numbers contained in component c of the initial tableau are
# tab[ clen[c]:clen[c+1] ]
# and the numbers contained in row r of component c are
# tab[ clen[c]:clen[c+1] ][ cclen[c][r]: cclen[c][r+1] ]
# where tab=[1,2,...,n] as above
clen = [0]*(len(mu)+1)
cclen = [[0]*(len(mu[c])+1) for c in range(len(mu))]
for c in range(len(mu)):
for r in range(len(mu[c])):
cclen[c][r+1] = cclen[c][r]+mu[c][r]
clen[c+1] = clen[c] + cclen[c][-1]
# now use clen and cclen to "inflate" tab into a tableau
def tableau_from_list(tab):
"""
Converts a list tab=[t_1,...,t_n] into the mu-tableau obtained by
inserting t_1,..,t_n in order into the rows of mu, from left to right
in each component and then left to right along the components.
"""
return self.element_class(self, [ [tab[clen[c]:clen[c+1]][cclen[c][r]:cclen[c][r+1]]
for r in range(len(mu[c]))]
for c in range(len(mu)) ],
check=False)
# We're now ready to start generating the tableaux. Here's the first one:
initial_tableau = tableau_from_list(tab)
yield initial_tableau
# Number the columns of mu from left to right in each component starting
# from the last component, then to the second last and so on. For example,
# if \mu=[[2,1],[3]] then the column indices are [3 4 | 0 1 2]. Now
# define cols to be the list with cols[r] the cols index of r in
# the tableau tab, for 1\le i\le n. We initialise this for tab,
# corresponding to the initial tableau.
cols = [0]*(n+1) # cols[m] is the column index of m in tab
mins = [0]*n # the kth position of tab is always larger than mins[k]
c = len(mu)
offset = 0
for t in initial_tableau[::-1]:
for row in range(len(t)):
for col in range(len(t[row])):
cols[t[row][col]] = col + offset
mins[t[row][col]-1] = row + col
if t:
offset += len(t[0])
# To generate all of the tableaux we look for the first place where
# cols[r]<cols[r-1]. Then swap r and s where s<r is maximal such that it
# has a larger column index than r and is either in the same or an
# earlier component. (So, s=r-1 if r and r-1 are in the same
# component.) We then insert 1,2,...,r-1 in order along the rows in the
# positions that were occupied by 1,2,...,r and leave the numbers larger
# than r where they were. The next function determines the integer s
# that r swaps with.
# define a list so the index i appears in component component[i]
component = flatten([[i+1]*mu[i].size() for i in range(len(mu))])
def max_row_in_component(tab,r):
"""
Return the largest integer less than r which has higher column index and
is in the same or an earlier component, with the component index as
high as possible.
"""
# find the numbers less than r in same component as r-1
c = component[tab.index(r)]
while c > 0:
comp = [m for m in tab[clen[c-1]:clen[c]] if m < r and cols[m] > cols[r]]
if not comp:
c -= 1
else:
return comp[-1]
while True: # loop until we drop! We'll break out of the loop when done
r = 1 # find the smallest r with cols[r]<cols[r-1]
while r < len(cols) and cols[r-1] <= cols[r]:
r += 1
if r == len(cols):
break # we're at the last tableau so we're done!
new_cols = list(cols) # make copies of tab and cols
new_tab = list(tab)
s = max_row_in_component(tab,r)
new_tab[tab.index(s)] = r # move r to where s currently is
changed=[-1] * r # The list changed records the indexes in new_tab
# that are occupied by numbers less than or equal to r
new_cols[r] = cols[s] # The new column indices in new_tab
# the numbers in new_tab and new_cols which is slower.
changed[-1] = tab.index(s)
for t in range(1,r):
i=0 # find the leftmost index in tab where t can go
while t <= mins[i] or (tab[i] > r or i in changed):
i += 1
new_tab[i] = t
new_cols[t] = cols[tab[i]]
changed[t-1] = i
tab = new_tab
cols = new_cols
yield tableau_from_list(tab)
# all done!
return
def last(self):
r"""
Returns the last standard tableau tuple in ``self``, with respect to
the order that they are generated by the iterator. This is just the
standard tableau tuple with the numbers `1,2, \ldots, n`, where `n`
is :meth:`~TableauTuples.size`, entered in order down the columns form
right to left along the components.
EXAMPLES::
sage: StandardTableauTuples([[2],[2,2]]).last().pp()
5 6 1 3
2 4
"""
return StandardTableauTuples(self.shape().conjugate()).first().conjugate()
def cardinality(self):
r"""
Returns the number of standard Young tableau tuples of with the same
shape as the partition tuple ``self``.
Let `\mu=(\mu^{(1)},\dots,\mu^{(l)})` be the ``shape`` of the
tableaux in ``self`` and let `m_k=|\mu^{(k)}|`, for `1\le k\le l`.
Multiplying by a (unique) coset representative of the Young subgroup
`S_{m_1}\times\dots\times S_{m_l}` inside the symmetric group `S_n`, we
can assume that `t` is standard and the numbers `1,2...,n` are entered
in order from to right along the components of the tableau. Therefore,
there are
.. MATH::
\binom{n}{m_1,\dots,m_l}\prod_{k=1}^l |\text{Std}(\mu^{(k)})|
standard tableau tuples of this shape, where `|\text{Std}(\mu^{(k)})|`
is the number of standard tableau of shape `\mu^{(k)}`, for
`1 \leq k \leq l`. This is given by the hook length formula.
EXAMPLES::
sage: StandardTableauTuples([[3,2,1],[]]).cardinality()
16
sage: StandardTableauTuples([[1],[1],[1]]).cardinality()
6
sage: StandardTableauTuples([[2,1],[1],[1]]).cardinality()
40
sage: StandardTableauTuples([[3,2,1],[3,2]]).cardinality()
36960
"""
mu = self.shape()
return Integer(factorial(mu.size())
* prod(StandardTableaux(nu).cardinality() / factorial(nu.size()) for nu in mu))
def an_element(self):
r"""
Returns a particular element of the class.
EXAMPLES::
sage: StandardTableauTuples([[2],[2,1]]).an_element()
([[2, 4]], [[1, 3], [5]])
sage: StandardTableauTuples([[10],[],[]]).an_element()
([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], [], [])
"""
c = self.cardinality()
return self[c > 3 and 4 or (c > 1 and -1 or 0)]
def random_element(self):
r"""
Returns a random standard tableau in ``self``.
We do this by randomly selecting addable nodes to place
`1, 2, \ldots, n`. Of course we could do this recursively, but it's
more efficient to keep track of the (changing) list of addable nodes
as we go.
EXAMPLES::
sage: StandardTableauTuples([[2],[2,1]]).random_element() # random
([[1, 2]], [[3, 4], [5]])
"""
tab = [[] for i in range(self.level())] # start with the empty tableau and add nodes
mu = self.shape()
cells = mu.cells()
addables = [[i,0,0] for i in range(self.level()) if mu[i] != {}]
m = 0
while m < mu.size():
m += 1
i = int(round(random()*(len(addables)-1))) # index for a random addable cell
(k,r,c) = addables[i] # the actual cell
# remove the cell we just added from the list addable nodes
addables.pop(i)
# add m into the tableau
if tab[k] == []:
tab[k].append([])
if len(tab[k]) == r:
tab[k].append([])
tab[k][r].append(m)
# now update the list of addable cells - note they must also be in mu
if (k,r,c+1) in cells and (r == 0 or (r > 0 and len(tab[k][r-1]) > c+1)):
addables.append([k,r,c+1])
if (k,r+1,c) in cells and (c == 0 or (c > 0 and len(tab[k]) > r+1 and len(tab[k][r+1]) == c)):
addables.append([k,r+1,c])
# Just to be safe we check that tab is standard and has shape mu by
# using the class StandardTableauTuples(mu) to construct the tableau
return self.element_class(self, tab)
class StandardTableaux_residue(StandardTableauTuples):
    r"""
    Class of all standard tableau tuples with a fixed residue sequence.

    Implicitly, this also specifies the quantum characteristic, multicharge
    and hence the level and size of the tableaux.

    .. NOTE::

        This class is not intended to be called directly, but rather,
        it is accessed through the standard tableaux.

    EXAMPLES::

        sage: StandardTableau([[1,2,3],[4,5]]).residue_sequence(2).standard_tableaux()
        Standard tableaux with 2-residue sequence (0,1,0,1,0) and multicharge (0)
        sage: StandardTableau([[1,2,3],[4,5]]).residue_sequence(3).standard_tableaux()
        Standard tableaux with 3-residue sequence (0,1,2,2,0) and multicharge (0)
        sage: StandardTableauTuple([[[5,6],[7]],[[1,2,3],[4]]]).residue_sequence(2,(0,0)).standard_tableaux()
        Standard tableaux with 2-residue sequence (0,1,0,1,0,1,1) and multicharge (0,0)
        sage: StandardTableauTuple([[[5,6],[7]],[[1,2,3],[4]]]).residue_sequence(3,(0,1)).standard_tableaux()
        Standard tableaux with 3-residue sequence (1,2,0,0,0,1,2) and multicharge (0,1)
    """
    def __init__(self, residue):
        r"""
        Initialize ``self``.

        .. NOTE::

            Input is not checked; please use :class:`StandardTableauTuples`
            to ensure the options are properly parsed.

        EXAMPLES::

            sage: T = StandardTableau([[1,2,3],[4,5]]).residue_sequence(3).standard_tableaux()
            sage: TestSuite(T).run()
            sage: T = StandardTableauTuple([[[6],[7]],[[1,2,3],[4,5]]]).residue_sequence(2,(0,0)).standard_tableaux()
            sage: TestSuite(T).run()
        """
        # NOTE(review): ``residue`` is passed positionally to the parent
        # __init__ in addition to the category keyword -- confirm the parent
        # accepts this extra positional argument.
        super(StandardTableaux_residue, self).__init__(residue, category=FiniteEnumeratedSets())
        # Cache the parameters determined by the residue sequence.
        self._level = residue.level()
        self._multicharge = residue.multicharge()
        self._quantum_characteristic = residue.quantum_characteristic()
        self._residue = residue
        self._size = residue.size()

    def _repr_(self):
        """
        Return the string representation of ``self``.

        EXAMPLES::

            sage: StandardTableauTuple([[[1,2],[3]],[[4,5]]]).residue_sequence(3,(0,1)).standard_tableaux()
            Standard tableaux with 3-residue sequence (0,1,2,1,2) and multicharge (0,1)
        """
        return 'Standard tableaux with {}'.format(self._residue.__str__('and'))

    def __contains__(self, t):
        """
        Check containment of ``t`` in ``self``.

        EXAMPLES::

            sage: tabs = StandardTableauTuple([[[1,2],[3]],[[4,5]]]).residue_sequence(3,(0,1)).standard_tableaux()
            sage: tabs
            Standard tableaux with 3-residue sequence (0,1,2,1,2) and multicharge (0,1)
            sage: [[[1,2],[3]],[[4,5]]] in tabs
            True
            sage: [[[1,2],[3]],[[4,5]]] in tabs
            True
            sage: [[[4,5],[3]],[[1,2]]] in tabs
            False
            sage: [[[1,4,5],[3]],[[2]]] in tabs
            True
        """
        if not isinstance(t, self.element_class):
            # Coerce list input into a StandardTableauTuple first; anything
            # that cannot be coerced is certainly not contained in self.
            try:
                t = StandardTableauTuple(t)
            except ValueError:
                return False
        # Membership is determined entirely by the residue sequence.
        return (t.residue_sequence(self._quantum_characteristic,self._multicharge)
                == self._residue)

    def __iter__(self):
        r"""
        Iterate through ``self``.

        We construct this sequence of tableaux recursively, as it is easier
        (and more useful for applications to graded Specht modules).

        EXAMPLES::

            sage: R = StandardTableauTuple([[[1,2],[5]],[[3,4]]]).residue_sequence(3, (0,1))
            sage: list(R.standard_tableaux())
            [([[1, 2, 4], [5]], [[3]]),
             ([[1, 2, 4]], [[3, 5]]),
             ([[1, 2, 5], [4]], [[3]]),
             ([[1, 2], [4]], [[3, 5]]),
             ([[1, 2, 5]], [[3, 4]]),
             ([[1, 2], [5]], [[3, 4]]),
             ([[1, 3, 4], [5]], [[2]]),
             ([[1, 3, 4]], [[2, 5]]),
             ([[1, 3, 5], [4]], [[2]]),
             ([[1, 3], [4]], [[2, 5]]),
             ([[1, 3, 5]], [[2, 4]]),
             ([[1, 3], [5]], [[2, 4]])]
            sage: R = StandardTableauTuple([[[1,4],[2]],[[3]]]).residue_sequence(3,(0,1))
            sage: list(R.standard_tableaux())
            [([[1, 3], [2], [4]], []),
             ([[1, 3], [2]], [[4]]),
             ([[1, 4], [2], [3]], []),
             ([[1], [2], [3]], [[4]]),
             ([[1, 4], [2]], [[3]]),
             ([[1], [2], [4]], [[3]])]
        """
        if self._size == 0:
            yield StandardTableauTuple([[] for l in range(self._level)])  # the empty tableaux
            return
        # Recurse on the residue sequence restricted to 1,...,size-1, then
        # add ``size`` in every addable cell with the matching residue.
        for t in StandardTableaux_residue(self._residue.restrict(self._size-1)):
            for cell in t.shape().addable_cells():
                if self._residue[self._size] == self._residue.parent().cell_residue(*cell):
                    # a cell of the right residue
                    if self._level == 1:
                        yield t.add_entry(cell, self._size)
                    else:
                        # Bypass the checks in add_entry for higher levels.
                        tab = _add_entry_fast(t, cell, self._size)
                        yield self.element_class(self, tab, check=False)
class StandardTableaux_residue_shape(StandardTableaux_residue):
    """
    All standard tableau tuples with a fixed residue and shape.

    INPUT:

    - ``shape`` -- the shape of the partitions or partition tuples
    - ``residue`` -- the residue sequence of the label

    EXAMPLES::

        sage: res = StandardTableauTuple([[[1,3],[6]],[[2,7],[4],[5]]]).residue_sequence(3,(0,0))
        sage: tabs = res.standard_tableaux([[2,1],[2,1,1]]); tabs
        Standard (2,1|2,1^2)-tableaux with 3-residue sequence (0,0,1,2,1,2,1) and multicharge (0,0)
        sage: tabs.shape()
        ([2, 1], [2, 1, 1])
        sage: tabs.level()
        2
        sage: tabs[:6]
        [([[2, 7], [6]], [[1, 3], [4], [5]]),
         ([[1, 7], [6]], [[2, 3], [4], [5]]),
         ([[2, 3], [6]], [[1, 7], [4], [5]]),
         ([[1, 3], [6]], [[2, 7], [4], [5]]),
         ([[2, 5], [6]], [[1, 3], [4], [7]]),
         ([[1, 5], [6]], [[2, 3], [4], [7]])]
    """
    def __init__(self, residue, shape):
        r"""
        Initialize ``self``.

        .. NOTE::

            Input is not checked; please use :class:`StandardTableauTuples`
            to ensure the options are properly parsed.

        TESTS::

            sage: res = StandardTableauTuple([[[1,3],[6]],[[2,7],[4],[5]]]).residue_sequence(3,(0,0))
            sage: tabs = res.standard_tableaux([[2,1],[2,1,1]])
            sage: TestSuite(tabs).run()
        """
        if residue.size() != shape.size():
            # Bug fix: error message previously said "residue defence"
            # instead of "residue sequence".
            raise ValueError('the size of the shape and the length of the residue sequence must coincide')
        StandardTableauTuples.__init__(self, category=FiniteEnumeratedSets())
        # Cache the parameters determined by the residue sequence and shape.
        self._level = residue.level()
        self._multicharge = residue.multicharge()
        self._quantum_characteristic = residue.quantum_characteristic()
        self._residue = residue
        self._shape = shape
        self._size = residue.size()

    def __contains__(self, t):
        """
        Check containment of ``t`` in ``self``.

        An element belongs to ``self`` when it has both the fixed shape and
        the fixed residue sequence.

        EXAMPLES::

            sage: tabs=StandardTableauTuple([[[1,3]],[[2],[4]]]).residue_sequence(3,(0,1)).standard_tableaux([[2],[1,1]])
            sage: [ [[1,2,3,4]], [[]] ] in tabs
            False
            sage: ([[1, 2]], [[3], [4]]) in tabs
            True
        """
        if not isinstance(t, self.element_class):
            # Coerce list input into a StandardTableauTuple; input that
            # cannot be coerced is not contained in self.
            try:
                t = StandardTableauTuple(t)
            except ValueError:
                return False
        return (t.shape() == self._shape
                and t.residue_sequence(self._quantum_characteristic,self._multicharge)
                == self._residue)

    def _repr_(self):
        """
        Return the string representation of ``self``.

        EXAMPLES::

            sage: StandardTableau([[1,3],[2,4]]).residue_sequence(3).standard_tableaux([2,2])
            Standard (2^2)-tableaux with 3-residue sequence (0,2,1,0) and multicharge (0)
        """
        return 'Standard ({})-tableaux with {}'.format(self._shape._repr_compact_high(),
                                                       self._residue.__str__('and'))

    def __iter__(self):
        r"""
        Iterate through the standard tableaux in ``self``.

        We construct this sequence of tableaux recursively, as it is easier
        (and more useful for applications to graded Specht modules).

        EXAMPLES::

            sage: StandardTableau([[1,3],[2,4]]).residue_sequence(3).standard_tableaux([2,2])[:]
            [[[1, 3], [2, 4]]]
        """
        if self._size == 0:
            yield StandardTableauTuple([[] for l in range(self._level)])  # the empty tableaux
            return
        # Strip the largest entry from every removable cell of the correct
        # residue and recurse on the smaller shape/residue sequence.
        for cell in self._shape.removable_cells():
            if self._residue[self._size] == self._residue.parent().cell_residue(*cell):
                # a cell of the right residue
                for t in StandardTableaux_residue_shape(self._residue.restrict(self._size-1),
                                                        self._shape.remove_cell(*cell)):
                    if self._level == 1:
                        yield t.add_entry(cell, self._size)
                    else:
                        # Bypass the checks in add_entry for higher levels.
                        tab = _add_entry_fast(t, cell, self._size)
                        yield self.element_class(self, tab, check=False)

    def an_element(self):
        r"""
        Return a particular element of ``self``, or ``None`` when the
        class is empty.

        EXAMPLES::

            sage: T = StandardTableau([[1,3],[2]]).residue_sequence(3).standard_tableaux([2,1])
            sage: T.an_element()
            [[1, 3], [2]]
        """
        # the tableaux class may be empty so we trap a ValueError
        try:
            return self[0]
        except ValueError:
            return None
def _add_entry_fast(T, cell, m):
"""
Helper function to set ``cell`` to ``m`` in ``T`` or add the
cell to ``T`` with entry ``m``.
INPUT:
- ``T`` -- a tableau tuple
- ``cell`` -- the cell
- ``m`` -- the entry to add
OUTPUT:
- a list of lists of lists representing the tableau tuple
.. WARNING::
This function assumes that ``cell`` is either in ``T`` or
and addable corner and does no checking of the input.
TESTS::
sage: from sage.combinat.tableau_tuple import _add_entry_fast
sage: s = StandardTableauTuple([ [[3,4,7],[6,8]], [[9,13],[12]], [[1,5],[2,11],[10]] ]); s.pp()
3 4 7 9 13 1 5
6 8 12 2 11
10
sage: t = _add_entry_fast(s, (0,0,3), 14)
sage: TableauTuple(t).pp()
3 4 7 14 9 13 1 5
6 8 12 2 11
10
sage: t = _add_entry_fast(s, (1,1,1), 14)
sage: TableauTuple(t).pp()
3 4 7 9 13 1 5
6 8 12 14 2 11
10
"""
(k,r,c) = cell
tab = T.to_list()
try:
tab[k][r][c] = m
except IndexError:
# k,r,c should otherwise be an addable cell
# add (k,r,c) is an addable cell the following should work
# so we do not need to trap anything
if r == len(tab[k]):
tab[k].append([])
tab[k][r].append(m)
return tab
|
<filename>spar_python/circuit_generation/ibm/ibm_circuit_test.py
# *****************************************************************
# Copyright 2015 MIT Lincoln Laboratory
# Project: SPAR
# Authors: SY
# Description: IBM TA2 circuit class test
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 08 Nov 2012 SY Original Version
# *****************************************************************
import ibm_wire as iw
import ibm_gate_mul as igm
import ibm_gate_add as iga
import ibm_circuit as ic
import unittest
import spar_python.common.spar_random as sr
class TestAndGate(unittest.TestCase):
    """Tests for IBM TA2 circuit display and gate/input accounting."""

    # Desired batch size shared by all test circuits.
    BATCH_SIZE = 10

    # Expected display() output for the sample circuit; the dead gate g3
    # (when present) must never appear here.
    EXPECTED_DISPLAY = ("W=3,D=1.2,L=10\ng1:LADD(w1,w2)\ng2:LMUL(g1,w3)"
                        "\nog:LADD(g1,g2)")

    def _build_circuit(self, with_dead_gate=False):
        """
        Build the shared sample circuit: three input wires, g1 = w1 + w2,
        g2 = g1 * w3 and output og = g1 + g2.

        When ``with_dead_gate`` is true, also create g3 = w2 + g2, which
        does not lead to the output gate.
        """
        circuit = ic.IBMCircuit(self.BATCH_SIZE)
        w1 = iw.IBMInputWire("w1", circuit)
        w2 = iw.IBMInputWire("w2", circuit)
        w3 = iw.IBMInputWire("w3", circuit)
        g1 = iga.IBMAddGate("g1", w1, w2, circuit)
        g2 = igm.IBMMulGate("g2", g1, w3, circuit)
        if with_dead_gate:
            # Deliberately unused: it should be trimmed from the display.
            iga.IBMAddGate("g3", w2, g2, circuit)
        output_gate = iga.IBMAddGate("og", g1, g2, circuit)
        circuit.set_input_wires([w1, w2, w3])
        circuit.set_output_gate(output_gate)
        return circuit

    def test_display(self):
        """
        Tests that the display method works as intended in a simple circuit.
        """
        circuit = self._build_circuit()
        self.assertEqual(self.EXPECTED_DISPLAY, circuit.display())

    def test_trim(self):
        """
        Tests that gates that do not lead to the output gate are not displayed.
        """
        circuit = self._build_circuit(with_dead_gate=True)
        self.assertEqual(self.EXPECTED_DISPLAY, circuit.display())

    def test_get_num_gates(self):
        """
        Tests that get_num_gates counts gates by function name, including
        gates that do not lead to the output gate.
        """
        circuit = self._build_circuit(with_dead_gate=True)
        self.assertEqual(3, circuit.get_num_gates(gate_func_name="LADD"))
        self.assertEqual(1, circuit.get_num_gates(gate_func_name="LMUL"))

    def test_get_num_inputs(self):
        """
        Tests that the get_num_inputs method functions as expected.
        """
        circuit = self._build_circuit()
        self.assertEqual(3, circuit.get_num_inputs())
|
<reponame>burningmantech/ranger-ims-server
##
# See the file COPYRIGHT for copyright information.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Incident Management System directory service integration.
"""
from pathlib import Path
from time import time
from typing import Any, ClassVar, Iterable, Mapping, Optional, Sequence, TextIO
from attr import Factory, attrs
from twisted.logger import Logger
from yaml import safe_load as parseYAML
from ims.model import Position, Ranger, RangerStatus
from .._directory import DirectoryError, IMSDirectory, IMSUser, RangerDirectory
__all__ = ()
def statusFromID(strValue: str) -> RangerStatus:
    """
    Convert a directory status identifier into a :class:`RangerStatus`.
    Unrecognized identifiers map to ``RangerStatus.other``.
    """
    known = {
        "active": RangerStatus.active,
        "inactive": RangerStatus.inactive,
        "vintage": RangerStatus.vintage,
    }
    try:
        return known[strValue]
    except KeyError:
        return RangerStatus.other
def rangersFromMappings(
    sequence: Iterable[Mapping[str, Any]]
) -> Iterable[Ranger]:
    """
    Yield a :class:`Ranger` for each mapping in ``sequence``.
    Raises :exc:`DirectoryError` if ``sequence`` is not a list or if any
    mapping cannot be parsed.
    """
    # Exact type check (not isinstance) to match the file's strict style.
    if type(sequence) is not list:
        raise DirectoryError(f"Rangers must be sequence: {sequence!r}")
    for mapping in sequence:
        try:
            ranger = rangerFromMapping(mapping)
        except DirectoryError:
            # Already a descriptive directory error; propagate unchanged.
            raise
        except Exception as e:
            raise DirectoryError(f"Unable to parse Ranger records: {e}")
        yield ranger
def rangerFromMapping(mapping: Mapping[str, Any]) -> Ranger:
    """
    Build a :class:`Ranger` from a YAML-derived mapping.

    Each field is validated with exact type checks and a descriptive
    :exc:`DirectoryError` is raised for the first invalid field.
    """
    if type(mapping) is not dict:
        raise DirectoryError(f"Ranger must be mapping: {mapping!r}")

    handle = mapping.get("handle", None)
    if handle is None:
        raise DirectoryError(f"Ranger must have handle: {mapping!r}")
    elif type(handle) is not str:
        raise DirectoryError(f"Ranger handle must be text: {handle!r}")

    name = mapping.get("name", "")
    if type(name) is not str:
        raise DirectoryError(f"Ranger name must be text: {name!r}")

    _status = mapping.get("status", "")
    if type(_status) is not str:
        raise DirectoryError(f"Ranger status must be text: {_status!r}")
    status = statusFromID(_status)

    _email = mapping.get("email", [])
    email: Sequence[str]
    if type(_email) is str:
        # A single address may be given as bare text.
        email = (_email,)
    elif type(_email) is list:
        for e in _email:
            if type(e) is not str:
                raise DirectoryError(f"Ranger email must be text: {e!r}")
        email = tuple(_email)
    else:
        raise DirectoryError(
            f"Ranger email must be text or sequence of text: {_email!r}"
        )

    enabled = mapping.get("enabled", None)
    if type(enabled) is not bool:
        raise DirectoryError(f"Ranger enabled must be boolean: {enabled!r}")

    password = mapping.get("password", None)
    if password is not None and type(password) is not str:
        raise DirectoryError(f"Ranger password must be text: {password!r}")

    return Ranger(
        handle=handle,
        name=name,
        status=status,
        email=email,
        enabled=enabled,
        directoryID=None,
        # Fix: use the already-validated local instead of re-reading the
        # mapping a second time (same value, one lookup).
        password=password,
    )
def positionsFromMappings(
    sequence: Iterable[Mapping[str, Any]]
) -> Iterable[Position]:
    """
    Yield a :class:`Position` for each mapping in ``sequence``.
    Raises :exc:`DirectoryError` if ``sequence`` is not a list or if any
    mapping cannot be parsed.
    """
    # Exact type check (not isinstance) to match the file's strict style.
    if type(sequence) is not list:
        raise DirectoryError(f"Positions must be sequence: {sequence!r}")
    for mapping in sequence:
        try:
            position = positionFromMapping(mapping)
        except DirectoryError:
            # Already a descriptive directory error; propagate unchanged.
            raise
        except Exception as e:
            raise DirectoryError(f"Unable to parse position records: {e}")
        yield position
def positionFromMapping(mapping: Mapping[str, Any]) -> Position:
    """
    Build a :class:`Position` from a YAML-derived mapping.

    The mapping must have a text ``name`` and may have a ``members`` list
    of text handles; anything else raises :exc:`DirectoryError`.
    """
    if type(mapping) is not dict:
        raise DirectoryError(f"Position must be mapping: {mapping!r}")

    name: Optional[str] = mapping.get("name", None)
    if name is None:
        raise DirectoryError(f"Position must have name: {mapping!r}")
    if type(name) is not str:
        raise DirectoryError(f"Position name must be text: {name!r}")

    members: Sequence[str] = mapping.get("members", [])
    if type(members) is not list:
        raise DirectoryError(
            f"Position members must be sequence of text: {members!r}"
        )
    for member in members:
        if type(member) is not str:
            raise DirectoryError(f"Position members must be text: {member!r}")

    return Position(name=name, members=frozenset(members))
@attrs(frozen=True, auto_attribs=True, kw_only=True)
class FileDirectory(IMSDirectory):
    """
    IMS directory loaded from a file.
    """

    _log: ClassVar[Logger] = Logger()

    @attrs(frozen=False, auto_attribs=True, kw_only=True, eq=False)
    class _State:
        """
        Internal mutable state for :class:`RangerDirectory`.
        """

        # Directory built from the most recent successful load; starts empty.
        directory: RangerDirectory = Factory(
            lambda: RangerDirectory(rangers=(), positions=())
        )
        # Wall-clock time of the last (re)load, seconds since epoch.
        # NOTE(review): deliberately left unannotated -- with
        # auto_attribs=True an annotation would turn this into an attrs
        # field; confirm that a shared class attribute is the intent.
        lastLoadTime = 0.0

    # Path to the YAML directory file (attrs constructor field).
    path: Path

    checkInterval = 1.0  # Don't restat the file more often than this (seconds)

    # Per-instance mutable state; Factory gives each instance its own _State.
    _state: _State = Factory(_State)

    def _mtime(self) -> float:
        # Modification time of the directory file, seconds since epoch.
        return self.path.stat().st_mtime

    def _open(self) -> TextIO:
        # Open the directory file for text reading.
        return self.path.open()

    def _reload(self) -> None:
        """
        Re-parse the YAML directory file, but only when the throttle
        interval has elapsed and the file was modified since the last load.
        Raises DirectoryError if the schema version is missing or unknown.
        """
        now = time()
        elapsed = now - self._state.lastLoadTime
        if (
            elapsed >= self.checkInterval
            and self._mtime() >= self._state.lastLoadTime
        ):
            self._log.info("Reloading directory file...")
            with self._open() as fh:
                yaml = parseYAML(fh)
            schemaVersion = yaml.get("schema")
            if schemaVersion is None:
                raise DirectoryError("No schema version in YAML file")
            if schemaVersion != 0:
                raise DirectoryError("Unknown schema version in YAML file")
            rangers = tuple(rangersFromMappings(yaml.get("rangers", ())))
            positions = tuple(
                positionsFromMappings(yaml.get("positions", ()))
            )
            # Swap in the new directory atomically, then record the load time.
            self._state.directory = RangerDirectory(
                rangers=rangers, positions=positions
            )
            self._state.lastLoadTime = now

    async def personnel(self) -> Iterable[Ranger]:
        # Refresh from disk if stale, then delegate to the inner directory.
        self._reload()
        return await self._state.directory.personnel()

    async def lookupUser(self, searchTerm: str) -> Optional[IMSUser]:
        # Refresh from disk if stale, then delegate to the inner directory.
        self._reload()
        return await self._state.directory.lookupUser(searchTerm)
|
<reponame>JoleProject/Jole<gh_stars>0
#!/usr/bin/env python3
"""
This is an example to train a task with DDPG algorithm.
Here it creates a gym environment InvertedDoublePendulum. And uses a DDPG with
1M steps.
Results:
AverageReturn: 250
RiseTime: epoch 499
"""
import gym
import tensorflow as tf
from garage.envs.wrappers.reverse_action import ReverseAction
from garage.envs.wrappers.double_action import DoubleAction
from garage.experiment import run_experiment
from garage.np.exploration_strategies import OUStrategy
from garage.replay_buffer import SimpleReplayBuffer
from garage.tf.algos.jole_ddpg_stochastic import JoLeDDPGStochastic
from garage.tf.envs import TfEnv
from garage.tf.experiment import LocalTFRunner
from garage.tf.policies import ContinuousMLPPolicy
from garage.tf.q_functions import ContinuousMLPQFunction
from garage.tf.env_functions.cvae_obs_generator_reward import CVAERewardGenerator
from garage.tf.env_functions.cvae_obs_recognition_reward import CVAERewardRecognition
from garage.tf.env_functions.cvae_obs_generator import CVAEObsGenerator
from garage.tf.env_functions.cvae_obs_recognition import CVAEObsRecognition
def run_task(snapshot_config, *_):
    """Run task.

    Builds a reversed-action HalfCheetah-v3 environment, the actor/critic
    networks, the CVAE reward/observation models and a replay buffer, then
    trains JoLe-DDPG for 500 epochs.
    """
    with LocalTFRunner(snapshot_config=snapshot_config) as runner:
        # Environment: HalfCheetah with every action reversed by the wrapper.
        env = TfEnv(gym.make('HalfCheetah-v3'))
        env = ReverseAction(env)

        # Ornstein-Uhlenbeck exploration noise.
        action_noise = OUStrategy(env.spec, sigma=0.2)

        # Deterministic actor network.
        policy = ContinuousMLPPolicy(env_spec=env.spec,
                                     hidden_sizes=[400, 300],
                                     hidden_nonlinearity=tf.nn.relu,
                                     output_nonlinearity=tf.nn.tanh)

        # Critic (Q-function) network.
        qf = ContinuousMLPQFunction(env_spec=env.spec,
                                    hidden_sizes=[400, 300],
                                    hidden_nonlinearity=tf.nn.relu)

        # CVAE generator/recognition pairs for the learned reward model...
        reward_gen = CVAERewardGenerator(env_spec=env.spec,
                                         hidden_sizes=[400, 300],
                                         hidden_nonlinearity=tf.nn.relu,
                                         z_dim=1)
        reward_rec = CVAERewardRecognition(env_spec=env.spec,
                                           hidden_sizes=[400, 300],
                                           hidden_nonlinearity=tf.nn.relu,
                                           z_dim=1)
        # ...and for the learned observation (transition) model.
        obs_gen = CVAEObsGenerator(env_spec=env.spec,
                                   hidden_sizes=[400, 300],
                                   hidden_nonlinearity=tf.nn.relu,
                                   z_dim=1)
        obs_rec = CVAEObsRecognition(env_spec=env.spec,
                                     hidden_sizes=[400, 300],
                                     hidden_nonlinearity=tf.nn.relu,
                                     z_dim=1)

        replay_buffer = SimpleReplayBuffer(env_spec=env.spec,
                                           size_in_transitions=int(1e6),
                                           time_horizon=100)

        jole_ddpg = JoLeDDPGStochastic(env_spec=env.spec,
                                       policy=policy,
                                       policy_lr=1e-4,
                                       qf_lr=1e-3,
                                       qf=qf,
                                       reward_model_generator=reward_gen,
                                       reward_model_recognition=reward_rec,
                                       obs_model_generator=obs_gen,
                                       obs_model_recognition=obs_rec,
                                       replay_buffer=replay_buffer,
                                       target_update_tau=1e-2,
                                       n_train_steps=50,
                                       discount=0.99,
                                       min_buffer_size=int(1e4),
                                       exploration_strategy=action_noise,
                                       policy_optimizer=tf.train.AdamOptimizer,
                                       qf_optimizer=tf.train.AdamOptimizer,
                                       z_dim=1)

        runner.setup(algo=jole_ddpg, env=env)
        runner.train(n_epochs=500, n_epoch_cycles=20, batch_size=100)
env_name = "HalfCheetah-v3"
# Launch five independent runs (seeds 1..5); each run logs to its own
# seed-specific directory.
# NOTE(review): this loop runs at import time -- there is no
# ``if __name__ == "__main__":`` guard; confirm that is intentional.
for i in range(1, 6):
    run_experiment(
        run_task,
        snapshot_mode='none',
        seed=i,
        log_dir="data/type_stochastic_ddpg/{}/jole_ddpg_with_sigma/reverse_action/{}_local".format(env_name,i)
    )
|
#!/usr/bin/env python
# Copyright (c) 2009, <NAME>'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A clone of 'ifconfig' on UNIX.
$ python scripts/ifconfig.py
lo:
stats : speed=0MB, duplex=?, mtu=65536, up=yes
incoming : bytes=1.95M, pkts=22158, errs=0, drops=0
outgoing : bytes=1.95M, pkts=22158, errs=0, drops=0
IPv4 address : 127.0.0.1
netmask : 255.0.0.0
IPv6 address : ::1
netmask : ffdf8:f53e:61e4::18
MAC address : 00:00:00:00:00:00
docker0:
stats : speed=0MB, duplex=?, mtu=1500, up=yes
incoming : bytes=3.48M, pkts=65470, errs=0, drops=0
outgoing : bytes=164.06M, pkts=112993, errs=0, drops=0
IPv4 address : 172.17.0.1
broadcast : 172.17.0.1
netmask : 255.255.0.0
IPv6 address : fe80::42:27ff:fe5e:799e%docker0
netmask : ffff:ffff:ffff:ffff::
MAC address : 02:42:27:5e:79:9e
broadcast : ff:ff:ff:ff:ff:ff
wlp3s0:
stats : speed=0MB, duplex=?, mtu=1500, up=yes
incoming : bytes=7.04G, pkts=5637208, errs=0, drops=0
outgoing : bytes=372.01M, pkts=3200026, errs=0, drops=0
IPv4 address : 10.0.0.2
broadcast : 10.255.255.255
netmask : 255.0.0.0
IPv6 address : fe80::ecb3:1584:5d17:937%wlp3s0
netmask : ffff:ffff:ffff:ffff::
MAC address : 48:45:20:59:a4:0c
broadcast : ff:ff:ff:ff:ff:ff
"""
from __future__ import print_function
import socket
import psutil
from psutil._common import bytes2human
# Human-readable labels for socket address families.
af_map = {
    socket.AF_INET: 'IPv4',
    socket.AF_INET6: 'IPv6',
    psutil.AF_LINK: 'MAC',
}

# Human-readable labels for NIC duplex modes.
duplex_map = {
    psutil.NIC_DUPLEX_FULL: "full",
    psutil.NIC_DUPLEX_HALF: "half",
    psutil.NIC_DUPLEX_UNKNOWN: "?",
}
def main():
    """Print per-NIC stats, I/O counters and addresses, ifconfig-style."""
    nic_stats = psutil.net_if_stats()
    nic_io = psutil.net_io_counters(pernic=True)
    for name, addresses in psutil.net_if_addrs().items():
        print("%s:" % (name))
        # Link stats (speed/duplex/MTU/up) are only shown when psutil
        # reports them for this interface.
        st = nic_stats.get(name)
        if st is not None:
            print(" stats : ", end='')
            print("speed=%sMB, duplex=%s, mtu=%s, up=%s" % (
                st.speed, duplex_map[st.duplex], st.mtu,
                "yes" if st.isup else "no"))
        # Byte/packet/error/drop counters, when available.
        io = nic_io.get(name)
        if io is not None:
            print(" incoming : ", end='')
            print("bytes=%s, pkts=%s, errs=%s, drops=%s" % (
                bytes2human(io.bytes_recv), io.packets_recv, io.errin,
                io.dropin))
            print(" outgoing : ", end='')
            print("bytes=%s, pkts=%s, errs=%s, drops=%s" % (
                bytes2human(io.bytes_sent), io.packets_sent, io.errout,
                io.dropout))
        # One line per address family (IPv4/IPv6/MAC), with optional
        # broadcast / netmask / point-to-point details.
        for addr in addresses:
            print(" %-4s" % af_map.get(addr.family, addr.family), end="")
            print(" address : %s" % addr.address)
            if addr.broadcast:
                print(" broadcast : %s" % addr.broadcast)
            if addr.netmask:
                print(" netmask : %s" % addr.netmask)
            if addr.ptp:
                print(" p2p : %s" % addr.ptp)
        print("")
# Script entry point.
if __name__ == '__main__':
    main()
|
# Chapter 4 - Analyzing time series and images
# -------------------------------------------------------------------------------------------#
# Multiple time series on common axes
import matplotlib.pyplot as plt

# Draw each stock's series on the shared axes with its own color/label.
for series, color, label in (
    (aapl, 'blue', 'AAPL'),
    (ibm, 'green', 'IBM'),
    (csco, 'red', 'CSCO'),
    (msft, 'magenta', 'MSFT'),
):
    plt.plot(series, color=color, label=label)

# Legend in the top left corner, slanted tick labels, then render.
plt.legend(loc='upper left')
plt.xticks(rotation=60)
plt.show()
# -------------------------------------------------------------------------------------------#
# Multiple time series slices (1)
# Top subplot: the full AAPL series in blue.
plt.subplot(2, 1, 1)
plt.xticks(rotation=45)
plt.title('AAPL: 2001 to 2011')
plt.plot(aapl, color='blue')

# Bottom subplot: the 2007-2008 slice (inclusive) in black.
view = aapl['2007':'2008']
plt.subplot(2, 1, 2)
plt.xticks(rotation=45)
plt.title('AAPL: 2007 to 2008')
plt.plot(view, color='black')

plt.tight_layout()
plt.show()
# -------------------------------------------------------------------------------------------#
# Multiple time series slices (2)
# Top subplot: Nov. 2007 - Apr. 2008 (inclusive) in red.
view = aapl['2007-11':'2008-04']
plt.subplot(2, 1, 1)
plt.xticks(rotation=45)
plt.title('AAPL: Nov. 2007 to Apr. 2008')
plt.plot(view, color='red')

# Bottom subplot: just January 2008, in green.
view = aapl['2008-01']
plt.subplot(2, 1, 2)
plt.xticks(rotation=45)
plt.title('AAPL: Jan. 2008')
plt.plot(view, color='green')

# Improve spacing and display the plot.
plt.tight_layout()
plt.show()
# -------------------------------------------------------------------------------------------#
# Plotting an inset view
# Slice Nov. 2007 - Apr. 2008 (inclusive) for the inset.
view = aapl['2007-11':'2008-04']

# Main axes: the whole series in green.
plt.plot(aapl, color='green')
plt.xticks(rotation=45)
plt.title('AAPL: 2001-2011')

# Inset axes at [left, bottom, width, height] in figure coordinates;
# subsequent pyplot calls draw into these axes.
plt.axes([0.25, 0.5, 0.35, 0.35])
plt.plot(view, color='red')
plt.xticks(rotation=45)
plt.title('2007/11-2008/04')
plt.show()
# -------------------------------------------------------------------------------------------#
# Plotting moving averages
# One subplot per window length; each overlays the raw series as a black
# dash-dot line behind the colored moving average.
for position, (average, color, title) in enumerate(
    (
        (mean_30, 'green', '30d averages'),
        (mean_75, 'red', '75d averages'),
        (mean_125, 'magenta', '125d averages'),
        (mean_250, 'cyan', '250d averages'),
    ),
    start=1,
):
    plt.subplot(2, 2, position)
    plt.plot(average, color)
    plt.plot(aapl, 'k-.')
    plt.xticks(rotation=60)
    plt.title(title)

# Display the plot.
plt.show()
# -------------------------------------------------------------------------------------------#
# Plotting moving standard deviations
# Overlay all four rolling standard deviations on shared axes.
for deviation, color, label in (
    (std_30, 'red', '30d'),
    (std_75, 'cyan', '75d'),
    (std_125, 'green', '125d'),
    (std_250, 'magenta', '250d'),
):
    plt.plot(deviation, color=color, label=label)

# Legend, title, then render.
plt.legend(loc='upper left')
plt.title('Moving standard deviations')
plt.show()
# -------------------------------------------------------------------------------------------#
# Extracting a histogram from a grayscale image
# Load the image into an array: image
image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg')

# Display image in top subplot using color map 'gray'.
plt.subplot(2, 1, 1)
plt.title('Original image')
plt.axis('off')
plt.imshow(image, cmap='gray')

# Flatten the pixel grid into a 1-D array for histogramming.
pixels = image.flatten()

# Display a normalized histogram of the pixels in the bottom subplot.
# Fix: `normed` was removed from Matplotlib (3.1+); `density=True` is the
# supported equivalent.
plt.subplot(2, 1, 2)
plt.xlim((0, 255))
plt.title('Normalized histogram')
plt.hist(pixels, bins=64, color='red', alpha=0.4, range=(0, 256), density=True)
plt.show()
# -------------------------------------------------------------------------------------------#
# Cumulative Distribution Function from an image histogram
# Load the image into an array: image
image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg')

# Display image in top subplot using color map 'gray'.
plt.subplot(2, 1, 1)
plt.imshow(image, cmap='gray')
plt.title('Original image')
plt.axis('off')

# Flatten the image into 1 dimension: pixels
pixels = image.flatten()

# Bottom subplot: raw-count PDF histogram of the pixels.
# Fixes: `normed` was removed from Matplotlib (3.1+), so use `density`;
# `plt.grid` expects a boolean, not the string 'off'.
plt.subplot(2, 1, 2)
pdf = plt.hist(pixels, bins=64, range=(0, 256), density=False,
               color='red', alpha=0.4)
plt.grid(False)

# Use plt.twinx() to overlay the CDF on a second y-axis.
plt.twinx()
cdf = plt.hist(pixels, bins=64, range=(0, 256),
               density=True, cumulative=True,
               color='blue', alpha=0.4)

# Specify x-axis range, hide the grid, add title and display the plot.
plt.xlim((0, 256))
plt.grid(False)
plt.title('PDF & CDF (original image)')
plt.show()
# -------------------------------------------------------------------------------------------#
# Equalizing an image histogram
import numpy as np  # fix: np.interp is used below but numpy was never imported

# Load the image into an array: image
image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg')

# Flatten the image into 1 dimension: pixels
pixels = image.flatten()

# Build the CDF of the pixel intensities; 256 bins so bins[:-1] lines up
# with the integer intensity levels. (`density` replaces the removed
# Matplotlib `normed` argument.)
cdf, bins, patches = plt.hist(pixels, bins=256, range=(0, 256),
                              density=True, cumulative=True)

# Histogram equalization: map every pixel through the scaled CDF.
new_pixels = np.interp(pixels, bins[:-1], cdf * 255)

# Reshape new_pixels as a 2-D array: new_image
new_image = new_pixels.reshape(image.shape)

# Display the equalized image in the top subplot with the 'gray' map.
plt.subplot(2, 1, 1)
plt.title('Equalized image')
plt.axis('off')
plt.imshow(new_image, cmap='gray')

# Bottom subplot: raw-count PDF of the equalized pixels
# (`grid` takes a boolean, not the string 'off').
plt.subplot(2, 1, 2)
pdf = plt.hist(new_pixels, bins=64, range=(0, 256), density=False,
               color='red', alpha=0.4)
plt.grid(False)

# Use plt.twinx() to overlay the CDF of the new pixels.
plt.twinx()
plt.xlim((0, 256))
plt.grid(False)
plt.title('PDF & CDF (equalized image)')
cdf = plt.hist(new_pixels, bins=64, range=(0, 256),
               cumulative=True, density=True,
               color='blue', alpha=0.4)
plt.show()
# -------------------------------------------------------------------------------------------#
# Extracting histograms from a color image
# Load the image into an array: image
image = plt.imread('hs-2004-32-b-small_web.jpg')

# Display image in top subplot.
plt.subplot(2, 1, 1)
plt.title('Original image')
plt.axis('off')
plt.imshow(image)

# Extract 2-D arrays of the RGB channels (axis 2 holds R, G, B in order).
red, green, blue = image[:, :, 0], image[:, :, 1], image[:, :, 2]

# Flatten the 2-D arrays of the RGB channels into 1-D.
red_pixels = red.flatten()
blue_pixels = blue.flatten()
green_pixels = green.flatten()

# Overlay histograms of the pixels of each color in the bottom subplot.
# Fixes: each channel is now drawn in its own color (the green and blue
# plot colors were swapped), and `density` replaces the removed
# Matplotlib `normed` argument.
plt.subplot(2, 1, 2)
plt.title('Histograms from color image')
plt.xlim((0, 256))
plt.hist(red_pixels, bins=64, density=True, color='red', alpha=0.2)
plt.hist(green_pixels, bins=64, density=True, color='green', alpha=0.2)
plt.hist(blue_pixels, bins=64, density=True, color='blue', alpha=0.2)

# Display the plot.
plt.show()
# -------------------------------------------------------------------------------------------#
# Extracting bivariate histograms from a color image
# Load the image into an array: image
image = plt.imread('hs-2004-32-b-small_web.jpg')

# Extract RGB channels and flatten into 1-D arrays.
# Fix: axis 2 holds the channels in R, G, B order, so unpack as
# red, green, blue (the original assigned the green plane to `blue`
# and the blue plane to `green`).
red, green, blue = image[:, :, 0], image[:, :, 1], image[:, :, 2]
red_pixels = red.flatten()
blue_pixels = blue.flatten()
green_pixels = green.flatten()

# One 2-D histogram per channel pair. (`density` replaces the removed
# Matplotlib `normed` argument; `grid` takes a boolean, not 'off'.)
for position, (x_pixels, y_pixels, x_name, y_name) in enumerate(
    (
        (red_pixels, green_pixels, 'red', 'green'),
        (green_pixels, blue_pixels, 'green', 'blue'),
        (blue_pixels, red_pixels, 'blue', 'red'),
    ),
    start=1,
):
    plt.subplot(2, 2, position)
    plt.grid(False)
    plt.xticks(rotation=60)
    plt.xlabel(x_name)
    plt.ylabel(y_name)
    plt.hist2d(x_pixels, y_pixels, bins=(32, 32), density=True)

# Display the plot.
plt.show()
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------#
|
"""
Extract drums MIDI files. Some drum tracks are split into multiple separate
drum instruments, in which case we try to merge them into a single instrument
and save only 1 MIDI file.
VERSION: Magenta 1.1.7
"""
import argparse
import copy
import os
import random
import shutil
import timeit
from itertools import cycle
from multiprocessing import Manager
from multiprocessing.pool import Pool
from typing import List
from typing import Optional
import matplotlib.pyplot as plt
import tables
from pretty_midi import Instrument
from pretty_midi import PrettyMIDI
from lakh_utils import get_matched_midi_md5
from lakh_utils import get_midi_path
from lakh_utils import get_msd_score_matches
from lakh_utils import msd_id_to_h5
from multiprocessing_utils import AtomicCounter
# Command-line configuration for the extraction run.
parser = argparse.ArgumentParser()
# Number of MSD ids to sample; 0/None processes the whole dataset.
parser.add_argument("--sample_size", type=int, default=1000)
# Number of worker processes for the multiprocessing pool.
parser.add_argument("--pool_size", type=int, default=4)
parser.add_argument("--path_dataset_dir", type=str, required=True)
parser.add_argument("--path_match_scores_file", type=str, required=True)
parser.add_argument("--path_output_dir", type=str, required=True)
args = parser.parse_args()

# The list of all MSD ids (we might process only a sample)
MSD_SCORE_MATCHES = get_msd_score_matches(args.path_match_scores_file)
def extract_drums(msd_id: str) -> Optional[PrettyMIDI]:
    """
    Extracts a PrettyMIDI instance of all the merged drum tracks
    from the given MSD id.
    :param msd_id: the MSD id
    :return: the PrettyMIDI instance of the merged drum tracks
    """
    os.makedirs(args.path_output_dir, exist_ok=True)
    midi_md5 = get_matched_midi_md5(msd_id, MSD_SCORE_MATCHES)
    midi_path = get_midi_path(msd_id, midi_md5, args.path_dataset_dir)
    original = PrettyMIDI(midi_path)

    # Work on a copy holding only the drum instruments.
    drums_only = copy.deepcopy(original)
    drums_only.instruments = [
        instrument for instrument in drums_only.instruments
        if instrument.is_drum
    ]

    # Some drum tracks are split across several instruments; collapse
    # their notes into a single drum instrument.
    if len(drums_only.instruments) > 1:
        merged = Instrument(program=0, is_drum=True)
        for instrument in drums_only.instruments:
            merged.notes.extend(instrument.notes)
        drums_only.instruments = [merged]

    if len(drums_only.instruments) != 1:
        raise Exception(f"Invalid number of drums {msd_id}: "
                        f"{len(drums_only.instruments)}")
    return drums_only
def process(msd_id: str, counter: AtomicCounter) -> Optional[dict]:
    """
    Processes the given MSD id and increments the counter. The
    method will call the extract_drums method and write the resulting MIDI
    files to disk.
    :param msd_id: the MSD id to process
    :param counter: the counter to increment
    :return: the dictionary containing the MSD id and the PrettyMIDI drums;
    None if the file cannot be processed (the exception is printed)
    """
    try:
        # Opening the H5 file verifies the id's metadata exists on disk;
        # the handle itself is not read here.
        with tables.open_file(msd_id_to_h5(msd_id, args.path_dataset_dir)) as h5:
            pm_drums = extract_drums(msd_id)
            output_path = os.path.join(args.path_output_dir, f"{msd_id}.mid")
            pm_drums.write(output_path)
            return {"msd_id": msd_id, "pm_drums": pm_drums}
    except Exception as e:
        print(f"Exception during processing of {msd_id}: {e}")
    finally:
        counter.increment()
def app(msd_ids: List[str]):
    """
    Extracts the drums from the given MSD ids in parallel, writes the
    resulting MIDI files to disk and plots a histogram of drum-track
    lengths.
    :param msd_ids: the MSD ids to process
    """
    start = timeit.default_timer()

    # Cleanup the output directory
    shutil.rmtree(args.path_output_dir, ignore_errors=True)

    # Starts the threads
    with Pool(args.pool_size) as pool:
        manager = Manager()
        counter = AtomicCounter(manager, len(msd_ids))
        print("START")
        results = pool.starmap(process, zip(msd_ids, cycle([counter])))
        results = [result for result in results if result]
        print("END")
        results_percentage = len(results) / len(msd_ids) * 100
        print(f"Number of tracks: {len(MSD_SCORE_MATCHES)}, "
              f"number of tracks in sample: {len(msd_ids)}, "
              f"number of results: {len(results)} "
              f"({results_percentage:.2f}%)")

        # Creates an histogram for the drum lengths
        pm_drums = [result["pm_drums"] for result in results]
        pm_drums_lengths = [pm.get_end_time() for pm in pm_drums]
        plt.figure(num=None, figsize=(10, 8), dpi=500)
        plt.hist(pm_drums_lengths, bins=100, color="darkmagenta")
        plt.title('Drums lengths')
        # Fix: the lengths are on the horizontal axis of the histogram,
        # so label the x axis (the original labelled the y axis, which
        # actually shows counts).
        plt.xlabel('length (sec)')
        plt.show()

        stop = timeit.default_timer()
        print("Time: ", stop - start)
if __name__ == "__main__":
    # A non-zero --sample_size selects a random subset of the matched
    # ids; otherwise the whole dataset is processed.
    all_ids = list(MSD_SCORE_MATCHES)
    if args.sample_size:
        MSD_IDS = random.sample(all_ids, args.sample_size)
    else:
        MSD_IDS = all_ids
    app(MSD_IDS)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown] slideshow={"slide_type": "slide"}
# <h1 style="text-align:center;">Machine Learning for Programmers</h1>
# <h2 style="text-align:center;">What is ML?</h2>
# <h3 style="text-align:center;">Dr. <NAME></h3>
#
# %% [markdown] slideshow={"slide_type": "slide"}
# # Biological Inspiration
#
# <img src="img/ag/Figure-10-001.png" style="width: 80%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # The Origins of Machine Learning
#
# *A computer can be programmed so that it will learn to play a better game of checkers than can be played by the person who wrote the program.* (<NAME>, 1959)
#
# *Programming computers to learn from experience should eventually eliminate the need for much of this detailed programming effort.* (<NAME>, 1959)
# %% [markdown] slideshow={"slide_type": "subslide"}
# # One Answer (<NAME>)
#
# The phrase *machine learning* describes a growing body of techniques that all have one goal: discover meaningful information from data.
#
# Here, “data” refers to anything that can be recorded and measured. [...]
#
# "Meaningful information" is whatever we can extract from the data that will be useful to us in some way.
# %% [markdown] slideshow={"slide_type": "subslide"}
# # <NAME>'s Books
#
# <img src="img/glassner-book.jpg" style="width: 60%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-01-001.png">
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-01-002.png">
# %% [markdown] slideshow={"slide_type": "slide"}
# # Another Answer (paraphrasing <NAME>)
#
# - A part of Artificial Intelligence (AI)
# - AI: Making computers solve problems that could previously only be tackled by humans
# - AI doesn't have to involve learning, e.g., expert systems
# - ML: Improving behavior with additional data
# %% [markdown] slideshow={"slide_type": "slide"}
# # Example: MNIST Data
#
# <img src="img/ag/Figure-01-023.png" style="float: right;width: 40%;"/>
#
# We want to recognize hand-written digits:
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Rule-based Systems: Feature Engineering
#
# Extraction of relevant features from data by humans.
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-01-003.png" style="width: 40%; margin-left: auto; margin-right: auto;">
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-01-004.png" style="width: 20%; margin-left: auto; margin-right: auto;">
# %% [markdown] slideshow={"slide_type": "slide"}
# # Supervised Learning (Classification)
#
# (Learning from labeled data)
#
# <img src="img/ag/Figure-01-007.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Training a Classifier
#
# - Show lots of labeled data to a learner
# - Check whether it can correctly classify samples based on features
#
# - Evaluation must be based on different data than training
# - Otherwise the learner could just store the examples it has seen
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Often: Training Loop
#
# <img src="img/ag/Figure-08-001.png" style="width: 20%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # Back to MNIST Data
#
# <img src="img/ag/Figure-01-023.png" style="float: right;width: 40%;"/>
#
# Let's try this in practice:
# %% slideshow={"slide_type": "slide"}
|
# Source repository: cad106uk/market-access-api
import datetime
from datetime import timezone
import itertools
import random
from django.conf import settings
from django.core.management import BaseCommand
from api.barriers.models import PublicBarrier
from api.barriers.public_data import public_release_to_s3
from api.metadata.constants import BarrierStatus, PublicBarrierStatus
from api.metadata.utils import get_countries, get_sectors, get_trading_bloc_by_country_id
# Fraction of generated barriers flagged as affecting all sectors.
ALL_SECTORS_PROPORTION = 0.2
# Fraction of generated barriers attributed to the EU trading bloc
# rather than a single country.
EU_PROPORTION = 0.1
ADJECTIVES = (
"abrupt", "acidic", "adorable", "adventurous", "aggressive", "agitated", "alert", "aloof",
"amiable", "amused", "annoyed", "antsy", "anxious", "appalling", "appetizing", "apprehensive",
"arrogant", "ashamed", "astonishing", "attractive", "average", "batty", "beefy", "bewildered",
"biting", "bitter", "bland", "blushing", "bored", "brave", "bright", "broad", "bulky", "burly",
"charming", "cheeky", "cheerful", "chubby", "clean", "clear", "cloudy", "clueless", "clumsy",
"colorful", "colossal", "combative", "comfortable", "condemned", "condescending", "confused",
"contemplative", "convincing", "convoluted", "cooperative", "corny", "costly", "courageous",
"crabby", "creepy", "crooked", "cruel", "cumbersome", "curved", "cynical", "dangerous",
"dashing", "decayed", "deceitful", "deep", "defeated", "defiant", "delicious", "delightful",
"depraved", "depressed", "despicable", "determined", "dilapidated", "diminutive", "disgusted",
"distinct", "distraught", "distressed", "disturbed", "dizzy", "drab", "drained", "dull",
"eager", "ecstatic", "elated", "elegant", "emaciated", "embarrassed", "enchanting",
"encouraging", "energetic", "enormous", "enthusiastic", "envious", "exasperated", "excited",
"exhilarated", "extensive", "exuberant", "fancy", "fantastic", "fierce", "filthy", "flat",
"floppy", "fluttering", "foolish", "frantic", "fresh", "friendly", "frightened", "frothy",
"frustrating", "funny", "fuzzy", "gaudy", "gentle", "ghastly", "giddy", "gigantic",
"glamorous", "gleaming", "glorious", "gorgeous", "graceful", "greasy", "grieving", "gritty",
"grotesque", "grubby", "grumpy", "handsome", "happy", "harebrained", "healthy", "helpful",
"helpless", "high", "hollow", "homely", "horrific", "huge", "hungry", "hurt", "icy", "ideal",
"immense", "impressionable", "intrigued", "irate", "irritable", "itchy", "jealous",
"jittery", "jolly", "joyous", "juicy",
"jumpy", "kind", "lackadaisical", "large", "lazy", "lethal", "little", "lively", "livid",
"lonely", "loose", "lovely", "lucky", "ludicrous", "macho", "magnificent", "mammoth",
"maniacal", "massive", "melancholy", "melted", "miniature", "minute", "mistaken", "misty",
"moody", "mortified", "motionless", "muddy", "mysterious", "narrow", "nasty", "naughty",
"nervous", "nonchalant", "nonsensical", "nutritious", "nutty", "obedient", "oblivious",
"obnoxious", "odd", "old-fashioned", "outrageous", "panicky", "perfect", "perplexed",
"petite", "petty", "plain", "pleasant", "poised", "pompous", "precious", "prickly", "proud",
"pungent", "puny", "quaint", "quizzical", "ratty", "reassured", "relieved", "repulsive",
"responsive", "ripe", "robust", "rotten", "rotund", "rough", "round", "salty", "sarcastic",
"scant", "scary", "scattered", "scrawny", "selfish", "shaggy", "shaky", "shallow", "sharp",
"shiny", "short", "silky", "silly", "skinny", "slimy", "slippery", "small", "smarmy",
"smiling", "smoggy", "smooth", "smug", "soggy", "solid", "sore", "sour", "sparkling",
"spicy", "splendid", "spotless", "square", "stale", "steady", "steep", "responsive",
"sticky", "stormy", "stout", "straight", "strange", "strong", "stunning", "substantial",
"successful", "succulent", "superficial", "superior", "swanky", "sweet", "tart", "tasty",
"teeny", "tender", "tense", "terrible", "testy", "thankful", "thick", "thoughtful",
"thoughtless", "tight", "timely", "tricky", "trite", "troubled", "uneven", "unsightly",
"upset", "uptight", "vast", "vexed", "victorious", "virtuous", "vivacious", "vivid", "wacky",
"weary", "whimsical", "whopping", "wicked", "witty", "wobbly", "wonderful", "worried",
"yummy", "zany", "zealous", "zippy", "slithery", "red", "yellow", "blue", "green", "brown",
"black", "white", "orange", "purple", "violet", "golden", "silver", "bronze",
)
NOUNS = [
"Aardvarks", "Albatrosses", "Alligators", "Alpacas", "Angelfish", "Anteaters", "Antelopes",
"Armadillos", "Badgers", "Barracudas", "Bats", "Beagles", "Bears", "Beavers", "Birds",
"Brontosauruses", "Boa Constrictors", "Bulldogs", "Bumblebees", "Butterflies", "Camels", "Caribous",
"Cassowaries", "Cats", "Catfish", "Caterpillars", "Centipedes", "Chameleons", "Cheetahs",
"Chinchillas", "Chipmunks", "Cobras", "Coelacanths", "Condors", "Coral Snakes", "Cormorants", "Crabs",
"Cranes", "Crocodiles", "Dalmatians", "Deer", "Dolphins", "Doves", "Dragonfish", "Dragonflies",
"Ducks", "Eagles", "Eels", "Elephants", "Elks", "Falcons", "Ferrets", "Finchs", "Fireflies", "fish",
"Flamingos", "Foxes", "Frogs", "Gazelles", "Geckos", "Gerbils", "Giraffes", "Gnus", "Goldfish",
"Gooses", "Gorillas", "Grasshoppers", "Greyhounds", "Grouses", "Gulls", "Hamsters", "Hares", "Hawks",
"Hatchetfish", "Hedgehogs", "Herons", "Herrings", "Hornets", "Horses", "Hummingbirds", "Ibexes",
"Ibises", "Iguanas", "Jackals", "Jaguars", "Jellyfish", "Kangaroos", "Kestrels", "Kingfishers",
"Koalas", "Koi", "Larks", "Lemurs", "Leopards", "Lions", "Lionfish", "Llamas", "Lobsters", "Lorises",
"Magpies", "Mallards", "Mandrills", "Manta Rays", "Mantises", "Marlins", "Mastiffs", "Mollusks",
"Mongooses", "Mooses", "Mouses", "Narwhals", "Nautiluses", "Newts", "Nightingales", "Octopuses",
"Okapis", "Opossums", "Orcas", "Ospreys", "Ostrichs", "Otters", "Owls", "Pandas", "Panthers", "Parrots",
"Partridges", "Pelicans", "Penguins", "Pheasants", "Pigeons", "Platypi", "Polar Bears",
"Porcupines", "Porpoises", "Pythons", "Quails", "Rabbits", "Raccoons", "Rams", "Ravens", "Reindeer",
"Rhinoceri", "Roadrunners", "Rooks", "Salamanders", "Salmons", "Sandpipers", "Scorpions",
"Sea Cucumbers", "Sea Lions", "Sea Snakes", "Sea Turtles", "Seahorses", "Seals", "Sharks", "Sheep",
"Snowy Owls", "Songbirds", "Sparrows", "Spiders", "Squids", "Squirrels", "Starfish", "Starlings",
"Stegosauruses", "Stingrays", "Storks", "Swans", "Tapirs", "Tigers", "Toucans", "Triceratops",
"Turtles", "Vampire Bats", "Velociraptors", "Wallabies", "Walruses", "Wolves", "Wolverines", "Wombats",
"Wrasses", "Wrens", "Yaks", "Zebras", "apples", "backs", "balls", "bears", "beds", "bells",
"birds", "birthdays", "boats", "boxs", "boys", "breads", "cakes", "cars", "cats", "chairs",
"chickens", "coats", "corn", "cows", "days", "dogs", "dolls", "doors", "ducks", "eggs",
"eyes", "farms", "farmers", "feets", "fires", "fish", "floors", "flowers", "games",
"gardens", "grasss", "grounds", "hands", "heads", "hills", "homes", "horses", "houses",
"kitties", "legs", "letters", "milks", "money", "mornings", "names", "nests",
"nights", "papers", "parties", "pictures", "pigs", "rabbits", "rain", "rings", "robins", "schools",
"seeds", "shoes", "snow", "songs", "sticks", "streets", "stars", "tables", "things", "times",
"tops", "toys", "trees", "watches", "water", "winds", "windows", "woods",
"Jelly Babies", "Jelly Beans", "Wizards", "Beer", "Halloumi",
]
class Randomiser:
    """Produces random values for generating fake public barriers."""

    # Lazily-populated caches, filled on first property access.
    _sectors = None
    _countries = None

    @property
    def countries(self):
        # IDs of enabled countries, fetched once and cached.
        if self._countries is None:
            self._countries = [
                country["id"]
                for country in get_countries()
                if country["disabled_on"] is None and country.get("id")
            ]
        return self._countries

    @property
    def sectors(self):
        # IDs of enabled top-level (level 0) sectors, fetched once and cached.
        if self._sectors is None:
            self._sectors = [
                sector["id"]
                for sector in get_sectors()
                if sector["level"] == 0
                and sector["disabled_on"] is None
                and sector.get("id")
            ]
        return self._sectors

    def get_title(self):
        """Return a random 'Adjective Nouns' title in title case."""
        adjective = random.choice(ADJECTIVES)
        noun = random.choice(NOUNS)
        return f"{adjective} {noun}".title()

    def get_sectors(self):
        """Return 0-3 random sector ids, weighted towards exactly one.

        NOTE(review): random.choices samples with replacement, so the
        result can contain duplicate sector ids — confirm that is intended.
        """
        (quantity,) = random.choices(
            population=[0, 1, 2, 3],
            weights=[0.1, 0.6, 0.2, 0.1],
        )
        return random.choices(self.sectors, k=quantity)

    def get_country(self):
        """Return a random enabled country id."""
        return random.choice(self.countries)

    def get_status(self):
        """Return a random open/resolved barrier status."""
        return random.choice((
            BarrierStatus.OPEN_PENDING,
            BarrierStatus.OPEN_IN_PROGRESS,
            BarrierStatus.RESOLVED_IN_PART,
            BarrierStatus.RESOLVED_IN_FULL,
        ))
def create_fake_public_barriers(quantity):
    """Yield `quantity` unsaved PublicBarrier instances populated with
    random, plausible-looking data and marked as published."""
    earliest_publish_date = datetime.datetime(2020, 8, 1, tzinfo=timezone.utc)
    next_id = itertools.count(1).__next__
    randomiser = Randomiser()

    for _ in range(quantity):
        # Sectors: either a random handful, or the "all sectors" flag.
        if random.random() > ALL_SECTORS_PROPORTION:
            sectors = randomiser.get_sectors()
            all_sectors = False
        else:
            sectors = []
            all_sectors = True

        # Location: usually a single country (occasionally caused by its
        # trading bloc), otherwise the EU trading bloc itself.
        if random.random() > EU_PROPORTION:
            country = randomiser.get_country()
            trading_bloc = ""
            caused_by_trading_bloc = False
            country_trading_bloc = get_trading_bloc_by_country_id(country)
            if country_trading_bloc and random.random() > 0.8:
                caused_by_trading_bloc = True
        else:
            country = None
            trading_bloc = "TB00016"
            caused_by_trading_bloc = False

        # Resolved barriers get a random first-of-month status date.
        status = randomiser.get_status()
        if status in (BarrierStatus.RESOLVED_IN_FULL, BarrierStatus.RESOLVED_IN_PART):
            status_date = datetime.date(
                random.randint(2014, 2020),
                random.randint(1, 12),
                1,
            )
        else:
            status_date = None

        published_date = earliest_publish_date + datetime.timedelta(
            days=random.randint(1, 100)
        )
        yield PublicBarrier(
            id=next_id(),
            _title=randomiser.get_title(),
            _summary="Lorem ipsum dolor",
            status=status,
            status_date=status_date,
            country=country,
            caused_by_trading_bloc=caused_by_trading_bloc,
            trading_bloc=trading_bloc,
            sectors=sectors,
            all_sectors=all_sectors,
            _public_view_status=PublicBarrierStatus.PUBLISHED,
            first_published_on=published_date,
            last_published_on=published_date,
        )
class Command(BaseCommand):
    """Management command that publishes randomly generated fake barriers.

    Refuses to run outside local/dev environments.
    """

    help = "Publish fake barriers"

    def add_arguments(self, parser):
        parser.add_argument("quantity", type=int, help="Number of barriers to publish")

    def handle(self, *args, **options):
        # Guard clause: never generate fake data in real environments.
        if settings.DJANGO_ENV not in ["local", "dev"]:
            self.stdout.write(f"Publishing fake barriers is disabled on {settings.DJANGO_ENV}")
            return
        quantity = options["quantity"]
        self.stdout.write(f"Publishing {quantity} fake barriers...")
        public_release_to_s3(create_fake_public_barriers(quantity))
|
"Test moving window functions."
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_equal, assert_array_almost_equal,
assert_raises)
import bottleneck as bn
from .util import arrays, array_order
def test_move():
    "test move functions"
    # Nose-style generator test: one sub-test per moving-window function.
    for move_func in bn.get_functions('move'):
        yield unit_maker, move_func
def unit_maker(func):
    "Test that bn.xxx gives the same output as a reference function."
    fmt = ('\nfunc %s | window %d | min_count %s | input %s (%s) | shape %s | '
           'axis %s | order %s\n')
    fmt += '\nInput array:\n%s\n'
    aaae = assert_array_almost_equal
    func_name = func.__name__
    # Fix: look the reference implementation up with getattr instead of
    # eval() on a constructed string — same behavior, safer idiom.
    func0 = getattr(bn.slow, func_name)
    # move_var accumulates more floating-point error, so compare fewer
    # decimal places for it.
    if func_name == "move_var":
        decimal = 3
    else:
        decimal = 5
    # Exhaustively compare func against the reference over every test
    # array, axis, window length and min_count (including None).
    for i, a in enumerate(arrays(func_name)):
        axes = range(-1, a.ndim)
        for axis in axes:
            windows = range(1, a.shape[axis])
            for window in windows:
                min_counts = list(range(1, window + 1)) + [None]
                for min_count in min_counts:
                    actual = func(a, window, min_count, axis=axis)
                    desired = func0(a, window, min_count, axis=axis)
                    tup = (func_name, window, str(min_count), 'a'+str(i),
                           str(a.dtype), str(a.shape), str(axis),
                           array_order(a), a)
                    err_msg = fmt % tup
                    aaae(actual, desired, decimal, err_msg)
                    # Output dtype must match the reference as well.
                    err_msg += '\n dtype mismatch %s %s'
                    da = actual.dtype
                    dd = desired.dtype
                    assert_equal(da, dd, err_msg % (da, dd))
# ---------------------------------------------------------------------------
# Test argument parsing
def test_arg_parsing():
    "test argument parsing"
    # Nose-style generator test: one sub-test per moving-window function.
    for move_func in bn.get_functions('move'):
        yield unit_maker_argparse, move_func
def unit_maker_argparse(func, decimal=5):
    """Test that `func` accepts the same positional/keyword call shapes as
    its bn.slow reference and produces identical output for each shape.
    """
    name = func.__name__
    # getattr lookup instead of eval() on a constructed string.
    func0 = getattr(bn.slow, name)
    a = np.array([1., 2, 3])
    fmt = '\n%s' % func
    fmt += '%s\n'
    fmt += '\nInput array:\n%s\n' % a
    # (label, args, kwargs) call shapes every move function must accept.
    calls = [
        ("(a, 2)", (a, 2), {}),
        ("(a, 2, 1)", (a, 2, 1), {}),
        ("(a, window=2)", (a,), {'window': 2}),
        ("(a, window=2, min_count=1)", (a,), {'window': 2, 'min_count': 1}),
        ("(a, window=2, min_count=1, axis=0)", (a,),
         {'window': 2, 'min_count': 1, 'axis': 0}),
        ("(a, min_count=1, window=2, axis=0)", (a,),
         {'min_count': 1, 'window': 2, 'axis': 0}),
        ("(a, axis=-1, min_count=None, window=2)", (a,),
         {'axis': -1, 'min_count': None, 'window': 2}),
        ("(a=a, axis=-1, min_count=None, window=2)", (),
         {'a': a, 'axis': -1, 'min_count': None, 'window': 2}),
    ]
    if name in ('move_std', 'move_var'):
        # ddof is only accepted by the std/var functions.
        calls.append(("(a, 2, 1, -1, ddof=1)", (a, 2, 1, -1), {'ddof': 1}))
    for label, args, kwargs in calls:
        actual = func(*args, **kwargs)
        desired = func0(*args, **kwargs)
        err_msg = fmt % label
        assert_array_almost_equal(actual, desired, decimal, err_msg)
    # regression test: make sure len(kwargs) == 0 doesn't raise
    args = (a, 1, 1, -1)
    kwargs = {}
    func(*args, **kwargs)
def test_arg_parse_raises():
    """Yield one invalid-argument test per moving-window function."""
    for move_func in bn.get_functions('move'):
        yield unit_maker_argparse_raises, move_func
def unit_maker_argparse_raises(func):
    """Verify that invalid argument combinations make `func` raise TypeError."""
    a = np.array([1., 2, 3])
    bad_calls = [
        ((), {}),                           # no arguments at all
        ((), {'axis': a}),                  # array passed as axis
        ((a, 2), {'axis': 0, 'extra': 0}),  # unknown keyword
        ((a, 2), {'axis': 0, 'a': a}),      # 'a' given twice
        ((a, 2, 2, 0, 0, 0), {}),           # too many positionals
        ((a, 2), {'axis': '0'}),            # non-integer axis
        ((a, 1), {'min_count': '1'}),       # non-integer min_count
    ]
    if func.__name__ not in ('move_std', 'move_var'):
        # ddof is only valid for the std/var functions
        bad_calls.append(((a, 2), {'ddof': 0}))
    for args, kwargs in bad_calls:
        assert_raises(TypeError, func, *args, **kwargs)
# ---------------------------------------------------------------------------
# move_median.c is complicated. Let's do some more testing.
#
# If you make changes to move_median.c then do lots of tests by increasing
# range(100) in the two functions below to range(10000). And for extra credit
# increase size to 30. With those two changes the unit tests will take a
# LONG time to run.
def test_move_median_with_nans():
    """Stress move_median.c with shuffled inputs containing inf and nan."""
    fmt = '\nfunc %s | window %d | min_count %s\n\nInput array:\n%s\n'
    compare = assert_array_almost_equal
    min_count = 1
    size = 10
    fast = bn.move_median
    reference = bn.slow.move_median
    rng = np.random.RandomState([1, 2, 3])
    for _trial in range(100):
        a = np.arange(size, dtype=np.float64)
        # Sprinkle in ~10% infs and ~20% nans, then shuffle.
        a[rng.rand(*a.shape) < 0.1] = np.inf
        a[rng.rand(*a.shape) < 0.2] = np.nan
        rng.shuffle(a)
        for window in range(2, size + 1):
            got = fast(a, window=window, min_count=min_count)
            want = reference(a, window=window, min_count=min_count)
            msg = fmt % (fast.__name__, window, min_count, a)
            compare(got, want, decimal=5, err_msg=msg)
def test_move_median_without_nans():
    """Stress move_median.c on shuffled integer inputs (no nans)."""
    fmt = '\nfunc %s | window %d | min_count %s\n\nInput array:\n%s\n'
    compare = assert_array_almost_equal
    min_count = 1
    size = 10
    fast = bn.move_median
    reference = bn.slow.move_median
    rng = np.random.RandomState([1, 2, 3])
    for _trial in range(100):
        a = np.arange(size, dtype=np.int64)
        rng.shuffle(a)
        for window in range(2, size + 1):
            got = fast(a, window=window, min_count=min_count)
            want = reference(a, window=window, min_count=min_count)
            msg = fmt % (fast.__name__, window, min_count, a)
            compare(got, want, decimal=5, err_msg=msg)
# ----------------------------------------------------------------------------
# Regression test for square roots of negative numbers
def test_move_std_sqrt():
    """Regression test: move_std must not produce NaN from the square root
    of a tiny negative number caused by floating-point rounding."""
    a = [0.0011448196318903589,
         0.00028718669878572767,
         0.00028718669878572767,
         0.00028718669878572767,
         0.00028718669878572767]
    err_msg = "Square root of negative number. ndim = %d"
    # Check 1-d, 2-d and 3-d inputs along their last axis.
    for ndim in (1, 2, 3):
        if ndim == 1:
            arr = a
        elif ndim == 2:
            arr = np.array([a, a])
        else:
            arr = np.array([[a, a], [a, a]])
        out = bn.move_std(arr, window=3, axis=ndim - 1)
        assert_true(np.isfinite(out[..., 2:]).all(), err_msg % ndim)
|
import os
import cv2 as cv
import numpy as np
import torch
from torchvision import transforms
from tqdm import tqdm
from config import device
from data_gen import data_transforms
from utils import ensure_folder, compute_mse, compute_sad, draw_str
# Folder layout of the alphamatting.com low-resolution benchmark.
IMG_FOLDER = 'alphamatting/input_lowres'
ALPHA_FOLDER = 'alphamatting/gt_lowres'
# Each input image is evaluated once per trimap variant below.
TRIMAP_FOLDERS = ['alphamatting/trimap_lowres/Trimap1', 'alphamatting/trimap_lowres/Trimap2']
# NOTE(review): a third output folder is listed but only the first two are
# ever created/used (the loop below runs over range(2)) — confirm Trimap3
# is intentionally disabled.
OUTPUT_FOLDERS = ['alphamatting/output_lowres_4_5/Trimap1', 'alphamatting/output_lowres_4_5/Trimap2', 'images/alphamatting/output_lowres_13/Trimap3', ]

if __name__ == '__main__':
    # Earlier checkpoint-loading variants, kept for reference.
    # checkpoint = 'BEST_checkpoint.tar'
    # checkpoint = torch.load(checkpoint)
    # model = checkpoint['model'].module
    # model = model.to(device)
    # model.eval()

    # Load the matting model from a training checkpoint and switch to eval mode.
    checkpoint = 'checkpointss/checkpoint_0_0.30576499869736534.tar'
    checkpoint = torch.load(checkpoint)
    model = checkpoint['model']
    model = model.to(device)
    model.eval()
    # checkpoint = 'checkpoint_0007_0.0650.tar'
    # checkpoint = torch.load(checkpoint)
    # model = checkpoint['model']
    # model = model.to(device)
    # model.eval()

    # Validation-time preprocessing (normalization etc.) shared with training.
    transformer = data_transforms['valid']

    ensure_folder('images')
    ensure_folder('images/alphamatting')
    ensure_folder(OUTPUT_FOLDERS[0])
    ensure_folder(OUTPUT_FOLDERS[1])
    # ensure_folder(OUTPUT_FOLDERS[2])

    files = [f for f in os.listdir(IMG_FOLDER) if f.endswith('.png')]

    for file in tqdm(files):
        filename = os.path.join(IMG_FOLDER, file)
        img = cv.imread(filename)
        filename = os.path.join(ALPHA_FOLDER, file)
        # print(filename)
        # Ground-truth alpha matte, read as grayscale and scaled to [0, 1].
        alpha = cv.imread(filename, 0)
        alpha = alpha / 255

        print(img.shape)
        h, w = img.shape[:2]

        # Network input: batch of 1, 3 RGB channels + 1 trimap channel.
        x = torch.zeros((1, 4, h, w), dtype=torch.float)
        image = img[..., ::-1]  # BGR (OpenCV) -> RGB
        image = transforms.ToPILImage()(image)
        image = transformer(image)
        x[0:, 0:3, :, :] = image

        for i in range(2):
            # Fill the 4th channel with the i-th trimap, scaled to [0, 1].
            filename = os.path.join(TRIMAP_FOLDERS[i], file)
            print('reading {}...'.format(filename))
            trimap = cv.imread(filename, 0)
            x[0:, 3, :, :] = torch.from_numpy(trimap.copy() / 255.)
            # print(torch.max(x[0:, 3, :, :]))
            # print(torch.min(x[0:, 3, :, :]))
            # print(torch.median(x[0:, 3, :, :]))

            # Move to GPU, if available
            x = x.type(torch.FloatTensor).to(device)

            with torch.no_grad():
                pred = model(x)

            pred = pred.cpu().numpy()
            pred = pred.reshape((h, w))

            # Known trimap regions override the prediction: 0 = definite
            # background, 255 = definite foreground.
            pred[trimap == 0] = 0.0
            pred[trimap == 255] = 1.0

            # Calculate loss
            # loss = criterion(alpha_out, alpha_label)
            # print(pred.shape)
            # print(alpha.shape)
            mse_loss = compute_mse(pred, alpha, trimap)
            sad_loss = compute_sad(pred, alpha)
            str_msg = 'sad: %.4f, mse: %.4f' % (sad_loss, mse_loss)
            print(str_msg)

            # Write the annotated 8-bit alpha prediction to the output folder.
            out = (pred.copy() * 255).astype(np.uint8)
            draw_str(out, (10, 20), str_msg)
            filename = os.path.join(OUTPUT_FOLDERS[i], file)
            cv.imwrite(filename, out)
            print('wrote {}.'.format(filename))
|
<reponame>JedersonLuz/jusrisfai_challenge
from bs4 import BeautifulSoup
from requests_html import HTMLSession
class ScrapingRules(object):
    """Scrapes a law page from planalto.gov.br and extracts its title and a
    requested article (artigo), paragraph (parágrafo), clause (inciso) and
    item (alínea) from the rendered HTML."""

    def find_titulo(self, soup):
        """Return the law's title: the last <p> inside the last <table>."""
        tables = soup.find_all('table')
        paragraphs = tables[-1].find_all('p')
        return paragraphs[-1].get_text().strip()

    def find_artigo(self, soup, artigo):
        """Locate article number `artigo` among the page's <p> elements.

        Returns:
            (start_index, article_text, end_index) where end_index bounds
            searches inside this article, or (0, 'Artigo não encontrado', 0)
            when the article is not present.
        """
        results = soup.find_all('p')
        index = 0
        content = ''
        for i in range(len(results)):
            text = results[i].get_text()
            # An article heading may appear as "Art. N.", "Art. Nº" or,
            # on some pages, "Art . N." (stray space).
            if (f'Art. {artigo}.' in text) \
                    or (f'Art. {artigo}º' in text) \
                    or (f'Art . {artigo}.' in text):
                index, content = i, text
            # The next article's heading ends the current article.
            if (f'Art. {int(artigo) + 1}.' in text) \
                    or (f'Art. {int(artigo) + 1}º' in text) \
                    or (f'Art . {int(artigo) + 1}.' in text):
                return (index, content, i)
        if content:
            # Bug fix: the requested article is the last one on the page, so
            # no "Art. N+1" heading follows. Previously this case fell
            # through to the "not found" return and the match was lost.
            return (index, content, len(results))
        return (0, 'Artigo não encontrado', 0)

    def find_paragrafo(self, soup, paragrafo, index, end):
        """Find paragraph `paragrafo` within <p>[index:end]; '1' also matches
        'Parágrafo único'. Returns (position, text) or (index, '')."""
        results = soup.find_all('p')
        if (end - index) > 1:
            for i in range(index, end):
                text = results[i].get_text()
                if paragrafo == '1':
                    if (f'§ {paragrafo}º' in text) or ('Parágrafo único' in text):
                        return (i, text)
                else:
                    if f'§ {paragrafo}º' in text:
                        return (i, text)
        return (index, '')

    def find_inciso(self, soup, inciso, index, end):
        """Find clause (inciso, roman numeral) within <p>[index:end].
        Returns (position, text) or (index, '')."""
        results = soup.find_all('p')
        if (end - index) > 1:
            for i in range(index, end):
                text = results[i].get_text()
                # Clause markers vary: "I-", "I -" or "I –" (en dash).
                if f'{inciso}-' in text \
                        or f'{inciso} -' in text \
                        or f'{inciso} –' in text:
                    return (i, text)
        return (index, '')

    def find_alinea(self, soup, alinea, index, end):
        """Find item (alínea, lowercase letter) within <p>[index:end].
        Returns (position, text) or (index, '')."""
        results = soup.find_all('p')
        if (end - index) > 1:
            for i in range(index, end):
                text = results[i].get_text()
                # Item markers vary: "a)", "a.", "a -" or "a –".
                if f'{alinea})' in text \
                        or f'{alinea}.' in text \
                        or f'{alinea} -' in text \
                        or f'{alinea} –' in text:
                    return (i, text)
        return (index, '')

    def find_rule(self, lei, artigo, paragrafo, inciso, alinea):
        """Fetch law `lei`, render its JavaScript, and print the requested
        article plus any of paragraph/clause/item that are non-empty."""
        ## URL param
        url = f'http://www.planalto.gov.br/ccivil_03/leis/l{lei}.htm'
        ## Request
        print('Request initialized')
        print('Wait for a moment...')
        session = HTMLSession()
        resp = session.get(url)
        resp.html.render()  # executes the page's JavaScript before parsing
        if resp.status_code == 200:
            #print('Status code 200')
            soup = BeautifulSoup(resp.html.html, "lxml")
            print(f'Lei {lei}: {self.find_titulo(soup)}')
            index, artigo_text, end = self.find_artigo(soup, artigo)
            print(artigo_text)
            # Each narrowing step searches only within the article's range.
            if paragrafo:
                index, paragrafo_text = self.find_paragrafo(soup, paragrafo, index, end)
                if paragrafo_text: print(paragrafo_text)
            if inciso:
                index, inciso_text = self.find_inciso(soup, inciso, index, end)
                if inciso_text: print(inciso_text)
            if alinea:
                index, alinea_text = self.find_alinea(soup, alinea, index, end)
                if alinea_text: print(alinea_text)
        else:
            print('No internet')
            print('Try:')
            print('Checking the network cables, modem, and router')
            print('Reconnecting to Wi-Fi')
## Inputs — hard-coded for now; the commented input() prompts allow interactive use.
lei = '9504'  # law number to fetch from planalto.gov.br
#lei = input('Informe um número de lei: ')
artigo = '4'  # article number to look up
#artigo = input('Informe um artigo: ')
paragrafo = ''  # empty string disables paragraph filtering
#paragrafo = input('Informe um parágrafo (deixe em branco para desconsirar esse argumento): ')
inciso = ''  # empty string disables clause filtering
#inciso = input('Informe um inciso (deixe em branco para desconsirar esse argumento): ').upper()
alinea = ''  # empty string disables item filtering
#alinea = input('Informe uma alínea (deixe em branco para desconsirar esse argumento): ')

## Main
scraping_rules = ScrapingRules()
scraping_rules.find_rule(lei, artigo, paragrafo, inciso, alinea)
|
<reponame>kustodian/aerospike-admin<gh_stars>0
# Copyright 2013-2018 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from distutils.version import LooseVersion
import re
from lib.health.constants import ParserResultType, HealthResultType, HealthResultCounter, AssertResultKey
from lib.health.exceptions import SyntaxException, HealthException
from lib.health.parser import HealthParser
from lib.health.query import QUERIES
from lib.health.util import is_health_parser_variable
from lib.utils.util import parse_queries
from lib.view import terminal
# Regex that detects a "SET CONSTRAINT VERSION ..." directive in a health
# query; group(1) captures the constraint expression.
VERSION_CONSTRAINT_PATTERN = "SET CONSTRAINT VERSION(.+)"
class HealthChecker(object):
    """Executes health-check queries against collected cluster snapshot data.

    Usage: call set_health_input_data() once, then execute() any number of
    times; counters and parser state are reset between runs.
    """

    def __init__(self):
        try:
            self.health_parser = HealthParser()
            self.health_parser.build()
        except Exception:
            # The ply-based parser may fail to build (e.g. ply not
            # installed); keep None and fail lazily when a query is parsed.
            self.health_parser = None

        self.verbose = False
        # True when the active "SET CONSTRAINT VERSION" filter matches no node.
        self.no_valid_version = False
        # True when version-filtered (not full) data is loaded in the parser.
        self.filtered_data_set_to_parser = False

    def _reset_counters(self):
        """Zero all per-run status counters and clear per-run result buffers."""
        self.status_counters = {}
        self.status_counters[HealthResultCounter.QUERY_COUNTER] = 0
        self.status_counters[HealthResultCounter.QUERY_SUCCESS_COUNTER] = 0
        self.status_counters[HealthResultCounter.QUERY_SKIPPED_COUNTER] = 0
        self.status_counters[HealthResultCounter.ASSERT_QUERY_COUNTER] = 0
        self.status_counters[HealthResultCounter.ASSERT_FAILED_COUNTER] = 0
        self.status_counters[HealthResultCounter.ASSERT_PASSED_COUNTER] = 0
        self.status_counters[HealthResultCounter.DEBUG_COUNTER] = 0
        self.status_counters[HealthResultCounter.SYNTAX_EXCEPTION_COUNTER] = 0
        self.status_counters[HealthResultCounter.HEALTH_EXCEPTION_COUNTER] = 0
        # NOTE: "OTEHR" is a (sic) misspelling in lib.health.constants; the
        # enum member name cannot be changed here.
        self.status_counters[HealthResultCounter.OTEHR_EXCEPTION_COUNTER] = 0

        self.assert_outputs = {}
        self.health_exceptions = []
        self.syntax_exceptions = []
        self.other_exceptions = []
        self.debug_outputs = []

    def _increment_counter(self, counter):
        """Increment `counter` if it is one of the known status counters."""
        if counter and counter in self.status_counters:
            self.status_counters[counter] += 1

    def _set_parser_input(self, data):
        """Load `data` into the parser; raises when no parser is available."""
        try:
            self.health_parser.set_health_data(data)
        except Exception:
            # health_parser is None (or broken) when ply is missing.
            raise Exception("No parser available. Please check ply module installed or not.")

    def _reset_parser(self):
        """Clear parser caches and restore the full (unfiltered) input data."""
        self.health_parser.clear_health_cache()
        if self.filtered_data_set_to_parser:
            # Healthchecker should work as setting input once and calling
            # execute multiple times on same data. So we need to reset parser
            # input data if we set version filtered data.
            self._set_parser_input(self.health_input_data)

    def set_health_input_data(self, data):
        """Store and load the full health snapshot data (a dict)."""
        self.health_input_data = data
        if not data or not isinstance(data, dict):
            raise ValueError(
                terminal.fg_red() + "Wrong Input Data for HealthChecker" + terminal.fg_clear())
        self._set_parser_input(data)

    def _create_health_result_dict(self):
        """Deep-copy the per-run counters/buffers into a result dict."""
        res = {}
        res[HealthResultType.STATUS_COUNTERS] = copy.deepcopy(
            self.status_counters)

        res[HealthResultType.EXCEPTIONS] = {}
        res[HealthResultType.EXCEPTIONS][
            HealthResultType.EXCEPTIONS_SYNTAX] = copy.deepcopy(self.syntax_exceptions)
        res[HealthResultType.EXCEPTIONS][
            HealthResultType.EXCEPTIONS_PROCESSING] = copy.deepcopy(self.health_exceptions)
        res[HealthResultType.EXCEPTIONS][
            HealthResultType.EXCEPTIONS_OTHER] = copy.deepcopy(self.other_exceptions)

        res[HealthResultType.ASSERT] = copy.deepcopy(self.assert_outputs)
        res[HealthResultType.DEBUG_MESSAGES] = copy.deepcopy(
            self.debug_outputs)
        return res

    def _is_assert_query(self, query):
        """Return True when `query` contains an ASSERT clause."""
        if query and "ASSERT" in query:
            return True
        return False

    def _is_version_set_query(self, query):
        """Return a match object when `query` is a SET CONSTRAINT VERSION line."""
        return re.search(VERSION_CONSTRAINT_PATTERN, query)

    def _set_version_checker_function(self, line):
        """Compile the version constraint in `line` into self.version_checker_fn.

        Supported forms: <=V, <V, >=V, >V, =V, IN [V1, V2, ...] and a bare
        version; "all"/"ALL" clears the filter (fn set to None).
        """
        v_str = re.search(VERSION_CONSTRAINT_PATTERN, line).group(1).strip()

        # Order matters: test "<=" / ">=" before "<" / ">". Raw strings are
        # used so the "[" in the IN-pattern is an explicit escape.
        m = re.search(r"<=(.+)", v_str)
        if m:
            v = m.group(1)
            self.version_checker_fn = lambda x: LooseVersion(
                x.strip()) <= LooseVersion(v.strip())
            return
        m = re.search(r"<(.+)", v_str)
        if m:
            v = m.group(1)
            self.version_checker_fn = lambda x: LooseVersion(
                x.strip()) < LooseVersion(v.strip())
            return
        m = re.search(r">=(.+)", v_str)
        if m:
            v = m.group(1)
            self.version_checker_fn = lambda x: LooseVersion(
                x.strip()) >= LooseVersion(v.strip())
            return
        m = re.search(r">(.+)", v_str)
        if m:
            v = m.group(1)
            self.version_checker_fn = lambda x: LooseVersion(
                x.strip()) > LooseVersion(v.strip())
            return
        m = re.search(r"=(.+)", v_str)
        if m:
            v = m.group(1).strip()
            if v.lower() == "all":
                self.version_checker_fn = None
            else:
                self.version_checker_fn = lambda x: LooseVersion(
                    x.strip()) == LooseVersion(v)
            return
        m = re.search(r"IN \[(.+)\]", v_str)
        if m:
            v = [i.strip() for i in m.group(1).strip().split(",")]
            if "ALL" in v or "all" in v:
                self.version_checker_fn = None
            else:
                self.version_checker_fn = lambda x: LooseVersion(
                    x.strip()) in [LooseVersion(i) for i in v]
            return
        # Bare version (no operator).
        v = v_str.strip()
        if v.lower() == "all":
            self.version_checker_fn = None
        else:
            self.version_checker_fn = lambda x: LooseVersion(
                x.strip()) == LooseVersion(v.strip())

    def _filter_nodes_to_remove(self, data):
        """
        Returns map as {cl1:v1, cl2:v2, ... } or 0 or 1
        where v can be 0 : means remove all nodes
        1 : means keep all nodes
        list of nodes : remove nodes from list
        """
        if not data or not "METADATA" in data or not "CLUSTER" in data["METADATA"]:
            # no metadata information available
            return 1

        sn_partial_count = 0
        sn_one_count = 0
        sn_zero_count = 0
        sn_node_dict = {}
        sn_total_clusters = 0

        for cl in data["METADATA"]["CLUSTER"].keys():
            sn_total_clusters += 1
            try:
                cl_one_count = 0
                cl_zero_count = 0
                cl_node_list = []
                cl_total_nodes = 0
                for n in data["METADATA"]["CLUSTER"][cl].keys():
                    cl_total_nodes += 1
                    try:
                        if not self.version_checker_fn(data["METADATA"]["CLUSTER"][cl][n][("version", "KEY")]):
                            cl_zero_count += 1
                            cl_node_list.append(n)
                        else:
                            cl_one_count += 1
                    except Exception:
                        # Missing/unparsable version: keep the node.
                        cl_one_count += 1

                if cl_total_nodes == cl_one_count:
                    # keep all nodes for this cluster
                    sn_node_dict[cl] = 1
                    sn_one_count += 1
                elif cl_total_nodes == cl_zero_count:
                    # remove all nodes for this cluster
                    sn_node_dict[cl] = 0
                    sn_zero_count += 1
                else:
                    # some nodes need to remove
                    sn_node_dict[cl] = cl_node_list
                    sn_partial_count += 1
            except Exception:
                # On any unexpected error, keep the whole cluster.
                sn_node_dict[cl] = 1
                sn_one_count += 1

        if sn_total_clusters == sn_one_count:
            # keep all nodes for all cluster
            return 1
        elif sn_total_clusters == sn_zero_count:
            # remove all nodes for all cluster, so remove this snapshot itself
            return 0
        else:
            # some nodes need to remove
            return sn_node_dict

    def _remove_node_data(self, data, remove_nodes):
        """Recursively drop the node entries listed in `remove_nodes` from
        `data`, pruning any sub-dicts that become empty."""
        if not data or not isinstance(data, dict):
            return

        # Iterate over a snapshot of the keys: entries are popped while
        # walking, which raises RuntimeError on a live dict view (Python 3).
        for _key in list(data.keys()):
            if isinstance(_key, tuple) and _key[1] == "CLUSTER":
                if _key not in remove_nodes or remove_nodes[_key] == 1:
                    continue

                if remove_nodes[_key] == 0:
                    data.pop(_key)
                    continue

                for n in list(data[_key].keys()):
                    if n in remove_nodes[_key]:
                        data[_key].pop(n)

                if not data[_key]:
                    data.pop(_key)
            else:
                self._remove_node_data(data[_key], remove_nodes)
                if not data[_key]:
                    data.pop(_key)

    def _filter_health_input_data(self):
        """Return a deep copy of the input data with all nodes that fail the
        current version constraint removed."""
        data = copy.deepcopy(self.health_input_data)

        # Snapshot the keys; whole snapshots may be popped while iterating.
        for sn in list(data.keys()):
            # SNAPSHOT level
            remove_nodes = self._filter_nodes_to_remove(data[sn])
            if remove_nodes == 1:
                continue
            elif remove_nodes == 0:
                data.pop(sn)
                continue
            else:
                self._remove_node_data(data[sn], remove_nodes)
                if not data[sn]:
                    data.pop(sn)
        return data

    def _filter_and_set_health_input_data(self, line):
        """Apply the SET CONSTRAINT VERSION directive in `line` and load the
        matching subset of the input data into the parser."""
        self._set_version_checker_function(line)

        if not self.version_checker_fn:
            # "all": no filtering; restore the full data set.
            self.no_valid_version = False
            self._set_parser_input(self.health_input_data)
            self.filtered_data_set_to_parser = False
        else:
            d = self._filter_health_input_data()
            if not d:
                # Nothing matches; subsequent queries are skipped.
                self.no_valid_version = True
            else:
                self.no_valid_version = False
                self._set_parser_input(d)
                self.filtered_data_set_to_parser = True

    def _execute_query(self, query):
        """Parse and run a single health query."""
        return self.health_parser.parse(query)

    def _add_assert_output(self, assert_out):
        """File `assert_out` under its (nested) category path in
        self.assert_outputs, creating intermediate dicts as needed."""
        if not assert_out:
            return
        categories = assert_out[AssertResultKey.CATEGORY]
        assert_ptr = self.assert_outputs
        for c in categories[:-1]:
            if not c in assert_ptr:
                assert_ptr[c] = {}
            assert_ptr = assert_ptr[c]

        c = categories[-1]
        if not c in assert_ptr:
            assert_ptr[c] = []
        assert_ptr = assert_ptr[c]
        assert_ptr.append(assert_out)

    def _execute_queries(self, query_source=None, is_source_file=True):
        """Run all queries from `query_source` (a file path or query string),
        updating the per-run counters and result buffers."""
        self._reset_counters()

        if not self.health_input_data or not isinstance(self.health_input_data, dict):
            raise Exception("No Health Input Data available")

        if not query_source:
            raise Exception("No Input Query Source.")
        if not isinstance(query_source, str):
            raise Exception("Query input source is not valid")

        queries = parse_queries(query_source, is_file=is_source_file)
        if not queries:
            raise Exception("Wrong Health query source.")

        try:
            for query in queries:
                if not query:
                    continue

                self._increment_counter(HealthResultCounter.QUERY_COUNTER)

                if query.lower() == "exit":
                    self._increment_counter(
                        HealthResultCounter.QUERY_SUCCESS_COUNTER)
                    break

                result = None
                if self._is_version_set_query(query):
                    # Directive, not a data query: switch the version filter.
                    self._filter_and_set_health_input_data(query)
                    self._increment_counter(
                        HealthResultCounter.QUERY_SUCCESS_COUNTER)
                    continue

                if self.no_valid_version:
                    # Current version constraint matched no node.
                    self._increment_counter(
                        HealthResultCounter.QUERY_SKIPPED_COUNTER)
                    continue

                if self._is_assert_query(query):
                    self._increment_counter(
                        HealthResultCounter.ASSERT_QUERY_COUNTER)

                try:
                    result = self._execute_query(query)
                    self._increment_counter(
                        HealthResultCounter.QUERY_SUCCESS_COUNTER)
                except SyntaxException as se:
                    self._increment_counter(
                        HealthResultCounter.SYNTAX_EXCEPTION_COUNTER)
                    self.syntax_exceptions.append({"index": self.status_counters[
                        HealthResultCounter.QUERY_COUNTER], "query": query, "error": str(se)})
                except HealthException as he:
                    self._increment_counter(
                        HealthResultCounter.HEALTH_EXCEPTION_COUNTER)
                    self.health_exceptions.append({"index": self.status_counters[
                        HealthResultCounter.QUERY_COUNTER], "query": query, "error": str(he)})
                except Exception as oe:
                    self._increment_counter(
                        HealthResultCounter.OTEHR_EXCEPTION_COUNTER)
                    self.other_exceptions.append({"index": self.status_counters[
                        HealthResultCounter.QUERY_COUNTER], "query": query, "error": str(oe)})

                if result:
                    try:
                        if isinstance(result, tuple):
                            if(result[0] == ParserResultType.ASSERT):
                                if result[1][AssertResultKey.SUCCESS]:
                                    self._increment_counter(HealthResultCounter.ASSERT_PASSED_COUNTER)
                                else:
                                    self._increment_counter(HealthResultCounter.ASSERT_FAILED_COUNTER)
                                self._add_assert_output(result[1])
                        elif is_health_parser_variable(result):
                            self._increment_counter(
                                HealthResultCounter.DEBUG_COUNTER)
                            self.debug_outputs.append(result)
                    except Exception:
                        # Best-effort result bookkeeping; never abort the run
                        # on a malformed result.
                        pass
        except Exception:
            # Deliberate best-effort: a failure mid-run still yields the
            # results gathered so far.
            pass

        return True

    def execute(self, query_file=None):
        """Run the built-in QUERIES (or the queries in `query_file`) and
        return the aggregated health-result dict."""
        health_summary = None
        if query_file is None:
            if not self._execute_queries(query_source=QUERIES, is_source_file=False):
                return {}
            health_summary = self._create_health_result_dict()
        elif query_file:
            if not self._execute_queries(query_source=query_file, is_source_file=True):
                return {}
            health_summary = self._create_health_result_dict()
        else:
            # Falsy non-None (e.g. empty string) is rejected.
            raise Exception("Wrong Query-file input for Health-Checker to execute")

        self.no_valid_version = False
        self._reset_parser()
        self._reset_counters()
        return health_summary
|
from rlstudio.agent import base as agent_base
from rlstudio.environment import base as env_base
from rlstudio.experiment import base as exp_base
from rlstudio.stats import base as stats_base
from rlstudio.typing import RunId
import numpy as np
from typing import List
class Experiment:
    """Defines an experiment: trains agent(s) on training tasks with periodic
    evaluation, then optionally tests them on held-out test tasks."""

    def __init__(self,
                 run_id: RunId,
                 config: exp_base.Configuration,
                 agents: List[agent_base.Agent]):
        self.run_id = run_id
        self.config = config
        self.agents = agents
        self.config.validate()

    def train(self, summary: stats_base.Summary = None) -> None:
        """Trains the agent on training tasks and optionally records statistics in `summary`."""
        np.set_printoptions(precision=2)
        time = -1
        # Training tasks are revisited `repeat` number of times.
        for round_id in range(self.config.repeat + 1):
            # Loop over training tasks one by one.
            for task in self.config.train_tasks:
                print(f'Training agent for {self.config.train_episodes} episodes on task {task.id()}')
                # Train the agent(s) for a given number of episodes on the current task.
                for episode in range(self.config.train_episodes):
                    # Part A: Evaluate the agent if it's evaluation time
                    # (every `train_eval_step` episodes, including episode 0).
                    if not episode % self.config.train_eval_step:
                        time += 1
                        metadata = exp_base.EvaluationMetadata(
                            self.run_id, time, task.id(), round_id, episode)
                        returns = self._eval(metadata, summary, task)
                        print(f'Episode {episode:4d}: Returns: {returns}')

                    # Part B: Train the agent from the beginning of the task to the end.
                    # Get the initial timestep, which contains an observation among other metadata.
                    timestep = task.reset()

                    # Reset agents between episodes if asked.
                    if self.config.reset_agent:
                        for agent in self.agents:
                            agent.reset()

                    # Loop until the episode terminates.
                    while not timestep.last():
                        # (I) Ask agent(s) to make a decision given the current timestep (observation).
                        decisions = [agent.decide(timestep, greedy=False)
                                     for agent in self.agents]
                        # (II) Pass decision to the environment and receive a new timestep (observation, reward).
                        new_timestep = task.step([d.action for d in decisions])
                        # (III) Inform the agent(s) of the transition from the old state to the new one.
                        for agent in self.agents:
                            agent.update(timestep, new_timestep)
                        # (IV) Update the timestep and repeat the above.
                        timestep = new_timestep

                # Post-training evaluation.
                time += 1
                metadata = exp_base.EvaluationMetadata(
                    self.run_id, time, task.id(), round_id, self.config.train_episodes)
                returns = self._eval(metadata, summary, task)
                print(f'Final eval: Returns: {returns}')

    def test(self, summary: stats_base.Summary = None) -> None:
        """Tests an agent on test tasks and optionally records statistics in `summary`."""
        if self.config.test_tasks is None:
            return

        np.set_printoptions(precision=2)
        time = -1
        for round_id in range(self.config.repeat + 1):
            # Fix: iterate tasks directly. The previous
            # `for task_idx, task in self.config.test_tasks` tuple-unpacking
            # fails on a plain task list (train() iterates train_tasks
            # directly), and task_idx was never used.
            for task in self.config.test_tasks:
                for episode in range(self.config.test_episodes):
                    time += 1
                    metadata = exp_base.EvaluationMetadata(
                        self.run_id, time, task.id(), round_id, episode)
                    returns = self._eval(metadata, summary, task)
                    print(f'Tested agent on task {task.id()}: Total return is {returns}')

    def _eval(self,
              metadata: exp_base.EvaluationMetadata,
              summary: stats_base.Summary,
              task: env_base.Task) -> float:
        """Evaluates the agent on a given task and records statistics.

        Note that, unlike training, during evaluation the agent takes
        a greedy action based on its current policy.
        """
        # Fix: np.float was removed from NumPy (1.24+); the builtin float
        # is the documented replacement and yields the same float64 dtype.
        returns = np.zeros(shape=len(self.agents), dtype=float)
        timestep = task.reset()
        if self.config.reset_agent:
            for agent in self.agents:
                agent.reset()

        while not timestep.last():
            decisions = [agent.decide(timestep, greedy=True)
                         for agent in self.agents]
            if summary is not None:
                summary.record_decisions(metadata, timestep, decisions)
            timestep = task.step([d.action for d in decisions])
            returns += timestep.reward

        if summary is not None:
            summary.record_returns(metadata, returns)
            summary.commit_episode(metadata)
        return returns
|
"""
Regression evaluation.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import logging
import itertools
import numbers
import numpy as np
import sklearn.metrics as skm
from tabulate import tabulate
import eta.core.utils as etau
import fiftyone.core.evaluation as foe
import fiftyone.core.fields as fof
import fiftyone.core.labels as fol
import fiftyone.core.plots as fop
import fiftyone.core.validation as fov
logger = logging.getLogger(__name__)
def evaluate_regressions(
    samples,
    pred_field,
    gt_field="ground_truth",
    eval_key=None,
    missing=None,
    method="simple",
    **kwargs,
):
    """Evaluates the regression predictions in the given collection with
    respect to the specified ground truth values.

    Extra ``kwargs`` are forwarded to the config class of the chosen
    evaluation ``method``. The supported methods and their configs are:

    -   ``"simple"``: :class:`SimpleEvaluationConfig`

    When an ``eval_key`` is provided, per-sample statistics are recorded:

    -   For sample-level fields, an ``eval_key`` field on each sample holds
        that sample's prediction error.

    -   For frame-level fields, an ``eval_key`` field on each frame holds
        that frame's prediction error, and an ``eval_key`` field on each
        sample holds the average error over its frames.

    Args:
        samples: a :class:`fiftyone.core.collections.SampleCollection`
        pred_field: the name of the field containing the predicted
            :class:`fiftyone.core.labels.Regression` instances
        gt_field ("ground_truth"): the name of the field containing the
            ground truth :class:`fiftyone.core.labels.Regression` instances
        eval_key (None): a string key to use to refer to this evaluation
        missing (None): a missing value. Any None-valued regressions are
            given this value for results purposes
        method ("simple"): a string specifying the evaluation method to use.
            Supported values are ``("simple")``
        **kwargs: optional keyword arguments for the constructor of the
            :class:`RegressionEvaluationConfig` being used

    Returns:
        a :class:`RegressionResults`
    """
    fov.validate_collection_label_fields(
        samples, (pred_field, gt_field), fol.Regression, same_type=True
    )

    eval_config = _parse_config(pred_field, gt_field, method, **kwargs)
    evaluator = eval_config.build()
    evaluator.ensure_requirements()

    evaluator.register_run(samples, eval_key)
    results = evaluator.evaluate_samples(
        samples, eval_key=eval_key, missing=missing
    )
    evaluator.save_run_results(samples, eval_key, results)

    return results
class RegressionEvaluationConfig(foe.EvaluationMethodConfig):
    """Base configuration for :class:`RegressionEvaluation` instances.

    Args:
        pred_field: the name of the field containing the predicted
            :class:`fiftyone.core.labels.Regression` instances
        gt_field ("ground_truth"): the name of the field containing the
            ground truth :class:`fiftyone.core.labels.Regression` instances
    """

    def __init__(self, pred_field, gt_field, **kwargs):
        super().__init__(**kwargs)
        self.gt_field = gt_field
        self.pred_field = pred_field
class RegressionEvaluation(foe.EvaluationMethod):
    """Base class for regression evaluation methods.

    Args:
        config: a :class:`RegressionEvaluationConfig`
    """

    def evaluate_samples(self, samples, eval_key=None, missing=None):
        """Evaluates the regression predictions in the given samples with
        respect to the specified ground truth values.

        Args:
            samples: a :class:`fiftyone.core.collections.SampleCollection`
            eval_key (None): an evaluation key for this evaluation
            missing (None): a missing value. Any None-valued regressions are
                given this value for results purposes

        Returns:
            a :class:`RegressionResults` instance
        """
        pass

    def get_fields(self, samples, eval_key):
        """Returns the sample-level (and, for video collections, frame-level)
        fields populated by a run with the given key."""
        fields = [eval_key]
        if samples._is_frame_field(self.config.gt_field):
            fields.append(samples._FRAMES_PREFIX + eval_key)

        return fields

    def cleanup(self, samples, eval_key):
        """Deletes the fields populated by the run with the given key."""
        to_delete = [eval_key]
        samples._dataset.delete_sample_fields(to_delete, error_level=1)
        if samples._is_frame_field(self.config.gt_field):
            samples._dataset.delete_frame_fields(to_delete, error_level=1)

    def _validate_run(self, samples, eval_key, existing_info):
        # Both label fields must match the existing run's configuration
        for field_name in ("pred_field", "gt_field"):
            self._validate_fields_match(eval_key, field_name, existing_info)
class SimpleEvaluationConfig(RegressionEvaluationConfig):
    """Configuration for :class:`SimpleEvaluation` instances.

    Args:
        pred_field: the name of the field containing the predicted
            :class:`fiftyone.core.labels.Regression` instances
        gt_field: the name of the field containing the ground truth
            :class:`fiftyone.core.labels.Regression` instances
        metric ("squared_error"): the error metric to use to populate
            sample/frame-level error data. Supported values are
            ``("squared_error", "absolute_error")`` or any function that
            accepts two scalar arguments ``(ypred, ytrue)``
    """

    def __init__(self, pred_field, gt_field, metric="squared_error", **kwargs):
        super().__init__(pred_field, gt_field, **kwargs)
        self._metric = metric

    @property
    def method(self):
        return "simple"

    @property
    def metric(self):
        # Custom callables are reported under the generic "custom" label
        if etau.is_str(self._metric):
            return self._metric

        return "custom"

    def attributes(self):
        return super().attributes() + ["metric"]
class SimpleEvaluation(RegressionEvaluation):
    """Simple regression evaluation.

    Args:
        config: a :class:`SimpleEvaluationConfig`
    """

    def evaluate_samples(self, samples, eval_key=None, missing=None):
        """Evaluates the regression predictions in the given samples with
        respect to the configured ground truth values.

        Args:
            samples: a :class:`fiftyone.core.collections.SampleCollection`
            eval_key (None): an evaluation key. If provided, per-sample
                (and per-frame, for video collections) error fields are
                populated on the dataset
            missing (None): a missing value. Any None-valued regressions
                are given this value for results purposes

        Returns:
            a :class:`RegressionResults` instance
        """
        metric = self.config._metric

        if metric == "squared_error":
            error_fcn = lambda yp, yt: (yp - yt) ** 2
        elif metric == "absolute_error":
            error_fcn = lambda yp, yt: abs(yp - yt)
        elif callable(metric):
            error_fcn = metric
        else:
            raise ValueError(
                "Unsupported metric '%s'. The supported values are %s or a "
                "function that accepts two scalar arguments `(ypred, ytrue)`"
                % (metric, ("squared_error", "absolute_error"))
            )

        pred_field = self.config.pred_field
        gt_field = self.config.gt_field
        is_frame_field = samples._is_frame_field(gt_field)

        gt = gt_field + ".value"
        pred = pred_field + ".value"
        pred_conf = pred_field + ".confidence"
        _id = "frames.id" if is_frame_field else "id"

        ytrue, ypred, confs, ids = samples.values([gt, pred, pred_conf, _id])

        if is_frame_field:
            # Flatten per-sample lists of frame values into flat lists
            _ytrue = list(itertools.chain.from_iterable(ytrue))
            _ypred = list(itertools.chain.from_iterable(ypred))
            _confs = list(itertools.chain.from_iterable(confs))
            _ids = list(itertools.chain.from_iterable(ids))
        else:
            _ytrue = ytrue
            _ypred = ypred
            _confs = confs
            _ids = ids

        results = RegressionResults(
            _ytrue,
            _ypred,
            confs=_confs,
            eval_key=eval_key,
            gt_field=gt_field,
            pred_field=pred_field,
            ids=_ids,
            missing=missing,
            samples=samples,
        )

        if eval_key is None:
            return results

        def compute_error(yp, yt):
            # Impute missing values, if requested
            if missing is not None:
                if yp is None:
                    yp = missing

                if yt is None:
                    yt = missing

            try:
                return error_fcn(yp, yt)
            except Exception:
                # A None value (when `missing` was not provided) or a
                # misbehaving custom metric yields a None error.
                # Bug fix: this was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit
                return None

        # note: fields are manually declared so they'll exist even when
        # `samples` is empty
        dataset = samples._dataset
        if is_frame_field:
            frame_errors = [
                list(map(compute_error, yp, yt))
                for yp, yt in zip(ypred, ytrue)
            ]
            sample_errors = [_safe_mean(e) for e in frame_errors]

            eval_frame = samples._FRAMES_PREFIX + eval_key

            # Sample-level errors (mean of each sample's frame errors)
            dataset._add_sample_field_if_necessary(eval_key, fof.FloatField)
            samples.set_values(eval_key, sample_errors)

            # Per-frame errors
            dataset._add_frame_field_if_necessary(eval_key, fof.FloatField)
            samples.set_values(eval_frame, frame_errors)
        else:
            errors = list(map(compute_error, ypred, ytrue))

            # Per-sample errors
            dataset._add_sample_field_if_necessary(eval_key, fof.FloatField)
            samples.set_values(eval_key, errors)

        return results
class RegressionResults(foe.EvaluationResults):
    """Stores the results of a regression evaluation.

    Args:
        ytrue: a list of ground truth values
        ypred: a list of predicted values
        confs (None): an optional list of confidences for the predictions
        eval_key (None): the evaluation key of the evaluation
        gt_field (None): the name of the ground truth field
        pred_field (None): the name of the predictions field
        ids (None): a list of sample or frame IDs corresponding to the
            regressions
        missing (None): a missing value. Any None-valued regressions are
            given this value for results purposes
        samples (None): the :class:`fiftyone.core.collections.SampleCollection`
            for which the results were computed
    """

    def __init__(
        self,
        ytrue,
        ypred,
        confs=None,
        eval_key=None,
        gt_field=None,
        pred_field=None,
        ids=None,
        missing=None,
        samples=None,
    ):
        # Drop (or impute, when `missing` is given) None-valued examples
        # up-front so the metrics below always see numeric arrays
        ytrue, ypred, confs, ids = _parse_values(
            ytrue, ypred, confs, ids, missing=missing
        )

        self.ytrue = ytrue
        self.ypred = ypred
        self.confs = confs
        self.ids = ids
        self.missing = missing
        self.eval_key = eval_key
        self.gt_field = gt_field
        self.pred_field = pred_field
        self._samples = samples

    def metrics(self, weights=None):
        """Computes various popular regression metrics for the results.

        The computed metrics are:

        -   Mean squared error: :func:`sklearn:sklearn.metrics.mean_squared_error`
        -   Root mean squared error: :func:`sklearn:sklearn.metrics.mean_squared_error`
        -   Mean absolute error: :func:`sklearn:sklearn.metrics.mean_absolute_error`
        -   Median absolute error: :func:`sklearn:sklearn.metrics.median_absolute_error`
        -   R^2 score: :func:`sklearn:sklearn.metrics.r2_score`
        -   Explained variance score: :func:`sklearn:sklearn.metrics.explained_variance_score`
        -   Max error: :func:`sklearn:sklearn.metrics.max_error`
        -   Support: the number of examples

        Args:
            weights (None): an optional list of weights for each example

        Returns:
            a dict
        """
        ytrue = self.ytrue
        ypred = self.ypred

        if ytrue.size == 0:
            # No (valid) examples; report all-zero metrics
            mse = rmse = mae = med_ae = r2 = ev = max_err = 0.0
            support = 0
        else:
            mse = skm.mean_squared_error(ytrue, ypred, sample_weight=weights)
            rmse = np.sqrt(mse)
            mae = skm.mean_absolute_error(ytrue, ypred, sample_weight=weights)
            med_ae = skm.median_absolute_error(ytrue, ypred)  # unweighted
            r2 = skm.r2_score(ytrue, ypred, sample_weight=weights)
            ev = skm.explained_variance_score(
                ytrue, ypred, sample_weight=weights
            )
            max_err = skm.max_error(ytrue, ypred)  # unweighted
            support = len(ytrue)

        return {
            "mean_squared_error": mse,
            "root_mean_squared_error": rmse,
            "mean_absolute_error": mae,
            "median_absolute_error": med_ae,
            "r2_score": r2,
            "explained_variance_score": ev,
            "max_error": max_err,
            "support": support,
        }

    def print_metrics(self, weights=None, digits=2):
        """Prints the regression metrics computed via :meth:`metrics`.

        Args:
            weights (None): an optional list of weights for each example
            digits (2): the number of digits of precision to print
        """
        _print_dict_as_table(self.metrics(weights=weights), digits)

    def plot_results(
        self, labels=None, sizes=None, backend="plotly", **kwargs
    ):
        """Plots the regression results.

        You can use the ``labels`` parameter to define a coloring for the
        points, and the ``sizes`` parameter to scale the sizes of the points.

        Plots generated by this method can be attached to an App session via
        its :attr:`fiftyone.core.session.Session.plots` attribute, which will
        automatically sync the session's view with the currently selected
        points in the plot.

        Args:
            labels (None): data to use to color the points. Can be any of the
                following:

                -   the name of a sample field or ``embedded.field.name`` of
                    from which to extract numeric or string values
                -   a :class:`fiftyone.core.expressions.ViewExpression`
                    defining numeric or string values to extract via
                    :meth:`fiftyone.core.collections.SampleCollection.values`
                -   a list or array-like of numeric or string values (or lists
                    of lists for frame-level regressions)
            sizes (None): data to use to scale the sizes of the points. Can be
                any of the following:

                -   the name of a sample field or ``embedded.field.name`` from
                    which to extract numeric values
                -   a :class:`fiftyone.core.expressions.ViewExpression`
                    defining numeric values to extract via
                    :meth:`fiftyone.core.collections.SampleCollection.values`
                -   a list or array-like of numeric values (or lists of lists
                    for frame-level regressions)
            backend ("plotly"): the plotting backend to use. Supported values
                are ``("plotly", "matplotlib")``
            **kwargs: keyword arguments for the backend plotting method:

                -   "plotly" backend: :meth:`fiftyone.core.plots.plotly.plot_regressions`
                -   "matplotlib" backend: :meth:`fiftyone.core.plots.matplotlib.plot_regressions`

        Returns:
            an :class:`fiftyone.core.plots.base.InteractivePlot`
        """
        return fop.plot_regressions(
            self.ytrue,
            self.ypred,
            samples=self._samples,
            ids=self.ids,
            labels=labels,
            sizes=sizes,
            gt_field=self.gt_field,
            pred_field=self.pred_field,
            backend=backend,
            **kwargs,
        )

    @classmethod
    def _from_dict(cls, d, samples, config, **kwargs):
        # All keys other than ytrue/ypred are optional in serialized results
        optional = {
            key: d.get(key, None)
            for key in (
                "confs",
                "eval_key",
                "gt_field",
                "pred_field",
                "ids",
                "missing",
            )
        }
        return cls(d["ytrue"], d["ypred"], samples=samples, **optional, **kwargs)
def _parse_config(pred_field, gt_field, method, **kwargs):
    """Builds the evaluation config for the given method name.

    Currently only the "simple" method (the default) is supported.
    """
    if method is None:
        method = "simple"

    if method != "simple":
        raise ValueError("Unsupported evaluation method '%s'" % method)

    return SimpleEvaluationConfig(pred_field, gt_field, **kwargs)
def _safe_mean(values):
values = [v for v in values if v is not None]
return np.mean(values) if values else None
def _parse_values(ytrue, ypred, *args, missing=None):
_ytrue = []
_ypred = []
_valid = []
missing_count = 0
for yt, yp in zip(ytrue, ypred):
v = yt is not None and yp is not None
if missing is None:
_valid.append(v)
if v:
_ytrue.append(yt)
_ypred.append(yp)
else:
missing_count += 1
if missing is not None:
if yt is None:
yt = missing
if yp is None:
yp = missing
_ytrue.append(yt)
_ypred.append(yp)
found_missing = missing_count > 0
_ytrue = np.array(_ytrue)
_ypred = np.array(_ypred)
if found_missing and missing is None:
logger.warning(
"Ignoring %d examples with either missing ground truth or "
"predictions",
missing_count,
)
valid = np.array(_valid)
args = [np.asarray(a)[valid] if a is not None else a for a in args]
else:
args = [np.asarray(a) if a is not None else a for a in args]
return (_ytrue, _ypred, *args)
def _print_dict_as_table(d, digits):
    """Prints ``d`` as a plain two-column table, formatting float values
    with ``digits`` digits of precision and underscored keys as words.
    """
    float_fmt = "%%.%df" % digits

    rows = []
    for key, value in d.items():
        if isinstance(value, numbers.Integral):
            formatted = str(value)
        else:
            formatted = float_fmt % value

        rows.append((key.replace("_", " "), formatted))

    print(tabulate(rows, tablefmt="plain", numalign="left"))
|
from .. import conf
from ..gen_utils import layout_comment as layout
from ..helpers import snippet_str_help
@snippet_str_help()
def lambda_advice(snippet, *, repeat=False, **_kwargs):
    """
    Look for use of lambda and give general advice on when / how to use.

    Returns None when the snippet contains no lambda or when the advice has
    already been shown (``repeat``); otherwise returns a dict mapping message
    levels to markdown content.
    """
    ## Fixed un-Pythonic membership test (was: `if not 'lambda' in snippet`)
    if 'lambda' not in snippet:
        return None
    if repeat:
        return None

    title = layout("""\
        ### Using `lambda`
        """)
    brief_msg = layout("""\
        Lambdas are commonly overused so it is worth reading
        [Overusing lambda expressions in Python](https://treyhunner.com/2018/09/stop-writing-lambda-expressions/).
        Having said this, lambdas used appropriately in sorting as key are idiomatic
        and readable as a result.
        """)
    main_msg = (
        layout("""\
            Lambda functions are anonymous functions. They have no name or
            documentation so should only be used where their brevity pros outweigh
            their readability cons.
            In their favour, lambdas are idiomatic when used as the key in sorting
            operations. They are also commonly relied upon when using libraries like
            Pandas (albeit not always wisely).
            Sometimes the alternatives are arguably worse. Consider the following
            alternatives - in this case it may be difficult to improve on the
            lambda:
            #### Use `lambda`
            Using a lambda creates one idiomatic line which obviously sorts by the
            country and city keys in that order
            """)
        +
        layout("""\
            addresses.sort(key=lambda address: (address['country'], address['city']))
            """, is_code=True)
        +
        layout("""\
            #### Alternative 1) use `operator` module
            Using `itemgetter` requires an extra import and it's not as idiomatic.
            Using `operator` functions may become more idiomatic but probably not.
            """)
        +
        ## Fixed broken example: a tuple of itemgetters is not a valid key
        ## callable; itemgetter accepts multiple keys and returns a tuple
        layout("""\
            from operator import itemgetter
            addresses.sort(key=itemgetter('country', 'city'))
            """, is_code=True)
        +
        layout("""\
            #### Alternative 2) define a conventional function
            Defining a named function adds many more lines of code and may break up
            the flow of overall program logic. Often not worth it when a very simple
            function.
            """)
        +
        layout("""\
            def country_city(address):
                return (address['country'], address['city'])

            addresses.sort(key=country_city)
            """, is_code=True)
        +
        layout("""\
            Lambdas are a clear mistake when they add nothing or when they become
            too complex.
            Lambdas add nothing apart from confusion and noise when they merely
            apply a function. In the following examples both lines in each pair
            achieve the same result:
            """)
        +
        layout("""\
            words.sort(key=lambda word: len(word))
            words.sort(key=len)

            numbers.sort(key=lambda num: abs(num))
            numbers.sort(key=abs)
            """, is_code=True)
        +
        layout("""\
            In the following example a simple, clearly documented (and testable)
            function is a much better option than a lambda. Compare:
            """)
        +
        layout("""\
            water_iot_vals.sort(
                key=lambda x: min((sum(x) + min(x)) / max(x), 1) + min(x) ** 2)
            """, is_code=True)
        +
        layout("""\
            against the more readable, maintainable, and testable:
            """)
        +
        ## Fixed unbalanced `min(` on the raw_clark_coeff line of the example
        ## (the min-capping happens on the following line)
        layout('''\
            def water_quality(vals):
                """
                Reading rank is based on ... etc etc
                The Tajik adjustment was favoured over the more traditional Core
                Value adjustment because ...
                See https://...calculating-clark-coefficients
                :param list vals: ...
                :return: water quality rating
                :rtype: float
                """
                raw_clark_coeff = (sum(vals) + min(vals)) / max(vals)
                corrected_clark_coeff = min(raw_clark_coeff, 1)
                tajik_adjustment = min(vals) ** 2
                adjusted_clark_coeff = corrected_clark_coeff + tajik_adjustment
                return adjusted_clark_coeff

            water_iot_vals.sort(key=water_quality)
            ''', is_code=True)
        + layout("""\
            The main rule with lambdas is to avoid them unless they make the code
            more readable. And if you must use them, remember that you can still use
            names that convey some meaning - for example `s` for string, `t` for
            tuple, `d` for dict, `l` for list, `nt` for namedtuple, `row` for a row
            etc. They will make the logic more intelligible and make mistakes more
            obvious. For example, x[-1] might be what was intended but it would be
            easier to tell if we saw:
            `lambda l: l[-1]` ## obviously the last item of a list
            or `lambda s: s[-1]` ## obviously the last character of a string
            or `lambda d: d[-1]` ## probably a mistake unless -1 is a key
            Of course, longer names should be used if they help.
            """)
    )
    extra = layout("""\
        Trey Hunner's [Overusing lambda expressions in Python](https://treyhunner.com/2018/09/stop-writing-lambda-expressions/) is well
        worth reading. The position taken in SuperHELP differs in some regards but
        has been influenced by Trey's thinking.
        If using functions from the `operator` module (`itemgetter`, `attrgetter`,
        and `methodcaller`) becomes more common and idiomatic then it could be time
        to substantially reduce the usage of `lambda`.
        """)

    message = {
        conf.Level.BRIEF: title + brief_msg,
        conf.Level.MAIN: title + main_msg,
        conf.Level.EXTRA: extra,
    }
    return message
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 1 11:16:43 2021
@author: <NAME>
"""
"""
Experiment for comparing the performance of the network as a fucntion of numbers of neuron.
The code is for system of differential equations with two unknowns.
"""
# use the autograd package for the computations of gradients
import functions
import autograd.numpy as np
import pandas as pd
import subprocess
import time
import matplotlib.pyplot as plt
# use a clean paper-style theme for all figures
plt.style.use('seaborn-paper')
np.random.seed(0) # to obtain a consistent solution
# define the right-hand side function of the system of differential equations
def right_side(x, y):
    """Right-hand side f(x, y) of the two-equation ODE system.

    The forcing terms are constructed so that y1 = sin(x), y2 = 1 + x^2 is
    the exact solution.
    """
    component_1 = np.cos(x) + y[0] ** 2 + y[1] - (1 + x ** 2 + np.sin(x) * np.sin(x))
    component_2 = 2 * x - (1 + x * x) * np.sin(x) + y[0] * y[1]
    return np.array([component_1, component_2])
# the initial conditions: y1(0) = a1 and y2(0) = a2
a1 = 0
a2= 1.0
# the analytical solution is given by
def analytic(x):
    """Analytical solution of the system: y1 = sin(x), y2 = 1 + x^2.

    Returns a (2, len(x)) array with one row per component.
    """
    return np.vstack([np.sin(x), 1 + x * x])
#=============Experiment 3: Implementation and comparison with analytical solution=====================
"""
This is the code for implementation of the ANN.
A system of differential equations with two unknowns. Compared with the analytical solution.
"""
# Input Data
a, b = [0.0, 1.0] # the domain
m =11 # number of sample points between a and b
t = np.linspace(a,b, m) # generate the points
x = (t-min(t))/(max(t)-min(t)) # normalize the data to [0, 1]
X = np.zeros((1,m)) # 2-D (1, m) inputs so the model generalizes to systems
X[0] = x
T = np.zeros((1,m))
T[0]= t
# Define the neural network structure
h = 60 # number of hidden neurons
ITs = 30000 # maximum number of iterations
tol = 1e-06 # tolerance for the training loss
ANN_er = [] # for storing neural network solution errors
RK4_er = [] # for storing Runge-Kutta solution errors
# train the network; returns per-component parameter dicts plus the cost history
param1, param2, Cost, Iter = functions.ode_nn_model1(X, h, a1, a2, a, right_side, tol, ITs, iter = 0, print_cost = True)
# Using the learned parameters we compute the ANN solutions
def Yt1(X, param1):
    """ANN trial solution for the first component: a1 + (X - a) * N(X).

    The (X - a) factor enforces the initial condition y1(a) = a1 exactly.
    """
    W11, b11 = param1["W11"], param1["b11"]
    W12, b12 = param1["W12"], param1["b12"]

    # single hidden layer with sigmoid activation, linear output
    hidden = functions.sigmoid(np.dot(W11, X) + b11)
    output = np.dot(W12, hidden) + b12

    return np.array(a1 + (X - a) * output[0]) # the first component
def Yt2(X, param2):
    """ANN trial solution for the second component: a2 + (X - a) * A2[1].

    The (X - a) factor enforces the initial condition y2(a) = a2 exactly.
    """
    # retrieve the learned parameters for the second sub-network
    W21 = param2["W21"]
    b21 = param2["b21"]
    W22 = param2["W22"]
    b22 = param2["b22"]
    # single hidden layer with sigmoid activation, linear output
    Z1 = np.dot(W21,X) + b21
    A1 = functions.sigmoid(Z1)
    Z2 = np.dot(W22,A1)+ b22
    A2 = Z2
    # NOTE(review): this indexes row 1 of the output while Yt1 uses row 0 —
    # confirm W22 actually produces at least two output rows, otherwise this
    # should likely be A2[0]
    return np.array(a2 + (X-a)*A2[1]) # the second component
# compute the ANN solution on the sample points
y1 = Yt1(T, param1)
y2 = Yt2(T, param2)
# For comparison compute the exact solution
exact = analytic(t)
# Plot the ANN and exact trajectories in the top panel
fig3=plt.figure(figsize= (10,8))
plt.subplot(2, 1, 1)
plt.plot(t, y1[0], 'g--o')
plt.plot(t, y2[0], 'r--o')
plt.plot(t, exact[0],'blue', lw=2)
plt.plot(t, exact[1],'blue', lw=2)
plt.legend(['ANN $y_1$','ANN $y_2$', 'Exact $y_1$','Exact $y_2$'])
# absolute errors of each ANN component vs the analytical solution
err_1= abs(y1[0]-exact[0])
err_2= abs(y2[0]-exact[1])
plt.subplot(2,1,2)
plt.plot(t, err_1,'-go')
plt.plot(t, err_2,'-r*')
plt.legend(['error $y_1$', 'error $y_2$'])
# save the plot
plt.savefig('../results/fig4.eps')
plt.show()
# create tables for the solutions and the corresponding errors
# NOTE(review): "Analyitc" below is a typo in a displayed column label
data = {'ANN $y_1$': y1[0], 'Analytic $y_1$':exact[0], 'ANN $y_2$': y2[0],'Analyitc $y_2$': exact[1]}
data_err = {'error $y_1$': err_1, 'error $y_2$': err_2 }
# build data frames indexed by the sample points
summary_table = pd.DataFrame(data= data , index = t)
summary_err = pd.DataFrame(data = data_err, index = t)
# the following code saves each table as LaTeX and compiles it to PDF
filename = ['../results/out_ex1.tex','../results/err.tex']
pdffile = ['../results/out_ex1.pdf', '../results/err.pdf']
# minimal standalone LaTeX document wrapping a tabular body; {{ }} escape
# literal braces for str.format, {} receives the table
template = r'''\documentclass{{standalone}}
\usepackage{{booktabs}}
\begin{{document}}
{}
\end{{document}}
'''
with open(filename[0], 'w') as f:
    f.write(template.format(summary_table.to_latex()))
subprocess.run(['pdflatex', filename[0]])
with open(filename[1], 'w') as f:
    f.write(template.format(summary_err.to_latex()))
subprocess.run(['pdflatex', filename[1]])
|
import logging
import signal
import os
import json
import subprocess
import re
import boto3
import requests
# make the bundled cloudbd CLI (shipped under the deployment package's bin/) discoverable
os.environ['PATH'] = os.environ['PATH'] + ":" + os.environ.get('LAMBDA_TASK_ROOT', '.') + '/bin'
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
# CloudFormation custom resource response statuses
SUCCESS = "SUCCESS"
FAILED = "FAILED"
# cloudbd CLI exit codes
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
EXIT_DISK_NOT_FOUND = 2
def send(event, context, responseStatus, responseData=None, physicalResourceId=None, noEcho=False, responseReason=None):
    """PUTs a CloudFormation custom resource response to the request's
    pre-signed ResponseURL.

    Failures to deliver the response are logged but never raised, so the
    caller's control flow is unaffected.
    """
    response_url = event['ResponseURL']

    # Always point the operator at the CloudWatch log stream for details
    if responseReason:
        responseReason += ' '
    else:
        responseReason = ''
    responseReason += 'See the details in CloudWatch Log Stream: ' + context.log_stream_name

    body = {
        'Status': responseStatus,
        'Reason': responseReason,
        'PhysicalResourceId': physicalResourceId or context.log_stream_name,
        'StackId': event['StackId'],
        'RequestId': event['RequestId'],
        'LogicalResourceId': event['LogicalResourceId'],
        'NoEcho': noEcho,
    }
    if responseData:
        body['Data'] = responseData

    encoded = json.dumps(body, separators=(",",":"))
    LOGGER.info("Response body:\n" + encoded)

    # Empty content-type is required for the pre-signed S3 URL
    headers = {
        'content-type' : '',
        'content-length' : str(len(encoded))
    }
    try:
        response = requests.put(response_url, data=encoded, headers=headers)
        LOGGER.info("Status code: " + response.reason)
    except Exception as e:
        LOGGER.info("send(..) failed executing requests.put(..): " + str(e))
class TimeoutError(Exception):
    """Raised by the SIGALRM handler when the Lambda deadline is near.

    NOTE(review): this shadows the builtin ``TimeoutError``; handlers in
    this module that raise/catch ``TimeoutError`` resolve to this class.
    """
class CredentialsError(Exception):
    """Raised when the CloudBD credentials cannot be fetched from SSM."""
def timeout_handler(_signal, _frame):
    """SIGALRM handler: aborts the in-flight request via TimeoutError."""
    raise TimeoutError('Time limit exceeded')
# abort in-flight work shortly before the Lambda deadline (armed in handler())
signal.signal(signal.SIGALRM, timeout_handler)
# on-disk cache for the credentials; /tmp persists across warm invocations
creds_path = "/tmp/cloudbd-credentials.json"
def get_credentials(ssm_region, ssm_parameter):
    """Downloads the CloudBD credentials from SSM into ``creds_path``.

    The file is cached in /tmp, which persists across warm Lambda
    invocations, so the parameter is only fetched once per container.

    Args:
        ssm_region: the region of the SSM parameter store
        ssm_parameter: the name of the (encrypted) SSM parameter

    Raises:
        CredentialsError: if the parameter cannot be retrieved or written
    """
    try:
        if not os.path.isfile(creds_path):
            ssm = boto3.client('ssm', region_name=ssm_region)
            creds = ssm.get_parameter(Name=ssm_parameter, WithDecryption=True)['Parameter']['Value']
            with open(creds_path, "w") as creds_file:
                creds_file.write(creds)
    except Exception as e:
        LOGGER.info(str(e))
        # Bug fix: corrected "paramater" typo in the raised error message
        raise CredentialsError("unable to download CloudBD credentials from SSM parameter store")
def create_disk(remote, disk, size):
    """Creates a CloudBD disk via the ``cloudbd`` CLI (raises on failure)."""
    cmd = [
        "cloudbd", "create",
        "--creds=" + creds_path,
        "--remote=" + remote,
        "--disk=" + disk,
        "--size=" + size,
    ]
    subprocess.check_call(cmd)
def delete_disk(remote, disk):
    """Deletes a CloudBD disk via the ``cloudbd`` CLI (raises on failure)."""
    cmd = [
        "cloudbd", "delete",
        "--creds=" + creds_path,
        "--remote=" + remote,
        "--disk=" + disk,
    ]
    subprocess.check_call(cmd)
def get_disk_info(remote, disk):
    """Returns the ``cloudbd info -e`` key=value output as a dict.

    NOTE(review): unlike create/delete, no --creds flag is passed here —
    confirm the CLI resolves credentials another way for ``info``.
    """
    cmd = ["cloudbd", "info", "--remote=" + remote, "--disk=" + disk, "-e"]
    raw = subprocess.check_output(cmd).decode("utf-8")

    # parse KEY=value (optionally double-quoted) lines into a dict
    return dict(re.findall(r'(\S+)\s*=\s*(".*?"|\S+)', raw))
def handler(event, context):
    """CloudFormation custom resource entry point for CloudBD disks.

    Creates a disk on Create, rejects Update, and deletes on Delete
    (tolerating already-deleted disks). A response is always sent via
    ``send`` so the stack never hangs waiting on this resource.
    """
    # "0" marks "no physical resource was ever created"
    physicalResourceId = "0"
    # raise TimeoutError one second before the Lambda deadline so a FAILED
    # response can still be delivered to CloudFormation
    signal.alarm(int(context.get_remaining_time_in_millis() / 1000) - 1)
    LOGGER.info('Request received event:\n%s', json.dumps(event))
    try:
        requestType = event['RequestType']
        if requestType == 'Update':
            # disk properties are immutable; fail fast without touching storage
            LOGGER.info("Request failed: cannot update a CloudBD disk")
            send(event, context, FAILED, None, physicalResourceId, None, "Cannot change CloudBD disk properties")
            return
        elif requestType == 'Delete':
            physicalResourceId = event['PhysicalResourceId']
            # creation never happened, so there is nothing to delete
            if physicalResourceId == "0":
                send(event, context, SUCCESS, None, physicalResourceId)
                return
        get_credentials(os.environ['CLOUDBD_CREDS_SSM_REGION'], os.environ['CLOUDBD_CREDS_SSM_PARAM'])
        # build an inline "data:" remote using this Lambda's temporary IAM credentials
        remote_dict = {
            "type": "aws_iam_temp",
            "region": os.environ['AWS_REGION'],
            "bucket": os.environ['CLOUDBD_REMOTE_BUCKET'],
            "protocol": "https",
            "access_key_id": os.environ['AWS_ACCESS_KEY_ID'],
            "secret_access_key": os.environ['AWS_SECRET_ACCESS_KEY'],
            "session_token": os.environ['AWS_SESSION_TOKEN']
        }
        remote = 'data:application/json,' + json.dumps(remote_dict, separators=(",",":"))
        disk = event['ResourceProperties']['Name']
        if requestType == 'Create':
            size = event['ResourceProperties']['Size']
            LOGGER.info("Request cloudbd create disk '%s' of size '%s'", disk, size)
            create_disk(remote, disk, size)
            # the disk UUID becomes the CloudFormation physical resource ID
            diskinfo = get_disk_info(remote, disk)
            physicalResourceId = "cbd-" + diskinfo['CBD_UUID']
            send(event, context, SUCCESS,
                {
                    'Name': diskinfo['CBD_DEVICE'],
                    'Size': diskinfo['CBD_SIZE'],
                    'Uuid': physicalResourceId
                },
                physicalResourceId)
        elif requestType == 'Delete':
            physicalResourceId = event['PhysicalResourceId']
            LOGGER.info("Request cloudbd destroy disk '%s (%s)'", disk, physicalResourceId)
            try:
                # only delete the disk if its UUID matches the one this stack created
                diskinfo = get_disk_info(remote, disk)
                if "cbd-" + diskinfo['CBD_UUID'] == physicalResourceId:
                    delete_disk(remote, disk)
                else:
                    LOGGER.info("Disk UUID mismatch, non-CloudFormation managed resource exists... skipping delete")
                send(event, context, SUCCESS, None, physicalResourceId)
            except subprocess.CalledProcessError as e:
                # a missing disk is a successful delete (idempotency)
                if e.returncode == EXIT_DISK_NOT_FOUND:
                    LOGGER.info("Disk not found, assuming already deleted")
                    send(event, context, SUCCESS, None, physicalResourceId)
                else:
                    LOGGER.info("Failed to delete disk '%s': %s", disk, e.output)
                    send(event, context, FAILED, None, physicalResourceId)
        else:
            LOGGER.info("Request failed: unexpected event type '%s'", event['RequestType'])
            send(event, context, FAILED, None, physicalResourceId)
    except TimeoutError:
        LOGGER.info("Request failed: time limit exceeded")
        send(event, context, FAILED, None, physicalResourceId, None, "Time limit exceeded.")
    except CredentialsError:
        LOGGER.info("Request failed: unable to get CloudBD credentials from SSM parameter")
        send(event, context, FAILED, None, physicalResourceId, None, "Failed to get CloudBD credentials from SSM parameter.")
    except Exception as e:
        LOGGER.info("Request failed: %s", repr(e))
        send(event, context, FAILED, None, physicalResourceId)
    finally:
        # disarm the alarm so it cannot fire in a later (warm) invocation
        signal.alarm(0)
|
#! /usr/bin/env python
#################################################################################
# File Name : IPADS_GraphX_Plot.py
# Created By : xiaodi
# Creation Date : [2014-08-13 08:46]
# Last Modified : [2014-08-14 22:04]
# Description :
#################################################################################
# Time Load Partition Data Strategy Threshold Vertices Edges Replications NumParts Factor stddev avg min max stddev avg min max
# 20140812-2011,11348,105, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,HybridCut,0,1000000,8179662,3480904,16,3.480904,vertices,139.720703548,217556.5,217231,217807,0.002647588098,edges,0.695970545354,511228.875,511228,511230,0.000003912142091
# 20140812-2012,11372,106, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,HybridCut,1000000,1000000,8179662,7125874,16,7.125874,vertices,66531.0864313,445367.125,374997,623511,0.5579980786,edges,114540.481712,511228.875,392679,843821,0.8824658036
# 20140812-2012,10929,105, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,HybridCut,100,1000000,8179662,3789756,16,3.789756,vertices,1166.5307272,236859.75,234989,239127,0.01747025402,edges,1519.62412602,511228.875,508949,513901,0.009686463817
# 20140812-2013,10934,24, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,EdgePartition2D,0,1000000,8179662,4976609,16,4.976609,vertices,43705.432858,311038.0625,230625,347599,0.3760761595,edges,40658.8290594,511228.875,452561,567485,0.2247995088
# 20140812-2014,11280,22, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,EdgePartition1DDst,0,1000000,8179662,7125874,16,7.125874,vertices,66531.0864313,445367.125,374997,623511,0.5579980786,edges,114540.481712,511228.875,392679,843821,0.8824658036
# 20140812-2015,11129,23, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,RandomVertexCut,0,1000000,8179662,7953123,16,7.953123,vertices,401.371277427,497070.1875,496114,497616,0.003021706064,edges,660.239528031,511228.875,510032,512646,0.005113169713
# 20140812-2015,11193,23, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,CanonicalRandomVertexCut,0,1000000,8179662,7953080,16,7.95308,vertices,521.414182776,497067.5,495757,498061,0.004635185362,edges,703.616628126,511228.875,509752,512223,0.004833451553
# 20140812-2016,13052,107, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,HybridCut,0,1000000,8179662,3688730,25,3.68873,vertices,155.248446047,147549.2,147331,147911,0.003930892204,edges,9.38986687872,327186.48,327167,327206,0.0001191980793
# 20140812-2018,13242,103, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,HybridCut,1000000,1000000,8179662,7798577,25,7.798577,vertices,70052.7796267,311943.08,240206,550682,0.9952969625,edges,92676.6116675,327186.48,231459,636202,1.237040724
# 20140812-2017,13251,104, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,HybridCut,100,1000000,8179662,3992878,25,3.992878,vertices,1262.69889744,159715.12,157800,162249,0.02785584734,edges,1497.14608826,327186.48,324773,330277,0.01682221099
# 20140812-2019,13090,23, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,EdgePartition2D,0,1000000,8179662,5607006,25,5.607006,vertices,25163.2584429,224280.24,169954,253262,0.3714460088,edges,57164.4456851,327186.48,268625,422743,0.4710402459
# 20140812-2019,13050,23, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,EdgePartition1DDst,0,1000000,8179662,7798577,25,7.798577,vertices,70052.7796267,311943.08,240206,550682,0.9952969625,edges,92676.6116675,327186.48,231459,636202,1.237040724
# 20140812-2020,12870,23, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,RandomVertexCut,0,1000000,8179662,8963526,25,8.963526,vertices,560.645947457,358541.04,357418,359602,0.006091352889,edges,631.597537677,327186.48,325858,328364,0.007659240687
# 20140812-2021,13460,23, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,CanonicalRandomVertexCut,0,1000000,8179662,8965461,25,8.965461,vertices,557.853893417,358618.44,357290,359665,0.006622637698,edges,632.364079941,327186.48,325660,328288,0.00803211673
# 20140812-2022,12819,103, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,HybridCut,0,1000000,8179662,3955644,48,3.955644,vertices,151.091790534,82409.25,82140,82736,0.007232197842,edges,235.032677249,170409.625,170240,170746,0.002969315847
# 20140812-2024,13235,101, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,HybridCut,1000000,1000000,8179662,8440782,48,8.440782,vertices,53850.2421565,175849.625,126650,436468,1.761834863,edges,61614.1471869,170409.625,113486,464422,2.059367245
# 20140812-2023,13156,102, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,HybridCut,100,1000000,8179662,4263219,48,4.263219,vertices,896.914117178,88817.0625,86929,90783,0.04339256323,edges,971.900964283,170409.625,168537,172766,0.02481667335
# 20140812-2024,13104,23, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,EdgePartition2D,0,1000000,8179662,6770593,48,6.770593,vertices,16175.1381428,141054.020833,109284,221935,0.7986372833,edges,29135.9185518,170409.625,146634,305638,0.9330693615
# 20140812-2025,13281,23, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,EdgePartition1DDst,0,1000000,8179662,8440782,48,8.440782,vertices,53850.2421565,175849.625,126650,436468,1.761834863,edges,61614.1471869,170409.625,113486,464422,2.059367245
# 20140812-2026,12799,23, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,RandomVertexCut,0,1000000,8179662,10109881,48,10.109881,vertices,477.444935288,210622.520833,209523,211580,0.009766287061,edges,431.044643135,170409.625,169530,171233,0.009993566971
# 20140812-2027,13096,22, /data/sdd1/xiaodi/data/in-2.0-1m V1000000 E8179662,CanonicalRandomVertexCut,0,1000000,8179662,10108498,48,10.108498,vertices,452.356881526,210593.708333,209842,211920,0.00986734132,edges,411.01016943,170409.625,169777,171285,0.00884926541
import numpy as np
import matplotlib.pyplot as plt
# replications, of different partition strategy, by number of partitions
# Each chart below compares five GraphX partition strategies across three
# partition counts (16, 25, 48) on the in-2.0-1m graph.  The raw numbers
# come from the log lines recorded in the comments at the top of the file.
n_groups = 3
_GROUP_LABELS = ('16', '25', '48')
_BAR_WIDTH = 0.1
_DATASET = 'in-2.0-1m V1000000 E8179662'


def _plot_grouped_bars(series, ylabel, title, filename):
    """Draw one grouped bar chart and save it to *filename*.

    series: sequence of (label, values) pairs; each ``values`` tuple has
        one entry per partition count and the pairs are plotted
        left-to-right within each group, in the order given.
    ylabel/title: axis label and chart title strings.
    filename: output PNG path passed to ``plt.savefig``.
    """
    plt.subplots()  # start a fresh figure for this chart
    index = np.arange(n_groups)
    error_config = {'ecolor': '0.3'}
    for offset, (label, values) in enumerate(series):
        plt.bar(index + offset * _BAR_WIDTH, values, _BAR_WIDTH,
                alpha=0.4,
                error_kw=error_config,
                label=label)
    plt.xlabel('# of Partitions')
    plt.ylabel(ylabel)
    plt.title(title)
    # Center the tick labels under the middle (third) bar of each group.
    plt.xticks(index + 2 * _BAR_WIDTH, _GROUP_LABELS)
    plt.legend(loc="upper left", ncol=2)
    plt.tight_layout()
    plt.savefig(filename)


# Figure 1: replication factor, by partition strategy and count.
_plot_grouped_bars(
    [('RandomVertexCut', (7.953123, 8.963526, 10.109881)),
     ('CanonicalRandomVertexCut', (7.95308, 8.965461, 10.108498)),
     ('EdgePartition1D', (7.125874, 7.798577, 8.440782)),
     ('EdgePartition2D', (4.976609, 5.607006, 6.770593)),
     ('HybridCut', (3.789756, 3.992878, 4.263219))],
    'Replication Factor',
    'Replication Factor by # of Partitions (%s)' % _DATASET,
    '1.png')

# Figure 2: ingress (graph-loading) time.  NOTE: the original title here
# said "Replication Factor" -- a copy-paste bug, fixed to match the data.
_plot_grouped_bars(
    [('RandomVertexCut', (23, 23, 23)),
     ('CanonicalRandomVertexCut', (23, 23, 22)),
     ('EdgePartition1D', (22, 23, 23)),
     ('EdgePartition2D', (24, 23, 23)),
     ('HybridCut', (105, 104, 102))],
    'Ingress Time (ms)',
    'Ingress Time by # of Partitions (%s)' % _DATASET,
    '2.png')

# Figure 3: standard deviation of per-partition vertex counts.
_plot_grouped_bars(
    [('RandomVertexCut', (401.371277427, 560.645947457, 477.444935288)),
     ('CanonicalRandomVertexCut',
      (521.414182776, 557.853893417, 452.356881526)),
     ('EdgePartition1D', (66531.0864313, 70052.7796267, 53850.2421565)),
     ('EdgePartition2D', (43705.432858, 25163.2584429, 16175.1381428)),
     ('HybridCut', (1166.5307272, 1262.69889744, 896.914117178))],
    'stdev of vertices',
    'stdev of vertices by # of Partitions (%s)' % _DATASET,
    '3.png')

# Figure 4: standard deviation of per-partition edge counts.
_plot_grouped_bars(
    [('RandomVertexCut', (660.239528031, 631.597537677, 431.044643135)),
     ('CanonicalRandomVertexCut',
      (703.616628126, 632.364079941, 411.01016943)),
     ('EdgePartition1D', (114540.481712, 92676.6116675, 61614.1471869)),
     ('EdgePartition2D', (40658.8290594, 57164.4456851, 29135.9185518)),
     ('HybridCut', (1519.62412602, 1497.14608826, 971.900964283))],
    'stdev of edges',
    'stdev of edges by # of Partitions (%s)' % _DATASET,
    '4.png')

# Figure 5: (max-min)/avg vertex-load imbalance.
_plot_grouped_bars(
    [('RandomVertexCut', (0.004635185362, 0.006091352889, 0.009766287061)),
     ('CanonicalRandomVertexCut',
      (0.003021706064, 0.006622637698, 0.00986734132)),
     ('EdgePartition1D', (0.5579980786, 0.9952969625, 1.761834863)),
     ('EdgePartition2D', (0.3760761595, 0.3714460088, 0.7986372833)),
     ('HybridCut', (0.01747025402, 0.02785584734, 0.04339256323))],
    '(max-min)/avg of vertices',
    '(max-min)/avg of vertices by # of Partitions (%s)' % _DATASET,
    '5.png')

# Figure 6: (max-min)/avg edge-load imbalance.
_plot_grouped_bars(
    [('RandomVertexCut', (0.004833451553, 0.007659240687, 0.009993566971)),
     ('CanonicalRandomVertexCut',
      (0.005113169713, 0.00803211673, 0.00884926541)),
     ('EdgePartition1D', (0.8824658036, 1.237040724, 2.059367245)),
     ('EdgePartition2D', (0.2247995088, 0.4710402459, 0.9330693615)),
     ('HybridCut', (0.009686463817, 0.01682221099, 0.02481667335))],
    '(max-min)/avg of edges',
    '(max-min)/avg of edges by # of Partitions (%s)' % _DATASET,
    '6.png')
|
"""
Code to generate a Python model from a database or differences
between a model and database.
Some of this is borrowed heavily from the AutoCode project at:
http://code.google.com/p/sqlautocode/
"""
import sys
import logging
import six
import sqlalchemy
import migrate
import migrate.changeset
# Module-level logger for this generator module.
log = logging.getLogger(__name__)
# Boilerplate emitted at the top of every generated model file.
HEADER = """
## File autogenerated by genmodel.py
from sqlalchemy import *
"""
# Emitted when generating classic Table()-style models.
META_DEFINITION = "meta = MetaData()"
# Emitted instead of META_DEFINITION when declarative output is requested.
DECLARATIVE_DEFINITION = """
from sqlalchemy.ext import declarative
Base = declarative.declarative_base()
"""
class ModelGenerator(object):
    """Various transformations from an A, B diff.
    In the implementation, A tends to be called the model and B
    the database (although this is not true of all diffs).
    The diff is directionless, but transformations apply the diff
    in a particular direction, described in the method name.
    """
    def __init__(self, diff, engine, declarative=False):
        # diff: schema-diff object exposing tables_missing_from_A/_B,
        #   tables_different, metadataA, and metadataB (see usage below).
        # engine: SQLAlchemy engine bound to the live database (B side).
        # declarative: emit declarative-base classes instead of Table()
        #   definitions in generated source.
        self.diff = diff
        self.engine = engine
        self.declarative = declarative
    def column_repr(self, col):
        """Return a source-code string declaring *col* as a Column().

        The output form depends on self.declarative:
        declarative -> "name = Column(...)"; classic -> "Column('name', ...)".
        """
        # Collect the names of keyword args worth emitting; values are
        # pulled from the column via getattr() below.
        kwarg = []
        if col.key != col.name:
            kwarg.append('key')
        if col.primary_key:
            col.primary_key = True  # otherwise it dumps it as 1
            kwarg.append('primary_key')
        if not col.nullable:
            kwarg.append('nullable')
        if col.onupdate:
            kwarg.append('onupdate')
        if col.default:
            if col.primary_key:
                # I found that PostgreSQL automatically creates a
                # default value for the sequence, but let's not show
                # that.
                pass
            else:
                kwarg.append('default')
        args = ['%s=%r' % (k, getattr(col, k)) for k in kwarg]
        # crs: not sure if this is good idea, but it gets rid of extra
        # u''
        if six.PY3:
            name = col.name
        else:
            name = col.name.encode('utf8')
        # Walk the type's MRO to find the generic (CamelCase) SQLAlchemy
        # type rather than a dialect-specific UPPERCASE one, so the
        # generated code stays portable.
        type_ = col.type
        for cls in col.type.__class__.__mro__:
            if cls.__module__ == 'sqlalchemy.types' and \
                not cls.__name__.isupper():
                if cls is not type_.__class__:
                    type_ = cls()
                break
        type_repr = repr(type_)
        if type_repr.endswith('()'):
            # Drop the no-arg call for brevity: "Integer()" -> "Integer".
            type_repr = type_repr[:-2]
        constraints = [repr(cn) for cn in col.constraints]
        data = {
            'name': name,
            'commonStuff': ', '.join([type_repr] + constraints + args),
        }
        if self.declarative:
            return """%(name)s = Column(%(commonStuff)s)""" % data
        else:
            return """Column(%(name)r, %(commonStuff)s)""" % data
    def _getTableDefn(self, table, metaName='meta'):
        """Return a list of source lines defining *table*.

        metaName: name of the MetaData variable referenced by classic
        Table() output (ignored for declarative output).
        """
        out = []
        tableName = table.name
        if self.declarative:
            out.append("class %(table)s(Base):" % {'table': tableName})
            out.append("    __tablename__ = '%(table)s'\n" %
                       {'table': tableName})
            for col in table.columns:
                out.append("    %s" % self.column_repr(col))
            out.append('\n')
        else:
            out.append("%(table)s = Table('%(table)s', %(meta)s," %
                       {'table': tableName, 'meta': metaName})
            for col in table.columns:
                out.append("    %s," % self.column_repr(col))
            out.append(")\n")
        return out
    def _get_tables(self,missingA=False,missingB=False,modified=False):
        """Yield Table objects for the selected categories of the diff.

        Tables missing from A are looked up in metadataB; tables missing
        from B and modified tables are looked up in metadataA.
        """
        # NOTE(review): to_process is never used -- dead local.
        to_process = []
        for bool_,names,metadata in (
            (missingA,self.diff.tables_missing_from_A,self.diff.metadataB),
            (missingB,self.diff.tables_missing_from_B,self.diff.metadataA),
            (modified,self.diff.tables_different,self.diff.metadataA),
                ):
            if bool_:
                for name in names:
                    yield metadata.tables.get(name)
    def _genModelHeader(self, tables):
        """Return header source lines (imports + MetaData/Base setup).

        Adds an explicit import for any dialect-specific column type used
        by *tables*, since those are not covered by "from sqlalchemy import *".
        """
        out = []
        import_index = []
        out.append(HEADER)
        for table in tables:
            for col in table.columns:
                if "dialects" in col.type.__module__ and \
                        col.type.__class__ not in import_index:
                    out.append("from " + col.type.__module__ +
                               " import " + col.type.__class__.__name__)
                    import_index.append(col.type.__class__)
        out.append("")
        if self.declarative:
            out.append(DECLARATIVE_DEFINITION)
        else:
            out.append(META_DEFINITION)
        out.append("")
        return out
    def genBDefinition(self):
        """Generates the source code for a definition of B.
        Assumes a diff where A is empty.
        Was: toPython. Assume database (B) is current and model (A) is empty.
        """
        out = []
        # _get_tables returns a generator, so it is called twice: once for
        # the header pass and once for the definition pass.
        out.extend(self._genModelHeader(self._get_tables(missingA=True)))
        for table in self._get_tables(missingA=True):
            out.extend(self._getTableDefn(table))
        return '\n'.join(out)
    def genB2AMigration(self, indent='    '):
        '''Generate a migration from B to A.
        Was: toUpgradeDowngradePython
        Assume model (A) is most current and database (B) is out-of-date.

        Returns a (declarations, upgrade_body, downgrade_body) tuple of
        source-code strings; the bodies are indented by *indent*.
        '''
        decls = ['from migrate.changeset import schema',
                 'pre_meta = MetaData()',
                 'post_meta = MetaData()',
                 ]
        upgradeCommands = ['pre_meta.bind = migrate_engine',
                           'post_meta.bind = migrate_engine']
        downgradeCommands = list(upgradeCommands)
        # Tables only in the database (B): drop on upgrade, recreate on
        # downgrade. Declared against pre_meta (the pre-upgrade schema).
        for tn in self.diff.tables_missing_from_A:
            pre_table = self.diff.metadataB.tables[tn]
            decls.extend(self._getTableDefn(pre_table, metaName='pre_meta'))
            upgradeCommands.append(
                "pre_meta.tables[%(table)r].drop()" % {'table': tn})
            downgradeCommands.append(
                "pre_meta.tables[%(table)r].create()" % {'table': tn})
        # Tables only in the model (A): create on upgrade, drop on
        # downgrade. Declared against post_meta (the post-upgrade schema).
        for tn in self.diff.tables_missing_from_B:
            post_table = self.diff.metadataA.tables[tn]
            decls.extend(self._getTableDefn(post_table, metaName='post_meta'))
            upgradeCommands.append(
                "post_meta.tables[%(table)r].create()" % {'table': tn})
            downgradeCommands.append(
                "post_meta.tables[%(table)r].drop()" % {'table': tn})
        # Tables present in both but different: handle column-level diffs.
        for (tn, td) in six.iteritems(self.diff.tables_different):
            if td.columns_missing_from_A or td.columns_different:
                pre_table = self.diff.metadataB.tables[tn]
                decls.extend(self._getTableDefn(
                    pre_table, metaName='pre_meta'))
            if td.columns_missing_from_B or td.columns_different:
                post_table = self.diff.metadataA.tables[tn]
                decls.extend(self._getTableDefn(
                    post_table, metaName='post_meta'))
            for col in td.columns_missing_from_A:
                upgradeCommands.append(
                    'pre_meta.tables[%r].columns[%r].drop()' % (tn, col))
                downgradeCommands.append(
                    'pre_meta.tables[%r].columns[%r].create()' % (tn, col))
            for col in td.columns_missing_from_B:
                upgradeCommands.append(
                    'post_meta.tables[%r].columns[%r].create()' % (tn, col))
                downgradeCommands.append(
                    'post_meta.tables[%r].columns[%r].drop()' % (tn, col))
            # Column alterations are not supported: emit an assert into the
            # generated migration so running it fails loudly.
            for modelCol, databaseCol, modelDecl, databaseDecl in td.columns_different:
                upgradeCommands.append(
                    'assert False, "Can\'t alter columns: %s:%s=>%s"' % (
                        tn, modelCol.name, databaseCol.name))
                downgradeCommands.append(
                    'assert False, "Can\'t alter columns: %s:%s=>%s"' % (
                        tn, modelCol.name, databaseCol.name))
        return (
            '\n'.join(decls),
            '\n'.join('%s%s' % (indent, line) for line in upgradeCommands),
            '\n'.join('%s%s' % (indent, line) for line in downgradeCommands))
    def _db_can_handle_this_change(self,td):
        """Check if the database can handle going from B to A."""
        if (td.columns_missing_from_B
                and not td.columns_missing_from_A
                and not td.columns_different):
            # Even sqlite can handle column additions.
            return True
        else:
            # Column drops/alterations: anything but sqlite can do them
            # in place.
            return not self.engine.url.drivername.startswith('sqlite')
    def runB2A(self):
        """Goes from B to A.
        Was: applyModel. Apply model (A) to current database (B).
        """
        meta = sqlalchemy.MetaData(self.engine)
        # Drop tables that exist only in the database.
        for table in self._get_tables(missingA=True):
            table = table.tometadata(meta)
            table.drop()
        # Create tables that exist only in the model.
        for table in self._get_tables(missingB=True):
            table = table.tometadata(meta)
            table.create()
        # Reconcile tables present in both but different.
        for modelTable in self._get_tables(modified=True):
            tableName = modelTable.name
            modelTable = modelTable.tometadata(meta)
            dbTable = self.diff.metadataB.tables[tableName]
            td = self.diff.tables_different[tableName]
            if self._db_can_handle_this_change(td):
                for col in td.columns_missing_from_B:
                    modelTable.columns[col].create()
                for col in td.columns_missing_from_A:
                    dbTable.columns[col].drop()
                # XXX handle column changes here.
            else:
                # Sqlite doesn't support drop column, so you have to
                # do more: create temp table, copy data to it, drop
                # old table, create new table, copy data back.
                #
                # I wonder if this is guaranteed to be unique?
                tempName = '_temp_%s' % modelTable.name
                def getCopyStatement():
                    # Build an INSERT..SELECT copying only the columns the
                    # old and new tables have in common.
                    # NOTE(review): preparer is assigned but never used.
                    preparer = self.engine.dialect.preparer
                    commonCols = []
                    for modelCol in modelTable.columns:
                        if modelCol.name in dbTable.columns:
                            commonCols.append(modelCol.name)
                    commonColsStr = ', '.join(commonCols)
                    return 'INSERT INTO %s (%s) SELECT %s FROM %s' % \
                        (tableName, commonColsStr, commonColsStr, tempName)
                # Move the data in one transaction, so that we don't
                # leave the database in a nasty state.
                connection = self.engine.connect()
                trans = connection.begin()
                try:
                    connection.execute(
                        'CREATE TEMPORARY TABLE %s as SELECT * from %s' % \
                            (tempName, modelTable.name))
                    # make sure the drop takes place inside our
                    # transaction with the bind parameter
                    modelTable.drop(bind=connection)
                    modelTable.create(bind=connection)
                    connection.execute(getCopyStatement())
                    connection.execute('DROP TABLE %s' % tempName)
                    trans.commit()
                except:
                    trans.rollback()
                    raise
|
<reponame>klecknerlab/muvi
#!/usr/bin/python3
#
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This is the top-level module for creating a Qt based volumetric viewer
application.
'''
import sys, os
# from PyQt5.QtCore import Qt
from PyQt5 import QtCore, QtGui
Qt = QtCore.Qt
from PyQt5.QtWidgets import QMainWindow, QApplication, QTabWidget, QHBoxLayout, \
QVBoxLayout, QLabel, QWidget, QScrollArea, QAction, QFrame, QMessageBox, \
QFileDialog, QGridLayout, QPushButton, QStyle, QSplitter, QMenu, \
QSizePolicy, QProgressBar
from .qt_script import KeyframeEditor
from .qtview_widgets import paramListToVBox, controlFromParam, ListControl, \
IntControl, ViewWidget, AssetList, AssetItem, generateDarkPalette
import time
import traceback
import glob
from PIL import Image
from .params import PARAM_CATEGORIES, PARAMS
# from .. import open_3D_movie, VolumetricMovie
# Application identity and UI layout constants.
ORG_NAME = "MUVI Lab"
APP_NAME = "MUVI Volumetric Movie Viewer"
ICON_DIR = os.path.split(__file__)[0]  # icons live next to this module
PARAM_WIDTH = 250   # fixed pixel width of the parameter side panels
SCROLL_WIDTH = 15   # pixel allowance for scroll bars in panel sizing
LOGICAL_DPI_BASELINE = None  # set on Windows only (fractional scaling fixup)
UI_EXTRA_SCALING = 1.0       # extra UI scale factor passed to the view widget
if sys.platform == 'win32':
    # On Windows, it appears to need a bit more width to display text
    # We need to play some games to get the icon to show up correctly!
    import ctypes
    ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID("MuviLab.Viewer")
    LOGICAL_DPI_BASELINE = 96 #Used to correct fractional scaling, which does not show up in DPR!
elif sys.platform == 'darwin':
    # Python 3: pip3 install pyobjc-framework-Cocoa
    try:
        from Foundation import NSBundle
        bundle = NSBundle.mainBundle()
        if bundle:
            # Fix the app name in the macOS menu bar and opt in to dark mode.
            app_info = bundle.localizedInfoDictionary() or bundle.infoDictionary()
            if app_info:
                app_info['CFBundleName'] = APP_NAME
                app_info['NSRequiresAquaSystemAppearance'] = 'NO'
    except ImportError:
        raise ImportError('pyobjc-framework-Cocoa not installed (OS X only) -- run "pip3 install pyobjc-framework-Cocoa" or "conda install pyobjc-framework-Cocoa" first')
    # OS X always does integer high DPI scaling, so no need to check logical DPI
class ImageDisplay(QWidget):
    """Widget that shows a pixmap at a user-controlled zoom level.

    The widget resizes itself to match the zoomed image, so it is meant
    to live inside a QScrollArea.  Zoom factors are divided by the
    device pixel ratio, so scaleFactor == 1 maps image pixels 1:1 to
    physical screen pixels.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.image = None
        self.scaleFactor = 1
        self.dpr = self.devicePixelRatio()

    def paintEvent(self, event=None):
        # Nothing to draw until setImage() has supplied a pixmap.
        if self.image is None:
            return
        painter = QtGui.QPainter()
        painter.begin(self)
        painter.drawPixmap(0, 0, self.w * self.scale, self.h * self.scale,
                           self.image, 0, 0, self.w, self.h)
        painter.end()

    def adjustSize(self, event=None):
        # Recompute the on-screen scale and lock the widget to the
        # resulting image size, then schedule a repaint.
        if self.image is None:
            return
        self.scale = self.scaleFactor / self.dpr
        self.setFixedSize(int(self.w * self.scale), int(self.h * self.scale))
        self.update()

    def zoomIn(self, event=None):
        # Double the zoom level.
        self.setScaleFactor(self.scaleFactor * 2)

    def zoomOut(self, event=None):
        # Halve the zoom level.
        self.setScaleFactor(self.scaleFactor / 2)

    def zoomFit(self, event=None):
        # Pick the largest zoom at which the whole image fits the parent.
        if self.image is None:
            return
        area = self.parent().size()
        fit = min(area.width() / self.w * self.dpr,
                  area.height() / self.h * self.dpr)
        self.setScaleFactor(fit)

    def setScaleFactor(self, factor):
        self.scaleFactor = factor
        self.adjustSize()

    def setImage(self, image):
        # Store the pixmap and its native dimensions, then resize to fit.
        pixmap = QtGui.QPixmap(image)
        self.image = pixmap
        size = pixmap.size()
        self.w = size.width()
        self.h = size.height()
        self.adjustSize()
# class ExportWorker(QtCore.QObject):
# finished = QtCore.pyqtSignal()
# progress = QtCore.pyqtSignal(int)
# progressRange = QtCore.pyqtSignal(int, int)
#
# def __init__(self, parent):
# super().__init__() # No parent!
# self.parent = parent
# self.queue = []
#
# def run(self):
# print('start')
# for n in range(10):
# time.sleep(0.1)
#
# for fn, params in self.queue:
# img = self.parent.renderImage()
# img.save(fn)
# self.parent.updatePreview(img)
# self.parent.fileLabel.setText(f'Saved to: {os.path.split(fn)[1]}')
#
# self.queue = []
#
# # self.progressRange.emit(0, 1)
#
# self.progressRange.emit(0, 1)
# self.finished.emit()
# print('end')
#
# def exportSingle(self, fn):
# self.queue = [(fn, {})]
# self.progressRange.emit(0, 0)
# # self.parent.progress.repaint()
# # self.progress.emit(5)
# self.parent.exportThread.start()
class ExportWindow(QWidget):
    """Separate window for exporting rendered frames and movie sequences.

    Exports are queued as (filename, params) pairs and drained one per
    tick of self.exportTimer, which keeps the UI (progress bar, preview)
    responsive between frames.
    """
    # Historical trigger signal; exporting is now driven by exportTimer
    # (see __init__), so this is currently unused.
    exportFrame = QtCore.pyqtSignal()
    def __init__(self, parent):
        # parent: the main viewer window; must provide .display (a
        # ViewWidget), .keyframeEditor, .allParams(), and .toggleExport().
        super().__init__(parent=parent, flags=Qt.Window)
        self.setWindowTitle("Image Export")
        self.parent = parent
        self.vbox = QVBoxLayout()
        self.setLayout(self.vbox)
        # Scrollable preview area showing the last exported/previewed frame.
        self.scrollArea = QScrollArea()
        self.scrollArea.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
        self.image = ImageDisplay(self.scrollArea)
        self.scrollArea.setWidget(self.image)
        self.scrollArea.setMinimumSize(1920//3, 1080//3)
        self.vbox.addWidget(self.scrollArea)
        # Zoom controls for the preview image.
        self.zoomOut = QPushButton("Zoom Out")
        self.zoomOut.clicked.connect(self.image.zoomOut)
        self.zoomFit = QPushButton("Fit")
        self.zoomFit.clicked.connect(self.image.zoomFit)
        self.zoomIn = QPushButton("Zoom In")
        self.zoomIn.clicked.connect(self.image.zoomIn)
        self.hbox = QHBoxLayout()
        self.hbox.addStretch(1)
        self.hbox.addWidget(self.zoomOut)
        self.hbox.addWidget(self.zoomFit)
        self.hbox.addWidget(self.zoomIn)
        self.vbox.addLayout(self.hbox)
        self.hbox.addStretch(1)
        self.exportButton = QPushButton("Export Current Frame")
        self.exportButton.clicked.connect(self.saveFrame)
        self.previewButton = QPushButton("Preview Current Frame")
        self.previewButton.clicked.connect(self.previewFrame)
        # Output-folder selector (button + current-path label).
        self.folderControl = QHBoxLayout()
        self.folderLabel = QLabel(os.path.abspath(os.getcwd()))
        self.folderButton = QPushButton()
        self.folderButton.setIcon(self.style().standardIcon(QStyle.SP_DirIcon))
        self.folderButton.clicked.connect(self.selectExportFolder)
        self.folderControl.addWidget(self.folderButton, 0)
        self.folderControl.addWidget(self.folderLabel, 1)
        self.movieButton = QPushButton("Export Movie Frames")
        self.movieButton.clicked.connect(self.exportMovie)
        self.fileLabel = QLabel("")
        self.fileLabel.setFixedWidth(512)
        # Allowed output dimensions, offered in the width/height lists.
        self.ss_sizes = [512, 640, 720, 768, 1024, 1080, 1280, 1920,
            2048, 2160, 3072, 3240, 3840, 4096]
        self.widthControl = ListControl('Width:', 1920, self.ss_sizes, param='os_width')
        self.heightControl = ListControl('Height:', 1080, self.ss_sizes, param='os_height')
        # NOTE(review): '\b' in the tooltip below looks like a typo for
        # '\n' -- confirm before changing, as it is a runtime string.
        self.oversampleControl = IntControl('Oversample', 1, 1, 3, step=1, param='os_oversample',
            tooltip='If > 1, render at a higher resolution and then downsample.\bThis will make export (much) slower, but is useful for publication-quality images.')
        # NOTE(review): print_param is defined but never connected -- dead
        # debug helper.
        def print_param(p, v):
            print(f'{p}: {v}')
        # Any change to the output size invalidates the offscreen buffer.
        self.widthControl.paramChanged.connect(self.updateBuffer)
        self.heightControl.paramChanged.connect(self.updateBuffer)
        self.oversampleControl.paramChanged.connect(self.updateBuffer)
        self.scaleControl = ListControl('Scale Height:', 1080, self.ss_sizes, param='scaling_height',
            tooltip='Effective screen height to use for axis scaling. Used to prevent super thin lines and tiny text for high resolutions!')
        self.scaleHeight = self.scaleControl.value()
        self.scaleControl.paramChanged.connect(self.updatescaleHeight)
        self.progress = QProgressBar()
        self.pauseButton = QPushButton()
        self.pauseButton.setEnabled(False)
        # self.exportWorker = ExportWorker(self)
        # self.exportThread = QtCore.QThread()
        # self.exportWorker.progressRange.connect(self.progress.setRange)
        # self.exportWorker.progress.connect(self.progress.setValue)
        # self.exportWorker.moveToThread(self.exportThread)
        # self.exportWorker.finished.connect(self.exportThread.quit)
        # self.exportThread.started.connect(self.exportWorker.run)
        # Pending exports: list of (filename-or-None-or-False, params).
        self.queue = []
        # Why this strangeness? By triggering exportFrame through the signals
        # method, we give a chance for other updates to happen!
        # self.exportFrame.connect(self._exportFrame)
        self.exportTimer = QtCore.QTimer()
        # This is how long we wait to export each frame.
        # Nominally, this gives time for the progressbar to update!
        # Theoretically, a timer of 1 should be ok, but that doesn't actually
        # seem to work like I expect...
        self.exportTimer.setInterval(35)
        self.exportTimer.timeout.connect(self._exportFrame)
        # Grid of export settings/buttons below the preview.
        self.settings = QGridLayout()
        self.vbox.addLayout(self.settings)
        self.settings.setColumnStretch(0, 1)
        self.settings.setColumnStretch(5, 1)
        self.settings.addWidget(self.widthControl, 0, 1, 1, 2)
        self.settings.addWidget(self.heightControl, 1, 1, 1, 2)
        self.settings.addWidget(self.scaleControl, 0, 3, 1, 2)
        self.settings.addWidget(self.oversampleControl, 1, 3, 1, 2)
        self.settings.addWidget(self.exportButton, 3, 1, 1, 2)
        self.settings.addWidget(self.previewButton, 3, 3, 1, 2)
        self.settings.addLayout(self.folderControl, 4, 1, 1, 4)
        self.settings.addWidget(self.movieButton, 5, 1, 1, 4)
        self.settings.addWidget(self.fileLabel, 6, 1, 1, 4)
        self.settings.addWidget(self.progress, 7, 0, 1, 5)
        self.settings.addWidget(self.pauseButton, 7, 5, 1, 1)
        # One-click resolution preset buttons on row 2.
        for i, (label, w, h) in enumerate([
                    ('720p', 1280, 720),
                    ('1080p', 1920, 1080),
                    ('1440p', 2560, 1440),
                    ('2160p (4K)', 3840, 2160),
                    # ('3240p (6K)', 5760, 3240),
                    # ('4320p (8K)', 7680, 4320,)
                ]):
            button = QPushButton(label)
            # w=w, h=h defaults bind the loop values at definition time
            # (avoids the late-binding closure pitfall).
            def cr(state, w=w, h=h):
                self.widthControl.setValue(w)
                self.heightControl.setValue(h)
            button.clicked.connect(cr)
            j = i+1
            # if j >= 3:
            #     j += 1
            self.settings.addWidget(button, 2, j)
    def updatescaleHeight(self, key, val):
        # Slot for scaleControl.paramChanged; caches the scaling height.
        self.scaleHeight = val
    def updateBuffer(self, key=None, val=None):
        """(Re)size the offscreen render buffer to width*height*oversample."""
        width, height = self.widthControl.value(), self.heightControl.value()
        oversample = self.oversampleControl.value()
        # GL buffer operations need the display's context to be current.
        self.parent.display.makeCurrent()
        if not hasattr(self, 'bufferId'):
            self.bufferId = self.parent.display.view.addBuffer(width * oversample, height * oversample)
        else:
            self.parent.display.view.resizeBuffer(self.bufferId, width * oversample, height * oversample)
        self.parent.display.doneCurrent()
    def closeEvent(self, e):
        # Closing the window toggles export mode off in the main window.
        self.parent.toggleExport()
    def selectExportFolder(self):
        self.folderLabel.setText(QFileDialog.getExistingDirectory(
            self, "Select Export Folder", self.folderLabel.text()))
    # def renderImage(self):
    #     # self.progress.setRange(0, 0)
    #     # self.progress.update()
    #     # print('start')
    #     # QApplication.processEvents()
    #
    #     if not hasattr(self, 'bufferId'):
    #         self.updateBuffer()
    #
    #     img = self.parent.display.offscreenRender(self.bufferId, scaleHeight=self.scaleHeight)
    #     img = Image.fromarray(img[::-1])
    #
    #     oversample = self.oversampleControl.value()
    #     if oversample > 1:
    #         w, h = img.size
    #         img = img.resize((w//oversample, h//oversample), Image.LANCZOS)
    #     # self.progress.setRange(0, 1)
    #
    #     return img
    def exportMovie(self, event=None):
        """Queue every keyframe-editor frame for export to muvi_frames/."""
        dir = os.path.join(self.folderLabel.text(), 'muvi_frames')
        if not os.path.exists(dir):
            os.makedirs(dir)
        frames = self.parent.keyframeEditor.frames()
        # print(frames)
        for frame, params in enumerate(frames):
            fn = os.path.join(dir, f'frame{frame:08d}.png')
            self.queue.append((fn, params))
        # This triggers an update w/o drawing anything!
        self.queue.append((False, self.parent.allParams()))
        self.progress.setRange(0, len(frames))
        self.progress.setValue(0)
        self.startExport()
    def startExport(self):
        """Lock the UI and start draining the export queue via the timer."""
        self.exportButton.setDisabled(True)
        self.previewButton.setDisabled(True)
        self.movieButton.setDisabled(True)
        self.parent.display.disable()
        self.exportTimer.start()
    def _exportFrame(self):
        """Timer slot: render/save one queued frame, or finish up.

        Queue entries are (fn, updates): fn is a filename to save to,
        None to preview only, or False to just apply *updates* without
        rendering.
        """
        if not self.queue:
            return
        QApplication.processEvents()
        fn, updates = self.queue.pop(0)
        if not hasattr(self, 'bufferId'):
            self.updateBuffer()
        if updates:
            self.parent.display.updateParams(updates)
        if fn is not False:
            # Render offscreen; the raw image is vertically flipped.
            img = self.parent.display.offscreenRender(self.bufferId, scaleHeight=self.scaleHeight)
            img = Image.fromarray(img[::-1])
            oversample = self.oversampleControl.value()
            if oversample > 1:
                # Downsample the oversampled render for anti-aliasing.
                w, h = img.size
                img = img.resize((w//oversample, h//oversample), Image.LANCZOS)
            if fn is not None:
                img.save(fn)
                self.fileLabel.setText(f'Saved to: {os.path.split(fn)[1]}')
            # Show the result in the preview pane.
            img = QtGui.QImage(img.tobytes("raw", "RGB"), img.size[0], img.size[1],
                QtGui.QImage.Format_RGB888)
            self.image.setImage(img)
        if self.queue:
            self.progress.setValue(self.progress.value() + 1)
            self.progress.repaint()
        else:
            # Queue drained: re-enable the UI and stop the timer.
            self.exportButton.setEnabled(True)
            self.previewButton.setEnabled(True)
            self.movieButton.setEnabled(True)
            self.exportTimer.stop()
            self.progress.setRange(0, 1)
            self.progress.reset()
            self.parent.display.enable()
    def saveFrame(self, event=None):
        """Queue a single screenshot under the first free numbered name."""
        dir = self.folderLabel.text()
        fns = glob.glob(os.path.join(dir, 'muvi_screenshot_*.png'))
        for i in range(10**4):
            fn = os.path.join(dir, 'muvi_screenshot_%08d.png' % i)
            if fn not in fns:
                break
        # self.exportWorker.exportSingle(os.path.join(dir, fn))
        self.progress.setRange(0, 0)
        self.progress.repaint()
        self.queue.append((fn, {}))
        self.startExport()
        # self.exportFrame.emit()
    def previewFrame(self, event=None):
        """Queue a render-only pass (fn=None -> preview, no file saved)."""
        self.progress.setRange(0, 0)
        self.progress.repaint()
        self.queue.append((None, {}))
        self.startExport()
        # self.exportFrame.emit()
# self.updatePreview(self.renderImage())
# def updatePreview(self, img):
# img = QtGui.QImage(img.tobytes("raw", "RGB"), img.size[0], img.size[1],
# QtGui.QImage.Format_RGB888)
# # img = QtGui.QImage(np.require(img[::-1, :, :3], np.uint8, 'C'),
# # img.shape[1], img.shape[0], QtGui.QImage.Format_RGB888)
# # self.image.setPixmap(QtGui.QPixmap(img).scaledToWidth(1024))
# # pixmap = QtGui.QPixmap(img)
# # pixmap.setDevicePixelRatio(1)
# self.image.setImage(img)
# # self.image.adjustSize()
class VolumetricViewer(QMainWindow):
    """Main application window for the volumetric viewer.

    Lays out the 3D display, playback bar, keyframe editor, asset list and
    parameter tabs; builds the menu bar; owns the export window.
    """

    def __init__(self, parent=None, clipboard=None, window_name=None):
        super().__init__(parent)
        self.setWindowTitle(window_name)
        self.clipboard = clipboard
        # Main layout: [keyframe editor | 3D display | settings] over playback.
        self.vbox = QVBoxLayout()
        self.vbox.setContentsMargins(0, 0, 0, 0)
        self.keyframeEditor = KeyframeEditor(self)
        self.keyframeEditor.setFixedWidth(PARAM_WIDTH)
        self.keyframeEditor.setVisible(False)
        self.hbox = QHBoxLayout()
        self.display = ViewWidget(parent=self, uiScale=UI_EXTRA_SCALING)
        self.hbox.addWidget(self.keyframeEditor)
        self.hbox.addWidget(self.display, 1)
        self.hbox.setContentsMargins(0, 0, 0, 0)
        self.hbox.setSpacing(0)
        self.vbox.addLayout(self.hbox, 1)
        # The playback bar doubles as the control for the 'frame' parameter.
        self.playback = controlFromParam(PARAMS['frame'])
        self.vbox.addWidget(self.playback)
        self.playback.playingChanged.connect(self.display.setPlaying)
        self.display.frameChanged.connect(self.playback.setSilent)
        self.playback.paramChanged.connect(self.display.updateParam)
        self.paramControls = {'frame': self.playback}
        # Parameter tabs; slot 0 is swapped per selected asset (selectAssetTab).
        self.paramTabs = QTabWidget()
        self.paramTabs.setFixedWidth(PARAM_WIDTH)
        self.numTabs = 0
        self.nullTab = QWidget()
        layout = QVBoxLayout()
        button = QPushButton('Open Data File')
        button.pressed.connect(self.openFile)
        layout.addWidget(button)
        layout.addStretch(1)
        self.nullTab.setLayout(layout)
        self.paramTabs.addTab(self.nullTab, 'Data')
        for cat in ["Limits", "View", "Display"]:
            params = PARAM_CATEGORIES[cat]
            self.paramTabs.addTab(self.buildParamTab(params), cat)
        self.paramTabs.setObjectName('paramTabs')
        # Splitter: asset list on top, parameter tabs below.
        self.splitter = QSplitter(Qt.Vertical)
        self.splitter.setHandleWidth(15)
        self.assetVBox = QVBoxLayout()
        self.assetVBox.setContentsMargins(5, 5, 5, 0)
        self.assetList = AssetList(self)
        self.assetVBox.addWidget(self.assetList)
        resetView = QPushButton('Recenter/Rescale View')
        resetView.clicked.connect(self.display.resetView)
        self.assetVBox.addWidget(resetView)
        self.addParamCategory('Asset List', self.assetVBox)
        widget = QWidget()
        widget.setLayout(self.assetVBox)
        self.splitter.addWidget(widget)
        self.splitter.addWidget(self.paramTabs)
        self.splitter.setSizes([100, 200])
        self.splitter.setStretchFactor(0, 0)
        self.splitter.setStretchFactor(1, 1)
        self.hbox.addWidget(self.splitter)
        widget = QWidget()
        widget.setLayout(self.vbox)
        self.setCentralWidget(widget)
        self.setWindowTitle(APP_NAME)
        self.exportWindow = ExportWindow(self)
        # --- Menu bar -------------------------------------------------------
        menu = self.menuBar()
        self.fileMenu = menu.addMenu("File")
        self.addMenuItem(self.fileMenu, 'Quit', self.close, 'Ctrl+Q',
            'Quit the viewer.')
        self.addMenuItem(self.fileMenu, '&Open Data',
            self.openFile, 'Ctrl+O')
        self.addMenuItem(self.fileMenu, '&Save Script File',
            self.keyframeEditor.saveScript, 'Ctrl+S')
        self.editMenu = menu.addMenu("Edit")
        self.addMenuItem(self.editMenu, 'Insert &Keyframe',
            self.addKeyframe, "Ctrl+K")
        self.viewMenu = menu.addMenu("View")
        self.showSettings = self.addMenuItem(self.viewMenu,
            'Hide View Settings', self.toggleSettings, 'Ctrl+/',
            'Show or hide settings option on right side of main window')
        self.showKeyframeEditor = self.addMenuItem(self.viewMenu,
            'Hide Keyframe List', self.toggleKeyframe, 'Ctrl+L',
            'Show or hide keyframe list on right side of main window')
        self.save_image = self.addMenuItem(self.viewMenu,
            'Save Screenshot', self.exportWindow.saveFrame, 's',
            'Save a screenshot with the current export settings (use export window to control resolution).')
        self.showExport = self.addMenuItem(self.viewMenu,
            'Show Export Window', self.toggleExport, 'Ctrl+E',
            'Show or hide the export window, used to take screenshots or make movies')
        self.addMenuItem(self.viewMenu, 'Match Aspect Ratio to Export', self.matchAspect, "Ctrl+A",
            tooltip="Adjust aspect ratio of main display to match export size; useful for previewing movies!")
        # Camera shortcuts: x/y/z look down +axis, Shift+<key> looks down -axis.
        for i in range(3):
            axis = chr(ord('X') + i)
            def f(event, a=i):  # a=i binds the loop variable (late-binding trap)
                self.orient_camera(a)
            self.addMenuItem(self.viewMenu,
                f'Look down {axis}-axis', f, axis.lower())
            def f2(event, a=i):
                self.orient_camera(a+3)
            self.addMenuItem(self.viewMenu,
                f'Look down -{axis}-axis', f2, 'Shift+'+axis.lower())
        self.setAcceptDrops(True)
        self.show()

    def addKeyframe(self):
        """Insert a keyframe and ensure the keyframe list is visible."""
        self.keyframeEditor.addKeyframe()
        self.toggleKeyframe(show=True)

    def valueCallback(self, param, value):
        """Push a parameter value change into its UI control (if any)."""
        control = self.paramControls.get(param, None)
        if control is not None:
            control.setValue(value)

    def rangeCallback(self, param, minVal, maxVal):
        """Push a parameter range change into its UI control (if supported)."""
        control = self.paramControls.get(param, None)
        if control is not None and hasattr(control, 'setRange'):
            control.setRange(minVal, maxVal)

    def addMenuItem(self, menu, title, func=None, shortcut=None, tooltip=None):
        """Create a QAction, wire it up, add it to ``menu`` and return it."""
        action = QAction(title, self)
        if shortcut is not None:
            action.setShortcut(shortcut)
        if tooltip is not None:
            action.setStatusTip(tooltip)
            action.setToolTip(tooltip)
        if func is not None:
            action.triggered.connect(func)
        menu.addAction(action)
        return action

    def openFile(self):
        """Prompt for a data file and open it."""
        fn, ext = QFileDialog.getOpenFileName(self,
            'Open Volumetric Movie / Mesh Sequence', os.getcwd(),
            "Volumetric Movie (*.vti);; Polygon Mesh (*.ply);; MUVI Script (*.muvi_script)")
        if fn:
            self.openData(fn)

    def _showErrorDialog(self, e):
        """Show a modal error dialog with the full traceback in the details pane.

        Shared by openData and openAssets (was duplicated in both).
        """
        ec = e.__class__.__name__
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Critical)
        msg.setWindowTitle(str(ec))
        msg.setText(str(ec) + ": " + str(e))
        msg.setDetailedText(traceback.format_exc())
        msg.setStyleSheet("QTextEdit {font-family: Courier; min-width: 600px;}")
        msg.setStandardButtons(QMessageBox.Cancel)
        msg.exec_()

    def openData(self, dat):
        """Open a data file/object: .muvi_script goes to the keyframe editor,
        anything else becomes a display asset."""
        try:
            if isinstance(dat, str) and os.path.splitext(dat)[1] == ".muvi_script":
                self.keyframeEditor.openScript(dat)
                asset = None
            else:
                self.display.makeCurrent()
                asset = self.display.view.openData(dat)
                self.display.doneCurrent()
        except Exception as e:
            self._showErrorDialog(e)
        else:
            if asset is not None:
                self.assetList.addItem(AssetItem(asset, self))
                self.update()

    def openAssets(self, assets):
        """Open several assets at once.

        Returns a mapping {requested id: actual id} on success, or None if an
        error dialog was shown.
        """
        try:
            self.newIds = self.display.view.openAssets(assets)
        except Exception as e:
            self._showErrorDialog(e)
        else:
            relabel = {}
            for id, asset in self.newIds.items():
                if isinstance(asset, int):
                    relabel[id] = asset
                else:
                    relabel[id] = asset.id
                    self.assetList.addItem(AssetItem(asset, self))
            self.update()
            return relabel

    def buildParamTab(self, params, prefix="", defaults={}):
        """Build a fixed-width scrollable tab holding controls for ``params``."""
        vbox = QVBoxLayout()
        vbox.setSpacing(10)
        self.addParams(params, vbox, prefix=prefix, defaults=defaults)
        vbox.addStretch(1)
        sa = QScrollArea()
        sa.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
        sa.setContentsMargins(0, 0, 0, 0)
        sa.setFrameShape(QFrame.NoFrame)
        widget = QWidget()
        widget.setLayout(vbox)
        widget.setFixedWidth(PARAM_WIDTH - (SCROLL_WIDTH + 5))
        sa.setWidget(widget)
        return sa

    def addParams(self, params, vbox, prefix="", defaults={}):
        """Add controls for ``params`` to ``vbox`` and register them."""
        paramControls = paramListToVBox(params, vbox, self.display.view, prefix=prefix, defaults=defaults)
        for param, control in paramControls.items():
            if hasattr(control, 'paramChanged'):
                control.paramChanged.connect(self.display.updateParam)
        self.paramControls.update(paramControls)

    def addParamCategory(self, cat, vbox, prefix="", defaults={}):
        # NOTE(review): the prefix argument is accepted but not forwarded
        # (addParams is always called with prefix="") -- confirm this is intended.
        self.addParams(PARAM_CATEGORIES[cat], vbox, prefix="", defaults=defaults)

    def matchAspect(self, event=None):
        """Resize the display so its aspect ratio matches the export size."""
        size = self.display.size()
        w, h = size.width(), size.height()
        we = self.exportWindow.widthControl.value()
        he = self.exportWindow.heightControl.value()
        newWidth = (we * h) // he
        # Bug fix: keep the current height. The old code passed the previous
        # *width* as the new height, producing the wrong aspect ratio.
        self.display.resize(newWidth, h)
        self.update()

    def selectAssetTab(self, asset):
        """Swap tab 0 to show the given asset's parameters (or the null tab)."""
        if asset is None:
            tab, label = self.nullTab, 'Data'
        elif isinstance(asset, AssetItem):
            tab, label = asset.tab, asset.label
        else:
            raise ValueError('selectAssetTab should receive an int or AssetItem object')
        # Disable repaints while the tab is swapped to avoid flicker.
        self.paramTabs.setUpdatesEnabled(False)
        self.paramTabs.removeTab(0)
        self.paramTabs.insertTab(0, tab, label)
        self.paramTabs.setCurrentIndex(0)
        self.paramTabs.setUpdatesEnabled(True)

    def toggleSettings(self):
        """Show or hide the right-hand settings panel, updating the menu text."""
        visible = self.splitter.isVisible()
        self.splitter.setVisible(not visible)
        self.showSettings.setText(
            'Show View Settings' if visible else 'Hide View Settings')

    def toggleKeyframe(self, event=None, show=None):
        """Show or hide the keyframe list; show=None toggles."""
        if show is None:
            show = not self.keyframeEditor.isVisible()
        self.keyframeEditor.setVisible(show)
        self.showKeyframeEditor.setText(
            'Hide Keyframe List' if show else 'Show Keyframe List')

    def toggleExport(self):
        """Show or hide the export window, updating the menu text."""
        if self.exportWindow.isVisible():
            self.exportWindow.hide()
            self.showExport.setText('Show Export Window')
        else:
            self.exportWindow.show()
            self.showExport.setText('Hide Export Window')

    def getExportSettings(self):
        """Return the current export settings as a plain dict."""
        return {
            "width": self.exportWindow.widthControl.value(),
            "height": self.exportWindow.heightControl.value(),
            "oversample": self.exportWindow.oversampleControl.value(),
            "scale_height": self.exportWindow.scaleControl.value(),
        }

    def closeEvent(self, event):
        # Prevents an error message by controlling deallocation order!
        del self.display.view

    def dragEnterEvent(self, event):
        # NOTE(review): hasUrls is a method; without the call parentheses it is
        # always truthy, so every drag is accepted -- confirm hasUrls() intended.
        if event.mimeData().hasUrls:
            event.accept()
        else:
            event.ignore()

    def dragMoveEvent(self, event):
        if event.mimeData().hasUrls:
            event.setDropAction(Qt.CopyAction)
            event.accept()
        else:
            event.ignore()

    def dropEvent(self, event):
        """Open the first dropped file (if any)."""
        if event.mimeData().hasUrls:
            event.setDropAction(Qt.CopyAction)
            event.accept()
            if len(event.mimeData().urls()):
                self.openData(event.mimeData().urls()[0].toLocalFile())
        else:
            event.ignore()

    def orient_camera(self, axis):
        """Point the camera down an axis: 0-2 = +X/+Y/+Z, 3-5 = -X/-Y/-Z."""
        views = {
            0: ((1, 0, 0), (0, 1, 0)),
            1: ((0, 1, 0), (0, 0, -1)),
            2: ((0, 0, 1), (0, 1, 0)),
            3: ((-1, 0, 0), (0, 1, 0)),
            4: ((0, -1, 0), (0, 0, 1)),
        }
        direction, up = views.get(axis, ((0, 0, -1), (0, 1, 0)))
        self.display.view.resetView(direction=direction, up=up)
        self.display.update()

    def allParams(self):
        """Return all view parameters as a dict."""
        return self.display.view.allParams()
def view_volume(vol=None, args=None, window_name=None):
    """Launch the Qt viewer application, optionally opening ``vol`` at startup.

    Returns the Qt event loop's exit code. Mutates the module-level UI scaling
    globals (PARAM_WIDTH, SCROLL_WIDTH, UI_EXTRA_SCALING) before any widgets
    are built, so the statement order below matters.
    """
    global PARAM_WIDTH, SCROLL_WIDTH, UI_EXTRA_SCALING, CLIPBOARD, app
    if window_name is None:
        window_name = APP_NAME
    if args is None:
        args = sys.argv
    # High-DPI attributes must be set *before* the QApplication is created.
    QApplication.setAttribute(Qt.AA_EnableHighDpiScaling)
    QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)
    app = QApplication(args)
    if LOGICAL_DPI_BASELINE is not None:
        # This is used to fix fractional scaling in Windows, which does
        # not show up as a devicePixelRatio!
        UI_EXTRA_SCALING = QWidget().logicalDpiX() / LOGICAL_DPI_BASELINE
        PARAM_WIDTH = int(PARAM_WIDTH * UI_EXTRA_SCALING)
        SCROLL_WIDTH = int(SCROLL_WIDTH * UI_EXTRA_SCALING)
    app.setStyle('Fusion')
    app.setPalette(generateDarkPalette())
    # Global stylesheet: sizes are pre-scaled by UI_EXTRA_SCALING.
    app.setStyleSheet(f'''
        QWidget {{
            font-size: {int(12 * UI_EXTRA_SCALING)}px;
        }}
        QLabel, QSlider, QSpinBox, QDoubleSpinBox, QCheckBox {{
            padding: 0px;
            margin: 0px;
        }}
        QGroupBox {{
            padding: 0px;
            padding-top: 20px;
            margin: 0px;
        }}
        QScrollBar:vertical {{
            width: {SCROLL_WIDTH}px;
        }}
        #Border {{
            border: 1px solid #808080;
            border-radius: 4px;
            margin-top: 0px;
            margin-bottom: 5px;
        }}
        QTabBar::tab:selected {{
            color: palette(Text);
        }}
        QTabBar::tab:!selected {{
            color: #A0A0A0;
        }}
    ''')
    app.setApplicationDisplayName(window_name)
    # Keep a reference so the window is not garbage collected immediately.
    window = VolumetricViewer(clipboard=app.clipboard(), window_name=window_name)
    if vol is not None:
        window.openData(vol)
    app.setWindowIcon(QtGui.QIcon(QtGui.QPixmap(os.path.join(ICON_DIR, 'muvi_logo.png'))))
    return(app.exec())
def qt_viewer(args=None, window_name=None):
    """Command-line entry point for the viewer.

    Treats the first positional argument (if present) as the data file to
    open. Returns the application's exit code, propagated from view_volume
    (previously the return value was dropped).
    """
    if args is None:
        args = sys.argv
    if len(args) > 1:
        # Pop the data-file argument so Qt does not try to interpret it.
        vol = args.pop(1)
    else:
        vol = None
    return view_volume(vol, args, window_name)
if __name__ == '__main__':
    # Allow launching the viewer directly with `python <this file>`.
    qt_viewer()
|
"""
The MIT License (MIT)
Copyright (c) 2017-2021 TwitchIO
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import inspect
from functools import partial
from .core import *
__all__ = ("Cog",)
class CogEvent:
    """Record pairing a cog event's name with the callback that handles it."""

    def __init__(self, *, name: str, func):
        self.name, self.func = name, func
class CogMeta(type):
    """Metaclass for cogs: stores the custom cog name and collects CogEvent
    members into the class-level ``_events`` mapping."""

    def __new__(mcs, *args, **kwargs):
        name, bases, attrs = args
        # A 'name' class keyword overrides the default (the class name).
        attrs["__cogname__"] = kwargs.pop("name", name)
        cls = super().__new__(mcs, name, bases, attrs, **kwargs)
        cls._events = {}
        cls._commands = {}
        for attr_name, member in inspect.getmembers(cls):
            if not isinstance(member, (CogEvent, Command)):
                continue
            if attr_name.startswith(("cog_", "bot_")):  # reserved prefixes
                raise RuntimeError(f'The event or command "{attr_name}" starts with an invalid prefix (cog_ or bot_).')
            if isinstance(member, CogEvent):
                cls._events.setdefault(member.name, []).append(member.func)
        return cls
class Cog(metaclass=CogMeta):
    """Class used for creating a TwitchIO Cog.

    Cogs help organise code and provide powerful features for creating bots.
    Cogs can contain commands, events and special cog specific methods to help with checks,
    before and after command invocation hooks, and cog error handlers.

    To use a cog simply subclass Cog and add it. Once added, cogs can be un-added and re-added live.

    Examples
    ----------
    .. code:: py

        # In modules/test.py
        from twitchio.ext import commands

        class MyCog(commands.Cog):

            def __init__(self, bot: commands.Bot):
                self.bot = bot

            @commands.command()
            async def hello(self, ctx: commands.Context):
                await ctx.send(f"Hello, {ctx.author.name}!")

            @commands.Cog.event()
            async def event_message(self, message):
                # An event inside a cog!
                if message.echo:
                    return
                print(message.content)

        def prepare(bot: commands.Bot):
            # Load our cog with this module...
            bot.add_cog(MyCog(bot))
    """

    def _load_methods(self, bot):
        """Register this cog's commands and events with *bot*."""
        for name, method in inspect.getmembers(self):
            if isinstance(method, Command):
                method._instance = self
                method.cog = self
                # NOTE(review): _commands is set on the class by CogMeta, so
                # this mutation is shared by all instances -- confirm intended.
                self._commands[name] = method
                bot.add_command(method)
        events = self._events.copy()
        self._events = {}
        for event, callbacks in events.items():
            for callback in callbacks:
                callback = partial(callback, self)
                # Bug fix: remember the bound partial so _unload_methods can
                # later remove exactly the callbacks that were registered
                # (previously self._events stayed empty and events leaked).
                self._events.setdefault(event, []).append(callback)
                bot.add_event(callback=callback, name=event)

    def _unload_methods(self, bot):
        """Remove the commands and events that _load_methods registered."""
        for name in self._commands:
            bot.remove_command(name)
        for event, callbacks in self._events.items():
            for callback in callbacks:
                bot.remove_event(callback=callback)
        self._events = {}
        try:
            self.cog_unload()
        except Exception:
            # cog_unload is user-provided, best-effort cleanup; never let it
            # prevent the cog from being unloaded.
            pass

    @classmethod
    def event(cls, event: str = None):
        """Add an event listener to this Cog.

        If *event* is omitted, the decorated function's name is used as the
        event name.

        Examples
        ----------
        .. code:: py

            class MyCog(commands.Cog):

                def __init__(...):
                    ...

                @commands.Cog.event()
                async def event_message(self, message: twitchio.Message):
                    print(message.content)

                @commands.Cog.event("event_ready")
                async def bot_is_ready(self):
                    print('Bot is ready!')
        """
        def decorator(func):
            event_name = event or func.__name__
            return CogEvent(name=event_name, func=func)
        return decorator

    @property
    def name(self) -> str:
        """This cogs name."""
        return self.__cogname__

    @property
    def commands(self) -> dict:
        """The commands associated with this cog as a mapping."""
        return self._commands

    async def cog_error(self, exception: Exception):
        """Hook invoked when an error is raised inside this cog."""
        pass

    async def cog_command_error(self, ctx: Context, exception: Exception):
        """Method invoked when an error is raised in one of this cogs commands.

        Parameters
        -------------
        ctx: :class:`Context`
            The context around the invoked command.
        exception: Exception
            The exception raised.
        """
        pass

    async def cog_check(self, ctx: Context) -> bool:
        """A cog-wide check which is ran everytime a command from this Cog is invoked.

        Parameters
        ------------
        ctx: :class:`Context`
            The context used to try and invoke this command.

        Notes
        -------
        .. note::
            This method must return True/False or raise. If this check returns False or raises, it will fail
            and an exception will be propagated to error handlers.
        """
        return True

    def cog_unload(self):
        """Hook called when the cog is unloaded; override for cleanup."""
        pass
|
# -*- coding: utf-8 -*-
"""
============================================================
© 2018 北京灵汐科技有限公司 版权所有。
* 注意:
以下内容均为北京灵汐科技有限公司原创,
未经本公司允许,不得转载,否则将视为侵权;
对于不遵守此声明或者其他违法使用以下内容者,
本公司依法保留追究权。
© 2018 Lynxi Technologies Co., Ltd. All rights reserved.
* NOTICE:
All information contained here is,
and remains the property of Lynxi.
This file can not be copied or distributed without
the permission of Lynxi Technologies Co., Ltd.
============================================================
@file: lynpy.py
@author: <EMAIL>
"""
import sys
sys.path.append('/usr/lib')
import pylynchipsdk as sdk
import numpy as np
# Map SDK tensor element types to the equivalent numpy dtype name strings.
SDK_DTYPE = {
    sdk.lyn_data_type_t.DT_INT8: 'int8',
    sdk.lyn_data_type_t.DT_UINT8: 'uint8',
    sdk.lyn_data_type_t.DT_INT32: 'int32',
    sdk.lyn_data_type_t.DT_UINT32: 'uint32',
    sdk.lyn_data_type_t.DT_FLOAT: 'float32',
    sdk.lyn_data_type_t.DT_FLOAT16: 'float16',
}
class Tensor(object):
    '''lynpy.Tensor is a common data object used to manage data in device memory.
    '''

    def __init__(self, dev_id=0, size=0, allocate=True):
        """Init function.

        Parameters
        ----------
        dev_id : int32
            set which device to use.
        size : int32
            the tensor size in bytes.
        allocate : bool
            True: allocate device memory when creating the tensor.
            False: allocate lazily in tensor.apu(), or set tensor.devptr manually.
        """
        super(Tensor, self).__init__()
        self.__numpydata = None
        self.devptr = None
        self.__child = False   # True for sub-tensors returned by split()
        self.data_size = size
        self.dev_id = dev_id
        ##
        # mirrors of the numpy data (kept in sync by __update_numpydata_info)
        self.shape = None
        self.dtype = None
        self.size = 0
        self.itemsize = 0
        self.context, ret = sdk.lyn_create_context(self.dev_id)
        assert ret == 0
        ##
        # for the split() case there is no need to allocate device memory
        if self.data_size != 0 and allocate:
            self.devptr, ret = sdk.lyn_malloc(self.data_size)
            assert ret == 0

    def __del__(self):
        # Only the owning (non-child) tensor frees the device pointer;
        # children returned by split() merely alias the parent's memory.
        if self.devptr is not None and not self.__child:
            sdk.lyn_set_current_context(self.context)
            sdk.lyn_free(self.devptr)
            self.__numpydata = None
            self.devptr = None
            self.data_size = 0
        sdk.lyn_destroy_context(self.context)

    def __str__(self):
        msg = 'Tensor: {} {} \n{}'.format(
            self.__numpydata.shape,
            self.__numpydata.dtype,
            str(self.__numpydata))
        return msg

    def __update_numpydata_info(self):
        # Mirror the numpy view's metadata onto the tensor itself.
        self.shape = self.__numpydata.shape
        self.dtype = self.__numpydata.dtype
        self.size = self.__numpydata.size
        self.itemsize = self.__numpydata.itemsize

    def from_numpy(self, data):
        """Set tensor.apu() source data or tensor.cpu() destination data.

        Parameters
        ----------
        data : numpy.ndarray or List[numpy.ndarray]

        Returns
        -------
        Tensor : reference to self.
        """
        total_size = 0
        self.__numpydata = []
        if isinstance(data, list):
            for d in data:
                assert isinstance(d, np.ndarray)
                # The SDK copies raw buffers, so arrays must be C-contiguous.
                if not d.flags["C_CONTIGUOUS"]:
                    self.__numpydata.append(np.ascontiguousarray(d))
                else:
                    self.__numpydata.append(d)
                total_size = total_size + d.size * d.itemsize
        elif isinstance(data, np.ndarray):
            if not data.flags["C_CONTIGUOUS"]:
                self.__numpydata = np.ascontiguousarray(data)
            else:
                self.__numpydata = data
            total_size = data.size * data.itemsize
            self.__update_numpydata_info()
        else:
            assert 0
        if self.data_size == 0:
            self.data_size = total_size
        assert self.data_size == total_size, 'required {}, input {}'.format(self.data_size, total_size)
        return self

    def view_as(self, shape, dtype='float32'):
        """Change the view of the data's shape/dtype without touching memory.

        Parameters
        ----------
        shape : Tuple
        dtype : numpy.dtype

        Returns
        -------
        Tensor : reference to self.
        """
        if self.__numpydata is None:
            data = np.empty(shape, dtype=dtype)
            assert self.data_size == data.size * data.itemsize, 'required {}, input {}'.format(self.data_size, data.size * data.itemsize)
            self.__numpydata = data
        else:
            # force convert in place (reinterprets the same bytes)
            self.__numpydata.dtype = dtype
            self.__numpydata.shape = shape
        self.__update_numpydata_info()
        return self

    def numpy(self):
        '''return the numpy object'''
        return self.__numpydata

    def cpu(self):
        '''Copy data from device to host memory (direction: ServerToClient).'''
        assert self.data_size != 0
        assert self.devptr is not None
        if self.__numpydata is None:
            self.__numpydata = np.empty(self.data_size, dtype=np.byte)
            self.__update_numpydata_info()
        sdk.lyn_set_current_context(self.context)
        if isinstance(self.__numpydata, np.ndarray):
            assert 0 == sdk.lyn_memcpy(sdk.lyn_numpy_to_ptr(self.__numpydata),
                                       self.devptr, self.data_size,
                                       sdk.lyn_memcpy_dir_t.ServerToClient)
        else:  # list of numpy arrays packed back-to-back in one device buffer
            offset = 0
            for d in self.__numpydata:
                size = d.size * d.itemsize
                assert 0 == sdk.lyn_memcpy(sdk.lyn_numpy_to_ptr(d),
                                           sdk.lyn_addr_seek(self.devptr, offset),
                                           size,
                                           sdk.lyn_memcpy_dir_t.ServerToClient)
                offset = offset + size
            # Bug fix: guard against overflowing the device buffer. The
            # original `assert offset > self.data_size` was inverted and
            # failed exactly when the copy completed correctly
            # (from_numpy guarantees the sizes match).
            assert offset <= self.data_size  # overflow guard
        return self

    def apu(self):
        '''Copy data from host to device memory (direction: ClientToServer).'''
        assert self.data_size != 0
        assert self.__numpydata is not None
        sdk.lyn_set_current_context(self.context)
        if self.devptr is None:
            # lazy allocation (allocate=False constructor path)
            self.devptr, ret = sdk.lyn_malloc(self.data_size)
            assert ret == 0
        if isinstance(self.__numpydata, np.ndarray):
            assert 0 == sdk.lyn_memcpy(self.devptr,
                                       sdk.lyn_numpy_to_ptr(self.__numpydata),
                                       self.data_size,
                                       sdk.lyn_memcpy_dir_t.ClientToServer)
        else:  # list of numpy arrays packed back-to-back
            offset = 0
            for d in self.__numpydata:
                size = d.size * d.itemsize
                assert 0 == sdk.lyn_memcpy(sdk.lyn_addr_seek(self.devptr, offset),
                                           sdk.lyn_numpy_to_ptr(d),
                                           size,
                                           sdk.lyn_memcpy_dir_t.ClientToServer)
                offset = offset + size
        return self

    def split(self, size_list):
        """Split a tensor into a list of child tensors (aliasing this memory).

        Parameters
        ----------
        size_list : List[int32]
            a list of sizes in bytes.

        Returns
        -------
        List[Tensor] : children share this tensor's device memory; any bytes
        beyond the requested sizes become one final remainder tensor.
        """
        assert self.devptr is not None
        result = []
        offset = 0
        if self.__numpydata is not None:
            data = self.__numpydata.flatten()
            data.dtype = np.int8   # byte view for offset slicing
        for size in size_list:
            if offset + size > self.data_size:
                break
            new_obj = Tensor(dev_id=self.dev_id, size=size, allocate=False)
            new_obj.devptr = sdk.lyn_addr_seek(self.devptr, offset)
            new_obj.__child = True
            if self.__numpydata is not None:
                new_obj = new_obj.from_numpy(data[offset:offset+size])
            result.append(new_obj)
            offset = offset + size
        if offset < self.data_size:
            # remainder tensor for the trailing bytes
            size = self.data_size - offset
            new_obj = Tensor(dev_id=self.dev_id, size=size, allocate=False)
            new_obj.devptr = sdk.lyn_addr_seek(self.devptr, offset)
            new_obj.__child = True
            if self.__numpydata is not None:
                new_obj = new_obj.from_numpy(data[offset:])
            result.append(new_obj)
        return result

    def copy_to(self, to, stream=None):
        """Copy data to another tensor; supports copying across devices.

        Parameters
        ----------
        to : Tensor
            destination tensor (must have the same data_size).
        stream : sdk stream object
            if not None, use the asynchronous copy method.
            Ignored when copying across devices (which bounces via the host).
        """
        assert self.data_size == to.data_size, 'required {}, input {}'.format(self.data_size, to.data_size)
        assert self.devptr is not None and to.devptr is not None
        if self.dev_id == to.dev_id:
            sdk.lyn_set_current_context(self.context)
            if stream is None:
                assert 0 == sdk.lyn_memcpy(to.devptr, self.devptr, self.data_size,
                                           sdk.lyn_memcpy_dir_t.ServerToServer)
            else:
                assert 0 == sdk.lyn_memcpy_async(stream, to.devptr, self.devptr, self.data_size,
                                                 sdk.lyn_memcpy_dir_t.ServerToServer)
            if self.__numpydata is not None:
                to.from_numpy(self.__numpydata)
        else:
            # Cross-device: bounce through host memory (device -> host -> device).
            self.cpu()
            to.from_numpy(self.__numpydata)
            to.apu()
class Model(object):
    '''lynpy.Model is a module to do inference.
    '''

    def __init__(self, dev_id=0, path=None, stream=None, sync=True):
        """Init function.

        Parameters
        ----------
        dev_id : int32
            set which device to use.
        path : str
            the model file path; if given, the model is loaded immediately.
        stream : sdk stream object
            if not set, a default stream is created.
            A different stream can also be assigned via Model.stream.
        sync : bool
            True: block until inference is done.
            False: call Model.synchronize() before accessing output data.
        """
        super(Model, self).__init__()
        self.path = path
        self.dev_id = dev_id
        self.sync = sync
        self.stream = stream
        self.model = None
        self.__input = None
        self.__output = None
        self.__input_list = None
        self.__output_list = None
        self.input_size = 0
        self.output_size = 0
        self.batch_size = 0
        self.__model_desc = None
        self.context, ret = sdk.lyn_create_context(self.dev_id)
        assert ret == 0
        if self.stream is None:
            self.stream, ret = sdk.lyn_create_stream()
            assert ret == 0
        if self.path is not None:
            self.load()

    def __del__(self):
        self.unload()
        sdk.lyn_destroy_stream(self.stream)
        sdk.lyn_destroy_context(self.context)

    def __call__(self, input, output=None):
        '''do inference (see infer())'''
        return self.infer(input, output)

    def load(self, path=None):
        '''Load the model from file and cache its size/batch metadata.'''
        if self.path is None:
            self.path = path
        assert self.path is not None
        sdk.lyn_set_current_context(self.context)
        self.model, ret = sdk.lyn_load_model(self.path)
        assert ret == 0
        self.__model_desc, ret = sdk.lyn_model_get_desc(self.model)
        self.batch_size = self.__model_desc.inputTensorAttrArray[0].batchSize
        self.input_size, ret = sdk.lyn_model_get_input_data_total_len(self.model)
        self.output_size, ret = sdk.lyn_model_get_output_data_total_len(self.model)
        # total_len is per batch element; scale to the full batch.
        self.input_size *= self.batch_size
        self.output_size *= self.batch_size

    def unload(self):
        '''unload model'''
        if self.model is not None:
            sdk.lyn_set_current_context(self.context)
            sdk.lyn_unload_model(self.model)
            self.model = None

    def infer(self, input: Tensor, output: Tensor=None) -> Tensor:
        '''Run inference; the output tensor is created automatically if not supplied.'''
        assert self.model is not None
        # Bug fix: the failure message referenced self.data_size, which Model
        # never defines (it would raise AttributeError instead of reporting).
        assert input.data_size == self.input_size, 'required {}, input {}'.format(self.input_size, input.data_size)
        self.__input = input
        if output is not None:
            assert output.data_size == self.output_size, 'required {}, input {}'.format(self.output_size, output.data_size)
            self.__output = output
        elif self.__output is None:
            self.__output = Tensor(dev_id=self.dev_id, size=self.output_size)
        sdk.lyn_set_current_context(self.context)
        assert 0 == sdk.lyn_execute_model_async(self.stream, self.model,
                                                self.__input.devptr, self.__output.devptr,
                                                self.batch_size)
        if self.sync:
            assert 0 == sdk.lyn_synchronize_stream(self.stream)
        return self.__output

    def synchronize(self):
        '''blocking wait for inference to finish'''
        sdk.lyn_set_current_context(self.context)
        assert 0 == sdk.lyn_synchronize_stream(self.stream)

    def output_tensor(self):
        '''Return (creating if needed) the full output tensor.'''
        if self.__output is None:
            self.__output = Tensor(dev_id=self.dev_id, size=self.output_size)
        return self.__output

    def input_tensor(self):
        '''Return (creating if needed) the full input tensor.'''
        if self.__input is None:
            self.__input = Tensor(dev_id=self.dev_id, size=self.input_size)
        return self.__input

    def output_list(self):
        """Get output tensors as a list, viewed as below:

            [batch0][tensor0, tensor1, ..., tensorX]
            [batch1][tensor0, tensor1, ..., tensorX]
            ...
            [batchN][tensor0, tensor1, ..., tensorX]

        Note:
            output_list() tensors share device memory with output_tensor(),
            but host memory is separate; call cpu() to synchronize data
            before accessing it.
        """
        if self.__output_list is None:
            self.__output_list = []
            if self.batch_size == 1:
                batch_list = [self.output_tensor()]
            else:
                # one equal-sized slice per batch element
                split_size = [self.__model_desc.outputDataLen] * self.batch_size
                batch_list = self.output_tensor().split(split_size)
            shape_list = []
            dtype_list = []
            tensor_size = []
            tensor_num = self.__model_desc.outputTensorAttrArrayNum
            for i in range(tensor_num):
                shape, ret = sdk.lyn_model_get_output_tensor_dims_by_index(self.model, i)
                dtype = self.__model_desc.outputTensorAttrArray[i].dtype
                size = self.__model_desc.outputTensorAttrArray[i].dataLen
                shape_list.append(shape)
                dtype_list.append(SDK_DTYPE[dtype])
                tensor_size.append(size)
            for batch in batch_list:
                tensor_list = batch.split(tensor_size)
                for i in range(tensor_num):
                    tensor_list[i].view_as(shape=shape_list[i], dtype=dtype_list[i])
                self.__output_list.append(tensor_list)
        return self.__output_list

    def input_list(self):
        """Get input tensors as a list, viewed as below:

            [batch0][tensor0, tensor1, ..., tensorX]
            [batch1][tensor0, tensor1, ..., tensorX]
            ...
            [batchN][tensor0, tensor1, ..., tensorX]

        Note:
            input_list() tensors share device memory with input_tensor(),
            but host memory is separate; call cpu() to synchronize data
            before accessing it.
        """
        if self.__input_list is None:
            self.__input_list = []
            if self.batch_size == 1:
                batch_list = [self.input_tensor()]
            else:
                # one equal-sized slice per batch element
                split_size = [self.__model_desc.inputDataLen] * self.batch_size
                batch_list = self.input_tensor().split(split_size)
            shape_list = []
            dtype_list = []
            tensor_size = []
            tensor_num = self.__model_desc.inputTensorAttrArrayNum
            for i in range(tensor_num):
                shape, ret = sdk.lyn_model_get_input_tensor_dims_by_index(self.model, i)
                dtype = self.__model_desc.inputTensorAttrArray[i].dtype
                size = self.__model_desc.inputTensorAttrArray[i].dataLen
                shape_list.append(shape)
                dtype_list.append(SDK_DTYPE[dtype])
                tensor_size.append(size)
            for batch in batch_list:
                tensor_list = batch.split(tensor_size)
                for i in range(tensor_num):
                    tensor_list[i].view_as(shape=shape_list[i], dtype=dtype_list[i])
                self.__input_list.append(tensor_list)
        return self.__input_list
from tokens import *
from memory import *
import tokens
class Parser:
    ############## init code ###############
    def __init__(self, toks):
        """Create a parser over a lexer-produced token list."""
        self.toks = toks      # the token stream to parse
        self.current = 0      # index of the next token to consume
def parse(self):
stmts = []
self._match(Terminator)
while not self._isAtEnd():
try:
stmts.append(self._line())
except ParseError as e:
self._sync()
if not e.tok.toktype is EOF: # deal with this better
print(e) # TODO: deal with errors
return stmts
########### recursive descent parser ###########
def _line(self):
if self._match(Terminator): # allow blank lines
return self._line()
elif self._check(Directive):
return self._directive()
elif self._check(Operator):
return self._operator()
elif self._check(LabelDef):
return self._label(True)
else:
raise ParseError("Unexecpected token", self._peek())
def _directive(self):
dirType = self._consume(
Directive, "This should definataly be a directive.")
data = []
while not self._match(Terminator):
data.append(self._advance())
return Direct(dirType, data)
    def _label(self, isDef=False):
        # Wrap the current token's text in a Label; isDef marks a definition
        # site (LabelDef) rather than a reference.
        return Label(self._advance().data, isDef)
############ operator and memory functions #############
def _operator(self):
opcode = self._consume(
Operator, "This should definataly be a operator.")
if self._match(Terminator):
return Instruction(opcode)
if self._check(tokens.Label):
return Instruction(opcode, self._label())
mem1 = self._memory()
if self._match(Terminator):
return Instruction(opcode, mem1)
self._consume(Comma, "am I missing a comma?")
mem2 = self._memory()
return Instruction(opcode, mem1, mem2)
def _memory(self):
if self._match(Register): # DataDirect, and AddressDirect
reg = self._peek(-1).data
regType = reg[0]
regNum = reg[1]
if regType == "a":
return AddressDirect(regNum)
if regType == "d":
return DataDirect(regNum)
elif self._match(Decrement): # AddressIndirectPreDecrement
self._consume(GroupStart, "decrement expecting a '('")
reg = self._consume(Register, "expecting a 'register'").data
self._consume(GroupEnd, "expecting a ')'")
assert(reg[0] == 'a')
return AddressIndirectPreDecrement(reg[1])
elif self._match(Literal): # ImmediateData
return ImmediateData(self._peek(-1).data)
elif self._match(MemLocLiteral): # AbsoluteLong and AbsoluteShort
loc = self._peek(-1).data
if loc[1] == "l":
return AbsoluteLong(loc[0])
if loc[1] == "w":
return AbsoluteShort(loc[0])
elif self._match(GroupStart): # AddressIndirect, AddressIndirectPostIncrement
reg = self._consume(Register, "expecting a 'register'").data
if self._match(Comma): # ScaledAddressWithOffset
return self.partialScaledIndirectWithOffset(0, reg[1])
self._consume(GroupEnd, "expecting a ')'")
if self._match(Increment): # ScaledAddressWithOffset
return AddressIndirectPostIncrement(reg[1])
return AddressIndirect(reg[1])
elif self._match(Number): # AddressIndirectWithOffset
num = self._peek(-1).data
self._consume(GroupStart, "number expecting a '('")
reg = self._consume(Register, "expecting a 'register'").data
if self._match(Comma): # ScaledAddressWithOffset
return self.partialScaledIndirectWithOffset(num, reg[1])
self._consume(GroupEnd, "expecting a ')'")
return AddressIndirectWithOffset(reg[1], num)
def partialScaledIndirectWithOffset(self, offset, addReg):
# n(%An, Xn*SF)
reg2 = self._consume(Register, "expecting a 'register'").data
self._consume(Star, "expecting a '*'")
scaleFactor = self._consume(Number, "expecting a number").data
self._consume(GroupEnd, "expecting a ')'")
return ScaledAddressWithOffset(addReg, reg2[1], reg2[0], scaleFactor, offset)
######## Helper functions #############
def _match(self, *types):
for type in types:
if self._check(type):
self._advance()
return True
return False
def _check(self, type):
if self._isAtEnd():
return False
else:
return self._peek().toktype is type
def _peek(self, skip=0):
return self.toks[self.current + skip]
def _advance(self):
if not self._isAtEnd():
self.current += 1
return self._peek(-1)
def _consume(self, tokType, message=""):
if self._check(tokType):
return self._advance()
raise self._error(self._peek(), message + " Got %s" %
str(self._peek().toktype))
def _error(self, tok, message):
return ParseError(message, tok)
def _sync(self):
while not self._match(Terminator) and not self._isAtEnd():
self._advance()
def _isAtEnd(self):
return self._peek().toktype is EOF
def parse(toks):
    """Convenience wrapper: parse a token stream into a statement list."""
    return Parser(toks).parse()
class ParseError(Exception):
    """Raised when the parser encounters an unexpected or invalid token.

    Attributes:
        msg: human-readable description of the problem.
        tok: the offending token (its .toktype is inspected by callers).
    """

    def __init__(self, msg, tok):
        # Forward msg to Exception so str(e) / print(e) shows the message.
        # (The original never called super().__init__, so printing a
        # ParseError produced an empty string.)
        super().__init__(msg)
        self.msg = msg
        self.tok = tok
class Instruction:
    """
    An instruction is an opcode(op, size) followed by 0-2 data locations
    (addressing-mode objects supplied by the memory module).
    """
    def __init__(self, opcode, mem1=None, mem2=None):
        self.opcode = opcode   # Operator token; .data is (mnemonic, size suffix)
        self.mem_src = mem1    # source addressing mode, or None
        self.mem_dest = mem2   # destination addressing mode, or None
        self.calculateSize()
    def calculateSize(self):
        # Base opcode word is 2 bytes; each operand contributes its own
        # extension-word size via .additionalsize.
        self.size = 2 + (self.mem_src.additionalsize if self.mem_src is not None else 0) + \
            (self.mem_dest.additionalsize if self.mem_dest is not None else 0)
        # Mnemonics ending in 'q' ("quick" forms) get 4 bytes back when the
        # source is immediate data — presumably because the immediate is
        # packed into the opcode word itself; TODO confirm.
        if self.opcode.data[0][-1] == "q":
            if isinstance(self.mem_src, ImmediateData):
                self.size -= 4
        # Anything over 6 bytes is treated as an illegal operand combination.
        if self.size > 6:
            raise ParseError(
                "Invalid memory access for instruction", self.opcode)
    def __len__(self):
        # Length of the instruction in bytes.
        return self.size
    def __repr__(self):
        return str(self)
    def __str__(self):
        # Renders as "<size>: <mnemonic>.<size-suffix> [src[, dest]]".
        if self.mem_dest is None:
            if self.mem_src is None:
                return "{}: {}.{}".format(self.size, self.opcode.data[0], self.opcode.data[1])
            return "{}: {}.{} {}".format(self.size, self.opcode.data[0], self.opcode.data[1], str(self.mem_src))
        return "{}: {}.{} {}, {}".format(self.size, self.opcode.data[0], self.opcode.data[1], str(self.mem_src), str(self.mem_dest))
class Direct:
    """An assembler directive: a directive token plus its operand tokens.

    Attributes:
        directive: the Directive token (its .data is the directive name).
        data: list of operand tokens (possibly empty).
    """

    def __init__(self, directive, data=None):
        self.directive = directive
        # Normalize None to an empty list so __str__ never iterates over
        # None (the original raised TypeError when data was omitted).
        self.data = data if data is not None else []

    def __str__(self):
        return " .{} {}".format(self.directive.data, ", ".join([str(x.data) for x in self.data]))
|
# trade/tests/test_views.py
import json
import os
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.staticfiles import finders
from django.core.files import File
from django.test import TestCase
from django.urls import reverse
# Item model view tests
from trade.forms import PayForm
from accounts.models import Profile
from category.models import Category
from trade.models import Item, Order, ItemImage, ItemComment
from store.models import StoreProfile
User = get_user_model()
class ItemNewTest(TestCase):
    """Tests for the item-creation view (trade:item_new)."""

    # Create the seller with a store profile and the category items will use.
    @classmethod
    def setUpTestData(cls):
        seller = get_user_model().objects.create_user(username='seller', email='<EMAIL>', password='<PASSWORD>')
        StoreProfile.objects.create(user=seller, name='seller가게')
        cls.cate = Category.objects.create(name='전자제품')

    def test_view_url(self):
        # Anonymous users are redirected to login.
        response = self.client.get('/trade/item/new/')
        self.assertRedirects(response, '/accounts/login/?next=/trade/item/new/')

    def test_view_url_name(self):
        response = self.client.get(reverse('trade:item_new'))
        self.assertRedirects(response, '/accounts/login/?next=/trade/item/new/')

    def test_login_view_url_name(self):
        # A logged-in seller sees the creation form.
        self.client.login(username='seller', password='<PASSWORD>')
        response = self.client.get(reverse('trade:item_new'))
        self.assertEqual(str(response.context['user']), 'seller')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'trade/item_new.html')

    def test_item_add(self):
        """A valid POST creates the item and returns is_valid=True as JSON."""
        self.client.login(username='seller', password='<PASSWORD>')
        url = finders.find('profile/3.png')
        # Open the upload inside a context manager so the handle is closed
        # (the original leaked the file object).
        with open(url, 'rb') as fh:
            response = self.client.post(reverse('trade:item_new'), {
                'title': 'test item',
                'desc': 'test',
                'amount': 1000,
                'photo': File(fh),
                'category': self.cate.id,
                'item_status': 's'
            })
        self.assertEqual(
            json.loads(str(response.content, encoding='utf-8'))['is_valid'],
            True
        )
        self.assertTrue(Item.objects.get(title='test item'))

    def test_item_add_fail(self):
        """An invalid POST (missing photo, bad status) returns field errors."""
        self.client.login(username='seller', password='<PASSWORD>')
        response = self.client.post(reverse('trade:item_new'), {
            'title': 'test item',
            'desc': 'test',
            'amount': 1000,
            'photo': '',
            'category': self.cate.id,
            'item_status': 't'
        })
        self.assertEqual(
            json.loads(str(response.content, encoding='utf-8'))['is_valid'],
            False
        )
        self.assertTrue(
            json.loads(str(response.content, encoding='utf-8'))['error']['photo'],
        )
        self.assertTrue(
            json.loads(str(response.content, encoding='utf-8'))['error']['item_status'],
        )
        self.assertFalse(Item.objects.filter(title='test item').exists())

    # Remove any image files written to disk during the test.
    def tearDown(self):
        try:
            imgs = Item.objects.get(title='test item').itemimage_set.all()
        except Item.DoesNotExist:
            # No item was created by this test — nothing to clean up.
            # (Was a bare `except:`, which also hid unrelated errors.)
            return super().tearDown()
        if not settings.USE_AWS:
            for img in imgs:
                directory = os.path.dirname(img.photo.path)
                if os.path.isfile(img.photo.path):
                    os.remove(img.photo.path)
                if len(os.listdir(directory)) == 0:
                    os.rmdir(directory)
        return super().tearDown()
class ItemUpdateTest(TestCase):
    """Tests for the item-update view (trade:item_update)."""

    # Create the seller, their store, a category, and the item under test.
    @classmethod
    def setUpTestData(cls):
        cls.seller = get_user_model().objects.create_user(username='seller', email='<EMAIL>', password='<PASSWORD>')
        StoreProfile.objects.create(user=cls.seller, name='seller가게')
        cls.cate = Category.objects.create(name='전자제품')
        cls.item = Item.objects.create(user=cls.seller, category=cls.cate, title='[중고]닌텐도셋트', amount=100000)

    def test_view_url_name(self):
        # Anonymous users are redirected to login.
        response = self.client.get(reverse('trade:item_update', kwargs={'pk': self.item.id}))
        self.assertRedirects(response, '/accounts/login/?next=/trade/item/update/{}/'.format(self.item.id))

    def test_other_login_view_url_name(self):
        # A user who doesn't own the item is bounced home with an error message.
        other_user = get_user_model().objects.create_user(username='other', email='<EMAIL>', password='<PASSWORD>')
        StoreProfile.objects.create(user=other_user, name='other가게')
        self.client.login(username='other', password='<PASSWORD>')
        response = self.client.get(reverse('trade:item_update', kwargs={'pk': self.item.id}), follow=True)
        self.assertEqual(list(response.context.get('messages'))[0].message, '잘못된 접근 입니다.')
        self.assertRedirects(response, '/')

    def test_login_view_url_name(self):
        # The owner can open the update page.
        self.client.login(username='seller', password='<PASSWORD>')
        response = self.client.get(reverse('trade:item_update', kwargs={'pk': self.item.id}))
        self.assertEqual(str(response.context['user']), 'seller')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'trade/item_update.html')

    def test_item_update(self):
        """A valid status change is saved and echoed back as JSON."""
        self.client.login(username='seller', password='<PASSWORD>')
        item_id = self.item.id  # renamed from `id`: don't shadow the builtin
        response = self.client.post(reverse('trade:item_update', kwargs={'pk': item_id}), {
            'pay_status': 'sale_complete'
        })
        self.assertEqual(
            json.loads(str(response.content, encoding='utf-8'))['pay_status'],
            '판매완료'
        )
        self.assertEqual(Item.objects.get(id=item_id).pay_status, 'sale_complete')

    def test_item_update_fail(self):
        """An invalid choice raises a form error and leaves the item untouched."""
        self.client.login(username='seller', password='<PASSWORD>')
        item_id = self.item.id
        response = self.client.post(reverse('trade:item_update', kwargs={'pk': item_id}), {
            'pay_status': 'hacked'
        })
        self.assertFormError(
            response, 'form', 'pay_status',
            '올바르게 선택해 주세요. hacked 이/가 선택가능항목에 없습니다.'
        )
        self.assertEqual(Item.objects.get(id=item_id).pay_status, 'ready')
class ItemDetailTest(TestCase):
    """Tests for the item-detail view (trade:item_detail) and its comments."""

    # Create the seller, their store, a category, the item and one comment.
    @classmethod
    def setUpTestData(cls):
        cls.seller = get_user_model().objects.create_user(username='seller', email='<EMAIL>', password='<PASSWORD>')
        StoreProfile.objects.create(user=cls.seller, name='seller가게')
        cls.cate = Category.objects.create(name='전자제품')
        cls.item = Item.objects.create(user=cls.seller, category=cls.cate, title='[중고]닌텐도셋트', amount=100000)
        cls.cmt = ItemComment.objects.create(user=cls.seller, item=cls.item, message='im seller')

    def test_view_url_name(self):
        """The detail page renders with related items (sold ones excluded)."""
        Item.objects.create(user=self.seller, category=self.cate, title='[중고]맥북', amount=100000, pay_status='sale_complete')
        Item.objects.create(user=self.seller, category=self.cate, title='[중고]아이폰', amount=100000)
        response = self.client.get(reverse('trade:item_detail', kwargs={'pk': self.item.id}))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'trade/item_detail.html')
        self.assertTrue('wish_ctn' in response.context)
        self.assertTrue('follow_ctn' in response.context)
        self.assertTrue('items' in response.context)
        # Only the other on-sale item shows up; the sold one is filtered out.
        self.assertQuerysetEqual(response.context['items'], ['<Item: [중고]아이폰>'])
        self.assertTrue('items_ctn' in response.context)
        self.assertEqual(str(response.context['items_ctn']), '1')
        self.assertTrue('kakao_key' in response.context)
        self.assertTrue('facebook_key' in response.context)

    def test_comment_add_not_login(self):
        # Posting a comment anonymously redirects to login.
        response = self.client.post(reverse('trade:item_detail', kwargs={'pk': self.item.id}), {
            'message': 'hello django test'
        })
        self.assertRedirects(response, '/accounts/login/?next=/trade/item/detail/{}/'.format(self.item.id))

    def test_comment_add(self):
        """A logged-in user can add a top-level (parentless) comment."""
        other_user = get_user_model().objects.create_user(username='other', email='<EMAIL>', password='<PASSWORD>')
        StoreProfile.objects.create(user=other_user, name='other가게')
        self.client.login(username='other', password='<PASSWORD>')
        self.client.post(reverse('trade:item_detail', kwargs={'pk': self.item.id}), {
            'message': 'hello django test'
        })
        self.assertTrue(ItemComment.objects.filter(item=self.item, user=other_user).exists())
        self.assertIsNone(ItemComment.objects.get(item=self.item, user=other_user).parent)

    def test_reply_comment_add(self):
        """Passing parent_id creates a reply linked to the parent comment."""
        other_user = get_user_model().objects.create_user(username='other', email='<EMAIL>', password='<PASSWORD>')
        StoreProfile.objects.create(user=other_user, name='other가게')
        self.client.login(username='other', password='<PASSWORD>')
        self.client.post(reverse('trade:item_detail', kwargs={'pk': self.item.id}), {
            'message': 'hello django test',
            'parent_id': self.cmt.id
        })
        self.assertTrue(ItemComment.objects.filter(item=self.item, user=other_user).exists())
        self.assertEqual(self.cmt, ItemComment.objects.get(item=self.item, user=other_user).parent)

    def test_comment_pagination(self):
        """With 9 comments total, page 2 holds the remaining 4 (5 per page)."""
        cmt_cnt = 8
        for cnt in range(cmt_cnt):
            ItemComment.objects.create(user=self.seller, item=self.item, message='hi')
        response = self.client.get(reverse('trade:item_detail', kwargs={'pk': self.item.id}), {'page': 2})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['comments']), 4)
class ItemDeleteTest(TestCase):
    """Tests for the item-delete view (trade:item_delete)."""

    # Create the seller, their store, a category, the item and one comment.
    @classmethod
    def setUpTestData(cls):
        cls.seller = get_user_model().objects.create_user(username='seller', email='<EMAIL>', password='<PASSWORD>')
        StoreProfile.objects.create(user=cls.seller, name='seller가게')
        cls.cate = Category.objects.create(name='전자제품')
        cls.item = Item.objects.create(user=cls.seller, category=cls.cate, title='[중고]닌텐도셋트', amount=100000)
        cls.cmt = ItemComment.objects.create(user=cls.seller, item=cls.item, message='im seller')

    def test_view_url_name(self):
        # Anonymous users are redirected to login.
        response = self.client.get(reverse('trade:item_delete', kwargs={'pk': self.item.id}))
        self.assertRedirects(response, '/accounts/login/?next=/trade/item/delete/{}/'.format(self.item.id))

    def test_view_url_name_other_login(self):
        # A non-owner is bounced home with an error message.
        other_user = get_user_model().objects.create_user(username='other', email='<EMAIL>', password='<PASSWORD>')
        StoreProfile.objects.create(user=other_user, name='other가게')
        self.client.login(username='other', password='<PASSWORD>')
        response = self.client.get(reverse('trade:item_delete', kwargs={'pk': self.item.id}), follow=True)
        self.assertEqual(list(response.context.get('messages'))[0].message, '잘못된 접근 입니다.')
        self.assertRedirects(response, '/')

    def test_get_item_delete(self):
        # The owner can open the delete-confirmation page.
        self.client.login(username='seller', password='<PASSWORD>')
        response = self.client.get(reverse('trade:item_delete', kwargs={'pk': self.item.id}))
        self.assertEqual(response.status_code, 200)

    def test_post_item_delete(self):
        """POST removes the item and its comments, then redirects to the store."""
        self.client.login(username='seller', password='<PASSWORD>')
        response = self.client.post(reverse('trade:item_delete', kwargs={'pk': self.item.id}))
        self.assertFalse(Item.objects.filter(id=self.item.id).exists())
        self.assertFalse(ItemComment.objects.filter(item=self.item.id).exists())
        self.assertRedirects(response, reverse('store:store_sell_list', kwargs={'pk': self.seller.storeprofile.id}))
class CommentUpdateTest(TestCase):
    """Tests for the comment-update view (trade:comment_update)."""

    # Create the seller, their store, a category, the item and one comment.
    @classmethod
    def setUpTestData(cls):
        cls.seller = get_user_model().objects.create_user(username='seller', email='<EMAIL>', password='<PASSWORD>')
        StoreProfile.objects.create(user=cls.seller, name='seller가게')
        cls.cate = Category.objects.create(name='전자제품')
        cls.item = Item.objects.create(user=cls.seller, category=cls.cate, title='[중고]닌텐도셋트', amount=100000)
        cls.cmt = ItemComment.objects.create(user=cls.seller, item=cls.item, message='im seller')

    def test_view_url_name(self):
        # Anonymous users are redirected to login.
        response = self.client.get(reverse('trade:comment_update', kwargs={'pk': self.item.id, 'cid': self.cmt.id}))
        self.assertRedirects(response, '/accounts/login/?next=/trade/comment/update/{}/{}/'.format(self.item.id, self.cmt.id))

    def test_view_url_name_other_login(self):
        # A user who doesn't own the comment is sent back to the item page.
        other_user = get_user_model().objects.create_user(username='other', email='<EMAIL>', password='<PASSWORD>')
        StoreProfile.objects.create(user=other_user, name='other가게')
        self.client.login(username='other', password='<PASSWORD>')
        response = self.client.get(reverse('trade:comment_update', kwargs={'pk': self.item.id, 'cid': self.cmt.id}), follow=True)
        self.assertEqual(list(response.context.get('messages'))[0].message, '잘못된 접근 입니다.')
        self.assertRedirects(response, reverse('trade:item_detail', kwargs={'pk': self.item.id}))

    def test_get_comment_update(self):
        # The comment's author can open the edit form.
        self.client.login(username='seller', password='<PASSWORD>')
        response = self.client.get(reverse('trade:comment_update', kwargs={'pk': self.item.id, 'cid': self.cmt.id}))
        self.assertEqual(response.status_code, 200)

    def test_post_comment_update(self):
        """POST saves the new message and echoes it back as JSON."""
        self.client.login(username='seller', password='<PASSWORD>')
        response = self.client.post(reverse('trade:comment_update', kwargs={'pk': self.item.id, 'cid': self.cmt.id}), {
            'message': 'hello django test'
        })
        self.assertEqual(ItemComment.objects.get(id=self.cmt.id).message, 'hello django test')
        self.assertEqual(json.loads(str(response.content, encoding='utf-8'))['msg'], 'hello django test')
class CommentDeleteTest(TestCase):
    """Tests for the comment-delete view (trade:comment_delete)."""

    # Create the seller, their store, a category, the item and one comment.
    @classmethod
    def setUpTestData(cls):
        cls.seller = get_user_model().objects.create_user(username='seller', email='<EMAIL>', password='<PASSWORD>')
        StoreProfile.objects.create(user=cls.seller, name='seller가게')
        cls.cate = Category.objects.create(name='전자제품')
        cls.item = Item.objects.create(user=cls.seller, category=cls.cate, title='[중고]닌텐도셋트', amount=100000)
        cls.cmt = ItemComment.objects.create(user=cls.seller, item=cls.item, message='im seller')

    def test_view_url_name(self):
        # Anonymous users are redirected to login.
        response = self.client.get(reverse('trade:comment_delete', kwargs={'pk': self.item.id, 'cid': self.cmt.id}))
        self.assertRedirects(response, '/accounts/login/?next=/trade/comment/delete/{}/{}/'.format(self.item.id, self.cmt.id))

    def test_view_url_name_other_login(self):
        # A user who doesn't own the comment is sent back to the item page.
        other_user = get_user_model().objects.create_user(username='other', email='<EMAIL>', password='<PASSWORD>')
        StoreProfile.objects.create(user=other_user, name='other가게')
        self.client.login(username='other', password='<PASSWORD>')
        response = self.client.get(reverse('trade:comment_delete', kwargs={'pk': self.item.id, 'cid': self.cmt.id}), follow=True)
        self.assertEqual(list(response.context.get('messages'))[0].message, '잘못된 접근 입니다.')
        self.assertRedirects(response, reverse('trade:item_detail', kwargs={'pk': self.item.id}))

    # Renamed from test_get_item_delete: this class tests comment deletion.
    def test_get_comment_delete(self):
        self.client.login(username='seller', password='<PASSWORD>')
        response = self.client.get(reverse('trade:comment_delete', kwargs={'pk': self.item.id, 'cid': self.cmt.id}))
        self.assertEqual(response.status_code, 200)

    # Renamed from test_post_item_delete: this class tests comment deletion.
    def test_post_comment_delete(self):
        """POST removes only the comment; the item survives."""
        self.client.login(username='seller', password='<PASSWORD>')
        response = self.client.post(reverse('trade:comment_delete', kwargs={'pk': self.item.id, 'cid': self.cmt.id}))
        self.assertTrue(Item.objects.filter(id=self.item.id).exists())
        self.assertFalse(ItemComment.objects.filter(id=self.cmt.id).exists())
        self.assertEqual(response.status_code, 200)
class OrderNewTest(TestCase):
    """Tests for the order-creation view (trade:order_new)."""

    @classmethod
    def setUpTestData(cls):
        # Buyer
        cls.order = get_user_model().objects.create_user(username='order', email='<EMAIL>', password='<PASSWORD>')
        StoreProfile.objects.create(user=cls.order, name='order가게')
        Profile.objects.create(
            user=cls.order,
            phone='0123456789',
            post_code='111',
            address='상암동',
            detail_address='101호',
            account_num='123'
        )
        # Seller
        cls.seller = get_user_model().objects.create_user(username='seller', email='<EMAIL>', password='<PASSWORD>')
        StoreProfile.objects.create(user=cls.seller, name='seller가게')
        Profile.objects.create(
            user=cls.seller,
            phone='0122222222',
            post_code='13321',
            address='둔촌동',
            detail_address='301호',
            account_num='321'
        )
        # The seller's item
        cls.cate = Category.objects.create(name='전자제품')
        cls.item = Item.objects.create(user=cls.seller, category=cls.cate, title='[중고]닌텐도셋트', amount=100000)

    # Anonymous access
    def test_view_url_name(self):
        response = self.client.get(reverse('trade:order_new', kwargs={'item_id': self.item.id}))
        self.assertRedirects(response, '/accounts/login/?next=/trade/order/new/{}/'.format(self.item.id))

    # Accessing one's own item is rejected
    def test_get_seller_login(self):
        self.client.login(username='seller', password='<PASSWORD>')
        response = self.client.get(reverse('trade:order_new', kwargs={'item_id': self.item.id}), follow=True)
        self.assertEqual(list(response.context.get('messages'))[0].message, '자신의 물품은 구매할수 없습니다.')
        self.assertEqual(response.status_code, 200)

    # Accessing a reserved or already-sold item is rejected
    def test_get_sells_item(self):
        self.client.login(username='order', password='<PASSWORD>')
        item = Item.objects.create(user=self.seller, category=self.cate, title='[중고]맥북', amount=100000, pay_status='reservation')
        response = self.client.get(reverse('trade:order_new', kwargs={'item_id': item.id}), follow=True)
        self.assertEqual(list(response.context.get('messages'))[0].message, '이미 예약이 되었거나 판매완료 상품입니다.')
        self.assertEqual(response.status_code, 200)

    # Logged-in access (normal case)
    def test_get_order_item(self):
        self.client.login(username='order', password='<PASSWORD>')
        response = self.client.get(reverse('trade:order_new', kwargs={'item_id': self.item.id}))
        self.assertEqual(response.status_code, 200)

    def test_post_order_item_import(self):
        """Paying via iamport redirects to the payment page for the order."""
        self.client.login(username='order', password='<PASSWORD>')
        response = self.client.post(reverse('trade:order_new', kwargs={'item_id': self.item.id}), {
            'username': '홍길동',
            'phone': '0123456789',
            'post_code': '111',
            'address': '상암동',
            'detail_address': '101호',
            'email': '<EMAIL>',
            'pay_choice': 'import'
        })
        order = Order.objects.get(item=self.item, user=self.order)
        self.assertTrue('/trade/order/{}/pay/'.format(self.item.id) in response['Location'])
        self.assertRedirects(response, '/trade/order/{}/pay/{}/'.format(self.item.id, str(order.merchant_uid)))

    def test_post_order_item_banktrans(self):
        """Bank transfer reserves the order (inactive) and shows its info page."""
        self.client.login(username='order', password='<PASSWORD>')
        response = self.client.post(reverse('trade:order_new', kwargs={'item_id': self.item.id}), {
            'username': '홍길동',
            'phone': '0123456789',
            'post_code': '111',
            'address': '상암동',
            'detail_address': '101호',
            'email': '<EMAIL>',
            'pay_choice': 'bank_trans'
        }, HTTP_HOST='example.com')
        order = Order.objects.get(item=self.item, user=self.order)
        self.assertRedirects(response, '/trade/info/{}/'.format(order.id))
        self.assertEqual(order.status, 'reserv')
        self.assertEqual(order.is_active, False)
|
import asyncio
import importlib
import logging
from json import JSONDecodeError
from typing import Callable, List
from urllib.parse import urljoin
import aiohttp
import aioredis
import sentry_sdk
from aio_pika import Connection, ExchangeType, IncomingMessage
from aio_pika import Message as AMQPMessage
from aio_pika import connect_robust
from aio_pika.message import DeliveryMode
from vaccine import config
from vaccine.models import Answer, Event, Message, User
from vaccine.utils import DECODE_MESSAGE_EXCEPTIONS, HTTP_EXCEPTIONS
logging.basicConfig(level=config.LOG_LEVEL.upper())
logger = logging.getLogger(__name__)
class Worker:
    """Consumes inbound messages and delivery events from AMQP, runs inbound
    messages through the configured application class, and publishes the
    resulting replies (and, optionally, answers) back to the exchange."""

    def __init__(self):
        # APPLICATION_CLASS is a dotted path, e.g. "pkg.module.ClassName".
        modname, clsname = config.APPLICATION_CLASS.rsplit(".", maxsplit=1)
        module = importlib.import_module(modname)
        self.ApplicationClass = getattr(module, clsname)

    async def setup(self):
        """Connect to AMQP and Redis, then start consuming the inbound and
        event queues; start the optional AnswerWorker if fully configured."""
        self.connection = await connect_robust(config.AMQP_URL)
        self.channel = await self.connection.channel()
        await self.channel.set_qos(prefetch_count=config.CONCURRENCY)
        self.exchange = await self.channel.declare_exchange(
            "vumi", type=ExchangeType.DIRECT, durable=True, auto_delete=False
        )
        self.redis = aioredis.from_url(
            config.REDIS_URL, encoding="utf-8", decode_responses=True
        )
        self.inbound_queue = await self.setup_consume(
            f"{config.TRANSPORT_NAME}.inbound", self.process_message
        )
        self.event_queue = await self.setup_consume(
            f"{config.TRANSPORT_NAME}.event", self.process_event
        )
        # The answer worker is optional: only start it when all three
        # answer-API settings are present.
        if (
            config.ANSWER_API_URL
            and config.ANSWER_API_TOKEN
            and config.ANSWER_RESOURCE_ID
        ):
            self.answer_worker = AnswerWorker(
                connection=self.connection,
                url=config.ANSWER_API_URL,
                token=config.ANSWER_API_TOKEN,
                resource_id=config.ANSWER_RESOURCE_ID,
            )
            await self.answer_worker.setup()
        else:
            self.answer_worker = None

    async def setup_consume(self, routing_key: str, callback: Callable):
        """Declare a durable queue, bind it to the exchange under
        routing_key, and attach *callback* as its consumer."""
        queue = await self.channel.declare_queue(
            routing_key, durable=True, auto_delete=False
        )
        await queue.bind(self.exchange, routing_key)
        await queue.consume(callback)
        return queue

    async def teardown(self):
        """Close AMQP and Redis connections and stop the answer worker."""
        await self.connection.close()
        await self.redis.close()
        if self.answer_worker:
            await self.answer_worker.teardown()

    async def process_message(self, amqp_msg: IncomingMessage):
        """Decode an inbound AMQP message, run it through the application,
        publish any outbound replies/answers, and persist user state."""
        try:
            msg = Message.from_json(amqp_msg.body.decode("utf-8"))
        except DECODE_MESSAGE_EXCEPTIONS:
            # Undecodable payloads are dropped, not requeued.
            # NOTE(review): reject() is not awaited here — confirm it is
            # synchronous in the pinned aio_pika version.
            logger.exception(f"Invalid message body {amqp_msg.body!r}")
            amqp_msg.reject(requeue=False)
            return
        async with amqp_msg.process(requeue=True):
            # Per-user lock so two messages from the same address are never
            # processed concurrently.
            async with self.redis.lock(
                f"userlock.{msg.from_addr}", timeout=config.USER_LOCK_TIMEOUT
            ):
                logger.debug(f"Processing inbound message {msg}")
                user_data = await self.redis.get(f"user.{msg.from_addr}")
                user = User.get_or_create(msg.from_addr, user_data)
                app = self.ApplicationClass(user)
                for outbound in await app.process_message(msg):
                    await self.publish_message(outbound)
                if self.answer_worker:
                    for answer in app.answer_events:
                        await self.publish_answer(answer)
                # Persist the (possibly mutated) user state with a TTL.
                await self.redis.setex(
                    f"user.{msg.from_addr}", config.TTL, user.to_json()
                )

    async def publish_message(self, msg: Message):
        """Publish an outbound message to the transport's outbound queue."""
        await self.exchange.publish(
            AMQPMessage(
                msg.to_json().encode("utf-8"),
                delivery_mode=DeliveryMode.PERSISTENT,
                content_type="application/json",
                content_encoding="UTF-8",
            ),
            routing_key=f"{config.TRANSPORT_NAME}.outbound",
        )

    async def publish_answer(self, answer: Answer):
        """Publish an answer event for the AnswerWorker to batch and upload."""
        await self.exchange.publish(
            AMQPMessage(
                answer.to_json().encode("utf-8"),
                delivery_mode=DeliveryMode.PERSISTENT,
                content_type="application/json",
                content_encoding="UTF-8",
            ),
            routing_key=f"{config.TRANSPORT_NAME}.answer",
        )

    async def process_event(self, amqp_msg: IncomingMessage):
        """Decode and acknowledge a delivery event; currently only logged."""
        try:
            event = Event.from_json(amqp_msg.body.decode("utf-8"))
        except DECODE_MESSAGE_EXCEPTIONS:
            logger.exception(f"Invalid event body {amqp_msg.body!r}")
            amqp_msg.reject(requeue=False)
            return
        async with amqp_msg.process(requeue=True):
            logger.debug(f"Processing event {event}")
class AnswerWorker:
    """Batches answer events from AMQP and uploads them to a flow-results
    server; failed batches are retried one answer at a time."""

    def __init__(self, connection: Connection, url: str, token: str, resource_id: str):
        self.connection = connection
        # Raw AMQP messages buffered until the next periodic push.
        self.answers: List[IncomingMessage] = []
        self.session = aiohttp.ClientSession(
            raise_for_status=False,
            timeout=aiohttp.ClientTimeout(total=10),
            connector=aiohttp.TCPConnector(limit=1),
            headers={
                "Authorization": f"Token {token}",
                "Content-Type": "application/vnd.api+json",
            },
        )
        self.url = url
        self.token = token
        self.resource_id = resource_id

    async def setup(self):
        """Open a dedicated channel, consume the answer queue, and start the
        periodic batch-push task."""
        self.channel = await self.connection.channel()
        await self.channel.set_qos(prefetch_count=config.ANSWER_BATCH_SIZE)
        self.exchange = await self.channel.declare_exchange(
            "vumi", type=ExchangeType.DIRECT, durable=True, auto_delete=False
        )
        self.answer_queue = await self.setup_consume(
            f"{config.TRANSPORT_NAME}.answer", self.process_answer
        )
        self.periodic_task = asyncio.create_task(self._periodic_loop())

    async def setup_consume(self, routing_key: str, callback: Callable):
        """Declare a durable queue bound under routing_key and attach
        *callback* as its consumer."""
        queue = await self.channel.declare_queue(
            routing_key, durable=True, auto_delete=False
        )
        await queue.bind(self.exchange, routing_key)
        await queue.consume(callback)
        return queue

    async def teardown(self):
        """Cancel the periodic push and close the channel."""
        self.periodic_task.cancel()
        await self.channel.close()

    async def process_answer(self, amqp_msg: IncomingMessage):
        """Validate an incoming answer and buffer it for the next batch.

        The message is NOT acked here — it is acked/nacked in _push_results
        once the upload outcome is known.
        """
        try:
            Answer.from_json(amqp_msg.body.decode("utf-8"))
        except DECODE_MESSAGE_EXCEPTIONS:
            logger.exception(f"Invalid answer body {amqp_msg.body!r}")
            amqp_msg.reject(requeue=False)
            return
        self.answers.append(amqp_msg)

    async def _periodic_loop(self):
        # Push whatever has accumulated every ANSWER_BATCH_TIME seconds.
        while True:
            await asyncio.sleep(config.ANSWER_BATCH_TIME)
            await self._push_results()

    async def _submit_answers(
        self, answers: List[Answer]
    ) -> aiohttp.client.ClientResponse:
        """POST a batch of answers to the flow-results responses endpoint and
        record request/response data in Sentry context for debugging."""
        data = {
            "data": {
                "type": "responses",
                "id": self.resource_id,
                "attributes": {
                    "responses": [
                        [
                            a.timestamp.isoformat(),
                            a.row_id,
                            a.address,
                            a.session_id,
                            a.question,
                            a.response,
                            a.response_metadata,
                        ]
                        for a in answers
                    ]
                },
            }
        }
        response = await self.session.post(
            url=urljoin(
                self.url, f"flow-results/packages/{self.resource_id}/responses/"
            ),
            json=data,
        )
        response_data = await response.text()
        sentry_sdk.set_context(
            "answer_api", {"request_data": data, "response_data": response_data}
        )
        return response

    async def _push_results(self):
        """Upload the buffered batch; on a 400, retry answers individually so
        only the bad ones are nacked. Unprocessed messages are re-buffered."""
        # Swap the buffer out atomically so new answers keep accumulating.
        msgs, self.answers = self.answers, []
        answers: List[Answer] = []
        processed = []
        for msg in msgs:
            answers.append(Answer.from_json(msg.body.decode("utf-8")))
        if not answers:
            return
        try:
            response = await self._submit_answers(answers)
            # If there is a 400 response, then we send one by one to figure out which
            # answer has an issue, and nack it
            if response.status == 400:
                for msg in msgs:
                    answer = Answer.from_json(msg.body.decode("utf-8"))
                    response = await self._submit_answers([answer])
                    if response.status == 400:
                        # If this is a duplicate submission, then ignore error
                        try:
                            response_body = await response.json()
                            if (
                                response_body["data"]["attributes"]["responses"][0]
                                == "row_id is not unique for flow question"
                            ):
                                msg.ack()
                                processed.append(msg)
                                continue
                        except (TypeError, KeyError, IndexError, JSONDecodeError):
                            pass
                        msg.nack(requeue=False)
                        processed.append(msg)
                    # For a nacked 400 this raises, aborting the loop; the
                    # remaining msgs are re-buffered by the except below.
                    response.raise_for_status()
                    msg.ack()
                    processed.append(msg)
            else:
                response.raise_for_status()
                for m in msgs:
                    m.ack()
        except HTTP_EXCEPTIONS:
            logger.exception("Error sending results to flow results server")
            # Put everything we didn't ack/nack back for the next push.
            self.answers.extend(m for m in msgs if m not in processed)
            return
if __name__ == "__main__":  # pragma: no cover
    # Run the worker until interrupted, tearing down connections on exit.
    worker = Worker()
    # asyncio.get_event_loop() is deprecated for creating a loop in the
    # main entry point since Python 3.10; create one explicitly instead.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(worker.setup())
    logger.info("Worker running")
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        loop.run_until_complete(worker.teardown())
|
#!/usr/bin/env python
# Importing libraries
import os, sys, cv2, argparse
import numpy as np
import matplotlib.pyplot as plt
sys.path.append(os.path.join(".."))
from utils.imutils import jimshow
from utils.imutils import jimshow_channel
def main(inpath, startpoint, endpoint):
    """Save three derived images next to *inpath*: a copy with the crop
    rectangle drawn, the cropped region, and the crop with letter contours.

    Args:
        inpath: path to the input image file.
        startpoint: (x, y) of one corner of the crop rectangle.
        endpoint: (x, y) of the opposite corner of the crop rectangle.
    """
    # Loading image
    image = cv2.imread(inpath)
    # Recreating the image with a rectangle on top
    color = (0, 255, 0)  # green
    thickness = 2
    image_rectangle = cv2.rectangle(image.copy(), startpoint, endpoint, color, thickness)
    # Save the rectangle overlay — the original computed it but never wrote
    # it out, despite the CLI description promising this file.
    rectangle_outpath = os.path.join(os.path.split(inpath)[0], "image_with_rectangle.jpg")
    cv2.imwrite(rectangle_outpath, image_rectangle)
    print(f"A new file has been saved successfully: \"{rectangle_outpath}\"")
    # Cropping the image. NumPy indexes rows first, so the y-range comes
    # before the x-range (the original comment had this backwards).
    image_cropped = image[startpoint[1]:endpoint[1], startpoint[0]:endpoint[0]]
    # Writing cropped image
    cropped_outpath = os.path.join(os.path.split(inpath)[0], "image_cropped.jpg")
    cv2.imwrite(cropped_outpath, image_cropped)
    print(f"A new file has been saved successfully: \"{cropped_outpath}\"")
    # Flattening the image to greyscale, then blurring before edge detection
    grey_image = cv2.cvtColor(image_cropped, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(grey_image, (5, 5), 0)
    # Canny edge detection with hysteresis thresholds 75/150
    canny = cv2.Canny(blurred, 75, 150)
    # Find only the external contours (we don't want edges within letters)
    contours, _ = cv2.findContours(canny.copy(),  # .copy() so the canny image isn't modified
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    # The original cropped image with all contours overlaid in green
    image_letters = cv2.drawContours(image_cropped.copy(),
                                     contours,  # our list of contours
                                     -1,        # -1 means draw all contours
                                     (0, 255, 0),  # contour color
                                     2)         # contour pixel width
    # Writing the cropped image with contour overlay
    cropped_contour_outpath = os.path.join(os.path.split(inpath)[0], "image_letters.jpg")
    cv2.imwrite(cropped_contour_outpath, image_letters)
    print(f"A new file has been saved successfully: \"{cropped_contour_outpath}\"")
# Define behaviour when called from command line
if __name__ == "__main__":
    def _parse_point(value):
        """Parse a CLI point argument given as "x,y" into a tuple of two ints."""
        x, y = value.split(",")
        return (int(x), int(y))

    # Initialise ArgumentParser class
    parser = argparse.ArgumentParser(
        description="Takes a target image - creates a copy with a green rectangle specifying boundary of crop. Also creates a cropped version with contours of letters")
    # Add inpath argument
    parser.add_argument(
        "-i",
        "--inpath",
        type=str,
        default=os.path.join("..", "data", "jefferson", "jefferson.jpg"),
        required=False,
        help="str - path to image file to be processed")
    # Crop start point. NOTE: the previous `type=tuple` split a CLI string like
    # "1385,885" into a tuple of single characters; parse it explicitly instead.
    # (Non-string defaults are passed through untouched by argparse.)
    parser.add_argument(
        "-s",
        "--startpoint",
        type=_parse_point,
        default=(1385, 885),
        required=False,
        help="point given as \"x,y\" - first corner pixel of crop")
    # Crop end point (help text previously duplicated the startpoint wording).
    parser.add_argument(
        "-e",
        "--endpoint",
        type=_parse_point,
        default=(2890, 2790),
        required=False,
        help="point given as \"x,y\" - opposite corner pixel of crop")
    # Taking all the arguments we added to the parser and input into "args"
    args = parser.parse_args()
    main(args.inpath, args.startpoint, args.endpoint)
|
import numpy as np
from matplotlib import pyplot as plt
import pylab as P
# computes precision@k for the multi class case
def precision_at_k(y, y_hat, k=5):
    """Fraction of rows in `y` whose argmax appears among the first `k` entries of `y_hat`."""
    hits = sum(
        1.0
        for row in y
        if any(np.argmax(row) == y_hat[i] for i in range(k))
    )
    return hits / len(y)
def plot_accuracy_vs_distance(y, y_hat, order_file_path):
    """Bucket per-sample accuracy by word distance and dump per-bin accuracy.

    :param y: iterable of score/one-hot vectors; np.argmax gives the true label.
    :param y_hat: predicted label per sample, index-aligned with `y`.
    :param order_file_path: text file whose i-th line holds two integers; their
        absolute difference is sample i's word distance.
    Side effect: saves the {bin_lower_bound: accuracy} dict to "w2v_500.npy"
    (plotting code below is intentionally commented out).
    """
    INFINITY = 1000  # sentinel upper bound for the open-ended last bin
    bins = [[1, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9], [10, 10], [11, 11], [12, 12], [13, 14],
            [15, 16], [17, 18], [19, 20], [21, 22], [23, INFINITY]]
    fid = open(order_file_path)
    lines = fid.readlines()
    fid.close()
    acc = dict()        # bin lower bound -> list of per-sample booleans
    durations = list()  # distances of correctly classified samples (currently unused downstream)
    for i in range(len(y)):
        cur_acc = y_hat[i] == np.argmax(y[i])
        order = lines[i].split()
        duration = np.abs(int(order[1]) - int(order[0]))
        if cur_acc:
            durations.append(duration)
        # if duration in acc:
        # acc[duration].append(cur_acc)
        # else:
        # acc[duration] = list()
        # acc[duration].append(cur_acc)
        # Assign the sample to its (inclusive) distance bin, keyed by bin start.
        for b in bins:
            if b[0] <= duration <= b[1]:
                if b[0] in acc:
                    acc[b[0]].append(cur_acc)
                else:
                    acc[b[0]] = list()
                    acc[b[0]].append(cur_acc)
    plot_acc = dict()
    for val in acc:
        # Mean accuracy per bin; also print the bin population size.
        plot_acc[val] = (float(np.sum(acc[val])) / len(acc[val]))
        print("B: %s, P: %s" % (val, len(acc[val])))
    # plt.title("Accuracy vs. Word Distance")
    # plt.ylabel('Accuracy')
    # plt.xlabel('Word Distance')
    np.save("w2v_500", plot_acc)
    # plt.bar(range(len(plot_acc)), plot_acc.values(), align='center')
    # plt.xticks(range(len(plot_acc)), plot_acc.keys())
    # plt.show()
def plot_accuracy_vs_word_position(path_indices, path_test):
    """Map each misclassified-sample index to its tokenised test line.

    :param path_indices: file readable by np.loadtxt, one sample index per line.
    :param path_test: text file with one whitespace-separated sample per line.
    :return: dict {position_in_index_file: [token_list]} for each listed index.
    """
    miss_class_idx = np.loadtxt(path_indices)
    test_data = dict()
    with open(path_test) as fid:
        lines = fid.readlines()
    # Tokenise every test line; keep the original [tokens] list wrapping for
    # compatibility with the previous in-memory format.
    for i, line in enumerate(lines):
        test_data[i] = [line.split()]
    miss_class_data = dict()
    for pos in range(len(miss_class_idx)):
        miss_class_data[pos] = test_data[int(miss_class_idx[pos])]
    # Previously this dict was built and silently discarded; return it so
    # callers can actually inspect the misclassified samples.
    return miss_class_data
def read_dictionary(path):
    """Load a vocabulary file (one entry per line, word is the first token).

    :return: (index -> word, word -> index) dict pair.
    """
    index_to_word = {}
    word_to_index = {}
    with open(path) as handle:
        for position, row in enumerate(handle):
            token = row.split()[0]
            index_to_word[position] = token
            word_to_index[token] = position
    return index_to_word, word_to_index
# Lazily-populated dictionary caches shared by enc_dec_2_skip_thoughts_dict();
# loaded on first use so each dictionary file is read at most once per process.
dict_skip_i2w = None     # skip-thoughts vocabulary: index -> word
dict_skip_w2i = None     # skip-thoughts vocabulary: word -> index
dict_enc_dec_i2w = None  # encoder-decoder vocabulary: index -> word
dict_enc_dec_w2i = None  # encoder-decoder vocabulary: word -> index
def enc_dec_2_skip_thoughts_dict(id, dict_enc_dec_path="data/orig/dictionary.txt",
                                 dict_skip_path="data/skip_thoughts/raw/dictionary.txt"):
    """Translate an encoder-decoder vocabulary id into the skip-thoughts id.

    Falls back to the lower-cased word, then to the id of 'unknown'.
    Both dictionaries are cached in module globals after the first call.
    """
    global dict_skip_i2w, dict_skip_w2i, dict_enc_dec_i2w, dict_enc_dec_w2i
    # read skip-thoughts dictionary on first use
    if dict_skip_i2w is None or dict_skip_w2i is None:
        dict_skip_i2w, dict_skip_w2i = read_dictionary(dict_skip_path)
    # read enc-dec dictionary on first use
    if dict_enc_dec_i2w is None or dict_enc_dec_w2i is None:
        dict_enc_dec_i2w, dict_enc_dec_w2i = read_dictionary(dict_enc_dec_path)
    word = dict_enc_dec_i2w[id]
    for candidate in (word, word.lower()):
        if candidate in dict_skip_w2i:
            return dict_skip_w2i[candidate]
    return dict_skip_w2i['unknown']
def check_intersection(dict_size=50001):
    # Counts how many encoder-decoder vocabulary ids map onto skip-thoughts
    # id 1877 (presumably the id of 'unknown' in the skip-thoughts dictionary
    # -- TODO confirm against the dictionary file).
    count = 0
    for i in range(dict_size):
        id = enc_dec_2_skip_thoughts_dict(i)
        if id == 1877:
            count += 1
    # NOTE: Python 2 print statements -- this module predates Python 3.
    print "Total number: %d" % count
    print "Percentage from dictionary: %.2f" % (float(count) / dict_size)
|
<filename>iptv_scan.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 26 07:00:33 2016
@author: pavel
"""
import concurrent.futures
import time
from urllib.request import urlopen
from ipaddress import IPv4Network
TIMEOUT = 2 #seconds; per-request timeout when probing a stream URL
SLEEP_TIME=1  # seconds between sequential probes in scan()
BYTES_TO_READ = 200  # how much of the stream to read to consider it alive
NUM_WORKERS = 3 #threads used by scan_parallel()
# udpxy HTTP proxy that relays "udp/<multicast-ip>:<port>" streams
UDPXY_URL = "http://192.168.0.200:4022/udp/"
# Candidate multicast address ranges (CIDR); NARROW is used by default.
RANGES_WIDE = ["192.168.3.11/17", "192.168.127.12/17", "172.16.17.32/17", "172.16.17.32/17", "172.16.17.32/17"]
RANGES_NARROW = ["192.168.3.11/24",]#"192.168.127.12/24", "192.168.3.11/24", "192.168.127.12/24", "192.168.3.11/24", "172.16.58.3/24"]
RANGES = RANGES_NARROW
PORT = "5000"  # multicast stream port appended to every address
COUNT_FILE = "scan_status.txt"  # progress file so an interrupted scan can resume
UPDATE_COUNT_FILE_EVERY = 100   # write progress every N probed addresses
TEMP_FILE = "scan_iptv.txt"     # flat list of working stream URLs
OUTPUT_PLAYLIST = "scan_iptv.m3u"  # final playlist built from TEMP_FILE
HEADER = "#EXTM3U"                 # m3u playlist header line
TITLE = "#EXTINF:-1,Unknown_"      # m3u entry title prefix (channel names unknown)
END_LINE = "\r\n"
def check_url(url, timeout, bytes_to_read):
    """Return True if `url` opens and `bytes_to_read` bytes can be read within `timeout` seconds."""
    try:
        with urlopen(url, timeout = timeout) as f:
            f.read(bytes_to_read)
        return True
    except Exception:
        # Any failure (bad URL, refused connection, timeout, read error) just
        # means the channel is dead. Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and made the scan impossible to stop.
        return False
def get_url(ip_addr):
    """Build the udpxy proxy URL for one multicast address."""
    return "{}{}:{}".format(UDPXY_URL, ip_addr, PORT)
def check_channel(ip_addr):
    """Probe one multicast address; return (stream_url, is_alive)."""
    stream_url = get_url(ip_addr)
    alive = check_url(stream_url, TIMEOUT, BYTES_TO_READ)
    return stream_url, alive
def addresses(skip=0):
    """Yield host addresses (dotted strings) from RANGES, skipping the first `skip`.

    `skip` supports resuming an interrupted scan (see num_scanned()).
    """
    seen = 0
    for cidr in RANGES:
        for host in IPv4Network(cidr).hosts():
            if seen < skip:
                seen += 1
                continue
            yield host.exploded
def num_scanned():
    """Return the highest progress count recorded in COUNT_FILE, or 0.

    Used to resume a previous scan; a missing or corrupt progress file simply
    restarts from the beginning.
    """
    count = 0
    try:
        with open(COUNT_FILE, 'r') as f:
            for line in f:
                count = max(count, int(line.strip()))
    except (OSError, ValueError):
        # Missing/unreadable file or a non-numeric line: keep whatever was
        # parsed so far. Was a bare `except:`, which also hid real bugs.
        pass
    return count
def scan_parallel():
    """Probe all remaining addresses with a thread pool, appending live URLs to TEMP_FILE.

    Resumes from the count stored in COUNT_FILE and periodically rewrites it so
    an interrupted scan can continue where it left off.
    """
    skip = num_scanned()
    count = skip
    with open(COUNT_FILE, 'w') as count_file:
        with open(TEMP_FILE, 'a') as output:
            with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
                # One future per address still to probe; map back to its address.
                future_to_ch = {executor.submit(check_channel, ip_addr): ip_addr for ip_addr in addresses(skip)}
                for future in concurrent.futures.as_completed(future_to_ch):
                    ip_addr = future_to_ch[future]
                    count += 1
                    # Persist progress every UPDATE_COUNT_FILE_EVERY addresses.
                    # NOTE(review): completion order is not submission order, so
                    # the recorded count is only approximate for resuming.
                    if count % UPDATE_COUNT_FILE_EVERY == 0:
                        count_file.write(str(count)+END_LINE)
                        count_file.flush()
                    print(count, ip_addr)
                    url, status_ok = future.result()
                    if status_ok:
                        output.write(url + END_LINE)
                        output.flush()
def scan():
    """Probe all remaining addresses sequentially, appending live URLs to TEMP_FILE.

    Same resume/progress behaviour as scan_parallel(), but single-threaded with
    a SLEEP_TIME pause between probes to be gentle on the proxy.
    """
    skip = num_scanned()
    count = skip
    with open(COUNT_FILE, 'w') as count_file:
        with open(TEMP_FILE, 'a') as output:
            for ip_addr in addresses(skip):
                count += 1
                # Persist progress every UPDATE_COUNT_FILE_EVERY addresses.
                if count % UPDATE_COUNT_FILE_EVERY == 0:
                    count_file.write(str(count)+END_LINE)
                    count_file.flush()
                print(count, ip_addr)
                url, status_ok = check_channel(ip_addr)
                if status_ok:
                    output.write(url + END_LINE)
                    output.flush()
                time.sleep(SLEEP_TIME)
def to_m3u():
    """Convert the flat URL list in TEMP_FILE into an m3u playlist (OUTPUT_PLAYLIST).

    Channel names are unknown, so entries are titled Unknown_0, Unknown_1, ...
    """
    with open(TEMP_FILE, "r") as input_file:
        with open(OUTPUT_PLAYLIST, "w") as output_file:
            output_file.write(HEADER + END_LINE)
            count = 0
            for line in input_file:
                output_file.write(TITLE + str(count) + END_LINE)
                output_file.write(line.strip() + END_LINE)
                count += 1
if __name__ == "__main__":
    # execute only if run as a script: scan sequentially (swap in
    # scan_parallel() for the threaded variant), then build the playlist.
    #scan_parallel()
    scan()
    to_m3u()
|
<filename>src/execute_flexible_search.py
#!/usr/bin/env python3
# ExecuteFlexibleSearchException: Received HTTP500 error doesn't mean you won't know why it happened
# To figure out what's wrong with flexible search enable verbose errors and execute flexible search query in groovy:
# setparametertemporary flexible.search.exception.show.query.details true
# xg "flexibleSearchService.search('select {page},CONCAT({components1}) from {PflJspIncludeComponent} where {components1} is not null group by {page} order by {page}')"
# maybe ask if need to enable verbose errors and rerun query in groovy to see why error happened?
# TODO: pipe-aware result printing to avoid BrokenPipeError: [Errno 32] Broken pipe
# TODO: further refactor: method's arguments (especially passing 'args')
# TODO: extract removing empty columns (without checking first row == column name) into a separate script
import argparse
import html
import logging
import re
import requests
import sys
import time
import multiline_tabulate
import unroll_pk
from lib import argparse_helper
from lib import hybris_argparse_helper
from lib import hybris_requests_helper
from lib import logging_helper
from lib import requests_helper
from lib import shell_helper
# Housekeeping columns hidden from query output by default (timestamps and
# type/owner bookkeeping PKs); re-enable with --no-blacklist.
COLUMN_BLACKLIST = {'hjmpTS', 'createdTS', 'modifiedTS', 'TypePkString', 'OwnerPkString', 'aCLTS', 'propTS'}
# Default number of result rows: terminal height minus 3 lines of chrome.
DEFAULT_ENTRIES_LIMIT = shell_helper.get_terminal_height() - 3
class ExecuteFlexibleSearchException(Exception):
    """Raised when a flexible search cannot be executed or returns an error."""

    def __init__(self, message):
        # Forward the message to Exception so args, repr and pickling behave
        # normally (previously super().__init__ was never called).
        super().__init__(message)
        self.message = message

    def __str__(self):
        return f'ExecuteFlexibleSearchException: {self.message}'
def _execute_flexible_search_and_return_json(session, hac_address, query, csrf, max_count=100, user='admin',
                                             locale='en'):
    """POST the query to HAC's /console/flexsearch/execute and return parsed JSON.

    :param session: authenticated requests session
    :param hac_address: base HAC URL
    :param query: flexible search query string
    :param csrf: CSRF token obtained at login
    :param max_count: maximum number of rows to request
    :param user: HAC user to run the query as
    :param locale: locale for localized attributes
    :raises ExecuteFlexibleSearchException: on transport errors, HTTP >= 500,
        or when the response JSON carries an 'exception' entry.
    """
    flex_data = {'flexibleSearchQuery': query, '_csrf': csrf, 'maxCount': max_count,
                 'user': user, 'locale': locale, 'commit': False}
    # Log the POST payload without the (possibly huge) query text itself.
    flex_data_without_query_content = {k: v for k, v in flex_data.items() if k != 'flexibleSearchQuery'}
    logging.debug(f'flex data without query content: {flex_data_without_query_content}')
    try:
        post_result = session.post(f'{hac_address}/console/flexsearch/execute', data=flex_data, verify=False)
    except requests.exceptions.ChunkedEncodingError as exc:
        raise ExecuteFlexibleSearchException(f'ChunkedEncodingError: {exc}')
    if post_result.status_code >= 500:
        raise ExecuteFlexibleSearchException(f'Received HTTP{post_result.status_code} error')
    result_json = post_result.json()
    if result_json['exception'] is not None:
        exception_message = result_json['exception']['message']
        raise ExecuteFlexibleSearchException(f'Exception during executing Flexible Search: {exception_message}')
    return result_json
def execute_flexible_search(session, hac_address, query, csrf, max_count=100, user='admin', locale='en'):
    """Run a flexible search via HAC and return only its result rows."""
    response = _execute_flexible_search_and_return_json(session, hac_address, query, csrf, max_count, user, locale)
    return response['resultList']
def _execute_flexible_search_and_return_header_and_data(session, address, csrf_token, flexible_query, analyse_short,
                                                        no_analyse, limit, ignore_columns):
    """Execute the query and return [[headers], [row1], [row2], ...].

    HTML-unescapes every cell, strips the 'p_' prefix from header names, and
    (unless no_analyse) repeatedly replaces 13-digit PK values found in the
    data with human-readable descriptions from unroll_pk.

    Exits the process on query failure (1) or empty result (0).
    """
    logging.debug('Executing...')
    try:
        result_json = _execute_flexible_search_and_return_json(session=session,
                                                               hac_address=address,
                                                               query=flexible_query,
                                                               csrf=csrf_token,
                                                               max_count=limit)
    except ExecuteFlexibleSearchException as exc:
        print(exc)
        sys.exit(1)
    result_list = result_json['resultList']
    logging.debug(f'Result: {result_list}')
    if len(result_list) == 0:
        print('---No data---')
        sys.exit(0)
    # Flatten all rows into one list of HTML-unescaped cells (None preserved).
    result_list_unescaped = []
    for row in result_list:
        for element in row:
            result_list_unescaped.append(html.unescape(element) if element is not None else None)
    headers = result_json['headers']
    headers = [re.sub('^[pP]_', '', header) for header in headers]  # change 'p_uid' -> 'uid' etc. by removing 'p_'
    # Re-chunk the flat cell list back into rows of len(headers) cells each.
    data_in_separate_lists = []
    for column_index in range(0, len(result_list_unescaped), len(headers)):
        data_in_separate_lists.append(result_list_unescaped[column_index:column_index + len(headers)])
    header_and_data = [headers]
    header_and_data.extend(data_in_separate_lists)
    # TODO: use unroll_pk.py !!
    if analyse_short or not no_analyse:
        # TODO: unroll pk until there are no more pk to check or there is empty output from current pk check
        # Up to 3 passes: replacements can themselves contain new PKs.
        for analyse_iteration in range(3):
            logging.debug(f'-----------Analyse #{analyse_iteration}')
            # TODO: extract checking PK to check_pk.py with input \d{13} and output: Type, unique field(s?) name + value
            # TODO: check if given types aren't in dictionary already, if not then save results as {Type: [uniqueName1,uN2]}
            # TODO: use [hostName,url] as key, to invalidate caches on new machine or other servers)
            # TODO: allow two fields per type in dictionary to for example pick 2 values from {Address}
            # get all 13 digit numbers (except current 'PK' column), because they may be a PK of something
            item_pk_set = _get_pk_set_from_header_and_data(header_and_data, ignore_columns)
            logging.debug(f'item_pk_set = {item_pk_set}')
            if item_pk_set:
                # NOTE(review): key_to_string is iterated as (key, value) pairs
                # below, so get_key_replacements presumably returns pairs, not a
                # plain dict -- confirm in unroll_pk.
                key_to_string = unroll_pk.get_key_replacements(item_pk_set, session, csrf_token, address,
                                                               not analyse_short)
                logging.debug(f'key_to_string = {key_to_string}')
                if key_to_string:
                    for row in data_in_separate_lists:
                        for column_index, column in enumerate(row):
                            for key, replace_string in key_to_string:
                                # if column isn't empty AND (at least part of) key is in column AND this is not PK col
                                if column is not None and key in column \
                                        and header_and_data[0][column_index].lower() != 'PK'.lower():
                                    row[column_index] = row[column_index].replace(key, replace_string)
                                    logging.debug(f'replacing {key} -> {replace_string}')
    logging.debug(f'Output from execute_flexible_search: {header_and_data}')
    return header_and_data
def _use_pager_for_header_and_data(header_and_data, pager, args):
if pager == '':
return '\n'.join([args.csv_delimiter.join(x if x is not None else '' for x in row) for row in header_and_data])
elif pager == 'multiline':
additional_parameters = multiline_tabulate.extract_arguments_as_kwargs(args)
if args.limit == DEFAULT_ENTRIES_LIMIT: # didn't provide limit amount -> use full screen
additional_parameters.update({'limit_entries': None, 'limit_lines': DEFAULT_ENTRIES_LIMIT})
return multiline_tabulate.multiline_tabulate(header_and_data, **additional_parameters)
else:
return ''
def _get_pk_set_from_header_and_data(header_and_data, ignore_column):
item_pk_set = set()
for row_index, row in enumerate(header_and_data):
for col_index, column in enumerate(row):
if header_and_data[0][col_index].lower() == 'PK'.lower(): # ignore data from 'PK' column
continue
if column and (ignore_column is None or column not in ignore_column):
matches = re.findall(r'\d{13}', column)
for match in matches:
item_pk_set.add(match)
return item_pk_set
def _handle_cli_arguments():
    """Parse CLI arguments and build the final flexible search query.

    :return: (args, flexible_query) where $1/$2... placeholders in the query
        have been substituted with the --parameters values.
    """
    parser = argparse.ArgumentParser('Script that executes given flexible search')
    hybris_argparse_helper.add_hybris_hac_arguments(parser)
    parser.add_argument('query',
                        help='string with flexible search or path to file with flexible search or "-" if piping')
    parser.add_argument('--parameters', '-p', nargs='*',
                        help='arguments to put into flexible query by replacing with $1, $2 etc')
    # TODO: if there are more than X (1? 2? 3?) blacklisted columns, but not all then by default show them
    parser.add_argument('--no-blacklist', action='store_true',
                        help='Show blacklisted columns (like hjmpTS, createdTS etc')
    parser.add_argument('--analyse-short', '-a', action='store_true',
                        help='Analyse PK and print short item info, by default: print long item info')
    parser.add_argument('--no-analyse', '-A', action='store_true',
                        help='Do not analyse PK to get info about them, by default: print long item info')
    parser.add_argument('--watch', '-w', type=float, help='Number of seconds to wait before retrying query')
    multiline_tabulate.add_common_parser_arguments(parser)
    logging_helper.add_logging_arguments_to_parser(parser)
    parser.set_defaults(limit=DEFAULT_ENTRIES_LIMIT)
    args = parser.parse_args()
    # TODO: expand checking PK to all blacklisted by default columns
    # Hide housekeeping columns unless the user explicitly selects pk between
    # SELECT and FROM (or mentions 'creation') -- then assume they want it all.
    # NOTE(review): args.ignore_columns is presumably defined by
    # multiline_tabulate.add_common_parser_arguments -- confirm.
    query_lower = args.query.lower()
    is_pk_between_select_and_from = query_lower.find('select') < query_lower.find('pk') < query_lower.find('from')
    if not args.no_blacklist and 'creation' not in args.query and (
            'pk' not in args.query or not is_pk_between_select_and_from):
        args.ignore_columns = f'{args.ignore_columns},' + ','.join(COLUMN_BLACKLIST)
    # TODO: accept --param key=value --param key2=value2 then replace ?key and ?key2 with value/value2, cleaner than $1, $2
    flexible_query = argparse_helper.get_text_from_string_or_file_or_pipe(args.query)
    flexible_query = flexible_query.rstrip(';')
    if args.parameters:
        # Substitute $1, $2, ... in order; warn when the query still expects
        # a placeholder beyond the provided parameters.
        for i, parameter in enumerate(args.parameters):
            flexible_query = flexible_query.replace(f'${i + 1}', parameter)
            if f'${i + 2}' in flexible_query:
                print(f"Probably you should provide additional parameter for replacing with ${i + 2}")
    elif '$1' in flexible_query:
        print("No parameters given, but $1 found in query, probably you've forgotten to add parameter")
    logging.debug('Full flexible search query:')
    logging.debug(flexible_query)
    return args, flexible_query
def main():
    """Log into HAC, execute the query, and print results (optionally in watch mode).

    With --watch N the query is re-run every N seconds and the previous output
    is overwritten in place using terminal control sequences.
    """
    logging_helper.run_ipython_on_exception()
    args, flexible_query = _handle_cli_arguments()
    wrapped_execute_flexible_search_and_return_header_and_data = logging_helper.decorate_method_with_pysnooper_if_needed(
        _execute_flexible_search_and_return_header_and_data, args.logging_level)
    session, address = requests_helper.get_session_with_basic_http_auth_and_cleaned_address(args.address)
    # FIX: restore the password lookup; the previous '<PASSWORD>' placeholder
    # (a redaction artifact) was not even valid Python.
    credentials = {'user': args.user, 'password': args.password}
    csrf_token = hybris_requests_helper.log_into_hac_and_return_csrf_or_exit(session, address, credentials)
    if args.watch:
        try:
            iteration = 0
            lines_in_last_output = 0
            while True:
                last_update_time = time.asctime()
                last_update_message = f'Last update: {last_update_time} {time.time()}'
                header_and_data = wrapped_execute_flexible_search_and_return_header_and_data(session, address,
                                                                                            csrf_token,
                                                                                            flexible_query,
                                                                                            args.analyse_short,
                                                                                            args.no_analyse,
                                                                                            args.limit,
                                                                                            args.ignore_columns)
                output = last_update_message + '\n' + _use_pager_for_header_and_data(header_and_data, args.pager, args)
                if iteration == 0:
                    print(output, end='', flush=True)
                else:
                    # Rewind the cursor over the previous output and clear it so
                    # the refreshed table is drawn in place.
                    move_up = shell_helper.get_move_up(lines_in_last_output)
                    clear_to_end_of_screen = shell_helper.clear_to_end_of_screen()
                    print(f'{move_up}{clear_to_end_of_screen}{output}', end='', flush=True)
                lines_in_last_output = output.count('\n')
                iteration += 1
                time.sleep(args.watch)
        except KeyboardInterrupt:
            print('\r ')
    else:
        prepared_string = wrapped_execute_flexible_search_and_return_header_and_data(session, address, csrf_token,
                                                                                    flexible_query,
                                                                                    args.analyse_short,
                                                                                    args.no_analyse,
                                                                                    args.limit,
                                                                                    args.ignore_columns)
        print(_use_pager_for_header_and_data(prepared_string, args.pager, args), end='', flush=True)
|
<reponame>rikkt0r/firewall-rule-generator
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.conf import settings
import mongoengine as me
# from mongoengine.queryset import CASCADE
from fw_common.validators import validate_ip, validate_netmask
class Interface(me.EmbeddedDocument):
    """A network interface embedded in a Host document."""

    # Field names exposed by this document (used by form/serialisation code).
    FIELDS = ('sys', 'desc', 'ip', 'netmask')

    sys = me.StringField(max_length=8, required=True)   # system interface identifier
    desc = me.StringField(max_length=80)                # free-text description
    ip = me.StringField(max_length=15, required=True)   # dotted IPv4 address
    netmask = me.IntField(required=True)                # prefix length, validated below

    def clean(self):
        """Validate ip/netmask before save; raises ValidationError on bad values."""
        validate_ip(self.ip)
        validate_netmask(self.netmask)
        super(Interface, self).clean()

    def __repr__(self):
        return "Interface <%s>" % self.sys
class Template(me.Document):
    """A reusable named set of firewall rules with per-chain enable flags."""

    name = me.StringField(max_length=100, required=True)
    desc = me.StringField(max_length=300)
    # Which built-in chains this template applies to.
    input = me.BooleanField(default=False)
    output = me.BooleanField(default=True)
    forward = me.BooleanField(default=False)
    rules = me.SortedListField(me.ReferenceField('Rule'))

    def __repr__(self):
        return "Template <%s>" % self.name
class Host(me.Document):
    """A managed machine: its interfaces plus a rule template and host-specific rules."""

    # Field names exposed by this document (used by form/serialisation code).
    FIELDS = ('name', 'htype')

    # Host type choices stored as the integer key.
    TYPES = (
        (0, 'PC/Laptop'),
        (1, 'Server'),
        (2, 'Firewall'),
        (3, 'Other')
    )

    name = me.StringField(max_length=100, required=True)
    htype = me.IntField(choices=TYPES, default=0)
    interfaces = me.ListField(me.EmbeddedDocumentField(Interface))
    template = me.ReferenceField(Template)
    rules = me.SortedListField(me.ReferenceField('Rule'))

    def __repr__(self):
        return "Host <%s>" % self.name
class ModuleAvailableParams(me.EmbeddedDocument):
    """A parameter definition offered by an available iptables match module."""

    sys = me.StringField(max_length=16, required=True)  # parameter name
    desc = me.StringField(max_length=120)               # human-readable description
class ModuleAvailable(me.Document):
    """Catalog entry for a match module that rules may use, with its parameter definitions."""

    sys = me.StringField(max_length=40)   # module name
    desc = me.StringField(max_length=100)
    params = me.ListField(me.EmbeddedDocumentField(ModuleAvailableParams))

    def __repr__(self):
        return "ModuleAvailable <%s>" % self.sys
class ModuleParam(me.EmbeddedDocument):
    """A concrete parameter value attached to a configured Module instance."""

    sys = me.StringField(max_length=16, required=True)    # parameter name
    value = me.StringField(min_length=1, max_length=60)   # assigned value
class Module(me.Document):
    """A configured match module instance (name plus parameter values) used by a Rule."""

    sys = me.StringField(max_length=40)  # module name
    params = me.ListField(me.EmbeddedDocumentField(ModuleParam))

    def __repr__(self):
        return "Module <%s>" % self.sys
class Rule(me.Document):
    """A single firewall (iptables-style) rule.

    Choice tuples for table/chain/action are built from the project settings so
    the stored integers stay in sync with the configured vocabularies.
    """

    TABLES = [(i, t[0]) for i, t in enumerate(settings.TABLES)]
    CHAINS = [(i, c[0]) for i, c in enumerate(settings.CHAINS)]
    ACTIONS = [(i, a[0]) for i, a in enumerate(settings.ACTIONS)]
    LOG_LEVELS = settings.LOG_LEVELS
    PROTOCOLS = (
        (0, 'tcp'),
        (1, 'udp'),
        (2, 'icmp')
    )
    # Whether to match fragmented packets: no constraint / only / never.
    FRAGMENT = (
        (0, 'None'),
        (1, 'Yes'),
        (2, 'No')
    )
    COUNTERS = (
        (0, 'None'),
        (1, 'pkts'),
        (2, 'bytes'),
        (3, 'pkts bytes')
    )
    # Fields copied verbatim by __deepcopy__ (plus the choice fields listed there).
    FIELDS = ('protocol_reverse', 'source', 'source_mask', 'source_reverse', 'destination', 'destination_mask',
              'destination_reverse', 'interface_in', 'interface_in_reverse', 'fragment', 'log_level', 'log_prefix')

    # table, chain, protocol, counter, modules, action
    table = me.IntField(choices=TABLES, default=0, required=True)
    chain = me.IntField(choices=CHAINS, required=True)
    protocol = me.IntField(choices=PROTOCOLS)
    protocol_reverse = me.BooleanField(default=False)   # the *_reverse flags negate ("!") the match
    source = me.StringField(max_length=15)              # dotted IPv4 address
    source_mask = me.IntField(default=0)                # prefix length
    source_reverse = me.BooleanField(default=False)
    destination = me.StringField(max_length=15)
    destination_mask = me.IntField(default=0)
    destination_reverse = me.BooleanField(default=False)
    interface_in = me.StringField(max_length=8)
    interface_in_reverse = me.BooleanField(default=False)
    interface_out = me.StringField(max_length=8)
    interface_out_reverse = me.BooleanField(default=False)
    fragment = me.IntField(choices=FRAGMENT, default=0)
    counter = me.IntField(choices=COUNTERS, default=0)
    # modules = me.ListField(me.ReferenceField(Module, reverse_delete_rule=CASCADE))
    modules = me.ListField(me.ReferenceField(Module))
    action = me.IntField(choices=ACTIONS, required=False)
    log_level = me.IntField(choices=LOG_LEVELS)
    log_prefix = me.StringField(max_length=40)

    def validate(self, clean=True):
        """Extend document validation with netmask checks and the ICMP/multiport conflict."""
        validate_netmask(self.source_mask)
        validate_netmask(self.destination_mask)
        if self.protocol == 2:
            # ICMP has no ports, so the multiport match module makes no sense.
            for m in self.modules:
                if m.sys == 'multiport':
                    raise ValidationError("ICMP is not compatible with multiport")
        super(Rule, self).validate(clean)

    def __deepcopy__(self, memo):
        # Copy all scalar fields into a fresh, unsaved Rule; module references
        # are shared (shallow), not cloned.
        fields = self.FIELDS + ('table', 'chain', 'protocol', 'counter', 'action')
        rule = Rule()
        for f in fields:
            setattr(rule, f, getattr(self, f))
        for m in self.modules:
            rule.modules.append(m)
        return rule

    def __repr__(self):
        # NOTE(review): relies on mongoengine generating get_<field>_display for
        # choice fields -- confirm for the installed mongoengine version.
        return "Rule <%s, modules: %d>" % (self.get_action_display(), len(self.modules))
|
import re
import logging
import decimal
import pickle
from datetime import datetime
from celery.task import task
from celery import group, chord
from celery.execute import send_task
from celery.signals import celeryd_after_setup
from robotice.conf import setup_app
from robotice.utils.database import get_db_values
from robotice.reactor.tasks import commit_action
logger = logging.getLogger(__name__)
# TODO: move to utils
def get_value_for_relay(config, actuator, model_values, real_value):
    """Decide a relay state (0=off, 1=on) from a measured value and a [low, high] plan interval.

    Humidity ("hum") plans distinguish air (switch on above range, e.g. to
    vent) from terrarium soil ("terra": switch on below range, e.g. to water);
    temperature ("temp") plans switch on above range (cooling). Anything else,
    including boundary-equal readings, yields 0.
    """
    low, high = model_values[0], model_values[1]
    plan = actuator.get('plan')
    # Reading inside the allowed interval: nothing to do.
    if low < real_value < high:
        return 0
    if "hum" in plan:
        if "air" in plan:
            # Air humidity: below range needs no action; above range switch on.
            if real_value < low:
                return 0
            if real_value > high:
                return 1
        elif "terra" in plan:
            # Soil humidity is inverted: too dry -> switch on (water).
            if real_value < low:
                return 1
            if real_value > high:
                return 0
    elif "temp" in plan:
        # Temperature: too cold needs no action; too hot -> switch on (cool).
        if real_value < low:
            return 0
        if real_value > high:
            return 1
    return 0
def get_value_for_actuator(config, actuator, model_values, real_value):
    """Return the target value for an actuator given the plan interval and measured value.

    Relay devices get a binary 0/1 decision from get_value_for_relay; any
    other device is treated as PWM and currently always gets 0.0 (a real PWM
    computation is not implemented yet).
    """
    if "relay" in actuator.get("device"):
        return get_value_for_relay(config, actuator, model_values, real_value)
    # PWM (or any non-relay device): placeholder constant duty cycle.
    # (The unreachable `return None` after both returning branches was removed.)
    return 0.0
class BaseComparator(object):
    """Object for handling simple reasoning: compares planned model values
    against measured values for every configured actuator and emits
    'reactor.commit_action' celery tasks where they differ.

    :param config: Robotice settings (provides actuators, db, get_actuator_device)
    """

    def compare(self):
        """Run one comparison pass over all actuators; returns a summary string.

        Side effects: increments/resets per-plan 'recurrence' counters in
        config.db and dispatches commit_action tasks via send_task.
        """
        compared, actions, missing_data = 0, 0, 0
        for actuator in self.config.actuators:
            system = actuator.get('system_name')
            plan_name = actuator["plan_name"]
            model_value, real_value = self.get_values(actuator)
            # Redis key tracking how many consecutive passes needed action
            # (note: 'recurence' typo is kept -- changing it would orphan keys).
            recurence_db_key = ':'.join([str(system), str(plan_name), 'recurrence'])
            logger.info("key: {0} model_value: {1} | real_value: {2}".format(
                ('%s:%s:%s' % (system, 'sensors', plan_name)), model_value, real_value))
            # NOTE(review): prefer `is None`; kept as-is to avoid touching logic.
            if real_value == None or model_value == None:
                logger.info('NO REAL DATA to COMPARE')
                self.config.db.incr(recurence_db_key)
                missing_data += 1
                continue
            # Merge device details (e.g. relay vs PWM) into the actuator dict.
            actuator_device = self.config.get_actuator_device(actuator)
            logger.info(actuator_device)
            actuator.update(actuator_device)
            logger.info(actuator)
            if isinstance(model_value, int):
                # Discrete plan value: act whenever measured != planned.
                logger.info("actuator")
                if model_value != real_value:
                    logger.info('Registred commit_action for {0}'.format(actuator))
                    send_task('reactor.commit_action', args=(
                        actuator, model_value, real_value, self.config))
                    actions += 1
                    logger.info('actuator: {0} model_value: {1} real_value: {2}'.format(
                        actuator.get("name"), model_value, real_value))
                    # increment recurrence
                    self.config.db.incr(recurence_db_key)
                else:
                    self.config.db.set(recurence_db_key, 0)
            else:
                # Interval plan value [low, high]: convert to a concrete
                # actuator value, act only when the reading is out of range.
                logger.info("parsed real values : %s < %s and %s < %s" %
                            (model_value[0], real_value, real_value, model_value[1]))
                model_value_converted = get_value_for_actuator(
                    self.config, actuator, model_value, real_value)
                logger.info(
                    'converted value for actuator {0}'.format(model_value_converted))
                if (model_value[0] < real_value) \
                        and (real_value < model_value[1]):
                    model_value_converted = 0
                    logger.info('OK - actuator: {0} hostname: {1}, plan: {2}'.format(
                        actuator.get("name"), actuator.get("name"), plan_name))
                else:
                    logger.info('Registred commit_action for {0}'.format(actuator))
                    send_task('reactor.commit_action', args=[
                        actuator, str(model_value_converted), str(real_value), self.config])
                    actions += 1
                    logger.info('actuator: {0} hostname: {1}, plan: {2}'.format(
                        actuator.get("name"), actuator.get("name"), plan_name))
        return "Simple comparator emit {0} actions.".format(actions)

    def get_values(self, actuator):
        """Fetch (model_value, real_value) for the actuator's system/plan from the DB."""
        system = actuator.get('system_name')
        plan_name = actuator["plan_name"]
        return get_db_values(self.config, system, plan_name)

    def __init__(self, config):
        super(BaseComparator, self).__init__()
        self.config = config
import os
from random import choice
import bpy
from src.loader.LoaderInterface import LoaderInterface
from src.utility.Config import Config
class RockEssentialsRockLoader(LoaderInterface):
"""
Loads rocks/cliffs from a specified .blend Rock Essentials file.
Example 1: Load two rocks from the specified .blend file.
.. code-block:: yaml
{
"module": "loader.RockEssentialsRockLoader",
"config": {
"batches": [
{
"path": "<args:0>/Rock Essentials/Individual Rocks/Sea/Rocks_Sea_Large.blend",
"objects": ['Rock_Sea_Large001','Rock_Sea_Large003']
}
]
}
}
Example 2: Load 5 copies of two specified rocks from the specified .blend file.
.. code-block:: yaml
{
"module": "loader.RockEssentialsRockLoader",
"config": {
"batches": [
{
"path": "<args:0>/Rock Essentials/Individual Rocks/Sea/Rocks_Sea_Large.blend",
"objects": ['Rock_Sea_Large001','Rock_Sea_Large003'],
"amount": 5
}
]
}
}
Example 3: Load 5 rocks, where each loaded rock is randomly selected out of a list of two rocks, from the specified
.blend file.
.. code-block:: yaml
{
"module": "loader.RockEssentialsRockLoader",
"config": {
"batches": [
{
"path": "<args:0>/Rock Essentials/Individual Rocks/Sea/Rocks_Sea_Large.blend",
"objects": ['Rock_Sea_Large001','Rock_Sea_Large003'],
"amount": 5,
"sample_objects": True
}
]
}
}
Example 4: Load 5 random rocks from the specified .blend file.
.. code-block:: yaml
{
"module": "loader.RockEssentialsRockLoader",
"config": {
"batches": [
{
"path": "<args:0>/Rock Essentials/Individual Rocks/Sea/Rocks_Sea_Large.blend",
"amount": 5,
"sample_objects": True
}
]
}
}
**configuration**:
.. list-table::
:widths: 25 100 10
:header-rows: 1
* - Parameter
- Description
- Type
* - batches
- Rocks to load. Each cell contains separate configuration data. Default: [].
- list
**Properties per rock batch**:
.. list-table::
:widths: 25 100 10
:header-rows: 1
* - Parameter
- Description
- Type
* - path
- Path to a .blend file containing desired rock/cliff objects in //Object// section.
- string
* - objects
- List of rock-/cliff-object names to be loaded. If not specified then `amount` property is used for
consequential loading. Default: [].
- list
* - amount
- Amount of rock-/cliff-object to load. If not specified, the amount will be set to the amount of suitable
objects in the current section of a blend file. Must be bigger than 0.
- int
* - sample_objects
- Toggles the uniform sampling of objects to load. Takes into account `objects` and `amount` parameters.
Default: False. Requires 'amount' param to be defined.
- bool
* - render_levels
- Number of subdivisions to perform when rendering. Default: 3.
- int
* - high_detail_mode
- Flag for enabling HDM when possible. Default: False.
- boolean
* - reflection_amount
- Reflection texture value. Default: rock-specific. Range: [0,1]
- float
* - reflection_roughness
- Roughness texture value. Default: rock-specific. Range: [0,1]
- float
* - physics
- Custom property for physics/rigidbody state. Default: False.
- boolean
* - scale
- Scale of a rock as a 3d-vector with each value as a scaling factor per according dimension. Default: [1,
1, 1].
- mathutils.Vector
* - HSV
- Hue-Saturation-Value parameters of the HSV node. (3 values). Range: H: [0, 1], S: [0, 2], V: [0, 2].
Default: rock-specific.
- list
"""
    def __init__(self, config):
        # LoaderInterface stores the module config for later use in run().
        LoaderInterface.__init__(self, config)
    def run(self):
        """Load every configured rock batch and apply its per-batch properties."""
        rocks_settings = self.config.get_list("batches", [])
        # Each entry in "batches" is an independent batch configuration.
        for subsec_num, subsec_settings in enumerate(rocks_settings):
            subsec_config = Config(subsec_settings)
            subsec_objects = self._load_rocks(subsec_num, subsec_config)
            self._set_rocks_properties(subsec_objects, subsec_config)
    def _load_rocks(self, subsec_num, batch_config):
        """ Loads rocks.

        :param subsec_num: Number of a corresponding cell (batch) in `rocks` list in configuration. Used for name generation.
        :param batch_config: Config object that contains user-defined settings for a current batch.
        :return: List of loaded objects.
        """
        loaded_objects = []
        # Object-name substrings that mark an object as loadable rock geometry.
        obj_types = ["Rock", "Cliff"]
        amount_defined = False
        # get path to .blend file
        path = batch_config.get_string("path")
        # get list of obj names, empty if not defined
        objects = batch_config.get_list("objects", [])
        # toggle object sampling
        sample_objects = batch_config.get_bool("sample_objects", False)
        obj_list = []
        with bpy.data.libraries.load(path) as (data_from, data_to):
            # if list of names is empty
            if not objects:
                # get list of rocks suitable for loading - objects that are rocks or cliffs
                for obj_type in obj_types:
                    obj_list += [obj for obj in data_from.objects if obj_type in obj]
            else:
                # if names are defined - get those that are available in this .blend file
                obj_list = [obj for obj in data_from.objects if obj in objects]
        # get amount of rocks in this batch, set to all suitable if not defined
        if batch_config.has_param("amount"):
            amount = batch_config.get_int("amount")
            amount_defined = True
            if amount == 0:
                raise RuntimeError("Amount param can't be equal to zero!")
        else:
            amount = len(obj_list)
        for i in range(amount):
            # load rock: choose random from the list if sampling is True, go through list if not
            if sample_objects and amount_defined:
                obj = choice(obj_list)
            else:
                # cycle through the list so amount > len(obj_list) repeats objects
                obj = obj_list[i % len(obj_list)]
            # NOTE(review): os.path.join(path, "/Object", obj) discards `path`
            # because "/Object" is absolute, so filepath becomes "/Object/<obj>".
            # Appending presumably works because bpy resolves the object from
            # `directory` + `filename` -- confirm before relying on filepath.
            bpy.ops.wm.append(filepath=os.path.join(path, "/Object", obj), filename=obj,
                              directory=os.path.join(path + "/Object"))
            loaded_obj = bpy.context.scene.objects[obj]
            # set custom name for easier tracking in the scene
            bpy.context.scene.objects[obj].name = obj + "_" + str(subsec_num) + "_" + str(i)
            # append to return list
            loaded_objects.append(loaded_obj)
        return loaded_objects
def _set_rocks_properties(self, objects, batch_config):
    """ Sets rocks properties in accordance to user-defined values.

    :param objects: List of objects.
    :param batch_config: Config object that contains user-defined settings for a current batch.
    """
    # get physics custom setting, 'passive' if not defined
    physics = batch_config.get_bool("physics", False)
    # get render level for a batch, '3' if not defined
    render_levels = batch_config.get_int("render_levels", 3)
    # get HDM custom setting for a batch, 'disabled'\'False' if not defined
    high_detail_mode = batch_config.get_bool("high_detail_mode", False)
    # get scale, original scale of 1 along all dims if not defined
    scale = batch_config.get_vector3d("scale", [1, 1, 1])
    # get reflection amount and reflection roughness if defined
    if batch_config.has_param("reflection_amount"):
        reflection_amount = batch_config.get_float("reflection_amount")
    else:
        reflection_amount = None
    if batch_config.has_param("reflection_roughness"):
        reflection_roughness = batch_config.get_float("reflection_roughness")
    else:
        reflection_roughness = None
    # HSV is expected as [hue, saturation, value]; note the assignment order below
    if batch_config.has_param("HSV"):
        hsv = batch_config.get_list("HSV")
    else:
        hsv = None
    for obj in objects:
        # set physics parameter
        obj["physics"] = physics
        # set category id
        obj["category_id"] = 1
        # set render value
        obj.modifiers["Subsurf"].render_levels = render_levels
        # set scale
        obj.scale = scale
        # set HDM if enabled; not every asset exposes the custom property
        if "01) High Detail Mode" in obj:
            obj["01) High Detail Mode"] = high_detail_mode
        else:
            print("High Detail Mode is unavailable for " + str(obj.name) + ", omitting.")
        if reflection_amount is not None:
            obj["05) Reflection Amount"] = reflection_amount
        if reflection_roughness is not None:
            obj["06) Reflection Roughness"] = reflection_roughness
        if hsv is not None:
            obj["02) Saturation"] = hsv[1]
            obj["03) Hue"] = hsv[0]
            obj["04) Value"] = hsv[2]
|
<gh_stars>100-1000
#!/usr/bin/env python
#
# GrovePi Example for using the Grove GPS Module http://www.seeedstudio.com/depot/Grove-GPS-p-959.html?cPath=25_130
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://forum.dexterindustries.com/c/grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2017 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
# History
# ------------------------------------------------
# Author Date Comments
# Karan 21 Aug 14 Initial Authoring
# Karan 10 June 15 Updated the code to reflect the decimal GPS coordinates (contributed by rschmidt on the DI forums: http://www.dexterindustries.com/forum/?topic=gps-example-questions/#post-5668)
# Karan 18 Mar 16 Updated code to handle conditions where no fix from satellite
#
#
#####################################################
#
# GPS SENSOR GOES INTO RPISER PORT
#
#####################################################
#
import serial, time
import smbus
import math
import RPi.GPIO as GPIO
import struct
import sys
import ir_receiver_check
# debug / CSV-logging toggles (0 disables)
enable_debug=1
enable_save_to_file=0
# the IR receiver shares the Pi's serial lines; refuse to run while it is enabled
if ir_receiver_check.check_ir():
    print("Disable IR receiver before continuing")
    exit()
ser = serial.Serial('/dev/ttyAMA0', 9600, timeout = 0)  #Open the serial port at 9600 baud
ser.flush()
def cleanstr(in_str):
    """Keep only digits, '.' and '-' from *in_str*; return "-1" if nothing survives."""
    kept = "".join(ch for ch in in_str if ch in "0123456789.-")
    return kept if kept else "-1"
def safefloat(in_str):
    """Parse *in_str* as a float; return -1.0 when it is not parseable."""
    try:
        return float(in_str)
    except ValueError:
        return -1.0
class GPS:
    """Minimal reader/parser for the Grove GPS module's NMEA $GPGGA sentences."""
    #The GPS module used is a Grove GPS module http://www.seeedstudio.com/depot/Grove-GPS-p-959.html
    inp=[]
    # Refer to SIM28 NMEA spec file http://www.seeedstudio.com/wiki/images/a/a0/SIM28_DATA_File.zip
    GGA=[]
    #Read data from the GPS
    def read(self):
        """Block until a $GPGGA sentence is read from the global serial port.

        NOTE(review): written for Python 2 — under Python 3 ser.readline()
        returns bytes and the '$GPGGA' comparisons would never match.
        """
        while True:
            GPS.inp=ser.readline()
            if GPS.inp[:6] =='$GPGGA':  # GGA data , packet 1, has all the data we need
                break
            time.sleep(0.1)  #without the cmd program will crash
        try:
            # Sometimes multiple GPS data packets come into the stream.
            # Take the data only after the last '$GPGGA' is seen
            ind=GPS.inp.index('$GPGGA',5,len(GPS.inp))
            GPS.inp=GPS.inp[ind:]
        except ValueError:
            print ("")
        GPS.GGA=GPS.inp.split(",")  #Split the stream into individual parts
        return [GPS.GGA]
    #Split the data into individual elements
    def vals(self):
        """Parse the cached GGA fields; -1.0/"" stand in for missing values.

        :return: [time, fix, sats, alt, lat, lat_ns, long, long_ew]
        """
        if enable_debug:
            print(GPS.GGA)
        # NOTE(review): `time` here shadows the imported time module within
        # this method; harmless but easy to trip over.
        time=GPS.GGA[1]
        if GPS.GGA[2]=='':  # latitude. Technically a float
            lat =-1.0
        else:
            lat=safefloat(cleanstr(GPS.GGA[2]))
        if GPS.GGA[3]=='':  # this should be either N or S
            lat_ns=""
        else:
            lat_ns=str(GPS.GGA[3])
        if GPS.GGA[4]=='':  # longitude. Technically a float
            # `long` shadows the Python 2 builtin of the same name
            long=-1.0
        else:
            long=safefloat(cleanstr(GPS.GGA[4]))
        if GPS.GGA[5]=='':  # this should be either W or E
            long_ew=""
        else:
            long_ew=str(GPS.GGA[5])
        # fix quality (0 = no satellite fix) and satellites in view
        fix=int(cleanstr(GPS.GGA[6]))
        sats=int(cleanstr(GPS.GGA[7]))
        if GPS.GGA[9]=='':
            alt=-1.0
        else:
            # change to str instead of float
            # 27"1 seems to be a valid value
            alt=str(GPS.GGA[9])
        return [time,fix,sats,alt,lat,lat_ns,long,long_ew]
    # Convert NMEA ddmm.mmmm format to decimal degrees
    def decimal_degrees(self, raw_degrees):
        try:
            degrees = float(raw_degrees) // 100
            d = float(raw_degrees) % 100 / 60
            return degrees + d
        except:
            # non-numeric input (e.g. empty field) is returned unchanged
            return raw_degrees
if __name__ == "__main__":
    g = GPS()
    if enable_save_to_file:
        f = open("gps_data.csv", 'w')           # Open file to log the data
        f.write("name,latitude,longitude\n")    # Write the header to the top of the file
    ind = 0
    while True:
        time.sleep(0.01)
        try:
            x = g.read()    # Read from GPS (return value unused; read() caches into GPS.GGA)
            [t, fix, sats, alt, lat, lat_ns, longitude, long_ew] = g.vals()  # Get the individual values
            # Convert to decimal degrees; southern/western hemispheres are negative
            if lat != -1.0:
                lat = g.decimal_degrees(safefloat(lat))
                if lat_ns == "S":
                    lat = -lat
            if longitude != -1.0:
                longitude = g.decimal_degrees(safefloat(longitude))
                if long_ew == "W":
                    longitude = -longitude
            try:
                # Bug fix: the formatting must happen INSIDE print(...).  The
                # original "print(fmt) % (args)" only worked as a Python 2
                # print statement; under Python 3 it applies % to print's
                # return value (None) and raises TypeError.
                print("Time\t\t: %s\nFix status\t: %d\nSats in view\t: %d\nAltitude\t: %s\nLat\t\t: %f\nLong\t\t: %f" % (t, fix, sats, alt, lat, longitude))
            except:
                # fallback: render everything as strings when types are off
                print("Time\t\t: %s\nFix status\t: %s\nSats in view\t: %s\nAltitude\t: %s\nLat\t\t: %s\nLong\t\t: %s" % (t, str(fix), str(sats), str(alt), str(lat), str(longitude)))
            s = str(t) + "," + str(safefloat(lat) / 100) + "," + str(safefloat(longitude) / 100) + "\n"
            if enable_save_to_file:
                f.write(s)  # Save to file
            time.sleep(2)
        except IndexError:
            # incomplete GGA sentence (e.g. no fix yet)
            print("Unable to read")
        except KeyboardInterrupt:
            if enable_save_to_file:
                f.close()
            print("Exiting")
            sys.exit(0)
|
from __future__ import unicode_literals
import json
from datetime import timedelta
from django.shortcuts import redirect
from django.forms.models import model_to_dict
from django.urls import reverse_lazy
from django.views.generic import ListView, DetailView, View, CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib import messages
from admin.banners.forms import BannerForm
from osf.models import ScheduledBanner
def daterange(start_date, end_date):
    """Yield every date from *start_date* through *end_date*, inclusive."""
    total_days = int((end_date - start_date).days) + 1
    for offset in range(total_days):
        yield start_date + timedelta(offset)
def get_blackout_dates(current_banner_id=None):
    """Return 'YYYY-MM-DD' strings for every day already covered by a banner.

    The banner identified by *current_banner_id* (if any) is excluded so a
    banner being edited does not black out its own dates.
    """
    dates = []
    for banner in ScheduledBanner.objects.exclude(id=current_banner_id):
        dates.extend(
            day.strftime('%Y-%m-%d')
            for day in daterange(banner.start_date, banner.end_date)
        )
    return dates
class BannerList(PermissionRequiredMixin, ListView):
    """Paginated listing of all scheduled banners, ordered by start date."""
    paginate_by = 25
    template_name = 'banners/list.html'
    ordering = 'start_date'
    permission_required = 'osf.view_scheduledbanner'
    raise_exception = True
    model = ScheduledBanner

    def get_queryset(self):
        # Order by the class-level `ordering` attribute.
        return ScheduledBanner.objects.all().order_by(self.ordering)

    def get_context_data(self, **kwargs):
        banners = kwargs.pop('object_list', self.object_list)
        per_page = self.get_paginate_by(banners)
        paginator, page, banners, is_paginated = self.paginate_queryset(banners, per_page)
        kwargs.setdefault('banners', banners)
        kwargs.setdefault('page', page)
        return super(BannerList, self).get_context_data(**kwargs)
class BannerDisplay(PermissionRequiredMixin, DetailView):
    """Read-only detail page for a single scheduled banner."""
    model = ScheduledBanner
    template_name = 'banners/detail.html'
    permission_required = 'osf.view_scheduledbanner'
    raise_exception = True

    def get_object(self, queryset=None):
        return ScheduledBanner.objects.get(id=self.kwargs.get('banner_id'))

    def get_context_data(self, *args, **kwargs):
        banner = self.get_object()
        banner_fields = model_to_dict(banner)
        kwargs.setdefault('page_number', self.request.GET.get('page', '1'))
        kwargs['banner'] = banner_fields
        # Pre-populate the edit form with the banner's current values.
        kwargs['change_form'] = BannerForm(initial=banner_fields)
        kwargs['default_photo'] = banner.default_photo.url
        kwargs['mobile_photo'] = banner.mobile_photo.url
        # Dates taken by other banners, serialized for the date picker.
        kwargs['blackoutDates'] = json.dumps(get_blackout_dates(banner.id))
        return kwargs
class BannerChangeForm(PermissionRequiredMixin, UpdateView):
    """Handles edits to an existing banner submitted from the detail page."""
    permission_required = 'osf.change_scheduledbanner'
    raise_exception = True
    model = ScheduledBanner
    form_class = BannerForm

    def get_object(self, queryset=None):
        return ScheduledBanner.objects.get(id=self.kwargs.get('banner_id'))

    def get_success_url(self, *args, **kwargs):
        return reverse_lazy('banners:detail', kwargs={'banner_id': self.kwargs.get('banner_id')})

    def post(self, request, *args, **kwargs):
        banner_id = kwargs['banner_id']
        form = BannerForm(request.POST, request.FILES, instance=self.get_object())
        if form.is_valid():
            form.save()
        else:
            # Surface validation problems on the redirected detail page.
            for error in form.non_field_errors():
                messages.error(request, error)
        return redirect('banners:detail', banner_id=banner_id)
class BannerDetail(PermissionRequiredMixin, View):
    """Dispatches GET to the display view and POST to the change form."""
    permission_required = 'osf.view_scheduledbanner'
    raise_exception = True

    def get(self, request, *args, **kwargs):
        return BannerDisplay.as_view()(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        return BannerChangeForm.as_view()(request, *args, **kwargs)
class CreateBanner(PermissionRequiredMixin, CreateView):
    """Creation form for a new scheduled banner."""
    permission_required = 'osf.change_scheduledbanner'
    raise_exception = True
    template_name = 'banners/create.html'
    success_url = reverse_lazy('banners:list')
    model = ScheduledBanner
    form_class = BannerForm

    def get_context_data(self, *args, **kwargs):
        # Dates already taken by existing banners, for the date picker.
        kwargs['blackoutDates'] = json.dumps(get_blackout_dates())
        return super(CreateBanner, self).get_context_data(*args, **kwargs)
class DeleteBanner(PermissionRequiredMixin, DeleteView):
    """Confirmation page and handler for deleting a scheduled banner."""
    permission_required = 'osf.delete_scheduledbanner'
    raise_exception = True
    template_name = 'banners/confirm_delete.html'
    success_url = reverse_lazy('banners:list')

    def get_object(self, queryset=None):
        banner_id = self.kwargs['banner_id']
        return ScheduledBanner.objects.get(id=banner_id)
|
###########################################################################
# Copyright 2014-2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###########################################################################
import UserDict
class HTTPException(BaseException):
    """Base for errors that map directly onto HTTP status codes.

    NOTE(review): derives from BaseException, not Exception, so a plain
    `except Exception:` will NOT catch these — confirm that is intentional.
    """
    # default status when a subclass does not override
    httpCode = 500

# Concrete HTTP errors; handlers read `httpCode` to build the response.
class BadRequest(HTTPException): httpCode = 400
class NotFound(HTTPException): httpCode = 404
class MethodNotAllowed(HTTPException): httpCode = 405
class _BaseNode(object):
#status = 200
def __init__(self, parent = None, body = {}, objectId = None):
#self.__path = path
self.__parent = parent
self.__id = objectId
self.__children = {}
self._body = body
def addChild(self, key, obj):
key = str(key)
# Do not want to overwrite existing children
if key in self.__children:
raise BadRequest()
self.__children[key] = obj
def delete(self, *args, **kwargs): raise MethodNotAllowed()
def update(self, *args, **kwargs): raise MethodNotAllowed()
def create(self, *args, **kwargs): raise MethodNotAllowed()
def _lines(self, indent = 0):
ret = []
for k, v in sorted(self.children.items(), key = lambda x: x[0]):
ret.append(' ' * indent + str(k))
ret += v._lines(toStr(indent + 2))
return ret
@property
def body(self):
return self._body
@property
def id(self):
return self.__id
@property
def path(self):
ret = []
curr = self
while curr.__parent is not None:
ret.insert(0, str(curr.id))
curr = curr.__parent
return ret
@property
def children(self):
return self.__children
# Uri that is created through a POST
# Allow: GET, PUT, DELETE
# Uri that is created through a POST
# Allow: GET, PUT, DELETE
class _Entry(_BaseNode):
    """Leaf resource created by POSTing to a _Collection."""

    def __init__(self, body, **kwargs):
        super(_Entry, self).__init__(body=body, **kwargs)

    def update(self, body, partial=True):
        """Replace the body, or recursively merge *body* into it when partial.

        Bug fix: the partial branch used to define a recursive helper (`_up`)
        without ever calling it (and used Python-2-only iteritems), so partial
        updates silently did nothing.
        """
        if partial:
            self.__merge(self._body, body)
        else:
            self._body = body

    @classmethod
    def __merge(cls, dst, src):
        # Fold src into dst: nested dicts merge recursively, scalars overwrite.
        for k, v in src.items():
            if isinstance(v, dict) and isinstance(dst.get(k), dict):
                cls.__merge(dst[k], v)
            else:
                dst[k] = v
# Allow only POST, PUT, GET, DELETE
class _Collection(_BaseNode):
def __init__(self, **kwargs):
super(_Collection, self).__init__(**kwargs)
self.__nextId = 0
def create(self, body):
ret = _Entry(body, parent = self, objectId = self.__nextId)
ret.status = 201
self.children[str(self.__nextId)] = ret
self.__nextId += 1
return ret
def addChild(self, key, obj):
if not isinstance(obj, _Entry):
raise BadRequest("Cannot add a suburi to a Collection manually")
super(_Collection, self).addChild(key, obj)
# TODO: overwrite the whole collection
def update(self, body):
raise NotImplementedError()
@property
def entries(self):
return sorted(obj.children.items(), key = lambda x: x[0])
class _Node(_BaseNode):
    """Intermediate path node used when building nested resource paths."""

    def __init__(self, **kwargs):
        super(_Node, self).__init__(**kwargs)

    def _create(self, path, index, body):
        # Walk/extend the tree along `path`, creating intermediate _Node's and
        # a _Collection for the final segment.
        curr = path[index]
        l = len(path)
        if curr not in self.children:
            if index < l - 1:
                newNode = _Node(parent = self, objectId = curr)
            else:
                newNode = _Collection(parent = self, objectId = curr)
            self.children[curr] = newNode
        # NOTE(review): this recurses unconditionally, so after creating the
        # final _Collection it would call _create on it — a method _Collection
        # does not define. Storage.post builds paths itself and never calls
        # this; it appears to be dead/unfinished code — confirm before use.
        return self.children[path[index]]._create(path, index + 1, body)
class Storage(object):
    """In-memory hierarchical resource store addressed by path-segment lists."""

    def __init__(self):
        self.__data = {}
        self.__root = _BaseNode()

    def _resolvePath(self, path):
        """Walk *path* (iterable of segment strings) from the root.

        :raises NotFound: when any segment is missing (mapped to a 404).
        """
        # Bug fix: removed the leftover Python-2-only debug statement
        # `print path`, which was both noisy and a SyntaxError under Python 3.
        curr = self.__root
        try:
            for x in path:
                curr = curr.children[x]
        except KeyError:
            # This will be mapped to a 404
            raise NotFound(path)
        return curr

    def get(self, path):
        return self._resolvePath(path)

    # TODO: perform some checks before deleting
    def delete(self, path):
        obj = self._resolvePath(path[:-1])
        del obj.children[path[-1]]

    def post(self, path, body=None):
        """Create an entry under the collection at *path*, building any missing
        intermediate nodes and the final collection on demand."""
        # Bug fix: `body={}` was a shared mutable default; every body-less POST
        # aliased the same dict.
        if body is None:
            body = {}
        curr = self.__root
        for x in path[:-1]:
            curr = curr.children.setdefault(x, _BaseNode(parent=curr, objectId=x))
        if path[-1] not in curr.children:
            coll = _Collection(parent=curr, objectId=path[-1])
            curr.children[path[-1]] = coll
        coll = curr.children[path[-1]]
        return coll.create(body)

    def _toStr(self):
        # Bug fix: _BaseNode has no _toStr method (this raised AttributeError);
        # the outline is produced by _lines. Print converted to call syntax,
        # which is valid in both Python 2 and 3 for a single argument.
        l = self.__root._lines()
        print('\n'.join(l))
|
from __future__ import absolute_import, division, print_function
from xfel.ui import master_phil_scope
from dxtbx.format.FormatXTC import locator_scope
from xfel.ui.command_line.plot_run_stats import phil_scope as rs_scope
def get_help(path, scope = master_phil_scope):
    """Return the help string of the phil parameter at *path* within *scope*."""
    definition = scope.get(path).objects[0]
    return definition.help
# Maps wx control names (the widget's .Name) to tooltip strings. Where a phil
# parameter documents the control, the tooltip is pulled from its .help text
# via get_help(); ad-hoc strings are used otherwise.
tooltips = {
  # Settings dialog
  'db_cred_ctr': get_help('experiment_tag'),
  'db_cred_btn_big': 'Set up database credentials the GUI will use to connect with',
  'facility_ctr': get_help('facility.name'),
  'btn_facility_options': 'Facility specific options',
  'experiment_ctr': get_help('facility.lcls.experiment'),
  'output_ctr': get_help('output_folder'),
  'advanced': 'Multiprocessing, queueing, and other options',
  # Advanced settings dialog
  'mp_option_ctr': get_help('mp.method'),
  'queue_ctr': get_help('mp.queue'),
  'nproc_ctr': get_help('mp.nproc'),
  'nnodes_ctr': get_help('mp.nnodes'),
  'nproc_per_node': get_help('mp.nproc_per_node'),
  'wall_time_ctr': get_help('mp.wall_time'),
  'mpi_command_ctr': get_help('mp.mpi_command'),
  'env_script_ctr': get_help('mp.env_script'),
  'phenix_script_ctr': get_help('mp.phenix_script'),
  'htcondor_executable_path_ctr': get_help('mp.htcondor.executable_path'),
  'htcondor_filesystemdomain_ctr': get_help('mp.htcondor.filesystemdomain'),
  'nnodes_index_ctr': get_help('mp.nnodes_index'),
  'nnodes_scale_ctr': get_help('mp.nnodes_scale'),
  'nnodes_merge_ctr': get_help('mp.nnodes_merge'),
  'extra_options': get_help('mp.extra_options'),
  'shifter_image_ctr': get_help('mp.shifter.shifter_image'),
  'shifter_srun_template_ctr': get_help('mp.shifter.srun_script_template'),
  'shifter_sbatch_template_ctr': get_help('mp.shifter.sbatch_script_template'),
  'shifter_jobname_ctr': get_help('mp.shifter.jobname'),
  'shifter_project_ctr': get_help('mp.shifter.project'),
  'shifter_reservation_ctr': get_help('mp.shifter.reservation'),
  'shifter_constraint_ctr': get_help('mp.shifter.constraint'),
  'staging_ctr': get_help('mp.shifter.staging'),
  'back_end_ctr': get_help('dispatcher'),
  # DBCredentialsDialog
  'db_host_ctr': get_help('db.host'),
  'db_port_ctr': get_help('db.port'),
  'db_name_ctr': get_help('db.name'),
  'db_user_ctr': get_help('db.user'),
  'db_password_ctr': get_help('db.password'),
  'web_location_ctr': get_help('facility.lcls.web.location'),
  # StandaloneOptions
  'data_dir_ctr': get_help('facility.standalone.data_dir'),
  'monitor_for': get_help('facility.standalone.monitor_for'),
  'folders_options': get_help('facility.standalone.folders.method'),
  'n_files_needed_ctr': get_help('facility.standalone.folders.n_files_needed'),
  'last_modified_ctr': get_help('facility.standalone.files.last_modified'),
  'minimum_file_size_ctr': get_help('facility.standalone.files.minimum_file_size'),
  'template_ctr': get_help('facility.standalone.template'),
  # Main GUI
  'btn_persistent_tags': 'Auto-tag new runs as they arrive',
  'btn_manage_tags': 'Add/rename/delete tags',
  'btn_view_phil': 'View trial parameters',
  # run-stats plot controls pull their help from the plot_run_stats scope
  'rs_d_min': get_help('d_min', rs_scope),
  'rs_multiples': 'Number of multiple lattices before a hit is counted as a multiple hit',
  'rs_ratio': 'Ratio of 2θ high to 2θ low needed for an image to be a solvent hit',
  'rs_n_strong': get_help('n_strong_cutoff', rs_scope),
  'rs_isigi': get_help('i_sigi_cutoff', rs_scope),
  'rs_n_dump': 'Number of images to convert to cbf and then display',
  'uc_selection_type': 'Union: include runs matching any of these tags\n' + \
                       'Intersection: include runs matching all of these tags',
  # Trial dialog
  'trial_throttle_ctr': 'Percent of images (events) to process',
  'trial_num_bins_ctr': 'Used for logging only',
  'trial_d_min_ctr': 'Used for logging only', # XXX doesn't appear
  # Run group dialog
  'rg_end_type': 'Auto add runs: new data will be added to this block as it arrives\nSpecify end run: set the last run for this block explicitly.',
  'rg_address_ctr': 'Detector address in XTC stream (use detnames to list available detectors)',
  'rg_beam_xyz': 'Beam center in pixels, and detector distance in mm (overridden by the phil parameter input.reference_geometry)',
  'rg_bin_nrg_gain_binning': 'Rayonix binning (2, 3, 4, etc.)',
  'rg_bin_nrg_gain_energy': 'Energy override for all images (eV)',
  'rg_wavelength_offset': 'Offset applied to wavelength of each image (Å)',
  'rg_spectrum_calibration': get_help('spectrum_eV_per_pixel', locator_scope),
  'rg_energy_ctr': 'Energy override for all images (eV)',
  'rg_two_thetas': 'Two 2θ values (deg). The ratio of high/low is used to check for presence of solvent on each image. ' + \
                   'Defaults are the water ring and a low resolution ring',
}
def setup_tooltip(obj):
    """Attach the tooltip registered under *obj*'s Name, if one exists."""
    tip = tooltips.get(obj.Name)
    obj.SetToolTip(tip)
|
import operator
from unittest import TestCase
from shared.BaseUnitTest import BaseUnitTest
from shared.products import get_products
from shared.users import get_users
import cProfile
import pstats
from streams.Stream import Stream
class TestStream(BaseUnitTest):
    """End-to-end tests for the Stream fluent API against fixture users/products."""

    def test_create(self):
        # create() wraps the fixture list unchanged
        results = Stream.create(get_users()).asList()
        self.assertEqual(25, len(results))

    def test_map(self):
        # map() projects each element through the given function
        results = (Stream
                   .create(get_users())
                   .map(lambda user: user['gender'])
                   .asList())
        self.assertEqual(results,
                         ['Female', 'Female', 'Female', 'Female', 'Female', 'Male', 'Male', 'Male', 'Male', 'Male',
                          'Female', 'Male', 'Agender', 'Polygender', 'Male', 'Male', 'Polygender', 'Female', 'Male',
                          'Male', 'Non-binary', 'Polygender', 'Male', 'Non-binary', 'Male'])

    def test_filter(self):
        # filter() keeps only elements matching the predicate
        results = (Stream
                   .create(get_users())
                   .filter(lambda user: user['gender'] == 'Male')
                   .map(lambda user: user['gender'])
                   .asList())
        self.assertEqual(len(results), 12)

    def test_length(self):
        # length() terminates the stream with an element count
        length = (Stream
                  .create(get_users())
                  .filter(lambda user: user['gender'] == 'Male')
                  .map(lambda user: user['gender'])
                  .length())
        self.assertEqual(length, 12)

    def test_reduce(self):
        # reduce() folds the stream; asSingle() unwraps the single result
        sum_of_salaries = (Stream
                           .create(get_users())
                           .filter(lambda user: user['gender'] == 'Male')
                           .map(lambda user: user['salary'])
                           .reduce(operator.add)
                           .asSingle()
                           )
        self.assertEqual(sum_of_salaries, 977023)

    def test_peek(self):
        # peek() observes each element without altering the pipeline
        peekCount = 0

        def increment_peek(data):
            nonlocal peekCount
            peekCount = peekCount + 1

        sum_of_salaries = (Stream
                           .create(get_users())
                           .filter(lambda user: user['gender'] == 'Male')
                           .map(lambda user: user['salary'])
                           .peek(increment_peek)
                           .reduce(operator.add)
                           .asSingle()
                           )
        self.assertEqual(peekCount, 12)

    def test_catch_with_catch_all(self):
        # catchAll() routes pipeline errors (missing 'salaryv' key) to a handler
        has_error = False

        def catch_all_exception(data):
            nonlocal has_error
            print("test_catch", data)
            has_error = True

        sum_of_salaries = (Stream
                           .create(get_users())
                           .filter(lambda user: user['gender'] == 'Male')
                           .map(lambda user: user['salaryv'])
                           .reduce(operator.add)
                           .catchAll(catch_all_exception)
                           .asSingle()
                           )
        self.assertTrue(has_error)

    def test_catch_without_catch_all(self):
        # without catchAll() the error propagates (or yields no result)
        try:
            sum_of_salaries = (Stream
                               .create(get_users())
                               .filter(lambda user: user['gender'] == 'Male')
                               .map(lambda user: user['salaryv'])
                               .reduce(operator.add)
                               .asSingle()
                               )
            self.assertIsNone(sum_of_salaries)
        except Exception as inst:
            print(inst)
            self.assertIsNotNone(inst)

    def test_skip(self):
        # skip(n) drops the first n matching elements
        male_users_after_eighth = (Stream
                                   .create(get_users())
                                   .filter(lambda user: user['gender'] == 'Male')
                                   .skip(8)
                                   .asList()
                                   )
        self.assertEqual(male_users_after_eighth, [{'email': '<EMAIL>',
                                                    'first_name': 'Timoteo',
                                                    'gender': 'Male',
                                                    'id': 19,
                                                    'ip_address': '192.168.3.11',
                                                    'last_name': 'Lyburn',
                                                    'salary': 121935},
                                                   {'email': '<EMAIL>',
                                                    'first_name': 'Cly',
                                                    'gender': 'Male',
                                                    'id': 20,
                                                    'ip_address': '172.16.31.10',
                                                    'last_name': 'Coaster',
                                                    'salary': 85496},
                                                   {'email': '<EMAIL>',
                                                    'first_name': 'Syman',
                                                    'gender': 'Male',
                                                    'id': 23,
                                                    'ip_address': '192.168.3.11',
                                                    'last_name': 'Cadogan',
                                                    'salary': 54965},
                                                   {'email': '<EMAIL>',
                                                    'first_name': 'Windham',
                                                    'gender': 'Male',
                                                    'id': 25,
                                                    'ip_address': '172.16.31.10',
                                                    'last_name': 'Slowley',
                                                    'salary': 94147}])

    def test_take(self):
        # take(n) keeps only the first n matching elements
        first_two_male_users = (Stream
                                .create(get_users())
                                .filter(lambda user: user['gender'] == 'Male')
                                .take(2)
                                .asList()
                                )
        self.assertEqual(first_two_male_users, [{'email': '<EMAIL>',
                                                 'first_name': 'Jasen',
                                                 'gender': 'Male',
                                                 'id': 6,
                                                 'ip_address': '172.16.31.10',
                                                 'last_name': 'Franzini',
                                                 'salary': 78373},
                                                {'email': '<EMAIL>',
                                                 'first_name': 'Vasili',
                                                 'gender': 'Male',
                                                 'id': 7,
                                                 'ip_address': '172.16.58.3',
                                                 'last_name': 'Simester',
                                                 'salary': 78404}])

    def test_take_numbers(self):
        first_two_numbers = (Stream
                             .create(range(100))
                             .take(2)
                             .asList()
                             )
        self.assertEqual([0, 1], first_two_numbers)

    def test_skip_numbers(self):
        last_two_numbers = (Stream
                            .create(range(100))
                            .skip(98)
                            .asList()
                            )
        self.assertEqual([98, 99], last_two_numbers)

    def test_transducer(self):
        # a transducer is a reusable, source-free pipeline fragment
        skip_five_and_take_three_items = (Stream
                                          .transducer()
                                          .skip(5)
                                          .take(3)
                                          )
        skip_five_and_take_three_items_within_zero_to_hundred = (Stream
                                                                 .create(range(100))
                                                                 .pipe(skip_five_and_take_three_items)
                                                                 .asList()
                                                                 )
        self.assertEqual([5, 6, 7], skip_five_and_take_three_items_within_zero_to_hundred)
        # the same transducer can be piped onto a different source
        skip_five_and_take_three_items_within_700_to_800 = (Stream
                                                            .create(range(700, 800))
                                                            .pipe(skip_five_and_take_three_items)
                                                            .asList()
                                                            )
        self.assertEqual([705, 706, 707], skip_five_and_take_three_items_within_700_to_800)

    def test_distinct(self):
        # distinct() de-duplicates while streaming
        results = (Stream
                   .create(get_users())
                   .map(lambda user: user['gender'])
                   .distinct()
                   .asList())
        print("results", results)
        self.assertListContains(['Female', 'Polygender', 'Male', 'Agender', 'Non-binary'], results)
        self.assertIsNotNone(results)
        self.assertIn('Male', results)
        self.assertIn('Female', results)

    def test_compose_functions(self):
        # exercises composing reusable predicates/projections and re-streaming
        # a stream multiple times via .stream()
        is_clothing = lambda product: product['category'] == 'Clothing'
        is_rating_greater_than_three = lambda product: product['overAllRating'] > 3
        reviews_from_product = lambda product: product['reviews']
        rating_from_review = lambda review: review['rating']
        name_from_product = lambda product: product['name']
        price_from_product = lambda product: product['price']
        product_stream = Stream.create(get_products())
        total_products = product_stream.stream().length()
        products_of_rating_greater_than_three = (product_stream
                                                 .stream()
                                                 .filter(is_clothing)
                                                 .peek(lambda val: print("is_clothing", val))
                                                 .filter(is_rating_greater_than_three)
                                                 )
        rating_values = (products_of_rating_greater_than_three
                         .stream()
                         .flatmap(reviews_from_product)
                         .map(rating_from_review)
                         .asList())
        product_prices_of_rating_greater_than_three = (products_of_rating_greater_than_three
                                                       .stream()
                                                       .map(price_from_product))
        product_prices = (product_prices_of_rating_greater_than_three
                          .stream()
                          .asList())
        product_prices_skipped_nine_items = (product_prices_of_rating_greater_than_three
                                             .stream()
                                             .skip(9)
                                             .asList())
        product_prices_skip_first_five_take_next_two_items = (product_prices_of_rating_greater_than_three
                                                              .stream()
                                                              .skip(5)
                                                              .take(2)
                                                              .asList())
        unique_product_prices = (product_prices_of_rating_greater_than_three
                                 .stream()
                                 .distinct()
                                 .asList())
        product_names = (products_of_rating_greater_than_three
                         .stream()
                         .map(name_from_product)
                         .asList())
        print("rating_values", rating_values)
        print("total_products", total_products)
        print("product_names", product_names)
        print("product_prices", product_prices)
        print("product_prices_skipped_nine_items", product_prices_skipped_nine_items)
        print("product_prices_skip_first_five_take_next_two_items", product_prices_skip_first_five_take_next_two_items)
        print("unique_product_prices", unique_product_prices)
        self.assertIsNotNone(rating_values)
        self.assertListContains([5, 1, 2, 2, 1, 3, 2, 1, 2, 5, 1, 4, 1, 5, 5, 1], rating_values)
        self.assertListContains([699.0, 1199.0, 1199.0, 999.0, 999.0, 899.0, 899.0, 1499.0, 5398.0, 2795.0, 2499.0],
                                product_prices)
        self.assertListContains([2795.0, 2499.0], product_prices_skipped_nine_items)
        self.assertListContains([899.0, 899.0], product_prices_skip_first_five_take_next_two_items)
        self.assertListContains([899.0, 2499.0, 999.0, 2795.0, 1199.0, 1499.0, 5398.0, 699.0], unique_product_prices)
        self.assertEqual(total_products, 154)
        self.assertIn('Alisha Solid Women s Cycling Shorts', product_names)
        self.assertIn(5, rating_values)
        self.assertIn(1, rating_values)
|
from functools import wraps
from unittest.mock import patch, Mock, mock_open
from rekcurd_dashboard.apis import RekcurdDashboardException
from rekcurd_dashboard.models import db, ServiceModel, DataServerModel, DataServerModeEnum
from test.base import (
BaseTestCase, TEST_PROJECT_ID, TEST_APPLICATION_ID, TEST_SERVICE_ID, TEST_MODEL_ID,
create_model_model, create_data_server_model, create_kubernetes_model
)
def mock_decorator():
    """Decorator factory that mocks dashboard side effects for a test method.

    Patches file access, model-assignment switching, kubernetes deployment
    deletion and the dashboard gRPC client while the wrapped test runs.
    """
    def test_method(func):
        @wraps(func)
        def inner_method(*args, **kwargs):
            open_patch = patch('builtins.open', new_callable=mock_open)
            switch_patch = patch(
                'rekcurd_dashboard.apis.api_service.switch_model_assignment',
                new=Mock(return_value={"status": True, "message": "Success."}))
            delete_patch = patch(
                'rekcurd_dashboard.apis.api_service.delete_kubernetes_deployment',
                new=Mock(return_value=True))
            client_patch = patch(
                'rekcurd_dashboard.apis.api_service.RekcurdDashboardClient',
                new=Mock(return_value=Mock()))
            with open_patch, switch_patch, delete_patch, client_patch as dashboard_client:
                # The client instance reports a successful model switch.
                dashboard_client.return_value.run_switch_service_model_assignment = \
                    Mock(return_value={"status": True, "message": "Success."})
                return func(*args, **kwargs)
        return inner_method
    return test_method
class ApiServicesTest(BaseTestCase):
    """Tests the service-list endpoint of the dashboard API."""
    # Endpoint under test.
    __URL = f'/api/projects/{TEST_PROJECT_ID}/applications/{TEST_APPLICATION_ID}/services'

    def test_get(self):
        # Listing services for the seeded test application succeeds.
        response = self.client.get(self.__URL)
        self.assertEqual(200, response.status_code)
        self.assertIsNotNone(response)
class ApiServiceIdTest(BaseTestCase):
    """Tests GET/PUT/PATCH/DELETE on a single service resource."""
    # Endpoint under test.
    __URL = f'/api/projects/{TEST_PROJECT_ID}/applications/{TEST_APPLICATION_ID}/services/{TEST_SERVICE_ID}'

    def test_get(self):
        response = self.client.get(self.__URL)
        self.assertEqual(200, response.status_code)
        self.assertIsNotNone(response)

    @mock_decorator()
    def test_put(self):
        # PUT switches the service's model assignment.
        model_id = 2
        create_data_server_model(save=True)
        create_model_model(model_id=model_id, file_path="rekcurd-test-model/new.model", description="new", save=True)
        # Re-assigning the already-assigned model is rejected.
        response = self.client.put(self.__URL, data={'model_id': TEST_MODEL_ID})
        self.assertEqual(400, response.status_code)
        response = self.client.put(self.__URL, data={'model_id': model_id})
        self.assertEqual(200, response.status_code)
        # Switching works as well with a CEPH_S3 data server on kubernetes.
        DataServerModel.query.filter(DataServerModel.project_id == TEST_PROJECT_ID).delete()
        create_data_server_model(mode=DataServerModeEnum.CEPH_S3, save=True)
        create_kubernetes_model(save=True)
        response = self.client.put(self.__URL, data={'model_id': TEST_MODEL_ID})
        self.assertEqual(200, response.status_code)

    @mock_decorator()
    def test_patch(self):
        # PATCH updates display metadata and persists it.
        display_name = "new_service"
        response = self.client.patch(
            self.__URL, data={'display_name': display_name, 'description': 'new_test', 'version': 'v1'})
        service_model = db.session.query(ServiceModel).filter(
            ServiceModel.service_id == TEST_SERVICE_ID, ServiceModel.display_name == display_name).one_or_none()
        self.assertEqual(200, response.status_code)
        self.assertIsNotNone(service_model)

    @mock_decorator()
    def test_delete(self):
        response = self.client.delete(self.__URL)
        self.assertEqual(200, response.status_code)
        self.assertIsNotNone(response)
|
# Copyright (C) 2011 Philter Phactory Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE X
# CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of Philter Phactory Ltd. shall
# not be used in advertising or otherwise to promote the sale, use or other
# dealings in this Software without prior written authorization from Philter
# Phactory Ltd..
#
"""
Wrapper for loading templates from zipfiles. Very crude. If a path in TEMPLATE_DIRS has '.zip'
in it, we'll look in there for the template. You can put file paths after the .zip and we'll append them
to the search path in the zipfile. Intended for pulling admin templates out of the django source zipfile
without having to unpack them.
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
os.path.join(os.path.dirname(__file__), 'django-nonrel.zip/django/contrib/admin/templates"),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'zip_loader.Loader',
)
"""
from django.conf import settings
from django.template import TemplateDoesNotExist
from django.template.loader import BaseLoader
import re
import sys
import zipfile
import os
class Loader(BaseLoader):
    """Template loader that reads templates out of zip archives.

    A ``TEMPLATE_DIRS`` entry either ends in ``.zip`` (templates live at
    the archive root) or contains ``.zip/`` followed by a path prefix
    inside the archive.  Entries without ``.zip`` are ignored.
    """

    is_usable = True  # because zipfile is core (stdlib), always available

    # cache open zipfiles - almost certainly worth doing, unless you have
    # more zipfiles in your template loader path than you have templates
    # in zipfiles.  NOTE: class attribute, so the cache is shared across
    # all Loader instances for the process lifetime.
    zipfile_cache = {}

    def open_zipfile(self, filename):
        """Return an open ZipFile for *filename*, caching it for reuse."""
        cached = self.zipfile_cache.get(filename)
        if cached:
            return cached
        z = zipfile.ZipFile(filename)
        self.zipfile_cache[filename] = z
        return z

    def load_template_source(self, template_name, template_dirs=None):
        """Search the zip entries of *template_dirs* for *template_name*.

        Returns ``(source, display_path)`` on success; raises
        ``TemplateDoesNotExist`` listing the entries that were tried.
        """
        if not template_dirs:
            template_dirs = settings.TEMPLATE_DIRS
        tried = []
        for filepath in template_dirs:
            zipname = None
            zippath = None
            if re.search(r'\.zip$', filepath):
                zipname = filepath
                zippath = ""
            elif re.search(r'\.zip/', filepath):
                # BUG FIX: split at the FIRST ".zip/" only (maxsplit=1).
                # The original used maxsplit=2 and took element [1], which
                # truncated the in-archive prefix whenever the path
                # contained ".zip/" more than once.
                zipname, zippath = filepath.split(".zip/", 1)
                zipname += ".zip"
            if not zipname:
                continue
            try:
                z = self.open_zipfile(zipname)
                # BUG FIX: zip member names always use forward slashes, so
                # join with "/" instead of os.path.join (which produces
                # backslashes on Windows and would never match a member).
                member = "/".join(p for p in (zippath, template_name) if p)
                source = z.read(member)
                template_path = "%s:%s" % (filepath, template_name)
                return (source, template_path)
            except (IOError, KeyError):
                # IOError: unreadable archive; KeyError: member not found.
                tried.append(filepath)
        if tried:
            error_msg = "Tried %s" % tried
        else:
            error_msg = "Your TEMPLATE_DIRS setting is empty. Change it to point to at least one template directory."
        raise TemplateDoesNotExist(error_msg)
    load_template_source.is_usable = True


_loader = Loader()
|
# -*- coding: utf8 -*-
import os
import os.path
from contextlib import contextmanager
from django.contrib.sites.models import Site
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core import mail
from django.urls import reverse
from selenium.webdriver.firefox.webdriver import WebDriver, FirefoxProfile
from selenium import webdriver
from accounts.models import UserSite
from accounts.tests import UserFactory
from genealogio.models import PersonFamily
from genealogio.tests import FamilyFactory, PersonFactory
from notaro.tests import NoteFactory, RST_WITH_ERRORS
# pylint: disable=no-member
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import (
staleness_of,
)
class LoginTest(StaticLiveServerTestCase):
    """Selenium end-to-end tests for login and basic site navigation.

    Runs a German-localized Firefox against the live test server, so the
    assertions check German UI strings ('Passwort vergessen', etc.).
    """

    @classmethod
    def setUpClass(cls):
        # One shared browser for the whole class: accept self-signed
        # certificates and prefer German so page content is predictable.
        super(LoginTest, cls).setUpClass()
        capabilities = webdriver.DesiredCapabilities().FIREFOX
        capabilities['acceptSslCerts'] = True
        profile = FirefoxProfile()
        profile.set_preference('intl.accept_languages', 'de')
        profile.accept_untrusted_certs = True
        cls.selenium = WebDriver(profile, capabilities=capabilities)
        cls.selenium.implicitly_wait(3)

    def setUp(self):
        # Fresh regular user and superuser for every test.
        self.user = UserFactory()
        self.admin = UserFactory(is_superuser=True)

    @classmethod
    def tearDownClass(cls):
        cls.selenium.quit()
        super(LoginTest, cls).tearDownClass()

    @contextmanager
    def wait_for_page_load(self, timeout=30):
        # Capture the current <html> element, run the wrapped action, then
        # wait until that element goes stale -- i.e. until the browser has
        # actually navigated to a new page.
        old_page = self.selenium.find_element_by_tag_name('html')
        yield
        WebDriverWait(self.selenium, timeout).until(
            staleness_of(old_page)
        )

    def login(self, u):
        """Log user *u* in through the login form on the start page.

        NOTE(review): sends the literal password 'password' -- assumes the
        factory-created accounts use that password; confirm in UserFactory.
        """
        self.selenium.get('%s%s' % (self.live_server_url, '/'))
        self.assertIn('Familiengeschichte', self.selenium.title)
        username_input = self.selenium.find_element_by_id("id_identification")
        username_input.send_keys(u.username)
        password_input = self.selenium.find_element_by_id("id_password")
        password_input.send_keys('password')
        with self.wait_for_page_load(timeout=10):
            self.selenium.find_element_by_id('id_submitbutton').click()

    def test_failed_login(self):
        """Unknown credentials keep the visitor on the login page."""
        self.selenium.get('%s%s' % (self.live_server_url, '/'))
        self.assertIn('Familiengeschichte', self.selenium.title)
        # no user with these credentials exists
        username_input = self.selenium.find_element_by_id("id_identification")
        username_input.send_keys('myuser')
        password_input = self.selenium.find_element_by_id("id_password")
        password_input.send_keys('<PASSWORD>')
        with self.wait_for_page_load(timeout=10):
            self.selenium.find_element_by_id('id_submitbutton').click()
        # The password-reset link only shows on the (still displayed)
        # login page.
        self.assertIn('Passwort vergessen', self.selenium.page_source)

    def test_successful_login(self):
        """After login the username is shown and the login form is gone."""
        self.login(self.user)
        self.assertNotIn('Passwort vergessen', self.selenium.page_source)
        self.assertIn(self.user.username, self.selenium.page_source)

    def test_note_with_rst_errors_user(self):
        """Regular users see rendered notes without RST warning output."""
        self.note2 = NoteFactory(text=RST_WITH_ERRORS)
        self.login(self.user)
        body = self.selenium.find_element_by_tag_name('body').text
        self.assertIn('Alle Texte', body)
        self.assertIn(self.note2.title, body)
        self.selenium.get('%s%s' % (self.live_server_url, '/notes/all'))
        body = self.selenium.find_element_by_tag_name('body').text
        self.assertIn(self.note2.title, body)
        # Visit the note both via its short link and its detail view.
        self.selenium.get(
            '%s/n%s' % (self.live_server_url, self.note2.link))
        self.selenium.get(
            '%s/notes/note-view/%d'
            % (self.live_server_url, self.note2.id))
        body = self.selenium.find_element_by_tag_name('body').text
        self.assertIn('continuation of the', body)
        # RST errors must be hidden from non-staff users.
        self.assertNotIn('System Message: WARNING', body)

    def test_note_with_rst_errors_staff(self):
        """Staff can toggle display of RST warnings via the error badge."""
        self.login(self.admin)
        self.note2 = NoteFactory(text=RST_WITH_ERRORS)
        self.selenium.get(
            '%s/notes/note-view/%d'
            % (self.live_server_url, self.note2.id))
        self.selenium.get(
            '%s/n%s'
            % (self.live_server_url, self.note2.link))
        body = self.selenium.find_element_by_tag_name('body').text
        self.assertIn('continuation of the', body)
        # Hidden by default ...
        self.assertNotIn('System Message: WARNING', body)
        self.selenium.find_element_by_id('errormsg').click()
        body = self.selenium.find_element_by_tag_name('body').text
        # ... visible after clicking the error indicator.
        self.assertIn('System Message: WARNING', body)

    def test_login_for_different_usersites(self):
        """A user registered only for another site cannot log in here."""
        user1 = UserFactory(is_staff=True)
        usersites = user1.userprofile.usersite_set.all()
        self.assertEqual(len(usersites), 1)
        # make user1 a user only for a site different from current
        site = Site.objects.create(domain="new")
        usersites[0].site_id = site.id
        usersites[0].save()
        # check that login fails
        self.login(user1)
        self.assertIn('korrekten Benutzername', self.selenium.page_source)

    def test_login_for_different_status_depending_on_usersite_failure(self):
        """Staff role on another site does not grant staff access here."""
        user1 = UserFactory(is_staff=True)
        # make user1 a non-staff user on current site:
        usersite = user1.userprofile.usersite_set.get(
            site=Site.objects.get_current())
        usersite.role = UserSite.USER
        usersite.save()
        # make user1 a staff user on a different site
        site = Site.objects.create(domain="new")
        UserSite.objects.create(
            user=user1.userprofile, site=site, role=UserSite.STAFF)
        # login to current site should work
        self.login(user1)
        self.assertNotIn('korrekten Benutzername', self.selenium.page_source)
        self.assertIn(user1.username, self.selenium.page_source)
        # but should not have staff status here (test for link to admin not
        # being displayed in menu)
        self.assertNotIn("Verwaltungsbereich", self.selenium.page_source)

    def test_login_for_different_status_depending_on_usersite_success(self):
        """Staff role on the current site shows the admin menu entry."""
        user1 = UserFactory(is_staff=True)
        # since is_staff is True, user1 should have staff role for current site
        usersite = user1.userprofile.usersite_set.get(
            site=Site.objects.get_current())
        self.assertEqual(usersite.role, UserSite.STAFF)
        # login to current site should work
        self.login(user1)
        self.assertNotIn('korrekten Benutzername', self.selenium.page_source)
        self.assertIn(user1.username, self.selenium.page_source)
        # and should have staff status here (test for link to admin being
        # displayed in menu)
        self.assertIn("Verwaltungsbereich", self.selenium.page_source)

    def test_send_invitation_email(self):
        """Submitting the invitation form sends exactly one invitation mail."""
        self.login(self.user)
        self.assertEqual(len(mail.outbox), 0)
        self.selenium.get('%s/accounts/invite/' % self.live_server_url)
        self.assertIn('mindestens die Anrede', self.selenium.page_source)
        # NOTE(review): 'input' shadows the builtin; left unchanged
        # (documentation-only edit).
        input = self.selenium.find_element_by_id('id_email')
        input.send_keys('<EMAIL>')
        input = self.selenium.find_element_by_id('id_first_name')
        input.send_keys('Django')
        input = self.selenium.find_element_by_id('id_last_name')
        input.send_keys('Reinhardt')
        submit = self.selenium.find_element_by_id('id_submit_invitation')
        with self.wait_for_page_load(timeout=10):
            submit.click()
        self.assertIn('verschickt', self.selenium.page_source)
        site = Site.objects.get_current()
        self.assertEqual(
            mail.outbox[-1].subject,
            'Einladung von %s' % site.domain)

    def test_some_genealogio_views(self):
        """Smoke-test person/family list and detail views plus commenting."""
        self.login(self.user)
        # set up some persons and families: two grandparent couples, one
        # parent couple, two children.
        grandfather_f = PersonFactory()
        grandmother_f = PersonFactory()
        grandfather_m = PersonFactory()
        grandmother_m = PersonFactory()
        father = PersonFactory()
        mother = PersonFactory()
        child1 = PersonFactory()
        child2 = PersonFactory()
        family = FamilyFactory(father=grandfather_f, mother=grandmother_f)
        PersonFamily.objects.create(person=father, family=family)
        family = FamilyFactory(father=grandfather_m, mother=grandmother_m)
        PersonFamily.objects.create(person=mother, family=family)
        family = FamilyFactory(father=father, mother=mother)
        PersonFamily.objects.create(person=child1, family=family)
        PersonFamily.objects.create(person=child2, family=family)
        self.selenium.get('%s/' % self.live_server_url)
        self.assertIn(child1.get_primary_name(), self.selenium.page_source)
        self.assertNotIn('Die neuesten Texte', self.selenium.page_source)
        self.assertNotIn('Die neuesten Kommentare', self.selenium.page_source)
        self.selenium.get(
            '%s' % self.live_server_url +
            reverse('person-detail', kwargs={'pk': child1.pk}))
        self.assertIn('Vater', self.selenium.page_source)
        self.assertIn('Mutter', self.selenium.page_source)
        self.selenium.get(
            '%s%s' % (
                self.live_server_url,
                reverse('person-detail', kwargs={'pk': grandfather_f.pk})))
        # father, mother should be unknown
        self.assertNotIn('Vater', self.selenium.page_source)
        self.assertNotIn('Mutter', self.selenium.page_source)
        # No comments so far
        self.assertIn(
            'Es gibt noch keine Kommentare', self.selenium.page_source)
        # add a comment
        inpt = self.selenium.find_element_by_id(
            'id_%d_content' % grandfather_f.pk)
        # NOTE(review): the typed text below looks anonymized by corpus
        # processing -- it no longer matches the asserted comment text
        # 'This is the first comment.'; confirm against the original repo.
        inpt.send_keys('<PASSWORD> comment.')
        with self.wait_for_page_load(timeout=10):
            inpt.submit()
        self.assertIn(
            'This is the first comment.', self.selenium.page_source)
        self.assertNotIn(
            'Es gibt noch keine Kommentare', self.selenium.page_source)
        self.selenium.get(
            '%s' % self.live_server_url +
            reverse('person-list'))
        self.assertIn(child1.get_primary_name(), self.selenium.page_source)
        self.assertIn(child2.get_primary_name(), self.selenium.page_source)
        self.assertIn(father.get_primary_name(), self.selenium.page_source)
        self.assertIn(
            grandfather_f.get_primary_name(), self.selenium.page_source)
        self.assertIn(
            grandmother_m.get_primary_name(), self.selenium.page_source)
        self.assertIn(
            grandmother_f.get_primary_name(), self.selenium.page_source)
        self.selenium.get(
            '%s' % self.live_server_url +
            reverse('family-list'))
        # expect to see 3 families
        self.assertIn('3 Einträge', self.selenium.page_source)
        self.selenium.get('%s/' % self.live_server_url)
        self.assertIn(child2.get_primary_name(), self.selenium.page_source)
        self.assertIn('Die neuesten Kommentare', self.selenium.page_source)

    def test_upload_picture(self):
        """Uploading an image archive adds one entry to the picture list."""
        user1 = UserFactory(is_staff=True)
        self.login(user1)
        self.selenium.get(
            '%s%s' % (self.live_server_url, reverse('picture-list')))
        self.assertIn('0 Einträge', self.selenium.page_source)
        inpt = self.selenium.find_element_by_css_selector('input#id_archive')
        inpt.send_keys(os.path.abspath('./functional_tests/static/white.png'))
        self.selenium.find_element_by_id('id_submit_upload').click()
        self.selenium.get(
            '%s%s' % (self.live_server_url, reverse('picture-list')))
        # '1 Eintr' matches both 'Eintrag' and 'Einträge' renderings.
        self.assertIn('1 Eintr', self.selenium.page_source)
|
import logging
import os
from typing import Any, Dict, Optional
from pathlib import Path
from ray.experimental.internal_kv import _internal_kv_initialized
from ray._private.runtime_env.utils import RuntimeEnv
from ray._private.runtime_env.context import RuntimeEnvContext
from ray._private.runtime_env.packaging import (
download_and_unpack_package,
delete_package,
get_local_dir_from_uri,
get_uri_for_directory,
get_uri_for_package,
upload_package_to_gcs,
parse_uri,
Protocol,
upload_package_if_needed,
)
from ray._private.utils import get_directory_size_bytes, try_to_create_directory
default_logger = logging.getLogger(__name__)
def upload_working_dir_if_needed(
    runtime_env: Dict[str, Any],
    scratch_dir: str,
    logger: Optional[logging.Logger] = default_logger,
) -> Dict[str, Any]:
    """Uploads the working_dir and replaces it with a URI.

    If the working_dir is already a URI, this is a no-op.
    """
    wd = runtime_env.get("working_dir")
    if wd is None:
        return runtime_env

    if not isinstance(wd, (str, Path)):
        raise TypeError(
            "working_dir must be a string or Path (either a local path "
            f"or remote URI), got {type(wd)}."
        )
    if isinstance(wd, Path):
        wd = str(wd)

    # If the value already parses as a URI, validate and pass it through.
    try:
        protocol, path = parse_uri(wd)
    except ValueError:
        protocol, path = None, None
    if protocol is not None:
        if protocol in Protocol.remote_protocols() and not path.endswith(".zip"):
            raise ValueError("Only .zip files supported for remote URIs.")
        return runtime_env

    excludes = runtime_env.get("excludes", None)
    try:
        wd_uri = get_uri_for_directory(wd, excludes=excludes)
    except ValueError:
        # working_dir is not a directory: accept a local .zip package
        # and push its bytes to GCS directly.
        pkg = Path(wd)
        if not pkg.exists() or pkg.suffix != ".zip":
            raise ValueError(
                f"directory {pkg} must be an existing "
                "directory or a zip package"
            )
        pkg_uri = get_uri_for_package(pkg)
        upload_package_to_gcs(pkg_uri, pkg.read_bytes())
        runtime_env["working_dir"] = pkg_uri
        return runtime_env

    upload_package_if_needed(
        wd_uri,
        scratch_dir,
        wd,
        include_parent_dir=False,
        excludes=excludes,
        logger=logger,
    )
    runtime_env["working_dir"] = wd_uri
    return runtime_env
class WorkingDirManager:
    """Node-side manager for downloading and caching working_dir packages."""

    def __init__(self, resources_dir: str):
        # All working_dir packages are unpacked under a dedicated subdir.
        self._resources_dir = os.path.join(resources_dir, "working_dir_files")
        try_to_create_directory(self._resources_dir)
        assert _internal_kv_initialized()

    def delete_uri(
        self, uri: str, logger: Optional[logging.Logger] = default_logger
    ) -> int:
        """Delete URI and return the number of bytes deleted."""
        target_dir = get_local_dir_from_uri(uri, self._resources_dir)
        reclaimed = get_directory_size_bytes(target_dir)
        if not delete_package(uri, self._resources_dir):
            logger.warning(f"Tried to delete nonexistent URI: {uri}.")
            reclaimed = 0
        return reclaimed

    def get_uri(self, runtime_env: RuntimeEnv) -> Optional[str]:
        """Return the runtime env's working_dir URI, or None when unset."""
        uri = runtime_env.working_dir()
        return uri if uri != "" else None

    def create(
        self,
        uri: str,
        runtime_env: dict,
        context: RuntimeEnvContext,
        logger: Optional[logging.Logger] = default_logger,
    ) -> int:
        """Download and unpack *uri*; return the unpacked size in bytes."""
        unpacked = download_and_unpack_package(
            uri, self._resources_dir, logger=logger
        )
        return get_directory_size_bytes(unpacked)

    def modify_context(
        self, uri: Optional[str], runtime_env_dict: Dict, context: RuntimeEnvContext
    ):
        """Point the env context (cwd, PYTHONPATH) at the local working_dir."""
        if uri is None:
            return
        local_dir = get_local_dir_from_uri(uri, self._resources_dir)
        if not local_dir.exists():
            raise ValueError(
                f"Local directory {local_dir} for URI {uri} does "
                "not exist on the cluster. Something may have gone wrong while "
                "downloading or unpacking the working_dir."
            )
        context.command_prefix += [f"cd {local_dir}"]
        # Put the working_dir first on PYTHONPATH so its modules win, while
        # preserving any PYTHONPATH the user supplied via env_vars.
        entries = [str(local_dir)]
        if "PYTHONPATH" in context.env_vars:
            entries.append(context.env_vars["PYTHONPATH"])
        context.env_vars["PYTHONPATH"] = os.pathsep.join(entries)
|
# heat/core/tests/test_types.py
import numpy as np
import torch
import heat as ht
from .test_suites.basic_test import TestCase
class TestTypes(TestCase):
    """Checks for the heat type hierarchy and its type constructors."""

    def assert_is_heat_type(self, heat_type):
        """Assert *heat_type* is a class within the ht.datatype hierarchy."""
        self.assertIsInstance(heat_type, type)
        self.assertTrue(issubclass(heat_type, ht.datatype))

    def assert_non_instantiable_heat_type(self, heat_type):
        """Assert *heat_type* is abstract: calling it raises TypeError."""
        self.assert_is_heat_type(heat_type)
        with self.assertRaises(TypeError):
            heat_type()

    def assert_is_instantiable_heat_type(self, heat_type, torch_type):
        """Assert *heat_type* builds DNDarrays backed by *torch_type*."""
        # ensure the correct type hierarchy
        self.assert_is_heat_type(heat_type)

        # constructor without a value yields a single zero element
        default = heat_type()
        self.assertIsInstance(default, ht.DNDarray)
        self.assertEqual(default.shape, (1,))
        self.assertEqual((default.larray == 0).all().item(), 1)
        self.assertEqual(default.larray.dtype, torch_type)

        # constructor with a nested-list value preserves shape and content
        reference = [[3, 2, 1], [4, 5, 6]]
        constructed = heat_type(reference)
        self.assertIsInstance(constructed, ht.DNDarray)
        self.assertEqual(constructed.shape, (2, 3))
        expected = torch.tensor(
            reference, dtype=torch_type, device=self.device.torch_device
        )
        self.assertEqual((constructed.larray == expected).all().item(), 1)
        self.assertEqual(constructed.larray.dtype, torch_type)

        # more than one positional argument is rejected
        with self.assertRaises(TypeError):
            heat_type(reference, reference)

    def _assert_bool_result(self, result, expected):
        """Assert *result* equals *expected* in shape, dtype, device, value."""
        self.assertEqual(result.shape, expected.shape)
        self.assertEqual(result.dtype, expected.dtype)
        self.assertEqual(result.device, expected.device)
        self.assertTrue(ht.equal(result, expected))

    def test_generic(self):
        self.assert_non_instantiable_heat_type(ht.datatype)

    def test_bool(self):
        for alias in (ht.bool, ht.bool_):
            self.assert_is_instantiable_heat_type(alias, torch.bool)

    def test_number(self):
        self.assert_non_instantiable_heat_type(ht.number)

    def test_integer(self):
        self.assert_non_instantiable_heat_type(ht.integer)

    def test_signedinteger(self):
        self.assert_non_instantiable_heat_type(ht.signedinteger)

    def test_int8(self):
        for alias in (ht.int8, ht.byte):
            self.assert_is_instantiable_heat_type(alias, torch.int8)

    def test_int16(self):
        for alias in (ht.int16, ht.short):
            self.assert_is_instantiable_heat_type(alias, torch.int16)

    def test_int32(self):
        for alias in (ht.int32, ht.int):
            self.assert_is_instantiable_heat_type(alias, torch.int32)

    def test_int64(self):
        for alias in (ht.int64, ht.long):
            self.assert_is_instantiable_heat_type(alias, torch.int64)

    def test_unsignedinteger(self):
        self.assert_non_instantiable_heat_type(ht.unsignedinteger)

    def test_uint8(self):
        for alias in (ht.uint8, ht.ubyte):
            self.assert_is_instantiable_heat_type(alias, torch.uint8)

    def test_floating(self):
        self.assert_non_instantiable_heat_type(ht.floating)

    def test_float32(self):
        for alias in (ht.float32, ht.float, ht.float_):
            self.assert_is_instantiable_heat_type(alias, torch.float32)

    def test_float64(self):
        for alias in (ht.float64, ht.double):
            self.assert_is_instantiable_heat_type(alias, torch.float64)

    def test_flexible(self):
        self.assert_non_instantiable_heat_type(ht.flexible)

    def test_complex64(self):
        for alias in (ht.complex64, ht.cfloat, ht.csingle):
            self.assert_is_instantiable_heat_type(alias, torch.complex64)
        self.assertEqual(ht.complex64.char(), "c8")

    def test_complex128(self):
        for alias in (ht.complex128, ht.cdouble):
            self.assert_is_instantiable_heat_type(alias, torch.complex128)
        self.assertEqual(ht.complex128.char(), "c16")

    def test_iscomplex(self):
        self._assert_bool_result(
            ht.iscomplex(ht.array([1, 1.2, 1 + 1j, 1 + 0j])),
            ht.array([False, False, True, False]),
        )
        self._assert_bool_result(
            ht.iscomplex(ht.array([1, 1.2, True], split=0)),
            ht.array([False, False, False], split=0),
        )
        self._assert_bool_result(
            ht.iscomplex(ht.ones((6, 6), dtype=ht.bool, split=0)),
            ht.zeros((6, 6), dtype=ht.bool, split=0),
        )
        self._assert_bool_result(
            ht.iscomplex(ht.full((5, 5), 1 + 1j, dtype=ht.int, split=1)),
            ht.ones((5, 5), dtype=ht.bool, split=1),
        )

    def test_isreal(self):
        self._assert_bool_result(
            ht.isreal(ht.array([1, 1.2, 1 + 1j, 1 + 0j])),
            ht.array([True, True, False, True]),
        )
        self._assert_bool_result(
            ht.isreal(ht.array([1, 1.2, True], split=0)),
            ht.array([True, True, True], split=0),
        )
        self._assert_bool_result(
            ht.isreal(ht.ones((6, 6), dtype=ht.bool, split=0)),
            ht.ones((6, 6), dtype=ht.bool, split=0),
        )
        self._assert_bool_result(
            ht.isreal(ht.full((5, 5), 1 + 1j, dtype=ht.int, split=1)),
            ht.zeros((5, 5), dtype=ht.bool, split=1),
        )
class TestTypeConversion(TestCase):
    """Tests for casting rules, canonicalization and type promotion."""

    def test_can_cast(self):
        """ht.can_cast must honor the numpy-style casting modes."""
        zeros_array = np.zeros((3,), dtype=np.int16)
        # casting - 'no'
        self.assertTrue(ht.can_cast(ht.uint8, ht.uint8, casting="no"))
        self.assertFalse(ht.can_cast(ht.uint8, ht.int16, casting="no"))
        self.assertFalse(ht.can_cast(ht.uint8, ht.int8, casting="no"))
        self.assertFalse(ht.can_cast(ht.float64, ht.bool, casting="no"))
        self.assertTrue(ht.can_cast(1.0, ht.float32, casting="no"))
        self.assertFalse(ht.can_cast(zeros_array, ht.float32, casting="no"))
        # casting - 'safe'
        self.assertTrue(ht.can_cast(ht.uint8, ht.uint8, casting="safe"))
        self.assertTrue(ht.can_cast(ht.uint8, ht.int16, casting="safe"))
        self.assertFalse(ht.can_cast(ht.uint8, ht.int8, casting="safe"))
        self.assertFalse(ht.can_cast(ht.float64, ht.bool, casting="safe"))
        self.assertTrue(ht.can_cast(1.0, ht.float32, casting="safe"))
        self.assertTrue(ht.can_cast(zeros_array, ht.float32, casting="safe"))
        # casting - 'same_kind'
        self.assertTrue(ht.can_cast(ht.uint8, ht.uint8, casting="same_kind"))
        self.assertTrue(ht.can_cast(ht.uint8, ht.int16, casting="same_kind"))
        self.assertTrue(ht.can_cast(ht.uint8, ht.int8, casting="same_kind"))
        self.assertFalse(ht.can_cast(ht.float64, ht.bool, casting="same_kind"))
        self.assertTrue(ht.can_cast(1.0, ht.float32, casting="same_kind"))
        self.assertTrue(ht.can_cast(zeros_array, ht.float32, casting="same_kind"))
        # casting - 'unsafe'
        self.assertTrue(ht.can_cast(ht.uint8, ht.uint8, casting="unsafe"))
        self.assertTrue(ht.can_cast(ht.uint8, ht.int16, casting="unsafe"))
        self.assertTrue(ht.can_cast(ht.uint8, ht.int8, casting="unsafe"))
        self.assertTrue(ht.can_cast(ht.float64, ht.bool, casting="unsafe"))
        self.assertTrue(ht.can_cast(1.0, ht.float32, casting="unsafe"))
        self.assertTrue(ht.can_cast(zeros_array, ht.float32, casting="unsafe"))
        # exceptions
        with self.assertRaises(TypeError):
            ht.can_cast(ht.uint8, ht.uint8, casting=1)
        with self.assertRaises(ValueError):
            ht.can_cast(ht.uint8, ht.uint8, casting="hello world")
        with self.assertRaises(TypeError):
            ht.can_cast({}, ht.uint8, casting="unsafe")
        with self.assertRaises(TypeError):
            ht.can_cast(ht.uint8, {}, casting="unsafe")

    def test_canonical_heat_type(self):
        """Char codes, builtins, numpy and torch dtypes all canonicalize."""
        self.assertEqual(ht.core.types.canonical_heat_type(ht.float32), ht.float32)
        self.assertEqual(ht.core.types.canonical_heat_type("?"), ht.bool)
        self.assertEqual(ht.core.types.canonical_heat_type(int), ht.int32)
        self.assertEqual(ht.core.types.canonical_heat_type("u1"), ht.uint8)
        self.assertEqual(ht.core.types.canonical_heat_type(np.int8), ht.int8)
        self.assertEqual(ht.core.types.canonical_heat_type(torch.short), ht.int16)
        self.assertEqual(ht.core.types.canonical_heat_type(torch.cfloat), ht.complex64)
        # non-type objects and unknown char codes are rejected
        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type({})
        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type(object)
        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type(1)
        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type("i7")

    def test_heat_type_of(self):
        """Type inference from tensors, arrays, scalars and iterables."""
        ht_tensor = ht.zeros((1,), dtype=ht.bool)
        self.assertEqual(ht.core.types.heat_type_of(ht_tensor), ht.bool)
        np_array = np.ones((3,), dtype=np.int32)
        self.assertEqual(ht.core.types.heat_type_of(np_array), ht.int32)
        scalar = 2.0
        self.assertEqual(ht.core.types.heat_type_of(scalar), ht.float32)
        # inference uses the first element of an iterable
        iterable = [3, "hello world"]
        self.assertEqual(ht.core.types.heat_type_of(iterable), ht.int32)
        torch_tensor = torch.full((2,), 1 + 1j, dtype=torch.complex128)
        self.assertEqual(ht.core.types.heat_type_of(torch_tensor), ht.complex128)
        with self.assertRaises(TypeError):
            ht.core.types.heat_type_of({})
        with self.assertRaises(TypeError):
            ht.core.types.heat_type_of(object)

    def test_issubdtype(self):
        """Walk the datatype hierarchy level by level."""
        # First level
        self.assertTrue(ht.issubdtype(ht.bool, ht.datatype))
        self.assertTrue(ht.issubdtype(ht.bool_, ht.datatype))
        self.assertTrue(ht.issubdtype(ht.number, ht.datatype))
        self.assertTrue(ht.issubdtype(ht.integer, ht.datatype))
        self.assertTrue(ht.issubdtype(ht.signedinteger, ht.datatype))
        self.assertTrue(ht.issubdtype(ht.unsignedinteger, ht.datatype))
        self.assertTrue(ht.issubdtype(ht.floating, ht.datatype))
        self.assertTrue(ht.issubdtype(ht.flexible, ht.datatype))
        # Second level
        self.assertTrue(ht.issubdtype(ht.integer, ht.number))
        self.assertTrue(ht.issubdtype(ht.floating, ht.number))
        self.assertTrue(ht.issubdtype(ht.signedinteger, ht.integer))
        self.assertTrue(ht.issubdtype(ht.unsignedinteger, ht.integer))
        # Third level
        self.assertTrue(ht.issubdtype(ht.int8, ht.signedinteger))
        self.assertTrue(ht.issubdtype(ht.int16, ht.signedinteger))
        self.assertTrue(ht.issubdtype(ht.int32, ht.signedinteger))
        self.assertTrue(ht.issubdtype(ht.int64, ht.signedinteger))
        self.assertTrue(ht.issubdtype(ht.uint8, ht.unsignedinteger))
        self.assertTrue(ht.issubdtype(ht.float32, ht.floating))
        self.assertTrue(ht.issubdtype(ht.float64, ht.floating))
        # Fourth level
        self.assertTrue(ht.issubdtype(ht.byte, ht.int8))
        self.assertTrue(ht.issubdtype(ht.short, ht.int16))
        self.assertTrue(ht.issubdtype(ht.int, ht.int32))
        self.assertTrue(ht.issubdtype(ht.long, ht.int64))
        self.assertTrue(ht.issubdtype(ht.uint8, ht.ubyte))
        self.assertTrue(ht.issubdtype(ht.float32, ht.float))
        self.assertTrue(ht.issubdtype(ht.float32, ht.float_))
        self.assertTrue(ht.issubdtype(ht.float64, ht.double))
        # Small tests char representations (-> canonical_heat_type)
        # BUG FIX: the original read `self.assertTrue("i", ht.int8)`, which
        # always passes -- a non-empty string is truthy and the second
        # argument is only the failure message.  The intended check, in
        # line with the neighbouring assertions, is a subtype test of the
        # char code for int8 ("i1", as in numpy) -- confirm char against
        # heat's canonical_heat_type table.
        self.assertTrue(ht.issubdtype("i1", ht.int8))
        self.assertTrue(ht.issubdtype("B", ht.uint8))
        self.assertTrue(ht.issubdtype(ht.float64, "f8"))
        # Small tests Exceptions (-> canonical_heat_type)
        with self.assertRaises(TypeError):
            ht.issubdtype(ht.bool, True)
        with self.assertRaises(TypeError):
            ht.issubdtype(4.2, "f")
        with self.assertRaises(TypeError):
            ht.issubdtype({}, ht.int)

    def test_type_promotions(self):
        """Pairwise promotion must match numpy-style promotion rules."""
        self.assertEqual(ht.promote_types(ht.uint8, ht.uint8), ht.uint8)
        self.assertEqual(ht.promote_types(ht.int8, ht.uint8), ht.int16)
        self.assertEqual(ht.promote_types(ht.int32, ht.float32), ht.float32)
        self.assertEqual(ht.promote_types("f4", ht.float), ht.float32)
        self.assertEqual(ht.promote_types(ht.bool_, "?"), ht.bool)
        self.assertEqual(ht.promote_types(ht.float32, ht.complex64), ht.complex64)
        # exceptions
        with self.assertRaises(TypeError):
            ht.promote_types(1, "?")
        with self.assertRaises(TypeError):
            ht.promote_types(ht.float32, "hello world")

    def test_result_type(self):
        """result_type over mixed scalars, arrays, dtypes and char codes."""
        self.assertEqual(ht.result_type(1), ht.int32)
        self.assertEqual(ht.result_type(1, 1.0), ht.float32)
        self.assertEqual(ht.result_type(1.0, True, 1 + 1j), ht.complex64)
        self.assertEqual(ht.result_type(ht.array(1, dtype=ht.int32), 1), ht.int32)
        self.assertEqual(ht.result_type(1.0, ht.array(1, dtype=ht.int32)), ht.float32)
        self.assertEqual(ht.result_type(ht.uint8, ht.int8), ht.int16)
        self.assertEqual(ht.result_type("b", "f4"), ht.float32)
        self.assertEqual(ht.result_type(ht.array([1], dtype=ht.float64), "f4"), ht.float64)
        self.assertEqual(
            ht.result_type(
                ht.array([1, 2, 3, 4], dtype=ht.float64, split=0),
                1,
                ht.bool,
                "u",
                torch.uint8,
                np.complex128,
                ht.array(1, dtype=ht.int64),
            ),
            ht.complex128,
        )
        self.assertEqual(
            ht.result_type(np.array([1, 2, 3]), np.dtype("int32"), torch.tensor([1, 2, 3])),
            ht.int64,
        )

    def test_finfo(self):
        """finfo exposes float32 limits and rejects non-float inputs."""
        info32 = ht.finfo(ht.float32)
        self.assertEqual(info32.bits, 32)
        self.assertEqual(info32.max, (2 - 2**-23) * 2**127)
        self.assertEqual(info32.min, -info32.max)
        self.assertEqual(info32.eps, 2**-23)
        with self.assertRaises(TypeError):
            ht.finfo(1)
        with self.assertRaises(TypeError):
            ht.finfo(ht.int32)
        with self.assertRaises(TypeError):
            ht.finfo("float16")

    def test_iinfo(self):
        """iinfo exposes int32 limits and rejects non-integer inputs."""
        info32 = ht.iinfo(ht.int32)
        self.assertEqual(info32.bits, 32)
        self.assertEqual(info32.max, 2147483647)
        self.assertEqual(info32.min, -2147483648)
        with self.assertRaises(TypeError):
            ht.iinfo(1.0)
        with self.assertRaises(TypeError):
            ht.iinfo(ht.float64)
        with self.assertRaises(TypeError):
            ht.iinfo("int16")
|
import os
class UbuntuInstaller:
def __init__(self, myenv):
self._my = myenv
self._tools = myenv.tools
def do_all(self, prebuilt=False, pips_level=3):
self._tools.log("installing Ubuntu version")
self._my.installers.ubuntu.ensure_version()
self._my.installers.ubuntu.base()
# UbuntuInstaller.ubuntu_base_install()
if not prebuilt:
self._my.installers.ubuntu.python_dev_install()
self._my.installers.ubuntu.apts_install()
if not prebuilt:
self._my.installers.base.pips_install(pips_level=pips_level)
def ensure_version(self):
if not os.path.exists("/etc/lsb-release"):
raise self._tools.exceptions.Base("Your operating system is not supported")
return True
def base(self):
self._my.init()
if self._my.state_get("base"):
return
rc, out, err = self._tools.execute("lsb_release -a")
if out.find("Ubuntu 18.04") != -1:
bionic = True
else:
bionic = False
if bionic:
script = """
if ! grep -Fq "deb http://mirror.unix-solutions.be/ubuntu/ bionic" /etc/apt/sources.list; then
echo >> /etc/apt/sources.list
echo "# Jumpscale Setup" >> /etc/apt/sources.list
echo deb http://mirror.unix-solutions.be/ubuntu/ bionic main universe multiverse restricted >> /etc/apt/sources.list
fi
"""
self._tools.execute(script, interactive=True, die=False)
script = """
apt-get update
apt-get install -y mc wget python3 git tmux telnet
set +e
apt-get install python3-distutils -y
set -e
apt-get install python3-psutil -y
apt-get install -y curl rsync unzip
locale-gen --purge en_US.UTF-8
apt-get install python3-pip -y
apt-get install -y redis-server
apt-get install locales -y
"""
self._tools.execute(script, interactive=True)
if bionic and not self._my._docker.indocker():
self._my.installers.ubuntu.docker_install()
self._my.state_set("base")
def docker_install(self):
if self._my.state_get("ubuntu_docker_install"):
return
script = """
apt-get update
apt-get upgrade -y --force-yes
apt-get install sudo python3-pip -y
pip3 install pudb
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable"
apt-get update
sudo apt-get install docker-ce -y
"""
self._tools.execute(script, interactive=True)
self._my.state_set("ubuntu_docker_install")
def python_dev_install(self):
if self._my.state_get("python_dev_install"):
return
self._tools.log("installing jumpscale tools")
script = """
cd /tmp
apt-get install -y build-essential
#apt-get install -y python3.8-dev
"""
rc, out, err = self._tools.execute(script, interactive=True, timeout=300)
if rc > 0:
# lets try other time
rc, out, err = self._tools.execute(script, interactive=True, timeout=300)
self._my.state_set("python_dev_install")
def apts_list(self):
return [
"iproute2",
"python-ufw",
"ufw",
"libpq-dev",
"iputils-ping",
"net-tools",
"libgeoip-dev",
"libcapnp-dev",
"graphviz",
"libssl-dev",
"cmake",
"fuse",
]
def apts_install(self):
    """Install every package from apts_list, tracking each via a state flag."""
    for pkg in self._my.installers.ubuntu.apts_list():
        flag = "apt_%s" % pkg
        if self._my.state_get(flag):
            # Already installed in a previous run; skip.
            continue
        self._tools.execute("apt-get install -y %s" % pkg, die=True)
        self._my.state_set(flag)
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-TSF-msctf
GUID : 4fba1227-f606-4e5f-b9e8-fab9ab5740f3
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
# Auto-generated ETW (Event Tracing for Windows) event declarations for the
# Microsoft-Windows-TSF-msctf provider. Each class binds one
# (event_id, version) pair to the construct Struct that parses its payload.
@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=5, version=0)
class Microsoft_Windows_TSF_msctf_5_0(Etw):
    pattern = Struct(
        "thread_flags" / Int32ul,
        "activation_flags" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=7, version=0)
class Microsoft_Windows_TSF_msctf_7_0(Etw):
    pattern = Struct(
        "thread_flags" / Int32ul,
        "activation_flags" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=19, version=0)
class Microsoft_Windows_TSF_msctf_19_0(Etw):
    pattern = Struct(
        "guid" / Guid
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=20, version=0)
class Microsoft_Windows_TSF_msctf_20_0(Etw):
    pattern = Struct(
        "guid" / Guid
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=21, version=0)
class Microsoft_Windows_TSF_msctf_21_0(Etw):
    pattern = Struct(
        "guid" / Guid
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=22, version=0)
class Microsoft_Windows_TSF_msctf_22_0(Etw):
    pattern = Struct(
        "guid" / Guid
    )
# Events 29-45: language-change and cross-thread message notifications.
@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=29, version=0)
class Microsoft_Windows_TSF_msctf_29_0(Etw):
    pattern = Struct(
        "LANGID" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=30, version=0)
class Microsoft_Windows_TSF_msctf_30_0(Etw):
    pattern = Struct(
        "LANGID" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=31, version=0)
class Microsoft_Windows_TSF_msctf_31_0(Etw):
    pattern = Struct(
        "event" / Int32ul,
        "hwnd" / Int64ul,
        "flags" / Int32ul,
        "event_order" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=32, version=0)
class Microsoft_Windows_TSF_msctf_32_0(Etw):
    pattern = Struct(
        "hwnd" / Int64ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=33, version=0)
class Microsoft_Windows_TSF_msctf_33_0(Etw):
    pattern = Struct(
        "event" / Int32ul,
        "hwnd" / Int64ul,
        "flags" / Int32ul,
        "event_order" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=35, version=0)
class Microsoft_Windows_TSF_msctf_35_0(Etw):
    pattern = Struct(
        "message" / Int32ul,
        "target_tid" / Int32ul,
        "params" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=36, version=0)
class Microsoft_Windows_TSF_msctf_36_0(Etw):
    pattern = Struct(
        "message" / Int32ul,
        "target_tid" / Int32ul,
        "params" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=38, version=0)
class Microsoft_Windows_TSF_msctf_38_0(Etw):
    pattern = Struct(
        "message" / Int32ul,
        "target_tid" / Int32ul,
        "params" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=42, version=0)
class Microsoft_Windows_TSF_msctf_42_0(Etw):
    pattern = Struct(
        "message" / Int32ul,
        "target_tid" / Int32ul,
        "params" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=43, version=0)
class Microsoft_Windows_TSF_msctf_43_0(Etw):
    pattern = Struct(
        "param" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=45, version=0)
class Microsoft_Windows_TSF_msctf_45_0(Etw):
    pattern = Struct(
        "message" / Int32ul,
        "target_tid" / Int32ul,
        "params" / Int32ul,
        "hr" / Int32ul
    )
# Events 52-70: generic single-param events, document-focus changes and
# GUID-tagged notifications.
@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=52, version=0)
class Microsoft_Windows_TSF_msctf_52_0(Etw):
    pattern = Struct(
        "param" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=53, version=0)
class Microsoft_Windows_TSF_msctf_53_0(Etw):
    pattern = Struct(
        "param" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=54, version=0)
class Microsoft_Windows_TSF_msctf_54_0(Etw):
    pattern = Struct(
        "param" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=55, version=0)
class Microsoft_Windows_TSF_msctf_55_0(Etw):
    pattern = Struct(
        "param" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=56, version=0)
class Microsoft_Windows_TSF_msctf_56_0(Etw):
    pattern = Struct(
        "pdimNewFocus" / Int64ul,
        "pdimPrevFocus" / Int64ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=57, version=0)
class Microsoft_Windows_TSF_msctf_57_0(Etw):
    pattern = Struct(
        "pdimNewFocus" / Int64ul,
        "pdimPrevFocus" / Int64ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=58, version=0)
class Microsoft_Windows_TSF_msctf_58_0(Etw):
    pattern = Struct(
        "param" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=59, version=0)
class Microsoft_Windows_TSF_msctf_59_0(Etw):
    pattern = Struct(
        "param" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=64, version=0)
class Microsoft_Windows_TSF_msctf_64_0(Etw):
    pattern = Struct(
        "param" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=65, version=0)
class Microsoft_Windows_TSF_msctf_65_0(Etw):
    pattern = Struct(
        "guid" / Guid
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=66, version=0)
class Microsoft_Windows_TSF_msctf_66_0(Etw):
    pattern = Struct(
        "guid" / Guid
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=67, version=0)
class Microsoft_Windows_TSF_msctf_67_0(Etw):
    pattern = Struct(
        "guid" / Guid
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=68, version=0)
class Microsoft_Windows_TSF_msctf_68_0(Etw):
    pattern = Struct(
        "guid" / Guid
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=69, version=0)
class Microsoft_Windows_TSF_msctf_69_0(Etw):
    pattern = Struct(
        "guid" / Guid
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=70, version=0)
class Microsoft_Windows_TSF_msctf_70_0(Etw):
    pattern = Struct(
        "guid" / Guid
    )
# Events 74-89: input-profile, document and boolean state notifications.
@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=74, version=0)
class Microsoft_Windows_TSF_msctf_74_0(Etw):
    pattern = Struct(
        "param" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=75, version=0)
class Microsoft_Windows_TSF_msctf_75_0(Etw):
    pattern = Struct(
        "guid" / Guid,
        "langid" / Int16ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=77, version=0)
class Microsoft_Windows_TSF_msctf_77_0(Etw):
    pattern = Struct(
        "param" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=79, version=0)
class Microsoft_Windows_TSF_msctf_79_0(Etw):
    pattern = Struct(
        "DocId" / Int32ul,
        "DocThread" / Int32ul,
        "GainFocus" / Int8ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=80, version=0)
class Microsoft_Windows_TSF_msctf_80_0(Etw):
    pattern = Struct(
        "thread" / Int32ul,
        "document" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=81, version=0)
class Microsoft_Windows_TSF_msctf_81_0(Etw):
    pattern = Struct(
        "thread" / Int32ul,
        "document" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=82, version=0)
class Microsoft_Windows_TSF_msctf_82_0(Etw):
    pattern = Struct(
        "boolean" / Int8ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=84, version=0)
class Microsoft_Windows_TSF_msctf_84_0(Etw):
    pattern = Struct(
        "param" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=85, version=0)
class Microsoft_Windows_TSF_msctf_85_0(Etw):
    pattern = Struct(
        "param" / Int32ul
    )

@declare(guid=guid("4fba1227-f606-4e5f-b9e8-fab9ab5740f3"), event_id=89, version=0)
class Microsoft_Windows_TSF_msctf_89_0(Etw):
    pattern = Struct(
        "boolean" / Int8ul
    )
|
<gh_stars>0
import argparse
import cv2 as cv
import datetime
import json
import numpy as np
import pyautogui
import random
import sys
import time
import os
import math
from PIL import Image
from pynput import mouse
from matplotlib import pyplot as plt
from matplotlib import cm
from PIL import ImageGrab
from functools import partial
# Capture all monitors, not just the primary one.
ImageGrab.grab = partial(ImageGrab.grab, all_screens=True)
# Safety hatch: slamming the mouse into the top-left corner aborts pyautogui.
pyautogui.FAILSAFE = True
pyautogui.PAUSE = 0.5
np.set_printoptions(threshold=sys.maxsize)
# NOTE(review): OFFSET appears unused; action() hard-codes 15 instead.
OFFSET = 15
# Maximum random jitter (pixels) applied to click targets.
RANDOM_RANGE = 15
# Near-black background shades (packed 0xRRGGBB ints, see rgb()) that are
# ignored when building the heat map.
BLACK_LISTED_COLORS = {
    0x010101,
    0x282828,
    0x212121,
    0x1b1b1b,
    0x0b0b0b,
    0x272727,
    0x202020,
    0x1a1a1a,
    0x0a0a0a,
    0x090909,
    0x030303,
    0x080808,
    0x292929,
    0x020202,
    0x0c0c0c,
    0x222222,
    0x1c1c1c,
    0x040404,
    0x202120,
    0x100,
    0x0f0f0f,
    0x272827,
    0x10001,
}
def rgb(r, g, b):
    """Pack the three 8-bit colour channels into a single 0xRRGGBB integer."""
    return (r * 0x10000) + (g * 0x100) + b
class ClusterPainter:
    """Finds two clusters on a heat-map screenshot and traces a dividing
    outline with automated mouse clicks."""
    def __init__(self, plot_enable, sim):
        screen_width, screen_height = pyautogui.size()
        # Capture region; defaults to the whole screen until set_bounderies().
        self.x_min = 0
        self.x_max = screen_width
        self.y_min = 0
        self.y_max = screen_height
        self.top_left_set = False
        self.bottom_right_set = False
        # Center of the [Submit] UI button, filled by set_submit_button().
        self.submit_button = None
        self.last_coord = []
        # Simulation mode (a list of image paths) always enables plotting.
        self.plot_enable = plot_enable or sim is not None
        self.sim = sim
def on_click(self, x, y, button, pressed):
    """pynput mouse-listener callback.

    Calibration phase: the first release records the top-left corner, the
    second the bottom-right. Afterwards a left-button release stores the
    click position in last_coord. Returns False to stop the listener after
    each release; returns None (keep listening) while a button is pressed.
    """
    if pressed:
        # Only act on button release.
        return None
    if self.top_left_set and self.bottom_right_set:
        self.last_coord = [x, y] if button == mouse.Button.left else []
        return False
    if not self.top_left_set:
        print('Top Left: {} {}'.format(x, y))
        self.top_left_set = True
        self.x_min, self.y_min = x, y
        return False
    print('Bottom Right: {} {}'.format(x, y))
    self.bottom_right_set = True
    self.x_max, self.y_max = x, y
    return False
def set_bounderies(self):
    """Interactively capture the screen region to analyse.

    The first mouse release records the top-left corner, the second the
    bottom-right (via on_click). Skipped entirely in simulation mode.

    Raises:
        ValueError: if the recorded bottom-right corner is not below and to
            the right of the top-left corner.
    """
    if self.sim is not None and len(self.sim) != 0:
        return
    print('SET BOUNDARY : top_left')
    with mouse.Listener(on_click=self.on_click) as listener:
        listener.join()
    print('SET BOUNDARY : bottom_right')
    with mouse.Listener(on_click=self.on_click) as listener:
        listener.join()
    # BUG FIX: the second clause previously re-checked the x axis
    # ("self.x_max < self.x_min" twice), so an inverted y range
    # (bottom edge above the top edge) was silently accepted.
    if self.x_max < self.x_min or self.y_max < self.y_min:
        raise ValueError(
            'bottom_right ({}, {}) is less than top_left ({}, {})'.format(
                self.x_min, self.y_min, self.x_max, self.y_max,))
def set_submit_button(self):
    """Locate the [Submit] button on screen and remember its center point.

    Skipped in simulation mode. Raises ValueError when the button template
    cannot be found.
    """
    if self.sim is not None and len(self.sim) != 0:
        return
    print('SET CONTINUE BUTTON')
    match = pyautogui.locateOnScreen(
        'ui_targets/submit_button_new.png', confidence=0.5)
    if not match:
        raise ValueError('[Submit] Button not found.')
    self.submit_button = pyautogui.center(match)
    print(self.submit_button)
def screenshot(self):
    """Grab the calibrated screen region as a PIL image.

    When plotting is enabled for a live (non-simulated) run, the capture is
    also archived under images/ with a timestamped name for later debugging.
    """
    image = pyautogui.screenshot(region=(
        self.x_min,
        self.y_min,
        self.x_max - self.x_min,
        self.y_max - self.y_min,
    ))
    if self.plot_enable and self.sim is None:
        image_name = 'cluster_{}.png'.format(datetime.datetime.now().timestamp())
        image_path = 'images/{}'.format(image_name)
        image.save(image_path)
    return image
def transform_cluster_coord(self, coord):
    """Translate a region-relative (x, y) pair into absolute screen coordinates."""
    col, row = coord
    return self.x_min + col, self.y_min + row
def generate_cluster_data(self):
    """Convert a screenshot (or simulation image) into weighted 2-D samples.

    Pipeline: pack RGB into 0xRRGGBB ints, zero out black-listed background
    shades, map the red channel to a 0-100 "heat" percentage, zero values
    below the mean, then emit one (x, y) point per surviving pixel.

    Returns:
        (points, weights, width, height) where points is an (N, 2) float32
        array of pixel coordinates and weights the matching percentages.
    """
    # Ignore square dots in the corners.
    def ignore_color(c):
        if c in BLACK_LISTED_COLORS:
            return 0
        return c

    def heat_map_percentage(c):
        # Heat is derived from the red channel: 255 maps to 0%, darker red
        # to higher percentages; nearly-black pixels are dropped outright.
        r = (c >> 16) & 0xFF;
        g = (c >> 8) & 0xFF;
        b = c & 0xFF;
        if r < 10:
            return 0
        pct = int((r - 255) / -255 * 100)
        return pct

    # Deliberately assigned later (from the flattened data) but before
    # cleanup_heat_map is invoked through np.vectorize below.
    mean = None
    median = None
    max_pct = None
    min_pct = None

    def cleanup_heat_map(pct):
        # Suppress below-average heat so only strong cluster pixels remain.
        if pct < mean:
            return 0
        return pct

    converted_image = None
    if self.sim is not None and len(self.sim) != 0:
        # Simulation: analyse the supplied image file instead of the screen.
        with Image.open(self.sim[0]) as image:
            converted_image = np.array(image.convert('RGB'))
    elif self.sim is not None:
        converted_image = np.array(self.screenshot().convert('RGB'))
    else:
        converted_image = np.array(self.screenshot().convert('RGB'))

    # Pack the RGB channels into single 0xRRGGBB ints for the lookups above.
    cv_image_array = np.asarray(converted_image, dtype='uint32')
    cv_image_flatten = (cv_image_array[:, :, 0] << 16) \
        + (cv_image_array[:, :, 1] << 8) \
        + (cv_image_array[:, :, 2])
    cv_image_flatten = np.vectorize(ignore_color)(cv_image_flatten)
    cv_image_percentage = np.vectorize(heat_map_percentage)(cv_image_flatten)
    if self.plot_enable:
        plt.subplot(223)
        flatten_plt = plt.imshow(cv_image_flatten)
        flatten_plt.format_cursor_data = lambda data : \
            '[{}]'.format(hex(data))

    # Statistics over non-zero heat values only (zeros become NaN).
    cv_image_data = np.array(cv_image_percentage.flatten(), dtype = float)
    cv_image_data[cv_image_data == 0] = np.nan
    mean = np.nanmean(cv_image_data)
    median = np.nanmedian(cv_image_data)
    max_pct = np.nanmax(cv_image_data)
    min_pct = np.nanmin(cv_image_data)
    print(mean, median, max_pct, min_pct)
    cv_image_percentage = np.vectorize(cleanup_heat_map)(cv_image_percentage)

    cluster_x = []
    cluster_y = []
    cluster_weight = []
    for y in range(len(cv_image_percentage)):
        for x in range(len(cv_image_percentage[y])):
            pct = cv_image_percentage[y][x]
            # for j in range(pct):
            if pct != 0:
                cluster_x.append(x)
                cluster_y.append(y)
                cluster_weight.append(pct)
    z = np.vstack((cluster_x, cluster_y)).T
    z = np.float32(z)
    if self.plot_enable:
        plt.subplot(222)
        percentage_plt = plt.imshow(cv_image_percentage)
        plt.subplot(224)
        plt.imshow(converted_image)
    return z, cluster_weight, len(cv_image_percentage[0]), len(cv_image_percentage)
def execute_clustering(self):
    """Cluster the heat-map points with HDBSCAN.

    Returns (centers, width, height) with at most the two centroids of the
    largest clusters (noise label -1 is skipped when real clusters exist).
    """
    z, cluster_weight, max_x, max_y = self.generate_cluster_data()
    # Imported lazily: hdbscan is only needed for this step.
    import hdbscan
    model = hdbscan.HDBSCAN(min_cluster_size = 30, allow_single_cluster = True)
    clusters = model.fit_predict(z) # sample_weight = cluster_weight)
    unique_clusters, counts = np.unique(clusters, return_counts = True)
    if self.plot_enable:
        cluster_plot = plt.subplot(221)
        cluster_plot.axes.invert_yaxis()
    centers_associations = {}
    centers = []
    # Visit clusters from largest to smallest membership.
    cluster_counts = dict(zip(unique_clusters, counts))
    sorted_unique_clusters = sorted(unique_clusters, key = lambda x : -cluster_counts[x])
    print(sorted_unique_clusters)
    count = 0
    for cluster in sorted_unique_clusters:
        row_index = np.where(clusters == cluster)
        if count >= 2:
            break
        # Skip the HDBSCAN noise label (-1) when other clusters exist.
        # NOTE(review): this scatter call runs even when plot_enable is
        # False -- likely missing a plot_enable guard.
        if len(clusters) > 2 and cluster == -1:
            plt.scatter(z[row_index, 0], z[row_index, 1])
            continue
        x = np.mean(z[row_index, 0])
        y = np.mean(z[row_index, 1])
        centers.append((x, y))
        centers_associations[(x, y)] = z[row_index]
        if self.plot_enable:
            plt.scatter(z[row_index, 0], z[row_index, 1])
        count += 1
    return centers, max_x, max_y
def generate_edge_to_edge_line(self, slope, b, max_x, max_y):
    """Return the points where y = slope*x + b crosses the region border.

    Candidate intersections are tested in the fixed order bottom edge
    (y = max_y), top edge (y = 0), right edge (x = max_x), left edge (x = 0)
    so callers always see the same point ordering.
    """
    x_at_max_y = (max_y - b) / slope
    x_at_zero = -b / slope
    y_at_max_x = slope * max_x + b
    y_at_zero = b
    points = []
    if 0 <= x_at_max_y <= max_x:
        points.append([x_at_max_y, max_y])
    if 0 <= x_at_zero <= max_x:
        points.append([x_at_zero, 0])
    if 0 <= y_at_max_x <= max_y:
        points.append([max_x, y_at_max_x])
    if 0 <= y_at_zero <= max_y:
        points.append([0, y_at_zero])
    return points
def compute_edge_points(self, main_centers, max_x, max_y):
    """Compute the perpendicular bisector between a pair of cluster centers.

    Iterates every ordered pair of distinct centers, each pass overwriting
    the previous result (so effectively the last pair wins, matching the
    original behaviour). Returns (divider_points_as_ndarray, slope,
    intercept); all three stay None when fewer than two centers exist.
    """
    main_divider, slope, b = None, None, None
    for first in main_centers:
        for second in main_centers:
            if first == second:
                continue
            x1, y1 = first
            x2, y2 = second
            mid_x = (x1 + x2) / 2
            mid_y = (y1 + y2) / 2
            segment_slope = (y2 - y1) / (x2 - x1)
            # The divider is perpendicular to the segment joining the centers
            # and passes through its midpoint.
            slope = -1 / segment_slope
            b = mid_y - slope * mid_x
            main_divider = np.array(self.generate_edge_to_edge_line(
                slope, b, max_x, max_y))
    if self.plot_enable:
        plt.plot(main_divider[:, 0], main_divider[:, 1], '--', linewidth = 2)
    return main_divider, slope, b
def action(self):
    """Main automation loop: cluster, trace click outlines, submit, repeat.

    In simulation mode (images supplied) it only analyses and returns
    'Done!'. Live runs draw a jittered outline on each side of the divider
    with mouse clicks, then click the submit button and wait for the next
    puzzle; the loop only ends via the pyautogui fail-safe.
    """
    while True:
        centers, max_x, max_y = self.execute_clustering()
        divider, slope, b = self.compute_edge_points(
            centers, max_x, max_y)
        print(divider, slope, b)
        if self.plot_enable:
            plt.show()
        if self.sim is not None and len(self.sim) != 0:
            # Simulation analyses images only; never clicks.
            break
        shapes = []
        if divider is None:
            # Single cluster: trace one jittered rectangle around everything.
            shapes.append([random.randint(0, RANDOM_RANGE), random.randint(0, RANDOM_RANGE)])
            shapes.append([max_x - random.randint(0, RANDOM_RANGE), random.randint(0, RANDOM_RANGE)])
            shapes.append([max_x - random.randint(0, RANDOM_RANGE), max_y - random.randint(0, RANDOM_RANGE)])
            shapes.append([random.randint(0, RANDOM_RANGE), max_y - random.randint(0, RANDOM_RANGE)])
            shapes.append(shapes[0])
        else:
            x1, y1 = divider[0]
            x2, y2 = divider[1]
            # Jittered screen corners plus two lines parallel to the divider,
            # offset to either side by half the divider "thickness".
            top_left = (random.randint(0, RANDOM_RANGE), random.randint(0, RANDOM_RANGE))
            top_right = (max_x - random.randint(0, RANDOM_RANGE), random.randint(0, RANDOM_RANGE))
            bottom_right = (max_x, max_y)
            bottom_left = (random.randint(0, RANDOM_RANGE), max_y - random.randint(0, RANDOM_RANGE))
            offset = abs(math.sqrt((15 * slope) ** 2 + 15 ** 2)) / 2
            print(offset)
            line1 = self.generate_edge_to_edge_line(slope, b + offset, max_x, max_y)
            line2 = self.generate_edge_to_edge_line(slope, b - offset, max_x, max_y)
            points = [top_left, top_right, bottom_right, bottom_left]
            points.extend(line1)
            points.extend(line2)
            # Partition the points by which side of the divider they fall on
            # (sign of the 2-D cross product), then order each side by angle
            # around the screen center so the traced outline doesn't cross.
            bucket_1 = []
            bucket_2 = []
            for x, y in points:
                d = (x - x1) * (y2 - y1) - (y - y1) * (x2 -x1)
                if d < 0:
                    bucket_1.append((x, y))
                else:
                    bucket_2.append((x, y))
            cx = max_x / 2
            cy = max_y / 2
            def compare(x):
                return math.atan2(x[0] - cx, x[1] - cy)
            bucket_1 = sorted(bucket_1, key = compare)
            bucket_2 = sorted(bucket_2, key = compare)
            # Close each polygon by repeating its first point.
            shapes.extend(bucket_1)
            shapes.append(bucket_1[0])
            shapes.extend(bucket_2)
            shapes.append(bucket_2[0])
        # NOTE(review): count is never used below -- apparent leftover.
        count = 0
        for coordinates in shapes:
            x, y = self.transform_cluster_coord(coordinates)
            pyautogui.moveTo(x, y)
            print('RECT 1 Moved mouse to: ({}, {})'.format(x, y))
            pyautogui.click()
        if self.sim is not None and len(self.sim) == 0:
            # Dry-run mode: pause for manual inspection instead of submitting.
            input('Press [ENTER] when ready.')
            continue
        # Submit entry and click through UI to get next puzzle.
        pyautogui.moveTo(
            self.submit_button[0] + random.randint(-50, 50),
            self.submit_button[1] + random.randint(-20, 20))
        pyautogui.click()
        time.sleep(1)
        pyautogui.moveTo(
            self.submit_button[0] + random.randint(-50, 50),
            self.submit_button[1] + random.randint(-20, 20))
        pyautogui.click()
        time.sleep(1)
        pyautogui.moveTo(
            self.submit_button[0] + random.randint(-50, 50),
            self.submit_button[1] + random.randint(-20, 20))
        pyautogui.click()
        # Wait until UI is ready before starting next iteration.
        continue_button = None
        while not continue_button:
            continue_button = pyautogui.locateOnScreen(
                'ui_targets/submit_button_new.png', confidence=0.5)
            time.sleep(1)
    return 'Done!'
def main():
    """CLI entry point: parse flags, calibrate the painter, run the loop.

    Returns:
        A human-readable exit message. BUG FIX: previously the 'Done!'
        result of painter.action() was discarded, so the caller printed
        'Exit Message:  None' on a normal (simulation) exit.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--plot', action = 'store_true',
        help = "generate analysis plot prior to doing automated action")
    parser.add_argument('-s', '--simulate', type = str, nargs = '*',
        help = "simulate analysis of passed in image")
    args = parser.parse_args()
    try:
        painter = ClusterPainter(args.plot, args.simulate)
        painter.set_bounderies()
        painter.set_submit_button()
        input('Press [ENTER] when ready.')
        # Propagate action()'s exit message instead of dropping it.
        return painter.action()
    except pyautogui.FailSafeException:
        return 'PyAutoGUI FailSafe Interrupt'
# Script entry point; main() returns the message printed on exit.
if __name__ == '__main__':
    print('=== CLUSTER PAINTER ===')
    print('==> Move mouse to top left to stop!')
    print('Exit Message: ', main())
|
"""
Form types.
"""
# The decimal module is optional (absent before Python 2.4); the Decimal
# form type below is only defined when it is available.
try:
    import decimal
    haveDecimal = True
except ImportError:
    haveDecimal = False
from zope.interface import implements
from twisted.internet import defer, task
from formal import iformal, validation
class Type(object):
    """Base class for form field types.

    validate() runs every configured validator (possibly asynchronously via
    Deferreds) and fires with the validated value, substituting ``missing``
    for None on success.
    """
    implements( iformal.IType )
    # Name of the instance
    name = None
    # Value to use if no value entered
    missing = None
    # Instance cannot be changed
    immutable = False
    # List of validators to test the value against
    validators = ()

    def __init__(self, name=None, required=None, missing=None, immutable=None, validators=None):
        if name is not None:
            self.name = name
        if missing is not None:
            self.missing = missing
        if immutable is not None:
            self.immutable = immutable
        # Always copy into a fresh list so instances never share (or mutate)
        # the class-level default.
        if validators is not None:
            self.validators = list(validators)
        else:
            self.validators = list(self.validators)
        if required is None:
            required = self.required
        if required:
            self.validators.append(validation.RequiredValidator())

    def validate(self, value):
        """Run all validators against *value*; returns a Deferred firing with
        the validated value (or ``missing`` in place of None)."""
        dl = []
        for validator in self.validators:
            dl.append(defer.maybeDeferred(validator.validate, self, value))
        def _cbValidate(_, value):
            # All validators passed; apply the 'missing' fallback for None.
            if value is None:
                value = self.missing
            return value
        def _err(failure):
            # DeferredList wraps the first error in FirstError; unwrap it so
            # callers see the real validation failure.
            failure.trap(defer.FirstError)
            return failure.value.subFailure
        d = defer.DeferredList(dl, consumeErrors=True, fireOnOneErrback=True)
        d.addCallbacks(_cbValidate, _err, [value])
        return d

    def hasValidator(self, validatorType):
        """
        Check for the existence of a validator of a specific type.
        """
        for v in self.validators:
            if isinstance(v, validatorType):
                return True
        return False

    # 'required' is derived state: True iff a RequiredValidator is present.
    # The function is immediately consumed by property() and then shadowed.
    def required():
        def get(self):
            return self.hasValidator(validation.RequiredValidator)
        return get,
    required = property(*required())
class String(Type):
    """String form type; optionally strips surrounding whitespace and maps
    empty strings to None before delegating to Type.validate."""
    # Strip the value before validation
    strip = False

    def __init__(self, **k):
        wants_strip = k.pop('strip', None)
        if wants_strip is not None:
            self.strip = wants_strip
        super(String, self).__init__(**k)

    def validate(self, value):
        if self.strip and value is not None:
            value = value.strip()
        # Empty (falsy) strings validate as "no value entered".
        return super(String, self).validate(value or None)
# Marker types: behaviour comes entirely from Type; widgets/converters
# elsewhere dispatch on the concrete class.
class Integer(Type):
    pass

class Float(Type):
    pass

class Boolean(Type):
    pass

class Date(Type):
    pass

class Time(Type):
    pass
# Only defined when the optional decimal module imported successfully.
if haveDecimal:
    class Decimal(Type):
        """
        Forms type for Python 2.4's decimal.Decimal type.
        """
class Tuple(Type):
    """Fixed-length tuple of typed fields, validated element-wise."""
    # Separator used when the tuple is rendered/parsed as delimited text.
    delimiter = ","
    # Sequence of Type instances, one per tuple position.
    fields = None

    def __init__(self, **k):
        fields = k.pop("fields", None)
        delimiter = k.pop("delimiter", None)
        super(Tuple, self).__init__(**k)
        if fields is not None:
            self.fields = fields
        if delimiter is not None:
            self.delimiter = delimiter

    def validate(self, value):
        # Empty tuples validate as "no value entered".
        if not value:
            return super(Tuple, self).validate(None)
        def driver():
            # Validate items one at a time, cooperating with the reactor;
            # each validated value is appended to 'result' in order.
            for (f,v) in zip(self.fields, value):
                yield defer.maybeDeferred(f.validate, v).addCallback(result.append)
        # Map the items to their validated versions.
        result = []
        d = task.coiterate(driver())
        # Call the super class with the result.
        d.addCallback(lambda ignore: super(Tuple, self).validate(tuple(result)))
        return d
class Sequence(Type):
    """Variable-length sequence whose items all share a single item Type."""
    # Type of items in the sequence
    type = None

    def __init__(self, type=None, **k):
        super(Sequence, self).__init__(**k)
        if type is not None:
            self.type = type

    def validate(self, value):
        # Map empty sequence to None. Otherwise validate each item according to
        # type.
        if not value:
            d = defer.succeed(None)
        else:
            d = self._validateItems(value)
        # Return superclass's response
        return d.addCallback(super(Sequence, self).validate)

    def _validateItems(self, value):
        # Validate all items in parallel; yield validated results in order,
        # re-raising the first failure encountered.
        # NOTE(review): 'result' in the failure branch is a twisted Failure;
        # confirm that raising it directly behaves as intended here.
        def validated(response):
            for (success, result) in response:
                if not success:
                    raise result
                yield result
        d = defer.DeferredList([self.type.validate(item) for item in value],
                               consumeErrors=True)
        d.addCallback(validated)
        d.addCallback(list)
        return d
# Marker type for uploaded files; validation behaviour inherited from Type.
class File(Type):
    pass
class RichText:
    """
    A data structure for the RichTextType to use

    Pairs a markup type name (e.g. 'html') with its raw text content.
    """
    def __init__(self, type, value):
        self.type = type
        self.value = value

    def __repr__(self):
        # Truncate long content so reprs stay readable in logs.
        preview = self.value[:30] + ('...' if len(self.value) > 30 else '')
        return '<formal.types.RichText instance, (%s,"%s")>' % (self.type, preview)
class RichTextType(Type):
    """Forms type used for rich text (RichText value objects)."""

    def __init__(self, **kwds):
        strip = kwds.pop('strip', None)
        super(RichTextType, self).__init__(**kwds)
        # Normalise to a real boolean default.
        self.strip = strip or False

    def validate(self, value):
        # For the moment all the validation is against the content.
        # BUG FIX: value may legitimately be None (no value entered); the
        # previous code dereferenced value.value unconditionally and raised
        # AttributeError instead of letting Type.validate handle it.
        if value is not None:
            if self.strip:
                value.value = value.value.strip()
            if not value.value:
                value = None
        return super(RichTextType, self).validate(value)
# Public API of the module; Decimal is exported only when the optional
# decimal module is available.
__all__ = [
    'Boolean', 'Date', 'File', 'Float', 'Integer', 'Sequence', 'String',
    'Time', 'Tuple', 'RichTextType', 'RichText',
]
if haveDecimal:
    __all__.append('Decimal')
|
<reponame>scottwedge/OpenStack-Stein<filename>searchlight-6.0.0/searchlight/common/config.py
#!/usr/bin/env python
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Searchlight
"""
import logging
import os
import tempfile
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_middleware import cors
from oslo_policy import policy
from paste import deploy
from searchlight.i18n import _
from searchlight.version import version_info as version
# Options controlling which paste pipeline flavor and config file are used.
paste_deploy_opts = [
    cfg.StrOpt('flavor',
               help=_('Partial name of a pipeline in your paste configuration '
                      'file with the service name removed. For example, if '
                      'your paste section name is '
                      '[pipeline:searchlight-api-keystone] use the value '
                      '"keystone"')),
    cfg.StrOpt('api_paste_config', default="api-paste.ini",
               help=_('The API paste config file to use.')),
]

# General service options: API paging limits, pydev debug hooks and
# crypto/digest settings.
common_opts = [
    cfg.IntOpt('limit_param_default', default=25,
               help=_('Default value for the number of items returned by a '
                      'request if not specified explicitly in the request')),
    cfg.IntOpt('api_limit_max', default=1000,
               help=_('Maximum permissible number of items that could be '
                      'returned by a request')),
    cfg.HostAddressOpt('pydev_worker_debug_host',
                       help=_('The hostname/IP of the pydev '
                              'process listening for '
                              'debug connections')),
    cfg.IntOpt('pydev_worker_debug_port', default=5678,
               help=_('The port on which a pydev process is listening for '
                      'connections.')),
    cfg.StrOpt('metadata_encryption_key', secret=True,
               help=_('AES key for encrypting store \'location\' metadata. '
                      'This includes, if used, Swift or S3 credentials. '
                      'Should be set to a random string of length 16, 24 or '
                      '32 bytes')),
    cfg.StrOpt('digest_algorithm', default='sha1',
               help=_('Digest algorithm which will be used for digital '
                      'signature; the default is sha1 the default in Kilo '
                      'for a smooth upgrade process, and it will be updated '
                      'with sha256 in next release(L). Use the command '
                      '"openssl list-message-digest-algorithms" to get the '
                      'available algorithms supported by the version of '
                      'OpenSSL on the platform. Examples are "sha1", '
                      '"sha256", "sha512", etc.')),
]

CONF = cfg.CONF
CONF.register_opts(paste_deploy_opts, group='paste_deploy')
CONF.register_opts(common_opts)
# Instantiating the Enforcer registers oslo.policy options on CONF at
# import time (the instance itself is intentionally discarded).
policy.Enforcer(CONF)
def parse_args(args=None, usage=None, default_config_files=None):
    """Initialize the global CONF from CLI arguments and config files."""
    # Default oslo lock files to the system temp dir unless explicitly set.
    if "OSLO_LOCK_PATH" not in os.environ:
        lockutils.set_defaults(tempfile.gettempdir())
    CONF(args=args,
         project='searchlight',
         version=version.cached_version_string(),
         usage=usage,
         default_config_files=default_config_files)
def parse_cache_args(args=None):
    """Parse arguments for the searchlight-cache program, discovering its
    standard config file locations."""
    config_files = cfg.find_config_files(project='searchlight',
                                         prog='searchlight-cache')
    parse_args(args=args, default_config_files=config_files)
def _get_deployment_flavor(flavor=None):
"""
Retrieve the paste_deploy.flavor config item, formatted appropriately
for appending to the application name.
:param flavor: if specified, use this setting rather than the
paste_deploy.flavor configuration setting
"""
if not flavor:
flavor = CONF.paste_deploy.flavor
return '' if not flavor else ('-' + flavor)
def _get_deployment_config_file():
    """
    Retrieve the deployment_config_file config item, formatted as an
    absolute pathname. Returns None when the file cannot be located.
    """
    found = cfg.CONF.find_file(
        cfg.CONF.paste_deploy['api_paste_config'])
    return None if found is None else os.path.abspath(found)
def load_paste_app(app_name, flavor=None):
    """
    Builds and returns a WSGI app from a paste config file.

    We assume the last config file specified in the supplied ConfigOpts
    object is the paste config file, if conf_file is None.

    :param app_name: name of the application to load
    :param flavor: name of the variant of the application to load
    :raises RuntimeError when config file cannot be located or application
            cannot be loaded from config file
    """
    # append the deployment flavor to the application name,
    # in order to identify the appropriate paste pipeline
    app_name += _get_deployment_flavor(flavor)
    conf_file = _get_deployment_config_file()
    if conf_file is None:
        raise RuntimeError(_("Unable to locate config file"))
    try:
        logger = logging.getLogger(__name__)
        logger.debug("Loading %(app_name)s from %(conf_file)s",
                     {'conf_file': conf_file, 'app_name': app_name})
        app = deploy.loadapp("config:%s" % conf_file, name=app_name)
        # Log the options used when starting if we're in debug mode...
        if CONF.debug:
            CONF.log_opt_values(logger, logging.DEBUG)
        return app
    except (LookupError, ImportError) as e:
        # Translate paste's lookup/import problems into a RuntimeError with
        # full context so the caller can abort startup cleanly.
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(conf_file)s."
                 "\nGot: %(e)r") % {'app_name': app_name,
                                    'conf_file': conf_file,
                                    'e': e})
        logger.error(msg)
        raise RuntimeError(msg)
def set_config_defaults():
    """This method updates all configuration default values."""
    set_cors_middleware_defaults()
def set_cors_middleware_defaults():
    """Update default configuration options for oslo.middleware."""
    # CORS Defaults
    # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/
    cfg.set_defaults(cors.CORS_OPTS,
                     allow_headers=['X-Auth-Token',
                                    'X-OpenStack-Request-ID'],
                     expose_headers=['X-OpenStack-Request-ID'],
                     allow_methods=['GET',
                                    'POST']
                     )
|
"""
Pre-train expert for distiller
Author: <NAME> (https://github.com/vectominist)
"""
from easydict import EasyDict as edict
import yaml
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from pretrain.distiller.dataset import OnlineWaveDataset
from upstream.distiller.model import DistillerConfig, DistillerModel
def freeze_model(model):
    """Freeze all parameters in a model (disable gradient computation)."""
    for p in model.parameters():
        p.requires_grad_(False)
class UpstreamPretrainExpert(nn.Module):
"""
The Distiller pretrain expert
"""
def __init__(
self, datarc, upstream_config, device="cuda", multi_gpu=False, **kwargs
):
super().__init__()
self.datarc = datarc
self.device = device
self.multi_gpu = multi_gpu
if type(upstream_config) == str:
self.upstream_config = yaml.load(
open(upstream_config, "r"), Loader=yaml.FullLoader
)
print(
"[UpstreamPretrainExpert] - Using upstream config from:",
upstream_config,
)
elif type(upstream_config) == dict:
self.upstream_config = upstream_config
print(
"[UpstreamPretrainExpert] - Using upstream config from the previous experiment."
)
else:
raise ValueError
self._get_train_dataloader()
print("[UpstreamPretrainExpert] - Initializing model...")
model_config = DistillerConfig(self.upstream_config["distiller"])
self.model = DistillerForPretrain(
model_config, edict(self.upstream_config["teacher"])
)
if self.multi_gpu:
self.model = torch.nn.DataParallel(self.model)
print(
"[UpstreamPretrainExpert] - Multi-GPU training Enabled: "
+ str(torch.cuda.device_count())
)
print(
"[UpstreamPretrainExpert] - Number of parameters: "
+ str(sum(p.numel() for p in self.model.parameters() if p.requires_grad))
)
def _get_train_dataloader(self):
dataset = OnlineWaveDataset(
self.upstream_config["task"],
self.datarc["train_batch_size"],
target_level=self.upstream_config["audio"]["target_level"],
**self.datarc,
)
self.dataloader = DataLoader(
dataset,
batch_size=1, # for bucketing
shuffle=True,
num_workers=self.datarc["num_workers"],
drop_last=False,
pin_memory=True,
collate_fn=dataset.collate_fn,
)
# Interface
def load_model(self, all_states):
if self.multi_gpu:
self.model.module.distiller.load_state_dict(all_states["Distiller"])
else:
self.model.distiller.load_state_dict(all_states["Distiller"])
# Interface
def add_state_to_save(self, all_states):
all_states["Distiller"] = (
self.model.float().distiller.state_dict()
if not self.multi_gpu
else self.model.float().module.distiller.state_dict()
)
all_states["Config"] = self.upstream_config
return all_states
# Interface
def get_train_dataloader(self):
return self.dataloader
# Interface
def forward(self, data, records={}, global_step=0, log_step=1000, **kwargs):
"""
Args:
data:
[wave_input, pad_mask]
records:
defaultdict(list), by appending contents into records,
these contents can be averaged and logged on Tensorboard
later by self.log_records every log_step
Return:
loss
"""
wave_input, wave_orig, wave_len, pad_mask = data
wave_input = wave_input.to(self.device)
wave_len = wave_len.to(self.device)
pad_mask = pad_mask.type(wave_input.dtype).to(self.device)
loss, other_res = self.model(
wave_input,
wave_orig,
wave_len,
pad_mask,
return_other=global_step % log_step == 0,
)
if global_step % log_step == 0:
for key, value in other_res.items():
if isinstance(value, torch.Tensor):
value = float(value.mean().cpu().item())
records[key] = value
return loss, records
# interface
def on_before_zero_grad(self):
    """Hook invoked right before gradients are zeroed (interface); no-op here."""
    return None
# interface
def log_records(self, records, logger, prefix, global_step, **kwargs):
    """
    Write recorded values to Tensorboard (interface).

    Args:
        records:
            dict mapping names to float scalars or image-like tensors
        logger:
            Tensorboard SummaryWriter; tags are f'{prefix}name'
        prefix:
            indicates downstream and train/test, e.g. 'phone/train-'
        global_step:
            runner's global step, used as the x-axis value
    """
    for name, value in records.items():
        tag = f"{prefix}{name}"
        # Tensors with more than one dimension are treated as images;
        # plain floats become scalars. Anything else is ignored.
        if isinstance(value, torch.Tensor) and len(value.shape) > 1:
            logger.add_image(tag, value, global_step=global_step)
        elif isinstance(value, float):
            logger.add_scalar(tag, value, global_step=global_step)
class DistillerForPretrain(nn.Module):
    """
    Distiller for pretraining: a student DistillerModel trained to match
    hidden states of a frozen teacher (e.g. HuBERT / wav2vec 2.0).
    """

    def __init__(self, config: DistillerConfig, teacher_config: edict):
        super().__init__()
        self.config = config
        self.distiller = DistillerModel(config)
        self.teacher_config = teacher_config
        # Load the teacher upstream from torch.hub; it stays frozen below.
        teacher = torch.hub.load("s3prl/s3prl", teacher_config.model)
        if (
            teacher_config.model.find("hubert") >= 0
            or teacher_config.model.find("wav2vec2") >= 0
        ):
            # Layerdrop would make the teacher's hidden states stochastic,
            # which breaks deterministic layer-wise distillation targets.
            teacher.model.encoder.layerdrop = 0
            print("[DistillerForPretrain] - Disabled teacher's encoder layerdrop")
        # The student cannot distill more layers than the teacher exposes.
        assert self.distiller.n_tasks <= teacher_config.n_layers, (
            self.distiller.n_tasks,
            teacher_config.n_layers,
        )
        self.teacher = teacher
        freeze_model(self.teacher)
        print(
            "[DistillerForPretrain] - Using {} as teacher with {} layers".format(
                teacher_config.model, teacher_config.n_layers
            )
        )

        # Element-wise reconstruction loss; reduction is done manually in
        # compute_loss so per-layer means can be logged.
        if config.loss_type == "l1":
            self.loss_func = nn.L1Loss(reduction="none")
        elif config.loss_type == "l2":
            self.loss_func = nn.MSELoss(reduction="none")
        else:
            raise NotImplementedError(config.loss_type)

        # Weight of the auxiliary cosine-similarity loss; 0 disables it.
        self.cosine_loss = config.cosine_loss
        if self.cosine_loss > 0:
            print("[DistillerForPretrain] - Enabled cosine similarity loss.")

        if config.init_teacher_conv_layers:
            # Warm-start the student's convolutional front-end from the teacher.
            print(
                "[DistillerForPretrain] - "
                "Initializing feature extractor from teacher"
            )
            self.distiller.feature_extractor.load_state_dict(
                self.teacher.model.feature_extractor.state_dict()
            )
            if self.distiller.post_extract_proj is not None:
                self.distiller.post_extract_proj.load_state_dict(
                    self.teacher.model.post_extract_proj.state_dict()
                )

        if config.init_teacher_encoder_layers:
            # Warm-start the positional conv and the first encoder layers.
            print("[DistillerForPretrain] - " "Initializing encoder from teacher")
            self.distiller.encoder.pos_conv.load_state_dict(
                self.teacher.model.encoder.pos_conv.state_dict()
            )
            for l in range(config.encoder_layers):
                self.distiller.encoder.layers[l].load_state_dict(
                    self.teacher.model.encoder.layers[l].state_dict()
                )

    def forward(
        self,
        wave_input: torch.Tensor,
        wave_orig: list,
        wave_len: torch.Tensor,
        pad_mask: torch.Tensor,
        return_other: bool = False,
    ):
        """
        Forward function.
        Input:
            wave_input: FloatTensor (B x T_wave)
            wave_orig: List of FloatTensor
            wave_len: LongTensor (B)
            pad_mask: FloatTensor (B x T)
            return_other: Bool (returns other information for logging)
        """

        # Forward model
        feat, feat_final, pred, pad_mask = self.distiller(wave_input, pad_mask)

        with torch.no_grad():
            wave_orig = [wave.to(wave_input.device) for wave in wave_orig]
            with torch.cuda.amp.autocast(False):
                # Teacher targets are computed in full precision.
                teacher_hiddens = self.teacher(wave_orig)
            if self.config.task_emb_type == "none":
                # Single target layer, kept as B x 1 x T x D.
                teacher_hiddens = teacher_hiddens["hidden_states"][self.config.n_tasks]
                teacher_hiddens = teacher_hiddens.unsqueeze(1)
            else:
                if self.config.task_emb_type in ["expand-last", "hnet", "self-hidden"]:
                    # Distill only the configured subset of teacher layers.
                    teacher_hiddens = [
                        teacher_hiddens["hidden_states"][i]
                        for i in self.distiller.pred_layer_id
                    ]
                else:
                    teacher_hiddens = teacher_hiddens["hidden_states"][1:]
                teacher_hiddens = torch.stack(teacher_hiddens, dim=1)  # B x N x T x D

        # Compute all objectives
        (
            total_loss,
            rec_loss,
            rec_layer_loss,
            feat_pen,
            sim_loss,
            sim_layer_loss,
        ) = self.compute_loss(feat, pred, teacher_hiddens, return_other)

        if return_other:
            # Assemble per-layer diagnostics for Tensorboard (logging only).
            with torch.no_grad():
                other_res = {
                    "rec_loss": rec_loss,
                    "feat_pen": feat_pen,
                    "sim_loss": sim_loss,
                    "norm_feat_final": feat_final.pow(2).mean(),
                }
                teacher_norm = torch.abs(teacher_hiddens).mean((0, 2, 3))
                if self.config.task_emb_type == "none":
                    other_res[f"rec_l{self.config.n_tasks}"] = rec_layer_loss[0]
                    other_res[f"tar_norm_l{self.config.n_tasks}"] = teacher_norm[0]
                    if sim_layer_loss is not None:
                        other_res[f"sim_l{self.config.n_tasks}"] = sim_layer_loss[0]
                else:
                    for i in range(self.config.n_tasks):
                        # Layer ids are 1-based unless explicitly configured.
                        layer_id = i + 1
                        if self.config.task_emb_type in [
                            "expand-last",
                            "hnet",
                            "self-hidden",
                        ]:
                            layer_id = self.distiller.pred_layer_id[i]
                        other_res[f"rec_l{layer_id}"] = rec_layer_loss[i]
                        other_res[f"tar_norm_l{layer_id}"] = teacher_norm[i]
                        if sim_layer_loss is not None:
                            other_res[f"sim_l{layer_id}"] = sim_layer_loss[i]
                    if self.config.task_emb_type not in [
                        "expand-last",
                        "hnet",
                        "self-hidden",
                    ]:
                        other_res[
                            "norm_task_emb"
                        ] = self.distiller.task_embedding.weight.pow(2).mean()
        else:
            other_res = None

        return total_loss, other_res

    def compute_loss(self, feat, pred, target, return_other=False):
        """
        Computes loss.
        Inputs:
            feat: B x T x D
            pred: B x N x T x D
            target: B x N x T x D
        """

        # Reconstruction loss
        assert pred.shape == target.shape, (pred.shape, target.shape)
        rec_loss = self.loss_func(pred, target)  # B x N x T x D
        if return_other:
            with torch.no_grad():
                # Per-layer mean over batch/time/dim, for logging only.
                rec_layer_loss = rec_loss.mean((0, 2, 3))
        else:
            rec_layer_loss = None
        rec_loss = rec_loss.mean()

        # Cosine similarity loss
        if self.cosine_loss > 0:
            sim_loss = -F.logsigmoid(F.cosine_similarity(pred, target, dim=-1))
            # B x N x T
            if return_other:
                with torch.no_grad():
                    sim_layer_loss = sim_loss.mean((0, 2))
            else:
                sim_layer_loss = None
            sim_loss = sim_loss.mean()
        else:
            sim_loss = 0
            sim_layer_loss = None

        # Feature loss: L2 penalty on the extractor features.
        feat_pen = feat.float().pow(2).mean()

        total_loss = (
            rec_loss
            + feat_pen * self.config.feat_pen_loss
            + sim_loss * self.cosine_loss
        )

        return total_loss, rec_loss, rec_layer_loss, feat_pen, sim_loss, sim_layer_loss
|
<reponame>Novodes/notepad2
# Script to check that headers are in a consistent order
# Canonical header order is defined in a file, normally scripts/HeaderOrder.txt
import sys
import pathlib
def IsHeader(x):
    """Return True when *x* is an '#include'/'#import' line (dllimport excluded)."""
    line = x.strip()
    return (
        line.startswith("#")
        and ("include" in x or "import" in x)
        and "dllimport" not in x
    )
def HeaderFromIncludeLine(s):
    """Extract the header token from an include line.

    Matches \\s*#\\s*(include|import)\\s+\\S+\\s* : drop the '#', skip the
    7-character keyword, and strip surrounding whitespace.
    """
    after_hash = s.strip()[1:].strip()
    return after_hash[7:].strip()
def ExtractHeaders(file):
    """Return the header names from every include line of *file* (a Path)."""
    with file.open(encoding="utf-8") as infile:
        headers = []
        for line in infile:
            if IsHeader(line):
                headers.append(HeaderFromIncludeLine(line))
        return headers
def ExtractWithPrefix(file, prefix):
    """Return the stripped remainder of each line of *file* starting with *prefix*."""
    n = len(prefix)
    with file.open(encoding="utf-8") as infile:
        return [line.strip()[n:] for line in infile if line.startswith(prefix)]
def ExcludeName(name, excludes):
    """True when any exclusion substring occurs in *name*."""
    for exclude in excludes:
        if exclude in name:
            return True
    return False
def SortLike(incs, order):
    """Sort *incs* by each element's position in *order* (every element must appear)."""
    return sorted(incs, key=order.index)
# Directive prefixes recognized in HeaderOrder.txt (read via ExtractWithPrefix).
basePrefix = "//base:"        # root of the source tree, relative to the order file
sourcePrefix = "//source:"    # glob pattern selecting files to check
excludePrefix = "//exclude:"  # substring excluding matching file paths
def CheckFiles(headerOrderTxt):
    """Check each configured source file's include order against the canonical
    order in *headerOrderTxt*.

    Prints diagnostics for missing/out-of-order headers, writes a corrected
    ``<file>.ordered`` next to any out-of-order file, writes ``NewOrder.txt``
    when new headers were discovered, and finally lists headers that appear
    in the order file but in no source file.
    """
    headerOrderFile = pathlib.Path(headerOrderTxt).resolve()
    bases = ExtractWithPrefix(headerOrderFile, basePrefix)
    base = bases[0] if len(bases) > 0 else ".."
    orderDirectory = headerOrderFile.parent
    root = (orderDirectory / base).resolve()
    # Find all the source code files
    patterns = ExtractWithPrefix(headerOrderFile, sourcePrefix)
    excludes = ExtractWithPrefix(headerOrderFile, excludePrefix)
    filePaths = []
    for p in patterns:
        filePaths += root.glob(p)
    headerOrder = ExtractHeaders(headerOrderFile)
    originalOrder = headerOrder[:]
    orderedPaths = [p for p in sorted(filePaths) if not ExcludeName(str(p), excludes)]
    allIncs = set()
    for f in orderedPaths:
        #~ print("  File ", f.relative_to(root))
        incs = ExtractHeaders(f)
        allIncs = allIncs.union(set(incs))
        # Two-pointer walk: m over the master order, i over this file's
        # includes, merging unknown headers into the master list as we go.
        m = 0
        i = 0
        # Detect headers not in header order list and insert at OK position
        needs = []
        while i < len(incs):
            if m == len(headerOrder):
                # Master exhausted: everything left in this file is new.
                #~ print("**** extend", incs[i:])
                headerOrder.extend(incs[i:])
                needs.extend(incs[i:])
                break
            if headerOrder[m] == incs[i]:
                #~ print("equal", headerOrder[m])
                i += 1
                m += 1
            else:
                if headerOrder[m] not in incs:
                    # Master header unused by this file: skip it.
                    #~ print("skip", headerOrder[m])
                    m += 1
                elif incs[i] not in headerOrder:
                    # Unknown header: insert into the master at this spot.
                    #~ print(str(f) + ":1: Add master", incs[i])
                    headerOrder.insert(m, incs[i])
                    needs.append(incs[i])
                    i += 1
                    m += 1
                else:
                    # Both known but out of order here; handled below.
                    i += 1
        if needs:
            print(f"{f}:1: needs these headers:")
            for header in needs:
                print("#include " + header)
        # Detect out of order
        ordered = SortLike(incs, headerOrder)
        if incs != ordered:
            print(f"{f}:1: is out of order")
            fOrdered = pathlib.Path(str(f) + ".ordered")
            with fOrdered.open("w", encoding='utf-8') as headerOut:
                for header in ordered:
                    headerOut.write("#include " + header + "\n")
            print(f"{fOrdered}:1: is ordered")
    if headerOrder != originalOrder:
        # New headers were merged in: emit an updated canonical order file.
        newIncludes = set(headerOrder) - set(originalOrder)
        headerOrderNew = orderDirectory / "NewOrder.txt"
        print(f"{headerOrderFile}:1: changed to {headerOrderNew}")
        print(f" Added {', '.join(newIncludes)}.")
        with headerOrderNew.open("w", encoding='utf-8') as headerOut:
            for header in headerOrder:
                headerOut.write("#include " + header + "\n")
    unused = sorted(set(headerOrder) - allIncs)
    if unused:
        print("In HeaderOrder.txt but not used")
        print("\n".join(unused))
# Fix: guard the invocation so importing this module no longer runs the
# check as a side effect; behavior when executed as a script is unchanged.
if __name__ == "__main__":
    if len(sys.argv) > 1:
        CheckFiles(sys.argv[1])
    else:
        CheckFiles("HeaderOrder.txt")
|
"""
Test module for context subscriptions and notifications
"""
import unittest
from pydantic import ValidationError
from filip.clients.ngsi_v2 import ContextBrokerClient
from filip.models.ngsi_v2.subscriptions import \
Http, \
HttpCustom, \
Mqtt, \
MqttCustom, \
Notification, \
Subscription
from filip.models.base import FiwareHeader
from filip.utils.cleanup import clear_all, clean_test
from tests.config import settings
class TestSubscriptions(unittest.TestCase):
    """
    Test class for context broker models
    """

    def setUp(self) -> None:
        """
        Setup test data
        Returns:
            None
        """
        self.fiware_header = FiwareHeader(
            service=settings.FIWARE_SERVICE,
            service_path=settings.FIWARE_SERVICEPATH)
        self.http_url = "https://test.de:80"
        self.mqtt_url = "mqtt://test.de:1883"
        self.mqtt_topic = '/filip/testing'
        # Minimal valid notification payload reused across tests.
        self.notification = {
            "http":
            {
                "url": "http://localhost:1234"
            },
            "attrs": [
                "temperature",
                "humidity"
            ]
        }

    def test_notification_models(self):
        """
        Test notification models
        """
        # Test url field sub field validation
        with self.assertRaises(ValidationError):
            Http(url="brokenScheme://test.de:80")
        with self.assertRaises(ValidationError):
            HttpCustom(url="brokenScheme://test.de:80")
        with self.assertRaises(ValidationError):
            Mqtt(url="brokenScheme://test.de:1883",
                 topic='/testing')
        with self.assertRaises(ValidationError):
            # Valid scheme but invalid MQTT topic (contains a comma).
            Mqtt(url="mqtt://test.de:1883",
                 topic='/,t')
        httpCustom = HttpCustom(url=self.http_url)
        mqtt = Mqtt(url=self.mqtt_url,
                    topic=self.mqtt_topic)
        mqttCustom = MqttCustom(url=self.mqtt_url,
                                topic=self.mqtt_topic)

        # Test validator for conflicting fields: the notification already
        # carries an http channel, so assigning mqtt must be rejected.
        notification = Notification.parse_obj(self.notification)
        with self.assertRaises(ValidationError):
            notification.mqtt = httpCustom
        with self.assertRaises(ValidationError):
            notification.mqtt = mqtt
        with self.assertRaises(ValidationError):
            notification.mqtt = mqttCustom

    @clean_test(fiware_service=settings.FIWARE_SERVICE,
                fiware_servicepath=settings.FIWARE_SERVICEPATH,
                cb_url=settings.CB_URL)
    def test_subscription_models(self) -> None:
        """
        Test subscription models
        Returns:
            None
        """
        sub_dict = {
            "description": "One subscription to rule them all",
            "subject": {
                "entities": [
                    {
                        "idPattern": ".*",
                        "type": "Room"
                    }
                ],
                "condition": {
                    "attrs": [
                        "temperature"
                    ],
                    "expression": {
                        "q": "temperature>40"
                    }
                }
            },
            "notification": {
                "http": {
                    "url": "http://localhost:1234"
                },
                "attrs": [
                    "temperature",
                    "humidity"
                ]
            },
            "expires": "2030-04-05T14:00:00Z",
            "throttling": 5
        }
        sub = Subscription.parse_obj(sub_dict)
        fiware_header = FiwareHeader(service=settings.FIWARE_SERVICE,
                                     service_path=settings.FIWARE_SERVICEPATH)
        with ContextBrokerClient(
                url=settings.CB_URL,
                fiware_header=fiware_header) as client:
            sub_id = client.post_subscription(subscription=sub)
            sub_res = client.get_subscription(subscription_id=sub_id)

            # Recursively compare posted vs. retrieved subscription field by
            # field; values are stringified to tolerate type coercion by the
            # broker (e.g. dates, numbers).
            def compare_dicts(dict1: dict, dict2: dict):
                for key, value in dict1.items():
                    if isinstance(value, dict):
                        compare_dicts(value, dict2[key])
                    else:
                        self.assertEqual(str(value), str(dict2[key]))

            compare_dicts(sub.dict(exclude={'id'}),
                          sub_res.dict(exclude={'id'}))

    def tearDown(self) -> None:
        """
        Cleanup test server
        """
        clear_all(fiware_header=self.fiware_header,
                  cb_url=settings.CB_URL)
"""
Properties
==========
Every model in Experimentor has a set of properties that define their state. A camera has, for example, an exposure
time, a DAQ card has a delay between data points, and an Experiment holds global parameters, such as the number of
repetitions a measurement should take.
In many situations, the parameters are stored as a dictionary, mainly because they are easy to retrieve from a file on
the hard drive and to access from within the class. We want to keep that same approach, but adding extra features.
Features of Properties
----------------------
Each parameter stored on a property will have three values: new_value, value, old_value, which represent the value which
will be set, the value that is currently set and the value that was there before. In this way it is possible to just
update on the device only those values that need updating; it is also possible to revert to the previously known value.
Each value will also be marked with a flag to_update in case the value was changed, but not yet transmitted to the
device. This allows us to collect all the values we need, for example looping through a user interface, reading a config
file, and applying only those needed whenever desired.
The Properties have also another smart feature, achieved through linking. Linking means building a relationship between
the parameters stored within the class and the methods that need to be executed in order to get or set those values. In
the linking procedure, we can set only getter methods for read-only properties, or both methods. A general apply
function then allows to use the known methods to set the values that need to be updated to the device.
Future Roadmap
--------------
We can consider forcing methods to always act on properties defined as new/known/old in order to use that information as
a form of cache and validation strategy.
:license: MIT, see LICENSE for more details
:copyright: 2021 <NAME>
"""
import warnings
from typing import List
from experimentor.lib.log import get_logger
from experimentor.models import BaseModel
from experimentor.models.exceptions import LinkException, PropertyException
class Properties:
    """ Class to store the properties of models. It keeps track of changes in order to monitor whether a specific value
    needs to be updated. It also allows to keep track of what method should be triggered for each update.
    """

    def __init__(self, parent: BaseModel, **kwargs):
        self._parent = parent
        # name -> {'new_value', 'value', 'old_value', 'to_update'}
        self._properties = dict()
        # name -> [getter, setter]; see link(). None marks an unlinked slot.
        self._links = dict()
        self.logger = get_logger()
        if kwargs:
            for key, value in kwargs.items():
                self.__setitem__(key, value)

    def __setitem__(self, key, value):
        # Record *value* as the pending new_value and flag it for update;
        # the confirmed 'value' is only changed by upgrade().
        if key not in self._properties:
            self._properties.update({
                key:
                    {
                        'new_value': value,
                        'value': None,
                        'old_value': None,
                        'to_update': True
                    }
            })
        else:
            self._properties[key].update({
                'new_value': value,
                'to_update': True,
            })

    def __getitem__(self, item):
        # Integer indexing returns {name: value} for the item-th property.
        if isinstance(item, int):
            key = list(self._properties.keys())[item]
            return {key: self._properties[key]['value']}
        if item in self._properties:
            return self._properties[item]['value']
        # Model Properties declared on the parent but never set resolve to
        # None instead of raising.
        if item in self._parent._features:
            return None
        raise KeyError(f'Property {item} unknown')

    def all(self):
        """ Returns a dictionary with all the known values.

        Returns
        -------
        properties : dict
            All the known values
        """
        p = dict()
        for key, value in self._properties.items():
            if key:
                p.update({
                    key: value['value'],
                })
        return p

    def update(self, values: dict):
        """Updates the values in the same way the update method of a dictionary works. It, however, stores the values
        as a new value, it does not alter the values stored. For updating the proper values use :func:`self.upgrade`.
        After updating the values, use :func:`self.apply_all` to send the new values to the device.
        """
        for key, value in values.items():
            self.__setitem__(key, value)

    def upgrade(self, values, force=False):
        """This method actually overwrites the values stored in the properties. This method should be used only when the
        real values generated by a device are known. It will change the new values to None, it will set the value to
        value, and it will set the ``to_update`` flag to false.

        Parameters
        ----------
        values: dict
            Dictionary in the form {property: new_value}
        force: bool
            If force is set to True, it will create the missing properties instead of raising an exception.
        """
        for key, value in values.items():
            if key not in self._properties:
                if not force:
                    raise PropertyException(f'Trying to upgrade {key} but is not a listed property')
                # force=True: register the unknown property on the fly.
                self.__setitem__(key, value)
            self._properties[key].update({
                'new_value': None,
                'value': value,
                'to_update': False,
            })

    def fetch(self, prop):
        """ Fetches the desired property from the device, provided that a link is available. """
        if prop in self._links:
            getter = self._links[prop][0]
            if callable(getter):
                value = getter()
            else:
                # String link: read the attribute from the parent model.
                value = getattr(self._parent, getter)
            self.logger.debug(f'Fetched {prop} -> {value}')
            return value
        else:
            # It may be a Model Property that has not been linked yet
            if prop in self._parent._features:
                self._links.update({prop: [prop, prop]})
                return self.fetch(prop)
            self.logger.error(f'{prop} is not a valid property')
            raise KeyError(f'{prop} is not a valid property')

    def fetch_all(self):
        """ Fetches all the properties for which a link has been established and updates the value. This method does
        not alter the to_update flag, new_value, nor old_value.
        """
        self.logger.info(f'Fetching all properties of {self._parent}')
        # Union of explicitly linked keys and the parent's Model Properties.
        keys = {key for key in self._links} | {key for key in self._parent._features}
        for key in keys:
            value = self.fetch(key)
            self.upgrade({key: value}, force=True)

    def apply(self, property, force=False):
        """ Applies the new value to the property. This is provided that the property is marked as to_update, or
        forced to be updated.

        Parameters
        ----------
        property: str
            The string identifying the property
        force: bool (default: False)
            If set to true it will update the property on the device, regardless of whether it is marked as to_update
            or not.
        """
        # NOTE(review): the parameter name shadows the builtin 'property'.
        if property in self._links:
            if property in self._properties:
                property_value = self.get_property(property)
                if property_value['to_update'] or force:
                    setter = self._links[property][1]
                    if setter is not None:
                        property_value['old_value'] = property_value['value']
                        new_value = property_value['new_value']
                        if callable(setter):
                            value = setter(new_value)
                        else:
                            # String link: set the attribute on the parent.
                            self._parent.__setattr__(setter, new_value)
                            value = None
                        if value is None:
                            # The setter did not confirm a value: read it back.
                            value = self.fetch(property)
                        self.upgrade({property: value})
                    else:
                        self.logger.warning(f'Trying to change the value of {property}, but it is read-only')
                else:
                    self.logger.info(f'{property} will not be updated')
            else:
                raise PropertyException('Trying to update a property which is not registered')
        else:
            # The property may have been defined as a Model Property, we can add it to the links
            if property in self._parent._features:
                self._links.update({property: [property, property]})
                self.apply(property)
            else:
                raise LinkException(f'Trying to update {property}, but it is not linked to any setter method')

    def apply_all(self):
        """ Applies all changes marked as 'to_update', using the links to methods generated with :meth:~link
        """
        values_to_update = self.to_update()
        # NOTE(review): only the keys are used here; values are ignored.
        for key, values in values_to_update.items():
            self.apply(key)

    def get_property(self, prop):
        """Get the information of a given property, including the new value, value, old value and if it is marked as to
        be updated.

        Returns
        -------
        prop : dict
            The requested property as a dictionary
        """
        return self._properties[prop]

    def to_update(self):
        """Returns a dictionary containing all the properties marked to be updated.

        Returns
        -------
        props : dict
            all the properties that still need to be updated
        """
        props = {}
        for key, values in self._properties.items():
            if values['to_update']:
                props[key] = values
        return props

    def link(self, linking):
        """Link properties to methods for update and retrieve them.

        Parameters
        -----------
        linking : dict
            Dictionary in where information is stored as parameter=>[getter, setter], for example::

                linking = {'exposure_time': [self.get_exposure, self.set_exposure]}

            In this case, ``exposure_time`` is the property stored, while ``get_exposure`` is the method that will be
            called for getting the latest value, and set_exposure will be called to set the value. In case set_exposure
            returns something different from None, no extra call to get_exposure will be made.
        """
        for key, value in linking.items():
            if key in self._links and self._links[key] is not None:
                raise LinkException(f'That property is already linked to {self._links[key]}. Please, unlink first')
            if not isinstance(value, list):
                value = [value, None]
            else:
                if len(value) == 1:
                    value.append(None)
                elif len(value) > 2:
                    raise PropertyException(f'Properties only accept setters and getter, trying to link {key} with {len(value)} methods')
            # NOTE(review): getattr expects value[0]/value[1] to be attribute
            # *names* (strings), while the docstring example passes bound
            # methods — confirm which form callers actually use.
            getter = getattr(self._parent, value[0])
            getter = getter if callable(getter) else value[0]
            setter = getattr(self._parent, value[1]) if value[1] else None
            setter = setter if callable(setter) else value[1]
            self._links[key] = [getter, setter]

    def unlink(self, unlink_list):
        """ Unlinks the properties and the methods. This is just to prevent overwriting linkings under the hood and
        forcing the user to actively unlink before linking again.

        Parameters
        ----------
        unlink_list : list
            List containing the names of the properties to be unlinked.
        """
        for link in unlink_list:
            if link in self._links:
                # Entry kept (set to None) so link() may overwrite it later.
                self._links[link] = None
            else:
                warnings.warn('Unlinking a property which was not previously linked.')

    def autolink(self):
        """ Links the properties defined as :class:`~ModelProp` in the models using their setters and getters. """
        for prop_name, prop in self._parent._features.items():
            if prop.fset:
                self.link({
                    prop_name: [prop.fget.__name__, prop.fset.__name__]
                })
            else:
                # Read-only property: link only the getter.
                self.link({
                    prop_name: prop.fget.__name__
                })

    @classmethod
    def from_dict(cls, parent, data):
        """Create a Properties object from a dictionary, including the linking information for methods. The data has
        to be passed in the following form: {property: [value, getter, setter]}, where `getter` and `setter` are the
        methods used by :meth:~link.

        Parameters
        ----------
        parent :
            class to which the properties are attached
        data : dict
            Information on the values, getter and setter for each property
        """
        parameters = dict()
        links = dict()
        for key, values in data.items():
            parameters.update({
                key: values[0]
            })
            links.update({
                key: values[1:]
            })
        props = cls(parent, **parameters)
        props.link(links)
        return props

    def __repr__(self):
        return repr(self.all())
|
"""
Created by catzoo
Description: Discord.py role checks
"""
import os
import asqlite
import env_config
class NoDatabase(Exception):
    """Raised when Checks.connection is None (instance not created via Checks.create())."""
# noinspection PyRedundantParentheses
class Checks:
    """
    This is used for discord.py checks
    Use:
    - developer_check(ctx)
    - Only checks for guild owner / debug_id(s)
    - manager_check(ctx)
    - Checks for level 3 roles / user check
    - moderator_check(ctx)
    - Checks for level 2 roles / user check
    - user_check(ctx)
    - Checks for level 1 roles / user check
    This will store roles in SQLite databases (location depending on env_config)
    """

    def __init__(self):
        # Set by Checks.create(); None means the database is not connected yet.
        self.connection = None

    @classmethod
    async def create(cls):
        """Creates the connection for the class
        Not doing this in __init__ since its async"""
        self = Checks()
        location = f'{env_config.data_folder}/mod.db'
        if not os.path.exists(location):
            # First run: create the database file and the roles table.
            conn = await asqlite.connect(location)
            c = await conn.cursor()
            await c.execute("CREATE TABLE roles (role_id integer NOT NULL, level integer)")
        else:
            conn = await asqlite.connect(location)
        self.connection = conn
        return self

    async def get_cursor(self):
        """Created this for use for most functions
        But can be used to execute commands to the database if needed

        Raises NoDatabase when the instance was not built via create().
        """
        if self.connection is None:
            raise NoDatabase('Checks is not created!')
        return await self.connection.cursor()

    async def add_role(self, role_id, level):
        """Adds the role to the database.

        NOTE(review): no explicit commit is issued anywhere in this class —
        confirm the asqlite connection runs in autocommit mode.
        """
        c = await self.get_cursor()
        await c.execute("INSERT INTO roles VALUES (?,?)", (role_id, level))

    async def remove_role(self, role_id):
        """Removes the role from the database."""
        c = await self.get_cursor()
        # Fix: '(role_id)' is just a parenthesized int, not a tuple; DB-API
        # parameter binding requires a sequence, so bind as (role_id,).
        await c.execute("DELETE FROM roles WHERE role_id=?", (role_id,))

    async def get_role(self, role_id):
        """Returns the role from the database.
        Might return None if it doesn't exist"""
        c = await self.get_cursor()
        # Fix: bind the parameter as a one-element tuple (see remove_role).
        await c.execute("SELECT * FROM roles WHERE role_id=?", (role_id,))
        return await c.fetchone()

    async def get_all_roles(self):
        """Returns all the roles from the database
        Might return None if there aren't any"""
        c = await self.get_cursor()
        await c.execute("SELECT * FROM roles")
        return await c.fetchall()

    async def _role_check(self, role_id, level):
        """Checks if the role is in the database with correct level"""
        been_check = False
        role = await self.get_role(role_id)
        if role:
            # role is (role_id, level); a higher stored level also satisfies
            # lower-level checks.
            if role[1] >= level:
                been_check = True
        return been_check

    async def _user_check(self, ctx):
        """See if its the guild's owner or the developer"""
        been_check = False
        if ctx.author.id in env_config.debug_id:
            been_check = True
        elif ctx.author == ctx.guild.owner:
            been_check = True
        return been_check

    async def _main_check(self, ctx, level):
        """Uses both _role_check and _user_check"""
        allow = False  # saying if the check passed or not
        # NOTE(review): this query's result is never read; it is kept because
        # get_cursor() also validates that the connection exists.
        c = await self.get_cursor()
        await c.execute("SELECT * FROM roles")
        if await self._user_check(ctx):
            allow = True
        else:
            for r in ctx.author.roles:
                if await self._role_check(r.id, level):
                    allow = True
        return allow

    @staticmethod
    async def developer_check(ctx):
        """Highest level check.
        Only checks for the developer or guild owner"""
        self = await Checks.create()
        return await self._user_check(ctx)

    @staticmethod
    async def manager_check(ctx):
        """Level 3 of role / user checking"""
        self = await Checks.create()
        return await self._main_check(ctx, 3)

    @staticmethod
    async def moderator_check(ctx):
        """Level 2 of role / user checking"""
        self = await Checks.create()
        return await self._main_check(ctx, 2)

    @staticmethod
    async def user_check(ctx):
        """Level 1 of role / user checking"""
        self = await Checks.create()
        return await self._main_check(ctx, 1)
<filename>src/data_frame_creator.py
import datetime
import os
from src import management_departure_indexer
from src.tickers import TICKERS
# Absolute directory of this file; used to resolve input_data/ and data/ paths.
current_file_dir_path = os.path.dirname(os.path.realpath(__file__))
def _get_relevant_eps_data(filing_date, eps_list):
for eps_date, eps_surprise_percentage in eps_list:
if eps_date <= filing_date:
return eps_surprise_percentage, (filing_date - eps_date).days
raise ValueError
def _get_eps_list_from_file(ticker):
    """Parse input_data/Indexed_Eps/<ticker>.txt into [(date, surprise%)],
    sorted by date descending; rows with a non-numeric surprise are skipped."""
    eps_dir = os.path.join(current_file_dir_path, '..', 'input_data', 'Indexed_Eps')
    eps_path = os.path.join(eps_dir, ticker + '.txt')
    records = []
    with open(eps_path, 'r') as eps_file:
        for raw_line in eps_file:
            fields = raw_line.split(',')
            report_date = datetime.datetime.strptime(fields[0], "%Y%m%d").date()
            try:
                surprise = float(fields[1])
            except ValueError:
                continue
            records.append((report_date, surprise))
    return sorted(records, key=lambda rec: rec[0], reverse=True)
def _find_base_price(prices_dict, eps_date):
base_date = eps_date - datetime.timedelta(days=1)
days_back = 1
while prices_dict.get(base_date) is None:
base_date = base_date - datetime.timedelta(days=1)
days_back = days_back + 1
if days_back == 30:
break
return prices_dict.get(base_date)
def _find_price_at_horizon(prices_dict, eps_date, prediction_horizon_in_days):
base_date = eps_date + datetime.timedelta(days=prediction_horizon_in_days)
days_in_future = 5
while prices_dict.get(base_date) is None:
base_date = base_date + datetime.timedelta(days=1)
days_in_future = days_in_future + 1
if days_in_future == prediction_horizon_in_days + 30:
break
return prices_dict.get(base_date)
# Prediction horizons (in days) for the binary price-trend target columns.
horizons = [5, 30, 90, 180, 360, 720, 1080]

# Column layout of the generated data frame: identifiers, EPS features,
# per-filing sentiment features, management-departure flags, their running
# averages, and one Trend_<h> target column per horizon.
columns = ['Ticker',
           'Date',
           'Filing_Type',  # 0 for 10K, 1 for 8K
           'EPS_Surprise_Percentage',
           'Days_Since_EPS',
           'Polarity',
           'Subjectivity',
           'Negative',
           'Positive',
           'Uncertainty',
           'Litigious',
           'Constraining',
           'Superfluous',
           'Interesting',
           'Modal',
           'WordCount',
           'Cosine',
           'CEO_Departure',
           'CFO_Departure',
           'Avg_Polarity',
           'Avg_Subjectivity',
           'Avg_Negative',
           'Avg_Positive',
           'Avg_Litigious',
           'Avg_Constraining',
           'Avg_Superfluous',
           'Avg_Modal',
           'Avg_WordCount',
           'Avg_Cosine',
           'Avg_CEO_Departure',
           'Avg_CFO_Departure'
           ] + \
          ['Trend_' + str(horizon) for horizon in horizons]
def _get_prices_for_ticker(ticker):
    """Read input_data/price_history/<ticker>.csv and return two dicts
    mapping date -> open price and date -> close price."""
    history_dir = os.path.join(current_file_dir_path, '..', 'input_data', 'price_history')
    history_path = os.path.join(history_dir, ticker + '.csv')
    opens = {}
    closes = {}
    with open(history_path, 'r') as history_file:
        for row_number, row in enumerate(history_file, start=1):
            if row_number == 1:
                # Skip the CSV header row.
                continue
            fields = row.split(',')
            day = datetime.datetime.strptime(fields[0], "%Y-%m-%d").date()
            opens[day] = float(fields[1])
            closes[day] = float(fields[4])
    return opens, closes
def _get_specialized_sentiment(filing_number):
    """Parse data/Specialized_Sentiment/Specialized_Sentiment_<N>k.csv into
    {ticker: {filing_date: record}}.

    Each record is (ticker, filing_date, negative, positive, uncertainty,
    litigious, constraining, superfluous, interesting, modal, word_count).
    Rows are assumed grouped by ticker; a group is flushed whenever the
    ticker changes.
    """
    sentiment_file_path = os.path.join(current_file_dir_path, '..', 'data', 'Specialized_Sentiment',
                                       'Specialized_Sentiment_' + filing_number + 'k.csv')
    specialized_sentiments_per_ticker = {}
    line_number = 0
    last_ticker = ''
    ticker_sentiments_by_date = {}
    with open(sentiment_file_path, 'r') as sentiment_file:
        for line in sentiment_file:
            line_number = line_number + 1
            if line_number == 1:
                # Skip the CSV header row.
                continue
            line = line.strip()
            if line == '':
                continue
            splits = line.split(',')
            ticker = splits[0]
            if last_ticker != ticker:
                # Ticker changed: flush the previous ticker's group.
                if len(ticker_sentiments_by_date) > 0:
                    specialized_sentiments_per_ticker[last_ticker] = ticker_sentiments_by_date
                ticker_sentiments_by_date = {}
                last_ticker = ticker
            # The filing date is encoded in the file name column (YYYY-MM-DD.ext).
            file_name = splits[1]
            filing_date_str = file_name.split('.')[0]
            filing_date = datetime.datetime.strptime(filing_date_str, "%Y-%m-%d").date()
            negative_count = int(splits[2])
            positive_count = int(splits[3])
            uncertainty_count = int(splits[4])
            litigious_count = int(splits[5])
            constraining_count = int(splits[6])
            superfluous_count = int(splits[7])
            interesting_count = int(splits[8])
            modal_count = int(splits[9])
            word_count = int(splits[18])
            record = (ticker, filing_date, negative_count, positive_count, uncertainty_count, litigious_count,
                      constraining_count, superfluous_count, interesting_count, modal_count, word_count)
            ticker_sentiments_by_date[filing_date] = record
    # Fix: flush the final ticker's group; the in-loop flush only fires when
    # the ticker changes, so the last group was previously dropped.
    if len(ticker_sentiments_by_date) > 0:
        specialized_sentiments_per_ticker[last_ticker] = ticker_sentiments_by_date
    return specialized_sentiments_per_ticker
def _get_8k_specialized_sentiment():
    """Specialized sentiment parsed from the 8-K CSV."""
    return _get_specialized_sentiment('8')
def _get_10k_specialized_sentiment():
    """Specialized sentiment parsed from the 10-K CSV."""
    return _get_specialized_sentiment('10')
def _get_cosine_index_for_10k():
    """Parse data/Cosine_10K/cosine_10k.csv into {ticker: {filing_date: cosine}}.

    Rows are assumed grouped by ticker; a group is flushed whenever the
    ticker changes.
    """
    cosine_file_path = os.path.join(current_file_dir_path, '..', 'data', 'Cosine_10K', 'cosine_10k.csv')
    cosine_indices_per_ticker = {}
    line_number = 0
    last_ticker = ''
    cosine_index_by_date = {}
    with open(cosine_file_path, 'r') as cosine_file:
        for line in cosine_file:
            line_number = line_number + 1
            if line_number == 1:
                # Skip the CSV header row.
                continue
            line = line.strip()
            if line == '':
                continue
            splits = line.split(',')
            ticker = splits[0]
            if last_ticker != ticker:
                # Ticker changed: flush the previous ticker's group.
                if len(cosine_index_by_date) > 0:
                    cosine_indices_per_ticker[last_ticker] = cosine_index_by_date
                cosine_index_by_date = {}
                last_ticker = ticker
            # The filing date is encoded in the file name column (YYYY-MM-DD.ext).
            file_name = splits[1]
            filing_date_str = file_name.split('.')[0]
            filing_date = datetime.datetime.strptime(filing_date_str, "%Y-%m-%d").date()
            cosine_index = float(splits[2])
            cosine_index_by_date[filing_date] = cosine_index
    # Fix: flush the final ticker's group; the in-loop flush only fires when
    # the ticker changes, so the last group was previously dropped.
    if len(cosine_index_by_date) > 0:
        cosine_indices_per_ticker[last_ticker] = cosine_index_by_date
    return cosine_indices_per_ticker
def _get_filing_sentiment_for_ticker(ticker, filing_number):
    """Read a ticker's per-filing polarity/subjectivity CSV.

    Returns a list of (ticker, filing_date, polarity, subjectivity) tuples
    in file order; rows whose numeric fields fail to parse are skipped.
    """
    sentiment_file_path = os.path.join(
        current_file_dir_path, '..', 'data',
        'Sentiment_' + filing_number + 'K', ticker + '.csv')
    records = []
    with open(sentiment_file_path, 'r') as sentiment_file:
        for row_number, raw_line in enumerate(sentiment_file):
            if row_number == 0:
                continue  # header row
            fields = raw_line.split(',')
            filing_date = datetime.datetime.strptime(fields[0], "%Y-%m-%d").date()
            try:
                polarity = float(fields[1])
                subjectivity = float(fields[2])
            except ValueError:
                # Non-numeric sentiment values -- skip the row.
                continue
            records.append((ticker, filing_date, polarity, subjectivity))
    return records
def _get_10k_sentiment_for_ticker(ticker):
    """Return the 10-K polarity/subjectivity records for *ticker*."""
    return _get_filing_sentiment_for_ticker(ticker, '10')
def _get_8k_sentiment_for_ticker(ticker):
    """Return the 8-K polarity/subjectivity records for *ticker*."""
    return _get_filing_sentiment_for_ticker(ticker, '8')
def _get_min(sentiment_10k_list, sentiment_8k_list, sentiment_10k_index, sentiment_8k_index):
if sentiment_10k_index == len(sentiment_10k_list):
return sentiment_8k_list, sentiment_8k_index
if sentiment_8k_index == len(sentiment_8k_list):
return sentiment_10k_list, sentiment_10k_index
if sentiment_8k_list[sentiment_8k_index][1] <= sentiment_10k_list[sentiment_10k_index][1]:
return sentiment_8k_list, sentiment_8k_index
else:
return sentiment_10k_list, sentiment_10k_index
def _price_to_trend_bit(price_at_horizon, base_price):
price_trend_percentage = price_at_horizon / base_price
return 1 if price_trend_percentage >= 1 else 0
def _append_8k_row_for_ticker(data_frame_csv_file,
                              sentiment_8k_row,
                              eps_list_for_ticker,
                              open_prices_dict,
                              specialized_sentiment_8k_by_date,
                              departures_per_date,
                              averages_8k):
    """Write one 8-K row to the data-frame CSV and return updated averages.

    Returns None (writing nothing) when no EPS record or horizon price is
    available for the filing date; callers then keep the old averages.
    """
    filing_date = sentiment_8k_row[1]
    try:
        eps_surprise_percentage, days_since_eps = _get_relevant_eps_data(filing_date, eps_list_for_ticker)
    except ValueError:
        # No usable EPS record for this filing date -- skip the row.
        return
    specialized_sentiment = specialized_sentiment_8k_by_date[filing_date]
    try:
        ceo_departure, cfo_departure = departures_per_date[filing_date]
    except KeyError:
        # No recorded management change on this filing date.
        ceo_departure = 0
        cfo_departure = 0
    base_price = _find_base_price(open_prices_dict, filing_date)
    try:
        price_trends = [_price_to_trend_bit(_find_price_at_horizon(open_prices_dict, filing_date, horizon), base_price)
                        for horizon in horizons]
    except TypeError:
        # A horizon price lookup failed (e.g. returned None) -- skip the row.
        return
    # One CSV row; field order must match the module-level `columns` header.
    record = (sentiment_8k_row[0],  # ticker
              str(sentiment_8k_row[1]),  # date
              '1',  # 1 for 8k, 0 for 10k
              str(eps_surprise_percentage),  # eps_percentage
              str(days_since_eps),  # days since eps
              str(sentiment_8k_row[2]),  # polarity
              str(sentiment_8k_row[3]),  # subjectivity
              str(specialized_sentiment[2]),  # Negative
              str(specialized_sentiment[3]),  # Positive
              str(specialized_sentiment[4]),  # Uncertainty
              str(specialized_sentiment[5]),  # Litigious
              str(specialized_sentiment[6]),  # Constraining
              str(specialized_sentiment[7]),  # Superfluous
              str(specialized_sentiment[8]),  # Interesting
              str(specialized_sentiment[9]),  # Modal
              str(specialized_sentiment[10]),  # WordCount
              '0',  # cosine (10-K only)
              str(ceo_departure),  # ceo_departure
              str(cfo_departure),  # cfo_departure
              str(averages_8k[0]),  # Avg_Polarity
              str(averages_8k[1]),  # Avg_Subjectivity
              str(averages_8k[2]),  # Avg_Negative
              str(averages_8k[3]),  # Avg_Positive
              str(averages_8k[4]),  # Avg_Litigious
              str(averages_8k[5]),  # Avg_Constraining
              str(averages_8k[6]),  # Avg_Superfluous
              str(averages_8k[7]),  # Avg_Modal
              str(averages_8k[8]),  # Avg_WordCount
              '0',  # Avg_Cosine (10-K only)
              str(averages_8k[9]),  # Avg_CEO_departure
              str(averages_8k[10]),  # Avg_CFO_departure
              str(price_trends[0]),  # 5 days
              str(price_trends[1]),  # 30 days
              str(price_trends[2]),  # 90 days
              str(price_trends[3]),  # 180 days
              str(price_trends[4]),  # 360 days
              str(price_trends[5]),  # 720 days
              str(price_trends[6]),  # 1080 days
              )
    data_frame_csv_file.write(','.join(record) + '\n')
    # Exponentially-weighted running average: new = sample/2 + old/2.
    # NOTE(review): the labels below do not line up with the
    # specialized_sentiment tuple used in the record above -- index 4 is
    # Uncertainty (not Litigious) and index 8 is Interesting (not WordCount).
    # Looks like a copy-paste slip, but the `columns` header is defined
    # elsewhere; confirm which side is wrong before relying on the Avg_*
    # CSV columns.
    averages_8k = (
        (sentiment_8k_row[2] / 2) + (averages_8k[0] / 2),  # Avg_Polarity
        (sentiment_8k_row[3] / 2) + (averages_8k[1] / 2),  # Avg_Subjectivity
        (specialized_sentiment[2] / 2) + (averages_8k[2] / 2),  # Avg_Negative
        (specialized_sentiment[3] / 2) + (averages_8k[3] / 2),  # Avg_Positive
        (specialized_sentiment[4] / 2) + (averages_8k[4] / 2),  # Avg_Litigious
        (specialized_sentiment[5] / 2) + (averages_8k[5] / 2),  # Avg_Constraining
        (specialized_sentiment[6] / 2) + (averages_8k[6] / 2),  # Avg_Superfluous
        (specialized_sentiment[7] / 2) + (averages_8k[7] / 2),  # Avg_Modal
        (specialized_sentiment[8] / 2) + (averages_8k[8] / 2),  # Avg_WordCount
        (ceo_departure / 2) + (averages_8k[9] / 2),  # Avg_CEO_Departure
        (cfo_departure / 2) + (averages_8k[10] / 2)  # Avg_CFO_Departure
    )
    return averages_8k
def _append_10k_row_for_ticker(data_frame_csv_file,
                               sentiment_10k_row,
                               eps_list_for_ticker,
                               open_prices_dict,
                               specialized_sentiment_10k_by_date,
                               cosine_indices,
                               averages_10k):
    """Write one 10-K row to the data-frame CSV and return updated averages.

    Returns None (writing nothing) when no EPS record or horizon price is
    available for the filing date; callers then keep the old averages.
    """
    filing_date = sentiment_10k_row[1]
    try:
        eps_surprise_percentage, days_since_eps = _get_relevant_eps_data(filing_date, eps_list_for_ticker)
    except ValueError:
        # No usable EPS record for this filing date -- skip the row.
        return
    specialized_sentiment = specialized_sentiment_10k_by_date[filing_date]
    base_price = _find_base_price(open_prices_dict, filing_date)
    try:
        price_trends = [_price_to_trend_bit(_find_price_at_horizon(open_prices_dict, filing_date, horizon), base_price)
                        for horizon in horizons]
    except TypeError:
        # A horizon price lookup failed (e.g. returned None) -- skip the row.
        return
    # One CSV row; field order must match the module-level `columns` header.
    record = (sentiment_10k_row[0],  # ticker
              str(sentiment_10k_row[1]),  # date
              '0',  # 1 for 8k, 0 for 10k
              str(eps_surprise_percentage),  # eps_percentage
              str(days_since_eps),  # days since eps
              str(sentiment_10k_row[2]),  # polarity
              str(sentiment_10k_row[3]),  # subjectivity
              str(specialized_sentiment[2]),  # Negative
              str(specialized_sentiment[3]),  # Positive
              str(specialized_sentiment[4]),  # Uncertainty
              str(specialized_sentiment[5]),  # Litigious
              str(specialized_sentiment[6]),  # Constraining
              str(specialized_sentiment[7]),  # Superfluous
              str(specialized_sentiment[8]),  # Interesting
              str(specialized_sentiment[9]),  # Modal
              str(specialized_sentiment[10]),  # WordCount
              str(cosine_indices[filing_date]),  # cosine
              '0',  # ceo_departure (8-K only)
              '0',  # cfo_departure (8-K only)
              str(averages_10k[0]),  # Avg_Polarity
              str(averages_10k[1]),  # Avg_Subjectivity
              str(averages_10k[2]),  # Avg_Negative
              str(averages_10k[3]),  # Avg_Positive
              str(averages_10k[4]),  # Avg_Litigious
              str(averages_10k[5]),  # Avg_Constraining
              str(averages_10k[6]),  # Avg_Superfluous
              str(averages_10k[7]),  # Avg_Modal
              str(averages_10k[8]),  # Avg_WordCount
              str(averages_10k[9]),  # Avg_Cosine
              '0',  # Avg_CEO_Departure (8-K only)
              '0',  # Avg_CFO_Departure (8-K only)
              str(price_trends[0]),  # 5 days
              str(price_trends[1]),  # 30 days
              str(price_trends[2]),  # 90 days
              str(price_trends[3]),  # 180 days
              str(price_trends[4]),  # 360 days
              str(price_trends[5]),  # 720 days
              str(price_trends[6]),  # 1080 days
              )
    data_frame_csv_file.write(','.join(record) + '\n')
    # Exponentially-weighted running average: new = sample/2 + old/2.
    # NOTE(review): as in the 8-K variant, the labels below do not line up
    # with the specialized_sentiment tuple (index 4 is Uncertainty, index 8
    # is Interesting); confirm against the `columns` header.
    averages_10k = (
        (sentiment_10k_row[2] / 2) + (averages_10k[0] / 2),  # Avg_Polarity
        (sentiment_10k_row[3] / 2) + (averages_10k[1] / 2),  # Avg_Subjectivity
        (specialized_sentiment[2] / 2) + (averages_10k[2] / 2),  # Avg_Negative
        (specialized_sentiment[3] / 2) + (averages_10k[3] / 2),  # Avg_Positive
        (specialized_sentiment[4] / 2) + (averages_10k[4] / 2),  # Avg_Litigious
        (specialized_sentiment[5] / 2) + (averages_10k[5] / 2),  # Avg_Constraining
        (specialized_sentiment[6] / 2) + (averages_10k[6] / 2),  # Avg_Superfluous
        (specialized_sentiment[7] / 2) + (averages_10k[7] / 2),  # Avg_Modal
        (specialized_sentiment[8] / 2) + (averages_10k[8] / 2),  # Avg_WordCount
        # BUG FIX: the cosine running average previously folded in
        # averages_10k[8] (the Avg_WordCount slot); the old cosine average
        # lives in slot 9 (cf. the averages_10k init and the record above).
        (cosine_indices[filing_date] / 2) + (averages_10k[9] / 2)  # Avg_Cosine
    )
    return averages_10k
def _append_rows_for_ticker(data_frame_csv_file,
                            ticker,
                            specialized_sentiment_10k_per_ticker,
                            specialized_sentiment_8k_per_ticker,
                            cosine_indices_per_ticker,
                            departures_per_ticker):
    """Merge a ticker's 8-K and 10-K filings in date order and append one
    CSV row per pre-2010 filing to data_frame_csv_file.

    Running Avg_* features are carried forward across rows; a row appender
    returning None (skipped row) leaves the averages untouched.
    """
    try:
        sentiment_10k_list = _get_10k_sentiment_for_ticker(ticker)
    except FileNotFoundError:
        # Some tickers have no 10-K sentiment file at all.
        sentiment_10k_list = []
    sentiment_8k_list = _get_8k_sentiment_for_ticker(ticker)
    eps_list_for_ticker = _get_eps_list_from_file(ticker)
    open_prices_dict, close_prices_dict = _get_prices_for_ticker(ticker)
    sentiment_10k_index = 0
    sentiment_8k_index = 0
    averages_10k = (
        0,  # Avg_Polarity
        0,  # Avg_Subjectivity
        0,  # Avg_Negative
        0,  # Avg_Positive
        0,  # Avg_Litigious
        0,  # Avg_Constraining
        0,  # Avg_Superfluous
        0,  # Avg_Modal
        0,  # Avg_WordCount
        0   # Avg_Cosine
    )
    averages_8k = (
        0,  # Avg_Polarity
        0,  # Avg_Subjectivity
        0,  # Avg_Negative
        0,  # Avg_Positive
        0,  # Avg_Litigious
        0,  # Avg_Constraining
        0,  # Avg_Superfluous
        0,  # Avg_Modal
        0,  # Avg_WordCount
        0,  # Avg_CEO_Departure
        0   # Avg_CFO_Departure
    )
    cutoff_date = datetime.date(2010, 1, 1)
    # BUG FIX: the two range checks used to be joined with `and`, which
    # stopped the merge as soon as EITHER list ran out (or hit the cutoff),
    # silently dropping the remainder of the other list. `_get_min` already
    # handles an exhausted side -- under `and` those branches were dead
    # code -- so `or` is the intended combinator. The short-circuiting
    # bounds checks keep the date lookups safe.
    while ((sentiment_10k_index < len(sentiment_10k_list) and
            sentiment_10k_list[sentiment_10k_index][1] < cutoff_date)
           or
           (sentiment_8k_index < len(sentiment_8k_list) and
            sentiment_8k_list[sentiment_8k_index][1] < cutoff_date)):
        min_list, min_index = _get_min(sentiment_10k_list, sentiment_8k_list, sentiment_10k_index, sentiment_8k_index)
        # `is` (not `==`): _get_min returns one of the two list objects.
        if min_list is sentiment_8k_list:
            if specialized_sentiment_8k_per_ticker.get(ticker) is not None:
                retval = _append_8k_row_for_ticker(data_frame_csv_file,
                                                   sentiment_8k_list[sentiment_8k_index],
                                                   eps_list_for_ticker,
                                                   open_prices_dict,
                                                   specialized_sentiment_8k_per_ticker[ticker],
                                                   # Missing ticker == no recorded departures.
                                                   departures_per_ticker.get(ticker, {}),
                                                   averages_8k)
                if retval is not None:
                    averages_8k = retval
            sentiment_8k_index += 1
        else:
            if specialized_sentiment_10k_per_ticker.get(ticker) is not None:
                retval = _append_10k_row_for_ticker(data_frame_csv_file,
                                                    sentiment_10k_list[sentiment_10k_index],
                                                    eps_list_for_ticker,
                                                    open_prices_dict,
                                                    specialized_sentiment_10k_per_ticker[ticker],
                                                    cosine_indices_per_ticker[ticker],
                                                    averages_10k)
                if retval is not None:
                    averages_10k = retval
            sentiment_10k_index += 1
def create_data_frame_csv_file(data_frame_csv_file_path):
    """Build the modelling data-frame CSV: a header line followed by one
    row per filing for every ticker in TICKERS.
    """
    # Load all per-ticker feature sources once, up front.
    specialized_sentiment_10k = _get_10k_specialized_sentiment()
    specialized_sentiment_8k = _get_8k_specialized_sentiment()
    cosine_indices = _get_cosine_index_for_10k()
    departures = management_departure_indexer.departures_from_file()
    with open(data_frame_csv_file_path, 'w') as data_frame_csv_file:
        data_frame_csv_file.write(','.join(columns) + '\n')
        for ticker in TICKERS:
            print('Creating data frame for:' + ticker + '...')
            _append_rows_for_ticker(data_frame_csv_file,
                                    ticker,
                                    specialized_sentiment_10k,
                                    specialized_sentiment_8k,
                                    cosine_indices,
                                    departures)
            print('Done.')
if __name__ == '__main__':
    # Resolve all data paths relative to this script's location so it can
    # be run from any working directory.
    # NOTE(review): current_file_dir_path is also read by the helper
    # functions above, so this module only works when run as a script.
    current_file_dir_path = os.path.dirname(os.path.realpath(__file__))
    data_frame_dir = os.path.join(current_file_dir_path, '..', 'data', 'Data_Frames')
    data_frame_csv_file_path = os.path.join(data_frame_dir, 'relations_cosine_specialized_sentiment_1080.csv')
    if not os.path.exists(data_frame_dir):
        os.makedirs(data_frame_dir)
    create_data_frame_csv_file(data_frame_csv_file_path)
|
<filename>old_scripts/relative_with_interpolation.py
import numpy as np
import cv2
import matplotlib.pyplot as plt

# Python 2 script: track a rope's height at evenly spaced columns of a
# video and embed the per-frame shapes with Isomap.
num_points_to_track = 200   # columns sampled per frame
x_coord_start = 200         # leftmost sampled x coordinate
x_coord_stop = 1720         # rightmost sampled x coordinate
frame_list = []             # downscaled copy of each frame (for display)
manifold_data = []          # per-frame vector of rope heights
show_video_images = False   # set True to preview every 10th frame
cap = cv2.VideoCapture("data/rope_two_hands.mp4")
if not cap.isOpened():
    print "Error opening video stream or file"
def getRedHeight(image, x):
    """Return the row index (y) where column *x* of a BGR image is most
    red-dominant.

    Red dominance is min(R-B, R-G) clamped at zero; the argmax over the
    column is returned (0 if no pixel is red-dominant).
    """
    image = np.swapaxes(image, 0, 1)
    # BUG FIX: cast to a signed type before subtracting. Video frames are
    # uint8, and uint8 subtraction wraps modulo 256, so negative
    # differences became large positives and the `arr < 0` clamp below
    # could never fire -- blue/green-dominant pixels scored highly.
    bgr_arr = image[x, :].astype(int)
    r_b_arr = bgr_arr[:, 2] - bgr_arr[:, 0]
    r_g_arr = bgr_arr[:, 2] - bgr_arr[:, 1]
    arr = np.minimum(r_b_arr, r_g_arr)
    arr[arr < 0] = 0
    return np.argmax(arr)
frame_num = 0
# Read the whole video, extracting one rope-height vector per frame.
while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        frame_list.append(cv2.resize(frame, (320, 180)))
        frame_num = frame_num + 1
        print "Frame %d" % frame_num
        # Sample the rope height at evenly spaced columns.
        x_coords = np.linspace(x_coord_start, x_coord_stop, num=num_points_to_track, endpoint=True, dtype=int)
        y_coords = np.array([getRedHeight(frame, x) for x in x_coords])
        # Normalise so the left end of the rope is the origin.
        offset = y_coords[0]
        y_coords = y_coords - offset
        manifold_data.append(y_coords)
        # Optionally preview every 10th frame next to its extracted curve.
        if (frame_num - 1) % 10 == 0 and show_video_images:
            fig, axes = plt.subplots(2, 1)
            # OpenCV frames are BGR; swap channels to RGB for matplotlib.
            frame_color_corrected = np.copy(frame)
            frame_color_corrected[:,:,[0,1,2]] = frame[:,:,[2,1,0]]
            axes[0].imshow(frame_color_corrected)
            axes[1].scatter(x_coords, 1080-y_coords)
            axes[1].set_xlim((0, 1920))
            axes[1].set_ylim((0 + offset, 1080 + offset))
            axes[1].set_aspect("equal")
            plt.show()
    else:
        # End of stream (or read failure) -- stop capturing.
        break
cap.release()
from sklearn.manifold import Isomap

# Embed the per-frame rope shapes into 2-D.
embedding = Isomap(n_neighbors=12, n_components=2).fit_transform(manifold_data)
# NOTE(review): `colors` is computed but never used below -- presumably it
# was meant to colour the scatter plot by frame order.
colors = np.array(range(len(embedding)), dtype=float)/float(len(embedding))
fig, axes = plt.subplots(1, 2)
points = axes[0].scatter(embedding[:,0], embedding[:,1], c="grey", s=20**2)
# Remember the autoscaled limits so redraws in hover() keep the same view.
xlim = axes[0].get_xlim()
ylim = axes[0].get_ylim()
mfd_min = np.min(manifold_data)
mfd_max = np.max(manifold_data)

########################
# Set up interpolation #
########################
from scipy.spatial import Delaunay
# "QJ" joggles the input so Qhull copes with degenerate point sets.
interpolator = Delaunay(embedding, qhull_options="QJ")

################
# Display Plot #
################
def hover(event):
    """Mouse-move callback: locate the cursor inside the Delaunay
    triangulation of the embedding, highlight the enclosing simplex, and
    plot the rope curve interpolated (barycentrically) from the simplex's
    three frames.

    Reads the module-level interpolator, embedding, manifold_data, axes,
    fig, xlim/ylim and mfd_min/mfd_max.
    """
    # Dead code retained from an earlier nearest-point display mode:
    # if points.contains(event)[0]:
    #     # print points.contains(event)[1]["ind"]
    #     idx_list = points.contains(event)[1]["ind"]
    #     idx = idx_list[0]
    #     axes[0].clear()
    #     axes[0].scatter(embedding[:,0], embedding[:,1], c="grey", s=20**2)
    #     axes[0].scatter([embedding[idx,0]], [embedding[idx,1]], c="blue", s=20**2)
    #     axes[0].set_xlim(xlim)
    #     axes[0].set_ylim(ylim)
    #     if disp_mode == "image":
    #         frame = frame_list[idx]
    #         frame_color_corrected = np.copy(frame)
    #         frame_color_corrected[:,:,[0,1,2]] = frame[:,:,[2,1,0]]
    #         axes[1].imshow(frame_color_corrected)
    #     elif disp_mode == "manifold":
    #         axes[1].clear()
    #         axes[1].set_ylim((mfd_min, mfd_max))
    #         axes[1].plot(manifold_data[idx])
    #     fig.canvas.draw_idle()
    xy = np.array([event.xdata, event.ydata])
    # Check if xy is in the convex hull (find_simplex returns -1 outside).
    simplex_num = interpolator.find_simplex(xy)
    # print "xy", xy, "\tsimplex_num", simplex_num
    if simplex_num != -1:
        # Get the simplex (triangle) enclosing the cursor.
        simplex_indices = interpolator.simplices[simplex_num]
        # print "simplex_indices", simplex_indices
        simplex = interpolator.points[simplex_indices]
        # print "simplex", simplex
        # Display the simplex vertices and edges.
        axes[0].clear()
        axes[0].scatter(embedding[:,0], embedding[:,1], c="grey", s=20**2)
        axes[0].scatter(embedding[simplex_indices,0], embedding[simplex_indices,1], c="blue", s=20**2)
        axes[0].plot(embedding[simplex_indices[[0,1]],0], embedding[simplex_indices[[0,1]],1], c="blue", linewidth=3)
        axes[0].plot(embedding[simplex_indices[[1,2]],0], embedding[simplex_indices[[1,2]],1], c="blue", linewidth=3)
        axes[0].plot(embedding[simplex_indices[[0,2]],0], embedding[simplex_indices[[0,2]],1], c="blue", linewidth=3)
        axes[0].set_xlim(xlim)
        axes[0].set_ylim(ylim)
        # Compute barycentric coordinates by solving the 3x3 affine system
        # [simplex^T; 1 1 1] b = [xy; 1].
        A = np.vstack((simplex.T, np.ones((1, 3))))
        b = np.vstack((xy.reshape(-1, 1), np.ones((1, 1))))
        b_coords = np.linalg.solve(A, b)
        b = np.asarray(b_coords).flatten()
        print "b_coords", b, np.sum(b_coords)
        # Interpolate the deformation: weighted sum of the three vertex
        # frames' rope curves, weights = barycentric coordinates.
        mult_vec = np.zeros(len(manifold_data))
        mult_vec[simplex_indices] = b
        curve = np.sum(np.matmul(np.diag(mult_vec), manifold_data), axis=0)
        # print "curve", curve
        axes[1].clear()
        axes[1].set_ylim((mfd_min, mfd_max))
        axes[1].plot(curve)
        fig.canvas.draw_idle()
# Wire the hover callback and show the figure maximised.
fig.canvas.mpl_connect('motion_notify_event', hover)
mng = plt.get_current_fig_manager()
# NOTE(review): mng.window.maxsize() is backend-specific (TkAgg).
mng.resize(*mng.window.maxsize())
plt.show()
import unittest
import os
import sys
from mx.DateTime import DateTime
from StringIO import StringIO
import csv
import shutil
# just to make sure we can launch it
# from the top folder
curdir = os.path.dirname(__file__)
topdir = os.path.realpath(os.path.split(curdir)[0])
# Scratch files the tests create (and tearDown removes).
bz2_file = os.path.join(curdir, 'stats.bz2')
stats_file = os.path.join(curdir, '2008-11-18.bz2')
if topdir not in sys.path:
    sys.path.insert(0, topdir)
from apache_reader import ApacheLogReader
from apache_count import main
from apache_stats import ApacheLocalStats
from apache_stats import ApacheDistantLocalStats
# Fixture files/folders shipped alongside this test module.
log_sample = os.path.join(curdir, 'pypi.access.log.1.bz2')
config_file = os.path.join(curdir, 'pypi.cfg')
mirror = os.path.join(curdir, 'mirror')
mirrors = os.path.join(curdir, 'mirrors')
local_stats = os.path.join(curdir, 'local-stats')
global_stats = os.path.join(curdir, 'global-stats')
import apache_count
class FakeCursor(object):
def __init__(self):
self.res = None
self._data = [('Package', 'Package.tgz', 2), ]
self._index = 0
def execute(self, query, args=None):
if query.startswith('select value from timestamps'):
self.res = [[DateTime(1900, 01, 01)]]
elif query.startswith('select downloads'):
self.res = [[0]]
def fetchone(self):
return self.res[0]
def fetchall(self):
mirror = ['http://somewhere.com', 'xx', 'xxx', 'here']
return [mirror]
def __iter__(self):
return self
def next(self):
try:
try:
return self._data[self._index]
except IndexError:
raise StopIteration
finally:
self._index += 1
class FakeConn(object):
    """Stand-in for a DB connection; commit is a no-op."""
    def commit(self):
        pass
def _get_cursor(config):
    """Return a fake (connection, cursor) pair; *config* is ignored."""
    return FakeConn(), FakeCursor()

# Monkey-patch so apache_count.main() never touches a real database.
apache_count.get_cursor = _get_cursor
class TestApacheReader(unittest.TestCase):
    """Integration-style tests for the apache log reader and stats builders.

    Python 2 era code: uses the assertEquals/assert_ aliases, urllib2 and
    mx.DateTime; the fixtures (pypi.access.log.1.bz2, pypi.cfg) live next
    to this module.
    """

    def setUp(self):
        # Make sure the stats output folders exist before each test.
        for folder in (local_stats, global_stats):
            if os.path.exists(folder):
                continue
            os.mkdir(folder)

    def tearDown(self):
        # Remove every artefact a test may have produced.
        if os.path.exists(bz2_file):
            os.remove(bz2_file)
        if os.path.exists(stats_file):
            os.remove(stats_file)
        if os.path.exists(mirror):
            shutil.rmtree(mirror)
        if os.path.exists(mirrors):
            shutil.rmtree(mirrors)
        for folder in (local_stats, global_stats):
            shutil.rmtree(folder)

    def _test_useragent(self):
        # Disabled (leading underscore): spot-check of a raw useragent field.
        logs = ApacheLogReader(log_sample)
        logs = list(logs)
        self.assertEquals(logs[45]['useragent'],
                          'Python-urllib/2.5 setuptools/0.6c7')

    def test_apache_count(self):
        # creating stats so they can be used by
        # main() as distant stats
        stats = ApacheLocalStats()
        stats.build_monthly_stats(2008, 11, log_sample,
                                  bz2_file, compression='bz2')
        # now patching urlopen so it returns the built stats
        import urllib2
        old_open = urllib2.urlopen
        def _open(url):
            class FakeUrl(object):
                def read(self):
                    return open(bz2_file).read()
            return FakeUrl()
        urllib2.urlopen = _open
        # just to make sure it doesn't break
        try:
            main(config_file, log_sample)
        finally:
            # always restore the real urlopen
            urllib2.urlopen = old_open

    def test_build_daily_stats(self):
        stats = ApacheLocalStats()
        results = StringIO()
        stats.build_daily_stats(2008, 11, 18, log_sample, results)
        results.seek(0)
        reader = csv.reader(results)
        res = list(reader)
        # first, make sure all entries have values
        for line in res:
            self.assertEquals(len(line), 4)
            self.assert_('' not in line)
        # then spot-check known rows and the total count for this fixture
        self.assertEquals(res[0],
            ['4Suite-XML', '4Suite-XML-1.0.1.tar.bz2', 'Mozilla/5.0', '1'])
        self.assertEquals(res[456],
            ['PasteScript', 'PasteScript-0.3.1.tar.gz', 'Mozilla/5.0', '1'])
        self.assertEquals(res[486],
            ['Phebe', 'Phebe-0.1.1-py2.5.egg.asc', 'Mozilla/5.0', '1'])
        self.assertEquals(len(res), 8953)

    def test_user_agent(self):
        # get_simplified_ua should collapse raw UA strings to short tokens.
        logs = ApacheLogReader(log_sample)
        res = logs.get_simplified_ua('Python-urllib/2.5 setuptools/0.6c7')
        self.assertEquals(res, 'setuptools/0.6c7')
        res = logs.get_simplified_ua('Python-urllib/2.4')
        self.assertEquals(res, 'Python-urllib/2.4')
        safari = ('Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_5; it-it) '
                  'AppleWebKit/525.26.2 (KHTML, like Gecko) Version/3.2 Safari/525.26.12')
        res = logs.get_simplified_ua(safari)
        self.assertEquals(res, 'Safari/3.2')
        msn = 'msnbot/1.1 (+http://search.msn.com/msnbot.htm)'
        res = logs.get_simplified_ua(msn)
        self.assertEquals(res, 'msnbot/1.1')
        nokia = ('Nokia6820/2.0 (4.83) Profile/MIDP-1.0 Configuration/CLDC-1.0 '
                 '(compatible; Googlebot-Mobile/2.1; +http://www.google.com/bot.html)')
        res = logs.get_simplified_ua(nokia)
        self.assertEquals(res, 'Googlebot-Mobile/2.1')
        # firefox 2 or 3
        ff = 'Mozilla/5.0 (X11; U; Linux i686; fr; rv:1.8.1.3) Gecko/20070309 Firefox/2.0.0.3'
        res = logs.get_simplified_ua(ff)
        self.assertEquals(res, 'Firefox/2')
        ff3 = 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.4) Gecko/2008111318 Ubuntu/8.10 (intrepid) Firefox/3.0.4'
        res = logs.get_simplified_ua(ff3)
        self.assertEquals(res, 'Firefox/3')
        slurp = 'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)'
        res = logs.get_simplified_ua(slurp)
        self.assertEquals(res, 'Mozilla/5.0')

    def test_build_monthly_stats(self):
        results = StringIO()
        stats = ApacheLocalStats()
        stats.build_monthly_stats(2008, 11, log_sample, results)
        results.seek(0)
        reader = csv.reader(results)
        res = list(reader)
        # first, make sure all entries have values
        for line in res:
            self.assertEquals(len(line), 4)
            self.assert_('' not in line)
        # then spot-check known rows and the total count for this fixture
        self.assertEquals(res[0],
            ['appwsgi', '344.tar.bz2', 'Mozilla/5.0', '1'])
        self.assertEquals(res[456],
            ['Mtrax', 'Mtrax-2.2.07-py2.5-win32.egg', 'Firefox/3', '1'])
        self.assertEquals(res[486],
            ['OpenPGP', 'OpenPGP-0.2.3.tar.gz', 'Firefox/3', '1'])
        self.assertEquals(len(res), 10043)

    def test_read_stats(self):
        # read_stats should round-trip what build_monthly_stats wrote.
        results = StringIO()
        stats = ApacheLocalStats()
        stats.build_monthly_stats(2008, 11, log_sample, results)
        results.seek(0)
        read = stats.read_stats(results)
        first_entry = read.next()
        self.assertEquals(first_entry['count'], '1')
        self.assertEquals(first_entry['packagename'], 'appwsgi')

    def test_compression(self):
        # Same round-trip, but through a bz2-compressed file on disk.
        stats = ApacheLocalStats()
        stats.build_monthly_stats(2008, 11, log_sample,
                                  bz2_file, compression='bz2')
        read = stats.read_stats(bz2_file)
        first_entry = read.next()
        self.assertEquals(first_entry['count'], '1')
        self.assertEquals(first_entry['packagename'], 'appwsgi')

    def test_build_local_stats(self):
        # builds the standard stats local file
        stats = ApacheLocalStats()
        stats.build_local_stats(2008, 11, 18, log_sample, curdir)
        self.assert_(os.path.exists(stats_file))
        read = stats.read_stats(stats_file)
        first_entry = read.next()
        self.assertEquals(first_entry['count'], '1')
        self.assertEquals(first_entry['packagename'], '4Suite-XML')

    def test_distant_stats(self):
        os.mkdir(mirror)
        url = 'http://example.com/mirror/daily/2008-11-18.bz2'
        stats = ApacheDistantLocalStats(mirror)
        # Nothing fetched yet: the distant reader yields no entries.
        self.assertEquals(list(stats.read_stats(url)), [])
        # let's build the stats
        local_stats = ApacheLocalStats()
        local_stats.build_monthly_stats(2008, 11, log_sample,
                                        bz2_file, compression='bz2')
        # now patching urlopen so it returns the built stats
        import urllib2
        old_open = urllib2.urlopen
        def _open(url):
            class FakeUrl(object):
                def read(self):
                    return open(bz2_file).read()
            return FakeUrl()
        urllib2.urlopen = _open
        read = stats.read_stats(url)
        first_entry = read.next()
        self.assertEquals(first_entry['count'], '1')
        self.assertEquals(first_entry['packagename'], 'appwsgi')
        # checking that the cache is filled
        self.assert_('2008-11-18.bz2' in os.listdir(mirror))
        # removing the urlopen patch
        urllib2.urlopen = old_open
        # the cache should be activated now
        read = stats.read_stats(url)
        first_entry = read.next()
        self.assertEquals(first_entry['count'], '1')
        self.assertEquals(first_entry['packagename'], 'appwsgi')
def test_suite():
    """Aggregate every TestApacheReader test into one suite."""
    return unittest.TestSuite([unittest.makeSuite(TestApacheReader)])
if __name__ == '__main__':
    # Run the whole suite when this module is executed directly.
    unittest.main(defaultTest='test_suite')
|
<filename>example.py
import json
from dataclasses import asdict, fields
from dataclass_tools.tools import (
DeSerializerOptions,
PrintMetadata,
deserialize_dataclass,
serialize_dataclass,
)
from pylatex import NoEscape, PageStyle
from gl_hsc_scantling.composites import PlyStack
from gl_hsc_scantling.report_config import default_report_config
from gl_hsc_scantling.shortcut import (
Bottom,
Core,
CoreMat,
Deck,
Fiber,
Lamina,
LaminaMonolith,
LaminaParts,
LBar,
Matrix,
Panel,
Ply,
SandwichLaminate,
Session,
Side,
SingleSkinLaminate,
Stiffener,
StructuralElement,
Vessel,
WetDeck,
)
from gl_hsc_scantling.stiffeners import (
Elmt,
Point2D,
SectionElementList,
SectionElmtRectHoriz,
SectionElmtRectVert,
Stiffener,
StiffenerSection,
StiffenerSectionWithFoot,
)
from gl_hsc_scantling.tex import generate_report
# Worked example: build a catamaran scantling session, serialize it, and
# generate the report.
session = Session()

# --- Vessel ---------------------------------------------------------------
vessel = Vessel(
    name="Catamaran",
    speed=15,
    displacement=6,
    length=10,
    beam=6.5,
    fwd_perp=10,
    aft_perp=0,
    draft=0.51,
    z_baseline=-0.51,
    block_coef=0.4,
    water_plane_area=10,
    lcg=4,
    deadrise_lcg=12,
    dist_hull_cl=4.6,
    type_of_service="PASSENGER",
    service_range="USR",
)
session.add_stuff(vessel)

# --- Matrix materials -----------------------------------------------------
polyester = Matrix(
    name="polyester", density=1200, modulus_x=3000000, modulus_xy=1140000, poisson=0.316
)
epoxy = Matrix(
    name="epoxy", density=1400, modulus_x=5000000, modulus_xy=1200000, poisson=0.3
)
matrices = [polyester, epoxy]
session.add_stuff(matrices)
# session.matrices.update({matrix.name: matrix for matrix in matrices})

# --- Fibers ---------------------------------------------------------------
e_glass = Fiber(
    name="e_glass",
    density=2540,
    modulus_x=73000000,
    modulus_y=73000000,
    modulus_xy=30000000,
    poisson=0.18,
)
fibers = [e_glass]
session.add_stuff(fibers)
# session.fibers.update({fiber.name: fiber for fiber in fibers})

# --- Laminas: one built from constituents, one from monolithic data -------
e_glass_poly_70_308 = Lamina(
    LaminaParts(
        name="e_glass_poly_70_308",
        fiber=e_glass,
        matrix=polyester,
        f_mass_cont=0.7,
        f_area_density=0.304,
        max_strain_x=0.0105,
        max_strain_xy=0.021,
    )
)
et_0900 = Lamina(
    LaminaMonolith(
        name="et_0900",
        modulus_x=14336000,
        modulus_y=39248000,
        modulus_xy=4530000,
        poisson_xy=0.09,
        thickness=0.000228,
        f_mass_cont=0.7,
        f_area_density=0.304,
        max_strain_x=0.0105,
        max_strain_xy=0.021,
    )
)
# NOTE(review): `s` is rebound later as the session.json file handle.
s = serialize_dataclass(et_0900, printing_format=True, include_names=True)
laminas = [e_glass_poly_70_308, et_0900]
session.add_stuff(laminas)
# session.laminas.update({lamina.name: lamina for lamina in laminas})

# --- Core material --------------------------------------------------------
H80 = CoreMat(
    name="H80",
    strength_shear=950,
    modulus_shear=23000,
    strength_tens=2200,
    modulus_tens=85000,
    strength_comp=1150,
    modulus_comp=80000,
    density=80,
    resin_absorption=0.35,
    core_type="solid",
)
core_mats = [H80]
session.add_stuff(core_mats)
# H80_20mm = Core(name="H80_20mm", core_material=H80, core_thickness=0.02)
# cores = [H80_20mm]
# session.add_stuff(cores)

# --- Laminates: two single-skin stacks and one sandwich -------------------
orientation = [0, 90]
et_0900_20x_input = PlyStack(
    [Ply(material=et_0900, orientation=ang) for ang in orientation], multiple=10
)
et_0900_20x = SingleSkinLaminate(name="et_0900_20x", ply_stack=et_0900_20x_input)
et_0900_20x_45_input = PlyStack(
    [Ply(material=et_0900, orientation=ang) for ang in [45, -45]], multiple=10
)
et_0900_20x_45 = SingleSkinLaminate(
    name="et_0900_20x_45", ply_stack=et_0900_20x_45_input
)
sandwich_skin_input = PlyStack(
    [Ply(material=et_0900, orientation=ang) for ang in orientation], multiple=5
)
sandwich_laminate = SandwichLaminate(
    name="Sandwich Laminate",
    outter_laminate_ply_stack=sandwich_skin_input,
    inner_laminate_ply_stack=sandwich_skin_input,
    core=Core(core_material=H80, core_thickness=0.02),
)
laminates = [et_0900_20x, et_0900_20x_45, sandwich_laminate]
session.add_stuff(laminates)

# --- Structural elements: bottom panel ------------------------------------
panel = Panel(dim_x=1, dim_y=1, curvature_x=0.1, curvature_y=0.1, laminate=et_0900_20x)
bottom = Bottom(deadrise=20)
panel_element = StructuralElement(
    name="Bottom Panel", x=5, z=-0.3, vessel=vessel, model=panel, location=bottom
)
session.add_stuff(panel_element)

# --- Stiffener section: built twice, directly and via deserialization -----
lbar_input = {
    "name": "lbar_01",
    "section_profile": "LBar",
    "laminate_web": "et_0900_20x_45",
    "dimension_web": 0.05,
    "laminate_flange": "et_0900_20x",
    "dimension_flange": 0.02,
}
d = session.session_dict
# NOTE(review): lbar_2 is constructed but never added to the session.
lbar_2 = StiffenerSectionWithFoot(
    elmt_container=LBar(
        name="lbar",
        laminate_flange=et_0900_20x,
        dimension_flange=0.2,
        laminate_web=et_0900_20x_45,
        dimension_web=0.05,
    ),
)
lbar = deserialize_dataclass(
    dct=lbar_input,
    dataclass=StiffenerSectionWithFoot,
    dict_of_collections=d,
    build_instance=True,
)
session.add_stuff(lbar)
stiffener = Stiffener(
    stiff_section=lbar,
    span=1,
    spacing_1=0.5,
    spacing_2=0.5,
    stiff_att_plate=1,
    stiff_att_angle=0,
    att_plate_1=et_0900_20x,
    att_plate_2=et_0900_20x,
)
wet_deck = WetDeck(deadrise=0, air_gap=0.7)
stiffener_element = StructuralElement(
    name="Wet Deck 01", x=2, z=0.7, vessel=vessel, model=stiffener, location=wet_deck
)
session.add_stuff(stiffener_element)
panel_wet = Panel(
    dim_x=1, dim_y=1, curvature_x=0.1, curvature_y=0.1, laminate=et_0900_20x
)
# NOTE(review): same name as stiffener_element above -- confirm duplicate
# element names are intended.
panel_wet_deck_element = StructuralElement(
    name="Wet Deck 01",
    x=2,
    z=0.7,
    vessel=vessel,
    model=panel_wet,
    location=wet_deck,
)
session.add_stuff(panel_wet_deck_element)

# --- Serialize, reload, and generate the report ---------------------------
session_serialized = serialize_dataclass(session)
with open("session.json", "w") as s:
    json.dump(session_serialized, s)
loaded_session = Session()
loaded_session.load_session(session_serialized)
doc = generate_report(session, config=default_report_config)
# bottom_panel_01 = StructuralElement(
#     name="Bottom Panel 01",
#     x=8,
#     z=-0.3,
#     vessel=vessel,
#     model=panel,
#     location=bottom,
# )
# side_panel = StructuralElement(
#     name="Side Panel 01",
#     x=8,
#     z=0.2,
#     vessel=vessel,
#     model=panel,
#     location=Side(),
# )
# side_panel = StructuralElement(
#     name="Side Panel 01",
#     x=6.5,
#     z=0.2,
#     vessel=vessel,
#     model=panel,
#     location=Side(),
# )
|
<reponame>CrystalPea/pytest-easyread<gh_stars>1-10
# -*- coding: utf-8 -*-
pytest_plugins = "pytester"
import pytest
class TestEasyTerminalReporter(object):
    def setup_method(self, method):
        # Re-open the plugin source before every test; testdir.makeconftest
        # consumes the handle's contents.
        # NOTE(review): the handle is never closed -- relies on GC.
        self.conftest = open("./pytest_easyread.py", "r")
    def test_list_of_tests_items_formatted_correctly(self, testdir):
        # Test names should be humanised (prefix/underscores stripped) with
        # the outcome appended in parentheses.
        testdir.makepyfile("""
            import pytest
            def test_failing_function():
                assert 0
            def test_passing_function():
                assert 1 == 1
            """
        )
        testdir.makeconftest(self.conftest.read())
        result = testdir.runpytest('--easy')
        result.stdout.fnmatch_lines([
            "test_list_of_tests_items_formatted_correctly*",
            "  failing function (FAILED)",
            "  passing function (PASSED)"
        ])
    def test_list_of_tests_has_empty_line_between_files(self, testdir):
        # Two identical test files should be separated by a blank line.
        test_content = """
            import pytest
            def test_failing_function():
                assert 0
            def test_passing_function():
                assert 1 == 1
            """
        testdir.makepyfile(test_list_of_tests=test_content,test_list_of_tests2=test_content)
        testdir.makeconftest(self.conftest.read())
        result = testdir.runpytest('--easy')
        expected_result = "\n\ntest_list_of_tests.py \n  failing function (FAILED)\n  passing function (PASSED)\n\ntest_list_of_tests2.py \n  failing function (FAILED)\n  passing function (PASSED)"
        assert expected_result in result.stdout.str()
    def test_class_name_for_tests_formatted_correctly(self, testdir):
        # Class names appear as headers with their tests indented beneath.
        test_content = """
            import pytest
            class TestClassName(object):
                def test_failing_function(self):
                    assert 0
                def test_passing_function(self):
                    assert 1 == 1
            class TestSecondClass(object):
                def test_passing_function(self):
                    assert 1 == 1
            """
        testdir.makepyfile(test_list_of_tests=test_content,test_list_of_tests2=test_content)
        testdir.makeconftest(self.conftest.read())
        result = testdir.runpytest('--easy')
        expected_result = "test_list_of_tests.py \n  TestClassName \n    failing function (FAILED)\n    passing function (PASSED)\n  TestSecondClass \n    passing function (PASSED)\n\ntest_list_of_tests2.py \n  TestClassName \n    failing function (FAILED)\n    passing function (PASSED)"
        assert expected_result in result.stdout.str()
def test_failure_titles_have_index_numbers_and_formatting(self, testdir):
test_content = """
import pytest
class TestClassName(object):
def test_zero_is_truthy(self):
assert 0
def test_passing_function(self):
assert 1 == 1
def test_one_equals_two():
assert 1 == 2
"""
testdir.makepyfile(test_list_of_tests=test_content,test_list_of_tests2=test_content)
testdir.makeconftest(self.conftest.read())
result = testdir.runpytest('--easy')
assert "1. TestClassName: zero is truthy . . . " and "2. one equals two . . . " in result.stdout.str()
def test_failure_path_is_visible(self, testdir):
test_content = """
import pytest
class TestClassName(object):
def test_zero_is_truthy(self):
assert 0
def test_passing_function(self):
assert 1 == 1
def test_one_equals_two():
assert 1 == 2
"""
testdir.makepyfile(test_list_of_tests=test_content)
testdir.makeconftest(self.conftest.read())
result = testdir.runpytest('--easy')
expected_line_1 = " Path: test_list_of_tests.py::TestClassName::test_zero_is_truthy"
expected_line_2 = " Path: test_list_of_tests.py::test_one_equals_two"
assert expected_line_1 and expected_line_2 in result.stdout.str()
    def test_there_are_no_separator_dashes_within_report_messages(self, testdir):
        # The plugin should strip pytest's "_ _ _ _" separators from the
        # failure report (they appear when the error crosses module files).
        test_content = """
            import pytest
            from do_not_panic import is_it_answer_to_life_universe_and_everything
            class TestClassName(object):
                def test_is_it_answer_to_life_universe_and_everything_throws_error_if_string_passed_in(self):
                    assert is_it_answer_to_life_universe_and_everything("love") == True
            """
        do_not_panic = """
            def is_it_answer_to_life_universe_and_everything(integer):
                if isinstance(integer, int):
                    return integer == 42
                else:
                    raise NameError("Hint: it's a number! :P")
            """
        testdir.makepyfile(test_list_of_tests=test_content,test_list_of_tests2=test_content, do_not_panic=do_not_panic)
        testdir.makeconftest(self.conftest.read())
        result = testdir.runpytest('--easy')
        banished_separator = "_ _ _ _"
        assert banished_separator not in result.stdout.str()
def test_there_are_two_empty_lines_before_second_failure(self, testdir):
    """Two blank lines separate the first failure report from the second."""
    module_source = """
import pytest

def test_failing_function():
    assert 0

def test_failing_function_no2():
    assert 1 != 1
"""
    testdir.makepyfile(test_list_of_tests=module_source)
    testdir.makeconftest(self.conftest.read())
    output = testdir.runpytest('--easy').stdout.str()
    # End of failure 1 (its traceback tail), two blank lines, then title 2.
    separator_pattern = "test_list_of_tests.py:3: AssertionError\n\n\n2. failing function no2"
    assert separator_pattern in output
def test_there_is_one_empty_line_before_first_failure(self, testdir):
    """One blank line precedes the first failure title.

    Bug fix: the original assertion was ``assert expected_result_1 or
    expected_result_2 in out``, which parses as ``expected_result_1 or
    (expected_result_2 in out)`` — the first operand is a non-empty string
    so the assert could never fail.  The membership test is now applied to
    both candidates before the ``or``.
    """
    test_content = """
import pytest

def test_failing_function():
    assert 0

def test_failing_function_no2():
    assert 1 != 1
"""
    testdir.makepyfile(test_list_of_tests=test_content)
    testdir.makeconftest(self.conftest.read())
    result = testdir.runpytest('--easy')
    expected_result_1 = " \n\n1. failing function"
    expected_result_2 = ".\n\n1. failing function"
    output = result.stdout.str()
    # Either form is acceptable depending on what precedes the blank lines.
    assert expected_result_1 in output or expected_result_2 in output
def test_pytest_easyread_works_with_parametrize(self, testdir):
    """Parametrized tests are listed once per parameter with their id."""
    module_source = """
import pytest

@pytest.mark.parametrize("number", [4,5,6])
def test_number_divisible_by_2(number):
    assert number % 2 == 0
"""
    testdir.makepyfile(test_list_of_tests=module_source)
    testdir.makeconftest(self.conftest.read())
    output = testdir.runpytest('--easy').stdout.str()
    # Every parametrized case must be reported with its own [param] id.
    for wanted in (
        "test_list_of_tests.py",
        "number divisible by 2[4] (PASSED)",
        "number divisible by 2[5] (FAILED)",
        "number divisible by 2[6] (PASSED)",
    ):
        assert wanted in output
|
<filename>run-tests.py
"""
Copyright (c) 2019 <NAME>. All rights reserved.
SPDX-License-Identifier: MIT
This script attempts to compile every test source + header generated by the
run-build.py script.
Usage:
python3.8 run-tests.py [--keep] COMPILER_PATH [COMPILER_FLAGS]
Where the optional `--keep` flag prevents the script from cleaning up the
generated .o object files.
Example:
python3.8 run-tests.py aarch64-none-elf-gcc
python3.8 run-tests.py --keep clang -std=c99 -O3
"""
import os
import shutil
import sys
import subprocess
def abort(msg: object, errno: int = 1) -> None:
    """Print *msg* and terminate the process with exit status *errno*.

    Bug fix: the original ignored the ``errno`` parameter and always
    called ``sys.exit(1)``, so callers passing a real errno (e.g. from an
    OSError) got a wrong exit status.
    """
    print(msg)
    sys.exit(errno)
if __name__ == "__main__":
    # Build every generated test source with the requested compiler and report
    # a pass/fail summary.  Exit status is non-zero when any source fails.
    USAGE = (f"usage: python3.8 {sys.argv[0]} [--keep] COMPILER_PATH [COMPILER_FLAGS]\n\n"
             f"example: python3.8 {sys.argv[0]} aarch64-none-elf-gcc\n"
             f"example: python3.8 {sys.argv[0]} --keep clang -std=c99 -O3")

    if len(sys.argv) < 2:
        abort(USAGE)
    # "--keep" alone is not enough: a compiler path is still required.
    if len(sys.argv) == 2 and sys.argv[1] == "--keep":
        abort(USAGE)

    print("========== test setup ==========")

    keep_objs = False
    argv_compiler = 1
    argv_flags = 2
    if sys.argv[1] == "--keep":
        keep_objs = True
        argv_compiler = 2
        argv_flags = 3
    compiler = sys.argv[argv_compiler]

    try:
        os.chdir("test")
    except FileNotFoundError:
        abort("failed to cd into 'test/' directory; have you built the library?")

    def cleanup_test_dir_and_return_list_of_sources() -> list:
        """Remove every non-.c entry in the cwd and return sorted .c names.

        Fix: the return annotation said ``-> str`` but the function returns
        a list of filenames.
        """
        files = []
        try:
            # Only the top level of the directory is scanned (break after
            # the first os.walk tuple).
            for (_, _, f) in os.walk("."):
                files.extend(f)
                break
            sources = sorted(list(filter(lambda s: s.endswith(".c"), files)))
            others = list(filter(lambda s: not s.endswith(".c"), files))
            if others:
                for o in others:
                    (shutil.rmtree if os.path.isdir(o) else os.remove)(o)
        except OSError as e:
            # abort() exits the process, so 'sources' is never read unbound.
            abort(e, e.errno if hasattr(e, "errno") else 1)
        return sources

    base_command = [
        compiler,
        "-g",
        "-c",
        "-I../include",
        "-Wall",
        "-Wextra",
        "-pedantic",
        "-Werror",
    ]
    if len(sys.argv) > argv_flags:
        base_command += sys.argv[argv_flags:]
    # Supply defaults only when the user did not override them.
    base_command_str = " ".join(base_command)
    if "-O" not in base_command_str:
        base_command.append("-O2")
    if "-std" not in base_command_str:
        base_command.append("-std=c11")
    if "clang" in compiler:
        if "--target" not in base_command_str:
            base_command.append("--target=aarch64-none-elf")

    ok = []
    failures = []
    print("========== test start ==========")
    # (The original opened os.devnull here but never used the handle;
    # the unused file object has been removed.)
    for source in cleanup_test_dir_and_return_list_of_sources():
        command = f"{' '.join(base_command)} {source}"
        print(f"{command}")
        with subprocess.Popen(command.split()) as p:
            p.wait()
        if 0 == p.returncode:
            ok.append(source)
        else:
            failures.append(source)
    print("\n\n")
    if failures:
        print("========== test failures ==========")
        print("\n".join(failures))
    print("========== test results ==========")
    print(f"using compiler: {subprocess.check_output([compiler, '--version']).splitlines()[0].decode('UTF-8')}")
    print(f"successfully built {len(ok)} sources out of {len(ok)+len(failures)} ({len(failures)} failures)")
    if not keep_objs:
        cleanup_test_dir_and_return_list_of_sources()
    # Exit 1 when there is at least one failure, 0 otherwise.
    sys.exit(not not len(failures))
|
import argparse
import copy
import json
import pickle
import pprint
import os
import sys
from tqdm import tqdm
from typing import *
from my_pybullet_envs import utils
import numpy as np
import torch
import math
import my_pybullet_envs
from system import policy, openrave
import pybullet as p
import time
import inspect
from my_pybullet_envs.inmoov_arm_obj_imaginary_sessions import (
ImaginaryArmObjSession,
)
from my_pybullet_envs.inmoov_shadow_demo_env_v4 import (
InmoovShadowHandDemoEnvV4,
)
from my_pybullet_envs.inmoov_shadow_hand_v2 import (
InmoovShadowNew,
)
# Directory of this script; resolved via inspect so it works regardless of cwd.
currentdir = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe()))
)
homedir = os.path.expanduser("~")

# TODO: main module depends on the following code/model:
# demo env: especially observation # change obs vec (note diffTar)
# the settings of inmoov hand v2 # init thumb 0.0 vs 0.1
# obj sizes & frame representation & friction & obj xy range
# frame skip
# vision delay

"""Parse arguments"""
sys.path.append("a2c_ppo_acktr")
parser = argparse.ArgumentParser(description="RL")
parser.add_argument("--seed", type=int, default=101)  # only keep np.random
parser.add_argument("--use_height", type=int, default=0)
parser.add_argument("--test_placing", type=int, default=0)
parser.add_argument("--long_move", type=int, default=0)
parser.add_argument("--non-det", type=int, default=0)
parser.add_argument("--render", type=int, default=0)
parser.add_argument("--sleep", type=int, default=0)
args = parser.parse_args()
np.random.seed(args.seed)
args.det = not args.non_det

"""Configurations."""
USE_GV5 = False  # is false, use gv6
DUMMY_SLEEP = bool(args.sleep)  # real-time-ish pacing during planned motion
WITH_REACHING = True   # plan a reach trajectory instead of teleporting the arm
WITH_RETRACT = True    # plan a retract trajectory after placing
USE_HEIGHT_INFO = bool(args.use_height)
TEST_PLACING = bool(args.test_placing)  # if false, test stacking
ADD_SURROUNDING_OBJS = True
LONG_MOVE = bool(args.long_move)
SURROUNDING_OBJS_MAX_NUM = 4
GRASP_SPH_ON = True       # enable the separate sphere grasp/place policies
ADD_WHITE_NOISE = True    # perturb observations to mimic noisy vision
RENDER = bool(args.render)
CLOSE_THRES = 0.25        # min xy distance between sampled objects (meters, presumably — TODO confirm)
NUM_TRIALS = 300
GRASP_END_STEP = 35
PLACE_END_STEP = 70
INIT_NOISE = True
DET_CONTACT = 0  # 0 false, 1 true
OBJ_MU = 1.0
FLOOR_MU = 1.0
HAND_MU = 1.0
OBJ_MASS = 3.5
IS_CUDA = True
DEVICE = "cuda" if IS_CUDA else "cpu"

# Policy checkpoint names/paths; the pairs below must match the env versions
# the policies were trained on.
if USE_GV5:
    GRASP_PI = "0313_2_n_25_45"
    GRASP_DIR = "./trained_models_%s/ppo/" % "0313_2_n"
    PLACE_PI = "0313_2_placeco_0316_1"  # 50ms
    PLACE_DIR = "./trained_models_%s/ppo/" % PLACE_PI
    GRASP_PI_ENV_NAME = "InmoovHandGraspBulletEnv-v5"
    PLACE_PI_ENV_NAME = "InmoovHandPlaceBulletEnv-v9"
    INIT_FIN_Q = np.array([0.4, 0.4, 0.4] * 3 + [0.4, 0.4, 0.4] + [0.0, 1.0, 0.1, 0.5, 0.0])
else:
    # use gv6
    if USE_HEIGHT_INFO:
        GRASP_PI = "0404_0_n_20_40"
        GRASP_DIR = "./trained_models_%s/ppo/" % "0404_0_n"
        PLACE_PI = "0404_0_n_place_0404_0"
        PLACE_DIR = "./trained_models_%s/ppo/" % PLACE_PI
    else:
        GRASP_PI = "0411_0_n_25_45"
        GRASP_DIR = "./trained_models_%s/ppo/" % "0411_0_n"
        PLACE_PI = "0411_0_n_place_0411_0"
        PLACE_DIR = "./trained_models_%s/ppo/" % PLACE_PI
        # GRASP_PI = "0426_0_n_25_45"
        # GRASP_DIR = "./trained_models_%s/ppo/" % "0426_0_n"
        #
        # PLACE_PI = "0426_0_n_place_0426_0"
        # PLACE_DIR = "./trained_models_%s/ppo/" % PLACE_PI
    GRASP_PI_ENV_NAME = "InmoovHandGraspBulletEnv-v6"
    PLACE_PI_ENV_NAME = "InmoovHandPlaceBulletEnv-v9"
    INIT_FIN_Q = np.array([0.4, 0.4, 0.4] * 3 + [0.4, 0.4, 0.4] + [0.0, 1.0, 0.1, 0.5, 0.1])

if GRASP_SPH_ON:
    GRASP_SPH_PI = "0422_sph_n_25_45"  # TODO: 0420
    GRASP_SPH_DIR = "./trained_models_%s/ppo/" % "0422_sph_n"
    PLACE_SPH_PI = "0422_sph_n_place_0422_sph"
    PLACE_SPH_DIR = "./trained_models_%s/ppo/" % PLACE_SPH_PI

USE_VISION_DELAY = True
VISION_DELAY = 2  # observation is refreshed every VISION_DELAY control steps
PLACING_CONTROL_SKIP = 6
GRASPING_CONTROL_SKIP = 6
def planning(trajectory, restore_fingers=False):
    """Execute a planned arm trajectory with position control.

    Steps the simulation once per waypoint (plus 5 extra settle steps at the
    final waypoint).  While the arm tracks the trajectory, the fingers are
    either held near their current pose (grasp kept tight) or, when
    *restore_fingers* is set, blended back toward the robot's initial
    finger configuration.

    Relies on module globals: ``env_core``, ``utils``, ``p`` (pybullet) and
    ``DUMMY_SLEEP``.
    """
    # TODO: total traj length 300+5 now
    max_force = env_core.robot.maxForce
    last_tar_arm_q = env_core.robot.get_q_dq(env_core.robot.arm_dofs)[0]
    init_tar_fin_q = env_core.robot.tar_fin_q
    init_fin_q = env_core.robot.get_q_dq(env_core.robot.fin_actdofs)[0]
    env_core.robot.tar_arm_q = trajectory[-1]  # TODO: important!
    print("init_tar_fin_q")
    print(["{0:0.3f}".format(n) for n in init_tar_fin_q])
    print("init_fin_q")
    print(["{0:0.3f}".format(n) for n in init_fin_q])
    for idx in range(len(trajectory) + 5):
        # Past the end of the trajectory, keep commanding the last waypoint.
        if idx > len(trajectory) - 1:
            tar_arm_q = trajectory[-1]
        else:
            tar_arm_q = trajectory[idx]
        # Finite-difference velocity target between consecutive waypoints.
        tar_arm_vel = (tar_arm_q - last_tar_arm_q) / utils.TS
        p.setJointMotorControlArray(
            bodyIndex=env_core.robot.arm_id,
            jointIndices=env_core.robot.arm_dofs,
            controlMode=p.POSITION_CONTROL,
            targetPositions=list(tar_arm_q),
            targetVelocities=list(tar_arm_vel),
            forces=[max_force * 5] * len(env_core.robot.arm_dofs))
        if restore_fingers and idx >= len(trajectory) * 0.1:  # TODO: hardcoded
            # Linearly blend from the current finger pose back to the
            # robot's init pose over [10%, 70%] of the trajectory.
            blending = np.clip((idx - len(trajectory) * 0.1) / (len(trajectory) * 0.6), 0.0, 1.0)
            cur_fin_q = env_core.robot.get_q_dq(env_core.robot.fin_actdofs)[0]
            tar_fin_q = env_core.robot.init_fin_q * blending + cur_fin_q * (1-blending)
        else:
            # try to keep fin q close to init_fin_q (keep finger pose)
            # add at most offset 0.05 in init_tar_fin_q direction so that grasp is tight
            tar_fin_q = np.clip(init_tar_fin_q, init_fin_q - 0.05, init_fin_q + 0.05)
        # clip to joint limit
        tar_fin_q = np.clip(tar_fin_q,
                            env_core.robot.ll[env_core.robot.fin_actdofs],
                            env_core.robot.ul[env_core.robot.fin_actdofs])
        p.setJointMotorControlArray(
            bodyIndex=env_core.robot.arm_id,
            jointIndices=env_core.robot.fin_actdofs,
            controlMode=p.POSITION_CONTROL,
            targetPositions=list(tar_fin_q),
            forces=[max_force] * len(env_core.robot.fin_actdofs))
        # Unactuated finger joints are driven to zero with a weaker force.
        p.setJointMotorControlArray(
            bodyIndex=env_core.robot.arm_id,
            jointIndices=env_core.robot.fin_zerodofs,
            controlMode=p.POSITION_CONTROL,
            targetPositions=[0.0]*len(env_core.robot.fin_zerodofs),
            forces=[max_force / 4.0] * len(env_core.robot.fin_zerodofs))
        diff = np.linalg.norm(env_core.robot.get_q_dq(env_core.robot.arm_dofs)[0]
                              - tar_arm_q)
        if idx == len(trajectory) + 4:
            # Final settle step: dump tracking error and finger state.
            print("diff final", diff)
            print("vel final", np.linalg.norm(env_core.robot.get_q_dq(env_core.robot.arm_dofs)[1]))
            print("fin dofs")
            print(["{0:0.3f}".format(n) for n in env_core.robot.get_q_dq(env_core.robot.fin_actdofs)[0]])
            print("cur_fin_tar_q")
            print(["{0:0.3f}".format(n) for n in env_core.robot.tar_fin_q])
        for _ in range(1):
            p.stepSimulation()
            if DUMMY_SLEEP:
                time.sleep(utils.TS * 0.6)
        last_tar_arm_q = tar_arm_q
def get_relative_state_for_reset(oid):
    """Capture the object's pose in the palm frame plus the finger state.

    The returned dict can be used later to restore the grasp configuration
    after a simulation reset.
    """
    # world->object and world->palm transforms
    world_obj_pos, world_obj_quat = p.getBasePositionAndOrientation(oid)
    palm_pos, palm_quat = env_core.robot.get_link_pos_quat(
        env_core.robot.ee_id
    )
    # invert to get palm->world, then compose: (palm->world) * (world->obj)
    palm_inv_pos, palm_inv_quat = p.invertTransform(palm_pos, palm_quat)
    obj_pos_in_palm, obj_quat_in_palm = p.multiplyTransforms(
        palm_inv_pos, palm_inv_quat, world_obj_pos, world_obj_quat
    )
    finger_q, _ = env_core.robot.get_q_dq(env_core.robot.all_findofs)
    return {
        "obj_pos_in_palm": obj_pos_in_palm,
        "obj_quat_in_palm": obj_quat_in_palm,
        "all_fin_q": finger_q,
        "fin_tar_q": env_core.robot.tar_fin_q,
    }
def sample_obj_dict(is_thicker=False, whole_table_top=False, only_sph=False):
    """Randomly sample one object description dict.

    NOTE: the sequence of np.random draws below is fixed on purpose — the
    script seeds np.random, so reordering calls would change every trial.

    Args:
        is_thicker: use the wider minimum radius (used for bottom objects).
        whole_table_top: sample xy over the whole table instead of the
            target sub-region.
        only_sph: force the sphere shape (the random shape draw still
            happens first so the random stream is unchanged).
    """
    # a dict containing obj info
    # "shape", "radius", "height", "position", "orientation", "mass", "mu"
    min_r = utils.HALF_W_MIN_BTM if is_thicker else utils.HALF_W_MIN
    if whole_table_top:
        x_min = utils.X_MIN
        x_max = utils.X_MAX
        y_min = utils.Y_MIN
        y_max = utils.Y_MAX
    else:
        x_min = utils.TX_MIN
        x_max = utils.TX_MAX
        y_min = utils.TY_MIN
        y_max = utils.TY_MAX
    if GRASP_SPH_ON:
        shape = utils.SHAPE_IND_TO_NAME_MAP[np.random.randint(3) - 1]  # -1(sph)/0/1
    else:
        shape = utils.SHAPE_IND_TO_NAME_MAP[np.random.randint(2)]
    if only_sph:
        shape = utils.SHAPE_IND_TO_NAME_MAP[-1]
    obj_dict = {
        "shape": shape,
        "radius": np.random.uniform(min_r, utils.HALF_W_MAX),
        "height": np.random.uniform(utils.H_MIN, utils.H_MAX),
        "position": [
            np.random.uniform(x_min, x_max),
            np.random.uniform(y_min, y_max),
            0.0
        ],
        "orientation": p.getQuaternionFromEuler(
            [0., 0., np.random.uniform(low=0, high=2.0 * math.pi)]
        ),
        "mass": OBJ_MASS,
        "mu": OBJ_MU,
    }
    if obj_dict["shape"] == "box":
        # shrink boxes — presumably "radius" acts as a half-extent for
        # boxes; TODO confirm against create_sym_prim_shape_helper_new
        obj_dict["radius"] *= 0.8
    elif GRASP_SPH_ON and obj_dict['shape'] == "sphere":
        # spheres are described by height only; radius field is unused
        obj_dict['height'] *= 0.75
        obj_dict['radius'] = None
    # rest the object on the table surface (center z = half height)
    obj_dict["position"][2] = obj_dict["height"] / 2.0
    return obj_dict
def load_obj_and_construct_state(obj_dicts_list):
    """Spawn the sampled objects in Bullet; map bullet id -> object dict.

    Loading order: surrounding objects first (index 2..N-1), then the
    bottom object (skipped when placing on the tabletop, rendered green),
    and the grasp target last (rendered red).
    Returns (state, top_id, bottom_id); bottom_id is None for placing.
    """
    state = {}
    # load surrounding first
    for surround_dict in obj_dicts_list[2:]:
        surround_id = utils.create_sym_prim_shape_helper_new(surround_dict)
        state[surround_id] = surround_dict
    bottom_id = None
    # ignore btm if placing on tabletop
    if not TEST_PLACING:
        obj_dicts_list[1]['color'] = 'green'
        bottom_id = utils.create_sym_prim_shape_helper_new(obj_dicts_list[1])
        state[bottom_id] = obj_dicts_list[1]
    # TODO:tmp load grasp obj last
    obj_dicts_list[0]['color'] = 'red'
    grasp_target_id = utils.create_sym_prim_shape_helper_new(obj_dicts_list[0])
    state[grasp_target_id] = obj_dicts_list[0]
    return state, grasp_target_id, bottom_id
def construct_obj_array_for_openrave(obj_dicts_list):
    """Build the obstacle array handed to the OpenRAVE planner.

    One row [x, y, 0, 0] per object; the grasp target stays at row 0.
    The bottom object (index 1) is omitted when placing on the tabletop.
    """
    rows = []
    for list_idx, entry in enumerate(obj_dicts_list):
        # ignore btm if placing on tabletop
        if TEST_PLACING and list_idx == 1:
            continue
        # grasp obj should be at first
        rows.append(entry["position"][:2] + [0., 0.])
    return np.array(rows)
def get_grasp_policy_obs_tensor(tx, ty, half_height, is_box):
    """Assemble the grasping-policy observation and wrap it as a tensor.

    Chooses the observation builder matching the policy generation and the
    height-information setting.
    """
    if USE_GV5:
        # the gv5 policy was only trained with height information
        assert USE_HEIGHT_INFO
        raw_obs = env_core.get_robot_contact_txty_halfh_obs_nodup(tx, ty, half_height)
    elif USE_HEIGHT_INFO:
        raw_obs = env_core.get_robot_contact_txtytz_halfh_shape_obs_no_dup(
            tx, ty, 0.0, half_height, is_box
        )
    else:
        raw_obs = env_core.get_robot_contact_txty_shape_obs_no_dup(tx, ty, is_box)
    return policy.wrap_obs(raw_obs, IS_CUDA)
def get_stack_policy_obs_tensor(tx, ty, tz, t_half_height, is_box, t_pos, t_up, b_pos, b_up):
    """Assemble the stacking/placing-policy observation tensor.

    Uses the two-object (top + bottom) observation builder; the tz and
    half-height terms are only included when height info is enabled.
    """
    if USE_HEIGHT_INFO:
        raw_obs = env_core.get_robot_contact_txtytz_halfh_shape_2obj6dUp_obs_nodup_from_up(
            tx, ty, tz, t_half_height, is_box, t_pos, t_up, b_pos, b_up
        )
    else:
        raw_obs = env_core.get_robot_contact_txty_shape_2obj6dUp_obs_nodup_from_up(
            tx, ty, is_box, t_pos, t_up, b_pos, b_up
        )
    return policy.wrap_obs(raw_obs, IS_CUDA)
def is_close(obj_dict_a, obj_dict_b, dist=CLOSE_THRES):
    """Return True when the two objects' xy centers lie within *dist*."""
    dx = obj_dict_a["position"][0] - obj_dict_b["position"][0]
    dy = obj_dict_a["position"][1] - obj_dict_b["position"][1]
    return dx * dx + dy * dy < dist * dist
def get_stacking_obs(
    obj_state: dict,
    top_oid: int,
    btm_oid: int,
):
    """Retrieves stacking observations.

    Args:
        obj_state: world obj state dict of dicts
        top_oid: The object ID of the top object.
        btm_oid: The object ID of the bottom object (None when placing on
            the tabletop).

    Returns:
        top_pos: The xyz position of the top object.
        top_up: The up vector of the top object.
        btm_pos: The xyz position of the bottom object.
        btm_up: The up vector of the bottom object.
        top_half_height: Half of the height of the top object.
    """
    top_pos, top_quat = p.getBasePositionAndOrientation(top_oid)
    if GRASP_SPH_ON and obj_state[top_oid]["shape"] == "sphere":
        # spheres have no meaningful orientation; report identity
        top_quat = [0., 0, 0, 1]
    if btm_oid is None:
        # no bottom object (tabletop placing): dummy pose at the origin
        btm_pos, btm_quat = [0.0, 0, 0], [0.0, 0, 0, 1]
    else:
        btm_pos, btm_quat = p.getBasePositionAndOrientation(btm_oid)
    top_up = utils.quat_to_upv(top_quat)
    btm_up = utils.quat_to_upv(btm_quat)
    top_half_height = obj_state[top_oid]["height"] / 2
    if ADD_WHITE_NOISE:
        # emulate noisy vision; the np.random call order here is fixed
        # (script is seeded), do not reorder
        top_pos = utils.perturb(np.random, top_pos, r=0.02)
        btm_pos = utils.perturb(np.random, btm_pos, r=0.02)
        top_up = utils.perturb(np.random, top_up, r=0.03)
        btm_up = utils.perturb(np.random, btm_up, r=0.03)
        top_half_height = utils.perturb_scalar(np.random, top_half_height, r=0.01)
    return top_pos, top_up, btm_pos, btm_up, top_half_height
def gen_surrounding_objs(obj_dicts_list):
    # gen objs and modifies obj_dicts_list accordingly
    """Append 1..SURROUNDING_OBJS_MAX_NUM clutter objects in place.

    Rejection-samples table-top poses that do not overlap any existing
    object; gives up after 50 attempts.  Mutates and returns
    *obj_dicts_list* (entries 0/1 are the grasp and bottom objects).
    """
    if ADD_SURROUNDING_OBJS:
        num_obj = np.random.randint(SURROUNDING_OBJS_MAX_NUM) + 1  # 1,2,3,4
        retries = 0
        while len(obj_dicts_list) - 2 < num_obj and retries < 50:
            new_obj_dict = sample_obj_dict(whole_table_top=True)
            # reject candidates too close to any already-placed object
            is_close_arr = [is_close(new_obj_dict, obj_dict) for obj_dict in obj_dicts_list]
            if not any(is_close_arr):
                obj_dicts_list.append(new_obj_dict)
            retries += 1
    return obj_dicts_list
success_count = 0
# counts trials in which every OpenRAVE planning call succeeded
openrave_success_count = 0

"""Pre-calculation & Loading"""
# grasping policy (cylinder/box objects)
g_actor_critic, _, _, _ = policy.load(
    GRASP_DIR, GRASP_PI_ENV_NAME, IS_CUDA
)
# placing policy; also provides the recurrent hidden state and masks
# tensors reused by both policies in the control loops below
p_actor_critic, _, recurrent_hidden_states, masks = policy.load(
    PLACE_DIR, PLACE_PI_ENV_NAME, IS_CUDA
)
if GRASP_SPH_ON:
    # separate policies trained for spherical objects
    sph_g_actor_critic, _, _, _ = policy.load(
        GRASP_SPH_DIR, GRASP_PI_ENV_NAME, IS_CUDA
    )
    sph_p_actor_critic, _, _, _ = policy.load(
        PLACE_SPH_DIR, PLACE_PI_ENV_NAME, IS_CUDA
    )

# average object pose in the palm frame at grasp end (recorded from the
# grasp policy's training rollouts); inverted to get palm-in-object
o_pos_pf_ave, o_quat_pf_ave, _ = \
    utils.read_grasp_final_states_from_pickle(GRASP_PI)
p_pos_of_ave, p_quat_of_ave = p.invertTransform(
    o_pos_pf_ave, o_quat_pf_ave
)
if GRASP_SPH_ON:
    sph_o_pos_pf_ave, sph_o_quat_pf_ave, _ = \
        utils.read_grasp_final_states_from_pickle(GRASP_SPH_PI)
    sph_p_pos_of_ave, sph_p_quat_of_ave = p.invertTransform(
        sph_o_pos_pf_ave, sph_o_quat_pf_ave
    )

"""Start Bullet session."""
if RENDER:
    p.connect(p.GUI)
else:
    p.connect(p.DIRECT)
# Main evaluation loop: sample a scene, plan a reach, grasp with the RL
# policy, plan a transport move, place/stack with the RL policy, optionally
# retract, then score the trial.
for trial in range(NUM_TRIALS):
    """Sample two/N objects"""
    all_dicts = []
    while True:
        top_dict = sample_obj_dict(only_sph=False)
        btm_dict = sample_obj_dict(is_thicker=True)
        # g_* = grasp target xy; p_* = place target xyz
        g_tx, g_ty = top_dict["position"][0], top_dict["position"][1]
        p_tx, p_ty, p_tz = btm_dict["position"][0], btm_dict["position"][1], btm_dict["height"]
        t_half_height = top_dict["height"]/2
        if ADD_WHITE_NOISE:
            # perturb the targets the policies see (not the actual objects)
            g_tx += np.random.uniform(low=-0.015, high=0.015)
            g_ty += np.random.uniform(low=-0.015, high=0.015)
            t_half_height += np.random.uniform(low=-0.01, high=0.01)
            p_tx += np.random.uniform(low=-0.015, high=0.015)
            p_ty += np.random.uniform(low=-0.015, high=0.015)
            p_tz += np.random.uniform(low=-0.015, high=0.015)
        if TEST_PLACING:
            # overwrite ptz
            p_tz = 0.0
        # if GRASP_SPH_ON
        top_shape = utils.NAME_TO_SHAPE_IND_MAP[top_dict["shape"]]  # -1(sph)/0/1
        # else
        # is_box = int(top_dict["shape"] == "box") # 0/1
        dist = CLOSE_THRES*2.0 if LONG_MOVE else CLOSE_THRES
        if is_close(top_dict, btm_dict, dist=dist):
            continue  # discard & re-sample
        else:
            all_dicts = [top_dict, btm_dict]
            gen_surrounding_objs(all_dicts)
            del top_dict, btm_dict
            break

    """Imaginary arm session to get q_reach"""
    if USE_GV5:
        sess = ImaginaryArmObjSession()
        Qreach = np.array(sess.get_most_comfortable_q_and_refangle(g_tx, g_ty)[0])
        del sess
    else:
        # maybe not necessary to create table and robot twice. Decide later
        desired_obj_pos = [g_tx, g_ty, 0.0]
        table_id = utils.create_table(FLOOR_MU)
        robot = InmoovShadowNew(
            init_noise=False,
            timestep=utils.TS,
            np_random=np.random,
        )
        Qreach = utils.get_n_optimal_init_arm_qs(robot, utils.PALM_POS_OF_INIT,
                                                 p.getQuaternionFromEuler(utils.PALM_EULER_OF_INIT),
                                                 desired_obj_pos, table_id, wrist_gain=3.0)[0]
        p.resetSimulation()

    # Compute the arm configuration at the start of placing (Qdestin).
    if USE_HEIGHT_INFO:
        desired_obj_pos = [p_tx, p_ty, utils.PLACE_START_CLEARANCE + p_tz]
    else:
        if TEST_PLACING:
            desired_obj_pos = [p_tx, p_ty, utils.PLACE_START_CLEARANCE + 0.0]
        else:
            # height unknown to the policy: assume the tallest possible btm obj
            desired_obj_pos = [p_tx, p_ty, utils.PLACE_START_CLEARANCE + utils.H_MAX]
    table_id = utils.create_table(FLOOR_MU)
    robot = InmoovShadowNew(
        init_noise=False,
        timestep=utils.TS,
        np_random=np.random,
    )
    if GRASP_SPH_ON and top_shape == -1:  # sphere has no "up vector"
        _, desired_obj_quat = p.multiplyTransforms(
            [0, 0, 0],
            p.getQuaternionFromEuler(utils.PALM_EULER_OF_INIT),
            [0, 0, 0],
            sph_o_quat_pf_ave
        )
        Qdestin = utils.get_n_optimal_init_arm_qs(
            robot, sph_p_pos_of_ave, sph_p_quat_of_ave, desired_obj_pos, table_id, desired_obj_quat=desired_obj_quat
        )[0]
    else:
        desired_obj_quat = [0., 0, 0, 1]  # box or cyl be upright
        Qdestin = utils.get_n_optimal_init_arm_qs(
            robot, p_pos_of_ave, p_quat_of_ave, desired_obj_pos, table_id, desired_obj_quat=desired_obj_quat
        )[0]
    del table_id, robot, desired_obj_pos
    p.resetSimulation()
    """Clean up the simulation, since this is only imaginary."""

    """Setup Bullet world."""
    """ Create table, robot, bottom obj, top obj"""
    p.setPhysicsEngineParameter(numSolverIterations=utils.BULLET_CONTACT_ITER)
    p.setPhysicsEngineParameter(deterministicOverlappingPairs=DET_CONTACT)
    p.setTimeStep(utils.TS)
    p.setGravity(0, 0, -utils.GRAVITY)
    table_id = utils.create_table(FLOOR_MU)
    env_core = InmoovShadowHandDemoEnvV4(
        np_random=np.random,
        init_noise=INIT_NOISE,
        timestep=utils.TS,
        withVel=False,
        diffTar=True,
        robot_mu=HAND_MU,
        control_skip=GRASPING_CONTROL_SKIP,
        sleep=DUMMY_SLEEP
    )
    env_core.change_init_fin_q(INIT_FIN_Q)
    objs, top_id, btm_id = load_obj_and_construct_state(all_dicts)
    OBJECTS = construct_obj_array_for_openrave(all_dicts)

    """Prepare for grasping. Reach for the object."""
    print(f"Qreach: {Qreach}")
    if WITH_REACHING:
        # start from the home pose and plan a collision-free reach to Qreach
        env_core.robot.reset_with_certain_arm_q([0.0] * 7)
        reach_save_path = homedir + "/container_data/PB_REACH.npz"
        reach_read_path = homedir + "/container_data/OR_REACH.npz"
        Traj_reach = openrave.get_traj_from_openrave_container(OBJECTS, None, Qreach, reach_save_path, reach_read_path)
        if Traj_reach is None or len(Traj_reach) == 0:
            p.resetSimulation()
            print("*******", success_count * 1.0 / (trial + 1))
            continue  # reaching failed
        else:
            planning(Traj_reach)
            print("arm q", env_core.robot.get_q_dq(env_core.robot.arm_dofs)[0])
    else:
        # teleport directly to the reach configuration
        env_core.robot.reset_with_certain_arm_q(Qreach)
    # input("press enter")

    g_obs = get_grasp_policy_obs_tensor(g_tx, g_ty, t_half_height, top_shape)

    """Grasp"""
    control_steps = 0
    for i in range(GRASP_END_STEP):
        with torch.no_grad():
            # sphere objects use their dedicated grasp policy
            if GRASP_SPH_ON and top_shape == -1:
                value, action, _, recurrent_hidden_states = sph_g_actor_critic.act(
                    g_obs, recurrent_hidden_states, masks, deterministic=args.det
                )
            else:
                value, action, _, recurrent_hidden_states = g_actor_critic.act(
                    g_obs, recurrent_hidden_states, masks, deterministic=args.det
                )
        env_core.step(policy.unwrap_action(action, IS_CUDA))
        g_obs = get_grasp_policy_obs_tensor(g_tx, g_ty, t_half_height, top_shape)
        # print(g_obs)
        # print(action)
        # print(control_steps)
        # control_steps += 1
        # input("press enter g_obs")
        masks.fill_(1.0)
    # pose_saver.get_poses()
    final_g_obs = copy.copy(g_obs)
    del g_obs, g_tx, g_ty, t_half_height

    state = get_relative_state_for_reset(top_id)
    print("after grasping", state)
    # state = get_relative_state_for_reset(top_id)
    # print("after grasping", state)
    # print("arm q", env_core.robot.get_q_dq(env_core.robot.arm_dofs)[0])
    # # input("after grasping")

    """Send move command to OpenRAVE"""
    Qmove_init = env_core.robot.get_q_dq(env_core.robot.arm_dofs)[0]
    print(f"Qmove_init: {Qmove_init}")
    print(f"Qdestin: {Qdestin}")
    move_save_path = homedir + "/container_data/PB_MOVE.npz"
    move_read_path = homedir + "/container_data/OR_MOVE.npz"
    Traj_move = openrave.get_traj_from_openrave_container(OBJECTS, Qmove_init, Qdestin, move_save_path, move_read_path)

    """Execute planned moving trajectory"""
    if Traj_move is None or len(Traj_move) == 0:
        p.resetSimulation()
        print("*******", success_count * 1.0 / (trial + 1))
        continue  # transporting failed
    else:
        planning(Traj_move)
        print("arm q", env_core.robot.get_q_dq(env_core.robot.arm_dofs)[0])
        # input("after moving")
        print("palm", env_core.robot.get_link_pos_quat(env_core.robot.ee_id))
    # pose_saver.get_poses()
    # print(f"Pose before placing")
    # pprint.pprint(pose_saver.poses[-1])
    #
    # input("ready to place")

    # ##### fake: reset###
    # # reset only arm but not obj/finger
    # # reset obj/finger but not arm
    # # reset finger vel/obj vel only
    # # reset obj but not arm/finger -- good
    # # reset obj vel but not pos -- somewhat good
    # # reset obj but not arm/finger
    #
    # # # TODO:tmp
    # # state = get_relative_state_for_reset(top_id)
    # # print("after grasping", state)
    #
    # o_pos_pf = state['obj_pos_in_palm']
    # o_quat_pf = state['obj_quat_in_palm']
    # all_fin_q_init = state['all_fin_q']
    # tar_fin_q_init = state['fin_tar_q']
    # # env_core.robot.reset_with_certain_arm_q_finger_states(Qdestin, all_fin_q_init, tar_fin_q_init)
    # # env_core.robot.reset_only_certain_finger_states(all_fin_q_init, tar_fin_q_init)
    #
    # p_pos, p_quat = env_core.robot.get_link_pos_quat(env_core.robot.ee_id)
    # o_pos, o_quat = p.multiplyTransforms(p_pos, p_quat, o_pos_pf, o_quat_pf)
    # p.resetBasePositionAndOrientation(top_id, o_pos, o_quat)
    # p.stepSimulation()
    # # env_core.robot.reset_with_certain_arm_q_finger_states(Qdestin, all_fin_q_init, tar_fin_q_init)
    # # env_core.robot.reset_only_certain_finger_states(all_fin_q_init, tar_fin_q_init)
    # p.resetBasePositionAndOrientation(top_id, o_pos, o_quat)
    # p.stepSimulation()
    # #####
    # # input("reset")

    """Prepare for placing"""
    env_core.change_control_skip_scaling(c_skip=PLACING_CONTROL_SKIP)
    t_pos, t_up, b_pos, b_up, t_half_height = get_stacking_obs(
        obj_state=objs,
        top_oid=top_id,
        btm_oid=btm_id,
    )
    # l_* hold the delayed ("last seen") observations used to emulate
    # vision latency during placing
    l_t_pos, l_t_up, l_b_pos, l_b_up, l_t_half_height = (
        t_pos,
        t_up,
        b_pos,
        b_up,
        t_half_height,
    )

    # an ugly hack to force Bullet compute forward kinematics
    _ = get_stack_policy_obs_tensor(
        p_tx, p_ty, p_tz, t_half_height, top_shape, t_pos, t_up, b_pos, b_up
    )
    p_obs = get_stack_policy_obs_tensor(
        p_tx, p_ty, p_tz, t_half_height, top_shape, t_pos, t_up, b_pos, b_up
    )
    # print("pobs", p_obs)
    # input("ready to place")

    """Execute placing"""
    print(f"Executing placing...")
    for i in tqdm(range(PLACE_END_STEP)):
        with torch.no_grad():
            if GRASP_SPH_ON and top_shape == -1:
                value, action, _, recurrent_hidden_states = sph_p_actor_critic.act(
                    p_obs, recurrent_hidden_states, masks, deterministic=args.det
                )
            else:
                value, action, _, recurrent_hidden_states = p_actor_critic.act(
                    p_obs, recurrent_hidden_states, masks, deterministic=args.det
                )
        env_core.step(policy.unwrap_action(action, IS_CUDA))
        # refresh the "vision" only every VISION_DELAY steps; the policy
        # always consumes the delayed l_* copies below
        if (i + 1) % VISION_DELAY == 0:
            l_t_pos, l_t_up, l_b_pos, l_b_up, l_t_half_height = (
                t_pos,
                t_up,
                b_pos,
                b_up,
                t_half_height,
            )
            t_pos, t_up, b_pos, b_up, t_half_height = get_stacking_obs(
                obj_state=objs,
                top_oid=top_id,
                btm_oid=btm_id,
            )
        p_obs = get_stack_policy_obs_tensor(
            p_tx, p_ty, p_tz, l_t_half_height, top_shape, l_t_pos, l_t_up, l_b_pos, l_b_up
        )
        # print(action)
        # print(p_obs)
        # input("press enter g_obs")
        masks.fill_(1.0)
    # pose_saver.get_poses()
    # print(f"Pose after placing")
    # pprint.pprint(pose_saver.poses[-1])

    if WITH_RETRACT:
        print(f"Starting release trajectory")
        Qretract_init = env_core.robot.get_q_dq(env_core.robot.arm_dofs)[0]
        retract_save_path = homedir + "/container_data/PB_RETRACT.npz"
        retract_read_path = homedir + "/container_data/OR_RETRACT.npz"
        # the released object becomes an obstacle at the place target
        OBJECTS[0, :] = np.array([p_tx, p_ty, p_tz, 0.0])  # note: p_tz is 0 for placing
        Traj_reach = openrave.get_traj_from_openrave_container(
            OBJECTS, Qretract_init, None, retract_save_path, retract_read_path
        )
        if Traj_reach is None or len(Traj_reach) == 0:
            p.resetSimulation()
            print("*******", success_count * 1.0 / (trial + 1))
            continue  # retracting failed
        else:
            planning(Traj_reach, restore_fingers=True)

    # Success: top object rests above the place height, near the target xy.
    t_pos, t_quat = p.getBasePositionAndOrientation(top_id)
    if t_pos[2] - p_tz > 0.05 and (t_pos[0] - p_tx)**2 + (t_pos[1] - p_ty)**2 < 0.1**2:
        # TODO: ptz noisy a very rough check
        success_count += 1
    # this trial got through all OpenRAVE planning stages
    openrave_success_count += 1
    p.resetSimulation()
    print("*******", success_count * 1.0 / (trial+1), trial+1)
    print("******* w/o OR", success_count * 1.0 / openrave_success_count, openrave_success_count)
p.disconnect()
# Final statistics.
# Bug fix: the original divided by openrave_success_count unconditionally,
# raising ZeroDivisionError when every trial failed during OpenRAVE
# planning; it also leaked the stats file handle and had a stray ')' in
# the written format strings.
total_rate = success_count * 1.0 / NUM_TRIALS
if openrave_success_count > 0:
    no_or_rate = success_count * 1.0 / openrave_success_count
else:
    no_or_rate = 0.0
print("*******total", total_rate)
print("*******total w/o OR", no_or_rate)
with open("final_stats.txt", "a") as f:
    f.write(f"*******total: {total_rate:.3f}")
    f.write(f"*******total w/o OR: {no_or_rate:.3f}")
    f.write("\n")
|
<gh_stars>1-10
from django.contrib.comments.forms import CommentForm
from django.contrib.contenttypes.models import ContentType
from django.template import Template, Context
from regressiontests.comment_tests.models import Article, Author
from regressiontests.comment_tests.tests import CommentTestCase
class CommentTemplateTagTests(CommentTestCase):
def render(self, t, **c):
ctx = Context(c)
out = Template(t).render(ctx)
return ctx, out
def testCommentFormTarget(self):
ctx, out = self.render("{% load comments %}{% comment_form_target %}")
self.assertEqual(out, "/post/")
def testGetCommentForm(self, tag=None):
t = "{% load comments %}" + (tag or "{% get_comment_form for comment_tests.article a.id as form %}")
ctx, out = self.render(t, a=Article.objects.get(pk=1))
self.assertEqual(out, "")
self.assertTrue(isinstance(ctx["form"], CommentForm))
def testGetCommentFormFromLiteral(self):
self.testGetCommentForm("{% get_comment_form for comment_tests.article 1 as form %}")
def testGetCommentFormFromObject(self):
self.testGetCommentForm("{% get_comment_form for a as form %}")
def testRenderCommentForm(self, tag=None):
t = "{% load comments %}" + (tag or "{% render_comment_form for comment_tests.article a.id %}")
ctx, out = self.render(t, a=Article.objects.get(pk=1))
self.assertTrue(out.strip().startswith("<form action="))
self.assertTrue(out.strip().endswith("</form>"))
def testRenderCommentFormFromLiteral(self):
self.testRenderCommentForm("{% render_comment_form for comment_tests.article 1 %}")
def testRenderCommentFormFromObject(self):
self.testRenderCommentForm("{% render_comment_form for a %}")
def testRenderCommentFormFromObjectWithQueryCount(self):
def test():
self.testRenderCommentFormFromObject()
self.assertNumQueries(1, test)
def testGetCommentCount(self, tag=None):
self.createSomeComments()
t = "{% load comments %}" + (tag or "{% get_comment_count for comment_tests.article a.id as cc %}") + "{{ cc }}"
ctx, out = self.render(t, a=Article.objects.get(pk=1))
self.assertEqual(out, "2")
def testGetCommentCountFromLiteral(self):
self.testGetCommentCount("{% get_comment_count for comment_tests.article 1 as cc %}")
def testGetCommentCountFromObject(self):
self.testGetCommentCount("{% get_comment_count for a as cc %}")
def testGetCommentList(self, tag=None):
c1, c2, c3, c4 = self.createSomeComments()
t = "{% load comments %}" + (tag or "{% get_comment_list for comment_tests.author a.id as cl %}")
ctx, out = self.render(t, a=Author.objects.get(pk=1))
self.assertEqual(out, "")
self.assertEqual(list(ctx["cl"]), [c2])
def testGetCommentListFromLiteral(self):
self.testGetCommentList("{% get_comment_list for comment_tests.author 1 as cl %}")
def testGetCommentListFromObject(self):
self.testGetCommentList("{% get_comment_list for a as cl %}")
def testGetCommentPermalink(self):
c1, c2, c3, c4 = self.createSomeComments()
t = "{% load comments %}{% get_comment_list for comment_tests.author author.id as cl %}"
t += "{% get_comment_permalink cl.0 %}"
ct = ContentType.objects.get_for_model(Author)
author = Author.objects.get(pk=1)
ctx, out = self.render(t, author=author)
self.assertEqual(out, "/cr/%s/%s/#c%s" % (ct.id, author.id, c2.id))
def testGetCommentPermalinkFormatted(self):
c1, c2, c3, c4 = self.createSomeComments()
t = "{% load comments %}{% get_comment_list for comment_tests.author author.id as cl %}"
t += "{% get_comment_permalink cl.0 '#c%(id)s-by-%(user_name)s' %}"
ct = ContentType.objects.get_for_model(Author)
author = Author.objects.get(pk=1)
ctx, out = self.render(t, author=author)
self.assertEqual(out, "/cr/%s/%s/#c%s-by-Joe Somebody" % (ct.id, author.id, c2.id))
def testRenderCommentList(self, tag=None):
t = "{% load comments %}" + (tag or "{% render_comment_list for comment_tests.article a.id %}")
ctx, out = self.render(t, a=Article.objects.get(pk=1))
self.assertTrue(out.strip().startswith("<dl id=\"comments\">"))
self.assertTrue(out.strip().endswith("</dl>"))
def testRenderCommentListFromLiteral(self):
    """render_comment_list should also accept a literal object pk."""
    literal_tag = "{% render_comment_list for comment_tests.article 1 %}"
    self.testRenderCommentList(literal_tag)
def testRenderCommentListFromObject(self):
    """render_comment_list should also accept a context variable holding the object."""
    object_tag = "{% render_comment_list for a %}"
    self.testRenderCommentList(object_tag)
|
# goal
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import rospy
from geometry_msgs.msg import Vector3, Twist, PoseStamped
from nav_msgs.msg import Odometry
class newGoal():
    """Publishes navigation goals and tracks whether the robot is idle.

    Subscribes to /odom to watch the robot's velocity; a queued goal is only
    dispatched once the robot has shown no motion for 30 consecutive 0.1 s
    samples (~3 seconds of stillness).
    """

    def __init__(self, x=0, y=0, z=0):
        # x/y/z are kept for interface compatibility but are unused here
        self.curr_vel = Twist()  # most recent twist reported on /odom
        self.mean_vel = []       # sliding window of recent speed magnitudes
        self.idle = False        # True once the whole window is zero
        # subscriber
        self.sub_pose = rospy.Subscriber('/odom', Odometry, self.updateVel)
        # publishers: custom /goal topic, or move_base's simple-goal topic
        self.pub_goal = rospy.Publisher('/goal', PoseStamped, queue_size=1)
        self.pub_mvb_goal = rospy.Publisher('/move_base_simple/goal', PoseStamped, queue_size=1)
        # sample the velocity window at 10 Hz
        self.vel_timer = rospy.Timer(rospy.Duration(0.1), self.cbMeanVel)

    def updateVel(self, msg):
        """Odometry callback: cache the latest twist."""
        self.curr_vel = msg.twist.twist

    def cbMeanVel(self, event):
        """Timer callback: maintain a 30-sample window of motion magnitudes."""
        # crude speed proxy: |vx| + |vy| + |wz|
        v = abs(self.curr_vel.linear.x) + abs(self.curr_vel.linear.y) + abs(self.curr_vel.angular.z)
        self.mean_vel.append(v)
        if len(self.mean_vel) > 30:
            self.mean_vel.pop(0)
        if sum(self.mean_vel) > 0:
            self.idle = False
        else:
            # none of the 30 vels > 0
            print("robot is in Idle Mode")
            self.idle = True
            self.mean_vel = []

    def send_goal(self, goal, movebase):
        """Wrap a Vector3 goal in a map-frame PoseStamped and publish it.

        NOTE(review): goal.z is written straight into orientation.z with
        w fixed at 1, i.e. an un-normalized quaternion — presumably a rough
        yaw encoding; confirm against the consumer of these topics.
        """
        print("goal received")
        pgoal = PoseStamped()
        pgoal.header.stamp = rospy.get_rostime()
        pgoal.header.frame_id = "map"
        pgoal.pose.position.x = goal.x
        pgoal.pose.position.y = goal.y
        pgoal.pose.orientation.z = goal.z
        pgoal.pose.orientation.w = 1
        # publish on move_base's topic or the custom one
        if movebase:
            self.pub_mvb_goal.publish(pgoal)
        else:
            self.pub_goal.publish(pgoal)

    def send_multiple_goals(self, goals, mode):
        """Send each goal in turn, waiting for the robot to go idle between them."""
        n_goals = len(goals)
        n = 0
        # BUGFIX: the original loop busy-waited at 100% CPU and never exited
        # on node shutdown; sleep between polls and honor rospy shutdown.
        while n < n_goals and not rospy.is_shutdown():
            if self.idle:
                self.send_goal(goals[n], mode)
                n += 1
                self.idle = False
            rospy.sleep(0.1)

    def on_shutdown(self):
        """Shutdown hook; nothing to clean up."""
        return
def run():
    """Entry point: start the node and send the hard-coded demo waypoints."""
    print('moveBase_sendGoal loaded')
    rospy.init_node('sendGoal', anonymous=False)
    # test goals as (x, y, orientation-z) triples
    waypoints = [
        (22.6, -0.743, 0.97),
        (9.8, 13.86, -0.75),
        (20.33, 15.47, 0.9),
        (0, 0, 0),
    ]
    mgoals = []
    for wx, wy, wz in waypoints:
        g = Vector3()
        g.x = wx
        g.y = wy
        g.z = wz
        mgoals.append(g)
    sender = newGoal()
    sender.send_multiple_goals(mgoals, False)
    rospy.on_shutdown(sender.on_shutdown)
    rospy.spin()
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    run()
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import mock
from behave import then
from behave import when
from paasta_tools.cli.cmds import logs
from paasta_tools.utils import paasta_print
@when('we tail paasta logs and let threads run')
def tail_paasta_logs_let_threads_be_threads(context):
    """This test lets tail_paasta_logs() fire off processes to do work. We
    verify that the work was done, basically irrespective of how it was done.
    """
    service = 'fake_service'
    context.levels = ['fake_level1', 'fake_level2']
    context.components = ['deploy', 'monitoring']
    context.clusters = ['fake_cluster1', 'fake_cluster2']
    context.instances = ['fake_instance']
    # Patches bound to ``context`` attributes are inspected later by the
    # matching @then step; the others only need to exist for this call.
    with mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs', autospec=True,
    ) as context.determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail', autospec=True,
    ) as scribe_tail_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log', autospec=True,
    ) as context.print_log_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribereader', autospec=True,
    ):
        # Two fake envs -> the tailer should spawn one worker per env.
        context.determine_scribereader_envs_patch.return_value = ['env1', 'env2']

        def scribe_tail_side_effect(
            self,
            scribe_env,
            stream_name,
            service,
            levels,
            components,
            clusters,
            instances,
            queue,
            filter_fn,
            parse_fn=None,
        ):
            # Fake worker: deposit one line per env on the shared queue.
            # The print here is just for debugging
            paasta_print('fake log line added for %s' % scribe_env)
            queue.put('fake log line added for %s' % scribe_env)
            # This sleep() was the straw that broke the camel's back
            # and forced me to move this test into the integration
            # suite. The test is flaky without the sleep, and the
            # sleep make it a lousy unit test.
            time.sleep(0.05)

        scribe_tail_patch.side_effect = scribe_tail_side_effect
        context.scribe_log_reader = logs.ScribeLogReader(cluster_map={'env1': 'env1', 'env2': 'env2'})
        context.scribe_log_reader.tail_logs(
            service, context.levels, context.components, context.clusters, context.instances,
        )
@then('one message is displayed from each scribe env')
def step_impl(context):
    """Check that the tailer resolved envs per cluster and printed one line per env."""
    reader = context.scribe_log_reader
    for cluster_name in context.clusters:
        context.determine_scribereader_envs_patch.assert_any_call(
            reader,
            context.components,
            cluster_name,
        )
    # NOTE: Assertions about scribe_tail_patch break under multiprocessing.
    # We think this is because the patched scribe_tail's attributes
    # (call_count, call_args, etc.) don't get updated here in the main
    # thread where we can inspect them. (The patched-in code does run,
    # however, since it prints debugging messages.)
    #
    # Instead, we'll rely on what we can see, which is the result of the
    # thread's work deposited in the shared queue.
    assert context.print_log_patch.call_count == 2
    context.print_log_patch.assert_any_call('fake log line added for env1', context.levels, False)
    context.print_log_patch.assert_any_call('fake log line added for env2', context.levels, False)
|
# -*- coding: utf-8 -*-
"""
Multi-lib backend for POT
The goal is to write backend-agnostic code. Whether you're using Numpy, PyTorch,
or Jax, POT code should work nonetheless.
To achieve that, POT provides backend classes which implements functions in their respective backend
imitating Numpy API. As a convention, we use nx instead of np to refer to the backend.
Examples
--------
>>> from ot.utils import list_to_array
>>> from ot.backend import get_backend
>>> def f(a, b): # the function does not know which backend to use
... a, b = list_to_array(a, b) # if a list is given, make it an array
... nx = get_backend(a, b) # infer the backend from the arguments
... c = nx.dot(a, b) # now use the backend to do any calculation
... return c
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: MIT License
import numpy as np
import scipy.special as scipy
from scipy.sparse import issparse, coo_matrix, csr_matrix
# Optional backends: when an import fails, the module-level name is set to a
# falsy sentinel so get_backend_list() simply skips that backend.
try:
    import torch
    torch_type = torch.Tensor
except ImportError:
    torch = False
    torch_type = float  # placeholder so isinstance() checks never match

try:
    import jax
    import jax.numpy as jnp
    import jax.scipy.special as jscipy
    jax_type = jax.numpy.ndarray
except ImportError:
    jax = False
    jax_type = float  # placeholder so isinstance() checks never match

# Error template used when arrays from different backends are mixed.
str_type_error = "All array should be from the same type/backend. Current types are : {}"
def get_backend_list():
    """Returns the list of available backends"""
    backends = [NumpyBackend()]
    if torch:
        backends.append(TorchBackend())
    if jax:
        backends.append(JaxBackend())
    return backends
def get_backend(*args):
    """Returns the proper backend for a list of input arrays

    Raises ValueError if no argument is given, if the arrays do not all come
    from the same backend, or if the backend is unknown.
    """
    # check that some arrays given (empty tuple is falsy)
    if not args:
        raise ValueError(" The function takes at least one parameter")
    # check all same type
    if len(set(type(a) for a in args)) != 1:
        raise ValueError(str_type_error.format([type(a) for a in args]))
    if isinstance(args[0], np.ndarray):
        return NumpyBackend()
    elif isinstance(args[0], torch_type):
        return TorchBackend()
    elif isinstance(args[0], jax_type):
        return JaxBackend()
    else:
        raise ValueError("Unknown type of non implemented backend.")
def to_numpy(*args):
    """Returns numpy arrays from any compatible backend"""
    converted = [get_backend(a).to_numpy(a) for a in args]
    # a single input yields a single array rather than a one-element list
    return converted[0] if len(converted) == 1 else converted
class Backend():
    """
    Backend abstract class.
    Implementations: :py:class:`JaxBackend`, :py:class:`NumpyBackend`, :py:class:`TorchBackend`

    - The `__name__` class attribute refers to the name of the backend.
    - The `__type__` class attribute refers to the data structure used by the backend.
    """

    # Subclasses override these with the backend name, its native array type,
    # sample tensors (one per supported dtype/device), and an RNG state.
    __name__ = None
    __type__ = None
    __type_list__ = None
    rng_ = None

    def __str__(self):
        return self.__name__

    # convert to numpy
    def to_numpy(self, a):
        """Returns the numpy version of a tensor"""
        raise NotImplementedError()

    # convert from numpy
    def from_numpy(self, a, type_as=None):
        """Creates a tensor cloning a numpy array, with the given precision (defaulting to input's precision) and the given device (in case of GPUs)"""
        raise NotImplementedError()

    def set_gradients(self, val, inputs, grads):
        """Define the gradients for the value val wrt the inputs """
        raise NotImplementedError()

    def zeros(self, shape, type_as=None):
        r"""
        Creates a tensor full of zeros.
        This function follows the api from :any:`numpy.zeros`
        See: https://numpy.org/doc/stable/reference/generated/numpy.zeros.html
        """
        raise NotImplementedError()

    def ones(self, shape, type_as=None):
        r"""
        Creates a tensor full of ones.
        This function follows the api from :any:`numpy.ones`
        See: https://numpy.org/doc/stable/reference/generated/numpy.ones.html
        """
        raise NotImplementedError()

    def arange(self, stop, start=0, step=1, type_as=None):
        r"""
        Returns evenly spaced values within a given interval.
        This function follows the api from :any:`numpy.arange`
        See: https://numpy.org/doc/stable/reference/generated/numpy.arange.html
        """
        raise NotImplementedError()

    def full(self, shape, fill_value, type_as=None):
        r"""
        Creates a tensor with given shape, filled with given value.
        This function follows the api from :any:`numpy.full`
        See: https://numpy.org/doc/stable/reference/generated/numpy.full.html
        """
        raise NotImplementedError()

    def eye(self, N, M=None, type_as=None):
        r"""
        Creates the identity matrix of given size.
        This function follows the api from :any:`numpy.eye`
        See: https://numpy.org/doc/stable/reference/generated/numpy.eye.html
        """
        raise NotImplementedError()

    def sum(self, a, axis=None, keepdims=False):
        r"""
        Sums tensor elements over given dimensions.
        This function follows the api from :any:`numpy.sum`
        See: https://numpy.org/doc/stable/reference/generated/numpy.sum.html
        """
        raise NotImplementedError()

    def cumsum(self, a, axis=None):
        r"""
        Returns the cumulative sum of tensor elements over given dimensions.
        This function follows the api from :any:`numpy.cumsum`
        See: https://numpy.org/doc/stable/reference/generated/numpy.cumsum.html
        """
        raise NotImplementedError()

    def max(self, a, axis=None, keepdims=False):
        r"""
        Returns the maximum of an array or maximum along given dimensions.
        This function follows the api from :any:`numpy.amax`
        See: https://numpy.org/doc/stable/reference/generated/numpy.amax.html
        """
        raise NotImplementedError()

    def min(self, a, axis=None, keepdims=False):
        r"""
        Returns the minimum of an array or minimum along given dimensions.
        This function follows the api from :any:`numpy.amin`
        See: https://numpy.org/doc/stable/reference/generated/numpy.amin.html
        """
        raise NotImplementedError()

    def maximum(self, a, b):
        r"""
        Returns element-wise maximum of array elements.
        This function follows the api from :any:`numpy.maximum`
        See: https://numpy.org/doc/stable/reference/generated/numpy.maximum.html
        """
        raise NotImplementedError()

    def minimum(self, a, b):
        r"""
        Returns element-wise minimum of array elements.
        This function follows the api from :any:`numpy.minimum`
        See: https://numpy.org/doc/stable/reference/generated/numpy.minimum.html
        """
        raise NotImplementedError()

    def dot(self, a, b):
        r"""
        Returns the dot product of two tensors.
        This function follows the api from :any:`numpy.dot`
        See: https://numpy.org/doc/stable/reference/generated/numpy.dot.html
        """
        raise NotImplementedError()

    def abs(self, a):
        r"""
        Computes the absolute value element-wise.
        This function follows the api from :any:`numpy.absolute`
        See: https://numpy.org/doc/stable/reference/generated/numpy.absolute.html
        """
        raise NotImplementedError()

    def exp(self, a):
        r"""
        Computes the exponential value element-wise.
        This function follows the api from :any:`numpy.exp`
        See: https://numpy.org/doc/stable/reference/generated/numpy.exp.html
        """
        raise NotImplementedError()

    def log(self, a):
        r"""
        Computes the natural logarithm, element-wise.
        This function follows the api from :any:`numpy.log`
        See: https://numpy.org/doc/stable/reference/generated/numpy.log.html
        """
        raise NotImplementedError()

    def sqrt(self, a):
        r"""
        Returns the non-negative square root of a tensor, element-wise.
        This function follows the api from :any:`numpy.sqrt`
        See: https://numpy.org/doc/stable/reference/generated/numpy.sqrt.html
        """
        raise NotImplementedError()

    def power(self, a, exponents):
        r"""
        First tensor elements raised to powers from second tensor, element-wise.
        This function follows the api from :any:`numpy.power`
        See: https://numpy.org/doc/stable/reference/generated/numpy.power.html
        """
        raise NotImplementedError()

    def norm(self, a):
        r"""
        Computes the matrix frobenius norm.
        This function follows the api from :any:`numpy.linalg.norm`
        See: https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html
        """
        raise NotImplementedError()

    def any(self, a):
        r"""
        Tests whether any tensor element along given dimensions evaluates to True.
        This function follows the api from :any:`numpy.any`
        See: https://numpy.org/doc/stable/reference/generated/numpy.any.html
        """
        raise NotImplementedError()

    def isnan(self, a):
        r"""
        Tests element-wise for NaN and returns result as a boolean tensor.
        This function follows the api from :any:`numpy.isnan`
        See: https://numpy.org/doc/stable/reference/generated/numpy.isnan.html
        """
        raise NotImplementedError()

    def isinf(self, a):
        r"""
        Tests element-wise for positive or negative infinity and returns result as a boolean tensor.
        This function follows the api from :any:`numpy.isinf`
        See: https://numpy.org/doc/stable/reference/generated/numpy.isinf.html
        """
        raise NotImplementedError()

    def einsum(self, subscripts, *operands):
        r"""
        Evaluates the Einstein summation convention on the operands.
        This function follows the api from :any:`numpy.einsum`
        See: https://numpy.org/doc/stable/reference/generated/numpy.einsum.html
        """
        raise NotImplementedError()

    def sort(self, a, axis=-1):
        r"""
        Returns a sorted copy of a tensor.
        This function follows the api from :any:`numpy.sort`
        See: https://numpy.org/doc/stable/reference/generated/numpy.sort.html
        """
        raise NotImplementedError()

    def argsort(self, a, axis=None):
        r"""
        Returns the indices that would sort a tensor.
        This function follows the api from :any:`numpy.argsort`
        See: https://numpy.org/doc/stable/reference/generated/numpy.argsort.html
        """
        # NOTE(review): concrete backends declare axis=-1 here — confirm
        # which default is intended and harmonize.
        raise NotImplementedError()

    def searchsorted(self, a, v, side='left'):
        r"""
        Finds indices where elements should be inserted to maintain order in given tensor.
        This function follows the api from :any:`numpy.searchsorted`
        See: https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html
        """
        raise NotImplementedError()

    def flip(self, a, axis=None):
        r"""
        Reverses the order of elements in a tensor along given dimensions.
        This function follows the api from :any:`numpy.flip`
        See: https://numpy.org/doc/stable/reference/generated/numpy.flip.html
        """
        raise NotImplementedError()

    def clip(self, a, a_min, a_max):
        """
        Limits the values in a tensor.
        This function follows the api from :any:`numpy.clip`
        See: https://numpy.org/doc/stable/reference/generated/numpy.clip.html
        """
        raise NotImplementedError()

    def repeat(self, a, repeats, axis=None):
        r"""
        Repeats elements of a tensor.
        This function follows the api from :any:`numpy.repeat`
        See: https://numpy.org/doc/stable/reference/generated/numpy.repeat.html
        """
        raise NotImplementedError()

    def take_along_axis(self, arr, indices, axis):
        r"""
        Gathers elements of a tensor along given dimensions.
        This function follows the api from :any:`numpy.take_along_axis`
        See: https://numpy.org/doc/stable/reference/generated/numpy.take_along_axis.html
        """
        raise NotImplementedError()

    def concatenate(self, arrays, axis=0):
        r"""
        Joins a sequence of tensors along an existing dimension.
        This function follows the api from :any:`numpy.concatenate`
        See: https://numpy.org/doc/stable/reference/generated/numpy.concatenate.html
        """
        raise NotImplementedError()

    def zero_pad(self, a, pad_width):
        r"""
        Pads a tensor.
        This function follows the api from :any:`numpy.pad`
        See: https://numpy.org/doc/stable/reference/generated/numpy.pad.html
        """
        raise NotImplementedError()

    def argmax(self, a, axis=None):
        r"""
        Returns the indices of the maximum values of a tensor along given dimensions.
        This function follows the api from :any:`numpy.argmax`
        See: https://numpy.org/doc/stable/reference/generated/numpy.argmax.html
        """
        raise NotImplementedError()

    def mean(self, a, axis=None):
        r"""
        Computes the arithmetic mean of a tensor along given dimensions.
        This function follows the api from :any:`numpy.mean`
        See: https://numpy.org/doc/stable/reference/generated/numpy.mean.html
        """
        raise NotImplementedError()

    def std(self, a, axis=None):
        r"""
        Computes the standard deviation of a tensor along given dimensions.
        This function follows the api from :any:`numpy.std`
        See: https://numpy.org/doc/stable/reference/generated/numpy.std.html
        """
        raise NotImplementedError()

    def linspace(self, start, stop, num):
        r"""
        Returns a specified number of evenly spaced values over a given interval.
        This function follows the api from :any:`numpy.linspace`
        See: https://numpy.org/doc/stable/reference/generated/numpy.linspace.html
        """
        raise NotImplementedError()

    def meshgrid(self, a, b):
        r"""
        Returns coordinate matrices from coordinate vectors (Numpy convention).
        This function follows the api from :any:`numpy.meshgrid`
        See: https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html
        """
        raise NotImplementedError()

    def diag(self, a, k=0):
        r"""
        Extracts or constructs a diagonal tensor.
        This function follows the api from :any:`numpy.diag`
        See: https://numpy.org/doc/stable/reference/generated/numpy.diag.html
        """
        raise NotImplementedError()

    def unique(self, a):
        r"""
        Finds unique elements of given tensor.
        This function follows the api from :any:`numpy.unique`
        See: https://numpy.org/doc/stable/reference/generated/numpy.unique.html
        """
        raise NotImplementedError()

    def logsumexp(self, a, axis=None):
        r"""
        Computes the log of the sum of exponentials of input elements.
        This function follows the api from :any:`scipy.special.logsumexp`
        See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.logsumexp.html
        """
        raise NotImplementedError()

    def stack(self, arrays, axis=0):
        r"""
        Joins a sequence of tensors along a new dimension.
        This function follows the api from :any:`numpy.stack`
        See: https://numpy.org/doc/stable/reference/generated/numpy.stack.html
        """
        raise NotImplementedError()

    def outer(self, a, b):
        r"""
        Computes the outer product between two vectors.
        This function follows the api from :any:`numpy.outer`
        See: https://numpy.org/doc/stable/reference/generated/numpy.outer.html
        """
        raise NotImplementedError()

    def reshape(self, a, shape):
        r"""
        Gives a new shape to a tensor without changing its data.
        This function follows the api from :any:`numpy.reshape`
        See: https://numpy.org/doc/stable/reference/generated/numpy.reshape.html
        """
        raise NotImplementedError()

    def seed(self, seed=None):
        r"""
        Sets the seed for the random generator.
        This function follows the api from :any:`numpy.random.seed`
        See: https://numpy.org/doc/stable/reference/generated/numpy.random.seed.html
        """
        raise NotImplementedError()

    def rand(self, *size, type_as=None):
        r"""
        Generate uniform random numbers.
        This function follows the api from :any:`numpy.random.rand`
        See: https://numpy.org/doc/stable/reference/generated/numpy.random.rand.html
        """
        raise NotImplementedError()

    def randn(self, *size, type_as=None):
        r"""
        Generate normal Gaussian random numbers.
        This function follows the api from :any:`numpy.random.rand`
        See: https://numpy.org/doc/stable/reference/generated/numpy.random.rand.html
        """
        raise NotImplementedError()

    def coo_matrix(self, data, rows, cols, shape=None, type_as=None):
        r"""
        Creates a sparse tensor in COOrdinate format.
        This function follows the api from :any:`scipy.sparse.coo_matrix`
        See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.html
        """
        raise NotImplementedError()

    def issparse(self, a):
        r"""
        Checks whether or not the input tensor is a sparse tensor.
        This function follows the api from :any:`scipy.sparse.issparse`
        See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.issparse.html
        """
        raise NotImplementedError()

    def tocsr(self, a):
        r"""
        Converts this matrix to Compressed Sparse Row format.
        This function follows the api from :any:`scipy.sparse.coo_matrix.tocsr`
        See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.tocsr.html
        """
        raise NotImplementedError()

    def eliminate_zeros(self, a, threshold=0.):
        r"""
        Removes entries smaller than the given threshold from the sparse tensor.
        This function follows the api from :any:`scipy.sparse.csr_matrix.eliminate_zeros`
        See: https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.csr_matrix.eliminate_zeros.html
        """
        raise NotImplementedError()

    def todense(self, a):
        r"""
        Converts a sparse tensor to a dense tensor.
        This function follows the api from :any:`scipy.sparse.csr_matrix.toarray`
        See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.toarray.html
        """
        raise NotImplementedError()

    def where(self, condition, x, y):
        r"""
        Returns elements chosen from x or y depending on condition.
        This function follows the api from :any:`numpy.where`
        See: https://numpy.org/doc/stable/reference/generated/numpy.where.html
        """
        raise NotImplementedError()

    def copy(self, a):
        r"""
        Returns a copy of the given tensor.
        This function follows the api from :any:`numpy.copy`
        See: https://numpy.org/doc/stable/reference/generated/numpy.copy.html
        """
        raise NotImplementedError()

    def allclose(self, a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
        r"""
        Returns True if two arrays are element-wise equal within a tolerance.
        This function follows the api from :any:`numpy.allclose`
        See: https://numpy.org/doc/stable/reference/generated/numpy.allclose.html
        """
        raise NotImplementedError()

    def dtype_device(self, a):
        r"""
        Returns the dtype and the device of the given tensor.
        """
        raise NotImplementedError()

    def assert_same_dtype_device(self, a, b):
        r"""
        Checks whether or not the two given inputs have the same dtype as well as the same device
        """
        raise NotImplementedError()
class NumpyBackend(Backend):
    """
    NumPy implementation of the backend

    - `__name__` is "numpy"
    - `__type__` is np.ndarray
    """

    __name__ = 'numpy'
    __type__ = np.ndarray
    # one sample scalar per supported precision, used for type_as dispatch
    __type_list__ = [np.array(1, dtype=np.float32),
                     np.array(1, dtype=np.float64)]
    rng_ = np.random.RandomState()

    def to_numpy(self, a):
        # already a numpy array; nothing to convert
        return a

    def from_numpy(self, a, type_as=None):
        if type_as is None:
            return a
        elif isinstance(a, float):
            # plain Python floats carry no dtype to convert
            return a
        else:
            return a.astype(type_as.dtype)

    def set_gradients(self, val, inputs, grads):
        # No gradients for numpy
        return val

    def zeros(self, shape, type_as=None):
        if type_as is None:
            return np.zeros(shape)
        else:
            return np.zeros(shape, dtype=type_as.dtype)

    def ones(self, shape, type_as=None):
        if type_as is None:
            return np.ones(shape)
        else:
            return np.ones(shape, dtype=type_as.dtype)

    def arange(self, stop, start=0, step=1, type_as=None):
        return np.arange(start, stop, step)

    def full(self, shape, fill_value, type_as=None):
        if type_as is None:
            return np.full(shape, fill_value)
        else:
            return np.full(shape, fill_value, dtype=type_as.dtype)

    def eye(self, N, M=None, type_as=None):
        if type_as is None:
            return np.eye(N, M)
        else:
            return np.eye(N, M, dtype=type_as.dtype)

    def sum(self, a, axis=None, keepdims=False):
        return np.sum(a, axis, keepdims=keepdims)

    def cumsum(self, a, axis=None):
        return np.cumsum(a, axis)

    def max(self, a, axis=None, keepdims=False):
        return np.max(a, axis, keepdims=keepdims)

    def min(self, a, axis=None, keepdims=False):
        return np.min(a, axis, keepdims=keepdims)

    def maximum(self, a, b):
        return np.maximum(a, b)

    def minimum(self, a, b):
        return np.minimum(a, b)

    def dot(self, a, b):
        return np.dot(a, b)

    def abs(self, a):
        return np.abs(a)

    def exp(self, a):
        return np.exp(a)

    def log(self, a):
        return np.log(a)

    def sqrt(self, a):
        return np.sqrt(a)

    def power(self, a, exponents):
        return np.power(a, exponents)

    def norm(self, a):
        # Frobenius norm: sqrt of the sum of squared entries
        return np.sqrt(np.sum(np.square(a)))

    def any(self, a):
        return np.any(a)

    def isnan(self, a):
        return np.isnan(a)

    def isinf(self, a):
        return np.isinf(a)

    def einsum(self, subscripts, *operands):
        return np.einsum(subscripts, *operands)

    def sort(self, a, axis=-1):
        return np.sort(a, axis)

    def argsort(self, a, axis=-1):
        return np.argsort(a, axis)

    def searchsorted(self, a, v, side='left'):
        if a.ndim == 1:
            return np.searchsorted(a, v, side)
        else:
            # this is a not very efficient way to make numpy
            # searchsorted work on 2d arrays
            ret = np.empty(v.shape, dtype=int)
            for i in range(a.shape[0]):
                ret[i, :] = np.searchsorted(a[i, :], v[i, :], side)
            return ret

    def flip(self, a, axis=None):
        return np.flip(a, axis)

    def outer(self, a, b):
        return np.outer(a, b)

    def clip(self, a, a_min, a_max):
        return np.clip(a, a_min, a_max)

    def repeat(self, a, repeats, axis=None):
        return np.repeat(a, repeats, axis)

    def take_along_axis(self, arr, indices, axis):
        return np.take_along_axis(arr, indices, axis)

    def concatenate(self, arrays, axis=0):
        return np.concatenate(arrays, axis)

    def zero_pad(self, a, pad_width):
        # np.pad defaults to constant (zero) padding
        return np.pad(a, pad_width)

    def argmax(self, a, axis=None):
        return np.argmax(a, axis=axis)

    def mean(self, a, axis=None):
        return np.mean(a, axis=axis)

    def std(self, a, axis=None):
        return np.std(a, axis=axis)

    def linspace(self, start, stop, num):
        return np.linspace(start, stop, num)

    def meshgrid(self, a, b):
        return np.meshgrid(a, b)

    def diag(self, a, k=0):
        return np.diag(a, k)

    def unique(self, a):
        return np.unique(a)

    def logsumexp(self, a, axis=None):
        # scipy.special is imported as `scipy` at module level
        return scipy.logsumexp(a, axis=axis)

    def stack(self, arrays, axis=0):
        return np.stack(arrays, axis)

    def reshape(self, a, shape):
        return np.reshape(a, shape)

    def seed(self, seed=None):
        if seed is not None:
            self.rng_.seed(seed)

    def rand(self, *size, type_as=None):
        return self.rng_.rand(*size)

    def randn(self, *size, type_as=None):
        return self.rng_.randn(*size)

    def coo_matrix(self, data, rows, cols, shape=None, type_as=None):
        if type_as is None:
            return coo_matrix((data, (rows, cols)), shape=shape)
        else:
            return coo_matrix((data, (rows, cols)), shape=shape, dtype=type_as.dtype)

    def issparse(self, a):
        return issparse(a)

    def tocsr(self, a):
        if self.issparse(a):
            return a.tocsr()
        else:
            return csr_matrix(a)

    def eliminate_zeros(self, a, threshold=0.):
        # zero out near-zero entries first, then drop explicit zeros
        if threshold > 0:
            if self.issparse(a):
                a.data[self.abs(a.data) <= threshold] = 0
            else:
                a[self.abs(a) <= threshold] = 0
        if self.issparse(a):
            a.eliminate_zeros()
        return a

    def todense(self, a):
        if self.issparse(a):
            return a.toarray()
        else:
            return a

    def where(self, condition, x, y):
        return np.where(condition, x, y)

    def copy(self, a):
        return a.copy()

    def allclose(self, a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
        return np.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)

    def dtype_device(self, a):
        # NumPy is CPU-only; plain Python scalars report their type instead
        if hasattr(a, "dtype"):
            return a.dtype, "cpu"
        else:
            return type(a), "cpu"

    def assert_same_dtype_device(self, a, b):
        # numpy has implicit type conversion so we automatically validate the test
        pass
class JaxBackend(Backend):
"""
JAX implementation of the backend
- `__name__` is "jax"
- `__type__` is jax.numpy.ndarray
"""
__name__ = 'jax'
__type__ = jax_type
__type_list__ = None
rng_ = None
def __init__(self):
self.rng_ = jax.random.PRNGKey(42)
for d in jax.devices():
self.__type_list__ = [jax.device_put(jnp.array(1, dtype=jnp.float32), d),
jax.device_put(jnp.array(1, dtype=jnp.float64), d)]
def to_numpy(self, a):
return np.array(a)
def _change_device(self, a, type_as):
return jax.device_put(a, type_as.device_buffer.device())
def from_numpy(self, a, type_as=None):
if type_as is None:
return jnp.array(a)
else:
return self._change_device(jnp.array(a).astype(type_as.dtype), type_as)
def set_gradients(self, val, inputs, grads):
from jax.flatten_util import ravel_pytree
val, = jax.lax.stop_gradient((val,))
ravelled_inputs, _ = ravel_pytree(inputs)
ravelled_grads, _ = ravel_pytree(grads)
aux = jnp.sum(ravelled_inputs * ravelled_grads) / 2
aux = aux - jax.lax.stop_gradient(aux)
val, = jax.tree_map(lambda z: z + aux, (val,))
return val
def zeros(self, shape, type_as=None):
if type_as is None:
return jnp.zeros(shape)
else:
return self._change_device(jnp.zeros(shape, dtype=type_as.dtype), type_as)
def ones(self, shape, type_as=None):
if type_as is None:
return jnp.ones(shape)
else:
return self._change_device(jnp.ones(shape, dtype=type_as.dtype), type_as)
def arange(self, stop, start=0, step=1, type_as=None):
return jnp.arange(start, stop, step)
def full(self, shape, fill_value, type_as=None):
if type_as is None:
return jnp.full(shape, fill_value)
else:
return self._change_device(jnp.full(shape, fill_value, dtype=type_as.dtype), type_as)
def eye(self, N, M=None, type_as=None):
if type_as is None:
return jnp.eye(N, M)
else:
return self._change_device(jnp.eye(N, M, dtype=type_as.dtype), type_as)
def sum(self, a, axis=None, keepdims=False):
return jnp.sum(a, axis, keepdims=keepdims)
def cumsum(self, a, axis=None):
return jnp.cumsum(a, axis)
def max(self, a, axis=None, keepdims=False):
return jnp.max(a, axis, keepdims=keepdims)
def min(self, a, axis=None, keepdims=False):
return jnp.min(a, axis, keepdims=keepdims)
def maximum(self, a, b):
return jnp.maximum(a, b)
def minimum(self, a, b):
return jnp.minimum(a, b)
def dot(self, a, b):
return jnp.dot(a, b)
def abs(self, a):
return jnp.abs(a)
def exp(self, a):
return jnp.exp(a)
def log(self, a):
return jnp.log(a)
def sqrt(self, a):
return jnp.sqrt(a)
def power(self, a, exponents):
return jnp.power(a, exponents)
def norm(self, a):
return jnp.sqrt(jnp.sum(jnp.square(a)))
def any(self, a):
return jnp.any(a)
def isnan(self, a):
return jnp.isnan(a)
def isinf(self, a):
return jnp.isinf(a)
def einsum(self, subscripts, *operands):
return jnp.einsum(subscripts, *operands)
def sort(self, a, axis=-1):
return jnp.sort(a, axis)
def argsort(self, a, axis=-1):
return jnp.argsort(a, axis)
def searchsorted(self, a, v, side='left'):
if a.ndim == 1:
return jnp.searchsorted(a, v, side)
else:
# this is a not very efficient way to make jax numpy
# searchsorted work on 2d arrays
return jnp.array([jnp.searchsorted(a[i, :], v[i, :], side) for i in range(a.shape[0])])
def flip(self, a, axis=None):
return jnp.flip(a, axis)
def outer(self, a, b):
return jnp.outer(a, b)
def clip(self, a, a_min, a_max):
return jnp.clip(a, a_min, a_max)
def repeat(self, a, repeats, axis=None):
return jnp.repeat(a, repeats, axis)
def take_along_axis(self, arr, indices, axis):
return jnp.take_along_axis(arr, indices, axis)
def concatenate(self, arrays, axis=0):
return jnp.concatenate(arrays, axis)
def zero_pad(self, a, pad_width):
return jnp.pad(a, pad_width)
def argmax(self, a, axis=None):
return jnp.argmax(a, axis=axis)
def mean(self, a, axis=None):
return jnp.mean(a, axis=axis)
def std(self, a, axis=None):
return jnp.std(a, axis=axis)
def linspace(self, start, stop, num):
return jnp.linspace(start, stop, num)
def meshgrid(self, a, b):
return jnp.meshgrid(a, b)
def diag(self, a, k=0):
return jnp.diag(a, k)
def unique(self, a):
return jnp.unique(a)
def logsumexp(self, a, axis=None):
return jscipy.logsumexp(a, axis=axis)
def stack(self, arrays, axis=0):
return jnp.stack(arrays, axis)
def reshape(self, a, shape):
return jnp.reshape(a, shape)
    def seed(self, seed=None):
        # JAX's PRNG is stateless: keep a key on the backend and split it
        # on every draw (see rand/randn below). A None seed is a no-op.
        if seed is not None:
            self.rng_ = jax.random.PRNGKey(seed)
    def rand(self, *size, type_as=None):
        """Uniform samples in [0, 1) of shape ``size`` (dtype of ``type_as`` if given)."""
        self.rng_, subkey = jax.random.split(self.rng_)
        if type_as is not None:
            return jax.random.uniform(subkey, shape=size, dtype=type_as.dtype)
        else:
            return jax.random.uniform(subkey, shape=size)
    def randn(self, *size, type_as=None):
        """Standard-normal samples of shape ``size`` (dtype of ``type_as`` if given)."""
        self.rng_, subkey = jax.random.split(self.rng_)
        if type_as is not None:
            return jax.random.normal(subkey, shape=size, dtype=type_as.dtype)
        else:
            return jax.random.normal(subkey, shape=size)
    def coo_matrix(self, data, rows, cols, shape=None, type_as=None):
        # Currently, JAX does not support sparse matrices: build the COO
        # matrix with the numpy backend, densify it, and convert back.
        data = self.to_numpy(data)
        rows = self.to_numpy(rows)
        cols = self.to_numpy(cols)
        nx = NumpyBackend()
        coo_matrix = nx.coo_matrix(data, rows, cols, shape=shape, type_as=type_as)
        matrix = nx.todense(coo_matrix)
        return self.from_numpy(matrix)
    def issparse(self, a):
        # Currently, JAX does not support sparse matrices
        return False
    def tocsr(self, a):
        # Currently, JAX does not support sparse matrices
        return a
    def eliminate_zeros(self, a, threshold=0.):
        # Currently, JAX does not support sparse matrices; emulate the
        # sparse-backend semantics by zeroing out near-zero entries.
        if threshold > 0:
            return self.where(
                self.abs(a) <= threshold,
                self.zeros((1,), type_as=a),
                a
            )
        return a
    def todense(self, a):
        # Currently, JAX does not support sparse matrices
        return a
    def where(self, condition, x, y):
        return jnp.where(condition, x, y)
    def copy(self, a):
        # No need to copy, JAX arrays are immutable
        return a
    def allclose(self, a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
        return jnp.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
    def dtype_device(self, a):
        """Return the dtype and the device object of array ``a``."""
        # NOTE(review): ``device_buffer.device()`` is deprecated in recent
        # JAX releases (``a.devices()`` replaces it) — confirm the JAX
        # version this file targets.
        return a.dtype, a.device_buffer.device()
    def assert_same_dtype_device(self, a, b):
        """Assert that ``a`` and ``b`` share both dtype and device."""
        a_dtype, a_device = self.dtype_device(a)
        b_dtype, b_device = self.dtype_device(b)
        assert a_dtype == b_dtype, "Dtype discrepancy"
        assert a_device == b_device, f"Device discrepancy. First input is on {str(a_device)}, whereas second input is on {str(b_device)}"
class TorchBackend(Backend):
    """
    PyTorch implementation of the backend

    - `__name__` is "torch"
    - `__type__` is torch.Tensor

    Random draws (``rand``/``randn``) go through a dedicated
    ``torch.Generator`` stored in ``rng_`` so they do not disturb
    PyTorch's global RNG state.
    """
    __name__ = 'torch'
    __type__ = torch_type
    __type_list__ = None
    rng_ = None
    def __init__(self):
        self.rng_ = torch.Generator()
        self.rng_.seed()
        # Prototype tensors covering every dtype/device combination the
        # backend supports; CUDA entries only when a GPU is visible.
        self.__type_list__ = [torch.tensor(1, dtype=torch.float32),
                              torch.tensor(1, dtype=torch.float64)]
        if torch.cuda.is_available():
            self.__type_list__.append(torch.tensor(1, dtype=torch.float32, device='cuda'))
            self.__type_list__.append(torch.tensor(1, dtype=torch.float64, device='cuda'))
        from torch.autograd import Function
        # define a function that takes inputs val and grads
        # and returns a val tensor with proper gradients
        class ValFunction(Function):
            @staticmethod
            def forward(ctx, val, grads, *inputs):
                # Stash the custom gradients for the backward pass.
                ctx.grads = grads
                return val
            @staticmethod
            def backward(ctx, grad_output):
                # the gradients are grads; None, None for val and grads slots
                return (None, None) + tuple(g * grad_output for g in ctx.grads)
        self.ValFunction = ValFunction
    def to_numpy(self, a):
        return a.cpu().detach().numpy()
    def from_numpy(self, a, type_as=None):
        if isinstance(a, float):
            a = np.array(a)
        if type_as is None:
            return torch.from_numpy(a)
        else:
            return torch.as_tensor(a, dtype=type_as.dtype, device=type_as.device)
    def set_gradients(self, val, inputs, grads):
        # Route val through ValFunction so autograd reports the supplied
        # custom gradients w.r.t. inputs.
        Func = self.ValFunction()
        res = Func.apply(val, grads, *inputs)
        return res
    def zeros(self, shape, type_as=None):
        if isinstance(shape, int):
            shape = (shape,)
        if type_as is None:
            return torch.zeros(shape)
        else:
            return torch.zeros(shape, dtype=type_as.dtype, device=type_as.device)
    def ones(self, shape, type_as=None):
        if isinstance(shape, int):
            shape = (shape,)
        if type_as is None:
            return torch.ones(shape)
        else:
            return torch.ones(shape, dtype=type_as.dtype, device=type_as.device)
    def arange(self, stop, start=0, step=1, type_as=None):
        # Note the (stop, start, step) parameter order of this backend API.
        if type_as is None:
            return torch.arange(start, stop, step)
        else:
            return torch.arange(start, stop, step, device=type_as.device)
    def full(self, shape, fill_value, type_as=None):
        if isinstance(shape, int):
            shape = (shape,)
        if type_as is None:
            return torch.full(shape, fill_value)
        else:
            return torch.full(shape, fill_value, dtype=type_as.dtype, device=type_as.device)
    def eye(self, N, M=None, type_as=None):
        if M is None:
            M = N
        if type_as is None:
            return torch.eye(N, m=M)
        else:
            return torch.eye(N, m=M, dtype=type_as.dtype, device=type_as.device)
    def sum(self, a, axis=None, keepdims=False):
        if axis is None:
            return torch.sum(a)
        else:
            return torch.sum(a, axis, keepdim=keepdims)
    def cumsum(self, a, axis=None):
        if axis is None:
            # numpy semantics: flatten before cumulating when axis is None.
            return torch.cumsum(a.flatten(), 0)
        else:
            return torch.cumsum(a, axis)
    def max(self, a, axis=None, keepdims=False):
        if axis is None:
            return torch.max(a)
        else:
            # torch returns (values, indices); keep only the values.
            return torch.max(a, axis, keepdim=keepdims)[0]
    def min(self, a, axis=None, keepdims=False):
        if axis is None:
            return torch.min(a)
        else:
            return torch.min(a, axis, keepdim=keepdims)[0]
    def maximum(self, a, b):
        # Promote Python scalars to tensors matching the other operand.
        if isinstance(a, int) or isinstance(a, float):
            a = torch.tensor([float(a)], dtype=b.dtype, device=b.device)
        if isinstance(b, int) or isinstance(b, float):
            b = torch.tensor([float(b)], dtype=a.dtype, device=a.device)
        # torch.maximum only exists in recent torch; emulate it otherwise.
        if hasattr(torch, "maximum"):
            return torch.maximum(a, b)
        else:
            return torch.max(torch.stack(torch.broadcast_tensors(a, b)), axis=0)[0]
    def minimum(self, a, b):
        if isinstance(a, int) or isinstance(a, float):
            a = torch.tensor([float(a)], dtype=b.dtype, device=b.device)
        if isinstance(b, int) or isinstance(b, float):
            b = torch.tensor([float(b)], dtype=a.dtype, device=a.device)
        if hasattr(torch, "minimum"):
            return torch.minimum(a, b)
        else:
            return torch.min(torch.stack(torch.broadcast_tensors(a, b)), axis=0)[0]
    def dot(self, a, b):
        return torch.matmul(a, b)
    def abs(self, a):
        return torch.abs(a)
    def exp(self, a):
        return torch.exp(a)
    def log(self, a):
        return torch.log(a)
    def sqrt(self, a):
        return torch.sqrt(a)
    def power(self, a, exponents):
        return torch.pow(a, exponents)
    def norm(self, a):
        return torch.sqrt(torch.sum(torch.square(a)))
    def any(self, a):
        return torch.any(a)
    def isnan(self, a):
        return torch.isnan(a)
    def isinf(self, a):
        return torch.isinf(a)
    def einsum(self, subscripts, *operands):
        return torch.einsum(subscripts, *operands)
    def sort(self, a, axis=-1):
        sorted0, indices = torch.sort(a, dim=axis)
        return sorted0
    def argsort(self, a, axis=-1):
        # NOTE: `sorted` shadows the builtin here, but only method-locally.
        sorted, indices = torch.sort(a, dim=axis)
        return indices
    def searchsorted(self, a, v, side='left'):
        # torch expresses 'side' as a boolean `right` flag.
        right = (side != 'left')
        return torch.searchsorted(a, v, right=right)
    def flip(self, a, axis=None):
        if axis is None:
            # Flip along every axis, as numpy does when axis is None.
            return torch.flip(a, tuple(i for i in range(len(a.shape))))
        if isinstance(axis, int):
            return torch.flip(a, (axis,))
        else:
            return torch.flip(a, dims=axis)
    def outer(self, a, b):
        return torch.outer(a, b)
    def clip(self, a, a_min, a_max):
        return torch.clamp(a, a_min, a_max)
    def repeat(self, a, repeats, axis=None):
        return torch.repeat_interleave(a, repeats, dim=axis)
    def take_along_axis(self, arr, indices, axis):
        return torch.gather(arr, axis, indices)
    def concatenate(self, arrays, axis=0):
        return torch.cat(arrays, dim=axis)
    def zero_pad(self, a, pad_width):
        from torch.nn.functional import pad
        # pad_width is an array of ndim tuples indicating how many 0 before and after
        # we need to add. We first need to make it compliant with torch syntax, that
        # starts with the last dim, then second last, etc.
        how_pad = tuple(element for tupl in pad_width[::-1] for element in tupl)
        return pad(a, how_pad)
    def argmax(self, a, axis=None):
        return torch.argmax(a, dim=axis)
    def mean(self, a, axis=None):
        if axis is not None:
            return torch.mean(a, dim=axis)
        else:
            return torch.mean(a)
    def std(self, a, axis=None):
        # unbiased=False matches numpy's population std (ddof=0).
        if axis is not None:
            return torch.std(a, dim=axis, unbiased=False)
        else:
            return torch.std(a, unbiased=False)
    def linspace(self, start, stop, num):
        # Forced float64 to mirror numpy's default precision.
        return torch.linspace(start, stop, num, dtype=torch.float64)
    def meshgrid(self, a, b):
        # torch.meshgrid uses 'ij' indexing; transpose to match numpy's 'xy'.
        X, Y = torch.meshgrid(a, b)
        return X.T, Y.T
    def diag(self, a, k=0):
        return torch.diag(a, diagonal=k)
    def unique(self, a):
        return torch.unique(a)
    def logsumexp(self, a, axis=None):
        if axis is not None:
            return torch.logsumexp(a, dim=axis)
        else:
            # torch.logsumexp requires dims; reduce over all of them.
            return torch.logsumexp(a, dim=tuple(range(len(a.shape))))
    def stack(self, arrays, axis=0):
        return torch.stack(arrays, dim=axis)
    def reshape(self, a, shape):
        return torch.reshape(a, shape)
    def seed(self, seed=None):
        # Accept either an int seed or a pre-built torch.Generator.
        if isinstance(seed, int):
            self.rng_.manual_seed(seed)
        elif isinstance(seed, torch.Generator):
            self.rng_ = seed
        else:
            raise ValueError("Non compatible seed : {}".format(seed))
    def rand(self, *size, type_as=None):
        if type_as is not None:
            return torch.rand(size=size, generator=self.rng_, dtype=type_as.dtype, device=type_as.device)
        else:
            return torch.rand(size=size, generator=self.rng_)
    def randn(self, *size, type_as=None):
        if type_as is not None:
            return torch.randn(size=size, dtype=type_as.dtype, generator=self.rng_, device=type_as.device)
        else:
            return torch.randn(size=size, generator=self.rng_)
    def coo_matrix(self, data, rows, cols, shape=None, type_as=None):
        if type_as is None:
            return torch.sparse_coo_tensor(torch.stack([rows, cols]), data, size=shape)
        else:
            return torch.sparse_coo_tensor(
                torch.stack([rows, cols]), data, size=shape,
                dtype=type_as.dtype, device=type_as.device
            )
    def issparse(self, a):
        return getattr(a, "is_sparse", False) or getattr(a, "is_sparse_csr", False)
    def tocsr(self, a):
        # Versions older than 1.9 do not support CSR tensors. PyTorch 1.9 and 1.10 offer a very limited support
        return self.todense(a)
    def eliminate_zeros(self, a, threshold=0.):
        if self.issparse(a):
            # Keep only the stored entries whose magnitude exceeds threshold.
            if threshold > 0:
                mask = self.abs(a) <= threshold
                mask = ~mask
                mask = mask.nonzero()
            else:
                mask = a._values().nonzero()
            nv = a._values().index_select(0, mask.view(-1))
            ni = a._indices().index_select(1, mask.view(-1))
            return self.coo_matrix(nv, ni[0], ni[1], shape=a.shape, type_as=a)
        else:
            if threshold > 0:
                # Dense tensors are zeroed in place.
                a[self.abs(a) <= threshold] = 0
            return a
    def todense(self, a):
        if self.issparse(a):
            return a.to_dense()
        else:
            return a
    def where(self, condition, x, y):
        return torch.where(condition, x, y)
    def copy(self, a):
        return torch.clone(a)
    def allclose(self, a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
        return torch.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
    def dtype_device(self, a):
        return a.dtype, a.device
    def assert_same_dtype_device(self, a, b):
        a_dtype, a_device = self.dtype_device(a)
        b_dtype, b_device = self.dtype_device(b)
        assert a_dtype == b_dtype, "Dtype discrepancy"
        assert a_device == b_device, f"Device discrepancy. First input is on {str(a_device)}, whereas second input is on {str(b_device)}"
# ---
# cmdb_sdk/api/instance_tree/instance_tree_search_pb2.py
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: instance_tree_search.proto
# NOTE: auto-generated module — change instance_tree_search.proto and
# regenerate instead of editing the descriptors below by hand.
import sys
# Py2/Py3 shim: descriptor byte strings pass through on py2, and are
# encoded back to latin-1 bytes on py3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from cmdb_sdk.model.cmdb import instance_tree_root_node_pb2 as cmdb__sdk_dot_model_dot_cmdb_dot_instance__tree__root__node__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
# File descriptor embedding the serialized .proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='instance_tree_search.proto',
  package='instance_tree',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n\x1ainstance_tree_search.proto\x12\rinstance_tree\x1a\x31\x63mdb_sdk/model/cmdb/instance_tree_root_node.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x84\x01\n\x19InstanceTreeSearchRequest\x12(\n\x04tree\x18\x01 \x01(\x0b\x32\x1a.cmdb.InstanceTreeRootNode\x12\x15\n\rignore_single\x18\x02 \x01(\x08\x12&\n\x05query\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\"|\n!InstanceTreeSearchResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12%\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Structb\x06proto3')
  ,
  dependencies=[cmdb__sdk_dot_model_dot_cmdb_dot_instance__tree__root__node__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
# Message descriptor: InstanceTreeSearchRequest {tree, ignore_single, query}
_INSTANCETREESEARCHREQUEST = _descriptor.Descriptor(
  name='InstanceTreeSearchRequest',
  full_name='instance_tree.InstanceTreeSearchRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='tree', full_name='instance_tree.InstanceTreeSearchRequest.tree', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ignore_single', full_name='instance_tree.InstanceTreeSearchRequest.ignore_single', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='query', full_name='instance_tree.InstanceTreeSearchRequest.query', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=127,
  serialized_end=259,
)
# Message descriptor: InstanceTreeSearchResponseWrapper {code, codeExplain, error, data}
_INSTANCETREESEARCHRESPONSEWRAPPER = _descriptor.Descriptor(
  name='InstanceTreeSearchResponseWrapper',
  full_name='instance_tree.InstanceTreeSearchResponseWrapper',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='instance_tree.InstanceTreeSearchResponseWrapper.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='codeExplain', full_name='instance_tree.InstanceTreeSearchResponseWrapper.codeExplain', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='error', full_name='instance_tree.InstanceTreeSearchResponseWrapper.error', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='instance_tree.InstanceTreeSearchResponseWrapper.data', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=261,
  serialized_end=385,
)
# Wire up cross-message references and register everything with the
# default symbol database.
_INSTANCETREESEARCHREQUEST.fields_by_name['tree'].message_type = cmdb__sdk_dot_model_dot_cmdb_dot_instance__tree__root__node__pb2._INSTANCETREEROOTNODE
_INSTANCETREESEARCHREQUEST.fields_by_name['query'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_INSTANCETREESEARCHRESPONSEWRAPPER.fields_by_name['data'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
DESCRIPTOR.message_types_by_name['InstanceTreeSearchRequest'] = _INSTANCETREESEARCHREQUEST
DESCRIPTOR.message_types_by_name['InstanceTreeSearchResponseWrapper'] = _INSTANCETREESEARCHRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
InstanceTreeSearchRequest = _reflection.GeneratedProtocolMessageType('InstanceTreeSearchRequest', (_message.Message,), {
  'DESCRIPTOR' : _INSTANCETREESEARCHREQUEST,
  '__module__' : 'instance_tree_search_pb2'
  # @@protoc_insertion_point(class_scope:instance_tree.InstanceTreeSearchRequest)
  })
_sym_db.RegisterMessage(InstanceTreeSearchRequest)
InstanceTreeSearchResponseWrapper = _reflection.GeneratedProtocolMessageType('InstanceTreeSearchResponseWrapper', (_message.Message,), {
  'DESCRIPTOR' : _INSTANCETREESEARCHRESPONSEWRAPPER,
  '__module__' : 'instance_tree_search_pb2'
  # @@protoc_insertion_point(class_scope:instance_tree.InstanceTreeSearchResponseWrapper)
  })
_sym_db.RegisterMessage(InstanceTreeSearchResponseWrapper)
# @@protoc_insertion_point(module_scope)
# ---
import math
from os import path, listdir
from typing import Callable
import cv2
class TimeData:
    """
    Container for a time span broken into hours, minutes and seconds.
    """
    # Class-level defaults; instances always overwrite them in __init__.
    hours: int = 0
    minutes: int = 0
    seconds: int = 0
    def __init__(self, hours, minutes, seconds):
        self.hours = hours
        self.minutes = minutes
        self.seconds = seconds
    def get(self):
        """
        Return the stored components as a list.
        :return: [hours, minutes, seconds]
        """
        return [self.hours, self.minutes, self.seconds]
def files_list(target: str, callback: Callable[[str], bool] = None):
    """
    Get a list of files for a path.

    If ``target`` is a file, the list contains just that file; if it is a
    directory, the list contains the directory's regular files only
    (subdirectories are not recursed into).

    :param callback: optional filter fired for each candidate file;
        return False to exclude the file from the result
    :param target: path to the file or directory
    :return: list of file paths (empty if ``target`` does not exist)
    """
    files = []
    # Return empty if path does not exist
    if not path.exists(target):
        return files
    if path.isfile(target):
        files.append(target)
        return files
    if path.isdir(target):
        for name in listdir(target):
            candidate = path.join(target, name)
            # Skip subdirectories and anything the filter rejects.
            if path.isfile(candidate) and (callback is None or callback(candidate)):
                files.append(candidate)
    # Always return a list: the original fell through and returned None
    # for paths that exist but are neither regular files nor directories.
    return files
def sum_array(array: list):
    """
    Sum of the elements of ``array``.

    :param array: elements list
    :return: the sum (0 for an empty list)
    """
    # The built-in sum is C-implemented and already handles the empty list;
    # no need for a manual accumulator loop.
    return sum(array)
def format_filesize(size: int, suffix="iB"):
    """
    Format filesize to human-readable format
    :param size: file size
    :param suffix: size suffix
    :return: str
    """
    value = size
    # Binary prefixes: step down by 1024 until the value fits.
    for unit in ("", "K", "M", "G", "T", "P", "E", "Z"):
        if abs(value) < 1024.0:
            return f"{value:3.1f}{unit}{suffix}"
        value /= 1024.0
    # Larger than zettabytes: fall back to the yotta prefix.
    return f"{value:.1f}Y{suffix}"
def parse_time(seconds: int):
    """
    Convert a whole number of seconds to a TimeData object.

    :param seconds: seconds count (assumed to be an int, per the annotation)
    :return: TimeData
    """
    # divmod replaces the original's three floor/multiply round-trips and
    # cannot drift out of sync between the components.
    minutes, seconds_left = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return TimeData(hours, minutes, seconds_left)
def format_time(value: int):
    """
    Format seconds into human readable string using parse_time
    :param value: seconds count
    :return: str
    """
    data = parse_time(value)
    # Zero duration renders as a placeholder.
    if data.seconds == 0 and data.minutes == 0 and data.hours == 0:
        return "..."
    hours, minutes, seconds = ("{:02d}".format(part) for part in data.get())
    parts: list[str] = []
    # Hours and minutes only appear when non-zero; seconds always do.
    if data.hours >= 1:
        parts.append(f"{hours}h")
    if data.minutes >= 1:
        parts.append(f"{minutes}m")
    parts.append(f"{seconds}s")
    return " ".join(parts).strip()
class VideoInfo:
    """
    Class for getting information about a specific video file
    """
    codec: str   # four-character FOURCC codec identifier, e.g. "avc1"
    frames: int  # frame count reported by the container
    def __init__(self, target: str):
        if not path.exists(target) or not path.isfile(target):
            raise IOError("Video file reading error")
        video = cv2.VideoCapture(target)
        # Decode the FOURCC integer into its four ASCII characters
        # (least-significant byte first).
        h = int(video.get(cv2.CAP_PROP_FOURCC))
        self.codec = chr(h & 0xff) + chr((h >> 8) & 0xff) + chr((h >> 16) & 0xff) + chr((h >> 24) & 0xff)
        # NOTE(review): CAP_PROP_FRAME_COUNT can be an estimate for some
        # containers/codecs — confirm that is acceptable for callers.
        self.frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
# ---
# nanmat/A_New_Mixed_Method — 3. Sentiment Analysis.py
# load libaries
import pandas as pd
import os
import numpy as np
from nltk.tokenize import sent_tokenize
import random
from danlp.models import load_bert_emotion_model, load_bert_tone_model,load_spacy_model
import operator
import plotly.graph_objects as go
import plotly.express as px
# load data
os.chdir('/data')
df = pd.read_csv('data.csv')
# divide documents into sentences: one stacked row per sentence, with the
# index kept aligned to the originating document row
s = df["paragraphs"].apply(lambda x : sent_tokenize(x)).apply(pd.Series,1).stack()
s.index = s.index.droplevel(-1) # match the index of df
s.name = 'text_sentence' # give name to join
# removing empty rows
s.replace('', np.nan, inplace=True)
s.dropna(inplace=True)
# new dataframe: drop the raw paragraphs and join the sentence series back in
del df["paragraphs"]
df = df.join(s)
df.head(10)
### ESTIMATE EMOTIONS, POLARITY AND TONE
# load classifiers and build prediction helpers around them
classifier_emo = load_bert_emotion_model()
classifier_tone = load_bert_tone_model()
def predict_emo(x):
    # Most-likely emotion label for one sentence.
    return classifier_emo.predict(x)
def predict_tone(x):
    # Tone/polarity labels for one sentence.
    return classifier_tone.predict(x)
# BERT EMOTION
emotion = [predict_emo(i) for i in df['text_sentence']]
# get the percentage prediction for each emotion
# NOTE(review): result[0] is assumed to list the emotion probabilities in
# this fixed label order — confirm against danlp's published class order.
Glede_Sindsro = []
Tillid_Accept= []
Forventning_Interrese= []
Overasket_Målløs= []
Vrede_Irritation= []
Foragt_Modvilje= []
Sorg_trist= []
Frygt_Bekymret= []
for i in df['text_sentence']:
    result = classifier_emo.predict_proba(i)
    Glede_Sindsro.append(result[0][0])
    Tillid_Accept.append(result[0][1])
    Forventning_Interrese.append(result[0][2])
    Overasket_Målløs.append(result[0][3])
    Vrede_Irritation.append(result[0][4])
    Foragt_Modvilje.append(result[0][5])
    Sorg_trist.append(result[0][6])
    Frygt_Bekymret.append(result[0][7])
# BERT TONE:
d = []
for i in df['text_sentence']:
    d.append(predict_tone(i))
tone = [i['analytic'] for i in d]
polarity = [i['polarity'] for i in d]
# get the percentage prediction for each tone and polarity
# (result[0] holds polarity probabilities, result[1] tone probabilities)
positiv = []
negativ = []
neutral = []
subjektiv = []
objektiv = []
for i in df['text_sentence']:
    result = classifier_tone.predict_proba(i)
    positiv.append(result[0][0])
    neutral.append(result[0][1])
    negativ.append(result[0][2])
    objektiv.append(result[1][0])
    subjektiv.append(result[1][1])
# Add all classifications to dataframe
df['polarity'] = polarity
df['tone'] = tone
df['emotion'] = emotion
df['Glede_Sindsro'] = Glede_Sindsro
df['Tillid_Accept'] = Tillid_Accept
df['Forventning_Interesse'] = Forventning_Interrese
df['Overrasket_Målløs'] = Overasket_Målløs
df['Vrede_Irritation'] = Vrede_Irritation
df['Foragt_Modvilje'] = Foragt_Modvilje
df['Sorg_Trist'] = Sorg_trist
df['Frygt_Bekymret'] = Frygt_Bekymret
df['positiv'] = positiv
df['neutral'] =neutral
df['negativ'] =negativ
df['subjektiv'] =subjektiv
df['objektiv'] =objektiv
# save dataframe
df.to_csv('data_sentiment.csv')
### SHOW SENTENCES WITH HIGHEST SCORES FOR EACH CLASS
pd.options.display.max_colwidth = 1000
def _print_top_sentences(frame, column, top=5):
    # Print the `top` sentences with the highest score in `column`.
    best = frame.iloc[frame[column].sort_values(ascending=False)[:top].index]
    for index, review in enumerate(best['text_sentence']):
        print('Eksempel {}:\n'.format(index + 1), review)
# POLARITY — column names must match the Danish score columns created
# above ('positiv'/'negativ'/'neutral'): the original lookups used the
# English spellings ('positive', 'negative', ...) and raised KeyError.
for column in ('positiv', 'negativ', 'neutral'):
    _print_top_sentences(df, column)
# TONE ('subjektiv'/'objektiv' as stored in the dataframe)
for column in ('subjektiv', 'objektiv'):
    _print_top_sentences(df, column)
# EMOTION
for column in ('Vrede_Irritation', 'Tillid_Accept', 'Forventning_Interesse',
               'Foragt_Modvilje', 'Glede_Sindsro', 'Sorg_Trist',
               'Overrasket_Målløs', 'Frygt_Bekymret'):
    _print_top_sentences(df, column)
###### PLOTTING ########
os.chdir('/plots') # set directory to the folder where you want to save the plots
#change labels to english
df['emotion'] = df['emotion'].replace(['Forventning/Interrese','No Emotion','Foragt/Modvilje','Tillid/Accept','Frygt/Bekymret','Vrede/Irritation','Overasket/Målløs','Glæde/Sindsro','Sorg/trist'],
                                      ['Expectation','No Emotion','Contempt','Trust','Fear','Anger','Surprise','Joy','Sadness'])
### PREPARE DATA FOR PLOTTING
# Row-normalized percentage crosstabs of each label per company.
# NOTE(review): ``df2`` is referenced below but never defined in this
# script — presumably a dataframe supplying the desired company ordering,
# created elsewhere. Confirm before running.
new1 = df[['company','emotion']].copy()
new_emotion = pd.crosstab(new1['company'], new1['emotion'], normalize='index') * 100
new_emotion = new_emotion.reindex(df2.company, axis="rows")
new_emotion = new_emotion.round(1)
new2 = df[['company','polarity']].copy()
new_polarity = pd.crosstab(new2['company'], new2['polarity'], normalize='index') * 100
new_polarity = new_polarity.reindex(df2.company, axis="rows")
new_polarity = new_polarity.round(1)
new3 = df[['company','tone']].copy()
new_tone = pd.crosstab(new3['company'], new3['tone'], normalize='index') * 100
new_tone = new_tone.reindex(df2.company, axis="rows")
new_tone = new_tone.round(1)
###################### PLOTTING
# POLARITY
new_polarity.negative = new_polarity.negative * -1 # making negative, in order to plot in the other direction
polarity_plot = go.Figure()
# Iterating over the columns
# NOTE(review): both loops below iterate over *all* columns, so every
# column gets one mirrored (negated) trace and one regular trace —
# confirm the intent was not to restrict the first loop to the negative
# column only.
for col in new_polarity.columns:
    # Adding a trace for negative sentiment
    polarity_plot.add_trace(go.Bar(x=-new_polarity[col].values,
                                   y=new_polarity.index,
                                   orientation='h',
                                   name=col,
                                   customdata=new_polarity[col],
                                   texttemplate="%{x} %",
                                   textposition="inside",
                                   hovertemplate="%{y}: %{customdata}"))
for col in new_polarity.columns:
    # Adding a trace for positive and neutral sentiment
    polarity_plot.add_trace(go.Bar(x=new_polarity[col],
                                   y=new_polarity.index,
                                   orientation='h',
                                   name=col,
                                   texttemplate="%{x} %",
                                   textposition="inside",
                                   hovertemplate="%{y}: %{x}"))
# change the richness of color of traces and x-axis
polarity_plot.update_traces(opacity=0.8)
polarity_plot.update_xaxes(title_text='%')
# Specify the layout
polarity_plot.update_layout(barmode='relative',
                            height=1000,
                            width=1000,
                            yaxis_autorange='reversed',
                            bargap=0.3,
                            colorway=['#8f160d', '#cf654b', '#ffb495'],
                            plot_bgcolor='#ffffff',
                            legend_orientation='v',
                            legend_x=1, legend_y=0,
                            title_text= 'Percentage distribution of polarity'
                            )
# show and save the plot
polarity_plot.show()
polarity_plot.write_html("interactive_polarity.html")
# TONE
tone_plot = go.Figure()
for col in new_tone.columns:
    # Adding a trace for subjective and objective sentiment
    tone_plot.add_trace(go.Bar(x=new_tone[col],
                               y=new_tone.index,
                               orientation='h',
                               name=col,
                               texttemplate="%{x} %",
                               textposition="inside",
                               hovertemplate="%{y}: %{x}"))
# change the richness of color of traces and x-axis
tone_plot.update_traces(opacity=0.8)
tone_plot.update_xaxes(title_text='%')
# Specify the layout
tone_plot.update_layout(barmode='relative',
                        height=1000,
                        width=900,
                        yaxis_autorange='reversed',
                        bargap=0.3,
                        colorway=['#4a3fad', '#c9a9f1'],
                        plot_bgcolor='#ffffff',
                        legend_orientation='v',
                        legend_x=1, legend_y=0,
                        title_text= 'Percentage distribution of tone'
                        )
# show and save the plot
tone_plot.show()
tone_plot.write_html("interactive_tone.html")
# EMOTION
column_name = ['Expectation','No emotion','Contempt','Trust','Fear','Anger','Surprise','Joy','Sadness'] # order the emotions
new_emotion = new_emotion[column_name]
emotion_plot = go.Figure()
for col in new_emotion.columns:
    # Adding a trace for all emotions
    emotion_plot.add_trace(go.Bar(x=new_emotion[col],
                                  y=new_emotion.index,
                                  orientation='h',
                                  name=col,
                                  texttemplate="%{x} %",
                                  textposition="inside",
                                  hovertemplate="%{y}: %{x}"))
# change the richness of color of traces and x-axis
emotion_plot.update_traces(opacity=0.8)
emotion_plot.update_xaxes(title_text='%')
# Specify the layout
emotion_plot.update_layout(barmode='relative',
                           height=1000,
                           width=1400,
                           yaxis_autorange='reversed',
                           bargap=0.3,
                           colorway=['#4a3fad', '#7061ce', '#9b84e8', '#c9a9f1', '#ffcfe4', '#ffbcaf', '#f4777f', '#cf3759', '#93003a'],
                           plot_bgcolor='#ffffff',
                           legend_orientation='v',
                           legend_x=1, legend_y=0,
                           title_text= 'Percentage distribution of emotions'
                           )
# show and save the plot
emotion_plot.show()
emotion_plot.write_html("interactive_emotion.html")
# EMOTION OVERALL - CIRCLE PLOT
emotion_overall_plot = px.pie(df, names='emotion', title='Overall percentage distribution of emotions',color='emotion',
                              color_discrete_map={'Expectation':'#4a3fad',
                                                  'No emotion':'#7061ce',
                                                  'Contempt':'#9b84e8',
                                                  'Trust':'#c9a9f1',
                                                  'Fear':'#ffcfe4',
                                                  'Anger':'#ffbcaf',
                                                  'Surprise':'#f4777f',
                                                  'Joy':'#cf3759',
                                                  'Sadness':'#93003a'},
                              height=600,
                              width=600)
emotion_overall_plot.update_traces(textposition='inside', textinfo='percent+label')
emotion_overall_plot.update_layout(showlegend=False)
emotion_overall_plot.show()
emotion_overall_plot.write_html("interactive_emotion_overall.html")
# ---
import torch
from torch import nn
from torch.nn import functional as F
from nsflow import utils
# # Projection of x onto y
# def proj(x, y):
# return torch.mm(y, x.t()) * y / torch.mm(y, y.t())
#
#
# # Orthogonalize x wrt list of vectors ys
# def gram_schmidt(x, ys):
# for y in ys:
# x = x - proj(x, y)
# return x
#
#
# # Apply num_itrs steps of the power method to estimate top N singular values.
# def power_iteration(W, u_, update=True, eps=1e-12):
# # Lists holding singular vectors and values
# us, vs, svs = [], [], []
# for i, u in enumerate(u_):
# # Run one step of the power iteration
# with torch.no_grad():
# v = torch.matmul(u, W)
# # Run Gram-Schmidt to subtract components of all other singular vectors
# v = F.normalize(gram_schmidt(v, vs), eps=eps)
# # Add to the list
# vs += [v]
# # Update the other singular vector
# u = torch.matmul(v, W.t())
# # Run Gram-Schmidt to subtract components of all other singular vectors
# u = F.normalize(gram_schmidt(u, us), eps=eps)
# # Add to the list
# us += [u]
# if update:
# u_[i][:] = u
# # Compute this singular value and add it to the list
# svs += [torch.squeeze(torch.matmul(torch.matmul(v, W.t()), u.t()))]
# #svs += [torch.sum(F.linear(u, W.transpose(0, 1)) * v)]
# return svs, us, vs
#
#
# # Spectral normalization base class
# class SN(object):
# def __init__(self, num_svs, num_itrs, num_outputs, transpose=False, eps=1e-12):
# # Number of power iterations per step
# self.num_itrs = num_itrs
# # Number of singular values
# self.num_svs = num_svs
# # Transposed?
# self.transpose = transpose
# # Epsilon value for avoiding divide-by-0
# self.eps = eps
# # Register a singular vector for each sv
# for i in range(self.num_svs):
# self.register_buffer('u%d' % i, torch.randn(1, num_outputs))
# self.register_buffer('sv%d' % i, torch.ones(1))
#
# # Singular vectors (u side)
# @property
# def u(self):
# return [getattr(self, 'u%d' % i) for i in range(self.num_svs)]
#
# # Singular values;
# # note that these buffers are just for logging and are not used in training.
# @property
# def sv(self):
# return [getattr(self, 'sv%d' % i) for i in range(self.num_svs)]
#
# # Compute the spectrally-normalized weight
# def W_(self):
# W_mat = self.weight.view(self.weight.size(0), -1)
# if self.transpose:
# W_mat = W_mat.t()
# # Apply num_itrs power iterations
# for _ in range(self.num_itrs):
# svs, us, vs = power_iteration(W_mat, self.u, update=self.training,
# eps=self.eps)
# # Update the svs
# if self.training:
# with torch.no_grad(): # Make sure to do this in a no_grad() context or you'll get memory leaks!
# for i, sv in enumerate(svs):
# self.sv[i][:] = sv
# return self.weight / svs[0]
#
#
# # 2D Conv layer with spectral norm
# class SNConv2d(nn.Conv2d, SN):
# def __init__(self, in_channels, out_channels, kernel_size, stride=1,
# padding=0, dilation=1, groups=1, bias=True,
# num_svs=1, num_itrs=1, eps=1e-12):
# nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride,
# padding, dilation, groups, bias)
# SN.__init__(self, num_svs, num_itrs, out_channels, eps=eps)
#
# def forward(self, x):
# return F.conv2d(x, self.W_(), self.bias, self.stride,
# self.padding, self.dilation, self.groups)
#
#
# # Linear layer with spectral norm
# class SNLinear(nn.Linear, SN):
# def __init__(self, in_features, out_features, bias=True,
# num_svs=1, num_itrs=1, eps=1e-12):
# nn.Linear.__init__(self, in_features, out_features, bias)
# SN.__init__(self, num_svs, num_itrs, out_features, eps=eps)
#
# def forward(self, x):
# return F.linear(x, self.W_(), self.bias)
#
#
# # Embedding layer with spectral norm
# # We use num_embeddings as the dim instead of embedding_dim here
# # for convenience sake
# class SNEmbedding(nn.Embedding, SN):
# def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
# max_norm=None, norm_type=2, scale_grad_by_freq=False,
# sparse=False, _weight=None,
# num_svs=1, num_itrs=1, eps=1e-12):
# nn.Embedding.__init__(self, num_embeddings, embedding_dim, padding_idx,
# max_norm, norm_type, scale_grad_by_freq,
# sparse, _weight)
# SN.__init__(self, num_svs, num_itrs, num_embeddings, eps=eps)
#
# def forward(self, x):
# return F.embedding(x, self.W_())
# A non-local block as used in SA-GAN
# Note that the implementation as described in the paper is largely incorrect;
# refer to the released code for the actual implementation.
class AttentionBlock(nn.Module):
    """Self-attention block in the SA-GAN style with a gated residual output.

    ``theta``/``phi`` produce the attention logits over spatial positions,
    ``g`` is the value path (half the channels), and ``o`` projects back up to
    ``channels``. A learnable scalar ``gamma``, initialized to 0, gates the
    attention branch, so the block starts out as an exact identity.

    NOTE(review): ``heads`` only sets the query/key channel reduction
    (``channels // heads``); this is single-head attention, not multi-head.
    Spatial dims of the input must be even (2x2 max-pool + ``// 4`` reshape).
    """

    def __init__(self, channels, which_conv=nn.Conv2d, heads=8):
        super(AttentionBlock, self).__init__()
        self.channels = channels
        self.which_conv = which_conv
        self.heads = heads
        # Query/key projections with reduced channel count (channels // heads).
        self.theta = self.which_conv(self.channels, self.channels // heads, kernel_size=1, padding=0,
                                     bias=False)
        self.phi = self.which_conv(self.channels, self.channels // heads, kernel_size=1, padding=0,
                                   bias=False)
        # Value projection (half the channels) and output projection.
        self.g = self.which_conv(self.channels, self.channels // 2, kernel_size=1, padding=0,
                                 bias=False)
        self.o = self.which_conv(self.channels // 2, self.channels, kernel_size=1, padding=0,
                                 bias=False)
        # Learnable gain parameter; 0 at init makes the block an identity.
        self.gamma = nn.Parameter(torch.tensor(0.), requires_grad=True)

    def forward(self, inputs, y=None):
        """Apply self-attention to an NCHW tensor; returns the same shape."""
        theta = self.theta(inputs)
        # Key/value paths are 2x2 max-pooled to shrink the attention matrix.
        phi = F.max_pool2d(self.phi(inputs), [2, 2])
        g = F.max_pool2d(self.g(inputs), [2, 2])
        # Flatten spatial dims; pooled paths have H*W/4 positions.
        theta = theta.view(-1, self.channels // self.heads, inputs.shape[2] * inputs.shape[3])
        phi = phi.view(-1, self.channels // self.heads, inputs.shape[2] * inputs.shape[3] // 4)
        g = g.view(-1, self.channels // 2, inputs.shape[2] * inputs.shape[3] // 4)
        # Attention weights: softmax over the pooled key positions.
        beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1)
        # Attention-weighted values, reshaped back to NCHW and projected.
        o = self.o(torch.bmm(g, beta.transpose(1, 2)).view(-1, self.channels // 2, inputs.shape[2],
                                                           inputs.shape[3]))
        # Gated residual connection.
        return self.gamma * o + inputs


class ConvAttentionNet(nn.Module):
    """1x1-conv stem -> num_blocks x (attention + batch norm) -> 1x1-conv head."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels,
                 num_blocks
                 ):
        super().__init__()
        self.initial_layer = nn.Conv2d(in_channels, hidden_channels, kernel_size=1, padding=0)
        self.attention_blocks = nn.ModuleList([
            AttentionBlock(
                channels=hidden_channels,
                which_conv=nn.Conv2d,
                heads=8
            ) for _ in range(num_blocks)
        ])
        # Bug fix: build one BatchNorm2d PER attention block. The original
        # created a single batch norm, so the zip() in forward() silently
        # truncated the pipeline to just the first attention block.
        self.batch_norm_layers = nn.ModuleList([
            nn.BatchNorm2d(num_features=hidden_channels)
            for _ in range(num_blocks)
        ])
        self.final_layer = nn.Conv2d(hidden_channels, out_channels, kernel_size=1, padding=0)

    def forward(self, inputs):
        temps = self.initial_layer(inputs)
        for attention, batch_norm in zip(self.attention_blocks, self.batch_norm_layers):
            temps = batch_norm(attention(temps))
        return self.final_layer(temps)
def main():
    """Smoke test: run ConvAttentionNet on a random batch and print shapes."""
    batch_size, channels, height, width = 100, 12, 64, 64
    inputs = torch.rand(batch_size, channels, height, width)
    net = ConvAttentionNet(
        in_channels=channels,
        out_channels=2 * channels,
        hidden_channels=32,
        num_blocks=4
    )
    # Report the parameter count, then check the forward-pass output shape.
    print(utils.get_num_parameters(net))
    outputs = net(inputs)
    print(outputs.shape)


if __name__ == '__main__':
    main()
|
import networkx as nx
from networkx.readwrite import json_graph
import pandas as pd
import plotly.express as px
from flask_caching import Cache
import plotly.graph_objects as go
import json
import glob
import os
import itertools
# set root directory for data files
#ROOTBEER = '/home/ubuntu/housing_equity/sandbox-singlepage/' #production
ROOTBEER = '' #local
# Output directories for the pre-rendered network and map JSON figures.
network_dir = os.path.join(ROOTBEER + 'data/network/')
network_missing_dir = os.path.join(ROOTBEER + 'data/network-missing/')
maps_dir = os.path.join(ROOTBEER + 'data/maps/')
#set a map center (for maps only, obviously)
the_bounty = {"lat": 47.6615392, "lon": -122.3446507}
pikes_place = {"lat": 47.6145537,"lon": -122.3497373}
# Census-tract GeoJSON used by the choropleth maps below.
with open(ROOTBEER + 'data/washingtongeo.json','r') as GeoJSON:
    tracts = json.load(GeoJSON)
# Combined tract-level dataset; ID columns stay strings to keep leading zeros.
df_combo = pd.read_csv(ROOTBEER + 'data/df_combo.csv', dtype={"GEOID": str,"TRACT_NUM": str,"YEAR":str})
#master loop function for slider variables
slider_names = ('alpha', 'bravo', 'charlie', 'delta', 'echo', 'foxtrot','golf') #IF YOU CHANGE THIS, also change the networkx_var dict inside update_network below. And don't forget to add a slider in the HTML.
# Every combination of slider values in {0, 1, 5}: 3**7 = 2187 combinations.
slider_values_list = [dict(zip(slider_names, p)) for p in itertools.product([0,1,5], repeat=len(slider_names))]
def leppard(slider_values):
    """Encode one slider-value combination as a compact key like 'a0b1c5d0e1f5g0'.

    Each slider contributes its initial letter followed by its value; the key
    names the per-combination figure files on disk.
    """
    return (
        'a{alpha}b{bravo}c{charlie}d{delta}'
        'e{echo}f{foxtrot}g{golf}'
    ).format(**slider_values)
slider_keys = [leppard(slider_values) for slider_values in slider_values_list]
#if '<KEY>' in slider_keys:
#    slider_keys.remove('<KEY>')
# Pre-render one scatter plot and three choropleth maps per slider
# combination, skipping any figure file that already exists on disk.
for slider in slider_keys:
    print('exporting maps for ' + slider)
    # fig3: omega 2013 vs omega 2018 per tract, colored by neighborhood.
    map_file3_name = os.path.join(maps_dir, 'fig3_{key}.json'.format(key=slider))
    if not os.path.exists(map_file3_name):
        fig3 = px.scatter(df_combo,
                          x='omega13df_{key}'.format(key=slider),
                          y='omega18df_{key}'.format(key=slider),
                          color='neighborhood',
                          text='GEOID'
                          )
        # Lock a 1:1 aspect ratio so movement off the diagonal is comparable.
        fig3.update_yaxes(
            scaleanchor="x",
            scaleratio=1,
        )
        # update_yaxes(
        #     range=[-1.5, 1.5]
        # )
        # fig3.update_xaxes(
        #     range=[-1.5, 1.5]
        # )
        fig3.update_traces(textposition="middle right")
        fig3.update_traces(marker=dict(size=20))
        # Add Diagonal Line so you can see movement btw 2013 and 2018
        fig3.add_shape(
            type="line",
            x0=-5,
            y0=-5,
            x1=5,
            y1=5,
            line=dict(
                color="MediumPurple",
                width=4,
                dash="dash",
            )
        )
        with open(map_file3_name, 'w') as map_file3:
            fig3.write_json(map_file3)
    # fig4: choropleth of the overall omega value for this slider key.
    map_file4_name = os.path.join(maps_dir, 'fig4_{key}.json'.format(key=slider))
    if not os.path.exists(map_file4_name):
        # zmin = df_combo['omegadf_{key}'.format(key=slider)].quantile(0.05)
        # zmax = df_combo['omegadf_{key}'.format(key=slider)].quantile(0.95)
        fig4 = px.choropleth_mapbox(df_combo,
                                    geojson=tracts,
                                    locations=df_combo['GEOID_long'],
                                    featureidkey='properties.GEOID',
                                    color=df_combo['omegadf_{key}'.format(key=slider)],
                                    opacity=0.7,
                                    color_continuous_scale='RdYlGn_r',
                                    # range_color=(zmin, zmax),
                                    range_color=(-20, 20),
                                    color_continuous_midpoint=0
                                    )
        fig4.update_layout(mapbox_style="open-street-map",
                           mapbox_zoom=10.5,
                           mapbox_center=pikes_place)
        with open(map_file4_name, 'w') as map_file4:
            fig4.write_json(map_file4)
    # fig5: choropleth of the 2013 omega value.
    map_file5_name = os.path.join(maps_dir, 'fig5_{key}.json'.format(key=slider))
    if not os.path.exists(map_file5_name):
        # zmin = df_combo['omega13df_{key}'.format(key=slider)].quantile(0.05)
        # zmax = df_combo['omega13df_{key}'.format(key=slider)].quantile(0.95)
        fig5 = px.choropleth_mapbox(df_combo,
                                    geojson=tracts,
                                    locations=df_combo['GEOID_long'],
                                    featureidkey='properties.GEOID',
                                    color=df_combo['omega13df_{key}'.format(key=slider)],
                                    opacity=0.7,
                                    color_continuous_scale='RdYlGn_r',
                                    # range_color=(zmin, zmax),
                                    range_color=(-20, 20),
                                    color_continuous_midpoint=0
                                    )
        fig5.update_layout(mapbox_style="open-street-map",
                           mapbox_zoom=10.5,
                           mapbox_center=pikes_place)
        with open(map_file5_name, 'w') as map_file5:
            fig5.write_json(map_file5)
    # fig6: choropleth of the 2018 omega value.
    map_file6_name = os.path.join(maps_dir, 'fig6_{key}.json'.format(key=slider))
    if not os.path.exists(map_file6_name):
        # zmin = df_combo['omega18df_{key}'.format(key=slider)].quantile(0.05)
        # zmax = df_combo['omega18df_{key}'.format(key=slider)].quantile(0.95)
        fig6 = px.choropleth_mapbox(df_combo,
                                    geojson=tracts,
                                    locations=df_combo['GEOID_long'],
                                    featureidkey='properties.GEOID',
                                    color=df_combo['omega18df_{key}'.format(key=slider)],
                                    opacity=0.7,
                                    color_continuous_scale='RdYlGn_r',
                                    # range_color=(zmin, zmax),
                                    range_color=(-20, 20),
                                    color_continuous_midpoint=0
                                    )
        fig6.update_layout(mapbox_style="open-street-map",
                           mapbox_zoom=10.5,
                           mapbox_center=pikes_place)
        with open(map_file6_name, 'w') as map_file6:
            fig6.write_json(map_file6)
'''
#set directory for graph_name jsons
json_dir = ROOTBEER + 'data/json/*'
graphs_dict = {} #set a dictionary to hold graphs
for file in glob.iglob(json_dir):
with open (file) as json_file:
graph_name = os.path.basename(file)
graph_name = graph_name.rstrip('.json')
graph = json.load(json_file)
graphs_dict[graph_name] = json_graph.node_link_graph(graph) #sets a value that consists of the networkx graph
print('loading graph files')
#loop over graph_dict to write all the variables for the graph
#for graph_name_name, graph in graphs_dict.items():
# exec(graph_name + '=graph')
#from build_network import get_nodes, get_edges
for graph_name in graphs_dict:
key = str(graph_name).lstrip('G2018_')
colorsIndex = {'wallingford':'#ef553b','rainier_beach':'#636efa'} #manually assign colors
colors = df_combo['neighborhood'].map(colorsIndex)
print('adding nodes and edges to graphs')
for graph_name in graphs_dict:
key = str(graph_name).lstrip('G2018_')
network_file_name = os.path.join(network_dir, 'network_{key}.json'.format(key=key))
network_missing_file_name = os.path.join(network_missing_dir, 'network_{key}.json'.format(key=key))
if not os.path.exists(network_file_name):
print('creating graph objects for ' + graph_name)
node_trace2018 = go.Scatter(
x=[],
y=[],
mode='markers+text', # make markers+text to show labels
text=[],
hoverinfo='text',
customdata=df_combo['GEOID'],
marker=dict(
showscale=False,
colorscale='YlGnBu',
reversescale=False,
color=[],
size=20,
opacity=0.8,
colorbar=dict(
thickness=10,
title='COLOR GROUP BY CENSUS TRACT NUMBER',
xanchor='left',
titleside='right'
),
line=dict(width=0)
),
showlegend=True,
marker_line_width=1
)
edge_trace2018 = go.Scatter(
x=[],
y=[],
line=dict(width=1, color='#c6c6c6'),
hoverinfo='text',
mode='lines'
)
print('building edges for ' + graph_name)
for edge in graphs_dict[graph_name].edges():
x0, y0 = graphs_dict[graph_name].nodes[edge[0]]['pos']
x1, y1 = graphs_dict[graph_name].nodes[edge[1]]['pos']
edge_trace2018['x'] += tuple([x0, x1, None])
edge_trace2018['y'] += tuple([y0, y1, None])
print('building nodes for ' + graph_name)
for node in graphs_dict[graph_name].nodes():
x, y = graphs_dict[graph_name].nodes[node]['pos']
print(graph_name + ' ' + node + ' x')
node_trace2018['x'] += tuple([x])
print(graph_name + ' ' + node + ' y')
node_trace2018['y'] += tuple([y])
print(graph_name + ' ' + node + ' text')
node_trace2018.text = df_combo['neighborhood'] + '<br>' + df_combo["TRACT_NUM"] # tract version
print(graph_name + ' ' + node + ' markers')
node_trace2018.marker.color = colors
# node_trace2018.marker.size = (1.5 + df_combo.omega18) * 20
# node_adjacencies = []
# for node, adjacencies in enumerate(graphs_dict[graph_name].items().adjacency()):
# node_adjacencies.append(len(adjacencies[1]))
fig = go.Figure(data=[edge_trace2018,node_trace2018],
layout=go.Layout(
title='',
titlefont=dict(size=16),
showlegend=False,
hovermode='closest',
margin=dict(b=20, l=5, r=5, t=40),
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False))
)
with open(network_missing_file_name, 'w') as network_file:
fig.write_json(network_file)
''' |
<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from enum import Enum
from typing import Dict, Optional, Union
from google.protobuf.any_pb2 import Any as GrpcAny
from google.protobuf.message import Message as GrpcMessage
from dapr.clients.base import DEFAULT_JSON_CONTENT_TYPE
from dapr.clients.grpc._helpers import (
MetadataDict,
MetadataTuple,
tuple_to_dict,
to_bytes,
to_str,
unpack
)
class DaprRequest:
    """Base class for Dapr requests.

    Carries request headers as gRPC metadata, stored internally as a tuple of
    key/value pairs and exposed either as that tuple or as a dict.

    Attributes:
        metadata(dict): A dict to include the headers from Dapr Request.
    """

    def __init__(self, metadata: MetadataTuple = ()):
        # Route through the property setter so the tuple type check runs.
        self.metadata = metadata  # type: ignore

    @property
    def metadata(self) -> MetadataDict:
        """Return the request metadata as a dict."""
        return self.get_metadata(as_dict=True)  # type: ignore

    @metadata.setter
    def metadata(self, val) -> None:
        """Replace the metadata; only tuples are accepted."""
        if isinstance(val, tuple):
            self._metadata = val
        else:
            raise ValueError('val is not tuple')

    def get_metadata(self, as_dict: bool = False) -> Union[MetadataDict, MetadataTuple]:
        """Return the request metadata.

        Args:
            as_dict (bool): dict type metadata if as_dict is True. Otherwise, return
                tuple metadata.

        Returns:
            dict or tuple: request metadata.
        """
        return tuple_to_dict(self._metadata) if as_dict else self._metadata
class InvokeMethodRequest(DaprRequest):
    """A request data representation for invoke_method API.

    This stores the request data with the proper serialization. This seralizes
    data to :obj:`google.protobuf.any_pb2.Any` if data is the type of protocol
    buffer message.

    Attributes:
        metadata(dict): A dict to include the headers from Dapr Request.
        data (str, bytes, GrpcAny, GrpcMessage, optional): the serialized data
            for invoke_method request.
        content_type (str, optional): the content type of data which is valid
            only for bytes array data.
    """

    # Verbs accepted by the http_verb setter.
    HTTP_METHODS = [
        'GET',
        'HEAD',
        'POST',
        'PUT',
        'DELETE',
        'CONNECT',
        'OPTIONS',
        'TRACE'
    ]

    def __init__(
            self,
            data: Union[str, bytes, GrpcAny, GrpcMessage, None] = None,
            content_type: Optional[str] = None):
        """Inits InvokeMethodRequestData with data and content_type.

        Args:
            data (bytes, str, GrpcAny, GrpcMessage, optional): the data
                which is used for invoke_method request.
            content_type (str): the content_type of data when the data is bytes.
                The default content type is application/json.

        Raises:
            ValueError: data is not supported.
        """
        super(InvokeMethodRequest, self).__init__(())
        self._content_type = content_type
        self._http_verb = None
        self._http_querystring: Dict[str, str] = {}
        self.set_data(data)
        # Set content_type to application/json type if content_type
        # is not given and date is bytes or str type.
        if not self.is_proto() and not content_type:
            self.content_type = DEFAULT_JSON_CONTENT_TYPE

    @property
    def http_verb(self) -> Optional[str]:
        """Gets HTTP method in Dapr invocation request."""
        return self._http_verb

    @http_verb.setter
    def http_verb(self, val: Optional[str]) -> None:
        """Sets HTTP method to Dapr invocation request.

        Raises:
            ValueError: val is not one of HTTP_METHODS.
        """
        if val not in self.HTTP_METHODS:
            raise ValueError(f'{val} is the invalid HTTP verb.')
        self._http_verb = val

    @property
    def http_querystring(self) -> Dict[str, str]:
        """Gets HTTP querystring as dict."""
        return self._http_querystring

    def is_http(self) -> bool:
        """Return true if this request is http compatible."""
        # True only when a non-None, non-empty HTTP verb has been set.
        return hasattr(self, '_http_verb') and not (not self._http_verb)

    @property
    def proto(self) -> GrpcAny:
        """Gets raw data as proto any type."""
        return self._data

    def is_proto(self) -> bool:
        """Returns true if data is protocol-buffer serialized."""
        # A non-empty type_url means a real message was packed into the Any;
        # raw bytes/str are stored in an Any with an empty type_url.
        return hasattr(self, '_data') and self._data.type_url != ''

    def pack(self, val: Union[GrpcAny, GrpcMessage]) -> None:
        """Serializes protocol buffer message.

        Args:
            message (:class:`GrpcMessage`, :class:`GrpcAny`): the protocol buffer message object

        Raises:
            ValueError: message is neither GrpcAny nor GrpcMessage.
        """
        if isinstance(val, GrpcAny):
            self._data = val
        elif isinstance(val, GrpcMessage):
            self._data = GrpcAny()
            self._data.Pack(val)
        else:
            raise ValueError('invalid data type')

    def unpack(self, message: GrpcMessage) -> None:
        """Deserializes the serialized protocol buffer message.

        Args:
            message (:obj:`GrpcMessage`): the protocol buffer message object
                to which the response data is deserialized.

        Raises:
            ValueError: message is not protocol buffer message object or message's type is not
                matched with the response data type
        """
        unpack(self.proto, message)

    @property
    def data(self) -> bytes:
        """Gets request data as bytes.

        Raises:
            ValueError: the stored data is a packed protocol buffer, not raw bytes.
        """
        if self.is_proto():
            raise ValueError('data is protocol buffer message object.')
        return self._data.value

    @data.setter
    def data(self, val: Union[str, bytes]) -> None:
        """Sets str or bytes type data to request data."""
        self.set_data(to_bytes(val))

    def set_data(self, val: Union[str, bytes, GrpcAny, GrpcMessage, None]) -> None:
        """Sets data to request data.

        Raw bytes/str are wrapped in an Any with an empty type_url so that
        is_proto() can distinguish them from packed messages.
        """
        if val is None:
            self._data = GrpcAny()
        elif isinstance(val, (bytes, str)):
            self._data = GrpcAny(value=to_bytes(val))
        elif isinstance(val, (GrpcAny, GrpcMessage)):
            self.pack(val)
        else:
            raise ValueError(f'invalid data type {type(val)}')

    def text(self) -> str:
        """Gets the request data as str."""
        return to_str(self.data)

    @property
    def content_type(self) -> Optional[str]:
        """Gets content_type for bytes data."""
        return self._content_type

    @content_type.setter
    def content_type(self, val: Optional[str]) -> None:
        """Sets content type for bytes data."""
        self._content_type = val
class BindingRequest(DaprRequest):
    """A request data representation for invoke_binding API.

    This stores the request data and metadata with the proper serialization.
    This serializes data to bytes and metadata to a dictionary of key value pairs.

    Attributes:
        data (bytes): the data which is used for invoke_binding request.
        metadata (Dict[str, str]): the metadata sent to the binding.
    """

    def __init__(
            self,
            data: Union[str, bytes],
            binding_metadata: Optional[Dict[str, str]] = None):
        """Inits BindingRequest with data and metadata if given.

        Args:
            data (bytes, str): the data which is used for invoke_binding request.
            binding_metadata (dict, optional): the metadata to be sent to the binding.

        Raises:
            ValueError: data is not bytes or str.
        """
        super(BindingRequest, self).__init__(())
        self.data = data  # type: ignore
        # Bug fix: the original declared a mutable default argument ({}),
        # which is created once and shared by every instance constructed
        # without explicit metadata; build a fresh dict per instance instead.
        self._binding_metadata = {} if binding_metadata is None else binding_metadata

    @property
    def data(self) -> bytes:
        """Gets request data as bytes."""
        return self._data

    @data.setter
    def data(self, val: Union[str, bytes]) -> None:
        """Sets str or bytes type data to request data."""
        self._data = to_bytes(val)

    def text(self) -> str:
        """Gets the request data as str."""
        return to_str(self.data)

    @property
    def binding_metadata(self):
        """Gets the metadata for output binding."""
        return self._binding_metadata
class TransactionOperationType(Enum):
    """Represents the type of operation for a Dapr Transaction State Api Call"""
    # Values mirror the wire-level operation names the Dapr runtime expects.
    upsert = "upsert"
    delete = "delete"
class TransactionalStateOperation:
    """A single operation inside a state transaction; 'upsert' by default.

    Attributes:
        key (str): state's key.
        data (Union[bytes, str]): state's data.
        etag (str): state's etag.
        operation_type (TransactionOperationType): operation to be performed.
    """

    def __init__(
            self,
            key: str,
            data: Union[bytes, str],
            etag: Optional[str] = None,
            operation_type: TransactionOperationType = TransactionOperationType.upsert):
        """Initializes TransactionalStateOperation item from :obj:`runtime_v1.TransactionalStateOperation`.

        Args:
            key (str): state's key.
            data (Union[bytes, str]): state's data.
            etag (str): state's etag.
            operation_type (Optional[TransactionOperationType]): operation to be performed.

        Raises:
            ValueError: data is not bytes or str.
        """
        if not isinstance(data, (bytes, str)):
            raise ValueError(f'invalid type for data {type(data)}')
        self._key = key
        self._data = data  # type: ignore
        self._etag = etag
        self._operation_type = operation_type

    @property
    def key(self) -> str:
        """The state key this operation targets."""
        return self._key

    @property
    def data(self) -> Union[bytes, str]:
        """The raw payload for this operation."""
        return self._data

    @property
    def etag(self) -> Optional[str]:
        """The optimistic-concurrency etag, if one was supplied."""
        return self._etag

    @property
    def operation_type(self) -> TransactionOperationType:
        """The kind of operation to perform (upsert or delete)."""
        return self._operation_type
|
<reponame>vd1371/XProject<filename>outlier_detection/MultivariateGaussian.py
import pandas as pd
import numpy as np
import os, sys
parent_dir = os.path.split(os.path.dirname(__file__))[0]
sys.path.insert(0,parent_dir)
from Reporter import *
from scipy.stats import multivariate_normal
from sklearn.preprocessing import StandardScaler, MinMaxScaler
class OutlierDetector(Report):
    """Removes low-density rows from a dataset of continuous variables.

    Fits a multivariate normal distribution to the data, scores every row by
    its pdf, and drops the lowest-density ``percentage`` of rows as outliers.
    """

    def __init__(self, df, name = None, percentage = 0.05, thresh = 0.01):
        """df: a DataFrame, or a string handed to the project's open_csv.
        percentage: fraction of lowest-density rows to drop.
        thresh: stored as self.threshold but unused within this class.
        """
        super(OutlierDetector, self).__init__(name, 'Preprocessing')
        if isinstance(df, pd.DataFrame):
            self.data = df
        elif isinstance(df, str):
            self.data = open_csv(df)
        else:
            raise TypeError("--- df type is wrong")
        self.percentage = percentage
        self.threshold = thresh
        # self.sites = self.data.iloc[:,:106]
        # self.data = self.data.iloc[:,-7:]
        self.log.info('-------------- Outliers are about to be found %s '%self.name)

    def setPercentage(self, per):
        # Override the fraction of rows treated as outliers.
        self.percentage = per

    def findOutliers(self, distribution = 'normal'):
        """Drop the lowest-density rows and write <name>-NoOutOriginal.csv.

        distribution: 'normal' or 'log-normal' (log-transform first, exp back
        afterwards). Any other value silently behaves like 'normal'.
        """
        if distribution == 'log-normal':
            self.data = self.data.apply(np.log)
        # Finding covariance matrix and mean array
        corr_mat = self.data.corr()
        cov_mat = self.data.cov()
        mean_arr = [self.data[col_name].astype('float64').mean() for col_name in self.data.columns]
        self.log.info("Correlation matrix\n" +str(corr_mat))
        self.log.info("Mean array:" + str(mean_arr))
        # Creating multivariate probability distribution function
        var = multivariate_normal(mean=mean_arr, cov=cov_mat)
        self.data['pdf'] = self.data.apply(lambda row : var.pdf(row), axis = 1)
        # Sorting based on pdf (ascending: lowest-density rows first)
        self.data.sort_values(by=['pdf'], axis = 0, inplace = True, ascending= True)
        # Slicing the dataset: keep everything ABOVE the lowest `percentage`
        # of densities, i.e. discard the outliers at the head of the sort.
        self.data = self.data.iloc[int(self.percentage*len(self.data)):,:].copy()
        # Shuffle the dataset and reseting index
        self.data = self.data.sample(frac=1).reset_index(drop=True)
        # Dropping the pdf column
        self.data.drop('pdf', axis = 1, inplace=True)
        if distribution == 'log-normal':
            self.data = self.data.apply(np.exp)
        # Saving the original dataset
        # self.data = pd.concat([self.sites, self.data], axis = 1, join = 'inner')
        self.data.to_csv(self.directory + "/" + self.name + "-NoOutOriginal.csv")
        # self.data = self.data.iloc[:,-7:]
        print (f'The data for {self.name} is saved')

    def standardize(self, method = 'Standard', which_cols = "all", methods_list= []):
        """Scale columns in place and write <name>-NoOutScaled.csv.

        method: 'Standard' (z-score) or 'Normal' (min-max) applied to all
        columns; any other value falls back to the caller-supplied
        methods_list of per-column 'S'/'N' flags.
        which_cols: 'all' scales every column; anything else skips the last
        column (presumably the label -- TODO confirm).

        NOTE(review): if methods_list contains a flag other than 'S'/'N',
        `scaler` is unbound (first column) or stale from the previous
        iteration -- verify inputs before relying on this path.
        """
        pointer = 0 if which_cols == 'all' else 1
        if method == 'Standard':
            methods_list = ['S' for _ in range(len(self.data.columns)-pointer)]
        elif method == 'Normal':
            methods_list = ['N' for _ in range(len(self.data.columns)-pointer)]
        cols = self.data.columns[:] if pointer == 0 else self.data.columns[:-1]
        # Scaling the Data column by column
        for i , col in enumerate(cols):
            if methods_list[i] == 'S':
                scaler = StandardScaler()
            elif methods_list[i] == 'N':
                scaler = MinMaxScaler()
            self.data[col] = scaler.fit_transform(np.reshape(self.data[col].values, (-1,1)))
        # self.data = pd.concat([self.sites, self.data], axis = 1, join = 'inner')
        # Saving the dataset
        self.data.to_csv(self.directory + "/" + self.name + "-NoOutScaled.csv")
        print (f'The data for {self.name} is saved')
def run():
    # NOTE(review): the first positional argument of OutlierDetector is ``df``
    # (a DataFrame or a string handed to open_csv); confirm that open_csv
    # resolves the bare name 'DNNPaper' to the intended CSV file.
    myDetector = OutlierDetector('DNNPaper','DNNPaper', percentage = 0.05)
    # NOTE(review): findOutliers only recognizes 'log-normal'; 'log' silently
    # falls through to the plain-normal path -- confirm which was intended.
    myDetector.findOutliers(distribution='log')
    # "Standard" or "Normal"
    # Which call 'all' or else
    myDetector.standardize(method = 'Standard', which_cols = "Not-all")


if __name__ == "__main__":
    run()
|
<filename>API/utility.py
from __future__ import print_function
import math
import numpy
import glob
import torch
import torchvision
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import io
import glob
import os
from shutil import move, copy
from os.path import join
from os import listdir, rmdir
import time
import numpy as np
import random
def find_custom_dataset_mean_std(DATA_PATH, cuda):
    """Compute per-channel mean/std of the images under DATA_PATH/train_set.

    Makes two passes over the dataset: one for the mean, one for the variance.

    Args:
        DATA_PATH: root folder containing a ``train_set`` ImageFolder layout.
        cuda: True to use large-batch, pinned-memory loader settings.

    Returns:
        (mean, std): two tuples of float32 values, one entry per channel.
    """
    simple_transforms = transforms.Compose([
        transforms.ToTensor()
    ])
    exp = datasets.ImageFolder(DATA_PATH + "/train_set", transform=simple_transforms)
    dataloader_args = dict(shuffle=True, batch_size=256, num_workers=4, pin_memory=True) if cuda else dict(shuffle=True, batch_size=64)
    loader = torch.utils.data.DataLoader(exp, **dataloader_args)

    # First pass: per-channel mean over all pixels.
    mean = 0.0
    for images, _ in loader:
        batch_samples = images.size(0)
        images = images.view(batch_samples, images.size(1), -1)
        mean += images.mean(2).sum(0)
    mean = mean / len(loader.dataset)

    # Second pass: per-channel variance. Bug fix: accumulate the actual
    # per-channel pixel count instead of assuming every image is 224x224,
    # so the std is correct for any image size.
    var = 0.0
    pixel_count = 0
    for images, _ in loader:
        batch_samples = images.size(0)
        images = images.view(batch_samples, images.size(1), -1)
        var += ((images - mean.unsqueeze(1)) ** 2).sum([0, 2])
        pixel_count += batch_samples * images.size(2)
    std = torch.sqrt(var / pixel_count)
    return tuple(mean.numpy().astype(numpy.float32)), tuple(std.numpy().astype(numpy.float32))
def find_cifar10_normalization_values(data_path='./data'):
    """Compute per-channel mean/std of the CIFAR10 training set (pixels in [0, 1]).

    Downloads the dataset to data_path if needed.

    Returns:
        (means, stdevs): tuples with one float per RGB channel.
    """
    num_of_inp_channels = 3
    simple_transforms = transforms.Compose([
        transforms.ToTensor()
    ])
    exp = datasets.CIFAR10(data_path, train=True, download=True, transform=simple_transforms)
    data = exp.data
    data = data.astype(numpy.float32)/255
    means = ()
    stdevs = ()
    for i in range(num_of_inp_channels):
        pixels = data[:,:,:,i].ravel()
        # Bug fix: the original wrapped the mean in round(), collapsing values
        # like 0.49 to 0 and producing useless normalization statistics.
        means = means + (numpy.mean(pixels),)
        stdevs = stdevs + (numpy.std(pixels),)
    print("means: {}".format(means))
    print("stdevs: {}".format(stdevs))
    print('transforms.Normalize(mean = {}, std = {})'.format(means, stdevs))
    return means, stdevs
# visualize accuracy and loss graph
def visualize_graph(train_losses, train_acc, test_losses, test_acc):
    """Render a 2x2 grid of the training/test loss and accuracy curves."""
    fig, axs = plt.subplots(2, 2, figsize=(15, 10))
    panels = (
        (0, 0, train_losses, "Training Loss"),
        (1, 0, train_acc, "Training Accuracy"),
        (0, 1, test_losses, "Test Loss"),
        (1, 1, test_acc, "Test Accuracy"),
    )
    for row, col, series, panel_title in panels:
        axs[row, col].plot(series)
        axs[row, col].set_title(panel_title)
def visualize_save_train_vs_test_graph(EPOCHS, dict_list, title, xlabel, ylabel, PATH, name="fig"):
    """Plot every labelled series in dict_list over the epoch axis and save a PNG."""
    plt.figure(figsize=(20, 10))
    for label, series in dict_list.items():
        # Spread each series evenly across the epoch range, whatever its length.
        xs = numpy.linspace(1, EPOCHS + 1, len(series))
        plt.plot(xs, series, label=label)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend()
    plt.savefig(PATH + "/" + name + ".png")
def set_device():
    """Return the torch device to run on: CUDA when available, otherwise CPU."""
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")
# view and save comparison graph of cal accuracy and loss
def visualize_save_comparison_graph(EPOCHS, dict_list, title, xlabel, ylabel, PATH, name="fig"):
    """Plot each labelled metric series against epoch number and save the PNG."""
    plt.figure(figsize=(20, 10))
    epoch_axis = range(1, EPOCHS + 1)
    for label, series in dict_list.items():
        plt.plot(epoch_axis, series, label=label)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend()
    plt.savefig(PATH + "/visualization/" + name + ".png")
# view and save misclassified images
def classify_images(model, test_loader, device, max_imgs=25):
    """Split test-set predictions into misclassified and correct examples.

    Args:
        model: network to evaluate (called directly; .eval() is NOT set here).
        test_loader: iterable of (data, target) batches.
        device: device to move each batch to.
        max_imgs: cap on how many examples to collect per category.

    Returns:
        (misclassified_imgs, correct_imgs): lists of dicts with keys
        "target" and "pred" (numpy scalars) and "img" (the input tensor).
    """
    misclassified_imgs = []
    correct_imgs = []
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            pred = output.argmax(dim=1, keepdim=True)  # index of the max log-probability
            matches = pred.eq(target.view_as(pred))
            for mis_ind in (~matches).nonzero()[:, 0]:
                if len(misclassified_imgs) < max_imgs:
                    misclassified_imgs.append({
                        "target": target[mis_ind].cpu().numpy(),
                        "pred": pred[mis_ind][0].cpu().numpy(),
                        "img": data[mis_ind]
                    })
            for ind in matches.nonzero()[:, 0]:
                if len(correct_imgs) < max_imgs:
                    correct_imgs.append({
                        "target": target[ind].cpu().numpy(),
                        "pred": pred[ind][0].cpu().numpy(),
                        "img": data[ind]
                    })
            # Stop iterating the loader once both buckets are full.
            if len(misclassified_imgs) >= max_imgs and len(correct_imgs) >= max_imgs:
                break
    return misclassified_imgs, correct_imgs
def plot_images(images, PATH, name="fig", sub_folder_name="/visualization", is_cifar10 = True, labels_list=None):
    """Draw the given examples in a 5-column grid and save the figure as PNG.

    Each item of ``images`` is a dict with "img" (normalized CHW tensor) and
    "target"/"pred" class indices; images are denormalized before display.
    """
    cols = 5
    rows = math.ceil(len(images) / cols)
    fig = plt.figure(figsize=(20,10))
    for i in range(len(images)):
        img = denormalize(images[i]["img"])
        plt.subplot(rows,cols,i+1)
        plt.tight_layout()
        plt.imshow(numpy.transpose(img.cpu().numpy(), (1, 2, 0)), cmap='gray', interpolation='none')
        if is_cifar10:
            CIFAR10_CLASS_LABELS = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
            plt.title(f"{i+1}) Ground Truth: {CIFAR10_CLASS_LABELS[images[i]['target']]},\n Prediction: {CIFAR10_CLASS_LABELS[images[i]['pred']]}")
            # A caller-supplied labels_list overrides the CIFAR10 names.
            if labels_list is not None:
                plt.title(f"{i+1}) Ground Truth: {labels_list[images[i]['target']]},\n Prediction: {labels_list[images[i]['pred']]}")
        else:
            # NOTE(review): labels_list is ignored when is_cifar10=False --
            # raw class indices are shown. Confirm this is intended.
            plt.title(f"{i+1}) Ground Truth: {images[i]['target']},\n Prediction: {images[i]['pred']}")
        plt.xticks([])
        plt.yticks([])
    plt.savefig(PATH+sub_folder_name+"/"+str(name)+".png")
def show_save_misclassified_images(model, test_loader, device, PATH, name="fig", max_misclassified_imgs=25, is_cifar10 = True, labels_list=None):
    """Collect up to max_misclassified_imgs wrong predictions and save the grid."""
    wrong, _unused = classify_images(model, test_loader, device, max_misclassified_imgs)
    plot_images(wrong, PATH, name, is_cifar10=is_cifar10, labels_list=labels_list)
def show_save_correctly_classified_images(model, test_loader, device, PATH, name="fig", max_correctly_classified_images_imgs=25, is_cifar10 = True, labels_list=None):
    """Collect up to the requested number of correct predictions and save the grid."""
    _unused, right = classify_images(model, test_loader, device, max_correctly_classified_images_imgs)
    plot_images(right, PATH, name, is_cifar10=is_cifar10, labels_list=labels_list)
def denormalize(tensor, mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]):
    """Undo channel-wise normalization: returns tensor * std + mean.

    Accepts a single CHW image or an NCHW batch and returns the same shape.
    Defaults are the usual CIFAR10 statistics.
    """
    single_img = tensor.ndimension() == 3
    if single_img:
        # Promote CHW to a batch of one so the broadcast below is uniform.
        tensor = tensor[None, :, :, :]
    if tensor.ndimension() != 4:
        raise TypeError('tensor should be 4D')
    mean_t = torch.FloatTensor(mean).view(1, 3, 1, 1).expand_as(tensor).to(tensor.device)
    std_t = torch.FloatTensor(std).view(1, 3, 1, 1).expand_as(tensor).to(tensor.device)
    restored = tensor.mul(std_t).add(mean_t)
    return restored[0] if single_img else restored
def imshow(img):
    """Denormalize a CHW tensor and display it with matplotlib (HWC order)."""
    npimg = denormalize(img).numpy()
    plt.imshow(numpy.transpose(npimg, (1, 2, 0)))
def show_sample_images(train_loader, labels_list, num_imgs=5):
    """Display the first num_imgs images of one batch with their label names.

    Args:
        train_loader: DataLoader yielding (images, labels) batches.
        labels_list: maps label indices to human-readable names.
        num_imgs: how many images from the batch to show.
    """
    # get some random training images
    dataiter = iter(train_loader)
    # Bug fix: DataLoader iterators no longer expose a .next() method on
    # recent PyTorch; use the builtin next() instead.
    images, labels = next(dataiter)
    # show images
    imshow(torchvision.utils.make_grid(images[:num_imgs]))
    # print labels
    print(' '.join('%5s' % labels_list[labels[j]] for j in range(num_imgs)))
def class_to_label_mapping(DATA_PATH):
    """Map the class folders under DATA_PATH/train_set to readable labels.

    Reads DATA_PATH/words.txt (tab-separated "<class-id>\t<label>" lines) and
    returns, in file order, the label of every class id that has a folder.
    Labels containing a comma are shortened to their first alias plus ",etc".
    """
    # Folder basenames double as class ids; handle both path separators.
    class_list = [
        p.split('/')[-1].split('\\')[-1]
        for p in glob.glob(DATA_PATH + '/train_set/*')
    ]

    labels_list = []
    with open(DATA_PATH + '/words.txt', 'r') as f:
        for line in f.read().splitlines():
            parts = line.split('\t')
            if parts[0] in class_list:
                label = parts[1]
                if ',' in label:
                    label = label.split(',')[0] + ",etc"
                labels_list.append(label)
    return labels_list
def merge_split_data(imagenet_root):
    """Re-split a tiny-imagenet layout: fold val/ into train/, then carve out a test set.

    Step 1 moves every validation image into its class folder under train/
    (classes resolved via val_annotations.txt). Step 2 moves 165 random
    images per class into test_set/<class>/ and the remainder into
    train_set/<class>/. Destructive: files are moved, not copied.
    """
    target_folder = imagenet_root+"/val/"
    dest_folder = imagenet_root+"/train/"
    # Map each validation file name to its class folder.
    val_dict = {}
    with open(imagenet_root+'/val/val_annotations.txt','r') as f:
        for line in f.readlines():
            split_line = line.split('\t')
            val_dict[split_line[0]] = split_line[1]
    # Move every val image into its class's train/<class>/images/ folder.
    paths = glob.glob(imagenet_root+'/val/images/*')
    for path in paths:
        # Basename extraction that tolerates both / and \ separators.
        file = path.split('/')[-1].split('\\')[-1]
        folder = val_dict[file]
        dest = dest_folder + str(folder) + '/images/' + str(file)
        move(path, dest)
    target_folder = imagenet_root+'/train/'
    train_folder = imagenet_root+'/train_set/'
    test_folder = imagenet_root+'/test_set/'
    os.mkdir(train_folder)
    os.mkdir(test_folder)
    # Per class: shuffle, hold out 165 images for test, rest go to train.
    paths = glob.glob(imagenet_root+'/train/*')
    for path in paths:
        folder = path.split('/')[-1].split('\\')[-1]
        source = target_folder + str(folder+'/images/')
        train_dest = train_folder + str(folder+'/')
        test_dest = test_folder + str(folder+'/')
        os.mkdir(train_dest)
        os.mkdir(test_dest)
        images = glob.glob(source+str('*'))
        # shuffle so the 165 held-out test images are a random sample
        random.shuffle(images)
        test_imgs = images[:165].copy()
        train_imgs = images[165:].copy()
        for image in test_imgs:
            file = image.split('/')[-1].split('\\')[-1]
            dest = test_dest + str(file)
            move(image, dest)
        for image in train_imgs:
            file = image.split('/')[-1].split('\\')[-1]
            dest = train_dest + str(file)
            move(image, dest)
<reponame>sleepsonthefloor/openstack-dashboard<filename>django-nova/src/django_nova/views/securitygroups.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Nova security groups.
"""
from django import http
from django import template
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render_to_response
from django_nova import exceptions
from django_nova import forms
from django_nova.exceptions import handle_nova_error
from django_nova.shortcuts import get_project_or_404
@login_required
@handle_nova_error
def index(request, project_id):
    """Render the list of all security groups in the project."""
    project = get_project_or_404(request, project_id)
    groups = project.get_security_groups()
    context = {
        'create_form': forms.CreateSecurityGroupForm(project),
        'project': project,
        'securitygroups': groups,
    }
    return render_to_response(
        'django_nova/securitygroups/index.html', context,
        context_instance=template.RequestContext(request))
@login_required
@handle_nova_error
def detail(request, project_id, group_name):
    """Render one security group plus the rule-authorization form."""
    project = get_project_or_404(request, project_id)
    group = project.get_security_group(group_name)
    # A missing/empty group means the name is unknown -> 404.
    if not group:
        raise http.Http404
    context = {
        'authorize_form': forms.AuthorizeSecurityGroupRuleForm(),
        'project': project,
        'securitygroup': group,
    }
    return render_to_response(
        'django_nova/securitygroups/detail.html', context,
        context_instance=template.RequestContext(request))
@login_required
@handle_nova_error
def add(request, project_id):
project = get_project_or_404(request, project_id)
if request.method == 'POST':
form = forms.CreateSecurityGroupForm(project, request.POST)
if form.is_valid():
try:
project.create_security_group(
form.cleaned_data['name'],
form.cleaned_data['description'])
except exceptions.NovaApiError, e:
messages.error(request,
'Unable to create security group: %s' % e.message)
else:
messages.success(
request,
'Security Group %s has been succesfully created.' % \
form.cleaned_data['name'])
else:
securitygroups = project.get_security_groups()
return render_to_response('django_nova/securitygroups/index.html', {
'create_form': form,
'project': project,
'securitygroups': securitygroups,
}, context_instance = template.RequestContext(request))
return redirect('nova_securitygroups', project_id)
@login_required
@handle_nova_error
def authorize(request, project_id, group_name):
project = get_project_or_404(request, project_id)
if request.method == 'POST':
form = forms.AuthorizeSecurityGroupRuleForm(request.POST)
if form.is_valid():
try:
project.authorize_security_group(
group_name = group_name,
ip_protocol = form.cleaned_data['protocol'],
from_port = form.cleaned_data['from_port'],
to_port = form.cleaned_data['to_port'])
except exceptions.NovaApiError, e:
messages.error(request,
'Unable to authorize: %s' % e.message)
else:
messages.success(
request,
'Security Group %s: Access to %s ports %d - %d'
' has been authorized.' %
(group_name,
form.cleaned_data['protocol'],
form.cleaned_data['from_port'],
form.cleaned_data['to_port']))
else:
securitygroup = project.get_security_group(group_name)
if not securitygroup:
raise http.Http404
return render_to_response('django_nova/securitygroups/detail.html', {
'authorize_form': form,
'project': project,
'securitygroup': securitygroup,
}, context_instance = template.RequestContext(request))
return redirect('nova_securitygroups_detail', project_id, group_name)
@login_required
@handle_nova_error
def revoke(request, project_id, group_name):
project = get_project_or_404(request, project_id)
if request.method == 'POST':
try:
project.revoke_security_group(
group_name = group_name,
ip_protocol = request.POST['protocol'],
from_port = request.POST['from_port'],
to_port = request.POST['to_port'])
except exceptions.NovaApiError, e:
messages.error(request, 'Unable to revoke: %s' % e.message)
else:
messages.success(
request,
'Security Group %s: Access to %s ports %s - %s '
'has been revoked.' %
(group_name,
request.POST['protocol'],
request.POST['from_port'],
request.POST['to_port']))
return redirect('nova_securitygroups_detail', project_id, group_name)
@login_required
@handle_nova_error
def delete(request, project_id, group_name):
project = get_project_or_404(request, project_id)
if request.method == 'POST':
try:
project.delete_security_group(name=group_name)
except exceptions.NovaApiError, e:
messages.error(
request,
'Unable to delete security group: %s' % e.message)
else:
messages.success(request,
'Security Group %s was successfully deleted.' %
group_name)
return redirect('nova_securitygroups', project_id)
|
<reponame>lopp2005/HiSpatialCluster
# -*- coding: utf-8 -*-
"""
Calculate Density Tool
Created on Fri Apr 28 11:21:21 2017
@author: cheny
"""
from arcpy import Parameter
import arcpy
from multiprocessing import cpu_count
import numpy.lib.recfunctions as recfunctions
import sys
class CalculateDensityTool(object):
    """ArcGIS Python-toolbox tool: step 1 of HiSpatialCluster.

    Computes a density value for every input point using either a CUT_OFF
    or GAUSS kernel, on CPU or GPU, and writes the points to a new feature
    class with an added DENSITY field.
    """
    def __init__(self):
        """Calculate Density Tool"""
        # Metadata displayed in the ArcGIS toolbox UI.
        self.label = "1 Calculating Density Tool"
        self.description = "Calculate Density for HiSpatialCluster."
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Define parameter definitions"""
        #1 Input point feature class (points only).
        paraminput = Parameter(
            displayName="Input Points",
            name="in_points",
            datatype="DEFeatureClass",
            parameterType="Required",
            direction="Input")
        paraminput.filter.list = ["Point"]
        #2 Unique identifier field of the input points (integer types only).
        paramidfield = Parameter(
            displayName="Identifier Field",
            name="id_field",
            datatype="Field",
            parameterType="Required",
            direction="Input")
        # Field choices are taken from the input feature class chosen above.
        paramidfield.parameterDependencies = [paraminput.name]
        paramidfield.filter.list = ['Short','Long']
        #3 Weight field: each point's contribution to the density sum.
        paramweight = Parameter(
            displayName="Weight Field",
            name="weight_field",
            datatype="Field",
            parameterType="Required",
            direction="Input")
        # Set the filter to accept only numeric field types.
        paramweight.filter.list = ['Short','Long','Float','Single','Double']
        paramweight.parameterDependencies = [paraminput.name]
        #4 Output feature class that receives the DENSITY values.
        paramoutput = Parameter(
            displayName="Output Result Points",
            name="out_points",
            datatype="DEFeatureClass",
            parameterType="Required",
            direction="Output")
        #5 Kernel type used by the density estimator.
        paramkt = Parameter(
            displayName="Density Kernel Type",
            name="kernel_type",
            datatype="GPString",
            parameterType="Required",
            direction="Input"
            )
        paramkt.filter.list=['CUT_OFF','GAUSS']
        paramkt.value='GAUSS'
        #6 Cut-off distance; enabled only when kernel type is CUT_OFF
        #  (see updateParameters).
        paramcod = Parameter(
            displayName="Cut Off Distance",
            name="cut_off_d",
            datatype="GPDouble",
            parameterType="Required",
            direction="Input"
            )
        paramcod.value="100"
        paramcod.enabled=0
        #7 Sigma of the Gauss kernel; enabled only when kernel type is GAUSS.
        paramgks = Parameter(
            displayName="Gauss Kernel's Sigma",
            name="gauss_sigma",
            datatype="GPDouble",
            parameterType="Required",
            direction="Input"
            )
        paramgks.value="30"
        #8 Computation device; GPU requires 64-bit Python with CUDA support.
        paramdevice = Parameter(
            displayName="Device for Calculation",
            name="calc_device",
            datatype="GPString",
            parameterType="Required",
            direction="Input"
            )
        paramdevice.filter.list=['CPU','GPU']
        paramdevice.value='CPU'
        #9 Number of CPU worker processes (defaults to all available cores).
        paramcpuc = Parameter(
            displayName="CPU Parallel Cores",
            name="cpu_cores",
            datatype="GPLong",
            parameterType="Required",
            direction="Input"
            )
        paramcpuc.value=cpu_count()
        # Index order matters: updateParameters() and execute() address this
        # list positionally (0=input ... 4=kernel, 5=cutoff, 6=sigma,
        # 7=device, 8=cpu cores).
        params = [paraminput,paramidfield,paramweight,
                  paramoutput,paramkt,paramcod,
                  paramgks,paramdevice,paramcpuc]
        return params

    def updateParameters(self, parameters):
        """Toggle dependent parameters and derive a default output path."""
        params=parameters
#        if parameters[0].altered and not parameters[1].altered:
#            parameters[1].value=arcpy.Describe(parameters[0].valueAsText).OIDFieldName
        # Enable exactly one of cut-off distance / sigma for the kernel type.
        if params[4].value=='CUT_OFF':
            params[5].enabled=1
            params[6].enabled=0
        else:
            params[5].enabled=0
            params[6].enabled=1
        # The CPU core count is only meaningful for CPU computation.
        if params[7].value=='CPU':
            params[8].enabled=1
        else:
            params[8].enabled=0
        # Default output name: '<input>_dens', inserting before '.shp' when
        # the input is a shapefile path.
        if parameters[0].altered and not parameters[3].altered:
            in_fe=parameters[0].valueAsText
            parameters[3].value=in_fe[:len(in_fe)-4]+'_dens'+in_fe[-4:] if in_fe[-3:]=='shp' else in_fe+'_dens'
        return

    def execute(self, parameters, messages):
        """Run the density calculation and write the output feature class."""
        #get params (positional indices match getParameterInfo's list)
        input_feature=parameters[0].valueAsText
        id_field=parameters[1].valueAsText
        weight_field=parameters[2].valueAsText
        output_feature=parameters[3].valueAsText
        kernel_type=parameters[4].valueAsText
        calc_device=parameters[7].valueAsText
        # GPU/CUDA is unavailable on 32-bit Python builds; fail early.
        if '64 bit' not in sys.version and calc_device=='GPU':
            arcpy.AddError('Platform is 32bit and has no support for GPU/CUDA.')
            return
        arcpy.SetProgressorLabel('Calculating Density...')
        #calculation: pull coordinates and weights into a NumPy record array
        arrays=arcpy.da.FeatureClassToNumPyArray(input_feature,[id_field,'SHAPE@X','SHAPE@Y',weight_field])
        densities=0
        # NOTE(review): section_gpu/section_cpu are sibling modules imported
        # with an implicit (Python-2 style) relative import — confirm they
        # sit next to this toolbox file.
        if calc_device=='GPU':
            from section_gpu import calc_density_gpu
            densities=calc_density_gpu(arrays['SHAPE@X'],arrays['SHAPE@Y'],\
                                       arrays[weight_field],kernel_type,\
                                       cutoffd=parameters[5].value,sigma=parameters[6].value)
        else:
            from section_cpu import calc_density_cpu
            densities=calc_density_cpu(arrays['SHAPE@X'],arrays['SHAPE@Y'],\
                                       arrays[weight_field],kernel_type,\
                                       parameters[8].value,cutoffd=parameters[5].value,sigma=parameters[6].value)
        # Replace the weight column with the computed DENSITY column.
        result_struct=recfunctions.append_fields(recfunctions.drop_fields(arrays,weight_field),\
                                                 'DENSITY',data=densities,usemask=False)
#        if '64 bit' in sys.version and id_field==arcpy.Describe(input_feature).OIDFieldName:
#            sadnl=list(result_struct.dtype.names)
#            sadnl[sadnl.index(id_field)]='OID@'
#            result_struct.dtype.names=tuple(sadnl)
        arcpy.da.NumPyArrayToFeatureClass(result_struct,output_feature,\
                                          ('SHAPE@X','SHAPE@Y'),arcpy.Describe(input_feature).spatialReference)
        return
|
import numpy as np
import pandas as pd
from aslib_scenario.aslib_scenario import ASlibScenario
from ConfigSpace import Configuration
from ConfigSpace.conditions import EqualsCondition, InCondition
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import (
CategoricalHyperparameter, UniformFloatHyperparameter, UniformIntegerHyperparameter
)
from sklearn.ensemble import RandomForestClassifier
__author__ = "<NAME>"
__license__ = "BSD"
class RandomForest(object):
    """Wrapper around sklearn's RandomForestClassifier for algorithm selection.

    Hyperparameters are registered in a ConfigSpace under the "rf:" prefix
    and are active only when the top-level "classifier" hyperparameter has
    the value "RandomForest".
    """

    @staticmethod
    def add_params(cs: ConfigurationSpace):
        '''
        Add RandomForest hyperparameters (and their activation conditions)
        to the given ConfigurationSpace.

        Does nothing when the space has no "classifier" hyperparameter or
        when "RandomForest" is not one of its choices.
        '''
        try:
            classifier = cs.get_hyperparameter("classifier")
        except Exception:
            # No "classifier" hyperparameter registered yet: nothing to add.
            # (Previously a bare `except:` wrapped the whole body, silently
            # swallowing genuine errors from add_hyperparameter/add_condition.)
            return
        if "RandomForest" not in classifier.choices:
            return

        n_estimators = UniformIntegerHyperparameter(
            name="rf:n_estimators", lower=10, upper=100, default_value=10, log=True
        )
        criterion = CategoricalHyperparameter(
            name="rf:criterion", choices=["gini", "entropy"], default_value="gini"
        )
        max_features = CategoricalHyperparameter(
            name="rf:max_features", choices=["sqrt", "log2", "None"], default_value="sqrt"
        )
        max_depth = UniformIntegerHyperparameter(
            name="rf:max_depth", lower=10, upper=2**31, default_value=2**31, log=True
        )
        min_samples_split = UniformIntegerHyperparameter(
            name="rf:min_samples_split", lower=2, upper=100, default_value=2, log=True
        )
        min_samples_leaf = UniformIntegerHyperparameter(
            name="rf:min_samples_leaf", lower=2, upper=100, default_value=10, log=True
        )
        bootstrap = CategoricalHyperparameter(
            name="rf:bootstrap", choices=[True, False], default_value=True
        )
        # Register each hyperparameter and make it active only when the
        # "classifier" choice is "RandomForest".
        for param in (n_estimators, criterion, max_features, max_depth,
                      min_samples_split, min_samples_leaf, bootstrap):
            cs.add_hyperparameter(param)
            cs.add_condition(
                InCondition(child=param, parent=classifier, values=["RandomForest"]))
        # (Removed leftover debug `print(cs)`.)

    def __init__(self):
        '''
        Constructor

        The underlying sklearn model is created lazily in fit().
        '''
        self.model = None

    def __str__(self):
        return "RandomForest"

    def fit(self, X, y, config: Configuration, weights=None):
        '''
        Fit a RandomForestClassifier on the given data.

        Arguments
        ---------
        X: numpy.array
            feature matrix
        y: numpy.array
            label vector
        config: ConfigSpace.Configuration
            configuration holding the "rf:*" hyperparameter values
        weights: numpy.array
            vector with sample weights (or None for uniform weights)
        '''
        self.model = RandomForestClassifier(
            n_estimators=config["rf:n_estimators"],
            # "None" is a sentinel string in the config space; map it to
            # the real None expected by sklearn.
            max_features=config["rf:max_features"] if config["rf:max_features"] != "None" else None,
            criterion=config["rf:criterion"],
            max_depth=config["rf:max_depth"],
            min_samples_split=config["rf:min_samples_split"],
            min_samples_leaf=config["rf:min_samples_leaf"],
            bootstrap=config["rf:bootstrap"],
            random_state=12345  # fixed seed for reproducibility
        )
        self.model.fit(X, y, sample_weight=weights)

    def predict(self, X):
        '''
        Predict labels for the given feature matrix.

        Arguments
        ---------
        X: numpy.array
            instance feature matrix

        Returns
        -------
        numpy.array of predicted labels (requires a prior fit() call)
        '''
        return self.model.predict(X)

    def get_attributes(self):
        '''
        returns a list of tuples of (attribute,value)
        for all learned attributes

        Returns
        -------
        list of strings "name = value" for the fitted model's settings
        '''
        attr = []
        attr.append("max_depth = %d" % (self.model.max_depth))
        attr.append("min_samples_split = %d" % (self.model.min_samples_split))
        attr.append("min_samples_leaf = %d" % (self.model.min_samples_leaf))
        attr.append("criterion = %s" % (self.model.criterion))
        attr.append("n_estimators = %d" % (self.model.n_estimators))
        attr.append("max_features = %s" % (self.model.max_features))
        return attr
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.