| text (string, lengths 65–6.05M) | lang (string, 8 classes) | type (string, 2 classes) | id (string, length 64) |
|---|---|---|---|
#!/usr/bin/env python
# Script by Steven Grove (@sigwo)
# www.sigwo.com
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# 08-31-13 - v 0.1 Alpha
import sys
import datetime
import getpass
import os
import paramiko
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
user = raw_input("Please enter your username: ")
pass1 = getpass.getpass("Please enter your password: ") #Need to hash or encrypt or something
host = raw_input("Please put in IP address: ") # Add ability to browse for hosts.txt file of IP addresses
timestart = datetime.datetime.today() #time the script started
# Starts the deploy
# Open this as read-only or open a text box to paste in configs
with open(os.path.join(dir_path, 'config.txt'), 'r+') as f:
ssh = paramiko.SSHClient()
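# NOTE: depending on the target host, a missing-host-key policy may be needed before
# connect(), e.g. ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()).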
ssh.connect(host, username=user, password=pass1)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command("config")
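# NOTE: the config file opened above as 'f' is not used yet; its commands would have to be
# read and written to ssh_stdin (or sent via exec_command) to actually push the configuration.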
# Validate config syntax, ensure the command is issued correctly. This will be a huge undertaking... :-/
# Need to know if a command failed
exit_status = ssh_stdout.channel.recv_exit_status()
timeend = datetime.datetime.today() #time the script ended
|
Python
|
CL
|
a04ae418a424a410378b58578d53aea63b5eeded44229a29065267dc4a6e606a
|
import xml.etree.ElementTree as ET
import streamlit as st
import pandas as pd
import os
# Folder with the XML files
folder_name = 'data/'
# List the folder contents
file_list = os.listdir(folder_name)
# Create the sidebar with the option to choose the file
equity = st.sidebar.selectbox('Qual fundo?', file_list)
# Open the chosen XML file
tree = ET.parse(folder_name + equity)
# Parse the file
root = tree.getroot()
# Create the variables that will be used throughout the code
tmp_dict = {}
tmp_list = []
# Title
for elem in root.iter('NomeFundo'):
st.title(elem.text)
# Number of properties
st.header("Imoveis")
# Properties have more fields than these, but for now only these matter
field_list = [
'Nome', 'NumUnidades', 'OutrasCaractRelevantes', 'PercentVacancia', 'PercentInadimplencia', 'PercentReceitasFII'
]
# Iterate over all completed properties
for elem in root.iter('LstImovRendaAcabados'):
for subelem in elem:
# Reset the temporary list and the dictionary
tmp_dict = {}
tmp_inquilino_list = []
# For each piece of property information in the XML, create a field in the dictionary
for imovel in subelem:
if imovel.tag in field_list:
tmp_dict[imovel.tag] = imovel.text
# Once the dictionary is ready, append it to a list to be consumed by pandas
# Each property produces one dictionary, and this list will hold all of them
tmp_list.append(tmp_dict)
# Create the dataframe
df_imoveis = pd.DataFrame(data=tmp_list, columns=field_list)
# Convert fields to numeric so they can be summed at the end
df_imoveis['PercentVacancia'] = pd.to_numeric(df_imoveis['PercentVacancia'])
df_imoveis['PercentInadimplencia'] = pd.to_numeric(
df_imoveis['PercentInadimplencia'])
df_imoveis['PercentReceitasFII'] = pd.to_numeric(
df_imoveis['PercentReceitasFII'])
# Sum the previous fields
df_imoveis.loc['Total'] = df_imoveis.iloc[:, 3:].sum()
# Cosmetic: fill NaNs with an empty string
df_imoveis = df_imoveis.fillna("")
# Plot the chart
st.table(df_imoveis)
# AtivosFinanceiros
st.header("Outros ativos financeiros")
# Reset the variables
ativos_lista = []
tmp_list = []
# Depending on the asset type it may belong to a Fundo or a Sociedade
# Everything will be mapped to "Nome" for better visualization
name_list = ['Fundo', 'Sociedade', 'Companhia']
# We expect the XML not to change, but in case it does, always collect the
# financial asset types available in it
for elem in root.findall('.//AtivosFinanceiros'):
for subelem in elem:
ativos_lista.append(subelem.tag)
# Now go through all the financial assets and build a list of dictionaries
for ativo in ativos_lista:
for elem in root.findall('.//' + ativo + '/Emissor'):
# Reset the temporary dictionary
tmp_dict = {}
# Add a field with the financial asset type
tmp_dict['Tipo'] = ativo
# For each piece of asset information in the XML, create a field in the dictionary
for subelem in elem:
if subelem.tag in name_list:
tmp_dict['Nome'] = subelem.text
else:
tmp_dict[subelem.tag] = subelem.text
# Once the dictionary is ready, append it to a list to be consumed by pandas
tmp_list.append(tmp_dict)
# Create the dataframe
df_af = pd.DataFrame(data=tmp_list)
# Cosmetic: fill NaNs with an empty string
df_af = df_af.fillna("")
# Then merge the two pieces of information into one column and drop the previous ones
# Plot
st.table(df_af)
# Maturities
st.header("Vencimento dos contratos")
columns = [
'percentReceitaImovel', 'percentReceitasFII']
# Reset the dictionary
tmp_dict = {}
# Iterate over the contract-maturity list
for elem in root.iter('DistrContratosPrazo'):
for subelem in elem:
# If it contains information it will be consumed, otherwise add 0
if len(subelem.attrib) != 0:
tmp_dict[subelem.tag] = subelem.attrib
else:
tmp_dict[subelem.tag] = 0
# Create the dataframe
df = pd.DataFrame(data=tmp_dict)
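# Each tag becomes a column whose values come from its attribute dict; the transpose below shows one row per tag.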
# Plot
st.table(df.T)
# End
|
Python
|
CL
|
fcf651de81c2aea149ecaaf6be42ca3f8a6814832dc0c5845d0679cffb7fd047
|
import numpy as np
from utils import arglist
from pysc2.lib import features
from pysc2.lib import actions
from collections import namedtuple
FlatFeature = namedtuple('FlatFeatures', ['type', 'index', 'scale'])
NUM_PLAYERS = features.SCREEN_FEATURES.player_id.scale # 17
'''
screen features: height_map, unit_hit_points, unit_hit_points_ration,
unit_energy, unit_energy_ratio, unit_shields, unit_shields_ratio,
unit_density, unit_density_aa, buff_duration, build_progress
'''
FLAT_FEATURES = [FlatFeature(features.FeatureType.SCALAR, 0, 1.),
FlatFeature(features.FeatureType.SCALAR, 1, 1.),
FlatFeature(features.FeatureType.SCALAR, 2, 1.),
FlatFeature(features.FeatureType.SCALAR, 3, 1.),
FlatFeature(features.FeatureType.SCALAR, 4, 1.),
FlatFeature(features.FeatureType.SCALAR, 5, 1.),
FlatFeature(features.FeatureType.SCALAR, 6, 1.),
FlatFeature(features.FeatureType.SCALAR, 7, 1.),
FlatFeature(features.FeatureType.SCALAR, 8, 1.),
FlatFeature(features.FeatureType.SCALAR, 9, 1.),
FlatFeature(features.FeatureType.SCALAR, 10, 1.)]
is_spatial_action = {} # x, y
for name, arg_type in actions.TYPES._asdict().items(): # HACK: we should infer the point type automatically
# example: name= screen 0 / arg_type= screen [0, 0]
is_spatial_action[arg_type] = name in ['minimap', 'screen', 'screen2']
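# e.g. is_spatial_action[actions.TYPES.screen] is True, while is_spatial_action[actions.TYPES.queued] is False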
def stack_ndarray_dicts(lst, axis=0):
# issue https://github.com/deepmind/pysc2/issues/273
res = {}
for k in lst[0].keys(): # screen, minimap, flat, available_actions
for i, d in enumerate(lst):
if i == 0:
res[k] = np.expand_dims(d[k], axis=axis)
else:
res[k] = np.concatenate([res[k], np.expand_dims(d[k], axis=axis)], axis=axis)
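# With axis=0, res[k] ends up with shape (len(lst),) + lst[0][k].shape, i.e. an added batch dimension.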
return res
class Preprocess():
"""Compute network inputs from pysc2 observations.
See https://github.com/deepmind/pysc2/blob/master/docs/environment.md
for the semantics of the available observations.
"""
def __init__(self):
self.num_screen_channels = len(features.SCREEN_FEATURES) # 17
self.num_minimap_channels = len(features.MINIMAP_FEATURES) # 7
self.num_flat_channels = len(FLAT_FEATURES) # 11
self.available_actions_channels = arglist.NUM_ACTIONS # 549
def get_input_channels(self):
"""Get static channel dimensions of network inputs."""
return {
'screen': self.num_screen_channels,
'minimap': self.num_minimap_channels,
'player': self.num_flat_channels,
'available_actions': self.available_actions_channels}
def preprocess_obs(self, obs_list):
return stack_ndarray_dicts(
[self._preprocess_obs(o.observation) for o in obs_list]) # o: state (env.reset())
def _preprocess_obs(self, obs):
"""Comput screen, minimap and flat network inputs from raw observations"""
available_actions = np.zeros(arglist.NUM_ACTIONS, dtype=np.float32)
available_actions[obs['available_actions']] = 1.
screen = self._preprocess_spatial(obs['feature_screen'])
minimap = self._preprocess_spatial(obs['feature_minimap'])
# TODO available_actions, control groups, cargo, multi select, build queue
flat = np.concatenate([obs['player']])
return {
'screen': screen,
'minimap': minimap,
'player': flat,
'available_actions': available_actions}
def _preprocess_spatial(self, spatial):
return spatial
def _onehot1d(self, x):
y = np.zeros((self.num_flat_channels, ), dtype='float32')
y[x] = 1.
return y
|
Python
|
CL
|
153ce0655435fe163e13eaa1e7c0d50f8eca413d8ff72e3e0bfbdea8d3cef13c
|
import os, sys, time
import functools
import numpy as np
import math
import argparse
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.optimizer as optimizer
from paddle.fluid.framework import Program, program_guard
from paddle.fluid.transpiler import memory_optimize
from utility import add_arguments, print_arguments
import paddle.dataset.flowers as flowers
from paddle.dataset.flowers import *
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('batch_size', int, 32, "Minibatch size.")
add_arg('with_ir_mem_opt', bool, True, "Whether to use ir memory optimization or not.")
def fc_net(x, y):
x = layers.data(name='x', shape=[10], dtype='float32')
y = layers.data(name='y', shape=[1], dtype='float32')
y_predict = layers.fc(input=x, size=1, act=None)
cost = layers.square_error_cost(input=y_predict, label=y)
avg_cost = layers.mean(cost)
opt = optimizer.SGD(learning_rate=0.001)
opt = opt.minimize(avg_cost)
return avg_cost
def fake_reader(batch_size=32):
def reader():
while True:
x = np.random.uniform(low=-1, high=1, size=(batch_size, 10))
y = np.random.randint(low=0, high=10, size=(batch_size, 1))
yield y, x
return reader
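# Note: the reader yields (y, x) tuples, matching the feed_list=[y, x] order used by the DataFeeder below.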
def train(args):
x = layers.data(name='x', shape=[10], dtype='float32')
y = layers.data(name='y', shape=[1], dtype='float32')
avg_cost = fc_net(x, y)
if args.with_ir_mem_opt:
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = False
build_strategy.debug_graphviz_path = "./debug"
train_exe = fluid.ParallelExecutor(
use_cuda=False, loss_name=avg_cost.name, build_strategy=build_strategy)
train_batch_size = args.batch_size
import random
random.seed(1000)
np.random.seed(1000)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# train_reader = paddle.batch(
# fake_reader(batch_size=train_batch_size), batch_size=train_batch_size)
train_reader = fake_reader(batch_size = train_batch_size)
# train_reader = paddle.batch(
# flowers.test(use_xmap=False), batch_size=train_batch_size)
feeder = fluid.DataFeeder(place=place, feed_list=[y, x])
fetch_list = [avg_cost.name]
for batch_id, data in enumerate(train_reader()):
t1 = time.time()
loss = train_exe.run(fetch_list, feed=feeder.feed(data))
t2 = time.time()
period = t2 - t1
loss = np.mean(np.array(loss))
if batch_id % 10 == 0:
print("Pass {0}, trainbatch {1}, loss {2}, \
time {5}"
.format(pass_id, \
batch_id, loss, \
"%2.2f sec" % period))
sys.stdout.flush()
else:
program = avg_cost.block.program
fluid.memory_optimize(program, print_log=True, level=1)
# place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
# exe = fluid.Executor(place)
# exe.run(fluid.default_startup_program())
def main():
args = parser.parse_args()
print_arguments(args)
train(args)
if __name__ == '__main__':
main()
|
Python
|
CL
|
02b5519f183de0c1275859ee5c30167f40234823551f7b8dd08e2bee1e8c9087
|
import os
import shutil
import tempfile
from datetime import datetime
from unittest import mock
from django.conf import settings
import pytest
from freezegun import freeze_time
from PIL import Image
from waffle.testutils import override_switch
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.addons.indexers import AddonIndexer
from olympia.amo.tests import TestCase, addon_factory, user_factory
from olympia.amo.tests.test_helpers import get_image_path
from olympia.amo.utils import image_size
from olympia.constants.reviewers import EXTRA_REVIEW_TARGET_PER_DAY_CONFIG_KEY
from olympia.files.models import File
from olympia.reviewers.models import NeedsHumanReview, UsageTier
from olympia.users.models import UserProfile
from olympia.versions.models import Version, VersionPreview
from olympia.zadmin.models import set_config
from ..tasks import (
disable_addons,
flag_high_hotness_according_to_review_tier,
index_addons,
recreate_theme_previews,
resize_icon,
update_addon_average_daily_users,
update_addon_hotness,
update_addon_weekly_downloads,
)
@pytest.mark.django_db
def test_recreate_theme_previews():
xpi_path = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/mozilla_static_theme.zip'
)
addon_without_previews = addon_factory(
type=amo.ADDON_STATICTHEME, file_kw={'filename': xpi_path}
)
addon_with_previews = addon_factory(
type=amo.ADDON_STATICTHEME, file_kw={'filename': xpi_path}
)
VersionPreview.objects.create(
version=addon_with_previews.current_version,
sizes={'image': [123, 456], 'thumbnail': [34, 45]},
)
assert len(addon_without_previews.current_previews) == 0
assert len(addon_with_previews.current_previews) == 1
recreate_theme_previews([addon_without_previews.id, addon_with_previews.id])
del addon_without_previews.reload().current_previews
del addon_with_previews.reload().current_previews
assert len(addon_without_previews.current_previews) == 2
assert len(addon_with_previews.current_previews) == 2
sizes = addon_without_previews.current_version.previews.values_list(
'sizes', flat=True
)
renderings = amo.THEME_PREVIEW_RENDERINGS
assert list(sizes) == [
{
'image': list(renderings['firefox']['full']),
'thumbnail': list(renderings['firefox']['thumbnail']),
'image_format': renderings['firefox']['image_format'],
'thumbnail_format': renderings['firefox']['thumbnail_format'],
},
{
'image': list(renderings['amo']['full']),
'thumbnail': list(renderings['amo']['thumbnail']),
'image_format': renderings['amo']['image_format'],
'thumbnail_format': renderings['amo']['thumbnail_format'],
},
]
PATCH_PATH = 'olympia.addons.tasks'
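# Module path used as the prefix for mock.patch() targets in the tests below.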
@pytest.mark.django_db
@mock.patch(f'{PATCH_PATH}.parse_addon')
def test_create_missing_theme_previews(parse_addon_mock):
parse_addon_mock.return_value = {}
theme = addon_factory(type=amo.ADDON_STATICTHEME)
amo_preview = VersionPreview.objects.create(
version=theme.current_version,
sizes={
'image': amo.THEME_PREVIEW_RENDERINGS['amo']['full'],
'thumbnail': amo.THEME_PREVIEW_RENDERINGS['amo']['thumbnail'],
'thumbnail_format': amo.THEME_PREVIEW_RENDERINGS['amo']['thumbnail_format'],
'image_format': amo.THEME_PREVIEW_RENDERINGS['amo']['image_format'],
},
)
firefox_preview = VersionPreview.objects.create(
version=theme.current_version,
sizes={
'image': amo.THEME_PREVIEW_RENDERINGS['firefox']['full'],
'thumbnail': amo.THEME_PREVIEW_RENDERINGS['firefox']['thumbnail'],
},
)
# add another extra preview size that should be ignored
extra_preview = VersionPreview.objects.create(
version=theme.current_version,
sizes={'image': [123, 456], 'thumbnail': [34, 45]},
)
# addon has all the complete previews already so skip when only_missing=True
assert VersionPreview.objects.count() == 3
with mock.patch(
f'{PATCH_PATH}.generate_static_theme_preview.apply_async'
) as gen_preview, mock.patch(f'{PATCH_PATH}.resize_image') as resize:
recreate_theme_previews([theme.id], only_missing=True)
assert gen_preview.call_count == 0
assert resize.call_count == 0
recreate_theme_previews([theme.id], only_missing=False)
assert gen_preview.call_count == 1
assert resize.call_count == 0
# If the add-on is missing a preview, we call generate_static_theme_preview
VersionPreview.objects.get(id=amo_preview.id).delete()
firefox_preview.save()
extra_preview.save()
assert VersionPreview.objects.count() == 2
with mock.patch(
f'{PATCH_PATH}.generate_static_theme_preview.apply_async'
) as gen_preview, mock.patch(f'{PATCH_PATH}.resize_image') as resize:
recreate_theme_previews([theme.id], only_missing=True)
assert gen_preview.call_count == 1
assert resize.call_count == 0
# Preview is correct dimensions but wrong format, call generate_static_theme_preview
amo_preview.sizes['image_format'] = 'foo'
amo_preview.save()
firefox_preview.save()
extra_preview.save()
assert VersionPreview.objects.count() == 3
with mock.patch(
f'{PATCH_PATH}.generate_static_theme_preview.apply_async'
) as gen_preview, mock.patch(f'{PATCH_PATH}.resize_image') as resize:
recreate_theme_previews([theme.id], only_missing=True)
assert gen_preview.call_count == 1
assert resize.call_count == 0
# But we don't do the full regeneration to just get new thumbnail sizes or formats
amo_preview.sizes['thumbnail'] = [666, 444]
amo_preview.sizes['image_format'] = 'svg'
amo_preview.save()
assert amo_preview.thumbnail_dimensions == [666, 444]
firefox_preview.sizes['thumbnail_format'] = 'gif'
firefox_preview.save()
assert firefox_preview.get_format('thumbnail') == 'gif'
extra_preview.save()
assert VersionPreview.objects.count() == 3
with mock.patch(
f'{PATCH_PATH}.generate_static_theme_preview.apply_async'
) as gen_preview, mock.patch(f'{PATCH_PATH}.resize_image') as resize:
recreate_theme_previews([theme.id], only_missing=True)
assert gen_preview.call_count == 0 # not called
assert resize.call_count == 2
amo_preview.reload()
assert amo_preview.thumbnail_dimensions == [720, 92]
firefox_preview.reload()
assert firefox_preview.get_format('thumbnail') == 'png'
assert VersionPreview.objects.count() == 3
@pytest.mark.django_db
def test_update_addon_average_daily_users():
addon = addon_factory(average_daily_users=0)
count = 123
data = [(addon.guid, count)]
assert addon.average_daily_users == 0
update_addon_average_daily_users(data)
addon.refresh_from_db()
assert addon.average_daily_users == count
@pytest.mark.django_db
def test_update_addon_average_daily_users_case_sensitive():
addon = addon_factory(average_daily_users=0)
data = [(addon.guid.upper(), 123)]
assert addon.average_daily_users == 0
update_addon_average_daily_users(data)
addon.refresh_from_db()
assert addon.average_daily_users == 0
@pytest.mark.django_db
@override_switch('local-statistics-processing', active=True)
def test_update_deleted_addon_average_daily_users():
addon = addon_factory(average_daily_users=0)
addon.delete()
count = 123
data = [(addon.guid, count)]
assert addon.average_daily_users == 0
update_addon_average_daily_users(data)
addon.refresh_from_db()
assert addon.average_daily_users == count
@pytest.mark.django_db
def test_update_addon_hotness():
addon1 = addon_factory(hotness=0, status=amo.STATUS_APPROVED)
addon2 = addon_factory(hotness=123, status=amo.STATUS_APPROVED)
addon3 = addon_factory(hotness=123, status=amo.STATUS_AWAITING_REVIEW)
addon4 = addon_factory(hotness=123)
addon4.delete()
averages = {
addon1.guid: {'avg_this_week': 213467, 'avg_previous_week': 123467},
addon2.guid: {
'avg_this_week': 1,
'avg_previous_week': 1,
},
addon3.guid: {'avg_this_week': 213467, 'avg_previous_week': 123467},
addon4.guid: {'avg_this_week': 213467, 'avg_previous_week': 123467},
}
update_addon_hotness(averages=averages.items())
addon1.refresh_from_db()
addon2.refresh_from_db()
addon3.refresh_from_db()
assert addon1.hotness > 0
assert addon3.hotness > 0
assert addon4.hotness > 0
# Too low averages so we set the hotness to 0.
assert addon2.hotness == 0
@freeze_time('2023-05-15 11:00')
@pytest.mark.django_db
def test_flag_high_hotness_according_to_review_tier():
user_factory(pk=settings.TASK_USER_ID)
set_config(EXTRA_REVIEW_TARGET_PER_DAY_CONFIG_KEY, '1')
# Create some usage tiers and add add-ons in them for the task to do
# something. The ones missing a lower, upper, or growth threshold don't
# do anything. Also, tiers need to have a lower adu threshold above
# MINIMUM_ADU_FOR_HOTNESS_NONTHEME (100) to do anything.
UsageTier.objects.create(name='Not a tier with usage values')
UsageTier.objects.create(
name='D tier (below minimum usage for hotness)',
lower_adu_threshold=0,
upper_adu_threshold=100,
growth_threshold_before_flagging=0.1,
)
UsageTier.objects.create(
name='C tier (no growth threshold)',
lower_adu_threshold=100,
upper_adu_threshold=200,
)
UsageTier.objects.create(
name='B tier',
lower_adu_threshold=200,
upper_adu_threshold=250,
growth_threshold_before_flagging=20,
)
UsageTier.objects.create(
name='A tier',
lower_adu_threshold=250,
upper_adu_threshold=1000,
growth_threshold_before_flagging=30,
)
UsageTier.objects.create(
name='S tier (no upper threshold)',
lower_adu_threshold=1000,
upper_adu_threshold=None,
growth_threshold_before_flagging=30,
)
not_flagged = [
# Usage below MINIMUM_ADU_FOR_HOTNESS_NONTHEME so tier is inactive
addon_factory(name='Low usage addon', average_daily_users=99, hotness=0.3),
# Belongs to C tier, which doesn't have a growth threshold set.
addon_factory(name='C tier addon', average_daily_users=100, hotness=0.3),
# Belongs to B tier but not an extension.
addon_factory(
name='B tier language pack',
type=amo.ADDON_LPAPP,
average_daily_users=200,
hotness=0.3,
),
addon_factory(
name='B tier theme',
type=amo.ADDON_STATICTHEME,
average_daily_users=200,
hotness=0.3,
),
# Belongs to A tier but below the growth threshold.
addon_factory(
name='A tier below threshold', average_daily_users=250, hotness=0.2
),
# Belongs to S tier, which doesn't have an upper threshold. (like
# notable, subject to human review anyway)
addon_factory(name='S tier addon', average_daily_users=1000, hotness=0.3),
# Belongs to A tier but already human reviewed.
addon_factory(
name='A tier already reviewed',
average_daily_users=250,
hotness=0.3,
version_kw={'human_review_date': datetime.now()},
),
# Belongs to B tier but already disabled.
addon_factory(
name='B tier already disabled',
average_daily_users=200,
hotness=0.3,
status=amo.STATUS_DISABLED,
),
# Belongs to B tier but already flagged for human review for growth
# (see below).
addon_factory(
name='B tier already flagged', average_daily_users=200, hotness=0.3
),
]
NeedsHumanReview.objects.create(
version=not_flagged[-1].current_version, is_active=True
)
flagged = [
addon_factory(name='B tier', average_daily_users=200, hotness=0.3),
addon_factory(name='A tier', average_daily_users=250, hotness=0.3),
addon_factory(
name='A tier with inactive flags', average_daily_users=250, hotness=0.3
),
]
# Add an inactive flag on the last one, shouldn't do anything.
NeedsHumanReview.objects.create(
version=flagged[-1].current_version, is_active=False
)
# Pretend all files were signed otherwise they would not get flagged.
File.objects.update(is_signed=True)
flag_high_hotness_according_to_review_tier()
for addon in not_flagged:
assert (
addon.versions.latest('pk')
.needshumanreview_set.filter(
reason=NeedsHumanReview.REASON_HOTNESS_THRESHOLD, is_active=True
)
.count()
== 0
)
for addon in flagged:
version = addon.versions.latest('pk')
assert (
version.needshumanreview_set.filter(
reason=NeedsHumanReview.REASON_HOTNESS_THRESHOLD, is_active=True
).count()
== 1
)
# We've set EXTRA_REVIEW_TARGET_PER_DAY_CONFIG_KEY so that there would be
# one extra review per day. Since we've frozen time on a Monday (2023-05-15),
# the due dates should fall on Thursday, Friday, then Monday (skipping the week-end).
due_dates = (
Version.objects.filter(addon__in=flagged)
.values_list('due_date', flat=True)
.order_by('due_date')
)
assert list(due_dates) == [
datetime(2023, 5, 18, 11, 0),
datetime(2023, 5, 19, 11, 0),
datetime(2023, 5, 22, 11, 0),
]
@pytest.mark.django_db
def test_flag_high_hotness_according_to_review_tier_no_tiers_defined():
user_factory(pk=settings.TASK_USER_ID)
addon = addon_factory(average_daily_users=1001, file_kw={'is_signed': True})
flag_high_hotness_according_to_review_tier()
assert not addon.current_version.needshumanreview_set.exists()
@pytest.mark.django_db
def test_update_addon_weekly_downloads():
addon = addon_factory(weekly_downloads=0)
count = 123
data = [(addon.addonguid.hashed_guid, count)]
assert addon.weekly_downloads == 0
update_addon_weekly_downloads(data)
addon.refresh_from_db()
assert addon.weekly_downloads == count
@pytest.mark.django_db
def test_update_addon_weekly_downloads_ignores_deleted_addons():
guid = 'some@guid'
deleted_addon = addon_factory(guid=guid)
deleted_addon.delete()
deleted_addon.update(guid=None)
addon = addon_factory(guid=guid, weekly_downloads=0)
count = 123
data = [(addon.addonguid.hashed_guid, count)]
assert addon.weekly_downloads == 0
update_addon_weekly_downloads(data)
addon.refresh_from_db()
assert addon.weekly_downloads == count
@pytest.mark.django_db
def test_update_addon_weekly_downloads_skips_non_existent_addons():
addon = addon_factory(weekly_downloads=0)
count = 123
invalid_hashed_guid = 'does.not@exist'
data = [(invalid_hashed_guid, 0), (addon.addonguid.hashed_guid, count)]
assert addon.weekly_downloads == 0
update_addon_weekly_downloads(data)
addon.refresh_from_db()
assert addon.weekly_downloads == count
class TestResizeIcon(TestCase):
def _uploader(self, resize_size, final_size):
img = get_image_path('mozilla.png')
original_size = (339, 128)
src = tempfile.NamedTemporaryFile(
mode='r+b', suffix='.png', delete=False, dir=settings.TMP_PATH
)
if not isinstance(final_size, list):
final_size = [final_size]
resize_size = [resize_size]
uploadto = os.path.join(settings.MEDIA_ROOT, 'addon_icons')
try:
os.makedirs(uploadto)
except OSError:
pass
for rsize, expected_size in zip(resize_size, final_size, strict=True):
# resize_icon moves the original
shutil.copyfile(img, src.name)
src_image = Image.open(src.name)
assert src_image.size == original_size
dest_name = os.path.join(uploadto, '1234')
with mock.patch('olympia.amo.utils.pngcrush_image') as pngcrush_mock:
return_value = resize_icon(src.name, dest_name, [rsize])
dest_image = f'{dest_name}-{rsize}.png'
assert pngcrush_mock.call_count == 1
assert pngcrush_mock.call_args_list[0][0][0] == dest_image
assert image_size(dest_image) == expected_size
# original should have been moved to -original
orig_image = '%s-original.png' % dest_name
assert os.path.exists(orig_image)
# Return value of the task should be a dict with an icon_hash key
# containing the first 8 chars of the md5 hash of the source file,
# which is bb362450b00f0461c6bddc6b97b3c30b.
assert return_value == {'icon_hash': 'bb362450'}
os.remove(dest_image)
assert not os.path.exists(dest_image)
os.remove(orig_image)
assert not os.path.exists(orig_image)
shutil.rmtree(uploadto)
assert not os.path.exists(src.name)
def test_resize_icon_shrink(self):
"""Image should be shrunk so that the longest side is 32px."""
resize_size = 32
final_size = (32, 12)
self._uploader(resize_size, final_size)
def test_resize_icon_enlarge(self):
"""Image stays the same, since the new size is bigger than both sides."""
resize_size = 350
final_size = (339, 128)
self._uploader(resize_size, final_size)
def test_resize_icon_same(self):
"""Image stays the same, since the new size is the same."""
resize_size = 339
final_size = (339, 128)
self._uploader(resize_size, final_size)
def test_resize_icon_list(self):
"""Resize multiple images at once."""
resize_size = [32, 339, 350]
final_size = [(32, 12), (339, 128), (339, 128)]
self._uploader(resize_size, final_size)
@pytest.mark.django_db
@mock.patch('olympia.addons.tasks.index_addons.delay')
def test_disable_addons(index_addons_mock):
UserProfile.objects.create(pk=settings.TASK_USER_ID)
addon = addon_factory()
disable_addons([addon.id])
addon.reload()
assert addon.status == amo.STATUS_DISABLED
assert addon.current_version is None
assert addon.versions.all()[0].file.status == amo.STATUS_DISABLED
assert ActivityLog.objects.filter(
action=amo.LOG.FORCE_DISABLE.id, addonlog__addon=addon
).exists()
index_addons_mock.assert_called_with([addon.id])
@pytest.mark.django_db
@mock.patch('olympia.addons.tasks.unindex_objects')
@mock.patch('olympia.addons.tasks.index_objects')
def test_index_addons(index_objects_mock, unindex_objects_mock):
public_addon = addon_factory()
incomplete_addon = addon_factory(status=amo.STATUS_NULL)
disabled_addon = addon_factory(disabled_by_user=True)
index_addons((public_addon.id, incomplete_addon.id, disabled_addon.id))
index_objects_mock.assert_called_once()
call = index_objects_mock.mock_calls[0]
assert list(call.kwargs.keys()) == ['queryset', 'indexer_class', 'index']
assert list(call.kwargs['queryset']) == [public_addon]
assert call.kwargs['indexer_class'] == AddonIndexer
assert call.kwargs['index'] is None
unindex_objects_mock.assert_called_with(
[incomplete_addon.id, disabled_addon.id], indexer_class=AddonIndexer
)
# Confirm that we don't make unnecessary calls to index_objects/unindex_objects when
# there are no addons to index/unindex
index_objects_mock.reset_mock()
unindex_objects_mock.reset_mock()
index_addons((public_addon.id,))
index_objects_mock.assert_called_once()
unindex_objects_mock.assert_not_called()
index_objects_mock.reset_mock()
unindex_objects_mock.reset_mock()
index_addons((incomplete_addon.id,))
index_objects_mock.assert_not_called()
unindex_objects_mock.assert_called_once()
|
Python
|
CL
|
8fa1a6fd9ef6b624b06955ed3d89b2d948f82a36375d65569637f6be58aa45e6
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ovos_plugin_manager.templates.hotwords import HotWordEngine
class DummyWakeWordPlugin(HotWordEngine):
"""Dummy Wake Word, only button presses trigger listening"""
def __init__(self, hotword="dummy", config=None, lang="en-us"):
super(DummyWakeWordPlugin, self).__init__(hotword, config or {}, lang)
def found_wake_word(self, frame_data):
""" frame data contains audio data that needs to be checked for a wake
word, you can process audio here or just return a result
previously handled in update method """
return False
def update(self, chunk):
""" In here you have access to live audio chunks, allows for
streaming predictions, result still need to be returned in
found_wake_word method """
def stop(self):
""" Perform any actions needed to shut down the hot word engine.
This may include things such as unloading loaded data or shutting down
external processes.
"""
|
Python
|
CL
|
08ec121868e27138dfd71d1383c793f55ee8d3d694823843118e75299035e0e0
|
"""CNN-based image classification on SageMaker with TensorFlow and Keras"""
# Dependencies:
import argparse
# TODO
# TODO: Function definitions as needed?
# Training script:
if __name__ == "__main__":
# Load arguments from CLI / environment variables?
# Load images from container filesystem into training / test data sets?
# Create the Keras model?
# Fit the Keras model?
# Evaluate model quality and log metrics?
# Save outputs (e.g. trained model) to specified folder(s)?
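# --- Hedged illustration (assumed, not part of the original template) -------
# One minimal way the TODOs above might be filled in, assuming SageMaker-style
# environment variables (SM_MODEL_DIR, SM_CHANNEL_TRAIN) and NumPy arrays
# train_x.npy / train_y.npy inside the training channel:
#
#     import os
#     import numpy as np
#     import tensorflow as tf
#     parser = argparse.ArgumentParser()
#     parser.add_argument("--epochs", type=int, default=5)
#     parser.add_argument("--model-dir", default=os.environ.get("SM_MODEL_DIR", "./model"))
#     parser.add_argument("--train", default=os.environ.get("SM_CHANNEL_TRAIN", "./data"))
#     args, _ = parser.parse_known_args()
#     x = np.load(os.path.join(args.train, "train_x.npy"))
#     y = np.load(os.path.join(args.train, "train_y.npy"))
#     model = tf.keras.Sequential([
#         tf.keras.layers.Conv2D(32, 3, activation="relu", input_shape=x.shape[1:]),
#         tf.keras.layers.MaxPooling2D(),
#         tf.keras.layers.Flatten(),
#         tf.keras.layers.Dense(10, activation="softmax"),
#     ])
#     model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
#                   metrics=["accuracy"])
#     model.fit(x, y, epochs=args.epochs)
#     model.save(args.model_dir)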
|
Python
|
CL
|
98c9304d0d57dd744403307e2d83be038fe78c54573f6ff212cfc0ca6abc06ce
|
import os.path
import numpy as np
import xarray as xr
import time
import tensorflow as tf
from parflow_nn.preprocess_PF import create_feature_or_target_da
from parflow_nn.predpp import PredPP
def train_step(model, input, target, learning_rate):
# prediction = model(input, training=True)
loss_func = tf.keras.losses.MeanSquaredError()
n_samples = input.shape[0]
with tf.GradientTape() as ae_tape:
total_loss = 0
for j in range(n_samples):
prediction = model(input[j, :][np.newaxis, ...])
# Calculate loss
loss = loss_func(target[j, 1:][np.newaxis, ...], prediction)
# print(loss)
total_loss += loss
# Get the encoder and decoder variables
trainable_vars = model.trainable_variables
# Calculate gradient
ae_grads = ae_tape.gradient(total_loss, trainable_vars)
# And then apply the gradient to change the weights
ae_optimizer = tf.keras.optimizers.RMSprop(learning_rate=learning_rate)
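# Note: a fresh RMSprop instance is created on every call, so optimizer state
# (e.g. moving averages) does not carry over between training steps.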
ae_optimizer.apply_gradients(zip(ae_grads, trainable_vars))
# Loss is returned to monitor it while training
return total_loss, ae_optimizer
@tf.function
def reshape_patch_back(patch_tensor, patch_size):
batch_size = np.shape(patch_tensor)[0]
seq_length = np.shape(patch_tensor)[1]
patch_height = np.shape(patch_tensor)[2]
patch_width = np.shape(patch_tensor)[3]
channels = np.shape(patch_tensor)[4]
img_channels = int(channels / (patch_size*patch_size))
a = tf.reshape(patch_tensor, [batch_size, seq_length,
patch_height, patch_width,
patch_size, patch_size,
img_channels])
b = tf.transpose(a, [0,1,2,4,3,5,6])
img_tensor = tf.reshape(b, [batch_size, seq_length,
patch_height * patch_size,
patch_width * patch_size,
img_channels])
return img_tensor
@tf.function
def reshape_patch(img_tensor, patch_size):
batch_size = tf.shape(img_tensor)[0]
seq_length = tf.shape(img_tensor)[1]
img_height = tf.shape(img_tensor)[2]
img_width = tf.shape(img_tensor)[3]
num_channels = tf.shape(img_tensor)[4]
a = tf.reshape(img_tensor, [batch_size, seq_length,
int(img_height/patch_size), patch_size,
int(img_width/patch_size), patch_size,
num_channels])
b = tf.transpose(a, [0,1,2,4,3,5,6])
patch_tensor = tf.reshape(b, [batch_size, seq_length,
int(img_height/patch_size),
int(img_width/patch_size),
patch_size*patch_size*num_channels])
return patch_tensor
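# Example: with patch_size=20, a (3, 48, 40, 40, 4) tensor becomes (3, 48, 2, 2, 1600);
# each 20x20 spatial patch is folded into the channel dimension.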
def normalize_feature_da(feature_da, feature_names=None):
"""Normalize feature arrays, and optionally target array
Args:
feature_da: feature Dataset
feature_names: Feature name strings
Returns:
da: Normalized DataArray
"""
if feature_names is not None: # static inputs
con_stats_norm = []
for feati in feature_da:
if len(np.unique(feati)) == 1:
con_stats_norm.append(feati)
else:
meani = np.ma.mean(feati, axis = (2, 3))
stdi = np.ma.std(feati, axis = (2, 3))
meani[stdi == 0] = 0
stdi[stdi == 0] = 1
# broadcast back stdi
stdi_broadcast = np.tile(stdi, (1, feati.shape[2], feati.shape[3], 1))
stdi_broadcast = np.swapaxes(stdi_broadcast, 2, 3)
stdi_broadcast = np.swapaxes(stdi_broadcast, 1, 2)
# broadcast back meani
meani_broadcast = np.tile(meani, (1, feati.shape[2], feati.shape[3], 1))
meani_broadcast = np.swapaxes(meani_broadcast, 2, 3)
meani_broadcast = np.swapaxes(meani_broadcast, 1, 2)
feati_norm = (feati - meani_broadcast) / stdi_broadcast
feati_norm = tf.convert_to_tensor(feati_norm, dtype = tf.float32)
con_stats_norm.append(feati_norm)
return con_stats_norm
else: # forcing inputs and target
forcing_mean = np.ma.mean(feature_da, axis = (0, 1, 2, 3))
forcing_std = np.ma.std(feature_da, axis = (0, 1, 2, 3))
forcing_mean[forcing_std == 0] = 0
forcing_std[forcing_std == 0] = 1
# broadcast back
mean_broadcast = np.tile(forcing_mean, (1, feature_da.shape[1], feature_da.shape[2],
feature_da.shape[3], 1))
std_broadcast = np.tile(forcing_std, (1, feature_da.shape[1], feature_da.shape[2],
feature_da.shape[3], 1))
out_arr = (feature_da - mean_broadcast) / std_broadcast
return tf.convert_to_tensor(out_arr, dtype = tf.float32)
if __name__ == '__main__':
# --------------------------------------------------
is_clm = True
NC_DIR = '/home/hvtran/washita_clm/nc_files'
static_input = xr.open_dataset(os.path.join(NC_DIR, 'washita_clm_static.nc'))
forcing_input = xr.open_dataset(os.path.join(NC_DIR, 'washita_clm_forcings.nc'))
target_flow_input_xr = xr.open_dataset(os.path.join(NC_DIR, 'washita_clm_flow.nc'))
# target_wtd_input_xr = xr.open_dataset(os.path.join(NC_DIR, 'washita_clm_wtd.nc'))
num_hidden = [1028]*8
num_layers = len(num_hidden)
delta = 0.00002
base = 0.99998
eta = 1
reverse_input = False
filter_size = 5
# --------------------------------------------------
# TODO: The second argument is simply first_argument.data_vars.keys()
static_feature_da, static_feature_names = create_feature_or_target_da(
static_input,
['prev_press', 'slope_x', 'slope_y', 'perm', 'poros',
'rel_perm_alpha', 'rel_perm_N',
'satur_alpha', 'satur_N', 'satur_sres', 'satur_ssat',
'tensor_x', 'tensor_y', 'tensor_z', 'spec_storage', 'mannings'],
0,
'feature',
flx_same_dt=True
)
one_layer_feats = ['slope_x', 'slope_y', 'spec_storage', 'mannings',
'tensor_x', 'tensor_y', 'tensor_z']
new_static_feature_da = []
new_static_names = []
for ii, fname in enumerate(static_feature_names.data):
if fname.split('_lev')[0] in one_layer_feats:
if int(fname[-2:]) == 0:
new_static_feature_da.append(static_feature_da[:, ii, :, :])
new_static_names.append(fname)
else:
continue
else:
new_static_feature_da.append(static_feature_da[:, ii, :, :])
new_static_names.append(fname)
new_static_feature_da = np.stack(new_static_feature_da, axis=0)
new_static_feature_da = np.swapaxes(new_static_feature_da, 0, 1)
new_static_feature_da = np.swapaxes(new_static_feature_da, 1, 2)
new_static_feature_da = np.swapaxes(new_static_feature_da, 2, 3)
# ---------------------------------------------
# FORCING
# ---------------------------------------------
forcing_feature_da, forcing_feature_names = create_feature_or_target_da(
forcing_input,
['forcings'],
0,
'feature',
flx_same_dt=True
)
# Add channel dimension
if is_clm:
forcing_feature_da = forcing_feature_da.data[:]
forcing_feature_da = np.swapaxes(forcing_feature_da, 1, 2)
forcing_feature_da = np.swapaxes(forcing_feature_da, 2, 3)
forcing_feature_da = np.repeat(forcing_feature_da,
repeats=[2] + [1] * (forcing_feature_da.shape[0] - 1),
axis=0) # duplicate the first row
forcing_feature_da = forcing_feature_da[np.newaxis, ...]
else:
forcing_feature_da = forcing_feature_da.data[:, 0, :, :]
forcing_feature_da = forcing_feature_da[..., np.newaxis]
forcing_feature_da = forcing_feature_da[np.newaxis, ...]
# ---------------------------------------------
# TARGETS
# ---------------------------------------------
target_da = np.concatenate([target_flow_input_xr.flow], axis=1)
target_da = target_da[np.newaxis, ...]
target_da = np.swapaxes(target_da, 2, 3)
target_da = np.swapaxes(target_da, 3, 4)
print(target_da.shape) # 1, 8761, 41, 41, 123
# forcing_feature_train = np.stack(forcings)
# target_train = np.stack(targets)
n_sample = 3
n_days = 2
TRAIN_HOURS = 24 * n_days * n_sample
forcing_feature_train = forcing_feature_da[:, :TRAIN_HOURS, :40, :40, [2,3,6,7]]
target_train = target_da[:, :TRAIN_HOURS, :40, :40, :]
new_static_feature_da = new_static_feature_da[:, :40, :40, :]
# ----------------------------------------------
# Reshape based on number of samples
# ----------------------------------------------
forcing_feature_train = np.reshape(forcing_feature_train, (n_sample, 24 * n_days, forcing_feature_train.shape[2], forcing_feature_train.shape[3],
forcing_feature_train.shape[4]))
target_train = np.reshape(target_train, (n_sample, 24 * n_days, target_train.shape[2], target_train.shape[3],
target_train.shape[4]))
forcing_norm_train = normalize_feature_da(forcing_feature_train)
target_norm_train = normalize_feature_da(target_train)
t0 = time.time()
patch_size = tf.Variable(20)
ims = reshape_patch(forcing_norm_train, patch_size)
tars = reshape_patch(target_norm_train, patch_size)
# tars = tars[:, :, :, :, :50]
t1 = time.time()
print('reshape time: ' + str(t1 - t0))
# Plot samples
"""
forcing_mean = np.mean(forcing_feature_train,axis=(2,3,))
plt.plot(forcing_mean[0,:,1],'b')
plt.plot(forcing_mean[1,:,1],'r')
plt.plot(forcing_mean[2,:,1],'m')
"""
# --------------------------------------------------
# OPTIMIZER AND LOSS FUNCTION
# --------------------------------------------------
# Optimizer and loss function
ae_optimizer = tf.keras.optimizers.RMSprop(learning_rate=1e-3)
# MSE works here best
loss_func = tf.keras.losses.MeanSquaredError()
model = tf.keras.models.Sequential()
mylayer = PredPP(ims.get_shape().as_list(), tars.shape[4],
num_layers, num_hidden,
filter_size,
ims.shape[1],
True,
)
model.add(mylayer)
model.compile(optimizer=ae_optimizer, loss=loss_func, metrics='mse')
#save_name = '3_samples_2_weeks_8_layers_weights'
# --------------------------------------------------
# TRAIN with 4 days
# --------------------------------------------------
t0 = time.time()
lr = 1e-4
curr_loss = 10
for ii in range(100):
loss, ae_optimizer = train_step(model, ims, tars, lr)
if reverse_input:
ims_rev = ims[:, ::-1]
tars_rev = tars[:, ::-1]
tmp_loss, _ = train_step(model, ims_rev, tars_rev, lr)
loss += tmp_loss
loss = loss / 2
"""
if loss < curr_loss:
print('save loss: '+str(loss))
model.save_weights(save_name)
curr_loss = loss
"""
if ii % 5 == 0:
t1 = time.time()
elapsed_time = t1 - t0
t0 = time.time()
print("loss {:1.6f}, time step {:1.0f}, elapsed_time {:2.4f} s".format(loss, ii, elapsed_time))
import sys
sys.exit()
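# NOTE: the sys.exit() above stops the script here, so the 7/14/30-day training stages below are unreachable as written.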
# --------------------------------------------------
# TRAIN with 7 days
# --------------------------------------------------
n_sample = 3
n_days = 7
TRAIN_HOURS = 24 * n_days * n_sample
forcing_feature_train = forcing_feature_da[:, :TRAIN_HOURS, :40, :40, [2,3,6,7]]
target_train = target_da[:, :TRAIN_HOURS, :40, :40, :]
new_static_feature_da = new_static_feature_da[:, :40, :40, :]
# Reshape based on number of samples
forcing_feature_train = np.reshape(forcing_feature_train, (n_sample, 24 * n_days, forcing_feature_train.shape[2], forcing_feature_train.shape[3],
forcing_feature_train.shape[4]))
target_train = np.reshape(target_train, (n_sample, 24 * n_days, target_train.shape[2], target_train.shape[3],
target_train.shape[4]))
forcing_norm_train = normalize_feature_da(forcing_feature_train)
target_norm_train = normalize_feature_da(target_train)
t0 = time.time()
patch_size = tf.Variable(20)
ims = reshape_patch(forcing_norm_train, patch_size)
tars = reshape_patch(target_norm_train, patch_size)
# tars = tars[:, :, :, :, :50]
t1 = time.time()
print('reshape time: ' + str(t1 - t0))
t0 = time.time()
lr = 1e-4
for ii in range(150):
loss, ae_optimizer = train_step(model, ims, tars, lr)
if reverse_input:
ims_rev = ims[:, ::-1]
tars_rev = tars[:, ::-1]
tmp_loss, _ = train_step(model, ims_rev, tars_rev, lr)
loss += tmp_loss
loss = loss / 2
if loss < curr_loss:
print('save loss: '+str(loss))
model.save_weights(save_name)
curr_loss = loss
if ii % 5 == 0:
t1 = time.time()
elapsed_time = t1 - t0
t0 = time.time()
print("loss {:1.6f}, time step {:1.0f}, elapsed_time {:2.4f} s".format(loss, ii, elapsed_time))
# --------------------------------------------------
# TRAIN with 14 days
# --------------------------------------------------
n_sample = 3
n_days = 14
TRAIN_HOURS = 24 * n_days * n_sample
forcing_feature_train = forcing_feature_da[:, :TRAIN_HOURS, :40, :40, [2,3,6,7]]
target_train = target_da[:, :TRAIN_HOURS, :40, :40, :]
new_static_feature_da = new_static_feature_da[:, :40, :40, :]
# Reshape based on number of samples
forcing_feature_train = np.reshape(forcing_feature_train, (n_sample, 24 * n_days, forcing_feature_train.shape[2], forcing_feature_train.shape[3],
forcing_feature_train.shape[4]))
target_train = np.reshape(target_train, (n_sample, 24 * n_days, target_train.shape[2], target_train.shape[3],
target_train.shape[4]))
forcing_norm_train = normalize_feature_da(forcing_feature_train)
target_norm_train = normalize_feature_da(target_train)
t0 = time.time()
patch_size = tf.Variable(20)
ims = reshape_patch(forcing_norm_train, patch_size)
tars = reshape_patch(target_norm_train, patch_size)
# tars = tars[:, :, :, :, :50]
t1 = time.time()
print('reshape time: ' + str(t1 - t0))
t0 = time.time()
lr = 1e-4
for ii in range(150):
loss, ae_optimizer = train_step(model, ims, tars, lr)
if reverse_input:
ims_rev = ims[:, ::-1]
tars_rev = tars[:, ::-1]
tmp_loss, _ = train_step(model, ims_rev, tars_rev, lr)
loss += tmp_loss
loss = loss / 2
if loss < curr_loss:
print('save loss: '+str(loss))
model.save_weights(save_name)
curr_loss = loss
if ii % 5 == 0:
t1 = time.time()
elapsed_time = t1 - t0
t0 = time.time()
print("loss {:1.6f}, time step {:1.0f}, elapsed_time {:2.4f} s".format(loss, ii, elapsed_time))
# --------------------------------------------------
# TRAIN with 30 days
# --------------------------------------------------
n_sample = 3
n_days = 30
TRAIN_HOURS = 24 * n_days * n_sample
forcing_feature_train = forcing_feature_da[:, :TRAIN_HOURS, :40, :40, [2,3,6,7]]
target_train = target_da[:, :TRAIN_HOURS, :40, :40, :]
new_static_feature_da = new_static_feature_da[:, :40, :40, :]
# Reshape based on number of samples
forcing_feature_train = np.reshape(forcing_feature_train, (n_sample, 24 * n_days, forcing_feature_train.shape[2], forcing_feature_train.shape[3],
forcing_feature_train.shape[4]))
target_train = np.reshape(target_train, (n_sample, 24 * n_days, target_train.shape[2], target_train.shape[3],
target_train.shape[4]))
forcing_norm_train = normalize_feature_da(forcing_feature_train)
target_norm_train = normalize_feature_da(target_train)
t0 = time.time()
patch_size = tf.Variable(20)
ims = reshape_patch(forcing_norm_train, patch_size)
tars = reshape_patch(target_norm_train, patch_size)
# tars = tars[:, :, :, :, :50]
t1 = time.time()
print('reshape time: ' + str(t1 - t0))
t0 = time.time()
lr = 1e-6
for ii in range(300):
loss, ae_optimizer = train_step(model, ims, tars, lr)
if reverse_input:
ims_rev = ims[:, ::-1]
tars_rev = tars[:, ::-1]
tmp_loss, _ = train_step(model, ims_rev, tars_rev, lr)
loss += tmp_loss
loss = loss / 2
if loss < curr_loss:
print('save loss: '+str(loss))
model.save_weights(save_name)
curr_loss = loss
if ii % 5 == 0:
t1 = time.time()
elapsed_time = t1 - t0
t0 = time.time()
print("loss {:1.6f}, time step {:1.0f}, elapsed_time {:2.4f} s".format(loss, ii, elapsed_time))
# --------------------------------------------------
# FINISHED!
# --------------------------------------------------
print('done')
|
Python
|
CL
|
ff848c09d4cacbc98994906684d98024cb87e5d60c97ac75677f7220e302977d
|
import time
import sys
"""Step 03 (D): Function for Table To VCF"""
def fnc_table_to_vcf(args):
print("converting Table file to VCF")
begin_time = time.time()
"""Assign some input variables. """
infile = args.inFile
meta_header = args.vcfHeader
outfile = args.outVCF
samples = args.samples
formats = args.formats
infos = args.infos
# find the genotype tags that are in iupac bases
genotype_is = args.GTbase
gt_tag_as_iupac = []
for gts_tag in genotype_is:
tag_format = gts_tag.split(':')
if tag_format[1] == 'iupac':
gt_tag_as_iupac.append(tag_format[0])
with open(infile) as tablefile, open(meta_header) as meta_header, open(
outfile, "w+") as vcf_out:
"""Start reading the haplotype file as generator. This saves memory. """
for line in tablefile:
## find and set the indexes ...
# ... of pre-fields, INFO, FORMAT and SAMPLE level information
""" Step 01: The very first line of the file is read;
- to find the variable name and its index position in the input file.
- almost all the variable created downstream are "global variables".
- SAMPLE level information is automatically identified unless explicitly given.
The sample names are identified using ":" in the column names, so other names
should not have ":" at all.
- FORMAT level tags can also be provided as list, or can be mined automatically
along with SAMPLE by using ":" matching.
- All the preHeader tags, ie. CHROM POS ID REF ALT QUAL FILTER are reserved and
updated by matching the names in text header line.
"""
# to use the "header" name that have already been taken
# this will help in finding appropriate "INFO" level tags from the header file
used_header = []
if line.startswith("CHROM") or line.startswith("#CHROM"):
header_line = line.rstrip("\n").split("\t")
################# function 01 ######################
## ?? Bhuwan - move this as a preheader to another function and optimize if possible
if "CHROM" in header_line:
contig_idx = header_line.index("CHROM")
# update the taken header "labels"
used_header += ["CHROM"]
elif "#CHROM" in header_line:
contig_idx = header_line.index("#CHROM")
# update the taken header "labels"
used_header += ["#CHROM"]
else:
print("CHROM field does not exist in the input table file. Update your file")
print("Exiting the program")
sys.exit(0)
if "POS" in header_line:
pos_idx = header_line.index("POS")
used_header += ["POS"]
else:
print("POS field does not exist. Update your file")
print("Exiting the program")
sys.exit()
if "ID" in header_line:
id_idx = header_line.index("ID")
else:
id_idx = None
used_header += ["ID"]
if "REF" in header_line:
ref_idx = header_line.index("REF")
else:
ref_idx = None
used_header += ["REF"]
if "ALT" in header_line:
alt_idx = header_line.index("ALT")
else:
alt_idx = None
used_header += ["ALT"]
if "QUAL" in header_line:
qual_idx = header_line.index("QUAL")
else:
qual_idx = None
used_header += ["QUAL"]
if "FILTER" in header_line:
filter_idx = header_line.index("FILTER")
else:
filter_idx = None
used_header += ["FILTER"]
################## function 01 ends here ######
###############function 02 ##################################
## ?? Bhuwan - move this to a function and optimize the process
"""INFO tags are identified by matching "INFO:" in the column names."""
infos_in_header = [x for x in header_line if x.startswith("INFO:")]
all_infos = [x.replace("INFO:", "") for x in infos_in_header]
if len(all_infos) == 0:
print("INFO tags are not available.")
print("INFO field will be populated with empty '.' value")
info_tags = []
elif infos[0] == 'all':
info_tags = all_infos
print("Using the following metrics as INFO tags: ")
print(" %s" % info_tags)
else:
info_tags = infos
## find any missing INFO tags or any nonsense tag
non_matching_infos = list(set(info_tags) - set(all_infos))
non_used_infos = list(set(all_infos) - set(info_tags))
if len(non_matching_infos) > 0:
print("the following user provided infos are not available in input file")
print(" %s" % non_matching_infos)
if len(non_used_infos) > 0:
print("the following INFO tags won't be put in INFO fields of output VCF")
print(" %s" %non_used_infos)
# also find the position of the info tags on header line
infos_idx = []
if len(info_tags) != 0:
for inftag in info_tags:
infos_idx.append(header_line.index("INFO:" + inftag))
else:
infos_idx = None
########################## #######################
##############function 03 ################################
## ?? Bhuwan - move this to a separate function and optimize the process if possible
"""SAMPLE names and FORMAT tags are identified using ":" delimiter in the column names,
after excluding the INFO fields."""
possible_samples = [x for x in header_line if ':' in x]
# remove the INFO fields
samples_and_formats = list(set(possible_samples) - set(infos_in_header))
# split and set to collect unique sample names and unique format tags
# make sure to add process so the order is maintained
all_samples = list(set([x.split(':')[0] for x in samples_and_formats]))
all_formats = list(set([x.split(':')[1] for x in samples_and_formats]))
# find the available format tags
# ?? Bhuwan - write this as a separate function or subfunction
#### sub function 03 A
### prepare sample names
if formats[0]== "all":
format_tags = all_formats
elif len(formats) == 0:
print("No format tags available.")
format_tags = []
else: format_tags = formats
# In the available FORMAT tags, move "GT" field to the beginning.
if "GT" in format_tags:
format_tags.remove("GT")
format_tags.insert(0, "GT")
## ?? Bhuwan - write as sub function 03 B
### prepare sample names
if samples[0]== "all":
sample_names = all_samples
elif len(samples) == 0:
print("No sample available.")
sample_names = []
else:
sample_names = samples
nonsense_sample_names = [x for x in samples if not x in all_samples]
if len(nonsense_sample_names) > 0:
print("The following sample names %s are not available in table file and not valid."
% nonsense_sample_names)
sys.exit(0)
used_header += sample_names
### ?? Bhuwan - write as function 04 and optimize
""" Now, Read the meta header and add it to the output VCF file. """
print('\nReading meta header from file "%s" ' % (meta_header.name))
if meta_header != None:
meta_info = meta_header.readlines()
# if the meta header has "#CHROM POS REF ...." line then delete it
if meta_info[-1].startswith("#CHROM\tPOS"):
meta_info = "".join(meta_info[:-1]).rstrip("\n")
else:
meta_info = "".join(meta_info).rstrip("\n")
else:
print("Header with meta information is not provided")
print("Exiting the program")
sys.exit(0)
# add meta header to the output VCF file
meta_info += "\n"
meta_info += (
"\t".join(
[
"#CHROM",
"POS",
"ID",
"REF",
"ALT",
"QUAL",
"FILTER",
"INFO",
"FORMAT",
]
)
+ "\t"
)
# add SAMPLE fields to output VCF file
meta_info += "\t".join(sample_names)
# Finally, write the header part of the output VCF
vcf_out.write(meta_info + "\n")
######### function 04 ends here ###########
continue
# break
"""' Now, extract the required data from each of the remaining lines add to output VCF. """
updated_line = table_to_vcf(
line,
contig_idx,
pos_idx,
id_idx,
ref_idx,
alt_idx,
qual_idx,
filter_idx,
infos_idx,
info_tags,
format_tags,
sample_names,
gt_tag_as_iupac,
header_line,
)
vcf_out.write(updated_line)
vcf_out.write("\n")
print('Elapsed time : "%s".' % (time.time() - begin_time))
"""Function part of Table to VCF """
def table_to_vcf(
line_in,
contig_idx,
pos_idx,
id_idx,
ref_idx,
alt_idx,
qual_idx,
filter_idx,
infos_idx,
info_tags,
format_tags,
sample_names,
gt_tag_as_iupac,
header_line,
):
line = line_in.rstrip("\n").split("\t")
if contig_idx is not None:
chrom = line[contig_idx]
else:
chrom = "."
if pos_idx is not None:
pos = line[pos_idx]
else:
pos = "."
if id_idx is not None:
ids = line[id_idx]
else:
ids = "."
if ref_idx is not None:
ref = line[ref_idx]
else:
ids = "."
if alt_idx is not None:
alt = line[alt_idx]
else:
alt = "."
if qual_idx is not None:
qual = line[qual_idx]
else:
qual = "."
if filter_idx is not None:
filter_ = line[filter_idx]
else:
filter_ = "."
# Update "info tags and value". This is little complex
if info_tags != []:
info_ = []
for ith, itemi in enumerate(info_tags):
tag_val = "=".join([itemi, line[infos_idx[ith]]])
info_.append(tag_val)
info_ = ";".join(info_)
elif info_tags == []:
info_ = "."
# write the tags names of the FORMAT column
if format_tags != None:
format_ = ":".join(format_tags)
else:
format_ = "."
# update the output line
line_out = (
"\t".join([chrom, pos, ids, ref, alt, qual, filter_, info_, format_]) + "\t"
)
# Further update the SAMPLE-to-FORMAT values
# pass the line to another function
format_to_sample_vals = update_sample_format(
line, ref, alt, sample_names, format_tags, header_line, gt_tag_as_iupac
)
line_out = line_out + format_to_sample_vals
return line_out
""" Function part of Table to VCF """
def update_sample_format(
line, ref, alt, sample_names, format_tags, header_line, gt_tag_as_iupac
):
# The "line" variable is passed into this function.
# The global variables are "genotype_is", "sample_names" and "format_tags"
# to store updated line
format_sample_line = []
all_alleles = [ref] + alt.split(",")
for namex in sample_names:
namex_vals = []
for tagx in format_tags:
sample_format_tag = namex + ":" + tagx
sample_format_idx = header_line.index(sample_format_tag)
sample_format_val = line[sample_format_idx]
""" further update the sample:format value if GT in table is as IUPAC base """
if tagx in gt_tag_as_iupac:
if (
sample_format_val == "."
or sample_format_val == "./."
or sample_format_val == ".|."
):
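# NOTE: missing genotypes are skipped entirely, so no value is appended for this sample/tag.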
continue
elif "/" in sample_format_val:
sample_format_val = sample_format_val.split("/")
sample_format_val = [
all_alleles.index(sample_format_val[0]),
all_alleles.index(sample_format_val[1]),
]
sample_format_val = "/".join(str(xth) for xth in sample_format_val)
elif "|" in sample_format_val:
sample_format_val = sample_format_val.split("|")
sample_format_val = [
all_alleles.index(sample_format_val[0]),
all_alleles.index(sample_format_val[1]),
]
sample_format_val = "|".join(str(xth) for xth in sample_format_val)
namex_vals.append(sample_format_val)
format_sample_line.append(":".join(namex_vals))
sample_format_final = "\t".join(format_sample_line)
return sample_format_final
|
Python
|
CL
|
2fe6a9ef6f26d60efaa3f807daae0bf07b760e0686f2b32e47aef62e631a50b7
|
from robin_stocks.tda.helper import format_inputs, login_required, request_get
from robin_stocks.tda.urls import URLS
@login_required
@format_inputs
def get_hours_for_markets(markets, date, jsonify=None):
""" Gets market hours for various markets.
:param markets: The markets for which you're requesting market hours, comma-separated. \
Valid markets are EQUITY, OPTION, FUTURE, BOND, or FOREX.
:type markets: str
:param date: The date for which market hours information is requested. Valid ISO-8601 formats are : \
yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz.
:type date: str
:param jsonify: If set to false, will return the raw response object. \
If set to True, will return a dictionary parsed using the JSON format.
:type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests response object \
or a dictionary parsed using the JSON format and the second entry is an error string or \
None if there was not an error.
"""
url = URLS.markets()
payload = {
"markets": markets,
"date": date
}
data, error = request_get(url, payload, jsonify)
return data, error
@login_required
@format_inputs
def get_hours_for_market(market, date, jsonify=None):
""" Gets market hours for a specific market.
    :param market: The market for which you're requesting market hours. \
Valid markets are EQUITY, OPTION, FUTURE, BOND, or FOREX.
:type market: str
:param date: The date for which market hours information is requested. Valid ISO-8601 formats are : \
yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz.
:type date: str
:param jsonify: If set to false, will return the raw response object. \
If set to True, will return a dictionary parsed using the JSON format.
:type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests response object \
or a dictionary parsed using the JSON format and the second entry is an error string or \
None if there was not an error.
"""
url = URLS.market(market)
payload = {
"date": date
}
data, error = request_get(url, payload, jsonify)
return data, error
@login_required
@format_inputs
def get_movers(market, direction, change, jsonify=None):
""" Gets market hours for a specific market.
:param market: The market for which you're requesting market hours, comma-separated. \
Valid markets are $DJI, $COMPX, or $SPX.X.
:type market: str
:param direction: To return movers with the specified directions of "up" or "down".
:type direction: str
:param change: To return movers with the specified change types of "percent" or "value".
:type change: str
:param jsonify: If set to false, will return the raw response object. \
If set to True, will return a dictionary parsed using the JSON format.
:type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests response object \
or a dictionary parsed using the JSON format and the second entry is an error string or \
None if there was not an error.
"""
url = URLS.movers(market)
payload = {
"direction": direction,
"change": change
}
data, error = request_get(url, payload, jsonify)
return data, error
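# A hedged usage sketch (illustrative only): all three helpers are decorated with
# @login_required, so they assume a TDA session has already been authenticated
# elsewhere in robin_stocks.tda before they are called. The dates and markets below
# are made-up example values.
def _example_market_queries():
    # Market hours for equities and options on a given day
    hours, err = get_hours_for_markets("EQUITY,OPTION", "2021-06-01")
    # Hours for a single market
    equity_hours, err = get_hours_for_market("EQUITY", "2021-06-01")
    # Top percentage gainers on the Dow
    movers, err = get_movers("$DJI", "up", "percent")
    return hours, equity_hours, movers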
|
Python
|
CL
|
7594ff10218442ff4ebd720e1d8bba83b7918c2459210dae8f19805bc3af5dcb
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Utilities for preprocessing sequence data.
"""
from __future__ import print_function
from tensorflow.python.keras.preprocessing.sequence import TimeseriesGenerator
from tensorflow.python.keras.preprocessing.sequence import make_sampling_table
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.preprocessing.sequence import skipgrams
del print_function
|
Python
|
CL
|
97fab81953a93ee990aaaadd41639e7f35a09b7d8a639c596e7e35160ba7fa6a
|
import collections
import decimal
import json
from django.core.exceptions import ValidationError
class TransferService:
"""
Not used in current implementation.
Service that distributes transactions to accounts.
Init:
transactions - list of transactions dicts
[{id: ..., account_id: ..., amount: ...}, ]
accounts - list of debt accounts dicts
[{id: ..., share: ...}, ]
    After processing, self.transfer contains
    transfer data ready to be inserted
    into the Transfer model.
"""
def __init__(self, transactions, accounts):
# Group transactions by debit accounts
od = collections.OrderedDict()
for d in transactions:
aid = d['account_id']
if aid not in od:
od[aid] = {}
od[aid]['amount'] = 0
od[aid]['amount_transferred'] = 0
od[aid]['transactions'] = []
od[aid]['transactions'].append(d['id'])
od[aid]['amount'] += d['amount']
self.debits = []
for k, v in od.items():
d = {'id': k}
d.update(v)
self.debits.append(d)
self.debts = []
for d in accounts:
d['amount_target'] = 0
d['amount_transferred'] = 0
self.debts.append(d)
self.transfer = []
def get_total(self):
"""
        Returns the sum of all transaction amounts.
"""
total = 0
for d in self.debits:
total += d['amount']
return total
def fill_target(self):
"""
        Fill the target amount for each account
        according to the account's share.
"""
total = self.get_total()
sum = 0
for d in self.debts:
target = total * d['share'] / 100
target = target.quantize(decimal.Decimal('.01'))
d['amount_target'] = target
sum += target
if sum != total:
s = json.dumps(self.debts)
raise ValidationError(
'TransferService: sum != total, {}'.format(s))
    def is_debit_processed(self, d):
        return d['amount'] == d['amount_transferred']
    def is_debt_processed(self, d):
        return d['amount_target'] == d['amount_transferred']
def get_debit(self):
"""
        Returns the first not yet processed debit account
        or None if all debit accounts have been processed.
"""
for d in self.debits:
if not self.is_debit_processed(d):
return d
return None
def get_debt(self):
"""
        Returns the first not yet processed debt account
        or None if all debt accounts have been processed.
"""
for d in self.debts:
if not self.is_debt_processed(d):
return d
return None
def process(self):
"""
        Recursive method that processes transfers until finished.
"""
debit = self.get_debit()
if debit is None:
return
debt = self.get_debt()
target = debt['amount_target']
transferred = debt['amount_transferred']
amount_to_transfer = debit['amount'] - debit['amount_transferred']
if amount_to_transfer > target - transferred:
amount_to_transfer = target - transferred
debit['amount_transferred'] += amount_to_transfer
debt['amount_transferred'] += amount_to_transfer
result = {}
result['account_from'] = debit['id']
result['account_to'] = debt['id']
result['amount'] = amount_to_transfer
self.transfer.append(result)
return self.process()
def check(self):
sum1 = 0
for d in self.debits:
sum1 += d['amount_transferred']
sum2 = 0
        for d in self.debts:
sum2 += d['amount_transferred']
if sum1 != sum2:
raise ValidationError(
                'TransferService: transferred amounts do not match.')
def run(self):
self.fill_target()
self.process()
self.check()
for d in self.transfer:
print(d)
@staticmethod
def mock_transactions():
"""
Returns transactions for testing.
"""
return [
{'id': 1, 'account_id': 1, 'amount': decimal.Decimal('0.55')},
{'id': 2, 'account_id': 1, 'amount': decimal.Decimal('0.21')},
{'id': 3, 'account_id': 1, 'amount': decimal.Decimal('0.94')},
{'id': 4, 'account_id': 2, 'amount': decimal.Decimal('0.32')},
{'id': 5, 'account_id': 2, 'amount': decimal.Decimal('0.48')},
{'id': 6, 'account_id': 3, 'amount': decimal.Decimal('0.11')},
{'id': 7, 'account_id': 3, 'amount': decimal.Decimal('0.51')},
]
@staticmethod
def mock_accounts():
"""
Returns accounts for testing.
Sum of share of all accounts should be equal to 100.
"""
return [
{'id': 4, 'share': 57},
{'id': 5, 'share': 12},
{'id': 6, 'share': 31},
]
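# A minimal usage sketch with the built-in mock data (illustrative only; the helper
# name is hypothetical and not used by the application itself): distributes the
# mocked transactions across the mocked debt accounts according to their shares.
def _example_transfer_run():
    service = TransferService(TransferService.mock_transactions(),
                              TransferService.mock_accounts())
    service.run()            # fills targets, distributes amounts, checks the totals
    return service.transfer  # list of {'account_from', 'account_to', 'amount'} dicts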
|
Python
|
CL
|
3ce6d82bd68a54bf955b8ea169eb916528b64111276798be1cc51e2a34f44f49
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import random
import numpy as np
import scipy.ndimage
def get_batch_shape(batch):
batch_shape = batch[0].shape
return batch_shape[0], batch_shape[1]
def apply_transform(x, transform_matrix, fill_mode='nearest', cval=0.):
x = np.rollaxis(x, 2, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [scipy.ndimage.interpolation.affine_transform(
x_channel, final_affine_matrix, final_offset, order=0, mode=fill_mode, cval=cval)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, 2 + 1)
return x
def transform_matrix_offset_center(matrix, x, y):
"""Return transform matrix offset center.
Used with `rotation`, `shear`, `zoom`.
Args:
matrix : `numpy array` Transform matrix.
x : `int`.
y : `int`.
"""
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def crop(batch, crop_height, crop_width, is_random=True, padding=None):
"""Randomly or centrally crop an image according to `crop_height`, `crop_width`.
An optional padding can be specified, for padding picture with 0s (To conserve original
image shape).
Args:
batch:
crop_height: `int`. The crop shape height.
crop_width: `int`. The crop shape width.
is_random : `boolean`. random crop or central crop.
padding: `int`. If not None, the image is padded with 'padding' 0s.
Examples:
```python
>>> # Example: pictures of 32x32
>>> # Random crop of 24x24 into a 32x32 picture => output 24x24
>>> crop(batch, crop_height=24, crop_width=24)
>>> # Random crop of 32x32 with image padding of 6 #
>>> # (to conserve original image shape) => output 32x32
>>> crop(batch, crop_height=32, crop_width=32, padding=6)
```
"""
shape_w, shape_h = get_batch_shape(batch)
if padding:
shape_w, shape_h = shape_w + 2 * padding, shape_h + 2 * padding
new_batch = []
pad_width = ((padding, padding), (padding, padding), (0, 0))
if is_random:
h_offset = random.randint(0, shape_w - crop_height)
w_offset = random.randint(0, shape_h - crop_width)
else: # central crop
h_offset = int(np.floor((shape_w - crop_height) / 2.))
w_offset = int(np.floor((shape_h - crop_width) / 2.))
for i in range(len(batch)):
new_i_batch = batch[i]
if padding:
new_i_batch = np.lib.pad(new_i_batch, pad_width=pad_width,
mode='constant', constant_values=0)
new_i_batch = new_i_batch[
h_offset:h_offset + crop_height, w_offset:w_offset + crop_width]
new_batch.append(new_i_batch)
return np.asarray(new_batch)
def flip(batch, axis=0, is_random=True):
"""Flip an image (left to right) `axis` 0 or (up and down) if `axis` 1.
Args:
batch:
axis: `int`. 0 for horizontal, 1 for vertical
is_random : `boolean`.
"""
flip = True if not is_random else np.random.uniform(-1, 1) > 0
flip_fct = np.fliplr if axis == 0 else np.flipud
for i in range(len(batch)):
if flip:
batch[i] = flip_fct(batch[i])
return batch
def shift(batch, width_pct=0.1, height_pct=0.1, is_random=True, fill_mode='nearest', cval=0.):
"""Shift an image.
Args:
batch:
width_pct : `float`. Percentage of shift in axis x, usually -0.25 ~ 0.25.
height_pct : `float`. Percentage of shift in axis y, usually -0.25 ~ 0.25.
is_random : `boolean`.
fill_mode : `string`.
Method to fill missing pixel, option: ‘nearest’, ‘constant’, ‘reflect’ or ‘wrap’.
- `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`_
cval : `float`. Value used for points outside the boundaries of the input if mode='constant'.
- `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`_
"""
shape_w, shape_h = get_batch_shape(batch)
if is_random:
tx = np.random.uniform(-height_pct, height_pct) * shape_h
ty = np.random.uniform(-width_pct, width_pct) * shape_w
else:
tx, ty = height_pct * shape_h, height_pct * shape_w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
new_batch = []
for i in range(len(batch)):
x = apply_transform(batch[i], transform_matrix, fill_mode, cval)
new_batch.append(x)
return np.asarray(new_batch)
def blur(batch, sigma_max=5., is_random=True):
"""Randomly blur an image by applying a gaussian filter with a random sigma (0., sigma_max).
Args:
batch:
sigma_max: `float` or list of `float`. Standard deviation for Gaussian
kernel. The standard deviations of the Gaussian filter are
given for each axis as a sequence, or as a single number,
in which case it is equal for all axes.
is_random: `boolean`.
"""
blur = True if not is_random else np.random.uniform(-1, 1) > 0
for i in range(len(batch)):
if blur:
# Random sigma
sigma = random.uniform(0., sigma_max)
batch[i] = scipy.ndimage.filters.gaussian_filter(batch[i], sigma)
return np.asarray(batch)
def zoom(batch, zoom_range=(0.9, 1.1), is_random=True, fill_mode='nearest', cval=0.):
"""Zoom in and out of images, randomly or non-randomly.
Args:
batch:
zoom_range: `list` or `tuple`.
- If is_random=False, (h, w) are the fixed zoom factor for row and column axies,
factor small than one is zoom in.
- If is_random=True, (min zoom out, max zoom out) for x and y with different
random zoom in/out factor.
e.g (0.5, 1) zoom in 1~2 times.
is_random: `boolean`.
fill_mode: `string`.
Method to fill missing pixel, option: ‘nearest’, ‘constant’, ‘reflect’ or ‘wrap’.
cval: `float`. Used for points outside the boundaries of the input if mode='constant'.
"""
if len(zoom_range) != 2:
raise Exception('zoom_range should be a tuple or list of two floats. '
'Received arg: ', zoom_range)
if is_random:
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
print(" random_zoom : not zoom in/out")
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
else:
zx, zy = zoom_range
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
shape_w, shape_h = get_batch_shape(batch)
transform_matrix = transform_matrix_offset_center(zoom_matrix, shape_h, shape_w)
new_batch = []
for i in range(len(batch)):
x = apply_transform(batch[i], transform_matrix, fill_mode, cval)
new_batch.append(x)
return np.asarray(new_batch)
def add_random_90degrees_rotation(batch, rotations=(0, 1, 2, 3)):
"""Rotate by 90 degrees.
Args:
batch:
rotations: `tuple`. Allowed 90 degrees rotations.
"""
for i in range(len(batch)):
num_rotations = random.choice(rotations)
batch[i] = np.rot90(batch[i], num_rotations)
return np.asarray(batch)
def add_random_rotation(batch, max_angle=20., is_random=True):
"""Rotate an image by a random angle (-max_angle, max_angle).
Args:
batch:
max_angle: `float`. The maximum rotation angle.
is_random: `boolean`.
"""
rotate = True if not is_random else np.random.uniform(-1, 1) > 0
for i in range(len(batch)):
if rotate:
# Random angle
angle = random.uniform(-max_angle, max_angle)
batch[i] = scipy.ndimage.interpolation.rotate(batch[i], angle, reshape=False)
return np.asarray(batch)
def add_drop(batch, drop=0.5):
"""Randomly set some pixels to zero by a given keeping probability.
Args:
batch: batch of `numpy array` (An image with dims of [row, col, channel] or [row, col]).
drop: `float` (0, 1), The drop probability, higher => more values will be set to zero.
"""
batch_shape = batch[0].shape
def drop_color(x):
mask = np.random.binomial(n=1, p=1 - drop, size=batch_shape[:-1])
for i in range(3):
x[:, :, i] = np.multiply(x[:, :, i], mask)
return x
def drop_gray(x):
return np.multiply(x, np.random.binomial(n=1, p=1 - drop, size=batch_shape))
if len(batch_shape) == 3:
if batch_shape[-1] == 3: # color
drop_fct = drop_color
elif batch_shape[-1] == 1: # greyscale image
drop_fct = drop_gray
else:
raise Exception('Unsupported shape {}'.format(batch_shape))
    elif len(batch_shape) in (1, 2):  # greyscale matrix (image) or vector
drop_fct = drop_gray
else:
raise Exception('Unsupported shape {}'.format(batch_shape))
new_batch = []
for i in range(len(batch)):
        new_batch.append(drop_fct(batch[i]))
return np.asarray(new_batch)
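# A small hedged demo (illustrative only; assumes a batch of HxWxC images, which is
# what the functions above expect): pad-and-crop back to 32x32, a random horizontal
# flip, then a random rotation of at most 20 degrees.
def _example_augment(batch):
    batch = crop(batch, crop_height=32, crop_width=32, is_random=True, padding=6)
    batch = flip(batch, axis=0, is_random=True)
    batch = add_random_rotation(batch, max_angle=20., is_random=True)
    return batch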
|
Python
|
CL
|
fdf1e2e1f028b859dd89160e79f9b7c59fc0979307495b9eff18e6038527808f
|
## subset sum, dynamic programming - pseudocode
## given n items (each with increasing weight), and W upper bound
## compute the best set of items to put in the set S
## so that you get the highest weight that's still
## at most the max weight W
## 1. build look-up table M
## 2. work backwards from max weight to find
## what items to put in the set S
##
## n: number of items
## w: array with weight of each item ; n total items
## W: max weight the sum can be
##
## w: 1-indexed list of item weights (w[0] is a placeholder so w[j] is the weight
## of item j); these example values stand in for the original "array of weights"
w = [0, 3, 5, 7, 11]
def subset_sum(n, W):
    ## initialize the look-up table:
    ## M[j][r] = best achievable sum using items 1..j with capacity r
    M = [[0] * (W + 1) for _ in range(n + 1)]
    for j in range(1, n + 1):
        for r in range(W + 1):
            if w[j] > r:
                M[j][r] = M[j - 1][r]  # this item is too heavy, cannot include;
                                       # therefore the max sum is whatever it was
                                       # with all items up to the last j
            else:
                # find the max of skipping item j, or taking it and
                # filling the remaining capacity r - w[j]
                M[j][r] = max(M[j - 1][r], w[j] + M[j - 1][r - w[j]])
    return M[n][W]  ## after the table has been filled, we simply look it up
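## Step 2 from the notes above, as a hedged sketch (the function name is just for
## illustration): refill the table and walk it backwards to recover which items
## make up the best achievable sum S. Uses the same 1-indexed weight list w.
def subset_sum_items(n, W):
    M = [[0] * (W + 1) for _ in range(n + 1)]
    for j in range(1, n + 1):
        for r in range(W + 1):
            if w[j] > r:
                M[j][r] = M[j - 1][r]
            else:
                M[j][r] = max(M[j - 1][r], w[j] + M[j - 1][r - w[j]])
    S, r = [], W
    for j in range(n, 0, -1):
        ## item j was taken iff dropping it changes the optimum at capacity r
        if M[j][r] != M[j - 1][r]:
            S.append(j)
            r -= w[j]
    return M[n][W], S
## e.g. with the example weights above and W = 17: subset_sum_items(4, 17) -> (16, [4, 2])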
|
Python
|
CL
|
f05a040d65e08a57ae207809b73ac40a09ee35b0e8d6f653d62f778acbf670db
|
import FWCore.ParameterSet.Config as cms
#--------------------------------------------------------------------------------
# Compute ratio of parton luminosities at LHC @ xxx TeV center-of-mass energy vs. TeVatron
# for:
# o gluon + gluon
# o b + bbar
# o u + ubar, d + dbar
#--------------------------------------------------------------------------------
process = cms.Process('compPartonLuminosity')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(0)
)
process.source = cms.Source("EmptySource")
pdfSetFilePath = "/afs/cern.ch/user/v/veelken/scratch0/CMSSW_3_8_5/src/TauAnalysis/FittingTools/test"
process.compPartonLuminosity = cms.EDAnalyzer("PartonLuminosityAnalyzer",
##pdfSet = cms.string(pdfSetFilePath + "/" + "MSTW2008nlo68cl.LHgrid"),
pdfSet = cms.string("MRST2004nlo.LHgrid"),
sqrtS_TeVatron = cms.double(1960.), # units = GeV
sqrtS_LHC = cms.double(7000.), # units = GeV
massMin = cms.double(100.),
massMax = cms.double(500.),
canvasSizeX = cms.int32(800),
canvasSizeY = cms.int32(640),
xScale = cms.string("linear"),
yScale = cms.string("log"),
yMin = cms.double(0.5),
yMax = cms.double(1000.),
##outputFilePath = cms.string("./plots"),
outputFileName = cms.string("compPartonLuminosity.png")
)
process.p = cms.Path(process.compPartonLuminosity)
|
Python
|
CL
|
521e978270614fdcb6014a992f0c5e9b2ae0ef5c6f7f4e1f7cde06d498bbc268
|
import argparse, json
import boto3
from jinja2 import Environment, FileSystemLoader
"""
A bunch of free functions that we use in all scripts.
"""
def get_jinja_env(config):
"""
Get a jinja2 Environment object that we can use to find templates.
"""
return Environment(loader=FileSystemLoader('.'))
def json_file(filename):
with open(filename, 'r') as f:
return json.load(f)
def get_parent_parser():
"""
Get an argparse parser with arguments that are always needed
"""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--prod', action='store_false', dest='sandbox',
default=True,
help="Whether to run on the production AMT site.")
parser.add_argument('--hit_ids_file')
parser.add_argument('--config', default='config.json',
type=json_file)
return parser
def get_mturk_connection_from_args(args):
"""
Utility method to get an MTurkConnection from argparse args.
"""
aws_access_key = args.config.get('aws_access_key')
aws_secret_key = args.config.get('aws_secret_key')
return get_mturk_connection(sandbox=args.sandbox,
aws_access_key=aws_access_key,
aws_secret_key=aws_secret_key)
def get_mturk_connection(sandbox=True,
aws_access_key=None,
aws_secret_key=None,
region_name='us-east-1'):
"""
Get a boto mturk connection. This is a thin wrapper over boto3.client;
the only difference is a boolean flag to indicate sandbox or not.
"""
kwargs = {}
# boto3 client requires a region to make a connection. if you
# have a default region in your ~/.aws/config other than us-east-1,
# it throws an error. Since Mturk endpoint is by default only in
# us-east-1, there is no point of asking users to provide it. See #29
kwargs['region_name'] = region_name
if aws_access_key is not None:
kwargs['aws_access_key_id'] = aws_access_key
if aws_secret_key is not None:
kwargs['aws_secret_access_key'] = aws_secret_key
if sandbox:
host = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
else:
        host = 'https://mturk-requester.us-east-1.amazonaws.com'
return boto3.client('mturk', endpoint_url=host, **kwargs)
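# A hedged usage sketch tying the helpers above together (the function name is just
# for illustration): parse the shared CLI arguments, which load config.json with the
# AWS keys, then open a sandbox or production MTurk client.
def example_get_client():
    parser = get_parent_parser()
    args = parser.parse_args()          # expects a config.json next to the script
    return get_mturk_connection_from_args(args)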
def setup_qualifications(hit_properties, mtc):
"""
Replace some of the human-readable keys from the raw HIT properties
JSON data structure with boto-specific objects.
"""
qual = []
    if ('QualificationId' in hit_properties
            and 'QualificationComparator' in hit_properties
            and 'QualificationInteger' in hit_properties):
comparator = hit_properties['QualificationComparator']
if comparator == '>':
c = 'GreaterThan'
elif comparator == '=':
c = 'EqualTo'
elif comparator == '<':
c = 'LessThan'
        else:
            raise ValueError(
                "The 'qualification comparator' is not one of the designated values ('<', '=', '>').")
qual.append({
'QualificationTypeId': hit_properties['QualificationId'],
'Comparator': c,
'IntegerValues': [int(hit_properties['QualificationInteger'])],
'RequiredToPreview': False,
})
del hit_properties['QualificationId']
del hit_properties['QualificationComparator']
del hit_properties['QualificationInteger']
if 'Country' in hit_properties:
qual.append({
'QualificationTypeId': '00000000000000000071',
'Comparator': 'In',
'LocaleValues': [{'Country': country} for country in hit_properties['Country']],
})
del hit_properties['Country']
if 'HitsApproved' in hit_properties:
qual.append({
'QualificationTypeId': '00000000000000000040',
'Comparator': 'GreaterThan',
'IntegerValues': [hit_properties['HitsApproved']],
})
del hit_properties['HitsApproved']
if 'PercentApproved' in hit_properties:
qual.append({
'QualificationTypeId': '000000000000000000L0',
'Comparator': 'GreaterThan',
'IntegerValues': [hit_properties['PercentApproved']],
})
del hit_properties['PercentApproved']
hit_properties['QualificationRequirements'] = qual
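# A small illustration of what setup_qualifications does to a HIT-properties dict
# (the values and the function name here are made up): the human-readable keys are
# removed and replaced by a boto3-style 'QualificationRequirements' list.
def example_setup_qualifications(mtc):
    hit_properties = {
        'Title': 'Example HIT',
        'Country': ['US', 'CA'],
        'HitsApproved': 100,
        'PercentApproved': 95,
    }
    setup_qualifications(hit_properties, mtc)
    # hit_properties now contains 'QualificationRequirements' with three entries
    # and no longer contains 'Country', 'HitsApproved' or 'PercentApproved'.
    return hit_properties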
|
Python
|
CL
|
082f55651155146ac1cb4aef5c23d06c58ad5d6ed7bfa6b222eed202fc341490
|
# coding: utf-8
# ## Problem No. 5
# ### Variant 9
#
# #### Solving the problem of finding a locally minimal tree using the Melzak-Hwang algorithm
#
# Implementation author: Mikhail Kucherenko,
# Bauman Moscow State Technical University, IU5-64, 2019.
#
# Source code:
# - Python-Notebook: https://github.com/SnipGhost/MDO/blob/master/Z5.ipynb
# - Python-Sources: https://github.com/SnipGhost/MDO/blob/master/Z5.py
# In[1]:
# Set up matplotlib inline output
get_ipython().magic(u'matplotlib inline')
# In[2]:
# Import the required functions and libraries
from math import (cos, sin, pi, sqrt)
import numpy as np
import matplotlib.pyplot as plt
# In[3]:
# Variant number
d = 9
# In[4]:
l = [None] * 6
for i in reversed(range(1,7,1)):
n = d % i + 1
m = 7 - i
print '{}. {} mod {} + 1 = {}'.format(m, d, i, n)
for j in range(len(l)):
if l[j] is None:
n -= 1
if n == 0:
l[j] = m
break
print ' L = {}\n'.format(l)
# For convenience when computing points P1-P6:
l = [None] + l
# [!] the vertices MUST be sorted in ascending order, otherwise iterating over the vertex list will break
# In[5]:
# Given in the problem statement
G1 = [[7],[7],[8],[8],[9],[9],[1,2,10],[3,4,10],[5,6,10],[7,8,9]]
G2 = [[7],[7],[8],[10],[10],[9],[1,2,8],[3,7,9],[6,8,10],[4,5,9]]
# #### Generate the data series for our variant
# In[6]:
def generate_data(l):
x = []
y = []
for k in range(1, len(l)):
ax = 3 * cos(pi * k / 3) + cos(pi * l[k] / 3)
ay = 3 * sin(pi * k / 3) + sin(pi * l[k] / 3)
x.append(ax)
y.append(ay)
print 'P{}: ({}, {})'.format(k, ax, ay)
return x, y
# In[7]:
x, y = generate_data(l)
# ### Helper functions
# #### Plotting
#
# `x, y` - the main data series
# `x2, y2` - an additional data series (if needed)
# `p_lim` - the number of primary boundary points
# `max_x` - grid extent (absolute value) along x
# `max_y` - grid extent (absolute value) along y
# `step` - grid line step
# In[8]:
def draw(x, y, x2=None, y2=None, connect=None, connect2=None, circle=None, p_lim=6):
    # Set the figure size and resolution
plt.figure(figsize=(draw.size, draw.size), dpi=draw.dpi)
    # Draw lines marking the main axes and the origin
plt.axhline(0, color='black')
plt.axvline(0, color='black')
    # Individually draw the edges between existing graph vertices
if draw.graph:
        ap = [] # Edges already drawn
for i in range(len(x)):
for v in draw.graph[i]:
if v-1 < len(x) and ((i, v-1) not in ap) and ((v-1, i) not in ap):
rx, ry = [x[i], x[v-1]], [y[i], y[v-1]]
plt.plot(rx, ry, 'r--', color='gray')
ap.append((i, v-1))
    # If needed, additionally connect the specified points (series 1)
if connect:
ap = []
for v in connect:
for w in connect:
if (w != v) and ((w, v) not in ap) and ((v, w) not in ap):
if type(v) != tuple or type(w) != tuple:
rx = []
ry = []
if type(v) == tuple:
rx.append(v[0])
ry.append(v[1])
else:
rx.append(x[v])
ry.append(y[v])
if type(w) == tuple:
rx.append(w[0])
ry.append(w[1])
else:
rx.append(x[w])
ry.append(y[w])
plt.plot(rx, ry, 'r-.', color='teal')
ap.append((v, w))
    # If needed, additionally connect the specified points (series 2)
if connect2:
ap = []
for v in connect2:
for w in connect2:
if (w != v) and ((w, v) not in ap) and ((v, w) not in ap):
if type(v) != tuple or type(w) != tuple:
rx = []
ry = []
if type(v) == tuple:
rx.append(v[0])
ry.append(v[1])
else:
rx.append(x[v])
ry.append(y[v])
if type(w) == tuple:
rx.append(w[0])
ry.append(w[1])
else:
rx.append(x[w])
ry.append(y[w])
plt.plot(rx, ry, 'r-.', color='brown')
ap.append((v, w))
    # If needed, additionally draw circles with the given parameters
if circle:
for v in circle:
theta = np.linspace(0, 2*pi, 100)
rx = v[2]*np.cos(theta)+v[0]
ry = v[2]*np.sin(theta)+v[1]
plt.plot(rx, ry, 'r', color='orange', linewidth=1, scaley=False)
    # Plot the boundary vertices
plt.plot(x[:p_lim], y[:p_lim], 'ro', markersize=3)
for i, _ in enumerate(x):
        # Add a label to each point
plt.annotate('P'+str(i+1), (x[i], y[i]), size=draw.text_size)
    # Steiner points being added are plotted separately in a different color
if len(x) >= p_lim:
plt.plot(x[p_lim:], y[p_lim:], 'ro', markersize=4, color='green')
for i in range(len(x[p_lim:])):
plt.annotate('P'+str(p_lim+i+1), (x[p_lim+i], y[p_lim+i]), size=draw.text_size)
    # Additional data series, if needed
if x2 and y2 and len(x2) == len(y2):
plt.plot(x2, y2, 'ro', markersize=4, color='blue')
for i, _ in enumerate(x2):
plt.annotate('S'+str(i+1), (x2[i], y2[i]), size=draw.text_size)
    # Set the axes:
plt.axis([-draw.max_x, draw.max_x, -draw.max_y, draw.max_y])
    # Set the axis tick marks
x_ticks = np.arange(-draw.max_x, draw.max_x+1, draw.step)
y_ticks = np.arange(-draw.max_y, draw.max_y+1, draw.step)
plt.xticks(x_ticks)
plt.yticks(y_ticks)
    # Enable the coordinate grid
plt.grid()
    # Show the plot
plt.show()
    # Or save the plot to disk
# plt.savefig('graph-{}.png'.format(draw.graph_counter))
# draw.graph_counter += 1
# Function settings
draw.graph_counter = 0
draw.size = 12
draw.dpi = 180
draw.text_size = 12
draw.max_x = 8
draw.max_y = 8
draw.step = 1
# #### Initialize the connections for drawing the first graph
# In[9]:
draw.graph = G1
# In[10]:
draw(x, y)
# #### Finding the circumcenter of triangle ABC - A(x1,y1), B(x2,y2), C(x3,y3)
#
# Circle equation:
# $(x-a)^2+(y-b)^2=r^2$
#
# Writing it for the 3 vertices of the triangle we get:
# $\begin{cases}
# (x_{1}-a)^2+(y_{1}-b)^2=r^2 \\
# (x_{2}-a)^2+(y_{2}-b)^2=r^2 \\
# (x_{3}-a)^2+(y_{3}-b)^2=r^2
# \end{cases}$
#
# Subtracting the second equation from the first, and the third from the first:
# $\begin{cases}
# 2(x_{1}-x_{2})a+2(y_{1}-y_{2})b=(x_{1}^2-x_{2}^2)+(y_{1}^2-y_{2}^2) \\
# 2(x_{1}-x_{3})a+2(y_{1}-y_{3})b=(x_{1}^2-x_{3}^2)+(y_{1}^2-y_{3}^2)
# \end{cases}$
#
# This gives a system of two linear equations in two unknowns
# In[11]:
def get_center(x1, y1, x2, y2, x3, y3):
    # Find the solution of the system of equations
x12 = x1 - x2
x23 = x2 - x3
x31 = x3 - x1
y12 = y1 - y2
y23 = y2 - y3
y31 = y3 - y1
z1 = x1**2 + y1**2
z2 = x2**2 + y2**2
z3 = x3**2 + y3**2
zx = y12 * z3 + y23 * z1 + y31 * z2
zy = x12 * z3 + x23 * z1 + x31 * z2
z = x12 * y31 - y12 * x31
a = - zx / (2 * z)
b = zy / (2 * z)
r = sqrt((a-x1)**2 + (b-y1)**2)
return a, b, r
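# A quick sanity check (illustrative values only): for the right triangle
# A(0,0), B(2,0), C(0,2) the call get_center(0, 0, 2, 0, 0, 2) returns the
# circumcenter (1.0, 1.0) and radius sqrt(2).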
# #### Finding the apexes of the equilateral triangles built on base AB - A(a,b), B(c,d)
#
# Draw two circles with radius equal to the length of AB; their intersection points are the apexes of the triangles we are looking for.
#
# To find these points, set up the system of equations:
#
# $\begin{cases}
# (x-a)^2+(y-b)^2=r^2 \\
# (x-c)^2+(y-d)^2=r^2 \\
# r = \sqrt{(a-c)^2+(b-d)^2}
# \end{cases}$
# In[12]:
def find_vertex(a, b, c, d, eps=0.000001):
    # Find the 2 solutions of the system of equations
    # The points coincide
    if abs(b-d) < eps and abs(a-c) < eps:
        raise ArithmeticError('FindVertex: a == b == c == d')
    # Degenerate case: b == d
if abs(b-d) < eps:
rx = (a + c) / 2.0
r1y = 1.0 / 2 * (2 * d - sqrt(3) * sqrt((a - c)**2))
r2y = 1.0 / 2 * (sqrt(3) * sqrt((a - c)**2) + 2 * d)
return [rx, rx], [r1y, r2y]
r1x = 1.0/2 * (a - sqrt(3) * sqrt((b - d)**2) + c)
r1y = (sqrt(3) * a * sqrt((b - d)**2) + b**2 - sqrt(3) * c * sqrt((b - d)**2) - d**2) / (2.0 * (b - d))
r2x = 1.0/2 * (a + sqrt(3) * sqrt((b - d)**2) + c)
r2y = (-sqrt(3) * a * sqrt((b - d)**2) + b**2 + sqrt(3) * c * sqrt((b - d)**2) - d**2) / (2.0 * (b - d))
return [r1x, r2x], [r1y, r2y]
# #### Checking whether a point lies inside a polygon
#
# `x, y` - the given point
# `xp, yp` - array of polygon vertices
# In[13]:
def in_polygon(x, y, xp, yp):
    # Ray casting method: an even number of crossings means the point is outside the shape
c = 0
for i in range(len(xp)):
if (((yp[i]<=y and y<yp[i-1]) or (yp[i-1]<=y and y<yp[i])) and (x > (xp[i-1] - xp[i]) * (y - yp[i]) / (yp[i-1] - yp[i]) + xp[i])): c = 1 - c
return (c == 1)
# #### Checking whether segments A(ax1,ay1,ax2,ay2) and B(bx1,by1,bx2,by2) intersect
#
# Compute the signed areas of the corresponding triangles and compare their signs
# In[14]:
def is_intersect(ax1, ay1, ax2, ay2, bx1, by1, bx2, by2):
v1 = (bx2-bx1)*(ay1-by1)-(by2-by1)*(ax1-bx1)
v2 = (bx2-bx1)*(ay2-by1)-(by2-by1)*(ax2-bx1)
v3 = (ax2-ax1)*(by1-ay1)-(ay2-ay1)*(bx1-ax1)
v4 = (ax2-ax1)*(by2-ay1)-(ay2-ay1)*(bx2-ax1)
return (v1*v2 < 0) and (v3*v4 < 0)
# #### Checking whether point M(mx, my) belongs to segment AB(x1, y1, x2, y2)
#
# Under the assumption that point M already lies on line AB (established earlier in the solution)
# In[15]:
def is_belongs(mx, my, x1, y1, x2, y2):
ax = x1 - mx
ay = y1 - my
bx = x2 - mx
by = y2 - my
dot = ax * bx + ay * by
return (dot <= 0)
# #### Finding the intersection of a circle (given by a,b,r) and a segment (given by c,d,e,f)
#
# To solve this subproblem, solve the equations of the circle and the line together:
#
# $\begin{cases}
# (x-a)^2 + (y-b)^2 = r^2 \\
# \frac{x-c}{e-c} = \frac{y-d}{f-d}
# \end{cases}$
#
# By the conditions under which this function is used,
# we know that one point already lies on the segment, so it has to be excluded from the solution.
#
# Because the computations are unwieldy, they were generated and simplified in [Wolfram Mathematica][Math] and then translated to python.
#
# [Math]: https://www.wolframalpha.com/input/?i=solve+%7B+(y-d)%2F(f-d)%3D(x-c)%2F(e-c),+(x-a)%5E2+%2B+(y-b)%5E2+%3D+r%5E2+%7D+for+x,+y
#
# `eps` - the required precision for the coordinates
# In[16]:
def find_intersection(a, b, c, d, e, f, r, eps=0.001):
    # Find the first solution of the system of equations:
rx1 = (-sqrt(-(c - e)**2 * (a**2 * d**2 - 2 * a**2 * d * f + a**2 * f**2 - 2 * a * b * c * d + 2 * a * b * c * f + 2 * a * b * d * e - 2 * a * b * e * f + 2 * a * c * d * f - 2 * a * c * f**2 - 2 * a * d**2 * e + 2 * a * d * e * f + b**2 * c**2 - 2 * b**2 * c * e + b**2 * e**2 - 2 * b * c**2 * f + 2 * b * c * d * e + 2 * b * c * e * f - 2 * b * d * e**2 + c**2 * f**2 - c**2 * r**2 - 2 * c * d * e * f + 2 * c * e * r**2 + d**2 * e**2 - d**2 * r**2 + 2 * d * f * r**2 - e**2 * r**2 - f**2 * r**2)) + a * c**2 - 2 * a * c * e + a * e**2 + b * c * d - b * c * f - b * d * e + b * e * f - c * d * f + c * f**2 + d**2 * e - d * e * f)/(c**2 - 2 * c * e + d**2 - 2 * d * f + e**2 + f**2)
ry1 = (-d * sqrt(-(c - e)**2 * (a**2 * d**2 - 2 * a**2 * d * f + a**2 * f**2 - 2 * a * b * c * d + 2 * a * b * c * f + 2 * a * b * d * e - 2 * a * b * e * f + 2 * a * c * d * f - 2 * a * c * f**2 - 2 * a * d**2 * e + 2 * a * d * e * f + b**2 * c**2 - 2 * b**2 * c * e + b**2 * e**2 - 2 * b * c**2 * f + 2 * b * c * d * e + 2 * b * c * e * f - 2 * b * d * e**2 + c**2 * f**2 - c**2 * r**2 - 2 * c * d * e * f + 2 * c * e * r**2 + d**2 * e**2 - d**2 * r**2 + 2 * d * f * r**2 - e**2 * r**2 - f**2 * r**2)) + f * sqrt(-(c - e)**2 * (a**2 * d**2 - 2 * a**2 * d * f + a**2 * f**2 - 2 * a * b * c * d + 2 * a * b * c * f + 2 * a * b * d * e - 2 * a * b * e * f + 2 * a * c * d * f - 2 * a * c * f**2 - 2 * a * d**2 * e + 2 * a * d * e * f + b**2 * c**2 - 2 * b**2 * c * e + b**2 * e**2 - 2 * b * c**2 * f + 2 * b * c * d * e + 2 * b * c * e * f - 2 * b * d * e**2 + c**2 * f**2 - c**2 * r**2 - 2 * c * d * e * f + 2 * c * e * r**2 + d**2 * e**2 - d**2 * r**2 + 2 * d * f * r**2 - e**2 * r**2 - f**2 * r**2)) + a * c**2 * d - a * c**2 * f - 2 * a * c * d * e + 2 * a * c * e * f + a * d * e**2 - a * e**2 * f + b * c * d**2 - 2 * b * c * d * f + b * c * f**2 - b * d**2 * e + 2 * b * d * e * f - b * e * f**2 + c**3 * f - c**2 * d * e - 2 * c**2 * e * f + 2 * c * d * e**2 + c * e**2 * f - d * e**3) / ((c - e) * (c**2 - 2 * c * e + d**2 - 2 * d * f + e**2 + f**2))
    # The second solution:
rx2 = (sqrt(-(c - e)**2 * (a**2 * d**2 - 2 * a**2 * d * f + a**2 * f**2 - 2 * a * b * c * d + 2 * a * b * c * f + 2 * a * b * d * e - 2 * a * b * e * f + 2 * a * c * d * f - 2 * a * c * f**2 - 2 * a * d**2 * e + 2 * a * d * e * f + b**2 * c**2 - 2 * b**2 * c * e + b**2 * e**2 - 2 * b * c**2 * f + 2 * b * c * d * e + 2 * b * c * e * f - 2 * b * d * e**2 + c**2 * f**2 - c**2 * r**2 - 2 * c * d * e * f + 2 * c * e * r**2 + d**2 * e**2 - d**2 * r**2 + 2 * d * f * r**2 - e**2 * r**2 - f**2 * r**2)) + a * c**2 - 2 * a * c * e + a * e**2 + b * c * d - b * c * f - b * d * e + b * e * f - c * d * f + c * f**2 + d**2 * e - d * e * f)/(c**2 - 2 * c * e + d**2 - 2 * d * f + e**2 + f**2)
ry2 = (d * sqrt(-(c - e)**2 * (a**2 * d**2 - 2 * a**2 * d * f + a**2 * f**2 - 2 * a * b * c * d + 2 * a * b * c * f + 2 * a * b * d * e - 2 * a * b * e * f + 2 * a * c * d * f - 2 * a * c * f**2 - 2 * a * d**2 * e + 2 * a * d * e * f + b**2 * c**2 - 2 * b**2 * c * e + b**2 * e**2 - 2 * b * c**2 * f + 2 * b * c * d * e + 2 * b * c * e * f - 2 * b * d * e**2 + c**2 * f**2 - c**2 * r**2 - 2 * c * d * e * f + 2 * c * e * r**2 + d**2 * e**2 - d**2 * r**2 + 2 * d * f * r**2 - e**2 * r**2 - f**2 * r**2)) - f * sqrt(-(c - e)**2 * (a**2 * d**2 - 2 * a**2 * d * f + a**2 * f**2 - 2 * a * b * c * d + 2 * a * b * c * f + 2 * a * b * d * e - 2 * a * b * e * f + 2 * a * c * d * f - 2 * a * c * f**2 - 2 * a * d**2 * e + 2 * a * d * e * f + b**2 * c**2 - 2 * b**2 * c * e + b**2 * e**2 - 2 * b * c**2 * f + 2 * b * c * d * e + 2 * b * c * e * f - 2 * b * d * e**2 + c**2 * f**2 - c**2 * r**2 - 2 * c * d * e * f + 2 * c * e * r**2 + d**2 * e**2 - d**2 * r**2 + 2 * d * f * r**2 - e**2 * r**2 - f**2 * r**2)) + a * c**2 * d - a * c**2 * f - 2 * a * c * d * e + 2 * a * c * e * f + a * d * e**2 - a * e**2 * f + b * c * d**2 - 2 * b * c * d * f + b * c * f**2 - b * d**2 * e + 2 * b * d * e * f - b * e * f**2 + c**3 * f - c**2 * d * e - 2 * c**2 * e * f + 2 * c * d * e**2 + c * e**2 * f - d * e**3)/((c - e) * (c**2 - 2 * c * e + d**2 - 2 * d * f + e**2 + f**2))
    # Discard one of them, because one point already unambiguously belongs to the start of the segment
if ((abs(rx1 - c) < eps) and (abs(ry1 - d) < eps)) or ((abs(rx1 - e) < eps) and (abs(ry1 - f) < eps)):
return rx2, ry2
else:
return rx1, ry1
# #### Checking boundary-point candidates against the Hwang condition
#
# `x, y` - array of problem points
# `xs, ys` - candidate for a new boundary point
# `ax, ay, bx, by` - base of the equilateral triangle
# In[17]:
def check_edge_point(x, y, xs, ys, ax, ay, bx, by):
    # Check whether the vertex lies inside the whole figure
    # If it is inside, the Hwang condition is not satisfied
    if in_polygon(xs, ys, x, y):
        return False
    # Check intersections with all possible edges
    # If any edge intersects, the Hwang condition is not satisfied
for i in range(len(x)):
for j in range(len(x)):
if i != j:
e1 = is_intersect(x[i], y[i], x[j], y[j], xs, ys, ax, ay)
e2 = is_intersect(x[i], y[i], x[j], y[j], xs, ys, bx, by)
if e1 or e2:
return False
return True
# Worth noting:
#
# The wrapper functions `create_edge_point` and `create_stein_point` take classical indices as input (starting from one), but internally work with the zero-based indices usual for programming languages. This is done to make these functions easier for the operator to use.
# ### Forward pass of the algorithm
# In[18]:
# Add a boundary point to the data array
def create_edge_point(x, y, k1, k2):
k1 -= 1
k2 -= 1
x2, y2 = find_vertex(x[k1], y[k1], x[k2], y[k2])
    print 'Building the triangle apexes - S1 and S2:'
r = sqrt((x[k1] - x[k2])**2 + (y[k1] - y[k2])**2)
circles = ((x[k1], y[k1], r), (x[k2], y[k2], r))
connections = (k1, k2, (x2[0], y2[0]), (x2[1], y2[1]))
draw(x, y, x2, y2, connect=connections, circle=circles)
if check_edge_point(x, y, x2[0], y2[0], x[k1], y[k1], x[k2], y[k2]):
x.append(x2[0])
y.append(y2[0])
elif check_edge_point(x, y, x2[1], y2[1], x[k1], y[k1], x[k2], y[k2]):
x.append(x2[1])
y.append(y2[1])
else:
raise ArithmeticError('No valid edge points')
    print 'Selected the required boundary vertex P{}:'.format(len(x))
draw(x, y, connect=(k1, k2))
# In[19]:
create_edge_point(x, y, 1, 2)
# In[20]:
create_edge_point(x, y, 3, 4)
# In[21]:
create_edge_point(x, y, 5, 6)
# In[22]:
create_edge_point(x, y, 7, 8)
# In[23]:
def get_simpsons_length(x, y, G):
l = len(G) - 1
k = G[l][2] - 1
x1 = x[k]
y1 = y[k]
x2 = x[l]
y2 = y[l]
return sqrt((x1-x2)**2+(y1-y2)**2)
# In[24]:
sl = get_simpsons_length(x, y, G1)
print 'LMT length along the Simpson line: {}'.format(sl)
# <div style="page-break-after: always;"></div>
# ### Backward pass of the algorithm
# In[25]:
def create_stein_point(x, y, k1, k2, k3, l):
k1 -= 1
k2 -= 1
k3 -= 1
l -= 1
a, b, r = get_center(x[k1], y[k1], x[k2], y[k2], x[k3], y[k3])
    print 'Found the circumscribed circle with radius {} centered at S1:'.format(r)
circles = ((a, b ,r),)
draw(x, y, [a], [b], circle=circles)
x_stein, y_stein = find_intersection(a, b, x[k3], y[k3], x[l], y[l], r)
    # Check the conditions for constructing the LMT
exp1 = is_intersect(x[k1], y[k1], x[k2], y[k2], x[k3], y[k3], x[l], y[l])
exp2 = is_belongs(x_stein, y_stein, x[k3], y[k3], x[l], y[l])
    print 'Found the position of the new Steiner point - S1:'
draw(x, y, [x_stein], [y_stein], connect=(k1, k2), connect2=(k3, (x_stein, y_stein)))
if exp1 and exp2:
print 'Check: OK'
x[k3], y[k3] = x_stein, y_stein
        print 'Constructed Steiner point P{}:'.format(k3+1)
draw(x, y)
else:
raise ArithmeticError('Check failed')
# In[26]:
create_stein_point(x, y, 7, 8, 10, 9)
# In[27]:
create_stein_point(x, y, 5, 6, 9, 10)
# In[28]:
create_stein_point(x, y, 3, 4, 8, 10)
# In[29]:
create_stein_point(x, y, 1, 2, 7, 10)
# ### Computing the LMT length
# In[30]:
def calculate_path_len(x, y, G):
ap = []
path_len = 0
for i in range(len(x)):
for v in G[i]:
if v-1 < len(x) and ((i, v-1) not in ap) and ((v-1, i) not in ap):
r_len = sqrt((x[v-1] - x[i])**2 + (y[v-1] - y[i])**2)
path_len += r_len
ap.append((i, v-1))
                print 'Computed edge P{}P{}: {}'.format(i+1, v, r_len)
return path_len
# In[31]:
loc_len = calculate_path_len(x, y, G1)
print 'Length of the locally minimal tree: {}'.format(loc_len)
# ## General solution
# In[32]:
def solve(l, G):
draw.graph = G
x, y = generate_data(l)
origin_len = len(x)
for i in range(origin_len, len(G), 1):
create_edge_point(x, y, G[i][0], G[i][1])
sl = get_simpsons_length(x, y, G)
    print 'LMT length along the Simpson line: {}'.format(sl)
for i in reversed(range(origin_len, len(G), 1)):
create_stein_point(x, y, G[i][0], G[i][1], i+1, G[i][2])
    print 'Length of the locally minimal tree: {}'.format(calculate_path_len(x, y, G))
# In[33]:
draw.max_x = 10
draw.max_y = 10
# Build everything automatically for G2
solve(l, G2)
# In[ ]:
|
Python
|
CL
|
311b4d4f6d92cf5154a771146b18595b30204816b3cca74476070da99a9afb98
|
'''
self.model
-> train_warm : input trainX, trainY and then opt (old)
-> pred_warm : input X, output Y_hat (old)
-> train_ME : input trainX, trainY and then opt (new)
-> pred_ME : ...
-> get_meta_embedding : ??
-> assign_meta_embedding : ??
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Meta_Model(nn.Module):
def __init__(self, ID_col, item_col, context_col, nb_words, model='FM',
emb_size=128, alpha=0.1,
warm_lr=1e-3, cold_lr=1e-4, ME_lr=1e-3):
super(Meta_Model, self).__init__()
"""
ID_col: string, the column name of the item ID
item_col: list, the columns of item features
context_col: list, the columns of other features
nb_words: dict, nb of words in each of these columns
"""
self.columns = [ID_col] + item_col + context_col
self.ID_col = ID_col
self.item_col = item_col
self.cold_lr = cold_lr
self.alpha = alpha
self.warm_lr = warm_lr
self.ME_lr = ME_lr
self.emb_size = emb_size
'''
*CHOOSE THE BASE MODEL HERE*
'''
self.get_yhat = {
"PNN": self.get_yhat_PNN,
"deepFM": self.get_yhat_deepFM
}[model]
# lookup embedding
self.column2lookup_embedding_layer = dict()
for col in self.columns:
lookup_embedding_layer = nn.Embedding(nb_words[col], emb_size)#.to(device)
lookup_embedding_layer.weight.data.normal_(0, 0.01)
self.column2lookup_embedding_layer[col] = lookup_embedding_layer
self.title_lookup_embedding_layer = nn.Embedding(20001, emb_size)#.to(device)
self.genres_lookup_embedding_layer = nn.Embedding(21, emb_size)#.to(device)
# layer
# self.emb_pred_Dense = nn.Parameter(torch.rand((len(item_col)+ 2)*emb_size,emb_size), requires_grad=True).type(torch.FloatTensor)
# self.register_parameter('emb_predictor' , self.emb_pred_Dense)
self.emb_pred_Dense = nn.Linear((len(item_col)+ 2)*emb_size, emb_size)
feature_num = len(self.columns) + 2
#self.deep0_dense_layer = nn.Parameter(torch.rand(feature_num*emb_size,feature_num*emb_size), requires_grad=True).type(torch.FloatTensor)
#self.register_parameter('deep-0' , self.deep0_dense_layer)
#self.deep1_dense_layer = nn.Parameter(torch.rand(feature_num*emb_size,feature_num*emb_size), requires_grad=True).type(torch.FloatTensor)
#self.register_parameter('deep-1' , self.deep1_dense_layer)
self.deep0_dense_layer = nn.Linear(feature_num*emb_size, feature_num*emb_size)
self.deep1_dense_layer = nn.Linear(feature_num*emb_size, feature_num*emb_size)
#self.out_dense_layer = nn.Parameter(torch.rand((feature_num*emb_size)+len(self.columns),1), requires_grad=True).type(torch.FloatTensor)
#self.register_parameter('out' , self.out_dense_layer)
self.out_dense_layer = nn.Linear((feature_num*emb_size)+feature_num, 1)
# activation layer
self.relu_layer = nn.ReLU()
self.sigmoid_layer = nn.Sigmoid()
def get_yhat_deepFM(self, ID_emb, item_embs, other_embs, **kwargs):
embeddings = [ID_emb] + item_embs + other_embs
embeddings_cat = torch.cat([emb.view(-1,1,self.emb_size) for emb in embeddings], 1) #torch.Size([200, 8, 128])
#print('embeddings_cat : ',embeddings_cat.shape)
sum_of_emb = torch.mean(embeddings_cat, 1) #torch.Size([200, 128])
#print('sum_of_emb : ',sum_of_emb.shape)
diff_of_emb = [sum_of_emb - x for x in embeddings]
dot_of_emb = [torch.sum(embeddings[i]*diff_of_emb[i], axis=1).view(-1,1) for i in range(len(embeddings))]
h = torch.cat(dot_of_emb, 1) #torch.Size([200, 6])
h2 = torch.cat(embeddings, 1) #torch.Size([200, 1024])
h2 = self.relu_layer(self.deep0_dense_layer(h2)) #torch.Size([1024, 1024]) | torch.Size([200, 1024])
h2 = self.relu_layer(self.deep1_dense_layer(h2)) #torch.Size([1024, 1024]) | torch.Size([200, 1024])
h = torch.cat([h,h2], 1) #torch.Size([200, 1030])
#y_hat = self.sigmoid_layer(h.mm(self.out_dense_layer)) #torch.Size([1030, 1]) | torch.Size([200, 1])
y_hat = self.sigmoid_layer(self.out_dense_layer(h)) #torch.Size([1030, 1]) | torch.Size([200, 1])
return y_hat
def get_yhat_PNN(self):
y_hat = None
return y_hat
def get_embeddings(self, batch_x, batch_t, batch_g):
item_embs, other_embs = [], []
for col in self.columns:
lookup_embedding_layer = self.column2lookup_embedding_layer[col]
input_tensor = torch.tensor(list(batch_x[col])).long()
if col==self.ID_col:
ID_emb = lookup_embedding_layer(input_tensor)
elif col in self.item_col:
item_embs.append(lookup_embedding_layer(input_tensor))
else:
other_embs.append(lookup_embedding_layer(input_tensor))
batch_t_tensor = torch.tensor(batch_t).long()
batch_g_tensor = torch.tensor(batch_g).long()
title_emb = self.title_lookup_embedding_layer(batch_t_tensor)
genre_emb = self.genres_lookup_embedding_layer(batch_g_tensor)
item_embs.append(torch.mean(title_emb, axis=1))
item_embs.append(torch.mean(genre_emb, axis=1))
return ID_emb, item_embs, other_embs
def generate_meta_emb(self, item_embs):
"""
This is the simplest architecture of the embedding generator,
with only a dense layer.
You can customize it if you want have a stronger performance,
for example, you can add an l2 regularization term or alter
the pooling layer.
"""
embs = torch.stack(item_embs, 1)
item_h = torch.flatten(embs,1)
#emb_pred = item_h.mm(self.emb_pred_Dense) / 5.
emb_pred = self.emb_pred_Dense(item_h) / 5.
return emb_pred
    def forward(self, batch_x, batch_t, batch_g, cold_loss_a=None, meta_ID_emb=None, warm_or_cold='warm'):
# get lookup embedding
ID_emb, item_embs, other_embs = self.get_embeddings(batch_x, batch_t, batch_g)
# main model
if warm_or_cold == 'warm':
y_hat = self.get_yhat(ID_emb, item_embs, other_embs)
return y_hat
elif warm_or_cold == 'cold':
# Meta-Embedding: step 1, cold-start,
# use the generated meta-embedding to make predictions
# and calculate the cold-start loss_a
if meta_ID_emb is None:
meta_ID_emb = self.generate_meta_emb(item_embs)
self.meta_ID_emb = meta_ID_emb
cold_yhat_a = self.get_yhat(meta_ID_emb, item_embs, other_embs)
return cold_yhat_a
else:
# Meta-Embedding: step 2, apply gradient descent once
# get the adapted embedding
#cold_emb_grads = tf.gradients(cold_loss_a, meta_ID_emb)[0]
cold_emb_grads = torch.autograd.grad(cold_loss_a, meta_ID_emb,retain_graph=True)[0]
meta_ID_emb_new = meta_ID_emb - self.cold_lr * cold_emb_grads
# Meta-Embedding: step 3,
# use the adapted embedding to make prediction on another mini-batch
# and calculate the warm-up loss_b
cold_yhat_b = self.get_yhat(meta_ID_emb_new, item_embs, other_embs)
return cold_yhat_b
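'''
A hedged sketch (the function name and batch variables are illustrative) of how the
cold-start path is meant to be driven, based on the forward() signature above: the
first pass computes loss_a from the generated meta-embedding, the second pass
re-uses model.meta_ID_emb and loss_a to get the adapted prediction for loss_b.
batch_x/batch_t/batch_g/batch_y are assumed to come from the caller's data pipeline.
'''
def meta_embedding_cold_step(model, batch_x, batch_t, batch_g, batch_y, alpha=0.1):
    criterion = nn.BCELoss()
    # step 1: predict with the generated meta-embedding
    yhat_a = model(batch_x, batch_t, batch_g, warm_or_cold='cold')
    loss_a = criterion(yhat_a, batch_y)
    # steps 2-3: one gradient step on the meta-embedding, then predict again
    yhat_b = model(batch_x, batch_t, batch_g,
                   cold_loss_a=loss_a, meta_ID_emb=model.meta_ID_emb,
                   warm_or_cold='cold')
    loss_b = criterion(yhat_b, batch_y)
    # weighted combination of the two losses (alpha as stored on the model by default)
    return alpha * loss_a + (1 - alpha) * loss_b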
|
Python
|
CL
|
3b7d9818a4e998657423e53ea9d683055b26d93dac57e90f38dda14b161425b7
|
'''
Asks if you want to encrypt or decrypt a message.
If you encrypt, it shifts each letter by an inputted number (say, 3).
If you decrypt, it does the exact same thing, but the number is negative, so it decrypts the message.
Alternatively, decrypt and encrypt can be reversed, so that decrypt actually encrypts it and encrypt actually decrypts it.
This works for capital letters and numbers as well.
Furthermore, before the above happens, it scrambles all of the letters using a key that can either be pasted in from a previous session or randomly generated,
further encoding the message.
In addition, this project can now mess with the order of the letters,
from playing the message backwards to adding "garbage" letters that don't mean anything to putting the odd letters first,
then the even letters (example: hello world -> hlowrdel orl).
As such, the message can be much more encrypted than before. This program can now save keys to a text file.
If you save the key to a text file, a file called Encryption will appear in your documents folder if it did not exist already, and all of your keys will appear there.
From there, it is up to you to decide how to label the keys so that you know what key to use for each situation.
You can also specify a pathname, and a file will appear with that pathname. Conversionlength is the length of the key.
It can be anything above 1000 length, but the advisable length is between 1000-10000. The default length is 10000.
NOTE: Any length above 65532 will not work, due to the 32-bit integer limit. It now has a backup system, which will recover the key system if conversionlength is too high or low.
It also has the ability to specify the conversionlength on the shell if desired.
Furthermore, it now can grab keys from files, so you can simply paste the key you want in a text file and use it there instead of inputting it.
The program can now not only put garbage letters on the front and back of the key, but in the middle as well.
It takes the amount of letters, divides them by two, and rounds it down, then puts the garbage letters there. Now has a public/private key system.
Three inputs are required, one to activate the system and two more to input prime numbers.
'''
import importlib
secrets = importlib.import_module('secrets')
conversion=""
def RandInt(a,b):
    '''(integer,integer)->integer
    Returns a cryptographically secure random integer between a and b, inclusive.
    (0,5)->3'''
while True:
c=secrets.randbelow(b+1)
if c>=a:
break
return c
def egcd(a, b):
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
def modinv(a, m):
g, x, y = egcd(a, m)
if g != 1:
raise Exception('modular inverse does not exist')
else:
return x % m
def lcm(x, y):
"""This function takes two
integers and returns the L.C.M."""
# choose the greater number
if x > y:
greater = x
else:
greater = y
while(True):
if((greater % x == 0) and (greater % y == 0)):
lcm = greater
break
greater += 1
return lcm
def PrimeCheck(num):
'''(integer)->Boolean
Checks whether a number is prime or not.
(24)->False
'''
if num > 1:
for i in range(2,num):
if (num % i) == 0:
return False
return True
else:
return False
def RSAKey(prime1,prime2):
'''(integer,integer)->string
Uses the RSA cryptosystem to generate a public and private key.
(61,53)->"The public key is 17, the private key is 413,n is 3233"'''
n=prime1*prime2
L=lcm(prime1-1,prime2-1)
while True:
e=RandInt(1,L)
primecheck3=PrimeCheck(e)
if primecheck3 and L%e!=0:
break
d=modinv(e,L)
return n,e,d
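def RSADemo():
    '''A hedged sketch (never called by the interactive flow below; the function name
    is just for illustration) showing how the RSA helpers above fit together for a
    single short message.'''
    n, e, d = RSAKey(61, 53)  # n = 3233; e and d vary because e is chosen at random
    alphabet = KeyCreator(10000)
    # works here because the per-character alphabet indices of "Hi" are smaller than n
    cipher = RSAEncryption(n, e, "Hi", alphabet)
    plain = RSADecryption(n, d, cipher, alphabet, 10000)
    return plain == "Hi"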
def RSAEncryption(n,EncryptionNumber1,phrase2,alphabet):
phrase3=""
padding=""
for i in phrase2:
padding+=str(alphabet.index(i))
padding=(int(padding)**EncryptionNumber1)%n
phrase3+=str(padding)
phrase3+="."
padding=""
return phrase3
def RSADecryption(n,EncryptionNumber2,phrase2,alphabet,conversionlength):
phrase=""
phrase3=""
for i in phrase2:
if i!=".":
phrase+=i
else:
phrase=(int(phrase)**EncryptionNumber2)%n
phrase3+=alphabet[phrase]
phrase=""
return phrase3
def KeyCreator(conversionlength):
alphabet=[]
for i in range(0,conversionlength+3):
alphabet.append(chr(i))
    #Deletes the newline and carriage-return characters, to avoid trouble inputting the key during another session.
del alphabet[10]
del alphabet[12]
return alphabet
def caesar(phrase,shift,change):
'''(string,int,int)->string
    Takes the phrase, shifts each letter forward by shift, and returns the resulting string. change adjusts the shift by that amount after each letter.
'''
newPhrase=""
for i in phrase:
if i in alphabet:
x=alphabet.index(i)
            # keep the shifted index inside the alphabet bounds
            while x+shift > conversionlength-1:
                shift-=(conversionlength-1)
            while x+shift < 0:
                shift+=(conversionlength-1)
            newPhrase+=alphabet[x+shift]
else:
newPhrase+=i
shift+=change
return newPhrase
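# For example (illustrative only), once `alphabet` and `conversionlength` have been
# set up by the main loop below, caesar("abc", 1, 0) returns "bcd" and
# caesar("bcd", -1, 0) undoes it.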
def VowelsToNumbers(phrase,Vowels,VowelsAsNums):
'''(string,string,string)->string
    Takes the phrase and maps each character through the key (a character found in Vowels is replaced by the character at the same position in VowelsAsNums), then returns the result.
(hello world, helo wrd, ksrnmdqo)->ksrrnmdnqro
'''
vowels=list(Vowels)
vowelsAsNums=list(VowelsAsNums)
newPhrase=""
for i in phrase:
if i in vowels:
newPhrase+=vowelsAsNums[vowels.index(i)]
else:
newPhrase+=i
return newPhrase
def ScramblingDecode(phrase3):
'''(string)->string
Decodes the string by finding where the odd and even letter meet, then putting the min their proper place.
(hlowrdel ol)->hello world
'''
if int(len(phrase3))//2==int(len(phrase3))/2:
part=int(len(phrase3))//2
else:
part=int(len(phrase3))//2+1
phrasepart1=phrase3[:part]
phrasepart2=phrase3[part:]
phrase3=""
#Adds the two phrases together
for i in range(0,int(len(phrasepart1))):
phrase3+=phrasepart1[i]
#If the phrase has an even number of characters, adds the last character. Otherwise, it causes an error and stops.
try:
phrase3+=phrasepart2[i]
except IndexError:
#Do nothing in the event that an error occurs, as nothing needs to be done
j="j"
return phrase3
def ScramblingEncode(phrase2):
'''(string)->string
Encodes the word by separating the word into odd and even letters, and putting those odd and even portions together.
(hello world)->hlowrdel ol
'''
phrasepart1=phrase2[::2]
phrasepart2=phrase2[1::2]
phrase2=phrasepart1+phrasepart2
return phrase2
def Garbage(garbage1,garbage2,garbage3,phrasePart1,phrasePart2):
    '''(int,int,int,string,string)->string
    Takes the two halves of the phrase and adds random letters to the beginning, end, and middle of it. The amount of letters is garbage1 for the beginning, garbage2 for the end, and garbage3 for the middle (between the two halves).
    (3,5,0,hello ,world)->qerhello worldjskel
'''
for i in range(0,garbage3):
phrasePart1+=secrets.choice(alphabet)
phrase2=""
for i in range(0,garbage1):
phrase2+=secrets.choice(alphabet)
phrase2+=phrasePart1
phrase2+=phrasePart2
for i in range(0,garbage2):
phrase2+=secrets.choice(alphabet)
return phrase2
def Length(phrase,Garbage3Part2):
    '''(string,int)->int
    Returns half of the phrase length minus Garbage3Part2, rounded down.
    (hello world, 1)->5
'''
return int((len(phrase)-Garbage3Part2)//2)
def GarbageDecrypt(phrase2,Garbage1Part2,Garbage2Part2,Garbage3Part2,garbage3):
if Garbage1Part2 != 0 and Garbage2Part2 != 0 and Garbage3Part2 !=0:
phrase2=phrase2[Garbage1Part2:-Garbage2Part2]
halfway=Length(phrase2,Garbage3Part2)
phrase2Part1=phrase2[:halfway]
phrase2Part2=phrase2[halfway+Garbage3Part2:]
phrase2=phrase2Part1+phrase2Part2
elif Garbage1Part2!=0 and Garbage2Part2 !=0:
phrase2=phrase2[Garbage1Part2:-Garbage2Part2]
elif Garbage1Part2 != 0 and Garbage3Part2 !=0:
phrase2=phrase2[Garbage1Part2:]
halfway=Length(phrase2,Garbage3Part2)
phrase2Part1=phrase2[:halfway]
phrase2Part2=phrase2[halfway+Garbage3Part2:]
phrase2=phrase2Part1+phrase2Part2
elif Garbage2Part2 !=0 and Garbage3Part2 !=0:
phrase2=phrase2[:-Garbage2Part2]
halfway=Length(phrase2,Garbage3Part2)
phrase2Part1=phrase2[:halfway]
phrase2Part2=phrase2[halfway+Garbage3Part2:]
phrase2=phrase2Part1+phrase2Part2
elif Garbage1Part2 != 0:
phrase2=phrase2[Garbage1Part2:]
elif Garbage2Part2 !=0:
phrase2=phrase2[:-Garbage2Part2]
elif Garbage3Part2 !=0:
halfway=Length(phrase2,Garbage3Part2)
phrase2Part1=phrase2[:halfway]
phrase2Part2=phrase2[halfway+Garbage3Part2:]
phrase2=phrase2Part1+phrase2Part2
return phrase2
def EncryptionPhase(phrase,phrase2,garbage1,garbage2,garbage3,Garbage1Part1,Garbage2Part1,Garbage1Part2,Garbage2Part2,Garbage3Part1,Garbage3Part2,scramble,backward,shift,change1,change2,alphabet,conversion,SystemKey,n,EncryptionNumber1):
'''Encrypts the message.'''
    '''The phrase2 parameter is unused; it is kept only so the call signature stays unchanged.'''
halfway=Length(phrase,0)
if backward=="y":
phrase2=phrase[::-1]
else:
phrase2=phrase
phrase2=Garbage(Garbage1Part1,Garbage2Part1,Garbage3Part1,phrase2[:halfway],phrase2[halfway:])
if scramble.lower()=="y":
phrase2=ScramblingEncode(phrase2)
phrase2=VowelsToNumbers(phrase2,alphabet,conversion)
phrase2=caesar(phrase2,shift,change1)
phrase2=VowelsToNumbers(phrase2,alphabet,conversion)
if scramble.lower()=="y":
phrase2=ScramblingEncode(phrase2)
halfway=Length(phrase2,0)
phrase2=Garbage(Garbage1Part2,Garbage2Part2,Garbage3Part2,phrase2[:halfway],phrase2[halfway:])
if SystemKey.lower()=="y":
phrase2=RSAEncryption(n,EncryptionNumber1,phrase2,alphabet)
phrase2=VowelsToNumbers(phrase2,alphabet,conversion)
phrase2=caesar(phrase2,shift,change2)
phrase2=VowelsToNumbers(phrase2,alphabet,conversion)
return phrase2
def DecryptionPhase(phrase,phrase2,garbage1,garbage2,Garbage1Part1,garbage3,Garbage2Part1,Garbage1Part2,Garbage2Part2,Garbage3Part1,Garbage3Part2,scramble,backward,shift,change1,change2,alphabet,conversion,SystemKey,n,EncryptionNumber2,conversionlength):
'''Decrypts the code'''
    '''The phrase parameter is unused; it is kept only so the call signature stays unchanged.'''
phrase2=VowelsToNumbers(phrase2,conversion,alphabet)
phrase2=caesar(phrase2, -shift,-change2)
phrase2=VowelsToNumbers(phrase2,conversion,alphabet)
if SystemKey.lower()=="y":
phrase2=RSADecryption(n,EncryptionNumber2,phrase2,alphabet,conversionlength)
phrase2=GarbageDecrypt(phrase2,Garbage1Part2,Garbage2Part2,Garbage3Part2,garbage3)
if scramble.lower()=="y":
phrase2=ScramblingDecode(phrase2)
phrase2=VowelsToNumbers(phrase2,conversion,alphabet)
phrase2=caesar(phrase2, -shift,-change1)
phrase2=VowelsToNumbers(phrase2,conversion,alphabet)
if scramble.lower()=="y":
phrase2=ScramblingDecode(phrase2)
phrase2=GarbageDecrypt(phrase2,Garbage1Part1,Garbage2Part1,Garbage3Part1,garbage3)
if backward=="y":
phrase2=phrase2[::-1]
return phrase2
while True:
conversionlength=10000
while True:
try:
conversionlength=int(input("\nHow long do you want the key to be? (1000-65532) "))
if conversionlength<=999 or conversionlength>=65533:
print("Please check your input.")
else:
break
except:
print("\nIt has to be an integer.")
try:
keylist=KeyCreator(conversionlength)
alphabet=KeyCreator(conversionlength)
except Exception as e:
print(e)
#This is the backup system in case conversionlength doesn't work, DO NOT CHANGE.
print("\nAn error occured. Please check to see if conversionlength is below 1000 or above 65532.")
keylist=KeyCreator(10000)
alphabet=KeyCreator(10000)
conversionlength=10000
#Asks if you want to randomly generate a key or use an input. If you select input, it asks you whether to input a key from scratch or use the previous key. If there is no previous key, asks you to generate a key from scratch. #DO NOT USE A KEY THAT WAS NOT GENERATED BY THIS PROGRAM. IF YOU CREATE A KEY MANUALLY, THE PROGRAM WILL NOT WORK PROPERLY.
key=input("\nDo you want to use an inputed key for the encryption? (WARNING: YOU MUST USE AN INPUTTED KEY THAT HAS BEEN RANDOMLY GENERATED BY THIS PROGRAM) Y/N ")
if key.lower()=="y":
if conversion!="":
key=input("\nUse the previous key? Y/N ")
else:
key="n"
if key.lower()=="n":
conversion=""
key=input("\nUse a text file for a key? Y/N ")
if key.lower()=="y":
while True:
try:
#.txt needs to be appended after the filename, even if .txt is in the filename itself.
key2=input("\nInput the path of the file: ")
key3=open(key2, encoding="utf8")
conversion=key3.read()
if int(len(conversion))!=conversionlength:
conversionlength=int(len(conversion))
alphabet=KeyCreator(conversionlength)
print("\nThe alphabet is: \n"+ "".join(alphabet))
print("\nThe key is: \n"+ conversion)
break
except Exception as e:
print(e)
print("That file does not exist, or there was an error with opening the file. Be sure to only type the file name and not any file extensions, and check the error message printed.")
else:
while len(conversion)!=conversionlength:
conversion=input("\nInput your key here: ")
print(len(conversion))
else:
conversion=""
for i in range(0,conversionlength+1):
variable=secrets.choice(keylist)
keylist.remove(variable)
conversion+=variable
print("\nThe alphabet is: \n"+ "".join(alphabet))
print("\nThe key is: \n"+ conversion)
while True:
purpose=input("\nDo you want to encrypt or decrypt? E/D ")
if purpose.lower()=="e":
purpose="encrypt"
if purpose.lower()=="d":
purpose="decrypt"
if purpose.lower()=="encrypt" or purpose.lower()=="decrypt":
break
else:
print("\nYou must specify to either encrypt or decrypt.")
phrase=input("\nGive a phrase: ")
while True:
try:
shift=int(input("\nHow many places to shift? "))
change=int(input("\nHow many places to shift after each letter? "))
break
except ValueError:
print("\nYou must specify a number.")
while True:
SystemKey=input("\nUse the public/private key system? Y/N ")
if SystemKey.lower()=="y":
RSACreator=input("\nGenerate a new public/private key, or use an old one? N/O ")
if RSACreator.lower()!="o":
while True:
try:
prime1=int(input("\nWhat number is the first prime? "))
prime2=int(input("\nWhat number is the second prime? "))
primecheck1=PrimeCheck(prime1)
primecheck2=PrimeCheck(prime2)
if primecheck1 and primecheck2:
n,e,d=RSAKey(prime1,prime2)
print("\nThe public key is",e, "the private key is", d, " and n is",n)
if n>=conversionlength:
EncryptionNumber1=e
EncryptionNumber2=d
break
else:
print("\nn must be greater than or equal to conversionlength.")
else:
print("\nOne of the numbers inputted is not prime. Please check your input.")
except ValueError:
print("\nYou must specify a number.")
else:
while True:
try:
n=int(input("Input integer n. "))
if purpose.lower()=="encrypt":
EncryptionNumber1=int(input("\nPlease input the public key to encrypt the program. "))
else:
EncryptionNumber2=int(input("Please input the private key to decrypt the program. "))
break
except:
print("Please check your input.")
else:
n=0
EncryptionNumber1=0
EncryptionNumber2=0
break
while True:
#Asks the user to scramble letters, play the message backwards and add garbage letters to the beginning and end of the phrase.
scramble=input("\nScramble letters? Y/N ")
backward=input("\nRepeat message backwards? Y/N ")
try:
garbage1=int(input("\nHow many garbage letters to put at the beginning of the word? (0-infinity) "))
garbage2=int(input("\nHow many garbage letters to put at the end of the word? (0-infinity) "))
garbage3=int(input("\nHow many garbage letters to put in the middle of the word? (0-infinity) "))
if garbage1>=0 and garbage2>=0 and garbage3 >=0:
if scramble.lower()=="y" or scramble.lower()=="n":
if backward.lower()=="y" or backward.lower()=="n":
break
else:
print("\nBackwards needs to be either Y or N.")
else:
print("\nScramble needs to be either Y or N.")
else:
print("\nGarbage1 and Garbage2 and Garbage3 need to be greater than or equal to zero.")
except:
print("\nPlease check your input.")
#Checks to see if the process can be encrypted correctly. If it works, it encrypts the phrase. If it is decrypted, the process is reversed.
#If it doesn't encrypt & decrypt correctly, it sends a message that the message cannot be encrypted correctly.
phrase2=phrase
Garbage1Part2=garbage1//2
Garbage1Part1=garbage1-Garbage1Part2
Garbage2Part2=garbage2//2
Garbage2Part1=garbage2-Garbage2Part2
Garbage3Part2=garbage3//2
Garbage3Part1=garbage3-Garbage3Part2
change2=change//2
change1=change-change2
if purpose.lower() == "encrypt":
Check=input("\nCheck to see if the encryption works correctly? Y/N ")
else:
Check = "n"
#NOTE: You need to have knowledge of both the public and private keys in order to check the encryption if the RSA encryption is enabled. Otherwise it doesn't work.
if Check.lower()!="n" and SystemKey.lower()=="y"and EncryptionNumber1==0 and EncryptionNumber2==0:
EncryptionNumber1=int(input("\nPlease input the public key to encrypt the program. "))
EncryptionNumber2=int(input("\nPlease input the private key to decrypt the program. "))
#This is where the encoding and decoding begins.
if Check.lower()=="y":
phrase2=EncryptionPhase(phrase,phrase2,garbage1,garbage2,garbage3,Garbage1Part1,Garbage2Part1,Garbage1Part2,Garbage2Part2,Garbage3Part1,Garbage3Part2,scramble,backward,shift,change1,change2,alphabet,conversion,SystemKey,n,EncryptionNumber1)
phrase2=DecryptionPhase(phrase,phrase2,garbage1,garbage2,Garbage1Part1,garbage3,Garbage2Part1,Garbage1Part2,Garbage2Part2,Garbage3Part1,Garbage3Part2,scramble,backward,shift,change1,change2,alphabet,conversion,SystemKey,n,EncryptionNumber2,conversionlength)
if phrase2==phrase:
#phrase2=""
#If decrypt is selected, decrypt code
if purpose.lower()=="decrypt":
phrase2=DecryptionPhase(phrase,phrase2,garbage1,garbage2,Garbage1Part1,garbage3,Garbage2Part1,Garbage1Part2,Garbage2Part2,Garbage3Part1,Garbage3Part2,scramble,backward,shift,change1,change2,alphabet,conversion,SystemKey,n,EncryptionNumber2,conversionlength)
print("\n"+phrase2)
else:
#Otherwise, encrypt code
phrase2=EncryptionPhase(phrase,phrase2,garbage1,garbage2,garbage3,Garbage1Part1,Garbage2Part1,Garbage1Part2,Garbage2Part2,Garbage3Part1,Garbage3Part2,scramble,backward,shift,change1,change2,alphabet,conversion,SystemKey,n,EncryptionNumber1)
print("\n"+phrase2)
else:
print("\n"+phrase2)
print("\nThe message does not encrypt properly.")
#Asks if you want to make another message. If you answer yes, repeats the whole script over again.
save=input("\nDo you want to save the key to a text file? Y/N ")
if save.lower()=="y":
try:
ask2=input("Input a pathname? Y/N ")
if ask2.lower()=="y":
ask=input("Please input a pathname for a text file. ")
#The pathname for the file doesn't have to exist, it just needs to be in the right syntax.
#An example of good syntax is: C:/Users/Pa Cyber/Documents/Encryption.txt
else:
ask='C:/Users/Pa Cyber/Documents/Encryption.txt'
test=open(ask,"ab")
test.write(conversion.encode('utf-8'))
test.close()
print("\nKey saved.")
except Exception as e:
print(e)
print("\nKey save failed.")
x=input("\nDo you want to make another message? Y/N ")
if x.lower()=="n":
break
|
Python
|
CL
|
b0915d9901db326d61822aba1b02d08f7f488694b73b7ccc6636445677f30c67
|
# Goal of this file is to run a basic Feathr script within spark so that Maven packages can be downloaded into the docker container to save time during actual run.
# This can also serve as a sanity check
import os
import tempfile
from datetime import datetime
import pandas as pd
from feathr import FeathrClient
from feathr import BOOLEAN, FLOAT, INT32, ValueType
from feathr import Feature, DerivedFeature, FeatureAnchor
from feathr import FeatureQuery, ObservationSettings
from feathr import INPUT_CONTEXT, HdfsSource
from feathr import WindowAggTransformation
from feathr import TypedKey
from pyspark.sql import DataFrame
import feathr
from pathlib import Path
print(feathr.__version__)
os.environ['SPARK_LOCAL_IP'] = "127.0.0.1"
os.environ['REDIS_PASSWORD'] = "foobared" # default password for Redis
# Make sure we get the Feathr jar name, assuming we just have one jar file.
import glob
jar_name = glob.glob("./*.jar")[0]
print(f"Found jar file at {jar_name}")
yaml_config = f"""
api_version: 1
project_config:
project_name: 'local_spark'
spark_config:
# choice for spark runtime. Currently support: azure_synapse, databricks, local
spark_cluster: 'local'
spark_result_output_parts: '1'
local:
master: 'local[*]'
feathr_runtime_location: "{jar_name}"
online_store:
redis:
# Redis configs to access Redis cluster
host: '127.0.0.1'
port: 6379
ssl_enabled: False
feature_registry:
# The API endpoint of the registry service
api_endpoint: "http://127.0.0.1:8000/api/v1"
"""
feathr_workspace_folder = Path("./feathr_config.yaml")
feathr_workspace_folder.parent.mkdir(exist_ok=True, parents=True)
feathr_workspace_folder.write_text(yaml_config)
client = FeathrClient(str(feathr_workspace_folder))
DATA_FILE_PATH = "/tmp/green_tripdata_2020-04_with_index.csv"
from feathr.datasets.utils import maybe_download
from feathr.datasets.constants import NYC_TAXI_SMALL_URL
maybe_download(src_url=NYC_TAXI_SMALL_URL, dst_filepath=DATA_FILE_PATH)
TIMESTAMP_COL = "lpep_dropoff_datetime"
TIMESTAMP_FORMAT = "yyyy-MM-dd HH:mm:ss"
def preprocessing(df: DataFrame) -> DataFrame:
import pyspark.sql.functions as F
df = df.withColumn("fare_amount_cents",
(F.col("fare_amount") * 100.0).cast("float"))
return df
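# This UDF is passed to HdfsSource below and runs inside the Spark job before the features are computed.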
batch_source = HdfsSource(
name="nycTaxiBatchSource",
path=DATA_FILE_PATH,
event_timestamp_column=TIMESTAMP_COL,
preprocessing=preprocessing,
timestamp_format=TIMESTAMP_FORMAT,
)
# We define f_trip_distance and f_trip_time_duration features separately
# so that we can reuse them later for the derived features.
f_trip_distance = Feature(
name="f_trip_distance",
feature_type=FLOAT,
transform="trip_distance",
)
f_trip_time_duration = Feature(
name="f_trip_time_duration",
feature_type=FLOAT,
transform="cast_float((to_unix_timestamp(lpep_dropoff_datetime) - to_unix_timestamp(lpep_pickup_datetime)) / 60)",
)
features = [
f_trip_distance,
f_trip_time_duration,
Feature(
name="f_is_long_trip_distance",
feature_type=BOOLEAN,
transform="trip_distance > 30.0",
),
Feature(
name="f_day_of_week",
feature_type=INT32,
transform="dayofweek(lpep_dropoff_datetime)",
),
Feature(
name="f_day_of_month",
feature_type=INT32,
transform="dayofmonth(lpep_dropoff_datetime)",
),
Feature(
name="f_hour_of_day",
feature_type=INT32,
transform="hour(lpep_dropoff_datetime)",
),
]
# After you have defined features, bring them together to build the anchor to the source.
feature_anchor = FeatureAnchor(
name="feature_anchor",
source=INPUT_CONTEXT, # Pass through source, i.e. observation data.
features=features,
)
agg_key = TypedKey(
key_column="DOLocationID",
key_column_type=ValueType.INT32,
description="location id in NYC",
full_name="nyc_taxi.location_id",
)
agg_window = "90d"
# Anchored features with aggregations
agg_features = [
Feature(
name="f_location_avg_fare",
key=agg_key,
feature_type=FLOAT,
transform=WindowAggTransformation(
agg_expr="fare_amount_cents",
agg_func="AVG",
window=agg_window,
),
),
Feature(
name="f_location_max_fare",
key=agg_key,
feature_type=FLOAT,
transform=WindowAggTransformation(
agg_expr="fare_amount_cents",
agg_func="MAX",
window=agg_window,
),
),
]
agg_feature_anchor = FeatureAnchor(
name="agg_feature_anchor",
# External data source for feature. Typically a data table.
source=batch_source,
features=agg_features,
)
f_trip_time_distance = DerivedFeature(name="f_trip_time_distance",
feature_type=FLOAT,
input_features=[
f_trip_distance, f_trip_time_duration],
transform="f_trip_distance * f_trip_time_duration")
f_trip_time_rounded = DerivedFeature(name="f_trip_time_rounded",
feature_type=INT32,
input_features=[f_trip_time_duration],
transform="f_trip_time_duration % 10")
derived_feature = [f_trip_time_distance, f_trip_time_rounded]
client.build_features(
anchor_list=[feature_anchor, agg_feature_anchor],
derived_feature_list=derived_feature,
)
feature_names = [feature.name for feature in features + agg_features]
print(feature_names)  # originally a bare notebook-cell expression; print it explicitly when run as a script
# Try to register the service after the spark run (so that the Feathr API can start with sufficient time)
try:
client.register_features()
except Exception as e:
print(e)
print(client.list_registered_features(project_name=client.project_name))
now = datetime.now().strftime("%Y%m%d%H%M%S")
offline_features_path = os.path.join("debug", f"test_output_{now}")
# Features that we want to request. Can use a subset of features
query = FeatureQuery(
feature_list=feature_names,
key=agg_key,
)
settings = ObservationSettings(
observation_path=DATA_FILE_PATH,
event_timestamp_column=TIMESTAMP_COL,
timestamp_format=TIMESTAMP_FORMAT,
)
client.get_offline_features(
observation_settings=settings,
feature_query=query,
output_path=offline_features_path,
)
client.wait_job_to_finish(timeout_sec=5000)
from feathr.utils.job_utils import get_result_df
res_df = get_result_df(client)
print(res_df.head())
|
Python
|
CL
|
52538d44d41b2bab2ea0e3e973901926cc5b389fdecef192e82338f34070635a
|
"""
This module defines a few essential variables for the system.
To use the configuration variables in different modules import the constant you want.
The value of the variables has to be persistent, therefore we use pickle to keep it alive.
The pickle files are inside bins/ folder.
Functions in this module change configurations persistently.
> Example
from config import SOURCES
DON'T CHANGE THE VALUES IN THIS MODULE UNLESS YOU KNOW WHAT YOU'RE DOING.
"""
import os
import pickle # uses pickle to save values
from news_searcher.settings import BASE_DIR
SOURCES = pickle.load(open(os.path.join(BASE_DIR, "interface", "src", "bins", "sources.bin"), "rb"))
TERMS = pickle.load(open(os.path.join(BASE_DIR, "interface", "src", "bins", "terms.bin"), "rb"))
# Words of interest and their respective weight value.
# Weights are ranged from 0 to 5.
# There are 7 diffent 'interest categories', so formatting is like follows:
# word : [ [synonyms], sci/tech, politics, economics, dissemination, impact, severity, current interest]
# every key is a word of interest in an article.
#TERMS = {
# 'aids' : [['hiv'], 0, 0, 0, 1, 1, 5, 3],
# 'botulismo' : [[], 0, 0, 0, 1, 1, 4, 1],
# 'dengue' : [[], 0, 0, 0, 1, 1, 3, 3],
# 'dst' : [['std'], 0, 0, 0, 1, 1, 1, 2]
#}
def updateKey(new_key):
"""
Updates API KEY to new_key.
Saves changes in pickle file.
"""
pickle.dump(new_key, open(os.path.join(BASE_DIR,"interface", "src", "bins", "api-key.bin"), "wb"))
# add a source
def addSource(source):
"""
Appends new source to SOURCES.
Saves changes in pickle file.
"""
SOURCES.append(str(source))
pickle.dump(SOURCES, open(os.path.join(BASE_DIR,"interface", "src", "bins", "sources.bin"), "wb"))
return SOURCES
# remove a source
def removeSource(source):
"""
Removes source from SOURCES.
Saves changes in pickle file.
"""
SOURCES.remove(str(source))
pickle.dump(SOURCES, open(os.path.join(BASE_DIR,"interface", "src", "bins", "sources.bin"), "wb"))
return SOURCES
# add a new term
def addTerm(term, sinonimos, t, p, e, d, i, s, c):
"""
Adds new term to TERMS.
Saves changes in pickle file.
"""
TERMS[str(term)] = [sinonimos, int(t), int(p), int(e), int(d), int(i), int(s), int(c)]
pickle.dump(TERMS, open(os.path.join(BASE_DIR,"interface", "src", "bins", "terms.bin"), "wb"))
return TERMS
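# Example call (hypothetical values, following the TERMS layout documented above):
# addTerm('zika', ['zikv'], 0, 0, 0, 1, 1, 4, 3)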
# remove a term
def removeTerm(term):
"""
Removes term from TERMS.
Saves changes in pickle file.
"""
TERMS.pop(str(term))
pickle.dump(TERMS, open(os.path.join(BASE_DIR,"interface", "src", "bins", "terms.bin"), "wb"))
return TERMS
def updateBD(url, key):
"""
Updates BD access parameters.
"""
pickle.dump((url, key), open(os.path.join(BASE_DIR, 'interface', 'src', 'bins', 'bd.bin'), 'wb'))
return (url, key)
def BD_INFO():
"""
Returns BD_URL and BD_PASSWD
"""
return pickle.load(open(os.path.join(BASE_DIR, 'interface', 'src', 'bins', 'bd.bin'), 'rb'))
|
Python
|
CL
|
1e6c12afc04b9aecb643b6bae015d9309c4bb720974e0663729d4406cc58aef5
|
# -*- coding: utf-8 -*-
"""VEM BOLD Constrained
File that contains function for BOLD data analysis with positivity
and l2-norm=1 constraints.
It imports functions from vem_tools.py in pyhrf/vbjde
"""
import time
import copy
import logging
import os
import os.path as op
import numpy as np
import pyhrf
import pyhrf.vbjde.vem_tools as vt
from pyhrf.boldsynth.hrf import getCanoHRF, genGaussianSmoothHRF
from pyhrf.sandbox.physio_params import (PHY_PARAMS_KHALIDOV11,
linear_rf_operator,
create_physio_brf,
create_physio_prf)
import matplotlib
import matplotlib.pyplot as plt
try:
os.environ["DISPLAY"]
except KeyError:
matplotlib.use("Agg")
plt.switch_backend("Agg")
else:
try:
matplotlib.use("Qt4Agg")
plt.switch_backend("Qt4Agg")
except ImportError:
pass
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
eps = 1e-6
#@profile
def Main_vbjde_physio(graph, Y, Onsets, durations, Thrf, K, TR, beta, dt,
scale=1, estimateSigmaH=True, estimateSigmaG=True,
sigmaH=0.05, sigmaG=0.05, gamma_h=0, gamma_g=0,
NitMax=-1, NitMin=1, estimateBeta=True, PLOT=False,
contrasts=[], computeContrast=False,
idx_first_tag=0, simulation=None, sigmaMu=None,
estimateH=True, estimateG=True, estimateA=True,
estimateC=True, estimateZ=True, estimateNoise=True,
estimateMP=True, estimateLA=True, use_hyperprior=False,
positivity=False, constraint=False,
phy_params=PHY_PARAMS_KHALIDOV11, prior='omega', zc=False):
logger.info("EM for ASL!")
np.random.seed(6537540)
logger.info("data shape: ")
logger.info(Y.shape)
Thresh = 1e-5
D, M = np.int(np.ceil(Thrf / dt)) + 1, len(Onsets)
#D, M = np.int(np.ceil(Thrf / dt)), len(Onsets)
n_sess, N, J = Y.shape[0], Y.shape[1], Y.shape[2]
Crit_AH, Crit_CG, cTime, rerror, FE = 1, 1, [], [], []
EP, EPlh, Ent = [],[],[]
Crit_H, Crit_G, Crit_Z, Crit_A, Crit_C = 1, 1, 1, 1, 1
cAH, cCG, AH1, CG1 = [], [], [], []
cA, cC, cH, cG, cZ = [], [], [], [], []
h_norm, g_norm = [], []
SUM_q_Z = [[] for m in xrange(M)]
mua1 = [[] for m in xrange(M)]
muc1 = [[] for m in xrange(M)]
sigmaH = sigmaH * J / 100
print sigmaH
gamma_h = gamma_h * 100 / J
print gamma_h
# Beta data
MaxItGrad = 200
gradientStep = 0.005
gamma = 7.5
print 'gamma = ', gamma
print 'voxels = ', J
maxNeighbours, neighboursIndexes = vt.create_neighbours(graph, J)
print 'graph.shape = ', graph.shape
# Conditions
print 'Onsets: ', Onsets
print 'durations = ', durations
print 'creating conditions...'
X, XX, condition_names = vt.create_conditions_block_ms(Onsets, durations,
M, N, D, n_sess, TR, dt)
# Covariance matrix
#R = vt.covariance_matrix(2, D, dt)
_, R_inv = genGaussianSmoothHRF(zc, D, dt, 1., 2)
R = np.linalg.inv(R_inv)
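# When zero-constrained (zc), the first and last HRF coefficients are forced to zero,
# so the design tensors and the HRF length D are trimmed accordingly below.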
if zc:
XX = XX[:, :, :, 1:-1] # XX shape (S, M, N, D)
D = D - 2
AH1, CG1 = np.zeros((J, M, D)), np.zeros((J, M, D))
print 'HRF length = ', D
print 'Condition number = ', M
print 'Number of scans = ', N
print 'Number of voxels = ', J
print 'Number of sessions = ', n_sess
print 'XX.shape = ', XX.shape
# Noise matrix
Gamma = np.identity(N)
# Noise initialization
sigma_eps = np.ones((n_sess, J))
# Labels
logger.info("Labels are initialized by setting active probabilities "
"to ones ...")
q_Z = np.ones((M, K, J), dtype=np.float64) / 2.
#q_Z = np.zeros((M, K, J), dtype=np.float64)
#q_Z[:, 1, :] = 1
q_Z1 = copy.deepcopy(q_Z)
Z_tilde = copy.deepcopy(q_Z)
# H and G
TT, m_h = getCanoHRF(Thrf, dt)
H = np.array(m_h[:D]).astype(np.float64)
H /= np.linalg.norm(H)
Hb = create_physio_brf(phy_params, response_dt=dt, response_duration=Thrf)
Hb /= np.linalg.norm(Hb)
if prior=='balloon':
H = Hb.copy()
H1 = copy.deepcopy(H)
Sigma_H = np.zeros((D, D), dtype=np.float64)
# Initialize model parameters
Beta = beta * np.ones((M), dtype=np.float64)
n_drift = 4
P = np.zeros((n_sess, N, n_drift+1), dtype=np.float64)
L = np.zeros((n_drift+1, J, n_sess), dtype=np.float64)
for s in xrange(0, n_sess):
P[s, :, :] = vt.PolyMat(N, n_drift, TR)
L[:, :, s] = vt.polyFit(Y[s, :, :], TR, n_drift, P[s, :, :])
print 'P shape = ', P.shape
print 'L shape = ', L.shape
WP = P.copy()
AL = L.copy()
PL = np.einsum('ijk,kli->ijl', P, L)
y_tilde = Y - PL
# Parameters Gaussian mixtures
mu_Ma = np.append(np.zeros((M, 1)), np.ones((M, 1)), axis=1).astype(np.float64)
sigma_Ma = np.ones((M, K), dtype=np.float64) * 0.3
# Params RLs
m_A = np.zeros((n_sess, J, M), dtype=np.float64)
for s in xrange(0, n_sess):
for j in xrange(0, J):
m_A[s, j, :] = (np.random.normal(mu_Ma, np.sqrt(sigma_Ma)) * q_Z[:, :, j]).sum(axis=1).T
m_A1 = m_A.copy()
Sigma_A = np.ones((M, M, J, n_sess)) * np.identity(M)[:, :, np.newaxis, np.newaxis]
G = np.zeros_like(H)
m_C = np.zeros_like(m_A)
Sigma_G = np.zeros_like(Sigma_H)
Sigma_C = np.zeros_like(Sigma_A)
mu_Mc = np.zeros_like(mu_Ma)
sigma_Mc = np.ones_like(sigma_Ma)
W = np.zeros_like(Gamma) # (N, N)
# Precomputations
print 'W shape is ', W.shape
WX = W.dot(XX).transpose(1, 2, 0, 3) # shape (S, M, N, D)
Gamma_X = np.zeros((N, n_sess, M, D), dtype=np.float64) # shape (N, S, M, D)
X_Gamma_X = np.zeros((D, M, n_sess, M, D), dtype=np.float64) # shape (D, M, S, M, D)
Gamma_WX = np.zeros((N, n_sess, M, D), dtype=np.float64) # shape (N, S, M, D)
XW_Gamma_WX = np.zeros((D, M, n_sess, M, D), dtype=np.float64) # shape (D, M, S, M, D)
Gamma_WP = np.zeros((N, n_sess, n_drift+1), dtype=np.float64) # shape (N, S, M, D)
WP_Gamma_WP = np.zeros((n_sess, n_drift+1, n_drift+1), dtype=np.float64) # shape (D, M, S, M, D)
for s in xrange(0, n_sess):
Gamma_X[:, s, :, :] = np.tensordot(Gamma, XX[s, :, :, :], axes=(1, 1))
X_Gamma_X[:, :, s, :, :] = np.tensordot(XX[s, :, :, :].T, Gamma_X[:, s, :, :], axes=(1, 0))
Gamma_WX[:, s, :, :] = np.tensordot(Gamma, WX[s, :, :, :], axes=(1, 1))
XW_Gamma_WX[:, :, s, :, :] = np.tensordot(WX[s, :, :, :].T, Gamma_WX[:, s, :, :], axes=(1, 0))
Gamma_WP[:, s, :] = Gamma.dot(WP[s, :, :]) # (N, n_drift)
WP_Gamma_WP[s, :, :] = WP[s, :, :].T.dot(Gamma_WP[:, s, :]) # (n_drift, n_drift)
sigma_eps_m = np.maximum(sigma_eps, eps) # (n_sess, J)
cov_noise = sigma_eps_m[:, :, np.newaxis, np.newaxis] # (n_sess, J, 1, 1)
###########################################################################
############################################# VBJDE
t1 = time.time()
ni = 0
#while ((ni < NitMin + 1) or (((Crit_AH > Thresh) or (Crit_CG > Thresh)) \
# and (ni < NitMax))):
#while ((ni < NitMin + 1) or (((Crit_AH > Thresh)) \
# and (ni < NitMax))):
while ((ni < NitMin + 1) or (((Crit_FE > Thresh * np.ones_like(Crit_FE)).any()) \
and (ni < NitMax))):
logger.info("-------- Iteration n° " + str(ni + 1) + " --------")
if PLOT and ni >= 0: # Plotting HRF and PRF
logger.info("Plotting HRF and PRF for current iteration")
vt.plot_response_functions_it(ni, NitMin, M, H, G)
# Managing types of prior
priorH_cov_term = np.zeros_like(R_inv)
matrix_covH = R_inv.copy()
if prior=='balloon':
logger.info(" prior balloon")
#matrix_covH = np.eye(R_inv.shape[0], R_inv.shape[1])
priorH_mean_term = np.dot(matrix_covH / sigmaH, Hb)
else:
logger.info(" NO prior")
priorH_mean_term = np.zeros_like(H)
priorG_mean_term = np.zeros_like(G)
#####################
# EXPECTATION
#####################
# HRF H
if estimateH:
logger.info("E H step ...")
Ht, Sigma_H = vt.expectation_H_ms(Sigma_A, m_A, m_C, G, XX, W, Gamma,
Gamma_X, X_Gamma_X, J, y_tilde,
cov_noise, matrix_covH, sigmaH,
priorH_mean_term, priorH_cov_term, N, M, D, n_sess)
if constraint:
if not np.linalg.norm(Ht)==1:
logger.info(" constraint l2-norm = 1")
H = vt.constraint_norm1_b(Ht, Sigma_H)
#H = Ht / np.linalg.norm(Ht)
else:
logger.info(" l2-norm already 1!!!!!")
H = Ht.copy()
Sigma_H = np.zeros_like(Sigma_H)
else:
H = Ht.copy()
h_norm = np.append(h_norm, np.linalg.norm(H))
print 'h_norm = ', h_norm
Crit_H = (np.linalg.norm(H - H1) / np.linalg.norm(H1)) ** 2
cH += [Crit_H]
H1[:] = H[:]
# A
if estimateA:
logger.info("E A step ...")
m_A, Sigma_A = vt.expectation_A_ms(m_A, Sigma_A, H, G, m_C, W, XX,
Gamma, Gamma_X, q_Z,
mu_Ma, sigma_Ma, J, y_tilde,
Sigma_H, sigma_eps_m, N, M, D, n_sess)
cA += [(np.linalg.norm(m_A - m_A1) / np.linalg.norm(m_A1)) ** 2]
m_A1[:, :, :] = m_A[:, :, :]
# Q labels
if estimateZ:
logger.info("E Q step ...")
q_Z, Z_tilde = vt.expectation_Q_ms(Sigma_A, m_A, Sigma_C, m_C,
sigma_Ma, mu_Ma, sigma_Mc, mu_Mc,
Beta, Z_tilde, q_Z, neighboursIndexes, graph, M, J, K, n_sess)
if 0:
import matplotlib.pyplot as plt
plt.close('all')
fig = plt.figure(1)
for m in xrange(M):
ax = fig.add_subplot(2, M, m + 1)
im = ax.matshow(m_A[:, :, m].mean(0).reshape(20, 20))
plt.colorbar(im, ax=ax)
ax = fig.add_subplot(2, M, m + 3)
im = ax.matshow(q_Z[m, 1, :].reshape(20, 20))
plt.colorbar(im, ax=ax)
fig = plt.figure(2)
for m in xrange(M):
for s in xrange(n_sess):
ax = fig.add_subplot(M, n_sess, n_sess * m + s + 1)
im = ax.matshow(m_A[s, :, m].reshape(20, 20))
plt.colorbar(im, ax=ax)
plt.show()
cZ += [(np.linalg.norm(q_Z - q_Z1) / (np.linalg.norm(q_Z1) + eps)) ** 2]
q_Z1 = q_Z
if ni > 0:
free_energyE = 0
for s in xrange(n_sess):
free_energyE += vt.Compute_FreeEnergy(y_tilde[s, :, :], m_A[s, :, :], Sigma_A[:, :, :, s],
mu_Ma, sigma_Ma, H, Sigma_H, AuxH, R, R_inv, sigmaH, sigmaG,
m_C[s, :, :], Sigma_C[:, :, :, s], mu_Mc, sigma_Mc, G, Sigma_G,
AuxG, q_Z, neighboursIndexes, Beta, Gamma,
gamma, gamma_h, gamma_g, sigma_eps[s, :], XX[s, :, :, :],
W, J, D, M, N, K, use_hyperprior, Gamma_X[:, s, :, :],
Gamma_WX[:, s, :, :], bold=True, S=n_sess)
if free_energyE < free_energy:
logger.info("free energy has decreased after E step from %f to %f", free_energy, free_energyE)
# crit. AH and CG
logger.info("crit. AH and CG")
AH = m_A[:, :, :, np.newaxis] * H[np.newaxis, np.newaxis, :]
Crit_AH = (np.linalg.norm(AH - AH1) / (np.linalg.norm(AH1) + eps)) ** 2
cAH += [Crit_AH]
AH1 = AH.copy()
logger.info("Crit_AH = " + str(Crit_AH))
#####################
# MAXIMIZATION
#####################
if prior=='balloon':
logger.info(" prior balloon")
AuxH = H - Hb
AuxG = G.copy()  # Gb (a balloon PRF prior) is never defined in this BOLD-only variant, so fall back to G itself
else:
logger.info(" NO prior")
AuxH = H.copy()
AuxG = G.copy()
# Variance HRF: sigmaH
if estimateSigmaH:
logger.info("M sigma_H step ...")
sigmaH = vt.maximization_sigma_asl(D, Sigma_H, matrix_covH, AuxH, use_hyperprior, gamma_h)
logger.info('sigmaH = ' + str(sigmaH))
if ni > 0:
free_energyVh = 0
for s in xrange(n_sess):
free_energyVh += vt.Compute_FreeEnergy(y_tilde[s, :, :], m_A[s, :, :], Sigma_A[:, :, :, s], mu_Ma, sigma_Ma,
H, Sigma_H, AuxH, R, R_inv, sigmaH, sigmaG,
m_C[s, :, :], Sigma_C[:, :, :, s], mu_Mc, sigma_Mc, G, Sigma_G,
AuxG, q_Z, neighboursIndexes, Beta, Gamma,
gamma, gamma_h, gamma_g, sigma_eps[s, :], XX[s, :, :, :], W,
J, D, M, N, K, use_hyperprior, Gamma_X[:, s, :, :], Gamma_WX[:, s, :, :], bold=True, S=n_sess)
if free_energyVh < free_energyE:
logger.info("free energy has decreased after v_h computation from %f to %f", free_energyE, free_energyVh)
# (mu,sigma)
if estimateMP:
logger.info("M (mu,sigma) a and c step ...")
#print 'q_Z = ', q_Z
#print q_Z.shape
mu_Ma, sigma_Ma = vt.maximization_mu_sigma_ms(q_Z, m_A, Sigma_A, M, J, n_sess, K)
print 'mu_Ma = ', mu_Ma
print 'sigma_Ma = ', sigma_Ma
if ni > 0:
free_energyMP = 0
for s in xrange(n_sess):
free_energyMP += vt.Compute_FreeEnergy(y_tilde[s, :, :], m_A[s, :, :], Sigma_A[:, :, :, s], mu_Ma, sigma_Ma,
H, Sigma_H, AuxH, R, R_inv, sigmaH, sigmaG,
m_C[s, :, :], Sigma_C[:, :, :, s], mu_Mc, sigma_Mc, G, Sigma_G,
AuxG, q_Z, neighboursIndexes, Beta, Gamma,
gamma, gamma_h, gamma_g, sigma_eps[s, :], XX[s, :, :, :], W,
J, D, M, N, K, use_hyperprior, Gamma_X[:, s, :, :], Gamma_WX[:, s, :, :], bold=True, S=n_sess)
if free_energyMP < free_energyVh:
logger.info("free energy has decreased after GMM parameters computation from %f to %f", free_energyVh, free_energyMP)
# Drift L, alpha
if estimateLA:
logger.info("M L, alpha step ...")
for s in xrange(n_sess):
AL[:, :, s] = vt.maximization_LA_asl(Y[s, :, :], m_A[s, :, :], m_C[s, :, :], XX[s, :, :, :],
WP[s, :, :], W, WP_Gamma_WP[s, :, :], H, G, Gamma)
PL = np.einsum('ijk,kli->ijl', WP, AL)
y_tilde = Y - PL
if ni > 0:
free_energyLA = 0
for s in xrange(n_sess):
free_energyLA += vt.Compute_FreeEnergy(y_tilde[s, :, :], m_A[s, :, :], Sigma_A[:, :, :, s], mu_Ma, sigma_Ma,
H, Sigma_H, AuxH, R, R_inv, sigmaH, sigmaG,
m_C[s, :, :], Sigma_C[:, :, :, s], mu_Mc, sigma_Mc, G, Sigma_G,
AuxG, q_Z, neighboursIndexes, Beta, Gamma,
gamma, gamma_h, gamma_g, sigma_eps[s, :], XX[s, :, :, :], W,
J, D, M, N, K, use_hyperprior, Gamma_X[:, s, :, :], Gamma_WX[:, s, :, :], bold=True, S=n_sess)
if free_energyLA < free_energyMP:
logger.info("free energy has decreased after drifts computation from %f to %f", free_energyMP, free_energyLA)
# Beta
if estimateBeta:
logger.info("M beta step ...")
"""Qtilde = np.concatenate((Z_tilde, np.zeros((M, K, 1), dtype=Z_tilde.dtype)), axis=2)
Qtilde_sumneighbour = Qtilde[:, :, neighboursIndexes].sum(axis=3)
Beta = vt.maximization_beta_m2(Beta.copy(), q_Z, Qtilde_sumneighbour,
Qtilde, neighboursIndexes, maxNeighbours,
gamma, MaxItGrad, gradientStep)
logger.info(Beta)
"""
logger.info("M beta step ...")
Qtilde = np.concatenate((Z_tilde, np.zeros((M, K, 1), dtype=Z_tilde.dtype)), axis=2)
Qtilde_sumneighbour = Qtilde[:, :, neighboursIndexes].sum(axis=3)
for m in xrange(0, M):
Beta[m] = vt.maximization_beta_m2_scipy_asl(Beta[m].copy(), q_Z[m, :, :], Qtilde_sumneighbour[m, :, :],
Qtilde[m, :, :], neighboursIndexes, maxNeighbours,
gamma, MaxItGrad, gradientStep)
logger.info(Beta)
if ni > 0:
free_energyB = 0
for s in xrange(n_sess):
free_energyB += vt.Compute_FreeEnergy(y_tilde[s, :, :], m_A[s, :, :], Sigma_A[:, :, :, s], mu_Ma, sigma_Ma,
H, Sigma_H, AuxH, R, R_inv, sigmaH, sigmaG,
m_C[s, :, :], Sigma_C[:, :, :, s], mu_Mc, sigma_Mc, G, Sigma_G,
AuxG, q_Z, neighboursIndexes, Beta, Gamma,
gamma, gamma_h, gamma_g, sigma_eps[s, :], XX[s, :, :, :], W,
J, D, M, N, K, use_hyperprior, Gamma_X[:, s, :, :], Gamma_WX[:, s, :, :], bold=True, S=n_sess)
if free_energyB < free_energyLA:
logger.info("free energy has decreased after Beta computation from %f to %f", \
free_energyLA, free_energyB)
if 0 and ni < 5:
plt.close('all')
for m in xrange(0, M):
range_b = np.arange(-10., 20., 0.1)
beta_plotting = np.zeros_like(range_b)
grad_plotting = np.zeros_like(range_b)
for ib, b in enumerate(range_b):
beta_plotting[ib] = vt.fun(b, q_Z[m, :, :], Qtilde_sumneighbour[m, :, :],
neighboursIndexes, gamma)
grad_plotting[ib] = vt.grad_fun(b, q_Z[m, :, :], Qtilde_sumneighbour[m, :, :],
neighboursIndexes, gamma)
#print beta_plotting
plt.figure(1)
plt.hold('on')
plt.plot(range_b, beta_plotting)
plt.figure(2)
plt.hold('on')
plt.plot(range_b, grad_plotting)
plt.show()
# Sigma noise
if estimateNoise:
logger.info("M sigma noise step ...")
for s in xrange(n_sess):
sigma_eps[s, :] = vt.maximization_sigma_noise_asl(XX[s, :, :, :], m_A[s, :, :], Sigma_A[:, :, :, s], H, m_C[s, :, :], Sigma_C[:, :, :, s], \
G, Sigma_H, Sigma_G, W, y_tilde[s, :, :], Gamma, \
Gamma_X[:, s, :, :], Gamma_WX[:, s, :, :], N)
if PLOT:
for m in xrange(M):
SUM_q_Z[m] += [q_Z[m, 1, :].sum()]
mua1[m] += [mu_Ma[m, 1]]
free_energy = 0
for s in xrange(n_sess):
if s==n_sess-1:
plotFE = True
else:
plotFE = False
free_energy += vt.Compute_FreeEnergy(y_tilde[s, :, :], m_A[s, :, :], Sigma_A[:, :, :, s], mu_Ma, sigma_Ma,
H, Sigma_H, AuxH, R, R_inv, sigmaH, sigmaG,
m_C[s, :, :], Sigma_C[:, :, :, s], mu_Mc, sigma_Mc, G, Sigma_G,
AuxG, q_Z, neighboursIndexes, Beta, Gamma,
gamma, gamma_h, gamma_g, sigma_eps[s, :], XX[s, :, :, :], W,
J, D, M, N, K, use_hyperprior, Gamma_X[:, s, :, :], Gamma_WX[:, s, :, :],
plot=plotFE, bold=True, S=n_sess)
if ni > 0:
if free_energy < free_energyB:
logger.info("free energy has decreased after Noise computation from %f to %f", free_energyB, free_energy)
if ni > 0:
if free_energy < FE[-1]:
logger.info("WARNING! free energy has decreased in this iteration from %f to %f", FE[-1], free_energy)
FE += [free_energy]
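# Convergence criterion: relative change of the free energy over the last five iterations.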
if ni > 5:
#Crit_FE = np.abs((FE[-1] - FE[-2]) / FE[-2])
FE0 = np.array(FE)
Crit_FE = np.abs((FE0[-5:] - FE0[-6:-1]) / FE0[-6:-1])
print Crit_FE
print (Crit_FE > Thresh * np.ones_like(Crit_FE)).any()
else:
Crit_FE = 100
ni += 1
cTime += [time.time() - t1]
logger.info("Computing reconstruction error")
StimulusInducedSignal = vt.computeFit_asl(H, m_A[s, :, :], G, m_C[s, :, :], W, XX[s, :, :, :])
rerror = np.append(rerror, \
np.mean(((Y[s, :, :] - StimulusInducedSignal) ** 2).sum(axis=0)) \
/ np.mean((Y[s, :, :] ** 2).sum(axis=0)))
CompTime = time.time() - t1
# Normalize if not done already
if not constraint: # or not normg:
logger.info("l2-norm of H and G to 1 if not constraint")
Hnorm = np.linalg.norm(H)
H /= Hnorm
Sigma_H /= Hnorm**2
m_A *= Hnorm
if zc:
H = np.concatenate(([0], H, [0]))
## Compute contrast maps and variance
if computeContrast and len(contrasts) > 0:
logger.info("Computing contrasts ... ")
CONTRAST_A, CONTRASTVAR_A, \
CONTRAST_C, CONTRASTVAR_C = vt.compute_contrasts(condition_names,
contrasts, m_A[s, :, :], m_C[s, :, :],
Sigma_A[:, :, :, s], Sigma_C[:, :, :, s], M, J)
else:
CONTRAST_A, CONTRASTVAR_A, CONTRAST_C, CONTRASTVAR_C = 0, 0, 0, 0
###########################################################################
########################################## PLOTS and SNR computation
logger.info("Nb iterations to reach criterion: %d", ni)
logger.info("Computational time = %s min %s s",
str(np.int(CompTime // 60)), str(np.int(CompTime % 60)))
logger.info("Iteration time = %s min %s s",
str(np.int((CompTime // ni) // 60)), str(np.int((CompTime / ni) % 60)))
logger.info("perfusion baseline mean = %f", np.mean(AL[0, :, s]))
logger.info("perfusion baseline var = %f", np.var(AL[0, :, s]))
logger.info("drifts mean = %f", np.mean(AL[1:, :, s]))
logger.info("drifts var = %f", np.var(AL[1:, :, s]))
logger.info("noise mean = %f", np.mean(sigma_eps[s, :]))
logger.info("noise var = %f", np.var(sigma_eps[s, :]))
SNR10 = 20 * (np.log10(np.linalg.norm(Y[s, :, :]) / \
np.linalg.norm(Y[s, :, :] - StimulusInducedSignal - PL[s, :, :])))
logger.info("SNR = %d", SNR10)
return ni, m_A.mean(0), H, m_C.mean(0), G, Z_tilde, sigma_eps[s, :], \
mu_Ma, sigma_Ma, mu_Mc, sigma_Mc, Beta, AL[:, :, s], PL[s, :, :], \
np.zeros_like(AL[0, :, s]), Sigma_A[:, :, :, s], Sigma_C[:, :, :, s], Sigma_H, Sigma_G, rerror, \
CONTRAST_A, CONTRASTVAR_A, CONTRAST_C, CONTRASTVAR_C, \
cA[:], cH[2:], cC[2:], cG[2:], cZ[2:], cAH[2:], cCG[2:], \
cTime, FE
|
Python
|
CL
|
e4888a0457087619ea787680b82835c9560ca4a6ff3e3482a8ee49b031a40f9c
|
import numpy as np
import torch.nn as nn
import random
import os
import torch
def create_features(dataframe, list_of_features=['u_in']):
# u_in cumsum
dataframe['u_in_cumsum'] = dataframe.groupby('breath_id')['u_in'].cumsum()
# u_in shift change
for lag in np.arange(1, 5, 1):
dataframe[f'u_in_lag_fwrd{lag}'] = dataframe.groupby('breath_id')['u_in'].shift(
lag).fillna(0)
dataframe[f'u_in_lag_back{lag}'] = dataframe.groupby('breath_id')['u_in'].shift(
int(-lag)).fillna(0)
# time diff
dataframe['time_diff'] = dataframe.groupby('breath_id')['time_step'].diff(1).fillna(0)
dataframe['time_diff_2'] = dataframe.groupby('breath_id')['time_step'].diff(2).fillna(
0)
dataframe['time_diff_3'] = dataframe.groupby('breath_id')['time_step'].diff(3).fillna(
0)
dataframe['time_diff_4'] = dataframe.groupby('breath_id')['time_step'].diff(4).fillna(
0)
dataframe['time_diff_5'] = dataframe.groupby('breath_id')['time_step'].diff(5).fillna(
0)
# u_in area
dataframe['area'] = dataframe['time_step'] * dataframe['u_in']
dataframe['area_cumsum'] = dataframe.groupby('breath_id')['area'].cumsum()
# add rectangle method
dataframe['auc_u_in'] = dataframe['time_diff'] * dataframe['u_in']
dataframe['auc_u_in_cumsum'] = dataframe.groupby('breath_id')['auc_u_in'].cumsum()
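# For each selected feature, compute per-breath summary statistics (max/min/mean/median),
# merge them back onto every row, and add a non-negative range column.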
for feature in list_of_features:
grouped_dataframe = dataframe.groupby('breath_id')[feature].agg(
[max, min, np.mean, np.median])
dataframe = dataframe.merge(
grouped_dataframe,
how='left',
on='breath_id'
)
dataframe = dataframe.rename(
columns={
'max': feature + '_max',
'min': feature + '_min',
'mean': feature + '_mean',
'median': feature + '_median'
}
)
dataframe[f'{feature}_range'] = (
dataframe[f'{feature}_max'] - dataframe[f'{feature}_min']).apply(
lambda x: max(0, x))
return dataframe
# create a class wrapper from PyTorch nn.Module, so
# the function now can be easily used in models
# note: recent PyTorch versions ship nn.SiLU(), which can be used directly instead of this wrapper
class Swish(nn.Module):
'''
Applies the Sigmoid Linear Unit (SiLU) function element-wise:
SiLU(x) = x * sigmoid(x)
Shape:
- Input: (N, *) where * means, any number of additional
dimensions
- Output: (N, *), same shape as the input
'''
def __init__(self):
super().__init__()
def forward(self, x):
return x * torch.sigmoid(x)
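# Minimal usage sketch (assumes a torch.Tensor input):
# act = Swish()
# y = act(torch.randn(4, 8)) # output has the same shape as the input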
def seed_everything(seed):
"""
Seeds basic parameters for reproducibility of results.
Args:
seed (int): Number of the seed.
"""
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def worker_init_fn(worker_id):
"""
Handles PyTorch x Numpy seeding issues.
Args:
worker_id (int): Id of the worker.
"""
np.random.seed(np.random.get_state()[1][0] + worker_id)
def save_model_weights(model, filename, verbose=1, cp_folder=""):
"""
Saves the weights of a PyTorch model.
Args:
model (torch model): Model to save the weights of.
filename (str): Name of the checkpoint.
verbose (int, optional): Whether to display infos. Defaults to 1.
cp_folder (str, optional): Folder to save to. Defaults to "".
"""
if verbose:
print(f"\n -> Saving weights to {os.path.join(cp_folder, filename)}\n")
torch.save(model.state_dict(), os.path.join(cp_folder, filename))
def compute_metric(df, preds):
"""
Metric for the problem, as I understood it.
"""
y = np.array(df['pressure'].values.tolist())
# inspiratory phase
mask = 1 - np.array(df['u_out'].values.tolist())
# combine the mask with the MAE calculation
mae = mask * np.abs(y - preds)
mae = mae.sum() / mask.sum()
return mae
# Custom loss
class VentilatorLoss(nn.Module):
"""
Directly optimizes the competition metric
"""
def __call__(self, preds, y, u_out):
mask = 1 - u_out
mae = mask * (y - preds).abs()
mae = mae.sum(-1) / mask.sum(-1)
return mae
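# Usage sketch (hypothetical tensors of shape (batch, seq_len)):
# crit = VentilatorLoss()
# loss = crit(preds, y, u_out).mean() # per-sample MAE over inspiratory steps, averaged over the batch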
|
Python
|
CL
|
394a8f92fa8ea95bc114c90004444a051a3611272001d7a24d459510cc82da30
|
# Flask REST-based proxy application that performs Google searches for a given
# search term using the googleapi library
from flask import Flask,abort,jsonify,make_response,request, url_for
import logging
import jsons
from googleapi import google
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('proxy')
app = Flask(__name__,static_folder=None)
@app.route('/')
def index():
"""Lists the available REST endpoints for the application.
Args:
none
Returns:
A JSON object containing a JSON array indexed at "endPoints".
Each element in the array contains a JSON object with ...
Example:
{
"code": 200,
"endPoints": [
{
"methods": "GET,OPTIONS,HEAD",
"rule": "/"
},
{
"methods": "OPTIONS,POST",
"rule": ""
}
]
}
Raises:
None
"""
routes = []
for rule in app.url_map.iter_rules():
myRule = {}
myRule["rule"] = rule.rule
myRule["methods"] = ",".join(list(rule.methods))
#myRule["function"] = rule.endpoint
routes.append(myRule)
return jsonify(code=200, endPoints=routes)
@app.route('/google/search/<searchTerm>/<numPages>',methods=['GET'])
def performGoogleSearch(searchTerm,numPages):
"""Search the google for the given terms and return the result
Args:
Returns:
Raises:
None. All errors are captured; a 401 is returned for any raised exception and the reason is logged.
"""
try:
search_results = google.search(searchTerm, int(numPages))
return jsons.dumps(search_results)
except Exception as e:
logger.warning (str(e))
resp = make_response("",401)
return resp
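# Example request (assuming the default host/port from app.run below):
# GET http://localhost:5000/google/search/<searchTerm>/<numPages>
# e.g. curl http://localhost:5000/google/search/flask/1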
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
if __name__ == '__main__':
app.run(debug=False,host='0.0.0.0',port=5000)
|
Python
|
CL
|
77ed7263dbb1b355e5a3d3dc6d3d0a12e708f57b6282a31027f69e7212d07a57
|
# This file contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
import requests
import re
from rasa_sdk.forms import FormAction, REQUESTED_SLOT
from typing import Any, Text, Dict, List, Union, Optional
import json
from rasa_sdk import Action, Tracker
from rasa_sdk.events import AllSlotsReset, FollowupAction, UserUtteranceReverted, ActionReverted, Restarted
from rasa_sdk.executor import CollectingDispatcher
import pickle
import datetime
#from sms_api import send_message
from threading import Thread
#from date_valid import validate_date , haptik_date_validation
from pytz import timezone
#from datetime import datetime
from datetime import datetime
import pytz
IST = pytz.timezone('Asia/Kolkata')
with open("templates.json", "r", encoding="utf-8") as temp:
templates = json.load(temp)
class ActionFallback(Action):
def name(self) -> Text:
return "action_fallback"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
# Check for two consecutive fallbacks; if the bot still didn't understand, utter the final message and restart.
bot_msg = ""
count = 0
templates_temp = {}
for key,message in templates.items():
templates_temp[key] = message
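# Walk the event history backwards: count consecutive fallback utterances (give up after two),
# and when re-prompting prefer the "_short" variant of the last matched template if one exists.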
for event in reversed(tracker.events):
if event.get("event") == "bot":
#logger.debug("Inside Fallback : text is : "+event.get("text"))
if templates["utter_fallback"] in event.get("text"):
count += 1
if count >= 2:
dispatcher.utter_message(templates["utter_bot_not_understand"])
return [Restarted()]
# Call is Ended EOC
else:
count += 1
bot_msg = event.get("text")
for template_key,template_message in templates_temp.items():
if event.get("text") == template_message:
short_key = template_key + "_short"
if short_key in templates_temp:
bot_msg = templates_temp[short_key]
break
else:
bot_msg = templates_temp[template_key]
break
#bot_msg = last_bot_message
break
elif event.get("text") in templates_temp.values():
# replace bot message here\
bot_msg = event.get("text")
#logger.debug(" Bot message in the elif block of fallback is : "+str(bot_msg))
for template_key,template_message in templates_temp.items():
#logger.debug("Template message in the elif block of fallback is : "+str(template_message))
if event.get("text") == template_message:
short_key = template_key + "_short"
if short_key in templates_temp:
bot_msg = templates_temp[short_key]
break
else:
bot_msg = templates_temp[template_key]
break
break
else:
bot_msg = event.get("text")
#logger.debug("Bot message in the else block of fallback is : "+str(bot_msg))
for template_key,template_message in templates_temp.items():
#logger.debug("Template message in the else block of fallback is : "+str(template_message))
if event.get("text") == template_message:
short_key = template_key + "_short"
if short_key in templates_temp:
bot_msg = templates_temp[short_key]
break
else:
bot_msg = templates_temp[template_key]
break
#bot_msg = last_bot_message
count += 1
break
if bot_msg == "":
dispatcher.utter_message(templates["initial_message"])
else:
if templates["utter_fallback"] in bot_msg:
dispatcher.utter_message(bot_msg)
else:
dispatcher.utter_message(templates["utter_fallback"]+ '. ' + bot_msg)
return [UserUtteranceReverted()]
# Question Number 1
class FormGetRatingQuestion1(FormAction):
def name(self) -> Text:
return "form_question1"
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ["question1"]
def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
return {
"question1": self.from_text(),
}
def validate_question1(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate form_question1"""
try:
intent_name = tracker.latest_message['intent']['name']
if intent_name == "intent_name":
return {"question1": value}
else:
dispatcher.utter_message(templates["utter_question_1"])
# validation failed, set this slot to None, meaning the
# user will be asked for the slot again
return {"question1": None}
except Exception as e:
print("EXCEPTION here : "+ str(e))
def submit(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
return []
# Question Number 2
class FormGetRatingQuestion2(FormAction):
def name(self) -> Text:
return "form_question2"
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ["question2"]
def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
return {
"question2": self.from_text(),
}
def validate_question2(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate form_question2"""
try:
intent_name = tracker.latest_message['intent']['name']
if intent_name == "intent_age" :
return {"question2": value}
else:
dispatcher.utter_message(templates["utter_question_2"])
# validation failed, set this slot to None, meaning the
# user will be asked for the slot again
return {"question2": None}
except Exception as e:
print("EXCEPTION here : "+ str(e))
def submit(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
return[]
# Question Number 3
class FormGetRatingQuestion3(FormAction):
def name(self) -> Text:
return "form_question3"
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
return ["question3"]
def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
return {
"question3": self.from_text(),
}
def submit(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
return []
|
Python
|
CL
|
d73e2d58c0fffd9bfae1c89a0119e3fe66ffaaa0c7af2796a62a9fb24ecc2504
|
from setuptools import setup, Extension
from distutils.command.build_ext import build_ext
from distutils.command.install_data import install_data
import sys
import os
from subprocess import check_call
#full_info = open("README.md").read()
class clrmagic_build_ext(build_ext):
def build_extension(self, ext):
"""
build clrmagic.dll using csc or mcs
"""
if sys.platform == "win32":
_clr_compiler = "C:\\Windows\\Microsoft.NET\\Framework\\v4.0.30319\\csc.exe"
else:
_clr_compiler = "mcs"
cmd = [
_clr_compiler,
"/target:library",
"clrmagic.cs"
]
check_call(" ".join(cmd), shell=True)
class clrmagic_install_data(install_data):
def run(self):
build_cmd = self.get_finalized_command("build_ext")
install_cmd = self.get_finalized_command("install")
build_lib = os.path.abspath(build_cmd.build_lib)
install_platlib = os.path.relpath(install_cmd.install_platlib, self.install_dir)
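# Expand the "{build_lib}" / "{install_platlib}" placeholders used in the data_files
# entries declared in setup() below, so the built clrmagic.dll is installed into the platform library directory.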
for i, data_files in enumerate(self.data_files):
if isinstance(data_files, str):
self.data_files[i] = data_files.format(build_lib=build_lib)  # data_files is a plain string here
else:
for j, filename in enumerate(data_files[1]):
data_files[1][j] = filename.format(build_lib=build_lib)
dest = data_files[0].format(install_platlib=install_platlib)
self.data_files[i] = dest, data_files[1]
return install_data.run(self)
setupdir = os.path.dirname(__file__)
if setupdir:
os.chdir(setupdir)
setup(
name = "clrmagic",
version = "0.0.1a2",
description = "IPython cell magic to use .NET languages",
author = "Xavier Dupré, Denis Akhiyarov",
author_email = "denis.akhiyarov@gmail.com",
url = "https://github.com/denfromufa/clrmagic",
license = "MIT",
keywords = ".NET CLR Mono Jupyter IPython notebook C# CSHARP pythonnet",
py_modules = ["clrmagic"],
install_requires = ["pythonnet"],
classifiers = [
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Framework :: IPython",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: Microsoft",
"Programming Language :: C#",
"Programming Language :: Python",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering",
"Topic :: Software Development"
],
zip_safe = False,
ext_modules=[
Extension("clrmagic", sources=["clrmagic.cs"])
],
data_files = [
("{install_platlib}", ["clrmagic.dll"])
],
cmdclass = {
"build_ext": clrmagic_build_ext,
"install_data": clrmagic_install_data
}
)
|
Python
|
CL
|
bb9346512d1f5a85821601e75c215ddaf7a1e04219f66886abca6f6b5d22f073
|
#
# This file is part of BDC-ODC.
# Copyright (C) 2020 INPE.
#
# stac2odc is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
__version__ = '0.0.1'
|
Python
|
CL
|
96a8d74828a8b96e213ae0efc21f9c79f8f524a57f5847034b6dca9495e37958
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-09-07 22:39
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
replaces = [('build', '0017_auto_20180904_1457'), ('build', '0018_rebuild_qa_comment'), ('build', '0019_auto_20180907_1335'), ('build', '0020_auto_20180907_1414')]
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('build', '0016_buildflow_asset_hash'),
]
operations = [
migrations.AddField(
model_name='build',
name='qa_comment',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='build',
name='status',
field=models.CharField(choices=[('queued', 'Queued'), ('waiting', 'Waiting'), ('running', 'Running'), ('success', 'Success'), ('error', 'Error'), ('fail', 'Failed'), ('qa', 'QA Testing')], default='queued', max_length=16),
),
migrations.AlterField(
model_name='rebuild',
name='status',
field=models.CharField(choices=[('queued', 'Queued'), ('waiting', 'Waiting'), ('running', 'Running'), ('success', 'Success'), ('error', 'Error'), ('fail', 'Failed'), ('qa', 'QA Testing')], default='queued', max_length=16),
),
migrations.AddField(
model_name='rebuild',
name='qa_comment',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='build',
name='qa_user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='builds_qa', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='rebuild',
name='qa_user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='rebuilds_qa', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='build',
name='time_qa_end',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='build',
name='time_qa_start',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='rebuild',
name='time_qa_end',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='rebuild',
name='time_qa_start',
field=models.DateTimeField(blank=True, null=True),
),
]
|
Python
|
CL
|
dd4f920318cd82eb1efcc8e8f26b1c770d30e038c595178c9e2f581e6fb5b410
|
# Generated by Django 2.1.5 on 2019-03-02 17:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('sessions', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ActiveContext',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('show_background_search_feedback', models.BooleanField(default=False)),
('check_for_diversity', models.BooleanField(default=True)),
('show_arch_suggestions', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='AllowedCommand',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('command_type', models.CharField(choices=[('engineer', 'Engineer Commands'), ('critic', 'Critic Commands'), ('historian', 'Historian Commands'), ('analyst', 'iFEED Commands'), ('analyst_instruments', 'Instruments Cheatsheet'), ('analyst_instrument_parameters', 'Instrument Parameters Cheatsheet'), ('analyst_measurements', 'Measurements Cheatsheet'), ('analyst_stakeholders', 'Stakeholders Cheatsheet'), ('measurements', 'Historical Measurements Cheatsheet'), ('missions', 'Historical Missions Cheatsheet'), ('technologies', 'Historical Technologies Cheatsheet'), ('objectives', 'Objectives Cheatsheet'), ('space_agencies', 'Space Agencies Cheatsheet')], max_length=40)),
('command_descriptor', models.IntegerField()),
],
),
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('voice_answer', models.TextField()),
('visual_answer_type', models.TextField()),
('visual_answer', models.TextField()),
],
),
migrations.CreateModel(
name='Design',
fields=[
('design_id', models.AutoField(primary_key=True, serialize=False)),
('id', models.IntegerField()),
('inputs', models.TextField()),
('outputs', models.TextField()),
('activecontext', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='daphne_API.ActiveContext')),
],
),
migrations.CreateModel(
name='EDLContext',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('current_mat_file', models.CharField(max_length=255)),
('current_mat_file_for_print', models.CharField(max_length=255)),
('current_scorecard_file', models.CharField(max_length=255)),
('current_scorecard', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='EngineerContext',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vassar_instrument', models.TextField()),
('instrument_parameter', models.TextField()),
('vassar_measurement', models.TextField()),
],
),
migrations.CreateModel(
name='EOSSContext',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('problem', models.CharField(max_length=50)),
('dataset_name', models.CharField(max_length=80)),
('dataset_user', models.BooleanField()),
('last_arch_id', models.IntegerField()),
('selected_arch_id', models.IntegerField()),
('added_archs_count', models.IntegerField()),
('vassar_port', models.IntegerField()),
],
),
migrations.CreateModel(
name='ExperimentAction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action', models.TextField()),
('date', models.DateTimeField()),
],
),
migrations.CreateModel(
name='ExperimentContext',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_running', models.BooleanField()),
('experiment_id', models.IntegerField()),
('current_state', models.TextField()),
('eosscontext', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='daphne_API.EOSSContext')),
],
),
migrations.CreateModel(
name='ExperimentStage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=50)),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField()),
('end_state', models.TextField()),
('experimentcontext', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='daphne_API.ExperimentContext')),
],
),
migrations.CreateModel(
name='UserInformation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('daphne_version', models.CharField(choices=[('EOSS', 'Earth Observation Satellite Systems'), ('EDL', 'Entry, Descent & Landing'), ('AnomalyDetection', 'Anomaly Detection for Astronauts')], max_length=40)),
('channel_name', models.CharField(max_length=120)),
('session', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='sessions.Session')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='experimentaction',
name='experimentstage',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='daphne_API.ExperimentStage'),
),
migrations.AddField(
model_name='eosscontext',
name='user_information',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='daphne_API.UserInformation'),
),
migrations.AddField(
model_name='engineercontext',
name='eosscontext',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='daphne_API.EOSSContext'),
),
migrations.AddField(
model_name='edlcontext',
name='user_information',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='daphne_API.UserInformation'),
),
migrations.AddField(
model_name='design',
name='eosscontext',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='daphne_API.EOSSContext'),
),
migrations.AddField(
model_name='answer',
name='eosscontext',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='daphne_API.EOSSContext'),
),
migrations.AddField(
model_name='allowedcommand',
name='eosscontext',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='daphne_API.EOSSContext'),
),
migrations.AddField(
model_name='activecontext',
name='eosscontext',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='daphne_API.EOSSContext'),
),
migrations.AlterUniqueTogether(
name='userinformation',
unique_together={('session', 'user')},
),
migrations.AlterUniqueTogether(
name='design',
unique_together={('eosscontext', 'activecontext', 'id')},
),
]
|
Python
|
CL
|
04b87d2371ed5e19fa162ba1ca12e46ede6f204a5efc5f404e2fa9600a75c1ab
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
import re
from os.path import expanduser, isfile
import logging
import os
import yaml
from yaml import YAMLError
KUBECONFIG_ENV_VAR = "KUBECONFIG"
KUBECONFIG_FILE = "{0}/.kube/config".format(expanduser("~"))
DEFAULT_API_HOST = "http://localhost:8080"
DEFAULT_API_VERSION = "v1"
DEFAULT_NAMESPACE = "default"
SERVICE_ACCOUNT_ROOT = "/var/run/secrets/kubernetes.io/serviceaccount"
SERVICE_ACCOUNT_CA_PATH = "{0}/ca.crt".format(SERVICE_ACCOUNT_ROOT)
SERVICE_ACCOUNT_TOKEN = "{0}/token".format(SERVICE_ACCOUNT_ROOT)
ENV_SERVICE_HOST = "KUBERNETES_SERVICE_HOST"
ENV_SERVICE_PORT = "KUBERNETES_SERVICE_PORT"
VALID_API_VERSIONS = ["v1"]
VALID_IP_RE = re.compile(
r"^(http[s]?\:\/\/)?((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3})(:[0-9]+)?$"
)
VALID_HOST_RE = re.compile(r"^(http[s]?\:\/\/)?([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-\.]*[A-Za-z])+(:[0-9]+)?$")
class K8sConfig(object):
def __init__(
self, kubeconfig=None, api_host=None, auth=None, cert=None, namespace=None, pull_secret=None, token=None, version=None
):
"""
Pulls configuration from a kubeconfig file, if present, otherwise accepts user-defined parameters.
See http://kubernetes.io/docs/user-guide/kubeconfig-file/ for information on the kubeconfig file.
:param kubeconfig: Absolute path to the kubeconfig file, if any.
:param api_host: Absolute URI where the API server resides.
        :param auth: A tuple of (username, password) for basic authentication.
        :param cert: A tuple of (client certificate, client key) file paths for TLS client authentication.
:param namespace: The namespace to use. Defaults to 'default'
:param pull_secret: The password to use when pulling images from the container repository.
:param token: An authentication token. Mutually exclusive with 'auth'.
:param version: The version of the API to target. Defaults to 'v1'.
"""
super(K8sConfig, self).__init__()
self.api_host = None
self.auth = None
self.ca_cert = None
self.ca_cert_data = None
self.cert = None
self.client_certificate = None
self.client_key = None
self.cert_data = None
self.pull_secret = None
self.namespace = None
self.token = None
self.version = None
        if kubeconfig is None:
            self._init_with_defaults()
        else:
            self._read_config(filename=kubeconfig)
# Default fallback host.
if self.api_host is None:
logging.debug("Overriding api host with: [ {0} ]".format(DEFAULT_API_HOST))
self.api_host = DEFAULT_API_HOST
# Set defaults if not caught in kubeconfig file or environments.
if self.namespace is None:
logging.debug("Overriding namespace with: [ {0} ]".format(DEFAULT_NAMESPACE))
self.namespace = DEFAULT_NAMESPACE
if self.version is None:
logging.debug("Overriding api version with: [ {0} ]".format(DEFAULT_API_VERSION))
self.version = DEFAULT_API_VERSION
# Process overrides from arguments
if api_host is not None:
if not isinstance(api_host, str) or not (VALID_IP_RE.match(api_host) or VALID_HOST_RE.match(api_host)):
raise SyntaxError("K8sConfig: host: [ {0} ] is invalid.".format(api_host))
schema_re = re.compile(r"^http[s]*")
if not schema_re.search(api_host):
https_port_re = re.compile(r"\:443$")
                if not https_port_re.search(api_host):
                    logging.debug("Pre-pending http to api host [ {0} ] since port is not 443.".format(api_host))
                    api_host = "http://{0}".format(api_host)
                else:
                    logging.debug("Pre-pending https to api host [ {0} ] since port is 443.".format(api_host))
api_host = "https://{0}".format(api_host)
self.api_host = api_host
if auth is not None:
if not isinstance(auth, tuple):
raise SyntaxError("K8sConfig: auth: [ {0} ] must be a tuple for basic authentication.".format(auth))
self.auth = auth
if cert is not None:
if not isinstance(cert, tuple):
raise SyntaxError("K8sConfig: cert: [ {0} ] must be a tuple for client certificate/key.".format(cert))
self.cert = cert
if namespace is not None:
if not isinstance(namespace, str):
raise SyntaxError("K8sConfig: namespace: [ {0} ] must be a string.".format(namespace))
self.namespace = namespace
if pull_secret is not None:
if not isinstance(pull_secret, list):
raise SyntaxError("K8sConfig: pull_secret: [ {0} ] must be a list.".format(pull_secret))
self.pull_secret = pull_secret
if token is not None:
if not isinstance(token, str):
raise SyntaxError("K8sConfig: token: [ {0} ] must be a string.".format(token))
self.token = token
if version is not None:
if not isinstance(version, str):
raise SyntaxError("K8sConfig: host: [ {0} ] and version: [ {1} ] must be strings.".format(api_host, version))
if version not in VALID_API_VERSIONS:
valid = ", ".join(VALID_API_VERSIONS)
raise SyntaxError("K8sConfig: api_version: [ {0} ] must be in: [ {1} ]".format(version, valid))
self.version = version
return
def _init_with_defaults(self):
# Try to initialize using the environment variable.
kubeconfig = os.getenv(KUBECONFIG_ENV_VAR, None)
if kubeconfig is not None:
self._read_config(filename=kubeconfig)
return
# Try to initialize using the ~/.kube/config file.
if isfile(KUBECONFIG_FILE):
self._read_config(filename=KUBECONFIG_FILE)
return
# Try in-cluster config
if isfile(SERVICE_ACCOUNT_CA_PATH):
self._from_cluster()
return
def _from_cluster(self):
# Initialize CA cert.
if not isfile(SERVICE_ACCOUNT_CA_PATH):
raise IOError("K8sConfig: Cannot find in-cluster ca certificate [ {0} ] ".format(SERVICE_ACCOUNT_CA_PATH))
self.ca_cert = SERVICE_ACCOUNT_CA_PATH
# Initialize the API server host
host = os.getenv(ENV_SERVICE_HOST, None)
port = os.getenv(ENV_SERVICE_PORT, None)
self.api_host = "https://{0}:{1}".format(host, port)
# Initialize the token
if not isfile(SERVICE_ACCOUNT_TOKEN):
raise IOError("K8sConfig: Cannot find in-cluster token file [ {1} ]".format(SERVICE_ACCOUNT_TOKEN))
with open(SERVICE_ACCOUNT_TOKEN, "r") as stream:
self.token = stream.read()
self.version = DEFAULT_API_VERSION
return
def _read_config(self, filename=None):
if not isfile(filename):
raise IOError("K8sConfig: kubeconfig: [ {0} ] doesn't exist.".format(filename))
try:
with open(filename, "r") as stream:
dotconf = yaml.safe_load(stream)
except YAMLError as err:
raise SyntaxError("K8sConfig: kubeconfig: [ {0} ] is not a valid YAML file: {1}".format(filename, err))
self.clusters = dotconf.get("clusters")
self.contexts = dotconf.get("contexts")
self.current_context = dotconf.get("current-context")
self.current_context_dict = [
context.get("context") for context in self.contexts if context.get("name") == self.current_context
][0]
self.preferences = dotconf.get("preferences", "")
self.users = dotconf.get("users")
self.version = dotconf.get("apiVersion")
if self.clusters:
for cluster in self.clusters:
if cluster["name"] == self.current_context_dict["cluster"]:
if "server" in cluster["cluster"]:
self.api_host = cluster["cluster"]["server"]
if "certificate-authority" in cluster["cluster"]:
self.ca_cert = cluster["cluster"]["certificate-authority"]
if "certificate-authority-data" in cluster["cluster"]:
self.ca_cert_data = cluster["cluster"]["certificate-authority-data"]
if self.users:
for user in self.users:
if user["name"] == self.current_context_dict["user"]:
if "username" in user["user"] and "password" in user["user"]:
self.auth = (user["user"]["username"], user["user"]["password"])
if "token" in user["user"]:
self.token = user["user"]["token"]
if "client-certificate" in user["user"] and "client-key" in user["user"]:
self.client_certificate = user["user"]["client-certificate"]
self.client_key = user["user"]["client-key"]
self.cert = (self.client_certificate, self.client_key)
if "client-certificate-data" in user["user"] and "client-key-data" in user["user"]:
self.client_certificate_data = user["user"]["client-certificate-data"]
self.client_key_data = user["user"]["client-key-data"]
self.cert_data = (self.client_certificate_data, self.client_key_data)
if self.contexts:
for context in self.contexts:
if context["name"] == self.current_context:
if "namespace" in context["context"]:
self.namespace = context["context"]["namespace"]
def serialize(self):
data = {}
if self.api_host is not None:
data["api_host"] = self.api_host
if self.auth is not None:
data["auth"] = self.auth
if self.cert is not None:
data["cert"] = self.cert
if self.namespace is not None:
data["namespace"] = self.namespace
if self.pull_secret is not None:
data["pull_secret"] = self.pull_secret
if self.token is not None:
data["token"] = self.token
if self.version is not None:
data["version"] = self.version
return data
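# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): build a config from
# explicit arguments instead of ~/.kube/config or in-cluster defaults. The
# host, token and namespace below are purely illustrative values; defaults
# are still loaded first when they are available on the machine.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo_cfg = K8sConfig(
        api_host="https://k8s.example.com:6443",  # hypothetical API server
        token="my-service-account-token",         # hypothetical bearer token
        namespace="demo",
        version="v1",
    )
    print(demo_cfg.serialize())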
|
Python
|
CL
|
e6cd9f40fb21a2cc9ecc0a927178d69a41ea679b23e61f47b39e07162d4204b5
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0212,R0912,R0914
from decimal import Decimal
from dss.dsl.safe_strings import safe_unicode
from dss.dsl.Serializer import Serializer
from dss.dsl import html
from dss.dsl.xml.serializers import XmlSerializer
from dss.dsl.xml.serializers import (
basic_default_visitors_map, xml_default_visitors_map)
from dss.dsl.xml import (
XmlDoc,
XmlCData,
Comment,
XmlName,
XmlEntityRef,
XmlAttribute,
XmlAttributes,
#XmlElement,
#XmlElementProto,
VisitorMap,
)
################################################################################
## helper funcs
def _test_output(serializer, tree, expected_output):
real_output = serializer.serialize(tree)
if real_output != expected_output:
raise AssertionError(
'\n when serializing %r with %r\n want %r\n got %r'%(
tree,
serializer,
expected_output,
real_output))
def _test_output_set(serializers, data):
if not isinstance(serializers, (list, tuple)):
serializers = [serializers]
for serializer in serializers:
for tree, expected_output in data:
_test_output(serializer, tree, expected_output)
def _make_wrapper_func(_in):
def wrapper_func():
return _in
return wrapper_func
def _make_wrapper_method(_in):
class Foo(object):
def meth(self):
return _in
return Foo().meth
def _convert_test_set_to_func_calls(test_set):
return tuple(
[((lambda x: (lambda : x))(_in), out) # pylint: disable-msg=E0601
for _in, out in test_set]
+[(_make_wrapper_func(_in), out) for _in, out in test_set]
+[(_make_wrapper_method(_in), out) for _in, out in test_set]
)
################################################################################
## Test datasets
class _dummy_repr(object):
def __repr__(self):
return 'dummy_repr'
class _udummy_repr(object):
def __repr__(self):
return u'dummy_repr'
class _unsanitized_dummy_repr(object):
def __repr__(self):
return '&dummy_repr'
BASIC_TYPES_TEST_SET = (
(True, u'True'),
(False, u'False'),
(1, u'1'),
(1.0, u'1.0'),
(Decimal('2.0'), u'2.0'),
(complex(1,2), u'(1+2j)'),
((1,2,3), u'123'),
([1,2,3], u'123'),
([1,2,3,(4,5)], u'12345'),
([1,2,3,(4,5,(6.0))], u'123456.0'),
(set([1]), u'1'),
('abc', u'abc'),
(u'abc', u'abc'),
(('a','b','c'), u'abc'),
(_dummy_repr(), u'dummy_repr'),
(_udummy_repr(), u'dummy_repr'),
#(_unsanitized_dummy_repr(), u'&dummy_repr'),
)
escapings = {
"&": u"&",
"<": u"<",
">": u">",
'"': u""",
"'": u"'"}
ESCAPING_TEST_SET = tuple(
[(safe_unicode(k), k) for k in escapings]
+[(k, v) for k, v in escapings.iteritems()]
+[(k*200, v*200) for k, v in escapings.iteritems()]
+[('--%s--'%k, '--%s--'%v) for k, v in escapings.iteritems()]
+[('------%s'%k, '------%s'%v) for k, v in escapings.iteritems()]
+[('--%s%s'%(k,k), '--%s%s'%(v,v)) for k, v in escapings.iteritems()]
+[('&<>"\'&'*20, '&<>"'&'*20)]
)
ENCODING_TEST_SET = tuple(
[('金', unicode('金', 'utf-8'))]
)
COMBINED_BASIC_TEST_SET = tuple(
list(BASIC_TYPES_TEST_SET)
+list(ESCAPING_TEST_SET)
+list(ENCODING_TEST_SET)
)
BASIC_FUNC_CALL_TEST_SET = _convert_test_set_to_func_calls(BASIC_TYPES_TEST_SET)
BASIC_FUNC_CALL_ESCAPED_TEST_SET = _convert_test_set_to_func_calls(ESCAPING_TEST_SET)
XML_NAMES_TEST_SET = (
(XmlName('foo'), u'foo'),
(XmlName('bar:foo'), u'bar:foo'),
(XmlName(u'bar:foo'), u'bar:foo'),
(XmlName(local='foo', prefix='bar'), u'bar:foo'),
)
XML_COMMENTS_TEST_SET = (
(Comment('foo bar'), u'<!--foo bar-->'),
(Comment(['foo & bar', 1,2,3,(-1,-2)]), u'<!--foo & bar123-1-2-->'),
(Comment('foo & bar'), u'<!--foo & bar-->'),
(Comment('<!-- blah&blah<br /> -->'), # escape nested comments
u'<!--<!-/- blah&blah<br /> -/->-->'),
)
XML_CDATA_TEST_SET = (
(XmlCData('foo bar'), u'<![CDATA[foo bar]]>'),
(XmlCData(['a',1,'&']), u'<![CDATA[a1&]]>'),
(XmlCData(['a',[1,2],'&']), u'<![CDATA[a12&]]>'),
(XmlCData('foo & " bar'), u'<![CDATA[foo & " bar]]>'),
(XmlCData('foo ]]> bar'), u'<![CDATA[foo ]-]-> bar]]>'),
)
XML_ATTRIBUTES_TEST_SET = (
[(XmlAttribute(name='foo', value='bar'), u' foo="bar"'),
(XmlAttribute(name=XmlName('foo:bar'), value=1234),
u' foo:bar="1234"'),
(XmlAttributes([XmlAttribute(name='foo1', value='bar1'),
XmlAttribute(name='foo2', value='bar2')]),
u' foo1="bar1" foo2="bar2"')]
+[(XmlAttribute(name='foo', value=_in), u' foo="%s"'%out)
for _in, out in COMBINED_BASIC_TEST_SET]
)
BASIC_XMLDOC_TEST_SET = (
(XmlDoc(version='1.0', encoding='UTF-8')[html.div],
'<?xml version="1.0" encoding="UTF-8" ?><div></div>'),
(XmlDoc(version='2.0', encoding='ISO-8859-1')[html.div],
'<?xml version="2.0" encoding="ISO-8859-1" ?><div></div>'),
)
HTML_EMPTY_TAGS_TEST_SET = tuple(
[(getattr(html, tag), u'<%s></%s>'%(tag, tag))
for tag in html._non_empty_html_tag_names]
+[(getattr(html, tag), u'<%s />'%tag)
for tag in html._empty_html_tag_names]
)
HTML_ENTITIES_TEST_SET = tuple(
(eref, u'&%s;'%eref.alpha)
for name, eref in html.entities.iteritems())
TAG_ATTRIBUTES_TEST_SET = tuple(
[(html.div(foo=_in), u'<div foo="%s"></div>'%out)
for _in, out in COMBINED_BASIC_TEST_SET]
)
TAG_CLASS_ATTRIBUTE_TEST_SET = tuple(
[(html.div(_in), u'<div class="%s"></div>'%out)
for _in, out in COMBINED_BASIC_TEST_SET]
)
################################################################################
## test functions
def test_init_serializer():
s1 = Serializer()
assert s1.input_encoding == 'utf-8'
assert s1.visitor_map is not basic_default_visitors_map
assert s1.visitor_map == basic_default_visitors_map
s2 = XmlSerializer()
assert s2.input_encoding == 'utf-8'
assert s2.visitor_map is not xml_default_visitors_map
assert s2.visitor_map == xml_default_visitors_map
vmap = VisitorMap()
s3 = XmlSerializer(vmap)
assert s3.input_encoding == 'utf-8'
assert s3.visitor_map is vmap
assert not s3.visitor_map.get_visitor(1)
vmap.parent_map = basic_default_visitors_map
assert (s3.visitor_map.get_visitor(1) ==
basic_default_visitors_map[int])
vmap[int] = basic_default_visitors_map[bool]
assert (s3.visitor_map.get_visitor(1) ==
basic_default_visitors_map[bool])
assert s3.visitor_map == vmap
for ser_class in (Serializer, XmlSerializer):
assert (
ser_class(vmap, 'latin-1').input_encoding
== 'latin-1')
assert (
ser_class(vmap, input_encoding='latin-1').input_encoding
== 'latin-1')
Serializer(vmap)
XmlSerializer(vmap)
def test_basic_types():
_test_output_set((Serializer(), XmlSerializer()), BASIC_TYPES_TEST_SET)
def test_encoding():
_test_output_set((Serializer(), XmlSerializer()), ENCODING_TEST_SET)
def test_escaping():
_test_output_set(XmlSerializer(), ESCAPING_TEST_SET) # not Serializer
def test_basic_func_call():
_test_output_set((Serializer(), XmlSerializer()), BASIC_FUNC_CALL_TEST_SET)
def test_basic_func_call_escaped():
_test_output_set(XmlSerializer(), BASIC_FUNC_CALL_ESCAPED_TEST_SET)
def test_name_objects():
_test_output_set(XmlSerializer(), XML_NAMES_TEST_SET)
def test_xml_comment():
_test_output_set(XmlSerializer(), XML_COMMENTS_TEST_SET)
def test_xml_cdata():
_test_output_set(XmlSerializer(), XML_CDATA_TEST_SET)
def test_xml_attributes():
_test_output_set(XmlSerializer(), XML_ATTRIBUTES_TEST_SET)
def test_xmldoc():
_test_output_set(XmlSerializer(), BASIC_XMLDOC_TEST_SET)
def test_xhtml_dtd():
_test_output(XmlSerializer(), html.XHTML_DTD, html.XHTML_DTD)
def test_xhtml_entities():
e1 = XmlEntityRef(alpha='abc', num=123, description='boo')
e2 = XmlEntityRef('abc', 123, 'boo')
assert e1.num == e2.num == 123
assert e1.alpha == e2.alpha == 'abc'
assert e1.description == e2.description == 'boo'
assert str(e1)==str(e2)=='&abc;'
_test_output_set(XmlSerializer(), HTML_ENTITIES_TEST_SET)
def test_basic_xhtml_tags():
_test_output_set(XmlSerializer(), HTML_EMPTY_TAGS_TEST_SET)
def test_tag_attributes():
_test_output_set(XmlSerializer(), TAG_ATTRIBUTES_TEST_SET)
def test_tag_class_attribute():
_test_output_set(XmlSerializer(), TAG_CLASS_ATTRIBUTE_TEST_SET)
def test_xhtml_simpletable():
_test_output(
XmlSerializer(),
html.table(cellpadding=1)[html.tr[html.td[1], html.td[2]],
html.tr[html.td[1], html.td[2]],
],
unicode('<table cellpadding="1"><tr><td>1</td><td>2</td></tr>'
'<tr><td>1</td><td>2</td></tr></table>'))
def test_xhtml_script_tag():
_test_output(
XmlSerializer(),
html.script['function() { return "&"; }'],
u'''<script>
//<![CDATA[
function() { return "&"; }
//]]>
</script>''')
|
Python
|
CL
|
36c6b6b0b22a4e03da00e58b59d602644c910c99f2e89bb94076a73af29478c5
|
# cs146_p3
from heapq import heappop, heappush
from math import sqrt
def dijkstras_shortest_path(initial_position, destination, graph, adj, initial_xy, dest_xy):
""" Searches for a minimal cost path through a graph using Dijkstra's algorithm.
Args:
initial_position: The initial cell from which the path extends.
destination: The end location for the path.
graph: A loaded level, containing walls, spaces, and waypoints.
adj: An adjacency function returning cells adjacent to a given cell as well as their respective edge costs.
initial_xy: The initial xy coordinates within the initial_position cell
        dest_xy: The destination xy coordinates within the destination cell
Returns:
        If a path exists, return a tuple (point_path, explored_boxes): point_path is a list of
        ((x1, y1), (x2, y2)) segments from initial_xy to dest_xy, and explored_boxes lists the
        boxes visited by the search. Otherwise, return None.
"""
    # heuristic just uses Euclidean distance
def heuristic (curr, dest):
return vector_dist (curr, dest)
forward_dist = {initial_position: 0} # Distance from initial_position when searching "forward"
backward_dist = {destination: 0} # Distance from destination when searching "backward"
forward_prev = {initial_position: None} # Back links from the "forward" direction
backward_prev = {destination: None} # Back links from the "backward" direction
queue = [(0, initial_position, 'destination')] # The heap/priority queue used
heappush(queue, (1, destination, 'initial_position'))
f_detail_points = {initial_position: initial_xy} # Holds the entry point into each cell
b_detail_points = {destination: dest_xy}
explored_boxes = []
while queue:
# Continue with next min unvisited node
current_distance, current_node, goal = heappop(queue)
explored_boxes.append (current_node)
# If we've reached the opposite frontier
if (goal == 'destination' and current_node in backward_prev) or (goal == 'initial_position' and current_node in forward_prev):
node = current_node
# Build the path from the final point in the backward frontier
# to the src node
point_path = [b_detail_points[current_node]]
while node is not None:
point_path.append(f_detail_points[node])
node = forward_prev[node]
# This path goes from end to beginning, so reverse it
point_path.reverse()
# Now append the path from the final point in the backward frontier
# to the destination node
node = backward_prev[current_node]
while node is not None:
point_path.append(b_detail_points[node])
node = backward_prev[node]
# format of point_path is [((x1,y1), (x2,y2)), ((x2,y2), (x3,y3)), ((x3,y3), (x4,y4))...]
# This can be created by zipping point_path with point_path[1:] (which throws away the first element)
point_path = list(zip(point_path, point_path[1:]))
return (point_path, explored_boxes)
# Assigning the various variables to the values they should be depending on
# which way we are going
is_dest = goal == 'destination'
detail_points = f_detail_points if is_dest else b_detail_points
dist = forward_dist if is_dest else backward_dist
prev = forward_prev if is_dest else backward_prev
dest = dest_xy if is_dest else initial_xy
dest_id = goal
# Calculate tentative distances to adjacent cells
for adjacent_node, edge_cost, detail_point in adj(graph, current_node, detail_points[current_node]):
new_distance = dist[current_node] + edge_cost
if adjacent_node not in dist or new_distance < dist[adjacent_node]:
# Assign new distance and update link to previous cell
dist[adjacent_node] = new_distance
prev[adjacent_node] = current_node
# For priority, use distance + heuristic
heappush(queue, (new_distance + heuristic (detail_point, dest), adjacent_node, dest_id))
detail_points[adjacent_node] = detail_point
# Failed to find a path
print("Failed to find a path from", initial_position, "to", destination)
return None
def navigation_edges (mesh, box, current_point):
result = []
for adj_box in mesh['adj'][box]:
closest_point , dist = shortest_path_to_segment (current_point, get_border(box, adj_box))
result.append ((adj_box, dist, closest_point))
return result
def contains_point (pnt, box):
x,y = pnt
x1, x2, y1, y2 = box
return (x1 < x and x < x2) and (y1 < y and y < y2)
def find_path (src, dest, mesh):
src_box, dest_box = None, None
for box in mesh['boxes']:
if contains_point (src, box):
src_box = box
if contains_point (dest, box):
dest_box = box
if not src_box or not dest_box:
print ("Bad source or destination")
return ([], [])
path = dijkstras_shortest_path (src_box, dest_box, mesh, navigation_edges, src, dest)
if not path:
return ([], [])
return path
# Adapted from the project description
def get_border (box1, box2):
""" Finds the line segment where box1 and box2 overlap
Args:
box1: The first box
box2: The second box
Returns:
Returns the line segment where box1 and box2 overlap
"""
b1x1, b1x2, b1y1, b1y2 = box1
b2x1, b2x2, b2y1, b2y2 = box2
xborder = (max (b1x1, b2x1), min (b1x2, b2x2))
yborder = (max (b1y1, b2y1), min (b1y2, b2y2))
is_xborder = xborder[1] - xborder[0] > 0
is_yborder = yborder[1] - yborder[0] > 0
if is_xborder:
segment = ((xborder[0], yborder[0]), (xborder[1], yborder[0]))
elif is_yborder:
segment = ((xborder[0], yborder[0]), (xborder[0], yborder[1]))
else:
segment = ((xborder[0], yborder[0]),(xborder[0], yborder[0]))
return segment
# Adapted from dist_Point_to_Segment in:
# http://geomalgorithms.com/a02-_lines.html
def shortest_path_to_segment (entry_point, segment):
""" Finds the point closest to entry_point on segment
Args:
entry_point: Starting point. A tuple containing (x, y)
segment: A line segment. A tuple containing ((x1, y1), (x2,y2))
Returns:
Returns a tuple containing:
1. The point on the line segment closest to entry_point
2. The distance between entry_point and the point above
"""
start, end = segment
v = vector_subtract (end, start)
w = vector_subtract (entry_point, start)
c1 = vector_dot (w, v)
if c1 <= 0:
return (start, vector_dist (entry_point, start))
c2 = vector_dot (v, v)
if (c2 <= c1):
return (end, vector_dist (entry_point, end))
b = float(c1) / float(c2)
Pb = vector_add (start, vector_scalar_multiply(b, v))
return (Pb, vector_dist (entry_point, Pb))
def vector_subtract (p1, p2):
""" Performs vector subtraction on the two vectors
Args:
p1: The first vector. An (x,y) tuple
p2: The second vector. An (x,y) tuple
Returns:
Returns the difference of the two vectors. p1 - p2
"""
x1,y1 = p1
x2,y2 = p2
return (x1-x2, y1-y2)
def vector_add (p1, p2):
""" Performs vector addition on the two vectors
Args:
p1: The first vector. An (x,y) tuple
p2: The second vector. An (x,y) tuple
Returns:
Returns the sum of the two vectors
"""
x1,y1 = p1
x2,y2 = p2
return (x1+x2, y1+y2)
def vector_scalar_multiply (scalar, vector):
""" Gives the scalar product of the given scalar and vector
Args:
        scalar: The scalar value to multiply the vector by
vector: The vector to be multiplied. An (x,y) tuple.
Returns:
Returns the scalar product of scalar and vector
"""
x,y = vector
return (scalar * x, scalar * y)
def vector_dot (p1, p2):
""" Gives the vector dot product of the two vectors
Args:
p1: The first vector. An (x,y) tuple
p2: The second vector. An (x,y) tuple
Returns:
Returns dot product of the two vectors
"""
x1,y1 = p1
x2,y2 = p2
return x1*x2 + y1*y2
def vector_dist (p1, p2):
""" Gives the euclidian distance between p1 and p2
Args:
p1: The first point. An (x,y) tuple
p2: The second point. An (x,y) tuple
Returns:
        Returns the Euclidean distance.
"""
x1,y1 = p1
x2,y2 = p2
return sqrt ((x2 - x1)**2 + (y2 - y1)**2)
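# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original assignment): a tiny
# hypothetical mesh of two boxes, stored as (x1, x2, y1, y2) tuples, that
# share the border x = 10. Names and coordinates are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    box_a = (0, 10, 0, 10)
    box_b = (10, 20, 0, 10)
    demo_mesh = {'boxes': [box_a, box_b],
                 'adj': {box_a: [box_b], box_b: [box_a]}}
    demo_path, demo_explored = find_path((2, 5), (18, 5), demo_mesh)
    print(demo_path)      # list of ((x1, y1), (x2, y2)) segments, source to destination
    print(demo_explored)  # boxes visited by the bidirectional search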
|
Python
|
CL
|
9865eaf374aa25b368e19243b6621baf2ee541b6f412d2bbe84fcfbe9e4e6fee
|
# Generated by Django 2.0.7 on 2018-09-08 13:30
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('intro', '0004_auto_20180908_1204'),
]
operations = [
migrations.CreateModel(
name='Posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creationDate', models.DateField(default=datetime.date.today, verbose_name='Date')),
('publish', models.BooleanField(default=True)),
('type', models.CharField(blank=True, choices=[('Notice', 'Notice'), ('Announcement', 'Announcement'), ('other', 'Other')], max_length=10)),
('title', models.CharField(blank=True, max_length=50)),
('content', models.TextField(blank=True)),
('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='intro.Profile')),
],
options={
'ordering': ['creationDate'],
},
),
]
|
Python
|
CL
|
01368be299fd5196f2df430be7cdb49c5db3ea816f206aed47ea679f979cb47f
|
#
# Author: Michele Van Dyne
# Student/Editor: Michael Nelson
# ID#: 799056112
# Code from Lab 6 Solution utilized for this program
# Lab 7 - Ultima 0.1 (Topic: Threads)
#
# Description: Avatar class that describes the data and operations of the
# main player in the Ultima 0.1 games.
#
import StdDraw
from Tile import Tile
import picture
import numpy
minTorchRadius = float(2.0) # Global variable here for better coding practice
class Avatar:
# Constructor for the avatar class
#
# Input parameters x and y are the initial integer positions of the
# avatar within the world
def __init__(self, x, y, hp, damage, torch):
self.x = int(x) # current x location (integer)
self.y = int(y) # current y location (integer)
self.hp = int(hp) # current hp (integer)
self.damage = int(damage) # current damage that the avatar inflicts on a monster per hit (integer)
self.torch = numpy.double(torch) # how powerful the torch is (default of 4.0) (double-precision float)
self.TORCH_DELTA = numpy.double(0.5) # increment/decrement of torch power (default of 0.5) (double)
# Mutator method to set the avatar to a new location
#
# Input parameters are the new integer x and y position
def setLocation(self, x, y):
self.x = x
self.y = y
# Accessor (getter) method
#
# Returns the current hit points of the avatar (cast as an integer)
def getHitPoints(self):
StdDraw.setFontSize(12) # This will adjust the font size for displaying the avatar's HP
return int(self.hp)
# Mutator (setter) method
#
# Reduces the avatar object's hit points by the given damage amount. Damage cast as an integer
def incurDamage(self, damage):
self.hp -= int(damage)
# Accessor (getter) method
#
# Returns the damage output (per "hit") that the avatar causes to monsters
def getDamage(self):
return self.damage
# Accessor method
#
# Returns the x position of the avatar
def getX(self):
return self.x
# Accessor method
#
# Returns the y position of the avatar
def getY(self):
return self.y
# Accessor method
#
# Returns the current radius of the torch
def getTorchRadius(self):
return self.torch
# Make our torch more powerful
#
# Increases the radius of the torch
def increaseTorch(self):
self.torch += self.TORCH_DELTA
# Make our torch less powerful
#
# Decreases the radius of the torch
def decreaseTorch(self):
self.torch -= self.TORCH_DELTA
if self.torch < minTorchRadius:
self.torch = minTorchRadius
# Draw the avatar
#
# Uses the avatar's current position to place and draw the avatar
# on the canvas
def draw(self):
drawX = (self.x + 0.5) * Tile.SIZE
drawY = (self.y + 0.5) * Tile.SIZE
StdDraw.picture(picture.Picture("avatar.gif"), drawX, drawY)
# Main code to test the avatar class
if __name__ == "__main__":
    # Create an avatar at 1,2
avatar = Avatar(1, 2, 20, 3, 100.0)
print("%d %d %.1f" % (avatar.getX(), avatar.getY(), avatar.getTorchRadius()))
# Change the avatar's position
avatar.setLocation(1, 4)
print("%d %d %.1f" % (avatar.getX(), avatar.getY(), avatar.getTorchRadius()))
# Increase the torch radius
avatar.increaseTorch()
print("%d %d %.1f" % (avatar.getX(), avatar.getY(), avatar.getTorchRadius()))
# Decrease the torch radius 6 times to make sure it doesn't go below 2.0
for i in range(0, 6):
avatar.decreaseTorch()
print("%d %d %.1f" % (avatar.getX(), avatar.getY(), avatar.getTorchRadius()))
|
Python
|
CL
|
9d0038d61671423e07566056a51343e71c771590f1aff652dd2fd2a71ed952e3
|
# -*- coding: utf-8 -*-
from .. import ENCODING
from .. import sng
from io import BytesIO
import difflib
import io
import os
import pkg_resources
import pytest
import sys
import tempfile
import unittest
SIMPLE = """\
#Title=Mÿ nïcë=tïtlë
#Description=I wröte ä söng ...
---
Textüäl cöntents
inclüdig newlines
---
Möre text
""".encode(ENCODING)
CONVERTING_VALUES = """\
#Categories=füü bär, asdf
#Version=3
---
""".encode(ENCODING)
SIMPLE_parsed = {'Text': ['Textüäl cöntents',
'inclüdig newlines',
'---',
'Möre text'],
'Title': 'Mÿ nïcë=tïtlë',
'Description': 'I wröte ä söng ...'}
class SngParseTests(unittest.TestCase):
"""Testing ..sng.parse()."""
def callFUT(self, data):
return sng.parse(data)
def test_parses_head_and_text_into_dict_from_bytes(self):
self.assertEqual(SIMPLE_parsed, self.callFUT(SIMPLE))
def test_post_processes_some_keys(self):
self.assertEqual({
'Text': [],
'Categories': ['füü bär', 'asdf'],
'Version': 3
}, self.callFUT(CONVERTING_VALUES))
def test_sng__parse__2(caplog):
"""It returns `None` if the file is no SongBeamer file.
It is logging the name of the file.
"""
assert sng.parse('äöü'.encode(ENCODING), 'my-song.sng') is None
assert ("'my-song.sng' cannot be parsed: it does not contain `---`."
in caplog.text)
def test_sng__parse__3(caplog):
"""It returns `None` if the file contains invalid data structures.
It is logging the name of the file.
"""
assert sng.parse('a---b'.encode(ENCODING), 'my-song.sng') is None
assert ("'my-song.sng' cannot be parsed: Invalid data structure in line 1:"
" b'a'\n" in caplog.text)
def test_sng__parse__4():
"""It is able to parse files starting with a UTF-8 BOM."""
song = sng.parse(b'\xef\xbb\xbf#Title=B\xc3\xa4r---Tek\xc3\x9ft')
assert song is not None
assert {'Title': 'Bär',
'Text': ['Tekßt']} == song
def test_sng__open__1(tmpdir):
"""It parses head and text into a dict from a file path."""
tmpdir.join('simple.sng').write_binary(SIMPLE)
song = sng.open(str(tmpdir.join('simple.sng')))
assert SIMPLE_parsed == song
assert 'simple.sng' == song.filename
conversion_table = (
('Title', 'Tïtlë'.encode(ENCODING), 'Tïtlë'),
('Text', b'a\r\nb', ['a', 'b']),
('Version', b'3', 3),
('LangCount', b'1', 1),
('Categories', 'föö, bar baz'.encode(ENCODING), ['föö', 'bar baz']),
('Categories', b'qwe', ['qwe']),
('Comments', b'5HNkZg==', 'äsdf'),
('Chords', b'MTMsMCxEDTcsMTAsRQ0=', [['13', '0', 'D'], ['7', '10', 'E']]),
)
@pytest.mark.parametrize('key,input,output', conversion_table)
def test_sng___Importer__import__1(key, input, output):
"""It converts encoded values to text."""
importer = sng._Importer(ENCODING)
assert importer._import(key, input) == output
@pytest.mark.parametrize('key,output,input', conversion_table)
def test_sng___Exporter__export__1(key, output, input):
"""It converts text to encoded values."""
importer = sng._Exporter(ENCODING, None, None)
assert importer._export(key, input) == output
class SngExportTests(unittest.TestCase):
"""Testing ..sng.SNG.export()."""
def test_export_converts_data_back_to_byte_stream(self):
from .. import SNG
sng = SNG()
sng.update({
'Version': 3,
'Categories': ['foo bar', 'baz'],
'Text': ['Textüäl cöntents',
'inclüdig newlines',
'---',
'Möre text'],
'Title': 'Mÿ nïcë=tïtlë'})
export_result = BytesIO()
sng.export(export_result)
self.assertEqual(
'#Categories=foo bar, baz\r\n'
'#Title=Mÿ nïcë=tïtlë\r\n'
'#Version=3\r\n'
'---\r\n'
'Textüäl cöntents\r\n'
'inclüdig newlines\r\n'
'---\r\n'
'Möre text'.encode(ENCODING), export_result.getvalue())
def test_sng__SNG__export__2():
"""It does not break if there is no `Text` in the song."""
song = sng.SNG()
song['Title'] = 'my title'
export_result = BytesIO()
song.export(export_result)
assert ('#Title=my title\r\n'
'---\r\n'.encode(ENCODING) == export_result.getvalue())
class Sng2sngTests(unittest.TestCase):
"""Testing ..sng.sng2sng()."""
def callFUT(self, *args):
from ..sng import sng2sng
orig_stdout = sys.stdout
orig_argv = sys.argv[:]
stdout = io.StringIO()
argv = ['sng2sng']
argv.extend(args)
try:
sys.stdout = stdout
sys.argv[:] = argv
try:
sng2sng()
except SystemExit as e:
raise SystemExit(str(e), stdout.getvalue())
finally:
sys.stdout = orig_stdout
sys.argv[:] = orig_argv
def test_wrong_number_of_args_leads_to_error_message(self):
with self.assertRaises(SystemExit) as err:
self.callFUT('input.sng')
self.assertEqual(('1', 'Usage: sng2sng <input-file> <output-file>\n'),
err.exception.args)
def test_output_is_equal_input_after_conversion(self):
# Caution: keys in `in_filename` are sorted, because export sorts
# keys alphabetically to be compatible across python versions!
in_filename = pkg_resources.resource_filename(
'icemac.songbeamer.tests', 'example.sng')
try:
out_fd, out_filename = tempfile.mkstemp()
os.close(out_fd)
self.callFUT(in_filename, out_filename)
with open(in_filename, 'r') as in_file:
in_file_cont = in_file.readlines()
with open(out_filename, 'r') as out_file:
out_file_cont = out_file.readlines()
# There are no differences between input and output:
self.assertEqual(
[], list(difflib.context_diff(in_file_cont, out_file_cont)))
finally:
os.unlink(out_filename)
|
Python
|
CL
|
25e3bdebeae604bc670823b739dfbcf29e0642aca664e6137cb0ac59b6c6db02
|
"""
Glossary:
:userid:
The User model instance's ID (a ``uuid.UUID`` instance).
:principal:
In the context of a user's principal, the user's email.
"""
import logging
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.security import Authenticated
from pyramid.settings import asbool
from passlib.context import CryptContext
from paildocket.models import User
logger = logging.getLogger(__name__)
PASSWORD_CONTEXT_DEFAULT_POLICY = {
'schemes': ('bcrypt',),
'default': 'bcrypt',
'bcrypt__default_rounds': 12,
}
def create_password_context(**replacement_kwargs):
for key, value in list(replacement_kwargs.items()):
if value is None:
del replacement_kwargs[key]
kwargs = PASSWORD_CONTEXT_DEFAULT_POLICY.copy()
kwargs.update(replacement_kwargs)
return CryptContext(**kwargs)
Administrator = 'paildocket.Administrator'
ViewPermission = 'paildocket.permission.View'
EditPermission = 'paildocket.permission.Edit'
EditAndViewPermission = (ViewPermission, EditPermission)
def _get_principals(userid, request):
user = User.from_userid(request.db_session, userid)
if user is None:
return None
principals = [Authenticated]
if user.admin:
principals.append(Administrator)
principals.append(user.principal)
return principals
MINUTE = 60
HOUR = 60 * MINUTE
DAY = 24 * HOUR
def includeme(config):
bcrypt_rounds = config.registry.settings.get(
'paildocket.password.bcrypt_rounds')
config.registry['password_context'] = create_password_context(
bcrypt__default_rounds=bcrypt_rounds
)
_auth_debug = asbool(
config.registry.settings.get('paildocket.authentication.debug', False))
_authn_policy = AuthTktAuthenticationPolicy(
secret=config.registry.settings['paildocket.authentication.secret'],
callback=_get_principals,
timeout=14 * DAY,
reissue_time=1 * DAY,
max_age=30 * DAY,
debug=_auth_debug
)
config.set_authentication_policy(_authn_policy)
_authz_policy = ACLAuthorizationPolicy()
config.set_authorization_policy(_authz_policy)
|
Python
|
CL
|
a81e0f078bc0de4b8fc7033a8c2a8a79055730362ea6cd0b6b87d1839afcc0a3
|
'''
Sample predictive model.
You must supply at least 4 methods:
- fit: trains the model.
- predict: uses the model to perform predictions.
- save: saves the model.
- load: reloads the model.
'''
import pickle
import numpy as np # We recommend to use numpy arrays
from os.path import isfile
import sklearn
from sklearn import pipeline as ppl
from sklearn import preprocessing as pp
from sklearn import decomposition as dc
from sklearn import feature_selection as fs
from sklearn import cluster as cls
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from preprocessing import Preprocessing as CustPp
from time import time
datapath = "../public_data/"
class model:
def __init__(self):
'''
This constructor is supposed to initialize data members.
Use triple quotes for function documentation.
'''
self.num_train_samples=0
self.num_feat=1
self.num_labels=1
self.vt = 0.87
self.is_trained=False
self.mod = RandomForestClassifier(n_estimators = 100)
'''
The HP of this predictor were found with the ./HyperParameter/model.py GS implementation
'''
self.ppl = ppl.Pipeline([('prepro', CustPp()),
('mod', self.mod)])
def fit(self, X, Y):
'''
        This function trains the model parameters (here, the preprocessing +
        random forest pipeline).
Args:
X: Training data matrix of dim num_train_samples * num_feat.
y: Training label matrix of dim num_train_samples * num_labels.
Both inputs are numpy arrays.
            For classification, labels could be either numbers 0, 1, ... c-1 for c classes
            or a one-hot encoded vector of zeros, with a 1 at the kth position for class k.
            The AutoML format supports one-hot encoding, which also works for multi-label problems.
Use data_converter.convert_to_num() to convert to the category number format.
For regression, labels are continuous values.
'''
# For multi-class problems, convert target to be scikit-learn compatible
# into one column of a categorical variable
y=self.convert_to_num(Y, verbose=False)
self.num_train_samples = X.shape[0]
if X.ndim>1: self.num_feat = X.shape[1] # Does not work for sparse matrices
print("FIT: dim(X)= [{:d}, {:d}]".format(self.num_train_samples, self.num_feat))
num_train_samples = y.shape[0]
if y.ndim>1: self.num_labels = y.shape[1]
print("FIT: dim(y)= [{:d}, {:d}]".format(num_train_samples, self.num_labels))
if (self.num_train_samples != num_train_samples):
print("ARRGH: number of samples in X and y do not match!")
self.ppl.fit(X, y)
self.is_trained=True
print("Done fitting !")
def predict(self, X):
'''
        This function provides predictions of labels on (test) data.
        Here we return class probabilities from the fitted pipeline.
Make sure that the predicted values are in the correct format for the scoring
metric. For example, binary classification problems often expect predictions
        in the form of a discriminant value (if the area under the ROC curve is the metric)
        rather than predictions of the class labels themselves. For multi-class or multi-label
        problems, class probabilities are often expected if the metric is cross-entropy.
        Scikit-learn also has a function predict_proba; we do not require it, and
        the predict function itself may return probabilities (as it does here).
'''
num_test_samples = X.shape[0]
if X.ndim>1: num_feat = X.shape[1]
print("PREDICT: dim(X)= [{:d}, {:d}]".format(num_test_samples, num_feat))
if (self.num_feat != num_feat):
print("ARRGH: number of features in X does not match training data!")
print("PREDICT: dim(y)= [{:d}, {:d}]".format(num_test_samples, self.num_labels))
# Return predictions as class probabilities
y = self.ppl.predict_proba(X)
return y
def save(self, path="./"):
with open(path + '_model.pickle', 'wb') as f:
            print('model name : ', path + '_model.pickle')
pickle.dump(self , f)
def load(self, path="./"):
modelfile = path + '_model.pickle'
if isfile(modelfile):
with open(modelfile, 'rb') as f:
self = pickle.load(f)
print("Model reloaded from: " + modelfile)
return self
def convert_to_num(self, Ybin, verbose=True):
''' Convert binary targets to numeric vector (typically classification target values)'''
if verbose: print("Converting to numeric vector")
Ybin = np.array(Ybin)
if len(Ybin.shape) ==1: return Ybin
classid=range(Ybin.shape[1])
Ycont = np.dot(Ybin, classid)
if verbose: print(Ycont)
return Ycont
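# Hedged illustration (not part of the original starter kit): convert_to_num
# maps a one-hot target matrix to class indices, e.g.
#     model().convert_to_num(np.array([[1, 0, 0], [0, 0, 1]]), verbose=False)
# would give array([0, 2]).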
def parseFile(path):
with open(path, "r") as f:
data = []
for line in f:
bits = []
for bit in line.split(" "):
bits.append(float(bit))
data.append(bits)
return np.array(data)
if __name__ == '__main__':
traindata = parseFile(datapath+"cifar10_train.data")
validdata = parseFile(datapath+"cifar10_valid.data")
label = parseFile(datapath+"cifar10_train.solution")
model = model()
start = time()
model.fit(traindata, label)
fittime = time()
model.predict(validdata)
predtime = time()
print("Fitting : {}\nPredicting : {}\nScore : {}".format(fittime-start, predtime-fittime, model.ppl.score(traindata, label.argmax(axis=1))))
|
Python
|
CL
|
5ab28445789709644db19bdf3051d52d495de1e038cd84cfcf4a5f1cfd0cf7ed
|
#!/usr/bin/python
from __future__ import absolute_import, division, print_function, unicode_literals
""" Landscape from ElevationMap with model tanks and buildings. Demonstrates using
a function to draw the various parts of the tank and the ElevationMap.pitch_roll()
method to make models conform (approximately) to the surface of an ElevationMap.
The tank gun is raised as the mouse moves the view point to look up. This shows how to
combine various rotations about different axes without the objects falling apart!
This demo also uses a tkinter tkwindow but creates it as method of Display. Compare
with the system used in demos/MarsStation.py
Also look out for:
2D shader usage. Drawing onto an ImageSprite canvas placed in front of the camera
immediately after reset(). This is used to generate a splash screen during file
loading and to draw a telescopic sight view and a navigation map.
"""
import math, random, time, traceback
import demo
import pi3d
LOGGER = pi3d.Log(__name__, level='INFO')
# Create a Tkinter window
winw, winh, bord = 1200, 600, 0 #64MB GPU memory setting
# winw,winh,bord = 1920,1200,0 #128MB GPU memory setting
DISPLAY = pi3d.Display.create(tk=True, window_title='Tiger Tank demo in Pi3D',
w=winw, h=winh - bord, far=3000.0,
background=(0.4, 0.8, 0.8, 1), frames_per_second=16)
#inputs = InputEvents()
#inputs.get_mouse_movement()
pi3d.Light(lightpos=(-1, -1, 1), lightcol =(0.8, 0.8, 0.8), lightamb=(0.30, 0.30, 0.32))
win = DISPLAY.tkwin
shader = pi3d.Shader('uv_bump')
flatsh = pi3d.Shader('uv_flat')
shade2d = pi3d.Shader('2d_flat')
#========================================
# create splash screen and draw it
splash = pi3d.ImageSprite("textures/tiger_splash.jpg", shade2d, w=10, h=10, z=0.2)
splash.draw()
DISPLAY.swap_buffers()
# create environment cube
ectex = pi3d.loadECfiles('textures/ecubes/Miramar', 'miramar_256',
suffix='png')
myecube = pi3d.EnvironmentCube(size=1800.0, maptype='FACES')
myecube.set_draw_details(flatsh, ectex)
# Create elevation map
mapwidth = 2000.0
mapdepth = 2000.0
mapheight = 100.0
mountimg1 = pi3d.Texture('textures/mountains3_512.jpg')
bumpimg = pi3d.Texture('textures/grasstile_n.jpg')
tigerbmp = pi3d.Texture('models/Tiger/tiger_bump.jpg')
topbmp = pi3d.Texture('models/Tiger/top_bump.jpg')
mymap = pi3d.ElevationMap(mapfile='textures/mountainsHgt2.png',
width=mapwidth, depth=mapdepth,
height=mapheight, divx=64, divy=64)
mymap.set_draw_details(shader, [mountimg1, bumpimg], 128.0, 0.0)
FOG = (0.5, 0.5, 0.5, 0.8)
mymap.set_fog(FOG, 1000.0)
#Load tank
tank_body = pi3d.Model(file_string='models/Tiger/body.obj', sx=0.1, sy=0.1, sz=0.1)
tank_body.set_shader(shader)
tank_body.set_normal_shine(tigerbmp)
tank_gun = pi3d.Model(file_string='models/Tiger/gun.obj')
tank_gun.set_shader(shader)
tank_turret = pi3d.Model(file_string='models/Tiger/turret.obj')
tank_turret.set_shader(shader)
tank_turret.set_normal_shine(topbmp)
### because these children will inherit matrix operation applied to
# their parent they don't need to be scaled
tank_body.add_child(tank_turret)
tank_turret.add_child(tank_gun)
#Load church
x, z = 20, -320
y = mymap.calcHeight(x,z)
church = pi3d.Model(file_string='models/AllSaints/AllSaints.obj',
sx=0.1, sy=0.1, sz=0.1, x=x, y=y, z=z)
church.set_shader(shader)
#Load cottages
x, z = 250,-40
y = mymap.calcHeight(x,z)
cottages = pi3d.Model(file_string='models/Cottages/cottages_low.obj',
sx=0.1, sy=0.1, sz=0.1, x=x, y=y, z=z, ry=-5)
cottages.set_shader(shader)
#cross-hairs in gun sight
targtex = pi3d.Texture("textures/target.png", blend=True)
target = pi3d.ImageSprite(targtex, shade2d, w=10, h=10, z=0.4)
target.set_2d_size(targtex.ix, targtex.iy, (DISPLAY.width - targtex.ix)/2,
(DISPLAY.height - targtex.iy)/2)
#telescopic gun sight
sniptex = pi3d.Texture("textures/snipermode.png", blend=True)
sniper = pi3d.ImageSprite(sniptex, shade2d, w=10, h=10, z=0.3)
scx = DISPLAY.width/sniptex.ix
scy = DISPLAY.height/sniptex.iy
if scy > scx:
scx = scy # enlarge to fill screen but use same scale for both directions
scw, sch = sniptex.ix * scx, sniptex.iy * scx
sniper.set_2d_size(scw, sch, (DISPLAY.width - scw)/2,(DISPLAY.height - sch)/2)
#corner map and dots
smmap = pi3d.ImageSprite(mountimg1, shade2d, w=10, h=10, z=0.2)
smmap.set_2d_size(w=200, h=200, x=DISPLAY.width - 200, y=DISPLAY.height - 200)
dot1 = pi3d.ImageSprite("textures/red_ball.png", shade2d, w=10, h=10, z=0.1)
dot1.set_2d_size(w=10, h=10) # 10x10 pixels
dot2 = pi3d.ImageSprite("textures/blu_ball.png", shade2d, w=10, h=10, z=0.05)
dot2.set_2d_size(w=10, h=10)
#player tank vars
tankrot = 180.0
turret = 0.0
tankroll = 0.0 #side-to-side roll of tank on ground
tankpitch = 0.0 #to and fro pitch of tank on ground
enemyroll = 0.0
enemypitch = 0.0
#key presses
mymouse = pi3d.Mouse(restrict = False)
mymouse.start()
omx, omy = mymouse.position()
#position vars
mouserot = 0.0
tilt = 0.0
avhgt = 0.85
xm, oxm = 0.0, -1.0
zm, ozm = -200.0, -1.0
ym = mymap.calcHeight(xm, zm) + avhgt
#enemy tank vars
etx = 120
etz = -120
etr = 0.0
ltm = 0.0 #last pitch roll check
smode = False #sniper mode
def drawTiger(x, y, z, rot, roll, pitch, turret, gunangle):
tank_body.position(x, y, z)
tank_body.rotateToX(pitch)
tank_body.rotateToY(rot-90)
tank_body.rotateToZ(roll)
tank_turret.rotateToY(turret - rot)
tank_gun.rotateToZ(gunangle)
tank_body.draw() # children drawn too.
# Update display before we begin (user might have moved window)
win.update()
DISPLAY.resize(win.winx, win.winy, win.width, win.height - bord)
is_running = True
CAMERA = pi3d.Camera.instance()
try:
while DISPLAY.loop_running():
mx, my = mymouse.position()
mouserot -= (mx-omx)*0.2
tilt += (my-omy)*0.2
omx=mx
omy=my
CAMERA.reset()
dot1.set_2d_location(DISPLAY.width - 105.0 + 200.0*xm/mapwidth,
DISPLAY.height - 105.0 - 200.0*zm/mapdepth)
dot2.set_2d_location(DISPLAY.width - 105.0 + 200.0*etx/mapwidth,
DISPLAY.height - 105.0 - 200.0*etz/mapdepth)
dot1.draw()
dot2.draw()
smmap.draw()
# tilt can be used to prevent the view from going under the landscape!
sf = 60 - 55.0 / abs(tilt) if tilt < -1 else 5.0
xoff = sf * math.sin(math.radians(mouserot))
yoff = abs(1.25 * sf * math.sin(math.radians(tilt))) + 3.0
zoff = -sf * math.cos(math.radians(mouserot))
if tilt > -5 and smode == False: # zoom in
CAMERA.reset(lens=(1, 3000, 12.5, DISPLAY.width / DISPLAY.height))
smode = True
elif tilt <= -5 and smode == True: # zoom out
CAMERA.reset(lens=(1, 3000, 45, DISPLAY.width / DISPLAY.height))
smode = False
#adjust CAMERA position in and out so we can see our tank
CAMERA.rotate(tilt, mouserot, 0)
CAMERA.position((xm + xoff, ym + yoff + 5, zm + zoff))
oxm, ozm = xm, zm
#draw player tank with smoothing on pitch and roll to lessen jerkiness
drawTiger(xm, ym, zm, tankrot, tankroll, tankpitch, 180 - turret, ((tilt+20)*-1.0 if tilt > -20.0 else 0.0))
mymap.draw() # Draw the landscape
#Draw enemy tank
etdx = -math.sin(math.radians(etr))
etdz = -math.cos(math.radians(etr))
etx += etdx
etz += etdz
#ety = mymap.calcHeight(etx, etz) + avhgt # see below
etr += 0.5
pitch, roll = mymap.pitch_roll(etx, etz)
ety = mymap.ht_y + avhgt # calcHeight is now called as part of pitch_roll
enemypitch = enemypitch * 0.9 + pitch * 0.1
enemyroll = enemyroll * 0.9 + roll * 0.1
drawTiger(etx, ety, etz, etr, enemyroll, enemypitch, etr, 0)
#Draw buildings
church.draw()
cottages.draw()
myecube.position(xm, ym, zm)
myecube.draw() #Draw environment cube
if smode:
""" because some of the overlays have blend=True they must be done AFTER
other objects have been rendered.
"""
target.draw()
sniper.draw()
        # turns player tank turret towards the center of the screen, which has a crosshair
if turret + 2.0 < mouserot:
turret += 2.0
if turret - 2.0 > mouserot:
turret -= 2.0
try:
win.update()
except Exception as e:
LOGGER.info("bye,bye2 %s", e)
DISPLAY.destroy()
try:
win.destroy()
except:
pass
mymouse.stop()
exit()
if win.ev == "resized":
LOGGER.info("resized")
DISPLAY.resize(win.winx, win.winy, win.width, win.height-bord)
CAMERA.reset((DISPLAY.near, DISPLAY.far, DISPLAY.fov,
DISPLAY.width / float(DISPLAY.height)))
win.resized = False
if win.ev == "key":
mv = False
if win.key == "w":
xm -= math.sin(math.radians(tankrot)) * 2
zm -= math.cos(math.radians(tankrot)) * 2
mv = True
elif win.key == "s":
xm += math.sin(math.radians(tankrot)) * 2
zm += math.cos(math.radians(tankrot)) * 2
mv = True
elif win.key == "a":
tankrot -= 2
elif win.key == "d":
tankrot += 2
elif win.key == "p":
pi3d.screenshot("TigerTank.jpg")
elif win.key == "Escape":
try:
LOGGER.info("bye,bye1")
DISPLAY.destroy()
try:
win.destroy()
except:
pass
mymouse.stop()
exit()
except:
pass
if mv: # moved so recalc pitch_roll
pitch, roll = mymap.pitch_roll(xm, zm)
tankpitch = tankpitch * 0.9 + pitch * 0.1
tankroll = tankroll * 0.9 + roll * 0.1
ym = mymap.ht_y + avhgt # calcHeight done by pitch_roll
if win.ev=="drag" or win.ev=="click" or win.ev=="wheel":
xm -= math.sin(math.radians(tankrot)) * 2
zm -= math.cos(math.radians(tankrot)) * 2
ym = (mymap.calcHeight(xm, zm) + avhgt)
else:
win.ev="" #clear the event so it doesn't repeat
except Exception as e:
LOGGER.info("bye,bye3 %s", e)
DISPLAY.destroy()
try:
win.destroy()
except:
pass
mymouse.stop()
exit()
|
Python
|
CL
|
cbcba716c78b45d01e9d9e17811547e6a96149eb63cb0b5024fe838bae77b7a1
|
# -*- coding: utf-8 -*-
"""
Abstract Factory pattern
In the Factory Method pattern, one factory produces one product
from a single product family, whereas an Abstract Factory creates
related products across several different product families.
"""
class AbstractDecoder(object):
"""
Abstract Product
"""
pass
class UTF8Decoder(AbstractDecoder):
"""
Concrete Product
"""
@staticmethod
def decode(message):
return repr(message.decode('utf-8'))
class GBKDecoder(AbstractDecoder):
"""
Concrete Product
"""
@staticmethod
def decode(message):
return repr(message.decode('gbk'))
class AbstractEncoder(object):
"""
Abstract Encoder
"""
pass
class UTF8Encoder(AbstractEncoder):
"""
Concrete Product
"""
@staticmethod
def encode(message):
return repr(message.encode('utf-8'))
class GBKEncoder(AbstractEncoder):
"""
Concrete Product
"""
@staticmethod
def encode(message):
return repr(message.encode('gbk'))
class AbstractFactory(object):
"""
Abstract Factory
"""
def __init__(self, codes, encoding):
self.codes = codes
self.encoding = encoding
self.codec = {"encoder": EncoderFactory, "decoder": DecoderFactory}
def decode(self, message):
return self.codec[self.codes](encoding=self.encoding).decode(message)
def encode(self, message):
return self.codec[self.codes](encoding=self.encoding).encode(message)
class DecoderFactory(AbstractFactory):
"""
Concrete Factory
"""
def __init__(self, encoding='utf-8'):
self.encoding = encoding
def decode(self, message):
encodings = {"utf-8": UTF8Decoder, "gbk": GBKDecoder}
return encodings[self.encoding]().decode(message)
class EncoderFactory(AbstractFactory):
"""
Concrete Factory
"""
def __init__(self, encoding='utf-8'):
self.encoding = encoding
def encode(self, message):
encodings = {"utf-8": UTF8Encoder, "gbk": GBKEncoder}
return encodings[self.encoding]().encode(message)
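# Hedged illustration (not part of the original example): the concrete
# factories can also be used on their own, bypassing the AbstractFactory
# wrapper, e.g.
#     print DecoderFactory(encoding='utf-8').decode("工厂方法")
#     print EncoderFactory(encoding='gbk').encode(u"工厂方法")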
if __name__ == '__main__':
utf8_encoder = AbstractFactory("encoder", "utf-8")
print utf8_encoder.encode(u"工厂方法")
utf8_decoder = AbstractFactory("decoder", "utf-8")
print utf8_decoder.decode("工厂方法")
gbk_encoder = AbstractFactory("encoder", "gbk")
print gbk_encoder.encode(u"工厂方法")
gbk_decoder = AbstractFactory("decoder", "gbk")
print gbk_decoder.decode("工厂方法".decode("utf-8").encode("gbk"))
|
Python
|
CL
|
a53f48673530c91c086b2e4c6a0769671d35133c3d5ef6e3b418b11906d0ff0f
|
#!/bin/env python
import unittest
from unittest.mock import Mock
from pyats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError,\
SchemaMissingKeyError
from genie.libs.parser.iosxe.show_access_session import ShowAccessSession,\
ShowAccessSessionInterfaceDetails
class test_show_access_session(unittest.TestCase):
dev1 = Device(name='empty')
dev_c3850 = Device(name='c3850')
empty_output = {'execute.return_value': ' '}
golden_parsed_output = {
'session_count': 1,
'interfaces': {
'GigabitEthernet1/0/1': {
'interface': 'GigabitEthernet1/0/1',
'client': {
'f4cf.beff.9cb1': {
'client': 'f4cf.beff.9cb1',
'method': 'dot1x',
'domain': 'DATA',
'status': 'authenticator',
'session': {
'000000000000000BB6FC9EAF': {
'session_id': '000000000000000BB6FC9EAF',
}
}
}
}
}
}
}
golden_output = {'execute.return_value': '''\
Interface MAC Address Method Domain Status Fg Session ID
--------------------------------------------------------------------------------------------
Gi1/0/1 f4cf.beff.9cb1 dot1x DATA Auth 000000000000000BB6FC9EAF
Session count = 1
'''
}
def test_empty(self):
self.dev1 = Mock(**self.empty_output)
obj = ShowAccessSession(device=self.dev1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output)
obj = ShowAccessSession(device=self.dev_c3850)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
class test_show_access_session_interface_details(unittest.TestCase):
maxDiff = None
empty_output = {'execute.return_value': ' '}
golden_parsed_output = {
'interfaces': {
'GigabitEthernet1/0/21': {
'mac_address': {
'0800.37ff.f585': {
'iif_id': '0x105B0C0000005F5',
'ipv6_address': 'Unknown',
'ipv4_address': '10.4.1.1',
'user_name': 'genie123',
'status': 'Authorized',
'domain': 'DATA',
'current_policy': 'Test_DOT1X-DEFAULT_V1',
'oper_host_mode': 'multi-auth',
'oper_control_dir': 'both',
'session_timeout': {
'type': 'N/A'
},
'restart_timeout': 'N/A',
'common_session_id': '0A7820020000413CCCE37640',
'acct_session_id': '0x00007EAF',
'handle': '0x7100056D',
'server_policies': {
1: {
'name': 'ACS ACL',
'policies': 'xACSACLx-IP-Test_ACL_XeroxPrinters_v1-597a95c4'
}
},
'method_status': {
'dot1x': {
'method': 'dot1x',
'state': 'Stopped'
},
'mab': {
'method': 'mab',
'state': 'Authc Success'
}
}
}
}
}
}
}
golden_output = {'execute.return_value': '''\
dev1#show access-session interface Gi1/0/21 details
Interface: GigabitEthernet1/0/21
IIF-ID: 0x105B0C0000005F5
MAC Address: 0800.37ff.f585
IPv6 Address: Unknown
IPv4 Address: 10.4.1.1
User-Name: genie123
Status: Authorized
Domain: DATA
Oper host mode: multi-auth
Oper control dir: both
Session timeout: N/A
Restart timeout: N/A
Common Session ID: 0A7820020000413CCCE37640
Acct Session ID: 0x00007EAF
Handle: 0x7100056D
Current Policy: Test_DOT1X-DEFAULT_V1
Server Policies:
ACS ACL: xACSACLx-IP-Test_ACL_XeroxPrinters_v1-597a95c4
Method status list:
Method State
dot1x Stopped
mab Authc Success
'''
}
def test_empty(self):
self.dev1 = Mock(**self.empty_output)
obj = ShowAccessSessionInterfaceDetails(device=self.dev1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(interface='GigabitEthernet1/0/21')
def test_golden(self):
self.dev_c3850 = Mock(**self.golden_output)
obj = ShowAccessSessionInterfaceDetails(device=self.dev_c3850)
parsed_output = obj.parse(interface='GigabitEthernet1/0/21')
self.assertEqual(parsed_output,self.golden_parsed_output)
if __name__ == '__main__':
unittest.main()
|
Python
|
CL
|
af5fd20378e6715b4c3b05b88c17dfdc63f8830ada30d596406fb46e93c2101b
|
#! /usr/bin/env python
from miasm2.core.cpu import parse_ast
from miasm2.arch.x86.arch import mn_x86, base_expr, variable
from miasm2.core.bin_stream import bin_stream
from miasm2.core import parse_asm
from miasm2.expression.expression import *
from elfesteem import *
from pdb import pm
from miasm2.core import asmbloc
import struct
e = pe_init.PE()
s_text = e.SHList.add_section(name="text", addr=0x1000, rawsize=0x1000)
s_iat = e.SHList.add_section(name="iat", rawsize=0x100)
new_dll = [({"name": "USER32.dll",
"firstthunk": s_iat.addr}, ["MessageBoxA"])]
e.DirImport.add_dlldesc(new_dll)
s_myimp = e.SHList.add_section(name="myimp", rawsize=len(e.DirImport))
e.DirImport.set_rva(s_myimp.addr)
reg_and_id = dict(mn_x86.regs.all_regs_ids_byname)
def my_ast_int2expr(a):
return ExprInt32(a)
def my_ast_id2expr(t):
return reg_and_id.get(t, ExprId(t, size=32))
my_var_parser = parse_ast(my_ast_id2expr, my_ast_int2expr)
base_expr.setParseAction(my_var_parser)
blocs, symbol_pool = parse_asm.parse_txt(mn_x86, 32, '''
main:
CALL cipher_code
CALL msgbox_encrypted_start
CALL cipher_code
RET
cipher_code:
PUSH EBP
MOV EBP, ESP
LEA ESI, DWORD PTR [msgbox_encrypted_start]
LEA EDI, DWORD PTR [msgbox_encrypted_stop]
loop:
XOR BYTE PTR [ESI], 0x42
INC ESI
CMP ESI, EDI
JBE loop
MOV ESP, EBP
POP EBP
RET
msgbox_encrypted_start:
PUSH 0
PUSH title
PUSH msg
PUSH 0
CALL DWORD PTR [ MessageBoxA ]
RET
.dontsplit
msgbox_encrypted_stop:
.long 0
title:
.string "Hello!"
msg:
.string "World!"
''')
# fix shellcode addr
symbol_pool.set_offset(symbol_pool.getby_name("main"), e.rva2virt(s_text.addr))
symbol_pool.set_offset(symbol_pool.getby_name_create(
"MessageBoxA"), e.DirImport.get_funcvirt('MessageBoxA'))
e.Opthdr.AddressOfEntryPoint = s_text.addr
for b in blocs[0]:
print b
print "symbols"
print symbol_pool
resolved_b, patches = asmbloc.asm_resolve_final(
mn_x86, blocs[0], symbol_pool)
print patches
ad_start = symbol_pool.getby_name_create("msgbox_encrypted_start").offset
ad_stop = symbol_pool.getby_name_create("msgbox_encrypted_stop").offset
# cipher code
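# XOR the assembled bytes of the msgbox block with the same 0x42 key used by the
# runtime loop above, so the packed code decrypts itself before executing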
new_patches = dict(patches)
for ad, val in patches.items():
if ad_start <= ad < ad_stop:
new_patches[ad] = "".join([chr(ord(x) ^ 0x42) for x in val])
for offset, raw in new_patches.items():
e.virt[offset] = raw
open('box_x86_32_enc.bin', 'wb').write(str(e))
|
Python
|
CL
|
8be323c7f71f04081be85836b8ad4a93cc49d8eb4fddb9eac3dea798afb71b19
|
'''
Author: Tam M Pham
Created date: 13/02/2019
Modified date: 28/03/2019
Description:
Using Gradient Boosting algorithm for bike prediction
'''
import numpy as np
import pandas as pd
import time
from common import Common
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import math
from sklearn import preprocessing # label encoder
from sklearn import ensemble # library of Gradient Boosting
from sklearn.model_selection import train_test_split # split data to training set and tesing set
from sklearn.metrics import mean_squared_error # calculate MSE
from sklearn.externals import joblib # for saving and loading model
import sys
start = time.time()
Common.create_folder(Common.PREDICTING_PLOTS_DIR)
# get clusters dataframe
clusters = Common.get_dataframe_from_file(Common.CLUSTERED_DATA_FILE_FULL_PATH, True)
# get all data dataframe
all_df = Common.get_dataframe_from_file(Common.CLEAN_DATA_FILE_FULL_PATH, True)
all_df = all_df[(all_df["Date"] >= "2016-10-14") & (all_df["Date"] <= "2017-10-14")].reset_index(drop=True)
# left merge these two dataframes together based on Number, Date and Time
merged_df = pd.merge(all_df
, clusters[["Number", "Time", "Cluster"]]
, on=["Number", "Time"]
, how="left")
# Calculate activity in each cluster
cluster_act = merged_df.copy()
cluster_act["Activity"] = cluster_act["Check In"] + cluster_act["Check Out"]
cluster_act = cluster_act.groupby(["Number", "Cluster"])["Activity"].sum().reset_index(name="Total Activity")
# Find the most active station per cluster
top_stations = cluster_act.copy()
top_stations = top_stations[top_stations.groupby(["Cluster"])["Total Activity"].transform(max) == top_stations["Total Activity"]].reset_index(drop=True)
print(top_stations)
# Find the least active station per cluster
bot_stations = cluster_act.copy()
bot_stations = bot_stations[bot_stations.groupby(["Cluster"])["Total Activity"].transform(min) == bot_stations["Total Activity"]].reset_index(drop=True)
print(bot_stations)
# Turn the station number of the most active station and the least active station into a list
selected = top_stations["Number"].tolist() + bot_stations["Number"].tolist()
# Randomly select 3 more stations in each cluster for the Gradient Boosting modelling
for i in range(1, Common.CLUSTERING_NUMBER + 1): # iterate through cluster 1 to cluster 4
# pick 3 random station numbers that are neither the most active nor the least active station
subset = merged_df[(merged_df["Cluster"] == i) & (~merged_df["Number"].isin(selected))].sample(n = 3)
rand_list = subset["Number"].tolist()
selected = selected + rand_list
print("Stations selected randomly is ", selected)
############################################################################
######################## PREPARE DATA FOR MODELLING ########################
############################################################################
# get details of stations based on the selection above
time_df = merged_df[merged_df["Number"].isin(selected)].copy()
# group time into 48 factors
time_df["Time"] = time_df["Time"].apply(lambda x: Common.refine_time(x))
time_df["Season"] = time_df["Date"].apply(lambda x: Common.define_season(x))
time_df[Common.PREDICTING_FACTOR] = time_df["Available Stands"]
time_df = time_df.groupby(["Number", "Name", "Address", "Date", "Time", "Bike Stands", "Weekday", "Season"]).agg({Common.PREDICTING_FACTOR: "mean", "Cluster": "first"}).reset_index()
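# Build a lag feature: the previous interval's available stands per station and day;
# the first interval of each day has no predecessor, so it falls back to its own value instead of NaN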
time_df[Common.PREVIOUS_PREDICTING_FACTOR] = time_df.groupby(["Number", "Name", "Address", "Date"])[Common.PREDICTING_FACTOR].shift(1)
time_df[Common.PREVIOUS_PREDICTING_FACTOR] = time_df.apply(
lambda row: row[Common.PREDICTING_FACTOR] if np.isnan(row[Common.PREVIOUS_PREDICTING_FACTOR]) else row[Common.PREVIOUS_PREDICTING_FACTOR],
axis=1
)
# convert float64 columns back to int64 (the groupby/agg step promotes these numeric columns to float64)
time_df[Common.PREDICTING_FACTOR] = time_df[Common.PREDICTING_FACTOR].astype(np.int64)
time_df[Common.PREVIOUS_PREDICTING_FACTOR] = time_df[Common.PREVIOUS_PREDICTING_FACTOR].astype(np.int64)
# read CSV file containing geographical info
geo = Common.get_dataframe_from_file("./geo-data/db-geo.csv", True)
gb_df = pd.merge(time_df
, geo[["Number", "Latitude", "Longitude"]]
, on=["Number"]
, how="left")
# read CSV file containing weather info
weather = Common.get_dataframe_from_file("./weather-data/M2_weather.csv", True)
weather = weather.drop_duplicates(subset=["station_id", "datetime", "AtmosphericPressure", "WindSpeed", "AirTemperature"], keep='first')
weather["datetime"] = pd.to_datetime(weather["datetime"], format="%m/%d/%Y %H:%M")
weather["Date"] = weather["datetime"].dt.strftime(Common.DATE_FORMAT)
weather["Time"] = weather["datetime"].dt.strftime(Common.TIME_FORMAT)
# build important factors and formula to predict the bike number
gb_df = pd.merge(gb_df
, weather[["Date", "Time", "AtmosphericPressure", "WindSpeed", "AirTemperature"]]
, on=["Date", "Time"]
, how="left")
gb_df["AtmosphericPressure"].fillna((gb_df["AtmosphericPressure"].mean()), inplace = True)
gb_df["WindSpeed"].fillna((gb_df["WindSpeed"].mean()), inplace = True)
gb_df["AirTemperature"].fillna((gb_df["AirTemperature"].mean()), inplace = True)
gb_df["Weekday Code"] = pd.to_datetime(gb_df["Date"], format=Common.DATE_FORMAT).dt.weekday
# label encoding for weekdays, time and season
le_season = preprocessing.LabelEncoder()
gb_df["Season Code"] = le_season.fit_transform(gb_df["Season"])
le_time = preprocessing.LabelEncoder()
gb_df["Time Code"] = le_time.fit_transform(gb_df["Time"])
#Common.save_csv(gb_df, "./gb_df.csv")
#print(f"Data has {len(gb_df)} rows")
# read CSV file containing holiday info
# TODO
######################################################################
######### TRAINING MODEL USING GRADIENT BOOSTING ALGORITHM ###########
######################################################################
# Create training and testing samples: 67% training set, 33% testing set using scikit-learn's train_test_split
seed = 7
test_size = 0.33
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
'learning_rate': 0.01, 'loss': 'ls'}
model = ensemble.GradientBoostingRegressor(**params)
x = gb_df[Common.CONSIDERING_FACTORS].copy()
y = gb_df[Common.PREDICTING_FACTOR].copy()
x_train, x_test, y_train, y_test = train_test_split(x.values, y.values, test_size=test_size, random_state=seed)
# feed training data to Gradient Boosting model
model.fit(x_train, y_train)
# Plot feature importance
feature_importance = model.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
fig, ax = plt.subplots()
ax.barh(pos, feature_importance[sorted_idx], align='center')
ax.set(title = 'Variable Importance',xlabel = 'Relative Importance')
plt.yticks(pos, x.columns[sorted_idx])
# set margins
plt.subplots_adjust(left=0.2, right=0.9, top=0.95, bottom=0.1)
fig.savefig(Common.PREDICTING_PLOTS_DIR + "/feature_importance.png")
fig.clear()
# after inspecting feature importance, the weather variables barely influence the result,
# so drop them and keep only the important factors
x = gb_df[Common.IMPORTANT_FACTORS].copy()
y = gb_df[Common.PREDICTING_FACTOR].copy()
x_train, x_test, y_train, y_test = train_test_split(x.values, y.values, test_size=test_size, random_state=seed)
# feed training data to Gradient Boosting model
model.fit(x_train, y_train)
# save model
joblib.dump(model, Common.GRADIENT_BOOSTING_MODEL_FULL_PATH)
######################################################################
################ TESTING OUR GRADIENT BOOSTING MODEL #################
######################################################################
y_pred = model.predict(x_test)
mse = mean_squared_error(y_test, y_pred)
rmse = math.sqrt(mse)
print("MSE: %.4f" % mse)
print("RMSE: %.4f" % rmse)
df_test = pd.DataFrame(x_test, columns=Common.IMPORTANT_FACTORS)
df_test["Time"] = le_time.inverse_transform(df_test["Time Code"].astype(np.int64))
df_test = df_test.drop(["Time Code"], axis = 1)
df_test[Common.PREDICTING_FACTOR] = y_test
df_test["pred"] = y_pred.round(0).astype(np.int64)
df_test = pd.merge(df_test
, gb_df[["Number", "Address", "Bike Stands", "Latitude", "Longitude", "Time"]]
, how="left"
, on=["Latitude", "Longitude", "Time"])
df_test = df_test.groupby(["Number", "Address", "Time"]).agg({Common.PREDICTING_FACTOR: "mean", "pred": "mean", "Bike Stands": "max"}).reset_index()
#print(df_test.dtypes)
# get station numbers in testing set
station_numbers = df_test["Number"].unique()
print("Station numbers in testing set: " ,station_numbers)
# calculate number of stations in testing set
n_stations = len(station_numbers)
n_station_row = round(n_stations / Common.MAX_AXES_ROW)
n_station_row = n_station_row + 1 if n_station_row * Common.MAX_AXES_ROW < n_stations else n_station_row
print(f"We need to generate a figure with {n_station_row} rows for {n_stations}")
# ignore data from 00:00:00 to 05:30:00 since Dublin Bikes system doesn't operate in that time period
df_test = df_test[(df_test["Time"] >= "05:30:00")].reset_index(drop=True)
index = 0
fig, axes = plt.subplots(figsize = (12, 10), nrows = n_station_row, ncols = Common.MAX_AXES_ROW, sharex = True, sharey= True, constrained_layout=False)
for row in axes:
for ax in row:
#print(f"Rendering in {index}")
if index >= n_stations:
# locate sticks every 1 hour
ax.xaxis.set_major_locator(mdates.HourLocator(interval = 1))
# show locate label with hour and minute format
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
# set smaller size for tick labels
ax.xaxis.set_tick_params(labelsize=7)
# increase index of next station by 1 before continuing
index += 1
continue
condition = df_test["Number"] == station_numbers[index]
ax_x = pd.to_datetime(df_test[condition]["Time"], format="%H:%M:%S")
ax_y1 = df_test[condition][Common.PREDICTING_FACTOR]
ax_y2 = df_test[condition]["pred"]
ax_y3 = df_test[condition]["Bike Stands"]
ax.plot(ax_x, ax_y1, "b-", label='Actual')
ax.plot(ax_x, ax_y2, "r-", label='Predicted')
ax.plot(ax_x, ax_y3, "-.", color = 'black', label='Bike Stands')
ax.fill_between(ax_x.dt.to_pydatetime(), ax_y2 - rmse, ax_y2 + rmse, facecolor='#3a3a3a', alpha=0.5)
y_min = 0
y_max = all_df["Bike Stands"].max()
ax.set_ylim([y_min, y_max])
# locate sticks every 1 hour
ax.xaxis.set_major_locator(mdates.HourLocator(interval = 1))
# show locate label with hour and minute format
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
# set smaller size for tick labels
ax.xaxis.set_tick_params(labelsize=7)
# set title for each axe
ax_title = df_test[condition]["Address"].unique()[0]
ax.set_title(ax_title)
# margin x at 0 and y at 0.1
ax.margins(x=0.0, y=0.1)
ax.grid(linestyle="-")
# increase index of next station by 1
index += 1
handles, labels = ax.get_legend_handles_labels()
# show rotate tick lables automatically with 90 degree
fig.autofmt_xdate(rotation = "90")
# set title of the figure
fig.suptitle("Gradient Boosting prediction and actual number")
fig.subplots_adjust(hspace=0.6)
# Set common labels
fig.text(0.5, 0.12, "Time", ha='center', va='center', fontsize="medium")
fig.text(0.06, 0.5, "Mean Available Stands", ha='center', va='center', rotation='vertical', fontsize="medium")
# plot the legend
fig.legend(handles, labels, title="Color", loc='center', bbox_to_anchor=(0.5, 0.06, 0., 0.), ncol=4)
fig.savefig(Common.PREDICTING_PLOTS_DIR + "/prediction.png")
fig.clear()
end = time.time()
print("Done exploration after {} seconds".format((end - start)))
sys.exit()
###################################################################
##################### TODO ###########################
###################################################################
hr_unseen_gb_df = unseen_gb_df[(unseen_gb_df["Weekday"] == "Mon") \
& (unseen_gb_df["Time"].dt.strftime(Common.TIME_FORMAT) == "08:00:00")].copy().reset_index(drop=True)
hr_unseen_gb_df["error"] = rmse
hr_unseen_gb_df["max"] = round(hr_unseen_gb_df["pred"] + hr_unseen_gb_df["error"]).astype(np.int64)
hr_unseen_gb_df["min"] = round(hr_unseen_gb_df["pred"] - hr_unseen_gb_df["error"]).astype(np.int64)
hr_unseen_gb_df.loc[hr_unseen_gb_df["min"] < 0, "min"] = 0
hr_unseen_gb_df["diff"] = hr_unseen_gb_df["Avg Bikes"] - hr_unseen_gb_df["pred"]
hr_unseen_gb_df["max_diff"] = np.negative(hr_unseen_gb_df["Avg Bikes"] - hr_unseen_gb_df["max"])
hr_unseen_gb_df["min_diff"] = hr_unseen_gb_df["Avg Bikes"] - hr_unseen_gb_df["min"]
hr_unseen_gb_df[hr_unseen_gb_df["min_diff"] < 0, "min_diff"] = np.negative(hr_unseen_gb_df["min_diff"])
'''
hr_unseen_gb_df["Evaluate Pred"] = hr_unseen_gb_df.apply(lambda x: "Sufficient" if x["Avg Bikes"] >= x["lower_bound"] and \
x["Avg Bikes"] <= x["upper_bound"] \
else "Oversupply" if x["Avg Bikes"] > x["upper_bound"] \
else "Insufficient")
hr_unseen_gb_df["Station Range"] = "1-25" if hr_unseen_gb_df["Number"] < 26 \
else "26-50" if hr_unseen_gb_df["Number"] < 51 \
else "51-75" if hr_unseen_gb_df["Number"] < 76 \
else "76-102"
'''
Common.save_csv(hr_unseen_gb_df, "./hr_unseen_gb_df.csv")
#sys.exit()
fig, ax = plt.subplots(figsize=(10, 6))
ax.errorbar(hr_unseen_gb_df["Number"], hr_unseen_gb_df["Avg Bikes"], yerr=[hr_unseen_gb_df["min_diff"], hr_unseen_gb_df["max_diff"]], fmt='.k')
#ax.plot(hr_unseen_gb_df["Avg Bikes"])
fig.savefig("./inventory.png")
plt.gcf().clear()
|
Python
|
CL
|
cbace6086994c74405f41bbc94f32cae3136e46e6cebc69c224937c11a012ebc
|
import argparse
import datetime
import multiprocessing as mp
import os
import utilities
class ExecConfiguration:
def __init__(self):
self.config = None
class DateConfig:
def __init__(self, date_str):
self.date_with_slash = date_str
self.date_with_hyphen = date_str.replace('/', '-')
def slash(self):
return self.date_with_slash
def hyphen(self):
return self.date_with_hyphen
def run_simulation(command_str):
# Measure simulation duration
start_simulation = datetime.datetime.now()
# Call simulation
process = os.popen(command_str)
process.close()
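# close() waits for the spawned command to exit, so the timing below measures the full simulation run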
# Compute elapsed time and update total simulation time
end_simulation = datetime.datetime.now()
elapsed_time = end_simulation - start_simulation
utilities.safe_print('\tCommand:' + command_str + ' \n\t\tElapsed:' + '%.2f seconds' % elapsed_time.total_seconds())
return elapsed_time.total_seconds()
# total simulation time for the record
total_simulations_time = 0
total_simulations_run = 0
def sum_simulation_time(result):
global total_simulations_time
global total_simulations_run
total_simulations_time += int(result)
total_simulations_run += 1
def simulation_error(error):
utilities.safe_print(error)
def to_normalize_path(path):
return r'"%s"' % path
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Cold storage simulation.')
parser.add_argument('--exe', type=str, help='Path of the VarroaPop command line application', required=True)
parser.add_argument('--vrp', type=str, help='Path of the vrp file to use for simulations', required=True)
parser.add_argument('--output_directory', type=str,
help='Output files will be written in an autogenerated folder within OUT_DIR',
metavar='OUT_DIR', required=True)
parser.add_argument('--input_directory', type=str, help='Input directory expecting IN_DIR/SCENARIO.txt',
metavar='IN_DIR', required=True)
parser.add_argument('--weather_directory', type=str, help='Get weather files from WEATHER_DIRECTORY',
metavar='WEATHER_DIRECTORY', required=True)
arguments = parser.parse_args()
print('Working directory: ' + os.getcwd())
if not os.path.isfile(arguments.exe):
print('Cannot find VarroaPop executable at: ' + arguments.exe)
exit(-1)
if not os.path.isfile(arguments.vrp):
print('Cannot find VRP file at: ' + arguments.vrp)
exit(-1)
if os.path.isfile(arguments.output_directory):
print(arguments.output_directory + ' is not a directory')
exit(-1)
if not os.path.isdir(arguments.input_directory):
print('Cannot find input directory at: ' + arguments.input_directory)
exit(-1)
if not os.path.isdir(arguments.weather_directory):
print('Cannot find weather directory at: ' + arguments.weather_directory)
exit(-1)
start_dates = [
DateConfig('09/15'),
DateConfig('09/22'),
DateConfig('09/29'),
DateConfig('10/06'),
DateConfig('10/13'),
DateConfig('10/20')]
end_dates = [
DateConfig('02/15'),
DateConfig('02/22'),
DateConfig('02/29'),
DateConfig('03/01'),
DateConfig('03/08'),
DateConfig('03/15')]
exec_configurations = []
default_command = arguments.exe + ' -f -v ' + to_normalize_path(arguments.vrp) + \
' --forageDayNoTemp --hourlyTemperaturesEstimation --foragersAlwaysAgeBasedOnForageInc' + \
' --adultAgingBasedOnLaidEggs --inOutEvents'
input_files_exists = {}
# gather configurations for simulations
weather_files = os.listdir(arguments.weather_directory)
for weather_file in weather_files:
info = utilities.parse_weather_filename(weather_file)
output_directory = os.path.join(arguments.output_directory, os.path.join(info.location, info.scenario))
# get input filename and check if it exists
input_file = os.path.join(arguments.input_directory, info.scenario + '.txt')
if not input_file in input_files_exists:
input_files_exists[input_file] = os.path.exists(input_file)
if not input_files_exists[input_file]:
print('Missing input file ' + input_file)
exit(-1)
command = default_command + ' -i ' + to_normalize_path(input_file)
command += ' -w ' + to_normalize_path(os.path.join(arguments.weather_directory, weather_file))
command += ' --binaryWeatherFileFormat ' + utilities.get_valid_binary_format_identifier(info.scenario)
# add configuration without cold storage
output_filename = info.model + '_default'
output_file = os.path.join(output_directory, output_filename + '.txt')
exec_command = command + ' -o ' + to_normalize_path(output_file)
exec_configurations.append(exec_command)
# add configurations for cold storage
for start_date in start_dates:
for end_date in end_dates:
output_filename = info.model + '_cold_storage_' + start_date.hyphen() + '_' + end_date.hyphen()
output_file = os.path.join(output_directory, output_filename + '.txt')
exec_command = command + ' -o ' + to_normalize_path(output_file)
exec_command += ' --coldStorage --coldStorageStartDate %s --coldStorageEndDate %s' \
% (start_date.slash(), end_date.slash())
exec_configurations.append(exec_command)
# run simulations
print('Executing Cold Storage Simulations: ')
simulation_time = datetime.datetime.now()
# Step 1: Init multiprocessing.Pool()
pool = mp.Pool(mp.cpu_count())
# Step 2: Use loop to parallelize
for configuration in exec_configurations:
pool.apply_async(run_simulation,
args=(configuration,),
callback=sum_simulation_time,
error_callback=simulation_error)
# Step 3: Don't forget to close
pool.close()
# Step 4: Wait for processes to complete
pool.join()
print('Total duration (s):' + '%.2f' % (datetime.datetime.now() - simulation_time).total_seconds())
print('Total duration accumulated (s):' + '%.2f' % total_simulations_time)
print('Total simulations executed :' + '%d' % total_simulations_run)
|
Python
|
CL
|
973f675eaf72f16c5158e2dbf6dbbc696d3c7ef60c3614fe19e7d192e49d0699
|
import datetime
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
PROJECT_NAME = 'twittest-1140'
def create_logging_client():
"""Returns a client for accessing the logging api."""
credentials = GoogleCredentials.get_application_default()
return build('logging', 'v1beta', credentials=credentials)
def list_logs(client=None, project=PROJECT_NAME):
"""Returns a list of all the logs for the project"""
if not client:
client = create_logging_client()
next_page_token = None # paged
finished = False
log_names = []
while not finished:
resp = client.projects().logs().list(
projectsId=project,
pageToken=next_page_token).execute()
for log in resp['logs']:
log_names.append(log)
next_page_token = resp.get('nextPageToken')
finished = not next_page_token
return log_names
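# Illustrative usage only (not executed here); assumes application default
# credentials are available in the environment:
#   for log_name in list_logs():
#       print(log_name)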
def publish_file(fname, logname, project=PROJECT_NAME):
"""Reads a file and uploads it line by line to the specified log"""
# set up the metadata
# ideally we would read the timestamps from the file or something but meh
client = create_logging_client()
metadata = {
'timestamp':datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ"),
'region':'asia-east1',
'zone':'asia-east1-b',
'serviceName':'compute.googleapis.com',
'severity':'INFO',
'labels':{}
}
with open(fname, 'r') as f:
body = {
'commonLabels': {
'compute.googleapis.com/resource_id':'???',# todo, find out what this should be
'compute.googleapis.com/resource_type':'instance'
},
'entries': [
{
'metadata':metadata,
'log':logname,
'textPayload':line
}
for line in f
]
}
resp = client.projects().logs().entries().write(
projectsId=project, logsId=logname, body=body).execute()
def _setup_argparser():
parser = argparse.ArgumentParser(description='Helper to upload files to cloud logging')
parser.add_argument('--input-file', '-i', action='store', dest='filename',
help='input file to read, will be uploaded one msg per line')
parser.add_argument('--log-name', '-l', action='store', dest='logname',
help='name of the log to write to')
parser.add_argument('--project', '-p', action='store', dest='project_name',
help='name of the project')
return parser
if __name__ == '__main__':
import argparse
import sys
parser = _setup_argparser()
args = parser.parse_args()
if args.project_name:
PROJECT_NAME = args.project_name
if not args.filename:
print('Need a filename!')
sys.exit(-1)
if not args.logname:
print('need a log name!')
sys.exit(-1)
print('uploading logs!')
publish_file(args.filename,
args.logname)
|
Python
|
CL
|
e9c948f76b305c8182aaa4da15924d00a19769cbef7d9a21221a08c93dbb5855
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-10-10 20:52
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
FIELD_MAPPINGS = {
'ProcessNextStepPlugin': [
('button-type', 'button_type'),
('button-size', 'button_size'),
('button-options', 'button_options'),
('quick-float', 'quick_float'),
('icon-left', 'icon_left'),
('icon-right', 'icon_right'),
],
'ShopProceedButton': [
('button-type', 'button_type'),
('button-size', 'button_size'),
('button-options', 'button_options'),
('quick-float', 'quick_float'),
('icon-left', 'icon_left'),
('icon-right', 'icon_right'),
],
}
def forwards(apps, schema_editor):
field_mappings = {}
for key, maps in FIELD_MAPPINGS.items():
field_mappings[key] = dict(maps)
migrate_glossary(apps, field_mappings)
def backwards(apps, schema_editor):
field_mappings = {}
for key, maps in FIELD_MAPPINGS.items():
field_mappings[key] = dict((m[1], m[0]) for m in maps)
migrate_glossary(apps, field_mappings)
def migrate_glossary(apps, field_mappings):
CascadeElement = apps.get_model('cmsplugin_cascade', 'CascadeElement')
for element in CascadeElement.objects.all():
if element.plugin_type not in field_mappings:
continue
glossary = dict(element.glossary)
for srckey, value in element.glossary.items():
dstkey = field_mappings[element.plugin_type].get(srckey)
if dstkey and srckey in glossary:
glossary[dstkey] = glossary.pop(srckey)
element.glossary = glossary
element.save()
class Migration(migrations.Migration):
dependencies = [
('shop', '0002_auto_20151016_1451'),
]
operations = []
if 'cmsplugin_cascade' in settings.INSTALLED_APPS:
dependencies.append(('cmsplugin_cascade', '0014_glossary_field'))
operations.append(migrations.RunPython(forwards, reverse_code=backwards))
|
Python
|
CL
|
69d92614b50cf5730c92aa108ad0caa6cb05ca3aa6f4eb46b5f8df0ace7237f1
|
# -*- coding: utf-8 -*-
"""Main component of the system.
"""
import uuid
from typing import Set
from diamond_eye.action import make_action
from diamond_eye.utils import MetaSingleton
class Application(metaclass=MetaSingleton):
"""Main component of the system.
"""
@staticmethod
def issue_new_uuid(known_uuids: Set[str]) -> str:
"""Generate new unique id that do not interfere with existing ones.
"""
new_uuid = str(uuid.uuid4())
while new_uuid in known_uuids:
new_uuid = str(uuid.uuid4())
return new_uuid
def issue_and_save_new_uuid(self, state) -> str:
"""Generate and save the new id.
"""
known_uuids = state.get_variable('app', 'known_uuids', set())
new_uuid = self.issue_new_uuid(known_uuids)
state.set_variable('app', 'add', 'known_uuids', new_uuid)
return new_uuid
def register_new_user(self, user_name: str, state, filesystem) -> None:
"""Issue new id, save it to the state and filesystem.
"""
user_id = self.issue_and_save_new_uuid(state)
filesystem.save_user_key(user_name, user_id)
state.set_variable('app', 'set', 'user_id', user_id)
state.set_variable('app', 'set', 'user_name', user_name)
action = make_action(
user_id=user_id,
user_name=user_name,
version=state.get_variable('app', 'version'),
action={
'branch': 'app',
'method': 'add',
'key': 'known_users',
'value': user_name
}
)
filesystem.append_to_log(action)
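# Minimal usage sketch (illustrative only; the `state` and `filesystem` objects
# are assumed to be provided by other diamond_eye components):
#   app = Application()
#   app.register_new_user('alice', state, filesystem)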
|
Python
|
CL
|
52c22a01c3b3a39237afdc36b7a824ee319f8ddfc7678d1cd3ae8d9833e50d3c
|
# Copyright 2016 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import json
import optparse
import pickle
import unittest
from unittest import mock
from blinkpy.common.checkout.baseline_copier import BaselineCopier
from blinkpy.common.net.results_fetcher import Build
from blinkpy.common.net.web_test_results import (
Artifact,
WebTestResult,
WebTestResults,
)
from blinkpy.common.path_finder import RELATIVE_WEB_TESTS
from blinkpy.common.system.executive_mock import MockExecutive
from blinkpy.tool.commands.rebaseline import (
AbstractParallelRebaselineCommand, Rebaseline, TestBaselineSet)
from blinkpy.tool.mock_tool import MockBlinkTool
from blinkpy.web_tests.builder_list import BuilderList
from blinkpy.web_tests.port.factory_mock import MockPortFactory
from blinkpy.web_tests.port.test import MOCK_WEB_TESTS
class BaseTestCase(unittest.TestCase):
command_constructor = lambda: None
def setUp(self):
self.tool = MockBlinkTool()
self.command = self.command_constructor()
self.command._tool = self.tool # pylint: disable=protected-access
self.tool.builders = BuilderList({
'MOCK Mac10.10 (dbg)': {
'port_name': 'test-mac-mac10.10',
'specifiers': ['Mac10.10', 'Debug'],
},
'MOCK Mac10.10': {
'port_name': 'test-mac-mac10.10',
'specifiers': ['Mac10.10', 'Release'],
},
'MOCK Mac10.11 (dbg)': {
'port_name': 'test-mac-mac10.11',
'specifiers': ['Mac10.11', 'Debug'],
},
'MOCK Mac10.11 ASAN': {
'port_name': 'test-mac-mac10.11',
'specifiers': ['Mac10.11', 'Release'],
},
'MOCK Mac10.11': {
'port_name': 'test-mac-mac10.11',
'specifiers': ['Mac10.11', 'Release'],
'steps': {
'blink_web_tests (with patch)': {},
},
},
'MOCK Precise': {
'port_name': 'test-linux-precise',
'specifiers': ['Precise', 'Release'],
},
'MOCK Trusty': {
'port_name': 'test-linux-trusty',
'specifiers': ['Trusty', 'Release'],
},
'MOCK Trusty Multiple Steps': {
'port_name': 'test-linux-trusty',
'specifiers': ['Trusty', 'Release'],
'steps': {
'blink_web_tests (with patch)': {},
'not_site_per_process_blink_web_tests (with patch)': {
'flag_specific': 'disable-site-isolation-trials',
},
},
},
'MOCK Win10': {
'port_name': 'test-win-win10',
'specifiers': ['Win10', 'Release'],
},
'MOCK Win7 (dbg)': {
'port_name': 'test-win-win7',
'specifiers': ['Win7', 'Debug'],
},
'MOCK Win7 (dbg)(1)': {
'port_name': 'test-win-win7',
'specifiers': ['Win7', 'Debug'],
'steps': {
'blink_web_tests (with patch)': {},
},
},
'MOCK Win7 (dbg)(2)': {
'port_name': 'test-win-win7',
'specifiers': ['Win7', 'Debug'],
},
'MOCK Win7': {
'port_name': 'test-win-win7',
'specifiers': ['Win7', 'Release'],
'steps': {
'blink_web_tests (with patch)': {},
},
},
'MOCK wpt(1)': {
'port_name': 'test-linux-trusty',
'specifiers': ['Trusty', 'Release'],
},
'MOCK wpt(2)': {
'port_name': 'test-linux-trusty',
'specifiers': ['Trusty', 'Release'],
},
})
self.mac_port = self.tool.port_factory.get_from_builder_name(
'MOCK Mac10.11')
self.test_expectations_path = self.mac_port.path_to_generic_test_expectations_file(
)
self._write(
'VirtualTestSuites',
json.dumps([{
"prefix":
"prefix",
"platforms": ["Linux", "Mac"],
"bases": [
"userscripts/first-test.html",
'userscripts/second-test.html'
],
"args": ["--enable-features=flag"]
}]))
self._write(
'FlagSpecificConfig',
json.dumps([
{
'name': 'disable-site-isolation-trials',
'args': ['--disable-site-isolation-trials'],
},
]))
# Create some dummy tests (note _setup_mock_build_data uses the same
# test names). Also, create some dummy baselines to avoid the implicit
# all-pass warning.
self._write('userscripts/first-test.html', 'Dummy test contents')
self._write('userscripts/first-test-expected.txt', 'Dummy baseline')
self._write('userscripts/first-test-expected.png', 'Dummy baseline')
self._write('userscripts/first-test-expected.wav', 'Dummy baseline')
self._write('userscripts/second-test.html', 'Dummy test contents')
self._write('userscripts/second-test-expected.txt', 'Dummy baseline')
self._write('userscripts/second-test-expected.png', 'Dummy baseline')
self._write('userscripts/second-test-expected.wav', 'Dummy baseline')
self._write('userscripts/third-test.html', 'Dummy test contents')
# In AbstractParallelRebaselineCommand._rebaseline_commands, a default port
# object is gotten using self.tool.port_factory.get(), which is used to get
# test paths -- and the web tests directory may be different for the "test"
# ports and real ports. Since only "test" ports are used in this class,
# we can make the default port also a "test" port.
self.original_port_factory_get = self.tool.port_factory.get
self._test_port = self.tool.port_factory.get('test')
def get_test_port(port_name=None, options=None, **kwargs):
if not port_name:
return self._test_port
return self.original_port_factory_get(port_name, options, **kwargs)
self._mocks = contextlib.ExitStack()
self._mock_copier = mock.Mock(wraps=BaselineCopier(self.tool))
# See https://docs.python.org/3/library/unittest.mock.html#where-to-patch
# for why `blinkpy.common.checkout.baseline_copier.BaselineCopier` is
# not patched instead.
self._mocks.enter_context(
mock.patch('blinkpy.tool.commands.rebaseline.BaselineCopier',
return_value=self._mock_copier))
self._mocks.enter_context(
mock.patch('blinkpy.tool.blink_tool.BlinkTool',
return_value=self.tool))
self._mocks.enter_context(
mock.patch.object(self.tool, 'main', create=True, return_value=0))
self._mocks.enter_context(
mock.patch('blinkpy.common.message_pool.get', self._get_mock_pool))
self._mocks.enter_context(
mock.patch.object(self.tool.port_factory, 'get', get_test_port))
self._mocks.enter_context(
mock.patch.object(self.tool, 'web', mock.Mock()))
self.tool.web.get_binary.side_effect = lambda url: url.encode()
def _get_mock_pool(self, caller, worker_factory, num_workers):
"""A mock for `message_pool.get(...)`.
This simply invokes a single worker serially according to the message
pool protocol.
"""
worker_process = mock.Mock()
worker_process.host = self.tool
worker_process.post = lambda name, *args: caller.handle(
name, 'worker/0', *_serialize_round_trip(args))
worker = worker_factory(worker_process)
def run(tasks):
if hasattr(worker, 'start'):
worker.start()
for message_name, *args in tasks:
worker.handle(message_name, 'manager',
*_serialize_round_trip(args))
if hasattr(worker, 'stop'):
worker.stop()
message_pool = mock.Mock()
message_pool.run = run
message_pool = contextlib.nullcontext(message_pool)
return message_pool
def tearDown(self):
self._mocks.close()
def _expand(self, path):
if self.tool.filesystem.isabs(path):
return path
return self.tool.filesystem.join(self.mac_port.web_tests_dir(), path)
def _read(self, path):
return self.tool.filesystem.read_text_file(self._expand(path))
def _write(self, path, contents):
self.tool.filesystem.write_text_file(self._expand(path), contents)
def _remove(self, path):
self.tool.filesystem.remove(self._expand(path))
def _zero_out_test_expectations(self):
for port_name in self.tool.port_factory.all_port_names():
port = self.tool.port_factory.get(port_name)
for path in port.default_expectations_files():
self._write(path, '')
self.tool.filesystem.written_files = {}
def _setup_mock_build_data(self):
for builder in ['MOCK Win7', 'MOCK Win7 (dbg)', 'MOCK Mac10.11']:
self.tool.results_fetcher.set_results(
Build(builder),
WebTestResults.from_json(
{
'tests': {
'userscripts': {
'first-test.html': {
'expected': 'PASS',
'actual': 'FAIL',
'is_unexpected': True,
# The real format of these URLs is more
# complex, but adding that detail to the
# test doesn't add value. We mostly just
# care about which builder and test the
# baseline was downloaded for.
'artifacts': {
'actual_image': [
f'https://results.api.cr.dev/{builder}/first/actual_image'
],
'expected_image': [
f'https://results.api.cr.dev/{builder}/first/expected_image'
],
'actual_text': [
f'https://results.api.cr.dev/{builder}/first/actual_text'
],
'expected_text': [
f'https://results.api.cr.dev/{builder}/first/expected_text'
],
}
},
'second-test.html': {
'expected': 'FAIL',
'actual': 'FAIL',
'artifacts': {
'actual_image': [
f'https://results.api.cr.dev/{builder}/second/actual_image'
],
'expected_image': [
f'https://results.api.cr.dev/{builder}/second/expected_image'
],
'actual_audio': [
f'https://results.api.cr.dev/{builder}/second/actual_audio'
],
'expected_audio': [
f'https://results.api.cr.dev/{builder}/second/expected_audio'
],
}
}
}
}
},
step_name='blink_web_tests (with patch)'))
def _assert_baseline_downloaded(self, url: str, dest: str):
self.tool.web.get_binary.assert_any_call(url)
self.assertEqual(self._read(dest), url)
class TestAbstractParallelRebaselineCommand(BaseTestCase):
"""Tests for the base class of multiple rebaseline commands.
This class only contains test cases for utility methods. Some common
behaviours of various rebaseline commands are tested in TestRebaseline.
"""
command_constructor = AbstractParallelRebaselineCommand
def test_builders_to_fetch_from(self):
build_steps_to_fetch = self.command.build_steps_to_fetch_from([
('MOCK Win10', 'blink_web_tests (with patch)'),
('MOCK Win7 (dbg)(1)', 'blink_web_tests (with patch)'),
('MOCK Win7 (dbg)(2)', 'blink_web_tests (with patch)'),
('MOCK Win7', 'blink_web_tests (with patch)'),
])
# Win7 debug builders are shadowed by release builder.
self.assertEqual(
build_steps_to_fetch, {
('MOCK Win7', 'blink_web_tests (with patch)'),
('MOCK Win10', 'blink_web_tests (with patch)'),
})
def test_builders_to_fetch_from_flag_specific(self):
build_steps_to_fetch = self.command.build_steps_to_fetch_from([
('MOCK Trusty', 'blink_web_tests (with patch)'),
])
# Ports are the same, but the fallback paths differ.
self.assertEqual(
build_steps_to_fetch, {
('MOCK Trusty', 'blink_web_tests (with patch)'),
})
build_steps_to_fetch = self.command.build_steps_to_fetch_from([
('MOCK Trusty Multiple Steps', 'blink_web_tests (with patch)'),
('MOCK Trusty Multiple Steps',
'not_site_per_process_blink_web_tests (with patch)'),
])
self.assertEqual(len(build_steps_to_fetch), 2)
self.assertIn(
('MOCK Trusty Multiple Steps', 'blink_web_tests (with patch)'),
build_steps_to_fetch)
self.assertIn(('MOCK Trusty Multiple Steps',
'not_site_per_process_blink_web_tests (with patch)'),
build_steps_to_fetch)
def test_unstaged_baselines(self):
git = self.tool.git()
git.unstaged_changes = lambda: {
RELATIVE_WEB_TESTS + 'x/foo-expected.txt': 'M',
RELATIVE_WEB_TESTS + 'x/foo-expected.something': '?',
RELATIVE_WEB_TESTS + 'x/foo-expected.png': '?',
RELATIVE_WEB_TESTS + 'x/foo.html': 'M',
'docs/something.md': '?', }
self.assertEqual(self.command.unstaged_baselines(), [
MOCK_WEB_TESTS + 'x/foo-expected.png',
MOCK_WEB_TESTS + 'x/foo-expected.txt',
])
def test_suffixes_for_actual_failures_for_non_wpt(self):
# pylint: disable=protected-access
build = Build('MOCK Win7')
self.tool.results_fetcher.set_results(
build,
WebTestResults.from_json({
'tests': {
'pixel.html': {
'expected': 'PASS',
'actual': 'FAIL',
'artifacts': {
'actual_image': ['pixel-actual.png'],
},
}
}
}))
self.assertEqual(
self.command._suffixes_for_actual_failures('pixel.html', build),
{'png'},
)
class TestRebaseline(BaseTestCase):
"""Tests for the blink_tool.py rebaseline command.
Also tests some common behaviours of all rebaseline commands.
"""
command_constructor = Rebaseline
def setUp(self):
super(TestRebaseline, self).setUp()
self.tool.executive = MockExecutive()
self._setup_mock_build_data()
def tearDown(self):
super(TestRebaseline, self).tearDown()
@staticmethod
def options(**kwargs):
return optparse.Values(
dict(
{
'optimize': True,
'dry_run': False,
'verbose': True,
'results_directory': None,
}, **kwargs))
def test_rebaseline_test_passes_on_all_builders(self):
self.tool.results_fetcher.set_results(
Build('MOCK Win7'),
WebTestResults.from_json({
'tests': {
'userscripts': {
'first-test.html': {
'expected': 'REBASELINE',
'actual': 'PASS'
}
}
}
}))
self._write(self.test_expectations_path,
'Bug(x) userscripts/first-test.html [ Failure ]\n')
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('userscripts/first-test.html',
Build('MOCK Win7'))
self.command.rebaseline(self.options(), test_baseline_set)
self.tool.main.assert_not_called()
def test_rebaseline_all(self):
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('userscripts/first-test.html',
Build('MOCK Win7'),
'blink_web_tests (with patch)')
self.command.rebaseline(self.options(), test_baseline_set)
self._mock_copier.find_baselines_to_copy.assert_has_calls(
[
mock.call('userscripts/first-test.html', 'txt',
test_baseline_set),
mock.call('userscripts/first-test.html', 'png',
test_baseline_set),
],
any_order=True)
self._assert_baseline_downloaded(
'https://results.api.cr.dev/MOCK Win7/first/actual_text',
'platform/test-win-win7/userscripts/first-test-expected.txt')
self._assert_baseline_downloaded(
'https://results.api.cr.dev/MOCK Win7/first/actual_image',
'platform/test-win-win7/userscripts/first-test-expected.png')
self.tool.main.assert_called_once_with([
'echo',
'optimize-baselines',
'--no-manifest-update',
'--verbose',
'userscripts/first-test.html',
])
def test_rebaseline_debug(self):
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('userscripts/first-test.html',
Build('MOCK Win7 (dbg)'),
'blink_web_tests (with patch)')
self.command.rebaseline(self.options(), test_baseline_set)
self._mock_copier.find_baselines_to_copy.assert_has_calls(
[
mock.call('userscripts/first-test.html', 'txt',
test_baseline_set),
mock.call('userscripts/first-test.html', 'png',
test_baseline_set),
],
any_order=True)
self._assert_baseline_downloaded(
'https://results.api.cr.dev/MOCK Win7 (dbg)/first/actual_text',
'platform/test-win-win7/userscripts/first-test-expected.txt')
self._assert_baseline_downloaded(
'https://results.api.cr.dev/MOCK Win7 (dbg)/first/actual_image',
'platform/test-win-win7/userscripts/first-test-expected.png')
self.tool.main.assert_called_once_with([
'echo',
'optimize-baselines',
'--no-manifest-update',
'--verbose',
'userscripts/first-test.html',
])
def test_rebaseline_reftest_with_text_failure(self):
"""Ensure that a reftest can still have any text output [0] rebaselined.
[0]: https://chromium.googlesource.com/chromium/src/+/HEAD/docs/testing/writing_web_tests.md#tests-that-are-both-pixel_reference-tests-and-text-tests
"""
build = Build('MOCK Win7', 1000)
self.tool.results_fetcher.set_results(
build,
WebTestResults.from_json(
{
'tests': {
'reftest.html': {
'expected': 'PASS',
'actual': 'FAIL',
'is_unexpected': True,
'artifacts': {
'actual_text': [
'https://results.api.cr.dev/reftest-actual.txt',
],
'actual_image': [
'https://results.api.cr.dev/reftest-actual.png',
],
},
},
},
},
step_name='blink_web_tests (with patch)'))
self._write('reftest-expected.html', 'reference page')
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('reftest.html', build,
'blink_web_tests (with patch)')
self.command.rebaseline(self.options(), test_baseline_set)
self._assert_baseline_downloaded(
'https://results.api.cr.dev/reftest-actual.txt',
'platform/test-win-win7/reftest-expected.txt')
self.assertNotIn(
mock.call('https://results.api.cr.dev/reftest-actual.png'),
self.tool.web.get_binary.call_args_list)
self.assertFalse(
self.tool.filesystem.exists(
self._expand('platform/test-win-win7/reftest-expected.png')))
def test_rebaseline_with_cache_hit(self):
results = WebTestResults([
WebTestResult('userscripts/first-test.html', {
'actual': 'FAIL',
'is_unexpected': True,
}, {
'actual_image': [
Artifact('https://results.usercontent.cr.dev/actual_image',
'3a778bf'),
],
}),
],
step_name='blink_web_tests (with patch)')
self.tool.web.get_binary.side_effect = lambda _: b'actual image'
self.tool.results_fetcher.set_results(Build('MOCK Win7'), results)
self.tool.results_fetcher.set_results(Build('MOCK Mac10.11'), results)
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('userscripts/first-test.html',
Build('MOCK Win7'),
'blink_web_tests (with patch)')
test_baseline_set.add('userscripts/first-test.html',
Build('MOCK Mac10.11'),
'blink_web_tests (with patch)')
self.command.rebaseline(self.options(), test_baseline_set)
self.tool.web.get_binary.assert_called_once_with(
'https://results.usercontent.cr.dev/actual_image')
self.assertEqual(
self._read(
'platform/test-win-win7/userscripts/first-test-expected.png'),
'actual image')
self.assertEqual(
self._read('platform/test-mac-mac10.11/'
'userscripts/first-test-expected.png'), 'actual image')
self.assertEqual(self.command.baseline_cache_stats.hit_count, 1)
self.assertEqual(self.command.baseline_cache_stats.hit_bytes, 12)
self.assertEqual(self.command.baseline_cache_stats.total_count, 2)
self.assertEqual(self.command.baseline_cache_stats.total_bytes, 24)
def test_no_optimize(self):
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('userscripts/first-test.html',
Build('MOCK Win7'),
'blink_web_tests (with patch)')
self.command.rebaseline(
self.options(optimize=False), test_baseline_set)
self._mock_copier.find_baselines_to_copy.assert_has_calls(
[
mock.call('userscripts/first-test.html', 'txt',
test_baseline_set),
mock.call('userscripts/first-test.html', 'png',
test_baseline_set),
],
any_order=True)
self._assert_baseline_downloaded(
'https://results.api.cr.dev/MOCK Win7/first/actual_text',
'platform/test-win-win7/userscripts/first-test-expected.txt')
self._assert_baseline_downloaded(
'https://results.api.cr.dev/MOCK Win7/first/actual_image',
'platform/test-win-win7/userscripts/first-test-expected.png')
self.tool.main.assert_not_called()
def test_results_directory(self):
self._write('/tmp/userscripts/first-test-actual.txt', 'actual text')
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('userscripts/first-test.html',
Build('MOCK Win7'),
'blink_web_tests (with patch)')
self.command.rebaseline(
self.options(optimize=False, results_directory='/tmp'),
test_baseline_set)
self._mock_copier.find_baselines_to_copy.assert_has_calls(
[
mock.call('userscripts/first-test.html', 'txt',
test_baseline_set),
mock.call('userscripts/first-test.html', 'png',
test_baseline_set),
],
any_order=True)
self.assertEqual(
self._read(
'platform/test-win-win7/userscripts/first-test-expected.txt'),
'actual text')
self.assertFalse(
self.tool.filesystem.exists(
self._expand(
'platform/test-win-win7/userscripts/first-test-expected.png'
)))
self.tool.main.assert_not_called()
def test_rebaseline_with_different_port_name(self):
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('userscripts/first-test.html',
Build('MOCK Win7'),
'blink_web_tests (with patch)', 'test-win-win10')
self.command.rebaseline(self.options(), test_baseline_set)
self._mock_copier.find_baselines_to_copy.assert_has_calls(
[
mock.call('userscripts/first-test.html', 'txt',
test_baseline_set),
mock.call('userscripts/first-test.html', 'png',
test_baseline_set),
],
any_order=True)
self._assert_baseline_downloaded(
'https://results.api.cr.dev/MOCK Win7/first/actual_text',
'platform/test-win-win10/userscripts/first-test-expected.txt')
self._assert_baseline_downloaded(
'https://results.api.cr.dev/MOCK Win7/first/actual_image',
'platform/test-win-win10/userscripts/first-test-expected.png')
self.tool.main.assert_called_once_with([
'echo',
'optimize-baselines',
'--no-manifest-update',
'--verbose',
'userscripts/first-test.html',
])
@unittest.skip('Disabled because this does not reflect the behavior of '
"'rebaseline-test-internal' now. Reenable after implementing "
'crbug.com/1149035.')
class TestRebaselineUpdatesExpectationsFiles(BaseTestCase):
"""Tests for the logic related to updating the test expectations file."""
command_constructor = Rebaseline
def setUp(self):
super(TestRebaselineUpdatesExpectationsFiles, self).setUp()
def mock_run_command(*args, **kwargs): # pylint: disable=unused-argument
return '{"add": [], "remove-lines": [{"test": "userscripts/first-test.html", "port_name": "test-mac-mac10.11"}]}\n'
self.tool.executive = MockExecutive(run_command_fn=mock_run_command)
@staticmethod
def options():
return optparse.Values({
'optimize': False,
'dry_run': False,
'verbose': True,
'results_directory': None,
})
# In the following test cases, we use a mock rebaseline-test-internal to
# pretend userscripts/first-test.html can be rebaselined on Mac10.11, so
# the corresponding expectation (if exists) should be updated.
def test_rebaseline_updates_expectations_file(self):
self._write(self.test_expectations_path, (
'# tags: [ Mac10.10 Mac Linux ]\n'
'# tags: [ Debug ]\n'
'# results: [ Failure ]\n'
'crbug.com/123 [ Debug Mac ] userscripts/first-test.html [ Failure ]\n'
'[ Linux ] userscripts/first-test.html [ Failure ]\n'))
self._setup_mock_build_data()
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('userscripts/first-test.html',
Build('MOCK Mac10.11'),
'blink_web_tests (with patch)')
self.command.rebaseline(self.options(), test_baseline_set)
new_expectations = self._read(self.test_expectations_path)
self.assertMultiLineEqual(new_expectations, (
'# tags: [ Mac10.10 Mac Linux ]\n'
'# tags: [ Debug ]\n'
'# results: [ Failure ]\n'
'crbug.com/123 [ Debug Mac10.10 ] userscripts/first-test.html [ Failure ]\n'
'[ Linux ] userscripts/first-test.html [ Failure ]\n'))
def test_rebaseline_updates_expectations_file_all_platforms(self):
self._write(self.test_expectations_path,
('# tags: [ linux mac10.10 win ]\n# results: [ Failure ]\n'
'userscripts/first-test.html [ Failure ]\n'))
self._setup_mock_build_data()
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('userscripts/first-test.html',
Build('MOCK Mac10.11'),
'blink_web_tests (with patch)')
self.command.rebaseline(self.options(), test_baseline_set)
new_expectations = self._read(self.test_expectations_path)
self.assertMultiLineEqual(
new_expectations,
('# tags: [ linux mac10.10 win ]\n'
'# results: [ Failure ]\n'
'[ Linux ] userscripts/first-test.html [ Failure ]\n'
'[ Mac10.10 ] userscripts/first-test.html [ Failure ]\n'
'[ Win ] userscripts/first-test.html [ Failure ]\n'))
def test_rebaseline_handles_platform_skips(self):
# This test is just like test_rebaseline_updates_expectations_file_all_platforms(),
# except that if a particular port happens to SKIP a test in an overrides file,
# we count that as passing, and do not think that we still need to rebaseline it.
self._write(
self.test_expectations_path,
'# tags: [ Linux Mac10.10 Win ]\n# results: [ Failure ]\nuserscripts/first-test.html [ Failure ]\n'
)
self._write('NeverFixTests', ('# tags: [ Android ]\n'
'# results: [ Skip ]\n'
'[ Android ] userscripts [ Skip ]\n'))
self._setup_mock_build_data()
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('userscripts/first-test.html',
Build('MOCK Mac10.11'),
'blink_web_tests (with patch)')
self.command.rebaseline(self.options(), test_baseline_set)
new_expectations = self._read(self.test_expectations_path)
self.assertMultiLineEqual(
new_expectations,
('# tags: [ Linux Mac10.10 Win ]\n'
'# results: [ Failure ]\n'
'[ Linux ] userscripts/first-test.html [ Failure ]\n'
'[ Mac10.10 ] userscripts/first-test.html [ Failure ]\n'
'[ Win ] userscripts/first-test.html [ Failure ]\n'))
def test_rebaseline_handles_skips_in_file(self):
# This test is like test_rebaseline_handles_platform_skips, except that the
# Skip is in the same (generic) file rather than a platform file. In this case,
# the Skip line should be left unmodified. Note that the first line is now
# qualified as "[Linux Mac Win]"; if it was unqualified, it would conflict with
# the second line.
self._write(self.test_expectations_path,
('# tags: [ Linux Mac Mac10.10 Win ]\n'
'# results: [ Failure Skip ]\n'
'[ Linux ] userscripts/first-test.html [ Failure ]\n'
'[ Mac ] userscripts/first-test.html [ Failure ]\n'
'[ Win ] userscripts/first-test.html [ Skip ]\n'))
self._setup_mock_build_data()
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('userscripts/first-test.html',
Build('MOCK Mac10.11'),
'blink_web_tests (with patch)')
self.command.rebaseline(self.options(), test_baseline_set)
new_expectations = self._read(self.test_expectations_path)
self.assertMultiLineEqual(
new_expectations,
('# tags: [ Linux Mac Mac10.10 Win ]\n'
'# results: [ Failure Skip ]\n'
'[ Linux ] userscripts/first-test.html [ Failure ]\n'
'[ Mac10.10 ] userscripts/first-test.html [ Failure ]\n'
'[ Win ] userscripts/first-test.html [ Skip ]\n'))
def test_rebaseline_handles_slow_in_file(self):
self._write(self.test_expectations_path,
('# tags: [ Linux Mac Mac10.10 Win ]\n'
'# results: [ Failure Slow ]\n'
'[ Linux ] userscripts/first-test.html [ Failure ]\n'
'[ Mac ] userscripts/first-test.html [ Failure ]\n'
'[ Win ] userscripts/first-test.html [ Failure Slow ]\n'))
self._setup_mock_build_data()
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('userscripts/first-test.html',
Build('MOCK Mac10.11'),
'blink_web_tests (with patch)')
self.command.rebaseline(self.options(), test_baseline_set)
new_expectations = self._read(self.test_expectations_path)
self.assertMultiLineEqual(
new_expectations,
('# tags: [ Linux Mac Mac10.10 Win ]\n'
'# results: [ Failure Slow ]\n'
'[ Linux ] userscripts/first-test.html [ Failure ]\n'
'[ Mac10.10 ] userscripts/first-test.html [ Failure ]\n'
'[ Win ] userscripts/first-test.html [ Failure Slow ]\n'))
def test_rebaseline_handles_smoke_tests(self):
# This test is just like test_rebaseline_handles_platform_skips, except that we check for
# a test not being in the SmokeTests file, instead of using overrides files.
# If a test is not part of the smoke tests, we count that as passing on ports that only
# run smoke tests, and do not think that we still need to rebaseline it.
self._write(
self.test_expectations_path,
'# tags: [ Linux Mac10.10 Win ]\n# results: [ Failure ]\nuserscripts/first-test.html [ Failure ]\n'
)
self._write('SmokeTests', 'fast/html/article-element.html')
self._setup_mock_build_data()
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('userscripts/first-test.html',
Build('MOCK Mac10.11'),
'blink_web_tests (with patch)')
self.command.rebaseline(self.options(), test_baseline_set)
new_expectations = self._read(self.test_expectations_path)
self.assertMultiLineEqual(
new_expectations,
('# tags: [ Linux Mac10.10 Win ]\n'
'# results: [ Failure ]\n'
'[ Linux ] userscripts/first-test.html [ Failure ]\n'
'[ Mac10.10 ] userscripts/first-test.html [ Failure ]\n'
'[ Win ] userscripts/first-test.html [ Failure ]\n'))
# In the following test cases, the tests produce no outputs (e.g. clean
# passing reftests, skipped tests, etc.). Hence, there are no baselines to
# fetch (empty baseline suffixes), and rebaseline-test-internal wouldn't be
# called. However, in some cases the expectations still need to be updated.
def test_rebaseline_keeps_skip_expectations(self):
# [ Skip ] expectations should always be kept.
self._write(self.test_expectations_path,
('# tags: [ Mac Win ]\n'
'# results: [ Skip ]\n'
'[ Mac ] userscripts/skipped-test.html [ Skip ]\n'
'[ Win ] userscripts/skipped-test.html [ Skip ]\n'))
self._write('userscripts/skipped-test.html', 'Dummy test contents')
self.tool.results_fetcher.set_results(
Build('MOCK Mac10.11'),
WebTestResults.from_json({
'tests': {
'userscripts': {
'skipped-test.html': {
'expected': 'SKIP',
'actual': 'SKIP',
}
}
}
}))
self.tool.results_fetcher.set_results(
Build('MOCK Win7'),
WebTestResults.from_json({
'tests': {
'userscripts': {
'skipped-test.html': {
'expected': 'SKIP',
'actual': 'SKIP',
}
}
}
}))
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('userscripts/skipped-test.html',
Build('MOCK Mac10.11'))
test_baseline_set.add('userscripts/skipped-test.html',
Build('MOCK Win7'))
self.command.rebaseline(self.options(), test_baseline_set)
new_expectations = self._read(self.test_expectations_path)
self.assertMultiLineEqual(
new_expectations,
('# tags: [ Mac Win ]\n'
'# results: [ Skip ]\n'
'[ Mac ] userscripts/skipped-test.html [ Skip ]\n'
'[ Win ] userscripts/skipped-test.html [ Skip ]\n'))
self.assertEqual(self.tool.executive.calls, [])
def test_rebaseline_keeps_flaky_expectations(self):
# Flaky expectations should be kept even if the test passes.
self._write(
self.test_expectations_path,
'# results: [ Pass Failure ]\nuserscripts/flaky-test.html [ Pass Failure ]\n'
)
self._write('userscripts/flaky-test.html', 'Dummy test contents')
self.tool.results_fetcher.set_results(
Build('MOCK Mac10.11'),
WebTestResults.from_json({
'tests': {
'userscripts': {
'flaky-test.html': {
'expected': 'PASS FAIL',
'actual': 'PASS',
}
}
}
}))
test_baseline_set = TestBaselineSet(self.tool.builders)
test_baseline_set.add('userscripts/flaky-test.html',
Build('MOCK Mac10.11'))
self.command.rebaseline(self.options(), test_baseline_set)
new_expectations = self._read(self.test_expectations_path)
self.assertMultiLineEqual(
new_expectations,
'# results: [ Pass Failure ]\nuserscripts/flaky-test.html [ Pass Failure ]\n'
)
self.assertEqual(self.tool.executive.calls, [])
def test_rebaseline_test_passes_unexpectedly(self):
# The test passes without any output. Its expectation should be updated
# without calling rebaseline-test-internal.
self._write(
self.test_expectations_path,
'# tags: [ Linux Mac10.10 Win ]\n# results: [ Failure ]\nuserscripts/all-pass.html [ Failure ]\n'
)
self._write('userscripts/all-pass.html', 'Dummy test contents')
test_baseline_set = TestBaselineSet(self.tool.builders)
self.tool.results_fetcher.set_results(
Build('MOCK Mac10.11'),
WebTestResults.from_json({
'tests': {
'userscripts': {
'all-pass.html': {
'expected': 'FAIL',
'actual': 'PASS',
'is_unexpected': True
}
}
}
}))
test_baseline_set.add('userscripts/all-pass.html',
Build('MOCK Mac10.11'))
self.command.rebaseline(self.options(), test_baseline_set)
new_expectations = self._read(self.test_expectations_path)
self.assertMultiLineEqual(
new_expectations,
('# tags: [ Linux Mac10.10 Win ]\n'
'# results: [ Failure ]\n'
'[ Linux ] userscripts/all-pass.html [ Failure ]\n'
'[ Mac10.10 ] userscripts/all-pass.html [ Failure ]\n'
'[ Win ] userscripts/all-pass.html [ Failure ]\n'))
self.assertEqual(self.tool.executive.calls, [])
def test_rebaseline_test_passes_unexpectedly_everywhere(self):
# Similar to test_rebaseline_test_passes_unexpectedly, except that the
# test passes on all ports.
self._write(
self.test_expectations_path,
'# results: [ Failure ]\nuserscripts/all-pass.html [ Failure ]\n')
self._write('userscripts/all-pass.html', 'Dummy test contents')
test_baseline_set = TestBaselineSet(self.tool.builders)
for builder in [
'MOCK Win7', 'MOCK Win10', 'MOCK Mac10.10', 'MOCK Mac10.11',
'MOCK Precise', 'MOCK Trusty'
]:
self.tool.results_fetcher.set_results(
Build(builder),
WebTestResults.from_json({
'tests': {
'userscripts': {
'all-pass.html': {
'expected': 'FAIL',
'actual': 'PASS',
'is_unexpected': True
}
}
}
}))
test_baseline_set.add('userscripts/all-pass.html', Build(builder))
self.command.rebaseline(self.options(), test_baseline_set)
new_expectations = self._read(self.test_expectations_path)
self.assertMultiLineEqual(new_expectations, '# results: [ Failure ]\n')
self.assertEqual(self.tool.executive.calls, [])
def test_rebaseline_test_passes_unexpectedly_but_on_another_port(self):
# Similar to test_rebaseline_test_passes_unexpectedly, except that the
# build was run on a different port than the port we are rebaselining
# (possible when rebaseline-cl --fill-missing), in which case we don't
# update the expectations.
self._write(
self.test_expectations_path,
'# results: [ Failure ]\nuserscripts/all-pass.html [ Failure ]\n')
self._write('userscripts/all-pass.html', 'Dummy test contents')
test_baseline_set = TestBaselineSet(self.tool.builders)
self.tool.results_fetcher.set_results(
Build('MOCK Mac10.11'),
WebTestResults.from_json({
'tests': {
'userscripts': {
'all-pass.html': {
'expected': 'FAIL',
'actual': 'PASS',
'is_unexpected': True
}
}
}
}))
test_baseline_set.add('userscripts/all-pass.html',
Build('MOCK Mac10.11'), 'MOCK Mac10.10')
self.command.rebaseline(self.options(), test_baseline_set)
new_expectations = self._read(self.test_expectations_path)
self.assertMultiLineEqual(
new_expectations,
'# results: [ Failure ]\nuserscripts/all-pass.html [ Failure ]\n')
self.assertEqual(self.tool.executive.calls, [])
class TestRebaselineExecute(BaseTestCase):
"""Tests for the main execute function of the blink_tool.py rebaseline command."""
command_constructor = Rebaseline
@staticmethod
def options():
return optparse.Values({
'results_directory': False,
'optimize': False,
'dry_run': False,
'builders': None,
'verbose': True,
})
def test_rebaseline(self):
# pylint: disable=protected-access
self.command._builders_to_pull_from = lambda: ['MOCK Win7']
self._setup_mock_build_data()
self.command.execute(self.options(), ['userscripts/first-test.html'],
self.tool)
baseline_set = TestBaselineSet(self.tool.builders)
baseline_set.add('userscripts/first-test.html', Build('MOCK Win7'),
'blink_web_tests (with patch)')
self._mock_copier.find_baselines_to_copy.assert_has_calls(
[
mock.call('userscripts/first-test.html', 'txt', baseline_set),
mock.call('userscripts/first-test.html', 'png', baseline_set),
],
any_order=True)
self._assert_baseline_downloaded(
'https://results.api.cr.dev/MOCK Win7/first/actual_text',
'platform/test-win-win7/userscripts/first-test-expected.txt')
self._assert_baseline_downloaded(
'https://results.api.cr.dev/MOCK Win7/first/actual_image',
'platform/test-win-win7/userscripts/first-test-expected.png')
self.tool.main.assert_not_called()
def test_rebaseline_directory(self):
# pylint: disable=protected-access
self.command._builders_to_pull_from = lambda: ['MOCK Win7']
self._setup_mock_build_data()
self.command.execute(self.options(), ['userscripts'], self.tool)
baseline_set = TestBaselineSet(self.tool.builders)
baseline_set.add('userscripts/first-test.html', Build('MOCK Win7'),
'blink_web_tests (with patch)')
self._mock_copier.find_baselines_to_copy.assert_has_calls(
[
mock.call('userscripts/first-test.html', 'txt', baseline_set),
mock.call('userscripts/first-test.html', 'png', baseline_set),
],
any_order=True)
self._assert_baseline_downloaded(
'https://results.api.cr.dev/MOCK Win7/first/actual_text',
'platform/test-win-win7/userscripts/first-test-expected.txt')
self._assert_baseline_downloaded(
'https://results.api.cr.dev/MOCK Win7/first/actual_image',
'platform/test-win-win7/userscripts/first-test-expected.png')
self._assert_baseline_downloaded(
'https://results.api.cr.dev/MOCK Win7/second/actual_audio',
'platform/test-win-win7/userscripts/second-test-expected.wav')
self._assert_baseline_downloaded(
'https://results.api.cr.dev/MOCK Win7/second/actual_image',
'platform/test-win-win7/userscripts/second-test-expected.png')
self.tool.main.assert_not_called()
class TestBaselineSetTest(unittest.TestCase):
def setUp(self):
host = MockBlinkTool()
host.port_factory = MockPortFactory(host)
port = host.port_factory.get()
base_dir = port.web_tests_dir()
host.filesystem.write_text_file(base_dir + 'a/x.html', '<html>')
host.filesystem.write_text_file(base_dir + 'a/y.html', '<html>')
host.filesystem.write_text_file(base_dir + 'a/z.html', '<html>')
host.builders = BuilderList({
'MOCK Mac10.12': {
'port_name': 'test-mac-mac10.12',
'specifiers': ['Mac10.12', 'Release']
},
'MOCK Trusty': {
'port_name': 'test-linux-trusty',
'specifiers': ['Trusty', 'Release'],
},
'MOCK Win10': {
'port_name': 'test-win-win10',
'specifiers': ['Win10', 'Release']
},
'some-wpt-bot': {
'port_name': 'linux-trusty',
'specifiers': ['Trusty', 'Release']
},
})
self.host = host
def test_add_and_iter_tests(self):
test_baseline_set = TestBaselineSet(self.host.builders)
test_baseline_set.add('a/x.html', Build('MOCK Trusty'))
test_baseline_set.add('a/y.html', Build('MOCK Trusty'))
test_baseline_set.add('a/z.html', Build('MOCK Trusty'))
test_baseline_set.add('a/z.html', Build('MOCK Win10'),
'blink_web_tests (with patch)')
self.assertEqual(list(test_baseline_set), [
('a/x.html', Build(builder_name='MOCK Trusty'), None,
'test-linux-trusty'),
('a/y.html', Build(builder_name='MOCK Trusty'), None,
'test-linux-trusty'),
('a/z.html', Build(builder_name='MOCK Trusty'), None,
'test-linux-trusty'),
('a/z.html', Build(builder_name='MOCK Win10'),
'blink_web_tests (with patch)', 'test-win-win10'),
])
self.assertEqual(test_baseline_set.all_tests(),
['a/x.html', 'a/y.html', 'a/z.html'])
def test_str_empty(self):
test_baseline_set = TestBaselineSet(self.host.builders)
self.assertEqual(str(test_baseline_set), '<Empty TestBaselineSet>')
def test_str_basic(self):
test_baseline_set = TestBaselineSet(self.host.builders)
test_baseline_set.add('a/x.html', Build('MOCK Mac10.12'))
test_baseline_set.add('a/x.html', Build('MOCK Win10'),
'blink_web_tests (with patch)')
self.assertRegex(str(test_baseline_set),
'a/x.html: .*, None, test-mac-mac10.12')
self.assertRegex(
str(test_baseline_set),
            r'a/x.html: .*, blink_web_tests \(with patch\), test-win-win10')
def test_getters(self):
test_baseline_set = TestBaselineSet(self.host.builders)
test_baseline_set.add('a/x.html', Build('MOCK Mac10.12'))
test_baseline_set.add('a/x.html', Build('MOCK Win10'))
self.assertEqual(test_baseline_set.all_tests(), ['a/x.html'])
self.assertEqual(
test_baseline_set.build_port_pairs('a/x.html'),
[(Build(builder_name='MOCK Mac10.12'), 'test-mac-mac10.12'),
(Build(builder_name='MOCK Win10'), 'test-win-win10')])
def test_non_prefix_mode(self):
test_baseline_set = TestBaselineSet(self.host.builders)
# This test does not exist in setUp.
test_baseline_set.add('wpt/foo.html', Build('some-wpt-bot'))
# But it should still appear in various getters since no test lookup is
# done when prefix_mode=False.
self.assertEqual(
list(test_baseline_set),
[('wpt/foo.html', Build('some-wpt-bot'), None, 'linux-trusty')])
self.assertEqual(test_baseline_set.all_tests(), ['wpt/foo.html'])
self.assertEqual(test_baseline_set.build_port_pairs('wpt/foo.html'),
[(Build('some-wpt-bot'), 'linux-trusty')])
def _serialize_round_trip(obj):
"""An identity function that raises when the argument is not pickleable.
The purpose of this function is to simulate passing messages across a
process boundary. A test that attempts to pass an unpickleable object across
the simulated boundary should fail, as it would with real processes.
"""
return pickle.loads(pickle.dumps(obj))
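
# Illustrative usage note (added; the values are hypothetical): the round trip
# is an identity for picklable objects and raises for unpicklable ones, e.g.
#   _serialize_round_trip({'port': 'test-linux-trusty'}) == {'port': 'test-linux-trusty'}
#   _serialize_round_trip(lambda: None)  # raises, since lambdas cannot be pickled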
|
Python
|
CL
|
8ff07c19977a6d03a13aeb36d76af54c9e427b805b433a948f5ba28167abf3fc
|
import unittest
import jsonschema
class TestSchema(unittest.TestCase):
def setUp(self):
self.schema_dict = {
'title': 'test_schema',
'type': 'object',
'properties': {
'name': {
'type': 'string'
},
'value': {
'type': 'number'
},
'is_good': {
'type': 'boolean'
}
}
}
self.test_schema = JSONSchema(self.schema_dict)
def test_base_exceptions(self):
bs = BaseSchema({})
with self.assertRaises(NotImplementedError):
schema = bs.schema
with self.assertRaises(NotImplementedError):
bs.validate({})
def test_json_schema_validation(self):
self.assertEqual(self.schema_dict, self.test_schema.schema)
good_object = {'name': 'jerry', 'value': 10.5, 'is_good': True}
bad_object = {'name': 'jerry', 'value': True, 'is_good': 10.5}
result = self.test_schema.validate(good_object)
self.assertTrue(result)
with self.assertRaises(jsonschema.exceptions.ValidationError):
result = self.test_schema.validate(bad_object)
def test_recursive_traverse(self):
outer_schema = JSONSchema({
'title': 'outer_schema',
'type': 'object',
'properties': {
'innerSchema': self.test_schema
}
})
expected = {
'title': 'outer_schema',
'type': 'object',
'properties': {
'innerSchema': {
'title': 'test_schema',
'type': 'object',
'properties': {
'is_good': {
'type': 'boolean'
},
'name': {
'type': 'string'
},
'value': {
'type': 'number'
}
},
}
},
}
self.assertEqual(outer_schema.schema, expected)
def test_adjust_paths(self):
inner_schema = JSONSchema({
'title': 'inner_schema',
'type': 'object',
'properties': {
'point': {
'type': 'object',
'$ref': '#/definitions/point'
}
},
'definitions': {
'point': {
'type': 'object',
'properties': {
'x': {
'type': 'number'
},
'y': {
'type': 'number'
}
}
}
}
}, recompute_refs=False)
outer_schema = JSONSchema({
'title': 'outer_schema',
'type': 'object',
'properties': {
'inner_schema': {
'type': 'object',
'$ref': '#/definitions/inner_schema'
}
},
'definitions': {
'inner_schema': inner_schema
}
}, recompute_refs=False)
test_element = {
'inner_schema': {
'point': {
'x': 5,
'y': 10
}
}
}
# before we recompute the definition locations we shouldn't
# be able to validate this element because we can't find
# the proper definitions
with self.assertRaises(jsonschema.exceptions.RefResolutionError):
result = outer_schema.validate(test_element)
# now let's recompute the def locations
outer_schema.adjust_references()
result = outer_schema.validate(test_element)
# presto
self.assertTrue(result)
|
Python
|
CL
|
ec537ea0e848895252a778a7452d0b12052e97c95d66df032c5310e1ecd4f74a
|
# -*- coding: utf-8 -*-
"""
pip_services3_components.cache.MemoryCache
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Memory cache component implementation
:copyright: Conceptual Vision Consulting LLC 2018-2019, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import threading
from typing import Any, Optional
from pip_services3_commons.config import ConfigParams
from pip_services3_commons.config.IReconfigurable import IReconfigurable
from pip_services3_commons.run.ICleanable import ICleanable
from .CacheEntry import CacheEntry
from .ICache import ICache
class MemoryCache(ICache, IReconfigurable, ICleanable):
"""
Cache that stores values in the process memory.
Remember: This implementation is not suitable for synchronization of distributed processes.
### Configuration parameters ###
options:
- timeout: default caching timeout in milliseconds (default: 1 minute)
- max_size: maximum number of values stored in this cache (default: 1000)
Example:
.. code-block:: python
cache = MemoryCache()
cache.store("123", "key1", "ABC", 0)
"""
__default_timeout: int = 60000
__default_max_size: int = 1000
def __init__(self):
"""
Creates a new instance of the cache.
"""
self.__cache: dict = {}
self.__count: int = 0
self.__max_size: int = self.__default_max_size
self.__timeout: int = self.__default_timeout
self.__lock: threading.Lock = threading.Lock()
def configure(self, config: ConfigParams):
"""
Configures component by passing configuration parameters.
:param config: configuration parameters to be set.
"""
self.__timeout = config.get_as_long_with_default("options.timeout", self.__default_timeout)
self.__max_size = config.get_as_long_with_default("options.max_size", self.__default_max_size)
def __cleanup(self):
oldest = None
self.__count = 0
# Cleanup obsolete entries and find the oldest
for (key, entry) in self.__cache.items():
# Remove obsolete entry
if entry.is_expired():
self.__cache.pop(key, None)
# Count the remaining entry
else:
self.__count += 1
if oldest is None or oldest.expiration > entry.expiration:
oldest = entry
# Remove the oldest if cache size exceeded maximum
if self.__count > self.__max_size and not (oldest is None):
self.__cache.pop(oldest.key, None)
self.__count -= 1
def retrieve(self, correlation_id: Optional[str], key: str) -> Any:
"""
Retrieves cached value from the cache using its key.
If value is missing in the cache or expired it returns None.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param key: a unique value key.
:return: a cached value or None if value wasn't found or timeout expired.
"""
self.__lock.acquire()
try:
# Cache has nothing
if key not in self.__cache:
return None
# Get entry from the cache
entry = self.__cache[key]
# Remove entry if expiration set and entry is expired
if entry.is_expired():
self.__cache.pop(key, None)
self.__count -= 1
return None
# Update access timeout
return entry.get_value()
finally:
self.__lock.release()
def store(self, correlation_id: Optional[str], key: str, value: Any, timeout: int) -> Any:
"""
Stores value in the cache with expiration time.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param key: a unique value key.
:param value: a value to store.
:param timeout: expiration timeout in milliseconds.
:return: a cached value stored in the cache.
"""
timeout = timeout if timeout > 0 else self.__default_timeout
self.__lock.acquire()
try:
entry = None
if key in self.__cache:
entry = self.__cache[key]
# Shortcut to remove entry from the cache
if value is None:
if not (entry is None):
self.__cache.pop(key, None)
self.__count -= 1
return None
# Update the entry
if not (entry is None):
entry.set_value(value, timeout)
# Or create a new entry
else:
entry = CacheEntry(key, value, timeout)
self.__cache[key] = entry
self.__count += 1
# Clean up the cache
if self.__max_size > 0 and self.__count > self.__max_size:
self.__cleanup()
return value
finally:
self.__lock.release()
def remove(self, correlation_id: Optional[str], key: str):
"""
Removes a value from the cache by its key.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param key: a unique value key.
"""
self.__lock.acquire()
try:
# Get the entry
entry = self.__cache.pop(key, None)
# Remove entry from the cache
if not (entry is None):
self.__count -= 1
finally:
self.__lock.release()
def clear(self, correlation_id: Optional[str]):
"""
Clears component state.
:param correlation_id: (optional) transaction id to trace execution through call chain.
"""
self.__lock.acquire()
try:
self.__cache = {}
finally:
self.__lock.release()
|
Python
|
CL
|
0a49e9ca29667cb0333f4828818a572f53ba55204e6b6cfc72da5369dbac1a74
|
###################################################
# This file is part of py-smc2.
# http://code.google.com/p/py-smc2/
#
# py-smc2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# py-smc2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with py-smc2. If not, see <http://www.gnu.org/licenses/>.
###################################################
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import os, os.path, sys
from numpy import random, power, sqrt, exp, zeros, zeros_like,\
ones, mean, average, prod, log, sum, repeat, newaxis, \
array, float32, int32, cov, load, isinf, isnan, zeros_like, \
var, linalg, pi, dot, argmax, transpose, diag
from numpy import max as numpymax
from numpy import min as numpymin
from numpy import sum as numpysum
import scipy.weave as weave
def ESSfunction(weights):
"""
Computes the ESS, given unnormalized weights.
"""
norm_weights = weights / sum(weights)
sqweights = power(norm_weights, 2)
return 1 / sum(sqweights)
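
# Illustrative note (added, not part of the original module): with N equal
# unnormalized weights the ESS equals N, e.g. ESSfunction(ones(4)) == 4.0,
# while a single dominant weight drives the ESS towards 1.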
def fastWeightedCov(X, unnormalizedw):
weights = unnormalizedw / numpysum(unnormalizedw)
Xbar = average(X, weights = weights, axis = 0)
code = \
"""
int row,col;
for (row = 0; row < d(0); row++)
{
for(col = 0; col < d(0); col++)
{
for (int index = 0; index < N(0); index ++){
covariance(row, col) += weights(index) * (X(index, row) - Xbar(row)) * (X(index, col) - Xbar(col));
}
}
}
"""
d = X.shape[1]
covariance = zeros((d, d))
d = array([d])
N = array([X.shape[0]])
weave.inline(code,['covariance', 'd', 'N', 'Xbar', 'X', 'weights'], \
type_converters=weave.converters.blitz, libraries = ["m"])
weightedcovariance = covariance / (1 - numpysum(power(weights, 2)))
return {"mean": Xbar, "cov": weightedcovariance}
def progressbar(ratio, text=None, ticks=50):
progress = int(ticks * ratio)
s = '%.1f%%' % (100.0 * ratio)
length = len(s)
if progress > ticks / 2 - length:
sys.stdout.write('\r[' + int(ticks / 2 - length) * '-' + s
+ int(progress - ticks / 2) * '-' + int(min(ticks -
progress, ticks / 2)) * ' ' + ']')
else:
sys.stdout.write('\r[' + progress * '-' + int(ticks / 2 - length - progress) * ' ' + s
+ int(ticks / 2) * ' ' + ']')
if not text is None: sys.stdout.write(text)
sys.stdout.flush()
|
Python
|
CL
|
8dbbb662128f00aac881e6ed72d50d800aca1642fc0e9a2cf728b4d44b1cd94a
|
# FiberFBot.py
# Implementation of the fiber-based FBot robot.
# External callers create fiber units, which then run continuously; execution and invocation are done via messages.
# start by sk. 180413
from TYBotSDK2.FBot.fbotV4 import FBotV4
from TYBotUtilsSDK2.Log.log_and_monitor import CTYLB_Log, CTYLB_MainSys_MiscFunc, CSkBot_Common_Share
from .NatsMsgProcWideLangComu import CAsyncNats_MultiLang_Proc_Comu # asynchronous communication with the NATS server
from TYBotSDK2.FiberFBot.FiberMangReal import FiberMang
import json
# Definition of the asynchronous communication unit
class CAsyncNats_ProcComu(CAsyncNats_MultiLang_Proc_Comu):
def __init__(self, strNATSServerAddr, strSelfRecvName):
CAsyncNats_MultiLang_Proc_Comu.__init__(self, strNATSServerAddr, strSelfRecvName)
def CheckRecv_Handle(self):
iExecCount = 0
origRecvMsgArray = self.CheckRecvMsg()
for eachRecvMsg in origRecvMsgArray:
            # handle each received message unit
#eachRecvMsg.s_strPeerMsgName
#eachRecvMsg.s_iMsgType
#eachRecvMsg.s_strMsgContent
self.v_HandleRecvPacket(eachRecvMsg.s_strPeerMsgName, eachRecvMsg.s_iMsgType, eachRecvMsg.s_strMsgContent)
if(not FBotV4.GetGlobalIsRunning()):
break
pass
return iExecCount
    # Handle a received data packet
def v_HandleRecvPacket(self, strFromName, iMsgType, strMsgContent):
# self.AddContentToSend(eachRecvMsg.s_strPeerMsgName, eachRecvMsg.s_iMsgType, strReplyContent)
pass
# Structure of the fiber-unit list used for communication between the URC nodes
class CUTRC_NATs_ComuFiberList:
s_g_str_Section_TaskUID="task_uid"
s_g_str_Section_TaskName_StrParam="name_strparam"
s_g_str_Section_TaskName_LongParam="name_longParam"
def __init__(self):
self.Clear()
pass
    # Export to a string
def ExportToStr(self):
exDict={
self.s_g_str_Section_TaskUID: self.s_dict_TaskUID,
self.s_g_str_Section_TaskName_StrParam: self.s_dict_TaskName_strParam_UID,
self.s_g_str_Section_TaskName_LongParam: self.s_dict_TaskName_LongParam_UID,
}
strTotal=json.dumps(exDict, ensure_ascii=True)
return strTotal
    # Load from a string
def LoadFromStr(self, strContent):
bRet = False
self.Clear()
if(strContent):
try:
dictContent=json.loads(strContent)
strTryKey=self.s_g_str_Section_TaskUID
if ( strTryKey in dictContent.keys()):
self.s_dict_TaskUID.update(dictContent[strTryKey])
strTryKey = self.s_g_str_Section_TaskName_StrParam
if (strTryKey in dictContent.keys()):
self.s_dict_TaskName_strParam_UID.update(dictContent[strTryKey])
strTryKey = self.s_g_str_Section_TaskName_LongParam
if (strTryKey in dictContent.keys()):
self.s_dict_TaskName_LongParam_UID.update(dictContent[strTryKey])
bRet=True
except Exception as e:
#CTYLB_Log.ShowLog(1, "comu-fiber-list load msg error", str(e))
CTYLB_MainSys_MiscFunc.ShowExceptionInfo(e)
return bRet
def Clear(self):
        self.s_dict_TaskUID={} # task UID list: key=UID, value=0
        self.s_dict_TaskName_strParam_UID={} # task name + string param -> UID
        self.s_dict_TaskName_LongParam_UID={} # task name + long param -> UID
    # Populate the lists from a FiberMang instance
def GetFromFiberMang(self, fiberMang):
self.Clear()
for eachFiberUID in fiberMang.s_dictUIDFiberTasks.keys():
fiberUnit = fiberMang.s_dictUIDFiberTasks[eachFiberUID]
if(fiberUnit.s_bExRemoteCallMe):
self.s_dict_TaskUID[eachFiberUID] = 0
for eachFiberStrParam in fiberMang.s_dictStrParamFiberTasks.keys():
fiberUnit = fiberMang.s_dictStrParamFiberTasks[eachFiberStrParam]
if(fiberUnit.s_bExRemoteCallMe):
self.s_dict_TaskName_strParam_UID[eachFiberStrParam]= fiberUnit.s_lUniqueID
for eachFiberLongParam in fiberMang.s_dictLongLongParamFiberTasks.keys():
fiberUnit = fiberMang.s_dictLongLongParamFiberTasks[eachFiberLongParam]
if(fiberUnit.s_bExRemoteCallMe):
self.s_dict_TaskName_LongParam_UID[eachFiberLongParam] = fiberUnit.s_lUniqueID
    # Check whether the key is contained
def IsContain_Key_StrParam(self, strParamKey):
bRet=False
if( strParamKey and (strParamKey in self.s_dict_TaskName_strParam_UID.keys())):
bRet=True
return bRet
    # Check whether the key is contained
def IsContain_Key_StrLongParam(self, strLongParamKey):
bRet=False
if( strLongParamKey and (strLongParamKey in self.s_dict_TaskName_LongParam_UID.keys())):
bRet=True
return bRet
# TianYuan (TY) FiberBot instance, implements the NATS client
class TYFiberBot_Mang_NATS_Instance_Base:
def __init__(self, config_file="config/config.yaml", funcCallBack=None):
        # save local variables
self.s_funcTimerCallBack = funcCallBack
        # create the inter-process communication unit
self.s_AsyncNATS_ProcComu=None
self.v_CreateAsyncProcComu(config_file)
if(self.s_AsyncNATS_ProcComu):
self.s_AsyncNATS_ProcComu.StartComu()
        # fiber manager implementation
self.s_FiberMang = FiberMang()
        # initialize the fiber manager
        # message passing is implemented with fiber units: when a message arrives from the NATS server, check whether it belongs to the local queue; if not, forward it to tybot
pass
    # Create the asynchronous inter-process communication unit
def v_CreateAsyncProcComu(self, config_file):
pass
def Run(self):
while(FBotV4.GetGlobalIsRunning()):
self.LoopEventCallBack()
    # Called repeatedly on every time unit
def v_TimerCheck(self):
return False
def LoopEventCallBack(self):
if(self.s_AsyncNATS_ProcComu):
self.s_AsyncNATS_ProcComu.CheckRecv_Handle()
self.s_FiberMang.TimerCheck()
self.s_FiberMang.CheckTaskSleep()
if (self.s_funcTimerCallBack):
self.s_funcTimerCallBack()
self.v_TimerCheck()
pass
def Quit(self):
global IS_SYS_RUNNING
IS_SYS_RUNNING = False
def AddFiberUnit(self, fiberUnit):
self.s_FiberMang.AddTask(fiberUnit)
fiberUnit.v_SetParentMang(self.s_FiberMang)
|
Python
|
CL
|
a76204ebc45d544ed467288438e0abfa7162b2064fd05f9c47763b9e8c401f96
|
# This file is a part of Arjuna
# Copyright 2015-2021 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
from enum import Enum, auto
from abc import abstractmethod
from arjuna.tpi.constant import *
from arjuna.core.constant import *
from arjuna.tpi.error import GuiLabelNotPresentError
from arjuna.interact.gui.auto.finder.wmd import GuiWidgetMetaData
from arjuna.interact.gui.auto.finder._with import ImplWith
class FileFormat(Enum):
# GNS = auto()
# MGNS = auto()
XLS = auto()
XLSX = auto()
YAML = auto()
class GuiNamespaceLoaderFactory:
# Returns GuiNamespaceLoader
@classmethod
def create_namespace_loader(cls, config, ns_file_path):
from arjuna.tpi.constant import ArjunaOption
multi_context_enabled = config.value(ArjunaOption.GUIAUTO_DEF_MULTICONTEXT)
context = multi_context_enabled and None or config.guiauto_context
_, file_extension = os.path.splitext(ns_file_path)
ext = file_extension.upper()[1:]
considered_path = ns_file_path
try:
file_format = FileFormat[ext]
except:
raise Exception("Unsupported format for namespace: {}".format(file_extension))
else:
full_file_path = ns_file_path
if os.path.isdir(full_file_path):
raise Exception("Namespace file path is a directory and not a file: {}".format(considered_path))
elif not os.path.isfile(full_file_path):
from arjuna import log_warning
log_warning("Namespace file path does not exist: {}".format(considered_path))
return DummyGnsLoader(considered_path)
# raise Exception()
# if file_format == FileFormat.GNS:
# if multi_context_enabled:
# return MGNSFileLoader(full_file_path)
# else:
# return GNSFileLoader(full_file_path, context)
if file_format == FileFormat.YAML:
return YamlGnsLoader(full_file_path, context)
else:
raise Exception("Unsupported format for namespace: {}".format(file_extension))
class GuiNamespace:
def __init__(self, name):
self.__name = name
# dict <string, dict<GuiAutomationContext, GuiWidgetMetaData>>
self.__ns = {}
def is_empty(self):
return not self.__ns
def add_element_meta_data(self, name, context, raw_locators, meta):
from arjuna import log_debug
log_debug("Loading {} label. Meta data: {}".format(name, str(meta)))
wmd = GuiWidgetMetaData.create_wmd(*raw_locators, meta=meta)
name = name.lower()
if not self.has(name):
self.__ns[name] = {}
self.__ns[name][context] = wmd
log_debug("Loaded {} label. EMD: {}".format(name, str(wmd)))
def add_reference(self, name, value):
self.__ns[name] = value
def has(self, name):
return name.lower() in self.__ns
def has_context(self, name, context):
if self.has(name):
return context in self.__ns[name.lower()]
return False
# Needs to be thread-safe
# Returns wmd for a context for a given gui name
def get_meta_data(self, label, context):
msg = ""
if self.is_empty():
msg = "Namespace is empty"
if not self.has(label):
raise GuiLabelNotPresentError(self.__name, label, msg=msg)
elif not self.has_context(label, context):
raise GuiLabelNotPresentError(self.__name, label, context, msg=msg)
return self.__ns[label.lower()][context]
@property
def root_element_name(self):
return self.__ns["__root__"]
@property
def anchor_element_name(self):
return self.__ns["__anchor__"]
class BaseGuiNamespaceLoader:
def __init__(self, name):
self.__name = name
self.__namespace = GuiNamespace(name)
@property
def name(self):
return self.__name
@property
def namespace(self):
return self.__namespace
# Needs to be thread safe
def add_element_meta_data(self, name, context, locators, meta):
self.__namespace.add_element_meta_data(name, context, locators, meta)
def add_reference(self, name, value):
self.__namespace.add_reference(name, value)
def _raise_notafile_exception(self, file_path):
raise Exception("{} is not a file.".format(file_path))
def _raise_filenotfound_exception(self, file_path):
raise Exception("{} is not a valid file path.".format(file_path))
def _raise_relativepath_exception(self, file_path):
raise Exception("Gui namespace loader does not accept relative file path. {} is not a full file path.".format(file_path))
def load(self):
pass
class DummyGnsLoader(BaseGuiNamespaceLoader):
def __init__(self, ns_file_path):
super().__init__(os.path.basename(ns_file_path))
class YamlGnsLoader(BaseGuiNamespaceLoader):
def __init__(self, ns_file_path, context):
super().__init__(os.path.basename(ns_file_path))
self.__context = context
self.__ns_file = None
self.__ns_path = None
self.__ns = {}
if not os.path.isabs(ns_file_path):
super()._raise_relativepath_exception(ns_file_path)
elif not os.path.exists(ns_file_path):
super()._raise_filenotfound_exception(ns_file_path)
elif not os.path.isfile(ns_file_path):
super()._raise_notafile_exception(ns_file_path)
self.__ns_path = ns_file_path
self.__contexts = [context]
self.__withx = None
# self.__process()
def load(self):
from arjuna import Arjuna, log_debug
from arjuna.configure.validator import Validator
from arjuna.interact.gui.auto.finder._with import WithType
from arjuna.tpi.parser.yaml import Yaml
creation_context="Gui Namespace file at {}".format(self.__ns_path)
yaml = Yaml.from_file(self.__ns_path, allow_any=True)
if yaml is None: return
if not yaml.has_section("labels"):
# print("No labels configured. Skipping...")
return
from arjuna.interact.gui.auto.finder.withx import WithX
if yaml.has_section("withx"):
self.__withx = WithX(yaml.get_section("withx").as_map())
else:
self.__withx = WithX()
common_withx = Arjuna.get_withx_ref()
from arjuna.tpi.error import GuiWidgetDefinitionError
for label, label_map in yaml.get_section("labels").as_map().items():
log_debug("Loading label: " + label)
Validator.name(label)
self.__ns[label.lower()] = {"locators" : {self.__context: []}, "meta": dict()}
for entry in label_map:
if type(label_map) is dict:
loc, loc_obj = entry, label_map[entry]
elif type(label_map) is list:
if type(entry) is not dict or len(entry) != 1:
raise GuiWidgetDefinitionError("The GNS entry for label {} is not correctly formatted. For list content type, each list item should be a single item dictionary. Found: {}".format(label, label_map))
loc, loc_obj = list(entry.keys())[0], list(entry.values())[0]
else:
raise GuiWidgetDefinitionError("The GNS entry for label {} is not correctly formatted. The content should either be a YAML mapping or YAML list. Found: {}".format(label, label_map))
log_debug("Loading locator: " + loc)
loc = loc.lower()
wtype, wvalue = None, None
if not self.__withx.has_locator(loc) and not common_withx.has_locator(loc):
wtype, wvalue = loc.upper(), loc_obj
if wtype in dir(WithType):
log_debug("Loading Arjuna defined Locator: " + loc)
if wtype in {'ATTR', 'FATTR', 'BATTR', 'EATTR'}:
if len(wvalue) > 1:
raise Exception("attr/fattr/battr/eattr entries in GNS should have a single key value pair mapping. Found: {} for locator type: {} for label: {}".format(wvalue, loc, label))
final_value = dict()
for k,v in wvalue.items():
final_value['name'] = k
final_value['value'] = v
wvalue = final_value
iloc = ImplWith(wtype=wtype, wvalue=wvalue, has_content_locator=False)
self.__ns[label.lower()]["locators"][self.__context].append(iloc)
else:
log_debug("Loading meta data for key: " + loc)
self.__ns[label.lower()]["meta"][wtype.lower()] = wvalue
else:
if self.__withx.has_locator(loc):
wx = self.__withx
elif common_withx.has_locator(loc):
wx = common_withx
else:
raise Exception("No WithX locator with name {} found. Check GNS file at {}.".format(name, self.__ns_path))
try:
wtype, wvalue = wx.format(loc, loc_obj)
except Exception as e:
raise Exception("Error in implementation of withx locator extension: {} for label {}. Implementation: {}. Error: {}.".format(loc, label, wvalue, str(e)))
iloc = ImplWith(wtype=wtype, wvalue=wvalue, has_content_locator=False)
self.__ns[label.lower()]["locators"][self.__context].append(iloc)
if not self.__ns[label.lower()]["locators"][self.__context]:
raise Exception("No locators defined for label: {}".format(label))
if yaml.has_section("load"):
self.__load_targets = yaml.get_section("load").as_map()
if "root" in self.__load_targets:
self.__ns["__root__"] = self.__load_targets["root"].lower()
else:
self.__ns["__root__"] = None
if "anchor" in self.__load_targets:
self.__ns["__anchor__"] = self.__load_targets["anchor"].lower()
else:
self.__ns["__anchor__"] = None
else:
self.__ns["__root__"] = None
self.__ns["__anchor__"] = None
for ename, wmd in self.__ns.items():
if ename not in {'__root__', '__anchor__'}:
context_data = wmd["locators"]
for context, locators in context_data.items():
self.add_element_meta_data(ename, context, locators, wmd["meta"])
log_debug("Loading {} label for {} context with locators: {} and meta {}.".format(ename, context, [str(l) for l in locators], wmd["meta"]))
self.add_reference("__root__", self.__ns["__root__"])
self.add_reference("__anchor__", self.__ns["__anchor__"])
|
Python
|
CL
|
4f4fc9bc9c5aa91f2849fc3801bf4b32c1a1ee0ca7fbf274ff904e9416c7ad31
|
from functools import wraps
import logging
from pyramid.httpexceptions import HTTPBadRequest, HTTPFound
from pyramid.security import NO_PERMISSION_REQUIRED, remember
from pyramid.response import Response
from pyramid.view import view_config
from .configuration import (
CONFIG_CLIENT_ID,
CONFIG_CLIENT_SECRET,
CONFIG_OP_AUTHZ_URI,
CONFIG_OP_PUBLIC_KEY,
CONFIG_OP_TOKEN_URI,
CONFIG_OP_USERINFO_URI)
from .oidc import OidcSession
log = logging.getLogger(__name__)
@view_config(route_name='oidc_authn', permission=NO_PERMISSION_REQUIRED)
def oidc_authn(request):
settings = request.registry.settings
client_id = settings[CONFIG_CLIENT_ID]
op_authz_uri = settings[CONFIG_OP_AUTHZ_URI]
oidc = OidcSession(
client_id=client_id,
redirect_uri=request.route_url('oidc_callback'),
scope=['openid'])
url, state, nonce = oidc.authorization_url(op_authz_uri)
request.session['oidc_state'] = state
request.session['oidc_nonce'] = nonce
return HTTPFound(url)
@view_config(route_name='oidc_callback', permission=NO_PERMISSION_REQUIRED)
def oidc_callback(request):
"""
    Accepts the OIDC authentication response, obtains an access token and finally
authenticates the user.
This is configured as the OIDC client redirect_uri.
"""
settings = request.registry.settings
client_id = settings[CONFIG_CLIENT_ID]
client_secret = settings[CONFIG_CLIENT_SECRET]
op_public_key = settings[CONFIG_OP_PUBLIC_KEY]
op_token_uri = settings[CONFIG_OP_TOKEN_URI]
op_userinfo_uri = settings[CONFIG_OP_USERINFO_URI]
try:
state = request.GET.getone('state')
code = request.GET.getone('code')
except KeyError as exc:
msg = (
"Bad or missing query params {} in request."
.format(request.GET))
log.warn(msg)
return HTTPBadRequest(detail=msg)
# TODO check state against session
# TODO check nonce exists in session
nonce = request.session.get('oidc_nonce')
oidc = OidcSession(
client_id=client_id,
public_key=op_public_key,
state=state,
redirect_uri=request.route_url('oidc_callback'))
token = oidc.fetch_token(
op_token_uri,
nonce,
client_secret=client_secret,
authorization_response=request.url)
    print(oidc.token)
#request.session['oidc_userinfo'] = oidc.fetch_userinfo(op_userinfo_uri)
userinfo = oidc.fetch_userinfo(op_userinfo_uri)
    print(userinfo)
remember(request, userinfo['preferred_username'])
return Response(status_int=200)
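
# Illustrative wiring sketch (added; the route names come from the views above,
# the URL patterns are assumptions):
#   config.add_route('oidc_authn', '/oidc/login')
#   config.add_route('oidc_callback', '/oidc/callback')
#   config.scan()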
|
Python
|
CL
|
6e9f82dd5a3436fc54ebe1002df2aff49622da8e0b409f668fea66eae6224f42
|
# Generated by Django 3.0.8 on 2020-07-11 11:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Proposal',
fields=[
('title', models.CharField(max_length=150, unique=True)),
('proposal_slug', models.SlugField(max_length=200, primary_key=True, serialize=False)),
('description', models.TextField(blank=True)),
('due_date', models.DateField(blank=True, null=True)),
('form_complete', models.BooleanField(default=False)),
('status', models.CharField(choices=[('Proposed', 'Proposed'), ('Ongoing', 'Ongoing'), ('Completed', 'Completed')], default='Proposed', max_length=15)),
('workspace_url', models.URLField(blank=True)),
('approved_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='approved_proposals', to=settings.AUTH_USER_MODEL)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='proposals', to=settings.AUTH_USER_MODEL)),
],
),
]
|
Python
|
CL
|
7e36379a6051600c404fa959ce7bc41796c61eac0b0f174b2df0d35ac46a2a1b
|
# -*- coding: utf-8 -*-
"""Zope2-specific helpers and layers using ZServer
"""
from __future__ import absolute_import
from plone.testing import Layer
from plone.testing import zodb
from plone.testing import zope
from plone.testing._z2_testbrowser import Browser # noqa
from plone.testing.zope import addRequestContainer
from plone.testing.zope import installProduct
from plone.testing.zope import login # noqa
from plone.testing.zope import logout # noqa
from plone.testing.zope import setRoles # noqa
from plone.testing.zope import TestIsolationBroken
from plone.testing.zope import uninstallProduct
import contextlib
import os
import transaction
@contextlib.contextmanager
def zopeApp(db=None, connection=None, environ=None):
"""Context manager for working with the Zope2 app::
with zopeApp() as app:
...
The ``app`` object has a request container and a simple ``REQUEST``.
To set the request environment, pass a dict ``environ``. See
``addRequestContainer()`` for details.
    Pass a ZODB handle as ``db`` to use a specific database. Alternatively,
pass an open connection as ``connection`` (the connection will not be
closed).
"""
from ZServer import Zope2
closeConn = True
if connection is not None:
closeConn = False
if connection is None and db is not None:
connection = db.open()
app = addRequestContainer(Zope2.app(connection), environ=environ)
if connection is None:
connection = app._p_jar
# exceptions in finally clauses can mask exceptions
# in the preceeding code block. So we catch
# every exception and throw it instead of the exception
# in the finally clause
inner_exception = None
try:
yield app
except Exception as e:
inner_exception = e
try:
transaction.abort()
except Exception as e:
inner_exception = e
raise
raise
else:
try:
transaction.commit()
except Exception as e:
inner_exception = e
finally:
try:
app.REQUEST.close()
if closeConn:
transaction.abort()
connection.close()
except Exception:
if inner_exception:
raise inner_exception
else:
raise
# Startup layer - you probably don't want to use this one directly
class Startup(zope.Startup):
"""This layer does what ZopeLite and ZopeTestCase's base.TestCase did:
start up a minimal Zope instance and manages the application and
request state.
You probably don't want to use this layer directly. Instead, you should
use one of the layers that has it as a base.
The following resources are exposed:
* ``zodbDB`` is the ZODB with the test fixture
* ``configurationContext`` is the ``zope.configuration`` context for
ZCML loading.
* ``host`` and ``port`` are the fake hostname and port number,
respectively.
"""
threads = 1
# Layer lifecycle helper methods
def setUpThreads(self):
"""Set the thread count for ZServer. This defaults to 1.
"""
# We can't use setNumberOfThreads() because that function self-
# destructs, literally, when called.
from ZServer.Zope2.Startup import config
self._zserverThreads = config.ZSERVER_THREADS
config.ZSERVER_THREADS = self.threads
def tearDownThreads(self):
"""Reset the ZServer thread count.
"""
from ZServer.Zope2.Startup import config
config.ZSERVER_THREADS = self._zserverThreads
del self._zserverThreads
def setUpApp(self):
"""Trigger Zope startup and set up the application.
"""
# If the Testing module has been imported, the testinghome
# variable is set and changes the way Zope2.startup() works.
# We want the standard behavior so we remove it.
import App.config
config = App.config.getConfiguration()
try:
self._testingHome = config.testinghome
except AttributeError:
pass
else:
del config.testinghome
App.config.setConfiguration(config)
# This uses the DB from the dbtab, as configured in setUpDatabase().
# That DB then gets stored as Zope2.DB and becomes the default.
from ZServer import Zope2
Zope2.startup()
# At this point, Zope2.DB is set to the test database facade. This is
        # the database that will be used by default when someone does Zope2.app().
def tearDownApp(self):
"""Undo Zope 2 startup by unsetting the global state it creates.
"""
import Zope2
import ZServer.Zope2
ZServer.Zope2.app()._p_jar.close()
ZServer.Zope2._began_startup = 0
Zope2.DB = None
Zope2.bobo_application = None
Zope2.zpublisher_transactions_manager = None
Zope2.zpublisher_validated_hook = None
Zope2.zpublisher_exception_hook = None
Zope2.__bobo_before__ = None
import App.config
try:
self._testingHome
except AttributeError:
pass
else:
config = App.config.getConfiguration()
config.testinghome = self._testingHome
App.config.setConfiguration(config)
del self._testingHome
# Clear out the app reference cached in get_module_info's
# 'modules' parameter default dict. (waaaaa)
import ZPublisher.Publish
defaults = ZPublisher.Publish.get_module_info.func_defaults
if defaults:
d = list(defaults)
d[0] = {}
ZPublisher.Publish.get_module_info.func_defaults = tuple(d)
def setUpBasicProducts(self):
"""Install a minimal set of products required for Zope 2.
"""
with zopeApp() as app:
installProduct(app, 'Products.PluginIndexes')
installProduct(app, 'Products.OFSP')
def tearDownBasicProducts(self):
"""Tear down the minimal set of products
"""
with zopeApp() as app:
uninstallProduct(app, 'Products.PluginIndexes')
uninstallProduct(app, 'Products.OFSP')
# It's possible for Five's _register_monkies and _meta_type_regs
        # global variables to contain duplicates. This causes an unnecessary
# error in the LayerCleanup layer's tear-down. Guard against that
# here
try:
from OFS import metaconfigure
except ImportError:
# Zope <= 2.12
from Products.Five import fiveconfigure as metaconfigure
metaconfigure._register_monkies = list(
set(metaconfigure._register_monkies))
metaconfigure._meta_type_regs = list(
set(metaconfigure._meta_type_regs))
STARTUP = Startup()
# Basic integration and functional test and layers. These are the simplest
# Zope 2 layers that are generally useful
class IntegrationTesting(zope.IntegrationTesting):
"""This layer extends ``STARTUP`` to add rollback of the transaction
    after each test. It does not manage a fixture and has no layer lifecycle,
only a test lifecycle.
The application root is available as the resource ``app`` and the request
is available as the resource ``request``, set up and torn down for each
test.
Hint: If you want to create your own fixture on top of ``STARTUP``,
create a new layer that has ``STARTUP`` as a base. Then instantiate
this layer with your new "fixture" layer as a base, e.g.::
from plone.testing import zserver
from plone.testing import Layer
class MyFixture(Layer):
...
MY_FIXTURE = MyFixture(bases=(zserver.STARTUP,), name='MyFixture')
MY_INTEGRATION_TESTING = zserver.IntegrationTesting(bases=(MY_FIXTURE,), name='MyFixture:Integration') # noqa
"""
defaultBases = (STARTUP,)
def testSetUp(self):
from ZServer import Zope2
# Open a new app and save it as the resource ``app``.
environ = {
'SERVER_NAME': self['host'],
'SERVER_PORT': str(self['port']),
}
app = addRequestContainer(Zope2.app(), environ=environ)
request = app.REQUEST
request['PARENTS'] = [app]
# Make sure we have a zope.globalrequest request
try:
from zope.globalrequest import setRequest
setRequest(request)
except ImportError:
pass
# Start a transaction
transaction.begin()
self._original_commit = transaction.commit
def you_broke_it():
raise TestIsolationBroken("""You are in a Test Layer
(IntegrationTesting) that is fast by just aborting transactions between each
test. You just committed something. That breaks the test isolation. So I stop
here and let you fix it.""")
# Prevent commits in integration tests which breaks test isolation.
transaction.commit = you_broke_it
# Save resources for tests to access
self['app'] = app
self['request'] = request
INTEGRATION_TESTING = IntegrationTesting()
class FunctionalTesting(zope.FunctionalTesting):
"""An alternative to ``INTEGRATION_TESTING`` suitable for functional testing.
This one pushes and pops a ``DemoStorage`` layer for each test. The
net result is that a test may commit safely.
As with ``INTEGRATION_TESTING``, the application root is available as the
resource ``app`` and the request is available as the resource ``request``,
set up and torn down for each test.
Hint: If you want to create your own fixture on top of ``STARTUP``,
create a new layer that has ``STARTUP`` as a base. Then instantiate
this layer with your new "fixture" layer as a base, e.g.::
from plone.testing import zserver
from plone.testing import Layer
class MyFixture(Layer):
...
MY_FIXTURE = MyFixture(bases=(zserver.STARTUP,), name='MyFixture')
MY_FUNCTIONAL_TESTING = zserver.FunctionalTesting(bases=(MY_FIXTURE,), name='MyFixture:Functional') # noqa
"""
defaultBases = (STARTUP,)
def testSetUp(self):
from ZServer import Zope2
# Override zodbDB from the layer setup. Since it was set up by
# this layer, we can't just assign a new shadow. We therefore keep
# track of the original so that we can restore it on tear-down.
self['zodbDB'] = zodb.stackDemoStorage(
self.get('zodbDB'),
name='FunctionalTest')
# Save the app
environ = {
'SERVER_NAME': self['host'],
'SERVER_PORT': str(self['port']),
}
app = addRequestContainer(Zope2.app(), environ=environ)
request = app.REQUEST
request['PARENTS'] = [app]
# Make sure we have a zope.globalrequest request
try:
from zope.globalrequest import setRequest
setRequest(request)
except ImportError:
pass
# Start a transaction
transaction.begin()
# Save resources for the test
self['app'] = app
self['request'] = request
FUNCTIONAL_TESTING = FunctionalTesting()
# More advanced functional testing - running ZServer and FTP server
class ZServer(Layer):
"""Start a ZServer that accesses the fixture managed by the
``STARTUP`` layer.
The host and port are available as the resources ``host`` and ``port``,
respectively.
This should *not* be used in parallel with the ``FTP_SERVER`` layer, since
it shares the same async loop.
The ``ZSERVER_FIXTURE`` layer must be used as the base for a layer that
uses the ``FunctionalTesting`` layer class. The ``ZSERVER`` layer is
an example of such a layer.
"""
defaultBases = (STARTUP,)
host = os.environ.get('ZSERVER_HOST', '')
port = int(os.environ.get('ZSERVER_PORT', 0))
timeout = 5.0
log = None
def setUp(self):
from threading import Thread
import time
self['host'] = self.host
self['port'] = self.port
self._shutdown = False
self.setUpServer()
self.thread = Thread(
name='{0} server'.format(self.__name__),
target=self.runner,
)
self.thread.start()
time.sleep(0.5)
def tearDown(self):
import time
self._shutdown = True
self.thread.join(self.timeout)
time.sleep(0.5)
self.tearDownServer()
del self['host']
del self['port']
def setUpServer(self):
"""Create a ZServer server instance and save it in self.zserver
"""
from StringIO import StringIO
from ZServer import logger
from ZServer import zhttp_handler
from ZServer import zhttp_server
log = self.log
if log is None:
log = StringIO()
zopeLog = logger.file_logger(log)
server = zhttp_server(
ip=self.host,
port=self.port,
resolver=None,
logger_object=zopeLog,
)
# If we dynamically set the host/port, we want to reset it to localhost
# Otherwise this will depend on, for example, the local network setup
if self.host in ('', '0.0.0.0', '127.0.0.1', ):
server.server_name = 'localhost'
# Refresh the hostname and port in case we dynamically picked them
self['host'] = self.host = server.server_name
self['port'] = self.port = server.server_port
zhttpHandler = zhttp_handler(module='Zope2', uri_base='')
server.install_handler(zhttpHandler)
self.zserver = server
def tearDownServer(self):
"""Close the ZServer socket
"""
self.zserver.close()
# Thread runner
def runner(self):
"""Thread runner for the main asyncore loop. This function runs in a
separate thread.
"""
import asyncore
# Poll
socket_map = asyncore.socket_map
while socket_map and not self._shutdown:
asyncore.poll(self.timeout, socket_map)
# Fixture layer - use as a base layer, but don't use directly, as it has no
# test lifecycle
ZSERVER_FIXTURE = ZServer()
# Functional testing layer that uses the ZSERVER_FIXTURE
ZSERVER = FunctionalTesting(
bases=(
ZSERVER_FIXTURE,
),
name='ZServer:Functional')
class FTPServer(ZServer):
"""FTP variant of the ZServer layer.
This will not play well with the ZServer layer. If you need both
ZServer and FTPServer running together, you can subclass the ZServer
layer class (like this layer class does) and implement setUpServer()
and tearDownServer() to set up and close down two servers on different
ports. They will then share a main loop.
The ``FTP_SERVER_FIXTURE`` layer must be used as the base for a layer that
uses the ``FunctionalTesting`` layer class. The ``FTP_SERVER`` layer is
an example of such a layer.
"""
defaultBases = (STARTUP,)
host = os.environ.get('FTPSERVER_HOST', '')
port = int(os.environ.get('FTPSERVER_PORT', 0))
threads = 1
timeout = 5.0
log = None
def setUpServer(self):
"""Create an FTP server instance and save it in self.ftpServer
"""
from StringIO import StringIO
from ZServer import logger
from ZServer.FTPServer import FTPServer
log = self.log
if log is None:
log = StringIO()
zopeLog = logger.file_logger(log)
self.ftpServer = FTPServer(
'Zope2',
ip=self.host,
port=self.port,
logger_object=zopeLog,
)
# Refresh the hostname and port in case we dynamically picked them
self.host, self.port = self.ftpServer.socket.getsockname()
# If we dynamically set the host/port, we want to reset it to localhost
# Otherwise this will depend on, for example, the local network setup
if self.host in ('', '0.0.0.0', '127.0.0.1', ):
self.host = 'localhost'
self.ftpServer.hostname = 'localhost'
self.ftpServer.ip = '127.0.0.1'
self['host'] = self.host
self['port'] = self.port
def tearDownServer(self):
"""Close the FTPServer socket
"""
self.ftpServer.close()
# Fixture layer - use as a base layer, but don't use directly, as it has no
# test lifecycle
FTP_SERVER_FIXTURE = FTPServer()
# Functional testing layer that uses the FTP_SERVER_FIXTURE
FTP_SERVER = FunctionalTesting(
bases=(
FTP_SERVER_FIXTURE,
),
name='FTPServer:Functional')
|
Python
|
CL
|
1fcd674e74ad916edcaa6d24e825adb2c7e646b8f39df62dd64ba94ed2fb862f
|
# coding: utf-8
# <h1 align="center"> Desafio Lopes LABS </h1>
#
# <h2 align="right"> Bruno Ramalho Furlan </h2>
# **Objetivo:** Criar um modelo para estimar a qualidade do vinho.
# <h3 align="justified">Importando dados </h3>
#
# <p align="justified"> Verificando tipo de dados para cada coluna e quantidade de linhas e colunas </p>
# In[1]:
import csv
import pandas as pd
df = pd.read_csv("winequality.csv", sep=";")
df.head(1000)
# In[2]:
print(df.shape)
# In[3]:
print(df.info())
# <p align="justified"> Foi verificado que o tipo da coluna com a variavel álcool ("alcohol") está como object e não como float. Para isso foi realizada a alteração do tipo de coluna para float.</p>
# In[4]:
df["alcohol"] = pd.to_numeric(df["alcohol"], errors="coerce")
# In[5]:
df.info()
# <h3 align="justified">Eliminando duplicatas e linhas com valores nulos </h3>
# <p align="justified">
# Eliminando dados duplicados ou que não apresentam valores em todas as colunas para uma estimação do modelo a partir de todos os parâmetros.</p>
# In[6]:
import numpy as np
df = df.drop_duplicates()
df = df.dropna(how='any',axis=0)
df.info()
# <h3 align="justified">Verificando a correlação dos dados com a qualidade do vinho (método de Pearson)</h3>
#
#
# In[7]:
df.corrwith(df["quality"], axis=0, drop=False, method='pearson')
# <p align="justified">As 3 variáveis que apresentam maior correlação encontradas foram:</p>
#
# 1. Álcool ("alcohol"), correlação positiva (0.469674);
# 2. Volatilidade da acidez ("volatile acidity"), correlação negativa (-0.266608);
# 3. Cloretos ("chlorides"), correlação negativa (-0.201844);
#
# <p align="justified">Estas 3 variáveis apresentaram correlação fraca, as demais apresentaram correlação inferior, em módulo, a 0,1.</p>
# <p align="justified">Para fins de comparação, feitas as correlações utilizando outros 2 métodos de correlação (Spearman e Kendall), sendo selecionadas as 3 maiores correlações em módulo.</p>
#
#
# <h3 align="justified">Verificando a correlação dos dados com a qualidade do vinho (método de Spearman)</h3>
#
# In[8]:
df.corrwith(df["quality"], axis=0, drop=False, method='spearman')
# <p align="justified">As 3 variáveis que apresentam maior correlação encontradas foram:</p>
#
# 1. Álcool ("alcohol"), correlação positiva (0.479853);
# 2. Densidade ("density"), correlação negativa (-0.349593);
# 3. Cloretos ("chlorides"), correlação negativa (-0.303533);
#
#
#
# <h3 align="justified">Verificando a correlação dos dados com a qualidade do vinho (método de Kendall)</h3>
#
# In[9]:
df.corrwith(df["quality"], axis=0, drop=False, method='kendall')
# <p align="justified">As 3 variáveis que apresentam maior correlação encontradas foram:</p>
#
# 1. Álcool ("alcohol"), correlação positiva (0.377853);
# 2. Densidade ("density"), correlação negativa (-0.268305);
# 3. Cloretos ("chlorides"), correlação negativa (-0.235499);
#
# <p align="justified">Estas 3 variáveis apresentaram correlação fraca, as demais apresentaram correlação despresível (inferior, em módulo a 0,1).</p>
# <p align="justified">Como o método de Kendall apresentou menor correlação das variáveis em comparação com os outros métodos, especialmente na variável álcool (que apresenta menor correlação em comparação com os demais métodos). Este não será utilizado na modelagem da qualidade. Assim ,serão comarados os métodos de Spearman e Pearson.</p>
# <h3 align="justified">Verificando dados para a modelagem </h3>
#
# In[10]:
df.describe()
# <p align="justified"> Para para a modelagem nos dois métodos foram utilizados os valores máximo e mínimo das seguintes variáveis, além da variável de qualidade (utilizada para validar o modelo):</p>
#
# 1. Álcool;
# 2. Volatilidade da acidez;
# 3. Cloretos;
# 4. Densidade;
#
#
# <p align="justified"> Para o método de Pearson foram utilizadas as variáveis álcool, volatilidade de acidez e cloretos enquanto que para o método de Spearman foram utilizadas as variáveis álcool, densidade e cloretos. para cada uma destas variáveis, foram dados conceitos de qualidade de 0 a 10 (para correlação positiva) e de 10 a 0 (para correlação negativa). Como o valor máximo de qualidade encontrado foi de 9 e o mínimo foi de 3, foram feitos os seguintes métodos para a estimação dos conceitos das variáveis:</p>
#
#
# * Foi feita a subtração do menor valor para o maior valor de cada uma das váriáveis, pelo menor valor e este foi dividido por 6 (intervalo entre o maior e o menor valor da variável qualidade) para determinar o tamanho do intervalo de dados que receberia cada conceito;
# * Com o tamanho do intervalo de dados para cada variável, foram feitas as divisões de intervalos para variável e atribuidos conceitos para cada variável:de 3 a 9 para correlações positivas e de 9 a 3 para correlações negativas;
# * Com a utilização de cada intervalo de dados, foram estimados os possíveis intervalos para os conceitos de 0,1,2,3 e 10;
#
#
# <p align="justified">Assim, voram estimados os seguintes intervalos e conceitos para cada variável</p>
#
# 1. Alcohol;
#
# | Interval | | Score |
# |-----------|-------|----------|
# | Start | End | |
# | 0 | 5,7 | 0 |
# | 5,7 | 6,85 | 1 |
# | 6,85 | 8 | 2 |
# | 8 | 9,15 | 3 |
# | 9,15 | 10,3 | 4 |
# | 10,3 | 11,45 | 5 |
# | 11,45 | 12,6 | 6 |
# | 12,6 | 13,75 | 7 |
# | 13,75 | 14,9 | 8 |
# | 14,9 | 16,05 | 9 |
# | <= 16,05 | | 10 |
#
# 2. Volatile acidity;
#
# | Interval | | Score |
# |-----------|------|----------|
# | Start | End | |
# | 0 | 0,08 | 10 |
# | 0,08 | 0,33 | 9 |
# | 0,33 | 0,58 | 8 |
# | 0,58 | 0,83 | 7 |
# | 0,83 | 1,08 | 6 |
# | 1,08 | 1,33 | 5 |
# | 1,33 | 1,58 | 4 |
# | 1,58 | 1,83 | 3 |
# | 1,83 | 2,08 | 2 |
# | 2,08 | 2,33 | 1 |
# | <= 2,33 | | 0 |
#
# 3. Chlorides;
#
# | Interval | | Score |
# |-----------|--------|----------|
# | Start | End | |
# | 0,0000 | 0,0090 | 10 |
# | 0,0090 | 0,1093 | 9 |
# | 0,1093 | 0,2097 | 8 |
# | 0,2097 | 0,3100 | 7 |
# | 0,3100 | 0,4103 | 6 |
# | 0,4103 | 0,5107 | 5 |
# | 0,5107 | 0,6110 | 4 |
# | 0,6110 | 0,7113 | 3 |
# | 0,7113 | 0,8117 | 2 |
# | 0,8117 | 0,9120 | 1 |
# | <= 0,9120 | | 0 |
#
# 4. Density;
#
# | Interval | | Score |
# |-----------|----------|----------|
# | Start | End | |
# | 0,0000 | 0,9871 | 10 |
# | 0,9871 | 18,1389 | 9 |
# | 18,1389 | 35,2907 | 8 |
# | 35,2907 | 52,4426 | 7 |
# | 52,4426 | 69,5944 | 6 |
# | 69,5944 | 86,7462 | 5 |
# | 86,7462 | 103,8980 | 4 |
# | 103,8980 | 121,0498 | 3 |
# | 121,0498 | 138,2016 | 2 |
# | 138,2016 | 155,3534 | 1 |
# | <= 155,353| | 0 |
#
#
# <p align="justified">* The end of each interval is open (exclusive).</p>
# <p align="justified"> Next, a copy of the dataset is made for each method, and the scores of each variable are computed for every wine, together with its estimated quality. The estimated quality is a weighted average: each variable's score is multiplied by the absolute value of its correlation, and the total is divided by the sum of the absolute correlations.</p>
#
# <p align="justified"> The estimation error was also computed for each wine, as the absolute difference between the quality recorded in the dataset and the quality estimated by the model. The most effective model is the one with the lowest mean estimation error between the two methods, overall and per wine type.</p>
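# <p align="justified">A minimal sketch of the weighted-average estimate and the error, assuming the Pearson correlation magnitudes used later in this notebook (0.47 for alcohol, 0.25 for volatile acidity and 0.20 for chlorides); the scores and the observed quality of 6 below are hypothetical:</p>

def estimate_quality(scores, weights):
    # weighted average of the per-variable scores by the absolute correlations
    return sum(s * w for s, w in zip(scores, weights)) / sum(weights)

example_scores = [7, 8, 9]            # hypothetical per-variable scores
pearson_weights = [0.47, 0.25, 0.20]  # |correlation| of each variable
q_est = estimate_quality(example_scores, pearson_weights)
error = abs(q_est - 6)                # |estimated - observed| quality
print(round(q_est, 2), round(error, 2))  # 7.71 1.71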
# In[11]:
dfp=df.copy()
dfs=df.copy()
# <h3 align="justified">Creating the model (Pearson method)</h3>
# In[12]:
dfp.insert(13, 'qualidade_alcool', 0)
dfp.insert(14, 'qualidade_volatilidade da acidez', 0)
dfp.insert(15, 'qualidade_cloretos', 0)
dfp.insert(16, 'qualidade_estimada', 0)
dfp.insert(17, 'erro', 0)
dfp.describe()
# In[13]:
for index, row in dfp.iterrows():
if row['alcohol']<5.7:
dfp.at[index,'qualidade_alcool']=0
elif row['alcohol']<6.85:
dfp.at[index,'qualidade_alcool']=1
elif row['alcohol']<8:
dfp.at[index,'qualidade_alcool']=2
elif row['alcohol']<9.15:
dfp.at[index,'qualidade_alcool']=3
elif row['alcohol']<10.30:
dfp.at[index,'qualidade_alcool']=4
elif row['alcohol']<11.45:
dfp.at[index,'qualidade_alcool']=5
elif row['alcohol']<12.60:
dfp.at[index,'qualidade_alcool']=6
elif row['alcohol']<13.75:
dfp.at[index,'qualidade_alcool']=7
elif row['alcohol']<14.90:
dfp.at[index,'qualidade_alcool']=8
elif row['alcohol']<16.05:
dfp.at[index,'qualidade_alcool']=9
    else:
dfp.at[index,'qualidade_alcool']=10
# In[14]:
# Score intervals follow the volatile acidity table above (upper bounds are exclusive)
for index, row in dfp.iterrows():
    if row['volatile acidity']<0.08:
        dfp.at[index,'qualidade_volatilidade da acidez']=10
    elif row['volatile acidity']<0.33:
        dfp.at[index,'qualidade_volatilidade da acidez']=9
    elif row['volatile acidity']<0.58:
        dfp.at[index,'qualidade_volatilidade da acidez']=8
    elif row['volatile acidity']<0.83:
        dfp.at[index,'qualidade_volatilidade da acidez']=7
    elif row['volatile acidity']<1.08:
        dfp.at[index,'qualidade_volatilidade da acidez']=6
    elif row['volatile acidity']<1.33:
        dfp.at[index,'qualidade_volatilidade da acidez']=5
    elif row['volatile acidity']<1.58:
        dfp.at[index,'qualidade_volatilidade da acidez']=4
    elif row['volatile acidity']<1.83:
        dfp.at[index,'qualidade_volatilidade da acidez']=3
    elif row['volatile acidity']<2.08:
        dfp.at[index,'qualidade_volatilidade da acidez']=2
    elif row['volatile acidity']<2.33:
        dfp.at[index,'qualidade_volatilidade da acidez']=1
    else:
        dfp.at[index,'qualidade_volatilidade da acidez']=0
# In[15]:
# Score intervals follow the chlorides table above (upper bounds are exclusive)
for index, row in dfp.iterrows():
    if row['chlorides']<0.009:
        dfp.at[index,'qualidade_cloretos']=10
    elif row['chlorides']<0.1093:
        dfp.at[index,'qualidade_cloretos']=9
    elif row['chlorides']<0.2097:
        dfp.at[index,'qualidade_cloretos']=8
    elif row['chlorides']<0.3100:
        dfp.at[index,'qualidade_cloretos']=7
    elif row['chlorides']<0.4103:
        dfp.at[index,'qualidade_cloretos']=6
    elif row['chlorides']<0.5107:
        dfp.at[index,'qualidade_cloretos']=5
    elif row['chlorides']<0.6110:
        dfp.at[index,'qualidade_cloretos']=4
    elif row['chlorides']<0.7113:
        dfp.at[index,'qualidade_cloretos']=3
    elif row['chlorides']<0.8117:
        dfp.at[index,'qualidade_cloretos']=2
    elif row['chlorides']<0.9120:
        dfp.at[index,'qualidade_cloretos']=1
    else:
        dfp.at[index,'qualidade_cloretos']=0
# In[16]:
dfp['qualidade_estimada']=((dfp['qualidade_alcool']*0.47)+ (dfp['qualidade_volatilidade da acidez']*0.25)+(dfp['qualidade_cloretos']*0.2))/(0.47+0.25+0.2)
dfp['erro']=np.sqrt((dfp['qualidade_estimada']-dfp['quality'])**2)
# In[17]:
dfp.describe()
# <h3 align="justified">Creating the model (Spearman method)</h3>
# In[18]:
dfs.insert(13, 'qualidade_alcool', 0)
dfs.insert(14, 'qualidade_densidade', 0)
dfs.insert(15, 'qualidade_cloretos', 0)
dfs.insert(16, 'qualidade_estimada', 0)
dfs.insert(17, 'erro', 0)
dfs.describe()
# In[19]:
for index, row in dfs.iterrows():
if row['alcohol']<5.7:
dfs.at[index,'qualidade_alcool']=0
elif row['alcohol']<6.85:
dfs.at[index,'qualidade_alcool']=1
elif row['alcohol']<8:
        dfs.at[index,'qualidade_alcool']=2
elif row['alcohol']<9.15:
dfs.at[index,'qualidade_alcool']=3
elif row['alcohol']<10.30:
dfs.at[index,'qualidade_alcool']=4
elif row['alcohol']<11.45:
dfs.at[index,'qualidade_alcool']=5
elif row['alcohol']<12.60:
dfs.at[index,'qualidade_alcool']=6
elif row['alcohol']<13.75:
dfs.at[index,'qualidade_alcool']=7
elif row['alcohol']<14.90:
dfs.at[index,'qualidade_alcool']=8
elif row['alcohol']<16.05:
dfs.at[index,'qualidade_alcool']=9
    else:
dfs.at[index,'qualidade_alcool']=10
# In[20]:
# Score intervals follow the density table above (upper bounds are exclusive)
for index, row in dfs.iterrows():
    if row['density']<0.98711:
        dfs.at[index,'qualidade_densidade']=10
    elif row['density']<18.138925:
        dfs.at[index,'qualidade_densidade']=9
    elif row['density']<35.29074:
        dfs.at[index,'qualidade_densidade']=8
    elif row['density']<52.442555:
        dfs.at[index,'qualidade_densidade']=7
    elif row['density']<69.59437:
        dfs.at[index,'qualidade_densidade']=6
    elif row['density']<86.746185:
        dfs.at[index,'qualidade_densidade']=5
    elif row['density']<103.898:
        dfs.at[index,'qualidade_densidade']=4
    elif row['density']<121.049815:
        dfs.at[index,'qualidade_densidade']=3
    elif row['density']<138.20163:
        dfs.at[index,'qualidade_densidade']=2
    elif row['density']<155.353445:
        dfs.at[index,'qualidade_densidade']=1
    else:
        dfs.at[index,'qualidade_densidade']=0
# In[21]:
# Score intervals follow the chlorides table above (upper bounds are exclusive)
for index, row in dfs.iterrows():
    if row['chlorides']<0.009:
        dfs.at[index,'qualidade_cloretos']=10
    elif row['chlorides']<0.1093:
        dfs.at[index,'qualidade_cloretos']=9
    elif row['chlorides']<0.2097:
        dfs.at[index,'qualidade_cloretos']=8
    elif row['chlorides']<0.3100:
        dfs.at[index,'qualidade_cloretos']=7
    elif row['chlorides']<0.4103:
        dfs.at[index,'qualidade_cloretos']=6
    elif row['chlorides']<0.5107:
        dfs.at[index,'qualidade_cloretos']=5
    elif row['chlorides']<0.6110:
        dfs.at[index,'qualidade_cloretos']=4
    elif row['chlorides']<0.7113:
        dfs.at[index,'qualidade_cloretos']=3
    elif row['chlorides']<0.8117:
        dfs.at[index,'qualidade_cloretos']=2
    elif row['chlorides']<0.9120:
        dfs.at[index,'qualidade_cloretos']=1
    else:
        dfs.at[index,'qualidade_cloretos']=0
# In[22]:
dfs['qualidade_estimada']=((dfs['qualidade_alcool']*0.48)+(dfs['qualidade_densidade']*0.34)+(dfs['qualidade_cloretos']*0.30))/(0.48+0.34+0.30)
dfs['erro']=np.sqrt((dfs['qualidade_estimada']-dfs['quality'])**2)
# In[23]:
dfs.describe()
# <h3 align="justified">Comparing the estimation error across wine types</h3>
# In[24]:
dfp.groupby('type')['erro'].describe()
# In[25]:
dfs.groupby("type")['erro'].describe()
# <h3 align="justified">Conclusion</h3>
#
# <p align="justified"> The mean estimation error for the Pearson correlation method was lower than for the Spearman method, both overall and for each wine type, making it the more suitable choice for this modeling.</p>
|
Python
|
CL
|
5464601c135b42f9d630d2e9f57e931862cbfb0ea0e8d5543dc6f28a1d5855f9
|
# Course: CS261 - Data Structures
# Student Name: Melanie Huynh
# Assignment: Assignment 5, Min heaps
# Description: Implementation of a MinHeap class, using a dynamic array to store the heap.
# Import pre-written DynamicArray and LinkedList classes
from a5_include import *
class MinHeapException(Exception):
"""
Custom exception to be used by MinHeap class
DO NOT CHANGE THIS CLASS IN ANY WAY
"""
pass
class MinHeap:
def __init__(self, start_heap=None):
"""
Initializes a new MinHeap
DO NOT CHANGE THIS METHOD IN ANY WAY
"""
self.heap = DynamicArray()
# populate MH with initial values (if provided)
# before using this feature, implement add() method
if start_heap:
for node in start_heap:
self.add(node)
def __str__(self) -> str:
"""
Return MH content in human-readable form
DO NOT CHANGE THIS METHOD IN ANY WAY
"""
return 'HEAP ' + str(self.heap)
def is_empty(self) -> bool:
"""
Return True if no elements in the heap, False otherwise
DO NOT CHANGE THIS METHOD IN ANY WAY
"""
return self.heap.length() == 0
def add(self, node: object) -> None:
"""
This method adds a new object to the MinHeap maintaining heap property.
Runtime complexity must be O(logN).
"""
# begin by adding the node object at the end of the heap array
self.heap.append(node)
# then define the index of the node
index = self.heap.length() - 1
        # finally, percolate the node up until it finds its proper spot
self.percolate_up(index)
def get_min(self) -> object:
"""
This method returns an object with a minimum key without removing it from the heap.
If heap is empty, raise exception
"""
if self.is_empty() == True:
raise MinHeapException()
return self.heap.get_at_index(0) # return the root, which should be the minimum
def remove_min(self) -> object:
"""
This method returns an object with a minimum key and removes it from the heap.
If heap is empty, raise exception
"""
        if self.is_empty():
            raise MinHeapException()
# define the first val in the heap
cur_val = self.get_min()
# swap the first and last element
self.heap.swap(0, self.heap.length() - 1)
# then remove the last element
self.heap.pop()
# finally, percolate down the root node
self.percolate_down(0)
return cur_val
def build_heap(self, da: DynamicArray) -> None:
"""
This method receives a dynamic array with objects in any order and builds a proper
MinHeap from there. Current content of the MinHeap is lost.
Runtime complexity must be O(N).
"""
# must clear current content of minheap
new_heap = DynamicArray()
for i in range(da.length()): # taking the overall length of the array
new_heap.append(da.get_at_index(i)) # adds the value into the array, using add method to ensure it is a proper heap
# then set new_heap as the current heap, clearing current content
self.heap = new_heap
        # begin percolation through every non-leaf (parent) node
# define the parent node
parent_node = (da.length()) // 2 - 1
while (parent_node != -1): #begin at parent and stop at root
self.percolate_down(parent_node)
# increment parent
parent_node -= 1
def percolate_up(self, index):
"""
Helper function that allows for percolation up the min-heap until it reaches the root.
"""
parent_index = (index - 1) // 2
while index != 0:
if (self.heap.get_at_index(parent_index) > self.heap.get_at_index(index)):
# swap the nodes
self.heap.swap(parent_index, index)
# redefine the indices
index = parent_index
parent_index = (index - 1) // 2
else:
index = parent_index
parent_index = (index - 1) // 2
def percolate_down(self, index):
"""
Helper function that allows for percolation down the min-heap until it reaches a leaf.
"""
# define the children
left = 2 * index + 1
right = 2 * index + 2
less = index # define the lesser value node
if left <= self.heap.length() -1 and self.heap.get_at_index(index) > self.heap.get_at_index(left):
less = left
if right <= self.heap.length() - 1 and self.heap.get_at_index(less) > self.heap.get_at_index(right):
less = right
        # otherwise, swap with the smaller child to percolate down
if less != index:
self.heap.swap(index, less)
# recurse the percolation using the lesser value
self.percolate_down(less)
# BASIC TESTING
if __name__ == '__main__':
print("\nPDF - add example 1")
print("-------------------")
h = MinHeap()
print(h, h.is_empty())
for value in range(300, 200, -15):
h.add(value)
print(h)
print("\nPDF - add example 2")
print("-------------------")
h = MinHeap(['fish', 'bird'])
print(h)
for value in ['monkey', 'zebra', 'elephant', 'horse', 'bear']:
h.add(value)
print(h)
print("\nPDF - get_min example 1")
print("-----------------------")
h = MinHeap(['fish', 'bird'])
print(h)
print(h.get_min(), h.get_min())
print("\nPDF - remove_min example 1")
print("--------------------------")
h = MinHeap([1, 10, 2, 9, 3, 8, 4, 7, 5, 6])
while not h.is_empty():
print(h, end=' ')
print(h.remove_min())
print("\nPDF - build_heap example 1")
print("--------------------------")
da = DynamicArray([100, 20, 6, 200, 90, 150, 300])
h = MinHeap(['zebra', 'apple'])
print(h)
h.build_heap(da)
print(h)
da.set_at_index(0, 500)
print(da)
print(h)
|
Python
|
CL
|
b35ed7ec8b98e28c85a0fc5ec46fb682aa9fd540df4c4cc896c83a47e497d930
|
import json
import os
import sys
from metaflow.exception import MetaflowException
class ArgoClientException(MetaflowException):
headline = "Argo Client error"
class ArgoClient(object):
def __init__(self, namespace=None):
try:
from kubernetes import client, config
except (NameError, ImportError):
raise MetaflowException(
"Could not import module 'kubernetes'.\n\nInstall kubernetes "
"Python package (https://pypi.org/project/kubernetes/) first.\n"
"You can install the module by executing - "
"%s -m pip install kubernetes\n"
"or equivalent through your favorite Python package manager."
% sys.executable
)
if os.getenv("KUBERNETES_SERVICE_HOST"):
# We are inside a pod, authenticate via ServiceAccount assigned
# to us
config.load_incluster_config()
else:
# Use kubeconfig, likely $HOME/.kube/config
# TODO (savin):
# 1. Support generating kubeconfig on the fly using boto3
# 2. Support auth via OIDC -
# https://docs.aws.amazon.com/eks/latest/userguide/authenticate-oidc-identity-provider.html
config.load_kube_config()
self._client = client
self._namespace = namespace or "default"
self._group = "argoproj.io"
self._version = "v1alpha1"
def get_workflow_template(self, name):
try:
return self._client.CustomObjectsApi().get_namespaced_custom_object(
group=self._group,
version=self._version,
namespace=self._namespace,
plural="workflowtemplates",
name=name,
)
except self._client.rest.ApiException as e:
if e.status == 404:
return None
raise ArgoClientException(
json.loads(e.body)["message"] if e.body is not None else e.reason
)
def register_workflow_template(self, name, workflow_template):
# Unfortunately, Kubernetes client does not handle optimistic
# concurrency control by itself unlike kubectl
try:
workflow_template["metadata"][
"resourceVersion"
] = self._client.CustomObjectsApi().get_namespaced_custom_object(
group=self._group,
version=self._version,
namespace=self._namespace,
plural="workflowtemplates",
name=name,
)[
"metadata"
][
"resourceVersion"
]
except self._client.rest.ApiException as e:
if e.status == 404:
try:
return (
self._client.CustomObjectsApi().create_namespaced_custom_object(
group=self._group,
version=self._version,
namespace=self._namespace,
plural="workflowtemplates",
body=workflow_template,
)
)
except self._client.rest.ApiException as e:
raise ArgoClientException(
json.loads(e.body)["message"]
if e.body is not None
else e.reason
)
else:
raise ArgoClientException(
json.loads(e.body)["message"] if e.body is not None else e.reason
)
try:
return self._client.CustomObjectsApi().replace_namespaced_custom_object(
group=self._group,
version=self._version,
namespace=self._namespace,
plural="workflowtemplates",
body=workflow_template,
name=name,
)
except self._client.rest.ApiException as e:
raise ArgoClientException(
json.loads(e.body)["message"] if e.body is not None else e.reason
)
def trigger_workflow_template(self, name, parameters={}):
body = {
"apiVersion": "argoproj.io/v1alpha1",
"kind": "Workflow",
"metadata": {"generateName": name + "-"},
"spec": {
"workflowTemplateRef": {"name": name},
"arguments": {
"parameters": [
{"name": k, "value": json.dumps(v)}
for k, v in parameters.items()
]
},
},
}
try:
return self._client.CustomObjectsApi().create_namespaced_custom_object(
group=self._group,
version=self._version,
namespace=self._namespace,
plural="workflows",
body=body,
)
except self._client.rest.ApiException as e:
raise ArgoClientException(
json.loads(e.body)["message"] if e.body is not None else e.reason
)
def schedule_workflow_template(self, name, schedule=None):
# Unfortunately, Kubernetes client does not handle optimistic
# concurrency control by itself unlike kubectl
body = {
"apiVersion": "argoproj.io/v1alpha1",
"kind": "CronWorkflow",
"metadata": {"name": name},
"spec": {
"suspend": schedule is None,
"schedule": schedule,
"workflowSpec": {"workflowTemplateRef": {"name": name}},
},
}
try:
body["metadata"][
"resourceVersion"
] = self._client.CustomObjectsApi().get_namespaced_custom_object(
group=self._group,
version=self._version,
namespace=self._namespace,
plural="cronworkflows",
name=name,
)[
"metadata"
][
"resourceVersion"
]
except self._client.rest.ApiException as e:
# Scheduled workflow does not exist and we want to schedule a workflow
if e.status == 404:
if schedule is None:
return
try:
return (
self._client.CustomObjectsApi().create_namespaced_custom_object(
group=self._group,
version=self._version,
namespace=self._namespace,
plural="cronworkflows",
body=body,
)
)
except self._client.rest.ApiException as e:
raise ArgoClientException(
json.loads(e.body)["message"]
if e.body is not None
else e.reason
)
else:
raise ArgoClientException(
json.loads(e.body)["message"] if e.body is not None else e.reason
)
try:
return self._client.CustomObjectsApi().replace_namespaced_custom_object(
group=self._group,
version=self._version,
namespace=self._namespace,
plural="cronworkflows",
body=body,
name=name,
)
except self._client.rest.ApiException as e:
raise ArgoClientException(
json.loads(e.body)["message"] if e.body is not None else e.reason
)
|
Python
|
CL
|
d56883b86675cb45988bc2beb28f8d2ad50ae3e4c009274ad82fb04b674a0e1f
|
# -*- coding: utf-8 -*-
"""
Created by: Andres Segura Tinoco
Version: 1.2.0
Created on: Nov 23, 2020
Updated on: Dec 16, 2020
Description: Main class of the descriptive-engine solution.
"""
# Import Python
import os
import logging
import pandas as pd
import numpy as np
import scipy.stats as ss
from datetime import datetime
# Import custom libraries
import util_lib as ul
######################
### CORE FUNCTIONS ###
######################
# Core function - Create results folders
def create_result_folders(folder_name):
folder_path = '../result/' + folder_name
ul.create_folder(folder_path)
# Core function - Read the CSV dataset and convert it to a dictionary by entity
def get_data_by_entity(filename, entity_filter, frequency):
data_list = dict()
# Validation
if os.path.exists(filename):
# Read divipola dictionary
divipola_code = dict()
divipola_data = pd.read_csv('config/divipola.csv')
for ix, row in divipola_data.iterrows():
entity = row['entity']
code = row['divipola']
divipola_code[entity] = code
# Read data from CSV dataset
raw_data = pd.read_csv(filename)
# Filter data by entity
if len(raw_data):
entity_list = raw_data['entity'].unique()
# Filtering and grouping data by entity
for entity in entity_list:
# Check permission to be processed
if (len(entity_filter) == 0 or entity in entity_filter) and (entity in divipola_code.keys()):
entity_code = str(divipola_code[entity]).zfill(5)
# Filter data by entity
entity_data = raw_data[raw_data['entity'] == entity]
entity_data = entity_data.groupby(['entity', 'year']).agg('sum')
entity_data.reset_index(inplace=True)
# Grouping data by frequency
if frequency == 'weekly':
temp_data = pd.DataFrame(columns=['entity', 'year', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13'])
# Grouping data by periods
for ix, row_data in entity_data.iterrows():
year = row_data['year']
values = []
periods = []
for week in range(1, 54):
value = row_data[str(week)]
values.append(value)
if (len(values) == 4 and week != 52) or (len(values) == 5 and week == 53):
total = sum(values)
periods.append(total)
values = []
# Save data row
temp_data.loc[len(temp_data)] = [entity, year] + periods
elif frequency == 'periodically':
temp_data = entity_data.copy()
# Save data
data_list[entity_code] = temp_data
print((entity, entity_code), '->', len(temp_data))
else:
print(' = Entity without permission to be processed: ' + entity)
return data_list
# Core function - Get population by entity and year
def get_population_by_entity():
pop_data = {}
raw_data = pd.read_csv('config/population.csv')
if len(raw_data):
for ix, row in raw_data.iterrows():
code = str(row['divipola'])
# Apply data quality to code
if len(code) == 1:
code = '0' + code + '000'
elif len(code) == 2:
code = code + '000'
elif len(code) == 4:
code = '0' + code
# Get population data
for year in range(2010, 2021):
year = str(year)
pop_value = row[year]
# Save key, population pair
key = code + '_' + year
pop_data[key] = pop_value
return pop_data
# Core function - Calculate descriptive stats by entity and period
def calc_desc_stats(data_list, pop_data, rate_enable, max_year, skip_years):
gr_data = pd.DataFrame(columns=['entity', 'year', 'period', 'total'])
stats_data = pd.DataFrame(columns=['entity', 'period', 'total', 'mean', 'stdev', 'min', 'p25', 'p50', 'p75', 'max', 'no_data', 'pv_period', 'pv_value', 'pv_min_lim', 'pv_max_lim'])
# Loop through year, weeks
for entity, data in data_list.items():
n_rows = len(data)
temp_df = pd.DataFrame(columns=['year', 'period', 'value'])
# Grouping data by periods
for ix in range(n_rows):
row_data = data.iloc[ix]
year = row_data['year']
key = entity + '_' + str(year)
entity_pop = pop_data[key]
for period in range(1, 14):
total = row_data[str(period)]
# Change totals per rates
if rate_enable:
div = 100000
rate = round(total / entity_pop * div, 4)
curr_value = rate
else:
curr_value = total
# Save data in memory
gr_data.loc[len(gr_data)] = [entity, year, period, curr_value]
if not year in skip_years:
temp_df.loc[len(temp_df)] = [year, period, curr_value]
# Calculate variation coefficient
all_values = list(temp_df[temp_df['year'] < max_year]['value'])
var_coef = round(100.0 * ss.variation(all_values ), 4)
# Calculate stats
for period in range(1, 14):
# Calculate percentage variation by years
perc_var_list = []
for year in range(max_year, max_year - 5, -1):
n1_values = list(temp_df[(temp_df['period'] == period) & (temp_df['year'] == year)]['value'])
n2_values = list(temp_df[(temp_df['period'] == period) & (temp_df['year'] == (year - 1))]['value'])
perc_var = 0
if len(n1_values) and len(n2_values):
n1_value = n1_values[0]
n2_value = n2_values[0]
if n1_value > 0 and n2_value > 0:
perc_var = (n1_value - n2_value) / n2_value
perc_var_list.append(perc_var)
# Percentage variations local variables
pv_period = str(max_year) + '-' + str(max_year - 1)
pv_value = 0
pv_min_lim = 0
pv_max_lim = 0
if len(perc_var_list) == 5:
pv_value = round(perc_var_list[0], 4)
pv_min_lim = round(min(perc_var_list[1:]), 4)
pv_max_lim = round(max(perc_var_list[1:]), 4)
# Filter data by period
values = temp_df[(temp_df['period'] == period) & (temp_df['year'] < max_year)]['value']
values = [x for x in values if x > 0]
# Entity-period vars
total = 0
mean = 0
stdev = 0
min_value = 0
max_value = 0
p25 = 0
p50 = 0
p75 = 0
# Not taking into account current year
no_data = n_rows - len(values) - 1
if len(values) > 0:
values.sort()
# Calc stats
total = round(sum(values), 4)
mean = round(np.mean(values), 4)
stdev = round(np.std(values), 4)
min_value = round(min(values), 4)
max_value = round(max(values), 4)
p25 = round(np.percentile(values, 25), 4)
p50 = round(np.percentile(values, 50), 4)
p75 = round(np.percentile(values, 75), 4)
# Save row item
row_item = {'entity': entity, 'period': period, 'total': total, 'mean': mean, 'stdev': stdev, 'min': min_value,
'p25': p25, 'p50':p50, 'p75': p75, 'max': max_value, 'no_data': no_data, 'var_coef': var_coef,
'pv_period': pv_period, 'pv_value': pv_value, 'pv_min_lim': pv_min_lim, 'pv_max_lim': pv_max_lim}
stats_data = stats_data.append(row_item, ignore_index=True)
# Return result datasets
return gr_data, stats_data
# Core function - Save to CSV file the result stats by entity
def save_results(curr_event, df, exec_date, file_name):
exec_col = 'exec_date'
# Save model data results
if df is not None and len(df):
# Post processing of the data
df.reset_index(inplace=True)
df.insert(0, exec_col, str(exec_date))
# Persist data
filename = '../result/' + curr_event + '/' + file_name + '.csv'
ul.save_df_to_csv_file(filename, df, False)
#####################
### START PROGRAM ###
#####################
if __name__ == "__main__":
# 0. Program variables
log_path = 'log/log_file.log'
config_path = 'config/config.json'
logging.basicConfig(filename=log_path, level=logging.INFO)
logging.info('>> START PROGRAM: ' + str(datetime.now()))
# 1. Read config params
setup_params = ul.get_dict_from_json(config_path)
event_list = setup_params['event_list']
entity_filter = setup_params['entity_filter']
# 2. Loop through entities
for curr_event in event_list:
event_name = curr_event['name'].lower()
if event_name and curr_event['enabled']:
# Save event params
logging.info(' = Event: ' + event_name)
logging.info(curr_event)
# 3. Create result folders
create_result_folders(event_name)
# 4. Get list of datasets by entities
logging.info(' = Read data by entity - ' + str(datetime.now()))
filename = '../data/' + event_name + '_dataset.csv'
data_list = get_data_by_entity(filename, entity_filter, curr_event['frequency'])
# 5. Get population by entity and year
pop_data = get_population_by_entity()
# 6. Calculate descriptive stats
logging.info(' = Calculate descriptive stats - ' + str(datetime.now()))
exec_date = datetime.now()
rate_enable = curr_event['rate_enable']
skip_years = curr_event['skip_years']
max_year = 2020
gr_data, stats_data = calc_desc_stats(data_list, pop_data, rate_enable, max_year, skip_years)
# 7. Save grouped data by entity
logging.info(' = Save grouped data by entity - ' + str(datetime.now()))
save_results(event_name, gr_data, exec_date, 'raw_data')
# 8. Save stats results by entity
logging.info(' = Save stats results by entity - ' + str(datetime.now()))
save_results(event_name, stats_data, exec_date, 'result_data')
logging.info(">> END PROGRAM: " + str(datetime.now()))
logging.shutdown()
#####################
#### END PROGRAM ####
#####################
|
Python
|
CL
|
523fe11ace1ce4c036d8efe536b549e31dda77c28a17b3d3ba2c0b266d1f7b44
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Author: kerlomz <kerlomz@gmail.com>
import sys
import random
from tqdm import tqdm
import tensorflow as tf
from config import *
from constants import RunMode
_RANDOM_SEED = 0
class DataSets:
"""此类用于打包数据集为TFRecords格式"""
def __init__(self, model: ModelConfig):
self.model = model
if not os.path.exists(self.model.dataset_root_path):
os.makedirs(self.model.dataset_root_path)
@staticmethod
def read_image(path):
"""
        Read an image file
        :param path: path to the image file
        :return: the raw bytes of the file
"""
with open(path, "rb") as f:
return f.read()
def dataset_exists(self):
"""数据集是否存在判断函数"""
for file in (self.model.trains_path[DatasetType.TFRecords] + self.model.validation_path[DatasetType.TFRecords]):
if not os.path.exists(file):
return False
return True
@staticmethod
def bytes_feature(values):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
def input_to_tfrecords(self, input_data, label):
return tf.train.Example(features=tf.train.Features(feature={
'input': self.bytes_feature(input_data),
'label': self.bytes_feature(label),
}))
def convert_dataset(self, output_filename, file_list, mode: RunMode, is_add=False):
if is_add:
output_filename = self.model.dataset_increasing_name(mode)
if not output_filename:
raise FileNotFoundError('Basic data set missing, please check.')
output_filename = os.path.join(self.model.dataset_root_path, output_filename)
with tf.io.TFRecordWriter(output_filename) as writer:
pbar = tqdm(file_list)
for i, file_name in enumerate(pbar):
try:
image_data = self.read_image(file_name)
labels = re.search(self.model.extract_regex, file_name.split(PATH_SPLIT)[-1])
if labels:
labels = labels.group()
else:
raise NameError('invalid filename {}'.format(file_name))
labels = labels.encode('utf-8')
example = self.input_to_tfrecords(image_data, labels)
writer.write(example.SerializeToString())
pbar.set_description('[Processing dataset %s] [filename: %s]' % (mode, file_name))
except IOError as e:
                    print('could not read:', file_name)
print('error:', e)
print('skip it \n')
@staticmethod
def merge_source(source):
if isinstance(source, list):
origin_dataset = []
for trains_path in source:
origin_dataset += [
os.path.join(trains_path, trains).replace("\\", "/") for trains in os.listdir(trains_path)
]
elif isinstance(source, str):
origin_dataset = [os.path.join(source, trains) for trains in os.listdir(source)]
else:
return
random.seed(0)
random.shuffle(origin_dataset)
return origin_dataset
def make_dataset(self, trains_path=None, validation_path=None, is_add=False, callback=None, msg=None):
if self.dataset_exists() and not is_add:
state = "EXISTS"
if callback:
callback()
if msg:
msg(state)
return
if not self.model.dataset_path_root:
state = "CONF_ERROR"
if callback:
callback()
if msg:
msg(state)
return
trains_path = trains_path if is_add else self.model.trains_path[DatasetType.Directory]
validation_path = validation_path if is_add else self.model.validation_path[DatasetType.Directory]
trains_path = [trains_path] if isinstance(trains_path, str) else trains_path
validation_path = [validation_path] if isinstance(validation_path, str) else validation_path
if validation_path:
trains_dataset = self.merge_source(trains_path)
validation_dataset = self.merge_source(validation_path)
self.convert_dataset(
self.model.validation_path[DatasetType.TFRecords][-1 if is_add else 0],
validation_dataset,
mode=RunMode.Validation,
is_add=is_add,
)
self.convert_dataset(
self.model.trains_path[DatasetType.TFRecords][-1 if is_add else 0],
trains_dataset,
mode=RunMode.Trains,
is_add=is_add,
)
else:
origin_dataset = self.merge_source(trains_path)
trains_dataset = origin_dataset[self.model.validation_set_num:]
if self.model.validation_set_num > 0:
validation_dataset = origin_dataset[:self.model.validation_set_num]
self.convert_dataset(
self.model.validation_path[DatasetType.TFRecords][-1 if is_add else 0],
validation_dataset,
mode=RunMode.Validation,
is_add=is_add
)
elif self.model.validation_set_num < 0:
self.convert_dataset(
self.model.validation_path[DatasetType.TFRecords][-1 if is_add else 0],
trains_dataset,
mode=RunMode.Validation,
is_add=is_add
)
self.convert_dataset(
self.model.trains_path[DatasetType.TFRecords][-1 if is_add else 0],
trains_dataset,
mode=RunMode.Trains,
is_add=is_add
)
state = "DONE"
if callback:
callback()
if msg:
msg(state)
return
if __name__ == '__main__':
model_conf = ModelConfig(sys.argv[-1])
_dataset = DataSets(model_conf)
_dataset.make_dataset()
|
Python
|
CL
|
12617b97ea74f6237e8d512906ede9870ee4c023027de0ad86530e16cc34fe18
|
import pandas as pd
import numpy as np
from datetime import datetime
import json
import os
import gc
# df_train.shape = (184903890, 9)
# test_supplement_partial_processed.shape = (57536872, 8)
input_dir = '../data/input/'
feature_dir = '../data/input/features/'
config = 'feature_list.json'
fmt = '%Y-%m-%d %H:%M:%S' # Time string format.
# Load feature config file.
with open(os.path.join(input_dir,'feature_config.json'),'r',encoding='utf-8') as data_file:
feature_dict = json.load(data_file)
# Flatten feature_dict to feature_list.
feature_list = []
for k, v in feature_dict.items():
feature_list.extend(v)
# Load Sample index file.
df_train_balanced_index_10_fold = pd.read_feather('../data/input/df_train_balanced_index_10_fold.feather')
# Check the existence of all feature files.
for feature in feature_list:
if os.path.isfile(os.path.join(feature_dir, feature + '.feather')):
print('\nOK! {}.feather exists!'.format(feature))
feature_df = pd.read_feather(os.path.join(feature_dir, feature + '.feather'))
column_name = feature_df.columns.tolist()[0]
print('\nColumn name: {}'.format(column_name))
if column_name != feature:
feature_df.rename(columns={column_name:feature}).to_feather(os.path.join(feature_dir, feature + '.feather'))
print('\nRename column name from {} to {}'.format(column_name, feature))
# Downsample and save.
for sample_set_column in df_train_balanced_index_10_fold:
print('\n{} - Creating fold {}...'.format(datetime.now().strftime(fmt), sample_set_column))
sample_index = df_train_balanced_index_10_fold[sample_set_column]
new_file_name = sample_set_column + '_' + feature + '.feather'
print('\n{} - Saving {}...'.format(datetime.now().strftime(fmt), new_file_name))
feature_df.loc[sample_index].reset_index().to_feather(os.path.join(input_dir, new_file_name))
# Release memory.
del feature_df
del sample_index
print('\n{} - Objects collected: {}.'.format(datetime.now().strftime(fmt), gc.collect()))
else:
print('\nError! {}.feather does not exist!'.format(feature))
'''
# Loop resample index sets.
for sample_set_column in df_train_balanced_index_10_fold:
print('\n{} - Creating fold {}...'.format(datetime.now().strftime(fmt), sample_set_column))
sample_index = df_train_balanced_index_10_fold[sample_set_column]
train_set_tmp = pd.DataFrame({})
# Use sample_index to extract training set from full feature set.
for feature in feature_list:
print('\n{} - Sampling feature {}...'.format(datetime.now().strftime(fmt), feature))
feature_df = pd.read_feather(os.path.join(feature_dir, feature + '.feather'))
feature_sampled_df = feature_df.loc[sample_index]
# Append column to train_set_tmp.
train_set_tmp = pd.concat([train_set_tmp, feature_sampled_df], axis=1)
del feature_df
del feature_sampled_df
gc.collect()
print('\n{} - Saving fold {}...'.format(datetime.now().strftime(fmt), sample_set_column))
train_set_tmp.reset_index().to_feather(os.path.join(input_dir, 'train_' + sample_set_column + '.feather'))
del train_set_tmp
gc.collect()
'''
|
Python
|
CL
|
ee401790cf1a50b05299acab1e606170e3fb9f31d46660f85e191d38c7a4607a
|
# Generated by Django 3.2 on 2021-05-09 13:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CheckList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('facility_type', models.CharField(choices=[('Depot', 'Depot'), ('Truck', 'Truck'), ('Service', 'Service'), ('Filling Station', 'Filling Station')], max_length=50)),
('code', models.CharField(max_length=50)),
('name', models.CharField(max_length=150)),
('registration_no', models.CharField(max_length=50)),
('contact_person', models.CharField(max_length=50)),
('contact_no', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('address', models.CharField(max_length=50)),
('region', models.CharField(max_length=200)),
('district', models.CharField(max_length=50)),
('county', models.CharField(max_length=50)),
('sub_county', models.CharField(max_length=50)),
('post_code', models.CharField(max_length=50)),
('village', models.CharField(max_length=50)),
('ownership', models.CharField(max_length=50)),
('parish', models.CharField(max_length=50)),
('fax', models.CharField(max_length=50)),
('tin', models.CharField(max_length=50)),
('logo', models.ImageField(blank=True, null=True, upload_to=None)),
('distance', models.CharField(max_length=100, null=True, verbose_name='Distance from Nearest Licensed Station.')),
],
options={
'verbose_name': 'Company',
'verbose_name_plural': 'Companys',
},
),
migrations.CreateModel(
name='CompanyInspection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('check_list_no', models.CharField(max_length=25)),
('inspection_date', models.DateField()),
('inspector', models.CharField(max_length=50, verbose_name='Inspected By')),
('recommendation', models.TextField()),
('company', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='app.company')),
],
),
migrations.CreateModel(
name='Field_enforcement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('serial_number', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Gas',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('LPG_prices', models.PositiveIntegerField()),
('LPG_item', models.CharField(max_length=150)),
('LPG_Description', models.CharField(max_length=150)),
],
),
migrations.CreateModel(
name='Inspection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('part1', models.CharField(max_length=200, verbose_name='Possession of certificate of approval of Environment Impact Assessment Certificate')),
],
),
migrations.CreateModel(
name='Suppliers',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('address', models.CharField(max_length=200)),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.company')),
],
),
migrations.CreateModel(
name='SampleRequest',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('reg_no', models.CharField(max_length=50, unique=True, verbose_name='Request No.')),
('registration_date', models.DateField(null=True)),
('representative', models.CharField(max_length=50)),
('report_date', models.CharField(max_length=50, verbose_name='Expected Report Date')),
('remarks', models.TextField()),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.company')),
],
options={
'verbose_name': 'SampleRequest',
'verbose_name_plural': 'SampleRequests',
},
),
migrations.CreateModel(
name='Products',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('product_type', models.CharField(choices=[('PMS', 'PMS'), ('AGO', 'AGO'), ('BIK', 'BIK'), ('OTHERS', 'OTHERS')], max_length=200)),
('tank_details', models.CharField(max_length=200)),
('stock', models.CharField(max_length=200)),
('product_prices', models.CharField(max_length=150)),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.company')),
],
),
migrations.CreateModel(
name='ProductPrics',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_prices', models.CharField(max_length=150)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.products')),
],
),
migrations.CreateModel(
name='Permits',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('construction_permit', models.CharField(max_length=200, verbose_name='Construction Permit Number')),
('operation_permit', models.CharField(max_length=200, verbose_name='Operating License Number')),
('TIN', models.CharField(max_length=200, verbose_name='Company Tax identification Number')),
('company', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='app.company')),
],
),
migrations.CreateModel(
name='NemaCertifcate',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('certificate_no', models.CharField(max_length=25, verbose_name='NEMA Certificate No.')),
('create_date', models.DateField()),
('audit_due_date', models.DateField()),
('project', models.CharField(max_length=50)),
('project_purpose', models.TextField()),
('received_date', models.DateField()),
('certifcate_one', models.FileField(upload_to='')),
('certifcate_two', models.FileField(upload_to='')),
('certifcate_three', models.FileField(upload_to='')),
('status', models.CharField(max_length=100)),
('company', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='app.company')),
],
),
migrations.CreateModel(
name='InspectionCheckList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.BooleanField()),
('remarks', models.TextField()),
('checkList', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='app.checklist', verbose_name='Particular')),
('company_inspection', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='app.companyinspection')),
],
),
migrations.CreateModel(
name='Employees',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('female', models.PositiveIntegerField()),
('male', models.PositiveIntegerField()),
('company', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='app.company')),
],
),
migrations.CreateModel(
name='Branches',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('address', models.CharField(max_length=200)),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.company')),
],
),
migrations.CreateModel(
name='Attachments',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('attachment_file', models.FileField(upload_to='attachments')),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.company')),
],
),
migrations.CreateModel(
name='Sample',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('fuel_type', models.CharField(choices=[('PMS(Gasoline)', 'PMS(Gasoline)'), ('DS', 'AGO(Diesel)'), ('KS', 'BIK(Keresone)'), ('EO', 'Engine Oil'), ('JF', 'Jet Fuel'), ('FO', 'Furnance Oil')], max_length=50)),
('parameter', models.CharField(choices=[('Mk', 'Marker'), ('DS', 'Density'), ('Qu', 'Quality')], max_length=20)),
('type_method', models.CharField(max_length=10)),
('test_method', models.CharField(max_length=10)),
('unit_fee', models.IntegerField()),
('quantity', models.IntegerField(verbose_name='Quantity(mls)')),
('sample_request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.samplerequest')),
],
options={
'verbose_name': 'Sample',
'verbose_name_plural': 'Samples',
'unique_together': {('sample_request', 'fuel_type')},
},
),
]
|
Python
|
CL
|
9db5e5f2b364b375918045d83cb616fcbf5fbbfd92a2bc74f2242c5ce3cfbe61
|
from typing import Dict, Union, Tuple, Iterable
from pathlib import Path, WindowsPath
from os import sep, utime
import time
import logging
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from tkinter import font as tkfont
import toml
import attr
from attr.validators import instance_of
from appdirs import user_config_dir
from .config_parser import Config
import pysight
from pysight.nd_hist_generator.movie import ImagingSoftware
def is_positive(instance, attribute, value):
if value < 0:
return ValueError("TAG Bit value has to be greater than 0.")
def end_is_greater(instance, attribute, value):
if value < instance.start:
return ValueError("TAG Bit 'end' value has to be equal or greater to 'start'.")
@attr.s(slots=True)
class TagBits(object):
"""
Storage for TAG bits
"""
value = attr.ib(default="None", validator=instance_of(str))
start = attr.ib(default=0, validator=[instance_of(int), is_positive])
end = attr.ib(default=1, validator=[instance_of(int), is_positive, end_is_greater])
DATA_SOURCES = (
"PMT1",
"PMT2",
"PMT3",
"PMT4",
"Lines",
"Frames",
"Laser",
"TAG Lens",
"Empty",
)
class GuiAppLst:
"""
Main GUI for the multiscaler code.
Note - class variables should contain "entry" in their name if they point
to an entry TTK object. Also, no variable should contain "root" in its name.
"""
def __init__(self):
self.root = Tk()
self.root.title(f"PySight \uFF5C PBLab \uFF5C v{pysight.__version__}")
self.root.rowconfigure(16, weight=1)
self.root.columnconfigure(16, weight=1)
main_frame = ttk.Frame(self.root, width=1000, height=1300)
main_frame.grid(column=0, row=0)
main_frame["borderwidth"] = 2
style = ttk.Style()
style.theme_use("clam")
self.normal_font = tkfont.Font(family="Helvetica", size=10)
self.bold_font = tkfont.Font(family="Helvetica", size=12, weight="bold")
self.config_row = 14
self.__create_vars()
# Run widgets
self.__browse_file(main_frame)
self.__advanced_win(main_frame)
self.__input_channels(main_frame)
self.__num_of_frames(main_frame)
self.__outputs(main_frame)
self.__image_size(main_frame)
self.__tag_bits(main_frame)
self.__imaging_software(main_frame)
# Only saving\loading functions after this point
self.__save_cfg(main_frame)
self.__load_cfg(main_frame)
self.__load_last_used_cfg(main_frame)
# Define the last quit button and wrap up GUI
quit_button = ttk.Button(main_frame, text="Start", command=self.root.destroy)
quit_button.grid(row=16, column=2, sticky="ns")
self.root.bind("<Return>", self.__dest)
for child in main_frame.winfo_children():
child.grid_configure(padx=3, pady=2)
self.root.wait_window()
def __dest(self, event):
self.root.destroy()
def __create_vars(self):
self.debug = BooleanVar(value=False)
self.phase = DoubleVar(value=-2.78)
self.reprate = DoubleVar(
value=80e6
) # 80e6 for the Chameleon, 0 to raise ZeroDivisionError
self.gating = BooleanVar(
value=False
) # difference between pulse and arrival to sample
self.binwidth = DoubleVar(value=800e-12)
self.tag_freq = DoubleVar(value=0.189e6)
self.tag_pulses = IntVar(value=1)
self.tag_offset = IntVar(value=0)
self.fill_frac = DoubleVar(value=72.0) # percent
self.bidir = BooleanVar(value=False)
self.keep_unidir = BooleanVar(value=False)
self.flim: BooleanVar = BooleanVar(value=False)
self.flim_downsampling_space: IntVar = IntVar(value=1)
self.flim_downsampling_time: IntVar = IntVar(value=1)
self.censor: BooleanVar = BooleanVar(value=False)
self.line_freq = DoubleVar(value=7930.0) # Hz
self.sweeps_as_lines = BooleanVar(value=False)
self.frame_delay = DoubleVar(value=0.001) # sec
self.interleaved = BooleanVar(value=False)
def __browse_file(self, main_frame):
file_row = 0
self.filename = StringVar(value="")
browse_button = ttk.Button(main_frame, text="Browse", command=self.__browsefunc)
browse_button.grid(column=0, row=file_row, sticky="ns")
browse_entry = ttk.Entry(main_frame, textvariable=self.filename, width=80)
browse_entry.grid(column=1, row=file_row, sticky="we", columnspan=2)
def __imaging_software(self, main_frame):
imaging_software_label = ttk.Label(
main_frame, text="Imaging Software", font=self.bold_font
)
imaging_software_label.grid(row=5, column=2, sticky="ns")
self.imaging_software = StringVar()
cb_image = ttk.Combobox(
main_frame, textvariable=self.imaging_software, width=10
)
cb_image.grid(row=6, column=2, sticky="ns")
cb_image.set(ImagingSoftware.SCANIMAGE.value)
cb_image["values"] = [item.value for item in ImagingSoftware]
def __input_channels(self, main_frame):
# Comboboxes
inputs_row = 1
input_channels_label = ttk.Label(
main_frame,
text="Input Channels ",
font=self.bold_font,
)
input_channels_label.grid(column=0, row=inputs_row, columnspan=2)
self.input_start = StringVar()
self.input_stop1 = StringVar()
self.input_stop2 = StringVar()
self.input_stop3 = StringVar()
self.input_stop4 = StringVar()
self.input_stop5 = StringVar()
mb1 = ttk.Combobox(main_frame, textvariable=self.input_start, width=10)
mb1.grid(column=1, row=inputs_row + 1, sticky="w")
mb1.set("PMT1")
mb1["values"] = DATA_SOURCES
mb2 = ttk.Combobox(main_frame, textvariable=self.input_stop1, width=10)
mb2.grid(column=1, row=inputs_row + 2, sticky="w")
mb2.set("Empty")
mb2["values"] = DATA_SOURCES
mb3 = ttk.Combobox(main_frame, textvariable=self.input_stop2, width=10)
mb3.grid(column=1, row=inputs_row + 3, sticky="w")
mb3.set("Lines")
mb3["values"] = DATA_SOURCES
mb4 = ttk.Combobox(main_frame, textvariable=self.input_stop3, width=10)
mb4.grid(column=1, row=inputs_row + 4, sticky="w")
mb4.set("Empty")
mb4["values"] = DATA_SOURCES
mb5 = ttk.Combobox(main_frame, textvariable=self.input_stop4, width=10)
mb5.grid(column=1, row=inputs_row + 5, sticky="w")
mb5.set("Empty")
mb5["values"] = DATA_SOURCES
mb6 = ttk.Combobox(main_frame, textvariable=self.input_stop5, width=10)
mb6.grid(column=1, row=inputs_row + 6, sticky="w")
mb6.set("Empty")
mb6["values"] = DATA_SOURCES
# Labels
input_channel_1 = ttk.Label(main_frame, text="START", font=self.normal_font)
input_channel_1.grid(column=0, row=inputs_row + 1, sticky="ns")
input_channel_2 = ttk.Label(main_frame, text="STOP1", font=self.normal_font)
input_channel_2.grid(column=0, row=inputs_row + 2, sticky="ns")
input_channel_3 = ttk.Label(main_frame, text="STOP2", font=self.normal_font)
input_channel_3.grid(column=0, row=inputs_row + 3, sticky="ns")
input_channel_4 = ttk.Label(main_frame, text="STOP3", font=self.normal_font)
input_channel_4.grid(column=0, row=inputs_row + 4, sticky="ns")
input_channel_5 = ttk.Label(main_frame, text="STOP4", font=self.normal_font)
input_channel_5.grid(column=0, row=inputs_row + 5, sticky="ns")
input_channel_6 = ttk.Label(main_frame, text="STOP5", font=self.normal_font)
input_channel_6.grid(column=0, row=inputs_row + 6, sticky="ns")
def __num_of_frames(self, main_frame):
# Number of frames in the data
frame_label = ttk.Label(
main_frame, text="Number of frames", font=self.normal_font
)
frame_label.grid(column=2, row=4, sticky="w")
self.num_of_frames = IntVar(value=1)
self.num_frames_entry = ttk.Entry(
main_frame, textvariable=self.num_of_frames, width=3
)
self.num_frames_entry.grid(column=2, row=4, sticky="ns")
self.num_frames_entry.config(state="disabled")
# Disable number of frames unless all inputs but one are empty
self.input_start.trace("w", self.__check_if_empty)
self.input_start.trace("w", self.__check_if_tag_lens_exists)
self.input_stop1.trace("w", self.__check_if_empty)
self.input_stop1.trace("w", self.__check_if_tag_lens_exists)
self.input_stop2.trace("w", self.__check_if_empty)
self.input_stop2.trace("w", self.__check_if_tag_lens_exists)
self.input_stop3.trace("w", self.__check_if_empty)
self.input_stop3.trace("w", self.__check_if_tag_lens_exists)
self.input_stop4.trace("w", self.__check_if_empty)
self.input_stop4.trace("w", self.__check_if_tag_lens_exists)
self.input_stop5.trace("w", self.__check_if_empty)
self.input_stop5.trace("w", self.__check_if_tag_lens_exists)
def __outputs(self, main_frame):
""" Wanted outputs """
outputs_row = 9
outputs_column = 2
outputs_label = ttk.Label(main_frame, text="Outputs", font=self.bold_font)
outputs_label.grid(column=outputs_column, row=outputs_row - 1, sticky="ns")
self.summed = BooleanVar(value=False)
summed_array = ttk.Checkbutton(
main_frame, text="Summed Stack", variable=self.summed
)
summed_array.grid(column=outputs_column, row=outputs_row, sticky="ns")
self.memory = BooleanVar(value=False)
in_memory = ttk.Checkbutton(main_frame, text="In Memory", variable=self.memory)
in_memory.grid(column=outputs_column, row=outputs_row + 1, sticky="ns")
self.stack = BooleanVar(value=True)
tif = ttk.Checkbutton(main_frame, text="Full Stack", variable=self.stack)
tif.grid(column=outputs_column, row=outputs_row + 2, sticky="ns")
def __image_size(self, main_frame):
image_size_row = 1
image_size_label = ttk.Label(main_frame, text="Image Size", font=self.bold_font)
image_size_label.grid(column=2, row=image_size_row, sticky="ns", columnspan=1)
x_size_label = ttk.Label(main_frame, text="X", font=self.normal_font)
x_size_label.grid(column=2, row=image_size_row + 1, sticky="w")
y_size_label = ttk.Label(main_frame, text="Y", font=self.normal_font)
y_size_label.grid(column=2, row=image_size_row + 1, sticky="ns")
z_size_label = ttk.Label(main_frame, text="Z", font=self.normal_font)
z_size_label.grid(column=2, row=image_size_row + 1, sticky="e")
self.x_pixels = IntVar(value=512)
self.y_pixels = IntVar(value=512)
self.z_pixels = IntVar(value=1)
x_pixels_entry = ttk.Entry(main_frame, textvariable=self.x_pixels, width=5)
x_pixels_entry.grid(column=2, row=image_size_row + 2, sticky="w")
y_pixels_entry = ttk.Entry(main_frame, textvariable=self.y_pixels, width=5)
y_pixels_entry.grid(column=2, row=image_size_row + 2, sticky="ns")
self.z_pixels_entry = ttk.Entry(main_frame, textvariable=self.z_pixels, width=5)
self.z_pixels_entry.grid(column=2, row=image_size_row + 2, sticky="e")
self.z_pixels_entry.config(state="disabled")
def __debug(self, main_frame):
""" Read a smaller portion of data for debugging """
debug_check = ttk.Checkbutton(main_frame, text="Debug?", variable=self.debug)
debug_check.grid(column=2, row=10, sticky="ns")
def __interleaved(self, main_frame):
""" Unmix two data channel in the same PMT1 analog channel """
inter_check = ttk.Checkbutton(
main_frame, text="Interleaved?", variable=self.interleaved
)
inter_check.grid(column=2, row=9, sticky="ns")
def __mirror_phase(self, main_frame):
phase_text = ttk.Label(main_frame, text="Mirror phase [us]: ")
phase_text.grid(column=0, row=1, sticky="w")
phase_entry = ttk.Entry(main_frame, textvariable=self.phase, width=8)
phase_entry.grid(column=0, row=1, sticky="e")
def __reprate(self, main_frame):
""" Laser repetition rate"""
laser1_label = ttk.Label(main_frame, text="Laser nominal rep. rate (FLIM) [Hz]")
laser1_label.grid(column=2, row=8, sticky="ns")
reprate_entry = ttk.Entry(main_frame, textvariable=self.reprate, width=11)
reprate_entry.grid(column=3, row=8, sticky="ns")
def __gating(self, main_frame):
self.gating_check = ttk.Checkbutton(
main_frame, text="With Gating?", variable=self.gating
)
self.gating_check.grid(column=2, row=7, sticky="ns")
self.gating_check.config(state="disabled")
def __binwidth(self, main_frame):
""" Binwidth of Multiscaler (for FLIM) """
binwidth_label = ttk.Label(main_frame, text="Multiscaler binwidth [sec]")
binwidth_label.grid(column=2, row=1, sticky="ns")
binwidth_entry = ttk.Entry(main_frame, textvariable=self.binwidth, width=9)
binwidth_entry.grid(column=3, row=1, sticky="ns")
def __tag_lens(self, main_frame):
""" TAG lens nominal frequency """
tag_row = 7
tag_label = ttk.Label(
main_frame,
text=" TAG nominal freq. [Hz]\noffset [deg] n. pulses",
)
tag_label.grid(column=0, row=tag_row, columnspan=2, sticky="w")
tag_label_entry = ttk.Entry(main_frame, textvariable=self.tag_freq, width=10)
tag_label_entry.grid(column=0, row=tag_row + 1, sticky="ns")
tag_pulses_entry = ttk.Entry(main_frame, textvariable=self.tag_pulses, width=3)
tag_pulses_entry.grid(column=0, row=tag_row + 1, sticky="e")
tag_pulses_entry.config(state="disabled")
self.tag_offset_entry = ttk.Entry(
main_frame, textvariable=self.tag_offset, width=3
)
self.tag_offset_entry.grid(column=0, row=tag_row + 1, sticky="w")
def __tag_bits(self, main_frame):
""" TAG bits """
tag_bits_row = 9
tag_bits_label = ttk.Label(
main_frame, text="TAG Bits Allocation", font=self.bold_font
)
tag_bits_label.grid(column=1, row=tag_bits_row, sticky="ns")
self.tag_bits = BooleanVar(value=False)
tag_bit_check = ttk.Checkbutton(main_frame, text="Use?", variable=self.tag_bits)
tag_bit_check.grid(column=1, row=tag_bits_row, sticky="w")
self.bits_grp_1_start = IntVar(value=1)
self.bits_grp_1_end = IntVar(value=3)
self.bits_grp_2_start = IntVar(value=4)
self.bits_grp_2_end = IntVar(value=5)
self.bits_grp_3_start = IntVar(value=6)
self.bits_grp_3_end = IntVar(value=16)
self.bits_grp_1_label = StringVar()
self.bits_grp_2_label = StringVar()
self.bits_grp_3_label = StringVar()
self.tag_bits_group_options = (
"Power",
"Slow axis",
"Fast axis",
"Z axis",
"None",
)
bits_grp_1 = ttk.Combobox(
main_frame, textvariable=self.bits_grp_1_label, width=10
)
bits_grp_1.grid(column=0, row=tag_bits_row + 1, sticky="e")
bits_grp_1.set("None")
bits_grp_1["values"] = self.tag_bits_group_options
bits_grp_2 = ttk.Combobox(
main_frame, textvariable=self.bits_grp_2_label, width=10
)
bits_grp_2.grid(column=0, row=tag_bits_row + 2, sticky="e")
bits_grp_2.set("None")
bits_grp_2["values"] = self.tag_bits_group_options
bits_grp_3 = ttk.Combobox(
main_frame, textvariable=self.bits_grp_3_label, width=10
)
bits_grp_3.grid(column=0, row=tag_bits_row + 3, sticky="e")
bits_grp_3.set("None")
bits_grp_3["values"] = self.tag_bits_group_options
bits_grp_1_start_lab = ttk.Label(main_frame, text="\tStart")
bits_grp_1_start_lab.grid(column=1, row=tag_bits_row + 1, sticky="w")
bits_grp_1_start_ent = ttk.Entry(
main_frame, textvariable=self.bits_grp_1_start, width=3
)
bits_grp_1_start_ent.grid(column=1, row=tag_bits_row + 1, sticky="ns")
bits_grp_1_end_lab = ttk.Label(main_frame, text="End")
bits_grp_1_end_lab.grid(column=1, row=tag_bits_row + 1, sticky="e")
bits_grp_1_end_ent = ttk.Entry(
main_frame, textvariable=self.bits_grp_1_end, width=3
)
bits_grp_1_end_ent.grid(column=2, row=tag_bits_row + 1, sticky="w")
bits_grp_2_start_lab = ttk.Label(main_frame, text="\tStart")
bits_grp_2_start_lab.grid(column=1, row=tag_bits_row + 2, sticky="w")
bits_grp_2_start_ent = ttk.Entry(
main_frame, textvariable=self.bits_grp_2_start, width=3
)
bits_grp_2_start_ent.grid(column=1, row=tag_bits_row + 2, sticky="ns")
bits_grp_2_end_lab = ttk.Label(main_frame, text="End")
bits_grp_2_end_lab.grid(column=1, row=tag_bits_row + 2, sticky="e")
bits_grp_2_end_ent = ttk.Entry(
main_frame, textvariable=self.bits_grp_2_end, width=3
)
bits_grp_2_end_ent.grid(column=2, row=tag_bits_row + 2, sticky="w")
bits_grp_3_start_lab = ttk.Label(main_frame, text="\tStart")
bits_grp_3_start_lab.grid(column=1, row=tag_bits_row + 3, sticky="w")
bits_grp_3_start_ent = ttk.Entry(
main_frame, textvariable=self.bits_grp_3_start, width=3
)
bits_grp_3_start_ent.grid(column=1, row=tag_bits_row + 3, sticky="ns")
bits_grp_3_end_lab = ttk.Label(main_frame, text="End")
bits_grp_3_end_lab.grid(column=1, row=tag_bits_row + 3, sticky="e")
bits_grp_3_end_ent = ttk.Entry(
main_frame, textvariable=self.bits_grp_3_end, width=3
)
bits_grp_3_end_ent.grid(column=2, row=tag_bits_row + 3, sticky="w")
self.tag_bits_dict = {}
self.tag_bits_dict = {
0: TagBits(
value=self.bits_grp_1_label.get(),
start=self.bits_grp_1_start.get(),
end=self.bits_grp_1_end.get(),
),
1: TagBits(
value=self.bits_grp_2_label.get(),
start=self.bits_grp_2_start.get(),
end=self.bits_grp_2_end.get(),
),
2: TagBits(
value=self.bits_grp_3_label.get(),
start=self.bits_grp_3_start.get(),
end=self.bits_grp_3_end.get(),
),
}
def __fill_frac(self, main_frame):
""" Percentage of time mirrors spend "inside" the image """
fill_frac_text = ttk.Label(main_frame, text="Fill fraction [%]: ")
fill_frac_text.grid(column=0, row=4, sticky="w")
fill_frac_entry = ttk.Entry(main_frame, textvariable=self.fill_frac, width=8)
fill_frac_entry.grid(column=0, row=4, sticky="e")
def __browsefunc(self):
filetypes = [("List files", "*.lst"), ("All files", "*.*")]
if self.filename.get() != "":
self.filename.set(
filedialog.askopenfilename(
filetypes=filetypes,
title="Choose a list or pickle file",
initialdir=str(Path(self.filename.get()).parent),
)
)
else:
self.filename.set(
filedialog.askopenfilename(
filetypes=filetypes,
title="Choose a list or pickle file",
initialdir=".",
)
)
def __check_if_empty(self, *args):
list_of_values = [
self.input_start.get(),
self.input_stop1.get(),
self.input_stop2.get(),
self.input_stop3.get(),
self.input_stop4.get(),
self.input_stop5.get(),
]
if 2 == list_of_values.count("Empty"):
if "PMT1" in list_of_values or "PMT2" in list_of_values:
self.num_frames_entry.config(state="normal")
else:
self.num_frames_entry.config(state="disabled")
def __check_if_tag_lens_exists(self, *args):
list_of_values = [
self.input_start.get(),
self.input_stop1.get(),
self.input_stop2.get(),
self.input_stop3.get(),
self.input_stop4.get(),
self.input_stop5.get(),
]
if "TAG Lens" in list_of_values:
self.z_pixels_entry.config(state="normal")
# self.tag_offset_entry.config(state='normal')
else:
self.z_pixels_entry.config(state="disabled")
# self.tag_offset_entry.config(state='disabled')
def __bidir(self, main_frame):
""" Checkbox for bi-directional scan """
bidir_check = ttk.Checkbutton(
main_frame, text="Bi-directional scan", variable=self.bidir
)
bidir_check.grid(column=0, row=5, sticky="ns")
self.bidir.trace("w", self.__check_if_bidir)
def __check_if_bidir(self, *args):
if self.bidir.get():
self.keep_unidir_check.config(state="normal")
else:
self.keep_unidir_check.config(state="disabled")
def __keep_unidir_events(self, main_frame):
""" Checkbox to see if events taken in the returning phase of a resonant mirror should be kept. """
self.keep_unidir_check = ttk.Checkbutton(
main_frame, text="Keep unidirectional?", variable=self.keep_unidir
)
self.keep_unidir_check.grid(column=0, row=6, sticky="ns")
self.keep_unidir_check.config(state="disabled")
def __flim(self, main_frame):
"""
Defines the mapping between one pulse and the missing pulses.
For example, downsampling factor of 8 means that every pulse that is
received starts an event of 8 pulses, with the next recorded pulse being the 9th.
:param main_frame: ttk.Frame
"""
flim_check: ttk.Checkbutton = ttk.Checkbutton(
main_frame, variable=self.flim, text="FLIM?"
)
flim_check.grid(row=2, column=2, sticky="ns")
self.flim.trace("w", self.__check_if_flim)
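# Illustrative sketch only (hypothetical numbers, not part of the GUI): with a
# downsampling factor of 8, recorded pulse k stands in for the 8 nominal pulses
# 8*k .. 8*k + 7, so the next recorded pulse is the 9th, as the docstring above
# describes.
# factor = 8
# nominal_pulses = [8 * k + i for i in range(factor)]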
def __flim_downsampling_space(self, main_frame):
downsamping_space_text = ttk.Label(main_frame, text="Downsampling in space:")
downsamping_space_text.grid(column=2, row=3, sticky="ns")
self.downsamping_space_entry = ttk.Entry(
main_frame, textvariable=self.flim_downsampling_space, width=4
)
self.downsamping_space_entry.grid(column=3, row=3, sticky="ns")
self.downsamping_space_entry.config(
state="normal" if self.flim.get() else "disabled"
)
def __flim_downsampling_time(self, main_frame):
downsamping_time_text = ttk.Label(
main_frame, text="Downsampling in time (frames):"
)
downsamping_time_text.grid(column=2, row=4, sticky="ns")
self.downsamping_time_entry = ttk.Entry(
main_frame, textvariable=self.flim_downsampling_time, width=4
)
self.downsamping_time_entry.grid(column=3, row=4, sticky="ns")
self.downsamping_time_entry.config(
state="normal" if self.flim.get() else "disabled"
)
def __censor(self, main_frame):
"""
If FLIM is active, this checkbox enables the use of censor correction on the generated images.
:param main_frame: ttk.Frame
"""
self.censor_check: ttk.Checkbutton = ttk.Checkbutton(
main_frame, variable=self.censor, text="Censor Correction"
)
self.censor_check.grid(row=5, column=2, sticky="ns")
self.censor_check.config(state="disabled")
def __check_if_flim(self, *args):
state = "normal" if self.flim.get() else "disabled"
for check in (
self.censor_check,
self.gating_check,
self.downsamping_space_entry,
self.downsamping_time_entry,
):
check.config(state=state)
self.root.update_idletasks()
def __line_freq(self, main_frame):
""" Frequency of the line scanning mirror """
line_freq_label = ttk.Label(main_frame, text="Line freq [Hz]: ")
line_freq_label.grid(row=3, column=0, sticky="w")
line_freq_entry = ttk.Entry(main_frame, textvariable=self.line_freq, width=8)
line_freq_entry.grid(row=3, column=0, sticky="e")
def __sweeps_as_lines(self, main_frame):
""" Use the sweeps as lines for the image generation """
sweeps_cb = ttk.Checkbutton(
main_frame, variable=self.sweeps_as_lines, text="Sweeps as lines?"
)
sweeps_cb.grid(row=6, column=2, sticky="ns")
def __advanced_win(self, main_frame):
advanced_but = ttk.Button(
main_frame, text="Advanced", command=self.__open_advanced
)
advanced_but.grid(row=13, column=2, sticky="ns")
def __open_advanced(self, *args):
self.advanced_win = Toplevel(self.root)
frame = ttk.Frame(self.advanced_win, width=300, height=300)
frame.grid(column=0, row=0)
frame["borderwidth"] = 2
style = ttk.Style()
style.theme_use("clam")
self.__setup_advanced_frame(frame)
self.__gating(frame)
self.__flim(frame)
self.__flim_downsampling_space(frame)
self.__flim_downsampling_time(frame)
self.__censor(frame)
self.__sweeps_as_lines(frame)
self.__debug(frame)
self.__mirror_phase(frame)
self.__fill_frac(frame)
self.__reprate(frame)
self.__binwidth(frame)
self.__keep_unidir_events(frame)
self.__bidir(frame)
self.__check_if_bidir(frame)
self.__tag_lens(frame)
self.__frame_delay(frame)
self.__line_freq(frame)
self.__interleaved(frame)
for child in frame.winfo_children():
child.grid_configure(padx=3, pady=2)
def __setup_advanced_frame(self, frame):
scan_lab = ttk.Label(frame, text=" Scanner Settings", font=self.bold_font)
scan_lab.grid(row=0, column=0, sticky="ns")
hardware_lab = ttk.Label(
frame, text=" Hardware Settings", font=self.bold_font
)
hardware_lab.grid(row=0, column=2, sticky="ns")
def __frame_delay(self, main_frame):
frame_delay_label = ttk.Label(main_frame, text="Frame delay [sec]: ")
frame_delay_label.grid(row=2, column=0, sticky="w")
frame_delay_entry = ttk.Entry(
main_frame, textvariable=self.frame_delay, width=8
)
frame_delay_entry.grid(row=2, column=0, sticky="e")
####### ONLY SAVE\LOAD FUNCS AFTER THIS POINT ########
def __save_cfg(self, main_frame):
""" A button to write a .toml with current configs """
config_label = ttk.Label(
main_frame, text="Configuration File", font=self.bold_font
)
config_label.grid(column=1, row=self.config_row, sticky="ns")
self.save_as: StringVar = StringVar(value="default")
save_label = ttk.Label(main_frame, text="Config file name to save:")
save_label.grid(
column=0, row=self.config_row + 1, sticky="ns", columnspan=2, padx=10
)
save_entry = ttk.Entry(main_frame, textvariable=self.save_as, width=8)
save_entry.grid(column=1, row=self.config_row + 1, sticky="e")
save_button = ttk.Button(
main_frame, text="Save cfg", command=self.__callback_save_cur_cfg
)
save_button.grid(column=1, row=self.config_row + 2, sticky="w")
def __callback_save_cur_cfg(self) -> None:
"""
Takes a GUIApp() instance and saves it to a .toml file
"""
cfg_dict_to_save = Config.from_gui(self)
cfg_dict_to_save.to_disk()
def __load_cfg(self, main_frame: ttk.Frame):
"""
Load a specific .toml file and change all variables accordingly
"""
self.cfg_filename: StringVar = StringVar(value="default")
load_button: Button = ttk.Button(
main_frame, text="Load cfg", command=self.__browsecfg
)
load_button.grid(column=1, row=self.config_row + 2, sticky="e")
def __browsecfg(self, new_cfg=None):
if not new_cfg:
self.cfg_filename.set(
filedialog.askopenfilename(
filetypes=[("Config files", "*.toml")],
title=f"Choose a configuration file",
initialdir=user_config_dir("pysight"),
)
)
else:
self.cfg_filename.set(new_cfg)
with open(self.cfg_filename.get(), "r") as f:
self.config = toml.load(f)
try:
utime(self.cfg_filename.get(), (time.time(), time.time()))
except PermissionError:
pass
self.__modify_vars()
def __modify_vars(self):
"""
With the dictionary loaded from the TOML file, change all variables
"""
from_cfg_to_vars = self._build_config_dict()
for cfg_key, cfg_val in self.config.items():
if isinstance(cfg_val, dict):
for inner_key, inner_val in cfg_val.items():
if isinstance(inner_val, dict):
for deep_key, deep_val in inner_val.items():
from_cfg_to_vars[deep_key].set(deep_val)
else:
from_cfg_to_vars[inner_key].set(inner_val)
else:
from_cfg_to_vars[cfg_key].set(cfg_val)
self.root.update_idletasks()
def __load_last_used_cfg(self, main_frame):
direc = Path(user_config_dir("pysight"))
all_cfg_files: Iterable = direc.glob("*.toml")
latest_filename: str = ""
latest_file_date: int = 0
for cfg_file in all_cfg_files:
cur_date_modified = cfg_file.stat()[8]
if cur_date_modified > latest_file_date:
latest_filename = str(cfg_file)
latest_file_date = cur_date_modified
if latest_filename != "":
with open(latest_filename, "r") as f:
try:
self.config = toml.load(f)
except ValueError:
self.config = {}
self.__modify_vars()
def _build_config_dict(self):
""" Helper method to populate a new GUI instance from a config file """
from_config_to_vars = {
"cfg_title": self.save_as,
"stop1": self.input_stop1,
"stop2": self.input_stop2,
"stop3": self.input_stop3,
"stop4": self.input_stop4,
"stop5": self.input_stop5,
"start": self.input_start,
"num_of_frames": self.num_of_frames,
"x_pixels": self.x_pixels,
"y_pixels": self.y_pixels,
"z_pixels": self.z_pixels,
"imaging_software": self.imaging_software,
"data_filename": self.filename,
"summed": self.summed,
"memory": self.memory,
"stack": self.stack,
"debug": self.debug,
"phase": self.phase,
"reprate": self.reprate,
"gating": self.gating,
"binwidth": self.binwidth,
"tag_freq": self.tag_freq,
"tag_pulses": self.tag_pulses,
"tag_offset": self.tag_offset,
"fill_frac": self.fill_frac,
"bidir": self.bidir,
"keep_unidir": self.keep_unidir,
"flim": self.flim,
"flim_downsampling_space": self.flim_downsampling_space,
"flim_downsampling_time": self.flim_downsampling_time,
"censor": self.censor,
"line_freq": self.line_freq,
"sweeps_as_lines": self.sweeps_as_lines,
"frame_delay": self.frame_delay,
"interleaved": self.interleaved,
"tag_bits": self.tag_bits,
"label1": self.bits_grp_1_label,
"start1": self.bits_grp_1_start,
"end1": self.bits_grp_1_end,
"label2": self.bits_grp_2_label,
"start2": self.bits_grp_2_start,
"end2": self.bits_grp_2_end,
"label3": self.bits_grp_3_label,
"start3": self.bits_grp_3_start,
"end3": self.bits_grp_3_end,
}
return from_config_to_vars
if __name__ == "__main__":
app = GuiAppLst()
|
Python
|
CL
|
8be059cf15d2f2c8a57b082d8037c14bfa542e114d9668aec35dd66ab9e281cc
|
#!/usr/bin/env python
#
# Copyright 2011-2015 Jeff Bush
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import subprocess
sys.path.insert(0, '../..')
import test_harness
def run_test(name):
if name.endswith('_emulator'):
basename = name[0:-len('_emulator')]
isverilator = False
elif name.endswith('_verilator'):
basename = name[0:-len('_verilator')]
isverilator = True
test_harness.compile_test([basename + '.c'])
if isverilator:
result = test_harness.run_verilator()
else:
result = test_harness.run_emulator()
test_harness.check_result(basename + '.c', result)
tests = [
'creg_non_supervisor',
'eret_non_supervisor',
'syscall'
]
for name in tests:
test_harness.register_tests(run_test, [name + '_verilator'])
test_harness.register_tests(run_test, [name + '_emulator'])
test_harness.execute_tests()
|
Python
|
CL
|
359b3489b49577808f1fe6af26f937e8e46cc243e7899cca6a2bc461528d3f72
|
import numpy as np
import math
from numpy import linalg as LA
from math import factorial
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving average techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less than `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or its n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over an odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError, msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
def convolve_all(Iin, RF, opt):
Iin = Iin.copy()
RF = RF.copy()
maskZero = Iin[..., :] <= 0
maxVal = np.max(Iin)
Iin[maskZero] = maxVal + 1
index = Iin.ndim
try:
shapeSig = (Iin.shape[0], Iin.shape[1])
M = [np.meshgrid(Iin[i], Iin[i]) for i in xrange(shapeSig[0])]
M = np.array(M)[:, 0, ...]
except IndexError:
shapeSig = (1, Iin.shape[0])
M, T = np.meshgrid(Iin, Iin)
try:
shapeRF = (RF.shape[0], RF.shape[1])
except IndexError:
shapeRF = (1, RF.shape[0])
if shapeSig[1] != shapeRF[0]:
rp = np.tile(RF[shapeRF[0] - 1::], (((shapeSig[1]) - shapeRF[0]), 1))
RF = np.vstack((RF, rp))
try:
shapeRF = (RF.shape[0], RF.shape[1])
except IndexError:
shapeRF = (1, RF.shape[0])
il = np.tril(M[..., :(shapeRF[1] - 1), :])
iu = np.triu(M)
mask = il[..., :, ::] > 0
mask_1 = iu[..., :, ::] > 0
justified_mask = np.sort(mask[..., :, ::], index)
justified_mask_1 = np.sort(mask_1[..., :, ::], index)
justified_mask = justified_mask[..., :, ::]
justified_mask_1 = justified_mask_1[..., :, ::-1]
out = np.zeros_like(il[..., :, :])
out_1 = np.zeros_like(iu[..., :, :])
out[justified_mask] = il[..., :, :, ][mask]
out_1[justified_mask_1] = iu[..., :, :][mask_1]
mask_maxval = out == maxVal + 1
out[:, :][mask_maxval] = 0
mask1_maxval = out_1 == maxVal + 1
out_1[:, :][mask1_maxval] = 0
if index > 1:
mod_input = np.hstack((out, out_1))
else:
mod_input = np.vstack((out, out_1))
RFZero = RF[..., :] <= 0
maxValRF = np.max(RF)
RF[RFZero] = maxValRF + 1
diags = [np.concatenate((RF[:, ::-1].diagonal(i), np.zeros(shapeSig[1] - len(RF[:, ::-1].diagonal(i)))), axis=0) for
i in range(-shapeRF[0] + 1, shapeRF[1])]
diags = np.array(diags[::-1])
mask = diags[:(shapeRF[1] - 1), :] > 0
justified_mask = np.sort(mask, 1)
diags[:(shapeRF[1] - 1), :][justified_mask] = diags[:(shapeRF[1] - 1), :][mask]
diags[:(shapeRF[1] - 1), :][~justified_mask] = 0
maskD_maxval = diags == maxValRF + 1
diags[:, :][maskD_maxval] = 0
if index > 1:
multi = np.vstack(mod_input * diags)
else:
multi = mod_input * diags
convolve = np.sum(multi, axis=1)
if index > 1:
convolve = convolve.reshape(shapeSig[0], len(convolve) / shapeSig[0])
if (shapeRF[1]) % 2 == 0:
start = ((shapeRF[1]) / 2) - 1
else:
start = (shapeRF[1]) / 2
same = convolve[..., start:start + shapeSig[1]]
if opt == 'full':
return (convolve)
if opt == 'same':
return (same)
def convolve(Iin, k, opt):
# str_arr_I = raw_input('insert only the values of the first 1D array with the space between them:').split(' ')
# Iin=0.0* np.ones(len(str_arr_I))
# for i,j in zip (str_arr_I, xrange(len(str_arr_I))):
# Iin[j]=i
# str_arr_k = raw_input('insert only values of the second 1D array with the space between them:').split(' ')
# k=0.0* np.ones(len(str_arr_k))
# for i,j in zip (str_arr_k, xrange(len(str_arr_k))):
# k[j]=i
Iin = Iin.copy()
maskZero = Iin <= 0
maxVal = np.max(Iin)
Iin[maskZero] = maxVal + 1
kT = k[::-1]
if len(Iin) < len(k):
kT = Iin[::-1]
kT0 = kT
kT1 = kT
if len(k) != len(Iin):
kT_ = [0] * abs(len(k) - len(Iin))
kT0 = np.concatenate((kT_, kT), axis=0)
kT1 = np.concatenate((kT, kT_), axis=0)
length_s = 0
if len(Iin) > len(k):
length_s = len(Iin)
else:
length_s = len(k)
if len(Iin) < len(k):
Iin = k
M, T = np.meshgrid(Iin, Iin)
il = np.tril(M[:(len(kT) - 1), :])
iu = np.triu(M)
mask = il > 0
mask_1 = iu > 0
justified_mask = np.sort(mask, 1)
justified_mask_1 = np.sort(mask_1, 1)
justified_mask = justified_mask[:, ::]
justified_mask_1 = justified_mask_1[:, ::-1]
out = np.zeros_like(il[:, :])
out_1 = np.zeros_like(iu)
out[justified_mask] = il[:, :][mask]
out_1[justified_mask_1] = iu[mask_1]
mask_maxval = out == maxVal + 1
out[:, :][mask_maxval] = 0
mask1_maxval = out_1 == maxVal + 1
out_1[:, :][mask1_maxval] = 0
Rr = np.concatenate((np.dot(out, kT0), np.dot(kT1, out_1)), axis=0)
# print("for full mode {}".format(Rr) )
if (len(kT)) % 2 == 0:
start = ((len(kT)) / 2) - 1
else:
start = (len(kT)) / 2
realSame = Rr[start:start + length_s]
length_f = len(Rr)
off = length_f - length_s
off_eachend = off / 2
f_idx = int(off_eachend)
e_idx = f_idx + length_s
Sr = Rr[f_idx:e_idx]
# print("for same mode {}".format(Sr))
if opt == 'full':
return (Rr)
if opt == 'same':
return (Sr)
if opt == 'sameR':
return (realSame)
def convolve_NS(sig,mask):
"""convolution of 1 D array with 2D array
Parameters
----------
sig=1 D array
data to be deconvolved
mask :2 D array
Resolution Function
"""
con= np.dot(sig, mask)
return(con)
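# Minimal usage sketch (assumed shapes): for a length-N signal and an N x N
# resolution matrix the non-stationary convolution is a plain matrix-vector
# product, e.g.
# sig = np.ones(4)
# mask = np.eye(4)
# convolve_NS(sig, mask)  # -> array([1., 1., 1., 1.])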
def convolve_RelBlur(Iin,k):
convp=np.dot(Iin,k)
conv=convp[...,::-1]
return (conv)
# def shrink(y,a):
# if y>a:
# r=y-a
#
# if -a<y<a:
# r=0
#
# if y<a:
# r=y+a
#
# return r
def shrink(y,a):
L1norm=LA.norm(y, ord=1)
# print L1norm
r=(y/L1norm)*np.max([L1norm-a,0])
return r
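# Small numeric check of the shrink operator above: for y = np.array([3.0, -1.0])
# the L1 norm is 4, so shrink(y, 1.0) rescales y by max(4 - 1, 0) / 4 = 0.75,
# giving array([ 2.25, -0.75]).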
def FWHM(Y,X):
d = Y - (max(Y) / 2)
indexes = np.where(d > 0)[0]
return abs(X[indexes[-1]] - X[indexes[0]])
def scale(Y, minS,maxS):
zeroTO1=(Y-np.min(Y))/(np.max(Y)-np.min(Y))
scaling=(zeroTO1*(maxS-minS))+minS
return scaling
def split_Bregman(sig, mask, initial_d, initial_b, mu, lamda, ninner, nouter, max_cg):
"""Deconvolution using the split Bregman iteration with a non-stationary Resolution Function
Parameters
----------
sig : array
data to be deconvolved
mask : array
Resolution Function
initial_d : array
Bregman parameter
initial_b : array
Bregman parameter
mu : float
noise controlling parameter
lamda : float
step size
ninner : integer
number of iterations for the inner loop
nouter : integer
number of iterations for the outer loop
max_cg : integer
number of iterations for the conjugate gradient solver
"""
sigT=sig[np.newaxis].transpose()
maskT = mask.transpose()
uk=np.dot(maskT, sigT)
dk_x=initial_d[np.newaxis].transpose()
bk_x=initial_b[np.newaxis].transpose()
fk = sigT
for jouter in xrange(nouter):
for jinner in xrange(ninner):
ukp=uk
ifkt=np.dot(maskT, sigT)
rhs=mu*ifkt+lamda*(dk_x-bk_x)
ruk = np.dot(mask, uk)
iukt = np.dot(maskT,ruk)
r = rhs - mu * iukt -lamda *uk
p = r
rsold = np.dot(r.transpose(), r)
for i in xrange(max_cg):
rp=np.dot(mask,p)
irpt = np.dot(maskT ,rp)
Ap = mu * irpt + lamda *p
alpha = rsold / np.dot(p.transpose(),Ap)
uk = uk + alpha * p
r = r - alpha * Ap
rsnew = np.dot(r.transpose(),r)
if rsnew < 1e-32:
break
p = r + rsnew / rsold * p
rsold = rsnew
sk_x = uk + bk_x
dk_x = np.maximum(np.abs(sk_x)-1/lamda,0)*np.sign(sk_x)
bk_x = sk_x-dk_x
fk = fk + sigT - np.dot(mask, uk)
return uk
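# Hedged usage sketch (toy sizes and made-up parameter values):
# sig = np.random.rand(64)
# mask = np.eye(64)               # identity resolution function
# d0 = np.zeros(64)
# b0 = np.zeros(64)
# u = split_Bregman(sig, mask, d0, b0, mu=1.0, lamda=0.1,
#                   ninner=5, nouter=5, max_cg=30)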
def bregman_NS(sig, mask,iniGuessV, iniGuessU, neu_N, delta_ER, option,value): #neu_inverseNoise, delta_energyResolution
"""decolvolution using the Linearized Bregman Iteration with non-stationary resolution functions
Parameters
----------
sig=array
data to be deconvolved
mask :array
Resolution Function
iniGuessV :array
initial guess of the data
iniGuessU :array
initial guess of the data
option=string
'iteration' or 'error'
value: integer or float
number of iteration or the tolerance value for error
"""
mask_mir = mask.transpose()
def main(iniGuessU, iniGuessV, mask,sig,mask_mir,neu_N, delta_ER ):
sigC=convolve_NS(iniGuessU,mask)
relative_blur=sig-sigC
# with np.errstate(divide='ignore'):
# relative_blur[np.isinf(relative_blur)] = -2
deconvV = iniGuessV + convolve_NS(relative_blur, mask_mir)
deconvU = delta_ER*shrink(deconvV, 1/neu_N)
error=LA.norm((deconvV-iniGuessV))
errorBL = LA.norm(convolve_NS(deconvV, mask) - sig)
iniGuessV=deconvV
iniGuessU = deconvU
return(iniGuessV,iniGuessU,error,errorBL)
if option=='iteration':
error=0
it=value
for i in xrange(value):
iniGuessV, iniGuessU,error,errorBL=main(iniGuessU, iniGuessV, mask,sig,mask_mir,neu_N, delta_ER )
if option=='errorModel':
it=0
while True:
iniGuessV, iniGuessU,error,errorBL=main(iniGuessU, iniGuessV, mask,sig,mask_mir,neu_N, delta_ER )
it=it+1
# if error<value:
if errorBL >value:
break
print('number of iteration: {}'.format(it))
if option == 'error':
it = 0
while True:
iniGuessV, iniGuessU, error, errorBL = main(iniGuessU, iniGuessV, mask, sig, mask_mir, neu_N, delta_ER)
it = it + 1
if error<value:
break
print('number of iteration: {}'.format(it))
return (iniGuessV,iniGuessU,error,it,errorBL)
def deconvolve_NS(sig,mask,deconV,option,value):
"""decolvolution using the Lucy-Richardson algorithm with non-stationary RF
Parameters
----------
sig=array
data to be deconvolved
mask :array
Resolution Function
deconV :array
initial guess of the data
option=string
iteration
value: integer
number of iteration
"""
sig0=sig
mask_mir=mask[...,::-1]
deconv = deconV
def main(deconv,mask,sig0,mask_mir):
sigC=convolve_NS(deconv,mask)
relative_blur=sig0/sigC
with np.errstate(divide='ignore'):
relative_blur[np.isinf(relative_blur)] = -2
deconvP=deconv*convolve_RelBlur(relative_blur,mask_mir)
error=LA.norm((deconvP-deconv))
deconv=deconvP
return(deconv,error)
if option=='iteration':
error=0
it=value
for i in xrange(value):
deconv,error=main(deconv,mask,sig0,mask_mir)
if option=='error':
it=0
while True:
deconv,error=main(deconv,mask,sig0,mask_mir)
it=it+1
if error<value:
break
print('number of iteration: {}'.format(it))
return (deconv,error,it)
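# Hedged usage sketch (toy data, made-up parameter values):
# sig = np.abs(np.random.rand(32))
# mask = np.eye(32)               # identity resolution function
# guess = np.ones(32)
# deconv, err, n_it = deconvolve_NS(sig, mask, guess, 'iteration', 20)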
def deconvolve_TV_NS(sig,mask,deconV,eps,rgP,option,value):
"""decolvolution using the Lucy-Richardson algorithm with total variation minimization regularization for nonstationary RF
Parameters
----------
sig=array
data to be deconvolved
mask :array
Resolution Function
deconV :array
initial guess of the data
eps: floats
smoothing parameter for TV gradient
rgp : float
regularization parameter
option=string
iteration
value: integer
number of iteration
"""
sig0=sig
mask_mir=mask[...,::-1]
#m_tst=F.convolve(sig,mask,conv)
deconv = deconV
def main(deconv,mask,sig0,mask_mir,eps,rgP):
sigC=convolve_NS(deconv,mask)
relative_blur=sig0/sigC
with np.errstate(divide='ignore'):
relative_blur[np.isinf(relative_blur)] = -2
grad=np.gradient(deconv)
norm=np.sqrt(grad**2)
mod_norm=np.sqrt(eps**2+norm**2)
division=(grad)/mod_norm
division[np.isnan(division)] = 0.0
with np.errstate(divide='ignore'):
division[np.isinf(division)] = -2
divergence=np.gradient(division)
div_rgp=rgP*divergence
deconvP=(deconv/(1-(div_rgp)))*convolve_RelBlur(relative_blur,mask_mir)
error=np.abs(deconvP-deconv)
deconv=deconvP
return(deconv,error)
if option=='iteration':
error=0
for i in xrange(value):
deconv,error=main(deconv,mask,sig0,mask_mir,eps,rgP)
if option=='error':
it=0
while True:
deconv,error=main(deconv,mask,sig0,mask_mir,eps,rgP)
it=it+1
if np.all(error<value):
break
print('number of iteration: {}'.format(it))
return(deconv)
def deconvolve_L1_NS(sig,mask,deconV,eps,rgP,option,value):
"""decolvolution using the Lucy-Richardson algorithm with L1 norm regularization for nonstationary RF
Parameters
----------
sig=array
data to be deconvolved
mask :array
Resolution Function
deconV :array
initial guess of the data
eps: floats
smoothing parameter for TV gradient
rgp : float
regularization parameter
option=string
iteration
value: integer
number of iteration
"""
sig0=sig
mask_mir=mask[...,::-1]
deconv = deconV
def main(deconv,mask,sig0,mask_mir,eps,rgP):
sigC=convolve_NS(deconv,mask)
relative_blur=sig0/sigC
with np.errstate(divide='ignore'):
relative_blur[np.isinf(relative_blur)] = -2
norm=np.sqrt(deconv**2)
mod_norm=np.sqrt(eps**2+norm**2)
div_rgp=rgP*mod_norm
deconvP=(deconv/(1-(div_rgp)))*convolve_RelBlur(relative_blur,mask_mir)
error=np.abs(deconvP-deconv)
deconv=deconvP
return(deconv,error)
if option=='iteration':
error=0
for i in xrange(value):
deconv,error=main(deconv,mask,sig0,mask_mir,eps,rgP)
if option=='error':
it=0
while True:
deconv,error=main(deconv,mask,sig0,mask_mir,eps,rgP)
it=it+1
if np.all(error<value):
break
print('number of iteration: {}'.format(it))
return(deconv)
|
Python
|
CL
|
15999abc1a67d7eb4baaa036f87bf93a6e0e0347ed3f6783565c3a952d295c9f
|
import keras.backend as K
import matplotlib as mpl
import numpy as np
import skimage
import tensorflow as tf
from PIL import Image
from keras.applications.inception_v3 import InceptionV3 as PTModel
from keras.applications.inception_v3 import preprocess_input
from keras.layers import BatchNormalization
from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Input, Conv2D, multiply, Lambda
from keras.models import Model
from skimage import color
from skimage import img_as_ubyte
from tensorflow.python.keras import backend as Kt
class PredictionPipeline:
in_shape = (512, 512, 3)
class_nb = 2
weight_path = "{}_weights.best.hdf5".format('retina')
def __init__(self):
self.model = self.model_creator(self.in_shape, self.class_nb, self.weight_path)
self.graph = tf.get_default_graph()
def predict_image(self, path):
with self.graph.as_default():
img = self.open_image(path)
prediction = self.model.predict(np.expand_dims(img, axis=0))
img_heatmap = self.grad_cam(img, prediction)
return prediction, Image.fromarray(img_as_ubyte(img_heatmap))
@staticmethod
def pre_processing(X):
out_size = (512, 512)
with tf.name_scope('image_augmentation'):
with tf.name_scope('input'):
X = np.asarray(X)
X = tf.convert_to_tensor(X, np.float64)
X = tf.image.resize_images(X, out_size)
return preprocess_input(X)
@staticmethod
def model_creator(in_shape, class_nb, weight_path):
in_lay = Input(in_shape)
base_pretrained_model = PTModel(input_shape=in_shape, include_top=False,
weights=None)
base_pretrained_model.trainable = False
pt_depth = base_pretrained_model.get_output_shape_at(0)[-1]
pt_features = base_pretrained_model(in_lay)
bn_features = BatchNormalization()(pt_features)
attn_layer = Conv2D(64, kernel_size=(1, 1), padding='same', activation='relu')(Dropout(0.5)(bn_features))
attn_layer = Conv2D(16, kernel_size=(1, 1), padding='same', activation='relu')(attn_layer)
attn_layer = Conv2D(8, kernel_size=(1, 1), padding='same', activation='relu')(attn_layer)
attn_layer = Conv2D(1,
kernel_size=(1, 1),
padding='valid',
activation='sigmoid')(attn_layer)
up_c2_w = np.ones((1, 1, 1, pt_depth))
up_c2 = Conv2D(pt_depth, kernel_size=(1, 1), padding='same',
activation='linear', use_bias=False, weights=[up_c2_w], name='outcnn')
up_c2.trainable = False
attn_layer = up_c2(attn_layer)
mask_features = multiply([attn_layer, bn_features])
gap_features = GlobalAveragePooling2D()(mask_features)
gap_mask = GlobalAveragePooling2D()(attn_layer)
gap = Lambda(lambda x: x[0] / x[1], name='RescaleGAP')([gap_features, gap_mask])
gap_dr = Dropout(0.25)(gap)
dr_steps = Dropout(0.25)(Dense(128, activation='relu')(gap_dr))
out_layer = Dense(class_nb, activation='softmax')(dr_steps)
retina_model = Model(inputs=[in_lay], outputs=[out_layer])
retina_model.load_weights(weight_path)
return retina_model
def open_image(self, img):
img = self.pre_processing(img)
sess = Kt.get_session()
img = sess.run(img)
img = np.copy(img)
return img
def grad_cam(self, img, prediction, layer_output='outcnn', ratio=1.2):
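"""
Descriptive outline of this Grad-CAM style overlay: locate the single-channel
attention layer, take the gradient of the predicted class score with respect
to the output of the layer named by `layer_output`, average those gradients
per channel, weight the feature maps by the averages and collapse them into a
heatmap, normalise and resize it to 512 x 512, then blend it into the input
image through an HSV colour overlay whose saturation is scaled by `ratio`.
"""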
for attn_layer in self.model.layers:
c_shape = attn_layer.get_output_shape_at(0)
if len(c_shape) == 4:
if c_shape[-1] == 1:
print(attn_layer)
break
class_idx = np.argmax(prediction[0])
class_output = self.model.output[:, class_idx]
last_conv_layer = self.model.get_layer(layer_output)
x = np.expand_dims(img, axis=0)
grads = K.gradients(class_output, last_conv_layer.output)[0]
pooled_grads = K.mean(grads, axis=(0, 1, 2))
iterate = K.function([self.model.input], [pooled_grads, last_conv_layer.output[0]])
pooled_grads_value, conv_layer_output_value = iterate([x])
for i in range(512):
conv_layer_output_value[:, :, i] *= pooled_grads_value[i]
heatmap = np.mean(conv_layer_output_value, axis=-1)
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)
img = np.copy(np.clip(img * 127 + 127, 0, 255).astype(np.uint8))
heatmap = skimage.transform.resize(heatmap, (512, 512))
cm_hot = mpl.cm.get_cmap('hsv')
heatmap = cm_hot(heatmap)[:, :, :3]
heatmap = np.uint8(255 * heatmap)
img_hsv = color.rgb2hsv(img)
color_mask_hsv = color.rgb2hsv(heatmap)
img_hsv[..., 0] = color_mask_hsv[..., 0]
img_hsv[..., 1] = color_mask_hsv[..., 1] * ratio
superimposed_img = color.hsv2rgb(img_hsv)
return superimposed_img
|
Python
|
CL
|
2172f1b970d95619f1ea36c053a93cb5e01d17340539635c06a643d861ff7ba9
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# File : select_protocol.py
# Author : bssthu
# Project : rtk_trans
# Description : Edit this file; the protocol parsing tool to use is chosen based on the config
#
from rtk_protocol.base_protocol_handler import BaseProtocolHandler
def select_protocol(config):
"""根据配置选择协议解析类
Args:
config (dict): 配置
Returns:
return (BaseProtocolHandler): 协议解析工具的实例
"""
return BaseProtocolHandler(config)
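# Minimal usage sketch (hypothetical config dict):
# handler = select_protocol({'name': 'default'})
# assert isinstance(handler, BaseProtocolHandler)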
|
Python
|
CL
|
3d1df0ec038eb1fd22f29d95be8a7e57fdce3c0268ddca7338702dcca27124cc
|
# Generated by Django 2.0.6 on 2018-06-26 22:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('bookmark', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='PersonalBookmark',
fields=[
('bookmark_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='bookmark.Bookmark')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
bases=('bookmark.bookmark',),
),
migrations.RemoveField(
model_name='bookmark',
name='added_at',
),
migrations.RemoveField(
model_name='bookmark',
name='location_folder',
),
migrations.AddField(
model_name='bookmark',
name='bookmark_type',
field=models.CharField(choices=[('u', 'url'), ('f', 'folder')], default='u', max_length=6),
),
migrations.AddField(
model_name='bookmark',
name='url',
field=models.URLField(default='', editable=False),
),
]
|
Python
|
CL
|
0b90cf3ea8ab88911cd09cc7810e0375bf8be932ff7c99a25f8c41ae9318df5e
|
import socket
import binascii
import struct
import sys
import time
import random
import string
import threading
import queue
#import RPi.GPIO as GPIO # DESCOMENTAR
# Sensor constants.
PATH_LENGTH = 50
FREQUENCY = 1
# Sensor data queue.
QUEUE_SIZE = 10000
lectures_queue = queue.Queue(QUEUE_SIZE)
class MovementSensor:
"""
Routine of constant motion checking. Method which manages an ultrasonic
sensor. Provides approximations of human presence inside an area.
Parameters:
self: implicit object provided by python (kind of "this") when
a call is made from another file.
path_length: Size of the access path to the area of study
frequency: how often a reading is added to the queue
"""
def batarang_thrower(self):
trig = 23
echo = 24
GPIO.setmode(GPIO.BCM)
GPIO.setup(trig, GPIO.OUT)
GPIO.setup(echo, GPIO.IN)
people_counter = 0
chrono_start = time.time()
while True:
GPIO.output(trig, GPIO.LOW)
time.sleep(0.5)
GPIO.output(trig, GPIO.HIGH)
time.sleep(0.00001)
GPIO.output(trig, GPIO.LOW)
while True:
if GPIO.input(echo) == GPIO.HIGH:
pulso_inicio = time.time()
break
while True:
if GPIO.input(echo) == GPIO.LOW:
pulso_fin = time.time()
break
duracion = pulso_fin - pulso_inicio
distancia = (34300 * duracion) / 2
if distancia < PATH_LENGTH:
people_counter += 1
if time.time()-chrono_start >= FREQUENCY:
lectures_queue.put(people_counter)
chrono_start = time.time()
"""
Return the values of the queue to the client
Parameter
self:implicit object provided by python (Kind of "this") when
make a call from other file.
"""
def getMovementData(self):
return random.randint(1,777)
#return lectures_queue.get(True)
"""
Initializer of the thread that execute lectures constantly
Parameters:
self:implicit object provided by python (Kind of "this") when
make a call from other file.
path_length: Size of the access path to the area of study
frequency: how often adds a lecture to the queue
"""
def throw_batarang(self):
batarang = threading.Thread(target=self.batarang_thrower)
batarang.start()
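# Hedged usage sketch (requires the RPi.GPIO import to be enabled on real hardware):
# sensor = MovementSensor()
# sensor.throw_batarang()           # spawns the background reading thread
# count = sensor.getMovementData()  # currently returns a random placeholder value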
|
Python
|
CL
|
1688165cb9bb4dd8ca82a65b7e594fddac7295187118d2d1de49673683e1c01b
|
from PyQt5 import QtWidgets, uic
from PyQt5.QtCore import QThread, QObject, pyqtSignal
import sys
import helpers
import csv
from datetime import datetime
import time
err= {}
read_err_occured = False
class Worker(QObject):
finished = pyqtSignal() # give worker class a finished signal
def __init__(self, devices, device_addresses, registers, save_location, timeinterval, maxreading):
super(Worker, self).__init__()
self.save_location = save_location
self.devices = devices
self.device_addresses = device_addresses
self.registers = registers
self.timeinterval = timeinterval
self.maxreading = maxreading
def logging(self):
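# Descriptive note: rows are appended one reading at a time; once `maxreading`
# rows have been written the current CSV is closed and a new file with a
# three-digit numeric suffix (e.g. "<name>001.csv") is opened, and each pass
# over all registers is padded out to at least `timeinterval` seconds.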
self.continue_logging = True
# opens output file
csvfile = open(self.save_location, 'w', newline='')
csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"')
csvwriter.writerow(['Time Stamp', 'Address', 'Register', 'Value'])
num_entries = 0
file_num = 0
while self.continue_logging:
time_then = time.time()
for address in self.device_addresses:
for register in self.registers[address]:
time_now = time.time()
try:
reading = self.devices[address].read_register(register)
if reading > 65500:
reading = reading - 65535
except:
e = sys.exc_info()
reading = f"{e[0]}: {e[1]}"
global err
err[address][register] += 1
global read_err_occured
read_err_occured = True
dt = datetime.fromtimestamp(time_now).strftime('%m/%d/%Y %H:%M:%S.%f')
entry = [dt, address, register, reading]
csvwriter.writerow(entry)
num_entries += 1
if num_entries == self.maxreading:
csvfile.close()
file_num += 1
file_name = self.save_location[:-4] + f"{file_num:03}" + ".csv"
csvfile = open(file_name, 'w', newline='')
csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"')
csvwriter.writerow(['Time Stamp', 'Address', 'Register', 'Value'])
num_entries = 0
while time.time()-time_then < self.timeinterval:
continue
# when logging stopped
csvfile.close()
self.devices[self.device_addresses[0]].serial.close()
self.finished.emit()
def stop_logging(self):
self.continue_logging = False
class Ui(QtWidgets.QMainWindow):
stop_signal = pyqtSignal() # make a stop signal to communicate with the worker in another thread
def __init__(self):
super(Ui, self).__init__() # Call the inherited classes __init__ method
uic.loadUi('gui.ui', self) # Load the .ui file
self.show() # show window when obj created
self.refresh_ports()
self.portselection.currentTextChanged.connect(self.portselect)
# creating handles for modbus reading data
self.devices = {}
self.device_addresses = []
self.registers = {}
self.port = ""
self.save_location="./logs/log results.csv"
self.filenamebox.setText(self.save_location)
self.filenamebox.textChanged.connect(self.update_location)
# disconnects devices before closing window
quit = QtWidgets.QAction("Quit", self)
quit.triggered.connect(self.exit_window)
# refreshes available ports
self.refreshport.clicked.connect(self.refresh_ports)
# load json button reads json file, and assigns values to the reading parameters
self.loadjson.clicked.connect(self.load_json)
# opens a file browser to select save location
self.filebrowse.clicked.connect(self.open_save_location)
# Start Button action:
self.startbutton.clicked.connect(self.start)
# Stop Button action:
self.stopbutton.clicked.connect(self.stop_thread)
# Thread:
def create_thread(self):
self.thread = QThread()
self.worker = Worker(self.devices,
self.device_addresses,
self.registers,
self.save_location,
self.timeinterval.value(),
self.maxreading.value())
self.stop_signal.connect(self.worker.stop_logging) # connect stop signal to worker stop method
self.worker.moveToThread(self.thread)
self.thread.started.connect(self.worker.logging)
self.thread.finished.connect(self.worker.stop_logging)
self.worker.finished.connect(self.thread.quit) # connect the workers finished signal to stop thread
self.worker.finished.connect(self.worker.deleteLater) # connect the workers finished signal to clean up worker
self.thread.finished.connect(self.thread.deleteLater) # connect threads finished signal to clean up thread
global read_err_occured
read_err_occured = False
def start(self):
global err
err = {address:{register:0 for register in self.registers[address]} for address in self.device_addresses}
self.groupBox.setEnabled(False)
self.startbutton.setEnabled(False)
self.stopbutton.setEnabled(True)
self.statusdisplay.append(f"logging started at {datetime.fromtimestamp(time.time()).strftime('%m/%d/%Y %H:%M:%S')}")
self.create_thread()
self.thread.start()
# When stop_btn is clicked this runs. Terminates the worker and the thread.
def stop_thread(self):
self.stop_signal.emit() # emit the finished signal on stop
self.groupBox.setEnabled(True)
self.stopbutton.setEnabled(False)
self.startbutton.setEnabled(True)
self.statusdisplay.append(f"logging stopped at {datetime.fromtimestamp(time.time()).strftime('%m/%d/%Y %H:%M:%S')}")
if read_err_occured:
self.statusdisplay.append("number of reading errors encountered:")
self.statusdisplay.append(str(err))
else:
self.statusdisplay.append("no reading errors occured.")
def exit_window(self, event):
if self.devices:
self.devices[self.device_addresses[0]].serial.close()
self.close()
def open_save_location(self):
file_name, _ = QtWidgets.QFileDialog.getSaveFileName(self, "Save as", filter="*.csv")
self.filenamebox.setText(file_name)
def update_location(self):
inputtext = self.filenamebox.text()
if inputtext:
self.save_location = inputtext
else:
self.save_location="./logs/log results.csv"
self.filenamebox.setText(self.save_location)
def load_json(self):
file_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Open JSON file", filter="*.json")
try: # try to open json file, and error handling if screw up
# assigns values to reading parameters
self.device_addresses, self.registers = helpers.open_file(file_name)
except:
self.statusdisplay.append("Error opening JSON. Check JSON file formatting.")
return
self.statusdisplay.append(f"Loaded json: {file_name}")
self.statusdisplay.append("Registers to read:")
self.statusdisplay.append(str(self.registers))
try: # creates connection to devices.
self.devices = helpers.open_devices(self.port, self.device_addresses)
except:
self.statusdisplay.append("Device connection failed. try refreshing ports, check json file/device connection")
self.check_addresses()
def portselect(self, txt):
if txt:
self.port = txt
self.statusdisplay.append(f"Reading from port {self.port}")
'''else:
self.statusdisplay.append("No ports found")'''
def refresh_ports(self):
self.portselection.clear()
ports = helpers.find_ports()
if ports:
for i in ports:
self.portselection.addItem(i)
self.statusdisplay.append("ports refreshed")
if ports:
self.portselect(ports[0])
def check_addresses(self): # checks if addresses in JSON are valid. if so, enables start button
temp = False
slaveAddressRegister = 17697
for address in self.device_addresses:
try:
assert address == self.devices[address].read_register(slaveAddressRegister)
self.statusdisplay.append(f"Successfully connected to device {address}.")
temp = True
except:
self.statusdisplay.append(f"Failed to connect to device {address}.")
if temp:
self.startbutton.setEnabled(True)
self.statusdisplay.append("Ready to start.")
else:
self.startbutton.setEnabled(False)
self.statusdisplay.append("Device connection failed. try refreshing ports, check json file/device connection")
self.statusdisplay.append("Reload JSON to try again.")
#start ui when run
app = QtWidgets.QApplication(sys.argv)
window = Ui()
app.exec_()
|
Python
|
CL
|
5e643400606b7b33d7aa404446cff0f399fa7c367ed0640e67fe82d65658dcf6
|
import requests
import logging
from requests.exceptions import ConnectionError
logger = logging.getLogger(__name__)
class CrossrefUnwantedType(Exception):
pass
class CrossrefResponseException(Exception):
pass
class CrossrefNothingFoundException(Exception):
pass
class Crossref:
'''
CrossRef service
@see: http://api.crossref.org/
'''
API_URL = 'http://api.crossref.org/'
MAPPING = {'type': 'type',
'publisher': 'publisher',
'issue': 'issue',
'DOI': 'doi',
'volume': 'volume'}
API_ENDPOINT_WORKS = 'works'
API_RESPONSE_STATUS = 'status'
API_RESPONSE_STATUS_OK = 'ok'
API_RESPONSE_MESSAGE = 'message'
API_RESPONSE_MESSAGE_TYPE = 'message-type'
def query(self, resource, query=None, filter=None, sort=None, order=None):
'''
Query CrossRef
:param resource:
:param query:
:param filter:
:param sort:
:param order:
:raise CrossrefResponseException:
:raise CrossrefNothingFoundException:
'''
try:
params = {'query': query,
'filter': filter,
'sort': sort,
'order': order}
url = self.API_URL + resource
r = requests.get(url, params=params)
if r.status_code == 404:
raise CrossrefNothingFoundException('Nothing found')
elif r.status_code != 200:
raise CrossrefResponseException('Expected response status code 200, but it is {}'.format(r.status_code))
j = r.json()
if j[self.API_RESPONSE_STATUS] != self.API_RESPONSE_STATUS_OK:
raise CrossrefResponseException('Expected response status "ok", but it is "{}": '.format(j[self.API_RESPONSE_STATUS], j[self.API_RESPONSE_ESSAGE]))
return j[self.API_RESPONSE_MESSAGE_TYPE], j[self.API_RESPONSE_MESSAGE]
except(ConnectionError) as e:
raise e
def query_works(self, **kwargs):
'''
:raise CrossrefResponseException:
:raise CrossrefNothingFoundException:
'''
results = []
_, message = self.query(self.API_ENDPOINT_WORKS, **kwargs)
for item in message['items']:
try:
results.append(self.__parse_publication(item))
except(CrossrefUnwantedType) as e:
logger.info(str(e))
return results
def query_works_doi(self, doi, **kwargs):
'''
:param doi:
:raise CrossrefResponseException:
:raise CrossrefNothingFoundException:
:raise CrossrefUnwantedType:
'''
_, message = self.query('{}/{}'.format(self.API_ENDPOINT_WORKS, doi))
return self.__parse_publication(message)
def __parse_publication(self, item):
'''
:param item:
:raise CrossrefUnwantedType:
'''
if 'type' not in item:
raise CrossrefUnwantedType('Unwanted Crossref type: no type')
result = {'publication': {}}
container_title = None
for key, value in item.iteritems():
if key == 'type':
if value in ('proceedings-article', 'paper-conference'):
result['publication'][key] = 'inproceedings'
elif 'article' in value: # "article", "article-journal"
result['publication'][key] = 'article'
elif value in ('chapter', 'book-chapter', 'inbook'):
result['publication'][key] = 'incollection'
elif value == 'book':
result['publication'][key] = value
elif value == 'thesis':
result['publication'][key] = 'phdthesis'
else: # reference-entry, journal, dataset, component, standard
#result['publication'][key] = value
raise CrossrefUnwantedType('Unwanted Crossref type: {}'.format(value))
elif key == 'title' and len(value) > 0:
result['publication']['title'] = value[0]
elif key == 'container-title' and len(value) > 0:
container_title = value[-1].title()
elif key == 'page':
tmp = value.split('-')
result['publication']['page_from'] = tmp[0]
if len(tmp) == 2:
result['publication']['page_to'] = tmp[1]
del tmp
elif key == 'published-online' and 'date-parts' in value:
result['publication']['year'] = value['date-parts'][0][0]
elif key == 'license' and 'URL' in value:
result['publication']['copyright'] = value['URL']
elif key == 'subject':
result['keywords'] = value
elif key in self.MAPPING:
result['publication'][self.MAPPING[key]] = value
# relation: author
elif key == 'author':
result['authors'] = []
for author in value:
name = author['family']
if 'given' in author:
name += ', ' + author['given']
result['authors'].append(name)
# relation urls
elif key == 'link':
result['urls'] = []
for url in value:
result['urls'].append('{},{}'.format(url['content-type'], url['URL']))
if container_title:
if result['publication']['type'] == 'inproceedings':
result['conference_name'] = container_title
elif result['publication']['type'] == 'article':
result['journal_name'] = container_title
elif result['publication']['type'] in ('incollection', 'book'):
result['publication']['booktitle'] = container_title
else:
result['publication']['container-title'] = container_title
return result
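# Hedged usage sketch (requires network access; the query and DOI below are
# purely illustrative):
# cr = Crossref()
# works = cr.query_works(query='machine learning', filter='type:journal-article')
# single = cr.query_works_doi('10.1000/xyz123')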
|
Python
|
CL
|
59580d62198310a3d6063be18198bb246f29b6afea0833b7be6b334ab61e4c4c
|
# Copyright 2023 Consoli Solutions, LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`parse_cli` - Parses CLI output.
Public Methods & Data::
+-----------------------+---------------------------------------------------------------------------------------+
| Method | Description |
+=======================+=======================================================================================+
| switchshow | Adds a switch object to a project object from switchshow output |
+-----------------------+---------------------------------------------------------------------------------------+
| portbuffershow | Adds the portbuffershow output to the ports in a switch object |
+-----------------------+---------------------------------------------------------------------------------------+
| portstatsshow | Parse portstatsshow and add to the port objects |
+-----------------------+---------------------------------------------------------------------------------------+
| portstats64show | Parse portstats64show and add to the port objects |
+-----------------------+---------------------------------------------------------------------------------------+
| chassisshow | Adds a chassis object to a project object from chassisshow output |
+-----------------------+---------------------------------------------------------------------------------------+
| fabricshow | Adds a fabric object to a project object from fabricshow output |
+-----------------------+---------------------------------------------------------------------------------------+
| nsshow                | Parse nsshow output                                                                   |
+-----------------------+---------------------------------------------------------------------------------------+
| sfpshow | Parse sfpshow output |
+-----------------------+---------------------------------------------------------------------------------------+
| cfgshow | Parse cfgshow output |
+-----------------------+---------------------------------------------------------------------------------------+
| ficonshow | Parse ficonshow output |
+-----------------------+---------------------------------------------------------------------------------------+
| slotshow_d576 | Parse slotshow_d576 output |
+-----------------------+---------------------------------------------------------------------------------------+
| defzone | Parse defzone output |
+-----------------------+---------------------------------------------------------------------------------------+
Version Control::
+-----------+---------------+-----------------------------------------------------------------------------------+
| Version | Last Edit | Description |
+===========+===============+===================================================================================+
| 4.0.0 | 04 Aug 2023 | Re-Launch |
+-----------+---------------+-----------------------------------------------------------------------------------+
"""
__author__ = 'Jack Consoli'
__copyright__ = 'Copyright 2023 Consoli Solutions, LLC'
__date__ = '04 August 2023'
__license__ = 'Apache License, Version 2.0'
__email__ = 'jack_consoli@yahoo.com'
__maintainer__ = 'Jack Consoli'
__status__ = 'Released'
__version__ = '4.0.0'
import re
import time
import collections
import copy
import brcdapi.log as brcdapi_log
import brcdapi.util as brcdapi_util
import brcdapi.gen_util as gen_util
import brcddb.brcddb_common as brcddb_common
import brcddb.util.util as brcddb_util
import brcddb.brcddb_port as brcddb_port
def _conv_to_int(buf):
"""
:param buf: Value to convert to an integer
:type buf: str
:return: None if non-integer, otherwise the value in buf converted to an integer
:rtype: None, int
"""
return int(buf) if buf.isnumeric() else None
def _conv_to_lower(buf):
"""
:param buf: Value to convert to lower case
:type buf: str
:return: Value as passed if buf is not a string. Otherwise buf converted to lower case
:rtype: str
"""
return buf.lower() if isinstance(buf, str) else buf
_switchshow_tbl = {
'switchName': brcdapi_util.bfs_sw_user_name,
'switchType': brcdapi_util.bfs_model,
'switchDomain': brcdapi_util.bfs_did,
'switchId': brcdapi_util.bfs_fcid_hex,
'switchWwn': brcdapi_util.bfs_name,
'Fabric Name': brcdapi_util.bfs_fab_user_name,
}
_switch_0_1_boolean_off_on = {
# 'Base Switch': brcdapi_util.bfls_base_sw_en,
# 'Default Switch': brcdapi_util.bfls_def_sw_status,
# 'Ficon Switch': brcdapi_util.bfls_ficon_mode_en,
}
_switch_0_1_boolean_yes_no = {
'HIF Mode': brcdapi_util.bfls_ficon_mode_en,
'Base Switch': brcdapi_util.bfls_base_sw_en,
'Default Switch': brcdapi_util.bfls_def_sw_status,
'Ficon Switch': brcdapi_util.bfls_ficon_mode_en,
}
_switch_attributes_T_F = {
'Allow XISL Use': brcdapi_util.bfc_xisl_en,
}
_physical_port_state = {
'No_Light': 'no_light',
'No_Module': 'no_module',
'Mod_Val': 'mod_val',
'Mod_Inv': 'mod_inv',
'Mod_Uns': 'no_port',
'No_SigDet': 'no_sigdet',
'No_Sync': 'no_sync',
'In_Sync': 'in_sync',
'Laser_Flt': 'laser_flt',
'Port_Flt': 'port_flt',
'Hard_Flt': 'hard_flt',
'Lock_Ref': 'lock_ref',
'Testing': 'testing',
'Offline': 'offline',
'Online': 'online',
'Transient': 'unknown'
}
_skip_in_switch = ('port-member-list', 'ge-port-member-list', 'fibrechannel', 'media-rdp', '_neighbor', 'rnid')
# Port conversion tables. Used in portstats64show().
_portstats_to_api = {
# 'xxx': 'address-errors', In portcamshow
'er64_bad_eof': 'bad-eofs-received',
'er_bad_eof': 'bad-eofs-received',
'tim64_txcrd_z': 'bb-credit-zero',
'tim_txcrd_z': 'bb-credit-zero',
# 'xxx': 'class-1-frames',
'stat64_c2_frx': 'class-2-frames',
'stat_c2_frx': 'class-2-frames',
'er64_disc_c3': 'class-3-discards',
'er_disc_c3': 'class-3-discards',
'er64_tx_c3_timeout': 'class3-out-discards',
'er_tx_c3_timeout': 'class3-out-discards',
'stat64_c3_frx': 'class-3-frames',
'stat_c3_frx': 'class-3-frames',
'er64_rx_c3_timeout': 'class3-in-discards',
'er_rx_c3_timeout': 'class3-in-discards',
'er64_crc': 'crc-errors',
'er_crc': 'crc-errors',
# 'xxx': 'delimiter-errors', In portcamshow
# 'xxx': 'encoding-disparity-errors', In portcamshow
'er64_enc_out': 'encoding-errors-outside-frame',
'er_enc_out': 'encoding-errors-outside-frame',
# 'xxx': 'f-busy-frames',
# 'xxx': 'f-rjt-frames',
# 'xxx': 'frames-processing-required', In portcamshow
# 'xxx': 'frames-timed-out',
'er64_toolong': 'frames-too-long',
'er_toolong': 'frames-too-long',
# 'xxx': 'frames-transmitter-unavailable-errors', In portcamshow
'Invalid_CRC': 'in-crc-errors',
'er_crc_good_eof': 'in-crc-errors',
'stat64_rateRxFrame': 'in-frame-rate',
'stat64_frx': 'in-frames',
'stat_frx': 'in-frames',
'stat64_lc_rx': 'in-lcs',
'stat_lc_rx': 'in-lcs',
'Lr_in': 'in-link-resets',
# 'xxx': 'in-max-frame-rate',
'stat64_mc_rx': 'in-multicast-pkts',
'stat_mc_rx': 'in-multicast-pkts',
'stat64_wrx': 'in-octets',
'stat_wrx': 'in-octets',
'Ols_in': 'in-offline-sequences',
'stat64_rateRxPeakFrame': 'in-peak-rate',
# 'xxx': 'in-rate',
'stat64_inputBuffersFull': 'input-buffer-full',
'er_bad_os': 'invalid-ordered-sets',
'Invalid_word': 'invalid-transmission-words',
'Link_failure': 'link-failures',
'lli64': 'link-level-interrupts',
'Loss_of_sig': 'loss-of-signal',
'Loss_of_sync': 'loss-of-sync',
'stat64_mc_to': 'multicast-timeouts',
'stat_mc_to': 'multicast-timeouts',
'stat64_rateTxFrame': 'out-frame-rate',
'stat64_ftx': 'out-frames',
'stat_ftx': 'out-frames',
'Lr_out': 'out-link-resets',
# 'xx': 'out-max-frame-rate',
'stat64_mc_tx': 'out-multicast-pkts',
'stat_mc_tx': 'out-multicast-pkts',
'stat64_wtx': 'out-octets',
'stat_wtx': 'out-octets',
'Ols_out': 'out-offline-sequences',
'stat64_rateTxPeakFrame': 'out-peak-rate',
# 'xxx': 'out-rate',
'Fbsy': 'p-busy-frames',
'Frjt': 'p-rjt-frames',
'er64_pcs_blk': 'pcs-block-errors',
'er_pcs_blk': 'pcs-block-errors',
'Protocol_err': 'primitive-sequence-protocol-error',
'er64_trunc': 'truncated-frames',
'er_trunc': 'truncated-frames',
}
# SFP (media-rdp) used in sfpshow()
_sfp_to_api_1 = {
'Connector': dict(p=2, id='media-rdp/connector', type='str'),
'Current': dict(p=1, id=brcdapi_util.sfp_current, type='float'),
'Date Code': dict(p=2, id='media-rdp/date-code', type='str'),
'Encoding': dict(p=2, id='media-rdp/encoding', type='str'),
'Identifier': dict(p=2, id='media-rdp/identifier', type='str'),
'Vendor PN': dict(p=2, id=brcdapi_util.sfp_pn, type='str'),
'Pwr On Time:': dict(p=5, id=brcdapi_util.sfp_power_on, type='int'),
'RX Power': dict(p=4, id=brcdapi_util.sfp_rx_pwr, type='float'),
'Serial No': dict(p=2, id=brcdapi_util.sfp_sn, type='str'),
'Temperature': dict(p=1, id=brcdapi_util.sfp_temp, type='int'),
'TX Power': dict(p=4, id=brcdapi_util.sfp_tx_pwr, type='float'),
'Vendor Name': dict(p=2, id=brcdapi_util.sfp_vendor, type='str'),
'Vendor OUI': dict(p=2, id=brcdapi_util.sfp_oui, type='str'),
'Vendor Rev': dict(p=2, id='media-rdp/vendor-revision', type='str'),
'Voltage': dict(p=1, id=brcdapi_util.sfp_volt, type='float'),
'Wavelength': dict(p=1, id=brcdapi_util.sfp_wave, type='int'),
}
# Used in _pbs_port_type() to interpret the Port Type in portbuffershow output
_pbs_port_types = dict(
E=brcddb_common.PORT_TYPE_E,
F=brcddb_common.PORT_TYPE_F,
)
# Used in xxx to interpret "Avg Buffer Usage & FrameSize"
_pbs_avg_buf_conv = (
'average-transmit-buffer-usage',
'average-transmit-frame-size',
'average-receive-buffer-usage',
'average-receive-frame-size')
# Build a reverse port type lookup table
_physical_pbs_port_type = dict()
for _key, _v in brcddb_common.port_conversion_tbl[brcdapi_util.fc_port_type].items():
if _key != brcddb_common.PORT_TYPE_UNKONWN:
_physical_pbs_port_type.update({_v: _key})
# Used in _slotshow_d576(), _chassishow_wwn, _chassishow_blade() & _chassishow_ps()
_unit_conv_tbl = {
'AP_BLADE': dict(key=brcdapi_util.fru_blade, unit='slot-number', status='blade-state', ok_status='enabled', b=True),
'CP_BLADE': dict(key=brcdapi_util.fru_blade, unit='slot-number', status='blade-state', ok_status='enabled', b=True),
'CP BLADE Slot': dict(key=brcdapi_util.fru_blade, unit='slot-number', status='blade-state', ok_status='enabled',
b=True),
'SW_BLADE': dict(key=brcdapi_util.fru_blade, unit='slot-number', status='blade-state', ok_status='enabled', b=True),
'SW BLADE Slot': dict(key=brcdapi_util.fru_blade, unit='slot-number', status='blade-state', ok_status='enabled',
b=True),
'CORE_BLADE': dict(key=brcdapi_util.fru_blade, unit='slot-number', status='blade-state', ok_status='enabled',
b=True),
'CORE BLADE Slot': dict(key=brcdapi_util.fru_blade, unit='slot-number', status='blade-state', ok_status='enabled',
b=True),
'PWR_SUPP': dict(key=brcdapi_util.fru_ps, unit='unit-number', status='operational-state', ok_status='ok', b=False),
'POWER SUPPLY Unit': dict(key=brcdapi_util.fru_ps, unit='unit-number', status='operational-state',
ok_status='ok', b=False),
'BLOWER': dict(key=brcdapi_util.fru_fan, unit='unit-number', status='operational-state', ok_status='ok', b=False),
'FAN Unit': dict(key=brcdapi_util.fru_fan, unit='unit-number', status='operational-state', ok_status='ok', b=False),
'WWN_CARD': dict(key=brcdapi_util.fru_wwn, unit='unit-number', status='operational-state', ok_status='ok', b=False),
'WWN Unit': dict(key=brcdapi_util.fru_wwn, unit='unit-number', status='operational-state', ok_status='ok', b=False),
'UNKNOWN': dict(key=None, unit=None, status=None, ok_status=None, b=False),
}
""" _slotshow_d576_tbl
key API leaf
    api     i: position in the command line after conditioning with _slotshow_d576_clean and split on ' '
c: If present, the conversion between the command output and the value for the API
int: If True, convert to an integer. The default is False
"""
_slotshow_fru_id = {brcdapi_util.fru_blade: 'blade-id',
brcdapi_util.fru_ps: 'unit-number',
brcdapi_util.fru_fan: 'unit-number',
brcdapi_util.fru_wwn: 'unit-number'}
_slotshow_state = dict(ON='enabled',
ENABLED='enabled',
OFF='disabled',
DISABLED='disabled',
OUT='vacant',
FLTY='faulty')
_slotshow_ps = {'unit-number': dict(i=0, int=True), 'operational-state': dict(i=2, c=dict(ON='ok', FLTY='faulty'))}
_slotshow_d576_tbl = dict(
AP_BLADE=dict(key=brcdapi_util.fru_blade,
api={'blade-id': dict(i=2, int=True),
'slot-number': dict(i=0, int=True),
'blade-state': dict(i=3, c=_slotshow_state),
'blade-type': dict(i=1, c=dict(AP_BLADE='ap blade'))},),
CP_BLADE=dict(key=brcdapi_util.fru_blade,
api={'blade-id': dict(i=2, int=True),
'slot-number': dict(i=0, int=True),
'blade-state': dict(i=3, c=_slotshow_state),
'blade-type': dict(i=1, c=dict(CP_BLADE='cp blade'))},),
SW_BLADE=dict(key=brcdapi_util.fru_blade,
api={'blade-id': dict(i=2, int=True),
'slot-number': dict(i=0, int=True),
'blade-state': dict(i=3, c=_slotshow_state),
'blade-type': dict(i=1, c=dict(SW_BLADE='sw blade'))},),
CORE_BLADE=dict(key=brcdapi_util.fru_blade,
api={'blade-id': dict(i=2, int=True),
'slot-number': dict(i=0, int=True),
'blade-state': dict(i=3, c=_slotshow_state),
'blade-type': dict(i=1, c=dict(CORE_BLADE='core blade'))},),
UNKNOWN=dict(key=brcdapi_util.fru_blade,
api={'slot-number': dict(i=0, int=True),
'blade-state': dict(i=2, c=_slotshow_state),
'blade-type': dict(i=1, c=dict(UNKNOWN='unknown'))},),
PWR_SUPP=dict(key=brcdapi_util.fru_ps, api=_slotshow_ps),
BLOWER=dict(key=brcdapi_util.fru_fan, api=_slotshow_ps),
WWN_CARD=dict(key=brcdapi_util.fru_wwn, api={'unit-number': dict(i=0)})
)
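# Illustrative example of how the table above drives _slotshow() (the sample line is hypothetical):
# a 'slotshow -d576' line such as '3 SW BLADE 178 ON' is conditioned by _slotshow_d576_clean to
# '3 SW_BLADE 178 ON' and split on ' ', so the SW_BLADE entry yields
# {'slot-number': 3, 'blade-type': 'sw blade', 'blade-id': 178, 'blade-state': 'enabled'}.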
_slotshow_m_tbl = dict(
AP_BLADE=dict(key=brcdapi_util.fru_blade,
api={'blade-id': dict(i=2, int=True),
'slot-number': dict(i=0, int=True),
'blade-state': dict(i=4, c=_slotshow_state),
'blade-type': dict(i=1, c=dict(AP_BLADE='ap blade'))},),
CP_BLADE=dict(key=brcdapi_util.fru_blade,
api={'blade-id': dict(i=2, int=True),
'slot-number': dict(i=0, int=True),
'blade-state': dict(i=4, c=_slotshow_state),
'blade-type': dict(i=1, c=dict(CP_BLADE='cp blade'))},),
SW_BLADE=dict(key=brcdapi_util.fru_blade,
api={'blade-id': dict(i=2, int=True),
'slot-number': dict(i=0, int=True),
'blade-state': dict(i=4, c=_slotshow_state),
'blade-type': dict(i=1, c=dict(SW_BLADE='sw blade'))},),
CORE_BLADE=dict(key=brcdapi_util.fru_blade,
api={'blade-id': dict(i=2, int=True),
'slot-number': dict(i=0, int=True),
'blade-state': dict(i=4, c=_slotshow_state),
'blade-type': dict(i=1, c=dict(CORE_BLADE='core blade'))},),
UNKNOWN=dict(key=brcdapi_util.fru_blade,
api={'slot-number': dict(i=0, int=True),
'blade-state': dict(i=2, c=_slotshow_state),
'blade-type': dict(i=1, c=dict(UNKNOWN='unknown'))},),
PWR_SUPP=dict(key=brcdapi_util.fru_ps, api=_slotshow_ps),
BLOWER=dict(key=brcdapi_util.fru_fan, api=_slotshow_ps),
WWN_CARD=dict(key=brcdapi_util.fru_wwn, api={'unit-number': dict(i=0)})
)
def _split_parm(buf):
"""Splits lines with param: value. Returns value less any leading/trailing space
:param buf: Line from CLI output to split
:type buf: str
:return k: Parameter key. '' if ':' not in buf
:return v: Value - input str, buf, less the parameter key. '' if ':' not in buf
:rtype v: str, None
"""
if isinstance(buf, str):
tl = buf.split(':')
if len(tl) > 1:
tl[1] = tl[1].lstrip()
return tl[0], ':'.join(tl[1:]).rstrip()
return '', ''
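# Example of _split_parm() (derived from the function above): _split_parm('switchName: my_switch')
# returns ('switchName', 'my_switch'); a line with no ':' returns ('', '').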
def switchshow(obj, content, append_buf=''):
"""Adds a switch object to a project object from switchshow output
:param obj: Project object or object with a project object associated with it
:type obj: brcddb.classes.project.ProjectObj
    :param content: Beginning of switchshow output text
:type content: list
:param append_buf: Text to append to the WWN when creating a key
:type append_buf: str
:return switch_obj: Switch object
:rtype switch_obj: brcddb.classes.switch.SwitchObj
:return i: Index into content where we left off
:rtype i: int
"""
switch_obj, proj_obj = None, obj.r_project_obj()
for buf in content:
if 'switchWwn:' in buf:
k, v = _split_parm(buf)
switch_obj = proj_obj.s_add_switch(v + append_buf)
break
if switch_obj is None:
        brcdapi_log.exception('Could not find switchWwn in switchshow output', echo=True)
        return switch_obj, 0  # Keep the documented (switch_obj, i) return signature
# Get the basic switch information
i = 0
while len(content) > i:
buf = content[i]
if len(buf) > len('Index') and buf[0: len('Index')] == 'Index' or 'LS Attributes:' in buf:
break
k, v = _split_parm(buf)
if k == 'switchId':
v = '0x' + v
elif k == 'switchDomain':
v = int(v.replace(' (unconfirmed)', ''))
if k in _switchshow_tbl:
brcddb_util.add_to_obj(switch_obj, _switchshow_tbl[k], v)
elif k == 'switchRole':
brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfs_principal, 1 if 'Principal' in v else 0)
elif k == 'switchState':
if v == 'Online':
brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfs_op_status, 2)
brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfs_enabled_state, True)
else:
brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfs_op_status, 3)
brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfs_enabled_state, False)
elif k in _switch_attributes_T_F.keys():
brcddb_util.add_to_obj(switch_obj, _switch_attributes_T_F[k], False if 'OFF' in v.upper() else True)
elif k in _switch_0_1_boolean_off_on.keys():
brcddb_util.add_to_obj(switch_obj, _switch_0_1_boolean_off_on[k], 0 if 'OFF' in v.upper() else 1)
elif k in _switch_0_1_boolean_yes_no.keys():
brcddb_util.add_to_obj(switch_obj, _switch_0_1_boolean_yes_no[k], 0 if 'NO' in v.upper() else 1)
i += 1
brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfs_sw_user_name, switch_obj.r_get(brcdapi_util.bfs_sw_user_name))
brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfs_did, switch_obj.r_get(brcdapi_util.bfs_did))
    # Get the logical switch attributes. Note that these are formatted on a single line rather than in a list as the
# other switch attributes are displayed.
if 'LS Attributes:' in buf:
for t_buf in buf[len('LS Attributes:'):].replace('[', '').replace(']', '').replace('\t', '').strip().split(','):
cl = [c.strip() for c in t_buf.split(':')]
if len(cl) == 1 and 'Address Mode' in cl[0]:
brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfc_area_mode, int(cl[0].split(' ')[2]))
elif len(cl) == 2 and cl[0] in _switch_0_1_boolean_off_on.keys():
brcddb_util.add_to_obj(switch_obj,
_switch_0_1_boolean_off_on[cl[0]],
0 if 'OFF' in cl[1].upper() else 1)
elif len(cl) == 2 and cl[0] in _switch_0_1_boolean_yes_no.keys():
brcddb_util.add_to_obj(switch_obj, _switch_0_1_boolean_yes_no[cl[0]], 0 if 'NO' in cl[1].upper() else 1)
i += 1
# Figure out where the indices are for the port parameters. Note that they are different for bladed vs. fixed port
# switches and ge ports do not have an index
port_index = dict()
while len(content) > i:
buf = content[i]
if 'Index' in buf and 'Media' in buf:
cl = gen_util.remove_duplicate_char(buf, ' ').strip().split(' ')
for x in range(0, len(cl)):
port_index.update({cl[x]: x})
break
i += 1
# Now get the port information
switch_port_list = list()
brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfc_area_mode, switch_port_list)
i += 2 # Skip the line just below it that has ================ in it
while len(content) > i:
buf = content[i].replace('\t', ' ').strip()
cl = gen_util.remove_duplicate_char(buf, ' ').split(' ')
if len(cl) < 6:
break
if 'ge' in cl[0]:
cl.insert(1, None) # It's a fixed port switch. ge ports do not have an FC address
cl.insert(0, None) # ge ports do not have an index
elif 'ge' in cl[1]:
cl.insert(2, None) # It's a director. ge ports do not have an FC address
cl.insert(0, None) # ge ports do not have an index or an FC address
else:
cl[port_index['Index']] = int(cl[port_index['Index']])
cl[port_index['Address']] = '0x' + cl[port_index['Address']]
proto = cl[port_index['Proto']]
if proto == 'FC' or proto == 'VE' or proto == 'FCIP':
port_desc = ' '.join(cl[port_index['Proto']:])
port_num = '0' if port_index.get('Slot') is None else cl[port_index.get('Slot')]
port_num += '/' + cl[port_index['Port']]
physical_state = _physical_port_state.get(cl[port_index['State']])
try:
speed = int(gen_util.non_decimal.sub('', cl[port_index['Speed']])) * 1000000000
except ValueError:
speed = 32000000000
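            # Example: a Speed column value such as 'N32' reduces to 32 and is stored as
            # 32000000000 (bits/sec); a non-numeric value (e.g. '--') raises ValueError above
            # and falls back to the 32G default.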
port_d = {
'name': port_num,
'index': cl[port_index['Index']],
'fcid-hex': cl[port_index['Address']],
'auto-negotiate': 1 if 'N' in cl[port_index['Speed']] else 0,
'speed': speed,
'operational-status': 2 if 'Online' in cl[port_index['State']] else 3,
'is-enabled-state': False if 'Disabled' in port_desc or 'license not assigned' in port_desc else True,
'physical-state': 'unknown' if physical_state is None else physical_state,
'neighbor': dict(wwn=list()),
}
for k, v in _physical_pbs_port_type.items():
if k in port_desc:
port_d.update(({'port-type': v}))
break
if port_d.get('port-type') is None:
port_d.update({'port-type': brcddb_common.PORT_TYPE_U}) # Typical of an offline port
switch_port_list.append(port_num)
port_obj = switch_obj.s_add_port(port_num) if proto == 'FC' \
else switch_obj.s_add_ve_port(port_num) if proto == 'VE' \
else switch_obj.s_add_ge_port(port_num) if proto == 'FCIP' \
else None
if port_obj is None:
brcdapi_log.exception('Unexpected error in: ' + buf, echo=True)
port_obj.s_new_key('fibrechannel', port_d)
i += 1
return switch_obj, i
# Case statement methods in portbuffershow()
def _pbs_user_port(port_obj, v):
brcddb_util.add_to_obj(port_obj, brcdapi_util.fc_index, int(v) if v.isnumeric() else 0)
def _pbs_port_type(port_obj, v):
port_type = _pbs_port_types.get(v)
brcddb_util.add_to_obj(port_obj,
brcdapi_util.fc_port_type,
brcddb_common.PORT_TYPE_UNKONWN if port_type is None else port_type)
def _pbs_lx_mode(port_obj, v):
return # $ToDo: Finish _pbs_lx_mode()
def _pbs_max_resv(port_obj, v):
return # $ToDo: Finish _pbs_max_resv()
def _pbs_avg_buffer_usage(port_obj, v):
tl = v.replace('-', '0').replace(' ', '').replace(')', '(').split('(')
for i in range(0, len(_pbs_avg_buf_conv)):
try:
val = int(tl[i])
except (IndexError, ValueError):
val = 0
brcddb_util.add_to_obj(port_obj, 'fibrechannel/' + _pbs_avg_buf_conv[i], val)
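# Illustrative example for _pbs_avg_buffer_usage() above (the sample value is hypothetical):
# an "Avg Buffer Usage & FrameSize" value of '10(1024) 5(512)' reduces to
# ['10', '1024', '5', '512', ''] after the replace/split, so the port gets
# average-transmit-buffer-usage=10, average-transmit-frame-size=1024,
# average-receive-buffer-usage=5, and average-receive-frame-size=512.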
def _pbs_buffer_usage(port_obj, v):
brcddb_util.add_to_obj(port_obj, 'fibrechannel/current-buffer-usage', int(v) if v.isnumeric() else 0)
def _pbs_needed_buffers(port_obj, v):
return # $ToDo: _pbs_needed_buffers() - is this 'reserved-buffers'? I don't think so but what?
def _pbs_link_distance(port_obj, v):
return # $ToDo: _pbs_link_distance() - Finish
def _pbs_remaining_buffers(port_obj, v):
return # $ToDo: Finish _pbs_remaining_buffers()
def portbuffershow(obj, content):
"""Adds the portbuffershow output to the ports in a switch object
:param obj: Switch object or object with a switch object associated with it
:type obj: brcddb.classes.switch.SwitchObj
:param content: List of portbuffershow output text
:type content: list
"""
switch_obj = obj.r_switch_obj()
    # The output is formatted for a human so we have to figure out the beginning and end of each item
    # Create a dictionary to put the start and end indices in
port_buf_d = collections.OrderedDict()
port_buf_d['user_port'] = dict(a=_pbs_user_port)
port_buf_d['port_type'] = dict(a=_pbs_port_type)
port_buf_d['lx_mode'] = dict(a=_pbs_lx_mode)
port_buf_d['max_resv'] = dict(a=_pbs_max_resv)
port_buf_d['avg_pbs_buffer_usage'] = dict(a=_pbs_avg_buffer_usage)
port_buf_d['buffer_usage'] = dict(a=_pbs_buffer_usage)
port_buf_d['needed_buffers'] = dict(a=_pbs_needed_buffers)
port_buf_d['link_distance'] = dict(a=_pbs_link_distance)
port_buf_d['remaining_buffers'] = dict(a=_pbs_remaining_buffers)
# Figure out where everything aligns. $ToDo - Parse Remaining Buffers
buf_l = [content.pop(0) for i in range(0, 3)]
key_l = list(port_buf_d.keys())
active_d = port_buf_d[key_l.pop(0)]
last_d, state, i = None, 0, 0
for char in buf_l[2]:
if state == 0:
if char == '-':
if isinstance(last_d, dict):
last_d.update(e=i-1)
active_d.update(s=i)
if len(key_l) > 0:
last_d = active_d
active_d = port_buf_d[key_l.pop(0)]
else:
break
state = 1
else:
if char == ' ':
state = 0
i += 1
active_d.update(e=len(buf_l[2])-1)
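    # A note on the scan above: each run of '-' in the third header line marks one column; the
    # start of a run records 's' for the current column and the character just before the next
    # run records 'e' for the previous one, so buf[s:e] below slices that column's value out of
    # every data row.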
# Now parse the portbuffershow output
for buf in content:
if '----------------------------------------------------------------------------' in buf:
continue
for k, d in port_buf_d.items():
v = buf[port_buf_d[k]['s']:port_buf_d[k]['e']].strip()
if k == 'user_port':
port_obj = brcddb_port.port_obj_for_index(switch_obj, int(v))
d['a'](port_obj, v)
return
# Case methods used in _portstatsshow_special
def _stats_tim_txcrd_z_vc(port_obj):
return
def _stats_phy_stats_clear_ts(port_obj):
return
def _stats_lgc_stats_clear_ts(port_obj):
return
def _stats_latency_dma_ts(port_obj):
return
_portstatsshow_special = dict(
tim_txcrd_z_vc=_stats_tim_txcrd_z_vc,
phy_stats_clear_ts=_stats_phy_stats_clear_ts,
lgc_stats_clear_ts=_stats_lgc_stats_clear_ts,
latency_dma_ts=_stats_latency_dma_ts,
)
def portstatsshow(obj, content):
"""Parse portstatsshow and add to the port objects
:param obj: Switch object or object with a switch object associated with it
:type obj: brcddb.classes.switch.SwitchObj
:param content: List of portstatsshow output text
:type content: list
"""
global _portstats_to_api
port_obj, port_stats_d, switch_obj = None, None, obj.r_switch_obj()
for buf in content:
buf = buf.replace('er_single_credit_loss', 'er_single_credit_loss ')
buf = buf.replace('er_multi_credit_loss', 'er_multi_credit_loss ')
buf = buf.replace('fec_corrected_rate', 'fec_corrected_rate ')
buf = buf.replace('latency_dma_ts', 'latency_dma_ts ')
tl = gen_util.remove_duplicate_char(buf.replace('\t',' '), ' ').split(' ')
if len(tl) < 2:
continue
if tl[0] == 'port:':
port_obj = brcddb_port.port_obj_for_index(switch_obj, int(tl[1].strip()))
if port_obj is None:
brcdapi_log.exception('Could not find port matching: ' + buf, echo=False) # Just so it gets in the log
raise Exception('Could not find port matching: ' + buf)
port_stats_d = port_obj.r_get(brcdapi_util.stats_uri)
if port_stats_d is None:
port_stats_d = dict(name=port_obj.r_obj_key())
port_obj.s_new_key(brcdapi_util.stats_uri, port_stats_d)
elif tl[0] in _portstatsshow_special:
_portstatsshow_special[tl[0]](port_obj)
else:
key = _portstats_to_api.get(tl[0])
if key is not None:
port_stats_d.update({key: int(tl[1])})
def portstats64show(obj, content):
"""Parse portstats64show and add to the port objects
:param obj: Chassis object or object with a chassis object associated with it
:type obj: brcddb.classes.chassis.ChassisObj
:param content: List of portstats64show output text
:type content: list
:return i: Index into content where we left off
:rtype i: int
"""
global _portstats_to_api
i, x, chassis_obj = 0, len('portstats64show'), obj.r_chassis_obj()
while len(content) > i:
# Get the port object
buf = gen_util.remove_duplicate_char(content[i].replace('\t', ' '), ' ')
if len(buf) == 0:
i += 1
continue
if len(buf) < x or buf[0:x] != 'portstats64show':
break
index = int(buf.split(' ')[1])
port_obj = brcddb_port.port_obj_for_index(chassis_obj, int(buf.split(' ')[1]))
if port_obj is None:
brcdapi_log.exception('Could not find port matching: ' + buf, echo=False) # Just so it gets in the log
raise Exception('Could not find port matching: ' + buf)
port_stats_d = port_obj.r_get(brcdapi_util.stats_uri)
if port_stats_d is None:
port_stats_d = dict()
port_obj.s_new_key(brcdapi_util.stats_uri, port_stats_d)
# Parse the port statistics
i += 1
while len(content) > i and len(content[i]) > 0:
buf = gen_util.remove_duplicate_char(content[i].replace('\t', ' '), ' ')
cl = buf.split(' ')
key = _portstats_to_api.get(cl[0])
if key is not None:
if 'top_int :' in buf:
i += 1
lv = int(gen_util.remove_duplicate_char(content[i].replace('\t', ' ').strip().split(' ')[0], ' '))
v = int('{:x}'.format(int(cl[1])) + '{:08x}'.format(lv), 16)
else:
v = int(cl[1])
port_stats_d.update({key: v})
i += 1
return i
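# Worked example of the 64-bit reassembly above (counter values are hypothetical): if a
# 'top_int' line reports 1 and the following 'bottom_int' line reports 2, the combined value
# is int('1' + '00000002', 16) = 0x100000002 = 4294967298, i.e. top * 2**32 + bottom.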
"""Cases for chassisshow. All parameters are as follows:
chassis_obj The chassis object in _parsed_ss
    cl          Current line parsed into a list, .split(':')
i Index into content for the current line
n If not None, the API branch & leaf associated with the value
return Index into content for the next line to be processed"""
_chassis_to_api = { # supportshow names converted to API names
'System AirFlow': 'airflow-direction',
'Power Consume Factor': 'power-usage',
'Factory Part Num': 'part-number',
'Factory Serial Num': 'serial-number',
'Generation Num': 'generation-number',
'Time Alive': 'time-alive',
'Time Awake': 'time-awake',
}
def _chassishow_unit_parse(chassis_obj, content, cl, i, n, d):
x = i
while len(cl) > 1:
if cl[0] in _chassis_to_api:
if cl[0] in ('Time Alive', 'Time Awake', 'Power Consume Factor', 'Generation Num'):
d.update({_chassis_to_api[cl[0]]: int(gen_util.non_decimal.sub('', cl[1]))})
else:
d.update({_chassis_to_api[cl[0]]: cl[1]})
x += 1
cl = [p.strip() for p in gen_util.remove_duplicate_char(content[x].replace('\t', ' '), ' ').split(':')]
return x
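# Illustrative example for _chassishow_unit_parse() (the chassisshow lines are hypothetical):
# a unit section containing 'Time Alive: 100 days' and 'Factory Serial Num: ABC123' adds
# {'time-alive': 100, 'serial-number': 'ABC123'} to the unit dictionary d.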
def _chassishow_add(chassis_obj, content, cl, i, n):
brcddb_util.add_to_obj(chassis_obj, n, cl[1])
return i + 1
def _chassishow_add_int(chassis_obj, content, cl, i, n):
brcddb_util.add_to_obj(chassis_obj, n, int(gen_util.non_decimal.sub('', cl[1])))
return i + 1
def _chassishow_unit(chassis_obj, content, cl, i, key):
# Get this object entry - we may have captured this blade already with slotshow
try:
obj = _chassis_unit_obj(chassis_obj, key, _unit_conv_tbl[cl[0]]['unit'], int(cl[1]))
return _chassishow_unit_parse(chassis_obj, content, cl, i, key, obj)
except ValueError:
return i + 1 # This happens when there is an * by the unit number which is typical of faulty components
_chassisshow_actions = {
'Chassis Family': dict(m=_chassishow_add, n=brcdapi_util.bc_product_name),
'Chassis Backplane Revision': dict(m=_chassishow_add, n=brcdapi_util.bc_vendor_rev_num),
'Chassis Factory Serial Num': dict(m=_chassishow_add, n=brcdapi_util.bc_serial_num),
'Time Alive': dict(m=_chassishow_add_int, n=brcdapi_util.bc_time_alive),
'Time Awake': dict(m=_chassishow_add_int, n=brcdapi_util.bc_time_awake),
'WWN Unit': dict(m=_chassishow_unit, n=brcdapi_util.fru_wwn),
'SW BLADE Slot': dict(m=_chassishow_unit, n=brcdapi_util.fru_blade),
'CP BLADE Slot': dict(m=_chassishow_unit, n=brcdapi_util.fru_blade),
'CORE BLADE Slot': dict(m=_chassishow_unit, n=brcdapi_util.fru_blade),
'POWER SUPPLY Unit': dict(m=_chassishow_unit, n=brcdapi_util.fru_ps),
'FAN Unit': dict(m=_chassishow_unit, n=brcdapi_util.fru_fan),
}
def chassisshow(obj, content):
"""Adds a chassis object to a project object from chassisshow output
:param obj: Project object or object with a project object associated with it
:type obj: brcddb.classes.project.ProjectObj
    :param content: Beginning of chassisshow output text
:type content: list
:return chassis_obj: Chassis object
:rtype chassis_obj: brcddb.classes.chassis.ChassisObj
:return ri: Index into content where we left off
:rtype ri: int
"""
ri, chassis_obj, proj_obj = 0, None, obj.r_project_obj()
for buf in content:
if 'Chassis Factory Serial Num:' in buf:
chassis_obj = proj_obj.s_add_chassis(buf.split(':')[1].strip())
break
if chassis_obj is None:
# If we haven't found it yet, pick the first WWN card. Get a chassis S/N by first finding "WWN Unit:", then
# look for Factory Serial Num:
for buf in content:
ri += 1
if 'WWN Unit:' in buf:
break
elif 'timeout' in buf:
return chassis_obj, ri
for buf in content[ri:]:
ri += 1
if 'Factory Serial Num:' in buf:
chassis_obj = proj_obj.s_add_chassis(buf.split(':')[1].strip())
break
elif 'timeout' in buf:
break
# Parse the chassis data and add to the chassis object
    if chassis_obj is not None:
tl = content[0:ri]
i = 1
while len(tl) > i:
buf = tl[i]
cl = [p.strip() for p in gen_util.remove_duplicate_char(buf.replace('\t', ' '), ' ').split(':')]
if len(cl) > 1:
if cl[0] in _chassisshow_actions:
i = _chassisshow_actions[cl[0]]['m'](chassis_obj, tl, cl, i, _chassisshow_actions[cl[0]]['n'])
else:
i += 1
else:
i += 1
return chassis_obj, ri
def fabricshow(obj, content):
"""Adds a fabric object to a project object from fabricshow output
:param obj: Project object or object with a project object associated with it
:type obj: brcddb.classes.project.ProjectObj
    :param content: Beginning of fabricshow output text
:type content: list
:return fabric_obj: Fabric object
:rtype fabric_obj: brcddb.classes.fabric.FabricObj
:return ri: Index into content where we left off
:rtype ri: int
"""
ri, fab_obj, proj_obj = 0, None, obj.r_project_obj()
# Skip to where the fabric list starts (after the '-----------------------')
for buf in content:
buf = content[ri]
ri += 1
if '-version' in buf or 'no fabric' in buf or 'SS CMD END' in buf:
return fab_obj, ri
if '-----------------------' in buf:
break
brocade_fabric = list()
while len(content) > ri:
buf = content[ri]
ri += 1
if len(buf) == 0 or 'The Fabric has' in buf or 'Fabric had' in buf or 'SS CMD END' in buf:
break
l = gen_util.remove_duplicate_char(buf.strip(), ' ').split(' ')
if len(l) > 5:
if l[5][0] == '>': # It's the principal switch
fab_obj = proj_obj.s_add_fabric(l[2])
brocade_fabric.append({
'domain-id': int(l[0].replace(':', '')),
'fcid-hex': '0x' + l[1],
'name': l[2],
'ip-address': brcdapi_util.mask_ip_addr(l[3]),
'fcip-address': brcdapi_util.mask_ip_addr(l[4]),
'principal': 1 if '>' in l[5] else 0,
'switch-user-friendly-name': l[5].replace('"', '').replace('>', ''),
})
if fab_obj is not None:
brcddb_util.add_to_obj(fab_obj, 'brocade-fabric/fabric-switch', brocade_fabric)
for d in brocade_fabric:
fab_obj.s_add_switch(d['name'])
return fab_obj, ri
"""nsshow CLI output to API map. Used in nsshow() to add data from nsshow output to the login object. The sub-dictionary
is as follows:
+-----------+---------------+---------------------------------------------------------------------------------------+
| Key | Type | Description |
+===========+===============+=======================================================================================+
| uri | str | URI used in the API |
+-----------+---------------+---------------------------------------------------------------------------------------+
| conv | None, dict | Conversion table or method to convert the values from CLI output to the API value. |
| | method | Note: As of this writting, there were no dictionaries but the mechanics are present |
| | | in the code to use one. The ability to hardcode an int, str, list, or tuple has also |
| | | been coded. |
+-----------+---------------+---------------------------------------------------------------------------------------+
"""
_nsshow_to_api = {
'SCR': dict(uri=brcdapi_util.bns_scr),
'PortSymb': dict(uri=brcdapi_util.bns_port_symbol),
'NodeSymb': dict(uri=brcdapi_util.bns_node_symbol),
'Fabric Port Name': dict(uri=brcdapi_util.bns_fab_port_name, conv=_conv_to_lower),
'Permanent Port Name': dict(uri=brcdapi_util.bns_perm_port_name,
conv=_conv_to_lower),
'Port Index': dict(uri=brcdapi_util.bns_port_index, conv=_conv_to_int),
'Partial': dict(uri=brcdapi_util.bns_partial, conv=_conv_to_lower),
'LSAN': dict(uri=brcdapi_util.bns_lsan, conv=_conv_to_lower),
'Slow Drain Device': dict(uri=brcdapi_util.bns_sddq, conv=_conv_to_lower),
'Device link speed': dict(uri=brcdapi_util.bns_link_speed),
'Connected through AG': dict(uri=brcdapi_util.bns_connect_ag, conv=_conv_to_lower),
'Real device behind AG': dict(uri=brcdapi_util.bns_dev_behind_ag, conv=_conv_to_lower),
'FCoE': dict(uri=brcdapi_util.bns_fcoe_dev, conv=_conv_to_lower),
}
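# Illustrative example (the nsshow detail line is hypothetical): 'Port Index: 23' matches the
# 'Port Index' entry above, so the value is converted with _conv_to_int (assumed to return an
# int) and stored under brcdapi_util.bns_port_index for the login object.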
def nsshow(obj, content):
"""Parse nsshow output
:param obj: Fabric object or object with a fabric object associated with it
:type obj: brcddb.classes.fabric.FabricObj
    :param content: Beginning of nsshow output text
:type content: list
:return ri: Index into content where we left off
:rtype ri: int
"""
fab_obj, port_obj, ri = obj.r_fabric_obj(), None, 0
buf = content[ri]
if 'nsshow' in buf:
ri += 1
buf = content[ri] # Skip past the invocation line
if len(buf) == 0 or 'There is no entry' in buf:
return ri + 1
while len(content) > ri:
buf = content[ri]
ri += 1
        # Are we done processing nsshow output?
if len(buf) == 0 or '}' in buf:
break
if len(buf) > 3:
if buf[0:3] in (' N ', ' U ', ' NL'): # Is there a new login?
cl = [b.lower() for b in buf[3:].replace(' ', '').split(';')]
login_obj = fab_obj.s_add_login(cl[2].lower())
brcddb_util.add_to_obj(login_obj, brcdapi_util.bns_port_id, '0x' + cl[0])
brcddb_util.add_to_obj(login_obj, brcdapi_util.bns_node_name, cl[3])
brcddb_util.add_to_obj(login_obj, brcdapi_util.bns_port_name, cl[2])
port_obj = fab_obj.r_port_obj_for_pid(cl[0])
if port_obj is not None:
nl = port_obj.r_get(brcdapi_util.fc_neighbor_wwn)
if nl is None:
nl = list()
brcddb_util.add_to_obj(port_obj, brcdapi_util.fc_neighbor_wwn, nl)
nl.append(cl[2])
else:
cl = [b.strip() for b in buf.split(':', 1)]
cntl_d = _nsshow_to_api.get(cl[0])
if isinstance(cntl_d, dict):
api_k = cntl_d['uri']
if api_k is not None:
val = cl[1]
val_c = cntl_d.get('conv')
if callable(val_c):
val = val_c(val)
elif isinstance(val_c, dict):
val = val if val_c.get(val) is None else val_c[val]
elif isinstance(val_c, (int, str, list, tuple)):
val = val_c
brcddb_util.add_to_obj(login_obj, api_k, val)
return ri
_sfpshow_state_start = 0  # Looking for the first ============== above the port number
_sfpshow_state_1st_sep = _sfpshow_state_start + 1  # Looking for subsequent ============== above the port number
_sfpshow_state_port = _sfpshow_state_1st_sep + 1 # Next line should be the port
_sfpshow_state_2nd_sep = _sfpshow_state_port + 1 # Next line should be ========= separator after port number
_sfpshow_state_parms = _sfpshow_state_2nd_sep + 1 # Next line should be one of the SFP parameters.
_sfp_sep = '======'
_sfp_sep_len = len(_sfp_sep)
_sfp_start_match = re.compile(r'(sfpshow|Media not installed|does not use)', re.IGNORECASE)
_sfp_skip_match = re.compile(r'(No SFP installed|does not use)', re.IGNORECASE)
_sfp_clean_port = re.compile(r'(Slot|Port|:|\t| )')
def sfpshow(obj, content):
"""Parse sfpshow output
:param obj: Switch object or object with a switch object associated with it
:type obj: brcddb.classes.switch.SwitchObj
    :param content: Beginning of sfpshow output text
:type content: list
:return ri: Index into content where we left off
:rtype ri: int
"""
global _sfp_sep, _sfp_sep_len, _sfpshow_state_start, _sfpshow_state_port, _sfpshow_state_1st_sep, _sfp_start_match
global _sfp_to_api_1
switch_obj, state, port_num, port_obj, ri = obj.r_switch_obj(), _sfpshow_state_start, None, None, 0
for buf in content:
buf = gen_util.remove_duplicate_char(buf.replace('\t', ' '), ' ')
if 'CURRENT CONTEXT' in buf:
pass
elif state == _sfpshow_state_start:
            # I don't remember why I check for the port separator, ===== right away. It should always begin with
# sfpshow -all.
if len(buf) >= _sfp_sep_len and buf[0:_sfp_sep_len] == _sfp_sep:
port_num, port_obj, state = None, None, _sfpshow_state_port
elif len(buf) > 0: # Ignore blank lines
if 'sfpshow -all' in buf:
state = _sfpshow_state_1st_sep
else:
break # There are no SFPs in this switch
elif state == _sfpshow_state_1st_sep: # Looking for the first line separator before the port number
port_num = port_obj = None
if len(buf) >= _sfp_sep_len and buf[0:_sfp_sep_len] == _sfp_sep:
state = _sfpshow_state_port
elif len(buf) == 0 or _sfp_start_match.search(buf):
pass
else:
ri -= 1
break
elif state == _sfpshow_state_port: # This should be the port number
port_num = _sfp_clean_port.sub('', buf)
if '/' not in port_num:
port_num = '0/' + port_num
port_obj = switch_obj.r_port_obj(port_num)
if port_obj is None:
brcdapi_log.exception(port_num + ' not found.', echo=False) # It's probably an IP port so just log it
state = _sfpshow_state_2nd_sep
elif state == _sfpshow_state_2nd_sep: # Looking for ==== separator after port number
if len(buf) >= _sfp_sep_len and buf[0:_sfp_sep_len] == _sfp_sep:
state = _sfpshow_state_parms
else:
brcdapi_log.exception('Invalid sfpshow output. Expected "=====", found ' + buf, echo=True)
state = _sfpshow_state_start
elif state == _sfpshow_state_parms: # Parsing parameters. Exit this state on "Last poll time:"
if _sfp_skip_match.search(buf):
state = _sfpshow_state_1st_sep
ri += 1
continue
if port_obj.r_get('media-rdp/name') is None:
brcddb_util.add_to_obj(port_obj, 'media-rdp/name', 'fc/' + port_num)
cl = gen_util.remove_duplicate_char(buf.replace(':', ': ', 1), ' ').split(' ')
param = buf.split(':')[0]
# Transceiver requires special handling
if param == 'Transceiver':
try:
vl = [int(gen_util.non_decimal.sub('', c)) for c in cl[2].split(',')]
except ValueError:
vl = list() # Typical of older SFP
brcddb_util.add_to_obj(port_obj, brcdapi_util.sfp_speed, vl)
# 'Long_dist' is the most common for LWL optics but there are others such as Smart Optics. I have no
# idea what they look like in supportshow output and getting it exactly right wasn't important for
# anything I was working on at the time I wrote this so just 'long' was good enough.
vl = ['short'] if 'Short_dist' in buf else ['long']
brcddb_util.add_to_obj(port_obj, brcdapi_util.sfp_distance, vl)
else:
# Process normal parameters
d = _sfp_to_api_1.get(param)
if d is not None:
try:
if d['type'] == 'int':
v = int(gen_util.non_decimal.sub('', cl[d['p']]))
elif d['type'] == 'float':
v = float(gen_util.non_decimal.sub('', cl[d['p']]))
else:
v = cl[d['p']]
except ValueError:
v = cl[d['p']] # typically -inf for nothing read
brcddb_util.add_to_obj(port_obj, d['id'], v)
if 'Last poll time' in buf:
state = _sfpshow_state_1st_sep
ri += 1
return ri
def cfgshow_zone_gen(fab_obj, member_l):
zone_type, peer_mem_l, pmem_l = brcddb_common.ZONE_STANDARD_ZONE, list(), list()
if len(member_l) > 0 and gen_util.is_wwn(member_l[0], full_check=False) and member_l[0].split(':')[0] == '00':
"""It's a peer zone. Note that a WWN with a leading '00' is not a valid WWN so this is used to indicate that the
        WWN is a property parameter for a peer zone. This is easiest to explain with an example:
00:02:00:00:00:03:01:02, principal_alias_1, principal_alias_2, member_alias_1, member_alias_2
The only bytes I ever look at are the first byte and the last byte of the WWN. Breaking the WWN down:
00 This indicates it's a peer zone and that this WWN is a peer zone property member (not an actual
zone member)
02:00:00:00 Not relevant
        03:01       I can take a guess but I don't use this. Since I don't use it, my example may not be correct.
02 The last byte is the number of principal WWN members, not the number of aliases that follow. Keep in
mind that an alias can have multiple WWNs. Assuming each alias represents a single WWN, this means
the next two members are the principal members. All remaining members therefore are the peer members
Keep in mind that all bytes in a WWN, including the property member described above, are hex values.
"""
zone_type, p, i, pc = brcddb_common.ZONE_USER_PEER, int(member_l[0].split(':')[7], 16), 1, 0
while pc < p:
alias_obj = fab_obj.r_alias_obj(member_l[i])
pc += 1 if alias_obj is None else len(alias_obj.r_members())
i += 1
pmem_l, peer_mem_l = member_l[1:i], member_l[i:]
else:
peer_mem_l = member_l
return zone_type, peer_mem_l, pmem_l
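# Worked example based on the docstring above (aliases are hypothetical and assumed to resolve
# to one WWN each): member_l = ['00:02:00:00:00:03:01:02', 'ali_p1', 'ali_p2', 'ali_m1']
# returns (brcddb_common.ZONE_USER_PEER, ['ali_m1'], ['ali_p1', 'ali_p2']) - the last byte,
# 0x02, means the first two aliases are principal members and the rest are peer members.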
def _cfgshow_def_zone_act(fab_obj, name, mem_l):
zone_type, sl, pl = cfgshow_zone_gen(fab_obj, mem_l)
fab_obj.s_add_zone(name, zone_type, sl, pl)
def _cfgshow_alias_act(fab_obj, name, mem_l):
fab_obj.s_add_alias(name, mem_l)
def _cfgshow_def_cfg_act(fab_obj, name, mem_l):
fab_obj.s_add_zonecfg(name, mem_l)
def _cfgshow_eff_zone_act(fab_obj, name, mem_l):
zone_type, sl, pl = cfgshow_zone_gen(fab_obj, mem_l)
fab_obj.s_add_eff_zone(name, zone_type, sl, pl)
def _cfgshow_eff_cfg_act(fab_obj, name, mem_l):
brcddb_util.add_to_obj(fab_obj.s_add_eff_zonecfg(mem_l), brcdapi_util.bz_eff_cfg, name)
"""A state machine is used to parse the cfgshow output. The state machine is designed to accomplish two objectives:
* Find the transitions from:
* Start
* Defined zone section (note that the actions differ for defined zones and effective zones)
* Effective zone
* End
* The action to take for each item after parsing is complete
The dictionaries used in _cfgshow_operand_tbl are as follows:
state The next state after processing of the current state is complete
da The action to take for this item when it is in the defined zone
ea The action to take for this item when it is in the effective zone
"""
_cfgshow_state_start = 0 # Looking for "Defined configuration:"
_cfgshow_state_def = _cfgshow_state_start + 1 # Found "Defined configuration:"
_cfgshow_state_eff = _cfgshow_state_def + 1 # Found "Effective configuration:"
_cfgshow_state_continue = _cfgshow_state_eff + 1  # Continue processing cfg:, zone:, and alias:
_cfgshow_state_exit = _cfgshow_state_continue + 1 # Finished processing cfgshow output
_cfgshow_operand_tbl = {
'Defined_configuration:': dict(state=_cfgshow_state_def),
'Effective_configuration:': dict(state=_cfgshow_state_eff),
'cfg:': dict(state=_cfgshow_state_continue, da=_cfgshow_def_cfg_act, ea=_cfgshow_eff_cfg_act),
'zone:': dict(state=_cfgshow_state_continue, da=_cfgshow_def_zone_act, ea=_cfgshow_eff_zone_act),
'alias:': dict(state=_cfgshow_state_continue, da=_cfgshow_alias_act),
}
_cfgshow_clean_buf = (
(';', ' '),
('\t', ' '),
('Defined configuration:', 'Defined_configuration:'),
('Effective configuration:', 'Effective_configuration:'),
)
def _cfgshow_process(state, buf):
"""Sorts out parameters in cfgshow() and checks for state changes
:param state: Current state - one of _cfgshow_state_*
:type state: int
:param buf: Current line being processed
:type buf: str
:return state: Next state
:rtype state: int
    :return operand: Operand (name of configuration, zone, or alias). None if not present
:rtype operand: str, None
:return rl: List of members associated with the operand
:rtype rl: list()
"""
global _cfgshow_state_eff, _cfgshow_state_exit
operand, rl, next_state, t_buf, key = None, list(), None, buf, None
# Clean up the line for processing
for tl in _cfgshow_clean_buf:
t_buf = t_buf.replace(tl[0], tl[1])
tl = [b.strip() for b in gen_util.remove_duplicate_char(t_buf.strip(), ' ').split(' ') if len(b.strip()) > 0]
    # Figure out what the key, operand, and content are
k = tl[0] if len(tl) > 0 else None
if k is not None and k in _cfgshow_operand_tbl:
operand = tl[1] if len(tl) > 1 else None
rl = tl[2:] if len(tl) > 2 else list()
else:
k, operand, rl = None, None, tl
# Figure out what the next state is
if len(tl) == 0:
next_state = _cfgshow_state_exit if state == _cfgshow_state_eff else _cfgshow_state_eff
elif 'no configuration defined' in buf:
next_state = _cfgshow_state_eff
elif 'no configuration in effect' in buf:
next_state = _cfgshow_state_exit
elif operand is not None and operand in _cfgshow_operand_tbl:
next_state = _cfgshow_operand_tbl[operand]['state']
return next_state, k, operand, rl
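# Illustrative example (the cfgshow line is hypothetical): ' zone: zone_A ali_1; ali_2' is
# cleaned and split into ['zone:', 'zone_A', 'ali_1', 'ali_2'], so _cfgshow_process() returns
# (None, 'zone:', 'zone_A', ['ali_1', 'ali_2']); in this parser, blank lines and the
# 'no configuration ...' messages are what drive the state transitions.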
_cfgshow_template_d = dict(key='null', operand=None, mem_l=list())
def cfgshow(obj, content):
"""Parse cfgshow output
:param obj: Fabric object or object with a fabric object associated with it
:type obj: brcddb.classes.fabric.FabricObj
    :param content: Beginning of cfgshow output text
:type content: list
:return ri: Index into content where we left off
:rtype ri: int
"""
global _cfgshow_state_exit, _cfgshow_state_start
    # Initialize local and return variables
fab_obj, ri, mem_l, last_key, last_operand = obj.r_fabric_obj(), 0, list(), None, None
last_state = state = _cfgshow_state_start
active_d, def_l, eff_l = _cfgshow_template_d.copy(), list(), list()
active_l = def_l
# Parse the cfgshow output
for buf in content:
state, key, operand, mem_l = _cfgshow_process(state, buf)
if state is not None and state == _cfgshow_state_exit:
break
if key is not None:
active_l.append(active_d)
active_d = _cfgshow_template_d.copy()
active_d['key'], active_d['operand'], active_d['mem_l'] = key, operand, mem_l
if key == 'Defined_configuration:':
active_l = def_l
elif key == 'Effective_configuration:':
active_l = eff_l
else:
active_d['mem_l'].extend(mem_l)
ri += 1
active_l.append(active_d.copy())
# Process (add to brcddb objects) the parsed data. Note that an alias must be unbundled, see comments in
# cfgshow_zone_gen(), before evaluating peer zone. Hence the order below.
action_key = 'da'
for active_l in (def_l, eff_l):
for cfg_key in ('alias:', 'zone:', 'cfg:'):
action = _cfgshow_operand_tbl[cfg_key].get(action_key)
if callable(action):
for active_d in [d for d in active_l if d['key'] == cfg_key]:
action(fab_obj, active_d['operand'], active_d['mem_l'])
action_key = 'ea'
return ri
def ficonshow(obj, content):
"""Parse ficonshow output
:param obj: Switch object or object with a switch object associated with it
:type obj: brcddb.classes.switch.SwitchObj
    :param content: Beginning of ficonshow output text
:type content: list
:return ri: Index into content where we left off
:rtype ri: int
"""
switch_obj, ri = obj.r_switch_obj(), 0
# Find where the first entry is by searching for 'Sequence#' in the header
for buf in content:
ri += 1
if '}' in buf:
return ri
if 'Sequence#' in buf:
break
# Process all the entries
for buf in content[ri:]:
ri += 1
if '}' in buf:
break
# Process each entry
cl = gen_util.remove_duplicate_char(buf.replace('\t', ' '), ' ').strip().split(' ')
if len(cl) > 12: # It should always be 13
pid = '0x' + cl[2].lower()
port_obj = switch_obj.r_port_obj_for_pid(pid)
if port_obj is None:
brcdapi_log.exception(['Could not find port matching ' + pid + ' in:', buf], echo=True)
continue
ficon_d = {
'link-address': pid[0:6],
'format': cl[0],
'port-type': cl[1],
'registered-port-wwn': cl[3],
'registered-node-wwn': cl[4],
'flags': cl[5],
'node-parameters': cl[6],
'type-number': cl[7],
'model-number': cl[8],
'manufacturer': cl[9],
'plant': cl[10],
'sequence-number': cl[11],
'tag': '0x' + cl[12],
}
port_obj.s_new_key('rnid', ficon_d)
else:
brcdapi_log.exception(['Invalid data for ficonshow rnid table:', buf], echo=True)
return ri
_slotshow_d576_clean = (
('\t', ' '),
(' BLADE', '_BLADE'),
(' SUPP', '_SUPP'),
(' CARD', '_CARD'),
)
_slotshow_d576_int = dict(
CP_BLADE={
'blade-state': dict(ON='enabled', OFF='disabled', FLTY='faulty')
}
)
def _chassis_unit_obj(chassis_obj, key, unit, unit_num):
"""Finds a chassis unit (blade, power supply, fan, or WWN) in the chassis. Creates one if not found
:param chassis_obj: Chassis object as in _parsed_ss['chassis']
:type chassis_obj: dict
:param key: API key for the unit
:type key: str
    :param unit: Name of the key in the unit dictionary that holds the unit number (e.g. 'slot-number' or 'unit-number')
    :type unit: str
    :param unit_num: Unit number to find
    :type unit_num: int
    :return: Dictionary for the unit. A new entry is created and appended if one is not found
    :rtype: dict
"""
unit_list = gen_util.convert_to_list(brcddb_util.get_from_obj(chassis_obj, key))
for obj in unit_list:
if obj[unit] == unit_num:
return obj
obj = dict(unit=unit_num)
unit_list.append(obj)
return obj
def _slotshow_get_fru(chassis_obj, api_key):
rl, rd = chassis_obj.r_get(api_key), None
if rl is None:
rl = list()
brcddb_util.add_to_obj(chassis_obj, api_key, rl)
return rl, rd
s_key = _slotshow_fru_id.get(api_key)
if s_key is None:
brcdapi_log.exception('Unknown key: ' + api_key, echo=True)
return rl, rd
for d in rl:
if d.get(s_key) is not None and d.get(s_key) == id:
return rl, d
return rl, rd
def _slotshow(obj, content, slotshow_d):
"""Parse slotshow -d576 output
:param obj: Chassis object or object with a switch object associated with it
:type obj: brcddb.classes.chassis.ChassisObj
    :param content: Beginning of slotshow output text
:type content: list
:param slotshow_d: Either _slotshow_d576_tbl or _slotshow_m_tbl
:type slotshow_d: dict
:return ri: Index into content where we left off
:rtype ri: int
"""
global _slotshow_d576_clean, _slotshow_get_fru
chassis_obj, ri = obj.r_chassis_obj(), 0
# Skip past the header
for buf in content:
ri += 1
if '--------' in buf:
break
# Parse the output
for buf in content[ri:]:
ri += 1
for tl in _slotshow_d576_clean:
buf = buf.replace(tl[0], tl[1])
cl = gen_util.remove_duplicate_char(buf.strip(), ' ').split(' ')
if len(cl) < 4:
break
if '*' in cl[0]: # It's a note at the end of the slotshow for one of the FRUs - typically faulty
break
# Get the FRU
unit_d = slotshow_d.get(cl[1])
if unit_d is None:
            brcdapi_log.exception(['Unknown FRU Type: ' + cl[1] + ' in:', buf], echo=True)
continue
fru_l, fru_d = _slotshow_get_fru(chassis_obj, unit_d['key'])
api_d = unit_d['api']
d = dict()
for k, v in api_d.items():
val = int(cl[v['i']]) if v.get('int') is not None and v.get('int') else cl[v['i']]
if v.get('c') is not None and v.get('c').get(val) is not None:
val = v['c'][val]
d.update({k: val})
if fru_d is None:
fru_l.append(d)
else: # We already have this FRU. Just add to the dictionary
for k, v in d.items():
                if k not in fru_d:  # Only add the leaf if it's not already in the dictionary for this FRU
                    fru_d.update({k: v})
return ri
def slotshow_d576(obj, content):
"""Parse slotshow -d576 output
:param obj: Chassis object or object with a switch object associated with it
:type obj: brcddb.classes.chassis.ChassisObj
    :param content: Beginning of slotshow output text
:type content: list
:return ri: Index into content where we left off
:rtype ri: int
"""
global _slotshow_d576_tbl
return _slotshow(obj, content, _slotshow_d576_tbl)
def slotshow_m(obj, content):
"""Parse slotshow -m output
:param obj: Chassis object or object with a switch object associated with it
:type obj: brcddb.classes.chassis.ChassisObj
    :param content: Beginning of slotshow output text
:type content: list
:return ri: Index into content where we left off
:rtype ri: int
"""
global _slotshow_m_tbl
return _slotshow(obj, content, _slotshow_m_tbl)
def defzone(obj, content):
"""Parse defzone output
:param obj: Fabric object or object with a fabric object associated with it
:type obj: brcddb.classes.fabric.FabricObj
    :param content: Beginning of defzone output text
:type content: list
:return ri: Index into content where we left off
:rtype ri: int
"""
ri, fabric_obj = 0, obj.r_fabric_obj()
all_access = fabric_obj.r_get(brcdapi_util.bz_eff_default_zone)
for buf in content:
ri += 1
if 'committed' in buf:
if all_access is None:
access = brcddb_common.DEF_ZONE_ALLACCESS if 'All Access' in buf else brcddb_common.DEF_ZONE_NOACCESS
brcddb_util.add_to_obj(fabric_obj, brcdapi_util.bz_eff_default_zone, access)
break
elif 'zone --show' in buf and 'defzone' not in buf:
brcdapi_log.exception(['Could not find in "committed" in:'] + content[0:7], echo=True)
ri = max(0, ri-1)
break
return ri
|
Python
|
CL
|
921a31f160f38388fbf5a8bacc527cc54c4af3d84ef6ad6667315de37835b1bd
|
captions = ['Adapter Name', 'Dns Suffix', 'Description', 'Friendly Name', 'Physical Address (MAC)', 'Physical Address Length', 'Flags', 'Mtu', 'If Type', 'Oper Status', 'Ipv6IfIndex', 'ZoneIndices']
from pywingui.windows import *
from pywingui.wtl import *
from pywingui import comctl
from pywingui.lib import form
from pywingui.error import NO_ERROR, ERROR_NO_DATA
from pywingui.network.iphlpapi import GetAdaptersAddresses
from pywingui.network.ipifcons import *
from pywingui.network.iptypes import GAA_FLAG_SKIP_UNICAST, GAA_FLAG_SKIP_ANYCAST, GAA_FLAG_SKIP_MULTICAST, GAA_FLAG_SKIP_DNS_SERVER
comctl.InitCommonControls(comctl.ICC_USEREX_CLASSES)
class Form(form.Form):
_form_menu_ = [(MF_POPUP, '&File', [(MF_STRING, '&Exit', form.ID_EXIT)])]
_window_title_ = 'GetAdaptersAddresses Example'
def __init__(self, *args, **kwargs):
form.Form.__init__(self, *args, **kwargs)
#~ self.list_view.SetItemCount(len(captions))
#~ self.list_view.SetRedraw(1)
lvcolumn = comctl.LVCOLUMN(comctl.LVCF_TEXT|comctl.LVCF_WIDTH, 0, 150, 'item')
self.list_view.InsertColumn(0, lvcolumn)
lvcolumn = comctl.LVCOLUMN(comctl.LVCF_TEXT|comctl.LVCF_WIDTH, 0, 350, 'value')
self.list_view.InsertColumn(1, lvcolumn)
item_flags = comctl.LVIF_TEXT|comctl.LVIF_DI_SETITEM
items = []
for i in range(len(captions)):
item = comctl.LVITEM(item_flags)
item.iItem = i
item.pszText = captions[i]
self.list_view.InsertItem(item)
# now setup second column of current row, change iSubItem
item.iSubItem = 1
item.pszText = 'value %d' % i
self.list_view.SetItem(item)
items.append(item)
#~ dwRetval, adapter_addresses, size = GetAdaptersAddresses(family = GAA_FLAG_SKIP_UNICAST | GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_DNS_SERVER, flags = 2)#AF_INET)
dwRetval, adapter_addresses, size = GetAdaptersAddresses(0, 0, None)
if dwRetval != NO_ERROR:
print('Call to GetAdaptersAddresses failed with error: %d' % dwRetval)
if dwRetval == ERROR_NO_DATA:
print('No addresses were found for the requested parameters')
else:
print('Error description: "%s"' % FormatError(dwRetval))
else:
items[0].pszText = adapter_addresses.AdapterName
items[1].pszText = adapter_addresses.DnsSuffix
items[2].pszText = adapter_addresses.Description
items[3].pszText = adapter_addresses.FriendlyName
physical_address_as_string, i = '', 0
if adapter_addresses.PhysicalAddressLength:
                for value in adapter_addresses.PhysicalAddress[:adapter_addresses.PhysicalAddressLength]:# MAC Address
                    if i < adapter_addresses.PhysicalAddressLength - 1:
                        physical_address_as_string += '%.2X-' % value  # dash between bytes
                    else:
                        physical_address_as_string += '%.2X' % value  # no trailing dash after the last byte
                    i += 1
items[4].pszText = physical_address_as_string
items[5].pszText = '%d' % adapter_addresses.PhysicalAddressLength
items[6].pszText = '%d' % adapter_addresses.Flags
items[7].pszText = '%d' % adapter_addresses.Mtu
type_as_string = 'Unknown type %d' % adapter_addresses.IfType
if adapter_addresses.IfType == MIB_IF_TYPE_OTHER:
type_as_string = 'Other'
elif adapter_addresses.IfType == MIB_IF_TYPE_ETHERNET:
type_as_string = 'Ethernet'
elif adapter_addresses.IfType == MIB_IF_TYPE_TOKENRING:
type_as_string = 'Token Ring'
elif adapter_addresses.IfType == MIB_IF_TYPE_FDDI:
type_as_string = 'FDDI'
elif adapter_addresses.IfType == MIB_IF_TYPE_PPP:
type_as_string = 'PPP'
elif adapter_addresses.IfType == MIB_IF_TYPE_LOOPBACK:
                type_as_string = 'Loopback'
elif adapter_addresses.IfType == MIB_IF_TYPE_SLIP:
type_as_string = 'Slip'
items[8].pszText = type_as_string
items[9].pszText = '%d' % adapter_addresses.OperStatus
items[10].pszText = '%d' % adapter_addresses.Ipv6IfIndex
items[11].pszText = ''.join(['%d' % value for value in adapter_addresses.ZoneIndices])
for item in items:
self.list_view.SetItem(item)
def OnCreate(self, event):
        self.list_view = comctl.ListView(parent = self, rcPos = RECT(5, 10, 200, 100), orExStyle = WS_EX_CLIENTEDGE)
self.controls.Add(form.CTRL_VIEW, self.list_view)
self.controls.Add(form.CTRL_STATUSBAR, comctl.StatusBar(parent = self))
if __name__ == '__main__':
mainForm = Form(rcPos = RECT(0, 0, 550, 350))
mainForm.ShowWindow()
application = Application()
application.Run()
|
Python
|
CL
|
9d1d98664b622b2895c3f518c1226f8912ff39d9d154610ecbcbf3f9019cb13f
|
#!/usr/bin/env python
"""
Create an HDF5 file from BOSS data
TODO:
- include comments in meta/attrs
- platelist quantities
"""
from __future__ import division, print_function
#from __future__ import absolute_import
from mpi4py import MPI
import h5py
from h5boss.select import *
import sys,os
import time
import optparse
import csv
import traceback
#import pandas as pd
import numpy as np
import optparse
import argparse
from collections import defaultdict
meta=['plugmap', 'zbest', 'zline',
'photo/match', 'photo/matchflux', 'photo/matchpos']
def list_csv(x):
columns = defaultdict(list) # each value in each column is appended to a list
try:
with open(x) as f:
reader = csv.DictReader(f,delimiter=' ') # read rows into a dictionary format
for row in reader: # read a row as {column1: value1, column2: value2,...}
for (k,v) in row.items(): # go over each column name and value
columns[k].append(v) # append the value into the appropriate list
# based on column name k
except Exception as e:
print ("read pmf csv error")
traceback.print_exc()
sys.exit()
return columns
def parse_pmf(input,output,pmflist,rank):
'''
input: HDF5 files list, i.e., source data
output: HDF5 file, to be created or updated
    pmflist: Plates/mjds/fibers numbers to be queried
This function is to check the input/output and pmflist
return plates, mjds, fibers as separate numpy arrays
'''
# check output file and its path
if os.path.exists(output):
if rank==0:
print ("The output file %s is existed, your job is going to overwrite it or update it"%output)
elif os.access(os.path.dirname(output),os.W_OK):
if rank==0:
print ("The output file %s is not existed, your job will create a new file"%output)
else:
if rank==0:
print ("The output file's path does not exist, job exits now")
sys.exit()
# parse plates/mjds/fibers
plates=[]
mjds=[]
fibers=[]
try:
df = list_csv(pmflist)
plates = df['plates']
mjds = df['mjds']
fibers = df['fibers']
except Exception as e:
print("pmflist csv read error or not exist:%s"%e,pmflist)
traceback.print_exc()
print("Note: 1st row of csv should start with 'plates mjds fibers'")
if len(plates)==0:
print ("No query is found, plate is empty")
sys.exit()
try:
with open(input,'rt') as f:
reader = csv.reader(f)
hdfsource = list(reader)
hdfsource = [x for sublist in hdfsource for x in sublist]
except Exception as e:
print ("HDF5 inputlist csv read error or not exist: %s"%e,input)
if(len(hdfsource)==0):
print("HDF5 source is empty")
sys.exit(0)
plates = np.asarray(plates)
mjds = np.asarray(mjds)
fibers = np.asarray(fibers)
return (plates,mjds,fibers,hdfsource)
def parallel_select():
'''
    Select a set of (plates,mjds,fibers) from the released BOSS data in HDF5 format.
Args:
input: HDF5 files list, i.e., source data, [csv file]
output: HDF5 file, to be created or updated
        pmf: Plates/mjds/fibers numbers to be queried, [csv file]
'''
parser = argparse.ArgumentParser(prog='subset')
parser.add_argument("input", help="HDF5 input list")
parser.add_argument("master", help="HDF5 output master file")
parser.add_argument("pmf", help="Plate/mjd/fiber list")
parser.add_argument("--mpi", help="using mpi yes/no")
opts=parser.parse_args()
infiles = opts.input
masterfile = opts.master
pmflist = opts.pmf
global meta
if opts.mpi is None or opts.mpi=="no":
        # start serial processing
print ("Try the subset.py or subset command")
sys.exit()
elif opts.mpi and opts.mpi=="yes":
comm =MPI.COMM_WORLD
nproc = comm.Get_size()
rank = comm.Get_rank()
(plates,mjds,fibers,hdfsource) = parse_pmf(infiles, masterfile, pmflist,rank)
if rank==0:
print ("HDF5 source: %d files:"%len(hdfsource))
print ("Output: master file: %s "%masterfile)
plates_uni_array = np.unique(np.asarray(plates))
print ("Number of plates to be quired: %d; and %d uniquely"%(plates.size,plates_uni_array.size))
#collectively open the output file
master_dir=os.path.dirname(os.path.realpath(masterfile))+'/'+os.path.basename(masterfile).split('.')[0]
if rank==0:
try:
os.stat(master_dir)
except:
os.mkdir(master_dir)
comm.Barrier()
try:
hx = h5py.File(masterfile,'w',driver='mpio', comm=MPI.COMM_WORLD)
except Exception as e:
print ("Output file creat error:%s"%masterfile)
traceback.print_exc()
comm.Barrier()
tstart=time.time()
if rank==0: print ("Number of processes %d"%nproc)
#each rank gets a subset of the filelist
total_files=len(hdfsource)
#distribute the workload evenly to each process
step=total_files / nproc
rank_start =int( rank * step)
rank_end = int(rank_start + step)
if(rank==nproc-1):
rank_end=total_files # adjust the last rank's range
range_files=hdfsource[rank_start:rank_end]
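        # Worked example of the split above (sizes are hypothetical): with 10 files and 4
        # ranks, step = 2.5, so the ranks process hdfsource[0:2], [2:5], [5:7] and [7:10];
        # the int() truncation plus the last-rank adjustment covers every file exactly once.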
for i in range(0,len(range_files)):
sub_select(range_files[i],plates,mjds,fibers,masterfile,rank,i)
comm.Barrier()
try:
hx.close()
except Exception as e:
print ("Master file closing error:%s"%outfile)
traceback.print_exc()
if rank==0:
print ('Cost: %.2f'%(time.time()-tstart))
if __name__=='__main__':
parallel_select()
|
Python
|
CL
|
1a4d59718e4627300461cd50f81b22e6832a380ad5fe278b1ec5048768f993a9
|
import argparse
import json
import logging
import os
import sys
import traceback
from datetime import datetime
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from AWS.db_reports import parse_dates
from AWS.redshift_handler import upload_logs, create_tables, delete_logs
from AWS.s3_telemetry import create_s3_conn
from AWS.s3t3_telemetry import T3_EVENT_CLASS_FILE_PREFIXES
from misc.utc_datetime import UtcDateTime
try:
from cloghandler import ConcurrentRotatingFileHandler as RFHandler
except ImportError:
from logging.handlers import RotatingFileHandler as RFHandler
# load json config
def json_config(file_name):
with open(file_name) as data_file:
json_data = json.load(data_file)
return json_data
def get_user_device_prefixes(logger, config, startdt_prefix):
aws_config = config["aws_config"]
s3_config = config["s3_config"]
prefixes = []
conn = create_s3_conn(aws_config["aws_access_key_id"], aws_config["aws_secret_access_key"])
bucket_name = s3_config["client_t3_log_bucket"]
bucket = conn.get_bucket(bucket_name)
for l1_prefix in bucket.list(prefix=startdt_prefix + '/', delimiter='/'):
for l2_prefix in bucket.list(prefix=l1_prefix.name, delimiter='/'):
for l3_prefix in bucket.list(prefix=l2_prefix.name, delimiter='/'):
prefixes.append(l3_prefix.name + 'NachoMail')
return prefixes
def get_upload_error_stats(logger, config, event_class, start, end):
error_stats = {}
return error_stats
def get_email_backend(email_config):
from django.core.mail.backends.smtp import EmailBackend
server = email_config['smtp_server']
port = email_config['port']
username = email_config['username']
if username:
password = email_config['password']
else:
password = None
start_tls = email_config['start_tls']
tls = email_config['tls']
backend = EmailBackend(host=server, port=port, username=username,
password=password, use_tls=start_tls)
return backend
def send_email(logger, email_config, html_part, start, project_name, attachments=None):
text_part = strip_tags(html_part)
subject = "Daily Redshift Upload Summary %s for %s" % (project_name, start)
report_name = "RSUpload%s-%s" % (project_name, start)
username = email_config['username']
if username:
password = email_config['password']
else:
password = None
from_address = email_config['from_address']
to_addresses = email_config['recipients'].split(',')
num_retries = 0
backend = get_email_backend(email_config)
while num_retries < 5:
try:
logger.info('Sending email to %s...', ', '.join(to_addresses))
from django.core.mail import EmailMessage
email = EmailMessage(subject, '', from_address,
to_addresses, connection=backend)
email.attach(report_name + ".html", html_part, "text/html")
email.attach(report_name + ".txt", text_part, "text/plain")
import mimetypes
            for attachment in (attachments or []):  # attachments may be None (the default)
email.attach_file(attachment, mimetypes.guess_type(attachment)[0])
email.send()
# send_mail(subject, text_part, from_address, to_addresses,
# fail_silently=False, auth_user=username, auth_password=password, connection=backend, html_message=html_part)
break
        except Exception as e:
logger.error('fail to send email: %s', e)
logger.error(traceback.format_exc())
num_retries += 1
else:
logger.error('fail to send email after %d retries' % num_retries)
return False
# main
def main():
parser = argparse.ArgumentParser(description='T3 RedShift Loader')
parser.add_argument('--config', required=True, type=json_config, metavar="config_file",
help='the config(json) file for the deployment', )
parser.add_argument('--period',
help='Indicate the periodicity with which this job runs',
default=None, type=str)
parser.add_argument('--start',
help='Date window starting time in ISO-8601 UTC. e.g 2015-06-18',
dest='start',
default=None)
parser.add_argument('--end',
help='Date window ending time in ISO-8601 UTC or "now" for the current time. e.g 2015-06-18',
dest='end',
default=None)
parser.add_argument('--event_class',
help="Event Class. Specify one of 'PROTOCOL','LOG', 'COUNTER', \
'STATISTICS2','UI', 'DEVICEINFO', 'SAMPLES', 'TIMESERIES',\
'SUPPORT', 'PINGER', if you don't need all",
default='ALL',
type=str)
parser.add_argument('--email',
help='Send email notification',
action='store_true',
default=False)
parser.add_argument('--logdir',
help='Where to write the logfiles. Default is ./logs/<config-file-basename>',
default=None, type=str)
parser.add_argument('-d', '--debug',
help='Debug',
action='store_true',
default=False)
parser.add_argument('--no-delete',
help="Don't delete timespan before loading.",
default=False,
action="store_true")
parser.add_argument('--prefix',
help="The table prefix",
default=None,
type=str)
args = parser.parse_args()
config = args.config
start, end = parse_dates(args)
project = config['general_config']['project']
if not args.logdir:
args.logdir = './log'
if not os.path.exists(args.logdir):
os.makedirs(args.logdir)
log_filename = 't3_redshift_loader-%s-%s-%s.%s.log' % (
project, start.datetime.strftime('%Y%m%d'), end.datetime.strftime('%Y%m%d'), UtcDateTime(datetime.now()))
log_file = os.path.abspath(os.path.join(args.logdir, log_filename))
logging_format = '%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s'
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if args.debug else logging.INFO)
handler = RFHandler(log_file, maxBytes=10 * 1024 * 1024, backupCount=10)
handler.setLevel(logging.DEBUG if args.debug else logging.INFO)
handler.setFormatter(logging.Formatter(logging_format))
logger.addHandler(handler)
if args.debug:
streamhandler = logging.StreamHandler(sys.stdout)
streamhandler.setLevel(logging.DEBUG if args.debug else logging.INFO)
streamhandler.setFormatter(logging.Formatter(logging_format))
logger.addHandler(streamhandler)
if args.period and args.period != 'daily':
logger.error("Invalid period (%s). Only daily is supported for now.", args.period)
exit(-1)
if args.period and args.start and args.end:
logger.warn("Ignoring period (%s). Both start (%s) and end (%s) are defined.", args.period, start, end)
exit(-1)
if not start:
logger.error("Invalid start time(%s)/period(%s)", args.start, args.period)
exit(-1)
if not end:
logger.error("Invalid end time(%s)/period(%s)", args.end, args.period)
exit(-1)
if args.event_class not in T3_EVENT_CLASS_FILE_PREFIXES.keys():
logger.error("Invalid event class %s. Pick one of %s", args.event_class, T3_EVENT_CLASS_FILE_PREFIXES.keys())
exit(-1)
summary = {}
summary["start"] = start
summary["end"] = end
event_classes = T3_EVENT_CLASS_FILE_PREFIXES[args.event_class]
if isinstance(event_classes, list):
summary["event_classes"] = event_classes
for ev_class in event_classes:
if "table_name" in summary:
summary["table_name"] = summary["table_name"] + ", " + \
project + \
"_nm_" + T3_EVENT_CLASS_FILE_PREFIXES[ev_class]
else:
summary["table_name"] = project + \
"_nm_" + T3_EVENT_CLASS_FILE_PREFIXES[ev_class]
else:
summary["event_classes"] = args.event_class
summary["table_name"] = "nm_" + T3_EVENT_CLASS_FILE_PREFIXES[args.event_class]
logger.info("Running T3 Redshift Uploader for the period %s to %s", start, end)
create_tables(logger, project, config, args.event_class, args.prefix)
if not args.no_delete:
delete_logs(logger, project, config, args.event_class, start, end, args.prefix)
upload_stats = upload_logs(logger, project, config, args.event_class, start, end, args.prefix)
get_upload_error_stats(logger, config, args.event_class, start, end)
template_dir = config['general_config']['src_root'] + '/T3Viewer/templates'
settings.configure(DEBUG=True, TEMPLATE_DEBUG=True, TEMPLATE_DIRS=(template_dir,),
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',))
report_data = {'summary': summary, 'upload_stats': upload_stats, "general_config": config["general_config"]}
html_part = render_to_string('upload_report_plain.html', report_data)
if args.email:
send_email(logger, config["email_config"], html_part, start,
project, [os.path.join(args.logdir, log_filename)])
elif args.debug:
print html_part
exit()
if __name__ == '__main__':
main()
|
Python
|
CL
|
372228f2d230df7c7cd02e743473201d0b259a5299a4ccd7fb64b858990835cd
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
A Python wrapper for the multitaper library of German A. Prieto (see link_).
.. _link: http://wwwprof.uniandes.edu.co/~gprieto/software/mwlib.html.
:copyright:
Lion Krischer (krischer@geophysik.uni-muenchen.de) and
Moritz Beyreuther, 2010-2015
:license:
GNU General Public License, Version 3
(http://www.gnu.org/copyleft/gpl.html)
"""
from .multitaper import mtspec, sine_psd, dpss # NOQA
from .multitaper import wigner_ville_spectrum, mt_coherence # NOQA
|
Python
|
CL
|
5cc2d04f907e2f62aac30906c539b68fcfbf4f7f319d5f6faf8d195cb1360a56
|
import logging
import operator
import os
from re import match
import yaml
PGA_NAME_SEPARATOR = "--"
__CONTAINER_CONF = None
__PROPERTIES = {}
__EVALUATED_INDIVIDUALS = []
# YAML command
def parse_yaml(yaml_file_path):
with open(yaml_file_path, mode="r", encoding="utf-8") as yaml_file:
content = yaml.safe_load(yaml_file) or {}
return content
# Commands for population and individuals
def collect_and_reset_received_individuals():
global __EVALUATED_INDIVIDUALS
received = sort_population_by_fitness(__EVALUATED_INDIVIDUALS)
__EVALUATED_INDIVIDUALS = []
return received
def save_received_individual(individual):
global __EVALUATED_INDIVIDUALS
__EVALUATED_INDIVIDUALS.append(individual)
    current_length = len(__EVALUATED_INDIVIDUALS)
return current_length >= int(get_property("POPULATION_SIZE")), current_length
def sort_population_by_fitness(population):
# Sorts and returns population by fitness, in descending order (fittest first).
return sorted(population, key=operator.attrgetter("fitness"), reverse=True)
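# Minimal usage sketch (assumes individuals expose a numeric ``fitness`` attribute;
# the names below are made up):
#
#   from collections import namedtuple
#   Individual = namedtuple("Individual", "name fitness")
#   pop = [Individual("a", 0.2), Individual("b", 0.9), Individual("c", 0.5)]
#   sort_population_by_fitness(pop)  # -> [("b", 0.9), ("c", 0.5), ("a", 0.2)]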
# Commands for properties
def get_messaging_source():
if not __CONTAINER_CONF:
__retrieve_container_config()
return __CONTAINER_CONF["source"]
def get_messaging_init_gen():
if not __CONTAINER_CONF:
__retrieve_container_config()
return __CONTAINER_CONF["init_gen"]
def get_messaging_init_eval():
if not __CONTAINER_CONF:
__retrieve_container_config()
return __CONTAINER_CONF["init_eval"]
def get_messaging_pga():
if not __CONTAINER_CONF:
__retrieve_container_config()
return __CONTAINER_CONF["pga"]
def get_pga_id():
if not __CONTAINER_CONF:
__retrieve_container_config()
return __CONTAINER_CONF["pga_id"]
def __retrieve_container_config():
# Retrieve locally saved config file.
files = [f for f in os.listdir("/") if match(r'[0-9]+--runner-config\.yml', f)]
# https://stackoverflow.com/questions/2225564/get-a-filtered-list-of-files-in-a-directory/2225927#2225927
# https://regex101.com/
    if not files:
raise Exception("Error retrieving the container config: No matching config file found!")
config = parse_yaml("/{}".format(files[0]))
global __CONTAINER_CONF
__CONTAINER_CONF = {
"pga_id": config.get("pga_id"),
"source": config.get("source"),
"init_gen": config.get("init_gen"),
"init_eval": config.get("init_eval"),
"pga": config.get("pga")
}
logging.info("Container config retrieved: {conf_}".format(conf_=__CONTAINER_CONF))
def get_property(property_key):
return __PROPERTIES[property_key]
def set_property(property_key, property_value):
__PROPERTIES[property_key] = property_value
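# Minimal usage sketch: properties are plain key/value pairs kept in module memory.
#
#   set_property("POPULATION_SIZE", "10")
#   get_property("POPULATION_SIZE")  # -> "10"
#   get_pga_id()  # reads "<N>--runner-config.yml" from / on first call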
|
Python
|
CL
|
ebb06264f7468d53861c252324831539a030f71d42acc5ef142e3b8374a4c62c
|
#Goes through reddit picking up profiles, obtaining their submission locations, and creating links between multiple profiles based on that. End result is an adjacency matrix
import os
import sys
import praw
import json
import time
import copy
from Digital_Library.lib import const_lib
from Digital_Library.lib import path_lib
from Digital_Library.lib import arg_lib
from Digital_Library.lib import console_lib
from Digital_Library.lib.log_lib import *
module_name = 'reddit_crawler'
path = const_lib.load_module_paths(module_name)
const = const_lib.load_module_const(module_name)
global_paths = const_lib.load_global_paths()
#Prints the functions available to an object
#
#@input obj<Object>: object to examine
#
def _examine_object(obj):
[print(x) for x in dir(obj) if x[0] != '_']
#Creates the reddit user agent
#
#@input user_agent<string>: User agent string
#@return reddit<praw.Reddit>: reddit object
#
def _create_user_agent(user_agent):
return praw.Reddit(user_agent=user_agent)
#Get top submissions from a subreddit
#
#@input reddit<praw.Reddit>: reddit object
#@input subreddit<string>: subreddit name
#@input num_submissions<int>: number of submissions to get
#@input checked_submissions<list<string>>: list of submissions already processed
#@return comments<list<Comment>>: list of comments in the subreddit
#@return checked_submissions<list<string>>: list of submissions already processed
#
def _get_top_submission_comments(reddit, subreddit, num_submissions, checked_submissions):
comments = []
submissions = reddit.get_subreddit(subreddit).get_top_from_all(limit=num_submissions)
submission = next(submissions, None)
submission_number = 0
while submission != None:
console_lib.update_progress_bar(submission_number/num_submissions, 'Processing {} of {} submissions...'.format(submission_number, num_submissions))
submission_id = submission.fullname
if not submission_id in checked_submissions:
checked_submissions.append(submission_id)
#submission.replace_more_comments(limit=None, threshold=0)
c = praw.helpers.flatten_tree(submission.comments)
comments.extend(c)
submission = next(submissions, None)
submission_number += 1
console_lib.update_progress_bar(1, 'Done processing submissions.', end=True)
return (comments, checked_submissions)
#Expands a list of comments that may contain MoreComments objects. Recusively calls this function until all comments have been expanded
#
#@input comments<list<praw.objects.MoreComments>>: List of reddit comments. May contain More Comments
#@return comments<list<praw.objects.Comments>>: List of reddit comments with no MoreComments
#
def _expand_MoreComments(comments):
new_comments = []
cur_c = 0
tot_c = len(comments)
for c in comments:
console_lib.update_progress_bar(cur_c/tot_c, "Expanding comment {} of {}...".format(cur_c, tot_c))
if type(c) == praw.objects.MoreComments:
if c.count > 0:
expanded_comments = c.comments()
new_comments.extend(expanded_comments)
else:
new_comments.append(c)
cur_c += 1
console_lib.update_progress_bar(1, "Done.", end=True)
if len(new_comments) == len(comments):
return new_comments
else:
return _expand_MoreComments(new_comments)
#Converts comment list to a list of users
#
#@input comments<list<Comment>>: list of comment objects
#@return users<list<string>>: list of usernames
#
def _convert_comment_list_to_user_list(comments):
users = []
cur_com = 0
tot_com = len(comments)
for c in comments:
console_lib.update_progress_bar(cur_com/tot_com, "Converting comment {} of {}...".format(cur_com, tot_com))
try:
auth = c.author.name
users.append(auth)
except AttributeError:
pass
cur_com += 1
console_lib.update_progress_bar(1, "Done", end=True)
return list(set(users))
#Processes comments expanding MoreComments and converting regular comments to users.
#Combines _expand_MoreComments, _convert_comment_list_to_user_list, and user dict creation code
#
#@input comments<list<praw.objects.MoreComments and praw.objects.Comments>>: List of reddit comments and MoreComments
#@input users<dict>: dictionary object of users
#@input log_file<string>: path to log file
#@input user_file<string>: path to users json file
#
def _convert_comments_to_users(comments, users, log_file, user_file):
cur_c = 0
tot_c = len(comments)
while len(comments) > 0:
console_lib.update_progress_bar(cur_c/tot_c, "Processing comment {} of {}...".format(cur_c, tot_c))
c = comments[0]
del comments[0]
log(log_file, "Processing comment {} of {}...".format(cur_c, tot_c), print_to_console=False)
try:
if type(c) == praw.objects.MoreComments:
if c.count > 0:
log(log_file, "Expanding comment...", print_to_console=False)
expanded_comments = c.comments()
log(log_file, "{} new comments expanded".format(len(expanded_comments)), print_to_console=False)
tot_c += len(expanded_comments)
comments.extend(expanded_comments)
else:
try:
auth = c.author.name
log(log_file, "User extracted = {}".format(auth), print_to_console=False)
if not auth in users:
users[auth] = {'processed':False}
log(log_file, "Added new user, saving JSON structure...", print_to_console=False)
with open(user_file, 'w') as f:
json.dump(users, f, sort_keys=True, indent=4)
else:
log(log_file, "User already present.", print_to_console=False)
except AttributeError:
log(log_file, "Attribute error when trying to process comment. Likely Author returns None", print_to_console=False)
except TypeError:
log(log_file, "Type error when trying to process comment. Likely the buffering error. Let's wait and resume", print_to_console=False)
time.sleep(30)
cur_c += 1
console_lib.update_progress_bar(1, "Done. {} comments processed.".format(tot_c), end=True)
#Get active subreddits for user with number of content additions
#
#@input reddit<praw.Reddit>: reddit object
#@input user<string>: reddit username
#@return subreddits<dict>: dictionary of submitted subreddits with number of submissions
#
def _get_user_subreddits(reddit, user):
subreddits = {}
try:
user = reddit.get_redditor(user)
except:
user = None
if user != None:
comments = user.get_comments(limit=const.user_comments)
if comments != None:
try:
comment = next(comments, None)
except:
comment = None
cur_c = 0
while comment != None:
console_lib.update_progress_bar(cur_c/1000, "Processing comment {}".format(cur_c + 1))
try:
sub = comment.subreddit.display_name
if not sub in subreddits:
subreddits[sub] = 0
subreddits[sub] += 1
except:
pass
try:
comment = next(comments, None)
except:
comment = None
cur_c += 1
console_lib.update_progress_bar(1, "Processed {} comments".format(cur_c), end=True)
return subreddits
#Creates an empty square matrix
#
#@input side_length<int>: length of a side of the square
#@input default_val<int>: default value in the matrix
#@return matrix<list<list<int>>>: square matrix
#
def _create_square_matrix(side_length, default_val=0):
#return [[default_val] * side_length] * side_length
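    # (the commented one-liner above is avoided on purpose: repeating the inner
    # list with * would make every row alias the same list object)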
matrix = []
for ii in range(side_length):
row = []
for jj in range(side_length):
row.append(default_val)
matrix.append(row)
return matrix
#Writes matrix to file
#
#@input matrix<list<list<int>>>: square matrix
#@input output_file<string>: path to output file
#
def _write_matrix(matrix, output_file):
with open(output_file, 'w') as f:
for ii in range(0, len(matrix)):
for jj in range(0, len(matrix[ii])):
f.write("{}\t".format(matrix[ii][jj]))
f.write("\n")
#Compares two users and obtains their interest rating
#
#@input users<dict>: dictionary of all users and subreddits
#@input user_x<string>: name of first user
#@input user_y<string>: name of second user
#@return rating<float>: rating value
#
def _interest_map(users, user_x, user_y):
subreddits_x = users[user_x]['subreddits']
subreddits_y = users[user_y]['subreddits']
interest = 0
for subreddit in subreddits_x:
if subreddit in subreddits_y:
interest += min(subreddits_x[subreddit], subreddits_y[subreddit])
return interest
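# Example (made-up subreddit counts): if user_x has {"python": 5, "nba": 2} and
# user_y has {"python": 3, "movies": 7}, the shared subreddit is "python" and
# the interest rating is min(5, 3) == 3.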
#Applies knn to row
#
#@input row<list<float>>: list of floating point values
#@input knn<int>: number of neighbors
#@return row<list<float>>: floating point values
#
def _apply_knn_to_row(row, knn):
temp_row = []
for ii in range(0, len(row)):
temp_row.append([ii, row[ii]])
temp_row.sort(key=lambda x:x[1])
temp_row.reverse()
keep_indices = []
for ii in range(0, knn):
keep_indices.append(temp_row[ii][0])
for ii in range(0, len(row)):
if not ii in keep_indices:
row[ii] = 0
return row
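# Example: _apply_knn_to_row([4, 9, 1, 7], knn=2) keeps the two largest
# entries (9 and 7) and zeroes the rest, giving [0, 9, 0, 7].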
#Runs the crawl_for_user option
#
#@input log_file<string>: path to log file
#@input data_path<string>: path to data directory of stored values
#
def _crawl_for_users(log_file, data_path):
log(log_file, "Creating reddit user agent")
reddit = _create_user_agent(const.user_agent)
#Choose subreddit to check
with open(os.path.join(data_path, 'subreddits.txt'), 'r') as f:
subreddits = [x.strip() for x in f.readlines()]
p = os.path.join(data_path, 'checked_subreddits.json')
if not path_lib.file_exists(p):
with open(p, 'w') as f:
f.write("{}")
with open(os.path.join(data_path, 'checked_subreddits.json'), 'r') as f:
checked_subreddits = json.load(f)
for subreddit in subreddits:
if not subreddit in checked_subreddits:
checked_subreddits[subreddit] = []
checked_submissions = checked_subreddits[subreddit]
if len(checked_submissions) < const.number_of_submissions:
log(log_file, "Getting top {} submission comments from {}".format(const.number_of_submissions, subreddit))
comments, checked_submissions = _get_top_submission_comments(reddit, subreddit, const.number_of_submissions, checked_submissions)
checked_subreddits[subreddit] = checked_submissions
log(log_file, "{} comments obtained".format(len(comments)))
log(log_file, "Loading current user JSON file...")
user_file = os.path.join(data_path, 'users.json')
            if not path_lib.file_exists(user_file):
                with open(user_file, 'w') as f:
                    f.write("{}")
            with open(user_file, 'r') as f:
users = json.load(f)
log(log_file, "Processing all comments...")
_convert_comments_to_users(comments, users, log_file, user_file)
console_lib.update_progress_bar(3/4, "Saving checked subreddits list...")
with open(os.path.join(data_path, 'checked_subreddits.json'), 'w') as f:
json.dump(checked_subreddits, f, sort_keys=True, indent=4)
console_lib.update_progress_bar(1, "Done.", end=True)
#Combines all unique users in JSON files in <combine_folder> and the <user_file> and stores the resulting entries in <user_file>
#
#@input log_file<string>: path to log file
#@input data_path<string>: path to data directory of stored values
#@input user_file<string>: name of the users file to use
#@input combine_folder<string>: Folder where we store JSON files to combine with
#
def _combine_JSON_files(log_file, data_path, user_file, combine_folder):
log(log_file, 'Combining JSON files...')
log(log_file, 'Loading user_file...')
p = os.path.join(data_path, user_file)
with open(p, 'r') as f:
users = json.load(f)
tot_u = len(users.keys())
log(log_file, '{} users loaded'.format(tot_u))
log(log_file, 'Obtaining JSON filenames...')
p = os.path.join(data_path, combine_folder)
files = path_lib.get_all_files_in_directory_with_extension(p, 'json')
log(log_file, '{} files found.'.format(len(files)))
log(log_file, 'Processing files found...')
for file in files:
log(log_file, 'Loading {}...'.format(file))
p = os.path.join(data_path, combine_folder, file)
with open(p, 'r') as f:
users_temp = json.load(f)
log(log_file, 'File contains {} users.'.format(len(users_temp)))
cur_i = 0
tot_i = len(users_temp)
for user in users_temp:
console_lib.update_progress_bar(cur_i/tot_i, 'Processing user {}, {} out of {}...'.format(user, cur_i, tot_i))
u_structure = copy.deepcopy(users_temp[user])
if not user in users:
users[user] = u_structure
else:
if (not users[user]['processed']) and u_structure['processed']:
users[user] = u_structure
cur_i += 1
console_lib.update_progress_bar(1, 'File Processed. {} total users'.format(len(users.keys())), end=True)
log(log_file, 'Writing user_file...')
p = os.path.join(data_path, user_file)
with open(p, 'w') as f:
json.dump(users, f, sort_keys=True, indent=4)
log(log_file, 'File written.')
log(log_file, 'All files processed. {} total users'.format(len(users.keys())))
#Generates a list of users that have not been processed
#
#@input log_file<string>: path to log file
#@input data_path<string>: path to data directory of stored values
#@input user_file<string>: name of the users file to use
#
def _generate_unprocessed_user_list(log_file, data_path, user_file):
log(log_file, 'Generating unprocessed user list...')
log(log_file, 'Loading user JSON structure...')
p = os.path.join(data_path, user_file)
with open(p, 'r') as f:
users = json.load(f)
tot_u = len(users.keys())
log(log_file, "{} users loaded".format(tot_u))
unprocessed_users = []
cur_u = 0
log(log_file, 'Checking for unprocessed users...')
for user in users:
console_lib.update_progress_bar(cur_u/tot_u, 'Checking user {}, {} out of {}...'.format(user, cur_u, tot_u))
if not users[user]['processed']:
unprocessed_users.append(user)
cur_u += 1
console_lib.update_progress_bar(1, 'Done.', end=True)
log(log_file, '{} unprocessed users found'.format(len(unprocessed_users)))
log(log_file, 'Writing unprocessed user list to file...')
p = os.path.join(data_path, 'unprocessed_user_list.txt')
with open(p, 'w') as f:
for u in unprocessed_users:
f.write("{}\n".format(u))
#Obtains the current filename for user_structure storage
#
#@input storage_path<string>: path to storage structure
#@return user_structure_path<string>: path to user_structure storage file
#
def _get_current_filename_for_storage(storage_path):
files = path_lib.get_all_files_in_directory_with_extension(storage_path, 'json')
highest_number = 0
for file in files:
if 'user_partial_storage' in file:
file = file.split('.')
if int(file[1]) > highest_number:
highest_number = int(file[1])
filename = 'user_partial_storage.{}.json'.format(highest_number)
p = os.path.join(storage_path, filename)
if not path_lib.file_exists(p):
with open(p, 'w') as f:
f.write('{}\n')
with open(p, 'r') as f:
data = json.load(f)
if len(data.keys()) >= const.user_storage_max_users:
highest_number += 1
filename = 'user_partial_storage.{}.json'.format(highest_number)
p = os.path.join(storage_path, filename)
if not path_lib.file_exists(p):
with open(p, 'w') as f:
f.write('{}\n')
return p
#Processes users as according to script
#
#@input log_file<string>: path to log file
#@input data_path<string>: path to data directory of stored values
#@input user_file<string>: name of the users file to user
#@input combine_folder<string>: Folder where we store JSON files to combine with
#
def _process_users(log_file, data_path, user_file, combine_folder):
log(log_file, "Creating reddit user agent")
reddit = _create_user_agent(const.user_agent)
log(log_file, 'Loading unprocessed user list...')
p = os.path.join(data_path, 'unprocessed_user_list.txt')
with open(p, 'r') as f:
unprocessed_users = f.readlines()
tot_u = len(unprocessed_users)
cur_u = 0
u_s_p = _get_current_filename_for_storage(os.path.join(data_path, combine_folder))
with open(u_s_p, 'r') as f:
user_structure = json.load(f)
while len(unprocessed_users) > 0:
u = unprocessed_users[0].strip()
log(log_file, "Processing user {} [{}/{}]...".format(u, cur_u, tot_u))
user_structure[u] = {}
user_structure[u]['processed'] = False
try:
subreddits = _get_user_subreddits(reddit, u)
user_structure[u]['processed'] = True
user_structure[u]['subreddits'] = subreddits
except TypeError:
user_structure[u]['processed'] = True
user_structure[u]['subreddits'] = None
with open(u_s_p, 'w') as f:
json.dump(user_structure, f, sort_keys=True, indent=4)
if len(user_structure.keys()) >= const.user_storage_max_users:
u_s_p = _get_current_filename_for_storage(os.path.join(data_path, combine_folder))
with open(u_s_p, 'r') as f:
user_structure = json.load(f)
del unprocessed_users[0]
with open(p, 'w') as f:
for u in unprocessed_users:
f.write("{}\n".format(u.strip()))
cur_u += 1
console_lib.update_progress_bar(1, "{} users processed".format(cur_u), end=True)
#Converts user interest list into graph data
#
#@input log_file<string>: path to log file
#@input data_path<string>: path to data directory of stored values
#@input user_file<string>: name of the users file to use
#@input knn<string>: variable for using K-nearest neighbors. If 'None', knn not used
#
def _generate_graph(log_file, data_path, user_file, knn):
log(log_file, 'Loading user JSON structure...')
p = os.path.join(data_path, user_file)
if path_lib.file_exists(p):
with open(p, 'r') as f:
users = json.load(f)
tot_u = len(users.keys())
log(log_file, "{} users loaded".format(tot_u))
log(log_file, "Creating empty matrix...")
matrix = _create_square_matrix(tot_u)
log(log_file, "Empty matrix ready.")
key_list = list(users.keys())
        full = False
        if knn != 'None':
            full = True
cur_i = 0
tot_i = int(tot_u/2*(1+tot_u))
if full:
tot_i = tot_u*tot_u
for ii in range(0, len(key_list)):
initial = ii+1
if full:
initial = 0
for jj in range(initial, len(key_list)):
if ii != jj:
user_x = key_list[ii]
user_y = key_list[jj]
console_lib.update_progress_bar(cur_i/tot_i, 'Mapping interest between {} and {}...'.format(user_x, user_y))
matrix[ii][jj] = _interest_map(users, user_x, user_y)
cur_i += 1
console_lib.update_progress_bar(1, "Matrix complete.", end=True)
if const.create_labels:
log(log_file, "Creating labels for users...")
o_labels = os.path.join(data_path, "matrix_"+path_lib.get_filename_without_extension(user_file) + '.labels')
with open(o_labels, 'w') as f:
for ii in range(0, len(key_list)):
f.write("{}\n".format(key_list[ii]))
log(log_file, "Labels created.")
if knn != 'None':
log(log_file, 'Using nearest neighbor mapping for knn={}'.format(knn))
knn = int(knn)
cur_i = 0
tot_i = len(key_list)
for ii in range(0, len(key_list)):
console_lib.update_progress_bar(cur_i/tot_i, 'Applying KNN-{} to {}...'.format(knn, key_list[ii]))
matrix[ii] = _apply_knn_to_row(matrix[ii], knn)
cur_i += 1
console_lib.update_progress_bar(1, 'Done.', end=True)
log(log_file, "Writing matrix...")
o_p = os.path.join(data_path, "matrix_"+path_lib.get_filename_without_extension(user_file) + '.txt')
_write_matrix(matrix, o_p)
log(log_file, "Graph generated")
#Runs the script
#
#@input log_path<string>: path to log file to store results of script run
#@input data_path<string>: path to data directory of stored values
#@input user_file<string>: name of the users file to use
#@input knn<string>: variable for using K-nearest neighbors. If 'None', knn not used
#@input combine_folder<string>: Folder where we store JSON files to combine with
#@input crawl_for_users<boolean>: Indicates script should crawl for new usernames
#@input combine_JSON_files<boolean>: Combines all unique users from <combine_folder> with the <user_file>
#@input generate_unprocessed_user_list<boolean>: Generates a list of unprocessed users to compare to
#@input process_users<boolean>: Indicates script should process found usernames and extract interest subreddits
#@input generate_graph<boolean>: Indicates script should process interest chart and generate an adjacency graph
#
def _run(log_path, data_path, user_file, knn, combine_folder, crawl_for_users, combine_JSON_files, generate_unprocessed_user_list, process_users, generate_graph):
log_file = define_log_file(log_path, log_path=log_path)
script_timer = log_start(log_file)
path_lib.create_path(data_path)
if crawl_for_users:
_crawl_for_users(log_file, data_path)
elif combine_JSON_files:
_combine_JSON_files(log_file, data_path, user_file, combine_folder)
elif generate_unprocessed_user_list:
_generate_unprocessed_user_list(log_file, data_path, user_file)
elif process_users:
_process_users(log_file, data_path, user_file, combine_folder)
elif generate_graph:
_generate_graph(log_file, data_path, user_file, knn)
else:
log(log_file, 'ERROR: No options chosen for script!')
log_end(log_file, timer=script_timer)
#ARGUMENT PARSING CODE
'''
log_p = os.path.join(global_paths.logs, 'modules', module_name, module_name + '.log')
data_p = os.path.join(global_paths.data, 'modules', module_name)
users_file = 'users.json'
k = "None"
combine_f = 'storage'
description = 'Goes through reddit picking up profiles, obtaining their submission locations, and creating links between multiple profiles based on that. End result is an adjacency matrix'
arg_vars = {
'log_path' : {'help': 'Path to where log data is stored', 'value': log_p},
'data_path': {'help': 'Path to where data is stored', 'value': data_p},
'user_file': {'help': "Filename to users file to use in processing and graphing", 'value': users_file},
'knn' : {'help': 'Number of nearest neighbors to use', 'value': k},
'combine_folder': {'help': 'Folder where we store JSON files to combine with', 'value': combine_f}
}
flag_vars = {
"crawl_for_users" : {"help": "Crawling initiated for usernames", "value": True},
"combine_JSON_files": {"help": "Combines all unique users from <combine_folder> with the <user_file>", 'value': False},
"generate_unprocessed_user_list": {"help": "Generates a list of unprocessed users to compare to", "value": False},
"process_users": {"help": "Processing subreddit interests for users", "value": False},
"generate_graph": {"help": "Generating interest graph", "value": False}
}
arg_parser = arg_lib.ArgumentController(description=description, set_variables=arg_vars, flag_variables=flag_vars)
var_data = arg_parser.parse_args()
if var_data != None:
_run(**var_data)
'''
|
Python
|
CL
|
d3093aad15e98989a1d509ac0816b6d1cbd83cd1bab6e62a7ae194fab1d90741
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
""":synopsis: HTTP and HTTPS protocol client (requires sockets).
"""
class HTTPConnection:
"""
An :class:`HTTPConnection` instance represents one transaction with an HTTP
server. It should be instantiated passing it a host and optional port
number. If no port number is passed, the port is extracted from the host
string if it has the form ``host:port``, else the default HTTP port (80) is
used. When True, the optional parameter *strict* (which defaults to a false
value) causes ``BadStatusLine`` to
be raised if the status line can't be parsed as a valid HTTP/1.0 or 1.1
status line. If the optional *timeout* parameter is given, blocking
operations (like connection attempts) will timeout after that many seconds
(if it is not given, the global default timeout setting is used).
The optional *source_address* parameter may be a tuple of a (host, port)
to use as the source address the HTTP connection is made from.
For example, the following calls all create instances that connect to the server
at the same host and port::
>>> h1 = httplib.HTTPConnection('www.cwi.nl')
>>> h2 = httplib.HTTPConnection('www.cwi.nl:80')
>>> h3 = httplib.HTTPConnection('www.cwi.nl', 80)
>>> h3 = httplib.HTTPConnection('www.cwi.nl', 80, timeout=10)
"""
def __init__(self, ):
pass
def request(self, method,url,body,headers):
"""
This will send a request to the server using the HTTP request method *method*
and the selector *url*. If the *body* argument is present, it should be a
string of data to send after the headers are finished. Alternatively, it may
be an open file object, in which case the contents of the file is sent; this
file object should support ``fileno()`` and ``read()`` methods. The header
Content-Length is automatically set to the correct value. The *headers*
argument should be a mapping of extra HTTP headers to send with the request.
"""
pass
def getresponse(self, ):
"""
Should be called after a request is sent to get the response from the server.
Returns an :class:`HTTPResponse` instance.
"""
pass
def set_debuglevel(self, level):
"""
Set the debugging level (the amount of debugging output printed). The default
debug level is ``0``, meaning no debugging output is printed.
"""
pass
def set_tunnel(self, host,port=None,headers=None):
"""
Set the host and the port for HTTP Connect Tunnelling. Normally used when
        it is required to do an HTTPS connection through a proxy server.
        The headers argument should be a mapping of extra HTTP headers to send
with the CONNECT request.
"""
pass
def connect(self, ):
"""
Connect to the server specified when the object was created.
"""
pass
def close(self, ):
"""
Close the connection to the server.
As an alternative to using the :meth:`request` method described above, you can
also send your request step by step, by using the four functions below.
"""
pass
def putrequest(self, request,selector,skip_host,skip_accept_encoding):
"""
This should be the first call after the connection to the server has been made.
It sends a line to the server consisting of the *request* string, the *selector*
string, and the HTTP version (``HTTP/1.1``). To disable automatic sending of
``Host:`` or ``Accept-Encoding:`` headers (for example to accept additional
content encodings), specify *skip_host* or *skip_accept_encoding* with non-False
values.
"""
pass
def putheader(self, header,argument,more):
"""
Send an :rfc:`822`\ -style header to the server. It sends a line to the server
consisting of the header, a colon and a space, and the first argument. If more
arguments are given, continuation lines are sent, each consisting of a tab and
an argument.
"""
pass
def endheaders(self, ):
"""
Send a blank line to the server, signalling the end of the headers.
"""
pass
def send(self, data):
"""
Send data to the server. This should be used directly only after the
:meth:`endheaders` method has been called and before :meth:`getresponse` is
called.
        .. HTTPResponse Objects
--------------------
:class:`HTTPResponse` instances have the following methods and attributes:
"""
pass
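# A rough usage sketch of the step-by-step interface described above (host and
# path are placeholders, not part of this stub):
#
#   conn = HTTPConnection('www.example.com')
#   conn.putrequest('GET', '/index.html')
#   conn.putheader('Accept', 'text/html')
#   conn.endheaders()
#   resp = conn.getresponse()
#   body = resp.read()
#   conn.close()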
class HTTPSConnection:
"""
A subclass of :class:`HTTPConnection` that uses SSL for communication with
secure servers. Default port is ``443``. *key_file* is the name of a PEM
formatted file that contains your private key. *cert_file* is a PEM formatted
certificate chain file.
"""
def __init__(self, ):
pass
class HTTPResponse:
"""
Class whose instances are returned upon successful connection. Not instantiated
directly by user.
"""
def __init__(self, ):
pass
def read(self, amt):
"""
Reads and returns the response body, or up to the next *amt* bytes.
"""
pass
def getheader(self, name,default):
"""
Get the contents of the header *name*, or *default* if there is no matching
header.
"""
pass
def getheaders(self, ):
"""
Return a list of (header, value) tuples.
"""
pass
def fileno(self, ):
"""
Returns the ``fileno`` of the underlying socket.
"""
pass
class HTTPMessage:
"""
An :class:`HTTPMessage` instance is used to hold the headers from an HTTP
response. It is implemented using the :class:`mimetools.Message` class and
provides utility functions to deal with HTTP Headers. It is not directly
instantiated by the users.
The following exceptions are raised as appropriate:
"""
def __init__(self, ):
pass
"""
The default port for the HTTP protocol (always ``80``).
"""
HTTP_PORT = None
"""
The default port for the HTTPS protocol (always ``443``).
The module also defines the following constants for integer status codes:
+------------------------------------------+---------+-----------------------------------------------------------------------+
| Constant | Value | Definition |
+==========================================+=========+=======================================================================+
| :const:`CONTINUE` | ``100`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.1.1 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.1.1>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`SWITCHING_PROTOCOLS` | ``101`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.1.2 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.1.2>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`PROCESSING` | ``102`` | WEBDAV, `RFC 2518, Section 10.1 |
| | | <http://www.webdav.org/specs/rfc2518.html#STATUS_102>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`OK` | ``200`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.2.1 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.2.1>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`CREATED` | ``201`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.2.2 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.2.2>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`ACCEPTED` | ``202`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.2.3 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.2.3>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`NON_AUTHORITATIVE_INFORMATION` | ``203`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.2.4 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.2.4>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`NO_CONTENT` | ``204`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.2.5 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.2.5>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`RESET_CONTENT` | ``205`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.2.6 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.2.6>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`PARTIAL_CONTENT` | ``206`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.2.7 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.2.7>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`MULTI_STATUS` | ``207`` | WEBDAV `RFC 2518, Section 10.2 |
| | | <http://www.webdav.org/specs/rfc2518.html#STATUS_207>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`IM_USED` | ``226`` | Delta encoding in HTTP, |
| | | :rfc:`3229`, Section 10.4.1 |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`MULTIPLE_CHOICES` | ``300`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.3.1 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.1>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`MOVED_PERMANENTLY` | ``301`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.3.2 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.2>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`FOUND` | ``302`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.3.3 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.3>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`SEE_OTHER` | ``303`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.3.4 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`NOT_MODIFIED` | ``304`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.3.5 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`USE_PROXY` | ``305`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.3.6 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.6>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`TEMPORARY_REDIRECT` | ``307`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.3.8 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.8>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`BAD_REQUEST` | ``400`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.1 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.1>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`UNAUTHORIZED` | ``401`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.2 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.2>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`PAYMENT_REQUIRED` | ``402`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.3 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.3>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`FORBIDDEN` | ``403`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.4 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.4>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`NOT_FOUND` | ``404`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.5 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.5>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`METHOD_NOT_ALLOWED` | ``405`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.6 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.6>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`NOT_ACCEPTABLE` | ``406`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.7 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.7>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`PROXY_AUTHENTICATION_REQUIRED` | ``407`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.8 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.8>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`REQUEST_TIMEOUT` | ``408`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.9 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.9>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`CONFLICT` | ``409`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.10 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.10>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`GONE` | ``410`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.11 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.11>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`LENGTH_REQUIRED` | ``411`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.12 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.12>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`PRECONDITION_FAILED` | ``412`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.13 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.13>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`REQUEST_ENTITY_TOO_LARGE` | ``413`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.14 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.14>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`REQUEST_URI_TOO_LONG` | ``414`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.15 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.15>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`UNSUPPORTED_MEDIA_TYPE` | ``415`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.16 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.16>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`REQUESTED_RANGE_NOT_SATISFIABLE` | ``416`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.17 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.17>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`EXPECTATION_FAILED` | ``417`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.4.18 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.18>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`UNPROCESSABLE_ENTITY` | ``422`` | WEBDAV, `RFC 2518, Section 10.3 |
| | | <http://www.webdav.org/specs/rfc2518.html#STATUS_422>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`LOCKED` | ``423`` | WEBDAV `RFC 2518, Section 10.4 |
| | | <http://www.webdav.org/specs/rfc2518.html#STATUS_423>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`FAILED_DEPENDENCY` | ``424`` | WEBDAV, `RFC 2518, Section 10.5 |
| | | <http://www.webdav.org/specs/rfc2518.html#STATUS_424>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`UPGRADE_REQUIRED` | ``426`` | HTTP Upgrade to TLS, |
| | | :rfc:`2817`, Section 6 |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`INTERNAL_SERVER_ERROR` | ``500`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.5.1 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.5.1>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`NOT_IMPLEMENTED` | ``501`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.5.2 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.5.2>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`BAD_GATEWAY` | ``502`` | HTTP/1.1 `RFC 2616, Section |
| | | 10.5.3 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.5.3>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`SERVICE_UNAVAILABLE` | ``503`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.5.4 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.5.4>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`GATEWAY_TIMEOUT` | ``504`` | HTTP/1.1 `RFC 2616, Section |
| | | 10.5.5 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.5.5>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`HTTP_VERSION_NOT_SUPPORTED` | ``505`` | HTTP/1.1, `RFC 2616, Section |
| | | 10.5.6 |
| | | <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.5.6>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`INSUFFICIENT_STORAGE` | ``507`` | WEBDAV, `RFC 2518, Section 10.6 |
| | | <http://www.webdav.org/specs/rfc2518.html#STATUS_507>`_ |
+------------------------------------------+---------+-----------------------------------------------------------------------+
| :const:`NOT_EXTENDED` | ``510`` | An HTTP Extension Framework, |
| | | :rfc:`2774`, Section 7 |
+------------------------------------------+---------+-----------------------------------------------------------------------+
"""
HTTPS_PORT = None
"""
This dictionary maps the HTTP 1.1 status codes to the W3C names.
Example: ``httplib.responses[httplib.NOT_FOUND]`` is ``'Not Found'``.
"""
responses = None
|
Python
|
CL
|
6787309c058a484b32e43864f564fb1319853ad0ec7a16c3237c958bc7f2c8d7
|
# ------------------------------------------------------------------------------
# Test ical Format
# see also test_vevents.py, test_vutils.py and test_vcalendar.py
# ------------------------------------------------------------------------------
import sys
import datetime as dt
import pytz
from io import BytesIO
from icalendar import vDatetime
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.messages.storage.fallback import FallbackStorage
from django.contrib import messages
from django.test import TestCase, RequestFactory
from django.utils import timezone
from wagtail.core.models import Site, Page
from ls.joyous.models.calendar import CalendarPage
from ls.joyous.models import (SimpleEventPage, MultidayEventPage,
RecurringEventPage, CancellationPage, MultidayRecurringEventPage,
RescheduleMultidayEventPage)
from ls.joyous.models import getAllEvents
from ls.joyous.utils.recurrence import Recurrence
from ls.joyous.utils.recurrence import WEEKLY, MONTHLY, TU, SA
from ls.joyous.formats.ical import ICalHandler
from freezegun import freeze_time
from .testutils import datetimetz
# ------------------------------------------------------------------------------
class TestImport(TestCase):
def setUp(self):
Site.objects.update(hostname="joy.test")
self.home = Page.objects.get(slug='home')
self.user = User.objects.create_user('i', 'i@joy.test', 's3cr3t')
self.requestFactory = RequestFactory()
self.calendar = CalendarPage(owner = self.user,
slug = "events",
title = "Events")
self.home.add_child(instance=self.calendar)
self.calendar.save_revision().publish()
self.handler = ICalHandler()
def _getRequest(self, path="/"):
request = self.requestFactory.get(path)
request.user = self.user
request.site = self.home.get_site()
request.session = {}
request._messages = FallbackStorage(request)
request.POST = request.POST.copy()
request.POST['action-publish'] = "action-publish"
return request
@freeze_time("2018-07-24 19:00:00")
def testMeetup(self):
stream = BytesIO(b"""\
BEGIN:VCALENDAR\r
VERSION:2.0\r
PRODID:-//Meetup//RemoteApi//EN\r
CALSCALE:GREGORIAN\r
METHOD:PUBLISH\r
X-ORIGINAL-URL:https://www.meetup.com/Code-for-Boston/events/249894034/ic\r
al/Weekly+Hack+Night.ics\r
X-WR-CALNAME:Events - Weekly Hack Night.ics\r
X-MS-OLK-FORCEINSPECTOROPEN:TRUE\r
BEGIN:VTIMEZONE\r
TZID:America/New_York\r
X-LIC-LOCATION:America/New_York\r
BEGIN:DAYLIGHT\r
TZOFFSETFROM:-0500\r
TZOFFSETTO:-0400\r
TZNAME:EDT\r
DTSTART:19700308T020000\r
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU\r
END:DAYLIGHT\r
BEGIN:STANDARD\r
TZOFFSETFROM:-0400\r
TZOFFSETTO:-0500\r
TZNAME:EST\r
DTSTART:19701101T020000\r
RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU\r
END:STANDARD\r
END:VTIMEZONE\r
BEGIN:VEVENT\r
DTSTAMP:20180721T015100Z\r
DTSTART;TZID=America/New_York:20180724T190000\r
DTEND;TZID=America/New_York:20180724T213000\r
STATUS:CONFIRMED\r
SUMMARY:Weekly Hack Night\r
DESCRIPTION:Code for Boston\\nTuesday\\, July 24 at 7:00 PM\\n\\nOur weekly w\r
ork session will be at the Cambridge Innovation Center in Kendall Square\r
\\, on the FOURTH FLOOR\\, in the CAFE. These Hack Nights are our time...\\\r
n\\nhttps://www.meetup.com/Code-for-Boston/events/249894034/\r
CLASS:PUBLIC\r
CREATED:20180404T010420Z\r
GEO:42.36;-71.09\r
LOCATION:Cambridge Innovation Center\\, 4th Floor Cafe (1 Broadway\\, Cambr\r
idge\\, MA)\r
URL:https://www.meetup.com/Code-for-Boston/events/249894034/\r
LAST-MODIFIED:20180404T010420Z\r
UID:event_xwqmnpyxkbgc@meetup.com\r
END:VEVENT\r
END:VCALENDAR""")
self.handler.load(self.calendar, self._getRequest(), stream)
events = SimpleEventPage.events.child_of(self.calendar).all()
self.assertEqual(len(events), 1)
event = events[0]
self.assertEqual(event.owner, self.user)
self.assertEqual(event.slug, "weekly-hack-night")
self.assertEqual(event.title, "Weekly Hack Night")
self.assertEqual(event.details, "\n".join(["Code for Boston",
"Tuesday, July 24 at 7:00 PM", "",
"Our weekly work session will be at the Cambridge Innovation Center in Kendall Square"
", on the FOURTH FLOOR, in the CAFE. These Hack Nights are our time...", "",
"https://www.meetup.com/Code-for-Boston/events/249894034/"]))
self.assertEqual(event.date, dt.date(2018,7,24))
self.assertEqual(event.time_from, dt.time(19))
self.assertEqual(event.time_to, dt.time(21,30))
self.assertEqual(event.tz.zone, "America/New_York")
@freeze_time("2018-02-01")
@timezone.override("Pacific/Auckland")
def testGoogleCalendar(self):
stream = BytesIO(rb"""
BEGIN:VCALENDAR
PRODID:-//Google Inc//Google Calendar 70.9054//EN
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:PUBLISH
X-WR-CALNAME:Test Data
X-WR-TIMEZONE:Pacific/Auckland
X-WR-CALDESC:Sample data for Joyous test_ical unittest
BEGIN:VTIMEZONE
TZID:Pacific/Auckland
X-LIC-LOCATION:Pacific/Auckland
BEGIN:DAYLIGHT
TZOFFSETFROM:+1200
TZOFFSETTO:+1300
TZNAME:NZDT
DTSTART:19700927T020000
RRULE:FREQ=YEARLY;BYMONTH=9;BYDAY=-1SU
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:+1300
TZOFFSETTO:+1200
TZNAME:NZST
DTSTART:19700405T030000
RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
DTSTART:20180725T210000Z
DTEND:20180726T083000Z
DTSTAMP:20180722T060025Z
UID:1uas8vo82gvhtn8jpr9nlnrmfk@google.com
CREATED:20180722T035919Z
DESCRIPTION:Hounit <b>catlike</b> at ethatial to thin a usistiques onshiend
alits mily tente duse prommuniss ind sedships itommunte of perpollood.
LAST-MODIFIED:20180722T035919Z
LOCATION:
SEQUENCE:0
STATUS:CONFIRMED
SUMMARY:Big Thursday
TRANSP:OPAQUE
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=Pacific/Auckland:20180703T093000
DTEND;TZID=Pacific/Auckland:20180703T113000
RRULE:FREQ=WEEKLY;UNTIL=20180828T115959Z;BYDAY=TU
EXDATE;TZID=Pacific/Auckland:20180814T093000
DTSTAMP:20180722T060025Z
UID:113qbmq1j4jf0jbiolheruff6n@google.com
CREATED:20180722T035429Z
DESCRIPTION:\nFammulturacha matent theaminerviencess atinjuse it shin sue o
f Aothips to ming an sed prage thnisithass invernships oftegruct and encome
. Taimen in grose to to ner grough ingin orgagences' of Fries seed\n\nFrith
erovere Houps of custims analienessuppol. Tiriendindnew\, vality a gruccous
er to be the juse Truch ince lity Te therneramparcialues the the neshipland
s tortandamength\, Comene ups a mitioney dend peachassfy de are to entices
meand evelas of Friscerple th iseek arces a wind.
LAST-MODIFIED:20180722T035937Z
LOCATION:Coast Rd\, Barrytown\, New Zealand
SEQUENCE:0
STATUS:CONFIRMED
SUMMARY:Tuesday Mornings
TRANSP:OPAQUE
END:VEVENT
BEGIN:VEVENT
DTSTART;VALUE=DATE:20180713
DTEND;VALUE=DATE:20180716
DTSTAMP:20180722T060025Z
UID:01likr2u3bchpv66o7vvq23avq@google.com
CREATED:20180722T040054Z
DESCRIPTION:
LAST-MODIFIED:20180722T040054Z
LOCATION:Home
SEQUENCE:0
STATUS:CONFIRMED
SUMMARY:Three days off
TRANSP:TRANSPARENT
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=Pacific/Auckland:20180725T093000
DTEND;TZID=Pacific/Auckland:20180725T113000
DTSTAMP:20180722T060025Z
UID:113qbmq1j4jf0jbiolheruff6n@google.com
RECURRENCE-ID;TZID=Pacific/Auckland:20180724T093000
CREATED:20180722T035429Z
DESCRIPTION:\nFammulturacha matent theaminerviencess atinjuse it shin sue o
f Aothips to ming an sed prage thnisithass invernships oftegruct and encome
. Taimen in grose to to ner grough ingin orgagences' of Fries seed\n\nFrith
erovere Houps of custims analienessuppol. Tiriendindnew\, vality a gruccous
er to be the juse Truch ince lity Te therneramparcialues the the neshipland
s tortandamength\, Comene ups a mitioney dend peachassfy de are to entices
meand evelas of Friscerple th iseek arces a wind.
LAST-MODIFIED:20180722T051000Z
LOCATION:Coast Rd\, Barrytown\, New Zealand
SEQUENCE:1
STATUS:CONFIRMED
SUMMARY:Tuesday Mornings Postponed
TRANSP:OPAQUE
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=Pacific/Auckland:20180731T093000
DTEND;TZID=Pacific/Auckland:20180731T113000
DTSTAMP:20180722T060025Z
UID:113qbmq1j4jf0jbiolheruff6n@google.com
RECURRENCE-ID;TZID=Pacific/Auckland:20180731T093000
CREATED:20180722T035429Z
DESCRIPTION:\nExtra Famin fork\, andivery\, Hough in the re of re whels ot
edshiplue porturat inve in nurectic.
LAST-MODIFIED:20180722T051201Z
LOCATION:Coast Rd\, Barrytown\, New Zealand
SEQUENCE:0
STATUS:CONFIRMED
SUMMARY:Tuesday Morning Extra Info
TRANSP:OPAQUE
END:VEVENT
BEGIN:VEVENT
DTSTART:20180717T220000Z
DTEND:20180717T223000Z
DTSTAMP:20180722T060025Z
UID:3gqued55jui7omavqfr30civqp@google.com
CREATED:20180722T050847Z
DESCRIPTION:
LAST-MODIFIED:20180722T055756Z
LOCATION:Pariroa Beach
SEQUENCE:0
STATUS:CONFIRMED
SUMMARY:Little Wednesday
TRANSP:OPAQUE
END:VEVENT
BEGIN:VEVENT
DTSTART:20180723T190000Z
DTEND:20180723T200000Z
DTSTAMP:20180722T060025Z
UID:1tqm6t508anprpeckn3rlndg6b@google.com
CREATED:20180722T055954Z
DESCRIPTION:
LAST-MODIFIED:20180722T055954Z
LOCATION:
SEQUENCE:0
STATUS:CONFIRMED
SUMMARY:Conference Call
TRANSP:OPAQUE
END:VEVENT
END:VCALENDAR
""")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
msgs = list(messages.get_messages(request))
self.assertEqual(len(msgs), 1)
self.assertEqual(msgs[0].level, messages.SUCCESS)
self.assertEqual(msgs[0].message, "5 iCal events loaded")
events = getAllEvents(request, home=self.calendar)
self.assertEqual(len(events), 5)
tueMorn, daysOff, lilWeds, cnfCall, bigThur = events
self.assertEqual(tueMorn.owner, self.user)
self.assertEqual(tueMorn.slug, "tuesday-mornings")
self.assertEqual(tueMorn.title, "Tuesday Mornings")
self.assertEqual(tueMorn.details, "\n".join(["",
"Fammulturacha matent theaminerviencess atinjuse it shin sue of "
"Aothips to ming an sed prage thnisithass invernships oftegruct "
"and encome. Taimen in grose to to ner grough ingin orgagences' "
"of Fries seed", "",
"Fritherovere Houps of custims analienessuppol. Tiriendindnew, "
"vality a gruccouser to be the juse Truch ince lity Te "
"therneramparcialues the the neshiplands tortandamength, "
"Comene ups a mitioney dend peachassfy de are to entices meand "
"evelas of Friscerple th iseek arces a wind."]))
self.assertEqual(tueMorn.tz.zone, "Pacific/Auckland")
self.assertEqual(tueMorn.time_from, dt.time(9,30))
self.assertEqual(tueMorn.time_to, dt.time(11,30))
self.assertEqual(tueMorn.location, "Coast Rd, Barrytown, New Zealand")
self.assertEqual(tueMorn.when,
"Tuesdays (until 28 August 2018) at 9:30am to 11:30am")
tueExceptions = tueMorn.get_children()
self.assertEqual(len(tueExceptions), 3)
tue24th, tue31st, tue14th = [page.specific for page in tueExceptions]
self.assertEqual(tue24th.owner, self.user)
self.assertEqual(tue24th.overrides, tueMorn)
self.assertEqual(tue24th.slug, "2018-07-24-postponement")
self.assertEqual(tue24th.title, "Postponement for Tuesday 24th of July")
self.assertEqual(tue24th.details, tueMorn.details)
self.assertEqual(tue24th.tz.zone, "Pacific/Auckland")
self.assertEqual(tue24th.except_date,dt.date(2018,7,24))
self.assertEqual(tue24th.date, dt.date(2018,7,25))
self.assertEqual(tue24th.time_from, dt.time(9,30))
self.assertEqual(tue24th.time_to, dt.time(11,30))
self.assertEqual(tue24th.location, "Coast Rd, Barrytown, New Zealand")
self.assertEqual(tue31st.owner, self.user)
self.assertEqual(tue31st.overrides, tueMorn)
self.assertEqual(tue31st.slug, "2018-07-31-extra-info")
self.assertEqual(tue31st.title, "Extra-Info for Tuesday 31st of July")
self.assertEqual(tue31st.extra_title,"Tuesday Morning Extra Info")
self.assertEqual(tue31st.extra_information, "\n".join(["",
"Extra Famin fork, andivery, Hough in the re of re whels "
"otedshiplue porturat inve in nurectic."]))
self.assertEqual(tue31st.tz.zone, "Pacific/Auckland")
self.assertEqual(tue31st.except_date,dt.date(2018,7,31))
self.assertEqual(tue14th.owner, self.user)
self.assertEqual(tue14th.overrides, tueMorn)
self.assertEqual(tue14th.slug, "2018-08-14-cancellation")
self.assertEqual(tue14th.title, "Cancellation for Tuesday 14th of August")
self.assertEqual(tue14th.cancellation_title, "")
self.assertEqual(tue14th.cancellation_details, "")
self.assertEqual(tue14th.tz.zone, "Pacific/Auckland")
self.assertEqual(tue14th.except_date,dt.date(2018,8,14))
self.assertEqual(daysOff.owner, self.user)
self.assertEqual(daysOff.slug, "three-days-off")
self.assertEqual(daysOff.title, "Three days off")
self.assertEqual(daysOff.details, "")
self.assertEqual(daysOff.tz.zone, "Pacific/Auckland")
self.assertEqual(daysOff.date_from, dt.date(2018,7,13))
self.assertEqual(daysOff.time_from, None)
self.assertEqual(daysOff.date_to, dt.date(2018,7,15))
self.assertEqual(daysOff.time_to, None)
self.assertEqual(daysOff.location, "Home")
self.assertEqual(lilWeds.owner, self.user)
self.assertEqual(lilWeds.slug, "little-wednesday")
self.assertEqual(lilWeds.title, "Little Wednesday")
self.assertEqual(lilWeds.details, "")
self.assertEqual(lilWeds.tz, pytz.utc)
self.assertEqual(lilWeds.date, dt.date(2018,7,17))
self.assertEqual(lilWeds.time_from, dt.time(22))
self.assertEqual(lilWeds.time_to, dt.time(22,30))
self.assertEqual(lilWeds.location, "Pariroa Beach")
self.assertEqual(lilWeds.when, "Wednesday 18th of July at 10am to 10:30am")
self.assertEqual(cnfCall.owner, self.user)
self.assertEqual(cnfCall.slug, "conference-call")
self.assertEqual(cnfCall.title, "Conference Call")
self.assertEqual(cnfCall.details, "")
self.assertEqual(cnfCall.tz, pytz.utc)
self.assertEqual(cnfCall.date, dt.date(2018,7,23))
self.assertEqual(cnfCall.time_from, dt.time(19))
self.assertEqual(cnfCall.time_to, dt.time(20))
self.assertEqual(bigThur.owner, self.user)
self.assertEqual(bigThur.slug, "big-thursday")
self.assertEqual(bigThur.title, "Big Thursday")
self.assertEqual(bigThur.details,
"Hounit <b>catlike</b> at ethatial to thin a usistiques onshiend "
"alits mily tente duse prommuniss ind sedships itommunte of perpollood.")
self.assertEqual(bigThur.tz, pytz.utc)
self.assertEqual(bigThur.date_from, dt.date(2018,7,25))
self.assertEqual(bigThur.time_from, dt.time(21))
self.assertEqual(bigThur.date_to, dt.date(2018,7,26))
self.assertEqual(bigThur.time_to, dt.time(8,30))
self.assertEqual(bigThur.when, "Thursday 26th of July at 9am to 8:30pm")
@freeze_time("2018-02-01")
@timezone.override("Pacific/Auckland")
def testUtc2Local(self):
stream = BytesIO(rb"""
BEGIN:VCALENDAR
PRODID:-//Google Inc//Google Calendar 70.9054//EN
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:PUBLISH
X-WR-CALNAME:Test Data
X-WR-TIMEZONE:Australia/Sydney
X-WR-CALDESC:Sample data for Joyous test_ical unittest
BEGIN:VEVENT
DTSTART:20180725T210000Z
DTEND:20180726T083000Z
DTSTAMP:20180722T060025Z
UID:1uas8vo82gvhtn8jpr9nlnrmfk@google.com
CREATED:20180722T035919Z
DESCRIPTION:Hounit <b>catlike</b> at ethatial to thin a usistiques onshiend
alits mily tente duse prommuniss ind sedships itommunte of perpollood.
LAST-MODIFIED:20180722T035919Z
LOCATION:
SEQUENCE:0
STATUS:CONFIRMED
SUMMARY:Big Thursday
TRANSP:OPAQUE
END:VEVENT
END:VCALENDAR
""")
request = self._getRequest()
self.handler.load(self.calendar, request, stream, utc2local=True)
events = getAllEvents(request, home=self.calendar)
self.assertEqual(len(events), 1)
bigThur = events[0]
self.assertEqual(bigThur.owner, self.user)
self.assertEqual(bigThur.slug, "big-thursday")
self.assertEqual(bigThur.title, "Big Thursday")
self.assertEqual(bigThur.details,
"Hounit <b>catlike</b> at ethatial to thin a usistiques onshiend "
"alits mily tente duse prommuniss ind sedships itommunte of perpollood.")
self.assertEqual(bigThur.tz.zone, "Australia/Sydney")
self.assertEqual(bigThur.date_from, dt.date(2018,7,26))
self.assertEqual(bigThur.time_from, dt.time(7))
self.assertEqual(bigThur.date_to, dt.date(2018,7,26))
self.assertEqual(bigThur.time_to, dt.time(18,30))
self.assertEqual(bigThur.when, "Thursday 26th of July at 9am to 8:30pm")
def testZipFile(self):
path = "{}/djm@software.net.nz.ical.zip".format(settings.TEST_IMPORT_DIR)
stream = open(path, "rb")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
msgs = list(messages.get_messages(request))
self.assertEqual(len(msgs), 1)
self.assertEqual(msgs[0].level, messages.SUCCESS)
self.assertEqual(msgs[0].message, "2 iCal events loaded")
def testBadZipFile(self):
path = "{}/junk.zip".format(settings.TEST_IMPORT_DIR)
stream = open(path, "rb")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
msgs = list(messages.get_messages(request))
self.assertEqual(len(msgs), 1)
self.assertEqual(msgs[0].level, messages.ERROR)
self.assertEqual(msgs[0].message, "Could not parse iCalendar file "+path)
def testZippedInvalidFile(self):
path = "{}/foobar.ical.zip".format(settings.TEST_IMPORT_DIR)
stream = open(path, "rb")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
msgs = list(messages.get_messages(request))
self.assertEqual(len(msgs), 2)
self.assertEqual(msgs[0].level, messages.ERROR)
self.assertEqual(msgs[0].message,
"Could not parse iCalendar file foobar@group.calendar.google.com.ics")
self.assertEqual(msgs[1].level, messages.SUCCESS)
self.assertEqual(msgs[1].message, "1 iCal events loaded")
def testOutlook(self):
stream = BytesIO(rb"""
BEGIN:VCALENDAR
PRODID:-//Microsoft Corporation//Outlook 11.0 MIMEDIR//EN
VERSION:2.0
METHOD:PUBLISH
BEGIN:VEVENT
DTSTART:20180730T092500
DTEND:20180730T101500
UID:7N7Y7V6J4N2U4I3U7H0N7W5O4V2U0K3H2E4Q4O7A2H0W1A5M6N
DTSTAMP:20180728T035656
DESCRIPTION;ENCODING=QUOTED-PRINTABLE:Booking number 9876543=0D=0A=0D=0AYour outgoing route is Westport > Wellington.=0D=0AThis route departs Westport on 30/Jul/2018 09:25 and arrives at Wellington at 10:15. The check-in time is 08:55.=0A
SUMMARY;ENCODING=QUOTED-PRINTABLE:Sounds Air - Flight Reminder
PRIORITY:3
BEGIN:VALARM
TRIGGER:-PT24H
ACTION:DISPLAY
DESCRIPTION:Reminder
END:VALARM
END:VEVENT
END:VCALENDAR
BEGIN:VCALENDAR
PRODID:-//Microsoft Corporation//Outlook 11.0 MIMEDIR//EN
VERSION:2.0
METHOD:PUBLISH
BEGIN:VEVENT
DTSTART:20180731T081500
DTEND:20180731T090000
UID:1G0K0V7K4L0H4Q4T5F4R8U2E0D0S4H2M6O1J6M5C5S2R4D0S2Q
DTSTAMP:20180728T035656
DESCRIPTION;ENCODING=QUOTED-PRINTABLE:Booking number 9876543=0D=0A=0D=0A=0D=0AYour return route is Wellington > Westport.=0D=0AThis route departs Wellington on 31/Jul/2018 08:15 and arrives at Westport at 09:00. The check-in time is 07:45.=0A
SUMMARY;ENCODING=QUOTED-PRINTABLE:Sounds Air - Flight Reminder
PRIORITY:3
BEGIN:VALARM
TRIGGER:-PT24H
ACTION:DISPLAY
DESCRIPTION:Reminder
END:VALARM
END:VEVENT
END:VCALENDAR
""")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
events = [page.specific for page in self.calendar.get_children()]
self.assertEqual(len(events), 2)
flight1, flight2 = events
self.assertEqual(flight1.slug, "sounds-air-flight-reminder")
self.assertEqual(flight1.title, "Sounds Air - Flight Reminder")
self.assertEqual(flight1.details, "\r\n".join(["Booking number 9876543",
"", "Your outgoing route is Westport > Wellington.",
"This route departs Westport on 30/Jul/2018 09:25 and arrives at "
"Wellington at 10:15. The check-in time is 08:55.\n"]))
self.assertEqual(flight1.tz.zone, "Asia/Tokyo")
self.assertEqual(flight1.date, dt.date(2018,7,30))
self.assertEqual(flight1.time_from, dt.time(9,25))
self.assertEqual(flight1.time_to, dt.time(10,15))
self.assertEqual(flight2.slug, "sounds-air-flight-reminder-2")
self.assertEqual(flight2.title, "Sounds Air - Flight Reminder")
self.assertEqual(flight2.details, "\r\n".join(["Booking number 9876543",
"", "", "Your return route is Wellington > Westport.",
"This route departs Wellington on 31/Jul/2018 08:15 and arrives at "
"Westport at 09:00. The check-in time is 07:45.\n"]))
self.assertEqual(flight2.tz.zone, "Asia/Tokyo")
self.assertEqual(flight2.date, dt.date(2018,7,31))
self.assertEqual(flight2.time_from, dt.time(8,15))
self.assertEqual(flight2.time_to, dt.time(9))
def testFacebook(self):
stream = BytesIO(rb"""
BEGIN:VCALENDAR
PRODID:-//Facebook//NONSGML Facebook Events V1.0//EN
X-PUBLISHED-TTL:PT12H
X-ORIGINAL-URL:https://www.facebook.com/events/501511573641525/
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:PUBLISH
BEGIN:VEVENT
DTSTAMP:20180729T102010Z
LAST-MODIFIED:20180729T102010Z
CREATED:20180729T102010Z
SEQUENCE:0
ORGANIZER;CN=Jjjj Bbbbb:MAILTO:noreply@facebookmail.com
ATTENDEE;CN=Bbbbb Wwwwww;PARTSTAT=ACCEPTED:https://www.facebook.com/bbwwwwww
ATTENDEE;CN=Jjjj Bbbbb;PARTSTAT=ACCEPTED:https://www.facebook.com/jjjj.bbbbb
ATTENDEE;CN=Pppp Tttttt;PARTSTAT=TENTATIVE:https://www.facebook.com/pppp.tttttt.123
DTSTART:20180831T070000Z
DTEND:20180831T100000Z
UID:e501511573641525@facebook.com
SUMMARY:Photo Comp - Prize Giving
LOCATION:TBC
URL:https://www.facebook.com/events/501511573641525/
DESCRIPTION:The much anticipated 2018 West Coa
st Alpine Club is open!\nEntries cl
ose midnight Friday 24th August. F
ull details and entry form in the
linked PDF: https://www.dropbox.co
m/s/5vxnep33ccxok9z/PhotoCompDetai
ls.pdf?dl=0\nDetails of the prize g
iving will be added here in due co
urse\, but save the date in the mea
n time.\n\nhttps://www.facebook.com/
events/501511573641525/
CLASS:PUBLIC
STATUS:CONFIRMED
PARTSTAT:NEEDS-ACTION
END:VEVENT
END:VCALENDAR
""")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
events = self.calendar.get_children()
self.assertEqual(len(events), 1)
event = events[0].specific
self.assertEqual(event.slug, "photo-comp-prize-giving")
self.assertEqual(event.title, "Photo Comp - Prize Giving")
self.assertEqual(event.details, "\n".join([
"The much anticipated 2018 West Coast Alpine Club is open!",
"Entries close midnight Friday 24th August. Full details and "
"entry form in the linked PDF: https://www.dropbox.com/s/"
"5vxnep33ccxok9z/PhotoCompDetails.pdf?dl=0",
"Details of the prize giving will be added here in due course, "
"but save the date in the mean time.", "",
"https://www.facebook.com/events/501511573641525/"]))
self.assertEqual(event.tz.zone, "UTC")
self.assertEqual(event.date, dt.date(2018,8,31))
self.assertEqual(event.time_from, dt.time(7))
self.assertEqual(event.time_to, dt.time(10))
def testUntilTZ(self):
stream = BytesIO(rb"""
BEGIN:VCALENDAR
PRODID:-//Google Inc//Google Calendar 70.9054//EN
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:PUBLISH
X-WR-CALNAME:djm6809@gmail.com
X-WR-TIMEZONE:Pacific/Auckland
BEGIN:VTIMEZONE
TZID:America/New_York
X-LIC-LOCATION:America/New_York
BEGIN:DAYLIGHT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
TZNAME:EDT
DTSTART:19700308T020000
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
TZNAME:EST
DTSTART:19701101T020000
RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20310101T050000
DTEND;TZID=America/New_York:20310101T070000
RRULE:FREQ=DAILY;UNTIL=20310108T045959Z
DTSTAMP:20190331T203301Z
UID:566vrur2ldqkvardnrb6tfrbdu@google.com
CREATED:20190331T200304Z
DESCRIPTION:New Year resolution
LAST-MODIFIED:20190331T203219Z
LOCATION:New York\, NY\, USA
SEQUENCE:5
STATUS:CONFIRMED
SUMMARY:Exercise
TRANSP:OPAQUE
END:VEVENT
END:VCALENDAR""")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
events = self.calendar.get_children()
self.assertEqual(len(events), 1)
event = events[0].specific
self.assertIs(type(event), RecurringEventPage)
self.assertEqual(event.slug, "exercise")
self.assertEqual(event.tz.zone, "America/New_York")
self.assertEqual(event.time_from, dt.time(5))
self.assertEqual(event.time_to, dt.time(7))
self.assertEqual(event.repeat.getCount(), 7)
self.assertTrue(event._occursOn(dt.date(2031,1,1)))
self.assertFalse(event._occursOn(dt.date(2031,1,8)))
def testMultidayRecurringEvent(self):
stream = BytesIO(rb"""
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//linuxsoftware.nz//NONSGML Joyous v0.8//EN
BEGIN:VEVENT
SUMMARY:Bought from a Rubber Man
DTSTART;TZID=Pacific/Auckland:20190402T160000
DTEND;TZID=Pacific/Auckland:20190404T180000
DTSTAMP:20190405T054311Z
UID:e6936872-f15c-4c47-92f2-3559a6610c78
SEQUENCE:1
RRULE:FREQ=WEEKLY;BYDAY=TU;WKST=SU
CREATED:20190405T054255Z
DESCRIPTION:<p></p>
LAST-MODIFIED:20190405T054255Z
LOCATION:
URL:http://localhost/calendar/bought-rubber-man/
END:VEVENT
BEGIN:VTIMEZONE
TZID:Pacific/Auckland
BEGIN:DAYLIGHT
DTSTART;VALUE=DATE-TIME:20180930T030000
RDATE:20190929T030000,20200927T030000,20210926T030000,20220925T030000,2023
0924T030000,20240929T030000,20250928T030000,20260927T030000,20270926T03000
0,20280924T030000,20290930T030000,20300929T030000,20310928T030000,20320926
T030000,20330925T030000,20340924T030000,20350930T030000,20360928T030000,20
370927T030000
TZNAME:NZDT
TZOFFSETFROM:+1200
TZOFFSETTO:+1300
END:DAYLIGHT
BEGIN:STANDARD
DTSTART;VALUE=DATE-TIME:20190407T020000
RDATE:20200405T020000,20210404T020000,20220403T020000,20230402T020000,2024
0407T020000,20250406T020000,20260405T020000,20270404T020000,20280402T02000
0,20290401T020000,20300407T020000,20310406T020000,20320404T020000,20330403
T020000,20340402T020000,20350401T020000,20360406T020000,20370405T020000
TZNAME:NZST
TZOFFSETFROM:+1300
TZOFFSETTO:+1200
END:STANDARD
END:VTIMEZONE
END:VCALENDAR""")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
events = self.calendar.get_children()
self.assertEqual(len(events), 1)
event = events[0].specific
self.assertIs(type(event), MultidayRecurringEventPage)
self.assertEqual(event.title, "Bought from a Rubber Man")
self.assertEqual(event.tz.zone, "Pacific/Auckland")
self.assertEqual(event.num_days, 3)
self.assertEqual(event.time_from, dt.time(16))
self.assertEqual(event.time_to, dt.time(18))
def testMultidayRescheduleEvent(self):
stream = BytesIO(rb"""
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//linuxsoftware.nz//NONSGML Joyous v0.9//EN
BEGIN:VTIMEZONE
TZID:Pacific/Auckland
BEGIN:STANDARD
DTSTART;VALUE=DATE-TIME:20200405T020000
RDATE:20210404T020000,20220403T020000,20230402T020000,20240407T020000,2025
0406T020000,20260405T020000,20270404T020000,20280402T020000,20290401T02000
0,20300407T020000,20310406T020000,20320404T020000,20330403T020000,20340402
T020000,20350401T020000,20360406T020000,20370405T020000
TZNAME:NZST
TZOFFSETFROM:+1300
TZOFFSETTO:+1200
END:STANDARD
BEGIN:DAYLIGHT
DTSTART;VALUE=DATE-TIME:20190929T030000
RDATE:20200927T030000,20210926T030000,20220925T030000,20230924T030000,2024
0929T030000,20250928T030000,20260927T030000,20270926T030000,20280924T03000
0,20290930T030000,20300929T030000,20310928T030000,20320926T030000,20330925
T030000,20340924T030000,20350930T030000,20360928T030000,20370927T030000
TZNAME:NZDT
TZOFFSETFROM:+1200
TZOFFSETTO:+1300
END:DAYLIGHT
END:VTIMEZONE
BEGIN:VEVENT
SUMMARY:Colour In
DTSTART;TZID=Pacific/Auckland:20200101T103000
DTEND;TZID=Pacific/Auckland:20200102T140000
DTSTAMP:20200101T012156Z
UID:6ca93786-722e-410c-91a2-bc8a6ecdadb9
SEQUENCE:1
RRULE:FREQ=WEEKLY;BYDAY=WE;WKST=SU
CREATED:20200101T011254Z
DESCRIPTION:Paint that scene.
X-ALT-DESC;FMTTYPE=text/html:<h2>Paint that scene.</h2>
LAST-MODIFIED:20200101T011254Z
LOCATION:
URL:http://localhost/calendar/colour/
END:VEVENT
BEGIN:VEVENT
SUMMARY:Knock
DTSTART;TZID=Pacific/Auckland:20200108T110000
DTEND;TZID=Pacific/Auckland:20200109T140000
DTSTAMP:20200101T012156Z
UID:6ca93786-722e-410c-91a2-bc8a6ecdadb9
RECURRENCE-ID;TZID=Pacific/Auckland:20200108T103000
SEQUENCE:1
CREATED:20200101T011852Z
DESCRIPTION:
LAST-MODIFIED:20200101T011852Z
LOCATION:
URL:http://localhost/calendar/colour/2020-01-08-postponement/
END:VEVENT
BEGIN:VEVENT
SUMMARY:Change
DTSTART;TZID=Pacific/Auckland:20200116T110000
DTEND;TZID=Pacific/Auckland:20200116T143000
DTSTAMP:20200101T012156Z
UID:6ca93786-722e-410c-91a2-bc8a6ecdadb9
RECURRENCE-ID;TZID=Pacific/Auckland:20200115T103000
SEQUENCE:1
CREATED:20200101T012044Z
DESCRIPTION:
LAST-MODIFIED:20200101T012044Z
LOCATION:
URL:http://localhost/calendar/colour/2020-01-15-postponement/
END:VEVENT
END:VCALENDAR""")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
events = self.calendar.get_children()
self.assertEqual(len(events), 1)
event = events[0].specific
self.assertIs(type(event), MultidayRecurringEventPage)
self.assertEqual(event.title, "Colour In")
self.assertEqual(event.details, "<h2>Paint that scene.</h2>")
self.assertEqual(event.tz.zone, "Pacific/Auckland")
self.assertEqual(event.num_days, 2)
self.assertEqual(event.time_from, dt.time(10,30))
self.assertEqual(event.time_to, dt.time(14))
exceptions = event.get_children()
self.assertEqual(len(exceptions), 2)
resched = exceptions[0].specific
self.assertIs(type(resched), RescheduleMultidayEventPage)
self.assertEqual(resched.postponement_title, "Knock")
self.assertEqual(resched.num_days, 2)
self.assertEqual(resched.time_from, dt.time(11))
self.assertEqual(resched.time_to, dt.time(14))
resched = exceptions[1].specific
self.assertIs(type(resched), RescheduleMultidayEventPage)
self.assertEqual(resched.postponement_title, "Change")
self.assertEqual(resched.num_days, 1)
self.assertEqual(resched.time_from, dt.time(11))
self.assertEqual(resched.time_to, dt.time(14,30))
def testLoadInvalidFile(self):
stream = BytesIO(rb"""FOO:BAR:SNAFU""")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
msgs = list(messages.get_messages(request))
self.assertEqual(len(msgs), 1)
msg = msgs[0]
self.assertEqual(msg.level, messages.ERROR)
self.assertEqual(msg.message, "Could not parse iCalendar file ")
def testLoadEventMissingUID(self):
stream = BytesIO(rb"""
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Bloor & Spadina - ECPv4.6.13//NONSGML v1.0//EN
CALSCALE:GREGORIAN
METHOD:PUBLISH
X-WR-CALNAME:Bloor & Spadina
X-ORIGINAL-URL:http://bloorneighbours.ca
X-WR-CALDESC:Events for Bloor & Spadina
BEGIN:VEVENT
DTSTART;TZID=UTC+0:20180407T093000
DTEND;TZID=UTC+0:20180407T113000
DTSTAMP:20180402T054745
CREATED:20180304T225154Z
LAST-MODIFIED:20180304T225154Z
SUMMARY:Mini-Fair & Garage Sale
DESCRIPTION:
URL:http://bloorneighbours.ca/event/mini-fair-garage-sale/
END:VEVENT
END:VCALENDAR""")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
events = SimpleEventPage.events.child_of(self.calendar) \
.filter(date=dt.date(2018,4,7)).all()
self.assertEqual(len(events), 0)
msgs = list(messages.get_messages(request))
self.assertEqual(len(msgs), 1)
msg = msgs[0]
self.assertEqual(msg.level, messages.ERROR)
self.assertEqual(msg.message, "Could not load 1 iCal events")
# ------------------------------------------------------------------------------
class TestExport(TestCase):
def setUp(self):
Site.objects.update(hostname="joy.test")
self.home = Page.objects.get(slug='home')
self.user = User.objects.create_user('i', 'i@joy.test', 's3(R3t')
self.requestFactory = RequestFactory()
self.calendar = CalendarPage(owner = self.user,
slug = "events",
title = "Events")
self.home.add_child(instance=self.calendar)
self.calendar.save_revision().publish()
self.dicerun = SimpleEventPage(owner = self.user,
slug = "mercy-dice-run",
title = "Mercy Dice Run",
date = dt.date(2020,3,16),
location = "Newtown")
self.calendar.add_child(instance=self.dicerun)
self.dicerun.save_revision().publish()
event = SimpleEventPage(owner = self.user,
slug = "workshop",
title = "Workshop",
date = dt.date(2020,3,22))
self.calendar.add_child(instance=event)
event.save_revision().publish()
self.handler = ICalHandler()
def _getRequest(self, path="/"):
request = self.requestFactory.get(path)
request.user = self.user
request.site = self.home.get_site()
request.session = {}
request._messages = FallbackStorage(request)
request.POST = request.POST.copy()
request.POST['action-publish'] = "action-publish"
return request
def testServeCalendar(self):
response = self.handler.serve(self.calendar,
self._getRequest("/events/"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), "text/calendar")
self.assertEqual(response.get('Content-Disposition'),
"attachment; filename=events.ics")
self.assertEqual(response.content.count(b"BEGIN:VEVENT"), 2)
def testServeEvent(self):
response = self.handler.serve(self.dicerun,
self._getRequest("/events/mercy-dice-run/"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), "text/calendar")
self.assertEqual(response.get('Content-Disposition'),
"attachment; filename=mercy-dice-run.ics")
self.assertEqual(response.content.count(b"BEGIN:VEVENT"), 1)
self.assertIn(b"SUMMARY:Mercy Dice Run", response.content)
self.assertIn(b"DTSTART;TZID=Asia/Tokyo:20200316T000000", response.content)
self.assertIn(b"DTEND;TZID=Asia/Tokyo:20200316T235959", response.content)
self.assertIn(b"LOCATION:Newtown", response.content)
self.assertIn(b"URL:http://joy.test/events/mercy-dice-run", response.content)
def testServePage(self):
response = self.handler.serve(self.home, self._getRequest("/"))
self.assertIsNone(response)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
|
Python
|
CL
|
ca956f3df2c2adae13c7b03f953f91b438807785611c2503cf6c72867b4b8168
|
"""
Author: Andrew Garvey
Partner: Sargon Morad
Date: Aug 23, 2019
Client: Hospital for Sick Children
Purpose:
- Turn cleaned ED and DI data into usable ml data
"""
# clear variables
for name in dir():
if not name.startswith('_'):
del globals()[name]
del name
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib as mpl
import datetime as dt
import os
from pandasql import sqldf
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.feature_selection import mutual_info_classif
#Set dir
os.chdir('/home/andrew/PycharmProjects/SickKidsMMAI/Generated_Outputs/Data/')
#------------------------------------------------------------------------------------------------------------------------
# Final Cleaning towards ML usable Model DF
#Import Cleaned Datasets
ED_Clean = pd.read_csv('/home/andrew/PycharmProjects/SickKidsMMAI/Generated_Outputs/Data/ED_Clean.csv')
DI_Clean = pd.read_csv('/home/andrew/PycharmProjects/SickKidsMMAI/Generated_Outputs/Data/DI_Clean.csv')
ED_Clean.shape
# Restrict the joined rows to order dates that are acceptable (arrived -> order -> discharge)
# No tests at all for that visit is also fine; those will show up as null
# The DI timeframe entirely encompasses ED, so if they got a test it should be here.
# Could not find a clean way to do this without a bunch of extra work in python, so using sql
pysqldf = lambda q: sqldf(q, globals()) # Imports all current global variables to be able to be used in sql as df
All_Clean = pysqldf("SELECT * FROM ED_Clean AS e "
"LEFT JOIN DI_Clean AS d " # left join because NO tests is a valid answer to incoming patient
"ON e.MRN = d.MRN " # same person
"AND e.Arrived < d.[Order Time] " # arrived before order
"AND e.[Disch Date/Time] > d.[Order Time]") # discharged after order
All_Clean.isna().sum()
# Drop columns that we cannot possibly have AT THE TIME this model aims to be used (nearly all of DI, some of ED)
All_Clean_Reduced = All_Clean.drop(['ED Completed Length of Stay (Minutes)', 'Roomed', 'Disch Date/Time', 'Dispo',
'Roomed to Discharge', 'Roomed to Discharge', 'Arrived to Discharge',
'End Exam Time', 'Order Time', 'Finalized Time', 'Finalizing Physician', 'Order ID',
'Order to Protocolled (min)', 'Protocolled to Begin (min)', 'Order to Begin (min)',
'Begin to End (min)', 'End to Prelim (min)', 'End to Sign (min)',
'Order to End (min)', 'Order to Sign (min)', 'Protocolling Instant', 'Procedure id',
'Authorizing Provider id', 'Finalizing Physician id', 'Arrived to Roomed','ED Complaint' ], axis=1)
# drop second mrn column
di_mrn = len(All_Clean_Reduced.columns) -2 # second last column is dupe mrn
All_Clean_Reduced = All_Clean_Reduced.drop(All_Clean_Reduced.columns[di_mrn], axis=1)
# Arrived should capture just the hour of the day the patient arrived; the full datetime is not likely useful for the model
All_Clean_Reduced.dtypes
All_Clean_Reduced['Arrived'] = pd.to_datetime(All_Clean_Reduced['Arrived']).dt.hour #make this a dummy variable
# Replace category NaN with "none" text (index the frame with .loc to avoid the chained-assignment warning)
All_Clean_Reduced.loc[All_Clean_Reduced['Category id'].isna(), 'Category id'] = 'none'
# Aggregate to one row per CSN, joining each column's distinct values into a comma-delimited string (takes a few minutes)
All_Clean_Condensed_orig = All_Clean_Reduced.groupby('CSN', as_index=False).agg(lambda x: ', '.join(set(x.astype(str))))
All_Clean_Condensed = All_Clean_Condensed_orig.copy() # work on a copy because the groupby takes a while to re-run
# Dummy Variable all the things of relevance that should be converted to dummy variables
# not viable for CC, postal code, maybe later.
# Arrived(the hours one), day of arrival, province,
dummies = pd.get_dummies(All_Clean_Condensed['Province']).rename(columns=lambda x: 'Province_' + str(x))
All_Clean_Condensed = pd.concat([All_Clean_Condensed, dummies], axis=1)
dummies = pd.get_dummies(All_Clean_Condensed['Arrived']).rename(columns=lambda x: 'Arrived_Hour' + str(x))
All_Clean_Condensed = pd.concat([All_Clean_Condensed, dummies], axis=1)
dummies = pd.get_dummies(All_Clean_Condensed['Day of Arrival']).rename(columns=lambda x: 'Day_of_Arrival' + str(x))
All_Clean_Condensed = pd.concat([All_Clean_Condensed, dummies], axis=1)
dummies = pd.get_dummies(All_Clean_Condensed['Gender']).rename(columns=lambda x: 'Gender_' + str(x))
All_Clean_Condensed = pd.concat([All_Clean_Condensed, dummies], axis=1)
# Arrival Method simplified greatly: find the most common ones; each gets a 1/0 flag for whether it appears
Arrival_Method_Options = All_Clean_Condensed.groupby('Arrival Method').count().sort_values('CSN',ascending = False)
"""
Biggest Options:
Ambula (covers ambulance/ambulatory)
Walk
Car
"""
All_Clean_Condensed['Method_Ambulance'] = (All_Clean_Condensed['Arrival Method'].str.contains('Ambula'))
All_Clean_Condensed['Method_Walk'] = (All_Clean_Condensed['Arrival Method'].str.contains('Walk'))
All_Clean_Condensed['Method_Car'] = (All_Clean_Condensed['Arrival Method'].str.contains('Car'))
## CC simplified greatly: find the big keywords
CC_Options = All_Clean_Condensed.groupby('CC').count().sort_values('CSN',ascending = False)
CC_Options = CC_Options.loc[CC_Options['CSN']>15]
# capture each index option that has more than 15 instances
cc_list = CC_Options.index.values.astype(str)
# flag each visit whose CC contains that keyword (literal match, so regex metacharacters in complaint text don't break the check)
for x in cc_list:
All_Clean_Condensed[x] = All_Clean_Condensed['CC'].str.contains(x, regex=False)
# ----------------------------------------------------------------------------------------------------------------------
"""
Notable Categories for prediction
10 = X-Ray
9 = UltraSound
7 = MRI
2 = CT
"""
# Convert the category id column into 4 columns based on delimiter
All_Clean_Condensed['X-Ray'] = (All_Clean_Condensed['Category id'].str.contains('10.0'))
All_Clean_Condensed['US'] = (All_Clean_Condensed['Category id'].str.contains('9.0'))
All_Clean_Condensed['MRI'] = (All_Clean_Condensed['Category id'].str.contains('7.0'))
All_Clean_Condensed['CT'] = (All_Clean_Condensed['Category id'].str.contains('2.0'))
All_Clean_Condensed['Any'] = (All_Clean_Condensed['Category id'].str.contains(r'\d')) #any test of any kind
# for sharing, one time
# All_Clean_Condensed.to_csv(r'/home/andrew/PycharmProjects/SickKidsMMAI/Generated_Outputs/Data/ED_plus_Category_by_VISIT.csv', index = None, header=True)
# Remove columns if no longer needed for whatever reason
All_Clean_Dropped = All_Clean_Condensed.drop(['CSN', 'Arrival Method', 'CC', 'Postal Code',
'Province','Category id','Day of Arrival', 'Gender','Arrived' ], axis=1)
# Confirm all the columns are in use-able format
# All_Clean_Dropped.dtypes
# convert everything that is objects to floats or int
All_Clean_Dropped['Last Weight formatted'] = pd.to_numeric(All_Clean_Dropped['Last Weight formatted'], errors='coerce')
All_Clean_Dropped['Pulse Formatted'] = pd.to_numeric(All_Clean_Dropped['Pulse Formatted'], errors='coerce')
All_Clean_Dropped['Resp Formatted'] = pd.to_numeric(All_Clean_Dropped['Resp Formatted'], errors='coerce')
All_Clean_Dropped['Temp Formatted'] = pd.to_numeric(All_Clean_Dropped['Temp Formatted'], errors='coerce')
# Confirm all the columns are without nulls
All_Clean_Dropped = All_Clean_Dropped.dropna()
All_Clean_Dropped.isna().sum()
# ----------------------------------------------------------------------------------------------------------------------
# Remove some which have high dependencies/correlations, mostly caused by dummy variables
# corr matrix
corr = All_Clean_Dropped.iloc[:,[0,1,2,3,4,5,6,7,8,9,10]].corr()
sns.heatmap(corr)
plt.savefig("Corr Matrix.pdf") # save before show(), otherwise an empty figure is written
plt.show()
# Remove them
All_Clean_Dropped = All_Clean_Dropped[All_Clean_Dropped.columns.drop(list(All_Clean_Dropped.filter(regex='Province|Arrived_|Method|Day_of_Arrival')))]
All_Clean_Dropped = All_Clean_Dropped.drop(['Gender_U', 'Encounter Number', 'Visits Since Aug 2018','Gender_F' ], axis=1)
# Information Gain style statistics
Modalities = ['Any', 'X-Ray', 'US', 'MRI', 'CT']
X = All_Clean_Dropped.drop(Modalities, axis=1)
y = All_Clean_Dropped[Modalities]
Info_Gain = pd.DataFrame(pd.Series(All_Clean_Dropped.columns), columns=['Columns'])
for index in range(0,len(Modalities)):
modality = Modalities[index]
y_mod = y[modality] # 1-d target avoids sklearn's column-vector conversion warning
gain = mutual_info_classif(X, y_mod, random_state=42)
Info_Gain[str(modality)] = pd.Series(gain)
Info_Gain.to_csv('Info_Gain_Matrix.csv')
# Determine a threshold and drop ones that don't meet it
Info_Gain['max'] = Info_Gain.max(axis=1)
keep_index = np.array((Info_Gain['max'] > 0.0005) | (Info_Gain['max'].isna())) # helpful somewhere, many are straight 0s
All_Clean_final = All_Clean_Dropped.iloc[:,keep_index]
# ----------------------------------------------------------------------------------------------------------------------
# Write it to csv for easy reference
All_Clean_final.to_csv(r'/home/andrew/PycharmProjects/SickKidsMMAI/Generated_Outputs/Data/ML_Clean.csv', index = None, header=True)
# -----------------------------------------------------------------------------------------------------------------------
print("done 2")
|
Python
|
CL
|
47a1ad1d952e8a1a6bf75e7c5ed4fe5afe9db22383b2f747efcc870e3aee6b19
|
# © Copyright Databand.ai, an IBM Company 2022
import typing
from datetime import datetime
from typing import List
import attr
from airflow.models import BaseOperator, DagRun, TaskInstance
from airflow.utils.net import get_hostname
from dbnd._core.utils.uid_utils import source_md5
from dbnd_airflow.export_plugin.helpers import (
_add_source_code,
_extract_args_from_dict,
_get_command_from_operator,
_get_module_code,
_get_source_code,
_read_dag_file,
interval_to_str,
resolve_attribute_or_default_attribute,
resolve_attribute_or_default_value,
)
if typing.TYPE_CHECKING:
from typing import List
from airflow.models import DAG, DagModel, DagTag
class ETask(object):
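"""Export-friendly snapshot of an Airflow operator (ids, task relations, source hashes, command and args), built by from_task() and serialized by as_dict()."""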
def __init__(
self,
upstream_task_ids=None,
downstream_task_ids=None,
task_type=None,
task_source_code=None,
task_source_hash=None,
task_module_code=None,
module_source_hash=None,
dag_id=None,
task_id=None,
retries=None,
command=None,
task_args=None,
):
self.upstream_task_ids = list(upstream_task_ids) # type: List[str]
self.downstream_task_ids = list(downstream_task_ids) # type: List[str]
self.task_type = task_type
self.task_source_code = task_source_code
self.task_source_hash = task_source_hash
self.task_module_code = task_module_code
self.module_source_hash = module_source_hash
self.dag_id = dag_id
self.task_id = task_id
self.retries = retries
self.command = command
self.task_args = task_args
@staticmethod
def from_task(t, include_task_args, dag, include_source=True):
# type: (BaseOperator, bool, DAG, bool) -> ETask
module_code = _get_module_code(t) or _read_dag_file(dag.fileloc)
return ETask(
upstream_task_ids=t.upstream_task_ids,
downstream_task_ids=t.downstream_task_ids,
task_type=t.task_type,
task_source_hash=source_md5(_get_source_code(t)),
module_source_hash=source_md5(module_code),
dag_id=t.dag_id,
task_id=t.task_id,
retries=t.retries,
command=_get_command_from_operator(t),
task_args=_extract_args_from_dict(vars(t)) if include_task_args else {},
)
def as_dict(self):
return dict(
upstream_task_ids=self.upstream_task_ids,
downstream_task_ids=self.downstream_task_ids,
task_type=self.task_type,
task_source_code=self.task_source_code,
task_source_hash=self.task_source_hash,
task_module_code=self.task_module_code,
module_source_hash=self.module_source_hash,
dag_id=self.dag_id,
task_id=self.task_id,
retries=self.retries,
command=self.command,
task_args=self.task_args,
)
class EDagRun(object):
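"""Export-friendly view of an Airflow DagRun row; db_fields/query_fields() describe the columns to fetch and from_db_fields() builds an instance from them."""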
db_fields = [
"dag_id",
"id",
"start_date",
"state",
"end_date",
"execution_date",
"conf",
"run_id",
]
@classmethod
def query_fields(cls):
return [getattr(DagRun, key) for key in cls.db_fields]
def __init__(
self,
dag_id,
dagrun_id,
start_date,
state,
end_date,
execution_date,
task_args,
run_id,
):
self.dag_id = dag_id
self.dagrun_id = dagrun_id
self.start_date = start_date
self.state = state
self.end_date = end_date
self.execution_date = execution_date
self.task_args = task_args
self.run_id = run_id
@classmethod
def from_db_fields(
cls,
dag_id,
dagrun_id,
start_date,
state,
end_date,
execution_date,
conf,
run_id,
):
return cls(
dag_id,
dagrun_id,
start_date,
state,
end_date,
execution_date,
(_extract_args_from_dict(conf) if conf else {}),
run_id,
)
def __hash__(self):
return hash(self.dagrun_id)
def __eq__(self, other):
return isinstance(other, EDagRun) and self.dagrun_id == other.dagrun_id
def as_dict(self):
return dict(
dag_id=self.dag_id,
dagrun_id=self.dagrun_id,
start_date=self.start_date,
state=self.state,
end_date=self.end_date,
execution_date=self.execution_date,
task_args=self.task_args,
run_id=self.run_id,
)
class EDag(object):
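"""Export-friendly view of an Airflow DAG combining the DAG object and its DagModel row: schedule, state flags, source code and per-task source hashes."""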
def __init__(
self,
description,
root_task_ids,
tasks,
owner,
dag_id,
schedule_interval,
catchup,
start_date,
end_date,
dag_folder,
hostname,
source_code,
module_source_hash,
tasks_hash_to_source,
is_subdag,
task_type,
task_args,
is_active,
is_paused,
git_commit,
is_committed,
tags,
):
self.description = description
self.root_task_ids = root_task_ids # type: List[str]
self.tasks = tasks # type: List[ETask]
self.tags = tags # type: List[DagTag]
self.owner = owner
self.dag_id = dag_id
self.schedule_interval = schedule_interval
self.catchup = catchup
self.start_date = start_date
self.end_date = end_date
self.dag_folder = dag_folder
self.hostname = hostname
self.source_code = source_code
self.module_source_hash = module_source_hash
self.tasks_hash_to_source = tasks_hash_to_source
self.is_subdag = is_subdag
self.task_type = task_type
self.task_args = task_args
self.is_active = is_active
self.is_paused = is_paused
self.git_commit = git_commit
self.is_committed = is_committed
@staticmethod
def from_dag(
dag,
dm,
dag_folder,
include_task_args,
git_commit,
is_committed,
raw_data_only=False,
include_source=True,
):
# type: (DAG, DagModel, str, bool, str, bool, bool, bool) -> EDag
# Can be Dag from DagBag or from DB, therefore not all attributes may exist
source_code = _read_dag_file(dag.fileloc)
tasks_hash_to_source = {}
if include_source:
tasks = getattr(dag, "tasks", [])
for task in tasks:
_add_source_code(
tasks_hash_to_source,
_get_module_code(task) or _read_dag_file(dag.fileloc),
)
_add_source_code(tasks_hash_to_source, _get_source_code(task))
return EDag(
description=dag.description or "",
root_task_ids=[t.task_id for t in getattr(dag, "roots", [])],
tasks=[
ETask.from_task(t, include_task_args, dag, include_source)
for t in getattr(dag, "tasks", [])
]
if not raw_data_only
else [],
owner=resolve_attribute_or_default_attribute(dag, ["owner", "owners"]),
dag_id=dag.dag_id,
schedule_interval=interval_to_str(dag.schedule_interval),
catchup=resolve_attribute_or_default_value(dag, "catchup", False),
start_date=resolve_attribute_or_default_value(dag, "start_date", None),
end_date=resolve_attribute_or_default_value(dag, "end_date", None),
dag_folder=dag_folder,
hostname=get_hostname(),
source_code=source_code if not raw_data_only and include_source else "",
tasks_hash_to_source=tasks_hash_to_source,
module_source_hash=source_md5(source_code),
is_subdag=dag.is_subdag,
tags=getattr(dm, "tags", []),
task_type="DAG",
task_args=_extract_args_from_dict(vars(dag)) if include_task_args else {},
is_active=dm.is_active,
is_paused=dm.is_paused,
git_commit=git_commit,
is_committed=is_committed,
)
def as_dict(self):
return dict(
description=self.description,
root_task_ids=self.root_task_ids,
tasks=[t.as_dict() for t in self.tasks],
tags=[tag.name for tag in self.tags],
owner=self.owner,
dag_id=self.dag_id,
schedule_interval=self.schedule_interval,
catchup=self.catchup,
start_date=self.start_date,
end_date=self.end_date,
is_committed=self.is_committed,
git_commit=self.git_commit,
dag_folder=self.dag_folder,
hostname=self.hostname,
source_code=self.source_code,
module_source_hash=self.module_source_hash,
tasks_hash_to_source=self.tasks_hash_to_source,
is_subdag=self.is_subdag,
task_type=self.task_type,
task_args=self.task_args,
)
@attr.s
class AirflowNewDagRun(object):
id = attr.ib() # type: int
dag_id = attr.ib() # type: str
execution_date = attr.ib() # type: datetime
state = attr.ib() # type: str
is_paused = attr.ib() # type: bool
has_updated_task_instances = attr.ib() # type: bool
max_log_id = attr.ib() # type: int
events = attr.ib() # type: List[str]
def as_dict(self):
return dict(
id=self.id,
dag_id=self.dag_id,
execution_date=self.execution_date,
state=self.state,
is_paused=self.is_paused,
has_updated_task_instances=self.has_updated_task_instances,
max_log_id=self.max_log_id,
events=self.events,
)
class AirflowTaskInstance(object):
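"""Slim view of an Airflow TaskInstance row (dag/task ids, execution date, state, try number, start/end dates), serialized by as_dict()."""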
def __init__(
self, dag_id, task_id, execution_date, state, try_number, start_date, end_date
):
self.execution_date = execution_date
self.dag_id = dag_id
self.state = state
self.try_number = try_number
self.task_id = task_id
self.start_date = start_date
self.end_date = end_date
db_fields = [
"dag_id",
"task_id",
"execution_date",
"state",
"_try_number",
"start_date",
"end_date",
]
@classmethod
def query_fields(cls):
return [getattr(TaskInstance, key) for key in cls.db_fields]
def as_dict(self):
return dict(
dag_id=self.dag_id,
task_id=self.task_id,
execution_date=self.execution_date,
state=self.state,
try_number=self.try_number,
start_date=self.start_date,
end_date=self.end_date,
)
@attr.s
class AirflowExportMeta(object):
airflow_version = attr.ib(default=None) # type: str
plugin_version = attr.ib(default=None) # type: str
airflow_instance_uid = attr.ib(default=None) # type: str
api_mode = attr.ib(default=None) # type: str
request_args = attr.ib(default=None) # type: dict
metrics = attr.ib(default=None) # type: dict
def as_dict(self):
return dict(
airflow_version=self.airflow_version,
plugin_version=self.plugin_version,
airflow_instance_uid=self.airflow_instance_uid,
api_mode=self.api_mode,
request_args=self.request_args,
metrics=self.metrics,
)
@attr.s
class AirflowExportData(object):
airflow_export_meta = attr.ib(default=None) # type: AirflowExportMeta
error_message = attr.ib(default=None) # type: str
def as_dict(self):
return dict(
airflow_export_meta=self.airflow_export_meta.as_dict(),
error_message=self.error_message,
)
@attr.s
class LastSeenData(AirflowExportData):
last_seen_dag_run_id = attr.ib(default=None) # type: int
last_seen_log_id = attr.ib(default=None) # type: int
def as_dict(self):
return dict(
last_seen_dag_run_id=self.last_seen_dag_run_id,
last_seen_log_id=self.last_seen_log_id,
airflow_export_meta=self.airflow_export_meta.as_dict(),
error_message=self.error_message,
)
@attr.s
class NewRunsData(AirflowExportData):
new_dag_runs = attr.ib(default=None) # type: List[AirflowNewDagRun]
last_seen_dag_run_id = attr.ib(default=None) # type: int
last_seen_log_id = attr.ib(default=None) # type: int
def as_dict(self):
return dict(
new_dag_runs=[new_dag_run.as_dict() for new_dag_run in self.new_dag_runs],
last_seen_dag_run_id=self.last_seen_dag_run_id,
last_seen_log_id=self.last_seen_log_id,
airflow_export_meta=self.airflow_export_meta.as_dict(),
error_message=self.error_message,
)
@attr.s
class FullRunsData(AirflowExportData):
task_instances = attr.ib(default=None) # type: List[AirflowTaskInstance]
dag_runs = attr.ib(default=None) # type: List[EDagRun]
dags = attr.ib(default=None) # type: List[EDag]
def as_dict(self):
return dict(
task_instances=[
task_instance.as_dict() for task_instance in self.task_instances
],
dag_runs=[run.as_dict() for run in self.dag_runs],
dags=[dag.as_dict() for dag in self.dags],
airflow_export_meta=self.airflow_export_meta.as_dict(),
error_message=self.error_message,
)
@attr.s
class DagRunsStatesData(AirflowExportData):
task_instances = attr.ib(default=None) # type: List[AirflowTaskInstance]
dag_runs = attr.ib(default=None) # type: List[EDagRun]
def as_dict(self):
return dict(
task_instances=[
task_instance.as_dict() for task_instance in self.task_instances
],
dag_runs=[run.as_dict() for run in self.dag_runs],
airflow_export_meta=self.airflow_export_meta.as_dict(),
error_message=self.error_message,
)
|
Python
|
CL
|
907dfc1931bd80e9ff675e2112ec0fc201ccc7b50a0ead7650b7000fde94fa98
|
#Embedded file name: ACEStream\Core\ProxyService\HelperMessageHandler.pyo
import sys, os
import binascii
from threading import Lock
from time import sleep
from ACEStream.Core.TorrentDef import *
from ACEStream.Core.Session import *
from ACEStream.Core.simpledefs import *
from ACEStream.Core.DownloadConfig import DownloadStartupConfig
from ACEStream.Core.Utilities.utilities import show_permid_short
from ACEStream.Core.BitTornado.bencode import bencode, bdecode
from ACEStream.Core.BitTornado.BT1.MessageID import *
from ACEStream.Core.CacheDB.CacheDBHandler import PeerDBHandler, TorrentDBHandler
from ACEStream.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge
DEBUG = False
class HelperMessageHandler:
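"""Helper-side handler for ProxyService overlay messages (ASK_FOR_HELP, STOP_HELPING, REQUEST_PIECES); starts or stops helper downloads on behalf of a coordinator."""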
def __init__(self):
self.metadata_queue = {}
self.metadata_queue_lock = Lock()
self.overlay_bridge = OverlayThreadingBridge.getInstance()
self.received_challenges = {}
def register(self, session, metadata_handler, helpdir, dlconfig):
self.session = session
self.helpdir = helpdir
self.dlconfig = dlconfig
self.metadata_handler = metadata_handler
self.torrent_db = TorrentDBHandler.getInstance()
def handleMessage(self, permid, selversion, message):
t = message[0]
if DEBUG:
print >> sys.stderr, 'helper: received the message', getMessageName(t), 'from', show_permid_short(permid)
session_config = self.session.get_current_startup_config_copy()
if session_config.get_proxyservice_status() == PROXYSERVICE_OFF:
if DEBUG:
print >> sys.stderr, 'helper: ProxyService not active, ignoring message'
return
if t == ASK_FOR_HELP:
return self.got_ask_for_help(permid, message, selversion)
if t == STOP_HELPING:
return self.got_stop_helping(permid, message, selversion)
if t == REQUEST_PIECES:
return self.got_request_pieces(permid, message, selversion)
def got_ask_for_help(self, permid, message, selversion):
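"""Handle ASK_FOR_HELP: bytes 1-20 of the message are the infohash and the remainder is the bencoded challenge; start a helper download if none exists yet for this infohash."""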
try:
infohash = message[1:21]
challenge = bdecode(message[21:])
except:
if DEBUG:
print >> sys.stderr, 'helper: got_ask_for_help: bad data in ask_for_help'
return False
if len(infohash) != 20:
if DEBUG:
print >> sys.stderr, 'helper: got_ask_for_help: bad infohash in ask_for_help'
return False
if DEBUG:
print >> sys.stderr, 'helper: got_ask_for_help: received a help request from', show_permid_short(permid), 'with challenge', challenge
self.received_challenges[permid] = challenge
helper_obj = self.session.lm.get_coopdl_role_object(infohash, COOPDL_ROLE_HELPER)
if helper_obj is None:
if DEBUG:
print >> sys.stderr, 'helper: got_ask_for_help: There is no current download for this infohash. A new download must be started.'
self.start_helper_download(permid, infohash, selversion)
return
network_got_ask_for_help_lambda = lambda : self.network_got_ask_for_help(permid, infohash)
self.session.lm.rawserver.add_task(network_got_ask_for_help_lambda, 0)
return True
def network_got_ask_for_help(self, permid, infohash):
helper_obj = self.session.lm.get_coopdl_role_object(infohash, COOPDL_ROLE_HELPER)
if helper_obj is None:
if DEBUG:
print >> sys.stderr, 'helper: network_got_ask_for_help: There is no current download for this infohash. Try again later...'
return
if not helper_obj.is_coordinator(permid):
if DEBUG:
print >> sys.stderr, 'helper: network_got_ask_for_help: The node asking for help is not the current coordinator'
challenge = self.received_challenges[permid]
helper_obj.got_ask_for_help(permid, infohash, challenge)
helper_obj.notify()
def start_helper_download(self, permid, infohash, selversion):
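"""Start helping with this infohash: use the locally cached .torrent if available, otherwise request the metadata from the coordinator."""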
torrent_data = self.find_torrent(infohash)
if torrent_data:
self.new_download(infohash, torrent_data, permid)
else:
self.get_torrent_metadata(permid, infohash, selversion)
def new_download(self, infohash, torrent_data, permid):
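"""Write the received .torrent into the help dir and start a proxy-mode-off download with the requesting peer set as coopdl coordinator."""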
basename = binascii.hexlify(infohash) + '.torrent'
torrentfilename = os.path.join(self.helpdir, basename)
tfile = open(torrentfilename, 'wb')
tfile.write(torrent_data)
tfile.close()
if DEBUG:
print >> sys.stderr, 'helper: new_download: Got metadata required for helping', show_permid_short(permid)
print >> sys.stderr, 'helper: new_download: torrent: ', torrentfilename
tdef = TorrentDef.load(torrentfilename)
if self.dlconfig is None:
dscfg = DownloadStartupConfig()
else:
dscfg = DownloadStartupConfig(self.dlconfig)
dscfg.set_coopdl_coordinator_permid(permid)
dscfg.set_dest_dir(self.helpdir)
dscfg.set_proxy_mode(PROXY_MODE_OFF)
if DEBUG:
print >> sys.stderr, 'helper: new_download: Starting a new download'
d = self.session.start_download(tdef, dscfg)
d.set_state_callback(self.state_callback, getpeerlist=False)
network_got_ask_for_help_lambda = lambda : self.network_got_ask_for_help(permid, infohash)
self.session.lm.rawserver.add_task(network_got_ask_for_help_lambda, 0)
def state_callback(self, ds):
d = ds.get_download()
print >> sys.stderr, '%s %s %5.2f%% %s up %8.2fKB/s down %8.2fKB/s' % (d.get_def().get_name(),
dlstatus_strings[ds.get_status()],
ds.get_progress() * 100,
ds.get_error(),
ds.get_current_speed(UPLOAD),
ds.get_current_speed(DOWNLOAD))
return (1.0, False)
def get_torrent_metadata(self, permid, infohash, selversion):
if DEBUG:
print >> sys.stderr, 'helper: get_torrent_metadata: Asking coordinator for the .torrent'
self.metadata_queue_lock.acquire()
try:
if not self.metadata_queue.has_key(infohash):
self.metadata_queue[infohash] = []
self.metadata_queue[infohash].append(permid)
finally:
self.metadata_queue_lock.release()
self.metadata_handler.send_metadata_request(permid, infohash, selversion, caller='dlhelp')
def metadatahandler_received_torrent(self, infohash, torrent_data):
if DEBUG:
print >> sys.stderr, 'helper: metadatahandler_received_torrent: the .torrent is in.'
self.metadata_queue_lock.acquire()
try:
if not self.metadata_queue.has_key(infohash) or not self.metadata_queue[infohash]:
if DEBUG:
print >> sys.stderr, 'helper: metadatahandler_received_torrent: a .torrent was received that we are not waiting for.'
return
infohash_queue = self.metadata_queue[infohash]
del self.metadata_queue[infohash]
for permid in infohash_queue:
self.new_download(infohash, torrent_data, permid)
finally:
self.metadata_queue_lock.release()
def find_torrent(self, infohash):
torrent = self.torrent_db.getTorrent(infohash)
if torrent is None:
if DEBUG:
print >> sys.stderr, 'helper: find_torrent: The .torrent file is not in the local cache'
return
if 'torrent_dir' in torrent:
fn = torrent['torrent_dir']
if os.path.isfile(fn):
f = open(fn, 'rb')
data = f.read()
f.close()
return data
else:
if DEBUG:
print >> sys.stderr, 'helper: find_torrent: The .torrent file path does not exist or the path is not for a file'
return
else:
if DEBUG:
print >> sys.stderr, 'helper: find_torrent: The torrent dictionary does not contain a torrent_dir field'
return
def got_stop_helping(self, permid, message, selversion):
try:
infohash = message[1:]
except:
if DEBUG:
print >> sys.stderr, 'helper: got_stop_helping: bad data in STOP_HELPING'
return False
if len(infohash) != 20:
if DEBUG:
print >> sys.stderr, 'helper: got_stop_helping: bad infohash in STOP_HELPING'
return False
network_got_stop_helping_lambda = lambda : self.network_got_stop_helping(permid, infohash, selversion)
self.session.lm.rawserver.add_task(network_got_stop_helping_lambda, 0)
return False
def network_got_stop_helping(self, permid, infohash, selversion):
helper_obj = self.session.lm.get_coopdl_role_object(infohash, COOPDL_ROLE_HELPER)
if helper_obj is None:
if DEBUG:
print >> sys.stderr, 'helper: network_got_stop_helping: There is no helper object associated with this infohash'
return
if not helper_obj.is_coordinator(permid):
if DEBUG:
print >> sys.stderr, 'helper: network_got_stop_helping: The node asking for help is not the current coordinator'
return
dlist = self.session.get_downloads()
for d in dlist:
if d.get_def().get_infohash() == infohash:
self.session.remove_download(d)
break
def got_request_pieces(self, permid, message, selversion):
try:
infohash = message[1:21]
pieces = bdecode(message[21:])
except:
print >> sys.stderr, 'helper: got_request_pieces: bad data in REQUEST_PIECES'
return False
network_got_request_pieces_lambda = lambda : self.network_got_request_pieces(permid, message, selversion, infohash, pieces)
self.session.lm.rawserver.add_task(network_got_request_pieces_lambda, 0)
return True
def network_got_request_pieces(self, permid, message, selversion, infohash, pieces):
helper_obj = self.session.lm.get_coopdl_role_object(infohash, COOPDL_ROLE_HELPER)
if helper_obj is None:
if DEBUG:
print >> sys.stderr, 'helper: network_got_request_pieces: There is no helper object associated with this infohash'
return
if not helper_obj.is_coordinator(permid):
if DEBUG:
print >> sys.stderr, 'helper: network_got_request_pieces: The node asking for help is not the current coordinator'
return
helper_obj.got_request_pieces(permid, pieces)
helper_obj.notify()
|
Python
|
CL
|
f442e9d5a4cb95d0229b8b51baf218dce37a5be155acd863490ef1543cb15021
|
import json
import pandas as pd
from plotly.graph_objs import Layout
from plotly import offline
from loguru import logger
import itertools
# Global constants
filename = 'data/sample_br.json'
# N_LINES=5000
N_LINES = None
# Parse json file
def parse_file(n_lines=None):
""" Function to parse a json file and slice by the number of line we want (default: no slice)"""
with open(filename) as infile:
if n_lines is not None:
file_iterator = itertools.islice(infile, n_lines)
else:
file_iterator = infile
all_dict = list(map(json.loads, file_iterator))
logger.info(f"Loaded {len(all_dict)} rows")
return all_dict
# Create records
def map_localisation(coord_object):
"""Function to create a dictionary of our variables"""
# Avoid error with empty city name (namely Paris)
try:
city = coord_object['_source']['Bidrequest']['device']['geo']['city'].title()
except KeyError:
city = 'Paris'
record = {
'lons': coord_object['_source']['Bidrequest']['device']['geo']['lon'],
'lats': coord_object['_source']['Bidrequest']['device']['geo']['lat'],
'cities': city
}
return record
# Function for the marker's size
def size_marker(number):
"""Return a good size for markers"""
if number > 50:
number = 50
elif number < 10:
number = 10
return number
def plot_map(df):
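"""Render the per-city bid request counts as a scattergeo map and write it to br_loc.html"""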
logger.info("Preparing plot")
# Create map
data = [{
'type': 'scattergeo',
'lon': df.lons.tolist(),
'lat': df.lats.tolist(),
'text': [f'City: {x} <br> Number of BR: {y}' for x, y in list(zip(df.cities.tolist(), df.counts.tolist()))],
'hovertemplate': "%{text}",
'marker': {
'size': [size_marker(count / 100) for count in df.counts.tolist()],
'color': df.counts.tolist(),
'colorscale': 'Viridis',
'reversescale': True,
'colorbar': {'title': 'Number'},
},
}]
my_layout = Layout(title='Bid requests per localisation')
fig = {'data': data, 'layout': my_layout}
offline.plot(fig, filename='br_loc.html')
# Execute script
def main():
all_dicts = parse_file()
records = map(map_localisation, all_dicts)
df = pd.DataFrame.from_records(records)
df = df.groupby(['cities', 'lons', 'lats']).size().reset_index(name='counts')
logger.info(f"Built dataframe with {len(df)} records :\n{df}")
plot_map(df)
if __name__ == '__main__':
main()
|
Python
|
CL
|
b431186a401f992b73212732ed6be76dd3900031ec2cab17d29208e39662b99b
|
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListJobInfoDetailResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'jobs': 'GetTaskDetailListRspJobs',
'task_detail': 'str',
'instance': 'GetTaskDetailListRspInstance',
'entities': 'object',
'fail_reason': 'str'
}
attribute_map = {
'jobs': 'jobs',
'task_detail': 'taskDetail',
'instance': 'instance',
'entities': 'entities',
'fail_reason': 'fail_reason'
}
def __init__(self, jobs=None, task_detail=None, instance=None, entities=None, fail_reason=None):
"""ListJobInfoDetailResponse - a model defined in huaweicloud sdk"""
super(ListJobInfoDetailResponse, self).__init__()
self._jobs = None
self._task_detail = None
self._instance = None
self._entities = None
self._fail_reason = None
self.discriminator = None
if jobs is not None:
self.jobs = jobs
if task_detail is not None:
self.task_detail = task_detail
if instance is not None:
self.instance = instance
if entities is not None:
self.entities = entities
if fail_reason is not None:
self.fail_reason = fail_reason
@property
def jobs(self):
"""Gets the jobs of this ListJobInfoDetailResponse.
:return: The jobs of this ListJobInfoDetailResponse.
:rtype: GetTaskDetailListRspJobs
"""
return self._jobs
@jobs.setter
def jobs(self, jobs):
"""Sets the jobs of this ListJobInfoDetailResponse.
:param jobs: The jobs of this ListJobInfoDetailResponse.
:type: GetTaskDetailListRspJobs
"""
self._jobs = jobs
@property
def task_detail(self):
"""Gets the task_detail of this ListJobInfoDetailResponse.
        Detailed parameter information of the task execution; this field is not returned when it is empty.
:return: The task_detail of this ListJobInfoDetailResponse.
:rtype: str
"""
return self._task_detail
@task_detail.setter
def task_detail(self, task_detail):
"""Sets the task_detail of this ListJobInfoDetailResponse.
        Detailed parameter information of the task execution; this field is not returned when it is empty.
:param task_detail: The task_detail of this ListJobInfoDetailResponse.
:type: str
"""
self._task_detail = task_detail
@property
def instance(self):
"""Gets the instance of this ListJobInfoDetailResponse.
:return: The instance of this ListJobInfoDetailResponse.
:rtype: GetTaskDetailListRspInstance
"""
return self._instance
@instance.setter
def instance(self, instance):
"""Sets the instance of this ListJobInfoDetailResponse.
:param instance: The instance of this ListJobInfoDetailResponse.
:type: GetTaskDetailListRspInstance
"""
self._instance = instance
@property
def entities(self):
"""Gets the entities of this ListJobInfoDetailResponse.
        Different content is displayed depending on the task.
:return: The entities of this ListJobInfoDetailResponse.
:rtype: object
"""
return self._entities
@entities.setter
def entities(self, entities):
"""Sets the entities of this ListJobInfoDetailResponse.
        Different content is displayed depending on the task.
:param entities: The entities of this ListJobInfoDetailResponse.
:type: object
"""
self._entities = entities
@property
def fail_reason(self):
"""Gets the fail_reason of this ListJobInfoDetailResponse.
        Error message returned when the task execution fails.
:return: The fail_reason of this ListJobInfoDetailResponse.
:rtype: str
"""
return self._fail_reason
@fail_reason.setter
def fail_reason(self, fail_reason):
"""Sets the fail_reason of this ListJobInfoDetailResponse.
        Error message returned when the task execution fails.
:param fail_reason: The fail_reason of this ListJobInfoDetailResponse.
:type: str
"""
self._fail_reason = fail_reason
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListJobInfoDetailResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
Python
|
CL
|
753dee295c4b238dd8631d5af9bf1d9bca964eded45a299938a117792a5e40f5
|
#!/usr/bin/env python3
# encoding=utf-8
from telegram import ReplyKeyboardRemove
from telegram.ext import MessageHandler, Filters, ConversationHandler, CommandHandler, RegexHandler, CallbackQueryHandler
from telegram.utils.request import Request
import os
import functions.misc as misc
import pickle
import config
from datetime import datetime, timedelta
import json
import functions.lang as lang
import logging
logger = logging.getLogger(__name__)
text = json.load(open('lang.json', encoding='utf8'))
lang.validator(text)
def start(bot, update, user_data):
user = update.message.from_user
logger.debug('user_data: %s', user_data)
logger.debug('bannedList: %s', misc.bannedList)
logger.debug('user: %s', user)
if user.username in misc.bannedList or str(user.id) in misc.bannedList:
update.message.reply_text(text['banned']['pl'])
return ConversationHandler.END
if 'lastUsed' in user_data:
cooldown = (user_data['lastUsed'] - datetime.now() +
timedelta(minutes=config.cooldown)).total_seconds()
if cooldown > 0:
            update.message.reply_text(text['cooldown']['pl'] % round(cooldown))
return ConversationHandler.END
update.message.reply_text(text['start']['pl'])
return GET_REPORT
def startHandler(bot, update, user_data):
query = update.callback_query
data = misc.separateCallbackData(query.data)
action = data.pop(0)
if action == 'LANG':
user_data['lang'] = 'pl'
try:
bot.edit_message_text(chat_id=query.message.chat_id,
message_id=query.message.message_id,
text=text['start']['pl'])
        except Exception:
bot.answer_callback_query(callback_query_id=query.id)
elif action == 'CANCEL':
try:
bot.edit_message_text(chat_id=query.message.chat_id,
message_id=query.message.message_id,
text=text['start']['pl'])
        except Exception:
pass
return ConversationHandler.END
def forwardMsg(bot, update, user_data):
logger.debug('received message: %s', update.message)
alnumCount = 0
for char in update.message.text:
if char.isalnum():
alnumCount += 1
elif char.isspace():
alnumCount += 1
if len(update.message.text) < config.minMsgLen or alnumCount < config.minCharRatio * len(update.message.text):
update.message.reply_text(text['msgTooShort']['pl'])
return
bot.forward_message(chat_id=config.forwardDest,
from_chat_id=update.message.chat.id,
message_id=update.message.message_id,
disable_notification=config.silent
)
bot.send_message(chat_id=config.forwardDest,
text=update.message.from_user.full_name+' ('+update.message.from_user.name+')')
update.message.reply_text(text['end']['pl'])
user_data['lastUsed'] = datetime.now()
return ConversationHandler.END
def cancel(bot, update, user_data):
update.message.reply_text(
text['cancelled']['pl'], reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
GET_REPORT = range(1)
HANDLERS = (
ConversationHandler(
entry_points=[
(CommandHandler('start', start, pass_user_data=True, filters=~Filters.group))],
states={
GET_REPORT: [CallbackQueryHandler(startHandler, pass_user_data=True),
MessageHandler(Filters.text, forwardMsg, pass_user_data=True)]
},
fallbacks=[CommandHandler(
'cancel', cancel, pass_user_data=True)]
),
)
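# A minimal wiring sketch (not part of the original module): it shows how the HANDLERS
# tuple above could be registered with a dispatcher. `config.token` is an assumption;
# the original config module is not shown here and may name the bot token differently.
if __name__ == '__main__':
    from telegram.ext import Updater
    updater = Updater(token=config.token)
    for handler in HANDLERS:
        updater.dispatcher.add_handler(handler)
    updater.start_polling()
    updater.idle()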
|
Python
|
CL
|
607546b92b203d98cbd5cb437068d0c7ffe4a43e0a20428980afa803accc6d6d
|
# encoding: utf-8
# module PyQt4.QtScript
# from /usr/lib64/python2.6/site-packages/PyQt4/QtScript.so
# by generator 1.136
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
# functions
def qScriptConnect(*args, **kwargs): # real signature unknown
pass
def qScriptDisconnect(*args, **kwargs): # real signature unknown
pass
# classes
class QScriptClass(): # skipped bases: <type 'sip.simplewrapper'>
# no doc
def engine(self, *args, **kwargs): # real signature unknown
pass
def extension(self, *args, **kwargs): # real signature unknown
pass
def name(self, *args, **kwargs): # real signature unknown
pass
def newIterator(self, *args, **kwargs): # real signature unknown
pass
def property(self, *args, **kwargs): # real signature unknown
pass
def propertyFlags(self, *args, **kwargs): # real signature unknown
pass
def prototype(self, *args, **kwargs): # real signature unknown
pass
def queryProperty(self, *args, **kwargs): # real signature unknown
pass
def setProperty(self, *args, **kwargs): # real signature unknown
pass
def supportsExtension(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
Callable = 0
HandlesReadAccess = 1
HandlesWriteAccess = 2
HasInstance = 1
class QScriptClassPropertyIterator(): # skipped bases: <type 'sip.simplewrapper'>
# no doc
def flags(self, *args, **kwargs): # real signature unknown
pass
def hasNext(self, *args, **kwargs): # real signature unknown
pass
def hasPrevious(self, *args, **kwargs): # real signature unknown
pass
def id(self, *args, **kwargs): # real signature unknown
pass
def name(self, *args, **kwargs): # real signature unknown
pass
def next(self, *args, **kwargs): # real signature unknown
pass
def object(self, *args, **kwargs): # real signature unknown
pass
def previous(self, *args, **kwargs): # real signature unknown
pass
def toBack(self, *args, **kwargs): # real signature unknown
pass
def toFront(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
class QScriptContext(): # skipped bases: <type 'sip.simplewrapper'>
# no doc
def activationObject(self, *args, **kwargs): # real signature unknown
pass
def argument(self, *args, **kwargs): # real signature unknown
pass
def argumentCount(self, *args, **kwargs): # real signature unknown
pass
def argumentsObject(self, *args, **kwargs): # real signature unknown
pass
def backtrace(self, *args, **kwargs): # real signature unknown
pass
def callee(self, *args, **kwargs): # real signature unknown
pass
def engine(self, *args, **kwargs): # real signature unknown
pass
def isCalledAsConstructor(self, *args, **kwargs): # real signature unknown
pass
def parentContext(self, *args, **kwargs): # real signature unknown
pass
def setActivationObject(self, *args, **kwargs): # real signature unknown
pass
def setThisObject(self, *args, **kwargs): # real signature unknown
pass
def state(self, *args, **kwargs): # real signature unknown
pass
def thisObject(self, *args, **kwargs): # real signature unknown
pass
def throwError(self, *args, **kwargs): # real signature unknown
pass
def throwValue(self, *args, **kwargs): # real signature unknown
pass
def toString(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
ExceptionState = 1
NormalState = 0
RangeError = 4
ReferenceError = 1
SyntaxError = 2
TypeError = 3
UnknownError = 0
URIError = 5
class QScriptContextInfo(): # skipped bases: <type 'sip.simplewrapper'>
# no doc
def columnNumber(self, *args, **kwargs): # real signature unknown
pass
def fileName(self, *args, **kwargs): # real signature unknown
pass
def functionEndLineNumber(self, *args, **kwargs): # real signature unknown
pass
def functionMetaIndex(self, *args, **kwargs): # real signature unknown
pass
def functionName(self, *args, **kwargs): # real signature unknown
pass
def functionParameterNames(self, *args, **kwargs): # real signature unknown
pass
def functionStartLineNumber(self, *args, **kwargs): # real signature unknown
pass
def functionType(self, *args, **kwargs): # real signature unknown
pass
def isNull(self, *args, **kwargs): # real signature unknown
pass
def lineNumber(self, *args, **kwargs): # real signature unknown
pass
def scriptId(self, *args, **kwargs): # real signature unknown
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
NativeFunction = 3
QtFunction = 1
QtPropertyFunction = 2
ScriptFunction = 0
class QScriptEngine(__PyQt4_QtCore.QObject):
# no doc
def abortEvaluation(self, *args, **kwargs): # real signature unknown
pass
def agent(self, *args, **kwargs): # real signature unknown
pass
def availableExtensions(self, *args, **kwargs): # real signature unknown
pass
def canEvaluate(self, *args, **kwargs): # real signature unknown
pass
def checkSyntax(self, *args, **kwargs): # real signature unknown
pass
def childEvent(self, *args, **kwargs): # real signature unknown
pass
def clearExceptions(self, *args, **kwargs): # real signature unknown
pass
def collectGarbage(self, *args, **kwargs): # real signature unknown
pass
def connectNotify(self, *args, **kwargs): # real signature unknown
pass
def currentContext(self, *args, **kwargs): # real signature unknown
pass
def customEvent(self, *args, **kwargs): # real signature unknown
pass
def defaultPrototype(self, *args, **kwargs): # real signature unknown
pass
def disconnectNotify(self, *args, **kwargs): # real signature unknown
pass
def evaluate(self, *args, **kwargs): # real signature unknown
pass
def globalObject(self, *args, **kwargs): # real signature unknown
pass
def hasUncaughtException(self, *args, **kwargs): # real signature unknown
pass
def importedExtensions(self, *args, **kwargs): # real signature unknown
pass
def importExtension(self, *args, **kwargs): # real signature unknown
pass
def installTranslatorFunctions(self, *args, **kwargs): # real signature unknown
pass
def isEvaluating(self, *args, **kwargs): # real signature unknown
pass
def newArray(self, *args, **kwargs): # real signature unknown
pass
def newDate(self, *args, **kwargs): # real signature unknown
pass
def newFunction(self, *args, **kwargs): # real signature unknown
pass
def newObject(self, *args, **kwargs): # real signature unknown
pass
def newQMetaObject(self, *args, **kwargs): # real signature unknown
pass
def newQObject(self, *args, **kwargs): # real signature unknown
pass
def newRegExp(self, *args, **kwargs): # real signature unknown
pass
def newVariant(self, *args, **kwargs): # real signature unknown
pass
def nullValue(self, *args, **kwargs): # real signature unknown
pass
def processEventsInterval(self, *args, **kwargs): # real signature unknown
pass
def receivers(self, *args, **kwargs): # real signature unknown
pass
def setAgent(self, *args, **kwargs): # real signature unknown
pass
def setDefaultPrototype(self, *args, **kwargs): # real signature unknown
pass
def setGlobalObject(self, *args, **kwargs): # real signature unknown
pass
def setProcessEventsInterval(self, *args, **kwargs): # real signature unknown
pass
def signalHandlerException(self, *args, **kwargs): # real signature unknown
"""
pyqtSignal(*types, name=str) -> signal attribute
types is normally a sequence of individual types. Each type is either a
type object or a string that is the name of a C++ type. Alternatively each
type could itself be a sequence of types each describing a different
overloaded signal.
name is the optional C++ name of the signal. If it is not specified then
the name of the class attribute that is bound to the signal is used.
"""
pass
def timerEvent(self, *args, **kwargs): # real signature unknown
pass
def toObject(self, *args, **kwargs): # real signature unknown
pass
def toStringHandle(self, *args, **kwargs): # real signature unknown
pass
def uncaughtException(self, *args, **kwargs): # real signature unknown
pass
def uncaughtExceptionBacktrace(self, *args, **kwargs): # real signature unknown
pass
def uncaughtExceptionLineNumber(self, *args, **kwargs): # real signature unknown
pass
def undefinedValue(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
AutoCreateDynamicProperties = 256
AutoOwnership = 2
ExcludeChildObjects = 1
ExcludeDeleteLater = 16
ExcludeSuperClassContents = 6
ExcludeSuperClassMethods = 2
ExcludeSuperClassProperties = 4
PreferExistingWrapperObject = 512
QtOwnership = 0
ScriptOwnership = 1
SkipMethodsInEnumeration = 8
class QScriptEngineAgent(): # skipped bases: <type 'sip.simplewrapper'>
# no doc
def contextPop(self, *args, **kwargs): # real signature unknown
pass
def contextPush(self, *args, **kwargs): # real signature unknown
pass
def engine(self, *args, **kwargs): # real signature unknown
pass
def exceptionCatch(self, *args, **kwargs): # real signature unknown
pass
def exceptionThrow(self, *args, **kwargs): # real signature unknown
pass
def extension(self, *args, **kwargs): # real signature unknown
pass
def functionEntry(self, *args, **kwargs): # real signature unknown
pass
def functionExit(self, *args, **kwargs): # real signature unknown
pass
def positionChange(self, *args, **kwargs): # real signature unknown
pass
def scriptLoad(self, *args, **kwargs): # real signature unknown
pass
def scriptUnload(self, *args, **kwargs): # real signature unknown
pass
def supportsExtension(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
DebuggerInvocationRequest = 0
class QScriptString(): # skipped bases: <type 'sip.simplewrapper'>
# no doc
def isValid(self, *args, **kwargs): # real signature unknown
pass
def toString(self, *args, **kwargs): # real signature unknown
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
class QScriptSyntaxCheckResult(): # skipped bases: <type 'sip.simplewrapper'>
# no doc
def errorColumnNumber(self, *args, **kwargs): # real signature unknown
pass
def errorLineNumber(self, *args, **kwargs): # real signature unknown
pass
def errorMessage(self, *args, **kwargs): # real signature unknown
pass
def state(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
Error = 0
Intermediate = 1
Valid = 2
class QScriptValue(): # skipped bases: <type 'sip.simplewrapper'>
# no doc
def call(self, *args, **kwargs): # real signature unknown
pass
def construct(self, *args, **kwargs): # real signature unknown
pass
def data(self, *args, **kwargs): # real signature unknown
pass
def engine(self, *args, **kwargs): # real signature unknown
pass
def equals(self, *args, **kwargs): # real signature unknown
pass
def instanceOf(self, *args, **kwargs): # real signature unknown
pass
def isArray(self, *args, **kwargs): # real signature unknown
pass
def isBool(self, *args, **kwargs): # real signature unknown
pass
def isBoolean(self, *args, **kwargs): # real signature unknown
pass
def isDate(self, *args, **kwargs): # real signature unknown
pass
def isError(self, *args, **kwargs): # real signature unknown
pass
def isFunction(self, *args, **kwargs): # real signature unknown
pass
def isNull(self, *args, **kwargs): # real signature unknown
pass
def isNumber(self, *args, **kwargs): # real signature unknown
pass
def isObject(self, *args, **kwargs): # real signature unknown
pass
def isQMetaObject(self, *args, **kwargs): # real signature unknown
pass
def isQObject(self, *args, **kwargs): # real signature unknown
pass
def isRegExp(self, *args, **kwargs): # real signature unknown
pass
def isString(self, *args, **kwargs): # real signature unknown
pass
def isUndefined(self, *args, **kwargs): # real signature unknown
pass
def isValid(self, *args, **kwargs): # real signature unknown
pass
def isVariant(self, *args, **kwargs): # real signature unknown
pass
def lessThan(self, *args, **kwargs): # real signature unknown
pass
def property(self, *args, **kwargs): # real signature unknown
pass
def propertyFlags(self, *args, **kwargs): # real signature unknown
pass
def prototype(self, *args, **kwargs): # real signature unknown
pass
def scriptClass(self, *args, **kwargs): # real signature unknown
pass
def setData(self, *args, **kwargs): # real signature unknown
pass
def setProperty(self, *args, **kwargs): # real signature unknown
pass
def setPrototype(self, *args, **kwargs): # real signature unknown
pass
def setScriptClass(self, *args, **kwargs): # real signature unknown
pass
def strictlyEquals(self, *args, **kwargs): # real signature unknown
pass
def toBool(self, *args, **kwargs): # real signature unknown
pass
def toBoolean(self, *args, **kwargs): # real signature unknown
pass
def toDateTime(self, *args, **kwargs): # real signature unknown
pass
def toInt32(self, *args, **kwargs): # real signature unknown
pass
def toInteger(self, *args, **kwargs): # real signature unknown
pass
def toNumber(self, *args, **kwargs): # real signature unknown
pass
def toObject(self, *args, **kwargs): # real signature unknown
pass
def toQMetaObject(self, *args, **kwargs): # real signature unknown
pass
def toQObject(self, *args, **kwargs): # real signature unknown
pass
def toRegExp(self, *args, **kwargs): # real signature unknown
pass
def toString(self, *args, **kwargs): # real signature unknown
pass
def toUInt16(self, *args, **kwargs): # real signature unknown
pass
def toUInt32(self, *args, **kwargs): # real signature unknown
pass
def toVariant(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
KeepExistingFlags = 2048
NullValue = 0
PropertyGetter = 8
PropertySetter = 16
QObjectMember = 32
ReadOnly = 1
ResolveFull = 3
ResolveLocal = 0
ResolvePrototype = 1
ResolveScope = 2
SkipInEnumeration = 4
UndefinedValue = 1
Undeletable = 2
UserRange = -16777216
class QScriptValueIterator(): # skipped bases: <type 'sip.simplewrapper'>
# no doc
def flags(self, *args, **kwargs): # real signature unknown
pass
def hasNext(self, *args, **kwargs): # real signature unknown
pass
def hasPrevious(self, *args, **kwargs): # real signature unknown
pass
def name(self, *args, **kwargs): # real signature unknown
pass
def next(self, *args, **kwargs): # real signature unknown
pass
def previous(self, *args, **kwargs): # real signature unknown
pass
def remove(self, *args, **kwargs): # real signature unknown
pass
def scriptName(self, *args, **kwargs): # real signature unknown
pass
def setValue(self, *args, **kwargs): # real signature unknown
pass
def toBack(self, *args, **kwargs): # real signature unknown
pass
def toFront(self, *args, **kwargs): # real signature unknown
pass
def value(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
|
Python
|
CL
|
c342e4960b87060b1c45a2af120c2bf247bc3cbc410cb184629d87370323c53b
|
import os
import tarfile
import urllib.request
wayland_version = '1.18.0'
protocols_version = '1.20'
wayland_source = 'https://cgit.freedesktop.org/wayland/wayland/plain/protocol/wayland.xml?id={}'.format(wayland_version)
protocols_source = 'https://wayland.freedesktop.org/releases/wayland-protocols-{}.tar.xz'.format(protocols_version)
def protocols_build(output_dir):
from pywayland.scanner import Protocol
# first, we download the wayland.xml file
wayland_file = 'wayland.xml'
urllib.request.urlretrieve(wayland_source, wayland_file)
# download the protocols file and extract it
protocol_dest = 'wayland-protocols-{}'.format(protocols_version)
urllib.request.urlretrieve(protocols_source, protocol_dest + '.tar.xz')
with tarfile.open(protocol_dest + '.tar.xz') as f:
f.extractall()
# walk the directory and generate all the protocols
protocol_files = [wayland_file] + [
os.path.join(dirpath, filename)
for dirpath, _, filenames in os.walk(protocol_dest)
for filename in filenames
if os.path.splitext(filename)[1] == ".xml"
]
protocols = [Protocol.parse_file(protocol_file) for protocol_file in protocol_files]
protocol_imports = {
interface.name: protocol.name
for protocol in protocols
for interface in protocol.interface
}
for protocol in protocols:
protocol.output(output_dir, protocol_imports)
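# A minimal usage sketch (not part of the original script); 'protocols' is a hypothetical
# output directory for the generated pywayland modules and may need to exist beforehand.
if __name__ == '__main__':
    protocols_build('protocols')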
|
Python
|
CL
|
8750f71c6fc2bdb25371ac0e045338bcad60ffae9279c8fa2adfa237ac3e1260
|
# Lesson 02 Exercise: Grid Printer
# Jeremy Monroe
p = '+'
l = '|'
def grid_printer(g_size):
""" Prints a 4x4 grid where the length & width of each cell == g_size """
half_g = g_size // 2
m = '-' * (half_g)
s = ' ' * (half_g)
# This grid is always 2x2. So, loop twice
for i in range(2):
# print one + sign on each side of - sign times half the g_size
print(p + m + p + m + p)
# loop half the g_size to print vertical sides for each cell.
for i in range(half_g):
print(l + s + l + s + l)
# print the final row of + and - signs to finish bottom of the grid
print(p + m + p + m + p)
# grid_printer(15)
def fancy_grid_printer(rowCol, length):
""" Prints a grid where rowCol sets the number of rows and columns
and length sets the length & width of each cell. """
m = '-' * length
s = ' ' * length
# loops rowCol to create that many rows
for i in range(rowCol):
# loops rowCol to create that many columns
for j in range(rowCol):
# prints one + sign and - signs times the specified length
# to create the top of each row
print(p + m, end='')
# prints a final + sign to finish each row
print(p)
# loops length to create the vertical sides of each cell
for j in range(length):
# loops rowCol to create the proper number of vertical sides
for k in range(rowCol):
# prints one | sign and ' ' times the length
print(l + s, end="")
# prints final | sign to finish vertical side on last cell
print(l)
# Finishes the grid by printing + and - signs across the bottom in
        # accordance with the number of columns (rowCol)
for j in range(rowCol):
print(p + m, end="")
print(p)
# fancy_grid_printer(3, 2)
fancy_grid_printer(5, 3)
|
Python
|
CL
|
56cd5616f7d39ce0ec0a7c9f5c53eed3ca23354df6248a316bf1149bc5f89dd8
|
import re
class BaseReaction(object):
url_path = ''
def __init__(self, poolbot):
"""Make poolbot available to all reactions."""
self.poolbot = poolbot
def match_request(self, message):
"""Return a boolean to indicate if the message should be processed
by this handler."""
        raise NotImplementedError
def process_request(self, message):
"""Return a message which poolbot should reply to the channel with. This
method is only called if the match_request() method returns True."""
        raise NotImplementedError
def _generate_url(self, **kwargs):
"""Join the host portion of the URL with the provided command path."""
path = self.url_path.format(**kwargs)
return self.poolbot.generate_url(path)
def _find_subtype_mentions(self, message):
"""Parses the message text and returns all user ids mentioned excluding
poolbot."""
        subtype_mention_regex = r'<@[a-zA-Z0-9]+'
user_mentions = re.findall(subtype_mention_regex, message['text'])
user_ids = [mention.strip('@<>') for mention in user_mentions]
return [user_id for user_id in user_ids if user_id != self.poolbot.bot_id]
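# A minimal illustrative subclass (not part of the original module) showing how the
# match_request/process_request contract is meant to be implemented; the reaction name
# and reply strings are hypothetical.
class GreetingReaction(BaseReaction):
    """Reply when somebody greets poolbot in the channel."""
    def match_request(self, message):
        # Only handle messages whose text contains a greeting.
        return 'hello' in message.get('text', '').lower()
    def process_request(self, message):
        # Mention back any users referenced alongside poolbot.
        mentions = self._find_subtype_mentions(message)
        if mentions:
            return 'Hello! I also see: {}'.format(', '.join(mentions))
        return 'Hello there!'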
|
Python
|
CL
|
52bb0da34ba335e778013d720b460a5a48363a47913525470a161dbd0c3501fc
|
###############################################################################
# Author: Daniil Budanov
# Contact: danbudanov@gmail.com
# Summer Internship - 2016
###############################################################################
# Title: onlinevid.py
# Project: Security System
# Description:
# class for online video streaming
# OpenCV's built-in VideoCapture breaks when given URL
# this class opens a stream and parses out every frame of the video
# Last Modified: 7.14.2016
###############################################################################
import numpy as np
import cv2
import urllib
class OnlineVideo(object):
"""
USAGE:
cap = OnlineVideo(url)
ex. OnlineVideo('http://IP_ADDRESS:PORT/video.mjpg')
frame = cap.read()
see: http://stackoverflow.com/questions/21702477/how-to-parse-mjpeg-http-stream-from-ip-camera
"""
# open the url
def __init__(self, url):
self.stream = urllib.urlopen(url)
self.bytes = ''
self.frame = np.zeros((480, 640, 3), np.uint8)
print "url opened at: ", url
# read data frame-by-frame
def read(self):
# read data by chunks
numBytes = 13840
self.bytes += self.stream.read(numBytes)
        # parse a chunk
a = self.bytes.find('\xff\xd8')
b = self.bytes.find('\xff\xd9')
if a != -1 and b != -1:
jpg = self.bytes[a:b+2]
self.bytes = self.bytes[b+2:]
self.frame = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        # return frame availability and the latest frame (the frame is a numpy
        # array, so test it against None rather than calling bool() on it)
        return self.frame is not None, self.frame
# stop online stream
def release(self):
self.stream.close()
|
Python
|
CL
|
a9a0ed04a89839443a9392121080d69a50b46080bb04639ceb63f2b32293aae7
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'pivoter' de la commande 'canon'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmPivoter(Parametre):
"""Commande 'canon pivoter'.
"""
def __init__(self):
"""Constructeur du paramètre"""
Parametre.__init__(self, "pivoter", "pivot")
self.schema = "<nombre>"
self.aide_courte = "fait pivoter le canon"
self.aide_longue = \
"Cette commande permet de faire pivoter le canon " \
"horizontalement. Tous les canons ne peuvent pas être " \
"réorientés et ceux qui le peuvent disposent généralement " \
"d'angles de tir. Même quand ce n'est pas le cas, n'oubliez " \
"pas que vous devez faire attention à aligner le canon " \
"correctement (si vous le retournez complètement, c'est " \
"votre propre navire qui sera endommagé par l'explosion). " \
"Précisez l'angle en degrés : un nombre positif (par exemple " \
"|ent|90|ff| pour faire pivoter le canon de 90°) fera " \
"pivoter le canon vers tribord, un nombre négatif (par " \
"exemple |ent|-90|ff|) fera pivoter le canon sur bâbord."
def ajouter(self):
"""Méthode appelée lors de l'ajout de la commande à l'interpréteur"""
nombre = self.noeud.get_masque("nombre")
nombre.proprietes["limite_inf"] = "-359"
nombre.proprietes["limite_sup"] = "359"
def interpreter(self, personnage, dic_masques):
"""Interprétation du paramètre"""
salle = personnage.salle
personnage.agir("manip_canon")
canon = None
angle = dic_masques["nombre"].nombre
if hasattr(salle, "navire"):
for element in salle.elements:
if element.nom_type == "canon":
canon = element
break
if canon is None:
personnage << "|err|Aucun canon ne se trouve ici.|ff|"
return
if angle == 0:
personnage << "|err|Vous avez précisé un angle nul.|ff|"
return
if not hasattr(salle, "sabord_min"):
sabord_min = None
else:
sabord_min = (salle.sabord_min - salle.sabord_max) % 360
sabord_max = (salle.sabord_min + salle.sabord_max) % 360
if sabord_min is None or sabord_min == 0:
personnage << "|err|Vous ne pouvez faire pivoter ce canon.|ff|"
return
h_angle = canon.h_angle
m_angle = (h_angle + angle) % 360
if not salle.sabord_oriente(m_angle):
personnage << "|err|Vous ne pouvez faire pivoter ce canon " \
"dans ce sens.|ff|"
return
canon.h_angle = m_angle
cote = " tribord"
r_cote = "tribord"
if m_angle == 0:
cote = ""
elif m_angle < 0:
cote = " bâbord"
m_angle = -m_angle
if angle < 0:
r_cote = "bâbord"
personnage << "Vous faites pivoter {} sur {}.".format(canon.nom,
r_cote)
salle.envoyer("{{}} fait pivoter {} sur {}.".format(canon.nom, r_cote),
personnage)
personnage << "{} est à présent sur {}°{}.".format(
canon.nom.capitalize(), m_angle, cote)
|
Python
|
CL
|
3f5ac11a7f1cfaa9f2b905d7a6a8243730a09ed22c37c0743060487d894abe00
|
# Lucas J. Koerner
# 05/2018
# koerner.lucas@stthomas.edu
# University of St. Thomas
'''
The SCPI module includes the SCPI class, functions to convert return values, and builds
a SCPI object (using the function init_instrument) from a CSV file of commands and lookups.
'''
# standard library imports
import warnings
import time
import sys
import math
import ast
from collections import defaultdict
import functools
# imports that may need installation
import pandas as pd
import colorama
import numpy as np
import serial
import pyvisa as visa
from pyvisa.constants import StatusCode
# local package imports
from instrbuilder.command import Command
from instrbuilder import utils
# -----------------------------------------
# a dictionary of functions that are used to convert return values from getters
convert_return = defaultdict(lambda: str)
convert_return['string'] = str
convert_return['float'] = float
convert_return['double'] = float
convert_return['int'] = int
convert_return['nan'] = str
def arr_str(str_in):
""" convert string such as '2.3', '5.4', '9.9' to a list of floats """
return np.asarray(list(map(lambda x: float(x), str_in.split(','))))
def arr_bytes(bytes_in):
""" convert array of bytes such as b'1,0\r' to a list of floats """
str_in = bytes_in.decode('utf-8').rstrip()
return np.asarray(list(map(lambda x: int(x), str_in.split(','))))
def arr_bytes_floats(bytes_in):
""" convert array of bytes such as b'-3.051776e-004,-3.051776e-004,\r', to a list of floats """
str_in = bytes_in.decode('utf-8').rstrip()
return np.asarray(list(map(lambda x: float(x), list(filter(None, str_in.split(','))))))
def str_strip(str_in):
""" strip whitespace at right of string. Wrap string rstrip method into function """
return str(str_in.rstrip())
def keysight_error(str_in):
""" detect for an error return, specific to Keysight.
Parameters
----------
str_in : string
input string to check
Returns
----------
bool
"""
return str_in[0:2] != '+0'
# add attribute to the getter conversion function so that bluesky
# (or the generation of a bluesky signal) knows what to do
def returns_array(func):
func.returns_array = True
return func
nop = lambda x: x
convert_return['str'] = str_strip
convert_return['str_array_to_numarray'] = returns_array(arr_str)
convert_return['byte_array_to_numarray'] = returns_array(arr_bytes)
convert_return['byte_array_to_numarray_floats'] = returns_array(arr_bytes_floats)
convert_return['keysight_error'] = keysight_error
convert_return['pass'] = nop
convert_return['pass_array'] = returns_array(nop)
# getter conversion function to determine if a single bit is set. Returns True or False
for i in range(8):
    # bind i as a default argument so each lambda captures its own bit index
    convert_return['bit{}_set'.format(
        i)] = lambda x, i=i: bool(functools.partial(utils.get_bit, bit=i)(int(x)))
# getter conversion function to determine if a single bit is cleared. Returns True or False
for i in range(8):
    # bind i as a default argument so each lambda captures its own bit index
    convert_return['bit{}_cleared'.format(
        i
    )] = lambda x, i=i: not bool(functools.partial(utils.get_bit, bit=i)(int(x)))
#### -----------------------------------------
divider_string = '='*80 + '\n'
getter_debug_value = '7' # when running headless (no instruments attached) all getters return this arbitrary value
class SCPI(object):
"""A SCPI (or SCPI like) instrument with a list of commands. The instrument has methods to get and set info of each command.
Parameters
----------
cmd_list : Command
A list of commands. Each command is an object of the class Command
comm_handle : Communication object
handle to the (general) hardware interface
Example is the pyvisa instrument object: inst
Needed when commands are overriden
must have a:
write method (Examples are pySerial write() or pyvisa inst.write())
and an ask method (Examples are pySerial ask() and pyvisa inst.query())
name : str, optional
Name of the instrument
unconnected : bool, optional
For simulation & testing without instruments
If true a "fake" ask and write command are configured. Ask always returns the same value (getter_debug_value).
Attributes
----------
unconnected : bool
if True the instrument is unconnected and returns appropriately
configured garbage values just for testing
vendor_id : str
id returned by the identification command
name : str
name the user assigns
comm_handle : object
the communication object (could be from pyvisa or pyserial)
Methods
----------
get(name, configs={}) :
get the value for the command of a given name
set(name, value=None, configs={}) :
set a value for the command of name
list_cmds() :
print all cmds
help_all(subsystem_list=None) :
list help for all commands (or for commands within a list of subsystems)
help(name):
print help on a command of the provided name
log_all_getters(filename=None, suppress_stdout=False):
write all values that can be read to a file or to stdout
test_command(name, set_vals=None, get_configs={}, set_configs={}):
test a specific command by sending a value and checking the readback of that value
test_all(skip_subsystem=['setup', 'status', 'system'],
skip_commands=['fast_transfer', 'reset']) :
test all commands
"""
def __init__(self,
cmd_list,
comm_handle,
name='not named',
unconnected=False):
self._cmds = {}
for cmd in cmd_list:
self._cmds[cmd.name] = cmd
self._write = comm_handle.write
try:
self._ask = comm_handle.query
        except AttributeError:
            self._ask = comm_handle.ask  # pyserial
self.unconnected = unconnected
# get the vendor ID, which often includes firmware revision and other useful info.
try:
vendor_id = self.get('id')
print('Opened Instrument: {}'.format(vendor_id))
except Exception as e:
print(e)
print(
'ID command not returned by instrument. Vendor ID set to None')
vendor_id = None
self.vendor_id = vendor_id
self.name = name
self.comm_handle = comm_handle
def __dir__(self):
return self._cmds.keys()
def __len__(self):
return len(self._cmds)
def get(self, name, configs={}):
if not self._cmds[name].getter:
print('This command {} is not a getter'.format(name))
raise NotImplementedError
if self._cmds[name].getter_override is not None:
return self._cmds[name].getter_override(**configs)
cmd_str = self._cmds[name].ascii_str_get
ret_val = self._ask(cmd_str.format(**configs))
# if the instrument is not connected, check if the command has a specific return value
if self.unconnected:
try:
ret_val = self._cmds[name]._unconnected_val
except Exception as inst:
print(inst)
pass
try:
val = self._cmds[name].getter_type(ret_val)
# check if a lookup table exists
if bool(self._cmds[name].lookup): # bool(dict) --> checks if dictionary is empty
try:
# check if this value matches a key in the lookup table
val = list(self._cmds[name].lookup.keys())[list(
self._cmds[name].lookup.values()).index(val)]
except ValueError:
print('Warning: {} value of {} not in the lookup table'.
format(name, val))
return val
except ValueError:
print('Warning! getter {} returned unexpected type'.format(
self._cmds[name].name))
print(' Returned {}; with type = {}; expects = {}'.format(
ret_val, type(ret_val), self._cmds[name].getter_type))
def set(self, name, value=None, configs={}):
""" set a value
Parameters
----------
name : string
name of the command (first column in the csv file)
value : Union[str, int, float, None]
the value to set
configs : dict, optional
special configurations beyond the 'value'; specified in the csv file
Returns
----------
str
.. todo:: check this and fix?
"""
cmd_str = self._cmds[name].ascii_str
if value is not None:
# check if this value is a key in the lookup table
if value in self._cmds[name].lookup:
try:
value = self._cmds[name].lookup[value]
except Exception as set_error:
pass # just keep value
self.check_set_range(value, name)
cmd_str = cmd_str.format(value=value, **configs)
# allow for a setter with no value (e.g. '*RST')
else: # is the value is None
cmd_str = cmd_str.format(value='').rstrip()
# for pytests
if self.unconnected:
self._cmds[name]._unconnected_val = value
# send the command to the instrument
return self._write(cmd_str)
def check_set_range(self, value, name):
""" check if the value to be set is within range
Parameters
----------
name : string
name of the command (first column in the csv file)
value : Union[str, int, float, None]
the value to set
Returns
----------
bool
True if in range
"""
if self._cmds[name].limits is None:
return True
if (len(self._cmds[name].limits) == 2) and (type(
self._cmds[name].limits[0]) is not str):
# numeric, check if less than or greater than
if (value >= self._cmds[name].limits[0]) and (
value <= self._cmds[name].limits[1]):
return True
else:
# throw out of range warning
self.out_of_range_warning(value, name)
return False
else:
# check if value is a member
if value in self._cmds[name].limits:
return True
else:
# throw out of range warning
self.out_of_range_warning(value, name)
return False
def out_of_range_warning(self, value, name):
""" throw a warning
Parameters
----------
value : Union[str, int, float, None]
the value to set
name : string
name of the command (first column in the csv file)
Returns
----------
UserWarning
"""
warnings.warn(
'\n {} value of {} is out of the range of {}'.format(
name, value, self._cmds[name].limits), UserWarning)
def list_cmds(self):
""" list all commands """
for key in self._cmds:
print('{}'.format(self._cmds[key].name))
def help_all(self, subsystem_list=None):
""" print help for all commands
Parameters
----------
subsystem_list : list, optional
a list of subsystems to limit the printing to
name : string
name of the command (first column in the csv file)
"""
if subsystem_list is None:
# get all subsystems
subsystems = [self._cmds[d].subsystem for d in self._cmds]
subsystems = [s if s is not None else 'Unassigned' for s in subsystems]
# create a list of unique subsystems
subsystem_set = set(subsystems)
else:
subsystem_set = set(subsystem_list)
for s in subsystem_set:
print(divider_string)
print(
                f'Help for Subsystem: {colorama.Fore.RED}{s}{colorama.Style.RESET_ALL}:'
)
print('\n')
for k in self._cmds:
if self._cmds[k].subsystem == s:
self.help(k)
print('')
def help(self, name):
""" print help for a single command
Parameters
----------
name : str
the name of the command
"""
if self._cmds[name].subsystem is not None:
sub_sys = ' in subsystem: {}'.format(self._cmds[name].subsystem)
else:
sub_sys = ''
print(
f'Help for command {colorama.Fore.GREEN}{self._cmds[name].name}{colorama.Style.RESET_ALL}{sub_sys}:'
)
print(' {}'.format(self._cmds[name].doc))
if self._cmds[name].limits is not None:
print(' Allowable range is: {}'.format(
self._cmds[name].limits))
if len(self._cmds[name].set_config_keys) > 0:
print(
' The setter needs a configuration dictionary with keys: {}'.
format(', '.join(self._cmds[name].set_config_keys)))
if self._cmds[name].getter:
print(' Returns: {}'.format(
self._cmds[name].getter_type.__name__))
if len(self._cmds[name].set_config_keys) > 0:
print(
' Getting a value needs a configuration dictionary with keys: {}'.
format(', '.join(self._cmds[name].get_config_keys)))
if len(self._cmds[name].lookup) > 0:
print(' This command utilizes a lookup table on get and set:')
print(' ' + str(self._cmds[name].lookup))
def log_all_getters(self, filename=None, suppress_stdout=False):
""" save all gettable values to a file and send to stdout
Parameters
----------
filename : str, optional
name of the file (if None no file is saved)
suppress_stdout : bool, optional
if True the getters will not be printed to stdout
Returns
----------
dict
dictionary with the command name as keys and the results as values
"""
# .. todo:: read getters that need a configuration input
keys = []
results = []
for key in self._cmds:
if self._cmds[key].getter and (self._cmds[key].getter_inputs == 0):
keys.append(key)
results.append(self.get(key))
# print to stdout if not suppressed
if not suppress_stdout:
for (key, result) in zip(keys, results):
print('{} = {}'.format(key, result))
# write to a file if a file name is provided as input
if filename is not None:
with open(filename, 'w') as f:
print('Time = {}'.format(time.time()), file=f)
                print('Instrument = {}'.format(self.name), file=f)
for (key, result) in zip(keys, results):
print('{} = {}'.format(key, result), file=f)
return dict(zip(keys, results))
def read_comm_err(self):
""" Read if the instrument has flagged a communciation error
The csv command file must have a getter with name comm_error that returns a bool
Returns
----------
bool
if True a comm error was detected
"""
try:
return self.get('comm_error')
except KeyError as inst:
print(
'Error: The command comm_error must be configured to read instrument errors'
)
sys.exit()
def test_command(self, name, set_vals=None, get_configs={},
set_configs={}):
""" Test a command by setting and getting to determine if:
        1) the instrument reports a communication error
2) the return value is of an unexpected type or an error threshold away from what was set
Parameters
----------
name : str
Name of the command
set_vals : list, optional
A list of values to test by a sequence of set and get.
If not provided the low and high limits are used
get_configs : dict, optional
A dictionary of configs to send the get command
set_configs : dict, optional
A dictionary of configs to send the set command
Returns
-------
bool
True if the command is successful, False otherwise.
Example
-------
dmm.test_command('curr_range', set_configs = {'ac_dc':'DC'}, get_configs = {'ac_dc':'DC'})
"""
comm_error = False
allowed_err = 0.02 # .. todo:: determine error magnitude that is allowed for automated checking
if (len(self._cmds[name].get_config_keys) != len(get_configs)) or (len(
self._cmds[name].set_config_keys) != len(set_configs)):
print('Skipping test of: {}'.format(name))
print(
' Automated test of getters or setters that require a configuration input is not yet implemented'
)
print('An input configuration dictionary is required')
return 'NotTested'
# if getter and setter
if (self._cmds[name].getter and self._cmds[name].setter):
ret = self.get(name, configs=get_configs)
comm_error |= self.read_comm_err()
if set_vals is None:
try:
set_vals = [
self._cmds[name].limits[0], self._cmds[name].limits[1]
]
                except Exception:
print(
'Skipping test of setter {} since limits are missing'.
format(name))
return 'NotTested'
for set_val in set_vals:
self.set(name, set_val, configs=set_configs)
comm_error |= self.read_comm_err()
ret = self.get(name, configs=get_configs)
# if present remove lookup table modification
try:
ret = self._cmds[name].lookup[ret]
except:
pass
comm_error |= self.read_comm_err()
if self._cmds[name].getter_type == float:
try:
deviates = np.abs(
(ret - set_val) / set_val) > allowed_err
except ZeroDivisionError:
deviates = (ret != set_val)
else:
deviates = (ret != set_val)
if deviates:
comm_error = True
if self._cmds[name].getter_type == float:
print(
                            'Get vs. set difference greater than {}% for command {}'.
format(allowed_err * 100, name))
else:
print(
'Get vs. set difference for command {}'.format(
name))
print('Set {}; got {}'.format(set_val, ret))
print(divider_string)
# if setter only
elif self._cmds[name].setter:
if (self._cmds[name].limits) is None:
set_val = None
self.set(name, set_val, configs=set_configs)
comm_error |= self.read_comm_err()
elif (len(self._cmds[name].limits) > 2):
set_vals = [
self._cmds[name].limits[0], self._cmds[name].limits[-1]
]
for set_val in set_vals:
self.set(name, set_val, configs=set_configs)
comm_error |= self.read_comm_err()
else:
print('Skipping test of setter {}'.format(name))
return 'NotTested'
# if getter only
elif self._cmds[name].getter:
ret = self.get(name, configs=get_configs)
comm_error |= self.read_comm_err()
else:
print('Command is not a setter nor a getter, cannot test!')
return not comm_error
def test_all(self,
skip_subsystem=['setup', 'status', 'system'],
skip_commands=['fast_transfer', 'reset']):
""" Test all commands by setting and getting to determine if:
        1) the instrument reports a communication error
2) the return value is of an unexpected type or an error threshold away from what was set
Parameters
----------
skip_subsystem : list (of strings), default = ['setup', 'status']
subsystems to skip, an example might be commands in the status subsystem
that reset the instrument
skip_commands : list (of strings), default = ['fast_transfer', 'reset']
Commands to skip
Returns
-------
dict
Keys are each commands tested, value is True (command succeeded) or False (command errored)
"""
all_tests = {}
for key in self._cmds:
if (self._cmds[key].subsystem in skip_subsystem) or (
key in skip_commands):
pass
else:
print('Testing {}'.format(key))
status = self.test_command(key)
all_tests[key] = status
print('Result for {} = {}'.format(key, status))
#### ---- Print and return results -----
print('\n')
print(divider_string)
print('Command Test Results:')
import pprint
pprint.pprint(all_tests)
print('Returns True if command is successful')
return all_tests
class PyVisaUSB(object):
"""A USBPyVISA instrument (connected via a USB cable)
Parameters
----------
address: str
the address of the device
Attributes
----------
    comm : visa communication object
"""
def __init__(self, address):
try:
self.comm = self.open_visa(address)
except Exception as inst:
print(inst)
print('Device Opening failed')
def open_visa(self, addr):
""" open a VISA object
Parameters
----------
addr : str
the address of the device
Returns
----------
PyVISA object
.. todo::
* determine if error flag
* enable or disable of lookup table
"""
mgr = visa.ResourceManager()
resources = mgr.list_resources()
if addr in resources:
# open device .. todo:: check return value
obj = mgr.open_resource(addr)
elif addr not in resources:
print(
'Trying to open the device even though it was not found by the resource manager'
)
obj = mgr.open_resource(addr)
else:
print(
'This address {} was not recognized'.format(addr),
file=sys.stderr)
print('Returning an empty handle', file=sys.stderr)
obj = None
return obj
def ask(self, cmd):
""" Send a query to the instrument
Parameters
----------
cmd : str
the ASCII string sent to the device
Returns
----------
str
ASCII string returned by the device
"""
res = self.comm.query(cmd)
return res
def write(self, cmd):
""" Write a command to the instrument
Parameters
----------
cmd : str
the ASCII string sent to the device
Returns
----------
bool
if True transaction was successful
str
returned value .. todo:: check this
"""
ret = self.comm.write(cmd)
return ret[1] == StatusCode.success, ret
def close(self):
pass
class Serial(object):
"""A PySerial instrument (connected via a serial cable, i.e. RS232)
Parameters
----------
ser_port : str
the address of the device (example on a MAC is '/dev/tty.USA19H141113P1.1')
baudrate : int, optional
the serial channel baudrate to configure
parity : str, optional
options given by serial.PARITY_NONE, serial.PARITY_EVEN, serial.PARITY_ODD
bytesize : int, optional
options given by serial.EIGHTBITS, serial.FIVEBITS, serial.SEVENBITS
Attributes
----------
    ser : the serial communication object
terminator : the termination character to send
"""
def __init__(self, ser_port, **kwargs):
self.ser = serial.Serial(
port=ser_port,
baudrate=kwargs.get('baudrate', 9600),
parity=kwargs.get('parity', serial.PARITY_NONE),
bytesize=kwargs.get('bytesize', serial.EIGHTBITS))
self.terminator = kwargs.get('terminator', ' \n')
self.eol = kwargs.get('eol', b'\r')
        self.ser_port = ser_port  # kept so open() can report the address on failure
        self.open()
# some instruments need an initialization write,
# i.e. turn on remote interface mode
init_write = kwargs.get('init_write')
if init_write is not None:
self.write(init_write)
def open(self):
self.ser.close()
self.ser.open()
cnt = 0
while not self.ser.isOpen():
time.sleep(0.1)
cnt = cnt + 1
if cnt > 25:
print('Failed to open Serial interface at address: {}'.format(
self.ser_port))
def ask(self, cmd):
self.write(cmd)
res = self._readline()
return res
def write(self, cmd):
cmd = cmd + self.terminator
self.ser.write(cmd.encode('utf-8'))
return (True,
'no-details') # pyserial does not return a success upon write
def close(self):
self.ser.close()
# https://stackoverflow.com/questions/16470903/pyserial-2-6-specify-end-of-line-in-readline
def _readline(self):
#eol = b'\r'
leneol = len(self.eol)
line = bytearray()
while True:
c = self.ser.read(1)
if c:
line += c
if line[-leneol:] == self.eol:
break
else:
break
return bytes(line)
def init_instrument(cmd_map, addr, lookup=None, **kwargs):
"""
initialize an instrument with its address and CSV file of commands
Parameters
----------
cmd_map : str
path to the CSV file of instrument commands
addr : dict
key is one of pyserial, pyvisa; value is the address of the instrument
lookup : str, optional
filename of the CSV file of lookup table
Returns
----------
list
list of commands that will be used for building the instrument
object
communication handle
bool
True if instrument is not connected
"""
# Read CSV file of commands using Pandas
df = pd.read_csv(cmd_map)
# strip white space and end-of-line from column headers
df = df.rename(columns=lambda x: x.strip())
# strip white space and end-of-line from string inputs
df['setter_type'] = df['setter_type'].str.strip()
df['getter_type'] = df['getter_type'].str.strip()
# Read CSV file of lookups
if lookup:
df_look = pd.read_csv(lookup)
# strip white space and end-of-line from column headers
df_look = df_look.rename(columns=lambda x: x.strip())
# drop empty rows (for example, at the end)
df_look = df_look.dropna(how='all')
# make a dictionary for each command
cmd_lookups = {}
for index, row in df_look.iterrows():
if index == 0:
try:
if math.isnan(row['command']):
raise Exception(
'The first element of the lookup table is empty')
except Exception as inst:
pass
try:
if not math.isnan(row['command']):
current_cmd = current_cmd # shouldn't get here
except Exception as inst:
current_cmd = row['command']
try:
dict_key = float(row['name'])
except ValueError:
dict_key = str(row['name'])
if current_cmd in cmd_lookups.keys():
cmd_lookups[current_cmd][dict_key] = row['value']
else:
# initialize the dictionary
cmd_lookups[current_cmd] = {}
cmd_lookups[current_cmd][dict_key] = row['value']
cmd_list = []
for index, row in df.iterrows():
# convert getter, setter to Boolean True or False
for gs in ['getter', 'setter']:
if row[gs] in ['True', 'T', 'TRUE', 'true', True]:
tmp = True
elif row[gs] in ['False', 'F', 'FALSE', 'false', False]:
tmp = False
else:
tmp = False
row[gs] = tmp # converts to Boolean
if row['setter_range'] is not None:
try:
row['setter_range'] = ast.literal_eval(row['setter_range'])
except ValueError:
if not math.isnan(row["setter_range"]):
print(
f'Warning setter_range of {colorama.Fore.GREEN}{row["setter_range"]}{colorama.Style.RESET_ALL} for command {colorama.Fore.BLUE}{row["name"]}{colorama.Style.RESET_ALL} not of proper form'
)
row['setter_range'] = None
# pandas read default value is nan. Convert to None or 0 depending upon column
def modify_default(row_el, default_value):
try:
row_el = default_value if math.isnan(row_el) else row_el
except TypeError:
row_el = row_el
return row_el
row['setter_inputs'] = modify_default(row['setter_inputs'], 1)
row['getter_inputs'] = modify_default(row['getter_inputs'], 0)
row['ascii_str_get'] = modify_default(row['ascii_str_get'], None)
row['subsystem'] = modify_default(row['subsystem'], None)
if False:
print('---')
print(row['name'])
print(cmd_lookups.keys())
print('---')
if row['name'] in cmd_lookups.keys():
lookup_dict = cmd_lookups[row['name']]
else:
lookup_dict = {}
cmd = Command(
name=row['name'],
ascii_str=row['ascii_str'],
ascii_str_get=row['ascii_str_get'],
getter=row['getter'],
getter_type=convert_return[row['getter_type']],
setter=row['setter'],
setter_type=convert_return[row['setter_type']],
limits=row['setter_range'],
doc=row['doc'],
subsystem=row['subsystem'],
getter_inputs=row['getter_inputs'],
setter_inputs=row['setter_inputs'],
lookup=lookup_dict,
is_config=row['is_config'])
cmd_list.append(cmd)
# check to ensure the dictionary only has 0 or 1 entry
if len(addr) > 1:
sys.exit('Multiple keys: {}'.format(list(addr.keys())))
# pySerial:Serial
if 'pyserial' in addr:
try:
inst = Serial(addr['pyserial'], **kwargs)
inst_comm = inst
inst_comm.ser.flush()
unconnected = False
except Exception as inst:
print(inst)
unconnected = True
print('PySerial address not found {}'.format(addr['pyserial']))
print('Possible serial addresses:')
import glob
import platform
if platform.system() == 'Darwin':
print('On your MAC at /dev/tty.USA*')
print(glob.glob("/dev/tty.USA*"))
elif platform.system() == 'Linux':
print('On your Linux Box at /dev/tty.USA* ??')
print(glob.glob("/dev/tty.USA*"))
            elif platform.system() == 'Windows':
                print('On your Windows machine, try: python -m serial.tools.list_ports')
                #print(glob.glob("/dev/tty.USA*"))
# pyvisa:PyVisaUSB
elif 'pyvisa' in addr:
try:
inst = PyVisaUSB(addr['pyvisa'])
inst_comm = inst.comm
unconnected = False
except Exception as e:
print(e)
unconnected = True
print('PyVISA address {} not found'.format(addr['pyvisa']))
# unattached instrument
else:
unconnected = True
if unconnected: #Targeting a PyVISA like instrument
# allow for debugging without instruments attached:
# print command to stdout, always return getter_debug_value
print(divider_string, end='')
print('Running in debug mode without instrument attached')
print('All commands sent to the instrument will be printed to stdout.')
print(
'Unless specified by cmd attribute _unconnected_val' +
' \ngetters will always return {} (getter_debug_value)'.
format(getter_debug_value))
print(divider_string)
class Comm():
pass
inst_comm = Comm()
def ask(str_input):
print(str_input)
return getter_debug_value
def write(str_input):
print(str_input)
inst_comm.write = write
inst_comm.query = ask
return cmd_list, inst_comm, unconnected
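# A minimal usage sketch (hypothetical file names and serial port; extra
# keyword arguments are forwarded to the Serial wrapper defined above):
#   cmds, comm, offline = init_instrument('commands.csv',
#                                         addr={'pyserial': '/dev/ttyUSB0'},
#                                         lookup='lookup.csv')
#   if not offline:
#       comm.write('*IDN?')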
|
Python
|
CL
|
7ce7220c56afbbd35f3cccff93f327acbca18111d21b6b4d43510edb2e15bb54
|
from subprocess import PIPE, Popen
from time import sleep
def call_bin(path, stdin=None, *args, **kwargs):
"""
params:
path :: string
el path del coso que queres ejecutar
stdin :: string
el string que se manda a stdin. Si es None no se manda nada
todos los args que se pasan seran pasados como argumentos y
kwargs seran pasados como -clave valor
esta llamada es bloqueante
returns:
(stdout, sterr)
"""
args= list(args)
for k, v in kwargs.iteritems():
if len(k) > 1: k= '--%s' % k
else: k= '-%s' % k
args.insert(0, v)
args.insert(0, k)
args.insert(0, path)
if stdin is not None:
p= Popen(args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
res= p.communicate(stdin)
else:
p= Popen(args, stdout=PIPE, stderr=PIPE)
res= p.communicate()
return res
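# Minimal usage sketch (assumes an `echo` binary on PATH; on Python 3 pass
# stdin as bytes, e.g. b"data", since communicate() writes raw bytes):
#   out, err = call_bin('echo', None, 'hello')   # runs: echo hello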
|
Python
|
CL
|
d7536d7f24172267b391f20b12a060d00a495f49258d449218d01563ab21b607
|
# -*- coding: UTF-8 -*-
# This file is part of the docker-dropbox-app package (https://github.com/rbonghi/docker-dropbox-app or http://rnext.it).
# Copyright (c) 2020 Raffaello Bonghi.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import argparse
import sys
import os
import time
# Package imports
from dbsync import UpDown
# Create module logger
logger = logging.getLogger(__name__)
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def main():
"""Main program.
Parse command line, then iterate over files and directories under
rootdir and upload all files. Skips some temporary files and
directories, and avoids duplicate uploads by comparing size and
mtime with the server.
"""
parser = argparse.ArgumentParser(description='Sync ~/dropbox to Dropbox')
parser.add_argument('--rootdir',
default=os.environ['DROPBOX_ROOTDIR'] if "DROPBOX_ROOTDIR" in os.environ else "~/Downloads",
help='Local directory to upload')
parser.add_argument('--folder', '-f',
default=os.environ['DROPBOX_FOLDER'] if "DROPBOX_FOLDER" in os.environ else "",
help='Folder name in your Dropbox')
parser.add_argument('--appKey', default=os.environ['DROPBOX_APP_KEY'] if "DROPBOX_APP_KEY" in os.environ else "",
help='Application key')
parser.add_argument('--appSecret',
default=os.environ['DROPBOX_APP_SECRET'] if "DROPBOX_APP_SECRET" in os.environ else "",
help='Application secret')
parser.add_argument('--refreshToken',
default=os.environ['DROPBOX_REFRESH_TOKEN'] if "DROPBOX_REFRESH_TOKEN" in os.environ else "",
help='Refresh token')
parser.add_argument('--interval', '-i',
default=int(os.environ['DROPBOX_INTERVAL']) if "DROPBOX_INTERVAL" in os.environ else 10,
help='Interval to sync from dropbox')
    parser.add_argument('--fromDropbox', action='store_true',
                        help='Sync direction: overwrite local files from Dropbox')
    parser.add_argument('--fromLocal', action='store_true',
                        help='Sync direction: overwrite Dropbox from local files')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Enable verbose (debug) logging')
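    # Example invocation (illustrative; the script name and all values are
    # placeholders, supply your own Dropbox app credentials and paths):
    #   python dbsync_main.py --rootdir ~/Downloads --folder backup \
    #       --appKey KEY --appSecret SECRET --refreshToken TOKEN --fromLocal -v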
# Parser arguments
args = parser.parse_args()
    # Initialize logger
level = logging.DEBUG if args.verbose else logging.INFO
logging.basicConfig(level=level, format='%(name)s - %(levelname)s - %(message)s')
# Check token
if not (args.appKey and args.appSecret):
print(f"{bcolors.FAIL}app key and app secret must be set{bcolors.ENDC}")
sys.exit(2)
# Check folders
folder = args.folder
rootdir = os.path.expanduser(args.rootdir)
if not os.path.exists(rootdir):
print(f"{bcolors.FAIL}{rootdir} does not exist on your filesystem{bcolors.ENDC}")
sys.exit(1)
elif not os.path.isdir(rootdir):
print(f"{bcolors.FAIL}{rootdir} is not a folder on your filesystem{bcolors.ENDC}")
sys.exit(1)
# Configure type of overwrite
if args.fromDropbox:
overwrite = "dropbox"
elif args.fromLocal:
overwrite = "host"
else:
overwrite = ""
# Start updown sync with refresh token, designed for long living
updown = UpDown(args.appKey, args.appSecret, args.refreshToken, folder, rootdir, interval=args.interval,
overwrite=overwrite)
# Run observer
logger.info("Server started")
updown.start()
# Run loop
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
logger.debug("Keyboard interrupt")
# Stop server
updown.stop()
if __name__ == '__main__':
main()
# EOF
|
Python
|
CL
|
923a3cf8cd6459a27c4579eba7fa1510d713110822e0fea0463b6d6e9aefc7f7
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module provides base classes that all controllers must inherit.
"""
from __future__ import print_function, division, absolute_import
from PyQt4.QtCore import QObject
class GAUDInspectBaseChildController(QObject):
"""
A base class that child controllers must inherit to be functional.
Child controllers are those who will be called by `.main.GAUDInspectController`
and are devoted to handle specific parts of the application.
Parameters
----------
parent : QObject
Most of the time, it will be the main instance of
`.main.GAUDInspectController`, declared along the
master view and model in the root `main.py`.
Needed to handle the parent mechanism of Qt.
tabindex : int, optional
If the controller handles a `QWidget` that is part of a `QTabWidget`, this
attribute keeps the tab index in the tabber. That way, some helper
methods can be defined.
Attributes
----------
app : PyQt4.QtGui.QApplication
Shortcut to the QApplication instance that runs all the GUI.
view : PyQt4.QtGui.QMainWindow
The main view of the application, extracted directly from the parent
controller.
model : object
The main model of the application, extracted directly from the parent
controller.
"""
def __init__(self, parent=None, tabindex=None, *args, **kwargs):
super(GAUDInspectBaseChildController, self).__init__(parent)
self.app = self.parent().app
self.view = self.parent().view
self.model = self.parent().model
        # Optional attributes
        self.tabindex = tabindex  # keep the tab index passed by the caller, if any
        self.childmodel = None
# Standard API
def set_model(self, model):
"""
Sets child model if it was not declared at instance initialization.
"""
pass
def signals(self):
"""
Connects all signals to their respective slots. To be called
from `__init__`, if needed.
"""
pass
def slots(self):
"""
Since a slot can consist of new objects that are created
on demand with private methods, this method groups them
together with more friendly names.
"""
pass
# Convenience methods
def set_current(self):
"""
If `self.tabindex` is defined, set the focus to that tab.
"""
if self.tabindex is not None:
self.view.tabber.setCurrentIndex(self.tabindex)
def set_active(self):
"""
If `self.tabindex` is defined, set the focus to that tab
and disable any other visible tabs.
"""
if self.tabindex is not None:
for i in range(self.view.tabber.count()):
self.view.tabber.setTabEnabled(i, False)
self.view.tabber.setTabEnabled(self.tabindex, True)
self.view.tabber.setCurrentIndex(self.tabindex)
def restore_enabled(self):
"""
Reenable all tabs.
"""
for i in range(self.view.tabber.count()):
self.view.tabber.setTabEnabled(i, True)
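# A minimal sketch of a concrete child controller (hypothetical names; the
# real controllers are defined elsewhere in the application):
#
#   class ResultsController(GAUDInspectBaseChildController):
#       def __init__(self, parent=None, tabindex=2):
#           super(ResultsController, self).__init__(parent, tabindex=tabindex)
#           self.signals()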
|
Python
|
CL
|
53dac5d37752f42398a4a60021e55959d002c52f4cb0dc526ac6d601e3524e8e
|
import sklearn
import pandas as pd
import numpy as np
from sklearn import linear_model
from sklearn.utils import shuffle
# save our best model for later use so we don't have to re-train over and over again
# essentially we want to save the model with the highest accuracy by using pickle
import matplotlib.pyplot as pyplot
import pickle
from matplotlib import style
# read data in
# sep ~ separator. In the CSV file, each value is separated by ';'
data = pd.read_csv("student-mat.csv", sep=";")
# trim data down to only the attributes we want: G1, G2, studytime, failures, absences
# - pick attributes with int values. If an attribute is a string we would need to convert it to int
# - (the full data set has ~32 attributes, see details in the UCI Data Set info)
data = data[["G1", "G2", "G3", "studytime", "failures", "absences"]]
# print the first few rows of data
#print(data.head())
# set up label -> we want the machine to determine/predict G3
predict = "G3"
# set up 2 arrays
# - 1 array will store our label/labels
# - 1 array will store our attributes
# this returns a new dataframe that does NOT have G3 <- used later to train the machine
x = np.array(data.drop([predict], axis=1))
# return a new array that only has G3 <- used later to compare with the machine's predictions
y = np.array(data[predict])
# take our labels and the attributes used for prediction, and split them into 4 different arrays
# - x_train is a portion of x ; y_train is a portion of y
# - x_test and y_test are used to test the accuracy of the machine's predictions
# it splits off 10% of our data into a test sample (x_test & y_test) to test the machine on
# data it has never seen before
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size=0.1)
# best = 0
# time_train = 30 # trainning times. Could do more but time connsuming
#
# for _ in range(time_train):
# x_train, x_test,y_train, y_test = sklearn.model_selection.train_test_split(x,y,test_size=0.1)
#
# # create a training model
# linear = linear_model.LinearRegression()
#
# # find the best fit line of the training data
# linear.fit(x_train,y_train)
#
# # get the accuracy of the prediction. Check how well the algorithm is?
# acc = linear.score(x_test, y_test)
#
# #save the best model <- we only save BEST one (higher accuracy)
# if acc > best:
# best =acc
# # -- save our model for later use
# # create a pickle file for us in our directory that we can open and use that
# with open("studentmodel.pickle","wb") as f:
# pickle.dump(linear,f)
# read in our pickle file
pickle_in = open("studentmodel.pickle","rb")
# load pickle to our linear models
linear = pickle.load(pickle_in)
# get the accuracy of the prediction. Check how well the algorithm is?
acc = linear.score(x_test, y_test)
print ("The accuracy of prediction: ",acc) #output : 0.84 ~ 84% of accuracy
print ("Coefficient: ",linear.coef_) # slopes of the linear in multi-dimension
print("Intercepts:",linear.intercept_) # b in y= am+ b
# get machine predict G3 on each student data on the test data (x_test,the portion we did not train)
predictions = linear.predict(x_test)
# print out prediction
for x in range(len(predictions)):
    print("predicted result: ", predictions[x], "input data: ", x_test[x], "actual result: ", y_test[x])
# Plot <- see correlations we have between each attribute affect toward G3= final grade
p = 'G1'
style.use("ggplot")
pyplot.scatter(data[p], data["G3"])
pyplot.xlabel(p)
pyplot.ylabel("G3=Final Grade")
pyplot.show()
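# A hypothetical single-student prediction; the feature order must match the
# columns used for training above (G1, G2, studytime, failures, absences):
#   new_student = np.array([[12, 13, 2, 0, 4]])
#   print(linear.predict(new_student))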
|
Python
|
CL
|
d878d8effd1382a31324ff785003e4fcd058d307aed8c02d26e0038572bc0bb5
|
# -*- coding: utf-8 -*-
# Copyright 2017 Rooms For (Hong Kong) Limted T/A OSCG
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, api, fields
class SupplierStock(models.Model):
_inherit = "supplier.stock"
# Using "display_name" field computed by name_get() method to create the form view's representation
_rec_name = 'display_name'
# Field to access through related field: Supplier.Stock > Product.Product > Product.Template
hk_retail = fields.Float(
'Retail HKD',
related='product_id.list_price',
store=True,
)
# quantity, computed field
partner_qty = fields.Char(
string='Evaluated Quantity',
store=True,
)
# Cheapest entry of product_id?
lowest_cost = fields.Boolean(
string='Cheapest entry',
store=True,
)
    # Flags those supplier.stock records that have multiple entries with the same product_id
has_duplicates = fields.Boolean(
string='Has Duplicates',
store=True,
)
#For form view
image_medium = fields.Binary(
'Image',
related='product_id.product_tmpl_id.image_medium',
readonly=True,
)
short_loc_name = fields.Char(
"Location",
related='partner_loc_id.short_loc')
# # Overwriting display_name's method for Supplier Access User
# @api.multi
# def name_get(self, *args, **kwargs):
# result = []
# for rec in self:
# result.append(
# (rec.id, rec.product_id.name)
# )
# return result
    @api.multi
    def _get_quantity(self):
        """Bucket the on-hand quantity into partner_qty and refresh the
        lowest_cost / has_duplicates flags for every entry of this product."""
for ps in self:
if ps.quantity == 0.0:
ps.partner_qty = '0'
elif ps.quantity == 1.0:
ps.partner_qty = '1'
elif ps.quantity == 2.0:
ps.partner_qty = '2'
elif ps.quantity >= 3.0:
ps.partner_qty = '>=3'
            ps_products = self.sudo().search(
[('product_id', '=', ps.product_id.id)], order='price_unit_base ASC'
)
if ps_products:
for psc in ps_products:
if len(ps_products) >=2:
psc.sudo().write({
'lowest_cost': False,
'has_duplicates': True
})
else:
psc.sudo().write({
'lowest_cost': False,
'has_duplicates': False,
})
ps_products[0].sudo().write({
'lowest_cost': True
})
@api.multi
def write(self, vals):
res = super(SupplierStock, self).write(vals)
if 'quantity' in vals or 'price_unit' in vals:
for ps in self:
ps._get_quantity()
return res
@api.model
def create(self,vals):
res =super(SupplierStock,self).create(vals)
res._get_quantity()
return res
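# Illustrative behaviour (hypothetical records): when two supplier.stock rows
# share the same product_id, _get_quantity marks both with has_duplicates=True
# and only the row with the lowest price_unit_base keeps lowest_cost=True.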
|
Python
|
CL
|
6778ea1c35c083e31d7a834226592bf8b50eb6bbadb70e29e814d21b154c870f
|
import torch
from torch import nn
from einops.layers.torch import Rearrange, Reduce
class Affine(nn.Module):
    """Element-wise affine transform that replaces LayerNorm (the Aff layer in ResMLP)."""
    def __init__(self, emb_dim) -> None:
        super().__init__()
        self.g = nn.Parameter(torch.ones(1, 1, emb_dim))
        # bias starts at zero so the layer is the identity at initialisation
        self.b = nn.Parameter(torch.zeros(1, 1, emb_dim))
def forward(self, x): # [batch_size, seq_length, emb_dim]
return x * self.g + self.b
class PreAffinePostLayerScale(nn.Module):
"""From CaiT"""
def __init__(self, emb_dim, layer, fn):
super().__init__()
if layer <= 18:
init_eps = 0.1
elif 18 < layer <= 24:
init_eps = 1e-5
else:
init_eps = 1e-6
scale = torch.full((1, 1, emb_dim), init_eps)
self.scale = nn.Parameter(scale)
self.affine = Affine(emb_dim)
self.fn = fn
def forward(self, x): # [batch_size, seq_length, emb_dim]
return self.fn(self.affine(x)) * self.scale + x
class ResMLP(nn.Module):
def __init__(
self, num_layers, seq_length, emb_dim, num_classes, expansion=4
):
super().__init__()
self.seq_length = seq_length
self.emb_dim = emb_dim
self.expansion = expansion
self.wrapper = lambda num_layers, module: PreAffinePostLayerScale(
emb_dim, num_layers + 1, module
)
self.model = nn.Sequential(
*[
nn.Sequential(self._make_token_mixing(i), self._make_channel_mixing(i))
for i in range(num_layers)
]
)
self.head = nn.Sequential(
Affine(emb_dim), Reduce("b c d -> b d", "mean"), nn.Linear(emb_dim, num_classes)
)
def _make_token_mixing(self, layer):
return self.wrapper(layer, nn.Conv1d(self.seq_length, self.seq_length, 1))
def _make_channel_mixing(self, layer):
model = nn.Sequential(
nn.Linear(self.emb_dim, self.emb_dim * self.expansion),
nn.GELU(),
nn.Linear(self.emb_dim * self.expansion, self.emb_dim),
)
return self.wrapper(layer, model)
def forward(self, x):
return self.head(self.model(self.embedding(x)))
class ResMLPVision(ResMLP):
def __init__(self, num_layers, in_channels, input_size, patch_size, emb_dim, expansion, num_classes):
assert not input_size % patch_size
seq_length = (input_size // patch_size) ** 2
embedding = nn.Sequential(
nn.Conv2d(in_channels, emb_dim, patch_size, patch_size),
Rearrange("b d h w -> b (h w) d"),
)
super().__init__(num_layers, seq_length, emb_dim, num_classes, expansion=expansion)
self.embedding = embedding
class ResMLPNLP(ResMLP):
def __init__(self, num_layers, num_tokens, seq_length, emb_dim, expansion, num_classes):
super().__init__(num_layers, seq_length, emb_dim, num_classes, expansion=expansion)
self.embedding = nn.Embedding(num_tokens, emb_dim)
def test_resmlp():
batch_size = 11
num_layers = 6
num_classes = 10
model = ResMLPVision(num_layers, 3, 60, 6, 256, 3, num_classes)
data = torch.randn(batch_size, 3, 60, 60)
rprint(model(data).shape) # [batch_size, num_classes]
if __name__ == "__main__":
from rich import print as rprint
from rich.traceback import install
install()
import pytorch_lightning as pl
pl.seed_everything(42)
test_resmlp()
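# A hypothetical NLP-style usage sketch (vocabulary size, sequence length and
# batch size below are illustrative only):
#   nlp_model = ResMLPNLP(num_layers=4, num_tokens=1000, seq_length=32,
#                         emb_dim=128, expansion=4, num_classes=2)
#   tokens = torch.randint(0, 1000, (8, 32))
#   logits = nlp_model(tokens)  # -> [8, 2]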
|
Python
|
CL
|
4ab9c17b2b41e8d5b3d3e94ccb5dbdd34c4d21da077cac337af93514c1c3210b
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose, PointStamped
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
#from styx_msgs.msg import TLStatus
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
from scipy.spatial import KDTree
import tf
from tf import transformations
import cv2
import yaml
import math
import time
import numpy as np
#import PyKDL
STATE_COUNT_THRESHOLD = 3
UPDATE_RATE = 2
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
        self.waypoints_2d = None
        self.waypoint_tree = None  # KDTree built in waypoints_cb; name matches usage below
self.camera_image = None
self.lights = []
self.is_site = True
self.has_image = True
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb, queue_size=1)
config_string = rospy.get_param("/traffic_light_config")
self.config = yaml.load(config_string)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
# For debugging the image
self.image_display = rospy.Publisher('/image_proccessed', Image, queue_size=1)
self.bridge = CvBridge()
self.light_classifier = TLClassifier()
self.listener = tf.TransformListener()
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
# rospy.spin()
self.loop()
def loop(self):
rate = rospy.Rate(UPDATE_RATE)
while not rospy.is_shutdown():
self.find_traffic_lights()
rate.sleep()
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x,
waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
self.lights = msg.lights
def image_cb(self, msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
self.has_image = True
self.camera_image = msg
def find_traffic_lights(self):
# Find the traffic light state and the way point related to it.
light_wp, state = self.process_traffic_lights()
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if state == TrafficLight.RED else -1
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
def get_closest_waypoint(self, x, y):
"""Identifies the closest path waypoint to the given position
https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
Args:
pose (Pose): position to match a waypoint to
Returns:
int: index of the closest waypoint in self.waypoints
"""
#TODO implement
closest_idx = 0
if self.waypoint_tree is not None:
closest_idx = self.waypoint_tree.query([x, y], 1)[1]
return closest_idx
def project_to_image_plane(self, point_in_world):
"""Project point from 3D world coordinates to 2D camera image location
Args:
point_in_world (Point): 3D location of a point in the world
Returns:
x (int): x coordinate of target point in image
y (int): y coordinate of target point in image
"""
fx = self.config['camera_info']['focal_length_x']
fy = self.config['camera_info']['focal_length_y']
image_width = self.config['camera_info']['image_width']
image_height = self.config['camera_info']['image_height']
rospy.loginfo("project_to_image called {} {} : f {} {}".format(image_width, image_height, fx, fy))
# get transform between pose of camera and world frame
trans = None
rot = None
try:
now = rospy.Time.now()
self.listener.waitForTransform("/base_link",
"/world", now, rospy.Duration(1.0))
(trans, rot) = self.listener.lookupTransform("/base_link",
"/world", now)
self.is_site = False
except (tf.Exception, tf.LookupException, tf.ConnectivityException):
rospy.logerr("Failed to find camera to map transform")
self.is_site = True
return (None, None)
# Project traffic light pose in xyz to image pixels.
f = 2300
x_offset = -30
y_offset = 340
fx = f
fy = f
T3 = np.array([trans[0], trans[1], trans[2]]).transpose()
R2 = tf.transformations.quaternion_matrix(rot)
R3 = R2[:3,:3]
P2 = np.array([point_in_world.x, point_in_world.y, point_in_world.z]).transpose()
P3 = R3.dot(P2) + T3
x = -P3[1]/P3[0]*fx + image_width/2 + x_offset
y = -P3[2]/P3[0]*fy + image_height/2 + y_offset
return (int(x), int(y))
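    # Projection sketch: with the light at P3 = R.P + T in the camera frame
    # (x forward, y left, z up), the pinhole model gives pixel coordinates
    # u = -P3_y / P3_x * fx + width/2 and v = -P3_z / P3_x * fy + height/2;
    # the hard-coded focal length and pixel offsets above appear to be tuned
    # for the simulator camera.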
def get_light_state(self, light):
"""Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# For testing
# return light.state
if(not light):
rospy.loginfo("Bad Light")
return False
if(not self.camera_image):
rospy.loginfo("Bad Image")
return False
if(not self.has_image):
self.prev_light_loc = None
rospy.loginfo("Project has no image")
return False
else:
rospy.loginfo("Project got an image")
image_orig = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
box, state = self.light_classifier.get_classification(image_orig)
rows = image_orig.shape[0]
cols = image_orig.shape[1]
x, y = self.project_to_image_plane(light.pose.pose.position)
'''
if (x<0) or (y<0) or (x>=cols) or (y>=rows):
self.has_image = False
return False
xcrop = 50
ycrop = 100
xmin = x - xcrop if (x-xcrop) >= 0 else 0
ymin = y - ycrop if (y-ycrop) >= 0 else 0
# TODO:
xmax = x + xcrop if (x + xcrop) <= cols-1 else cols-1
ymax = y + ycrop if (y + ycrop) <= rows-1 else rows-1
'''
        if box is not None:
            xmin, xmax, ymin, ymax = box
else:
xmin = 0
ymin = 0
xmax = cols
ymax = rows
image_cropped = image_orig[ymin:ymax,xmin:xmax]
# image_cropped = image_orig[0:rows,0:cols]
#TODO use light location to zoom in on traffic light in image
#state = self.light_classifier.get_classification(image_cropped)
image_message = self.bridge.cv2_to_imgmsg(image_cropped, "bgr8")
self.image_display.publish(image_message)
#Get classification
self.has_image = False
return state
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
closest_light = None
line_wp_idx = None
# List of positions that correspond to the line to stop in front of for a given intersection
stop_line_positions = self.config['stop_line_positions']
if(self.pose):
car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x,
self.pose.pose.position.y)
#TODO find the closest visible traffic light (if one exists)
diff = len(self.waypoints.waypoints)
for i, light in enumerate(self.lights):
# Get stop line waypoint index
line = stop_line_positions[i]
temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
# Find closest stop line waypoint index
d = temp_wp_idx - car_wp_idx
if d >= 0 and d < diff:
diff = d
closest_light = light
line_wp_idx = temp_wp_idx
else:
if(len(self.lights) > 0):
closest_light = self.lights[0]
if closest_light:
state = self.get_light_state(closest_light)
if(state == TrafficLight.GREEN):
rospy.loginfo("TL_detector GREEN {}".format(line_wp_idx))
elif(state == TrafficLight.RED):
rospy.loginfo("TL_detector RED {}".format(line_wp_idx))
elif(state == TrafficLight.YELLOW):
rospy.loginfo("TL_detector YELLOW {}".format(line_wp_idx))
return line_wp_idx, state
return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
|
Python
|
CL
|
28d99b56d5bf86b3e209c2f2806ebfe057c085af3a97391632a295c16a7ada52
|